diff --git a/.config/nextest.toml b/.config/nextest.toml index 26b4a000b9..c16b7b3a63 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -15,3 +15,12 @@ slow-timeout = { period = "2m", terminate-after = 10 } [[profile.default.overrides]] filter = "binary(e2e_testsuite)" slow-timeout = { period = "2m", terminate-after = 3 } + +[[profile.default.overrides]] +filter = "package(reth-era) and binary(it)" +slow-timeout = { period = "2m", terminate-after = 10 } + +# Allow slower ethereum node e2e tests (p2p + blobs) to run up to 5 minutes. +[[profile.default.overrides]] +filter = "package(reth-node-ethereum) and binary(e2e)" +slow-timeout = { period = "1m", terminate-after = 5 } diff --git a/.config/zepter.yaml b/.config/zepter.yaml index b754d06a06..dcc18676c4 100644 --- a/.config/zepter.yaml +++ b/.config/zepter.yaml @@ -12,7 +12,7 @@ workflows: # Check that `A` activates the features of `B`. "propagate-feature", # These are the features to check: - "--features=std,op,dev,asm-keccak,jemalloc,jemalloc-prof,tracy-allocator,serde-bincode-compat,serde,test-utils,arbitrary,bench,alloy-compat", + "--features=std,op,dev,asm-keccak,jemalloc,jemalloc-prof,tracy-allocator,serde-bincode-compat,serde,test-utils,arbitrary,bench,alloy-compat,min-error-logs,min-warn-logs,min-info-logs,min-debug-logs,min-trace-logs,otlp,js-tracer,portable,keccak-cache-global", # Do not try to add a new section to `[features]` of `A` only because `B` exposes that feature. There are edge-cases where this is still needed, but we can add them manually. "--left-side-feature-missing=ignore", # Ignore the case that `A` it outside of the workspace. Otherwise it will report errors in external dependencies that we have no influence on. diff --git a/.dockerignore b/.dockerignore index 39eccd1c44..7bf7475b1a 100644 --- a/.dockerignore +++ b/.dockerignore @@ -4,6 +4,7 @@ # include source files !/bin !/crates +!/pkg !/testing !book.toml !Cargo.lock @@ -11,6 +12,7 @@ !Cross.toml !deny.toml !Makefile +!README.md # include for vergen constants !/.git diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index eed64b157f..5e334d13c6 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -40,5 +40,6 @@ crates/tasks/ @mattsse crates/tokio-util/ @fgimenez crates/transaction-pool/ @mattsse @yongkangc crates/trie/ @Rjected @shekhirin @mediocregopher +bin/reth-bench-compare/ @mediocregopher @shekhirin @yongkangc etc/ @Rjected @shekhirin .github/ @gakonst @DaniPopes diff --git a/.github/actionlint.yaml b/.github/actionlint.yaml new file mode 100644 index 0000000000..7b484ec96b --- /dev/null +++ b/.github/actionlint.yaml @@ -0,0 +1,7 @@ +self-hosted-runner: + labels: + - depot-ubuntu-latest + - depot-ubuntu-latest-2 + - depot-ubuntu-latest-4 + - depot-ubuntu-latest-8 + - depot-ubuntu-latest-16 diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index 3c72a8d189..874b7d508c 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -11,6 +11,7 @@ exclude_crates=( # The following require investigation if they can be fixed reth-basic-payload-builder reth-bench + reth-bench-compare reth-cli reth-cli-commands reth-cli-runner @@ -68,6 +69,7 @@ exclude_crates=( reth-payload-builder # reth-metrics reth-provider # tokio reth-prune # tokio + reth-prune-static-files # reth-provider reth-stages-api # reth-provider, reth-prune reth-static-file # tokio reth-transaction-pool # c-kzg diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml index 
df111f97be..db18aa9ced 100644 --- a/.github/assets/hive/expected_failures.yaml +++ b/.github/assets/hive/expected_failures.yaml @@ -30,7 +30,7 @@ engine-withdrawals: - Corrupted Block Hash Payload (INVALID) (Paris) (reth) - Withdrawals Fork on Canonical Block 8 / Side Block 7 - 10 Block Re-Org (Paris) (reth) -engine-api: [] +engine-api: [ ] # no fix due to https://github.com/paradigmxyz/reth/issues/8732 engine-cancun: @@ -39,33 +39,34 @@ engine-cancun: # in hive or its dependencies - Blob Transaction Ordering, Multiple Clients (Cancun) (reth) -sync: [] +sync: [ ] -engine-auth: [] +engine-auth: [ ] -# tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage -# no fix: it's too expensive to check whether the storage is empty on each creation (? - need more context on WHY) +# EIP-7610 related tests (Revert creation in case of non-empty storage): # -# tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment -# modified consolidation contract, not necessarily practical on mainnet (? - need more context) +# tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage +# The test artificially creates an empty account with storage, then tests EIP-7610's behavior. +# On mainnet, ~25 such accounts exist as contract addresses (derived from keccak(prefix, caller, +# nonce/salt), not from public keys). No private key exists for contract addresses. To trigger +# this with EIP-7702, you'd need to recover a private key from one of the already deployed contract addresses - mathematically impossible. +# +# tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_* +# Requires hash collision on create2 address to target already deployed accounts with storage. +# ~20-30 such accounts exist from before the state-clear EIP. Creating new accounts targeting +# these requires hash collision - mathematically impossible to trigger on mainnet. +# ref: https://github.com/ethereum/go-ethereum/pull/28666#issuecomment-1891997143 +# +# System contract tests (already fixed and deployed): # # tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout and test_invalid_log_length -# system contract is already fixed and deployed; tests cover scenarios where contract is +# System contract is already fixed and deployed; tests cover scenarios where contract is # malformed which can't happen retroactively. No point in adding checks. # # tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py::test_system_contract_deployment -# post-fork test contract deployment, should fix for spec compliance but not realistic on mainnet (? - need more context) -# -# tests/osaka/eip7594_peerdas/test_max_blob_per_tx.py::test_max_blobs_per_tx_fork_transition -# reth enforces 6 blob limit from EIP-7594, but EIP-7892 raises it to 9. -# Needs constant update in alloy. https://github.com/paradigmxyz/reth/issues/18975 -# -# tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_* -# status (27th June 2024): was discussed in ACDT meeting, need to be raised in ACDE. -# tests require hash collision on already deployed accounts with storage - mathematically -# impossible to trigger on mainnet. ~20-30 such accounts exist from before the state-clear -# EIP, but creating new accounts targeting these requires hash collision. 
-# ref: https://github.com/ethereum/go-ethereum/pull/28666#issuecomment-1891997143 +# tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment +# Post-fork system contract deployment tests. Should be fixed for spec compliance but not realistic +# on mainnet as these contracts are already deployed at the correct addresses. eels/consume-engine: - tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage[fork_Prague-blockchain_test_engine-zero_nonce]-reth - tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test_engine-deploy_after_fork-nonzero_balance]-reth @@ -146,6 +147,13 @@ eels/consume-engine: - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth - tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth + +# Blob limit tests: +# +# tests/osaka/eip7594_peerdas/test_max_blob_per_tx.py::test_max_blobs_per_tx_fork_transition[fork_PragueToOsakaAtTime15k-blob_count_7-blockchain_test] +# This test inserts a chain via chain.rlp whose last block is invalid and expects the import to stop there. That doesn't work with our pipeline import approach, so the import fails as soon as the invalid block is detected. +# In other words, if this test fails, it means we're correctly rejecting the block. +# 
The same test exists in the consume-engine simulator where it is passing as expected eels/consume-rlp: - tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage[fork_Prague-blockchain_test-zero_nonce]-reth - tests/prague/eip7251_consolidations/test_modified_consolidation_contract.py::test_system_contract_errors[fork_Prague-blockchain_test_engine-system_contract_reaches_gas_limit-system_contract_0x0000bbddc7ce488642fb579f8b00f3a590007251]-reth diff --git a/.github/assets/hive/run_simulator.sh b/.github/assets/hive/run_simulator.sh index cb4d8110df..45a3184da3 100755 --- a/.github/assets/hive/run_simulator.sh +++ b/.github/assets/hive/run_simulator.sh @@ -7,7 +7,7 @@ sim="${1}" limit="${2}" run_hive() { - hive --sim "${sim}" --sim.limit "${limit}" --sim.parallelism 8 --client reth 2>&1 | tee /tmp/log || true + hive --sim "${sim}" --sim.limit "${limit}" --sim.parallelism 16 --client reth 2>&1 | tee /tmp/log || true } check_log() { diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 0203a4654a..264b1059ab 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -11,18 +11,19 @@ env: CARGO_TERM_COLOR: always BASELINE: base SEED: reth + RUSTC_WRAPPER: "sccache" name: bench jobs: codspeed: - runs-on: - group: Reth + runs-on: depot-ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 with: submodules: true - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index c4262cbb3a..c52a5007ad 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -10,13 +10,16 @@ on: types: [opened, reopened, synchronize, closed] merge_group: +env: + RUSTC_WRAPPER: "sccache" + jobs: build: - runs-on: ubuntu-latest + runs-on: depot-ubuntu-latest-8 timeout-minutes: 90 steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Install bun uses: oven-sh/setup-bun@v2 @@ -33,6 +36,8 @@ jobs: - name: Install Rust nightly uses: dtolnay/rust-toolchain@nightly + - uses: mozilla-actions/sccache-action@v0.0.9 + - name: Build docs run: cd docs/vocs && bash scripts/build-cargo-docs.sh diff --git a/.github/workflows/compact.yml b/.github/workflows/compact.yml index 8a18df872d..9293d52b80 100644 --- a/.github/workflows/compact.yml +++ b/.github/workflows/compact.yml @@ -13,12 +13,12 @@ on: env: CARGO_TERM_COLOR: always + RUSTC_WRAPPER: "sccache" name: compact-codec jobs: compact-codec: - runs-on: - group: Reth + runs-on: depot-ubuntu-latest strategy: matrix: bin: @@ -27,11 +27,12 @@ jobs: steps: - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - name: Checkout base - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: ref: ${{ github.base_ref || 'main' }} # On `main` branch, generates test vectors and serializes them to disk using `Compact`. 
@@ -39,7 +40,7 @@ jobs: run: | ${{ matrix.bin }} -- test-vectors compact --write - name: Checkout PR - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: clean: false # On incoming merge try to read and decode previously generated vectors with `Compact` diff --git a/.github/workflows/docker-git.yml b/.github/workflows/docker-git.yml index 62830608d6..68bcdad0df 100644 --- a/.github/workflows/docker-git.yml +++ b/.github/workflows/docker-git.yml @@ -33,7 +33,7 @@ jobs: - name: 'Build and push the git-sha-tagged op-reth image' command: 'make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME GIT_SHA=$GIT_SHA PROFILE=maxperf op-docker-build-push-git-sha' steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 diff --git a/.github/workflows/docker-nightly.yml b/.github/workflows/docker-nightly.yml index 213b231406..3e67ef6877 100644 --- a/.github/workflows/docker-nightly.yml +++ b/.github/workflows/docker-nightly.yml @@ -35,7 +35,7 @@ jobs: - name: 'Build and push the nightly profiling op-reth image' command: 'make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=profiling op-docker-build-push-nightly-profiling' steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Remove bloatware uses: laverdet/remove-bloatware@v1.0.0 with: diff --git a/.github/workflows/docker-tag-latest.yml b/.github/workflows/docker-tag-latest.yml new file mode 100644 index 0000000000..1f76254d49 --- /dev/null +++ b/.github/workflows/docker-tag-latest.yml @@ -0,0 +1,73 @@ +# Tag a specific Docker release version as latest + +name: docker-tag-latest + +on: + workflow_dispatch: + inputs: + version: + description: 'Release version to tag as latest (e.g., v1.8.4)' + required: true + type: string + tag_reth: + description: 'Tag reth image as latest' + required: false + type: boolean + default: true + tag_op_reth: + description: 'Tag op-reth image as latest' + required: false + type: boolean + default: false + +env: + DOCKER_USERNAME: ${{ github.actor }} + +jobs: + tag-reth-latest: + name: Tag reth as latest + runs-on: ubuntu-24.04 + if: ${{ inputs.tag_reth }} + permissions: + packages: write + contents: read + steps: + - name: Log in to Docker + run: | + echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin + + - name: Pull reth release image + run: | + docker pull ghcr.io/${{ github.repository_owner }}/reth:${{ inputs.version }} + + - name: Tag reth as latest + run: | + docker tag ghcr.io/${{ github.repository_owner }}/reth:${{ inputs.version }} ghcr.io/${{ github.repository_owner }}/reth:latest + + - name: Push reth latest tag + run: | + docker push ghcr.io/${{ github.repository_owner }}/reth:latest + + tag-op-reth-latest: + name: Tag op-reth as latest + runs-on: ubuntu-24.04 + if: ${{ inputs.tag_op_reth }} + permissions: + packages: write + contents: read + steps: + - name: Log in to Docker + run: | + echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin + + - name: Pull op-reth release image + run: | + docker pull ghcr.io/${{ github.repository_owner }}/op-reth:${{ inputs.version }} + + - name: Tag op-reth as latest + run: | + docker tag ghcr.io/${{ github.repository_owner }}/op-reth:${{ inputs.version }} ghcr.io/${{ github.repository_owner }}/op-reth:latest + + - name: Push op-reth latest tag + run: | + docker push ghcr.io/${{ 
github.repository_owner }}/op-reth:latest diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 0768ea8e79..e922677225 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -32,7 +32,7 @@ jobs: - name: "Build and push op-reth image" command: "make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=maxperf op-docker-build-push" steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 @@ -68,7 +68,7 @@ jobs: - name: "Build and push op-reth image" command: "make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=maxperf op-docker-build-push-latest" steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 16c9fb2f61..f31fefed35 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -11,6 +11,7 @@ on: env: CARGO_TERM_COLOR: always SEED: rustethereumethereumrust + RUSTC_WRAPPER: "sccache" concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -19,14 +20,14 @@ concurrency: jobs: test: name: e2e-testsuite - runs-on: - group: Reth + runs-on: depot-ubuntu-latest-4 env: RUST_BACKTRACE: 1 timeout-minutes: 90 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: dtolnay/rust-toolchain@stable + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: taiki-e/install-action@nextest - uses: Swatinem/rust-cache@v2 with: @@ -43,4 +44,3 @@ jobs: --exclude 'op-reth' \ --exclude 'reth' \ -E 'binary(e2e_testsuite)' - diff --git a/.github/workflows/grafana.yml b/.github/workflows/grafana.yml new file mode 100644 index 0000000000..f34b342401 --- /dev/null +++ b/.github/workflows/grafana.yml @@ -0,0 +1,21 @@ +name: grafana + +on: + pull_request: + merge_group: + push: + branches: [main] + +jobs: + check-dashboard: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - name: Check for ${DS_PROMETHEUS} in overview.json + run: | + if grep -Fn '${DS_PROMETHEUS}' etc/grafana/dashboards/overview.json; then + echo "Error: overview.json contains '\${DS_PROMETHEUS}' placeholder" + echo "Please replace it with '\${datasource}'" + exit 1 + fi + echo "✓ overview.json does not contain '\${DS_PROMETHEUS}' placeholder" diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index ae14797758..c9f0f267da 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -24,12 +24,11 @@ jobs: prepare-hive: if: github.repository == 'paradigmxyz/reth' timeout-minutes: 45 - runs-on: - group: Reth + runs-on: depot-ubuntu-latest-16 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Checkout hive tests - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: repository: ethereum/hive path: hivetests @@ -45,7 +44,7 @@ jobs: - name: Restore hive assets cache id: cache-hive - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: ./hive_assets key: hive-assets-${{ steps.hive-commit.outputs.hash }}-${{ hashFiles('.github/assets/hive/build_simulators.sh') }} @@ -68,7 +67,7 @@ jobs: chmod +x hive - name: Upload hive assets - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: hive_assets path: ./hive_assets @@ -179,23 +178,22 @@ jobs: - prepare-reth - prepare-hive name: run ${{ matrix.scenario.sim 
}}${{ matrix.scenario.limit && format(' - {0}', matrix.scenario.limit) }} - runs-on: - group: Reth + runs-on: depot-ubuntu-latest-16 permissions: issues: write steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 with: fetch-depth: 0 - name: Download hive assets - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v7 with: name: hive_assets path: /tmp - name: Download reth image - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v7 with: name: artifacts path: /tmp @@ -209,7 +207,7 @@ jobs: chmod +x /usr/local/bin/hive - name: Checkout hive tests - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: repository: ethereum/hive ref: master @@ -247,8 +245,7 @@ jobs: notify-on-error: needs: test if: failure() - runs-on: - group: Reth + runs-on: ubuntu-latest steps: - name: Slack Webhook Action uses: rtCamp/action-slack-notify@v2 diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 90e3287917..46f5670c72 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -14,6 +14,7 @@ on: env: CARGO_TERM_COLOR: always SEED: rustethereumethereumrust + RUSTC_WRAPPER: "sccache" concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -23,8 +24,7 @@ jobs: test: name: test / ${{ matrix.network }} if: github.event_name != 'schedule' - runs-on: - group: Reth + runs-on: depot-ubuntu-latest-4 env: RUST_BACKTRACE: 1 strategy: @@ -32,12 +32,13 @@ jobs: network: ["ethereum", "optimism"] timeout-minutes: 60 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - name: Install Geth run: .github/assets/install_geth.sh - uses: taiki-e/install-action@nextest + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -71,12 +72,13 @@ jobs: if: github.event_name == 'schedule' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - uses: taiki-e/install-action@nextest + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - name: run era1 files integration tests - run: cargo nextest run --package reth-era --test it -- --ignored + run: cargo nextest run --release --package reth-era --test it -- --ignored diff --git a/.github/workflows/kurtosis-op.yml b/.github/workflows/kurtosis-op.yml index 0e08d1641d..a4ad10758c 100644 --- a/.github/workflows/kurtosis-op.yml +++ b/.github/workflows/kurtosis-op.yml @@ -9,7 +9,7 @@ on: push: tags: - - '*' + - "*" env: CARGO_TERM_COLOR: always @@ -32,17 +32,16 @@ jobs: strategy: fail-fast: false name: run kurtosis - runs-on: - group: Reth + runs-on: depot-ubuntu-latest needs: - prepare-reth steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 with: fetch-depth: 0 - name: Download reth image - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v7 with: name: artifacts path: /tmp @@ -83,12 +82,10 @@ jobs: kurtosis service logs -a op-devnet op-cl-2151908-2-op-node-op-reth-op-kurtosis exit 1 - notify-on-error: needs: test if: failure() - runs-on: - group: Reth + runs-on: ubuntu-latest steps: - name: Slack Webhook Action uses: rtCamp/action-slack-notify@v2 diff --git a/.github/workflows/kurtosis.yml b/.github/workflows/kurtosis.yml index f78fc81235..c846e1b585 100644 --- a/.github/workflows/kurtosis.yml +++ 
b/.github/workflows/kurtosis.yml @@ -9,7 +9,7 @@ on: push: tags: - - '*' + - "*" env: CARGO_TERM_COLOR: always @@ -30,17 +30,16 @@ jobs: strategy: fail-fast: false name: run kurtosis - runs-on: - group: Reth + runs-on: depot-ubuntu-latest needs: - prepare-reth steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 with: fetch-depth: 0 - name: Download reth image - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v7 with: name: artifacts path: /tmp @@ -54,13 +53,12 @@ jobs: - name: Run kurtosis uses: ethpandaops/kurtosis-assertoor-github-action@v1 with: - ethereum_package_args: '.github/assets/kurtosis_network_params.yaml' + ethereum_package_args: ".github/assets/kurtosis_network_params.yaml" notify-on-error: needs: test if: failure() - runs-on: - group: Reth + runs-on: ubuntu-latest steps: - name: Slack Webhook Action uses: rtCamp/action-slack-notify@v2 diff --git a/.github/workflows/label-pr.yml b/.github/workflows/label-pr.yml index d4b4bf07cc..7211f383a8 100644 --- a/.github/workflows/label-pr.yml +++ b/.github/workflows/label-pr.yml @@ -11,7 +11,7 @@ jobs: issues: write pull-requests: write steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 with: fetch-depth: 0 diff --git a/.github/workflows/lint-actions.yml b/.github/workflows/lint-actions.yml index f408c4f50a..6543f8506b 100644 --- a/.github/workflows/lint-actions.yml +++ b/.github/workflows/lint-actions.yml @@ -12,7 +12,7 @@ jobs: actionlint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Download actionlint id: get_actionlint run: bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 309a25218b..cb3f73ea0d 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -8,11 +8,12 @@ on: env: CARGO_TERM_COLOR: always + RUSTC_WRAPPER: "sccache" jobs: clippy-binaries: name: clippy binaries / ${{ matrix.type }} - runs-on: ubuntu-latest + runs-on: depot-ubuntu-latest timeout-minutes: 30 strategy: matrix: @@ -21,11 +22,12 @@ jobs: args: --workspace --lib --examples --tests --benches --locked features: "ethereum asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@clippy with: components: clippy + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -40,14 +42,15 @@ jobs: clippy: name: clippy - runs-on: ubuntu-latest + runs-on: depot-ubuntu-latest timeout-minutes: 30 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@nightly with: components: clippy + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -56,15 +59,16 @@ jobs: RUSTFLAGS: -D warnings wasm: - runs-on: ubuntu-latest + runs-on: depot-ubuntu-latest timeout-minutes: 30 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable with: target: wasm32-wasip1 - uses: taiki-e/install-action@cargo-hack + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -75,15 +79,16 @@ jobs: .github/assets/check_wasm.sh riscv: - runs-on: ubuntu-latest + runs-on: 
depot-ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable with: target: riscv32imac-unknown-none-elf - uses: taiki-e/install-action@cargo-hack + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -92,21 +97,27 @@ jobs: run: .github/assets/check_rv32imac.sh crate-checks: - runs-on: ubuntu-latest - timeout-minutes: 30 + name: crate-checks (${{ matrix.partition }}/${{ matrix.total_partitions }}) + runs-on: depot-ubuntu-latest-4 + strategy: + matrix: + partition: [1, 2, 3] + total_partitions: [3] + timeout-minutes: 60 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - uses: taiki-e/install-action@cargo-hack + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - - run: cargo hack check --workspace + - run: cargo hack check --workspace --partition ${{ matrix.partition }}/${{ matrix.total_partitions }} msrv: name: MSRV - runs-on: ubuntu-latest + runs-on: depot-ubuntu-latest timeout-minutes: 30 strategy: matrix: @@ -114,11 +125,12 @@ jobs: - binary: reth - binary: op-reth steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@master with: toolchain: "1.88" # MSRV + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -128,12 +140,13 @@ jobs: docs: name: docs - runs-on: ubuntu-latest + runs-on: depot-ubuntu-latest-4 timeout-minutes: 30 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@nightly + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -145,25 +158,27 @@ jobs: fmt: name: fmt - runs-on: ubuntu-latest + runs-on: depot-ubuntu-latest timeout-minutes: 30 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@nightly with: components: rustfmt + - uses: mozilla-actions/sccache-action@v0.0.9 - name: Run fmt run: cargo fmt --all --check udeps: name: udeps - runs-on: ubuntu-latest + runs-on: depot-ubuntu-latest timeout-minutes: 30 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@nightly + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -172,19 +187,21 @@ jobs: book: name: book - runs-on: ubuntu-latest + runs-on: depot-ubuntu-latest timeout-minutes: 30 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@nightly + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - - run: cargo build --bin reth --workspace --features ethereum + - run: cargo build --bin reth --workspace + - run: cargo build --bin op-reth --workspace env: RUSTFLAGS: -D warnings - - run: ./docs/cli/update.sh target/debug/reth + - run: ./docs/cli/update.sh target/debug/reth target/debug/op-reth - name: Check docs changes run: git diff --exit-code @@ -192,7 +209,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 30 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: crate-ci/typos@v1 check-toml: @@ -200,7 +217,7 @@ 
jobs: timeout-minutes: 30 steps: - name: Checkout repository - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Run dprint uses: dprint/check@v2.3 with: @@ -210,7 +227,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 30 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Check dashboard JSON with jq uses: sergeysova/jq-action@v2 with: @@ -220,37 +237,45 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 30 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - name: Ensure no arbitrary or proptest dependency on default build run: cargo tree --package reth -e=features,no-dev | grep -Eq "arbitrary|proptest" && exit 1 || exit 0 - # Checks that selected rates can compile with power set of features + # Checks that selected crates can compile with power set of features features: name: features - runs-on: ubuntu-latest + runs-on: depot-ubuntu-latest timeout-minutes: 30 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@clippy + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - name: cargo install cargo-hack uses: taiki-e/install-action@cargo-hack - - run: make check-features + - run: | + cargo hack check \ + --package reth-codecs \ + --package reth-primitives-traits \ + --package reth-primitives \ + --feature-powerset \ + --depth 2 env: RUSTFLAGS: -D warnings # Check crates correctly propagate features feature-propagation: - runs-on: ubuntu-latest + runs-on: depot-ubuntu-latest timeout-minutes: 20 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: dtolnay/rust-toolchain@stable + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: rui314/setup-mold@v1 - uses: taiki-e/cache-cargo-install-action@v2 with: diff --git a/.github/workflows/prepare-reth.yml b/.github/workflows/prepare-reth.yml index 37a9445af7..c47621b7a1 100644 --- a/.github/workflows/prepare-reth.yml +++ b/.github/workflows/prepare-reth.yml @@ -26,10 +26,9 @@ jobs: prepare-reth: if: github.repository == 'paradigmxyz/reth' timeout-minutes: 45 - runs-on: - group: Reth + runs-on: depot-ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - run: mkdir artifacts - name: Set up Docker Buildx @@ -51,7 +50,7 @@ jobs: - name: Upload reth image id: upload - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: artifacts path: ./artifacts diff --git a/.github/workflows/release-dist.yml b/.github/workflows/release-dist.yml index 57a6f311d0..649a968ecf 100644 --- a/.github/workflows/release-dist.yml +++ b/.github/workflows/release-dist.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Update Homebrew formula - uses: dawidd6/action-homebrew-bump-formula@v5 + uses: dawidd6/action-homebrew-bump-formula@v7 with: token: ${{ secrets.HOMEBREW }} no_fork: true diff --git a/.github/workflows/release-reproducible.yml b/.github/workflows/release-reproducible.yml index 9726cb77b8..9c80c1e936 100644 --- a/.github/workflows/release-reproducible.yml +++ b/.github/workflows/release-reproducible.yml @@ -1,11 +1,11 @@ -# This workflow is for building and pushing reproducible Docker images for releases. 
+# This workflow is for building and pushing reproducible artifacts for releases name: release-reproducible on: - push: - tags: - - v* + workflow_run: + workflows: [release] + types: [completed] env: DOCKER_REPRODUCIBLE_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/reth-reproducible @@ -13,23 +13,41 @@ env: jobs: extract-version: name: extract version + if: ${{ github.event.workflow_run.conclusion == 'success' }} runs-on: ubuntu-latest steps: - - name: Extract version - run: echo "VERSION=$(echo ${GITHUB_REF#refs/tags/})" >> $GITHUB_OUTPUT + - name: Extract version from triggering tag id: extract_version + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + # Get the tag that points to the head SHA of the triggering workflow + TAG=$(gh api /repos/${{ github.repository }}/git/refs/tags \ + --jq '.[] | select(.object.sha == "${{ github.event.workflow_run.head_sha }}") | .ref' \ + | head -1 \ + | sed 's|refs/tags/||') + + if [ -z "$TAG" ]; then + echo "No tag found for SHA ${{ github.event.workflow_run.head_sha }}" + exit 1 + fi + + echo "VERSION=$TAG" >> $GITHUB_OUTPUT outputs: VERSION: ${{ steps.extract_version.outputs.VERSION }} build-reproducible: - name: build and push reproducible image + name: build and push reproducible image and binaries runs-on: ubuntu-latest - needs: extract-version + needs: [extract-version] permissions: packages: write - contents: read + contents: write steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 + with: + ref: ${{ needs.extract-version.outputs.VERSION }} + - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 @@ -40,20 +58,37 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Extract Rust version from Cargo.toml + - name: Extract Rust version id: rust_version run: | - RUST_VERSION=$(cargo metadata --format-version 1 | jq -r '.packages[] | select(.name == "reth") | .rust_version' || echo "1") - echo "RUST_VERSION=$RUST_VERSION" >> $GITHUB_OUTPUT + RUST_TOOLCHAIN=$(rustc --version | cut -d' ' -f2) + echo "RUST_TOOLCHAIN=$RUST_TOOLCHAIN" >> $GITHUB_OUTPUT - - name: Build and push reproducible image + - name: Build reproducible artifacts + uses: docker/build-push-action@v6 + id: docker_build + with: + context: . + file: ./Dockerfile.reproducible + build-args: | + RUST_TOOLCHAIN=${{ steps.rust_version.outputs.RUST_TOOLCHAIN }} + VERSION=${{ needs.extract-version.outputs.VERSION }} + target: artifacts + outputs: type=local,dest=./docker-artifacts + cache-from: type=gha + cache-to: type=gha,mode=max + env: + DOCKER_BUILD_RECORD_UPLOAD: false + + - name: Build and push final image uses: docker/build-push-action@v6 with: context: . 
file: ./Dockerfile.reproducible push: true build-args: | - RUST_VERSION=${{ steps.rust_version.outputs.RUST_VERSION }} + RUST_TOOLCHAIN=${{ steps.rust_version.outputs.RUST_TOOLCHAIN }} + VERSION=${{ needs.extract-version.outputs.VERSION }} tags: | ${{ env.DOCKER_REPRODUCIBLE_IMAGE_NAME }}:${{ needs.extract-version.outputs.VERSION }} ${{ env.DOCKER_REPRODUCIBLE_IMAGE_NAME }}:latest @@ -62,3 +97,30 @@ jobs: provenance: false env: DOCKER_BUILD_RECORD_UPLOAD: false + + - name: Prepare artifacts from Docker build + run: | + mkdir reproducible-artifacts + cp docker-artifacts/reth reproducible-artifacts/reth-reproducible-${{ needs.extract-version.outputs.VERSION }}-x86_64-unknown-linux-gnu + cp docker-artifacts/*.deb reproducible-artifacts/reth-${{ needs.extract-version.outputs.VERSION }}-x86_64-unknown-linux-gnu-reproducible.deb + + - name: Configure GPG and create artifacts + env: + GPG_SIGNING_KEY: ${{ secrets.GPG_SIGNING_KEY }} + GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} + run: | + export GPG_TTY=$(tty) + echo -n "$GPG_SIGNING_KEY" | base64 --decode | gpg --batch --import + + cd reproducible-artifacts + tar -czf reth-reproducible-${{ needs.extract-version.outputs.VERSION }}-x86_64-unknown-linux-gnu.tar.gz reth-reproducible-${{ needs.extract-version.outputs.VERSION }}-x86_64-unknown-linux-gnu --remove-files + echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab reth-reproducible-${{ needs.extract-version.outputs.VERSION }}-x86_64-unknown-linux-gnu.tar.gz + echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab reth-${{ needs.extract-version.outputs.VERSION }}-x86_64-unknown-linux-gnu-reproducible.deb + + - name: Upload reproducible artifacts to release + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh release upload ${{ needs.extract-version.outputs.VERSION }} \ + reproducible-artifacts/* + diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f871b163a2..84abb32699 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -18,10 +18,11 @@ env: REPO_NAME: ${{ github.repository_owner }}/reth IMAGE_NAME: ${{ github.repository_owner }}/reth OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth + REPRODUCIBLE_IMAGE_NAME: ${{ github.repository_owner }}/reth-reproducible CARGO_TERM_COLOR: always DOCKER_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/reth DOCKER_OP_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/op-reth - DEB_SUPPORTED_TARGETS: x86_64-unknown-linux-gnu aarch64-unknown-linux-gnu riscv64gc-unknown-linux-gnu + RUSTC_WRAPPER: "sccache" jobs: dry-run: @@ -49,8 +50,9 @@ jobs: needs: extract-version if: ${{ github.event.inputs.dry_run != 'true' }} steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: dtolnay/rust-toolchain@stable + - uses: mozilla-actions/sccache-action@v0.0.9 - name: Verify crate version matches tag # Check that the Cargo version starts with the tag, # so that Cargo version 1.4.8 can be matched against both v1.4.8 and v1.4.8-rc.1 @@ -78,7 +80,7 @@ jobs: profile: maxperf allow_fail: false - target: x86_64-apple-darwin - os: macos-13 + os: macos-14 profile: maxperf allow_fail: false - target: aarch64-apple-darwin @@ -99,11 +101,12 @@ jobs: - command: op-build binary: op-reth steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable with: target: ${{ matrix.configs.target }} + - uses: mozilla-actions/sccache-action@v0.0.9 - 
name: Install cross main id: cross_main run: | @@ -120,20 +123,11 @@ jobs: - name: Build Reth run: make PROFILE=${{ matrix.configs.profile }} ${{ matrix.build.command }}-${{ matrix.configs.target }} - - - name: Build Reth deb package - if: ${{ matrix.build.binary == 'reth' && contains(env.DEB_SUPPORTED_TARGETS, matrix.configs.target) }} - run: make build-deb-${{ matrix.configs.target }} PROFILE=${{ matrix.configs.profile }} VERSION=${{ needs.extract-version.outputs.VERSION }} - - name: Move binary run: | mkdir artifacts [[ "${{ matrix.configs.target }}" == *windows* ]] && ext=".exe" - - # Move deb packages if they exist - if [[ "${{ matrix.build.binary }}" == "reth" && "${{ env.DEB_SUPPORTED_TARGETS }}" == *"${{ matrix.configs.target }}"* ]]; then - mv "target/${{ matrix.configs.target }}/${{ matrix.configs.profile }}/${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb" ./artifacts - fi + mv "target/${{ matrix.configs.target }}/${{ matrix.configs.profile }}/${{ matrix.build.binary }}${ext}" ./artifacts - name: Configure GPG and create artifacts env: @@ -143,42 +137,25 @@ jobs: export GPG_TTY=$(tty) echo -n "$GPG_SIGNING_KEY" | base64 --decode | gpg --batch --import cd artifacts - tar -czf ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz ${{ matrix.build.binary }}*[!.deb] + tar -czf ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz ${{ matrix.build.binary }}* echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz - if [[ -f "${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb" ]]; then - echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb - fi - mv *tar.gz* *.deb* .. + mv *tar.gz* .. 
shell: bash - name: Upload artifact if: ${{ github.event.inputs.dry_run != 'true' }} - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz - name: Upload signature if: ${{ github.event.inputs.dry_run != 'true' }} - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz.asc path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz.asc - - name: Upload deb package - if: ${{ github.event.inputs.dry_run != 'true' && matrix.build.binary == 'reth' && contains(env.DEB_SUPPORTED_TARGETS, matrix.configs.target) }} - uses: actions/upload-artifact@v4 - with: - name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb - path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb - - - name: Upload deb package signature - if: ${{ github.event.inputs.dry_run != 'true' && matrix.build.binary == 'reth' && contains(env.DEB_SUPPORTED_TARGETS, matrix.configs.target) }} - uses: actions/upload-artifact@v4 - with: - name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb.asc - path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb.asc - draft-release: name: draft release runs-on: ubuntu-latest @@ -192,11 +169,11 @@ jobs: steps: # This is necessary for generating the changelog. # It has to come before "Download Artifacts" or else it deletes the artifacts. - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 with: fetch-depth: 0 - name: Download artifacts - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v7 - name: Generate full changelog id: changelog run: | diff --git a/.github/workflows/reproducible-build.yml b/.github/workflows/reproducible-build.yml index 0f5dd2e72d..5822697846 100644 --- a/.github/workflows/reproducible-build.yml +++ b/.github/workflows/reproducible-build.yml @@ -8,25 +8,73 @@ on: jobs: build: name: build reproducible binaries - runs-on: ubuntu-latest + runs-on: ${{ matrix.runner }} + strategy: + matrix: + include: + - runner: ubuntu-latest + machine: machine-1 + - runner: ubuntu-22.04 + machine: machine-2 steps: - - uses: actions/checkout@v5 - - uses: rui314/setup-mold@v1 + - uses: actions/checkout@v6 + - uses: dtolnay/rust-toolchain@stable with: target: x86_64-unknown-linux-gnu - - name: Install cargo-cache + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build reproducible binary with Docker run: | - cargo install cargo-cache - - name: Build Reth + RUST_TOOLCHAIN=$(rustc --version | cut -d' ' -f2) + docker build \ + --build-arg "RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \ + -f Dockerfile.reproducible -t reth:release \ + --target artifacts \ + --output type=local,dest=./target . 
+ + - name: Calculate SHA256 + id: sha256 run: | - make build-reth-reproducible - mv target/x86_64-unknown-linux-gnu/reproducible/reth reth-build-1 - - name: Clean cache - run: make clean && cargo cache -a - - name: Build Reth again + sha256sum target/reth > checksum.sha256 + echo "Binaries SHA256 on ${{ matrix.machine }}: $(cat checksum.sha256)" + + - name: Upload the hash + uses: actions/upload-artifact@v6 + with: + name: checksum-${{ matrix.machine }} + path: | + checksum.sha256 + retention-days: 1 + + compare: + name: compare reproducible binaries + needs: build + runs-on: ubuntu-latest + steps: + - name: Download artifacts from machine-1 + uses: actions/download-artifact@v7 + with: + name: checksum-machine-1 + path: machine-1/ + - name: Download artifacts from machine-2 + uses: actions/download-artifact@v7 + with: + name: checksum-machine-2 + path: machine-2/ + - name: Compare SHA256 hashes run: | - make build-reth-reproducible - mv target/x86_64-unknown-linux-gnu/reproducible/reth reth-build-2 - - name: Compare binaries - run: cmp reth-build-1 reth-build-2 + echo "=== SHA256 Comparison ===" + echo "Machine 1 hash:" + cat machine-1/checksum.sha256 + echo "Machine 2 hash:" + cat machine-2/checksum.sha256 + + if cmp -s machine-1/checksum.sha256 machine-2/checksum.sha256; then + echo "✅ SUCCESS: Binaries are identical (reproducible build verified)" + else + echo "❌ FAILURE: Binaries differ (reproducible build failed)" + exit 1 + fi diff --git a/.github/workflows/stage.yml b/.github/workflows/stage.yml index 7225d84cff..342c36a5c5 100644 --- a/.github/workflows/stage.yml +++ b/.github/workflows/stage.yml @@ -12,6 +12,7 @@ env: CARGO_TERM_COLOR: always FROM_BLOCK: 0 TO_BLOCK: 50000 + RUSTC_WRAPPER: "sccache" concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -22,16 +23,16 @@ jobs: name: stage-run-test # Only run stage commands test in merge groups if: github.event_name == 'merge_group' - runs-on: - group: Reth + runs-on: depot-ubuntu-latest env: RUST_LOG: info,sync=error RUST_BACKTRACE: 1 timeout-minutes: 60 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true diff --git a/.github/workflows/sync-era.yml b/.github/workflows/sync-era.yml index f2539b2fdc..e6ce12f962 100644 --- a/.github/workflows/sync-era.yml +++ b/.github/workflows/sync-era.yml @@ -9,6 +9,7 @@ on: env: CARGO_TERM_COLOR: always + RUSTC_WRAPPER: "sccache" concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -17,8 +18,7 @@ concurrency: jobs: sync: name: sync (${{ matrix.chain.bin }}) - runs-on: - group: Reth + runs-on: depot-ubuntu-latest env: RUST_LOG: info,sync=error RUST_BACKTRACE: 1 @@ -39,9 +39,10 @@ jobs: block: 10000 unwind-target: "0x118a6e922a8c6cab221fc5adfe5056d2b72d58c6580e9c5629de55299e2cf8de" steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -64,4 +65,4 @@ jobs: ${{ matrix.chain.bin }} stage unwind num-blocks 100 --chain ${{ matrix.chain.chain }} - name: Run stage unwind to block hash run: | - ${{ matrix.chain.bin }} stage unwind to-block ${{ matrix.chain.unwind-target }} --chain ${{ matrix.chain.chain }} + ${{ matrix.chain.bin }} stage unwind to-block ${{ 
matrix.chain.unwind-target }} --chain ${{ matrix.chain.chain }} diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml index e57082b83e..c1c5794fd0 100644 --- a/.github/workflows/sync.yml +++ b/.github/workflows/sync.yml @@ -9,6 +9,7 @@ on: env: CARGO_TERM_COLOR: always + RUSTC_WRAPPER: "sccache" concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -17,8 +18,7 @@ concurrency: jobs: sync: name: sync (${{ matrix.chain.bin }}) - runs-on: - group: Reth + runs-on: depot-ubuntu-latest env: RUST_LOG: info,sync=error RUST_BACKTRACE: 1 @@ -39,9 +39,10 @@ jobs: block: 10000 unwind-target: "0x118a6e922a8c6cab221fc5adfe5056d2b72d58c6580e9c5629de55299e2cf8de" steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -63,4 +64,4 @@ jobs: ${{ matrix.chain.bin }} stage unwind num-blocks 100 --chain ${{ matrix.chain.chain }} - name: Run stage unwind to block hash run: | - ${{ matrix.chain.bin }} stage unwind to-block ${{ matrix.chain.unwind-target }} --chain ${{ matrix.chain.chain }} + ${{ matrix.chain.bin }} stage unwind to-block ${{ matrix.chain.unwind-target }} --chain ${{ matrix.chain.chain }} diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index d9aca93f21..d85429d24d 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -11,6 +11,7 @@ on: env: CARGO_TERM_COLOR: always SEED: rustethereumethereumrust + RUSTC_WRAPPER: "sccache" concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} @@ -19,8 +20,7 @@ concurrency: jobs: test: name: test / ${{ matrix.type }} (${{ matrix.partition }}/${{ matrix.total_partitions }}) - runs-on: - group: Reth + runs-on: depot-ubuntu-latest-4 env: RUST_BACKTRACE: 1 strategy: @@ -44,9 +44,10 @@ jobs: total_partitions: 2 timeout-minutes: 30 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -65,16 +66,15 @@ jobs: state: name: Ethereum state tests - runs-on: - group: Reth + runs-on: depot-ubuntu-latest-4 env: RUST_LOG: info,sync=error RUST_BACKTRACE: 1 timeout-minutes: 30 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Checkout ethereum/tests - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: repository: ethereum/tests ref: 81862e4848585a438d64f911a19b3825f0f4cd95 @@ -93,6 +93,7 @@ jobs: - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - uses: taiki-e/install-action@nextest + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -100,15 +101,15 @@ jobs: doc: name: doc tests - runs-on: - group: Reth + runs-on: depot-ubuntu-latest env: RUST_BACKTRACE: 1 timeout-minutes: 30 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true diff --git a/.github/workflows/update-superchain.yml b/.github/workflows/update-superchain.yml index f682f35a17..cb177953cc 100644 --- a/.github/workflows/update-superchain.yml +++ b/.github/workflows/update-superchain.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - 
name: Checkout repository - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Install required tools run: | @@ -27,7 +27,7 @@ jobs: ./fetch_superchain_config.sh - name: Create Pull Request - uses: peter-evans/create-pull-request@v7 + uses: peter-evans/create-pull-request@v8 with: commit-message: "chore: update superchain config" title: "chore: update superchain config" diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 81181c2cb1..9bcadad6b8 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -9,18 +9,22 @@ on: branches: [main] merge_group: +env: + RUSTC_WRAPPER: "sccache" + jobs: check-reth: - runs-on: ubuntu-24.04 + runs-on: depot-ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable with: target: x86_64-pc-windows-gnu - uses: taiki-e/install-action@cross + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -30,16 +34,17 @@ jobs: run: cargo check --target x86_64-pc-windows-gnu check-op-reth: - runs-on: ubuntu-24.04 + runs-on: depot-ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable with: target: x86_64-pc-windows-gnu - uses: taiki-e/install-action@cross + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true diff --git a/Cargo.lock b/Cargo.lock index 7c5012b4b5..7280408365 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -58,13 +58,22 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" dependencies = [ "memchr", ] +[[package]] +name = "aligned-vec" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc890384c8602f339876ded803c97ad529f3842aba97f6392b3dba0dd171769b" +dependencies = [ + "equator", +] + [[package]] name = "alloc-no-stdlib" version = "2.0.4" @@ -88,9 +97,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.2.15" +version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bbb778f50ecb0cebfb5c05580948501927508da7bd628833a8c4bd8545e23e2" +checksum = "35d744058a9daa51a8cf22a3009607498fcf82d3cf4c5444dd8056cdf651f471" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -103,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9b151e38e42f1586a01369ec52a6934702731d07e8509a7307331b09f6c46dc" +checksum = "2e318e25fb719e747a7e8db1654170fc185024f3ed5b10f86c08d448a912f6e2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -115,6 +124,7 @@ dependencies = [ "alloy-tx-macros", "arbitrary", "auto_impl", + "borsh", "c-kzg", "derive_more", "either", @@ -130,9 +140,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e2d5e8668ef6215efdb7dcca6f22277b4e483a5650e05f5de22b2350971f4b8" +checksum = 
"364380a845193a317bcb7a5398fc86cdb66c47ebe010771dde05f6869bf9e64a" dependencies = [ "alloy-consensus", "alloy-eips", @@ -145,9 +155,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630288cf4f3a34a8c6bc75c03dce1dbd47833138f65f37d53a1661eafc96b83f" +checksum = "08d39c80ffc806f27a76ed42f3351a455f3dc4f81d6ff92c8aad2cf36b7d3a34" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -167,9 +177,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdff496dd4e98a81f4861e66f7eaf5f2488971848bb42d9c892f871730245c8" +checksum = "0d48a9101f4a67c22fae57489f1ddf3057b8ab4a368d8eac3be088b6e9d9c9d9" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -199,26 +209,28 @@ dependencies = [ [[package]] name = "alloy-eip2930" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b82752a889170df67bbb36d42ca63c531eb16274f0d7299ae2a680facba17bd" +checksum = "9441120fa82df73e8959ae0e4ab8ade03de2aaae61be313fbf5746277847ce25" dependencies = [ "alloy-primitives", "alloy-rlp", "arbitrary", + "borsh", "rand 0.8.5", "serde", ] [[package]] name = "alloy-eip7702" -version = "0.6.1" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d4769c6ffddca380b0070d71c8b7f30bed375543fe76bb2f74ec0acf4b7cd16" +checksum = "2919c5a56a1007492da313e7a3b6d45ef5edc5d33416fdec63c0d7a2702a0d20" dependencies = [ "alloy-primitives", "alloy-rlp", "arbitrary", + "borsh", "k256", "rand 0.8.5", "serde", @@ -227,10 +239,22 @@ dependencies = [ ] [[package]] -name = "alloy-eips" -version = "1.0.41" +name = "alloy-eip7928" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5434834adaf64fa20a6fb90877bc1d33214c41b055cc49f82189c98614368cc" +checksum = "926b2c0d34e641cf8b17bf54ce50fda16715b9f68ad878fa6128bae410c6f890" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "borsh", + "serde", +] + +[[package]] +name = "alloy-eips" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c4d7c5839d9f3a467900c625416b24328450c65702eb3d8caff8813e4d1d33" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -240,6 +264,7 @@ dependencies = [ "alloy-serde", "arbitrary", "auto_impl", + "borsh", "c-kzg", "derive_more", "either", @@ -253,9 +278,9 @@ dependencies = [ [[package]] name = "alloy-evm" -version = "0.22.5" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28bd79e109f2b3ff81ed1a93ed3d07cf175ca627fd4fad176df721041cc40dcc" +checksum = "e6ccc4c702c840148af1ce784cc5c6ed9274a020ef32417c5b1dbeab8c317673" dependencies = [ "alloy-consensus", "alloy-eips", @@ -267,8 +292,7 @@ dependencies = [ "alloy-sol-types", "auto_impl", "derive_more", - "op-alloy-consensus", - "op-alloy-rpc-types-engine", + "op-alloy", "op-revm", "revm", "thiserror 2.0.17", @@ -276,23 +300,24 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "919a8471cfbed7bcd8cf1197a57dda583ce0e10c6385f6ff4e8b41304b223392" +checksum = "1ba4b1be0988c11f0095a2380aa596e35533276b8fa6c9e06961bbfe0aebcac5" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-serde", "alloy-trie", + "borsh", "serde", 
"serde_with", ] [[package]] name = "alloy-hardforks" -version = "0.4.1" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd78f8e1c274581c663d7949c863b10c8b015e48f2774a4b8e8efc82d43ea95c" +checksum = "2d9a33550fc21fd77a3f8b63e99969d17660eec8dcc50a95a80f7c9964f7680b" dependencies = [ "alloy-chains", "alloy-eip2124", @@ -304,9 +329,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5513d5e6bd1cba6bdcf5373470f559f320c05c8c59493b6e98912fbe6733943f" +checksum = "9914c147bb9b25f440eca68a31dc29f5c22298bfa7754aa802965695384122b0" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -316,9 +341,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7c69f6c9c68a1287c9d5ff903d0010726934de0dac10989be37b75a29190d55" +checksum = "f72cf87cda808e593381fb9f005ffa4d2475552b7a6c5ac33d087bf77d82abd0" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -331,9 +356,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eaf2ae05219e73e0979cb2cf55612aafbab191d130f203079805eaf881cca58" +checksum = "12aeb37b6f2e61b93b1c3d34d01ee720207c76fe447e2a2c217e433ac75b17f5" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -357,9 +382,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e58f4f345cef483eab7374f2b6056973c7419ffe8ad35e994b7a7f5d8e0c7ba4" +checksum = "abd29ace62872083e30929cd9b282d82723196d196db589f3ceda67edcc05552" dependencies = [ "alloy-consensus", "alloy-eips", @@ -370,9 +395,9 @@ dependencies = [ [[package]] name = "alloy-op-evm" -version = "0.22.5" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35db78840a29b14fec51f3399a6dc82ecc815a5766eb80b32e69a0c92adddc14" +checksum = "0f640da852f93ddaa3b9a602b7ca41d80e0023f77a67b68aaaf511c32f1fe0ce" dependencies = [ "alloy-consensus", "alloy-eips", @@ -380,7 +405,7 @@ dependencies = [ "alloy-op-hardforks", "alloy-primitives", "auto_impl", - "op-alloy-consensus", + "op-alloy", "op-revm", "revm", "thiserror 2.0.17", @@ -388,9 +413,9 @@ dependencies = [ [[package]] name = "alloy-op-hardforks" -version = "0.4.1" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "777759314eaa14fb125c1deba5cbc06eee953bbe77bc7cc60b4e8685bd03479e" +checksum = "f96fb2fce4024ada5b2c11d4076acf778a0d3e4f011c6dfd2ffce6d0fcf84ee9" dependencies = [ "alloy-chains", "alloy-hardforks", @@ -401,9 +426,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "355bf68a433e0fd7f7d33d5a9fc2583fde70bf5c530f63b80845f8da5505cf28" +checksum = "7db950a29746be9e2f2c6288c8bd7a6202a81f999ce109a2933d2379970ec0fa" dependencies = [ "alloy-rlp", "arbitrary", @@ -411,10 +436,11 @@ dependencies = [ "cfg-if", "const-hex", "derive_more", + "fixed-cache", "foldhash 0.2.0", "getrandom 0.3.4", - "hashbrown 0.16.0", - "indexmap 2.12.0", + "hashbrown 0.16.1", + "indexmap 2.12.1", "itoa", "k256", "keccak-asm", @@ -422,6 +448,7 @@ dependencies = [ 
"proptest", "proptest-derive 0.6.0", "rand 0.9.2", + "rapidhash", "ruint", "rustc-hash", "serde", @@ -431,9 +458,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de2597751539b1cc8fe4204e5325f9a9ed83fcacfb212018dfcfa7877e76de21" +checksum = "9b710636d7126e08003b8217e24c09f0cca0b46d62f650a841736891b1ed1fc1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -476,9 +503,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06e45a68423e732900a0c824b8e22237db461b79d2e472dd68b7547c16104427" +checksum = "cdd4c64eb250a18101d22ae622357c6b505e158e9165d4c7974d59082a600c5e" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -515,14 +542,14 @@ checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] name = "alloy-rpc-client" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edf8eb8be597cfa8c312934d2566ec4516f066d69164f9212d7a148979fdcfd8" +checksum = "d0882e72d2c1c0c79dcf4ab60a67472d3f009a949f774d4c17d0bdb669cfde05" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -546,9 +573,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "339af7336571dd39ae3a15bde08ae6a647e62f75350bd415832640268af92c06" +checksum = "39cf1398cb33aacb139a960fa3d8cf8b1202079f320e77e952a0b95967bf7a9f" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -559,9 +586,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b33cdc0483d236cdfff763dae799ccef9646e94fb549a74f7adac6a7f7bb86" +checksum = "65a583d2029b171301f5dcf122aa2ef443a65a373778ec76540d999691ae867d" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -571,9 +598,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83d98fb386a462e143f5efa64350860af39950c49e7c0cbdba419c16793116ef" +checksum = "c3ce4c24e416bd0f17fceeb2f26cd8668df08fe19e1dc02f9d41c3b8ed1e93e0" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -583,9 +610,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbde0801a32d21c5f111f037bee7e22874836fba7add34ed4a6919932dd7cf23" +checksum = "6a63fb40ed24e4c92505f488f9dd256e2afaed17faa1b7a221086ebba74f4122" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -594,9 +621,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55c8d51ebb7c5fa8be8ea739a3933c5bfea08777d2d662b30b2109ac5ca71e6b" +checksum = "16633087e23d8d75161c3a59aa183203637b817a5a8d2f662f612ccb6d129af0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -614,9 +641,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.0.41" +version = "1.1.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "388cf910e66bd4f309a81ef746dcf8f9bca2226e3577890a8d56c5839225cf46" +checksum = "4936f579d9d10eae01772b2ab3497f9d568684f05f26f8175e12f9a1a2babc33" dependencies = [ "alloy-primitives", "derive_more", @@ -626,9 +653,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "605ec375d91073851f566a3082548af69a28dca831b27a8be7c1b4c49f5c6ca2" +checksum = "4c60bdce3be295924122732b7ecd0b2495ce4790bedc5370ca7019c08ad3f26e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -647,9 +674,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "361cd87ead4ba7659bda8127902eda92d17fa7ceb18aba1676f7be10f7222487" +checksum = "9eae0c7c40da20684548cbc8577b6b7447f7bf4ddbac363df95e3da220e41e72" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -669,9 +696,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1397926d8d06a2531578bafc3e0ec78f97a02f0e6d1631c67d80d22af6a3af02" +checksum = "81c0dd81c24944cfbf45b5df7cd149d9cd3e354db81ccf08aa47e0e05be8ab97" dependencies = [ "alloy-consensus", "alloy-eips", @@ -684,9 +711,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de4e95fb0572b97b17751d0fdf5cdc42b0050f9dd9459eddd1bf2e2fbfed0a33" +checksum = "ef206a4b8d436fbb7cf2e6a61c692d11df78f9382becc3c9a283bd58e64f0583" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -698,9 +725,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cddde1bbd4feeb0d363ae7882af1e2e7955ef77c17f933f31402aad9343b57c5" +checksum = "ecb5a795264a02222f9534435b8f40dcbd88de8e9d586647884aae24f389ebf2" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -710,9 +737,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64600fc6c312b7e0ba76f73a381059af044f4f21f43e07f51f1fa76c868fe302" +checksum = "c0df1987ed0ff2d0159d76b52e7ddfc4e4fbddacc54d2fbee765e0d14d7c01b5" dependencies = [ "alloy-primitives", "arbitrary", @@ -722,9 +749,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5772858492b26f780468ae693405f895d6a27dea6e3eab2c36b6217de47c2647" +checksum = "6ff69deedee7232d7ce5330259025b868c5e6a52fa8dffda2c861fb3a5889b24" dependencies = [ "alloy-primitives", "async-trait", @@ -737,9 +764,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4195b803d0a992d8dbaab2ca1986fc86533d4bc80967c0cce7668b26ad99ef9" +checksum = "72cfe0be3ec5a8c1a46b2e5a7047ed41121d360d97f4405bb7c1c784880c86cb" dependencies = [ "alloy-consensus", "alloy-network", @@ -756,41 +783,41 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "1.4.1" +version = "1.5.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3ce480400051b5217f19d6e9a82d9010cdde20f1ae9c00d53591e4a1afbb312" +checksum = "a3b96d5f5890605ba9907ce1e2158e2701587631dc005bfa582cf92dd6f21147" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] name = "alloy-sol-macro-expander" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d792e205ed3b72f795a8044c52877d2e6b6e9b1d13f431478121d8d4eaa9028" +checksum = "b8247b7cca5cde556e93f8b3882b01dbd272f527836049083d240c57bf7b4c15" dependencies = [ "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.12.0", + "indexmap 2.12.1", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bd1247a8f90b465ef3f1207627547ec16940c35597875cdc09c49d58b19693c" +checksum = "3cd54f38512ac7bae10bbc38480eefb1b9b398ca2ce25db9cc0c048c6411c4f1" dependencies = [ "const-hex", "dunce", @@ -798,15 +825,15 @@ dependencies = [ "macro-string", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "954d1b2533b9b2c7959652df3076954ecb1122a28cc740aa84e7b0a49f6ac0a9" +checksum = "444b09815b44899564566d4d56613d14fa9a274b1043a021f00468568752f449" dependencies = [ "serde", "winnow", @@ -814,9 +841,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70319350969a3af119da6fb3e9bddb1bce66c9ea933600cb297c8b1850ad2a3c" +checksum = "dc1038284171df8bfd48befc0c7b78f667a7e2be162f45f07bd1c378078ebe58" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -826,12 +853,11 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "025a940182bddaeb594c26fe3728525ae262d0806fe6a4befdf5d7bc13d54bce" +checksum = "be98b07210d24acf5b793c99b759e9a696e4a2e67593aec0487ae3b3e1a2478c" dependencies = [ "alloy-json-rpc", - "alloy-primitives", "auto_impl", "base64 0.22.1", "derive_more", @@ -850,9 +876,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5064d1e1e1aabc918b5954e7fb8154c39e77ec6903a581b973198b26628fa" +checksum = "4198a1ee82e562cab85e7f3d5921aab725d9bd154b6ad5017f82df1695877c97" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -865,9 +891,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d47962f3f1d9276646485458dc842b4e35675f42111c9d814ae4711c664c8300" +checksum = "d8db249779ebc20dc265920c7e706ed0d31dbde8627818d1cbde60919b875bb0" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -885,9 +911,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9476a36a34e2fb51b6746d009c53d309a186a825aa95435407f0e07149f4ad2d" +checksum = "5ad2344a12398d7105e3722c9b7a7044ea837128e11d453604dec6e3731a86e2" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -923,15 +949,14 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.41" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8e52276fdb553d3c11563afad2898f4085165e4093604afe3d78b69afbf408f" +checksum = "333544408503f42d7d3792bfc0f7218b643d968a03d2c0ed383ae558fb4a76d0" dependencies = [ - "alloy-primitives", "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -981,22 +1006,22 @@ dependencies = [ [[package]] name = "anstyle-query" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2" +checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" dependencies = [ - "windows-sys 0.60.2", + "windows-sys 0.61.2", ] [[package]] name = "anstyle-wincon" -version = "3.0.10" +version = "3.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a" +checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" dependencies = [ "anstyle", "once_cell_polyfill", - "windows-sys 0.60.2", + "windows-sys 0.61.2", ] [[package]] @@ -1016,7 +1041,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -1158,7 +1183,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" dependencies = [ "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -1196,7 +1221,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -1285,7 +1310,7 @@ checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -1358,9 +1383,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.32" +version = "0.4.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a89bce6054c720275ac2432fbba080a66a2106a44a1b804553930ca6909f4e0" +checksum = "98ec5f6c2f8bc326c994cb9e241cc257ddaba9afa8555a43cffbb5dd86efaa37" dependencies = [ "compression-codecs", "compression-core", @@ -1376,7 +1401,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e6fa871e4334a622afd6bb2f611635e8083a6f5e2936c0f90f37c7ef9856298" dependencies = [ "async-channel", - "futures-lite", + "futures-lite 1.13.0", "http-types", "log", "memchr", @@ -1402,7 +1427,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -1413,7 +1438,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -1451,7 +1476,7 @@ checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -1518,9 +1543,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.8.0" 
+version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" +checksum = "0e050f626429857a27ddccb31e0aca21356bfa709c04041aefddac081a8f068a" [[package]] name = "bech32" @@ -1578,7 +1603,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -1596,7 +1621,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -1616,15 +1641,15 @@ checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" [[package]] name = "bitcoin-io" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b47c4ab7a93edb0c7198c5535ed9b52b63095f4e9b45279c6736cec4b856baf" +checksum = "2dee39a0ee5b4095224a0cfc6bf4cc1baf0f9624b96b367e53b66d974e51d953" [[package]] name = "bitcoin_hashes" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16" +checksum = "26ec84b80c482df901772e931a9a681e26a1b9ee2302edeff23cb30328745c8b" dependencies = [ "bitcoin-io", "hex-conservative", @@ -1677,6 +1702,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block2" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdeb9d870516001442e364c5220d3574d2da8dc765554b4a617230d33fa58ef5" +dependencies = [ + "objc2", +] + [[package]] name = "blst" version = "0.3.16" @@ -1691,25 +1725,26 @@ dependencies = [ [[package]] name = "boa_ast" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c340fe0f0b267787095cbe35240c6786ff19da63ec7b69367ba338eace8169b" +checksum = "bc119a5ad34c3f459062a96907f53358989b173d104258891bb74f95d93747e8" dependencies = [ "bitflags 2.10.0", "boa_interner", "boa_macros", "boa_string", - "indexmap 2.12.0", + "indexmap 2.12.1", "num-bigint", "rustc-hash", ] [[package]] name = "boa_engine" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f620c3f06f51e65c0504ddf04978be1b814ac6586f0b45f6019801ab5efd37f9" +checksum = "e637ec52ea66d76b0ca86180c259d6c7bb6e6a6e14b2f36b85099306d8b00cc3" dependencies = [ + "aligned-vec", "arrayvec", "bitflags 2.10.0", "boa_ast", @@ -1717,122 +1752,146 @@ dependencies = [ "boa_interner", "boa_macros", "boa_parser", - "boa_profiler", "boa_string", "bytemuck", "cfg-if", + "cow-utils", "dashmap 6.1.0", + "dynify", "fast-float2", - "hashbrown 0.15.5", - "icu_normalizer 1.5.0", - "indexmap 2.12.0", + "float16", + "futures-channel", + "futures-concurrency", + "futures-lite 2.6.1", + "hashbrown 0.16.1", + "icu_normalizer", + "indexmap 2.12.1", "intrusive-collections", - "itertools 0.13.0", + "itertools 0.14.0", "num-bigint", "num-integer", "num-traits", "num_enum", - "once_cell", - "pollster", + "paste", "portable-atomic", - "rand 0.8.5", + "rand 0.9.2", "regress", "rustc-hash", "ryu-js", "serde", "serde_json", - "sptr", + "small_btree", "static_assertions", + "tag_ptr", "tap", "thin-vec", "thiserror 2.0.17", "time", + "xsum", ] [[package]] name = "boa_gc" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2425c0b7720d42d73eaa6a883fbb77a5c920da8694964a3d79a67597ac55cce2" +checksum = "f1179f690cbfcbe5364cceee5f1cb577265bb6f07b0be6f210aabe270adcf9da" dependencies = 
[ "boa_macros", - "boa_profiler", "boa_string", - "hashbrown 0.15.5", + "hashbrown 0.16.1", "thin-vec", ] [[package]] name = "boa_interner" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42407a3b724cfaecde8f7d4af566df4b56af32a2f11f0956f5570bb974e7f749" +checksum = "9626505d33dc63d349662437297df1d3afd9d5fc4a2b3ad34e5e1ce879a78848" dependencies = [ "boa_gc", "boa_macros", - "hashbrown 0.15.5", - "indexmap 2.12.0", + "hashbrown 0.16.1", + "indexmap 2.12.1", "once_cell", - "phf 0.11.3", + "phf", "rustc-hash", "static_assertions", ] [[package]] name = "boa_macros" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fd3f870829131332587f607a7ff909f1af5fc523fd1b192db55fbbdf52e8d3c" +checksum = "7f36418a46544b152632c141b0a0b7a453cd69ca150caeef83aee9e2f4b48b7d" dependencies = [ + "cfg-if", + "cow-utils", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", "synstructure", ] [[package]] name = "boa_parser" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cc142dac798cdc6e2dbccfddeb50f36d2523bb977a976e19bdb3ae19b740804" +checksum = "02f99bf5b684f0de946378fcfe5f38c3a0fbd51cbf83a0f39ff773a0e218541f" dependencies = [ "bitflags 2.10.0", "boa_ast", "boa_interner", "boa_macros", - "boa_profiler", "fast-float2", - "icu_properties 1.5.1", + "icu_properties", "num-bigint", "num-traits", "regress", "rustc-hash", ] -[[package]] -name = "boa_profiler" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4064908e7cdf9b6317179e9b04dcb27f1510c1c144aeab4d0394014f37a0f922" - [[package]] name = "boa_string" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7debc13fbf7997bf38bf8e9b20f1ad5e2a7d27a900e1f6039fe244ce30f589b5" +checksum = "45ce9d7aa5563a2e14eab111e2ae1a06a69a812f6c0c3d843196c9d03fbef440" dependencies = [ "fast-float2", + "itoa", "paste", "rustc-hash", - "sptr", + "ryu-js", "static_assertions", ] [[package]] -name = "boyer-moore-magiclen" -version = "0.2.20" +name = "borsh" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95e6233f2d926b5b123caf9d58e3885885255567fbe7776a7fdcae2a4d7241c4" +checksum = "d1da5ab77c1437701eeff7c88d968729e7766172279eab0676857b3d63af7a6f" +dependencies = [ + "borsh-derive", + "cfg_aliases", +] + +[[package]] +name = "borsh-derive" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0686c856aa6aac0c4498f936d7d6a02df690f614c03e4d906d1018062b5c5e2c" +dependencies = [ + "once_cell", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "boyer-moore-magiclen" +version = "0.2.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7441b4796eb8a7107d4cd99d829810be75f5573e1081c37faa0e8094169ea0d6" dependencies = [ "debug-helper", ] @@ -1870,9 +1929,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.12.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4" +checksum = "63044e1ae8e69f3b5a92c736ca6269b8d12fa7efe39bf34ddb06d102cf0e2cab" dependencies = [ "memchr", "regex-automata", @@ -1881,9 +1940,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.19.0" +version = "3.19.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" +checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" [[package]] name = "byte-slice-cast" @@ -1914,7 +1973,7 @@ checksum = "f9abbd1bc6865053c427f7198e6af43bfdedc55ab791faed4fbd361d789575ff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -1925,13 +1984,23 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.10.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" +checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" dependencies = [ "serde", ] +[[package]] +name = "bzip2-sys" +version = "0.1.13+1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14" +dependencies = [ + "cc", + "pkg-config", +] + [[package]] name = "c-kzg" version = "2.1.5" @@ -1950,9 +2019,9 @@ dependencies = [ [[package]] name = "camino" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "276a59bf2b2c967788139340c9f0c5b12d7fd6630315c15c217e559de85d2609" +checksum = "e629a66d692cb9ff1a1c664e41771b3dcaf961985a9774c0eb0bd1b51cf60a48" dependencies = [ "serde_core", ] @@ -2063,7 +2132,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -2116,9 +2185,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.50" +version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c2cfd7bf8a6017ddaa4e32ffe7403d547790db06bd171c1c53926faab501623" +checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8" dependencies = [ "clap_builder", "clap_derive", @@ -2126,9 +2195,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.50" +version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a4c05b9e80c5ccd3a7ef080ad7b6ba7d6fc00a985b8b157197075677c82c7a0" +checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00" dependencies = [ "anstream", "anstyle", @@ -2145,7 +2214,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -2322,9 +2391,9 @@ dependencies = [ [[package]] name = "compression-codecs" -version = "0.4.31" +version = "0.4.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef8a506ec4b81c460798f572caead636d57d3d7e940f998160f52bd254bf2d23" +checksum = "b0f7ac3e5b97fdce45e8922fb05cae2c37f7bbd63d30dd94821dacfd8f3f2bf2" dependencies = [ "brotli", "compression-core", @@ -2336,9 +2405,9 @@ dependencies = [ [[package]] name = "compression-core" -version = "0.4.29" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e47641d3deaf41fb1538ac1f54735925e275eaf3bf4d55c81b137fba797e5cbb" +checksum = "75984efb6ed102a0d42db99afb6c1948f0380d1d91808d5529916e6c08b49d8d" [[package]] name = "concat-kdf" @@ -2416,13 +2485,23 @@ dependencies = [ [[package]] name = "convert_case" -version = "0.7.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb402b8d4c85569410425650ce3eddc7d698ed96d39a73f941b08fb63082f1e7" +checksum = 
"633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9" dependencies = [ "unicode-segmentation", ] +[[package]] +name = "cordyceps" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "688d7fbb8092b8de775ef2536f36c8c31f2bc4006ece2e8d8ad2d17d00ce0a2a" +dependencies = [ + "loom", + "tracing", +] + [[package]] name = "core-foundation" version = "0.10.1" @@ -2448,6 +2527,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "cow-utils" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "417bef24afe1460300965a25ff4a24b8b45ad011948302ec221e8a0a81eb2c79" + [[package]] name = "cpufeatures" version = "0.2.17" @@ -2459,9 +2544,9 @@ dependencies = [ [[package]] name = "crc" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675" +checksum = "5eb8a2a1cd12ab0d987a5d5e825195d372001a4094a0376319d5a0ad71c1ba0d" dependencies = [ "crc-catalog", ] @@ -2590,9 +2675,9 @@ dependencies = [ [[package]] name = "crypto-common" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" dependencies = [ "generic-array", "rand_core 0.6.4", @@ -2629,6 +2714,17 @@ dependencies = [ "cipher", ] +[[package]] +name = "ctrlc" +version = "3.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73736a89c4aff73035ba2ed2e565061954da00d4970fc9ac25dcc85a2a20d790" +dependencies = [ + "dispatch2", + "nix 0.30.1", + "windows-sys 0.61.2", +] + [[package]] name = "curve25519-dalek" version = "4.1.3" @@ -2653,7 +2749,20 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", +] + +[[package]] +name = "custom-hardforks" +version = "0.1.0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-genesis", + "alloy-primitives", + "reth-chainspec", + "reth-network-peers", + "serde", ] [[package]] @@ -2687,7 +2796,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -2702,7 +2811,7 @@ dependencies = [ "quote", "serde", "strsim", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -2713,7 +2822,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -2724,7 +2833,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -2777,7 +2886,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" dependencies = [ "data-encoding", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -2836,7 +2945,7 @@ checksum = "ef941ded77d15ca19b40374869ac6000af1c9f2a4c0f3d4c70926287e6364a8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -2847,7 +2956,7 @@ checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -2868,7 +2977,7 @@ dependencies = [ "darling 
0.20.11", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -2878,31 +2987,38 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] name = "derive_more" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +checksum = "10b768e943bed7bf2cab53df09f4bc34bfd217cdb57d971e769874c9a6710618" dependencies = [ "derive_more-impl", ] [[package]] name = "derive_more-impl" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +checksum = "6d286bfdaf75e988b4a78e013ecd79c581e06399ab53fbacd2d916c2f904f30b" dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.108", + "rustc_version 0.4.1", + "syn 2.0.111", "unicode-xid", ] +[[package]] +name = "diatomic-waker" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab03c107fafeb3ee9f5925686dbb7a73bc76e3932abb0d2b365cb64b169cf04c" + [[package]] name = "diff" version = "0.1.13" @@ -2974,9 +3090,9 @@ dependencies = [ [[package]] name = "discv5" -version = "0.9.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4b4e7798d2ff74e29cee344dc490af947ae657d6ab5273dde35d58ce06a4d71" +checksum = "f170f4f6ed0e1df52bf43b403899f0081917ecf1500bfe312505cc3b515a8899" dependencies = [ "aes", "aes-gcm", @@ -3005,6 +3121,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "dispatch2" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89a09f22a6c6069a18470eb92d2298acf25463f14256d24778e1230d789a2aec" +dependencies = [ + "bitflags 2.10.0", + "block2", + "libc", + "objc2", +] + [[package]] name = "displaydoc" version = "0.2.5" @@ -3013,7 +3141,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -3043,6 +3171,26 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" +[[package]] +name = "dynify" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81acb15628a3e22358bf73de5e7e62360b8a777dbcb5fc9ac7dfa9ae73723747" +dependencies = [ + "dynify-macros", +] + +[[package]] +name = "dynify-macros" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec431cd708430d5029356535259c5d645d60edd3d39c54e5eea9782d46caa7d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + [[package]] name = "ecdsa" version = "0.16.9" @@ -3092,12 +3240,12 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] name = "ef-test-runner" -version = "1.8.2" +version = "1.9.3" dependencies = [ "clap", "ef-tests", @@ -3105,7 +3253,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3200,27 +3348,47 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] name = "enum-ordinalize" -version = 
"4.3.0" +version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" +checksum = "4a1091a7bb1f8f2c4b28f1fe2cef4980ca2d410a3d727d67ecc3178c9b0800f0" dependencies = [ "enum-ordinalize-derive", ] [[package]] name = "enum-ordinalize-derive" -version = "4.3.1" +version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" +checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", +] + +[[package]] +name = "equator" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4711b213838dfee0117e3be6ac926007d7f433d7bbe33595975d4190cb07e6fc" +dependencies = [ + "equator-macro", +] + +[[package]] +name = "equator-macro" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44f23cf4b44bfce11a86ace86f8a73ffdec849c9fd00a386a53d278bd9e81fb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", ] [[package]] @@ -3296,7 +3464,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -3463,7 +3631,6 @@ dependencies = [ "op-alloy-rpc-types", "op-alloy-rpc-types-engine", "op-revm", - "reth-chain-state", "reth-codecs", "reth-db-api", "reth-engine-primitives", @@ -3518,6 +3685,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "example-custom-rpc-middleware" +version = "0.0.0" +dependencies = [ + "clap", + "jsonrpsee", + "reth-ethereum", + "tower", + "tracing", +] + [[package]] name = "example-db-access" version = "0.0.0" @@ -3565,7 +3743,7 @@ dependencies = [ [[package]] name = "example-full-contract-state" -version = "1.8.2" +version = "1.9.3" dependencies = [ "eyre", "reth-ethereum", @@ -3704,7 +3882,7 @@ dependencies = [ [[package]] name = "exex-subscription" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-primitives", "clap", @@ -3809,6 +3987,15 @@ dependencies = [ "windows-sys 0.60.2", ] +[[package]] +name = "fixed-cache" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba59b6c98ba422a13f17ee1305c995cb5742bba7997f5b4d9af61b2ff0ffb213" +dependencies = [ + "equivalent", +] + [[package]] name = "fixed-hash" version = "0.8.0" @@ -3821,6 +4008,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "fixedbitset" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" + [[package]] name = "flate2" version = "1.1.5" @@ -3831,6 +4024,16 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "float16" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bffafbd079d520191c7c2779ae9cf757601266cf4167d3f659ff09617ff8483" +dependencies = [ + "cfg-if", + "rustc_version 0.2.3", +] + [[package]] name = "fnv" version = "1.0.7" @@ -3888,6 +4091,19 @@ dependencies = [ "futures-util", ] +[[package]] +name = "futures-buffered" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8e0e1f38ec07ba4abbde21eed377082f17ccb988be9d988a5adbf4bafc118fd" +dependencies = [ + "cordyceps", + "diatomic-waker", + "futures-core", + "pin-project-lite", + "spin", +] + [[package]] name = "futures-channel" version = "0.3.31" @@ 
-3898,6 +4114,21 @@ dependencies = [ "futures-sink", ] +[[package]] +name = "futures-concurrency" +version = "7.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eb68017df91f2e477ed4bea586c59eaecaa47ed885a770d0444e21e62572cd2" +dependencies = [ + "fixedbitset", + "futures-buffered", + "futures-core", + "futures-lite 2.6.1", + "pin-project", + "slab", + "smallvec", +] + [[package]] name = "futures-core" version = "0.3.31" @@ -3936,6 +4167,19 @@ dependencies = [ "waker-fn", ] +[[package]] +name = "futures-lite" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f78e10609fe0e0b3f4157ffab1876319b5b0db102a2c60dc4626306dc46b44ad" +dependencies = [ + "fastrand 2.3.0", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + [[package]] name = "futures-macro" version = "0.3.31" @@ -3944,7 +4188,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -3995,16 +4239,17 @@ checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" [[package]] name = "generator" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "605183a538e3e2a9c1038635cc5c2d194e2ee8fd0d1b66b8349fad7dbacce5a2" +checksum = "52f04ae4152da20c76fe800fa48659201d5cf627c5149ca0b707b69d7eef6cf9" dependencies = [ "cc", "cfg-if", "libc", "log", "rustversion", - "windows 0.61.3", + "windows-link", + "windows-result 0.4.1", ] [[package]] @@ -4068,9 +4313,9 @@ dependencies = [ [[package]] name = "git2" -version = "0.20.2" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2deb07a133b1520dc1a5690e9bd08950108873d7ed5de38dcc74d3b5ebffa110" +checksum = "3e2b37e2f62729cdada11f0e6b3b6fe383c69c29fc619e391223e12856af308c" dependencies = [ "bitflags 2.10.0", "libc", @@ -4164,7 +4409,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.12.0", + "indexmap 2.12.1", "slab", "tokio", "tokio-util", @@ -4222,12 +4467,15 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" dependencies = [ + "allocator-api2", + "equivalent", "foldhash 0.2.0", "serde", + "serde_core", ] [[package]] @@ -4269,9 +4517,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hex-conservative" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5313b072ce3c597065a808dbf612c4c8e8590bdbf8b579508bf7a762c5eae6cd" +checksum = "fda06d18ac606267c40c04e41b9947729bf8b9efe74bd4e82b61a5f26a510b9f" dependencies = [ "arrayvec", ] @@ -4344,12 +4592,11 @@ dependencies = [ [[package]] name = "http" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" dependencies = [ "bytes", - "fnv", "itoa", ] @@ -4391,7 +4638,7 @@ dependencies = [ "anyhow", "async-channel", "base64 0.13.1", - "futures-lite", + "futures-lite 1.13.0", "infer", "pin-project-lite", "rand 0.7.3", @@ 
-4438,9 +4685,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" +checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" dependencies = [ "atomic-waker", "bytes", @@ -4475,7 +4722,7 @@ dependencies = [ "tokio", "tokio-rustls", "tower-service", - "webpki-roots 1.0.3", + "webpki-roots 1.0.4", ] [[package]] @@ -4493,9 +4740,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.17" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" +checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" dependencies = [ "base64 0.22.1", "bytes", @@ -4539,18 +4786,6 @@ dependencies = [ "cc", ] -[[package]] -name = "icu_collections" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" -dependencies = [ - "displaydoc", - "yoke 0.7.5", - "zerofrom", - "zerovec 0.10.4", -] - [[package]] name = "icu_collections" version = "2.0.0" @@ -4559,96 +4794,42 @@ checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" dependencies = [ "displaydoc", "potential_utf", - "yoke 0.8.0", + "yoke", "zerofrom", - "zerovec 0.11.4", + "zerovec", ] [[package]] name = "icu_locale_core" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" dependencies = [ "displaydoc", - "litemap 0.8.0", - "tinystr 0.8.1", - "writeable 0.6.1", - "zerovec 0.11.4", + "litemap", + "serde", + "tinystr", + "writeable", + "zerovec", ] -[[package]] -name = "icu_locid" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" -dependencies = [ - "displaydoc", - "litemap 0.7.5", - "tinystr 0.7.6", - "writeable 0.5.5", - "zerovec 0.10.4", -] - -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider 1.5.0", - "tinystr 0.7.6", - "zerovec 0.10.4", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" - [[package]] name = "icu_normalizer" -version = "1.5.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +checksum = "8b24a59706036ba941c9476a55cd57b82b77f38a3c667d637ee7cabbc85eaedc" dependencies = [ "displaydoc", - "icu_collections 1.5.0", - "icu_normalizer_data 1.5.1", - "icu_properties 1.5.1", - "icu_provider 1.5.0", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", "smallvec", "utf16_iter", - "utf8_iter", "write16", - "zerovec 0.10.4", + "zerovec", ] -[[package]] -name = "icu_normalizer" -version = "2.0.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" -dependencies = [ - "displaydoc", - "icu_collections 2.0.0", - "icu_normalizer_data 2.0.0", - "icu_properties 2.0.1", - "icu_provider 2.0.0", - "smallvec", - "zerovec 0.11.4", -] - -[[package]] -name = "icu_normalizer_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" - [[package]] name = "icu_normalizer_data" version = "2.0.0" @@ -4657,41 +4838,20 @@ checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = "icu_properties" -version = "1.5.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +checksum = "f5a97b8ac6235e69506e8dacfb2adf38461d2ce6d3e9bd9c94c4cbc3cd4400a4" dependencies = [ "displaydoc", - "icu_collections 1.5.0", - "icu_locid_transform", - "icu_properties_data 1.5.1", - "icu_provider 1.5.0", - "tinystr 0.7.6", - "zerovec 0.10.4", -] - -[[package]] -name = "icu_properties" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" -dependencies = [ - "displaydoc", - "icu_collections 2.0.0", + "icu_collections", "icu_locale_core", - "icu_properties_data 2.0.1", - "icu_provider 2.0.0", + "icu_properties_data", + "icu_provider", "potential_utf", "zerotrie", - "zerovec 0.11.4", + "zerovec", ] -[[package]] -name = "icu_properties_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" - [[package]] name = "icu_properties_data" version = "2.0.1" @@ -4700,47 +4860,19 @@ checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" [[package]] name = "icu_provider" -version = "1.5.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_provider_macros", - "stable_deref_trait", - "tinystr 0.7.6", - "writeable 0.5.5", - "yoke 0.7.5", - "zerofrom", - "zerovec 0.10.4", -] - -[[package]] -name = "icu_provider" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" dependencies = [ "displaydoc", "icu_locale_core", + "serde", "stable_deref_trait", - "tinystr 0.8.1", - "writeable 0.6.1", - "yoke 0.8.0", + "writeable", + "yoke", "zerofrom", "zerotrie", - "zerovec 0.11.4", -] - -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.108", + "zerovec", ] [[package]] @@ -4766,15 +4898,15 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" dependencies = [ - "icu_normalizer 2.0.0", - "icu_properties 2.0.1", + "icu_normalizer", + "icu_properties", ] [[package]] name = "if-addrs" -version = "0.13.4" +version = 
"0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69b2eeee38fef3aa9b4cc5f1beea8a2444fc00e7377cafae396de3f5c2065e24" +checksum = "bf39cc0423ee66021dc5eccface85580e4a001e0c5288bae8bea7ecb69225e90" dependencies = [ "libc", "windows-sys 0.59.0", @@ -4797,7 +4929,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -4838,13 +4970,13 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.12.0" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" +checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" dependencies = [ "arbitrary", "equivalent", - "hashbrown 0.16.0", + "hashbrown 0.16.1", "serde", "serde_core", ] @@ -4895,16 +5027,28 @@ dependencies = [ ] [[package]] -name = "instability" -version = "0.3.9" +name = "insta" +version = "1.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435d80800b936787d62688c927b6490e887c7ef5ff9ce922c6c6050fca75eb9a" +checksum = "b76866be74d68b1595eb8060cb9191dca9c021db2316558e52ddc5d55d41b66c" +dependencies = [ + "console", + "once_cell", + "similar", + "tempfile", +] + +[[package]] +name = "instability" +version = "0.3.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6778b0196eefee7df739db78758e5cf9b37412268bfa5650bfeed028aed20d9c" dependencies = [ "darling 0.20.11", "indoc", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -4960,9 +5104,9 @@ checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "iri-string" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" +checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397" dependencies = [ "memchr", "serde", @@ -5014,9 +5158,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.15" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "7ee5b5339afb4c41626dde77b7a611bd4f2c202b897852b4bcf5d03eddc61010" [[package]] name = "jni" @@ -5052,9 +5196,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.81" +version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec48937a97411dcb524a265206ccd4c90bb711fca92b2792c407f268825b9305" +checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" dependencies = [ "once_cell", "wasm-bindgen", @@ -5164,7 +5308,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -5309,15 +5453,15 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.177" +version = "0.2.178" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" +checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" [[package]] name = "libgit2-sys" -version = "0.18.2+1.9.1" +version = "0.18.3+1.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1c42fe03df2bd3c53a3a9c7317ad91d80c81cd1fb0caec8d7cc4cd2bfa10c222" +checksum = "c9b3acc4b91781bb0b3386669d325163746af5f6e4f73e6d2d630e09a35f3487" dependencies = [ "cc", "libc", @@ -5332,7 +5476,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" dependencies = [ "cfg-if", - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -5343,9 +5487,9 @@ checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "libp2p-identity" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3104e13b51e4711ff5738caa1fb54467c8604c2e94d607e27745bcf709068774" +checksum = "f0c7892c221730ba55f7196e98b0b8ba5e04b4155651736036628e9f73ed6fc3" dependencies = [ "asn1_der", "bs58", @@ -5373,20 +5517,36 @@ dependencies = [ [[package]] name = "libredox" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" +checksum = "df15f6eac291ed1cf25865b1ee60399f57e7c227e7f51bdbd4c5270396a9ed50" dependencies = [ "bitflags 2.10.0", "libc", - "redox_syscall", + "redox_syscall 0.6.0", +] + +[[package]] +name = "librocksdb-sys" +version = "0.17.3+10.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cef2a00ee60fe526157c9023edab23943fae1ce2ab6f4abb2a807c1746835de9" +dependencies = [ + "bindgen 0.72.1", + "bzip2-sys", + "cc", + "libc", + "libz-sys", + "lz4-sys", + "tikv-jemalloc-sys", + "zstd-sys", ] [[package]] name = "libz-sys" -version = "1.1.22" +version = "1.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" +checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" dependencies = [ "cc", "libc", @@ -5402,12 +5562,12 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linked_hash_set" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bae85b5be22d9843c80e5fc80e9b64c8a3b1f98f867c709956eca3efff4e92e2" +checksum = "984fb35d06508d1e69fc91050cceba9c0b748f983e6739fa2c7a9237154c52c8" dependencies = [ "linked-hash-map", - "serde", + "serde_core", ] [[package]] @@ -5424,15 +5584,9 @@ checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "litemap" -version = "0.7.5" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" - -[[package]] -name = "litemap" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "litrs" @@ -5452,9 +5606,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] name = "loom" @@ -5466,7 +5620,7 @@ dependencies = [ "generator", "scoped-tls", "tracing", - "tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.22", ] 
[[package]] @@ -5535,7 +5689,7 @@ checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -5584,9 +5738,9 @@ dependencies = [ [[package]] name = "metrics" -version = "0.24.2" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25dea7ac8057892855ec285c440160265225438c3c45072613c25a4b26e98ef5" +checksum = "5d5312e9ba3771cfa961b585728215e3d972c950a3eed9252aa093d6301277e8" dependencies = [ "ahash", "portable-atomic", @@ -5601,7 +5755,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -5611,7 +5765,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd7399781913e5393588a8d8c6a2867bf85fb38eaf2502fdce465aad2dc6f034" dependencies = [ "base64 0.22.1", - "indexmap 2.12.0", + "indexmap 2.12.1", "metrics", "metrics-util", "quanta", @@ -5643,7 +5797,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.15.5", - "indexmap 2.12.0", + "indexmap 2.12.1", "metrics", "ordered-float", "quanta", @@ -5722,14 +5876,14 @@ dependencies = [ [[package]] name = "mio" -version = "1.0.4" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" dependencies = [ "libc", "log", "wasi 0.11.1+wasi-snapshot-preview1", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -5818,6 +5972,30 @@ dependencies = [ "unsigned-varint", ] +[[package]] +name = "nix" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +dependencies = [ + "bitflags 2.10.0", + "cfg-if", + "cfg_aliases", + "libc", +] + +[[package]] +name = "nix" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" +dependencies = [ + "bitflags 2.10.0", + "cfg-if", + "cfg_aliases", + "libc", +] + [[package]] name = "nom" version = "7.1.3" @@ -5854,9 +6032,9 @@ checksum = "5e0826a989adedc2a244799e823aece04662b66609d96af8dff7ac6df9a8925d" [[package]] name = "ntapi" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" +checksum = "c70f219e21142367c70c0b30c6a9e3a14d55b4d12a204d897fbec83a0363f081" dependencies = [ "winapi", ] @@ -5980,7 +6158,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -6007,6 +6185,21 @@ dependencies = [ "smallvec", ] +[[package]] +name = "objc2" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c2599ce0ec54857b29ce62166b0ed9b4f6f1a70ccc9a71165b6154caca8c05" +dependencies = [ + "objc2-encode", +] + +[[package]] +name = "objc2-encode" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef25abbcd74fb2609453eb695bd2f860d389e457f67dc17cafc8b8cbc89d0c33" + [[package]] name = "once_cell" version = "1.21.3" @@ -6030,10 +6223,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] -name = 
"op-alloy-consensus" -version = "0.21.0" +name = "op-alloy" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1fc8aa0e2f5b136d101630be009e4e6dbdd1f17bc3ce670f431511600d2930" +checksum = "e9b8fee21003dd4f076563de9b9d26f8c97840157ef78593cd7f262c5ca99848" +dependencies = [ + "op-alloy-consensus", + "op-alloy-network", + "op-alloy-provider", + "op-alloy-rpc-types", + "op-alloy-rpc-types-engine", +] + +[[package]] +name = "op-alloy-consensus" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "736381a95471d23e267263cfcee9e1d96d30b9754a94a2819148f83379de8a86" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6057,9 +6263,9 @@ checksum = "a79f352fc3893dcd670172e615afef993a41798a1d3fc0db88a3e60ef2e70ecc" [[package]] name = "op-alloy-network" -version = "0.21.0" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c5cca341184dbfcb49dbc124e5958e6a857499f04782907e5d969abb644e0b6" +checksum = "4034183dca6bff6632e7c24c92e75ff5f0eabb58144edb4d8241814851334d47" dependencies = [ "alloy-consensus", "alloy-network", @@ -6072,10 +6278,25 @@ dependencies = [ ] [[package]] -name = "op-alloy-rpc-jsonrpsee" -version = "0.21.0" +name = "op-alloy-provider" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190e9884a69012d4abc26d1c0bc60fe01d57899ab5417c8f38105ffaaab4149b" +checksum = "6753d90efbaa8ea8bcb89c1737408ca85fa60d7adb875049d3f382c063666f86" +dependencies = [ + "alloy-network", + "alloy-primitives", + "alloy-provider", + "alloy-rpc-types-engine", + "alloy-transport", + "async-trait", + "op-alloy-rpc-types-engine", +] + +[[package]] +name = "op-alloy-rpc-jsonrpsee" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1c820ef9c802ebc732281a940bfb6ac2345af4d9fff041cbb64b4b546676686" dependencies = [ "alloy-primitives", "jsonrpsee", @@ -6083,9 +6304,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.21.0" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "274972c3c5e911b6675f6794ea0476b05e0bc1ea7e464f99ec2dc01b76d2eeb6" +checksum = "ddd87c6b9e5b6eee8d6b76f41b04368dca0e9f38d83338e5b00e730c282098a4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6103,9 +6324,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.21.0" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "860edb8d5a8d54bbcdabcbd8642c45b974351ce4e10ed528dd4508eee2a43833" +checksum = "77727699310a18cdeed32da3928c709e2704043b6584ed416397d5da65694efc" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6119,13 +6340,14 @@ dependencies = [ "ethereum_ssz_derive", "op-alloy-consensus", "serde", + "sha2", "snap", "thiserror 2.0.17", ] [[package]] name = "op-reth" -version = "1.8.2" +version = "1.9.3" dependencies = [ "clap", "reth-cli-util", @@ -6143,9 +6365,9 @@ dependencies = [ [[package]] name = "op-revm" -version = "11.2.0" +version = "14.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33ab6a7bbcfffcbf784de78f14593b6389003f5c69653fcffcc163459a37d69" +checksum = "1475a779c73999fc803778524042319691b31f3d6699d2b560c4ed8be1db802a" dependencies = [ "auto_impl", "revm", @@ -6308,7 +6530,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -6335,9 +6557,9 @@ checksum = 
"2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.5.18", "smallvec", - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -6374,9 +6596,9 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "pest" -version = "2.8.3" +version = "2.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "989e7521a040efde50c3ab6bbadafbe15ab6dc042686926be59ac35d74607df4" +checksum = "cbcfd20a6d4eeba40179f05735784ad32bdaef05ce8e8af05f180d45bb3e7e22" dependencies = [ "memchr", "ucd-trie", @@ -6392,37 +6614,17 @@ dependencies = [ "rustc_version 0.4.1", ] -[[package]] -name = "phf" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" -dependencies = [ - "phf_macros 0.11.3", - "phf_shared 0.11.3", -] - [[package]] name = "phf" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1562dc717473dbaa4c1f85a36410e03c047b2e7df7f45ee938fbef64ae7fadf" dependencies = [ - "phf_macros 0.13.1", - "phf_shared 0.13.1", + "phf_macros", + "phf_shared", "serde", ] -[[package]] -name = "phf_generator" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" -dependencies = [ - "phf_shared 0.11.3", - "rand 0.8.5", -] - [[package]] name = "phf_generator" version = "0.13.1" @@ -6430,20 +6632,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "135ace3a761e564ec88c03a77317a7c6b80bb7f7135ef2544dbe054243b89737" dependencies = [ "fastrand 2.3.0", - "phf_shared 0.13.1", -] - -[[package]] -name = "phf_macros" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" -dependencies = [ - "phf_generator 0.11.3", - "phf_shared 0.11.3", - "proc-macro2", - "quote", - "syn 2.0.108", + "phf_shared", ] [[package]] @@ -6452,20 +6641,11 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "812f032b54b1e759ccd5f8b6677695d5268c588701effba24601f6932f8269ef" dependencies = [ - "phf_generator 0.13.1", - "phf_shared 0.13.1", + "phf_generator", + "phf_shared", "proc-macro2", "quote", - "syn 2.0.108", -] - -[[package]] -name = "phf_shared" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" -dependencies = [ - "siphasher", + "syn 2.0.111", ] [[package]] @@ -6494,7 +6674,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -6562,12 +6742,6 @@ dependencies = [ "plotters-backend", ] -[[package]] -name = "pollster" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f3a9f18d041e6d0e102a0a46750538147e5e8992d3b4873aaafee2520b00ce3" - [[package]] name = "polyval" version = "0.6.2" @@ -6582,17 +6756,17 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.11.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" +checksum = 
"f59e70c4aef1e55797c2e8fd94a4f2a973fc972cfde0e0b05f683667b0cd39dd" [[package]] name = "potential_utf" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" dependencies = [ - "zerovec 0.11.4", + "zerovec", ] [[package]] @@ -6627,7 +6801,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -6656,7 +6830,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit 0.23.7", + "toml_edit 0.23.10+spec-1.0.0", ] [[package]] @@ -6678,7 +6852,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -6738,14 +6912,13 @@ dependencies = [ [[package]] name = "proptest" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bb0be07becd10686a0bb407298fb425360a5c44a663774406340c59a22de4ce" +checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40" dependencies = [ "bit-set", "bit-vec", "bitflags 2.10.0", - "lazy_static", "num-traits", "rand 0.9.2", "rand_chacha 0.9.0", @@ -6774,7 +6947,7 @@ checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -6785,7 +6958,7 @@ checksum = "095a99f75c69734802359b682be8daaf8980296731f6470434ea2c652af1dd30" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -6808,7 +6981,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -6909,9 +7082,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.41" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" +checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" dependencies = [ "proc-macro2", ] @@ -7049,6 +7222,16 @@ dependencies = [ "rand_core 0.9.3", ] +[[package]] +name = "rapidhash" +version = "4.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2988730ee014541157f48ce4dcc603940e00915edc3c7f9a8d78092256bb2493" +dependencies = [ + "rand 0.9.2", + "rustversion", +] + [[package]] name = "ratatui" version = "0.29.0" @@ -7114,6 +7297,15 @@ dependencies = [ "bitflags 2.10.0", ] +[[package]] +name = "redox_syscall" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec96166dafa0886eb81fe1c0a388bece180fbef2135f97c1e2cf8302e74b43b5" +dependencies = [ + "bitflags 2.10.0", +] + [[package]] name = "redox_users" version = "0.4.6" @@ -7153,7 +7345,7 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -7187,11 +7379,11 @@ checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "regress" -version = "0.10.4" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "145bb27393fe455dd64d6cbc8d059adfa392590a45eadf079c01b11857e7b010" +checksum = "2057b2325e68a893284d1538021ab90279adac1139957ca2a74426c6f118fb48" dependencies = [ - "hashbrown 0.15.5", + "hashbrown 0.16.1", "memchr", ] @@ -7203,9 +7395,9 @@ checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" [[package]] name = "reqwest" -version = "0.12.24" +version = "0.12.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" +checksum = "3b4c14b2d9afca6a60277086b0cc6a6ae0b568f6f7916c943a8cdc79f8be240f" dependencies = [ "base64 0.22.1", "bytes", @@ -7241,18 +7433,18 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 1.0.3", + "webpki-roots 1.0.4", ] [[package]] name = "resolv-conf" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799" +checksum = "1e061d1b48cb8d38042de4ae0a7a6401009d6143dc80d2e2d6f31f0bdd6470c7" [[package]] name = "reth" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-rpc-types", "aquamarine", @@ -7299,7 +7491,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7322,7 +7514,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -7359,9 +7551,35 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-bench-compare" +version = "1.9.3" +dependencies = [ + "alloy-primitives", + "alloy-provider", + "alloy-rpc-types-eth", + "chrono", + "clap", + "csv", + "ctrlc", + "eyre", + "nix 0.29.0", + "reth-chainspec", + "reth-cli-runner", + "reth-cli-util", + "reth-node-core", + "reth-tracing", + "serde", + "serde_json", + "shellexpand", + "shlex", + "tokio", + "tracing", +] + [[package]] name = "reth-chain-state" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7393,7 +7611,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7413,7 +7631,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-genesis", "clap", @@ -7426,7 +7644,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7445,6 +7663,7 @@ dependencies = [ "humantime", "itertools 0.14.0", "lz4", + "metrics", "proptest", "proptest-arbitrary-interop", "ratatui", @@ -7491,6 +7710,7 @@ dependencies = [ "reth-stages-types", "reth-static-file", "reth-static-file-types", + "reth-tasks", "reth-trie", "reth-trie-common", "reth-trie-db", @@ -7508,7 +7728,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.8.2" +version = "1.9.3" dependencies = [ "reth-tasks", "tokio", @@ -7517,7 +7737,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7537,7 +7757,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7561,17 +7781,17 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.8.2" +version = "1.9.3" dependencies = [ "proc-macro2", 
"quote", "similar-asserts", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] name = "reth-config" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-primitives", "eyre", @@ -7580,6 +7800,7 @@ dependencies = [ "reth-network-types", "reth-prune-types", "reth-stages-types", + "reth-static-file-types", "serde", "tempfile", "toml", @@ -7588,7 +7809,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7600,7 +7821,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7614,7 +7835,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7639,7 +7860,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7673,7 +7894,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7703,7 +7924,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7733,7 +7954,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7749,7 +7970,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7775,7 +7996,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7800,7 +8021,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7828,7 +8049,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7866,7 +8087,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7923,7 +8144,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.8.2" +version = "1.9.3" dependencies = [ "aes", "alloy-primitives", @@ -7935,7 +8156,6 @@ dependencies = [ "ctr", "digest 0.10.7", "futures", - "generic-array", "hmac", "pin-project", "rand 0.8.5", @@ -7948,12 +8168,11 @@ dependencies = [ "tokio-stream", "tokio-util", "tracing", - "typenum", ] [[package]] name = "reth-engine-local" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7967,6 +8186,7 @@ dependencies = [ "reth-optimism-chainspec", "reth-payload-builder", "reth-payload-primitives", + "reth-primitives-traits", "reth-storage-api", "reth-transaction-pool", "tokio", @@ -7976,7 +8196,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8000,7 +8220,7 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.8.2" +version = "1.9.3" dependencies = [ "futures", "pin-project", @@ -8029,9 +8249,10 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", + "alloy-eip7928", 
"alloy-eips", "alloy-evm", "alloy-primitives", @@ -8047,6 +8268,7 @@ dependencies = [ "metrics", "metrics-util", "mini-moka", + "moka", "parking_lot", "proptest", "rand 0.8.5", @@ -8080,6 +8302,7 @@ dependencies = [ "reth-stages", "reth-stages-api", "reth-static-file", + "reth-storage-errors", "reth-tasks", "reth-testing-utils", "reth-tracing", @@ -8100,7 +8323,7 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -8127,7 +8350,7 @@ dependencies = [ [[package]] name = "reth-era" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8149,7 +8372,7 @@ dependencies = [ [[package]] name = "reth-era-downloader" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-primitives", "bytes", @@ -8157,6 +8380,7 @@ dependencies = [ "futures", "futures-util", "reqwest", + "reth-era", "reth-fs-util", "sha2", "tempfile", @@ -8166,7 +8390,7 @@ dependencies = [ [[package]] name = "reth-era-utils" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8192,7 +8416,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.8.2" +version = "1.9.3" dependencies = [ "reth-consensus", "reth-execution-errors", @@ -8202,7 +8426,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8240,7 +8464,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8265,7 +8489,7 @@ dependencies = [ [[package]] name = "reth-ethereum" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", @@ -8305,7 +8529,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.8.2" +version = "1.9.3" dependencies = [ "clap", "eyre", @@ -8321,15 +8545,13 @@ dependencies = [ "reth-node-metrics", "reth-rpc-server-types", "reth-tracing", - "reth-tracing-otlp", "tempfile", "tracing", - "url", ] [[package]] name = "reth-ethereum-consensus" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8345,7 +8567,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8363,7 +8585,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -8376,7 +8598,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8404,7 +8626,7 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8431,7 +8653,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-primitives", "rayon", @@ -8441,7 +8663,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8451,6 +8673,7 @@ dependencies = [ "derive_more", "futures-util", "metrics", + "rayon", "reth-ethereum-forks", "reth-ethereum-primitives", "reth-execution-errors", @@ -8465,7 +8688,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.8.2" +version = 
"1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8489,7 +8712,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-evm", "alloy-primitives", @@ -8501,7 +8724,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8521,7 +8744,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8565,7 +8788,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-eips", "eyre", @@ -8596,7 +8819,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8613,7 +8836,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.8.2" +version = "1.9.3" dependencies = [ "serde", "serde_json", @@ -8622,7 +8845,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8655,7 +8878,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.8.2" +version = "1.9.3" dependencies = [ "bytes", "futures", @@ -8677,7 +8900,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.8.2" +version = "1.9.3" dependencies = [ "bitflags 2.10.0", "byteorder", @@ -8695,7 +8918,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.8.2" +version = "1.9.3" dependencies = [ "bindgen 0.71.1", "cc", @@ -8703,7 +8926,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.8.2" +version = "1.9.3" dependencies = [ "futures", "metrics", @@ -8714,14 +8937,15 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-primitives", + "ipnet", ] [[package]] name = "reth-net-nat" -version = "1.8.2" +version = "1.9.3" dependencies = [ "futures-util", "if-addrs", @@ -8735,7 +8959,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8755,6 +8979,7 @@ dependencies = [ "pin-project", "rand 0.8.5", "rand 0.9.2", + "rayon", "reth-chainspec", "reth-consensus", "reth-discv4", @@ -8795,7 +9020,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8819,7 +9044,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8841,7 +9066,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8858,7 +9083,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-eip2124", "humantime-serde", @@ -8871,7 +9096,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.8.2" +version = "1.9.3" dependencies = [ "anyhow", "bincode 1.3.3", @@ -8889,7 +9114,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -8912,7 +9137,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", 
"alloy-eips", @@ -8925,11 +9150,11 @@ dependencies = [ "fdlimit", "futures", "jsonrpsee", + "parking_lot", "rayon", "reth-basic-payload-builder", "reth-chain-state", "reth-chainspec", - "reth-cli-util", "reth-config", "reth-consensus", "reth-consensus-debug-client", @@ -8983,7 +9208,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8995,6 +9220,7 @@ dependencies = [ "eyre", "futures", "humantime", + "ipnet", "proptest", "rand 0.9.2", "reth-chainspec", @@ -9007,11 +9233,13 @@ dependencies = [ "reth-engine-local", "reth-engine-primitives", "reth-ethereum-forks", + "reth-net-banlist", "reth-net-nat", "reth-network", "reth-network-p2p", "reth-network-peers", "reth-primitives-traits", + "reth-provider", "reth-prune-types", "reth-rpc-convert", "reth-rpc-eth-types", @@ -9026,6 +9254,7 @@ dependencies = [ "serde", "shellexpand", "strum 0.27.2", + "thiserror 2.0.17", "tokio", "toml", "tracing", @@ -9036,7 +9265,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-contract", @@ -9048,10 +9277,12 @@ dependencies = [ "alloy-rpc-types-beacon", "alloy-rpc-types-engine", "alloy-rpc-types-eth", + "alloy-rpc-types-trace", "alloy-signer", "alloy-sol-types", "eyre", "futures", + "jsonrpsee-core", "rand 0.9.2", "reth-chainspec", "reth-db", @@ -9079,17 +9310,22 @@ dependencies = [ "reth-rpc-eth-api", "reth-rpc-eth-types", "reth-rpc-server-types", + "reth-stages-types", "reth-tasks", + "reth-testing-utils", "reth-tracing", "reth-transaction-pool", "revm", + "serde", "serde_json", + "similar-asserts", + "tempfile", "tokio", ] [[package]] name = "reth-node-ethstats" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9112,7 +9348,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9135,7 +9371,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.8.2" +version = "1.9.3" dependencies = [ "eyre", "http", @@ -9157,7 +9393,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.8.2" +version = "1.9.3" dependencies = [ "reth-chainspec", "reth-db-api", @@ -9168,7 +9404,7 @@ dependencies = [ [[package]] name = "reth-op" -version = "1.8.2" +version = "1.9.3" dependencies = [ "reth-chainspec", "reth-cli-util", @@ -9208,13 +9444,14 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-chains", "alloy-consensus", "alloy-eips", "alloy-genesis", "alloy-hardforks", + "alloy-op-hardforks", "alloy-primitives", "derive_more", "miniz_oxide", @@ -9235,7 +9472,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9275,18 +9512,16 @@ dependencies = [ "reth-static-file", "reth-static-file-types", "reth-tracing", - "reth-tracing-otlp", "serde", "tempfile", "tokio", "tokio-util", "tracing", - "url", ] [[package]] name = "reth-optimism-consensus" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9317,7 +9552,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9346,25 +9581,25 @@ dependencies = [ [[package]] name = "reth-optimism-flashblocks" -version = "1.8.2" 
+version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", - "alloy-serde", "brotli", "derive_more", "eyre", "futures-util", "metrics", + "op-alloy-consensus", + "op-alloy-rpc-types-engine", "reth-chain-state", "reth-engine-primitives", "reth-errors", "reth-evm", "reth-execution-types", "reth-metrics", - "reth-optimism-evm", "reth-optimism-payload-builder", "reth-optimism-primitives", "reth-payload-primitives", @@ -9374,7 +9609,6 @@ dependencies = [ "reth-storage-api", "reth-tasks", "ringbuffer", - "serde", "serde_json", "test-case", "tokio", @@ -9385,7 +9619,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-op-hardforks", "alloy-primitives", @@ -9395,11 +9629,12 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-genesis", "alloy-network", + "alloy-op-hardforks", "alloy-primitives", "alloy-rpc-types-engine", "alloy-rpc-types-eth", @@ -9440,6 +9675,7 @@ dependencies = [ "reth-rpc-engine-api", "reth-rpc-eth-types", "reth-rpc-server-types", + "reth-stages-types", "reth-tasks", "reth-tracing", "reth-transaction-pool", @@ -9453,7 +9689,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9463,10 +9699,10 @@ dependencies = [ "alloy-rpc-types-debug", "alloy-rpc-types-engine", "derive_more", + "either", "op-alloy-consensus", "op-alloy-rpc-types-engine", "reth-basic-payload-builder", - "reth-chain-state", "reth-chainspec", "reth-evm", "reth-execution-types", @@ -9492,7 +9728,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9519,11 +9755,12 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-json-rpc", + "alloy-op-hardforks", "alloy-primitives", "alloy-rpc-client", "alloy-rpc-types-debug", @@ -9562,7 +9799,6 @@ dependencies = [ "reth-primitives-traits", "reth-rpc", "reth-rpc-api", - "reth-rpc-convert", "reth-rpc-engine-api", "reth-rpc-eth-api", "reth-rpc-eth-types", @@ -9581,7 +9817,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "reth-codecs", @@ -9593,7 +9829,7 @@ dependencies = [ [[package]] name = "reth-optimism-txpool" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9630,7 +9866,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9650,7 +9886,7 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.8.2" +version = "1.9.3" dependencies = [ "pin-project", "reth-payload-primitives", @@ -9661,8 +9897,9 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.8.2" +version = "1.9.3" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", @@ -9673,7 +9910,9 @@ dependencies = [ "reth-chain-state", "reth-chainspec", "reth-errors", + "reth-execution-types", "reth-primitives-traits", + "reth-trie-common", "serde", "thiserror 2.0.17", "tokio", @@ -9681,7 +9920,7 @@ dependencies = [ [[package]] name = "reth-payload-util" -version = 
"1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9690,7 +9929,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -9699,7 +9938,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9721,7 +9960,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9758,7 +9997,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9793,11 +10032,13 @@ dependencies = [ "reth-storage-api", "reth-storage-errors", "reth-testing-utils", + "reth-tracing", "reth-trie", "reth-trie-db", "revm-database", "revm-database-interface", "revm-state", + "rocksdb", "strum 0.27.2", "tempfile", "tokio", @@ -9806,8 +10047,9 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.8.2" +version = "1.9.3" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "assert_matches", @@ -9824,6 +10066,7 @@ dependencies = [ "reth-provider", "reth-prune-types", "reth-stages", + "reth-stages-types", "reth-static-file-types", "reth-testing-utils", "reth-tokio-util", @@ -9836,11 +10079,11 @@ dependencies = [ [[package]] name = "reth-prune-db" -version = "1.8.2" +version = "1.9.3" [[package]] name = "reth-prune-types" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-primitives", "arbitrary", @@ -9852,13 +10095,14 @@ dependencies = [ "reth-codecs", "serde", "serde_json", + "strum 0.27.2", "thiserror 2.0.17", "toml", ] [[package]] name = "reth-ress-protocol" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9884,7 +10128,7 @@ dependencies = [ [[package]] name = "reth-ress-provider" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9910,7 +10154,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9924,7 +10168,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -9968,6 +10212,7 @@ dependencies = [ "reth-db-api", "reth-engine-primitives", "reth-errors", + "reth-ethereum-engine-primitives", "reth-ethereum-primitives", "reth-evm", "reth-evm-ethereum", @@ -10007,7 +10252,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-eips", "alloy-genesis", @@ -10030,11 +10275,14 @@ dependencies = [ "reth-network-peers", "reth-rpc-eth-api", "reth-trie-common", + "serde", + "serde_json", + "tokio", ] [[package]] name = "reth-rpc-api-testing-util" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10053,7 +10301,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-eips", "alloy-network", @@ -10094,6 +10342,7 @@ dependencies = [ "reth-rpc-server-types", "reth-storage-api", "reth-tasks", + "reth-tokio-util", "reth-tracing", "reth-transaction-pool", "serde", @@ -10108,9 +10357,10 @@ dependencies = [ [[package]] name = "reth-rpc-convert" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", + "alloy-evm", 
"alloy-json-rpc", "alloy-network", "alloy-primitives", @@ -10122,20 +10372,18 @@ dependencies = [ "op-alloy-consensus", "op-alloy-network", "op-alloy-rpc-types", - "op-revm", "reth-ethereum-primitives", "reth-evm", "reth-optimism-primitives", "reth-primitives-traits", "reth-storage-api", - "revm-context", "serde_json", "thiserror 2.0.17", ] [[package]] name = "reth-rpc-e2e-tests" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-genesis", "alloy-rpc-types-engine", @@ -10155,7 +10403,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10166,12 +10414,12 @@ dependencies = [ "jsonrpsee-core", "jsonrpsee-types", "metrics", - "parking_lot", "reth-chainspec", "reth-engine-primitives", "reth-ethereum-engine-primitives", "reth-ethereum-primitives", "reth-metrics", + "reth-network-api", "reth-node-ethereum", "reth-payload-builder", "reth-payload-builder-primitives", @@ -10191,7 +10439,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10234,7 +10482,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10281,7 +10529,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-rpc-types-engine", "http", @@ -10298,7 +10546,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10313,7 +10561,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10358,6 +10606,7 @@ dependencies = [ "reth-stages-api", "reth-static-file", "reth-static-file-types", + "reth-storage-api", "reth-storage-errors", "reth-testing-utils", "reth-trie", @@ -10370,7 +10619,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10379,7 +10628,10 @@ dependencies = [ "auto_impl", "futures-util", "metrics", + "reth-chainspec", "reth-consensus", + "reth-db", + "reth-db-api", "reth-errors", "reth-metrics", "reth-network-p2p", @@ -10399,7 +10651,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-primitives", "arbitrary", @@ -10415,9 +10667,10 @@ dependencies = [ [[package]] name = "reth-stateless" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", + "alloy-genesis", "alloy-primitives", "alloy-rlp", "alloy-rpc-types-debug", @@ -10442,7 +10695,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-primitives", "assert_matches", @@ -10465,19 +10718,21 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-primitives", "clap", "derive_more", + "insta", "reth-nippy-jar", "serde", + "serde_json", "strum 0.27.2", ] [[package]] name = "reth-storage-api" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10495,11 +10750,12 @@ dependencies = [ "reth-storage-errors", "reth-trie-common", "revm-database", + "serde_json", ] [[package]] name = "reth-storage-errors" -version = "1.8.2" +version = "1.9.3" dependencies = [ 
"alloy-eips", "alloy-primitives", @@ -10514,7 +10770,7 @@ dependencies = [ [[package]] name = "reth-storage-rpc-provider" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10543,7 +10799,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.8.2" +version = "1.9.3" dependencies = [ "auto_impl", "dyn-clone", @@ -10560,12 +10816,13 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-genesis", "alloy-primitives", + "alloy-rlp", "rand 0.8.5", "rand 0.9.2", "reth-ethereum-primitives", @@ -10575,7 +10832,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.8.2" +version = "1.9.3" dependencies = [ "tokio", "tokio-stream", @@ -10584,7 +10841,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.8.2" +version = "1.9.3" dependencies = [ "clap", "eyre", @@ -10594,13 +10851,13 @@ dependencies = [ "tracing-appender", "tracing-journald", "tracing-logfmt", - "tracing-subscriber 0.3.20", - "url", + "tracing-samply", + "tracing-subscriber 0.3.22", ] [[package]] name = "reth-tracing-otlp" -version = "1.8.2" +version = "1.9.3" dependencies = [ "clap", "eyre", @@ -10610,13 +10867,13 @@ dependencies = [ "opentelemetry_sdk", "tracing", "tracing-opentelemetry", - "tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.22", "url", ] [[package]] name = "reth-transaction-pool" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10648,7 +10905,7 @@ dependencies = [ "reth-storage-api", "reth-tasks", "reth-tracing", - "revm-interpreter 27.0.2", + "revm-interpreter", "revm-primitives", "rustc-hash", "schnellru", @@ -10664,7 +10921,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10680,6 +10937,7 @@ dependencies = [ "pretty_assertions", "proptest", "proptest-arbitrary-interop", + "rand 0.9.2", "reth-ethereum-primitives", "reth-execution-errors", "reth-metrics", @@ -10697,7 +10955,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -10730,7 +10988,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10755,7 +11013,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10785,7 +11043,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10818,7 +11076,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse-parallel" -version = "1.8.2" +version = "1.9.3" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10847,16 +11105,16 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.8.2" +version = "1.9.3" dependencies = [ "zstd", ] [[package]] name = "revm" -version = "30.2.0" +version = "33.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76df793c6ef3bef8f88f05b3873ebebce1494385a3ce8f58ad2e2e111aa0de11" +checksum = "0c85ed0028f043f87b3c88d4a4cb6f0a76440085523b6a8afe5ff003cf418054" dependencies = [ "revm-bytecode", "revm-context", @@ -10865,7 +11123,7 @@ dependencies = [ "revm-database-interface", "revm-handler", "revm-inspector", - "revm-interpreter 
28.0.0", + "revm-interpreter", "revm-precompile", "revm-primitives", "revm-state", @@ -10873,21 +11131,21 @@ dependencies = [ [[package]] name = "revm-bytecode" -version = "7.0.2" +version = "7.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "451748b17ac78bd2b0748ec472a5392cd78fc0f7d19d528be44770fda28fd6f7" +checksum = "e2c6b5e6e8dd1e28a4a60e5f46615d4ef0809111c9e63208e55b5c7058200fb0" dependencies = [ "bitvec", - "phf 0.13.1", + "phf", "revm-primitives", "serde", ] [[package]] name = "revm-context" -version = "10.1.2" +version = "12.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7adcce0c14cf59b7128de34185a0fbf8f63309539b9263b35ead870d73584114" +checksum = "f038f0c9c723393ac897a5df9140b21cfa98f5753a2cb7d0f28fa430c4118abf" dependencies = [ "bitvec", "cfg-if", @@ -10902,9 +11160,9 @@ dependencies = [ [[package]] name = "revm-context-interface" -version = "11.1.2" +version = "13.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d620a9725e443c171fb195a074331fa4a745fa5cbb0018b4bbf42619e64b563" +checksum = "431c9a14e4ef1be41ae503708fd02d974f80ef1f2b6b23b5e402e8d854d1b225" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -10918,9 +11176,9 @@ dependencies = [ [[package]] name = "revm-database" -version = "9.0.2" +version = "9.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdefd7f40835e992bab40a245124cb1243e6c7a1c4659798827c809a59b0fea9" +checksum = "980d8d6bba78c5dd35b83abbb6585b0b902eb25ea4448ed7bfba6283b0337191" dependencies = [ "alloy-eips", "revm-bytecode", @@ -10932,9 +11190,9 @@ dependencies = [ [[package]] name = "revm-database-interface" -version = "8.0.3" +version = "8.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa488a73ac2738f11478650cdf1a0f263864c09d5f0e9bf6309e891a05323c60" +checksum = "8cce03e3780287b07abe58faf4a7f5d8be7e81321f93ccf3343c8f7755602bae" dependencies = [ "auto_impl", "either", @@ -10945,9 +11203,9 @@ dependencies = [ [[package]] name = "revm-handler" -version = "11.2.0" +version = "14.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d8049b2fbff6636150f4740c95369aa174e41b0383034e0e256cfdffcfcd23" +checksum = "d44f8f6dbeec3fecf9fe55f78ef0a758bdd92ea46cd4f1ca6e2a946b32c367f3" dependencies = [ "auto_impl", "derive-where", @@ -10955,7 +11213,7 @@ dependencies = [ "revm-context", "revm-context-interface", "revm-database-interface", - "revm-interpreter 28.0.0", + "revm-interpreter", "revm-precompile", "revm-primitives", "revm-state", @@ -10964,16 +11222,16 @@ dependencies = [ [[package]] name = "revm-inspector" -version = "11.2.0" +version = "14.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2a21dd773b654ec7e080025eecef4ac84c711150d1bd36acadf0546f471329a" +checksum = "5617e49216ce1ca6c8826bcead0386bc84f49359ef67cde6d189961735659f93" dependencies = [ "auto_impl", "either", "revm-context", "revm-database-interface", "revm-handler", - "revm-interpreter 28.0.0", + "revm-interpreter", "revm-primitives", "revm-state", "serde", @@ -10982,9 +11240,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.31.2" +version = "0.33.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "782c38fa94f99b4b15f1690bffc2c3cbf06a0f460cf163b470d126914b47d343" +checksum = "01def7351cd9af844150b8e88980bcd11304f33ce23c3d7c25f2a8dab87c1345" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -11002,22 
+11260,9 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "27.0.2" +version = "31.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0834fc25c020061f0f801d8de8bb53c88a63631cca5884a6c65b90c85e241138" -dependencies = [ - "revm-bytecode", - "revm-context-interface", - "revm-primitives", - "revm-state", - "serde", -] - -[[package]] -name = "revm-interpreter" -version = "28.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1de5c790122f8ded67992312af8acd41ccfcee629b25b819e10c5b1f69caf57" +checksum = "26ec36405f7477b9dccdc6caa3be19adf5662a7a0dffa6270cdb13a090c077e5" dependencies = [ "revm-bytecode", "revm-context-interface", @@ -11028,9 +11273,9 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "28.1.1" +version = "31.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e57aadd7a2087705f653b5aaacc8ad4f8e851f5d330661e3f4c43b5475bbceae" +checksum = "9a62958af953cc4043e93b5be9b8497df84cc3bd612b865c49a7a7dfa26a84e2" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -11053,9 +11298,9 @@ dependencies = [ [[package]] name = "revm-primitives" -version = "21.0.1" +version = "21.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "536f30e24c3c2bf0d3d7d20fa9cf99b93040ed0f021fd9301c78cddb0dacda13" +checksum = "29e161db429d465c09ba9cbff0df49e31049fe6b549e28eb0b7bd642fcbd4412" dependencies = [ "alloy-primitives", "num_enum", @@ -11065,9 +11310,9 @@ dependencies = [ [[package]] name = "revm-state" -version = "8.0.2" +version = "8.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e6bd5e669b02007872a8ca2643a14e308fe1739ee4475d74122587c3388a06a" +checksum = "7d8be953b7e374dbdea0773cf360debed8df394ea8d82a8b240a6b5da37592fc" dependencies = [ "bitflags 2.10.0", "revm-bytecode", @@ -11165,6 +11410,16 @@ dependencies = [ "byteorder", ] +[[package]] +name = "rocksdb" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddb7af00d2b17dbd07d82c0063e25411959748ff03e8d4f96134c2ff41fce34f" +dependencies = [ + "libc", + "librocksdb-sys", +] + [[package]] name = "rolling-file" version = "0.2.0" @@ -11206,7 +11461,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.108", + "syn 2.0.111", "unicode-ident", ] @@ -11278,6 +11533,15 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver 0.9.0", +] + [[package]] name = "rustc_version" version = "0.3.3" @@ -11324,9 +11588,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.34" +version = "0.23.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a9586e9ee2b4f8fab52a0048ca7334d7024eef48e2cb9407e3497bb7cab7fa7" +checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" dependencies = [ "log", "once_cell", @@ -11351,9 +11615,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.12.0" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +checksum = 
"21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282" dependencies = [ "web-time", "zeroize", @@ -11388,9 +11652,9 @@ checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" [[package]] name = "rustls-webpki" -version = "0.103.7" +version = "0.103.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e10b3f4191e8a80e6b43eebabfac91e5dcecebb27a71f04e820c47ec41d314bf" +checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" dependencies = [ "ring", "rustls-pki-types", @@ -11417,9 +11681,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +checksum = "62049b2877bf12821e8f9ad256ee38fdc31db7387ec2d3b3f403024de2034aea" [[package]] name = "ryu-js" @@ -11459,9 +11723,9 @@ dependencies = [ [[package]] name = "schemars" -version = "1.0.4" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82d20c4491bc164fa2f6c5d44565947a52ad80b9505d8e36f8d54c27c739fcd0" +checksum = "9558e172d4e8533736ba97870c4b2cd63f84b382a3d6eb063da41b91cce17289" dependencies = [ "dyn-clone", "ref-cast", @@ -11571,13 +11835,22 @@ dependencies = [ "libc", ] +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser 0.7.0", +] + [[package]] name = "semver" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" dependencies = [ - "semver-parser", + "semver-parser 0.10.3", ] [[package]] @@ -11590,6 +11863,12 @@ dependencies = [ "serde_core", ] +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + [[package]] name = "semver-parser" version = "0.10.3" @@ -11647,7 +11926,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -11656,7 +11935,7 @@ version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ - "indexmap 2.12.0", + "indexmap 2.12.1", "itoa", "memchr", "ryu", @@ -11698,17 +11977,17 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.15.1" +version = "3.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa66c845eee442168b2c8134fec70ac50dc20e760769c8ba0ad1319ca1959b04" +checksum = "4fa237f2807440d238e0364a218270b98f767a00d3dada77b1c53ae88940e2e7" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.12.0", + "indexmap 2.12.1", "schemars 0.9.0", - "schemars 1.0.4", + "schemars 1.1.0", "serde_core", "serde_json", "serde_with_macros", @@ -11717,14 +11996,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.15.1" +version = "3.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91a903660542fced4e99881aa481bdbaec1634568ee02e0b8bd57c64cb38955" +checksum = "52a8e3ca0ca629121f70ab50f95249e5a6f925cc0f6ffe8256c45b728875706c" dependencies = [ "darling 
0.21.3", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -11815,9 +12094,9 @@ dependencies = [ [[package]] name = "signal-hook-mio" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34db1a06d485c9142248b7a054f034b349b212551f3dfd19c94d45a754a217cd" +checksum = "b75a19a7a740b25bc7944bdee6172368f988763b744e3d4dfe753f6b4ece40cc" dependencies = [ "libc", "mio", @@ -11826,9 +12105,9 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.4.6" +version = "1.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" +checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad" dependencies = [ "libc", ] @@ -11845,9 +12124,9 @@ dependencies = [ [[package]] name = "simd-adler32" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" +checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" [[package]] name = "similar" @@ -11915,6 +12194,15 @@ version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" +[[package]] +name = "small_btree" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ba60d2df92ba73864714808ca68c059734853e6ab722b40e1cf543ebb3a057a" +dependencies = [ + "arrayvec", +] + [[package]] name = "smallvec" version = "1.15.1" @@ -11986,6 +12274,12 @@ dependencies = [ "sha1", ] +[[package]] +name = "spin" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" + [[package]] name = "spki" version = "0.7.3" @@ -11996,12 +12290,6 @@ dependencies = [ "der", ] -[[package]] -name = "sptr" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" - [[package]] name = "stable_deref_trait" version = "1.2.1" @@ -12048,7 +12336,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -12060,7 +12348,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -12082,9 +12370,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.108" +version = "2.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da58917d35242480a05c2897064da0a80589a2a0476c9a3f2fdc83b53502e917" +checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" dependencies = [ "proc-macro2", "quote", @@ -12093,14 +12381,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff790eb176cc81bb8936aed0f7b9f14fc4670069a2d371b3e3b0ecce908b2cb3" +checksum = "f6b1d2e2059056b66fec4a6bb2b79511d5e8d76196ef49c38996f4b48db7662f" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -12120,7 +12408,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -12136,6 +12424,12 @@ dependencies = [ "windows 
0.57.0", ] +[[package]] +name = "tag_ptr" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0e973b34477b7823833469eb0f5a3a60370fef7a453e02d751b59180d0a5a05" + [[package]] name = "tagptr" version = "0.2.0" @@ -12201,7 +12495,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -12212,15 +12506,15 @@ checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", "test-case-core", ] [[package]] name = "test-fuzz" -version = "7.2.4" +version = "7.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6696b1bcee3edb0553566f632c31b3b18fda42cf4d529327ca47f230c4acd3ab" +checksum = "11e5c77910b1d5b469a342be541cf44933f0ad2c4b8d5acb32ee46697fd60546" dependencies = [ "serde", "serde_combinators", @@ -12231,9 +12525,9 @@ dependencies = [ [[package]] name = "test-fuzz-internal" -version = "7.2.4" +version = "7.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5988511fdb342582013a17a4263e994bce92828a1bae039f92a2f05a5f95ce78" +checksum = "4d25f2f0ee315b130411a98570dd128dfe344bfaa0a28bf33d38f4a1fe85f39b" dependencies = [ "bincode 2.0.1", "cargo_metadata 0.19.2", @@ -12242,9 +12536,9 @@ dependencies = [ [[package]] name = "test-fuzz-macro" -version = "7.2.4" +version = "7.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8893e583c5af79a67761a9285535d26612cb1617fcbf388c3abc0c1d35a0b89" +checksum = "b8c03ba0a9e3e4032f94d71c85e149af147843c6f212e4ca4383542d606b04a6" dependencies = [ "darling 0.21.3", "heck", @@ -12252,14 +12546,14 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] name = "test-fuzz-runtime" -version = "7.2.4" +version = "7.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47be06afdb9cb50c76ef938e2e4bda2e28e1cbb4d3d305603d57a5e374a6d6e7" +checksum = "f9a4ac481aa983d386e857a7be0006c2f0ef26e0c5326bbc7262f73c2891b91d" dependencies = [ "hex", "num-traits", @@ -12300,7 +12594,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -12311,7 +12605,7 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -12408,22 +12702,13 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.7.6" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" dependencies = [ "displaydoc", - "zerovec 0.10.4", -] - -[[package]] -name = "tinystr" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" -dependencies = [ - "displaydoc", - "zerovec 0.11.4", + "serde_core", + "zerovec", ] [[package]] @@ -12476,7 +12761,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -12520,9 +12805,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.16" +version = "0.7.17" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" +checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" dependencies = [ "bytes", "futures-core", @@ -12556,9 +12841,9 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.7.3" +version = "0.7.5+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" dependencies = [ "serde_core", ] @@ -12569,7 +12854,7 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.12.0", + "indexmap 2.12.1", "serde", "serde_spanned", "toml_datetime 0.6.11", @@ -12579,21 +12864,21 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.23.7" +version = "0.23.10+spec-1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" +checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" dependencies = [ - "indexmap 2.12.0", - "toml_datetime 0.7.3", + "indexmap 2.12.1", + "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "winnow", ] [[package]] name = "toml_parser" -version = "1.0.4" +version = "1.0.6+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" dependencies = [ "winnow", ] @@ -12650,7 +12935,7 @@ dependencies = [ "futures-core", "futures-util", "hdrhistogram", - "indexmap 2.12.0", + "indexmap 2.12.1", "pin-project-lite", "slab", "sync_wrapper", @@ -12663,9 +12948,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.6" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ "async-compression", "base64 0.22.1", @@ -12706,9 +12991,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.41" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "log", "pin-project-lite", @@ -12718,32 +13003,32 @@ dependencies = [ [[package]] name = "tracing-appender" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" +checksum = "786d480bce6247ab75f005b14ae1624ad978d3029d9113f0a22fa1ac773faeaf" dependencies = [ "crossbeam-channel", - "thiserror 1.0.69", + "thiserror 2.0.17", "time", - "tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.22", ] [[package]] name = "tracing-attributes" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +checksum = 
"7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] name = "tracing-core" -version = "0.1.34" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" dependencies = [ "once_cell", "valuable", @@ -12761,13 +13046,13 @@ dependencies = [ [[package]] name = "tracing-journald" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0b4143302cf1022dac868d521e36e8b27691f72c84b3311750d5188ebba657" +checksum = "2d3a81ed245bfb62592b1e2bc153e77656d94ee6a0497683a65a12ccaf2438d0" dependencies = [ "libc", "tracing-core", - "tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.22", ] [[package]] @@ -12790,7 +13075,7 @@ dependencies = [ "time", "tracing", "tracing-core", - "tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.22", ] [[package]] @@ -12808,10 +13093,26 @@ dependencies = [ "tracing", "tracing-core", "tracing-log", - "tracing-subscriber 0.3.20", + "tracing-subscriber 0.3.22", "web-time", ] +[[package]] +name = "tracing-samply" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c175f7ecc002b6ef04776a39f440503e4e788790ddbdbfac8259b7a069526334" +dependencies = [ + "cfg-if", + "itoa", + "libc", + "mach2", + "memmap2", + "smallvec", + "tracing-core", + "tracing-subscriber 0.3.22", +] + [[package]] name = "tracing-serde" version = "0.2.0" @@ -12833,9 +13134,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.20" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" +checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" dependencies = [ "matchers", "nu-ansi-term", @@ -12854,9 +13155,9 @@ dependencies = [ [[package]] name = "tracy-client" -version = "0.18.2" +version = "0.18.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef54005d3d760186fd662dad4b7bb27ecd5531cdef54d1573ebd3f20a9205ed7" +checksum = "91d722a05fe49b31fef971c4732a7d4aa6a18283d9ba46abddab35f484872947" dependencies = [ "loom", "once_cell", @@ -12866,9 +13167,9 @@ dependencies = [ [[package]] name = "tracy-client-sys" -version = "0.26.1" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "319c70195101a93f56db4c74733e272d720768e13471f400c78406a326b172b0" +checksum = "2fb391ac70462b3097a755618fbf9c8f95ecc1eb379a414f7b46f202ed10db1f" dependencies = [ "cc", "windows-targets 0.52.6", @@ -12896,7 +13197,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -12990,9 +13291,9 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "462eeb75aeb73aea900253ce739c8e18a67423fadf006037cd3ff27e82748a06" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" [[package]] name = "unicode-segmentation" @@ -13095,9 +13396,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.18.1" +version = "1.19.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" +checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" dependencies = [ "getrandom 0.3.4", "js-sys", @@ -13177,7 +13478,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -13237,9 +13538,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.104" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1da10c01ae9f1ae40cbfac0bac3b1e724b320abfcf52229f80b547c0d250e2d" +checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" dependencies = [ "cfg-if", "once_cell", @@ -13248,25 +13549,11 @@ dependencies = [ "wasm-bindgen-shared", ] -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.104" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "671c9a5a66f49d8a47345ab942e2cb93c7d1d0339065d4f8139c486121b43b19" -dependencies = [ - "bumpalo", - "log", - "proc-macro2", - "quote", - "syn 2.0.108", - "wasm-bindgen-shared", -] - [[package]] name = "wasm-bindgen-futures" -version = "0.4.54" +version = "0.4.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e038d41e478cc73bae0ff9b36c60cff1c98b8f38f8d7e8061e79ee63608ac5c" +checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" dependencies = [ "cfg-if", "js-sys", @@ -13277,9 +13564,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.104" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ca60477e4c59f5f2986c50191cd972e3a50d8a95603bc9434501cf156a9a119" +checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -13287,22 +13574,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.104" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f07d2f20d4da7b26400c9f4a0511e6e0345b040694e8a75bd41d578fa4421d7" +checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" dependencies = [ + "bumpalo", "proc-macro2", "quote", - "syn 2.0.108", - "wasm-bindgen-backend", + "syn 2.0.111", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.104" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bad67dc8b2a1a6e5448428adec4c3e84c43e561d8c9ee8a9e5aabeb193ec41d1" +checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" dependencies = [ "unicode-ident", ] @@ -13336,9 +13623,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.81" +version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9367c417a924a74cae129e6a2ae3b47fabb1f8995595ab474029da749a8be120" +checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" dependencies = [ "js-sys", "wasm-bindgen", @@ -13360,14 +13647,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75c7f0ef91146ebfb530314f5f1d24528d7f0767efbfd31dce919275413e393e" dependencies = [ - "webpki-root-certs 1.0.3", + "webpki-root-certs 1.0.4", ] [[package]] name = "webpki-root-certs" -version = "1.0.3" +version = "1.0.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d651ec480de84b762e7be71e6efa7461699c19d9e2c272c8d93455f567786e" +checksum = "ee3e3b5f5e80bc89f30ce8d0343bf4e5f12341c51f3e26cbeecbc7c85443e85b" dependencies = [ "rustls-pki-types", ] @@ -13378,14 +13665,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" dependencies = [ - "webpki-roots 1.0.3", + "webpki-roots 1.0.4", ] [[package]] name = "webpki-roots" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32b130c0d2d49f8b6889abc456e795e82525204f27c42cf767cf0d7734e089b8" +checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e" dependencies = [ "rustls-pki-types", ] @@ -13437,38 +13724,16 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows" -version = "0.61.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" -dependencies = [ - "windows-collections 0.2.0", - "windows-core 0.61.2", - "windows-future 0.2.1", - "windows-link 0.1.3", - "windows-numerics 0.2.0", -] - [[package]] name = "windows" version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "527fadee13e0c05939a6a05d5bd6eec6cd2e3dbd648b9f8e447c6518133d8580" dependencies = [ - "windows-collections 0.3.2", + "windows-collections", "windows-core 0.62.2", - "windows-future 0.3.2", - "windows-numerics 0.3.1", -] - -[[package]] -name = "windows-collections" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8" -dependencies = [ - "windows-core 0.61.2", + "windows-future", + "windows-numerics", ] [[package]] @@ -13492,19 +13757,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-core" -version = "0.61.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" -dependencies = [ - "windows-implement 0.60.2", - "windows-interface 0.59.3", - "windows-link 0.1.3", - "windows-result 0.3.4", - "windows-strings 0.4.2", -] - [[package]] name = "windows-core" version = "0.62.2" @@ -13513,20 +13765,9 @@ checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ "windows-implement 0.60.2", "windows-interface 0.59.3", - "windows-link 0.2.1", + "windows-link", "windows-result 0.4.1", - "windows-strings 0.5.1", -] - -[[package]] -name = "windows-future" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" -dependencies = [ - "windows-core 0.61.2", - "windows-link 0.1.3", - "windows-threading 0.1.0", + "windows-strings", ] [[package]] @@ -13536,8 +13777,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d6f90251fe18a279739e78025bd6ddc52a7e22f921070ccdc67dde84c605cb" dependencies = [ "windows-core 0.62.2", - "windows-link 0.2.1", - "windows-threading 0.2.1", + "windows-link", + "windows-threading", ] [[package]] @@ -13548,7 +13789,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -13559,7 +13800,7 @@ checksum = 
"053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -13570,7 +13811,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -13581,31 +13822,15 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] -[[package]] -name = "windows-link" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" - [[package]] name = "windows-link" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" -[[package]] -name = "windows-numerics" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1" -dependencies = [ - "windows-core 0.61.2", - "windows-link 0.1.3", -] - [[package]] name = "windows-numerics" version = "0.3.1" @@ -13613,7 +13838,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e2e40844ac143cdb44aead537bbf727de9b044e107a0f1220392177d15b0f26" dependencies = [ "windows-core 0.62.2", - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -13625,31 +13850,13 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-result" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" -dependencies = [ - "windows-link 0.1.3", -] - [[package]] name = "windows-result" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ - "windows-link 0.2.1", -] - -[[package]] -name = "windows-strings" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" -dependencies = [ - "windows-link 0.1.3", + "windows-link", ] [[package]] @@ -13658,7 +13865,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -13712,7 +13919,7 @@ version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" dependencies = [ - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -13767,7 +13974,7 @@ version = "0.53.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" dependencies = [ - "windows-link 0.2.1", + "windows-link", "windows_aarch64_gnullvm 0.53.1", "windows_aarch64_msvc 0.53.1", "windows_i686_gnu 0.53.1", @@ -13778,22 +13985,13 @@ dependencies = [ "windows_x86_64_msvc 0.53.1", ] -[[package]] -name = "windows-threading" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6" -dependencies = [ - "windows-link 0.1.3", -] - [[package]] 
name = "windows-threading" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3949bd5b99cafdf1c7ca86b43ca564028dfe27d66958f2470940f73d86d75b37" dependencies = [ - "windows-link 0.2.1", + "windows-link", ] [[package]] @@ -13978,9 +14176,9 @@ checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] name = "winnow" -version = "0.7.13" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" +checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" dependencies = [ "memchr", ] @@ -14009,15 +14207,9 @@ checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" [[package]] name = "writeable" -version = "0.5.5" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" - -[[package]] -name = "writeable" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" [[package]] name = "ws_stream_wasm" @@ -14057,6 +14249,12 @@ dependencies = [ "rustix 1.1.2", ] +[[package]] +name = "xsum" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0637d3a5566a82fa5214bae89087bc8c9fb94cd8e8a3c07feb691bb8d9c632db" + [[package]] name = "yansi" version = "1.0.1" @@ -14065,70 +14263,45 @@ checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" -version = "0.7.5" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" dependencies = [ - "serde", "stable_deref_trait", - "yoke-derive 0.7.5", - "zerofrom", -] - -[[package]] -name = "yoke" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" -dependencies = [ - "serde", - "stable_deref_trait", - "yoke-derive 0.8.0", + "yoke-derive", "zerofrom", ] [[package]] name = "yoke-derive" -version = "0.7.5" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", - "synstructure", -] - -[[package]] -name = "yoke-derive" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.108", + "syn 2.0.111", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.27" +version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" +checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.27" +version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" +checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] @@ -14148,7 +14321,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", "synstructure", ] @@ -14169,62 +14342,41 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] name = "zerotrie" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" dependencies = [ "displaydoc", - "yoke 0.8.0", + "yoke", "zerofrom", ] [[package]] name = "zerovec" -version = "0.10.4" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" dependencies = [ - "yoke 0.7.5", + "serde", + "yoke", "zerofrom", - "zerovec-derive 0.10.3", -] - -[[package]] -name = "zerovec" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b" -dependencies = [ - "yoke 0.8.0", - "zerofrom", - "zerovec-derive 0.11.1", + "zerovec-derive", ] [[package]] name = "zerovec-derive" -version = "0.10.3" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.108", -] - -[[package]] -name = "zerovec-derive" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.108", + "syn 2.0.111", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 324135b223..224c784dbd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.8.2" +version = "1.9.3" edition = "2024" rust-version = "1.88" license = "MIT OR Apache-2.0" @@ -10,6 +10,7 @@ exclude = [".github/"] [workspace] members = [ "bin/reth-bench/", + "bin/reth-bench-compare/", "bin/reth/", "crates/storage/rpc-provider/", "crates/chain-state/", @@ -147,10 +148,12 @@ members = [ "examples/custom-node/", "examples/custom-engine-types/", "examples/custom-evm/", + "examples/custom-hardforks/", "examples/custom-inspector/", "examples/custom-node-components/", "examples/custom-payload-builder/", "examples/custom-rlpx-subprotocol", + "examples/custom-rpc-middleware", "examples/custom-node", "examples/db-access", "examples/engine-api-access", @@ -339,6 +342,7 @@ reth = { path = "bin/reth" } reth-storage-rpc-provider = { path = "crates/storage/rpc-provider" } reth-basic-payload-builder = { path = "crates/payload/basic" } reth-bench = { path = "bin/reth-bench" } +reth-bench-compare = { path = "bin/reth-bench-compare" } reth-chain-state = { path = "crates/chain-state" } reth-chainspec = { path = "crates/chainspec", default-features = false } 
reth-cli = { path = "crates/cli/cli" } @@ -372,11 +376,11 @@ reth-era-utils = { path = "crates/era-utils" } reth-errors = { path = "crates/errors" } reth-eth-wire = { path = "crates/net/eth-wire" } reth-eth-wire-types = { path = "crates/net/eth-wire-types" } -reth-ethereum-cli = { path = "crates/ethereum/cli" } +reth-ethereum-payload-builder = { path = "crates/ethereum/payload" } +reth-ethereum-cli = { path = "crates/ethereum/cli", default-features = false } reth-ethereum-consensus = { path = "crates/ethereum/consensus", default-features = false } reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives", default-features = false } reth-ethereum-forks = { path = "crates/ethereum/hardforks", default-features = false } -reth-ethereum-payload-builder = { path = "crates/ethereum/payload" } reth-ethereum-primitives = { path = "crates/ethereum/primitives", default-features = false } reth-ethereum = { path = "crates/ethereum/reth" } reth-etl = { path = "crates/etl" } @@ -413,7 +417,7 @@ reth-optimism-node = { path = "crates/optimism/node" } reth-node-types = { path = "crates/node/types" } reth-op = { path = "crates/optimism/reth", default-features = false } reth-optimism-chainspec = { path = "crates/optimism/chainspec", default-features = false } -reth-optimism-cli = { path = "crates/optimism/cli" } +reth-optimism-cli = { path = "crates/optimism/cli", default-features = false } reth-optimism-consensus = { path = "crates/optimism/consensus", default-features = false } reth-optimism-forks = { path = "crates/optimism/hardforks", default-features = false } reth-optimism-payload-builder = { path = "crates/optimism/payload" } @@ -469,68 +473,66 @@ reth-ress-protocol = { path = "crates/ress/protocol" } reth-ress-provider = { path = "crates/ress/provider" } # revm -revm = { version = "30.1.1", default-features = false } -revm-bytecode = { version = "7.0.2", default-features = false } -revm-database = { version = "9.0.0", default-features = false } -revm-state = { version = "8.0.0", default-features = false } -revm-primitives = { version = "21.0.0", default-features = false } -revm-interpreter = { version = "27.0.0", default-features = false } -revm-inspector = { version = "11.1.0", default-features = false } -revm-context = { version = "10.1.0", default-features = false } -revm-context-interface = { version = "11.1.0", default-features = false } -revm-database-interface = { version = "8.0.1", default-features = false } -op-revm = { version = "11.2.0", default-features = false } -revm-inspectors = "0.31.0" +revm = { version = "33.1.0", default-features = false } +revm-bytecode = { version = "7.1.1", default-features = false } +revm-database = { version = "9.0.5", default-features = false } +revm-state = { version = "8.1.1", default-features = false } +revm-primitives = { version = "21.0.2", default-features = false } +revm-interpreter = { version = "31.1.0", default-features = false } +revm-database-interface = { version = "8.0.5", default-features = false } +op-revm = { version = "14.1.0", default-features = false } +revm-inspectors = "0.33.2" # eth alloy-chains = { version = "0.2.5", default-features = false } alloy-dyn-abi = "1.4.1" alloy-eip2124 = { version = "0.2.0", default-features = false } -alloy-evm = { version = "0.22.4", default-features = false } -alloy-primitives = { version = "1.4.1", default-features = false, features = ["map-foldhash"] } +alloy-eip7928 = { version = "0.1.0" } +alloy-evm = { version = "0.25.1", default-features = false } +alloy-primitives = { version = 
"1.5.0", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } -alloy-sol-macro = "1.4.1" -alloy-sol-types = { version = "1.4.1", default-features = false } +alloy-sol-macro = "1.5.0" +alloy-sol-types = { version = "1.5.0", default-features = false } alloy-trie = { version = "0.9.1", default-features = false } -alloy-hardforks = "0.4.0" +alloy-hardforks = "0.4.5" -alloy-consensus = { version = "1.0.41", default-features = false } -alloy-contract = { version = "1.0.41", default-features = false } -alloy-eips = { version = "1.0.41", default-features = false } -alloy-genesis = { version = "1.0.41", default-features = false } -alloy-json-rpc = { version = "1.0.41", default-features = false } -alloy-network = { version = "1.0.41", default-features = false } -alloy-network-primitives = { version = "1.0.41", default-features = false } -alloy-provider = { version = "1.0.41", features = ["reqwest"], default-features = false } -alloy-pubsub = { version = "1.0.41", default-features = false } -alloy-rpc-client = { version = "1.0.41", default-features = false } -alloy-rpc-types = { version = "1.0.41", features = ["eth"], default-features = false } -alloy-rpc-types-admin = { version = "1.0.41", default-features = false } -alloy-rpc-types-anvil = { version = "1.0.41", default-features = false } -alloy-rpc-types-beacon = { version = "1.0.41", default-features = false } -alloy-rpc-types-debug = { version = "1.0.41", default-features = false } -alloy-rpc-types-engine = { version = "1.0.41", default-features = false } -alloy-rpc-types-eth = { version = "1.0.41", default-features = false } -alloy-rpc-types-mev = { version = "1.0.41", default-features = false } -alloy-rpc-types-trace = { version = "1.0.41", default-features = false } -alloy-rpc-types-txpool = { version = "1.0.41", default-features = false } -alloy-serde = { version = "1.0.41", default-features = false } -alloy-signer = { version = "1.0.41", default-features = false } -alloy-signer-local = { version = "1.0.41", default-features = false } -alloy-transport = { version = "1.0.41" } -alloy-transport-http = { version = "1.0.41", features = ["reqwest-rustls-tls"], default-features = false } -alloy-transport-ipc = { version = "1.0.41", default-features = false } -alloy-transport-ws = { version = "1.0.41", default-features = false } +alloy-consensus = { version = "1.1.3", default-features = false } +alloy-contract = { version = "1.1.3", default-features = false } +alloy-eips = { version = "1.1.3", default-features = false } +alloy-genesis = { version = "1.1.3", default-features = false } +alloy-json-rpc = { version = "1.1.3", default-features = false } +alloy-network = { version = "1.1.3", default-features = false } +alloy-network-primitives = { version = "1.1.3", default-features = false } +alloy-provider = { version = "1.1.3", features = ["reqwest", "debug-api"], default-features = false } +alloy-pubsub = { version = "1.1.3", default-features = false } +alloy-rpc-client = { version = "1.1.3", default-features = false } +alloy-rpc-types = { version = "1.1.3", features = ["eth"], default-features = false } +alloy-rpc-types-admin = { version = "1.1.3", default-features = false } +alloy-rpc-types-anvil = { version = "1.1.3", default-features = false } +alloy-rpc-types-beacon = { version = "1.1.3", default-features = false } +alloy-rpc-types-debug = { version = "1.1.3", default-features = false } +alloy-rpc-types-engine = { version = "1.1.3", default-features = false } 
+alloy-rpc-types-eth = { version = "1.1.3", default-features = false } +alloy-rpc-types-mev = { version = "1.1.3", default-features = false } +alloy-rpc-types-trace = { version = "1.1.3", default-features = false } +alloy-rpc-types-txpool = { version = "1.1.3", default-features = false } +alloy-serde = { version = "1.1.3", default-features = false } +alloy-signer = { version = "1.1.3", default-features = false } +alloy-signer-local = { version = "1.1.3", default-features = false } +alloy-transport = { version = "1.1.3" } +alloy-transport-http = { version = "1.1.3", features = ["reqwest-rustls-tls"], default-features = false } +alloy-transport-ipc = { version = "1.1.3", default-features = false } +alloy-transport-ws = { version = "1.1.3", default-features = false } # op -alloy-op-evm = { version = "0.22.4", default-features = false } -alloy-op-hardforks = "0.4.0" -op-alloy-rpc-types = { version = "0.21.0", default-features = false } -op-alloy-rpc-types-engine = { version = "0.21.0", default-features = false } -op-alloy-network = { version = "0.21.0", default-features = false } -op-alloy-consensus = { version = "0.21.0", default-features = false } -op-alloy-rpc-jsonrpsee = { version = "0.21.0", default-features = false } +alloy-op-evm = { version = "0.25.0", default-features = false } +alloy-op-hardforks = "0.4.4" +op-alloy-rpc-types = { version = "0.23.1", default-features = false } +op-alloy-rpc-types-engine = { version = "0.23.1", default-features = false } +op-alloy-network = { version = "0.23.1", default-features = false } +op-alloy-consensus = { version = "0.23.1", default-features = false } +op-alloy-rpc-jsonrpsee = { version = "0.23.1", default-features = false } op-alloy-flz = { version = "0.13.1", default-features = false } # misc @@ -552,8 +554,6 @@ dirs-next = "2.0.0" dyn-clone = "1.0.17" eyre = "0.6" fdlimit = "0.3.0" -# pinned until downstream crypto libs migrate to 1.0 because 0.14.8 marks all types as deprecated -generic-array = "=0.14.7" humantime = "2.1" humantime-serde = "1.1" itertools = { version = "0.14", default-features = false } @@ -574,6 +574,7 @@ serde_json = { version = "1.0", default-features = false, features = ["alloc"] } serde_with = { version = "3", default-features = false, features = ["macros"] } sha2 = { version = "0.10", default-features = false } shellexpand = "3.0.0" +shlex = "1.3" smallvec = "1" strum = { version = "0.27", default-features = false } strum_macros = "0.27" @@ -586,6 +587,7 @@ url = { version = "2.3", default-features = false } zstd = "0.13" byteorder = "1" mini-moka = "0.10" +moka = "0.12" tar-no-std = { version = "0.3.2", default-features = false } miniz_oxide = { version = "0.8.4", default-features = false } chrono = "0.4.41" @@ -623,8 +625,8 @@ tower = "0.5" tower-http = "0.6" # p2p -discv5 = "0.9" -if-addrs = "0.13" +discv5 = "0.10" +if-addrs = "0.14" # rpc jsonrpsee = "0.26.0" @@ -648,11 +650,14 @@ secp256k1 = { version = "0.30", default-features = false, features = ["global-co rand_08 = { package = "rand", version = "0.8" } # for eip-4844 -c-kzg = "2.1.4" +c-kzg = "2.1.5" # config toml = "0.8" +# rocksdb +rocksdb = { version = "0.24" } + # otlp obs opentelemetry_sdk = "0.31" opentelemetry = "0.31" @@ -664,6 +669,7 @@ tracing-opentelemetry = "0.32" arbitrary = "1.3" assert_matches = "1.5.0" criterion = { package = "codspeed-criterion-compat", version = "2.7" } +insta = "1.41" proptest = "1.7" proptest-derive = "0.5" similar-asserts = { version = "1.5.0", features = ["serde"] } @@ -694,6 +700,7 @@ concat-kdf = "0.1.0" 
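`shlex`, added to the workspace dependencies above, is there because the benchmark-comparison tool accepts whole argument strings (for example `--baseline-args "--debug.tip 0xabc..."`) that must be split shell-style, preserving quoting, before being appended to the spawned node command. A sketch of that splitting step, assuming `shlex::split`; the name `parse_args_string` echoes a helper referenced later in this diff, but this body is an assumption rather than the actual implementation:

```rust
/// Split a user-provided argument string into separate argv entries,
/// honoring shell-style quoting (so `--debug.tip "0xabc def"` stays one value).
/// Sketch only; the real helper in reth-bench-compare may differ.
fn parse_args_string(s: &str) -> Vec<String> {
    // `shlex::split` returns None on unbalanced quotes; fall back to plain
    // whitespace splitting rather than dropping the arguments entirely.
    shlex::split(s).unwrap_or_else(|| s.split_whitespace().map(str::to_string).collect())
}
```

Plain whitespace splitting alone would break any flag whose value contains spaces, which is why the quoting-aware crate is pulled in.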
crossbeam-channel = "0.5.13" crossterm = "0.28.0" csv = "1.3.0" +ctrlc = "3.4" ctr = "0.9.2" data-encoding = "2" delegate = "0.13" @@ -723,6 +730,7 @@ socket2 = { version = "0.5", default-features = false } sysinfo = { version = "0.33", default-features = false } tracing-journald = "0.3" tracing-logfmt = "0.3.3" +tracing-samply = "0.1" tracing-subscriber = { version = "0.3", default-features = false } triehash = "0.8" typenum = "1.15.0" @@ -731,6 +739,9 @@ visibility = "0.1.1" walkdir = "2.3.3" vergen-git2 = "1.0.5" +# networking +ipnet = "2.11" + # [patch.crates-io] # alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } # alloy-contract = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" } @@ -773,3 +784,6 @@ vergen-git2 = "1.0.5" # jsonrpsee-server = { git = "https://github.com/paradigmxyz/jsonrpsee", branch = "matt/make-rpc-service-pub" } # jsonrpsee-http-client = { git = "https://github.com/paradigmxyz/jsonrpsee", branch = "matt/make-rpc-service-pub" } # jsonrpsee-types = { git = "https://github.com/paradigmxyz/jsonrpsee", branch = "matt/make-rpc-service-pub" } + +# alloy-evm = { git = "https://github.com/alloy-rs/evm", rev = "a69f0b45a6b0286e16072cb8399e02ce6ceca353" } +# alloy-op-evm = { git = "https://github.com/alloy-rs/evm", rev = "a69f0b45a6b0286e16072cb8399e02ce6ceca353" } diff --git a/Dockerfile b/Dockerfile index fc97c160bb..22fb65ffbc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,7 +7,7 @@ LABEL org.opencontainers.image.source=https://github.com/paradigmxyz/reth LABEL org.opencontainers.image.licenses="MIT OR Apache-2.0" # Install system dependencies -RUN apt-get update && apt-get -y upgrade && apt-get install -y libclang-dev pkg-config +RUN apt-get update && apt-get install -y libclang-dev pkg-config # Builds a cargo-chef plan FROM chef AS planner @@ -18,7 +18,7 @@ FROM chef AS builder COPY --from=planner /app/recipe.json recipe.json # Build profile, release by default -ARG BUILD_PROFILE=release +ARG BUILD_PROFILE=maxperf ENV BUILD_PROFILE=$BUILD_PROFILE # Extra Cargo flags diff --git a/Dockerfile.reproducible b/Dockerfile.reproducible index 602b9b857c..28b18906bb 100644 --- a/Dockerfile.reproducible +++ b/Dockerfile.reproducible @@ -1,20 +1,25 @@ -ARG RUST_VERSION=1 +ARG RUST_TOOLCHAIN=1.89.0 +FROM docker.io/rust:$RUST_TOOLCHAIN-trixie AS builder -FROM rust:$RUST_VERSION-bookworm AS builder - -RUN apt-get update && apt-get install -y \ - git \ - libclang-dev=1:14.0-55.7~deb12u1 - -# Copy the project to the container -COPY ./ /app +ARG PROFILE +ARG VERSION +# Switch to snapshot repository to pin dependencies +RUN sed -i '/^# http/{N;s|^# \(http[^ ]*\)\nURIs: .*|# \1\nURIs: \1|}' /etc/apt/sources.list.d/debian.sources +RUN apt-get -o Acquire::Check-Valid-Until=false update && \ + apt-get install -y \ + libjemalloc-dev \ + libclang-dev \ + mold WORKDIR /app +COPY . . 
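`ctrlc`, added to the workspace above and listed under process management in the new `reth-bench-compare` manifest further down, exists so that an interrupted comparison run can stop the reth node and reth-bench children it spawned instead of leaving them running. A minimal sketch of the usual pattern, assuming only `ctrlc::set_handler`; how the tool actually wires shutdown is not shown in this section:

```rust
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};

fn main() -> Result<(), ctrlc::Error> {
    // Shared flag the benchmark loop can poll between phases.
    let shutdown = Arc::new(AtomicBool::new(false));
    let flag = shutdown.clone();

    // The handler stays installed; the first Ctrl-C only requests a graceful stop.
    ctrlc::set_handler(move || {
        flag.store(true, Ordering::SeqCst);
        eprintln!("interrupt received, finishing current phase before exiting");
    })?;

    while !shutdown.load(Ordering::SeqCst) {
        // ... run warmup / baseline / feature phases here ...
        break; // placeholder so the sketch terminates
    }
    Ok(())
}
```

Since the benchmark runner below places its children in their own process group (`process_group(0)`), a real handler would presumably also have to signal that group rather than rely on terminal-delivered SIGINT alone.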
+RUN RUSTFLAGS_REPRODUCIBLE_EXTRA="-Clink-arg=-fuse-ld=mold" make build-reth-reproducible && \ + PROFILE=${PROFILE:-reproducible} VERSION=$VERSION make build-deb-x86_64-unknown-linux-gnu -RUN make build-reth-reproducible -RUN mv /app/target/x86_64-unknown-linux-gnu/reproducible/reth /reth +FROM scratch AS artifacts +COPY --from=builder /app/target/x86_64-unknown-linux-gnu/reproducible/reth /reth +COPY --from=builder /app/target/x86_64-unknown-linux-gnu/reproducible/*.deb / -# Create a minimal final image with just the binary -FROM gcr.io/distroless/cc-debian12:nonroot-6755e21ccd99ddead6edc8106ba03888cbeed41a -COPY --from=builder /reth /reth +FROM gcr.io/distroless/cc-debian13:nonroot-239cdd2c8a6b275b6a6f6ed1428c57de2fff3e50 +COPY --from=artifacts /reth /reth EXPOSE 30303 30303/udp 9001 8545 8546 ENTRYPOINT [ "/reth" ] diff --git a/DockerfileOp b/DockerfileOp index d195ca2160..ba6e6627fd 100644 --- a/DockerfileOp +++ b/DockerfileOp @@ -14,7 +14,7 @@ RUN cargo chef prepare --recipe-path recipe.json FROM chef AS builder COPY --from=planner /app/recipe.json recipe.json -ARG BUILD_PROFILE=release +ARG BUILD_PROFILE=maxperf ENV BUILD_PROFILE=$BUILD_PROFILE ARG RUSTFLAGS="" @@ -31,7 +31,7 @@ RUN cargo build --profile $BUILD_PROFILE --features "$FEATURES" --bin op-reth -- RUN ls -la /app/target/$BUILD_PROFILE/op-reth RUN cp /app/target/$BUILD_PROFILE/op-reth /app/op-reth -FROM ubuntu:22.04 AS runtime +FROM ubuntu AS runtime RUN apt-get update && \ apt-get install -y ca-certificates libssl-dev pkg-config strace && \ diff --git a/Makefile b/Makefile index 8d8b0a5b3a..3e1a2dbdcc 100644 --- a/Makefile +++ b/Makefile @@ -64,14 +64,13 @@ install-op: ## Build and install the op-reth binary under `$(CARGO_HOME)/bin`. build: ## Build the reth binary into `target` directory. cargo build --bin reth --features "$(FEATURES)" --profile "$(PROFILE)" -.PHONY: build-reth -build-reth: ## Build the reth binary (alias for build target). - $(MAKE) build - # Environment variables for reproducible builds # Set timestamp from last git commit for reproducible builds SOURCE_DATE ?= $(shell git log -1 --pretty=%ct) +# Extra RUSTFLAGS for reproducible builds. Can be overridden via the environment. +RUSTFLAGS_REPRODUCIBLE_EXTRA ?= + # `reproducible` only supports reth on x86_64-unknown-linux-gnu build-%-reproducible: @if [ "$*" != "reth" ]; then \ @@ -79,14 +78,18 @@ build-%-reproducible: exit 1; \ fi SOURCE_DATE_EPOCH=$(SOURCE_DATE) \ - RUSTFLAGS="-C symbol-mangling-version=v0 -C strip=none -C link-arg=-Wl,--build-id=none -C metadata='' --remap-path-prefix $$(pwd)=." \ + RUSTFLAGS="-C symbol-mangling-version=v0 -C strip=none -C link-arg=-Wl,--build-id=none -C metadata='' --remap-path-prefix $$(pwd)=. $(RUSTFLAGS_REPRODUCIBLE_EXTRA)" \ LC_ALL=C \ TZ=UTC \ - cargo build --bin reth --features "$(FEATURES)" --profile "reproducible" --locked --target x86_64-unknown-linux-gnu + JEMALLOC_OVERRIDE=/usr/lib/x86_64-linux-gnu/libjemalloc.a \ + cargo build --bin reth --features "$(FEATURES) jemalloc-unprefixed" --profile "reproducible" --locked --target x86_64-unknown-linux-gnu .PHONY: build-debug build-debug: ## Build the reth binary into `target/debug` directory. cargo build --bin reth --features "$(FEATURES)" +.PHONY: build-debug-op +build-debug-op: ## Build the op-reth binary into `target/debug` directory. + cargo build --bin op-reth --features "$(FEATURES)" --manifest-path crates/optimism/bin/Cargo.toml .PHONY: build-op build-op: ## Build the op-reth binary into `target` directory. 
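The reproducible-build changes above pin the Rust toolchain and Debian snapshot repositories, link against a static system jemalloc via `JEMALLOC_OVERRIDE`, and scrub build IDs, symbol metadata, and absolute paths through `RUSTFLAGS`, so two builds of the same commit should produce byte-identical binaries. One way to sanity-check that property is to hash the artifacts from two independent builds; a small sketch using the `sha2` crate already present in the workspace (this checker is illustrative and not part of the diff):

```rust
use sha2::{Digest, Sha256};
use std::{env, fs, process::ExitCode};

/// Compare two build artifacts byte-for-byte via their SHA-256 digests.
/// Usage sketch: `verify-repro target-a/reth target-b/reth`
fn main() -> ExitCode {
    let mut args = env::args().skip(1);
    let (Some(a), Some(b)) = (args.next(), args.next()) else {
        eprintln!("usage: verify-repro <binary-a> <binary-b>");
        return ExitCode::FAILURE;
    };

    let digest = |path: &str| fs::read(path).map(|bytes| Sha256::digest(&bytes));

    match (digest(&a), digest(&b)) {
        (Ok(da), Ok(db)) if da == db => {
            let hex: String = da.iter().map(|byte| format!("{byte:02x}")).collect();
            println!("builds match: {hex}");
            ExitCode::SUCCESS
        }
        (Ok(_), Ok(_)) => {
            eprintln!("builds differ; the output is not reproducible");
            ExitCode::FAILURE
        }
        (Err(err), _) | (_, Err(err)) => {
            eprintln!("failed to read artifact: {err}");
            ExitCode::FAILURE
        }
    }
}
```

Note that the `RUSTFLAGS_REPRODUCIBLE_EXTRA` hook introduced in the Makefile is what lets the reproducible Docker build append the mold linker flag without altering the default reproducible flag set.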
@@ -387,9 +390,9 @@ db-tools: ## Compile MDBX debugging tools. @echo "Run \"$(DB_TOOLS_DIR)/mdbx_chk\" for the MDBX db file integrity check." .PHONY: update-book-cli -update-book-cli: build-debug ## Update book cli documentation. +update-book-cli: build-debug build-debug-op## Update book cli documentation. @echo "Updating book cli doc..." - @./docs/cli/update.sh $(CARGO_TARGET_DIR)/debug/reth + @./docs/cli/update.sh $(CARGO_TARGET_DIR)/debug/reth $(CARGO_TARGET_DIR)/debug/op-reth .PHONY: profiling profiling: ## Builds `reth` with optimisations, but also symbols. @@ -518,10 +521,3 @@ pr: make update-book-cli && \ cargo docs --document-private-items && \ make test - -check-features: - cargo hack check \ - --package reth-codecs \ - --package reth-primitives-traits \ - --package reth-primitives \ - --feature-powerset diff --git a/bin/reth-bench-compare/Cargo.toml b/bin/reth-bench-compare/Cargo.toml new file mode 100644 index 0000000000..11d9b4f8bd --- /dev/null +++ b/bin/reth-bench-compare/Cargo.toml @@ -0,0 +1,96 @@ +[package] +name = "reth-bench-compare" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Automated reth benchmark comparison between git references" + +[lints] +workspace = true + +[[bin]] +name = "reth-bench-compare" +path = "src/main.rs" + +[dependencies] +# reth +reth-cli-runner.workspace = true +reth-cli-util.workspace = true +reth-node-core.workspace = true +reth-tracing.workspace = true +reth-chainspec.workspace = true + +# alloy +alloy-provider = { workspace = true, features = ["reqwest-rustls-tls"], default-features = false } +alloy-rpc-types-eth.workspace = true +alloy-primitives.workspace = true + +# CLI and argument parsing +clap = { workspace = true, features = ["derive", "env"] } +eyre.workspace = true + +# Async runtime +tokio = { workspace = true, features = ["full"] } +tracing.workspace = true + +# Serialization +serde = { workspace = true, features = ["derive"] } +serde_json.workspace = true + +# Time handling +chrono = { workspace = true, features = ["serde"] } + +# Path manipulation +shellexpand.workspace = true + +# CSV handling +csv.workspace = true + +# Process management +ctrlc.workspace = true +shlex.workspace = true + +[target.'cfg(unix)'.dependencies] +nix = { version = "0.29", features = ["signal", "process"] } + +[features] +default = ["jemalloc"] + +asm-keccak = [ + "reth-node-core/asm-keccak", + "alloy-primitives/asm-keccak", +] + +jemalloc = [ + "reth-cli-util/jemalloc", + "reth-node-core/jemalloc", +] +jemalloc-prof = ["reth-cli-util/jemalloc-prof"] +tracy-allocator = ["reth-cli-util/tracy-allocator"] + +min-error-logs = [ + "tracing/release_max_level_error", + "reth-node-core/min-error-logs", +] +min-warn-logs = [ + "tracing/release_max_level_warn", + "reth-node-core/min-warn-logs", +] +min-info-logs = [ + "tracing/release_max_level_info", + "reth-node-core/min-info-logs", +] +min-debug-logs = [ + "tracing/release_max_level_debug", + "reth-node-core/min-debug-logs", +] +min-trace-logs = [ + "tracing/release_max_level_trace", + "reth-node-core/min-trace-logs", +] + +# no-op feature flag for switching between the `optimism` and default functionality in CI matrices +ethereum = [] diff --git a/bin/reth-bench-compare/src/benchmark.rs b/bin/reth-bench-compare/src/benchmark.rs new file mode 100644 index 0000000000..ba6eaea176 --- /dev/null +++ b/bin/reth-bench-compare/src/benchmark.rs @@ -0,0 +1,298 @@ +//! 
Benchmark execution using reth-bench. + +use crate::cli::Args; +use eyre::{eyre, Result, WrapErr}; +use std::{ + path::Path, + sync::{Arc, Mutex}, +}; +use tokio::{ + fs::File as AsyncFile, + io::{AsyncBufReadExt, AsyncWriteExt, BufReader}, + process::Command, +}; +use tracing::{debug, error, info, warn}; + +/// Manages benchmark execution using reth-bench +pub(crate) struct BenchmarkRunner { + rpc_url: String, + jwt_secret: String, + wait_time: Option, + warmup_blocks: u64, +} + +impl BenchmarkRunner { + /// Create a new `BenchmarkRunner` from CLI arguments + pub(crate) fn new(args: &Args) -> Self { + Self { + rpc_url: args.get_rpc_url(), + jwt_secret: args.jwt_secret_path().to_string_lossy().to_string(), + wait_time: args.wait_time.clone(), + warmup_blocks: args.get_warmup_blocks(), + } + } + + /// Clear filesystem caches (page cache, dentries, and inodes) + pub(crate) async fn clear_fs_caches() -> Result<()> { + info!("Clearing filesystem caches..."); + + // First sync to ensure all pending writes are flushed + let sync_output = + Command::new("sync").output().await.wrap_err("Failed to execute sync command")?; + + if !sync_output.status.success() { + return Err(eyre!("sync command failed")); + } + + // Drop caches - requires sudo/root permissions + // 3 = drop pagecache, dentries, and inodes + let drop_caches_cmd = Command::new("sudo") + .args(["-n", "sh", "-c", "echo 3 > /proc/sys/vm/drop_caches"]) + .output() + .await; + + match drop_caches_cmd { + Ok(output) if output.status.success() => { + info!("Successfully cleared filesystem caches"); + Ok(()) + } + Ok(output) => { + let stderr = String::from_utf8_lossy(&output.stderr); + if stderr.contains("sudo: a password is required") { + warn!("Unable to clear filesystem caches: sudo password required"); + warn!( + "For optimal benchmarking, configure passwordless sudo for cache clearing:" + ); + warn!(" echo '$USER ALL=(ALL) NOPASSWD: /bin/sh -c echo\\\\ [0-9]\\\\ \\\\>\\\\ /proc/sys/vm/drop_caches' | sudo tee /etc/sudoers.d/drop_caches"); + Ok(()) + } else { + Err(eyre!("Failed to clear filesystem caches: {}", stderr)) + } + } + Err(e) => { + warn!("Unable to clear filesystem caches: {}", e); + Ok(()) + } + } + } + + /// Run a warmup benchmark for cache warming + pub(crate) async fn run_warmup(&self, from_block: u64) -> Result<()> { + let to_block = from_block + self.warmup_blocks; + info!( + "Running warmup benchmark from block {} to {} ({} blocks)", + from_block, to_block, self.warmup_blocks + ); + + // Build the reth-bench command for warmup (no output flag) + let mut cmd = Command::new("reth-bench"); + cmd.args([ + "new-payload-fcu", + "--rpc-url", + &self.rpc_url, + "--jwt-secret", + &self.jwt_secret, + "--from", + &from_block.to_string(), + "--to", + &to_block.to_string(), + ]); + + // Add wait-time argument if provided + if let Some(ref wait_time) = self.wait_time { + cmd.args(["--wait-time", wait_time]); + } + + cmd.env("RUST_LOG_STYLE", "never") + .stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .kill_on_drop(true); + + // Set process group for consistent signal handling + #[cfg(unix)] + { + cmd.process_group(0); + } + + debug!("Executing warmup reth-bench command: {:?}", cmd); + + // Execute the warmup benchmark + let mut child = cmd.spawn().wrap_err("Failed to start warmup reth-bench process")?; + + // Stream output at debug level + if let Some(stdout) = child.stdout.take() { + tokio::spawn(async move { + let reader = BufReader::new(stdout); + let mut lines = reader.lines(); + while let 
Ok(Some(line)) = lines.next_line().await { + debug!("[WARMUP] {}", line); + } + }); + } + + if let Some(stderr) = child.stderr.take() { + tokio::spawn(async move { + let reader = BufReader::new(stderr); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[WARMUP] {}", line); + } + }); + } + + let status = child.wait().await.wrap_err("Failed to wait for warmup reth-bench")?; + + if !status.success() { + return Err(eyre!("Warmup reth-bench failed with exit code: {:?}", status.code())); + } + + info!("Warmup completed successfully"); + Ok(()) + } + + /// Run a benchmark for the specified block range + pub(crate) async fn run_benchmark( + &self, + from_block: u64, + to_block: u64, + output_dir: &Path, + ) -> Result<()> { + info!( + "Running benchmark from block {} to {} (output: {:?})", + from_block, to_block, output_dir + ); + + // Ensure output directory exists + std::fs::create_dir_all(output_dir) + .wrap_err_with(|| format!("Failed to create output directory: {output_dir:?}"))?; + + // Create log file path for reth-bench output + let log_file_path = output_dir.join("reth_bench.log"); + info!("reth-bench logs will be saved to: {:?}", log_file_path); + + // Build the reth-bench command + let mut cmd = Command::new("reth-bench"); + cmd.args([ + "new-payload-fcu", + "--rpc-url", + &self.rpc_url, + "--jwt-secret", + &self.jwt_secret, + "--from", + &from_block.to_string(), + "--to", + &to_block.to_string(), + "--output", + &output_dir.to_string_lossy(), + ]); + + // Add wait-time argument if provided + if let Some(ref wait_time) = self.wait_time { + cmd.args(["--wait-time", wait_time]); + } + + cmd.env("RUST_LOG_STYLE", "never") + .stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .kill_on_drop(true); + + // Set process group for consistent signal handling + #[cfg(unix)] + { + cmd.process_group(0); + } + + // Debug log the command + debug!("Executing reth-bench command: {:?}", cmd); + + // Execute the benchmark + let mut child = cmd.spawn().wrap_err("Failed to start reth-bench process")?; + + // Capture stdout and stderr for error reporting + let stdout_lines = Arc::new(Mutex::new(Vec::new())); + let stderr_lines = Arc::new(Mutex::new(Vec::new())); + + // Stream stdout with prefix at debug level, capture for error reporting, and write to log + // file + if let Some(stdout) = child.stdout.take() { + let stdout_lines_clone = stdout_lines.clone(); + let log_file = AsyncFile::create(&log_file_path) + .await + .wrap_err(format!("Failed to create log file: {:?}", log_file_path))?; + tokio::spawn(async move { + let reader = BufReader::new(stdout); + let mut lines = reader.lines(); + let mut log_file = log_file; + while let Ok(Some(line)) = lines.next_line().await { + debug!("[RETH-BENCH] {}", line); + if let Ok(mut captured) = stdout_lines_clone.lock() { + captured.push(line.clone()); + } + // Write to log file (reth-bench output already has timestamps if needed) + let log_line = format!("{}\n", line); + if let Err(e) = log_file.write_all(log_line.as_bytes()).await { + debug!("Failed to write to log file: {}", e); + } + } + }); + } + + // Stream stderr with prefix at debug level, capture for error reporting, and write to log + // file + if let Some(stderr) = child.stderr.take() { + let stderr_lines_clone = stderr_lines.clone(); + let log_file = AsyncFile::options() + .create(true) + .append(true) + .open(&log_file_path) + .await + .wrap_err(format!("Failed to open log file for stderr: {:?}", log_file_path))?; + 
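+            // The stderr stream is written to the same log file through a second handle
+            // opened in append mode, separate from the truncating handle created for the
+            // stdout task above, so both streams end up in one file without sharing a writer.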
tokio::spawn(async move { + let reader = BufReader::new(stderr); + let mut lines = reader.lines(); + let mut log_file = log_file; + while let Ok(Some(line)) = lines.next_line().await { + debug!("[RETH-BENCH] {}", line); + if let Ok(mut captured) = stderr_lines_clone.lock() { + captured.push(line.clone()); + } + // Write to log file (reth-bench output already has timestamps if needed) + let log_line = format!("{}\n", line); + if let Err(e) = log_file.write_all(log_line.as_bytes()).await { + debug!("Failed to write to log file: {}", e); + } + } + }); + } + + let status = child.wait().await.wrap_err("Failed to wait for reth-bench")?; + + if !status.success() { + // Print all captured output when command fails + error!("reth-bench failed with exit code: {:?}", status.code()); + + if let Ok(stdout) = stdout_lines.lock() && + !stdout.is_empty() + { + error!("reth-bench stdout:"); + for line in stdout.iter() { + error!(" {}", line); + } + } + + if let Ok(stderr) = stderr_lines.lock() && + !stderr.is_empty() + { + error!("reth-bench stderr:"); + for line in stderr.iter() { + error!(" {}", line); + } + } + + return Err(eyre!("reth-bench failed with exit code: {:?}", status.code())); + } + + info!("Benchmark completed"); + Ok(()) + } +} diff --git a/bin/reth-bench-compare/src/cli.rs b/bin/reth-bench-compare/src/cli.rs new file mode 100644 index 0000000000..6c30532daf --- /dev/null +++ b/bin/reth-bench-compare/src/cli.rs @@ -0,0 +1,946 @@ +//! CLI argument parsing and main command orchestration. + +use alloy_provider::{Provider, ProviderBuilder}; +use clap::Parser; +use eyre::{eyre, Result, WrapErr}; +use reth_chainspec::Chain; +use reth_cli_runner::CliContext; +use reth_node_core::args::{DatadirArgs, LogArgs, TraceArgs}; +use reth_tracing::FileWorkerGuard; +use std::{net::TcpListener, path::PathBuf, str::FromStr}; +use tokio::process::Command; +use tracing::{debug, info, warn}; + +use crate::{ + benchmark::BenchmarkRunner, comparison::ComparisonGenerator, compilation::CompilationManager, + git::GitManager, node::NodeManager, +}; + +/// Target for disabling the --debug.startup-sync-state-idle flag +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum DisableStartupSyncStateIdle { + /// Disable for baseline and warmup runs + Baseline, + /// Disable for feature runs only + Feature, + /// Disable for all runs + All, +} + +impl FromStr for DisableStartupSyncStateIdle { + type Err = String; + + fn from_str(s: &str) -> std::result::Result { + match s.to_lowercase().as_str() { + "baseline" => Ok(Self::Baseline), + "feature" => Ok(Self::Feature), + "all" => Ok(Self::All), + _ => Err(format!("Invalid value '{}'. 
Expected 'baseline', 'feature', or 'all'", s)),
+        }
+    }
+}
+
+impl std::fmt::Display for DisableStartupSyncStateIdle {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Self::Baseline => write!(f, "baseline"),
+            Self::Feature => write!(f, "feature"),
+            Self::All => write!(f, "all"),
+        }
+    }
+}
+
+/// Automated reth benchmark comparison between git references
+#[derive(Debug, Parser)]
+#[command(
+    name = "reth-bench-compare",
+    about = "Compare reth performance between two git references (branches or tags)",
+    version
+)]
+pub(crate) struct Args {
+    /// Git reference (branch or tag) to use as baseline for comparison
+    #[arg(long, value_name = "REF")]
+    pub baseline_ref: String,
+
+    /// Git reference (branch or tag) to compare against the baseline
+    #[arg(long, value_name = "REF")]
+    pub feature_ref: String,
+
+    #[command(flatten)]
+    pub datadir: DatadirArgs,
+
+    /// Number of blocks to benchmark
+    #[arg(long, value_name = "N", default_value = "100")]
+    pub blocks: u64,
+
+    /// RPC endpoint for fetching block data
+    #[arg(long, value_name = "URL")]
+    pub rpc_url: Option<String>,
+
+    /// JWT secret file path
+    ///
+    /// If not provided, defaults to `<datadir>/<chain>/jwt.hex`.
+    /// If the file doesn't exist, it will be created automatically.
+    #[arg(long, value_name = "PATH")]
+    pub jwt_secret: Option<PathBuf>,
+
+    /// Output directory for benchmark results
+    #[arg(long, value_name = "PATH", default_value = "./reth-bench-compare")]
+    pub output_dir: String,
+
+    /// Skip git branch validation (useful for testing)
+    #[arg(long)]
+    pub skip_git_validation: bool,
+
+    /// Port for reth metrics endpoint
+    #[arg(long, value_name = "PORT", default_value = "5005")]
+    pub metrics_port: u16,
+
+    /// The chain this node is running.
+    ///
+    /// Possible values are either a built-in chain name or numeric chain ID.
+    #[arg(long, value_name = "CHAIN", default_value = "mainnet", required = false)]
+    pub chain: Chain,
+
+    /// Run reth binary with sudo (for elevated privileges)
+    #[arg(long)]
+    pub sudo: bool,
+
+    /// Generate comparison charts using Python script
+    #[arg(long)]
+    pub draw: bool,
+
+    /// Enable CPU profiling with samply during benchmark runs
+    #[arg(long)]
+    pub profile: bool,
+
+    /// Wait time between engine API calls (passed to reth-bench)
+    #[arg(long, value_name = "DURATION")]
+    pub wait_time: Option<String>,
+
+    /// Number of blocks to run for cache warmup after clearing caches.
+    /// If not specified, defaults to the same as --blocks
+    #[arg(long, value_name = "N")]
+    pub warmup_blocks: Option<u64>,
+
+    /// Disable filesystem cache clearing before warmup phase.
+    /// By default, filesystem caches are cleared before warmup to ensure consistent benchmarks.
+    #[arg(long)]
+    pub no_clear_cache: bool,
+
+    #[command(flatten)]
+    pub logs: LogArgs,
+
+    #[command(flatten)]
+    pub traces: TraceArgs,
+
+    /// Maximum queue size for OTLP Batch Span Processor (traces).
+    /// Higher values prevent trace drops when benchmarking many blocks.
+    #[arg(
+        long,
+        value_name = "OTLP_BUFFER_SIZE",
+        default_value = "32768",
+        help_heading = "Tracing"
+    )]
+    pub otlp_max_queue_size: usize,
+
+    /// Additional arguments to pass to baseline reth node command
+    ///
+    /// Example: `--baseline-args "--debug.tip 0xabc..."`
+    #[arg(long, value_name = "ARGS")]
+    pub baseline_args: Option<String>,
+
+    /// Additional arguments to pass to feature reth node command
+    ///
+    /// Example: `--feature-args "--debug.tip 0xdef..."`
+    #[arg(long, value_name = "ARGS")]
+    pub feature_args: Option<String>,
+
+    /// Additional arguments to pass to reth node command (applied to both baseline and feature)
+    ///
+    /// All arguments after `--` will be passed directly to the reth node command.
+    /// Example: `reth-bench-compare --baseline-ref main --feature-ref pr/123 -- --debug.tip
+    /// 0xabc...`
+    #[arg(trailing_var_arg = true, allow_hyphen_values = true)]
+    pub reth_args: Vec<String>,
+
+    /// Comma-separated list of features to enable during reth compilation
+    ///
+    /// Example: `jemalloc,asm-keccak`
+    #[arg(long, value_name = "FEATURES", default_value = "jemalloc,asm-keccak")]
+    pub features: String,
+
+    /// Disable automatic --debug.startup-sync-state-idle flag for specific runs.
+    /// Can be "baseline", "feature", or "all".
+    /// By default, the flag is passed to warmup, baseline, and feature runs.
+    /// When "baseline" is specified, the flag is NOT passed to warmup OR baseline.
+    /// When "feature" is specified, the flag is NOT passed to feature.
+    /// When "all" is specified, the flag is NOT passed to any run.
+    #[arg(long, value_name = "TARGET")]
+    pub disable_startup_sync_state_idle: Option<DisableStartupSyncStateIdle>,
+}
+
+impl Args {
+    /// Initializes tracing with the configured options.
+    pub(crate) fn init_tracing(&self) -> Result<Option<FileWorkerGuard>> {
+        let guard = self.logs.init_tracing()?;
+        Ok(guard)
+    }
+
+    /// Build additional arguments for a specific ref type, conditionally including
+    /// --debug.startup-sync-state-idle based on the configuration
+    pub(crate) fn build_additional_args(
+        &self,
+        ref_type: &str,
+        base_args_str: Option<&String>,
+    ) -> Vec<String> {
+        // Parse the base arguments string if provided
+        let mut args = base_args_str.map(|s| parse_args_string(s)).unwrap_or_default();
+
+        // Determine if we should add the --debug.startup-sync-state-idle flag
+        let should_add_flag = match self.disable_startup_sync_state_idle {
+            None => true, // By default, add the flag
+            Some(DisableStartupSyncStateIdle::All) => false,
+            Some(DisableStartupSyncStateIdle::Baseline) => {
+                ref_type != "baseline" && ref_type != "warmup"
+            }
+            Some(DisableStartupSyncStateIdle::Feature) => ref_type != "feature",
+        };
+
+        if should_add_flag {
+            args.push("--debug.startup-sync-state-idle".to_string());
+            debug!("Adding --debug.startup-sync-state-idle flag for ref_type: {}", ref_type);
+        } else {
+            debug!("Skipping --debug.startup-sync-state-idle flag for ref_type: {}", ref_type);
+        }
+
+        args
+    }
+
+    /// Get the default RPC URL for a given chain
+    const fn get_default_rpc_url(chain: &Chain) -> &'static str {
+        match chain.id() {
+            8453 => "https://base-mainnet.rpc.ithaca.xyz", // base
+            84532 => "https://base-sepolia.rpc.ithaca.xyz", // base-sepolia
+            27082 => "https://rpc.hoodi.ethpandaops.io", // hoodi
+            _ => "https://reth-ethereum.ithaca.xyz/rpc", // mainnet and fallback
+        }
+    }
+
+    /// Get the RPC URL, using chain-specific default if not provided
+    pub(crate) fn get_rpc_url(&self) -> String {
+        self.rpc_url.clone().unwrap_or_else(|| Self::get_default_rpc_url(&self.chain).to_string())
+    }
+
+    /// Get the JWT secret path - either
provided or derived from datadir + pub(crate) fn jwt_secret_path(&self) -> PathBuf { + match &self.jwt_secret { + Some(path) => { + let jwt_secret_str = path.to_string_lossy(); + let expanded = shellexpand::tilde(&jwt_secret_str); + PathBuf::from(expanded.as_ref()) + } + None => { + // Use the same logic as reth: //jwt.hex + let chain_path = self.datadir.clone().resolve_datadir(self.chain); + chain_path.jwt() + } + } + } + + /// Get the resolved datadir path using the chain + pub(crate) fn datadir_path(&self) -> PathBuf { + let chain_path = self.datadir.clone().resolve_datadir(self.chain); + chain_path.data_dir().to_path_buf() + } + + /// Get the expanded output directory path + pub(crate) fn output_dir_path(&self) -> PathBuf { + let expanded = shellexpand::tilde(&self.output_dir); + PathBuf::from(expanded.as_ref()) + } + + /// Get the effective warmup blocks value - either specified or defaults to blocks + pub(crate) fn get_warmup_blocks(&self) -> u64 { + self.warmup_blocks.unwrap_or(self.blocks) + } +} + +/// Validate that the RPC endpoint chain ID matches the specified chain +async fn validate_rpc_chain_id(rpc_url: &str, expected_chain: &Chain) -> Result<()> { + // Create Alloy provider + let url = rpc_url.parse().map_err(|e| eyre!("Invalid RPC URL '{}': {}", rpc_url, e))?; + let provider = ProviderBuilder::new().connect_http(url); + + // Query chain ID using Alloy + let rpc_chain_id = provider + .get_chain_id() + .await + .map_err(|e| eyre!("Failed to get chain ID from RPC endpoint {}: {:?}", rpc_url, e))?; + + let expected_chain_id = expected_chain.id(); + + if rpc_chain_id != expected_chain_id { + return Err(eyre!( + "RPC endpoint chain ID mismatch!\n\ + Expected: {} (chain: {})\n\ + Found: {} at RPC endpoint: {}\n\n\ + Please use an RPC endpoint for the correct network or change the --chain argument.", + expected_chain_id, + expected_chain, + rpc_chain_id, + rpc_url + )); + } + + info!("Validated RPC endpoint chain ID"); + Ok(()) +} + +/// Main comparison workflow execution +pub(crate) async fn run_comparison(args: Args, _ctx: CliContext) -> Result<()> { + // Create a new process group for this process and all its children + #[cfg(unix)] + { + use nix::unistd::{getpid, setpgid}; + if let Err(e) = setpgid(getpid(), getpid()) { + warn!("Failed to create process group: {e}"); + } + } + + info!( + "Starting benchmark comparison between '{}' and '{}'", + args.baseline_ref, args.feature_ref + ); + + if args.sudo { + info!("Running in sudo mode - reth commands will use elevated privileges"); + } + + // Initialize Git manager + let git_manager = GitManager::new()?; + // Fetch all branches, tags, and commits + git_manager.fetch_all()?; + + // Initialize compilation manager + let output_dir = args.output_dir_path(); + let compilation_manager = CompilationManager::new( + git_manager.repo_root().to_string(), + output_dir.clone(), + git_manager.clone(), + args.features.clone(), + args.profile, + )?; + // Initialize node manager + let mut node_manager = NodeManager::new(&args); + + let benchmark_runner = BenchmarkRunner::new(&args); + let mut comparison_generator = ComparisonGenerator::new(&args); + + // Set the comparison directory in node manager to align with results directory + node_manager.set_comparison_dir(comparison_generator.get_output_dir()); + + // Store original git state for restoration + let original_ref = git_manager.get_current_ref()?; + info!("Current git reference: {}", original_ref); + + // Validate git state + if !args.skip_git_validation { + 
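+        // Both refs must resolve to commits, and the working tree must have no uncommitted changes to
+        // tracked files (untracked files are fine).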
git_manager.validate_clean_state()?; + git_manager.validate_refs(&[&args.baseline_ref, &args.feature_ref])?; + } + + // Validate RPC endpoint chain ID matches the specified chain + let rpc_url = args.get_rpc_url(); + validate_rpc_chain_id(&rpc_url, &args.chain).await?; + + // Setup signal handling for cleanup + let git_manager_cleanup = git_manager.clone(); + let original_ref_cleanup = original_ref.clone(); + ctrlc::set_handler(move || { + eprintln!("Received interrupt signal, cleaning up..."); + + // Send SIGTERM to entire process group to ensure all children exit + #[cfg(unix)] + { + use nix::{ + sys::signal::{kill, Signal}, + unistd::Pid, + }; + + // Send SIGTERM to our process group (negative PID = process group) + let current_pid = std::process::id() as i32; + let pgid = Pid::from_raw(-current_pid); + if let Err(e) = kill(pgid, Signal::SIGTERM) { + eprintln!("Failed to send SIGTERM to process group: {e}"); + } + } + + // Give a moment for any ongoing git operations to complete + std::thread::sleep(std::time::Duration::from_millis(200)); + + if let Err(e) = git_manager_cleanup.switch_ref(&original_ref_cleanup) { + eprintln!("Failed to restore original git reference: {e}"); + eprintln!("You may need to manually run: git checkout {original_ref_cleanup}"); + } + std::process::exit(1); + })?; + + let result = run_benchmark_workflow( + &git_manager, + &compilation_manager, + &mut node_manager, + &benchmark_runner, + &mut comparison_generator, + &args, + ) + .await; + + // Always restore original git reference + info!("Restoring original git reference: {}", original_ref); + git_manager.switch_ref(&original_ref)?; + + // Handle any errors from the workflow + result?; + + Ok(()) +} + +/// Parse a string of arguments into a vector of strings +fn parse_args_string(args_str: &str) -> Vec { + shlex::split(args_str).unwrap_or_else(|| { + // Fallback to simple whitespace splitting if shlex fails + args_str.split_whitespace().map(|s| s.to_string()).collect() + }) +} + +/// Run compilation phase for both baseline and feature binaries +async fn run_compilation_phase( + git_manager: &GitManager, + compilation_manager: &CompilationManager, + args: &Args, + is_optimism: bool, +) -> Result<(String, String)> { + info!("=== Running compilation phase ==="); + + // Ensure required tools are available (only need to check once) + compilation_manager.ensure_reth_bench_available()?; + if args.profile { + compilation_manager.ensure_samply_available()?; + } + + let refs = [&args.baseline_ref, &args.feature_ref]; + let ref_types = ["baseline", "feature"]; + + // First, resolve all refs to commits using a HashMap to avoid race conditions where a ref is + // pushed to mid-run. 
+ let mut ref_commits = std::collections::HashMap::new(); + for &git_ref in &refs { + if !ref_commits.contains_key(git_ref) { + git_manager.switch_ref(git_ref)?; + let commit = git_manager.get_current_commit()?; + ref_commits.insert(git_ref.clone(), commit); + info!("Reference {} resolves to commit: {}", git_ref, &ref_commits[git_ref][..8]); + } + } + + // Now compile each ref using the resolved commits + for (i, &git_ref) in refs.iter().enumerate() { + let ref_type = ref_types[i]; + let commit = &ref_commits[git_ref]; + + info!( + "Compiling {} binary for reference: {} (commit: {})", + ref_type, + git_ref, + &commit[..8] + ); + + // Switch to target reference + git_manager.switch_ref(git_ref)?; + + // Compile reth (with caching) + compilation_manager.compile_reth(commit, is_optimism)?; + + info!("Completed compilation for {} reference", ref_type); + } + + let baseline_commit = ref_commits[&args.baseline_ref].clone(); + let feature_commit = ref_commits[&args.feature_ref].clone(); + + info!("Compilation phase completed"); + Ok((baseline_commit, feature_commit)) +} + +/// Run warmup phase to warm up caches before benchmarking +async fn run_warmup_phase( + git_manager: &GitManager, + compilation_manager: &CompilationManager, + node_manager: &mut NodeManager, + benchmark_runner: &BenchmarkRunner, + args: &Args, + is_optimism: bool, + baseline_commit: &str, +) -> Result<()> { + info!("=== Running warmup phase ==="); + + // Use baseline for warmup + let warmup_ref = &args.baseline_ref; + + // Switch to baseline reference + git_manager.switch_ref(warmup_ref)?; + + // Get the cached binary path for baseline (should already be compiled) + let binary_path = + compilation_manager.get_cached_binary_path_for_commit(baseline_commit, is_optimism); + + // Verify the cached binary exists + if !binary_path.exists() { + return Err(eyre!( + "Cached baseline binary not found at {:?}. 
Compilation phase should have created it.", + binary_path + )); + } + + info!("Using cached baseline binary for warmup (commit: {})", &baseline_commit[..8]); + + // Build additional args with conditional --debug.startup-sync-state-idle flag + let additional_args = args.build_additional_args("warmup", args.baseline_args.as_ref()); + + // Start reth node for warmup (command is not stored for warmup phase) + let (mut node_process, _warmup_command) = + node_manager.start_node(&binary_path, warmup_ref, "warmup", &additional_args).await?; + + // Wait for node to be ready and get its current tip + let current_tip = node_manager.wait_for_node_ready_and_get_tip().await?; + info!("Warmup node is ready at tip: {}", current_tip); + + // Store the tip we'll unwind back to + let original_tip = current_tip; + + // Clear filesystem caches before warmup run only (unless disabled) + if args.no_clear_cache { + info!("Skipping filesystem cache clearing (--no-clear-cache flag set)"); + } else { + BenchmarkRunner::clear_fs_caches().await?; + } + + // Run warmup to warm up caches + benchmark_runner.run_warmup(current_tip).await?; + + // Stop node before unwinding (node must be stopped to release database lock) + node_manager.stop_node(&mut node_process).await?; + + // Unwind back to starting block after warmup + node_manager.unwind_to_block(original_tip).await?; + + info!("Warmup phase completed"); + Ok(()) +} + +/// Execute the complete benchmark workflow for both branches +async fn run_benchmark_workflow( + git_manager: &GitManager, + compilation_manager: &CompilationManager, + node_manager: &mut NodeManager, + benchmark_runner: &BenchmarkRunner, + comparison_generator: &mut ComparisonGenerator, + args: &Args, +) -> Result<()> { + // Detect if this is an Optimism chain once at the beginning + let rpc_url = args.get_rpc_url(); + let is_optimism = compilation_manager.detect_optimism_chain(&rpc_url).await?; + + // Run compilation phase for both binaries + let (baseline_commit, feature_commit) = + run_compilation_phase(git_manager, compilation_manager, args, is_optimism).await?; + + // Run warmup phase before benchmarking (skip if warmup_blocks is 0) + if args.get_warmup_blocks() > 0 { + run_warmup_phase( + git_manager, + compilation_manager, + node_manager, + benchmark_runner, + args, + is_optimism, + &baseline_commit, + ) + .await?; + } else { + info!("Skipping warmup phase (warmup_blocks is 0)"); + } + + let refs = [&args.baseline_ref, &args.feature_ref]; + let ref_types = ["baseline", "feature"]; + let commits = [&baseline_commit, &feature_commit]; + + for (i, &git_ref) in refs.iter().enumerate() { + let ref_type = ref_types[i]; + let commit = commits[i]; + info!("=== Processing {} reference: {} ===", ref_type, git_ref); + + // Switch to target reference + git_manager.switch_ref(git_ref)?; + + // Get the cached binary path for this git reference (should already be compiled) + let binary_path = + compilation_manager.get_cached_binary_path_for_commit(commit, is_optimism); + + // Verify the cached binary exists + if !binary_path.exists() { + return Err(eyre!( + "Cached {} binary not found at {:?}. 
Compilation phase should have created it.", + ref_type, + binary_path + )); + } + + info!("Using cached {} binary (commit: {})", ref_type, &commit[..8]); + + // Get reference-specific base arguments string + let base_args_str = match ref_type { + "baseline" => args.baseline_args.as_ref(), + "feature" => args.feature_args.as_ref(), + _ => None, + }; + + // Build additional args with conditional --debug.startup-sync-state-idle flag + let additional_args = args.build_additional_args(ref_type, base_args_str); + + // Start reth node and capture the command for reporting + let (mut node_process, reth_command) = + node_manager.start_node(&binary_path, git_ref, ref_type, &additional_args).await?; + + // Wait for node to be ready and get its current tip (wherever it is) + let current_tip = node_manager.wait_for_node_ready_and_get_tip().await?; + info!("Node is ready at tip: {}", current_tip); + + // Store the tip we'll unwind back to + let original_tip = current_tip; + + // Calculate benchmark range + // Note: reth-bench has an off-by-one error where it consumes the first block + // of the range, so we add 1 to compensate and get exactly args.blocks blocks + let from_block = original_tip; + let to_block = original_tip + args.blocks; + + // Run benchmark + let output_dir = comparison_generator.get_ref_output_dir(ref_type); + + // Capture start timestamp for the benchmark run + let benchmark_start = chrono::Utc::now(); + + // Run benchmark (comparison logic is handled separately by ComparisonGenerator) + benchmark_runner.run_benchmark(from_block, to_block, &output_dir).await?; + + // Capture end timestamp for the benchmark run + let benchmark_end = chrono::Utc::now(); + + // Stop node + node_manager.stop_node(&mut node_process).await?; + + // Unwind back to original tip + node_manager.unwind_to_block(original_tip).await?; + + // Store results for comparison + comparison_generator.add_ref_results(ref_type, &output_dir)?; + + // Set the benchmark run timestamps and reth command + comparison_generator.set_ref_timestamps(ref_type, benchmark_start, benchmark_end)?; + comparison_generator.set_ref_command(ref_type, reth_command)?; + + info!("Completed {} reference benchmark", ref_type); + } + + // Generate comparison report + comparison_generator.generate_comparison_report().await?; + + // Generate charts if requested + if args.draw { + generate_comparison_charts(comparison_generator).await?; + } + + // Start samply servers if profiling was enabled + if args.profile { + start_samply_servers(args).await?; + } + + Ok(()) +} + +/// Generate comparison charts using the Python script +async fn generate_comparison_charts(comparison_generator: &ComparisonGenerator) -> Result<()> { + info!("Generating comparison charts with Python script..."); + + let baseline_output_dir = comparison_generator.get_ref_output_dir("baseline"); + let feature_output_dir = comparison_generator.get_ref_output_dir("feature"); + + let baseline_csv = baseline_output_dir.join("combined_latency.csv"); + let feature_csv = feature_output_dir.join("combined_latency.csv"); + + // Check if CSV files exist + if !baseline_csv.exists() { + return Err(eyre!("Baseline CSV not found: {:?}", baseline_csv)); + } + if !feature_csv.exists() { + return Err(eyre!("Feature CSV not found: {:?}", feature_csv)); + } + + let output_dir = comparison_generator.get_output_dir(); + let chart_output = output_dir.join("latency_comparison.png"); + + let script_path = "bin/reth-bench/scripts/compare_newpayload_latency.py"; + + info!("Running Python comparison script with 
uv..."); + let mut cmd = Command::new("uv"); + cmd.args([ + "run", + script_path, + &baseline_csv.to_string_lossy(), + &feature_csv.to_string_lossy(), + "-o", + &chart_output.to_string_lossy(), + ]); + + // Set process group for consistent signal handling + #[cfg(unix)] + { + cmd.process_group(0); + } + + let output = cmd.output().await.map_err(|e| { + eyre!("Failed to execute Python script with uv: {}. Make sure uv is installed.", e) + })?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + let stdout = String::from_utf8_lossy(&output.stdout); + return Err(eyre!( + "Python script failed with exit code {:?}:\nstdout: {}\nstderr: {}", + output.status.code(), + stdout, + stderr + )); + } + + let stdout = String::from_utf8_lossy(&output.stdout); + if !stdout.trim().is_empty() { + info!("Python script output:\n{}", stdout); + } + + info!("Comparison chart generated: {:?}", chart_output); + Ok(()) +} + +/// Start samply servers for viewing profiles +async fn start_samply_servers(args: &Args) -> Result<()> { + info!("Starting samply servers for profile viewing..."); + + let output_dir = args.output_dir_path(); + let profiles_dir = output_dir.join("profiles"); + + // Build profile paths + let baseline_profile = profiles_dir.join("baseline.json.gz"); + let feature_profile = profiles_dir.join("feature.json.gz"); + + // Check if profiles exist + if !baseline_profile.exists() { + warn!("Baseline profile not found: {:?}", baseline_profile); + return Ok(()); + } + if !feature_profile.exists() { + warn!("Feature profile not found: {:?}", feature_profile); + return Ok(()); + } + + // Find two consecutive available ports starting from 3000 + let (baseline_port, feature_port) = find_consecutive_ports(3000)?; + info!("Found available ports: {} and {}", baseline_port, feature_port); + + // Get samply path + let samply_path = get_samply_path().await?; + + // Start baseline server + info!("Starting samply server for baseline '{}' on port {}", args.baseline_ref, baseline_port); + let mut baseline_cmd = Command::new(&samply_path); + baseline_cmd + .args(["load", "--port", &baseline_port.to_string(), &baseline_profile.to_string_lossy()]) + .kill_on_drop(true); + + // Set process group for consistent signal handling + #[cfg(unix)] + { + baseline_cmd.process_group(0); + } + + // Conditionally pipe output based on log level + if tracing::enabled!(tracing::Level::DEBUG) { + baseline_cmd.stdout(std::process::Stdio::piped()).stderr(std::process::Stdio::piped()); + } else { + baseline_cmd.stdout(std::process::Stdio::null()).stderr(std::process::Stdio::null()); + } + + // Debug log the command + debug!("Executing samply load command: {:?}", baseline_cmd); + + let mut baseline_child = + baseline_cmd.spawn().wrap_err("Failed to start samply server for baseline")?; + + // Stream baseline samply output if debug logging is enabled + if tracing::enabled!(tracing::Level::DEBUG) { + if let Some(stdout) = baseline_child.stdout.take() { + tokio::spawn(async move { + use tokio::io::{AsyncBufReadExt, BufReader}; + let reader = BufReader::new(stdout); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[SAMPLY-BASELINE] {}", line); + } + }); + } + + if let Some(stderr) = baseline_child.stderr.take() { + tokio::spawn(async move { + use tokio::io::{AsyncBufReadExt, BufReader}; + let reader = BufReader::new(stderr); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[SAMPLY-BASELINE] {}", line); + } + 
}); + } + } + + // Start feature server + info!("Starting samply server for feature '{}' on port {}", args.feature_ref, feature_port); + let mut feature_cmd = Command::new(&samply_path); + feature_cmd + .args(["load", "--port", &feature_port.to_string(), &feature_profile.to_string_lossy()]) + .kill_on_drop(true); + + // Set process group for consistent signal handling + #[cfg(unix)] + { + feature_cmd.process_group(0); + } + + // Conditionally pipe output based on log level + if tracing::enabled!(tracing::Level::DEBUG) { + feature_cmd.stdout(std::process::Stdio::piped()).stderr(std::process::Stdio::piped()); + } else { + feature_cmd.stdout(std::process::Stdio::null()).stderr(std::process::Stdio::null()); + } + + // Debug log the command + debug!("Executing samply load command: {:?}", feature_cmd); + + let mut feature_child = + feature_cmd.spawn().wrap_err("Failed to start samply server for feature")?; + + // Stream feature samply output if debug logging is enabled + if tracing::enabled!(tracing::Level::DEBUG) { + if let Some(stdout) = feature_child.stdout.take() { + tokio::spawn(async move { + use tokio::io::{AsyncBufReadExt, BufReader}; + let reader = BufReader::new(stdout); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[SAMPLY-FEATURE] {}", line); + } + }); + } + + if let Some(stderr) = feature_child.stderr.take() { + tokio::spawn(async move { + use tokio::io::{AsyncBufReadExt, BufReader}; + let reader = BufReader::new(stderr); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[SAMPLY-FEATURE] {}", line); + } + }); + } + } + + // Give servers time to start + tokio::time::sleep(std::time::Duration::from_secs(2)).await; + + // Print access information + println!("\n=== SAMPLY PROFILE SERVERS STARTED ==="); + println!("Baseline '{}': http://127.0.0.1:{}", args.baseline_ref, baseline_port); + println!("Feature '{}': http://127.0.0.1:{}", args.feature_ref, feature_port); + println!("\nOpen the URLs in your browser to view the profiles."); + println!("Press Ctrl+C to stop the servers and exit."); + println!("=========================================\n"); + + // Wait for Ctrl+C or process termination + let ctrl_c = tokio::signal::ctrl_c(); + let baseline_wait = baseline_child.wait(); + let feature_wait = feature_child.wait(); + + tokio::select! 
{
+        _ = ctrl_c => {
+            info!("Received Ctrl+C, shutting down samply servers...");
+        }
+        result = baseline_wait => {
+            match result {
+                Ok(status) => info!("Baseline samply server exited with status: {}", status),
+                Err(e) => warn!("Baseline samply server error: {}", e),
+            }
+        }
+        result = feature_wait => {
+            match result {
+                Ok(status) => info!("Feature samply server exited with status: {}", status),
+                Err(e) => warn!("Feature samply server error: {}", e),
+            }
+        }
+    }
+
+    // Ensure both processes are terminated
+    let _ = baseline_child.kill().await;
+    let _ = feature_child.kill().await;
+
+    info!("Samply servers stopped.");
+    Ok(())
+}
+
+/// Find two consecutive available ports starting from the given port
+fn find_consecutive_ports(start_port: u16) -> Result<(u16, u16)> {
+    for port in start_port..=65533 {
+        // Check if both port and port+1 are available
+        if is_port_available(port) && is_port_available(port + 1) {
+            return Ok((port, port + 1));
+        }
+    }
+    Err(eyre!("Could not find two consecutive available ports starting from {}", start_port))
+}
+
+/// Check if a port is available by attempting to bind to it
+fn is_port_available(port: u16) -> bool {
+    TcpListener::bind(("127.0.0.1", port)).is_ok()
+}
+
+/// Get the absolute path to samply using 'which' command
+async fn get_samply_path() -> Result<String> {
+    let output = Command::new("which")
+        .arg("samply")
+        .output()
+        .await
+        .wrap_err("Failed to execute 'which samply' command")?;
+
+    if !output.status.success() {
+        return Err(eyre!("samply not found in PATH"));
+    }
+
+    let samply_path = String::from_utf8(output.stdout)
+        .wrap_err("samply path is not valid UTF-8")?
+        .trim()
+        .to_string();
+
+    if samply_path.is_empty() {
+        return Err(eyre!("which samply returned empty path"));
+    }
+
+    Ok(samply_path)
+}
diff --git a/bin/reth-bench-compare/src/comparison.rs b/bin/reth-bench-compare/src/comparison.rs
new file mode 100644
index 0000000000..087ccaf3ce
--- /dev/null
+++ b/bin/reth-bench-compare/src/comparison.rs
@@ -0,0 +1,710 @@
+//! Results comparison and report generation.
+
+use crate::cli::Args;
+use chrono::{DateTime, Utc};
+use csv::Reader;
+use eyre::{eyre, Result, WrapErr};
+use serde::{Deserialize, Serialize};
+use std::{
+    cmp::Ordering,
+    collections::HashMap,
+    fs,
+    path::{Path, PathBuf},
+};
+use tracing::{info, warn};
+
+/// Manages comparison between baseline and feature reference results
+pub(crate) struct ComparisonGenerator {
+    output_dir: PathBuf,
+    timestamp: String,
+    baseline_ref_name: String,
+    feature_ref_name: String,
+    baseline_results: Option<BenchmarkResults>,
+    feature_results: Option<BenchmarkResults>,
+    baseline_command: Option<String>,
+    feature_command: Option<String>,
+}
+
+/// Represents the results from a single benchmark run
+#[derive(Debug, Clone)]
+pub(crate) struct BenchmarkResults {
+    pub ref_name: String,
+    pub combined_latency_data: Vec<CombinedLatencyRow>,
+    pub summary: BenchmarkSummary,
+    pub start_timestamp: Option<DateTime<Utc>>,
+    pub end_timestamp: Option<DateTime<Utc>>,
+}
+
+/// Combined latency CSV row structure
+#[derive(Debug, Clone, Deserialize, Serialize)]
+pub(crate) struct CombinedLatencyRow {
+    pub block_number: u64,
+    pub transaction_count: u64,
+    pub gas_used: u64,
+    pub new_payload_latency: u128,
+}
+
+/// Total gas CSV row structure
+#[derive(Debug, Clone, Deserialize, Serialize)]
+pub(crate) struct TotalGasRow {
+    pub block_number: u64,
+    pub transaction_count: u64,
+    pub gas_used: u64,
+    pub time: u128,
+}
+
+/// Summary statistics for a benchmark run.
+///
+/// Latencies are derived from per-block `engine_newPayload` timings (converted from µs to ms):
+/// - `mean_new_payload_latency_ms`: arithmetic mean latency across blocks.
+/// - `median_new_payload_latency_ms`: p50 latency across blocks.
+/// - `p90_new_payload_latency_ms` / `p99_new_payload_latency_ms`: tail latencies across blocks.
+#[derive(Debug, Clone, Serialize)]
+pub(crate) struct BenchmarkSummary {
+    pub total_blocks: u64,
+    pub total_gas_used: u64,
+    pub total_duration_ms: u128,
+    pub mean_new_payload_latency_ms: f64,
+    pub median_new_payload_latency_ms: f64,
+    pub p90_new_payload_latency_ms: f64,
+    pub p99_new_payload_latency_ms: f64,
+    pub gas_per_second: f64,
+    pub blocks_per_second: f64,
+    pub min_block_number: u64,
+    pub max_block_number: u64,
+}
+
+/// Comparison report between two benchmark runs
+#[derive(Debug, Serialize)]
+pub(crate) struct ComparisonReport {
+    pub timestamp: String,
+    pub baseline: RefInfo,
+    pub feature: RefInfo,
+    pub comparison_summary: ComparisonSummary,
+    pub per_block_comparisons: Vec<BlockComparison>,
+}
+
+/// Information about a reference in the comparison
+#[derive(Debug, Serialize)]
+pub(crate) struct RefInfo {
+    pub ref_name: String,
+    pub summary: BenchmarkSummary,
+    pub start_timestamp: Option<DateTime<Utc>>,
+    pub end_timestamp: Option<DateTime<Utc>>,
+    pub reth_command: Option<String>,
+}
+
+/// Summary of the comparison between references.
+///
+/// Percent deltas are `(feature - baseline) / baseline * 100`:
+/// - `new_payload_latency_p50_change_percent` / p90 / p99: percent changes of the respective
+///   per-block percentiles.
+/// - `per_block_latency_change_mean_percent` / `per_block_latency_change_median_percent` are the
+///   mean and median of per-block percent deltas (feature vs baseline), capturing block-level
+///   drift.
+/// - `per_block_latency_change_std_dev_percent`: standard deviation of per-block percent changes,
+///   measuring consistency of performance changes across blocks.
+/// - `new_payload_total_latency_change_percent` is the percent change of the total newPayload time
+///   across the run.
+///
+/// Positive means slower/higher; negative means faster/lower.
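+///
+/// For example (hypothetical numbers): a baseline p50 of 100 ms and a feature p50 of 90 ms gives
+/// `(90 - 100) / 100 * 100 = -10%`, i.e. the feature is 10% faster at the median.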
+#[derive(Debug, Serialize)] +pub(crate) struct ComparisonSummary { + pub per_block_latency_change_mean_percent: f64, + pub per_block_latency_change_median_percent: f64, + pub per_block_latency_change_std_dev_percent: f64, + pub new_payload_total_latency_change_percent: f64, + pub new_payload_latency_p50_change_percent: f64, + pub new_payload_latency_p90_change_percent: f64, + pub new_payload_latency_p99_change_percent: f64, + pub gas_per_second_change_percent: f64, + pub blocks_per_second_change_percent: f64, +} + +/// Per-block comparison data +#[derive(Debug, Serialize)] +pub(crate) struct BlockComparison { + pub block_number: u64, + pub transaction_count: u64, + pub gas_used: u64, + pub baseline_new_payload_latency: u128, + pub feature_new_payload_latency: u128, + pub new_payload_latency_change_percent: f64, +} + +impl ComparisonGenerator { + /// Create a new comparison generator + pub(crate) fn new(args: &Args) -> Self { + let now: DateTime = Utc::now(); + let timestamp = now.format("%Y%m%d_%H%M%S").to_string(); + + Self { + output_dir: args.output_dir_path(), + timestamp, + baseline_ref_name: args.baseline_ref.clone(), + feature_ref_name: args.feature_ref.clone(), + baseline_results: None, + feature_results: None, + baseline_command: None, + feature_command: None, + } + } + + /// Get the output directory for a specific reference + pub(crate) fn get_ref_output_dir(&self, ref_type: &str) -> PathBuf { + self.output_dir.join("results").join(&self.timestamp).join(ref_type) + } + + /// Get the main output directory for this comparison run + pub(crate) fn get_output_dir(&self) -> PathBuf { + self.output_dir.join("results").join(&self.timestamp) + } + + /// Add benchmark results for a reference + pub(crate) fn add_ref_results(&mut self, ref_type: &str, output_path: &Path) -> Result<()> { + let ref_name = match ref_type { + "baseline" => &self.baseline_ref_name, + "feature" => &self.feature_ref_name, + _ => return Err(eyre!("Unknown reference type: {}", ref_type)), + }; + + let results = self.load_benchmark_results(ref_name, output_path)?; + + match ref_type { + "baseline" => self.baseline_results = Some(results), + "feature" => self.feature_results = Some(results), + _ => return Err(eyre!("Unknown reference type: {}", ref_type)), + } + + info!("Loaded benchmark results for {} reference", ref_type); + + Ok(()) + } + + /// Set the benchmark run timestamps for a reference + pub(crate) fn set_ref_timestamps( + &mut self, + ref_type: &str, + start: DateTime, + end: DateTime, + ) -> Result<()> { + match ref_type { + "baseline" => { + if let Some(ref mut results) = self.baseline_results { + results.start_timestamp = Some(start); + results.end_timestamp = Some(end); + } else { + return Err(eyre!("Baseline results not loaded yet")); + } + } + "feature" => { + if let Some(ref mut results) = self.feature_results { + results.start_timestamp = Some(start); + results.end_timestamp = Some(end); + } else { + return Err(eyre!("Feature results not loaded yet")); + } + } + _ => return Err(eyre!("Unknown reference type: {}", ref_type)), + } + + Ok(()) + } + + /// Set the reth command for a reference + pub(crate) fn set_ref_command(&mut self, ref_type: &str, command: String) -> Result<()> { + match ref_type { + "baseline" => { + self.baseline_command = Some(command); + } + "feature" => { + self.feature_command = Some(command); + } + _ => return Err(eyre!("Unknown reference type: {}", ref_type)), + } + + Ok(()) + } + + /// Generate the final comparison report + pub(crate) async fn 
generate_comparison_report(&self) -> Result<()> { + info!("Generating comparison report..."); + + let baseline = + self.baseline_results.as_ref().ok_or_else(|| eyre!("Baseline results not loaded"))?; + + let feature = + self.feature_results.as_ref().ok_or_else(|| eyre!("Feature results not loaded"))?; + + let per_block_comparisons = self.calculate_per_block_comparisons(baseline, feature)?; + let comparison_summary = self.calculate_comparison_summary( + &baseline.summary, + &feature.summary, + &per_block_comparisons, + )?; + + let report = ComparisonReport { + timestamp: self.timestamp.clone(), + baseline: RefInfo { + ref_name: baseline.ref_name.clone(), + summary: baseline.summary.clone(), + start_timestamp: baseline.start_timestamp, + end_timestamp: baseline.end_timestamp, + reth_command: self.baseline_command.clone(), + }, + feature: RefInfo { + ref_name: feature.ref_name.clone(), + summary: feature.summary.clone(), + start_timestamp: feature.start_timestamp, + end_timestamp: feature.end_timestamp, + reth_command: self.feature_command.clone(), + }, + comparison_summary, + per_block_comparisons, + }; + + // Write reports + self.write_comparison_reports(&report).await?; + + // Print summary to console + self.print_comparison_summary(&report); + + Ok(()) + } + + /// Load benchmark results from CSV files + fn load_benchmark_results( + &self, + ref_name: &str, + output_path: &Path, + ) -> Result { + let combined_latency_path = output_path.join("combined_latency.csv"); + let total_gas_path = output_path.join("total_gas.csv"); + + let combined_latency_data = self.load_combined_latency_csv(&combined_latency_path)?; + let total_gas_data = self.load_total_gas_csv(&total_gas_path)?; + + let summary = self.calculate_summary(&combined_latency_data, &total_gas_data)?; + + Ok(BenchmarkResults { + ref_name: ref_name.to_string(), + combined_latency_data, + summary, + start_timestamp: None, + end_timestamp: None, + }) + } + + /// Load combined latency CSV data + fn load_combined_latency_csv(&self, path: &Path) -> Result> { + let mut reader = Reader::from_path(path) + .wrap_err_with(|| format!("Failed to open combined latency CSV: {path:?}"))?; + + let mut rows = Vec::new(); + for result in reader.deserialize() { + let row: CombinedLatencyRow = result + .wrap_err_with(|| format!("Failed to parse combined latency row in {path:?}"))?; + rows.push(row); + } + + if rows.is_empty() { + return Err(eyre!("No data found in combined latency CSV: {:?}", path)); + } + + Ok(rows) + } + + /// Load total gas CSV data + fn load_total_gas_csv(&self, path: &Path) -> Result> { + let mut reader = Reader::from_path(path) + .wrap_err_with(|| format!("Failed to open total gas CSV: {path:?}"))?; + + let mut rows = Vec::new(); + for result in reader.deserialize() { + let row: TotalGasRow = + result.wrap_err_with(|| format!("Failed to parse total gas row in {path:?}"))?; + rows.push(row); + } + + if rows.is_empty() { + return Err(eyre!("No data found in total gas CSV: {:?}", path)); + } + + Ok(rows) + } + + /// Calculate summary statistics for a benchmark run. + /// + /// Computes latency statistics from per-block `new_payload_latency` values in `combined_data` + /// (converting from µs to ms), and throughput metrics using the total run duration from + /// `total_gas_data`. Percentiles (p50/p90/p99) use linear interpolation on sorted latencies. 
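+    ///
+    /// As a rough example with made-up numbers: 100 blocks totalling 1.5e9 gas over a 30 s run yield
+    /// `gas_per_second = 5.0e7` (50 Mgas/s) and `blocks_per_second ≈ 3.33`.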
+ fn calculate_summary( + &self, + combined_data: &[CombinedLatencyRow], + total_gas_data: &[TotalGasRow], + ) -> Result { + if combined_data.is_empty() || total_gas_data.is_empty() { + return Err(eyre!("Cannot calculate summary for empty data")); + } + + let total_blocks = combined_data.len() as u64; + let total_gas_used: u64 = combined_data.iter().map(|r| r.gas_used).sum(); + + let total_duration_ms = total_gas_data.last().unwrap().time / 1000; // Convert microseconds to milliseconds + + let latencies_ms: Vec = + combined_data.iter().map(|r| r.new_payload_latency as f64 / 1000.0).collect(); + let mean_new_payload_latency_ms: f64 = + latencies_ms.iter().sum::() / total_blocks as f64; + + let mut sorted_latencies_ms = latencies_ms; + sorted_latencies_ms.sort_by(|a, b| a.partial_cmp(b).unwrap_or(Ordering::Equal)); + let median_new_payload_latency_ms = percentile(&sorted_latencies_ms, 0.5); + let p90_new_payload_latency_ms = percentile(&sorted_latencies_ms, 0.9); + let p99_new_payload_latency_ms = percentile(&sorted_latencies_ms, 0.99); + + let total_duration_seconds = total_duration_ms as f64 / 1000.0; + let gas_per_second = if total_duration_seconds > f64::EPSILON { + total_gas_used as f64 / total_duration_seconds + } else { + 0.0 + }; + + let blocks_per_second = if total_duration_seconds > f64::EPSILON { + total_blocks as f64 / total_duration_seconds + } else { + 0.0 + }; + + let min_block_number = combined_data.first().unwrap().block_number; + let max_block_number = combined_data.last().unwrap().block_number; + + Ok(BenchmarkSummary { + total_blocks, + total_gas_used, + total_duration_ms, + mean_new_payload_latency_ms, + median_new_payload_latency_ms, + p90_new_payload_latency_ms, + p99_new_payload_latency_ms, + gas_per_second, + blocks_per_second, + min_block_number, + max_block_number, + }) + } + + /// Calculate comparison summary between baseline and feature + fn calculate_comparison_summary( + &self, + baseline: &BenchmarkSummary, + feature: &BenchmarkSummary, + per_block_comparisons: &[BlockComparison], + ) -> Result { + let calc_percent_change = |baseline: f64, feature: f64| -> f64 { + if baseline.abs() > f64::EPSILON { + ((feature - baseline) / baseline) * 100.0 + } else { + 0.0 + } + }; + + // Calculate per-block statistics. "Per-block" means: for each block, compute the percent + // change (feature - baseline) / baseline * 100, then calculate statistics across those + // per-block percent changes. This captures how consistently the feature performs relative + // to baseline across all blocks. 
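+        // For instance (hypothetical numbers): per-block changes of [+10%, -10%] have a mean of 0% but a
+        // std dev of 10%, i.e. individual blocks still moved even though the average did not.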
+ let per_block_percent_changes: Vec = + per_block_comparisons.iter().map(|c| c.new_payload_latency_change_percent).collect(); + let per_block_latency_change_mean_percent = if per_block_percent_changes.is_empty() { + 0.0 + } else { + per_block_percent_changes.iter().sum::() / per_block_percent_changes.len() as f64 + }; + let per_block_latency_change_median_percent = if per_block_percent_changes.is_empty() { + 0.0 + } else { + let mut sorted = per_block_percent_changes.clone(); + sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(Ordering::Equal)); + percentile(&sorted, 0.5) + }; + let per_block_latency_change_std_dev_percent = + calculate_std_dev(&per_block_percent_changes, per_block_latency_change_mean_percent); + + let baseline_total_latency_ms = + baseline.mean_new_payload_latency_ms * baseline.total_blocks as f64; + let feature_total_latency_ms = + feature.mean_new_payload_latency_ms * feature.total_blocks as f64; + let new_payload_total_latency_change_percent = + calc_percent_change(baseline_total_latency_ms, feature_total_latency_ms); + + Ok(ComparisonSummary { + per_block_latency_change_mean_percent, + per_block_latency_change_median_percent, + per_block_latency_change_std_dev_percent, + new_payload_total_latency_change_percent, + new_payload_latency_p50_change_percent: calc_percent_change( + baseline.median_new_payload_latency_ms, + feature.median_new_payload_latency_ms, + ), + new_payload_latency_p90_change_percent: calc_percent_change( + baseline.p90_new_payload_latency_ms, + feature.p90_new_payload_latency_ms, + ), + new_payload_latency_p99_change_percent: calc_percent_change( + baseline.p99_new_payload_latency_ms, + feature.p99_new_payload_latency_ms, + ), + gas_per_second_change_percent: calc_percent_change( + baseline.gas_per_second, + feature.gas_per_second, + ), + blocks_per_second_change_percent: calc_percent_change( + baseline.blocks_per_second, + feature.blocks_per_second, + ), + }) + } + + /// Calculate per-block comparisons + fn calculate_per_block_comparisons( + &self, + baseline: &BenchmarkResults, + feature: &BenchmarkResults, + ) -> Result> { + let mut baseline_map: HashMap = HashMap::new(); + for row in &baseline.combined_latency_data { + baseline_map.insert(row.block_number, row); + } + + let mut comparisons = Vec::new(); + for feature_row in &feature.combined_latency_data { + if let Some(baseline_row) = baseline_map.get(&feature_row.block_number) { + let calc_percent_change = |baseline: u128, feature: u128| -> f64 { + if baseline > 0 { + ((feature as f64 - baseline as f64) / baseline as f64) * 100.0 + } else { + 0.0 + } + }; + + let comparison = BlockComparison { + block_number: feature_row.block_number, + transaction_count: feature_row.transaction_count, + gas_used: feature_row.gas_used, + baseline_new_payload_latency: baseline_row.new_payload_latency, + feature_new_payload_latency: feature_row.new_payload_latency, + new_payload_latency_change_percent: calc_percent_change( + baseline_row.new_payload_latency, + feature_row.new_payload_latency, + ), + }; + comparisons.push(comparison); + } else { + warn!("Block {} not found in baseline data", feature_row.block_number); + } + } + + Ok(comparisons) + } + + /// Write comparison reports to files + async fn write_comparison_reports(&self, report: &ComparisonReport) -> Result<()> { + let report_dir = self.output_dir.join("results").join(&self.timestamp); + fs::create_dir_all(&report_dir) + .wrap_err_with(|| format!("Failed to create report directory: {report_dir:?}"))?; + + // Write JSON report + let json_path = 
report_dir.join("comparison_report.json"); + let json_content = serde_json::to_string_pretty(report) + .wrap_err("Failed to serialize comparison report to JSON")?; + fs::write(&json_path, json_content) + .wrap_err_with(|| format!("Failed to write JSON report: {json_path:?}"))?; + + // Write CSV report for per-block comparisons + let csv_path = report_dir.join("per_block_comparison.csv"); + let mut writer = csv::Writer::from_path(&csv_path) + .wrap_err_with(|| format!("Failed to create CSV writer: {csv_path:?}"))?; + + for comparison in &report.per_block_comparisons { + writer.serialize(comparison).wrap_err("Failed to write comparison row to CSV")?; + } + writer.flush().wrap_err("Failed to flush CSV writer")?; + + info!("Comparison reports written to: {:?}", report_dir); + Ok(()) + } + + /// Print comparison summary to console + fn print_comparison_summary(&self, report: &ComparisonReport) { + // Parse and format timestamp nicely + let formatted_timestamp = if let Ok(dt) = chrono::DateTime::parse_from_str( + &format!("{} +0000", report.timestamp.replace('_', " ")), + "%Y%m%d %H%M%S %z", + ) { + dt.format("%Y-%m-%d %H:%M:%S UTC").to_string() + } else { + // Fallback to original if parsing fails + report.timestamp.clone() + }; + + println!("\n=== BENCHMARK COMPARISON SUMMARY ==="); + println!("Timestamp: {formatted_timestamp}"); + println!("Baseline: {}", report.baseline.ref_name); + println!("Feature: {}", report.feature.ref_name); + println!(); + + let summary = &report.comparison_summary; + + println!("Performance Changes:"); + println!( + " NewPayload Latency per-block mean change: {:+.2}%", + summary.per_block_latency_change_mean_percent + ); + println!( + " NewPayload Latency per-block median change: {:+.2}%", + summary.per_block_latency_change_median_percent + ); + println!( + " NewPayload Latency per-block std dev: {:.2}%", + summary.per_block_latency_change_std_dev_percent + ); + println!( + " Total newPayload time change: {:+.2}%", + summary.new_payload_total_latency_change_percent + ); + println!( + " NewPayload Latency p50: {:+.2}%", + summary.new_payload_latency_p50_change_percent + ); + println!( + " NewPayload Latency p90: {:+.2}%", + summary.new_payload_latency_p90_change_percent + ); + println!( + " NewPayload Latency p99: {:+.2}%", + summary.new_payload_latency_p99_change_percent + ); + println!( + " Gas/Second: {:+.2}%", + summary.gas_per_second_change_percent + ); + println!( + " Blocks/Second: {:+.2}%", + summary.blocks_per_second_change_percent + ); + println!(); + + println!("Baseline Summary:"); + let baseline = &report.baseline.summary; + println!( + " Blocks: {} (blocks {} to {}), Gas: {}, Duration: {:.2}s", + baseline.total_blocks, + baseline.min_block_number, + baseline.max_block_number, + baseline.total_gas_used, + baseline.total_duration_ms as f64 / 1000.0 + ); + println!(" NewPayload latency (ms):"); + println!( + " mean: {:.2}, p50: {:.2}, p90: {:.2}, p99: {:.2}", + baseline.mean_new_payload_latency_ms, + baseline.median_new_payload_latency_ms, + baseline.p90_new_payload_latency_ms, + baseline.p99_new_payload_latency_ms + ); + if let (Some(start), Some(end)) = + (&report.baseline.start_timestamp, &report.baseline.end_timestamp) + { + println!( + " Started: {}, Ended: {}", + start.format("%Y-%m-%d %H:%M:%S UTC"), + end.format("%Y-%m-%d %H:%M:%S UTC") + ); + } + if let Some(ref cmd) = report.baseline.reth_command { + println!(" Command: {}", cmd); + } + println!(); + + println!("Feature Summary:"); + let feature = &report.feature.summary; + println!( + " Blocks: 
{} (blocks {} to {}), Gas: {}, Duration: {:.2}s", + feature.total_blocks, + feature.min_block_number, + feature.max_block_number, + feature.total_gas_used, + feature.total_duration_ms as f64 / 1000.0 + ); + println!(" NewPayload latency (ms):"); + println!( + " mean: {:.2}, p50: {:.2}, p90: {:.2}, p99: {:.2}", + feature.mean_new_payload_latency_ms, + feature.median_new_payload_latency_ms, + feature.p90_new_payload_latency_ms, + feature.p99_new_payload_latency_ms + ); + if let (Some(start), Some(end)) = + (&report.feature.start_timestamp, &report.feature.end_timestamp) + { + println!( + " Started: {}, Ended: {}", + start.format("%Y-%m-%d %H:%M:%S UTC"), + end.format("%Y-%m-%d %H:%M:%S UTC") + ); + } + if let Some(ref cmd) = report.feature.reth_command { + println!(" Command: {}", cmd); + } + println!(); + } +} + +/// Calculate standard deviation from a set of values and their mean. +/// +/// Computes the population standard deviation using the formula: +/// `sqrt(sum((x - mean)²) / n)` +/// +/// Returns 0.0 for empty input. +fn calculate_std_dev(values: &[f64], mean: f64) -> f64 { + if values.is_empty() { + return 0.0; + } + + let variance = values + .iter() + .map(|x| { + let diff = x - mean; + diff * diff + }) + .sum::() / + values.len() as f64; + + variance.sqrt() +} + +/// Calculate percentile using linear interpolation on a sorted slice. +/// +/// Computes `rank = percentile × (n - 1)` where n is the array length. If the rank falls +/// between two indices, linearly interpolates between those values. For example, with 100 values, +/// p90 computes rank = 0.9 × 99 = 89.1, then returns `values[89] × 0.9 + values[90] × 0.1`. +/// +/// Returns 0.0 for empty input. +fn percentile(sorted_values: &[f64], percentile: f64) -> f64 { + if sorted_values.is_empty() { + return 0.0; + } + + let clamped = percentile.clamp(0.0, 1.0); + let max_index = sorted_values.len() - 1; + let rank = clamped * max_index as f64; + let lower = rank.floor() as usize; + let upper = rank.ceil() as usize; + + if lower == upper { + sorted_values[lower] + } else { + let weight = rank - lower as f64; + sorted_values[lower].mul_add(1.0 - weight, sorted_values[upper] * weight) + } +} diff --git a/bin/reth-bench-compare/src/compilation.rs b/bin/reth-bench-compare/src/compilation.rs new file mode 100644 index 0000000000..3795dc58e9 --- /dev/null +++ b/bin/reth-bench-compare/src/compilation.rs @@ -0,0 +1,365 @@ +//! Compilation operations for reth and reth-bench. 
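+//!
+//! Compiled binaries are cached under `<output_dir>/bin/`, keyed by the first 8 characters of the commit
+//! hash (e.g. `reth_1a2b3c4d` or `op-reth_1a2b3c4d`, hypothetical hashes), so repeated runs against the
+//! same commit reuse the cached binary instead of recompiling.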
+ +use crate::git::GitManager; +use alloy_primitives::address; +use alloy_provider::{Provider, ProviderBuilder}; +use eyre::{eyre, Result, WrapErr}; +use std::{fs, path::PathBuf, process::Command}; +use tracing::{debug, error, info, warn}; + +/// Manages compilation operations for reth components +#[derive(Debug)] +pub(crate) struct CompilationManager { + repo_root: String, + output_dir: PathBuf, + git_manager: GitManager, + features: String, + enable_profiling: bool, +} + +impl CompilationManager { + /// Create a new `CompilationManager` + pub(crate) const fn new( + repo_root: String, + output_dir: PathBuf, + git_manager: GitManager, + features: String, + enable_profiling: bool, + ) -> Result { + Ok(Self { repo_root, output_dir, git_manager, features, enable_profiling }) + } + + /// Detect if the RPC endpoint is an Optimism chain + pub(crate) async fn detect_optimism_chain(&self, rpc_url: &str) -> Result { + info!("Detecting chain type from RPC endpoint..."); + + // Create Alloy provider + let url = rpc_url.parse().map_err(|e| eyre!("Invalid RPC URL '{}': {}", rpc_url, e))?; + let provider = ProviderBuilder::new().connect_http(url); + + // Check for Optimism predeploy at address 0x420000000000000000000000000000000000000F + let is_optimism = !provider + .get_code_at(address!("0x420000000000000000000000000000000000000F")) + .await? + .is_empty(); + + if is_optimism { + info!("Detected Optimism chain"); + } else { + info!("Detected Ethereum chain"); + } + + Ok(is_optimism) + } + + /// Get the path to the cached binary using explicit commit hash + pub(crate) fn get_cached_binary_path_for_commit( + &self, + commit: &str, + is_optimism: bool, + ) -> PathBuf { + let identifier = &commit[..8]; // Use first 8 chars of commit + + let binary_name = if is_optimism { + format!("op-reth_{}", identifier) + } else { + format!("reth_{}", identifier) + }; + + self.output_dir.join("bin").join(binary_name) + } + + /// Compile reth using cargo build and cache the binary + pub(crate) fn compile_reth(&self, commit: &str, is_optimism: bool) -> Result<()> { + // Validate that current git commit matches the expected commit + let current_commit = self.git_manager.get_current_commit()?; + if current_commit != commit { + return Err(eyre!( + "Git commit mismatch! Expected: {}, but currently at: {}", + &commit[..8], + ¤t_commit[..8] + )); + } + + let cached_path = self.get_cached_binary_path_for_commit(commit, is_optimism); + + // Check if cached binary already exists (since path contains commit hash, it's valid) + if cached_path.exists() { + info!("Using cached binary (commit: {})", &commit[..8]); + return Ok(()); + } + + info!("No cached binary found, compiling (commit: {})...", &commit[..8]); + + let binary_name = if is_optimism { "op-reth" } else { "reth" }; + + info!( + "Compiling {} with profiling configuration (commit: {})...", + binary_name, + &commit[..8] + ); + + let mut cmd = Command::new("cargo"); + cmd.arg("build").arg("--profile").arg("profiling"); + + // Append samply feature when profiling to enable tracing span markers. + // NOTE: The `samply` feature must exist in the branch being compiled. If comparing + // against an older branch that predates the samply integration, compilation will fail + // or markers won't appear. In that case, omit --profile or ensure both branches + // include the samply feature support. 
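+        // e.g. the default "jemalloc,asm-keccak" becomes "jemalloc,asm-keccak,samply" when --profile is
+        // set and the list does not already mention samply.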
+ let features = if self.enable_profiling && !self.features.contains("samply") { + format!("{},samply", self.features) + } else { + self.features.clone() + }; + cmd.arg("--features").arg(&features); + info!("Using features: {}", features); + + // Add bin-specific arguments for optimism + if is_optimism { + cmd.arg("--bin") + .arg("op-reth") + .arg("--manifest-path") + .arg("crates/optimism/bin/Cargo.toml"); + } + + cmd.current_dir(&self.repo_root); + + // Set RUSTFLAGS for native CPU optimization + cmd.env("RUSTFLAGS", "-C target-cpu=native"); + + // Debug log the command + debug!("Executing cargo command: {:?}", cmd); + + let output = cmd.output().wrap_err("Failed to execute cargo build command")?; + + // Print stdout and stderr with prefixes at debug level + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + + for line in stdout.lines() { + if !line.trim().is_empty() { + debug!("[CARGO] {}", line); + } + } + + for line in stderr.lines() { + if !line.trim().is_empty() { + debug!("[CARGO] {}", line); + } + } + + if !output.status.success() { + // Print all output when compilation fails + error!("Cargo build failed with exit code: {:?}", output.status.code()); + + if !stdout.trim().is_empty() { + error!("Cargo stdout:"); + for line in stdout.lines() { + error!(" {}", line); + } + } + + if !stderr.trim().is_empty() { + error!("Cargo stderr:"); + for line in stderr.lines() { + error!(" {}", line); + } + } + + return Err(eyre!("Compilation failed with exit code: {:?}", output.status.code())); + } + + info!("{} compilation completed", binary_name); + + // Copy the compiled binary to cache + let source_path = + PathBuf::from(&self.repo_root).join(format!("target/profiling/{}", binary_name)); + if !source_path.exists() { + return Err(eyre!("Compiled binary not found at {:?}", source_path)); + } + + // Create bin directory if it doesn't exist + let bin_dir = self.output_dir.join("bin"); + fs::create_dir_all(&bin_dir).wrap_err("Failed to create bin directory")?; + + // Copy binary to cache + fs::copy(&source_path, &cached_path).wrap_err("Failed to copy binary to cache")?; + + // Make the cached binary executable + #[cfg(unix)] + { + use std::os::unix::fs::PermissionsExt; + let mut perms = fs::metadata(&cached_path)?.permissions(); + perms.set_mode(0o755); + fs::set_permissions(&cached_path, perms)?; + } + + info!("Cached compiled binary at: {:?}", cached_path); + Ok(()) + } + + /// Check if reth-bench is available in PATH + pub(crate) fn is_reth_bench_available(&self) -> bool { + match Command::new("which").arg("reth-bench").output() { + Ok(output) => { + if output.status.success() { + let path = String::from_utf8_lossy(&output.stdout); + info!("Found reth-bench: {}", path.trim()); + true + } else { + false + } + } + Err(_) => false, + } + } + + /// Check if samply is available in PATH + pub(crate) fn is_samply_available(&self) -> bool { + match Command::new("which").arg("samply").output() { + Ok(output) => { + if output.status.success() { + let path = String::from_utf8_lossy(&output.stdout); + info!("Found samply: {}", path.trim()); + true + } else { + false + } + } + Err(_) => false, + } + } + + /// Install samply using cargo + pub(crate) fn install_samply(&self) -> Result<()> { + info!("Installing samply via cargo..."); + + let mut cmd = Command::new("cargo"); + cmd.args(["install", "--locked", "samply"]); + + // Debug log the command + debug!("Executing cargo command: {:?}", cmd); + + let output = cmd.output().wrap_err("Failed to 
execute cargo install samply command")?; + + // Print stdout and stderr with prefixes at debug level + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + + for line in stdout.lines() { + if !line.trim().is_empty() { + debug!("[CARGO-SAMPLY] {}", line); + } + } + + for line in stderr.lines() { + if !line.trim().is_empty() { + debug!("[CARGO-SAMPLY] {}", line); + } + } + + if !output.status.success() { + // Print all output when installation fails + error!("Cargo install samply failed with exit code: {:?}", output.status.code()); + + if !stdout.trim().is_empty() { + error!("Cargo stdout:"); + for line in stdout.lines() { + error!(" {}", line); + } + } + + if !stderr.trim().is_empty() { + error!("Cargo stderr:"); + for line in stderr.lines() { + error!(" {}", line); + } + } + + return Err(eyre!( + "samply installation failed with exit code: {:?}", + output.status.code() + )); + } + + info!("Samply installation completed"); + Ok(()) + } + + /// Ensure samply is available, installing if necessary + pub(crate) fn ensure_samply_available(&self) -> Result<()> { + if self.is_samply_available() { + Ok(()) + } else { + warn!("samply not found in PATH, installing..."); + self.install_samply() + } + } + + /// Ensure reth-bench is available, compiling if necessary + pub(crate) fn ensure_reth_bench_available(&self) -> Result<()> { + if self.is_reth_bench_available() { + Ok(()) + } else { + warn!("reth-bench not found in PATH, compiling and installing..."); + self.compile_reth_bench() + } + } + + /// Compile and install reth-bench using `make install-reth-bench` + pub(crate) fn compile_reth_bench(&self) -> Result<()> { + info!("Compiling and installing reth-bench..."); + + let mut cmd = Command::new("make"); + cmd.arg("install-reth-bench").current_dir(&self.repo_root); + + // Debug log the command + debug!("Executing make command: {:?}", cmd); + + let output = cmd.output().wrap_err("Failed to execute make install-reth-bench command")?; + + // Print stdout and stderr with prefixes at debug level + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + + for line in stdout.lines() { + if !line.trim().is_empty() { + debug!("[MAKE-BENCH] {}", line); + } + } + + for line in stderr.lines() { + if !line.trim().is_empty() { + debug!("[MAKE-BENCH] {}", line); + } + } + + if !output.status.success() { + // Print all output when compilation fails + error!("Make install-reth-bench failed with exit code: {:?}", output.status.code()); + + if !stdout.trim().is_empty() { + error!("Make stdout:"); + for line in stdout.lines() { + error!(" {}", line); + } + } + + if !stderr.trim().is_empty() { + error!("Make stderr:"); + for line in stderr.lines() { + error!(" {}", line); + } + } + + return Err(eyre!( + "reth-bench compilation failed with exit code: {:?}", + output.status.code() + )); + } + + info!("Reth-bench compilation completed"); + Ok(()) + } +} diff --git a/bin/reth-bench-compare/src/git.rs b/bin/reth-bench-compare/src/git.rs new file mode 100644 index 0000000000..001466969d --- /dev/null +++ b/bin/reth-bench-compare/src/git.rs @@ -0,0 +1,328 @@ +//! Git operations for branch management. 
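+//!
+//! All operations shell out to the system `git` binary via `std::process::Command`, running from the
+//! repository root detected with `git rev-parse --show-toplevel`.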
+ +use eyre::{eyre, Result, WrapErr}; +use std::process::Command; +use tracing::{info, warn}; + +/// Manages git operations for branch switching +#[derive(Debug, Clone)] +pub(crate) struct GitManager { + repo_root: String, +} + +impl GitManager { + /// Create a new `GitManager`, detecting the repository root + pub(crate) fn new() -> Result { + let output = Command::new("git") + .args(["rev-parse", "--show-toplevel"]) + .output() + .wrap_err("Failed to execute git command - is git installed?")?; + + if !output.status.success() { + return Err(eyre!("Not in a git repository or git command failed")); + } + + let repo_root = String::from_utf8(output.stdout) + .wrap_err("Git output is not valid UTF-8")? + .trim() + .to_string(); + + let manager = Self { repo_root }; + info!( + "Detected git repository at: {}, current reference: {}", + manager.repo_root(), + manager.get_current_ref()? + ); + + Ok(manager) + } + + /// Get the current git branch name + pub(crate) fn get_current_branch(&self) -> Result { + let output = Command::new("git") + .args(["branch", "--show-current"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to get current branch")?; + + if !output.status.success() { + return Err(eyre!("Failed to determine current branch")); + } + + let branch = String::from_utf8(output.stdout) + .wrap_err("Branch name is not valid UTF-8")? + .trim() + .to_string(); + + if branch.is_empty() { + return Err(eyre!("Not on a named branch (detached HEAD?)")); + } + + Ok(branch) + } + + /// Get the current git reference (branch name, tag, or commit hash) + pub(crate) fn get_current_ref(&self) -> Result { + // First try to get branch name + if let Ok(branch) = self.get_current_branch() { + return Ok(branch); + } + + // If not on a branch, check if we're on a tag + let tag_output = Command::new("git") + .args(["describe", "--exact-match", "--tags", "HEAD"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to check for tag")?; + + if tag_output.status.success() { + let tag = String::from_utf8(tag_output.stdout) + .wrap_err("Tag name is not valid UTF-8")? + .trim() + .to_string(); + return Ok(tag); + } + + // If not on a branch or tag, return the commit hash + let commit_output = Command::new("git") + .args(["rev-parse", "HEAD"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to get current commit")?; + + if !commit_output.status.success() { + return Err(eyre!("Failed to get current commit hash")); + } + + let commit_hash = String::from_utf8(commit_output.stdout) + .wrap_err("Commit hash is not valid UTF-8")? + .trim() + .to_string(); + + Ok(commit_hash) + } + + /// Check if the git working directory has uncommitted changes to tracked files + pub(crate) fn validate_clean_state(&self) -> Result<()> { + let output = Command::new("git") + .args(["status", "--porcelain"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to check git status")?; + + if !output.status.success() { + return Err(eyre!("Git status command failed")); + } + + let status_output = + String::from_utf8(output.stdout).wrap_err("Git status output is not valid UTF-8")?; + + // Check for uncommitted changes to tracked files + // Status codes: M = modified, A = added, D = deleted, R = renamed, C = copied, U = updated + // ?? = untracked files (we want to ignore these) + let has_uncommitted_changes = status_output.lines().any(|line| { + if line.len() >= 2 { + let status = &line[0..2]; + // Ignore untracked files (??) and ignored files (!!) + !matches!(status, "??" 
| "!!") + } else { + false + } + }); + + if has_uncommitted_changes { + warn!("Git working directory has uncommitted changes to tracked files:"); + for line in status_output.lines() { + if line.len() >= 2 && !matches!(&line[0..2], "??" | "!!") { + warn!(" {}", line); + } + } + return Err(eyre!( + "Git working directory has uncommitted changes to tracked files. Please commit or stash changes before running benchmark comparison." + )); + } + + // Check if there are untracked files and log them as info + let untracked_files: Vec<&str> = + status_output.lines().filter(|line| line.starts_with("??")).collect(); + + if !untracked_files.is_empty() { + info!( + "Git working directory has {} untracked files (this is OK)", + untracked_files.len() + ); + } + + info!("Git working directory is clean (no uncommitted changes to tracked files)"); + Ok(()) + } + + /// Fetch all refs from remote to ensure we have latest branches and tags + pub(crate) fn fetch_all(&self) -> Result<()> { + let output = Command::new("git") + .args(["fetch", "--all", "--tags", "--quiet", "--force"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to fetch latest refs")?; + + if output.status.success() { + info!("Fetched latest refs"); + } else { + let stderr = String::from_utf8_lossy(&output.stderr); + // Only warn if there's actual error content, not just fetch progress + if !stderr.trim().is_empty() && !stderr.contains("-> origin/") { + warn!("Git fetch encountered issues (continuing anyway): {}", stderr); + } + } + + Ok(()) + } + + /// Validate that the specified git references exist (branches, tags, or commits) + pub(crate) fn validate_refs(&self, refs: &[&str]) -> Result<()> { + for &git_ref in refs { + // Try to resolve the ref similar to `git checkout` by peeling to a commit. + // First try the ref as-is with ^{commit}, then fall back to origin/{ref}^{commit}. 
+ let as_is = format!("{git_ref}^{{commit}}"); + let ref_check = Command::new("git") + .args(["rev-parse", "--verify", &as_is]) + .current_dir(&self.repo_root) + .output(); + + let found = if let Ok(output) = ref_check && + output.status.success() + { + info!("Validated reference exists: {}", git_ref); + true + } else { + // Try remote-only branches via origin/{ref} + let origin_ref = format!("origin/{git_ref}^{{commit}}"); + let origin_check = Command::new("git") + .args(["rev-parse", "--verify", &origin_ref]) + .current_dir(&self.repo_root) + .output(); + + if let Ok(output) = origin_check && + output.status.success() + { + info!("Validated remote reference exists: origin/{}", git_ref); + true + } else { + false + } + }; + + if !found { + return Err(eyre!( + "Git reference '{}' does not exist as branch, tag, or commit (tried '{}' and 'origin/{}^{{commit}}')", + git_ref, + format!("{git_ref}^{{commit}}"), + git_ref, + )); + } + } + + Ok(()) + } + + /// Switch to the specified git reference (branch, tag, or commit) + pub(crate) fn switch_ref(&self, git_ref: &str) -> Result<()> { + // First checkout the reference + let output = Command::new("git") + .args(["checkout", git_ref]) + .current_dir(&self.repo_root) + .output() + .wrap_err_with(|| format!("Failed to switch to reference '{git_ref}'"))?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(eyre!("Failed to switch to reference '{}': {}", git_ref, stderr)); + } + + // Check if this is a branch that tracks a remote and pull latest changes + let is_branch = Command::new("git") + .args(["show-ref", "--verify", "--quiet", &format!("refs/heads/{git_ref}")]) + .current_dir(&self.repo_root) + .status() + .map(|s| s.success()) + .unwrap_or(false); + + if is_branch { + // Check if the branch tracks a remote + let tracking_output = Command::new("git") + .args([ + "rev-parse", + "--abbrev-ref", + "--symbolic-full-name", + &format!("{git_ref}@{{upstream}}"), + ]) + .current_dir(&self.repo_root) + .output(); + + if let Ok(output) = tracking_output && + output.status.success() + { + let upstream = String::from_utf8_lossy(&output.stdout).trim().to_string(); + if !upstream.is_empty() && upstream != format!("{git_ref}@{{upstream}}") { + // Branch tracks a remote, pull latest changes + info!("Pulling latest changes for branch: {}", git_ref); + + let pull_output = Command::new("git") + .args(["pull", "--ff-only"]) + .current_dir(&self.repo_root) + .output() + .wrap_err_with(|| { + format!("Failed to pull latest changes for branch '{git_ref}'") + })?; + + if pull_output.status.success() { + info!("Successfully pulled latest changes for branch: {}", git_ref); + } else { + let stderr = String::from_utf8_lossy(&pull_output.stderr); + warn!("Failed to pull latest changes for branch '{}': {}", git_ref, stderr); + // Continue anyway, we'll use whatever version we have + } + } + } + } + + // Verify the checkout succeeded by checking the current commit + let current_commit_output = Command::new("git") + .args(["rev-parse", "HEAD"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to get current commit")?; + + if !current_commit_output.status.success() { + return Err(eyre!("Failed to verify git checkout")); + } + + info!("Switched to reference: {}", git_ref); + Ok(()) + } + + /// Get the current commit hash + pub(crate) fn get_current_commit(&self) -> Result { + let output = Command::new("git") + .args(["rev-parse", "HEAD"]) + .current_dir(&self.repo_root) + .output() + .wrap_err("Failed to get 
current commit")?; + + if !output.status.success() { + return Err(eyre!("Failed to get current commit hash")); + } + + let commit_hash = String::from_utf8(output.stdout) + .wrap_err("Commit hash is not valid UTF-8")? + .trim() + .to_string(); + + Ok(commit_hash) + } + + /// Get the repository root path + pub(crate) fn repo_root(&self) -> &str { + &self.repo_root + } +} diff --git a/bin/reth-bench-compare/src/main.rs b/bin/reth-bench-compare/src/main.rs new file mode 100644 index 0000000000..e866afb250 --- /dev/null +++ b/bin/reth-bench-compare/src/main.rs @@ -0,0 +1,45 @@ +//! # reth-bench-compare +//! +//! Automated tool for comparing reth performance between two git branches. +//! This tool automates the complete workflow of compiling, running, and benchmarking +//! reth on different branches to provide meaningful performance comparisons. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + +#[global_allocator] +static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator(); + +mod benchmark; +mod cli; +mod comparison; +mod compilation; +mod git; +mod node; + +use clap::Parser; +use cli::{run_comparison, Args}; +use eyre::Result; +use reth_cli_runner::CliRunner; + +fn main() -> Result<()> { + // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided. + if std::env::var_os("RUST_BACKTRACE").is_none() { + unsafe { + std::env::set_var("RUST_BACKTRACE", "1"); + } + } + + let args = Args::parse(); + + // Initialize tracing + let _guard = args.init_tracing()?; + + // Run until either exit or sigint or sigterm + let runner = CliRunner::try_default_runtime()?; + runner.run_command_until_exit(|ctx| run_comparison(args, ctx)) +} diff --git a/bin/reth-bench-compare/src/node.rs b/bin/reth-bench-compare/src/node.rs new file mode 100644 index 0000000000..4de48eebf4 --- /dev/null +++ b/bin/reth-bench-compare/src/node.rs @@ -0,0 +1,554 @@ +//! Node management for starting, stopping, and controlling reth instances. 
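Taken together, git.rs above gives the tool a small checkout workflow: detect the repo root, refuse to run on a dirty tree, fetch, validate both refs, then switch. A sketch of how a caller inside this crate might sequence it (illustrative only; the real orchestration lives in cli.rs, outside this excerpt):

```rust
// Illustrative driver for the GitManager API defined above; error handling and
// restoring the original ref are omitted for brevity.
fn checkout_baseline(baseline: &str, feature: &str) -> eyre::Result<()> {
    let git = GitManager::new()?; // resolves the repo root via `git rev-parse --show-toplevel`
    git.validate_clean_state()?; // aborts on uncommitted changes to tracked files
    git.fetch_all()?; // best-effort `git fetch --all --tags`
    git.validate_refs(&[baseline, feature])?; // peels `<ref>^{commit}`, falling back to `origin/<ref>`
    git.switch_ref(baseline)?; // checkout plus fast-forward pull for tracking branches
    tracing::info!("baseline at commit {}", git.get_current_commit()?);
    Ok(())
}
```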
+ +use crate::cli::Args; +use alloy_provider::{Provider, ProviderBuilder}; +use alloy_rpc_types_eth::SyncStatus; +use eyre::{eyre, OptionExt, Result, WrapErr}; +#[cfg(unix)] +use nix::sys::signal::{killpg, Signal}; +#[cfg(unix)] +use nix::unistd::Pid; +use reth_chainspec::Chain; +use std::{fs, path::PathBuf, time::Duration}; +use tokio::{ + fs::File as AsyncFile, + io::{AsyncBufReadExt, AsyncWriteExt, BufReader as AsyncBufReader}, + process::Command, + time::{sleep, timeout}, +}; +use tracing::{debug, info, warn}; + +/// Manages reth node lifecycle and operations +pub(crate) struct NodeManager { + datadir: Option<String>, + metrics_port: u16, + chain: Chain, + use_sudo: bool, + binary_path: Option<PathBuf>, + enable_profiling: bool, + output_dir: PathBuf, + additional_reth_args: Vec<String>, + comparison_dir: Option<PathBuf>, + tracing_endpoint: Option<String>, + otlp_max_queue_size: usize, +} + +impl NodeManager { + /// Create a new `NodeManager` with configuration from CLI args + pub(crate) fn new(args: &Args) -> Self { + Self { + datadir: Some(args.datadir_path().to_string_lossy().to_string()), + metrics_port: args.metrics_port, + chain: args.chain, + use_sudo: args.sudo, + binary_path: None, + enable_profiling: args.profile, + output_dir: args.output_dir_path(), + // Filter out empty strings to prevent invalid arguments being passed to reth node + additional_reth_args: args + .reth_args + .iter() + .filter(|s| !s.is_empty()) + .cloned() + .collect(), + comparison_dir: None, + tracing_endpoint: args.traces.otlp.as_ref().map(|u| u.to_string()), + otlp_max_queue_size: args.otlp_max_queue_size, + } + } + + /// Set the comparison directory path for logging + pub(crate) fn set_comparison_dir(&mut self, dir: PathBuf) { + self.comparison_dir = Some(dir); + } + + /// Get the log file path for a given reference type + fn get_log_file_path(&self, ref_type: &str) -> Result<PathBuf> { + let comparison_dir = self + .comparison_dir + .as_ref() + .ok_or_eyre("Comparison directory not set. Call set_comparison_dir first.")?; + + // The comparison directory already contains the full path to results/ + let log_dir = comparison_dir.join(ref_type); + + // Create the directory if it doesn't exist + fs::create_dir_all(&log_dir) + .wrap_err(format!("Failed to create log directory: {:?}", log_dir))?; + + let log_file = log_dir.join("reth_node.log"); + Ok(log_file) + } + + /// Get the perf event max sample rate from the system, capped at 10000 + fn get_perf_sample_rate(&self) -> Option<String> { + let perf_rate_file = "/proc/sys/kernel/perf_event_max_sample_rate"; + if let Ok(content) = fs::read_to_string(perf_rate_file) { + let rate_str = content.trim(); + if !rate_str.is_empty() { + if let Ok(system_rate) = rate_str.parse::<u64>() { + let capped_rate = std::cmp::min(system_rate, 10000); + info!( + "Detected perf_event_max_sample_rate: {}, using: {}", + system_rate, capped_rate + ); + return Some(capped_rate.to_string()); + } + warn!("Failed to parse perf_event_max_sample_rate: {}", rate_str); + } + } + None + } + + /// Get the absolute path to samply using 'which' command + async fn get_samply_path(&self) -> Result<String> { + let output = Command::new("which") + .arg("samply") + .output() + .await + .wrap_err("Failed to execute 'which samply' command")?; + + if !output.status.success() { + return Err(eyre!("samply not found in PATH")); + } + + let samply_path = String::from_utf8(output.stdout) + .wrap_err("samply path is not valid UTF-8")?
+ .trim() + .to_string(); + + if samply_path.is_empty() { + return Err(eyre!("which samply returned empty path")); + } + + Ok(samply_path) + } + + /// Build reth arguments as a vector of strings + fn build_reth_args( + &self, + binary_path_str: &str, + additional_args: &[String], + ref_type: &str, + ) -> (Vec, String) { + let mut reth_args = vec![binary_path_str.to_string(), "node".to_string()]; + + // Add chain argument (skip for mainnet as it's the default) + let chain_str = self.chain.to_string(); + if chain_str != "mainnet" { + reth_args.extend_from_slice(&["--chain".to_string(), chain_str.clone()]); + } + + // Add datadir if specified + if let Some(ref datadir) = self.datadir { + reth_args.extend_from_slice(&["--datadir".to_string(), datadir.clone()]); + } + + // Add reth-specific arguments + let metrics_arg = format!("0.0.0.0:{}", self.metrics_port); + reth_args.extend_from_slice(&[ + "--engine.accept-execution-requests-hash".to_string(), + "--metrics".to_string(), + metrics_arg, + "--http".to_string(), + "--http.api".to_string(), + "eth".to_string(), + "--disable-discovery".to_string(), + "--trusted-only".to_string(), + ]); + + // Add tracing arguments if OTLP endpoint is configured + if let Some(ref endpoint) = self.tracing_endpoint { + info!("Enabling OTLP tracing export to: {} (service: reth-{})", endpoint, ref_type); + // Endpoint requires equals per clap settings in reth + reth_args.push(format!("--tracing-otlp={}", endpoint)); + } + + // Add any additional arguments passed via command line (common to both baseline and + // feature) + reth_args.extend_from_slice(&self.additional_reth_args); + + // Add reference-specific additional arguments + reth_args.extend_from_slice(additional_args); + + (reth_args, chain_str) + } + + /// Create a command for profiling mode + async fn create_profiling_command( + &self, + ref_type: &str, + reth_args: &[String], + ) -> Result { + // Create profiles directory if it doesn't exist + let profile_dir = self.output_dir.join("profiles"); + fs::create_dir_all(&profile_dir).wrap_err("Failed to create profiles directory")?; + + let profile_path = profile_dir.join(format!("{}.json.gz", ref_type)); + info!("Starting reth node with samply profiling..."); + info!("Profile output: {:?}", profile_path); + + // Get absolute path to samply + let samply_path = self.get_samply_path().await?; + + let mut cmd = if self.use_sudo { + let mut sudo_cmd = Command::new("sudo"); + sudo_cmd.arg(&samply_path); + sudo_cmd + } else { + Command::new(&samply_path) + }; + + // Add samply arguments + cmd.args(["record", "--save-only", "-o", &profile_path.to_string_lossy()]); + + // Add rate argument if available + if let Some(rate) = self.get_perf_sample_rate() { + cmd.args(["--rate", &rate]); + } + + // Add separator and complete reth command + cmd.arg("--"); + cmd.args(reth_args); + + // Set environment variable to disable log styling + cmd.env("RUST_LOG_STYLE", "never"); + + Ok(cmd) + } + + /// Create a command for direct reth execution + fn create_direct_command(&self, reth_args: &[String]) -> Command { + let binary_path = &reth_args[0]; + + let mut cmd = if self.use_sudo { + info!("Starting reth node with sudo..."); + let mut sudo_cmd = Command::new("sudo"); + sudo_cmd.args(reth_args); + sudo_cmd + } else { + info!("Starting reth node..."); + let mut reth_cmd = Command::new(binary_path); + reth_cmd.args(&reth_args[1..]); // Skip the binary path since it's the command + reth_cmd + }; + + // Set environment variable to disable log styling + cmd.env("RUST_LOG_STYLE", "never"); 
+ + cmd + } + + /// Start a reth node using the specified binary path and return the process handle + /// along with the formatted reth command string for reporting. + pub(crate) async fn start_node( + &mut self, + binary_path: &std::path::Path, + _git_ref: &str, + ref_type: &str, + additional_args: &[String], + ) -> Result<(tokio::process::Child, String)> { + // Store the binary path for later use (e.g., in unwind_to_block) + self.binary_path = Some(binary_path.to_path_buf()); + + let binary_path_str = binary_path.to_string_lossy(); + let (reth_args, _) = self.build_reth_args(&binary_path_str, additional_args, ref_type); + + // Format the reth command string for reporting + let reth_command = shlex::try_join(reth_args.iter().map(|s| s.as_str())) + .wrap_err("Failed to format reth command string")?; + + // Log additional arguments if any + if !self.additional_reth_args.is_empty() { + info!("Using common additional reth arguments: {:?}", self.additional_reth_args); + } + if !additional_args.is_empty() { + info!("Using reference-specific additional reth arguments: {:?}", additional_args); + } + + let mut cmd = if self.enable_profiling { + self.create_profiling_command(ref_type, &reth_args).await? + } else { + self.create_direct_command(&reth_args) + }; + + // Set process group for better signal handling + #[cfg(unix)] + { + cmd.process_group(0); + } + + // Set high queue size to prevent trace dropping during benchmarks + if self.tracing_endpoint.is_some() { + cmd.env("OTEL_BSP_MAX_QUEUE_SIZE", self.otlp_max_queue_size.to_string()); // Traces + cmd.env("OTEL_BLRP_MAX_QUEUE_SIZE", "10000"); // Logs + + // Set service name to differentiate baseline vs feature runs in Jaeger + cmd.env("OTEL_SERVICE_NAME", format!("reth-{}", ref_type)); + } + + debug!("Executing reth command: {cmd:?}"); + + let mut child = cmd + .stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .kill_on_drop(true) // Kill on drop so that on Ctrl-C for parent process we stop all child processes + .spawn() + .wrap_err("Failed to start reth node")?; + + info!( + "Reth node started with PID: {:?} (binary: {})", + child.id().ok_or_eyre("Reth node is not running")?, + binary_path_str + ); + + // Prepare log file path + let log_file_path = self.get_log_file_path(ref_type)?; + info!("Reth node logs will be saved to: {:?}", log_file_path); + + // Stream stdout and stderr with prefixes at debug level and to log file + if let Some(stdout) = child.stdout.take() { + let log_file = AsyncFile::create(&log_file_path) + .await + .wrap_err(format!("Failed to create log file: {:?}", log_file_path))?; + tokio::spawn(async move { + let reader = AsyncBufReader::new(stdout); + let mut lines = reader.lines(); + let mut log_file = log_file; + while let Ok(Some(line)) = lines.next_line().await { + debug!("[RETH] {}", line); + // Write to log file (reth already includes timestamps) + let log_line = format!("{}\n", line); + if let Err(e) = log_file.write_all(log_line.as_bytes()).await { + debug!("Failed to write to log file: {}", e); + } + } + }); + } + + if let Some(stderr) = child.stderr.take() { + let log_file = AsyncFile::options() + .create(true) + .append(true) + .open(&log_file_path) + .await + .wrap_err(format!("Failed to open log file for stderr: {:?}", log_file_path))?; + tokio::spawn(async move { + let reader = AsyncBufReader::new(stderr); + let mut lines = reader.lines(); + let mut log_file = log_file; + while let Ok(Some(line)) = lines.next_line().await { + debug!("[RETH] {}", line); + // Write to log file (reth 
already includes timestamps) + let log_line = format!("{}\n", line); + if let Err(e) = log_file.write_all(log_line.as_bytes()).await { + debug!("Failed to write to log file: {}", e); + } + } + }); + } + + // Give the node a moment to start up + sleep(Duration::from_secs(5)).await; + + Ok((child, reth_command)) + } + + /// Wait for the node to be ready and return its current tip + pub(crate) async fn wait_for_node_ready_and_get_tip(&self) -> Result { + info!("Waiting for node to be ready and synced..."); + + let max_wait = Duration::from_secs(120); // 2 minutes to allow for sync + let check_interval = Duration::from_secs(2); + let rpc_url = "http://localhost:8545"; + + // Create Alloy provider + let url = rpc_url.parse().map_err(|e| eyre!("Invalid RPC URL '{}': {}", rpc_url, e))?; + let provider = ProviderBuilder::new().connect_http(url); + + timeout(max_wait, async { + loop { + // First check if RPC is up and node is not syncing + match provider.syncing().await { + Ok(sync_result) => { + match sync_result { + SyncStatus::Info(sync_info) => { + debug!("Node is still syncing {sync_info:?}, waiting..."); + } + _ => { + // Node is not syncing, now get the tip + match provider.get_block_number().await { + Ok(tip) => { + info!("Node is ready and not syncing at block: {}", tip); + return Ok(tip); + } + Err(e) => { + debug!("Failed to get block number: {}", e); + } + } + } + } + } + Err(e) => { + debug!("Node RPC not ready yet or failed to check sync status: {}", e); + } + } + + sleep(check_interval).await; + } + }) + .await + .wrap_err("Timed out waiting for node to be ready and synced")? + } + + /// Stop the reth node gracefully + pub(crate) async fn stop_node(&self, child: &mut tokio::process::Child) -> Result<()> { + let pid = child.id().expect("Child process ID should be available"); + + // Check if the process has already exited + match child.try_wait() { + Ok(Some(status)) => { + info!("Reth node (PID: {}) has already exited with status: {:?}", pid, status); + return Ok(()); + } + Ok(None) => { + // Process is still running, proceed to stop it + info!("Stopping process gracefully with SIGINT (PID: {})...", pid); + } + Err(e) => { + return Err(eyre!("Failed to check process status: {}", e)); + } + } + + #[cfg(unix)] + { + // Send SIGINT to process group to mimic Ctrl-C behavior + let nix_pgid = Pid::from_raw(pid as i32); + + match killpg(nix_pgid, Signal::SIGINT) { + Ok(()) => {} + Err(nix::errno::Errno::ESRCH) => { + info!("Process group {} has already exited", pid); + } + Err(e) => { + return Err(eyre!("Failed to send SIGINT to process group {}: {}", pid, e)); + } + } + } + + #[cfg(not(unix))] + { + // On non-Unix systems, fall back to using external kill command + let output = Command::new("taskkill") + .args(["/PID", &pid.to_string(), "/F"]) + .output() + .await + .wrap_err("Failed to execute taskkill command")?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + // Check if the error is because the process doesn't exist + if stderr.contains("not found") || stderr.contains("not exist") { + info!("Process {} has already exited", pid); + } else { + return Err(eyre!("Failed to kill process {}: {}", pid, stderr)); + } + } + } + + // Wait for the process to exit + match child.wait().await { + Ok(status) => { + info!("Reth node (PID: {}) exited with status: {:?}", pid, status); + } + Err(e) => { + // If we get an error here, it might be because the process already exited + debug!("Error waiting for process exit (may have already exited): {}", e); + } + 
} + + Ok(()) + } + + /// Unwind the node to a specific block + pub(crate) async fn unwind_to_block(&self, block_number: u64) -> Result<()> { + if self.use_sudo { + info!("Unwinding node to block: {} (with sudo)", block_number); + } else { + info!("Unwinding node to block: {}", block_number); + } + + // Use the binary path from the last start_node call, or fallback to default + let binary_path = self + .binary_path + .as_ref() + .map(|p| p.to_string_lossy().to_string()) + .unwrap_or_else(|| "./target/profiling/reth".to_string()); + + let mut cmd = if self.use_sudo { + let mut sudo_cmd = Command::new("sudo"); + sudo_cmd.args([&binary_path, "stage", "unwind"]); + sudo_cmd + } else { + let mut reth_cmd = Command::new(&binary_path); + reth_cmd.args(["stage", "unwind"]); + reth_cmd + }; + + // Add chain argument (skip for mainnet as it's the default) + let chain_str = self.chain.to_string(); + if chain_str != "mainnet" { + cmd.args(["--chain", &chain_str]); + } + + // Add datadir if specified + if let Some(ref datadir) = self.datadir { + cmd.args(["--datadir", datadir]); + } + + cmd.args(["to-block", &block_number.to_string()]); + + // Set environment variable to disable log styling + cmd.env("RUST_LOG_STYLE", "never"); + + // Debug log the command + debug!("Executing reth unwind command: {:?}", cmd); + + let mut child = cmd + .stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .spawn() + .wrap_err("Failed to start unwind command")?; + + // Stream stdout and stderr with prefixes in real-time + if let Some(stdout) = child.stdout.take() { + tokio::spawn(async move { + let reader = AsyncBufReader::new(stdout); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[RETH-UNWIND] {}", line); + } + }); + } + + if let Some(stderr) = child.stderr.take() { + tokio::spawn(async move { + let reader = AsyncBufReader::new(stderr); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!("[RETH-UNWIND] {}", line); + } + }); + } + + // Wait for the command to complete + let status = child.wait().await.wrap_err("Failed to wait for unwind command")?; + + if !status.success() { + return Err(eyre!("Unwind command failed with exit code: {:?}", status.code())); + } + + info!("Unwound to block: {}", block_number); + Ok(()) + } +} diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml index 891fa4f978..a07d0f5200 100644 --- a/bin/reth-bench/Cargo.toml +++ b/bin/reth-bench/Cargo.toml @@ -81,11 +81,26 @@ jemalloc = [ jemalloc-prof = ["reth-cli-util/jemalloc-prof"] tracy-allocator = ["reth-cli-util/tracy-allocator"] -min-error-logs = ["tracing/release_max_level_error"] -min-warn-logs = ["tracing/release_max_level_warn"] -min-info-logs = ["tracing/release_max_level_info"] -min-debug-logs = ["tracing/release_max_level_debug"] -min-trace-logs = ["tracing/release_max_level_trace"] +min-error-logs = [ + "tracing/release_max_level_error", + "reth-node-core/min-error-logs", +] +min-warn-logs = [ + "tracing/release_max_level_warn", + "reth-node-core/min-warn-logs", +] +min-info-logs = [ + "tracing/release_max_level_info", + "reth-node-core/min-info-logs", +] +min-debug-logs = [ + "tracing/release_max_level_debug", + "reth-node-core/min-debug-logs", +] +min-trace-logs = [ + "tracing/release_max_level_trace", + "reth-node-core/min-trace-logs", +] # no-op feature flag for switching between the `optimism` and default functionality in CI matrices ethereum = [] diff --git a/bin/reth-bench/README.md 
b/bin/reth-bench/README.md index b8176749fc..9e03ac3c26 100644 --- a/bin/reth-bench/README.md +++ b/bin/reth-bench/README.md @@ -80,7 +80,7 @@ RUSTFLAGS="-C target-cpu=native" cargo build --profile profiling --no-default-fe ### Run the Benchmark: First, start the reth node. Here is an example that runs `reth` compiled with the `profiling` profile, runs `samply`, and configures `reth` to run with metrics enabled: ```bash -samply record -p 3001 target/profiling/reth node --metrics localhost:9001 --authrpc.jwt-secret +samply record -p 3001 target/profiling/reth node --metrics localhost:9001 --authrpc.jwtsecret ``` ```bash @@ -143,5 +143,5 @@ To reproduce the benchmark, first re-set the node to the block that the benchmar - **RPC Configuration**: The RPC endpoints should be accessible and configured correctly, specifically the RPC endpoint must support `eth_getBlockByNumber` and support fetching full transactions. The benchmark will make one RPC query per block as fast as possible, so ensure the RPC endpoint does not rate limit or block requests after a certain volume. - **Reproducibility**: Ensure that the node is at the same state before attempting to retry a benchmark. The `new-payload-fcu` command specifically will commit to the database, so the node must be rolled back using `reth stage unwind` to reproducibly retry benchmarks. - **Profiling tools**: If you are collecting CPU profiles, tools like [`samply`](https://github.com/mstange/samply) and [`perf`](https://perf.wiki.kernel.org/index.php/Main_Page) can be useful for analyzing node performance. -- **Benchmark Data**: `reth-bench` additionally contains a `--benchmark.output` flag, which will output gas used benchmarks across the benchmark range in CSV format. This may be useful for further data analysis. +- **Benchmark Data**: `reth-bench` additionally contains a `--output` flag, which will output gas used benchmarks across the benchmark range in CSV format. This may be useful for further data analysis. - **Platform Information**: To ensure accurate and reproducible benchmarking, document the platform details, including hardware specifications, OS version, and any other relevant information before publishing any benchmarks. diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index ce094895ee..5760184b7f 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -30,8 +30,8 @@ pub struct Command { rpc_url: String, /// How long to wait after a forkchoice update before sending the next payload. - #[arg(long, value_name = "WAIT_TIME", value_parser = parse_duration, verbatim_doc_comment)] - wait_time: Option, + #[arg(long, value_name = "WAIT_TIME", value_parser = parse_duration, default_value = "250ms", verbatim_doc_comment)] + wait_time: Duration, /// The size of the block buffer (channel capacity) for prefetching blocks from the RPC /// endpoint. 
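The hunk above changes `--wait-time` from an optional flag into one with a `250ms` default, so the pause between payloads always applies unless the user overrides it. A minimal clap sketch of that wiring, assuming a humantime-backed `parse_duration` helper (reth-bench's actual parser is defined elsewhere and may differ):

```rust
use std::time::Duration;

use clap::Parser;

// Stand-in for reth-bench's duration parser; assumed here to behave like humantime.
fn parse_duration(s: &str) -> Result<Duration, humantime::DurationError> {
    humantime::parse_duration(s)
}

#[derive(Parser)]
struct WaitArgs {
    /// How long to wait after a forkchoice update before sending the next payload.
    #[arg(long, value_name = "WAIT_TIME", value_parser = parse_duration, default_value = "250ms")]
    wait_time: Duration,
}

fn main() {
    // With no --wait-time flag the 250ms default applies; passing `--wait-time 0ms`
    // effectively disables the pause.
    let args = WaitArgs::parse_from(["bench"]);
    assert_eq!(args.wait_time, Duration::from_millis(250));
}
```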
@@ -79,22 +79,13 @@ impl Command { break; } }; - let header = block.header.clone(); - let (version, params) = match block_to_new_payload(block, is_optimism) { - Ok(result) => result, - Err(e) => { - tracing::error!("Failed to convert block to new payload: {e}"); - let _ = error_sender.send(e); - break; - } - }; - let head_block_hash = header.hash; - let safe_block_hash = - block_provider.get_block_by_number(header.number.saturating_sub(32).into()); + let head_block_hash = block.header.hash; + let safe_block_hash = block_provider + .get_block_by_number(block.header.number.saturating_sub(32).into()); - let finalized_block_hash = - block_provider.get_block_by_number(header.number.saturating_sub(64).into()); + let finalized_block_hash = block_provider + .get_block_by_number(block.header.number.saturating_sub(64).into()); let (safe, finalized) = tokio::join!(safe_block_hash, finalized_block_hash,); @@ -110,14 +101,7 @@ impl Command { next_block += 1; if let Err(e) = sender - .send(( - header, - version, - params, - head_block_hash, - safe_block_hash, - finalized_block_hash, - )) + .send((block, head_block_hash, safe_block_hash, finalized_block_hash)) .await { tracing::error!("Failed to send block data: {e}"); @@ -131,15 +115,16 @@ impl Command { let total_benchmark_duration = Instant::now(); let mut total_wait_time = Duration::ZERO; - while let Some((header, version, params, head, safe, finalized)) = { + while let Some((block, head, safe, finalized)) = { let wait_start = Instant::now(); let result = receiver.recv().await; total_wait_time += wait_start.elapsed(); result } { // just put gas used here - let gas_used = header.gas_used; - let block_number = header.number; + let gas_used = block.header.gas_used; + let block_number = block.header.number; + let transaction_count = block.transactions.len() as u64; debug!(target: "reth-bench", ?block_number, "Sending payload",); @@ -150,6 +135,7 @@ impl Command { finalized_block_hash: finalized, }; + let (version, params) = block_to_new_payload(block, is_optimism)?; let start = Instant::now(); call_new_payload(&auth_provider, version, params).await?; @@ -160,8 +146,13 @@ impl Command { // calculate the total duration and the fcu latency, record let total_latency = start.elapsed(); let fcu_latency = total_latency - new_payload_result.latency; - let combined_result = - CombinedResult { block_number, new_payload_result, fcu_latency, total_latency }; + let combined_result = CombinedResult { + block_number, + transaction_count, + new_payload_result, + fcu_latency, + total_latency, + }; // current duration since the start of the benchmark minus the time // waiting for blocks @@ -170,13 +161,12 @@ impl Command { // convert gas used to gigagas, then compute gigagas per second info!(%combined_result); - // wait if we need to - if let Some(wait_time) = self.wait_time { - tokio::time::sleep(wait_time).await; - } + // wait before sending the next payload + tokio::time::sleep(self.wait_time).await; // record the current result - let gas_row = TotalGasRow { block_number, gas_used, time: current_duration }; + let gas_row = + TotalGasRow { block_number, transaction_count, gas_used, time: current_duration }; results.push((gas_row, combined_result)); } diff --git a/bin/reth-bench/src/bench/new_payload_only.rs b/bin/reth-bench/src/bench/new_payload_only.rs index 3dfa619ec7..748ac999a9 100644 --- a/bin/reth-bench/src/bench/new_payload_only.rs +++ b/bin/reth-bench/src/bench/new_payload_only.rs @@ -72,19 +72,9 @@ impl Command { break; } }; - let header = block.header.clone(); 
- - let (version, params) = match block_to_new_payload(block, is_optimism) { - Ok(result) => result, - Err(e) => { - tracing::error!("Failed to convert block to new payload: {e}"); - let _ = error_sender.send(e); - break; - } - }; next_block += 1; - if let Err(e) = sender.send((header, version, params)).await { + if let Err(e) = sender.send(block).await { tracing::error!("Failed to send block data: {e}"); break; } @@ -96,23 +86,24 @@ impl Command { let total_benchmark_duration = Instant::now(); let mut total_wait_time = Duration::ZERO; - while let Some((header, version, params)) = { + while let Some(block) = { let wait_start = Instant::now(); let result = receiver.recv().await; total_wait_time += wait_start.elapsed(); result } { - // just put gas used here - let gas_used = header.gas_used; - - let block_number = header.number; + let block_number = block.header.number; + let transaction_count = block.transactions.len() as u64; + let gas_used = block.header.gas_used; debug!( target: "reth-bench", - number=?header.number, + number=?block.header.number, "Sending payload to engine", ); + let (version, params) = block_to_new_payload(block, is_optimism)?; + let start = Instant::now(); call_new_payload(&auth_provider, version, params).await?; @@ -124,7 +115,8 @@ impl Command { let current_duration = total_benchmark_duration.elapsed() - total_wait_time; // record the current result - let row = TotalGasRow { block_number, gas_used, time: current_duration }; + let row = + TotalGasRow { block_number, transaction_count, gas_used, time: current_duration }; results.push((row, new_payload_result)); } diff --git a/bin/reth-bench/src/bench/output.rs b/bin/reth-bench/src/bench/output.rs index 794cd2768d..17e9ad4a7a 100644 --- a/bin/reth-bench/src/bench/output.rs +++ b/bin/reth-bench/src/bench/output.rs @@ -67,6 +67,8 @@ impl Serialize for NewPayloadResult { pub(crate) struct CombinedResult { /// The block number of the block being processed. pub(crate) block_number: u64, + /// The number of transactions in the block. + pub(crate) transaction_count: u64, /// The `newPayload` result. pub(crate) new_payload_result: NewPayloadResult, /// The latency of the `forkchoiceUpdated` call. @@ -108,10 +110,11 @@ impl Serialize for CombinedResult { let fcu_latency = self.fcu_latency.as_micros(); let new_payload_latency = self.new_payload_result.latency.as_micros(); let total_latency = self.total_latency.as_micros(); - let mut state = serializer.serialize_struct("CombinedResult", 5)?; + let mut state = serializer.serialize_struct("CombinedResult", 6)?; // flatten the new payload result because this is meant for CSV writing state.serialize_field("block_number", &self.block_number)?; + state.serialize_field("transaction_count", &self.transaction_count)?; state.serialize_field("gas_used", &self.new_payload_result.gas_used)?; state.serialize_field("new_payload_latency", &new_payload_latency)?; state.serialize_field("fcu_latency", &fcu_latency)?; @@ -125,6 +128,8 @@ impl Serialize for CombinedResult { pub(crate) struct TotalGasRow { /// The block number of the block being processed. pub(crate) block_number: u64, + /// The number of transactions in the block. + pub(crate) transaction_count: u64, /// The total gas used in the block. pub(crate) gas_used: u64, /// Time since the start of the benchmark. 
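With `transaction_count` recorded alongside `gas_used`, the `--output` CSV can be post-processed into per-run throughput figures. A small standalone sketch of such analysis, assuming the `csv` and `serde` crates (this is not part of reth-bench):

```rust
use std::time::Duration;

// Mirrors the CSV columns written for TotalGasRow: block_number,transaction_count,gas_used,time.
#[derive(serde::Deserialize)]
struct GasRow {
    block_number: u64,
    transaction_count: u64,
    gas_used: u64,
    /// Microseconds since the start of the benchmark (the Duration is serialized as micros).
    time: u64,
}

fn summarize(path: &str) -> Result<(), Box<dyn std::error::Error>> {
    let mut reader = csv::Reader::from_path(path)?;
    let mut rows = Vec::new();
    for record in reader.deserialize::<GasRow>() {
        rows.push(record?);
    }
    if let Some(last) = rows.last() {
        let elapsed = Duration::from_micros(last.time).as_secs_f64();
        let gas: u64 = rows.iter().map(|r| r.gas_used).sum();
        let txs: u64 = rows.iter().map(|r| r.transaction_count).sum();
        println!(
            "through block {}: {:.3} Ggas/s, {:.1} tx/s over {} blocks",
            last.block_number,
            gas as f64 / 1e9 / elapsed,
            txs as f64 / elapsed,
            rows.len()
        );
    }
    Ok(())
}
```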
@@ -172,8 +177,9 @@ impl Serialize for TotalGasRow { { // convert the time to microseconds let time = self.time.as_micros(); - let mut state = serializer.serialize_struct("TotalGasRow", 3)?; + let mut state = serializer.serialize_struct("TotalGasRow", 4)?; state.serialize_field("block_number", &self.block_number)?; + state.serialize_field("transaction_count", &self.transaction_count)?; state.serialize_field("gas_used", &self.gas_used)?; state.serialize_field("time", &time)?; state.end() @@ -188,7 +194,12 @@ mod tests { #[test] fn test_write_total_gas_row_csv() { - let row = TotalGasRow { block_number: 1, gas_used: 1_000, time: Duration::from_secs(1) }; + let row = TotalGasRow { + block_number: 1, + transaction_count: 10, + gas_used: 1_000, + time: Duration::from_secs(1), + }; let mut writer = Writer::from_writer(vec![]); writer.serialize(row).unwrap(); @@ -198,11 +209,11 @@ mod tests { let mut result = result.as_slice().lines(); // assert header - let expected_first_line = "block_number,gas_used,time"; + let expected_first_line = "block_number,transaction_count,gas_used,time"; let first_line = result.next().unwrap().unwrap(); assert_eq!(first_line, expected_first_line); - let expected_second_line = "1,1000,1000000"; + let expected_second_line = "1,10,1000,1000000"; let second_line = result.next().unwrap().unwrap(); assert_eq!(second_line, expected_second_line); } diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index d4e134bf48..9eef353349 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -54,7 +54,7 @@ reth-node-api.workspace = true reth-node-core.workspace = true reth-ethereum-payload-builder.workspace = true reth-ethereum-primitives.workspace = true -reth-node-ethereum = { workspace = true, features = ["js-tracer"] } +reth-node-ethereum.workspace = true reth-node-builder.workspace = true reth-node-metrics.workspace = true reth-consensus.workspace = true @@ -81,7 +81,22 @@ backon.workspace = true tempfile.workspace = true [features] -default = ["jemalloc", "reth-revm/portable"] +default = ["jemalloc", "otlp", "reth-revm/portable", "js-tracer", "keccak-cache-global", "asm-keccak"] + +otlp = [ + "reth-ethereum-cli/otlp", + "reth-node-core/otlp", +] +samply = [ + "reth-ethereum-cli/samply", + "reth-node-core/samply", +] +js-tracer = [ + "reth-node-builder/js-tracer", + "reth-node-ethereum/js-tracer", + "reth-rpc/js-tracer", + "reth-rpc-eth-types/js-tracer", +] dev = ["reth-ethereum-cli/dev"] @@ -91,7 +106,10 @@ asm-keccak = [ "reth-ethereum-cli/asm-keccak", "reth-node-ethereum/asm-keccak", ] - +keccak-cache-global = [ + "reth-node-core/keccak-cache-global", + "reth-node-ethereum/keccak-cache-global", +] jemalloc = [ "reth-cli-util/jemalloc", "reth-node-core/jemalloc", @@ -103,6 +121,12 @@ jemalloc-prof = [ "reth-cli-util/jemalloc-prof", "reth-ethereum-cli/jemalloc-prof", ] +jemalloc-unprefixed = [ + "reth-cli-util/jemalloc-unprefixed", + "reth-node-core/jemalloc", + "reth-node-metrics/jemalloc", + "reth-ethereum-cli/jemalloc", +] tracy-allocator = [ "reth-cli-util/tracy-allocator", "reth-ethereum-cli/tracy-allocator", @@ -123,22 +147,27 @@ snmalloc-native = [ min-error-logs = [ "tracing/release_max_level_error", "reth-ethereum-cli/min-error-logs", + "reth-node-core/min-error-logs", ] min-warn-logs = [ "tracing/release_max_level_warn", "reth-ethereum-cli/min-warn-logs", + "reth-node-core/min-warn-logs", ] min-info-logs = [ "tracing/release_max_level_info", "reth-ethereum-cli/min-info-logs", + "reth-node-core/min-info-logs", ] min-debug-logs = [ 
"tracing/release_max_level_debug", "reth-ethereum-cli/min-debug-logs", + "reth-node-core/min-debug-logs", ] min-trace-logs = [ "tracing/release_max_level_trace", "reth-ethereum-cli/min-trace-logs", + "reth-node-core/min-trace-logs", ] [[bin]] diff --git a/crates/chain-state/src/deferred_trie.rs b/crates/chain-state/src/deferred_trie.rs new file mode 100644 index 0000000000..b79d552d0c --- /dev/null +++ b/crates/chain-state/src/deferred_trie.rs @@ -0,0 +1,444 @@ +use alloy_primitives::B256; +use parking_lot::Mutex; +use reth_metrics::{metrics::Counter, Metrics}; +use reth_trie::{ + updates::{TrieUpdates, TrieUpdatesSorted}, + HashedPostState, HashedPostStateSorted, TrieInputSorted, +}; +use std::{ + fmt, + sync::{Arc, LazyLock}, +}; +use tracing::instrument; + +/// Shared handle to asynchronously populated trie data. +/// +/// Uses a try-lock + fallback computation approach for deadlock-free access. +/// If the deferred task hasn't completed, computes trie data synchronously +/// from stored unsorted inputs rather than blocking. +#[derive(Clone)] +pub struct DeferredTrieData { + /// Shared deferred state holding either raw inputs (pending) or computed result (ready). + state: Arc>, +} + +/// Sorted trie data computed for an executed block. +/// These represent the complete set of sorted trie data required to persist +/// block state for, and generate proofs on top of, a block. +#[derive(Clone, Debug, Default)] +pub struct ComputedTrieData { + /// Sorted hashed post-state produced by execution. + pub hashed_state: Arc, + /// Sorted trie updates produced by state root computation. + pub trie_updates: Arc, + /// Trie input bundled with its anchor hash, if available. + pub anchored_trie_input: Option, +} + +/// Trie input bundled with its anchor hash. +/// +/// This is used to store the trie input and anchor hash for a block together. +#[derive(Clone, Debug)] +pub struct AnchoredTrieInput { + /// The persisted ancestor hash this trie input is anchored to. + pub anchor_hash: B256, + /// Trie input constructed from in-memory overlays. + pub trie_input: Arc, +} + +/// Metrics for deferred trie computation. +#[derive(Metrics)] +#[metrics(scope = "sync.block_validation")] +struct DeferredTrieMetrics { + /// Number of times deferred trie data was ready (async task completed first). + deferred_trie_async_ready: Counter, + /// Number of times deferred trie data required synchronous computation (fallback path). + deferred_trie_sync_fallback: Counter, +} + +static DEFERRED_TRIE_METRICS: LazyLock = + LazyLock::new(DeferredTrieMetrics::default); + +/// Internal state for deferred trie data. +enum DeferredState { + /// Data is not yet available; raw inputs stored for fallback computation. + Pending(PendingInputs), + /// Data has been computed and is ready. + Ready(ComputedTrieData), +} + +/// Inputs kept while a deferred trie computation is pending. +#[derive(Clone, Debug)] +struct PendingInputs { + /// Unsorted hashed post-state from execution. + hashed_state: Arc, + /// Unsorted trie updates from state root computation. + trie_updates: Arc, + /// The persisted ancestor hash this trie input is anchored to. + anchor_hash: B256, + /// Deferred trie data from ancestor blocks for merging. 
+ ancestors: Vec, +} + +impl fmt::Debug for DeferredTrieData { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let state = self.state.lock(); + match &*state { + DeferredState::Pending(_) => { + f.debug_struct("DeferredTrieData").field("state", &"pending").finish() + } + DeferredState::Ready(_) => { + f.debug_struct("DeferredTrieData").field("state", &"ready").finish() + } + } + } +} + +impl DeferredTrieData { + /// Create a new pending handle with fallback inputs for synchronous computation. + /// + /// If the async task hasn't completed when `wait_cloned` is called, the trie data + /// will be computed synchronously from these inputs. This eliminates deadlock risk. + /// + /// # Arguments + /// * `hashed_state` - Unsorted hashed post-state from execution + /// * `trie_updates` - Unsorted trie updates from state root computation + /// * `anchor_hash` - The persisted ancestor hash this trie input is anchored to + /// * `ancestors` - Deferred trie data from ancestor blocks for merging + pub fn pending( + hashed_state: Arc, + trie_updates: Arc, + anchor_hash: B256, + ancestors: Vec, + ) -> Self { + Self { + state: Arc::new(Mutex::new(DeferredState::Pending(PendingInputs { + hashed_state, + trie_updates, + anchor_hash, + ancestors, + }))), + } + } + + /// Create a handle that is already populated with the given [`ComputedTrieData`]. + /// + /// Useful when trie data is available immediately. + /// [`Self::wait_cloned`] will return without any computation. + pub fn ready(bundle: ComputedTrieData) -> Self { + Self { state: Arc::new(Mutex::new(DeferredState::Ready(bundle))) } + } + + /// Sort block execution outputs and build a [`TrieInputSorted`] overlay. + /// + /// The trie input overlay accumulates sorted hashed state (account/storage changes) and + /// trie node updates from all in-memory ancestor blocks. This overlay is required for: + /// - Computing state roots on top of in-memory blocks + /// - Generating storage/account proofs for unpersisted state + /// + /// # Process + /// 1. Sort the current block's hashed state and trie updates + /// 2. Merge ancestor overlays (oldest -> newest, so later state takes precedence) + /// 3. Extend the merged overlay with this block's sorted data + /// + /// Used by both the async background task and the synchronous fallback path. 
+ /// + /// # Arguments + /// * `hashed_state` - Unsorted hashed post-state (account/storage changes) from execution + /// * `trie_updates` - Unsorted trie node updates from state root computation + /// * `anchor_hash` - The persisted ancestor hash this trie input is anchored to + /// * `ancestors` - Deferred trie data from ancestor blocks for merging + pub fn sort_and_build_trie_input( + hashed_state: &HashedPostState, + trie_updates: &TrieUpdates, + anchor_hash: B256, + ancestors: &[Self], + ) -> ComputedTrieData { + // Sort the current block's hashed state and trie updates + let sorted_hashed_state = Arc::new(hashed_state.clone_into_sorted()); + let sorted_trie_updates = Arc::new(trie_updates.clone().into_sorted()); + + // Merge trie data from ancestors (oldest -> newest so later state takes precedence) + let mut overlay = TrieInputSorted::default(); + for ancestor in ancestors { + let ancestor_data = ancestor.wait_cloned(); + { + let state_mut = Arc::make_mut(&mut overlay.state); + state_mut.extend_ref(ancestor_data.hashed_state.as_ref()); + } + { + let nodes_mut = Arc::make_mut(&mut overlay.nodes); + nodes_mut.extend_ref(ancestor_data.trie_updates.as_ref()); + } + } + + // Extend overlay with current block's sorted data + { + let state_mut = Arc::make_mut(&mut overlay.state); + state_mut.extend_ref(sorted_hashed_state.as_ref()); + } + { + let nodes_mut = Arc::make_mut(&mut overlay.nodes); + nodes_mut.extend_ref(sorted_trie_updates.as_ref()); + } + + ComputedTrieData::with_trie_input( + sorted_hashed_state, + sorted_trie_updates, + anchor_hash, + Arc::new(overlay), + ) + } + + /// Returns trie data, computing synchronously if the async task hasn't completed. + /// + /// - If the async task has completed (`Ready`), returns the cached result. + /// - If pending, computes synchronously from stored inputs. + /// + /// Deadlock is avoided as long as the provided ancestors form a true ancestor chain (a DAG): + /// - Each block only waits on its ancestors (blocks on the path to the persisted root) + /// - Sibling blocks (forks) are never in each other's ancestor lists + /// - A block never waits on its descendants + /// + /// Given that invariant, circular wait dependencies are impossible. + #[instrument(level = "debug", target = "engine::tree::deferred_trie", skip_all)] + pub fn wait_cloned(&self) -> ComputedTrieData { + let mut state = self.state.lock(); + match &*state { + // If the deferred trie data is ready, return the cached result. + DeferredState::Ready(bundle) => { + DEFERRED_TRIE_METRICS.deferred_trie_async_ready.increment(1); + bundle.clone() + } + // If the deferred trie data is pending, compute the trie data synchronously and return + // the result. This is the fallback path if the async task hasn't completed. + DeferredState::Pending(inputs) => { + DEFERRED_TRIE_METRICS.deferred_trie_sync_fallback.increment(1); + let computed = Self::sort_and_build_trie_input( + &inputs.hashed_state, + &inputs.trie_updates, + inputs.anchor_hash, + &inputs.ancestors, + ); + *state = DeferredState::Ready(computed.clone()); + computed + } + } + } +} + +impl ComputedTrieData { + /// Construct a bundle that includes trie input anchored to a persisted ancestor. + pub const fn with_trie_input( + hashed_state: Arc, + trie_updates: Arc, + anchor_hash: B256, + trie_input: Arc, + ) -> Self { + Self { + hashed_state, + trie_updates, + anchored_trie_input: Some(AnchoredTrieInput { anchor_hash, trie_input }), + } + } + + /// Construct a bundle without trie input or anchor information. 
+ /// + /// Unlike [`Self::with_trie_input`], this constructor omits the accumulated trie input overlay + /// and its anchor hash. Use this when the trie input is not needed, such as in block builders + /// or sequencers that don't require proof generation on top of in-memory state. + /// + /// The trie input anchor identifies the persisted block hash from which the in-memory overlay + /// was built. Without it, consumers cannot determine which on-disk state to combine with. + pub const fn without_trie_input( + hashed_state: Arc, + trie_updates: Arc, + ) -> Self { + Self { hashed_state, trie_updates, anchored_trie_input: None } + } + + /// Returns the anchor hash, if present. + pub fn anchor_hash(&self) -> Option { + self.anchored_trie_input.as_ref().map(|anchored| anchored.anchor_hash) + } + + /// Returns the trie input, if present. + pub fn trie_input(&self) -> Option<&Arc> { + self.anchored_trie_input.as_ref().map(|anchored| &anchored.trie_input) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::{map::B256Map, U256}; + use reth_primitives_traits::Account; + use reth_trie::updates::TrieUpdates; + use std::{ + sync::Arc, + thread, + time::{Duration, Instant}, + }; + + fn empty_bundle() -> ComputedTrieData { + ComputedTrieData { + hashed_state: Arc::default(), + trie_updates: Arc::default(), + anchored_trie_input: None, + } + } + + fn empty_pending() -> DeferredTrieData { + empty_pending_with_anchor(B256::ZERO) + } + + fn empty_pending_with_anchor(anchor: B256) -> DeferredTrieData { + DeferredTrieData::pending( + Arc::new(HashedPostState::default()), + Arc::new(TrieUpdates::default()), + anchor, + Vec::new(), + ) + } + + /// Verifies that a ready handle returns immediately without computation. + #[test] + fn ready_returns_immediately() { + let bundle = empty_bundle(); + let deferred = DeferredTrieData::ready(bundle.clone()); + + let start = Instant::now(); + let result = deferred.wait_cloned(); + let elapsed = start.elapsed(); + + assert_eq!(result.hashed_state, bundle.hashed_state); + assert_eq!(result.trie_updates, bundle.trie_updates); + assert_eq!(result.anchor_hash(), bundle.anchor_hash()); + assert!(elapsed < Duration::from_millis(20)); + } + + /// Verifies that a pending handle computes trie data synchronously via fallback. + #[test] + fn pending_computes_fallback() { + let deferred = empty_pending(); + + // wait_cloned should compute from inputs without blocking + let start = Instant::now(); + let result = deferred.wait_cloned(); + let elapsed = start.elapsed(); + + // Should return quickly (fallback computation) + assert!(elapsed < Duration::from_millis(100)); + assert!(result.hashed_state.is_empty()); + } + + /// Verifies that fallback computation result is cached for subsequent calls. + #[test] + fn fallback_result_is_cached() { + let deferred = empty_pending(); + + // First call computes and should stash the result + let first = deferred.wait_cloned(); + // Second call should reuse the cached result (same Arc pointer) + let second = deferred.wait_cloned(); + + assert!(Arc::ptr_eq(&first.hashed_state, &second.hashed_state)); + assert!(Arc::ptr_eq(&first.trie_updates, &second.trie_updates)); + assert_eq!(first.anchor_hash(), second.anchor_hash()); + } + + /// Verifies that concurrent `wait_cloned` calls result in only one computation, + /// with all callers receiving the same cached result. 
+ #[test] + fn concurrent_wait_cloned_computes_once() { + let deferred = empty_pending(); + + // Spawn multiple threads that all call wait_cloned concurrently + let handles: Vec<_> = (0..10) + .map(|_| { + let d = deferred.clone(); + thread::spawn(move || d.wait_cloned()) + }) + .collect(); + + // Collect all results + let results: Vec<_> = handles.into_iter().map(|h| h.join().unwrap()).collect(); + + // All results should share the same Arc pointers (same computed result) + let first = &results[0]; + for result in &results[1..] { + assert!(Arc::ptr_eq(&first.hashed_state, &result.hashed_state)); + assert!(Arc::ptr_eq(&first.trie_updates, &result.trie_updates)); + } + } + + /// Tests that ancestor trie data is merged during fallback computation and that the + /// resulting `ComputedTrieData` uses the current block's anchor hash, not the ancestor's. + #[test] + fn ancestors_are_merged() { + // Create ancestor with some data + let ancestor_bundle = ComputedTrieData { + hashed_state: Arc::default(), + trie_updates: Arc::default(), + anchored_trie_input: Some(AnchoredTrieInput { + anchor_hash: B256::with_last_byte(1), + trie_input: Arc::new(TrieInputSorted::default()), + }), + }; + let ancestor = DeferredTrieData::ready(ancestor_bundle); + + // Create pending with ancestor + let deferred = DeferredTrieData::pending( + Arc::new(HashedPostState::default()), + Arc::new(TrieUpdates::default()), + B256::with_last_byte(2), + vec![ancestor], + ); + + let result = deferred.wait_cloned(); + // Should have the current block's anchor, not the ancestor's + assert_eq!(result.anchor_hash(), Some(B256::with_last_byte(2))); + } + + /// Ensures ancestor overlays are merged oldest -> newest so latest state wins (no overwrite by + /// older ancestors). + #[test] + fn ancestors_merge_in_chronological_order() { + let key = B256::with_last_byte(1); + // Oldest ancestor sets nonce to 1 + let oldest_state = HashedPostStateSorted::new( + vec![(key, Some(Account { nonce: 1, balance: U256::ZERO, bytecode_hash: None }))], + B256Map::default(), + ); + // Newest ancestor overwrites nonce to 2 + let newest_state = HashedPostStateSorted::new( + vec![(key, Some(Account { nonce: 2, balance: U256::ZERO, bytecode_hash: None }))], + B256Map::default(), + ); + + let oldest = ComputedTrieData { + hashed_state: Arc::new(oldest_state), + trie_updates: Arc::default(), + anchored_trie_input: None, + }; + let newest = ComputedTrieData { + hashed_state: Arc::new(newest_state), + trie_updates: Arc::default(), + anchored_trie_input: None, + }; + + // Pass ancestors oldest -> newest; newest should take precedence + let deferred = DeferredTrieData::pending( + Arc::new(HashedPostState::default()), + Arc::new(TrieUpdates::default()), + B256::ZERO, + vec![DeferredTrieData::ready(oldest), DeferredTrieData::ready(newest)], + ); + + let result = deferred.wait_cloned(); + let overlay_state = &result.anchored_trie_input.as_ref().unwrap().trie_input.state.accounts; + assert_eq!(overlay_state.len(), 1); + let (_, account) = &overlay_state[0]; + assert_eq!(account.unwrap().nonce, 2); + } +} diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index a6c8553810..133bdf4a69 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -2,7 +2,7 @@ use crate::{ CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, - ChainInfoTracker, MemoryOverlayStateProvider, + ChainInfoTracker, ComputedTrieData, DeferredTrieData, MemoryOverlayStateProvider, }; use 
alloy_consensus::{transaction::TransactionMeta, BlockHeader}; use alloy_eips::{BlockHashOrNumber, BlockNumHash}; @@ -17,8 +17,8 @@ use reth_primitives_traits::{ SignedTransaction, }; use reth_storage_api::StateProviderBox; -use reth_trie::{updates::TrieUpdates, HashedPostState}; -use std::{collections::BTreeMap, sync::Arc, time::Instant}; +use reth_trie::{updates::TrieUpdatesSorted, HashedPostStateSorted, TrieInputSorted}; +use std::{collections::BTreeMap, ops::Deref, sync::Arc, time::Instant}; use tokio::sync::{broadcast, watch}; /// Size of the broadcast channel used to notify canonical state events. @@ -565,7 +565,7 @@ impl CanonicalInMemoryState { /// State after applying the given block, this block is part of the canonical chain that partially /// stored in memory and can be traced back to a canonical block on disk. -#[derive(Debug, PartialEq, Eq, Clone)] +#[derive(Debug, Clone)] pub struct BlockState { /// The executed block that determines the state after this block has been executed. block: ExecutedBlock, @@ -573,6 +573,12 @@ pub struct BlockState { parent: Option>, } +impl PartialEq for BlockState { + fn eq(&self, other: &Self) -> bool { + self.block == other.block && self.parent == other.parent + } +} + impl BlockState { /// [`BlockState`] constructor. pub const fn new(block: ExecutedBlock) -> Self { @@ -628,6 +634,8 @@ impl BlockState { /// We assume that the `Receipts` in the executed block `ExecutionOutcome` /// has only one element corresponding to the executed block associated to /// the state. + /// + /// This clones the vector of receipts. To avoid it, use [`Self::executed_block_receipts_ref`]. pub fn executed_block_receipts(&self) -> Vec { let receipts = self.receipts(); @@ -640,22 +648,30 @@ impl BlockState { receipts.first().cloned().unwrap_or_default() } - /// Returns a vector of __parent__ `BlockStates`. + /// Returns a slice of `Receipt` of executed block that determines the state. + /// We assume that the `Receipts` in the executed block `ExecutionOutcome` + /// has only one element corresponding to the executed block associated to + /// the state. + pub fn executed_block_receipts_ref(&self) -> &[N::Receipt] { + let receipts = self.receipts(); + + debug_assert!( + receipts.len() <= 1, + "Expected at most one block's worth of receipts, found {}", + receipts.len() + ); + + receipts.first().map(|receipts| receipts.deref()).unwrap_or_default() + } + + /// Returns an iterator over __parent__ `BlockStates`. /// - /// The block state order in the output vector is newest to oldest (highest to lowest): + /// The block state order is newest to oldest (highest to lowest): /// `[5,4,3,2,1]` /// /// Note: This does not include self. - pub fn parent_state_chain(&self) -> Vec<&Self> { - let mut parents = Vec::new(); - let mut current = self.parent.as_deref(); - - while let Some(parent) = current { - parents.push(parent); - current = parent.parent.as_deref(); - } - - parents + pub fn parent_state_chain(&self) -> impl Iterator + '_ { + std::iter::successors(self.parent.as_deref(), |state| state.parent.as_deref()) } /// Returns a vector of `BlockStates` representing the entire in memory chain. @@ -666,6 +682,11 @@ impl BlockState { } /// Appends the parent chain of this [`BlockState`] to the given vector. + /// + /// Parents are appended in order from newest to oldest (highest to lowest). + /// This does not include self, only the parent states. + /// + /// This is a convenience method equivalent to `chain.extend(self.parent_state_chain())`. 
pub fn append_parent_chain<'a>(&'a self, chain: &mut Vec<&'a Self>) { chain.extend(self.parent_state_chain()); } @@ -719,16 +740,17 @@ impl BlockState { } /// Represents an executed block stored in-memory. -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug)] pub struct ExecutedBlock { /// Recovered Block pub recovered_block: Arc>, /// Block's execution outcome. pub execution_output: Arc>, - /// Block's hashed state. - pub hashed_state: Arc, - /// Trie updates that result from calculating the state root for the block. - pub trie_updates: Arc, + /// Deferred trie data produced by execution. + /// + /// This allows deferring the computation of the trie data which can be expensive. + /// The data can be populated asynchronously after the block was validated. + pub trie_data: DeferredTrieData, } impl Default for ExecutedBlock { @@ -736,13 +758,54 @@ impl Default for ExecutedBlock { Self { recovered_block: Default::default(), execution_output: Default::default(), - hashed_state: Default::default(), - trie_updates: Default::default(), + trie_data: DeferredTrieData::ready(ComputedTrieData::default()), } } } +impl PartialEq for ExecutedBlock { + fn eq(&self, other: &Self) -> bool { + // Trie data is computed asynchronously and doesn't define block identity. + self.recovered_block == other.recovered_block && + self.execution_output == other.execution_output + } +} + impl ExecutedBlock { + /// Create a new [`ExecutedBlock`] with already-computed trie data. + /// + /// Use this constructor when trie data is available immediately (e.g., sequencers, + /// payload builders). This is the safe default path. + pub fn new( + recovered_block: Arc>, + execution_output: Arc>, + trie_data: ComputedTrieData, + ) -> Self { + Self { recovered_block, execution_output, trie_data: DeferredTrieData::ready(trie_data) } + } + + /// Create a new [`ExecutedBlock`] with deferred trie data. + /// + /// This is useful if the trie data is populated somewhere else, e.g. asynchronously + /// after the block was validated. + /// + /// The [`DeferredTrieData`] handle allows expensive trie operations (sorting hashed state, + /// sorting trie updates, and building the accumulated trie input overlay) to be performed + /// outside the critical validation path. This can improve latency for time-sensitive + /// operations like block validation. + /// + /// If the data hasn't been populated when [`Self::trie_data()`] is called, computation + /// occurs synchronously from stored inputs, so there is no blocking or deadlock risk. + /// + /// Use [`Self::new()`] instead when trie data is already computed and available immediately. + pub const fn with_deferred_trie_data( + recovered_block: Arc>, + execution_output: Arc>, + trie_data: DeferredTrieData, + ) -> Self { + Self { recovered_block, execution_output, trie_data } + } + /// Returns a reference to an inner [`SealedBlock`] #[inline] pub fn sealed_block(&self) -> &SealedBlock { @@ -761,16 +824,55 @@ impl ExecutedBlock { &self.execution_output } - /// Returns a reference to the hashed state result of the execution outcome + /// Returns the trie data, computing it synchronously if not already cached. 
+ /// + /// Uses `OnceLock::get_or_init` internally: + /// - If already computed: returns cached result immediately + /// - If not computed: first caller computes, others wait for that result #[inline] - pub fn hashed_state(&self) -> &HashedPostState { - &self.hashed_state + #[tracing::instrument(level = "debug", target = "engine::tree", name = "trie_data", skip_all)] + pub fn trie_data(&self) -> ComputedTrieData { + self.trie_data.wait_cloned() } - /// Returns a reference to the trie updates resulting from the execution outcome + /// Returns a clone of the deferred trie data handle. + /// + /// A handle is a lightweight reference that can be passed to descendants without + /// forcing trie data to be computed immediately. The actual work runs when + /// `wait_cloned()` is called by a consumer (e.g. when merging overlays). #[inline] - pub fn trie_updates(&self) -> &TrieUpdates { - &self.trie_updates + pub fn trie_data_handle(&self) -> DeferredTrieData { + self.trie_data.clone() + } + + /// Returns the hashed state result of the execution outcome. + /// + /// May compute trie data synchronously if the deferred task hasn't completed. + #[inline] + pub fn hashed_state(&self) -> Arc { + self.trie_data().hashed_state + } + + /// Returns the trie updates resulting from the execution outcome. + /// + /// May compute trie data synchronously if the deferred task hasn't completed. + #[inline] + pub fn trie_updates(&self) -> Arc { + self.trie_data().trie_updates + } + + /// Returns the trie input anchored to the persisted ancestor. + /// + /// May compute trie data synchronously if the deferred task hasn't completed. + #[inline] + pub fn trie_input(&self) -> Option> { + self.trie_data().trie_input().cloned() + } + + /// Returns the anchor hash of the trie input, if present. + #[inline] + pub fn anchor_hash(&self) -> Option { + self.trie_data().anchor_hash() } /// Returns a [`BlockNumber`] of the block. 
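The `trie_data()` accessor above is documented as a `OnceLock::get_or_init` wrapper: the first caller computes the bundle from the stored inputs, concurrent callers block until that computation finishes, and every later caller receives the same cached `Arc`. Below is a minimal, self-contained sketch of that caching pattern; the names (`Deferred`, `ready`, `pending`, `wait_cloned`) and the toy `u64`/`Vec<u64>` payloads are illustrative assumptions, not the reth `DeferredTrieData` implementation.

use std::sync::{Arc, OnceLock};

// A cloneable handle that either carries a precomputed value or computes it
// on first access from stored inputs (illustrative only).
#[derive(Clone)]
struct Deferred<T, I> {
    inputs: Arc<I>,
    cell: Arc<OnceLock<Arc<T>>>,
}

impl<T, I> Deferred<T, I> {
    // Wrap an already-computed value; `wait_cloned` returns it immediately.
    fn ready(value: T) -> Self
    where
        I: Default,
    {
        let cell = OnceLock::new();
        let _ = cell.set(Arc::new(value));
        Self { inputs: Arc::new(I::default()), cell: Arc::new(cell) }
    }

    // Keep only the inputs; the value is computed lazily on first access.
    fn pending(inputs: I) -> Self {
        Self { inputs: Arc::new(inputs), cell: Arc::new(OnceLock::new()) }
    }

    // First caller runs `compute`, concurrent callers block until it finishes,
    // every caller gets a clone of the same cached `Arc`.
    fn wait_cloned(&self, compute: impl FnOnce(&I) -> T) -> Arc<T> {
        self.cell.get_or_init(|| Arc::new(compute(&self.inputs))).clone()
    }
}

fn main() {
    let handle = Deferred::<u64, Vec<u64>>::pending(vec![1, 2, 3]);

    let first = handle.wait_cloned(|inputs| inputs.iter().sum());
    let second = handle.wait_cloned(|_| unreachable!("already cached"));
    assert_eq!(*first, 6);
    assert!(Arc::ptr_eq(&first, &second));

    let ready = Deferred::<u64, Vec<u64>>::ready(42);
    assert_eq!(*ready.wait_cloned(|_| unreachable!("precomputed")), 42);
}

Because `get_or_init` synchronizes initialization, the fallback computation runs at most once per handle no matter how many clones call into it, which is the behaviour the `fallback_result_is_cached` and `concurrent_wait_cloned_computes_once` tests above exercise.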
@@ -875,8 +977,8 @@ mod tests { StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, }; use reth_trie::{ - AccountProof, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, - StorageProof, TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, }; fn create_mock_state( @@ -1348,18 +1450,18 @@ mod tests { let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let chain = create_mock_state_chain(&mut test_block_builder, 4); - let parents = chain[3].parent_state_chain(); + let parents: Vec<_> = chain[3].parent_state_chain().collect(); assert_eq!(parents.len(), 3); assert_eq!(parents[0].block().recovered_block().number, 3); assert_eq!(parents[1].block().recovered_block().number, 2); assert_eq!(parents[2].block().recovered_block().number, 1); - let parents = chain[2].parent_state_chain(); + let parents: Vec<_> = chain[2].parent_state_chain().collect(); assert_eq!(parents.len(), 2); assert_eq!(parents[0].block().recovered_block().number, 2); assert_eq!(parents[1].block().recovered_block().number, 1); - let parents = chain[0].parent_state_chain(); + let parents: Vec<_> = chain[0].parent_state_chain().collect(); assert_eq!(parents.len(), 0); } @@ -1371,7 +1473,7 @@ mod tests { create_mock_state(&mut test_block_builder, single_block_number, B256::random()); let single_block_hash = single_block.block().recovered_block().hash(); - let parents = single_block.parent_state_chain(); + let parents: Vec<_> = single_block.parent_state_chain().collect(); assert_eq!(parents.len(), 0); let block_state_chain = single_block.chain().collect::>(); diff --git a/crates/chain-state/src/lib.rs b/crates/chain-state/src/lib.rs index 091201f5fa..1d98cb43eb 100644 --- a/crates/chain-state/src/lib.rs +++ b/crates/chain-state/src/lib.rs @@ -11,6 +11,9 @@ mod in_memory; pub use in_memory::*; +mod deferred_trie; +pub use deferred_trie::*; + mod noop; mod chain_info; diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index 254edb248b..2074a68f37 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -5,14 +5,14 @@ use reth_errors::ProviderResult; use reth_primitives_traits::{Account, Bytecode, NodePrimitives}; use reth_storage_api::{ AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, StateProofProvider, - StateProvider, StateRootProvider, StorageRootProvider, + StateProvider, StateProviderBox, StateRootProvider, StorageRootProvider, }; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof, TrieInput, }; use revm_database::BundleState; -use std::sync::OnceLock; +use std::{borrow::Cow, sync::OnceLock}; /// A state provider that stores references to in-memory blocks along with their state as well as a /// reference of the historical state provider for fallback lookups. @@ -24,15 +24,11 @@ pub struct MemoryOverlayStateProviderRef< /// Historical state provider for state lookups that are not found in memory blocks. pub(crate) historical: Box, /// The collection of executed parent blocks. Expected order is newest to oldest. - pub(crate) in_memory: Vec>, + pub(crate) in_memory: Cow<'a, [ExecutedBlock]>, /// Lazy-loaded in-memory trie data. 
pub(crate) trie_input: OnceLock, } -/// A state provider that stores references to in-memory blocks along with their state as well as -/// the historical state provider for fallback lookups. -pub type MemoryOverlayStateProvider = MemoryOverlayStateProviderRef<'static, N>; - impl<'a, N: NodePrimitives> MemoryOverlayStateProviderRef<'a, N> { /// Create new memory overlay state provider. /// @@ -42,7 +38,7 @@ impl<'a, N: NodePrimitives> MemoryOverlayStateProviderRef<'a, N> { /// - `historical` - a historical state provider for the latest ancestor block stored in the /// database. pub fn new(historical: Box, in_memory: Vec>) -> Self { - Self { historical, in_memory, trie_input: OnceLock::new() } + Self { historical, in_memory: Cow::Owned(in_memory), trie_input: OnceLock::new() } } /// Turn this state provider into a state provider @@ -53,11 +49,10 @@ impl<'a, N: NodePrimitives> MemoryOverlayStateProviderRef<'a, N> { /// Return lazy-loaded trie state aggregated from in-memory blocks. fn trie_input(&self) -> &TrieInput { self.trie_input.get_or_init(|| { - TrieInput::from_blocks( - self.in_memory - .iter() - .rev() - .map(|block| (block.hashed_state.as_ref(), block.trie_updates.as_ref())), + let bundles: Vec<_> = + self.in_memory.iter().rev().map(|block| block.trie_data()).collect(); + TrieInput::from_blocks_sorted( + bundles.iter().map(|data| (data.hashed_state.as_ref(), data.trie_updates.as_ref())), ) }) } @@ -72,7 +67,7 @@ impl<'a, N: NodePrimitives> MemoryOverlayStateProviderRef<'a, N> { impl BlockHashReader for MemoryOverlayStateProviderRef<'_, N> { fn block_hash(&self, number: BlockNumber) -> ProviderResult> { - for block in &self.in_memory { + for block in self.in_memory.iter() { if block.recovered_block().number() == number { return Ok(Some(block.recovered_block().hash())); } @@ -91,7 +86,7 @@ impl BlockHashReader for MemoryOverlayStateProviderRef<'_, N> let mut in_memory_hashes = Vec::with_capacity(range.size_hint().0); // iterate in ascending order (oldest to newest = low to high) - for block in &self.in_memory { + for block in self.in_memory.iter() { let block_num = block.recovered_block().number(); if range.contains(&block_num) { in_memory_hashes.push(block.recovered_block().hash()); @@ -113,7 +108,7 @@ impl BlockHashReader for MemoryOverlayStateProviderRef<'_, N> impl AccountReader for MemoryOverlayStateProviderRef<'_, N> { fn basic_account(&self, address: &Address) -> ProviderResult> { - for block in &self.in_memory { + for block in self.in_memory.iter() { if let Some(account) = block.execution_output.account(address) { return Ok(account); } @@ -217,7 +212,7 @@ impl StateProvider for MemoryOverlayStateProviderRef<'_, N> { address: Address, storage_key: StorageKey, ) -> ProviderResult> { - for block in &self.in_memory { + for block in self.in_memory.iter() { if let Some(value) = block.execution_output.storage(&address, storage_key.into()) { return Ok(Some(value)); } @@ -229,7 +224,7 @@ impl StateProvider for MemoryOverlayStateProviderRef<'_, N> { impl BytecodeReader for MemoryOverlayStateProviderRef<'_, N> { fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult> { - for block in &self.in_memory { + for block in self.in_memory.iter() { if let Some(contract) = block.execution_output.bytecode(code_hash) { return Ok(Some(contract)); } @@ -238,3 +233,46 @@ impl BytecodeReader for MemoryOverlayStateProviderRef<'_, N> self.historical.bytecode_by_hash(code_hash) } } + +/// An owned state provider that stores references to in-memory blocks along with their state as +/// well as a 
reference of the historical state provider for fallback lookups. +#[expect(missing_debug_implementations)] +pub struct MemoryOverlayStateProvider { + /// Historical state provider for state lookups that are not found in memory blocks. + pub(crate) historical: StateProviderBox, + /// The collection of executed parent blocks. Expected order is newest to oldest. + pub(crate) in_memory: Vec>, + /// Lazy-loaded in-memory trie data. + pub(crate) trie_input: OnceLock, +} + +impl MemoryOverlayStateProvider { + /// Create new memory overlay state provider. + /// + /// ## Arguments + /// + /// - `in_memory` - the collection of executed ancestor blocks in reverse. + /// - `historical` - a historical state provider for the latest ancestor block stored in the + /// database. + pub fn new(historical: StateProviderBox, in_memory: Vec>) -> Self { + Self { historical, in_memory, trie_input: OnceLock::new() } + } + + /// Returns a new provider that takes the `TX` as reference + #[inline(always)] + fn as_ref(&self) -> MemoryOverlayStateProviderRef<'_, N> { + MemoryOverlayStateProviderRef { + historical: Box::new(self.historical.as_ref()), + in_memory: Cow::Borrowed(&self.in_memory), + trie_input: self.trie_input.clone(), + } + } + + /// Wraps the [`Self`] in a `Box`. + pub fn boxed(self) -> StateProviderBox { + Box::new(self) + } +} + +// Delegates all provider impls to [`MemoryOverlayStateProviderRef`] +reth_storage_api::macros::delegate_provider_impls!(MemoryOverlayStateProvider where [N: NodePrimitives]); diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index 5d318aca56..26ccccd017 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -1,6 +1,6 @@ use crate::{ in_memory::ExecutedBlock, CanonStateNotification, CanonStateNotifications, - CanonStateSubscriptions, + CanonStateSubscriptions, ComputedTrieData, }; use alloy_consensus::{Header, SignableTransaction, TxEip1559, TxReceipt, EMPTY_ROOT_HASH}; use alloy_eips::{ @@ -23,7 +23,7 @@ use reth_primitives_traits::{ SignedTransaction, }; use reth_storage_api::NodePrimitivesProvider; -use reth_trie::{root::state_root_unhashed, updates::TrieUpdates, HashedPostState}; +use reth_trie::root::state_root_unhashed; use revm_database::BundleState; use revm_state::AccountInfo; use std::{ @@ -92,7 +92,7 @@ impl TestBlockBuilder { &mut self, number: BlockNumber, parent_hash: B256, - ) -> RecoveredBlock { + ) -> SealedBlock { let mut rng = rand::rng(); let mock_tx = |nonce: u64| -> Recovered<_> { @@ -167,17 +167,14 @@ impl TestBlockBuilder { ..Default::default() }; - let block = SealedBlock::from_sealed_parts( + SealedBlock::from_sealed_parts( SealedHeader::seal_slow(header), BlockBody { transactions: transactions.into_iter().map(|tx| tx.into_inner()).collect(), ommers: Vec::new(), withdrawals: Some(vec![].into()), }, - ); - - RecoveredBlock::try_recover_sealed_with_senders(block, vec![self.signer; num_txs as usize]) - .unwrap() + ) } /// Creates a fork chain with the given base block. 
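The `memory_overlay.rs` change above keeps a single read path: the borrowed `MemoryOverlayStateProviderRef` now holds `Cow<'a, [ExecutedBlock]>`, and the new owned `MemoryOverlayStateProvider` builds a borrowing view via `Cow::Borrowed` in `as_ref()` and delegates its provider impls to it. A minimal sketch of that owned-wrapper-over-`Cow`-view pattern, using hypothetical names (`Overlay`, `OverlayRef`, `lookup`) and a toy `u64` payload instead of executed blocks, might look like this:

use std::borrow::Cow;

// Borrowed-or-owned view over overlay entries; all read logic lives here.
struct OverlayRef<'a> {
    // Newest-to-oldest entries: owned when built directly, borrowed when
    // produced from the owned wrapper below.
    in_memory: Cow<'a, [u64]>,
}

impl<'a> OverlayRef<'a> {
    fn new(in_memory: Vec<u64>) -> Self {
        Self { in_memory: Cow::Owned(in_memory) }
    }

    // Walk entries newest-to-oldest and return the position of `wanted`.
    fn lookup(&self, wanted: u64) -> Option<usize> {
        self.in_memory.iter().position(|entry| *entry == wanted)
    }
}

// Owned wrapper; exposes the same reads by delegating to a borrowing view.
struct Overlay {
    in_memory: Vec<u64>,
}

impl Overlay {
    fn new(in_memory: Vec<u64>) -> Self {
        Self { in_memory }
    }

    // Cheap borrowing view: no clone of the entry list.
    fn as_ref(&self) -> OverlayRef<'_> {
        OverlayRef { in_memory: Cow::Borrowed(self.in_memory.as_slice()) }
    }

    fn lookup(&self, wanted: u64) -> Option<usize> {
        self.as_ref().lookup(wanted)
    }
}

fn main() {
    let owned = Overlay::new(vec![3, 2, 1]);
    assert_eq!(owned.lookup(2), Some(1));

    let by_value = OverlayRef::new(vec![3, 2, 1]);
    assert_eq!(by_value.lookup(1), Some(2));
}

The payoff of this split is that the lookup logic is written once against the `Cow`-backed view, while the owned type can still be boxed and handed out without cloning its block list.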
@@ -191,7 +188,9 @@ impl TestBlockBuilder { for _ in 0..length { let block = self.generate_random_block(parent.number + 1, parent.hash()); - parent = block.clone_sealed_block(); + parent = block.clone(); + let senders = vec![self.signer; block.body().transactions.len()]; + let block = block.with_senders(senders); fork.push(block); } @@ -205,20 +204,19 @@ impl TestBlockBuilder { receipts: Vec>, parent_hash: B256, ) -> ExecutedBlock { - let block_with_senders = self.generate_random_block(block_number, parent_hash); - - let (block, senders) = block_with_senders.split_sealed(); - ExecutedBlock { - recovered_block: Arc::new(RecoveredBlock::new_sealed(block, senders)), - execution_output: Arc::new(ExecutionOutcome::new( + let block = self.generate_random_block(block_number, parent_hash); + let senders = vec![self.signer; block.body().transactions.len()]; + let trie_data = ComputedTrieData::default(); + ExecutedBlock::new( + Arc::new(RecoveredBlock::new_sealed(block, senders)), + Arc::new(ExecutionOutcome::new( BundleState::default(), receipts, block_number, vec![Requests::default()], )), - hashed_state: Arc::new(HashedPostState::default()), - trie_updates: Arc::new(TrieUpdates::default()), - } + trie_data, + ) } /// Generates an [`ExecutedBlock`] that includes the given receipts. diff --git a/crates/chainspec/src/lib.rs b/crates/chainspec/src/lib.rs index 96db768a1c..2ba17ebf2a 100644 --- a/crates/chainspec/src/lib.rs +++ b/crates/chainspec/src/lib.rs @@ -30,8 +30,9 @@ pub use info::ChainInfo; #[cfg(any(test, feature = "test-utils"))] pub use spec::test_fork_ids; pub use spec::{ - make_genesis_header, BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, - ChainSpecProvider, DepositContract, ForkBaseFeeParams, DEV, HOLESKY, HOODI, MAINNET, SEPOLIA, + blob_params_to_schedule, create_chain_config, mainnet_chain_config, make_genesis_header, + BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, ChainSpecProvider, + DepositContract, ForkBaseFeeParams, DEV, HOLESKY, HOODI, MAINNET, SEPOLIA, }; use reth_primitives_traits::sync::OnceLock; diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index e8d16886aa..65144a9571 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -4,13 +4,20 @@ use alloy_evm::eth::spec::EthExecutorSpec; use crate::{ constants::{MAINNET_DEPOSIT_CONTRACT, MAINNET_PRUNE_DELETE_LIMIT}, ethereum::SEPOLIA_PARIS_TTD, - holesky, hoodi, + holesky, hoodi, mainnet, mainnet::{MAINNET_PARIS_BLOCK, MAINNET_PARIS_TTD}, sepolia, sepolia::SEPOLIA_PARIS_BLOCK, EthChainSpec, }; -use alloc::{boxed::Box, sync::Arc, vec::Vec}; +use alloc::{ + boxed::Box, + collections::BTreeMap, + format, + string::{String, ToString}, + sync::Arc, + vec::Vec, +}; use alloy_chains::{Chain, NamedChain}; use alloy_consensus::{ constants::{ @@ -23,7 +30,7 @@ use alloy_eips::{ eip1559::INITIAL_BASE_FEE, eip7685::EMPTY_REQUESTS_HASH, eip7840::BlobParams, eip7892::BlobScheduleBlobParams, }; -use alloy_genesis::Genesis; +use alloy_genesis::{ChainConfig, Genesis}; use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256}; use alloy_trie::root::state_root_ref_unhashed; use core::fmt::Debug; @@ -73,6 +80,8 @@ pub fn make_genesis_header(genesis: &Genesis, hardforks: &ChainHardforks) -> Hea .then_some(EMPTY_REQUESTS_HASH); Header { + number: genesis.number.unwrap_or_default(), + parent_hash: genesis.parent_hash.unwrap_or_default(), gas_limit: genesis.gas_limit, difficulty: genesis.difficulty, nonce: genesis.nonce.into(), @@ -113,7 +122,10 @@ pub 
static MAINNET: LazyLock> = LazyLock::new(|| { deposit_contract: Some(MAINNET_DEPOSIT_CONTRACT), base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), prune_delete_limit: MAINNET_PRUNE_DELETE_LIMIT, - blob_params: BlobScheduleBlobParams::default(), + blob_params: BlobScheduleBlobParams::default().with_scheduled([ + (mainnet::MAINNET_BPO1_TIMESTAMP, BlobParams::bpo1()), + (mainnet::MAINNET_BPO2_TIMESTAMP, BlobParams::bpo2()), + ]), }; spec.genesis.config.dao_fork_support = true; spec.into() @@ -237,6 +249,111 @@ pub static DEV: LazyLock> = LazyLock::new(|| { .into() }); +/// Creates a [`ChainConfig`] from the given chain, hardforks, deposit contract address, and blob +/// schedule. +pub fn create_chain_config( + chain: Option, + hardforks: &ChainHardforks, + deposit_contract_address: Option
, + blob_schedule: BTreeMap, +) -> ChainConfig { + // Helper to extract block number from a hardfork condition + let block_num = |fork: EthereumHardfork| hardforks.fork(fork).block_number(); + + // Helper to extract timestamp from a hardfork condition + let timestamp = |fork: EthereumHardfork| -> Option { + match hardforks.fork(fork) { + ForkCondition::Timestamp(t) => Some(t), + _ => None, + } + }; + + // Extract TTD from Paris fork + let (terminal_total_difficulty, terminal_total_difficulty_passed) = + match hardforks.fork(EthereumHardfork::Paris) { + ForkCondition::TTD { total_difficulty, .. } => (Some(total_difficulty), true), + _ => (None, false), + }; + + // Check if DAO fork is supported (it has an activation block) + let dao_fork_support = hardforks.fork(EthereumHardfork::Dao) != ForkCondition::Never; + + ChainConfig { + chain_id: chain.map(|c| c.id()).unwrap_or(0), + homestead_block: block_num(EthereumHardfork::Homestead), + dao_fork_block: block_num(EthereumHardfork::Dao), + dao_fork_support, + eip150_block: block_num(EthereumHardfork::Tangerine), + eip155_block: block_num(EthereumHardfork::SpuriousDragon), + eip158_block: block_num(EthereumHardfork::SpuriousDragon), + byzantium_block: block_num(EthereumHardfork::Byzantium), + constantinople_block: block_num(EthereumHardfork::Constantinople), + petersburg_block: block_num(EthereumHardfork::Petersburg), + istanbul_block: block_num(EthereumHardfork::Istanbul), + muir_glacier_block: block_num(EthereumHardfork::MuirGlacier), + berlin_block: block_num(EthereumHardfork::Berlin), + london_block: block_num(EthereumHardfork::London), + arrow_glacier_block: block_num(EthereumHardfork::ArrowGlacier), + gray_glacier_block: block_num(EthereumHardfork::GrayGlacier), + merge_netsplit_block: None, + shanghai_time: timestamp(EthereumHardfork::Shanghai), + cancun_time: timestamp(EthereumHardfork::Cancun), + prague_time: timestamp(EthereumHardfork::Prague), + osaka_time: timestamp(EthereumHardfork::Osaka), + bpo1_time: timestamp(EthereumHardfork::Bpo1), + bpo2_time: timestamp(EthereumHardfork::Bpo2), + bpo3_time: timestamp(EthereumHardfork::Bpo3), + bpo4_time: timestamp(EthereumHardfork::Bpo4), + bpo5_time: timestamp(EthereumHardfork::Bpo5), + terminal_total_difficulty, + terminal_total_difficulty_passed, + ethash: None, + clique: None, + parlia: None, + extra_fields: Default::default(), + deposit_contract_address, + blob_schedule, + } +} + +/// Returns a [`ChainConfig`] for the current Ethereum mainnet chain. +pub fn mainnet_chain_config() -> ChainConfig { + let hardforks: ChainHardforks = EthereumHardfork::mainnet().into(); + let blob_schedule = blob_params_to_schedule(&MAINNET.blob_params, &hardforks); + create_chain_config( + Some(Chain::mainnet()), + &hardforks, + Some(MAINNET_DEPOSIT_CONTRACT.address), + blob_schedule, + ) +} + +/// Converts the given [`BlobScheduleBlobParams`] into blobs schedule. 
+pub fn blob_params_to_schedule( + params: &BlobScheduleBlobParams, + hardforks: &ChainHardforks, +) -> BTreeMap { + let mut schedule = BTreeMap::new(); + schedule.insert("cancun".to_string(), params.cancun); + schedule.insert("prague".to_string(), params.prague); + schedule.insert("osaka".to_string(), params.osaka); + + // Map scheduled entries back to bpo fork names by matching timestamps + let bpo_forks = EthereumHardfork::bpo_variants(); + for (timestamp, blob_params) in ¶ms.scheduled { + for bpo_fork in bpo_forks { + if let ForkCondition::Timestamp(fork_ts) = hardforks.fork(bpo_fork) && + fork_ts == *timestamp + { + schedule.insert(bpo_fork.name().to_lowercase(), *blob_params); + break; + } + } + } + + schedule +} + /// A wrapper around [`BaseFeeParams`] that allows for specifying constant or dynamic EIP-1559 /// parameters based on the active [Hardfork]. #[derive(Clone, Debug, PartialEq, Eq)] @@ -440,7 +557,26 @@ impl ChainSpec { /// Returns the hardfork display helper. pub fn display_hardforks(&self) -> DisplayHardforks { - DisplayHardforks::new(self.hardforks.forks_iter()) + // Create an iterator with hardfork, condition, and optional blob metadata + let hardforks_with_meta = self.hardforks.forks_iter().map(|(fork, condition)| { + // Generate blob metadata for timestamp-based hardforks that have blob params + let metadata = match condition { + ForkCondition::Timestamp(timestamp) => { + // Try to get blob params for this timestamp + // This automatically handles all hardforks with blob support + EthChainSpec::blob_params_at_timestamp(self, timestamp).map(|params| { + format!( + "blob: (target: {}, max: {}, fraction: {})", + params.target_blob_count, params.max_blob_count, params.update_fraction + ) + }) + } + _ => None, + }; + (fork, condition, metadata) + }); + + DisplayHardforks::with_meta(hardforks_with_meta) } /// Get the fork id for the given hardfork. @@ -492,8 +628,15 @@ impl ChainSpec { /// Compute the [`ForkId`] for the given [`Head`] following eip-6122 spec. /// - /// Note: In case there are multiple hardforks activated at the same block or timestamp, only - /// the first gets applied. + /// The fork hash is computed by starting from the genesis hash and iteratively adding + /// block numbers (for block-based forks) or timestamps (for timestamp-based forks) of + /// active forks. The `next` field indicates the next fork activation point, or `0` if + /// all forks are active. + /// + /// Block-based forks are processed first, then timestamp-based forks. Multiple hardforks + /// activated at the same block or timestamp: only the first one is applied. + /// + /// See: pub fn fork_id(&self, head: &Head) -> ForkId { let mut forkhash = ForkHash::from(self.genesis_hash()); @@ -550,6 +693,10 @@ impl ChainSpec { } /// An internal helper function that returns a head block that satisfies a given Fork condition. + /// + /// Creates a [`Head`] representation for a fork activation point, used by [`Self::fork_id`] to + /// compute fork IDs. For timestamp-based forks, includes the last block-based fork number + /// before the merge (if any). pub(crate) fn satisfy(&self, cond: ForkCondition) -> Head { match cond { ForkCondition::Block(number) => Head { number, ..Default::default() }, @@ -823,7 +970,7 @@ impl EthereumHardforks for ChainSpec { /// A trait for reading the current chainspec. #[auto_impl::auto_impl(&, Arc)] -pub trait ChainSpecProvider: Debug + Send + Sync { +pub trait ChainSpecProvider: Debug + Send { /// The chain spec type. 
type ChainSpec: EthChainSpec + 'static; @@ -883,7 +1030,7 @@ impl ChainSpecBuilder { /// Remove the given fork from the spec. pub fn without_fork(mut self, fork: H) -> Self { - self.hardforks.remove(fork); + self.hardforks.remove(&fork); self } @@ -903,9 +1050,16 @@ impl ChainSpecBuilder { self } + /// Enable Dao at genesis. + pub fn dao_activated(mut self) -> Self { + self = self.frontier_activated(); + self.hardforks.insert(EthereumHardfork::Dao, ForkCondition::Block(0)); + self + } + /// Enable Homestead at genesis. pub fn homestead_activated(mut self) -> Self { - self = self.frontier_activated(); + self = self.dao_activated(); self.hardforks.insert(EthereumHardfork::Homestead, ForkCondition::Block(0)); self } @@ -952,9 +1106,16 @@ impl ChainSpecBuilder { self } + /// Enable Muir Glacier at genesis. + pub fn muirglacier_activated(mut self) -> Self { + self = self.istanbul_activated(); + self.hardforks.insert(EthereumHardfork::MuirGlacier, ForkCondition::Block(0)); + self + } + /// Enable Berlin at genesis. pub fn berlin_activated(mut self) -> Self { - self = self.istanbul_activated(); + self = self.muirglacier_activated(); self.hardforks.insert(EthereumHardfork::Berlin, ForkCondition::Block(0)); self } @@ -966,9 +1127,23 @@ impl ChainSpecBuilder { self } + /// Enable Arrow Glacier at genesis. + pub fn arrowglacier_activated(mut self) -> Self { + self = self.london_activated(); + self.hardforks.insert(EthereumHardfork::ArrowGlacier, ForkCondition::Block(0)); + self + } + + /// Enable Gray Glacier at genesis. + pub fn grayglacier_activated(mut self) -> Self { + self = self.arrowglacier_activated(); + self.hardforks.insert(EthereumHardfork::GrayGlacier, ForkCondition::Block(0)); + self + } + /// Enable Paris at genesis. pub fn paris_activated(mut self) -> Self { - self = self.london_activated(); + self = self.grayglacier_activated(); self.hardforks.insert( EthereumHardfork::Paris, ForkCondition::TTD { @@ -1157,8 +1332,11 @@ Merge hard forks: - Paris @58750000000000000000000 (network is known to be merged) Post-merge hard forks (timestamp based): - Shanghai @1681338455 -- Cancun @1710338135 -- Prague @1746612311" +- Cancun @1710338135 blob: (target: 3, max: 6, fraction: 3338477) +- Prague @1746612311 blob: (target: 6, max: 9, fraction: 5007716) +- Osaka @1764798551 blob: (target: 6, max: 9, fraction: 5007716) +- Bpo1 @1765290071 blob: (target: 10, max: 15, fraction: 8346193) +- Bpo2 @1767747671 blob: (target: 14, max: 21, fraction: 11684671)" ); } @@ -1338,71 +1516,74 @@ Post-merge hard forks (timestamp based): &[ ( EthereumHardfork::Frontier, - ForkId { hash: ForkHash([0xfc, 0x64, 0xec, 0x04]), next: 1150000 }, + ForkId { hash: ForkHash(hex!("0xfc64ec04")), next: 1150000 }, ), ( EthereumHardfork::Homestead, - ForkId { hash: ForkHash([0x97, 0xc2, 0xc3, 0x4c]), next: 1920000 }, + ForkId { hash: ForkHash(hex!("0x97c2c34c")), next: 1920000 }, ), ( EthereumHardfork::Dao, - ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 }, + ForkId { hash: ForkHash(hex!("0x91d1f948")), next: 2463000 }, ), ( EthereumHardfork::Tangerine, - ForkId { hash: ForkHash([0x7a, 0x64, 0xda, 0x13]), next: 2675000 }, + ForkId { hash: ForkHash(hex!("0x7a64da13")), next: 2675000 }, ), ( EthereumHardfork::SpuriousDragon, - ForkId { hash: ForkHash([0x3e, 0xdd, 0x5b, 0x10]), next: 4370000 }, + ForkId { hash: ForkHash(hex!("0x3edd5b10")), next: 4370000 }, ), ( EthereumHardfork::Byzantium, - ForkId { hash: ForkHash([0xa0, 0x0b, 0xc3, 0x24]), next: 7280000 }, + ForkId { hash: ForkHash(hex!("0xa00bc324")), next: 
7280000 }, ), ( EthereumHardfork::Constantinople, - ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 }, + ForkId { hash: ForkHash(hex!("0x668db0af")), next: 9069000 }, ), ( EthereumHardfork::Petersburg, - ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 }, + ForkId { hash: ForkHash(hex!("0x668db0af")), next: 9069000 }, ), ( EthereumHardfork::Istanbul, - ForkId { hash: ForkHash([0x87, 0x9d, 0x6e, 0x30]), next: 9200000 }, + ForkId { hash: ForkHash(hex!("0x879d6e30")), next: 9200000 }, ), ( EthereumHardfork::MuirGlacier, - ForkId { hash: ForkHash([0xe0, 0x29, 0xe9, 0x91]), next: 12244000 }, + ForkId { hash: ForkHash(hex!("0xe029e991")), next: 12244000 }, ), ( EthereumHardfork::Berlin, - ForkId { hash: ForkHash([0x0e, 0xb4, 0x40, 0xf6]), next: 12965000 }, + ForkId { hash: ForkHash(hex!("0x0eb440f6")), next: 12965000 }, ), ( EthereumHardfork::London, - ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 13773000 }, + ForkId { hash: ForkHash(hex!("0xb715077d")), next: 13773000 }, ), ( EthereumHardfork::ArrowGlacier, - ForkId { hash: ForkHash([0x20, 0xc3, 0x27, 0xfc]), next: 15050000 }, + ForkId { hash: ForkHash(hex!("0x20c327fc")), next: 15050000 }, ), ( EthereumHardfork::GrayGlacier, - ForkId { hash: ForkHash([0xf0, 0xaf, 0xd0, 0xe3]), next: 1681338455 }, + ForkId { hash: ForkHash(hex!("0xf0afd0e3")), next: 1681338455 }, ), ( EthereumHardfork::Shanghai, - ForkId { hash: ForkHash([0xdc, 0xe9, 0x6c, 0x2d]), next: 1710338135 }, + ForkId { hash: ForkHash(hex!("0xdce96c2d")), next: 1710338135 }, ), ( EthereumHardfork::Cancun, - ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 1746612311 }, + ForkId { hash: ForkHash(hex!("0x9f3d2254")), next: 1746612311 }, ), ( EthereumHardfork::Prague, - ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, + ForkId { + hash: ForkHash(hex!("0xc376cf8b")), + next: mainnet::MAINNET_OSAKA_TIMESTAMP, + }, ), ], ); @@ -1415,60 +1596,60 @@ Post-merge hard forks (timestamp based): &[ ( EthereumHardfork::Frontier, - ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, + ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 }, ), ( EthereumHardfork::Homestead, - ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, + ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 }, ), ( EthereumHardfork::Tangerine, - ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, + ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 }, ), ( EthereumHardfork::SpuriousDragon, - ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, + ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 }, ), ( EthereumHardfork::Byzantium, - ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, + ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 }, ), ( EthereumHardfork::Constantinople, - ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, + ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 }, ), ( EthereumHardfork::Petersburg, - ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, + ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 }, ), ( EthereumHardfork::Istanbul, - ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, + ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 }, ), ( EthereumHardfork::Berlin, - ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, + ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 }, ), ( EthereumHardfork::London, - ForkId { hash: 
ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, + ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 }, ), ( EthereumHardfork::Paris, - ForkId { hash: ForkHash([0xb9, 0x6c, 0xbd, 0x13]), next: 1677557088 }, + ForkId { hash: ForkHash(hex!("0xb96cbd13")), next: 1677557088 }, ), ( EthereumHardfork::Shanghai, - ForkId { hash: ForkHash([0xf7, 0xf9, 0xbc, 0x08]), next: 1706655072 }, + ForkId { hash: ForkHash(hex!("0xf7f9bc08")), next: 1706655072 }, ), ( EthereumHardfork::Cancun, - ForkId { hash: ForkHash([0x88, 0xcf, 0x81, 0xd9]), next: 1741159776 }, + ForkId { hash: ForkHash(hex!("0x88cf81d9")), next: 1741159776 }, ), ( EthereumHardfork::Prague, ForkId { - hash: ForkHash([0xed, 0x88, 0xb5, 0xfd]), + hash: ForkHash(hex!("0xed88b5fd")), next: sepolia::SEPOLIA_OSAKA_TIMESTAMP, }, ), @@ -1483,75 +1664,85 @@ Post-merge hard forks (timestamp based): &[ ( Head { number: 0, ..Default::default() }, - ForkId { hash: ForkHash([0xfc, 0x64, 0xec, 0x04]), next: 1150000 }, + ForkId { hash: ForkHash(hex!("0xfc64ec04")), next: 1150000 }, ), ( Head { number: 1150000, ..Default::default() }, - ForkId { hash: ForkHash([0x97, 0xc2, 0xc3, 0x4c]), next: 1920000 }, + ForkId { hash: ForkHash(hex!("0x97c2c34c")), next: 1920000 }, ), ( Head { number: 1920000, ..Default::default() }, - ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 }, + ForkId { hash: ForkHash(hex!("0x91d1f948")), next: 2463000 }, ), ( Head { number: 2463000, ..Default::default() }, - ForkId { hash: ForkHash([0x7a, 0x64, 0xda, 0x13]), next: 2675000 }, + ForkId { hash: ForkHash(hex!("0x7a64da13")), next: 2675000 }, ), ( Head { number: 2675000, ..Default::default() }, - ForkId { hash: ForkHash([0x3e, 0xdd, 0x5b, 0x10]), next: 4370000 }, + ForkId { hash: ForkHash(hex!("0x3edd5b10")), next: 4370000 }, ), ( Head { number: 4370000, ..Default::default() }, - ForkId { hash: ForkHash([0xa0, 0x0b, 0xc3, 0x24]), next: 7280000 }, + ForkId { hash: ForkHash(hex!("0xa00bc324")), next: 7280000 }, ), ( Head { number: 7280000, ..Default::default() }, - ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 }, + ForkId { hash: ForkHash(hex!("0x668db0af")), next: 9069000 }, ), ( Head { number: 9069000, ..Default::default() }, - ForkId { hash: ForkHash([0x87, 0x9d, 0x6e, 0x30]), next: 9200000 }, + ForkId { hash: ForkHash(hex!("0x879d6e30")), next: 9200000 }, ), ( Head { number: 9200000, ..Default::default() }, - ForkId { hash: ForkHash([0xe0, 0x29, 0xe9, 0x91]), next: 12244000 }, + ForkId { hash: ForkHash(hex!("0xe029e991")), next: 12244000 }, ), ( Head { number: 12244000, ..Default::default() }, - ForkId { hash: ForkHash([0x0e, 0xb4, 0x40, 0xf6]), next: 12965000 }, + ForkId { hash: ForkHash(hex!("0x0eb440f6")), next: 12965000 }, ), ( Head { number: 12965000, ..Default::default() }, - ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 13773000 }, + ForkId { hash: ForkHash(hex!("0xb715077d")), next: 13773000 }, ), ( Head { number: 13773000, ..Default::default() }, - ForkId { hash: ForkHash([0x20, 0xc3, 0x27, 0xfc]), next: 15050000 }, + ForkId { hash: ForkHash(hex!("0x20c327fc")), next: 15050000 }, ), ( Head { number: 15050000, ..Default::default() }, - ForkId { hash: ForkHash([0xf0, 0xaf, 0xd0, 0xe3]), next: 1681338455 }, + ForkId { hash: ForkHash(hex!("0xf0afd0e3")), next: 1681338455 }, ), // First Shanghai block ( Head { number: 20000000, timestamp: 1681338455, ..Default::default() }, - ForkId { hash: ForkHash([0xdc, 0xe9, 0x6c, 0x2d]), next: 1710338135 }, + ForkId { hash: ForkHash(hex!("0xdce96c2d")), next: 1710338135 }, ), // 
First Cancun block ( Head { number: 20000001, timestamp: 1710338135, ..Default::default() }, - ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 1746612311 }, + ForkId { hash: ForkHash(hex!("0x9f3d2254")), next: 1746612311 }, ), // First Prague block ( - Head { number: 20000002, timestamp: 1746612311, ..Default::default() }, - ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, + Head { number: 20000004, timestamp: 1746612311, ..Default::default() }, + ForkId { + hash: ForkHash(hex!("0xc376cf8b")), + next: mainnet::MAINNET_OSAKA_TIMESTAMP, + }, ), - // Future Prague block + // Osaka block ( - Head { number: 20000002, timestamp: 2000000000, ..Default::default() }, - ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, + Head { + number: 20000004, + timestamp: mainnet::MAINNET_OSAKA_TIMESTAMP, + ..Default::default() + }, + ForkId { + hash: ForkHash(hex!("0x5167e2a6")), + next: mainnet::MAINNET_BPO1_TIMESTAMP, + }, ), ], ); @@ -1564,13 +1755,13 @@ Post-merge hard forks (timestamp based): &[ ( Head { number: 0, ..Default::default() }, - ForkId { hash: ForkHash([0xbe, 0xf7, 0x1d, 0x30]), next: 1742999832 }, + ForkId { hash: ForkHash(hex!("0xbef71d30")), next: 1742999832 }, ), // First Prague block ( Head { number: 0, timestamp: 1742999833, ..Default::default() }, ForkId { - hash: ForkHash([0x09, 0x29, 0xe2, 0x4e]), + hash: ForkHash(hex!("0x0929e24e")), next: hoodi::HOODI_OSAKA_TIMESTAMP, }, ), @@ -1597,43 +1788,43 @@ Post-merge hard forks (timestamp based): &[ ( Head { number: 0, ..Default::default() }, - ForkId { hash: ForkHash([0xc6, 0x1a, 0x60, 0x98]), next: 1696000704 }, + ForkId { hash: ForkHash(hex!("0xc61a6098")), next: 1696000704 }, ), // First MergeNetsplit block ( Head { number: 123, ..Default::default() }, - ForkId { hash: ForkHash([0xc6, 0x1a, 0x60, 0x98]), next: 1696000704 }, + ForkId { hash: ForkHash(hex!("0xc61a6098")), next: 1696000704 }, ), // Last MergeNetsplit block ( Head { number: 123, timestamp: 1696000703, ..Default::default() }, - ForkId { hash: ForkHash([0xc6, 0x1a, 0x60, 0x98]), next: 1696000704 }, + ForkId { hash: ForkHash(hex!("0xc61a6098")), next: 1696000704 }, ), // First Shanghai block ( Head { number: 123, timestamp: 1696000704, ..Default::default() }, - ForkId { hash: ForkHash([0xfd, 0x4f, 0x01, 0x6b]), next: 1707305664 }, + ForkId { hash: ForkHash(hex!("0xfd4f016b")), next: 1707305664 }, ), // Last Shanghai block ( Head { number: 123, timestamp: 1707305663, ..Default::default() }, - ForkId { hash: ForkHash([0xfd, 0x4f, 0x01, 0x6b]), next: 1707305664 }, + ForkId { hash: ForkHash(hex!("0xfd4f016b")), next: 1707305664 }, ), // First Cancun block ( Head { number: 123, timestamp: 1707305664, ..Default::default() }, - ForkId { hash: ForkHash([0x9b, 0x19, 0x2a, 0xd0]), next: 1740434112 }, + ForkId { hash: ForkHash(hex!("0x9b192ad0")), next: 1740434112 }, ), // Last Cancun block ( Head { number: 123, timestamp: 1740434111, ..Default::default() }, - ForkId { hash: ForkHash([0x9b, 0x19, 0x2a, 0xd0]), next: 1740434112 }, + ForkId { hash: ForkHash(hex!("0x9b192ad0")), next: 1740434112 }, ), // First Prague block ( Head { number: 123, timestamp: 1740434112, ..Default::default() }, ForkId { - hash: ForkHash([0xdf, 0xbd, 0x9b, 0xed]), + hash: ForkHash(hex!("0xdfbd9bed")), next: holesky::HOLESKY_OSAKA_TIMESTAMP, }, ), @@ -1660,45 +1851,45 @@ Post-merge hard forks (timestamp based): &[ ( Head { number: 0, ..Default::default() }, - ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, + ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 
1735371 }, ), ( Head { number: 1735370, ..Default::default() }, - ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, + ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 }, ), ( Head { number: 1735371, ..Default::default() }, - ForkId { hash: ForkHash([0xb9, 0x6c, 0xbd, 0x13]), next: 1677557088 }, + ForkId { hash: ForkHash(hex!("0xb96cbd13")), next: 1677557088 }, ), ( Head { number: 1735372, timestamp: 1677557087, ..Default::default() }, - ForkId { hash: ForkHash([0xb9, 0x6c, 0xbd, 0x13]), next: 1677557088 }, + ForkId { hash: ForkHash(hex!("0xb96cbd13")), next: 1677557088 }, ), // First Shanghai block ( Head { number: 1735373, timestamp: 1677557088, ..Default::default() }, - ForkId { hash: ForkHash([0xf7, 0xf9, 0xbc, 0x08]), next: 1706655072 }, + ForkId { hash: ForkHash(hex!("0xf7f9bc08")), next: 1706655072 }, ), // Last Shanghai block ( Head { number: 1735374, timestamp: 1706655071, ..Default::default() }, - ForkId { hash: ForkHash([0xf7, 0xf9, 0xbc, 0x08]), next: 1706655072 }, + ForkId { hash: ForkHash(hex!("0xf7f9bc08")), next: 1706655072 }, ), // First Cancun block ( Head { number: 1735375, timestamp: 1706655072, ..Default::default() }, - ForkId { hash: ForkHash([0x88, 0xcf, 0x81, 0xd9]), next: 1741159776 }, + ForkId { hash: ForkHash(hex!("0x88cf81d9")), next: 1741159776 }, ), // Last Cancun block ( Head { number: 1735376, timestamp: 1741159775, ..Default::default() }, - ForkId { hash: ForkHash([0x88, 0xcf, 0x81, 0xd9]), next: 1741159776 }, + ForkId { hash: ForkHash(hex!("0x88cf81d9")), next: 1741159776 }, ), // First Prague block ( Head { number: 1735377, timestamp: 1741159776, ..Default::default() }, ForkId { - hash: ForkHash([0xed, 0x88, 0xb5, 0xfd]), + hash: ForkHash(hex!("0xed88b5fd")), next: sepolia::SEPOLIA_OSAKA_TIMESTAMP, }, ), @@ -1724,7 +1915,7 @@ Post-merge hard forks (timestamp based): &DEV, &[( Head { number: 0, ..Default::default() }, - ForkId { hash: ForkHash([0x0b, 0x1a, 0x4e, 0xf7]), next: 0 }, + ForkId { hash: ForkHash(hex!("0x0b1a4ef7")), next: 0 }, )], ) } @@ -1740,131 +1931,142 @@ Post-merge hard forks (timestamp based): &[ ( Head { number: 0, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0xfc, 0x64, 0xec, 0x04]), next: 1150000 }, + ForkId { hash: ForkHash(hex!("0xfc64ec04")), next: 1150000 }, ), // Unsynced ( Head { number: 1149999, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0xfc, 0x64, 0xec, 0x04]), next: 1150000 }, + ForkId { hash: ForkHash(hex!("0xfc64ec04")), next: 1150000 }, ), // Last Frontier block ( Head { number: 1150000, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0x97, 0xc2, 0xc3, 0x4c]), next: 1920000 }, + ForkId { hash: ForkHash(hex!("0x97c2c34c")), next: 1920000 }, ), // First Homestead block ( Head { number: 1919999, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0x97, 0xc2, 0xc3, 0x4c]), next: 1920000 }, + ForkId { hash: ForkHash(hex!("0x97c2c34c")), next: 1920000 }, ), // Last Homestead block ( Head { number: 1920000, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 }, + ForkId { hash: ForkHash(hex!("0x91d1f948")), next: 2463000 }, ), // First DAO block ( Head { number: 2462999, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 }, + ForkId { hash: ForkHash(hex!("0x91d1f948")), next: 2463000 }, ), // Last DAO block ( Head { number: 2463000, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0x7a, 0x64, 0xda, 0x13]), next: 
2675000 }, + ForkId { hash: ForkHash(hex!("0x7a64da13")), next: 2675000 }, ), // First Tangerine block ( Head { number: 2674999, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0x7a, 0x64, 0xda, 0x13]), next: 2675000 }, + ForkId { hash: ForkHash(hex!("0x7a64da13")), next: 2675000 }, ), // Last Tangerine block ( Head { number: 2675000, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0x3e, 0xdd, 0x5b, 0x10]), next: 4370000 }, + ForkId { hash: ForkHash(hex!("0x3edd5b10")), next: 4370000 }, ), // First Spurious block ( Head { number: 4369999, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0x3e, 0xdd, 0x5b, 0x10]), next: 4370000 }, + ForkId { hash: ForkHash(hex!("0x3edd5b10")), next: 4370000 }, ), // Last Spurious block ( Head { number: 4370000, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0xa0, 0x0b, 0xc3, 0x24]), next: 7280000 }, + ForkId { hash: ForkHash(hex!("0xa00bc324")), next: 7280000 }, ), // First Byzantium block ( Head { number: 7279999, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0xa0, 0x0b, 0xc3, 0x24]), next: 7280000 }, + ForkId { hash: ForkHash(hex!("0xa00bc324")), next: 7280000 }, ), // Last Byzantium block ( Head { number: 7280000, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 }, + ForkId { hash: ForkHash(hex!("0x668db0af")), next: 9069000 }, ), // First and last Constantinople, first Petersburg block ( Head { number: 9068999, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 }, + ForkId { hash: ForkHash(hex!("0x668db0af")), next: 9069000 }, ), // Last Petersburg block ( Head { number: 9069000, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0x87, 0x9d, 0x6e, 0x30]), next: 9200000 }, + ForkId { hash: ForkHash(hex!("0x879d6e30")), next: 9200000 }, ), // First Istanbul and first Muir Glacier block ( Head { number: 9199999, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0x87, 0x9d, 0x6e, 0x30]), next: 9200000 }, + ForkId { hash: ForkHash(hex!("0x879d6e30")), next: 9200000 }, ), // Last Istanbul and first Muir Glacier block ( Head { number: 9200000, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0xe0, 0x29, 0xe9, 0x91]), next: 12244000 }, + ForkId { hash: ForkHash(hex!("0xe029e991")), next: 12244000 }, ), // First Muir Glacier block ( Head { number: 12243999, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0xe0, 0x29, 0xe9, 0x91]), next: 12244000 }, + ForkId { hash: ForkHash(hex!("0xe029e991")), next: 12244000 }, ), // Last Muir Glacier block ( Head { number: 12244000, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0x0e, 0xb4, 0x40, 0xf6]), next: 12965000 }, + ForkId { hash: ForkHash(hex!("0x0eb440f6")), next: 12965000 }, ), // First Berlin block ( Head { number: 12964999, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0x0e, 0xb4, 0x40, 0xf6]), next: 12965000 }, + ForkId { hash: ForkHash(hex!("0x0eb440f6")), next: 12965000 }, ), // Last Berlin block ( Head { number: 12965000, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 13773000 }, + ForkId { hash: ForkHash(hex!("0xb715077d")), next: 13773000 }, ), // First London block ( Head { number: 13772999, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 13773000 }, + ForkId { hash: ForkHash(hex!("0xb715077d")), next: 13773000 }, ), // 
Last London block ( Head { number: 13773000, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0x20, 0xc3, 0x27, 0xfc]), next: 15050000 }, + ForkId { hash: ForkHash(hex!("0x20c327fc")), next: 15050000 }, ), // First Arrow Glacier block ( Head { number: 15049999, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0x20, 0xc3, 0x27, 0xfc]), next: 15050000 }, + ForkId { hash: ForkHash(hex!("0x20c327fc")), next: 15050000 }, ), // Last Arrow Glacier block ( Head { number: 15050000, timestamp: 0, ..Default::default() }, - ForkId { hash: ForkHash([0xf0, 0xaf, 0xd0, 0xe3]), next: 1681338455 }, + ForkId { hash: ForkHash(hex!("0xf0afd0e3")), next: 1681338455 }, ), // First Gray Glacier block ( Head { number: 19999999, timestamp: 1667999999, ..Default::default() }, - ForkId { hash: ForkHash([0xf0, 0xaf, 0xd0, 0xe3]), next: 1681338455 }, + ForkId { hash: ForkHash(hex!("0xf0afd0e3")), next: 1681338455 }, ), // Last Gray Glacier block ( Head { number: 20000000, timestamp: 1681338455, ..Default::default() }, - ForkId { hash: ForkHash([0xdc, 0xe9, 0x6c, 0x2d]), next: 1710338135 }, + ForkId { hash: ForkHash(hex!("0xdce96c2d")), next: 1710338135 }, ), // Last Shanghai block ( Head { number: 20000001, timestamp: 1710338134, ..Default::default() }, - ForkId { hash: ForkHash([0xdc, 0xe9, 0x6c, 0x2d]), next: 1710338135 }, + ForkId { hash: ForkHash(hex!("0xdce96c2d")), next: 1710338135 }, ), // First Cancun block ( Head { number: 20000002, timestamp: 1710338135, ..Default::default() }, - ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 1746612311 }, + ForkId { hash: ForkHash(hex!("0x9f3d2254")), next: 1746612311 }, ), // Last Cancun block ( Head { number: 20000003, timestamp: 1746612310, ..Default::default() }, - ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 1746612311 }, + ForkId { hash: ForkHash(hex!("0x9f3d2254")), next: 1746612311 }, ), // First Prague block ( Head { number: 20000004, timestamp: 1746612311, ..Default::default() }, - ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, - ), // Future Prague block + ForkId { + hash: ForkHash(hex!("0xc376cf8b")), + next: mainnet::MAINNET_OSAKA_TIMESTAMP, + }, + ), + // Osaka block ( - Head { number: 20000004, timestamp: 2000000000, ..Default::default() }, - ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, + Head { + number: 20000004, + timestamp: mainnet::MAINNET_OSAKA_TIMESTAMP, + ..Default::default() + }, + ForkId { + hash: ForkHash(hex!("0x5167e2a6")), + next: mainnet::MAINNET_BPO1_TIMESTAMP, + }, ), ], ); @@ -2320,7 +2522,7 @@ Post-merge hard forks (timestamp based): let chainspec = ChainSpec::from(genesis); // make sure we are at ForkHash("bc0c2605") with Head post-cancun - let expected_forkid = ForkId { hash: ForkHash([0xbc, 0x0c, 0x26, 0x05]), next: 0 }; + let expected_forkid = ForkId { hash: ForkHash(hex!("0xbc0c2605")), next: 0 }; let got_forkid = chainspec.fork_id(&Head { number: 73, timestamp: 840, ..Default::default() }); @@ -2430,7 +2632,7 @@ Post-merge hard forks (timestamp based): assert_eq!(genesis_hash, expected_hash); // check that the forkhash is correct - let expected_forkhash = ForkHash(hex!("8062457a")); + let expected_forkhash = ForkHash(hex!("0x8062457a")); assert_eq!(ForkHash::from(genesis_hash), expected_forkhash); } @@ -2521,10 +2723,8 @@ Post-merge hard forks (timestamp based): #[test] fn latest_eth_mainnet_fork_id() { - assert_eq!( - ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 }, - MAINNET.latest_fork_id() - ) + // BPO2 + assert_eq!(ForkId { hash: 
ForkHash(hex!("0x07c9462e")), next: 0 }, MAINNET.latest_fork_id()) } #[test] diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index da1a5318f2..137340cdbd 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -49,6 +49,7 @@ reth-stages.workspace = true reth-stages-types = { workspace = true, optional = true } reth-static-file-types = { workspace = true, features = ["clap"] } reth-static-file.workspace = true +reth-tasks.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-trie-db = { workspace = true, features = ["metrics"] } reth-trie-common.workspace = true @@ -82,6 +83,7 @@ backon.workspace = true secp256k1 = { workspace = true, features = ["global-context", "std", "recovery"] } tokio-stream.workspace = true reqwest.workspace = true +metrics.workspace = true # io fdlimit.workspace = true diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 5b8cfce771..77c962f085 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -1,5 +1,7 @@ //! Contains common `reth` arguments +pub use reth_primitives_traits::header::HeaderMut; + use alloy_primitives::B256; use clap::Parser; use reth_chainspec::EthChainSpec; @@ -7,7 +9,7 @@ use reth_cli::chainspec::ChainSpecParser; use reth_config::{config::EtlConfig, Config}; use reth_consensus::noop::NoopConsensus; use reth_db::{init_db, open_db_read_only, DatabaseEnv}; -use reth_db_common::init::init_genesis; +use reth_db_common::init::init_genesis_with_settings; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_eth_wire::NetPrimitivesFor; use reth_evm::{noop::NoopEvmConfig, ConfigureEvm}; @@ -17,11 +19,14 @@ use reth_node_builder::{ Node, NodeComponents, NodeComponentsBuilder, NodeTypes, NodeTypesWithDBAdapter, }; use reth_node_core::{ - args::{DatabaseArgs, DatadirArgs}, + args::{DatabaseArgs, DatadirArgs, StaticFilesArgs}, dirs::{ChainPath, DataDirPath}, }; use reth_provider::{ - providers::{BlockchainProvider, NodeTypesForProvider, StaticFileProvider}, + providers::{ + BlockchainProvider, NodeTypesForProvider, RocksDBProvider, StaticFileProvider, + StaticFileProviderBuilder, + }, ProviderFactory, StaticFileProviderFactory, }; use reth_stages::{sets::DefaultStages, Pipeline, PipelineTarget}; @@ -57,6 +62,10 @@ pub struct EnvironmentArgs { /// All database related arguments #[command(flatten)] pub db: DatabaseArgs, + + /// All static files related arguments + #[command(flatten)] + pub static_files: StaticFilesArgs, } impl EnvironmentArgs { @@ -69,10 +78,12 @@ impl EnvironmentArgs { let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain()); let db_path = data_dir.db(); let sf_path = data_dir.static_files(); + let rocksdb_path = data_dir.rocksdb(); if access.is_read_write() { reth_fs_util::create_dir_all(&db_path)?; reth_fs_util::create_dir_all(&sf_path)?; + reth_fs_util::create_dir_all(&rocksdb_path)?; } let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); @@ -92,21 +103,35 @@ impl EnvironmentArgs { } info!(target: "reth::cli", ?db_path, ?sf_path, "Opening storage"); + let genesis_block_number = self.chain.genesis().number.unwrap_or_default(); let (db, sfp) = match access { AccessRights::RW => ( Arc::new(init_db(db_path, self.db.database_args())?), - StaticFileProvider::read_write(sf_path)?, - ), - AccessRights::RO => ( - Arc::new(open_db_read_only(&db_path, self.db.database_args())?), - 
StaticFileProvider::read_only(sf_path, false)?, + StaticFileProviderBuilder::read_write(sf_path)? + .with_genesis_block_number(genesis_block_number) + .build()?, ), + AccessRights::RO | AccessRights::RoInconsistent => { + (Arc::new(open_db_read_only(&db_path, self.db.database_args())?), { + let provider = StaticFileProviderBuilder::read_only(sf_path)? + .with_genesis_block_number(genesis_block_number) + .build()?; + provider.watch_directory(); + provider + }) + } }; + // TransactionDB only support read-write mode + let rocksdb_provider = RocksDBProvider::builder(data_dir.rocksdb()) + .with_default_tables() + .with_database_log_level(self.db.log_level) + .build()?; - let provider_factory = self.create_provider_factory(&config, db, sfp)?; + let provider_factory = + self.create_provider_factory(&config, db, sfp, rocksdb_provider, access)?; if access.is_read_write() { debug!(target: "reth::cli", chain=%self.chain.chain(), genesis=?self.chain.genesis_hash(), "Initializing genesis"); - init_genesis(&provider_factory)?; + init_genesis_with_settings(&provider_factory, self.static_files.to_settings())?; } Ok(Environment { config, provider_factory, data_dir }) @@ -122,23 +147,25 @@ impl EnvironmentArgs { config: &Config, db: Arc, static_file_provider: StaticFileProvider, + rocksdb_provider: RocksDBProvider, + access: AccessRights, ) -> eyre::Result>>> where C: ChainSpecParser, { - let has_receipt_pruning = config.prune.has_receipts_pruning(); let prune_modes = config.prune.segments.clone(); let factory = ProviderFactory::>>::new( db, self.chain.clone(), static_file_provider, - ) + rocksdb_provider, + )? .with_prune_modes(prune_modes.clone()); // Check for consistency between database and static files. - if let Some(unwind_target) = factory - .static_file_provider() - .check_consistency(&factory.provider()?, has_receipt_pruning)? + if !access.is_read_only_inconsistent() && + let Some(unwind_target) = + factory.static_file_provider().check_consistency(&factory.provider()?)? { if factory.db_ref().is_read_only()? { warn!(target: "reth::cli", ?unwind_target, "Inconsistent storage. Restart node to heal."); @@ -199,6 +226,8 @@ pub enum AccessRights { RW, /// Read-only access RO, + /// Read-only access with possibly inconsistent data + RoInconsistent, } impl AccessRights { @@ -206,6 +235,12 @@ impl AccessRights { pub const fn is_read_write(&self) -> bool { matches!(self, Self::RW) } + + /// Returns `true` if it requires read-only access to the environment with possibly inconsistent + /// data. + pub const fn is_read_only_inconsistent(&self) -> bool { + matches!(self, Self::RoInconsistent) + } } /// Helper alias to satisfy `FullNodeTypes` bound on [`Node`] trait generic. @@ -215,17 +250,6 @@ type FullTypesAdapter = FullNodeTypesAdapter< BlockchainProvider>>, >; -/// Trait for block headers that can be modified through CLI operations. -pub trait CliHeader { - fn set_number(&mut self, number: u64); -} - -impl CliHeader for alloy_consensus::Header { - fn set_number(&mut self, number: u64) { - self.number = number; - } -} - /// Helper trait with a common set of requirements for the /// [`NodeTypes`] in CLI. 
pub trait CliNodeTypes: Node> + NodeTypesForProvider { diff --git a/crates/cli/commands/src/config_cmd.rs b/crates/cli/commands/src/config_cmd.rs index f3a24e267c..e12f468fac 100644 --- a/crates/cli/commands/src/config_cmd.rs +++ b/crates/cli/commands/src/config_cmd.rs @@ -22,13 +22,14 @@ impl Command { let config = if self.default { Config::default() } else { - let path = self.config.clone().unwrap_or_default(); - // Check if the file exists + let path = match self.config.as_ref() { + Some(path) => path, + None => bail!("No config file provided. Use --config or pass --default"), + }; if !path.exists() { bail!("Config file does not exist: {}", path.display()); } - // Read the configuration file - Config::from_path(&path) + Config::from_path(path) .wrap_err_with(|| format!("Could not load config file: {}", path.display()))? }; println!("{}", toml::to_string_pretty(&config)?); diff --git a/crates/cli/commands/src/db/account_storage.rs b/crates/cli/commands/src/db/account_storage.rs new file mode 100644 index 0000000000..f01fcce9c0 --- /dev/null +++ b/crates/cli/commands/src/db/account_storage.rs @@ -0,0 +1,92 @@ +use alloy_primitives::{keccak256, Address}; +use clap::Parser; +use human_bytes::human_bytes; +use reth_codecs::Compact; +use reth_db_api::{cursor::DbDupCursorRO, database::Database, tables, transaction::DbTx}; +use reth_db_common::DbTool; +use reth_node_builder::NodeTypesWithDB; +use std::time::{Duration, Instant}; +use tracing::info; + +/// Log progress every 5 seconds +const LOG_INTERVAL: Duration = Duration::from_secs(5); + +/// The arguments for the `reth db account-storage` command +#[derive(Parser, Debug)] +pub struct Command { + /// The account address to check storage for + address: Address, +} + +impl Command { + /// Execute `db account-storage` command + pub fn execute(self, tool: &DbTool) -> eyre::Result<()> { + let address = self.address; + let (slot_count, plain_size) = tool.provider_factory.db_ref().view(|tx| { + let mut cursor = tx.cursor_dup_read::()?; + let mut count = 0usize; + let mut total_value_bytes = 0usize; + let mut last_log = Instant::now(); + + // Walk all storage entries for this address + let walker = cursor.walk_dup(Some(address), None)?; + for entry in walker { + let (_, storage_entry) = entry?; + count += 1; + // StorageEntry encodes as: 32 bytes (key/subkey uncompressed) + compressed U256 + let mut buf = Vec::new(); + let entry_len = storage_entry.to_compact(&mut buf); + total_value_bytes += entry_len; + + if last_log.elapsed() >= LOG_INTERVAL { + info!( + target: "reth::cli", + address = %address, + slots = count, + key = %storage_entry.key, + "Processing storage slots" + ); + last_log = Instant::now(); + } + } + + // Add 20 bytes for the Address key (stored once per account in dupsort) + let total_size = if count > 0 { 20 + total_value_bytes } else { 0 }; + + Ok::<_, eyre::Report>((count, total_size)) + })??; + + // Estimate hashed storage size: 32-byte B256 key instead of 20-byte Address + let hashed_size_estimate = if slot_count > 0 { plain_size + 12 } else { 0 }; + let total_estimate = plain_size + hashed_size_estimate; + + let hashed_address = keccak256(address); + + println!("Account: {address}"); + println!("Hashed address: {hashed_address}"); + println!("Storage slots: {slot_count}"); + println!("Plain storage size: {} (estimated)", human_bytes(plain_size as f64)); + println!("Hashed storage size: {} (estimated)", human_bytes(hashed_size_estimate as f64)); + println!("Total estimated size: {}", human_bytes(total_estimate as f64)); + + 
Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_address_arg() { + let cmd = Command::try_parse_from([ + "account-storage", + "0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045", + ]) + .unwrap(); + assert_eq!( + cmd.address, + "0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045".parse::
().unwrap() + ); + } +} diff --git a/crates/cli/commands/src/db/clear.rs b/crates/cli/commands/src/db/clear.rs index fc3852154f..4ba0a63df8 100644 --- a/crates/cli/commands/src/db/clear.rs +++ b/crates/cli/commands/src/db/clear.rs @@ -6,8 +6,9 @@ use reth_db_api::{ transaction::{DbTx, DbTxMut}, TableViewer, Tables, }; +use reth_db_common::DbTool; use reth_node_builder::NodeTypesWithDB; -use reth_provider::{ProviderFactory, StaticFileProviderFactory}; +use reth_provider::StaticFileProviderFactory; use reth_static_file_types::StaticFileSegment; /// The arguments for the `reth db clear` command @@ -19,16 +20,13 @@ pub struct Command { impl Command { /// Execute `db clear` command - pub fn execute( - self, - provider_factory: ProviderFactory, - ) -> eyre::Result<()> { + pub fn execute(self, tool: &DbTool) -> eyre::Result<()> { match self.subcommand { Subcommands::Mdbx { table } => { - table.view(&ClearViewer { db: provider_factory.db_ref() })? + table.view(&ClearViewer { db: tool.provider_factory.db_ref() })? } Subcommands::StaticFile { segment } => { - let static_file_provider = provider_factory.static_file_provider(); + let static_file_provider = tool.provider_factory.static_file_provider(); let static_files = iter_static_files(static_file_provider.directory())?; if let Some(segment_static_files) = static_files.get(&segment) { diff --git a/crates/cli/commands/src/db/get.rs b/crates/cli/commands/src/db/get.rs index 9d06a35dca..8f78873b7a 100644 --- a/crates/cli/commands/src/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -3,16 +3,22 @@ use clap::Parser; use reth_db::{ static_file::{ ColumnSelectorOne, ColumnSelectorTwo, HeaderWithHashMask, ReceiptMask, TransactionMask, + TransactionSenderMask, }, RawDupSort, }; use reth_db_api::{ - table::{Decompress, DupSort, Table}, - tables, RawKey, RawTable, Receipts, TableViewer, Transactions, + cursor::{DbCursorRO, DbDupCursorRO}, + database::Database, + table::{Compress, Decompress, DupSort, Table}, + tables, + transaction::DbTx, + RawKey, RawTable, Receipts, TableViewer, Transactions, }; use reth_db_common::DbTool; use reth_node_api::{HeaderTy, ReceiptTy, TxTy}; use reth_node_builder::NodeTypesWithDB; +use reth_primitives_traits::ValueWithSubKey; use reth_provider::{providers::ProviderNodeTypes, StaticFileProviderFactory}; use reth_static_file_types::StaticFileSegment; use tracing::error; @@ -38,6 +44,14 @@ enum Subcommand { #[arg(value_parser = maybe_json_value_parser)] subkey: Option, + /// Optional end key for range query (exclusive upper bound) + #[arg(value_parser = maybe_json_value_parser)] + end_key: Option, + + /// Optional end subkey for range query (exclusive upper bound) + #[arg(value_parser = maybe_json_value_parser)] + end_subkey: Option, + /// Output bytes instead of human-readable decoded value #[arg(long)] raw: bool, @@ -60,8 +74,8 @@ impl Command { /// Execute `db get` command pub fn execute(self, tool: &DbTool) -> eyre::Result<()> { match self.subcommand { - Subcommand::Mdbx { table, key, subkey, raw } => { - table.view(&GetValueViewer { tool, key, subkey, raw })? + Subcommand::Mdbx { table, key, subkey, end_key, end_subkey, raw } => { + table.view(&GetValueViewer { tool, key, subkey, end_key, end_subkey, raw })? 
} Subcommand::StaticFile { segment, key, raw } => { let (key, mask): (u64, _) = match segment { @@ -75,19 +89,21 @@ impl Command { StaticFileSegment::Receipts => { (table_key::(&key)?, >>::MASK) } + StaticFileSegment::TransactionSenders => ( + table_key::(&key)?, + ::MASK, + ), }; - let content = tool.provider_factory.static_file_provider().find_static_file( - segment, - |provider| { - let mut cursor = provider.cursor()?; - cursor.get(key.into(), mask).map(|result| { - result.map(|vec| { - vec.iter().map(|slice| slice.to_vec()).collect::>() - }) - }) - }, - )?; + let content = tool + .provider_factory + .static_file_provider() + .get_segment_provider(segment, key)? + .cursor()? + .get(key.into(), mask) + .map(|result| { + result.map(|vec| vec.iter().map(|slice| slice.to_vec()).collect::>()) + })?; match content { Some(content) => { @@ -116,6 +132,13 @@ impl Command { )?; println!("{}", serde_json::to_string_pretty(&receipt)?); } + StaticFileSegment::TransactionSenders => { + let sender = + <::Value>::decompress( + content[0].as_slice(), + )?; + println!("{}", serde_json::to_string_pretty(&sender)?); + } } } } @@ -144,6 +167,8 @@ struct GetValueViewer<'a, N: NodeTypesWithDB> { tool: &'a DbTool, key: String, subkey: Option, + end_key: Option, + end_subkey: Option, raw: bool, } @@ -153,53 +178,158 @@ impl TableViewer<()> for GetValueViewer<'_, N> { fn view(&self) -> Result<(), Self::Error> { let key = table_key::(&self.key)?; - let content = if self.raw { - self.tool - .get::>(RawKey::from(key))? - .map(|content| hex::encode_prefixed(content.raw_value())) - } else { - self.tool.get::(key)?.as_ref().map(serde_json::to_string_pretty).transpose()? - }; + // A non-dupsort table cannot have subkeys. The `subkey` arg becomes the `end_key`. First we + // check that `end_key` and `end_subkey` weren't previously given, as that wouldn't be + // valid. + if self.end_key.is_some() || self.end_subkey.is_some() { + return Err(eyre::eyre!("Only END_KEY can be given for non-DUPSORT tables")); + } - match content { - Some(content) => { - println!("{content}"); - } - None => { - error!(target: "reth::cli", "No content for the given table key."); - } - }; + let end_key = self.subkey.clone(); + + // Check if we're doing a range query + if let Some(ref end_key_str) = end_key { + let end_key = table_key::(end_key_str)?; + + // Use walk_range to iterate over the range + self.tool.provider_factory.db_ref().view(|tx| { + let mut cursor = tx.cursor_read::()?; + let walker = cursor.walk_range(key..end_key)?; + + for result in walker { + let (k, v) = result?; + let json_val = if self.raw { + let raw_key = RawKey::from(k); + serde_json::json!({ + "key": hex::encode_prefixed(raw_key.raw_key()), + "val": hex::encode_prefixed(v.compress().as_ref()), + }) + } else { + serde_json::json!({ + "key": &k, + "val": &v, + }) + }; + + println!("{}", serde_json::to_string_pretty(&json_val)?); + } + + Ok::<_, eyre::Report>(()) + })??; + } else { + // Single key lookup + let content = if self.raw { + self.tool + .get::>(RawKey::from(key))? + .map(|content| hex::encode_prefixed(content.raw_value())) + } else { + self.tool.get::(key)?.as_ref().map(serde_json::to_string_pretty).transpose()? 
+ }; + + match content { + Some(content) => { + println!("{content}"); + } + None => { + error!(target: "reth::cli", "No content for the given table key."); + } + }; + } Ok(()) } - fn view_dupsort(&self) -> Result<(), Self::Error> { + fn view_dupsort(&self) -> Result<(), Self::Error> + where + T::Value: reth_primitives_traits::ValueWithSubKey, + { // get a key for given table let key = table_key::(&self.key)?; - // process dupsort table - let subkey = table_subkey::(self.subkey.as_deref())?; - - let content = if self.raw { - self.tool - .get_dup::>(RawKey::from(key), RawKey::from(subkey))? - .map(|content| hex::encode_prefixed(content.raw_value())) - } else { - self.tool - .get_dup::(key, subkey)? + // Check if we're doing a range query + if let Some(ref end_key_str) = self.end_key { + let end_key = table_key::(end_key_str)?; + let start_subkey = table_subkey::(Some( + self.subkey.as_ref().expect("must have been given if end_key is given").as_str(), + ))?; + let end_subkey_parsed = self + .end_subkey .as_ref() - .map(serde_json::to_string_pretty) - .transpose()? - }; + .map(|s| table_subkey::(Some(s.as_str()))) + .transpose()?; - match content { - Some(content) => { - println!("{content}"); - } - None => { - error!(target: "reth::cli", "No content for the given table subkey."); - } - }; + self.tool.provider_factory.db_ref().view(|tx| { + let mut cursor = tx.cursor_dup_read::()?; + + // Seek to the starting key. If there is actually a key at the starting key then + // seek to the subkey within it. + if let Some((decoded_key, _)) = cursor.seek(key.clone())? && + decoded_key == key + { + cursor.seek_by_key_subkey(key.clone(), start_subkey.clone())?; + } + + // Get the current position to start iteration + let mut current = cursor.current()?; + + while let Some((decoded_key, decoded_value)) = current { + // Extract the subkey using the ValueWithSubKey trait + let decoded_subkey = decoded_value.get_subkey(); + + // Check if we've reached the end (exclusive) + if (&decoded_key, Some(&decoded_subkey)) >= + (&end_key, end_subkey_parsed.as_ref()) + { + break; + } + + // Output the entry with both key and subkey + let json_val = if self.raw { + let raw_key = RawKey::from(decoded_key.clone()); + serde_json::json!({ + "key": hex::encode_prefixed(raw_key.raw_key()), + "val": hex::encode_prefixed(decoded_value.compress().as_ref()), + }) + } else { + serde_json::json!({ + "key": &decoded_key, + "val": &decoded_value, + }) + }; + + println!("{}", serde_json::to_string_pretty(&json_val)?); + + // Move to next entry + current = cursor.next()?; + } + + Ok::<_, eyre::Report>(()) + })??; + } else { + // Single key/subkey lookup + let subkey = table_subkey::(self.subkey.as_deref())?; + + let content = if self.raw { + self.tool + .get_dup::>(RawKey::from(key), RawKey::from(subkey))? + .map(|content| hex::encode_prefixed(content.raw_value())) + } else { + self.tool + .get_dup::(key, subkey)? + .as_ref() + .map(serde_json::to_string_pretty) + .transpose()? 
+ }; + + match content { + Some(content) => { + println!("{content}"); + } + None => { + error!(target: "reth::cli", "No content for the given table subkey."); + } + }; + } Ok(()) } } diff --git a/crates/cli/commands/src/db/list.rs b/crates/cli/commands/src/db/list.rs index 2540e77c11..5d6c055c94 100644 --- a/crates/cli/commands/src/db/list.rs +++ b/crates/cli/commands/src/db/list.rs @@ -3,7 +3,7 @@ use alloy_primitives::hex; use clap::Parser; use eyre::WrapErr; use reth_chainspec::EthereumHardforks; -use reth_db::DatabaseEnv; +use reth_db::{transaction::DbTx, DatabaseEnv}; use reth_db_api::{database::Database, table::Table, RawValue, TableViewer, Tables}; use reth_db_common::{DbTool, ListFilter}; use reth_node_builder::{NodeTypes, NodeTypesWithDBAdapter}; @@ -96,6 +96,9 @@ impl TableViewer<()> for ListTableViewer<'_, N> { fn view(&self) -> Result<(), Self::Error> { self.tool.provider_factory.db_ref().view(|tx| { + // We may be using the tui for a long time + tx.disable_long_read_transaction_safety(); + let table_db = tx.inner.open_db(Some(self.args.table.name())).wrap_err("Could not open db.")?; let stats = tx.inner.db_stat(&table_db).wrap_err(format!("Could not find table: {}", self.args.table.name()))?; let total_entries = stats.entries(); diff --git a/crates/cli/commands/src/db/mod.rs b/crates/cli/commands/src/db/mod.rs index 1ea66b2f55..d27afab79c 100644 --- a/crates/cli/commands/src/db/mod.rs +++ b/crates/cli/commands/src/db/mod.rs @@ -2,18 +2,22 @@ use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::{Parser, Subcommand}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; +use reth_cli_runner::CliContext; use reth_db::version::{get_db_version, DatabaseVersionError, DB_VERSION}; use reth_db_common::DbTool; use std::{ io::{self, Write}, sync::Arc, }; +mod account_storage; mod checksum; mod clear; mod diff; mod get; mod list; mod repair_trie; +mod settings; +mod static_file_header; mod stats; /// DB List TUI mod tui; @@ -51,16 +55,23 @@ pub enum Subcommands { Clear(clear::Command), /// Verifies trie consistency and outputs any inconsistencies RepairTrie(repair_trie::Command), + /// Reads and displays the static file segment header + StaticFileHeader(static_file_header::Command), /// Lists current and local database versions Version, /// Returns the full database path Path, + /// Manage storage settings + Settings(settings::Command), + /// Gets storage size information for an account + AccountStorage(account_storage::Command), } -/// `db_ro_exec` opens a database in read-only mode, and then execute with the provided command -macro_rules! db_ro_exec { - ($env:expr, $tool:ident, $N:ident, $command:block) => { - let Environment { provider_factory, .. } = $env.init::<$N>(AccessRights::RO)?; +/// Initializes a provider factory with specified access rights, and then execute with the provided +/// command +macro_rules! db_exec { + ($env:expr, $tool:ident, $N:ident, $access_rights:expr, $command:block) => { + let Environment { provider_factory, .. } = $env.init::<$N>($access_rights)?; let $tool = DbTool::new(provider_factory)?; $command; @@ -69,7 +80,10 @@ macro_rules! 
db_ro_exec { impl> Command { /// Execute `db` command - pub async fn execute>(self) -> eyre::Result<()> { + pub async fn execute>( + self, + ctx: CliContext, + ) -> eyre::Result<()> { let data_dir = self.env.datadir.clone().resolve_datadir(self.env.chain.chain()); let db_path = data_dir.db(); let static_files_path = data_dir.static_files(); @@ -88,27 +102,32 @@ impl> Command match self.command { // TODO: We'll need to add this on the DB trait. Subcommands::Stats(command) => { - db_ro_exec!(self.env, tool, N, { + let access_rights = if command.skip_consistency_checks { + AccessRights::RoInconsistent + } else { + AccessRights::RO + }; + db_exec!(self.env, tool, N, access_rights, { command.execute(data_dir, &tool)?; }); } Subcommands::List(command) => { - db_ro_exec!(self.env, tool, N, { + db_exec!(self.env, tool, N, AccessRights::RO, { command.execute(&tool)?; }); } Subcommands::Checksum(command) => { - db_ro_exec!(self.env, tool, N, { + db_exec!(self.env, tool, N, AccessRights::RO, { command.execute(&tool)?; }); } Subcommands::Diff(command) => { - db_ro_exec!(self.env, tool, N, { + db_exec!(self.env, tool, N, AccessRights::RO, { command.execute(&tool)?; }); } Subcommands::Get(command) => { - db_ro_exec!(self.env, tool, N, { + db_exec!(self.env, tool, N, AccessRights::RO, { command.execute(&tool)?; }); } @@ -130,19 +149,26 @@ impl> Command } } - let Environment { provider_factory, .. } = self.env.init::(AccessRights::RW)?; - let tool = DbTool::new(provider_factory)?; - tool.drop(db_path, static_files_path, exex_wal_path)?; + db_exec!(self.env, tool, N, AccessRights::RW, { + tool.drop(db_path, static_files_path, exex_wal_path)?; + }); } Subcommands::Clear(command) => { - let Environment { provider_factory, .. } = self.env.init::(AccessRights::RW)?; - command.execute(provider_factory)?; + db_exec!(self.env, tool, N, AccessRights::RW, { + command.execute(&tool)?; + }); } Subcommands::RepairTrie(command) => { let access_rights = if command.dry_run { AccessRights::RO } else { AccessRights::RW }; - let Environment { provider_factory, .. 
} = self.env.init::(access_rights)?; - command.execute(provider_factory)?; + db_exec!(self.env, tool, N, access_rights, { + command.execute(&tool, ctx.task_executor.clone())?; + }); + } + Subcommands::StaticFileHeader(command) => { + db_exec!(self.env, tool, N, AccessRights::RoInconsistent, { + command.execute(&tool)?; + }); } Subcommands::Version => { let local_db_version = match get_db_version(&db_path) { @@ -162,6 +188,16 @@ impl> Command Subcommands::Path => { println!("{}", db_path.display()); } + Subcommands::Settings(command) => { + db_exec!(self.env, tool, N, command.access_rights(), { + command.execute(&tool)?; + }); + } + Subcommands::AccountStorage(command) => { + db_exec!(self.env, tool, N, AccessRights::RO, { + command.execute(&tool)?; + }); + } } Ok(()) diff --git a/crates/cli/commands/src/db/repair_trie.rs b/crates/cli/commands/src/db/repair_trie.rs index f7dea67b76..3ccda64afb 100644 --- a/crates/cli/commands/src/db/repair_trie.rs +++ b/crates/cli/commands/src/db/repair_trie.rs @@ -1,20 +1,34 @@ use clap::Parser; +use metrics::{self, Counter}; +use reth_chainspec::EthChainSpec; +use reth_cli_util::parse_socket_address; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, database::Database, tables, transaction::{DbTx, DbTxMut}, }; -use reth_node_builder::NodeTypesWithDB; -use reth_provider::{providers::ProviderNodeTypes, ProviderFactory, StageCheckpointReader}; +use reth_db_common::DbTool; +use reth_node_core::version::version_metadata; +use reth_node_metrics::{ + chain::ChainSpecInfo, + hooks::Hooks, + server::{MetricServer, MetricServerConfig}, + version::VersionInfo, +}; +use reth_provider::{providers::ProviderNodeTypes, ChainSpecProvider, StageCheckpointReader}; use reth_stages::StageId; +use reth_tasks::TaskExecutor; use reth_trie::{ verify::{Output, Verifier}, Nibbles, }; use reth_trie_common::{StorageTrieEntry, StoredNibbles, StoredNibblesSubKey}; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; -use std::time::{Duration, Instant}; +use std::{ + net::SocketAddr, + time::{Duration, Instant}, +}; use tracing::{info, warn}; const PROGRESS_PERIOD: Duration = Duration::from_secs(5); @@ -25,27 +39,74 @@ pub struct Command { /// Only show inconsistencies without making any repairs #[arg(long)] pub(crate) dry_run: bool, + + /// Enable Prometheus metrics. + /// + /// The metrics will be served at the given interface and port. + #[arg(long = "metrics", value_name = "ADDR:PORT", value_parser = parse_socket_address)] + pub(crate) metrics: Option, } impl Command { /// Execute `db repair-trie` command pub fn execute( self, - provider_factory: ProviderFactory, + tool: &DbTool, + task_executor: TaskExecutor, ) -> eyre::Result<()> { - if self.dry_run { - verify_only(provider_factory)? 
+ // Set up metrics server if requested + let _metrics_handle = if let Some(listen_addr) = self.metrics { + let chain_name = tool.provider_factory.chain_spec().chain().to_string(); + let executor = task_executor.clone(); + + let handle = task_executor.spawn_critical("metrics server", async move { + let config = MetricServerConfig::new( + listen_addr, + VersionInfo { + version: version_metadata().cargo_pkg_version.as_ref(), + build_timestamp: version_metadata().vergen_build_timestamp.as_ref(), + cargo_features: version_metadata().vergen_cargo_features.as_ref(), + git_sha: version_metadata().vergen_git_sha.as_ref(), + target_triple: version_metadata().vergen_cargo_target_triple.as_ref(), + build_profile: version_metadata().build_profile_name.as_ref(), + }, + ChainSpecInfo { name: chain_name }, + executor, + Hooks::builder().build(), + ); + + // Spawn the metrics server + if let Err(e) = MetricServer::new(config).serve().await { + tracing::error!("Metrics server error: {}", e); + } + }); + + Some(handle) } else { - verify_and_repair(provider_factory)? + None + }; + + if self.dry_run { + verify_only(tool)? + } else { + verify_and_repair(tool)? } Ok(()) } } -fn verify_only(provider_factory: ProviderFactory) -> eyre::Result<()> { +fn verify_only(tool: &DbTool) -> eyre::Result<()> { + // Log the database block tip from Finish stage checkpoint + let finish_checkpoint = tool + .provider_factory + .provider()? + .get_stage_checkpoint(StageId::Finish)? + .unwrap_or_default(); + info!("Database block tip: {}", finish_checkpoint.block_number); + // Get a database transaction directly from the database - let db = provider_factory.db_ref(); + let db = tool.provider_factory.db_ref(); let mut tx = db.tx()?; tx.disable_long_read_transaction_safety(); @@ -54,6 +115,8 @@ fn verify_only(provider_factory: ProviderFactory) -> eyre let trie_cursor_factory = DatabaseTrieCursorFactory::new(&tx); let verifier = Verifier::new(&trie_cursor_factory, hashed_cursor_factory)?; + let metrics = RepairTrieMetrics::new(); + let mut inconsistent_nodes = 0; let start_time = Instant::now(); let mut last_progress_time = Instant::now(); @@ -70,6 +133,21 @@ fn verify_only(provider_factory: ProviderFactory) -> eyre } else { warn!("Inconsistency found: {output:?}"); inconsistent_nodes += 1; + + // Record metrics based on output type + match output { + Output::AccountExtra(_, _) | + Output::AccountWrong { .. } | + Output::AccountMissing(_, _) => { + metrics.account_inconsistencies.increment(1); + } + Output::StorageExtra(_, _, _) | + Output::StorageWrong { .. } | + Output::StorageMissing(_, _, _) => { + metrics.storage_inconsistencies.increment(1); + } + Output::Progress(_) => unreachable!(), + } } } @@ -114,11 +192,13 @@ fn verify_checkpoints(provider: impl StageCheckpointReader) -> eyre::Result<()> Ok(()) } -fn verify_and_repair( - provider_factory: ProviderFactory, -) -> eyre::Result<()> { +fn verify_and_repair(tool: &DbTool) -> eyre::Result<()> { // Get a read-write database provider - let mut provider_rw = provider_factory.provider_rw()?; + let mut provider_rw = tool.provider_factory.provider_rw()?; + + // Log the database block tip from Finish stage checkpoint + let finish_checkpoint = provider_rw.get_stage_checkpoint(StageId::Finish)?.unwrap_or_default(); + info!("Database block tip: {}", finish_checkpoint.block_number); // Check that a pipeline sync isn't in progress. 
verify_checkpoints(provider_rw.as_ref())?; @@ -138,6 +218,8 @@ fn verify_and_repair( // Create the verifier let verifier = Verifier::new(&trie_cursor_factory, hashed_cursor_factory)?; + let metrics = RepairTrieMetrics::new(); + let mut inconsistent_nodes = 0; let start_time = Instant::now(); let mut last_progress_time = Instant::now(); @@ -149,6 +231,21 @@ fn verify_and_repair( if !matches!(output, Output::Progress(_)) { warn!("Inconsistency found, will repair: {output:?}"); inconsistent_nodes += 1; + + // Record metrics based on output type + match &output { + Output::AccountExtra(_, _) | + Output::AccountWrong { .. } | + Output::AccountMissing(_, _) => { + metrics.account_inconsistencies.increment(1); + } + Output::StorageExtra(_, _, _) | + Output::StorageWrong { .. } | + Output::StorageMissing(_, _, _) => { + metrics.storage_inconsistencies.increment(1); + } + Output::Progress(_) => {} + } } match output { @@ -247,3 +344,25 @@ fn output_progress(last_account: Nibbles, start_time: Instant, inconsistent_node "Repairing trie tables", ); } + +/// Metrics for tracking trie repair inconsistencies +#[derive(Debug)] +struct RepairTrieMetrics { + account_inconsistencies: Counter, + storage_inconsistencies: Counter, +} + +impl RepairTrieMetrics { + fn new() -> Self { + Self { + account_inconsistencies: metrics::counter!( + "db.repair_trie.inconsistencies_found", + "type" => "account" + ), + storage_inconsistencies: metrics::counter!( + "db.repair_trie.inconsistencies_found", + "type" => "storage" + ), + } + } +} diff --git a/crates/cli/commands/src/db/settings.rs b/crates/cli/commands/src/db/settings.rs new file mode 100644 index 0000000000..e8a4152075 --- /dev/null +++ b/crates/cli/commands/src/db/settings.rs @@ -0,0 +1,127 @@ +//! `reth db settings` command for managing storage settings + +use clap::{ArgAction, Parser, Subcommand}; +use reth_db_common::DbTool; +use reth_provider::{ + providers::ProviderNodeTypes, DBProvider, DatabaseProviderFactory, MetadataProvider, + MetadataWriter, StorageSettings, +}; + +use crate::common::AccessRights; + +/// `reth db settings` subcommand +#[derive(Debug, Parser)] +pub struct Command { + #[command(subcommand)] + command: Subcommands, +} + +impl Command { + /// Returns database access rights required for the command. 
+ pub fn access_rights(&self) -> AccessRights { + match self.command { + Subcommands::Get => AccessRights::RO, + Subcommands::Set(_) => AccessRights::RW, + } + } +} + +#[derive(Debug, Clone, Copy, Subcommand)] +enum Subcommands { + /// Get current storage settings from database + Get, + /// Set storage settings in database + #[clap(subcommand)] + Set(SetCommand), +} + +/// Set storage settings +#[derive(Debug, Clone, Copy, Subcommand)] +#[clap(rename_all = "snake_case")] +pub enum SetCommand { + /// Store receipts in static files instead of the database + ReceiptsInStaticFiles { + #[clap(action(ArgAction::Set))] + value: bool, + }, + /// Store transaction senders in static files instead of the database + TransactionSendersInStaticFiles { + #[clap(action(ArgAction::Set))] + value: bool, + }, +} + +impl Command { + /// Execute the command + pub fn execute(self, tool: &DbTool) -> eyre::Result<()> { + match self.command { + Subcommands::Get => self.get(tool), + Subcommands::Set(cmd) => self.set(cmd, tool), + } + } + + fn get(&self, tool: &DbTool) -> eyre::Result<()> { + // Read storage settings + let provider = tool.provider_factory.provider()?; + let storage_settings = provider.storage_settings()?; + + // Display settings + match storage_settings { + Some(settings) => { + println!("Current storage settings:"); + println!("{settings:#?}"); + } + None => { + println!("No storage settings found."); + } + } + + Ok(()) + } + + fn set(&self, cmd: SetCommand, tool: &DbTool) -> eyre::Result<()> { + // Read storage settings + let provider_rw = tool.provider_factory.database_provider_rw()?; + // Destruct settings struct to not miss adding support for new fields + let settings = provider_rw.storage_settings()?; + if settings.is_none() { + println!("No storage settings found, creating new settings."); + } + + let mut settings @ StorageSettings { + receipts_in_static_files: _, + transaction_senders_in_static_files: _, + storages_history_in_rocksdb: _, + transaction_hash_numbers_in_rocksdb: _, + account_history_in_rocksdb: _, + } = settings.unwrap_or_else(StorageSettings::legacy); + + // Update the setting based on the key + match cmd { + SetCommand::ReceiptsInStaticFiles { value } => { + if settings.receipts_in_static_files == value { + println!("receipts_in_static_files is already set to {}", value); + return Ok(()); + } + settings.receipts_in_static_files = value; + println!("Set receipts_in_static_files = {}", value); + } + SetCommand::TransactionSendersInStaticFiles { value } => { + if settings.transaction_senders_in_static_files == value { + println!("transaction_senders_in_static_files is already set to {}", value); + return Ok(()); + } + settings.transaction_senders_in_static_files = value; + println!("Set transaction_senders_in_static_files = {}", value); + } + } + + // Write updated settings + provider_rw.write_storage_settings(settings)?; + provider_rw.commit()?; + + println!("Storage settings updated successfully."); + + Ok(()) + } +} diff --git a/crates/cli/commands/src/db/static_file_header.rs b/crates/cli/commands/src/db/static_file_header.rs new file mode 100644 index 0000000000..4c0ff27464 --- /dev/null +++ b/crates/cli/commands/src/db/static_file_header.rs @@ -0,0 +1,63 @@ +use clap::{Parser, Subcommand}; +use reth_db_common::DbTool; +use reth_provider::{providers::ProviderNodeTypes, StaticFileProviderFactory}; +use reth_static_file_types::StaticFileSegment; +use std::path::PathBuf; +use tracing::warn; + +/// The arguments for the `reth db static-file-header` command +#[derive(Parser, 
Debug)] +pub struct Command { + #[command(subcommand)] + source: Source, +} + +/// Source for locating the static file +#[derive(Subcommand, Debug)] +enum Source { + /// Query by segment and block number + Block { + /// Static file segment + #[arg(value_enum)] + segment: StaticFileSegment, + /// Block number to query + block: u64, + }, + /// Query by path to static file + Path { + /// Path to the static file + path: PathBuf, + }, +} + +impl Command { + /// Execute `db static-file-header` command + pub fn execute(self, tool: &DbTool) -> eyre::Result<()> { + let static_file_provider = tool.provider_factory.static_file_provider(); + if let Err(err) = static_file_provider.check_consistency(&tool.provider_factory.provider()?) + { + warn!("Error checking consistency of static files: {err}"); + } + + // Get the provider based on the source + let provider = match self.source { + Source::Path { path } => { + static_file_provider.get_segment_provider_for_path(&path)?.ok_or_else(|| { + eyre::eyre!("Could not find static file segment for path: {}", path.display()) + })? + } + Source::Block { segment, block } => { + static_file_provider.get_segment_provider(segment, block)? + } + }; + + let header = provider.user_header(); + + println!("Segment: {}", header.segment()); + println!("Expected Block Range: {}", header.expected_block_range()); + println!("Block Range: {:?}", header.block_range()); + println!("Transaction Range: {:?}", header.tx_range()); + + Ok(()) + } +} diff --git a/crates/cli/commands/src/db/stats.rs b/crates/cli/commands/src/db/stats.rs index c8398d795c..e225b2f991 100644 --- a/crates/cli/commands/src/db/stats.rs +++ b/crates/cli/commands/src/db/stats.rs @@ -18,6 +18,10 @@ use std::{sync::Arc, time::Duration}; #[derive(Parser, Debug)] /// The arguments for the `reth db stats` command pub struct Command { + /// Skip consistency checks for static files. + #[arg(long, default_value_t = false)] + pub(crate) skip_consistency_checks: bool, + /// Show only the total size for static files. #[arg(long, default_value_t = false)] detailed_sizes: bool, @@ -191,10 +195,11 @@ impl Command { mut segment_config_size, ) = (0, 0, 0, 0, 0, 0); - for (block_range, tx_range) in &ranges { - let fixed_block_range = static_file_provider.find_fixed_range(block_range.start()); + for (block_range, header) in &ranges { + let fixed_block_range = + static_file_provider.find_fixed_range(segment, block_range.start()); let jar_provider = static_file_provider - .get_segment_provider(segment, || Some(fixed_block_range), None)? + .get_segment_provider_for_range(segment, || Some(fixed_block_range), None)? 
.ok_or_else(|| { eyre::eyre!("Failed to get segment provider for segment: {}", segment) })?; @@ -220,7 +225,7 @@ impl Command { row.add_cell(Cell::new(segment)) .add_cell(Cell::new(format!("{block_range}"))) .add_cell(Cell::new( - tx_range.map_or("N/A".to_string(), |tx_range| format!("{tx_range}")), + header.tx_range().map_or("N/A".to_string(), |range| format!("{range}")), )) .add_cell(Cell::new(format!("{columns} x {rows}"))); if self.detailed_sizes { @@ -270,10 +275,12 @@ impl Command { let tx_range = { let start = ranges .iter() - .find_map(|(_, tx_range)| tx_range.map(|r| r.start())) + .find_map(|(_, header)| header.tx_range().map(|range| range.start())) .unwrap_or_default(); - let end = - ranges.iter().rev().find_map(|(_, tx_range)| tx_range.map(|r| r.end())); + let end = ranges + .iter() + .rev() + .find_map(|(_, header)| header.tx_range().map(|range| range.end())); end.map(|end| SegmentRangeInclusive::new(start, end)) }; diff --git a/crates/cli/commands/src/download.rs b/crates/cli/commands/src/download.rs index 8f09dc9b89..20bc7081f0 100644 --- a/crates/cli/commands/src/download.rs +++ b/crates/cli/commands/src/download.rs @@ -7,9 +7,10 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_fs_util as fs; use std::{ + borrow::Cow, io::{self, Read, Write}, path::Path, - sync::Arc, + sync::{Arc, OnceLock}, time::{Duration, Instant}, }; use tar::Archive; @@ -22,24 +23,109 @@ const MERKLE_BASE_URL: &str = "https://downloads.merkle.io"; const EXTENSION_TAR_LZ4: &str = ".tar.lz4"; const EXTENSION_TAR_ZSTD: &str = ".tar.zst"; +/// Global static download defaults +static DOWNLOAD_DEFAULTS: OnceLock = OnceLock::new(); + +/// Download configuration defaults +/// +/// Global defaults can be set via [`DownloadDefaults::try_init`]. +#[derive(Debug, Clone)] +pub struct DownloadDefaults { + /// List of available snapshot sources + pub available_snapshots: Vec>, + /// Default base URL for snapshots + pub default_base_url: Cow<'static, str>, + /// Optional custom long help text that overrides the generated help + pub long_help: Option, +} + +impl DownloadDefaults { + /// Initialize the global download defaults with this configuration + pub fn try_init(self) -> Result<(), Self> { + DOWNLOAD_DEFAULTS.set(self) + } + + /// Get a reference to the global download defaults + pub fn get_global() -> &'static DownloadDefaults { + DOWNLOAD_DEFAULTS.get_or_init(DownloadDefaults::default_download_defaults) + } + + /// Default download configuration with defaults from merkle.io and publicnode + pub fn default_download_defaults() -> Self { + Self { + available_snapshots: vec![ + Cow::Borrowed("https://www.merkle.io/snapshots (default, mainnet archive)"), + Cow::Borrowed("https://publicnode.com/snapshots (full nodes & testnets)"), + ], + default_base_url: Cow::Borrowed(MERKLE_BASE_URL), + long_help: None, + } + } + + /// Generates the long help text for the download URL argument using these defaults. + /// + /// If a custom long_help is set, it will be returned. Otherwise, help text is generated + /// from the available_snapshots list. 
+ pub fn long_help(&self) -> String { + if let Some(ref custom_help) = self.long_help { + return custom_help.clone(); + } + + let mut help = String::from( + "Specify a snapshot URL or let the command propose a default one.\n\nAvailable snapshot sources:\n", + ); + + for source in &self.available_snapshots { + help.push_str("- "); + help.push_str(source); + help.push('\n'); + } + + help.push_str( + "\nIf no URL is provided, the latest mainnet archive snapshot\nwill be proposed for download from ", + ); + help.push_str(self.default_base_url.as_ref()); + help + } + + /// Add a snapshot source to the list + pub fn with_snapshot(mut self, source: impl Into>) -> Self { + self.available_snapshots.push(source.into()); + self + } + + /// Replace all snapshot sources + pub fn with_snapshots(mut self, sources: Vec>) -> Self { + self.available_snapshots = sources; + self + } + + /// Set the default base URL, e.g. `https://downloads.merkle.io`. + pub fn with_base_url(mut self, url: impl Into>) -> Self { + self.default_base_url = url.into(); + self + } + + /// Builder: Set custom long help text, overriding the generated help + pub fn with_long_help(mut self, help: impl Into) -> Self { + self.long_help = Some(help.into()); + self + } +} + +impl Default for DownloadDefaults { + fn default() -> Self { + Self::default_download_defaults() + } +} + #[derive(Debug, Parser)] pub struct DownloadCommand { #[command(flatten)] env: EnvironmentArgs, - #[arg( - long, - short, - help = "Custom URL to download the snapshot from", - long_help = "Specify a snapshot URL or let the command propose a default one.\n\ - \n\ - Available snapshot sources:\n\ - - https://www.merkle.io/snapshots (default, mainnet archive)\n\ - - https://publicnode.com/snapshots (full nodes & testnets)\n\ - \n\ - If no URL is provided, the latest mainnet archive snapshot\n\ - will be proposed for download from merkle.io" - )] + /// Custom URL to download the snapshot from + #[arg(long, short, long_help = DownloadDefaults::get_global().long_help())] url: Option, } @@ -207,9 +293,10 @@ async fn stream_and_extract(url: &str, target_dir: &Path) -> Result<()> { Ok(()) } -// Builds default URL for latest mainnet archive snapshot +// Builds default URL for latest mainnet archive snapshot using configured defaults async fn get_latest_snapshot_url() -> Result { - let latest_url = format!("{MERKLE_BASE_URL}/latest.txt"); + let base_url = &DownloadDefaults::get_global().default_base_url; + let latest_url = format!("{base_url}/latest.txt"); let filename = Client::new() .get(latest_url) .send() @@ -220,5 +307,64 @@ async fn get_latest_snapshot_url() -> Result { .trim() .to_string(); - Ok(format!("{MERKLE_BASE_URL}/{filename}")) + Ok(format!("{base_url}/{filename}")) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_download_defaults_builder() { + let defaults = DownloadDefaults::default() + .with_snapshot("https://example.com/snapshots (example)") + .with_base_url("https://example.com"); + + assert_eq!(defaults.default_base_url, "https://example.com"); + assert_eq!(defaults.available_snapshots.len(), 3); // 2 defaults + 1 added + } + + #[test] + fn test_download_defaults_replace_snapshots() { + let defaults = DownloadDefaults::default().with_snapshots(vec![ + Cow::Borrowed("https://custom1.com"), + Cow::Borrowed("https://custom2.com"), + ]); + + assert_eq!(defaults.available_snapshots.len(), 2); + assert_eq!(defaults.available_snapshots[0], "https://custom1.com"); + } + + #[test] + fn test_long_help_generation() { + let defaults = 
DownloadDefaults::default(); + let help = defaults.long_help(); + + assert!(help.contains("Available snapshot sources:")); + assert!(help.contains("merkle.io")); + assert!(help.contains("publicnode.com")); + } + + #[test] + fn test_long_help_override() { + let custom_help = "This is custom help text for downloading snapshots."; + let defaults = DownloadDefaults::default().with_long_help(custom_help); + + let help = defaults.long_help(); + assert_eq!(help, custom_help); + assert!(!help.contains("Available snapshot sources:")); + } + + #[test] + fn test_builder_chaining() { + let defaults = DownloadDefaults::default() + .with_base_url("https://custom.example.com") + .with_snapshot("https://snapshot1.com") + .with_snapshot("https://snapshot2.com") + .with_long_help("Custom help for snapshots"); + + assert_eq!(defaults.default_base_url, "https://custom.example.com"); + assert_eq!(defaults.available_snapshots.len(), 4); // 2 defaults + 2 added + assert_eq!(defaults.long_help, Some("Custom help for snapshots".to_string())); + } } diff --git a/crates/cli/commands/src/export_era.rs b/crates/cli/commands/src/export_era.rs index dbedf1852e..5f4f0306bb 100644 --- a/crates/cli/commands/src/export_era.rs +++ b/crates/cli/commands/src/export_era.rs @@ -4,7 +4,7 @@ use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::{Args, Parser}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; -use reth_era::execution_types::MAX_BLOCKS_PER_ERA1; +use reth_era::era1::types::execution::MAX_BLOCKS_PER_ERA1; use reth_era_utils as era1; use reth_provider::DatabaseProviderFactory; use std::{path::PathBuf, sync::Arc}; diff --git a/crates/cli/commands/src/init_cmd.rs b/crates/cli/commands/src/init_cmd.rs index 172fd3bee7..40ed8eb4a3 100644 --- a/crates/cli/commands/src/init_cmd.rs +++ b/crates/cli/commands/src/init_cmd.rs @@ -1,8 +1,9 @@ //! Command that initializes the node from a genesis file. use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; +use alloy_consensus::BlockHeader; use clap::Parser; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_provider::BlockHashReader; use std::sync::Arc; @@ -22,8 +23,9 @@ impl> InitComman let Environment { provider_factory, .. } = self.env.init::(AccessRights::RW)?; + let genesis_block_number = provider_factory.chain_spec().genesis_header().number(); let hash = provider_factory - .block_hash(0)? + .block_hash(genesis_block_number)? .ok_or_else(|| eyre::eyre!("Genesis hash not found."))?; info!(target: "reth::cli", hash = ?hash, "Genesis block written"); diff --git a/crates/cli/commands/src/init_state/mod.rs b/crates/cli/commands/src/init_state/mod.rs index 4b5c51585b..712404430e 100644 --- a/crates/cli/commands/src/init_state/mod.rs +++ b/crates/cli/commands/src/init_state/mod.rs @@ -1,6 +1,6 @@ //! Command that initializes the node from a genesis file. 
-use crate::common::{AccessRights, CliHeader, CliNodeTypes, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_consensus::BlockHeader as AlloyBlockHeader; use alloy_primitives::{Sealable, B256}; use clap::Parser; @@ -8,7 +8,7 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_db_common::init::init_from_state_dump; use reth_node_api::NodePrimitives; -use reth_primitives_traits::{BlockHeader, SealedHeader}; +use reth_primitives_traits::{header::HeaderMut, SealedHeader}; use reth_provider::{ BlockNumReader, DBProvider, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, @@ -69,7 +69,7 @@ impl> InitStateC where N: CliNodeTypes< ChainSpec = C::ChainSpec, - Primitives: NodePrimitives, + Primitives: NodePrimitives, >, { info!(target: "reth::cli", "Reth init-state starting"); @@ -110,7 +110,7 @@ impl> InitStateC static_file_provider.commit()?; } else if last_block_number > 0 && last_block_number < header.number() { return Err(eyre::eyre!( - "Data directory should be empty when calling init-state with --without-evm-history." + "Data directory should be empty when calling init-state with --without-evm." )); } } diff --git a/crates/cli/commands/src/init_state/without_evm.rs b/crates/cli/commands/src/init_state/without_evm.rs index 8da0bde068..7b2180be45 100644 --- a/crates/cli/commands/src/init_state/without_evm.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -79,7 +79,7 @@ where + StaticFileProviderFactory>, { provider_rw.insert_block( - SealedBlock::<::Block>::from_sealed_parts( + &SealedBlock::<::Block>::from_sealed_parts( header.clone(), Default::default(), ) diff --git a/crates/cli/commands/src/node.rs b/crates/cli/commands/src/node.rs index 240bb3c289..cba857a3a8 100644 --- a/crates/cli/commands/src/node.rs +++ b/crates/cli/commands/src/node.rs @@ -10,7 +10,7 @@ use reth_node_builder::NodeBuilder; use reth_node_core::{ args::{ DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, EraArgs, MetricArgs, - NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, + NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, StaticFilesArgs, TxPoolArgs, }, node_config::NodeConfig, version, @@ -110,6 +110,10 @@ pub struct NodeCommand, { - tracing::info!(target: "reth::cli", version = ?version::version_metadata().short_version, "Starting reth"); + tracing::info!(target: "reth::cli", version = ?version::version_metadata().short_version, "Starting {}", version::version_metadata().name_client); let Self { datadir, @@ -162,9 +166,10 @@ where db, dev, pruning, - ext, engine, era, + static_files, + ext, } = self; // set up node config @@ -184,6 +189,7 @@ where pruning, engine, era, + static_files, }; let data_dir = node_config.datadir(); diff --git a/crates/cli/commands/src/p2p/bootnode.rs b/crates/cli/commands/src/p2p/bootnode.rs index 8e4fb5ad2d..2a6932351b 100644 --- a/crates/cli/commands/src/p2p/bootnode.rs +++ b/crates/cli/commands/src/p2p/bootnode.rs @@ -60,7 +60,7 @@ impl Command { if self.v5 { info!("Starting discv5"); let config = Config::builder(self.addr).build(); - let (_discv5, updates, _local_enr_discv5) = Discv5::start(&sk, config).await?; + let (_discv5, updates) = Discv5::start(&sk, config).await?; discv5_updates = Some(updates); }; diff --git a/crates/cli/commands/src/p2p/mod.rs b/crates/cli/commands/src/p2p/mod.rs index c72ceca78e..31d017ba92 100644 --- a/crates/cli/commands/src/p2p/mod.rs +++ 
b/crates/cli/commands/src/p2p/mod.rs @@ -8,7 +8,7 @@ use backon::{ConstantBuilder, Retryable}; use clap::{Parser, Subcommand}; use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_util::{get_secret_key, hash_or_num_value_parser}; +use reth_cli_util::hash_or_num_value_parser; use reth_config::Config; use reth_network::{BlockDownloaderProvider, NetworkConfigBuilder}; use reth_network_p2p::bodies::client::BodiesClient; @@ -72,7 +72,7 @@ impl .split(); if result.len() != 1 { eyre::bail!( - "Invalid number of headers received. Expected: 1. Received: {}", + "Invalid number of bodies received. Expected: 1. Received: {}", result.len() ) } @@ -183,15 +183,13 @@ impl DownloadArgs { config.peers.trusted_nodes_only = self.network.trusted_only; let default_secret_key_path = data_dir.p2p_secret(); - let secret_key_path = - self.network.p2p_secret_key.clone().unwrap_or(default_secret_key_path); - let p2p_secret_key = get_secret_key(&secret_key_path)?; + let p2p_secret_key = self.network.secret_key(default_secret_key_path)?; let rlpx_socket = (self.network.addr, self.network.port).into(); let boot_nodes = self.chain.bootnodes().unwrap_or_default(); let net = NetworkConfigBuilder::::new(p2p_secret_key) .peer_config(config.peers_config_with_basic_nodes_from_file(None)) - .external_ip_resolver(self.network.nat) + .external_ip_resolver(self.network.nat.clone()) .network_id(self.network.network_id) .boot_nodes(boot_nodes.clone()) .apply(|builder| { diff --git a/crates/cli/commands/src/re_execute.rs b/crates/cli/commands/src/re_execute.rs index 3b8ba305a4..8223ca0120 100644 --- a/crates/cli/commands/src/re_execute.rs +++ b/crates/cli/commands/src/re_execute.rs @@ -9,6 +9,7 @@ use clap::Parser; use eyre::WrapErr; use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_cli::chainspec::ChainSpecParser; +use reth_cli_util::cancellation::CancellationToken; use reth_consensus::FullConsensus; use reth_evm::{execute::Executor, ConfigureEvm}; use reth_primitives_traits::{format_gas_throughput, BlockBody, GotExpected}; @@ -44,6 +45,10 @@ pub struct Command { /// Number of tasks to run in parallel #[arg(long, default_value = "10")] num_tasks: u64, + + /// Continues with execution when an invalid block is encountered and collects these blocks. + #[arg(long)] + skip_invalid_blocks: bool, } impl Command { @@ -61,11 +66,23 @@ impl { let Environment { provider_factory, .. } = self.env.init::(AccessRights::RO)?; - let provider = provider_factory.database_provider_ro()?; let components = components(provider_factory.chain_spec()); let min_block = self.from; - let max_block = self.to.unwrap_or(provider.best_block_number()?); + let best_block = DatabaseProviderFactory::database_provider_ro(&provider_factory)? 
+ .best_block_number()?; + let mut max_block = best_block; + if let Some(to) = self.to { + if to > best_block { + warn!( + requested = to, + best_block, + "Requested --to is beyond available chain head; clamping to best block" + ); + } else { + max_block = to; + } + }; let total_blocks = max_block - min_block; let total_gas = calculate_gas_used_from_headers( @@ -83,7 +100,11 @@ impl } }; + let skip_invalid_blocks = self.skip_invalid_blocks; let (stats_tx, mut stats_rx) = mpsc::unbounded_channel(); + let (info_tx, mut info_rx) = mpsc::unbounded_channel(); + let cancellation = CancellationToken::new(); + let _guard = cancellation.drop_guard(); let mut tasks = JoinSet::new(); for i in 0..self.num_tasks { @@ -97,17 +118,40 @@ impl let consensus = components.consensus().clone(); let db_at = db_at.clone(); let stats_tx = stats_tx.clone(); + let info_tx = info_tx.clone(); + let cancellation = cancellation.clone(); tasks.spawn_blocking(move || { let mut executor = evm_config.batch_executor(db_at(start_block - 1)); - for block in start_block..end_block { + let mut executor_created = Instant::now(); + let executor_lifetime = Duration::from_secs(120); + + 'blocks: for block in start_block..end_block { + if cancellation.is_cancelled() { + // exit if the program is being terminated + break + } + let block = provider_factory .recovered_block(block.into(), TransactionVariant::NoHash)? .unwrap(); - let result = executor.execute_one(&block)?; + + let result = match executor.execute_one(&block) { + Ok(result) => result, + Err(err) => { + if skip_invalid_blocks { + executor = evm_config.batch_executor(db_at(block.number())); + let _ = info_tx.send((block, eyre::Report::new(err))); + continue + } + return Err(err.into()) + } + }; if let Err(err) = consensus .validate_block_post_execution(&block, &result) - .wrap_err_with(|| format!("Failed to validate block {}", block.number())) + .wrap_err_with(|| { + format!("Failed to validate block {} {}", block.number(), block.hash()) + }) { let correct_receipts = provider_factory.receipts_by_block(block.number().into())?.unwrap(); @@ -143,6 +187,11 @@ impl }; error!(number=?block.number(), ?mismatch, "Gas usage mismatch"); + if skip_invalid_blocks { + executor = evm_config.batch_executor(db_at(block.number())); + let _ = info_tx.send((block, err)); + continue 'blocks; + } return Err(err); } } else { @@ -154,9 +203,12 @@ impl } let _ = stats_tx.send(block.gas_used()); - // Reset DB once in a while to avoid OOM - if executor.size_hint() > 1_000_000 { + // Reset DB once in a while to avoid OOM or read tx timeouts + if executor.size_hint() > 1_000_000 || + executor_created.elapsed() > executor_lifetime + { executor = evm_config.batch_executor(db_at(block.number())); + executor_created = Instant::now(); } } @@ -171,6 +223,7 @@ impl let mut last_logged_gas = 0; let mut last_logged_blocks = 0; let mut last_logged_time = Instant::now(); + let mut invalid_blocks = Vec::new(); let mut interval = tokio::time::interval(Duration::from_secs(10)); @@ -180,6 +233,10 @@ impl total_executed_blocks += 1; total_executed_gas += gas_used; } + Some((block, err)) = info_rx.recv() => { + error!(?err, block=?block.num_hash(), "Invalid block"); + invalid_blocks.push(block.num_hash()); + } result = tasks.join_next() => { if let Some(result) = result { if matches!(result, Err(_) | Ok(Err(_))) { @@ -210,12 +267,25 @@ impl } } - info!( - start_block = min_block, - end_block = max_block, - throughput=?format_gas_throughput(total_executed_gas, instant.elapsed()), - "Re-executed successfully" - ); + if 
invalid_blocks.is_empty() { + info!( + start_block = min_block, + end_block = max_block, + %total_executed_blocks, + throughput=?format_gas_throughput(total_executed_gas, instant.elapsed()), + "Re-executed successfully" + ); + } else { + info!( + start_block = min_block, + end_block = max_block, + %total_executed_blocks, + invalid_block_count = invalid_blocks.len(), + ?invalid_blocks, + throughput=?format_gas_throughput(total_executed_gas, instant.elapsed()), + "Re-executed with invalid blocks" + ); + } Ok(()) } diff --git a/crates/cli/commands/src/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs index 2c6e911d7b..66505b9046 100644 --- a/crates/cli/commands/src/stage/drop.rs +++ b/crates/cli/commands/src/stage/drop.rs @@ -1,10 +1,9 @@ //! Database debugging tool use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::Parser; -use itertools::Itertools; use reth_chainspec::EthChainSpec; use reth_cli::chainspec::ChainSpecParser; -use reth_db::{mdbx::tx::Tx, static_file::iter_static_files, DatabaseError}; +use reth_db::{mdbx::tx::Tx, DatabaseError}; use reth_db_api::{ tables, transaction::{DbTx, DbTxMut}, @@ -15,7 +14,9 @@ use reth_db_common::{ }; use reth_node_api::{HeaderTy, ReceiptTy, TxTy}; use reth_node_core::args::StageEnum; -use reth_provider::{DBProvider, DatabaseProviderFactory, StaticFileProviderFactory, TrieWriter}; +use reth_provider::{ + DBProvider, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, TrieWriter, +}; use reth_prune::PruneSegment; use reth_stages::StageId; use reth_static_file_types::StaticFileSegment; @@ -44,21 +45,48 @@ impl Command { StageEnum::Headers => Some(StaticFileSegment::Headers), StageEnum::Bodies => Some(StaticFileSegment::Transactions), StageEnum::Execution => Some(StaticFileSegment::Receipts), + StageEnum::Senders => Some(StaticFileSegment::TransactionSenders), _ => None, }; - // Delete static file segment data before inserting the genesis header below + // Calling `StaticFileProviderRW::prune_*` will instruct the writer to prune rows only + // when `StaticFileProviderRW::commit` is called. We need to do that instead of + // deleting the jar files, otherwise if the task were to be interrupted after we + // have deleted them, BUT before we have committed the checkpoints to the database, we'd + // lose essential data. if let Some(static_file_segment) = static_file_segment { let static_file_provider = tool.provider_factory.static_file_provider(); - let static_files = iter_static_files(static_file_provider.directory())?; - if let Some(segment_static_files) = static_files.get(&static_file_segment) { - // Delete static files from the highest to the lowest block range - for (block_range, _) in segment_static_files - .iter() - .sorted_by_key(|(block_range, _)| block_range.start()) - .rev() - { - static_file_provider.delete_jar(static_file_segment, block_range.start())?; + if let Some(highest_block) = + static_file_provider.get_highest_static_file_block(static_file_segment) + { + let mut writer = static_file_provider.latest_writer(static_file_segment)?; + + match static_file_segment { + StaticFileSegment::Headers => { + // Prune all headers leaving genesis intact. 
+ writer.prune_headers(highest_block)?; + } + StaticFileSegment::Transactions => { + let to_delete = static_file_provider + .get_highest_static_file_tx(static_file_segment) + .map(|tx_num| tx_num + 1) + .unwrap_or_default(); + writer.prune_transactions(to_delete, 0)?; + } + StaticFileSegment::Receipts => { + let to_delete = static_file_provider + .get_highest_static_file_tx(static_file_segment) + .map(|tx_num| tx_num + 1) + .unwrap_or_default(); + writer.prune_receipts(to_delete, 0)?; + } + StaticFileSegment::TransactionSenders => { + let to_delete = static_file_provider + .get_highest_static_file_tx(static_file_segment) + .map(|tx_num| tx_num + 1) + .unwrap_or_default(); + writer.prune_transaction_senders(to_delete, 0)?; + } } } } diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index 9e8e68e980..dafbf7c31a 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -9,7 +9,7 @@ use reth_evm::ConfigureEvm; use reth_node_builder::NodeTypesWithDB; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{ - providers::{ProviderNodeTypes, StaticFileProvider}, + providers::{ProviderNodeTypes, RocksDBProvider, StaticFileProvider}, DatabaseProviderFactory, ProviderFactory, }; use reth_stages::{stages::ExecutionStage, Stage, StageCheckpoint, UnwindInput}; @@ -42,7 +42,8 @@ where Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, - ), + RocksDBProvider::builder(output_datadir.rocksdb()).build()?, + )?, to, from, evm_config, diff --git a/crates/cli/commands/src/stage/dump/hashing_account.rs b/crates/cli/commands/src/stage/dump/hashing_account.rs index 8b9ba5e937..ecd138ece3 100644 --- a/crates/cli/commands/src/stage/dump/hashing_account.rs +++ b/crates/cli/commands/src/stage/dump/hashing_account.rs @@ -6,7 +6,7 @@ use reth_db_api::{database::Database, table::TableImporter, tables}; use reth_db_common::DbTool; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{ - providers::{ProviderNodeTypes, StaticFileProvider}, + providers::{ProviderNodeTypes, RocksDBProvider, StaticFileProvider}, DatabaseProviderFactory, ProviderFactory, }; use reth_stages::{stages::AccountHashingStage, Stage, StageCheckpoint, UnwindInput}; @@ -39,7 +39,8 @@ pub(crate) async fn dump_hashing_account_stage( let unwind_inner_tx = provider.into_tx(); - // TODO optimize we can actually just get the entries we need - output_db - .update(|tx| tx.import_dupsort::(&unwind_inner_tx))??; + output_db.update(|tx| { + tx.import_table_with_range::( + &unwind_inner_tx, + Some(BlockNumberAddress((from, Address::ZERO))), + BlockNumberAddress((to, Address::repeat_byte(0xff))), + ) + })??; output_db.update(|tx| tx.import_table::(&unwind_inner_tx))??; output_db.update(|tx| tx.import_dupsort::(&unwind_inner_tx))??; diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index 010277480f..202506be9c 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -84,6 +84,9 @@ pub struct Command { /// Commits the changes in the database. WARNING: potentially destructive. /// /// Useful when you want to run diagnostics on the database. + /// + /// NOTE: This flag is currently required for the headers, bodies, and execution stages because + /// they use static files and must commit to properly unwind and run. 
// TODO: We should consider allowing to run hooks at the end of the stage run, // e.g. query the DB size, or any table data. #[arg(long, short)] @@ -105,6 +108,14 @@ impl Comp: CliNodeComponents, F: FnOnce(Arc) -> Comp, { + // Quit early if the stage requires a commit and `--commit` is not provided. + if self.requires_commit() && !self.commit { + return Err(eyre::eyre!( + "The stage {} requires overwriting existing static files and must commit, but `--commit` was not provided. Please pass `--commit` and try again.", + self.stage.to_string() + )); + } + // Raise the fd limit of the process. // Does not do anything on windows. let _ = fdlimit::raise_fd_limit(); @@ -116,7 +127,6 @@ impl let components = components(provider_factory.chain_spec()); if let Some(listen_addr) = self.metrics { - info!(target: "reth::cli", "Starting metrics endpoint at {}", listen_addr); let config = MetricServerConfig::new( listen_addr, VersionInfo { @@ -384,4 +394,13 @@ impl Command { pub fn chain_spec(&self) -> Option<&Arc> { Some(&self.env.chain) } + + /// Returns whether or not the configured stage requires committing. + /// + /// This is the case for stages that mainly modify static files, as there is no way to unwind + /// these stages without committing anyway. This is because static files do not have + /// transactions and we cannot change the view of headers without writing. + pub fn requires_commit(&self) -> bool { + matches!(self.stage, StageEnum::Headers | StageEnum::Bodies | StageEnum::Execution) + } } diff --git a/crates/cli/runner/src/lib.rs b/crates/cli/runner/src/lib.rs index 79dc6b2114..63a9de7722 100644 --- a/crates/cli/runner/src/lib.rs +++ b/crates/cli/runner/src/lib.rs @@ -97,6 +97,57 @@ impl CliRunner { command_res } + /// Executes a command in a blocking context with access to `CliContext`. + /// + /// See [`Runtime::spawn_blocking`](tokio::runtime::Runtime::spawn_blocking). + pub fn run_blocking_command_until_exit( + self, + command: impl FnOnce(CliContext) -> F + Send + 'static, + ) -> Result<(), E> + where + F: Future> + Send + 'static, + E: Send + Sync + From + From + 'static, + { + let AsyncCliRunner { context, mut task_manager, tokio_runtime } = + AsyncCliRunner::new(self.tokio_runtime); + + // Spawn the command on the blocking thread pool + let handle = tokio_runtime.handle().clone(); + let command_handle = + tokio_runtime.handle().spawn_blocking(move || handle.block_on(command(context))); + + // Wait for the command to complete or ctrl-c + let command_res = tokio_runtime.block_on(run_to_completion_or_panic( + &mut task_manager, + run_until_ctrl_c( + async move { command_handle.await.expect("Failed to join blocking task") }, + ), + )); + + if command_res.is_err() { + error!(target: "reth::cli", "shutting down due to error"); + } else { + debug!(target: "reth::cli", "shutting down gracefully"); + task_manager.graceful_shutdown_with_timeout(Duration::from_secs(5)); + } + + // Shut down the runtime on a separate thread + let (tx, rx) = mpsc::channel(); + std::thread::Builder::new() + .name("tokio-runtime-shutdown".to_string()) + .spawn(move || { + drop(tokio_runtime); + let _ = tx.send(()); + }) + .unwrap(); + + let _ = rx.recv_timeout(Duration::from_secs(5)).inspect_err(|err| { + debug!(target: "reth::cli", %err, "tokio runtime shutdown timed out"); + }); + + command_res + } + /// Executes a regular future until completion or until external signal received. 
pub fn run_until_ctrl_c(self, fut: F) -> Result<(), E> where diff --git a/crates/cli/util/Cargo.toml b/crates/cli/util/Cargo.toml index 2ae53e1806..5dabb7cfb6 100644 --- a/crates/cli/util/Cargo.toml +++ b/crates/cli/util/Cargo.toml @@ -42,6 +42,9 @@ jemalloc = ["dep:tikv-jemallocator"] # Enables jemalloc profiling features jemalloc-prof = ["jemalloc", "tikv-jemallocator?/profiling"] +# Enables unprefixed malloc (reproducible builds support) +jemalloc-unprefixed = ["jemalloc", "tikv-jemallocator?/unprefixed_malloc_on_supported_platforms"] + # Wraps the selected allocator in the tracy profiling allocator tracy-allocator = ["dep:tracy-client"] diff --git a/crates/cli/util/src/cancellation.rs b/crates/cli/util/src/cancellation.rs new file mode 100644 index 0000000000..31f3446ef2 --- /dev/null +++ b/crates/cli/util/src/cancellation.rs @@ -0,0 +1,103 @@ +//! Thread-safe cancellation primitives for cooperative task cancellation. + +use std::sync::{ + atomic::{AtomicBool, Ordering}, + Arc, +}; + +/// A thread-safe cancellation token that can be shared across threads. +/// +/// This token allows cooperative cancellation by providing a way to signal +/// cancellation and check cancellation status. The token can be cloned and +/// shared across multiple threads, with all clones sharing the same cancellation state. +/// +/// # Example +/// +/// ``` +/// use reth_cli_util::cancellation::CancellationToken; +/// use std::{thread, time::Duration}; +/// +/// let token = CancellationToken::new(); +/// let worker_token = token.clone(); +/// +/// let handle = thread::spawn(move || { +/// while !worker_token.is_cancelled() { +/// // Do work... +/// thread::sleep(Duration::from_millis(100)); +/// } +/// }); +/// +/// // Cancel from main thread +/// token.cancel(); +/// handle.join().unwrap(); +/// ``` +#[derive(Clone, Debug)] +pub struct CancellationToken { + cancelled: Arc, +} + +impl CancellationToken { + /// Creates a new cancellation token in the non-cancelled state. + pub fn new() -> Self { + Self { cancelled: Arc::new(AtomicBool::new(false)) } + } + + /// Signals cancellation to all holders of this token and its clones. + /// + /// Once cancelled, the token cannot be reset. This operation is thread-safe + /// and can be called multiple times without issue. + pub fn cancel(&self) { + self.cancelled.store(true, Ordering::Release); + } + + /// Checks whether cancellation has been requested. + /// + /// Returns `true` if [`cancel`](Self::cancel) has been called on this token + /// or any of its clones. + pub fn is_cancelled(&self) -> bool { + self.cancelled.load(Ordering::Relaxed) + } + + /// Creates a guard that automatically cancels this token when dropped. + /// + /// This is useful for ensuring cancellation happens when a scope exits, + /// either normally or via panic. + /// + /// # Example + /// + /// ``` + /// use reth_cli_util::cancellation::CancellationToken; + /// + /// let token = CancellationToken::new(); + /// { + /// let _guard = token.drop_guard(); + /// assert!(!token.is_cancelled()); + /// // Guard dropped here, triggering cancellation + /// } + /// assert!(token.is_cancelled()); + /// ``` + pub fn drop_guard(&self) -> CancellationGuard { + CancellationGuard { token: self.clone() } + } +} + +impl Default for CancellationToken { + fn default() -> Self { + Self::new() + } +} + +/// A guard that cancels its associated [`CancellationToken`] when dropped. +/// +/// Created by calling [`CancellationToken::drop_guard`]. 
When this guard is dropped, +/// it automatically calls [`cancel`](CancellationToken::cancel) on the token. +#[derive(Debug)] +pub struct CancellationGuard { + token: CancellationToken, +} + +impl Drop for CancellationGuard { + fn drop(&mut self) { + self.token.cancel(); + } +} diff --git a/crates/cli/util/src/lib.rs b/crates/cli/util/src/lib.rs index 7e0d69c186..12c3cdf1e4 100644 --- a/crates/cli/util/src/lib.rs +++ b/crates/cli/util/src/lib.rs @@ -9,10 +9,11 @@ #![cfg_attr(docsrs, feature(doc_cfg))] pub mod allocator; +pub mod cancellation; /// Helper function to load a secret key from a file. pub mod load_secret_key; -pub use load_secret_key::get_secret_key; +pub use load_secret_key::{get_secret_key, parse_secret_key_from_hex}; /// Cli parsers functions. pub mod parsers; diff --git a/crates/cli/util/src/load_secret_key.rs b/crates/cli/util/src/load_secret_key.rs index 0ca46398f1..64d756cddc 100644 --- a/crates/cli/util/src/load_secret_key.rs +++ b/crates/cli/util/src/load_secret_key.rs @@ -30,6 +30,10 @@ pub enum SecretKeyError { /// Path to the secret key file. secret_file: PathBuf, }, + + /// Invalid hex string format. + #[error("invalid hex string: {0}")] + InvalidHexString(String), } /// Attempts to load a [`SecretKey`] from a specified path. If no file exists there, then it @@ -60,3 +64,75 @@ pub fn get_secret_key(secret_key_path: &Path) -> Result Result { + // Remove "0x" prefix if present + let hex_str = hex_str.strip_prefix("0x").unwrap_or(hex_str); + + // Decode the hex string + let bytes = alloy_primitives::hex::decode(hex_str) + .map_err(|e| SecretKeyError::InvalidHexString(e.to_string()))?; + + // Parse into SecretKey + SecretKey::from_slice(&bytes).map_err(SecretKeyError::SecretKeyDecodeError) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_secret_key_from_hex_without_prefix() { + // Valid 32-byte hex string (64 characters) + let hex = "4c0883a69102937d6231471b5dbb6204fe512961708279f8c5c58b3b9c4e8b8f"; + let result = parse_secret_key_from_hex(hex); + assert!(result.is_ok()); + + let secret_key = result.unwrap(); + assert_eq!(alloy_primitives::hex::encode(secret_key.secret_bytes()), hex); + } + + #[test] + fn test_parse_secret_key_from_hex_with_0x_prefix() { + // Valid 32-byte hex string with 0x prefix + let hex = "0x4c0883a69102937d6231471b5dbb6204fe512961708279f8c5c58b3b9c4e8b8f"; + let result = parse_secret_key_from_hex(hex); + assert!(result.is_ok()); + + let secret_key = result.unwrap(); + let expected = "4c0883a69102937d6231471b5dbb6204fe512961708279f8c5c58b3b9c4e8b8f"; + assert_eq!(alloy_primitives::hex::encode(secret_key.secret_bytes()), expected); + } + + #[test] + fn test_parse_secret_key_from_hex_invalid_length() { + // Invalid length (not 32 bytes) + let hex = "4c0883a69102937d"; + let result = parse_secret_key_from_hex(hex); + assert!(result.is_err()); + } + + #[test] + fn test_parse_secret_key_from_hex_invalid_chars() { + // Invalid hex characters + let hex = "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"; + let result = parse_secret_key_from_hex(hex); + assert!(result.is_err()); + + if let Err(SecretKeyError::InvalidHexString(_)) = result { + // Expected error type + } else { + panic!("Expected InvalidHexString error"); + } + } + + #[test] + fn test_parse_secret_key_from_hex_empty() { + let hex = ""; + let result = parse_secret_key_from_hex(hex); + assert!(result.is_err()); + } +} diff --git a/crates/cli/util/src/parsers.rs b/crates/cli/util/src/parsers.rs index ddb120452e..dae9f9bc6e 100644 --- 
a/crates/cli/util/src/parsers.rs +++ b/crates/cli/util/src/parsers.rs @@ -31,6 +31,16 @@ pub fn parse_duration_from_secs_or_ms( } } +/// Helper to format a [Duration] to the format that can be parsed by +/// [`parse_duration_from_secs_or_ms`]. +pub fn format_duration_as_secs_or_ms(duration: Duration) -> String { + if duration.as_millis().is_multiple_of(1000) { + format!("{}", duration.as_secs()) + } else { + format!("{}ms", duration.as_millis()) + } +} + /// Parse [`BlockHashOrNumber`] pub fn hash_or_num_value_parser(value: &str) -> eyre::Result { match B256::from_str(value) { diff --git a/crates/cli/util/src/sigsegv_handler.rs b/crates/cli/util/src/sigsegv_handler.rs index dabbf866ce..78e37cf157 100644 --- a/crates/cli/util/src/sigsegv_handler.rs +++ b/crates/cli/util/src/sigsegv_handler.rs @@ -126,7 +126,8 @@ pub fn install() { libc::sigaltstack(&raw const alt_stack, ptr::null_mut()); let mut sa: libc::sigaction = mem::zeroed(); - sa.sa_sigaction = print_stack_trace as libc::sighandler_t; + sa.sa_sigaction = + print_stack_trace as unsafe extern "C" fn(libc::c_int) as libc::sighandler_t; sa.sa_flags = libc::SA_NODEFER | libc::SA_RESETHAND | libc::SA_ONSTACK; libc::sigemptyset(&raw mut sa.sa_mask); libc::sigaction(libc::SIGSEGV, &raw const sa, ptr::null_mut()); diff --git a/crates/config/Cargo.toml b/crates/config/Cargo.toml index 65bca13901..6edabeab2b 100644 --- a/crates/config/Cargo.toml +++ b/crates/config/Cargo.toml @@ -15,6 +15,7 @@ workspace = true reth-network-types.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true +reth-static-file-types.workspace = true # serde serde = { workspace = true, optional = true } @@ -22,7 +23,7 @@ humantime-serde = { workspace = true, optional = true } # toml toml = { workspace = true, optional = true } -eyre = { workspace = true, optional = true } +eyre.workspace = true # value objects url.workspace = true @@ -31,7 +32,6 @@ url.workspace = true serde = [ "dep:serde", "dep:toml", - "dep:eyre", "dep:humantime-serde", "reth-network-types/serde", "reth-prune-types/serde", diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index dd2e7046b0..eb9ed80260 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -2,7 +2,9 @@ use reth_network_types::{PeersConfig, SessionsConfig}; use reth_prune_types::PruneModes; use reth_stages_types::ExecutionStageThresholds; +use reth_static_file_types::StaticFileSegment; use std::{ + collections::HashMap, path::{Path, PathBuf}, time::Duration, }; @@ -29,11 +31,14 @@ pub struct Config { pub peers: PeersConfig, /// Configuration for peer sessions. pub sessions: SessionsConfig, + /// Configuration for static files. + #[cfg_attr(feature = "serde", serde(default))] + pub static_files: StaticFilesConfig, } impl Config { /// Sets the pruning configuration. - pub const fn set_prune_config(&mut self, prune_config: PruneConfig) { + pub fn set_prune_config(&mut self, prune_config: PruneConfig) { self.prune = prune_config; } } @@ -411,6 +416,77 @@ impl EtlConfig { } } +/// Static files configuration. +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", serde(default))] +pub struct StaticFilesConfig { + /// Number of blocks per file for each segment. + pub blocks_per_file: BlocksPerFileConfig, +} + +/// Configuration for the number of blocks per file for each segment. 
+#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", serde(default))] +pub struct BlocksPerFileConfig { + /// Number of blocks per file for the headers segment. + pub headers: Option, + /// Number of blocks per file for the transactions segment. + pub transactions: Option, + /// Number of blocks per file for the receipts segment. + pub receipts: Option, + /// Number of blocks per file for the transaction senders segment. + pub transaction_senders: Option, +} + +impl StaticFilesConfig { + /// Validates the static files configuration. + /// + /// Returns an error if any blocks per file value is zero. + pub fn validate(&self) -> eyre::Result<()> { + let BlocksPerFileConfig { headers, transactions, receipts, transaction_senders } = + self.blocks_per_file; + eyre::ensure!(headers != Some(0), "Headers segment blocks per file must be greater than 0"); + eyre::ensure!( + transactions != Some(0), + "Transactions segment blocks per file must be greater than 0" + ); + eyre::ensure!( + receipts != Some(0), + "Receipts segment blocks per file must be greater than 0" + ); + eyre::ensure!( + transaction_senders != Some(0), + "Transaction senders segment blocks per file must be greater than 0" + ); + Ok(()) + } + + /// Converts the blocks per file configuration into a [`HashMap`] per segment. + pub fn as_blocks_per_file_map(&self) -> HashMap { + let BlocksPerFileConfig { headers, transactions, receipts, transaction_senders } = + self.blocks_per_file; + + let mut map = HashMap::new(); + // Iterating over all possible segments allows us to do an exhaustive match here, + // to not forget to configure new segments in the future. + for segment in StaticFileSegment::iter() { + let blocks_per_file = match segment { + StaticFileSegment::Headers => headers, + StaticFileSegment::Transactions => transactions, + StaticFileSegment::Receipts => receipts, + StaticFileSegment::TransactionSenders => transaction_senders, + }; + + if let Some(blocks_per_file) = blocks_per_file { + map.insert(segment, blocks_per_file); + } + } + map + } +} + /// History stage configuration. #[derive(Debug, Clone, Copy, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] @@ -451,14 +527,17 @@ impl PruneConfig { } /// Returns whether there is any kind of receipt pruning configuration. - pub const fn has_receipts_pruning(&self) -> bool { - self.segments.receipts.is_some() + pub fn has_receipts_pruning(&self) -> bool { + self.segments.receipts.is_some() || !self.segments.receipts_log_filter.is_empty() } - /// Merges another `PruneConfig` into this one, taking values from the other config if and only - /// if the corresponding value in this config is not set. + /// Merges values from `other` into `self`. + /// - `Option` fields: set from `other` only if `self` is `None`. + /// - `block_interval`: set from `other` only if `self.block_interval == + /// DEFAULT_BLOCK_INTERVAL`. + /// - `merkle_changesets`: always set from `other`. + /// - `receipts_log_filter`: set from `other` only if `self` is empty and `other` is non-empty. 
pub fn merge(&mut self, other: Self) { - #[expect(deprecated)] let Self { block_interval, segments: @@ -470,7 +549,7 @@ impl PruneConfig { storage_history, bodies_history, merkle_changesets, - receipts_log_filter: (), + receipts_log_filter, }, } = other; @@ -486,8 +565,12 @@ impl PruneConfig { self.segments.account_history = self.segments.account_history.or(account_history); self.segments.storage_history = self.segments.storage_history.or(storage_history); self.segments.bodies_history = self.segments.bodies_history.or(bodies_history); - // Merkle changesets is not optional, so we just replace it if provided + // Merkle changesets is not optional; always take the value from `other` self.segments.merkle_changesets = merkle_changesets; + + if self.segments.receipts_log_filter.0.is_empty() && !receipts_log_filter.0.is_empty() { + self.segments.receipts_log_filter = receipts_log_filter; + } } } @@ -514,9 +597,10 @@ where mod tests { use super::{Config, EXTENSION}; use crate::PruneConfig; + use alloy_primitives::Address; use reth_network_peers::TrustedPeer; - use reth_prune_types::{PruneMode, PruneModes}; - use std::{path::Path, str::FromStr, time::Duration}; + use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig}; + use std::{collections::BTreeMap, path::Path, str::FromStr, time::Duration}; fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) { let temp_dir = tempfile::tempdir().unwrap(); @@ -1005,8 +1089,10 @@ receipts = 'full' storage_history: Some(PruneMode::Before(5000)), bodies_history: None, merkle_changesets: PruneMode::Before(0), - #[expect(deprecated)] - receipts_log_filter: (), + receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([( + Address::random(), + PruneMode::Full, + )])), }, }; @@ -1020,11 +1106,14 @@ receipts = 'full' storage_history: Some(PruneMode::Distance(3000)), bodies_history: None, merkle_changesets: PruneMode::Distance(10000), - #[expect(deprecated)] - receipts_log_filter: (), + receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([ + (Address::random(), PruneMode::Distance(1000)), + (Address::random(), PruneMode::Before(2000)), + ])), }, }; + let original_filter = config1.segments.receipts_log_filter.clone(); config1.merge(config2); // Check that the configuration has been merged. Any configuration present in config1 @@ -1036,6 +1125,7 @@ receipts = 'full' assert_eq!(config1.segments.account_history, Some(PruneMode::Distance(2000))); assert_eq!(config1.segments.storage_history, Some(PruneMode::Before(5000))); assert_eq!(config1.segments.merkle_changesets, PruneMode::Distance(10000)); + assert_eq!(config1.segments.receipts_log_filter, original_filter); } #[test] diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index e14a316427..4017459be9 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,8 +1,6 @@ //! Collection of methods for block validation. -use alloy_consensus::{ - constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader as _, Transaction, EMPTY_OMMER_ROOT_HASH, -}; +use alloy_consensus::{BlockHeader as _, Transaction, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{eip4844::DATA_GAS_PER_BLOB, eip7840::BlobParams}; use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; use reth_consensus::{ConsensusError, TxGasLimitTooHighErr}; @@ -225,13 +223,9 @@ where /// Validates that the EIP-4844 header fields exist and conform to the spec. 
This ensures that: /// /// * `blob_gas_used` exists as a header field -/// * `excess_blob_gas` exists as a header field /// * `parent_beacon_block_root` exists as a header field /// * `blob_gas_used` is a multiple of `DATA_GAS_PER_BLOB` -/// * `excess_blob_gas` is a multiple of `DATA_GAS_PER_BLOB` /// * `blob_gas_used` doesn't exceed the max allowed blob gas based on the given params -/// -/// Note: This does not enforce any restrictions on `blob_gas_used` pub fn validate_4844_header_standalone( header: &H, blob_params: BlobParams, @@ -264,9 +258,12 @@ pub fn validate_4844_header_standalone( /// From yellow paper: extraData: An arbitrary byte array containing data relevant to this block. /// This must be 32 bytes or fewer; formally Hx. #[inline] -pub fn validate_header_extra_data(header: &H) -> Result<(), ConsensusError> { +pub fn validate_header_extra_data( + header: &H, + max_size: usize, +) -> Result<(), ConsensusError> { let extra_data_len = header.extra_data().len(); - if extra_data_len > MAXIMUM_EXTRA_DATA_SIZE { + if extra_data_len > max_size { Err(ConsensusError::ExtraDataExceedsMax { len: extra_data_len }) } else { Ok(()) @@ -282,20 +279,28 @@ pub fn validate_against_parent_hash_number( header: &H, parent: &SealedHeader, ) -> Result<(), ConsensusError> { - // Parent number is consistent. - if parent.number() + 1 != header.number() { - return Err(ConsensusError::ParentBlockNumberMismatch { - parent_block_number: parent.number(), - block_number: header.number(), - }) - } - if parent.hash() != header.parent_hash() { return Err(ConsensusError::ParentHashMismatch( GotExpected { got: header.parent_hash(), expected: parent.hash() }.into(), )) } + let Some(parent_number) = parent.number().checked_add(1) else { + // parent block already reached the maximum + return Err(ConsensusError::ParentBlockNumberMismatch { + parent_block_number: parent.number(), + block_number: u64::MAX, + }) + }; + + // Parent number is consistent. 
+ if parent_number != header.number() { + return Err(ConsensusError::ParentBlockNumberMismatch { + parent_block_number: parent.number(), + block_number: header.number(), + }) + } + Ok(()) } @@ -330,7 +335,7 @@ pub fn validate_against_parent_eip1559_base_fee( header: &H, @@ -503,4 +508,21 @@ mod tests { })) ); } + + #[test] + fn validate_header_extra_data_with_custom_limit() { + // Test with default 32 bytes - should pass + let header_32 = Header { extra_data: Bytes::from(vec![0; 32]), ..Default::default() }; + assert!(validate_header_extra_data(&header_32, 32).is_ok()); + + // Test exceeding default - should fail + let header_33 = Header { extra_data: Bytes::from(vec![0; 33]), ..Default::default() }; + assert_eq!( + validate_header_extra_data(&header_33, 32), + Err(ConsensusError::ExtraDataExceedsMax { len: 33 }) + ); + + // Test with custom larger limit - should pass + assert!(validate_header_extra_data(&header_33, 64).is_ok()); + } } diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index b3dfa30e61..e714b61409 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -16,7 +16,7 @@ use alloy_consensus::Header; use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256}; use reth_execution_types::BlockExecutionResult; use reth_primitives_traits::{ - constants::{MAXIMUM_GAS_LIMIT_BLOCK, MINIMUM_GAS_LIMIT}, + constants::{GAS_LIMIT_BOUND_DIVISOR, MAXIMUM_GAS_LIMIT_BLOCK, MINIMUM_GAS_LIMIT}, transaction::error::InvalidTransactionError, Block, GotExpected, GotExpectedBoxed, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, @@ -349,7 +349,7 @@ pub enum ConsensusError { }, /// Error when the child gas limit exceeds the maximum allowed increase. - #[error("child gas_limit {child_gas_limit} max increase is {parent_gas_limit}/1024")] + #[error("child gas_limit {child_gas_limit} exceeds the max allowed increase ({parent_gas_limit}/{GAS_LIMIT_BOUND_DIVISOR})")] GasLimitInvalidIncrease { /// The parent gas limit. parent_gas_limit: u64, @@ -378,7 +378,7 @@ pub enum ConsensusError { }, /// Error when the child gas limit exceeds the maximum allowed decrease. - #[error("child gas_limit {child_gas_limit} max decrease is {parent_gas_limit}/1024")] + #[error("child gas_limit {child_gas_limit} is below the max allowed decrease ({parent_gas_limit}/{GAS_LIMIT_BOUND_DIVISOR})")] GasLimitInvalidDecrease { /// The parent gas limit. 
parent_gas_limit: u64, diff --git a/crates/consensus/debug-client/src/providers/rpc.rs b/crates/consensus/debug-client/src/providers/rpc.rs index 0c9dfbce7d..22767d0db5 100644 --- a/crates/consensus/debug-client/src/providers/rpc.rs +++ b/crates/consensus/debug-client/src/providers/rpc.rs @@ -1,5 +1,5 @@ use crate::BlockProvider; -use alloy_provider::{Network, Provider, ProviderBuilder}; +use alloy_provider::{ConnectionConfig, Network, Provider, ProviderBuilder, WebSocketConfig}; use alloy_transport::TransportResult; use futures::{Stream, StreamExt}; use reth_node_api::Block; @@ -25,7 +25,19 @@ impl RpcBlockProvider { convert: impl Fn(N::BlockResponse) -> PrimitiveBlock + Send + Sync + 'static, ) -> eyre::Result { Ok(Self { - provider: Arc::new(ProviderBuilder::default().connect(rpc_url).await?), + provider: Arc::new( + ProviderBuilder::default() + .connect_with_config( + rpc_url, + ConnectionConfig::default().with_max_retries(u32::MAX).with_ws_config( + WebSocketConfig::default() + // allow larger messages/frames for big blocks + .max_frame_size(Some(128 * 1024 * 1024)) + .max_message_size(Some(128 * 1024 * 1024)), + ), + ) + .await?, + ), url: rpc_url.to_string(), convert: Arc::new(convert), }) @@ -61,34 +73,42 @@ where type Block = PrimitiveBlock; async fn subscribe_blocks(&self, tx: Sender) { - let Ok(mut stream) = self.full_block_stream().await.inspect_err(|err| { - warn!( - target: "consensus::debug-client", - %err, - url=%self.url, - "Failed to subscribe to blocks", - ); - }) else { - return - }; + loop { + let Ok(mut stream) = self.full_block_stream().await.inspect_err(|err| { + warn!( + target: "consensus::debug-client", + %err, + url=%self.url, + "Failed to subscribe to blocks", + ); + }) else { + return + }; - while let Some(res) = stream.next().await { - match res { - Ok(block) => { - if tx.send((self.convert)(block)).await.is_err() { - // Channel closed. - break; + while let Some(res) = stream.next().await { + match res { + Ok(block) => { + if tx.send((self.convert)(block)).await.is_err() { + // Channel closed. 
+ break; + } + } + Err(err) => { + warn!( + target: "consensus::debug-client", + %err, + url=%self.url, + "Failed to fetch a block", + ); + } } - Err(err) => { - warn!( - target: "consensus::debug-client", - %err, - url=%self.url, - "Failed to fetch a block", - ); - } } + // if the stream terminated, we want to re-establish it + debug!( + target: "consensus::debug-client", + url=%self.url, + "Re-establishing block subscription", + ); } } diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 57d03f70fa..f5a2d1b030 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -3,13 +3,12 @@ use node::NodeTestContext; use reth_chainspec::ChainSpec; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; -use reth_engine_local::LocalPayloadAttributesBuilder; use reth_network_api::test_utils::PeersHandleProvider; use reth_node_builder::{ components::NodeComponentsBuilder, rpc::{EngineValidatorAddOn, RethRpcAddOns}, FullNodeTypesAdapter, Node, NodeAdapter, NodeComponents, NodeTypes, NodeTypesWithDBAdapter, - PayloadAttributesBuilder, PayloadTypes, + PayloadTypes, }; use reth_provider::providers::{BlockchainProvider, NodeTypesForProvider}; use reth_tasks::TaskManager; @@ -54,8 +53,6 @@ pub async fn setup( ) -> eyre::Result<(Vec>, TaskManager, Wallet)> where N: NodeBuilderHelper, - LocalPayloadAttributesBuilder: - PayloadAttributesBuilder<<::Payload as PayloadTypes>::PayloadAttributes>, { E2ETestSetupBuilder::new(num_nodes, chain_spec, attributes_generator) .with_node_config_modifier(move |config| config.set_dev(is_dev)) @@ -77,8 +74,6 @@ pub async fn setup_engine( )> where N: NodeBuilderHelper, - LocalPayloadAttributesBuilder: - PayloadAttributesBuilder<::PayloadAttributes>, { setup_engine_with_connection::( num_nodes, @@ -106,8 +101,6 @@ pub async fn setup_engine_with_connection( )> where N: NodeBuilderHelper, - LocalPayloadAttributesBuilder: - PayloadAttributesBuilder<::PayloadAttributes>, { E2ETestSetupBuilder::new(num_nodes, chain_spec, attributes_generator) .with_tree_config_modifier(move |_| tree_config.clone()) @@ -160,13 +153,10 @@ where >, ChainSpec: From + Clone, >, - LocalPayloadAttributesBuilder: - PayloadAttributesBuilder<::PayloadAttributes>, { } -impl NodeBuilderHelper for T -where +impl NodeBuilderHelper for T where Self: Default + NodeTypesForProvider< Payload: PayloadTypes< @@ -187,8 +177,6 @@ where Adapter>>, >, ChainSpec: From + Clone, - >, - LocalPayloadAttributesBuilder: - PayloadAttributesBuilder<::PayloadAttributes>, + > { } diff --git a/crates/e2e-test-utils/src/setup_builder.rs b/crates/e2e-test-utils/src/setup_builder.rs index 8de2280fe4..8f38b66eb5 100644 --- a/crates/e2e-test-utils/src/setup_builder.rs +++ b/crates/e2e-test-utils/src/setup_builder.rs @@ -4,18 +4,19 @@ //! configurations through closures that modify `NodeConfig` and `TreeConfig`. 
use crate::{node::NodeTestContext, wallet::Wallet, NodeBuilderHelper, NodeHelperType, TmpDB}; +use futures_util::future::TryJoinAll; use reth_chainspec::EthChainSpec; -use reth_engine_local::LocalPayloadAttributesBuilder; use reth_node_builder::{ EngineNodeLauncher, NodeBuilder, NodeConfig, NodeHandle, NodeTypes, NodeTypesWithDBAdapter, - PayloadAttributesBuilder, PayloadTypes, + PayloadTypes, }; use reth_node_core::args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}; +use reth_primitives_traits::AlloyBlockHeader; use reth_provider::providers::BlockchainProvider; use reth_rpc_server_types::RpcModuleSelection; use reth_tasks::TaskManager; use std::sync::Arc; -use tracing::{span, Level}; +use tracing::{span, Instrument, Level}; /// Type alias for tree config modifier closure type TreeConfigModifier = @@ -37,8 +38,6 @@ where + Sync + Copy + 'static, - LocalPayloadAttributesBuilder: - PayloadAttributesBuilder<::PayloadAttributes>, { num_nodes: usize, chain_spec: Arc, @@ -56,8 +55,6 @@ where + Sync + Copy + 'static, - LocalPayloadAttributesBuilder: - PayloadAttributesBuilder<::PayloadAttributes>, { /// Creates a new builder with the required parameters. pub fn new(num_nodes: usize, chain_spec: Arc, attributes_generator: F) -> Self { @@ -122,66 +119,71 @@ where reth_node_api::TreeConfig::default() }; - let mut nodes: Vec> = Vec::with_capacity(self.num_nodes); + let mut nodes = (0..self.num_nodes) + .map(async |idx| { + // Create base node config + let base_config = NodeConfig::new(self.chain_spec.clone()) + .with_network(network_config.clone()) + .with_unused_ports() + .with_rpc( + RpcServerArgs::default() + .with_unused_ports() + .with_http() + .with_http_api(RpcModuleSelection::All), + ); + + // Apply node config modifier if present + let node_config = if let Some(modifier) = &self.node_config_modifier { + modifier(base_config) + } else { + base_config + }; + + let span = span!(Level::INFO, "node", idx); + let node = N::default(); + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) + .testing_node(exec.clone()) + .with_types_and_provider::>() + .with_components(node.components_builder()) + .with_add_ons(node.add_ons()) + .launch_with_fn(|builder| { + let launcher = EngineNodeLauncher::new( + builder.task_executor().clone(), + builder.config().datadir(), + tree_config.clone(), + ); + builder.launch_with(launcher) + }) + .instrument(span) + .await?; + + let node = NodeTestContext::new(node, self.attributes_generator).await?; + let genesis_number = self.chain_spec.genesis_header().number(); + let genesis = node.block_hash(genesis_number); + node.update_forkchoice(genesis, genesis).await?; + + eyre::Ok(node) + }) + .collect::>() + .await?; for idx in 0..self.num_nodes { - // Create base node config - let base_config = NodeConfig::new(self.chain_spec.clone()) - .with_network(network_config.clone()) - .with_unused_ports() - .with_rpc( - RpcServerArgs::default() - .with_unused_ports() - .with_http() - .with_http_api(RpcModuleSelection::All), - ); - - // Apply node config modifier if present - let node_config = if let Some(modifier) = &self.node_config_modifier { - modifier(base_config) - } else { - base_config - }; - - let span = span!(Level::INFO, "node", idx); - let _enter = span.enter(); - let node = N::default(); - let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) - .testing_node(exec.clone()) - .with_types_and_provider::>() - .with_components(node.components_builder()) - .with_add_ons(node.add_ons()) - .launch_with_fn(|builder| { - let launcher 
= EngineNodeLauncher::new( - builder.task_executor().clone(), - builder.config().datadir(), - tree_config.clone(), - ); - builder.launch_with(launcher) - }) - .await?; - - let mut node = NodeTestContext::new(node, self.attributes_generator).await?; - - let genesis = node.block_hash(0); - node.update_forkchoice(genesis, genesis).await?; - + let (prev, current) = nodes.split_at_mut(idx); + let current = current.first_mut().unwrap(); // Connect nodes if requested if self.connect_nodes { - if let Some(previous_node) = nodes.last_mut() { - previous_node.connect(&mut node).await; + if let Some(prev_idx) = idx.checked_sub(1) { + prev[prev_idx].connect(current).await; } // Connect last node with the first if there are more than two if idx + 1 == self.num_nodes && self.num_nodes > 2 && - let Some(first_node) = nodes.first_mut() + let Some(first) = prev.first_mut() { - node.connect(first_node).await; + current.connect(first).await; } } - - nodes.push(node); } Ok((nodes, tasks, Wallet::default().with_chain_id(self.chain_spec.chain().into()))) @@ -196,8 +198,6 @@ where + Sync + Copy + 'static, - LocalPayloadAttributesBuilder: - PayloadAttributesBuilder<::PayloadAttributes>, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("E2ETestSetupBuilder") diff --git a/crates/e2e-test-utils/src/setup_import.rs b/crates/e2e-test-utils/src/setup_import.rs index 81e5a386aa..da171664b6 100644 --- a/crates/e2e-test-utils/src/setup_import.rs +++ b/crates/e2e-test-utils/src/setup_import.rs @@ -110,6 +110,7 @@ pub async fn setup_engine_with_chain_import( // Create database path and static files path let db_path = datadir.join("db"); let static_files_path = datadir.join("static_files"); + let rocksdb_dir_path = datadir.join("rocksdb"); // Initialize the database using init_db (same as CLI import command) // Use the same database arguments as the node will use @@ -125,7 +126,8 @@ pub async fn setup_engine_with_chain_import( db.clone(), chain_spec.clone(), reth_provider::providers::StaticFileProvider::read_write(static_files_path.clone())?, - ); + reth_provider::providers::RocksDBProvider::builder(rocksdb_dir_path).build().unwrap(), + )?; // Initialize genesis if needed reth_db_common::init::init_genesis(&provider_factory)?; @@ -311,6 +313,7 @@ mod tests { std::fs::create_dir_all(&datadir).unwrap(); let db_path = datadir.join("db"); let static_files_path = datadir.join("static_files"); + let rocksdb_dir_path = datadir.join("rocksdb"); // Import the chain { @@ -324,7 +327,11 @@ mod tests { chain_spec.clone(), reth_provider::providers::StaticFileProvider::read_write(static_files_path.clone()) .unwrap(), - ); + reth_provider::providers::RocksDBProvider::builder(rocksdb_dir_path.clone()) + .build() + .unwrap(), + ) + .expect("failed to create provider factory"); // Initialize genesis reth_db_common::init::init_genesis(&provider_factory).unwrap(); @@ -384,7 +391,11 @@ mod tests { chain_spec.clone(), reth_provider::providers::StaticFileProvider::read_only(static_files_path, false) .unwrap(), - ); + reth_provider::providers::RocksDBProvider::builder(rocksdb_dir_path) + .build() + .unwrap(), + ) + .expect("failed to create provider factory"); let provider = provider_factory.database_provider_ro().unwrap(); @@ -470,12 +481,17 @@ mod tests { // Create static files path let static_files_path = datadir.join("static_files"); + // Create rocksdb path + let rocksdb_dir_path = datadir.join("rocksdb"); + // Create a provider factory let provider_factory: ProviderFactory = ProviderFactory::new( 
db.clone(), chain_spec.clone(), reth_provider::providers::StaticFileProvider::read_write(static_files_path).unwrap(), - ); + reth_provider::providers::RocksDBProvider::builder(rocksdb_dir_path).build().unwrap(), + ) + .expect("failed to create provider factory"); // Initialize genesis reth_db_common::init::init_genesis(&provider_factory).unwrap(); diff --git a/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs b/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs index 92bbba93b8..fe9e9133ae 100644 --- a/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs +++ b/crates/e2e-test-utils/src/testsuite/actions/produce_blocks.rs @@ -98,31 +98,66 @@ where finalized_block_hash: parent_hash, }; - let fcu_result = EngineApiClient::::fork_choice_updated_v2( + // Try v2 first for backwards compatibility, fall back to v3 on error. + match EngineApiClient::::fork_choice_updated_v2( &engine_client, fork_choice_state, Some(self.payload_attributes.clone()), ) - .await?; - - debug!("FCU result: {:?}", fcu_result); - - // check if we got a valid payload ID - match fcu_result.payload_status.status { - PayloadStatusEnum::Valid => { - if let Some(payload_id) = fcu_result.payload_id { - debug!("Got payload ID: {payload_id}"); - - // get the payload that was built - let _engine_payload = - EngineApiClient::::get_payload_v2(&engine_client, payload_id) + .await + { + Ok(fcu_result) => { + debug!(?fcu_result, "FCU v2 result"); + match fcu_result.payload_status.status { + PayloadStatusEnum::Valid => { + if let Some(payload_id) = fcu_result.payload_id { + debug!(id=%payload_id, "Got payload"); + let _engine_payload = EngineApiClient::::get_payload_v2( + &engine_client, + payload_id, + ) .await?; - Ok(()) - } else { - Err(eyre::eyre!("No payload ID returned from forkchoiceUpdated")) + Ok(()) + } else { + Err(eyre::eyre!("No payload ID returned from forkchoiceUpdated")) + } + } + _ => Err(eyre::eyre!( + "Payload status not valid: {:?}", + fcu_result.payload_status + ))?, + } + } + Err(_) => { + // If v2 fails due to unsupported fork/missing fields, try v3 + let fcu_result = EngineApiClient::::fork_choice_updated_v3( + &engine_client, + fork_choice_state, + Some(self.payload_attributes.clone()), + ) + .await?; + + debug!(?fcu_result, "FCU v3 result"); + match fcu_result.payload_status.status { + PayloadStatusEnum::Valid => { + if let Some(payload_id) = fcu_result.payload_id { + debug!(id=%payload_id, "Got payload"); + let _engine_payload = EngineApiClient::::get_payload_v3( + &engine_client, + payload_id, + ) + .await?; + Ok(()) + } else { + Err(eyre::eyre!("No payload ID returned from forkchoiceUpdated")) + } + } + _ => Err(eyre::eyre!( + "Payload status not valid: {:?}", + fcu_result.payload_status + )), } } - _ => Err(eyre::eyre!("Payload status not valid: {:?}", fcu_result.payload_status)), } }) } diff --git a/crates/e2e-test-utils/src/testsuite/mod.rs b/crates/e2e-test-utils/src/testsuite/mod.rs index 79e906ef59..17d04f64ea 100644 --- a/crates/e2e-test-utils/src/testsuite/mod.rs +++ b/crates/e2e-test-utils/src/testsuite/mod.rs @@ -2,13 +2,12 @@ use crate::{ testsuite::actions::{Action, ActionBox}, - NodeBuilderHelper, PayloadAttributesBuilder, + NodeBuilderHelper, }; use alloy_primitives::B256; use eyre::Result; use jsonrpsee::http_client::HttpClient; -use reth_engine_local::LocalPayloadAttributesBuilder; -use reth_node_api::{EngineTypes, NodeTypes, PayloadTypes}; +use reth_node_api::{EngineTypes, PayloadTypes}; use reth_payload_builder::PayloadId; use std::{collections::HashMap, 
marker::PhantomData}; pub mod actions; @@ -349,9 +348,6 @@ where pub async fn run(mut self) -> Result<()> where N: NodeBuilderHelper, - LocalPayloadAttributesBuilder: PayloadAttributesBuilder< - <::Payload as PayloadTypes>::PayloadAttributes, - >, { let mut setup = self.setup.take(); diff --git a/crates/e2e-test-utils/src/testsuite/setup.rs b/crates/e2e-test-utils/src/testsuite/setup.rs index 94f661753b..e7a57e7075 100644 --- a/crates/e2e-test-utils/src/testsuite/setup.rs +++ b/crates/e2e-test-utils/src/testsuite/setup.rs @@ -1,15 +1,11 @@ //! Test setup utilities for configuring the initial state. -use crate::{ - setup_engine_with_connection, testsuite::Environment, NodeBuilderHelper, - PayloadAttributesBuilder, -}; +use crate::{setup_engine_with_connection, testsuite::Environment, NodeBuilderHelper}; use alloy_eips::BlockNumberOrTag; use alloy_primitives::B256; use alloy_rpc_types_engine::{ForkchoiceState, PayloadAttributes}; use eyre::{eyre, Result}; use reth_chainspec::ChainSpec; -use reth_engine_local::LocalPayloadAttributesBuilder; use reth_ethereum_primitives::Block; use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState}; use reth_node_api::{EngineTypes, NodeTypes, PayloadTypes, TreeConfig}; @@ -138,28 +134,19 @@ where ) -> Result<()> where N: NodeBuilderHelper, - LocalPayloadAttributesBuilder: PayloadAttributesBuilder< - <::Payload as PayloadTypes>::PayloadAttributes, - >, { // Note: this future is quite large so we box it - Box::pin(self.apply_with_import_::(env, rlp_path)).await + Box::pin(self.apply_with_import_(env, rlp_path)).await } /// Apply setup using pre-imported chain data from RLP file - async fn apply_with_import_( + async fn apply_with_import_( &mut self, env: &mut Environment, rlp_path: &Path, - ) -> Result<()> - where - N: NodeBuilderHelper, - LocalPayloadAttributesBuilder: PayloadAttributesBuilder< - <::Payload as PayloadTypes>::PayloadAttributes, - >, - { + ) -> Result<()> { // Create nodes with imported chain data - let import_result = self.create_nodes_with_import::(rlp_path).await?; + let import_result = self.create_nodes_with_import(rlp_path).await?; // Extract node clients let mut node_clients = Vec::new(); @@ -186,9 +173,6 @@ where pub async fn apply(&mut self, env: &mut Environment) -> Result<()> where N: NodeBuilderHelper, - LocalPayloadAttributesBuilder: PayloadAttributesBuilder< - <::Payload as PayloadTypes>::PayloadAttributes, - >, { // Note: this future is quite large so we box it Box::pin(self.apply_::(env)).await @@ -198,9 +182,6 @@ where async fn apply_(&mut self, env: &mut Environment) -> Result<()> where N: NodeBuilderHelper, - LocalPayloadAttributesBuilder: PayloadAttributesBuilder< - <::Payload as PayloadTypes>::PayloadAttributes, - >, { // If import_rlp_path is set, use apply_with_import instead if let Some(rlp_path) = self.import_rlp_path.take() { @@ -259,16 +240,10 @@ where /// Note: Currently this only supports `EthereumNode` due to the import process /// being Ethereum-specific. The generic parameter N is kept for consistency /// with other methods but is not used. 
- async fn create_nodes_with_import( + async fn create_nodes_with_import( &self, rlp_path: &Path, - ) -> Result - where - N: NodeBuilderHelper, - LocalPayloadAttributesBuilder: PayloadAttributesBuilder< - <::Payload as PayloadTypes>::PayloadAttributes, - >, - { + ) -> Result { let chain_spec = self.chain_spec.clone().ok_or_else(|| eyre!("Chain specification is required"))?; @@ -301,9 +276,6 @@ where + use where N: NodeBuilderHelper, - LocalPayloadAttributesBuilder: PayloadAttributesBuilder< - <::Payload as PayloadTypes>::PayloadAttributes, - >, { move |timestamp| { let attributes = PayloadAttributes { diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index d00f3b8287..d2437eae8f 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -5,7 +5,7 @@ use pretty_assertions::Comparison; use reth_engine_primitives::InvalidBlockHook; use reth_evm::{execute::Executor, ConfigureEvm}; use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedHeader}; -use reth_provider::{BlockExecutionOutput, StateProvider, StateProviderFactory}; +use reth_provider::{BlockExecutionOutput, StateProvider, StateProviderBox, StateProviderFactory}; use reth_revm::{ database::StateProviderDatabase, db::{BundleState, State}, @@ -114,7 +114,7 @@ fn sort_bundle_state_for_comparison(bundle_state: &BundleState) -> BundleStateSo /// Extracts execution data including codes, preimages, and hashed state from database fn collect_execution_data( - mut db: State>>, + mut db: State>, ) -> eyre::Result { let bundle_state = db.take_bundle(); let mut codes = BTreeMap::new(); @@ -278,7 +278,7 @@ where let bundle_state_sorted = sort_bundle_state_for_comparison(re_executed_state); let output_state_sorted = sort_bundle_state_for_comparison(original_state); let filename = format!("{}.bundle_state.diff", block_prefix); - let diff_path = self.save_diff(filename, &bundle_state_sorted, &output_state_sorted)?; + let diff_path = self.save_diff(filename, &output_state_sorted, &bundle_state_sorted)?; warn!( target: "engine::invalid_block_hooks::witness", @@ -308,13 +308,13 @@ where if let Some((original_updates, original_root)) = trie_updates { if re_executed_root != original_root { let filename = format!("{}.state_root.diff", block_prefix); - let diff_path = self.save_diff(filename, &re_executed_root, &original_root)?; + let diff_path = self.save_diff(filename, &original_root, &re_executed_root)?; warn!(target: "engine::invalid_block_hooks::witness", ?original_root, ?re_executed_root, diff_path = %diff_path.display(), "State root mismatch after re-execution"); } if re_executed_root != block.state_root() { let filename = format!("{}.header_state_root.diff", block_prefix); - let diff_path = self.save_diff(filename, &re_executed_root, &block.state_root())?; + let diff_path = self.save_diff(filename, &block.state_root(), &re_executed_root)?; warn!(target: "engine::invalid_block_hooks::witness", header_state_root=?block.state_root(), ?re_executed_root, diff_path = %diff_path.display(), "Re-executed state root does not match block state root"); } @@ -530,9 +530,7 @@ mod tests { // Create a State with StateProviderTest let state_provider = StateProviderTest::default(); let mut state = State::builder() - .with_database(StateProviderDatabase::new( - Box::new(state_provider) as Box - )) + .with_database(StateProviderDatabase::new(Box::new(state_provider) as StateProviderBox)) .with_bundle_update() .build(); diff --git 
a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index dd708dee90..8bf9e28bcb 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -15,6 +15,7 @@ reth-engine-primitives = { workspace = true, features = ["std"] } reth-ethereum-engine-primitives.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true +reth-primitives-traits.workspace = true reth-storage-api.workspace = true reth-transaction-pool.workspace = true @@ -43,4 +44,5 @@ op = [ "dep:op-alloy-rpc-types-engine", "dep:reth-optimism-chainspec", "reth-payload-primitives/op", + "reth-primitives-traits/op", ] diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index d6298502fb..67001ee73e 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -1,6 +1,5 @@ //! Contains the implementation of the mining mode for the local engine. -use alloy_consensus::BlockHeader; use alloy_primitives::{TxHash, B256}; use alloy_rpc_types_engine::ForkchoiceState; use eyre::OptionExt; @@ -10,6 +9,7 @@ use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{ BuiltPayload, EngineApiMessageVersion, PayloadAttributesBuilder, PayloadKind, PayloadTypes, }; +use reth_primitives_traits::{HeaderTy, SealedHeaderFor}; use reth_storage_api::BlockReader; use reth_transaction_pool::TransactionPool; use std::{ @@ -17,7 +17,7 @@ use std::{ future::Future, pin::Pin, task::{Context, Poll}, - time::{Duration, UNIX_EPOCH}, + time::Duration, }; use tokio::time::Interval; use tokio_stream::wrappers::ReceiverStream; @@ -106,8 +106,8 @@ pub struct LocalMiner { mode: MiningMode, /// The payload builder for the engine payload_builder: PayloadBuilderHandle, - /// Timestamp for the next block. - last_timestamp: u64, + /// Latest block in the chain so far. + last_header: SealedHeaderFor<::Primitives>, /// Stores latest mined blocks. last_block_hashes: VecDeque, } @@ -115,18 +115,21 @@ pub struct LocalMiner { impl LocalMiner where T: PayloadTypes, - B: PayloadAttributesBuilder<::PayloadAttributes>, + B: PayloadAttributesBuilder< + T::PayloadAttributes, + HeaderTy<::Primitives>, + >, Pool: TransactionPool + Unpin, { /// Spawns a new [`LocalMiner`] with the given parameters. pub fn new( - provider: impl BlockReader, + provider: impl BlockReader
::Primitives>>, payload_attributes_builder: B, to_engine: ConsensusEngineHandle, mode: MiningMode, payload_builder: PayloadBuilderHandle, ) -> Self { - let latest_header = + let last_header = provider.sealed_header(provider.best_block_number().unwrap()).unwrap().unwrap(); Self { @@ -134,8 +137,8 @@ where to_engine, mode, payload_builder, - last_timestamp: latest_header.timestamp(), - last_block_hashes: VecDeque::from([latest_header.hash()]), + last_block_hashes: VecDeque::from([last_header.hash()]), + last_header, } } @@ -193,19 +196,11 @@ where /// Generates payload attributes for a new block, passes them to FCU and inserts built payload /// through newPayload. async fn advance(&mut self) -> eyre::Result<()> { - let timestamp = std::cmp::max( - self.last_timestamp.saturating_add(1), - std::time::SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("cannot be earlier than UNIX_EPOCH") - .as_secs(), - ); - let res = self .to_engine .fork_choice_updated( self.forkchoice_state(), - Some(self.payload_attributes_builder.build(timestamp)), + Some(self.payload_attributes_builder.build(&self.last_header)), EngineApiMessageVersion::default(), ) .await?; @@ -222,8 +217,7 @@ where eyre::bail!("No payload") }; - let block = payload.block(); - + let header = payload.block().sealed_header().clone(); let payload = T::block_to_payload(payload.block().clone()); let res = self.to_engine.new_payload(payload).await?; @@ -231,8 +225,8 @@ where eyre::bail!("Invalid payload") } - self.last_timestamp = timestamp; - self.last_block_hashes.push_back(block.hash()); + self.last_block_hashes.push_back(header.hash()); + self.last_header = header; // ensure we keep at most 64 blocks if self.last_block_hashes.len() > 64 { self.last_block_hashes.pop_front(); diff --git a/crates/engine/local/src/payload.rs b/crates/engine/local/src/payload.rs index 34deaf3e10..dc3be02f17 100644 --- a/crates/engine/local/src/payload.rs +++ b/crates/engine/local/src/payload.rs @@ -1,10 +1,12 @@ //! The implementation of the [`PayloadAttributesBuilder`] for the //! [`LocalMiner`](super::LocalMiner). +use alloy_consensus::BlockHeader; use alloy_primitives::{Address, B256}; -use reth_chainspec::EthereumHardforks; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_ethereum_engine_primitives::EthPayloadAttributes; use reth_payload_primitives::PayloadAttributesBuilder; +use reth_primitives_traits::SealedHeader; use std::sync::Arc; /// The attributes builder for local Ethereum payload. @@ -13,21 +15,36 @@ use std::sync::Arc; pub struct LocalPayloadAttributesBuilder { /// The chainspec pub chain_spec: Arc, + + /// Whether to enforce increasing timestamp. + pub enforce_increasing_timestamp: bool, } impl LocalPayloadAttributesBuilder { /// Creates a new instance of the builder. pub const fn new(chain_spec: Arc) -> Self { - Self { chain_spec } + Self { chain_spec, enforce_increasing_timestamp: true } + } + + /// Creates a new instance of the builder without enforcing increasing timestamps. 
+ pub fn without_increasing_timestamp(self) -> Self { + Self { enforce_increasing_timestamp: false, ..self } } } -impl PayloadAttributesBuilder +impl PayloadAttributesBuilder for LocalPayloadAttributesBuilder where - ChainSpec: Send + Sync + EthereumHardforks + 'static, + ChainSpec: EthChainSpec + EthereumHardforks + 'static, { - fn build(&self, timestamp: u64) -> EthPayloadAttributes { + fn build(&self, parent: &SealedHeader) -> EthPayloadAttributes { + let mut timestamp = + std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs(); + + if self.enforce_increasing_timestamp { + timestamp = std::cmp::max(parent.timestamp().saturating_add(1), timestamp); + } + EthPayloadAttributes { timestamp, prev_randao: B256::random(), @@ -45,14 +62,18 @@ where } #[cfg(feature = "op")] -impl PayloadAttributesBuilder +impl + PayloadAttributesBuilder for LocalPayloadAttributesBuilder where - ChainSpec: Send + Sync + EthereumHardforks + 'static, + ChainSpec: EthChainSpec + EthereumHardforks + 'static, { - fn build(&self, timestamp: u64) -> op_alloy_rpc_types_engine::OpPayloadAttributes { + fn build( + &self, + parent: &SealedHeader, + ) -> op_alloy_rpc_types_engine::OpPayloadAttributes { op_alloy_rpc_types_engine::OpPayloadAttributes { - payload_attributes: self.build(timestamp), + payload_attributes: self.build(parent), // Add dummy system transaction transactions: Some(vec![ reth_optimism_chainspec::constants::TX_SET_L1_BLOCK_OP_MAINNET_BLOCK_124665056 diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index 0b9b7d9f82..10265b9a43 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -1,5 +1,7 @@ //! Engine tree configuration. +use alloy_eips::merge::EPOCH_SLOTS; + /// Triggers persistence when the number of canonical blocks in memory exceeds this threshold. pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 2; @@ -30,7 +32,7 @@ fn default_account_worker_count() -> usize { } /// The size of proof targets chunk to spawn in one multiproof calculation. -pub const DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE: usize = 10; +pub const DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE: usize = 60; /// Default number of reserved CPU cores for non-reth processes. /// @@ -40,7 +42,7 @@ pub const DEFAULT_RESERVED_CPU_CORES: usize = 1; /// Default maximum concurrency for prewarm task. pub const DEFAULT_PREWARM_MAX_CONCURRENCY: usize = 16; -const DEFAULT_BLOCK_BUFFER_LIMIT: u32 = 256; +const DEFAULT_BLOCK_BUFFER_LIMIT: u32 = EPOCH_SLOTS as u32 * 2; const DEFAULT_MAX_INVALID_HEADER_CACHE_LENGTH: u32 = 256; const DEFAULT_MAX_EXECUTE_BLOCK_BATCH_SIZE: usize = 4; const DEFAULT_CROSS_BLOCK_CACHE_SIZE: u64 = 4 * 1024 * 1024 * 1024; @@ -89,6 +91,8 @@ pub struct TreeConfig { /// Whether to always compare trie updates from the state root task to the trie updates from /// the regular state root calculation. always_compare_trie_updates: bool, + /// Whether to disable state cache. + disable_state_cache: bool, /// Whether to disable parallel prewarming. disable_prewarming: bool, /// Whether to disable the parallel sparse trie state root algorithm. 
@@ -143,6 +147,7 @@ impl Default for TreeConfig { max_execute_block_batch_size: DEFAULT_MAX_EXECUTE_BLOCK_BATCH_SIZE, legacy_state_root: false, always_compare_trie_updates: false, + disable_state_cache: false, disable_prewarming: false, disable_parallel_sparse_trie: false, state_provider_metrics: false, @@ -173,6 +178,7 @@ impl TreeConfig { max_execute_block_batch_size: usize, legacy_state_root: bool, always_compare_trie_updates: bool, + disable_state_cache: bool, disable_prewarming: bool, disable_parallel_sparse_trie: bool, state_provider_metrics: bool, @@ -197,6 +203,7 @@ impl TreeConfig { max_execute_block_batch_size, legacy_state_root, always_compare_trie_updates, + disable_state_cache, disable_prewarming, disable_parallel_sparse_trie, state_provider_metrics, @@ -271,7 +278,12 @@ impl TreeConfig { self.disable_parallel_sparse_trie } - /// Returns whether or not parallel prewarming should be used. + /// Returns whether or not state cache is disabled. + pub const fn disable_state_cache(&self) -> bool { + self.disable_state_cache + } + + /// Returns whether or not parallel prewarming is disabled. pub const fn disable_prewarming(&self) -> bool { self.disable_prewarming } @@ -363,6 +375,12 @@ impl TreeConfig { self } + /// Setter for whether to disable state cache. + pub const fn without_state_cache(mut self, disable_state_cache: bool) -> Self { + self.disable_state_cache = disable_state_cache; + self + } + /// Setter for whether to disable parallel prewarming. pub const fn without_prewarming(mut self, disable_prewarming: bool) -> Self { self.disable_prewarming = disable_prewarming; diff --git a/crates/engine/primitives/src/event.rs b/crates/engine/primitives/src/event.rs index 8cced03152..8f19970eb6 100644 --- a/crates/engine/primitives/src/event.rs +++ b/crates/engine/primitives/src/event.rs @@ -4,7 +4,6 @@ use crate::ForkchoiceStatus; use alloc::boxed::Box; use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; -use alloy_primitives::B256; use alloy_rpc_types_engine::ForkchoiceState; use core::{ fmt::{Display, Formatter, Result}, @@ -33,8 +32,6 @@ pub enum ConsensusEngineEvent { CanonicalChainCommitted(Box>, Duration), /// The consensus engine processed an invalid block. InvalidBlock(Box>), - /// The consensus engine is involved in live sync, and has specific progress - LiveSyncProgress(ConsensusEngineLiveSyncProgress), } impl ConsensusEngineEvent { @@ -73,24 +70,9 @@ where Self::InvalidBlock(block) => { write!(f, "InvalidBlock({:?})", block.num_hash()) } - Self::LiveSyncProgress(progress) => { - write!(f, "LiveSyncProgress({progress:?})") - } Self::BlockReceived(num_hash) => { write!(f, "BlockReceived({num_hash:?})") } } } } - -/// Progress of the consensus engine during live sync. -#[derive(Clone, Debug)] -pub enum ConsensusEngineLiveSyncProgress { - /// The consensus engine is downloading blocks from the network. - DownloadingBlocks { - /// The number of blocks remaining to download. - remaining_blocks: u64, - /// The target block hash to download. 
- target: B256, - }, -} diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index 196a3baa18..a16b5b1c45 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -17,12 +17,13 @@ use reth_payload_primitives::{ EngineApiMessageVersion, EngineObjectValidationError, InvalidPayloadAttributesError, NewPayloadError, PayloadAttributes, PayloadOrAttributes, PayloadTypes, }; -use reth_primitives_traits::{Block, RecoveredBlock}; +use reth_primitives_traits::{Block, RecoveredBlock, SealedBlock}; use reth_trie_common::HashedPostState; use serde::{de::DeserializeOwned, Serialize}; // Re-export [`ExecutionPayload`] moved to `reth_payload_primitives` -pub use reth_evm::{ConfigureEngineEvm, ExecutableTxIterator}; +#[cfg(feature = "std")] +pub use reth_evm::{ConfigureEngineEvm, ExecutableTxIterator, ExecutableTxTuple}; pub use reth_payload_primitives::ExecutionPayload; mod error; @@ -131,6 +132,21 @@ pub trait PayloadValidator: Send + Sync + Unpin + 'static { /// The block type used by the engine. type Block: Block; + /// Converts the given payload into a sealed block without recovering signatures. + /// + /// This function validates the payload and converts it into a [`SealedBlock`] which contains + /// the block hash but does not perform signature recovery on transactions. + /// + /// This is more efficient than [`Self::ensure_well_formed_payload`] when signature recovery + /// is not needed immediately or will be performed later. + /// + /// Implementers should ensure that the checks are done in the order that conforms with the + /// engine-API specification. + fn convert_payload_to_block( + &self, + payload: Types::ExecutionData, + ) -> Result, NewPayloadError>; + /// Ensures that the given payload does not violate any consensus rules that concern the block's /// layout. /// @@ -142,7 +158,10 @@ pub trait PayloadValidator: Send + Sync + Unpin + 'static { fn ensure_well_formed_payload( &self, payload: Types::ExecutionData, - ) -> Result, NewPayloadError>; + ) -> Result, NewPayloadError> { + let sealed_block = self.convert_payload_to_block(payload)?; + sealed_block.try_recover().map_err(|e| NewPayloadError::Other(e.into())) + } /// Verifies payload post-execution w.r.t. hashed state updates. 
fn validate_block_post_execution_with_hashed_state( diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index ba99898a84..e7f3c46911 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -16,7 +16,7 @@ reth-chain-state.workspace = true reth-chainspec = { workspace = true, optional = true } reth-consensus.workspace = true reth-db.workspace = true -reth-engine-primitives.workspace = true +reth-engine-primitives = { workspace = true, features = ["std"] } reth-errors.workspace = true reth-execution-types.workspace = true reth-evm = { workspace = true, features = ["metrics"] } @@ -29,6 +29,7 @@ reth-provider.workspace = true reth-prune.workspace = true reth-revm.workspace = true reth-stages-api.workspace = true +reth-storage-errors.workspace = true reth-tasks.workspace = true reth-trie-parallel.workspace = true reth-trie-sparse = { workspace = true, features = ["std", "metrics"] } @@ -39,6 +40,7 @@ reth-trie.workspace = true alloy-evm.workspace = true alloy-consensus.workspace = true alloy-eips.workspace = true +alloy-eip7928.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true alloy-rpc-types-engine.workspace = true @@ -51,6 +53,7 @@ futures.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["rt", "rt-multi-thread", "sync", "macros"] } mini-moka = { workspace = true, features = ["sync"] } +moka = { workspace = true, features = ["sync"] } smallvec.workspace = true # metrics diff --git a/crates/engine/tree/benches/state_root_task.rs b/crates/engine/tree/benches/state_root_task.rs index e13ad26bc6..cfd17a8ecf 100644 --- a/crates/engine/tree/benches/state_root_task.rs +++ b/crates/engine/tree/benches/state_root_task.rs @@ -227,21 +227,22 @@ fn bench_state_root(c: &mut Criterion) { }, |(genesis_hash, mut payload_processor, provider, state_updates)| { black_box({ - let mut handle = payload_processor - .spawn( - Default::default(), - core::iter::empty::< + let mut handle = payload_processor.spawn( + Default::default(), + ( + Vec::< Result< Recovered, core::convert::Infallible, >, - >(), - StateProviderBuilder::new(provider.clone(), genesis_hash, None), - OverlayStateProviderFactory::new(provider), - &TreeConfig::default(), - ) - .map_err(|(err, ..)| err) - .expect("failed to spawn payload processor"); + >::new(), + std::convert::identity, + ), + StateProviderBuilder::new(provider.clone(), genesis_hash, None), + OverlayStateProviderFactory::new(provider), + &TreeConfig::default(), + None, + ); let mut state_hook = handle.state_hook(); diff --git a/crates/engine/tree/docs/root.md b/crates/engine/tree/docs/root.md index a5b9bcb1d4..a76d09b484 100644 --- a/crates/engine/tree/docs/root.md +++ b/crates/engine/tree/docs/root.md @@ -128,12 +128,12 @@ we send them along with the state updates to the [Sparse Trie Task](#sparse-trie ### Finishing the calculation -Once all transactions are executed, the [Engine](#engine) sends a `StateRootMessage::FinishStateUpdates` message +Once all transactions are executed, the [Engine](#engine) sends a `StateRootMessage::FinishedStateUpdates` message to the State Root Task, marking the end of receiving state updates. Every time we receive a new proof from the [MultiProof Manager](#multiproof-manager), we also check the following conditions: -1. Are all updates received? (`StateRootMessage::FinishStateUpdates` was sent) +1. Are all updates received? (`StateRootMessage::FinishedStateUpdates` was sent) 2. Is `ProofSequencer` empty? 
(no proofs are pending for sequencing) 3. Are all proofs that were sent to the [`MultiProofManager::spawn_or_queue`](#multiproof-manager) finished calculating and were sent to the [Sparse Trie Task](#sparse-trie-task)? diff --git a/crates/engine/tree/src/backfill.rs b/crates/engine/tree/src/backfill.rs index 9dad8c32a5..fb0a8b0d1c 100644 --- a/crates/engine/tree/src/backfill.rs +++ b/crates/engine/tree/src/backfill.rs @@ -47,7 +47,7 @@ impl BackfillSyncState { } /// Backfill sync mode functionality. -pub trait BackfillSync: Send + Sync { +pub trait BackfillSync: Send { /// Performs a backfill action. fn on_action(&mut self, action: BackfillAction); diff --git a/crates/engine/tree/src/chain.rs b/crates/engine/tree/src/chain.rs index 3e6207c9d4..35f04176a4 100644 --- a/crates/engine/tree/src/chain.rs +++ b/crates/engine/tree/src/chain.rs @@ -219,10 +219,19 @@ pub enum HandlerEvent { } /// Internal events issued by the [`ChainOrchestrator`]. -#[derive(Clone, Debug)] +#[derive(Debug)] pub enum FromOrchestrator { /// Invoked when backfill sync finished BackfillSyncFinished(ControlFlow), /// Invoked when backfill sync started BackfillSyncStarted, + /// Gracefully terminate the engine service. + /// + /// When this variant is received, the engine will persist all remaining in-memory blocks + /// to disk before shutting down. Once persistence is complete, a signal is sent through + /// the oneshot channel to notify the caller. + Terminate { + /// Channel to signal termination completion. + tx: tokio::sync::oneshot::Sender<()>, + }, } diff --git a/crates/engine/tree/src/download.rs b/crates/engine/tree/src/download.rs index b7c147e452..5ffc23740c 100644 --- a/crates/engine/tree/src/download.rs +++ b/crates/engine/tree/src/download.rs @@ -9,7 +9,7 @@ use reth_network_p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, BlockClient, }; -use reth_primitives_traits::{Block, RecoveredBlock, SealedBlock}; +use reth_primitives_traits::{Block, SealedBlock}; use std::{ cmp::{Ordering, Reverse}, collections::{binary_heap::PeekMut, BinaryHeap, HashSet, VecDeque}, @@ -44,7 +44,7 @@ pub enum DownloadAction { #[derive(Debug)] pub enum DownloadOutcome { /// Downloaded blocks. - Blocks(Vec>), + Blocks(Vec>), /// New download started. NewDownloadStarted { /// How many blocks are pending in this download. @@ -68,7 +68,7 @@ where inflight_block_range_requests: Vec>, /// Buffered blocks from downloads - this is a min-heap of blocks, using the block number for /// ordering. This means the blocks will be popped from the heap with ascending block numbers. - set_buffered_blocks: BinaryHeap>>, + set_buffered_blocks: BinaryHeap>>, /// Engine download metrics. metrics: BlockDownloaderMetrics, /// Pending events to be emitted. 
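The buffered-blocks field above is a `BinaryHeap` of `Reverse`-wrapped entries whose ordering is the block number, so popping yields blocks in ascending order for in-order processing. A self-contained sketch of that pattern (the `OrderedBlock` type here is a stand-in, not the crate's wrapper):

```rust
use std::{
    cmp::{Ordering, Reverse},
    collections::BinaryHeap,
};

/// Stand-in for the downloader's block wrapper: ordering is by block number only.
#[derive(Debug, PartialEq, Eq)]
struct OrderedBlock {
    number: u64,
    hash: [u8; 32],
}

impl PartialOrd for OrderedBlock {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for OrderedBlock {
    fn cmp(&self, other: &Self) -> Ordering {
        self.number.cmp(&other.number)
    }
}

fn main() {
    // `BinaryHeap` is a max-heap, so wrapping entries in `Reverse` turns it into a
    // min-heap: blocks come out with ascending block numbers.
    let mut buffered: BinaryHeap<Reverse<OrderedBlock>> = BinaryHeap::new();
    for number in [5u64, 3, 9, 3] {
        buffered.push(Reverse(OrderedBlock { number, hash: [0; 32] }));
    }

    let mut drained = Vec::new();
    while let Some(Reverse(block)) = buffered.pop() {
        // duplicates (same number) are deduplicated by peeking ahead in the real code
        drained.push(block.number);
    }
    assert_eq!(drained, vec![3, 3, 5, 9]);
    println!("{drained:?}");
}
```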
@@ -226,15 +226,8 @@ where let mut request = self.inflight_block_range_requests.swap_remove(idx); if let Poll::Ready(blocks) = request.poll_unpin(cx) { trace!(target: "engine::download", len=?blocks.len(), first=?blocks.first().map(|b| b.num_hash()), last=?blocks.last().map(|b| b.num_hash()), "Received full block range, buffering"); - self.set_buffered_blocks.extend( - blocks - .into_iter() - .map(|b| { - let senders = b.senders().unwrap_or_default(); - OrderedRecoveredBlock(RecoveredBlock::new_sealed(b, senders)) - }) - .map(Reverse), - ); + self.set_buffered_blocks + .extend(blocks.into_iter().map(OrderedSealedBlock).map(Reverse)); } else { // still pending self.inflight_block_range_requests.push(request); @@ -248,8 +241,7 @@ where } // drain all unique element of the block buffer if there are any - let mut downloaded_blocks: Vec> = - Vec::with_capacity(self.set_buffered_blocks.len()); + let mut downloaded_blocks = Vec::with_capacity(self.set_buffered_blocks.len()); while let Some(block) = self.set_buffered_blocks.pop() { // peek ahead and pop duplicates while let Some(peek) = self.set_buffered_blocks.peek_mut() { @@ -265,32 +257,31 @@ where } } -/// A wrapper type around [`RecoveredBlock`] that implements the [Ord] +/// A wrapper type around [`SealedBlock`] that implements the [Ord] /// trait by block number. #[derive(Debug, Clone, PartialEq, Eq)] -struct OrderedRecoveredBlock(RecoveredBlock); +struct OrderedSealedBlock(SealedBlock); -impl PartialOrd for OrderedRecoveredBlock { +impl PartialOrd for OrderedSealedBlock { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl Ord for OrderedRecoveredBlock { +impl Ord for OrderedSealedBlock { fn cmp(&self, other: &Self) -> Ordering { self.0.number().cmp(&other.0.number()) } } -impl From> for OrderedRecoveredBlock { +impl From> for OrderedSealedBlock { fn from(block: SealedBlock) -> Self { - let senders = block.senders().unwrap_or_default(); - Self(RecoveredBlock::new_sealed(block, senders)) + Self(block) } } -impl From> for RecoveredBlock { - fn from(value: OrderedRecoveredBlock) -> Self { +impl From> for SealedBlock { + fn from(value: OrderedSealedBlock) -> Self { value.0 } } diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index f08195b205..c2d0c17546 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -11,7 +11,7 @@ use reth_chain_state::ExecutedBlock; use reth_engine_primitives::{BeaconEngineMessage, ConsensusEngineEvent}; use reth_ethereum_primitives::EthPrimitives; use reth_payload_primitives::PayloadTypes; -use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock}; +use reth_primitives_traits::{Block, NodePrimitives, SealedBlock}; use std::{ collections::HashSet, fmt::Display, @@ -307,7 +307,7 @@ pub enum FromEngine { /// Request from the engine. Request(Req), /// Downloaded blocks from the network. 
- DownloadedBlocks(Vec>), + DownloadedBlocks(Vec>), } impl Display for FromEngine { diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index 12482b1a16..a58189591f 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -1,5 +1,4 @@ use crate::metrics::PersistenceMetrics; -use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; use reth_chain_state::ExecutedBlock; use reth_errors::ProviderError; @@ -142,27 +141,23 @@ where &self, blocks: Vec>, ) -> Result, PersistenceError> { - let first_block_hash = blocks.first().map(|b| b.recovered_block.num_hash()); - let last_block_hash = blocks.last().map(|b| b.recovered_block.num_hash()); - debug!(target: "engine::persistence", first=?first_block_hash, last=?last_block_hash, "Saving range of blocks"); + let first_block = blocks.first().map(|b| b.recovered_block.num_hash()); + let last_block = blocks.last().map(|b| b.recovered_block.num_hash()); + debug!(target: "engine::persistence", first=?first_block, last=?last_block, "Saving range of blocks"); let start_time = Instant::now(); - let last_block_hash_num = blocks.last().map(|block| BlockNumHash { - hash: block.recovered_block().hash(), - number: block.recovered_block().header().number(), - }); - if last_block_hash_num.is_some() { + if last_block.is_some() { let provider_rw = self.provider.database_provider_rw()?; provider_rw.save_blocks(blocks)?; provider_rw.commit()?; } - debug!(target: "engine::persistence", first=?first_block_hash, last=?last_block_hash, "Saved range of blocks"); + debug!(target: "engine::persistence", first=?first_block, last=?last_block, "Saved range of blocks"); self.metrics.save_blocks_duration_seconds.record(start_time.elapsed()); - Ok(last_block_hash_num) + Ok(last_block) } } diff --git a/crates/engine/tree/src/tree/block_buffer.rs b/crates/engine/tree/src/tree/block_buffer.rs index 5c16819861..dff6863fd7 100644 --- a/crates/engine/tree/src/tree/block_buffer.rs +++ b/crates/engine/tree/src/tree/block_buffer.rs @@ -1,7 +1,7 @@ use crate::tree::metrics::BlockBufferMetrics; use alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber}; -use reth_primitives_traits::{Block, RecoveredBlock}; +use reth_primitives_traits::{Block, SealedBlock}; use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; /// Contains the tree of pending blocks that cannot be executed due to missing parent. @@ -14,11 +14,11 @@ use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; /// * [`BlockBuffer::remove_old_blocks`] to remove old blocks that precede the finalized number. /// /// Note: Buffer is limited by number of blocks that it can contain and eviction of the block -/// is done by last recently used block. +/// is done in FIFO order (oldest inserted block is evicted first). #[derive(Debug)] pub struct BlockBuffer { /// All blocks in the buffer stored by their block hash. - pub(crate) blocks: HashMap>, + pub(crate) blocks: HashMap>, /// Map of any parent block hash (even the ones not currently in the buffer) /// to the buffered children. /// Allows connecting buffered blocks by parent. @@ -49,12 +49,12 @@ impl BlockBuffer { } /// Return reference to the requested block. - pub fn block(&self, hash: &BlockHash) -> Option<&RecoveredBlock> { + pub fn block(&self, hash: &BlockHash) -> Option<&SealedBlock> { self.blocks.get(hash) } /// Return a reference to the lowest ancestor of the given block in the buffer. 
- pub fn lowest_ancestor(&self, hash: &BlockHash) -> Option<&RecoveredBlock> { + pub fn lowest_ancestor(&self, hash: &BlockHash) -> Option<&SealedBlock> { let mut current_block = self.blocks.get(hash)?; while let Some(parent) = self.blocks.get(¤t_block.parent_hash()) { current_block = parent; @@ -63,7 +63,7 @@ impl BlockBuffer { } /// Insert a correct block inside the buffer. - pub fn insert_block(&mut self, block: RecoveredBlock) { + pub fn insert_block(&mut self, block: SealedBlock) { let hash = block.hash(); self.parent_to_child.entry(block.parent_hash()).or_default().insert(hash); @@ -87,10 +87,7 @@ impl BlockBuffer { /// /// Note: that order of returned blocks is important and the blocks with lower block number /// in the chain will come first so that they can be executed in the correct order. - pub fn remove_block_with_children( - &mut self, - parent_hash: &BlockHash, - ) -> Vec> { + pub fn remove_block_with_children(&mut self, parent_hash: &BlockHash) -> Vec> { let removed = self .remove_block(parent_hash) .into_iter() @@ -149,7 +146,7 @@ impl BlockBuffer { /// This method will only remove the block if it's present inside `self.blocks`. /// The block might be missing from other collections, the method will only ensure that it has /// been removed. - fn remove_block(&mut self, hash: &BlockHash) -> Option> { + fn remove_block(&mut self, hash: &BlockHash) -> Option> { let block = self.blocks.remove(hash)?; self.remove_from_earliest_blocks(block.number(), hash); self.remove_from_parent(block.parent_hash(), hash); @@ -158,7 +155,7 @@ impl BlockBuffer { } /// Remove all children and their descendants for the given blocks and return them. - fn remove_children(&mut self, parent_hashes: Vec) -> Vec> { + fn remove_children(&mut self, parent_hashes: Vec) -> Vec> { // remove all parent child connection and all the child children blocks that are connected // to the discarded parent blocks. let mut remove_parent_children = parent_hashes; @@ -184,7 +181,6 @@ mod tests { use super::*; use alloy_eips::BlockNumHash; use alloy_primitives::BlockHash; - use reth_primitives_traits::RecoveredBlock; use reth_testing_utils::generators::{self, random_block, BlockParams, Rng}; use std::collections::HashMap; @@ -193,10 +189,8 @@ mod tests { rng: &mut R, number: u64, parent: BlockHash, - ) -> RecoveredBlock { - let block = - random_block(rng, number, BlockParams { parent: Some(parent), ..Default::default() }); - block.try_recover().unwrap() + ) -> SealedBlock { + random_block(rng, number, BlockParams { parent: Some(parent), ..Default::default() }) } /// Assert that all buffer collections have the same data length. @@ -216,7 +210,7 @@ mod tests { /// Assert that the block was removed from all buffer collections. fn assert_block_removal( buffer: &BlockBuffer, - block: &RecoveredBlock, + block: &SealedBlock, ) { assert!(!buffer.blocks.contains_key(&block.hash())); assert!(buffer diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs index fd9999b9eb..db36123abc 100644 --- a/crates/engine/tree/src/tree/cached_state.rs +++ b/crates/engine/tree/src/tree/cached_state.rs @@ -31,6 +31,9 @@ pub(crate) struct CachedStateProvider { /// Metrics for the cached state provider metrics: CachedStateMetrics, + + /// If prewarm enabled we populate every cache miss + prewarm: bool, } impl CachedStateProvider @@ -39,12 +42,32 @@ where { /// Creates a new [`CachedStateProvider`] from an [`ExecutionCache`], state provider, and /// [`CachedStateMetrics`]. 
- pub(crate) const fn new_with_caches( + pub(crate) const fn new( state_provider: S, caches: ExecutionCache, metrics: CachedStateMetrics, ) -> Self { - Self { state_provider, caches, metrics } + Self { state_provider, caches, metrics, prewarm: false } + } +} + +impl CachedStateProvider { + /// Enables pre-warm mode so that every cache miss is populated. + /// + /// This is only relevant for pre-warm transaction execution with the intention to pre-populate + /// the cache with data for regular block execution. During regular block execution the + /// cache doesn't need to be populated because the actual EVM database + /// [`State`](revm::database::State) also caches internally during block execution and the cache + /// is then updated after the block with the entire [`BundleState`] output of that block which + /// contains all accessed accounts, code, and storage. See also [`ExecutionCache::insert_state`]. + pub(crate) const fn prewarm(mut self) -> Self { + self.prewarm = true; + self + } + + /// Returns whether this provider should pre-warm cache misses. + const fn is_prewarm(&self) -> bool { + self.prewarm } } @@ -123,7 +146,10 @@ impl AccountReader for CachedStateProvider { self.metrics.account_cache_misses.increment(1); let res = self.state_provider.basic_account(address)?; - self.caches.account_cache.insert(*address, res); + + if self.is_prewarm() { + self.caches.account_cache.insert(*address, res); + } Ok(res) } } @@ -146,17 +172,30 @@ impl StateProvider for CachedStateProvider { storage_key: StorageKey, ) -> ProviderResult> { match self.caches.get_storage(&account, &storage_key) { - SlotStatus::NotCached => { - self.metrics.storage_cache_misses.increment(1); + (SlotStatus::NotCached, maybe_cache) => { let final_res = self.state_provider.storage(account, storage_key)?; + + if self.is_prewarm() { + let account_cache = maybe_cache.unwrap_or_default(); + account_cache.insert_storage(storage_key, final_res); + // we always need to insert the value to update the weights. + // Note: there exists a race when the storage cache did not exist yet and two + // consumers looking up a storage value for this account for the first time, + // however we can assume that this will only happen for the very first + // (most likely the same) value, and don't expect that this + // will accidentally replace an account storage cache with + // additional values. + self.caches.insert_storage_cache(account, account_cache); + } + + self.metrics.storage_cache_misses.increment(1); Ok(final_res) } - SlotStatus::Empty => { + (SlotStatus::Empty, _) => { self.metrics.storage_cache_hits.increment(1); Ok(None) } - SlotStatus::Value(value) => { + (SlotStatus::Value(value), _) => { self.metrics.storage_cache_hits.increment(1); Ok(Some(value)) } @@ -174,7 +213,11 @@ impl BytecodeReader for CachedStateProvider { self.metrics.code_cache_misses.increment(1); let final_res = self.state_provider.bytecode_by_hash(code_hash)?; - self.caches.code_cache.insert(*code_hash, final_res.clone()); + + if self.is_prewarm() { + self.caches.code_cache.insert(*code_hash, final_res.clone()); + } + Ok(final_res) } } @@ -311,18 +354,28 @@ pub(crate) struct ExecutionCache { impl ExecutionCache { /// Get storage value from hierarchical cache.
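The `prewarm` flag introduced above changes when cache misses are written back: only the pre-warm execution path populates the shared cache, while regular block execution relies on the post-block bundle-state insert. A toy sketch of that gating, with stand-in types in place of the real provider and cache:

```rust
use std::collections::HashMap;

/// Toy stand-in for the shared execution cache.
#[derive(Default)]
struct Cache {
    accounts: HashMap<u64, u64>,
}

/// Toy provider wrapper mirroring the prewarm flag: reads always consult the cache,
/// but misses are only written back when `prewarm` is set.
struct CachedProvider<'a> {
    backing: &'a HashMap<u64, u64>,
    cache: Cache,
    prewarm: bool,
}

impl<'a> CachedProvider<'a> {
    fn basic_account(&mut self, address: u64) -> Option<u64> {
        if let Some(balance) = self.cache.accounts.get(&address) {
            // cache hit: served without touching the backing provider
            return Some(*balance);
        }
        // cache miss: read through to the backing state
        let res = self.backing.get(&address).copied();
        if self.prewarm {
            // only the pre-warm pass writes misses back; regular execution leaves the
            // shared cache to be updated from the block's bundle state afterwards
            if let Some(balance) = res {
                self.cache.accounts.insert(address, balance);
            }
        }
        res
    }
}

fn main() {
    let backing: HashMap<u64, u64> = HashMap::from([(1, 100)]);

    let mut regular = CachedProvider { backing: &backing, cache: Cache::default(), prewarm: false };
    assert_eq!(regular.basic_account(1), Some(100));
    assert!(regular.cache.accounts.is_empty());

    let mut prewarm = CachedProvider { backing: &backing, cache: Cache::default(), prewarm: true };
    assert_eq!(prewarm.basic_account(1), Some(100));
    assert_eq!(prewarm.cache.accounts.get(&1), Some(&100));
}
```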
/// - /// Returns a `SlotStatus` indicating whether: - /// - `NotCached`: The account's storage cache doesn't exist - /// - `Empty`: The slot exists in the account's cache but is empty - /// - `Value`: The slot exists and has a specific value - pub(crate) fn get_storage(&self, address: &Address, key: &StorageKey) -> SlotStatus { + /// Returns a tuple of: + /// - `SlotStatus` indicating whether: + /// - `NotCached`: The account's storage cache doesn't exist + /// - `Empty`: The slot exists in the account's cache but is empty + /// - `Value`: The slot exists and has a specific value + /// - `Option>`: The account's storage cache if it exists + pub(crate) fn get_storage( + &self, + address: &Address, + key: &StorageKey, + ) -> (SlotStatus, Option>) { match self.storage_cache.get(address) { - None => SlotStatus::NotCached, - Some(account_cache) => account_cache.get_storage(key), + None => (SlotStatus::NotCached, None), + Some(account_cache) => { + let status = account_cache.get_storage(key); + (status, Some(account_cache)) + } } } /// Insert storage value into hierarchical cache + #[cfg(test)] pub(crate) fn insert_storage( &self, address: Address, @@ -351,6 +404,15 @@ impl ExecutionCache { self.storage_cache.insert(address, account_cache); } + /// Inserts the [`AccountStorageCache`]. + pub(crate) fn insert_storage_cache( + &self, + address: Address, + storage_cache: Arc, + ) { + self.storage_cache.insert(address, storage_cache); + } + /// Invalidate storage for specific account pub(crate) fn invalidate_account_storage(&self, address: &Address) { self.storage_cache.invalidate(address); @@ -757,7 +819,7 @@ mod tests { let caches = ExecutionCacheBuilder::default().build_caches(1000); let state_provider = - CachedStateProvider::new_with_caches(provider, caches, CachedStateMetrics::zeroed()); + CachedStateProvider::new(provider, caches, CachedStateMetrics::zeroed()); // check that the storage is empty let res = state_provider.storage(address, storage_key); @@ -780,7 +842,7 @@ mod tests { let caches = ExecutionCacheBuilder::default().build_caches(1000); let state_provider = - CachedStateProvider::new_with_caches(provider, caches, CachedStateMetrics::zeroed()); + CachedStateProvider::new(provider, caches, CachedStateMetrics::zeroed()); // check that the storage returns the expected value let res = state_provider.storage(address, storage_key); @@ -800,7 +862,7 @@ mod tests { caches.insert_storage(address, storage_key, Some(storage_value)); // check that the storage returns the cached value - let slot_status = caches.get_storage(&address, &storage_key); + let (slot_status, _) = caches.get_storage(&address, &storage_key); assert_eq!(slot_status, SlotStatus::Value(storage_value)); } @@ -814,7 +876,7 @@ mod tests { let caches = ExecutionCacheBuilder::default().build_caches(1000); // check that the storage is not cached - let slot_status = caches.get_storage(&address, &storage_key); + let (slot_status, _) = caches.get_storage(&address, &storage_key); assert_eq!(slot_status, SlotStatus::NotCached); } @@ -830,7 +892,7 @@ mod tests { caches.insert_storage(address, storage_key, None); // check that the storage is empty - let slot_status = caches.get_storage(&address, &storage_key); + let (slot_status, _) = caches.get_storage(&address, &storage_key); assert_eq!(slot_status, SlotStatus::Empty); } diff --git a/crates/engine/tree/src/tree/instrumented_state.rs b/crates/engine/tree/src/tree/instrumented_state.rs index 9d96aca3a2..02ab395dc3 100644 --- a/crates/engine/tree/src/tree/instrumented_state.rs +++ 
b/crates/engine/tree/src/tree/instrumented_state.rs @@ -22,7 +22,7 @@ const NANOS_PER_SEC: u32 = 1_000_000_000; /// An atomic version of [`Duration`], using an [`AtomicU64`] to store the total nanoseconds in the /// duration. -#[derive(Default)] +#[derive(Debug, Default)] pub(crate) struct AtomicDuration { /// The nanoseconds part of the duration /// @@ -59,7 +59,8 @@ impl AtomicDuration { } /// A wrapper of a state provider and latency metrics. -pub(crate) struct InstrumentedStateProvider { +#[derive(Debug)] +pub struct InstrumentedStateProvider { /// The state provider state_provider: S, @@ -80,11 +81,12 @@ impl InstrumentedStateProvider where S: StateProvider, { - /// Creates a new [`InstrumentedStateProvider`] from a state provider - pub(crate) fn from_state_provider(state_provider: S) -> Self { + /// Creates a new [`InstrumentedStateProvider`] from a state provider with the provided label + /// for metrics. + pub fn new(state_provider: S, source: &'static str) -> Self { Self { state_provider, - metrics: StateProviderMetrics::default(), + metrics: StateProviderMetrics::new_with_labels(&[("source", source)]), total_storage_fetch_latency: AtomicDuration::zero(), total_code_fetch_latency: AtomicDuration::zero(), total_account_fetch_latency: AtomicDuration::zero(), @@ -134,6 +136,12 @@ impl InstrumentedStateProvider { } } +impl Drop for InstrumentedStateProvider { + fn drop(&mut self) { + self.record_total_latency(); + } +} + /// Metrics for the instrumented state provider #[derive(Metrics, Clone)] #[metrics(scope = "sync.state_provider")] diff --git a/crates/engine/tree/src/tree/invalid_headers.rs b/crates/engine/tree/src/tree/invalid_headers.rs index d349901a19..49abdc59e9 100644 --- a/crates/engine/tree/src/tree/invalid_headers.rs +++ b/crates/engine/tree/src/tree/invalid_headers.rs @@ -48,6 +48,7 @@ impl InvalidHeaderCache { // if we get here, the entry has been hit too many times, so we evict it self.headers.remove(hash); self.metrics.hit_evictions.increment(1); + self.metrics.count.set(self.headers.len() as f64); None } diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index 1d1e208b0a..8fd08e32a1 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -1,11 +1,13 @@ -use crate::tree::MeteredStateHook; +use crate::tree::{error::InsertBlockFatalError, MeteredStateHook, TreeOutcome}; use alloy_consensus::transaction::TxHashRef; use alloy_evm::{ block::{BlockExecutor, ExecutableTx}, Evm, }; +use alloy_rpc_types_engine::{PayloadStatus, PayloadStatusEnum}; use core::borrow::BorrowMut; -use reth_errors::BlockExecutionError; +use reth_engine_primitives::{ForkchoiceStatus, OnForkChoiceUpdated}; +use reth_errors::{BlockExecutionError, ProviderError}; use reth_evm::{metrics::ExecutorMetrics, OnStateHook}; use reth_execution_types::BlockExecutionOutput; use reth_metrics::{ @@ -15,6 +17,7 @@ use reth_metrics::{ use reth_primitives_traits::SignedTransaction; use reth_trie::updates::TrieUpdates; use revm::database::{states::bundle_state::BundleRetention, State}; +use revm_primitives::Address; use std::time::Instant; use tracing::{debug_span, trace}; @@ -60,9 +63,9 @@ impl EngineApiMetrics { pub(crate) fn execute_metered( &self, executor: E, - transactions: impl Iterator, BlockExecutionError>>, + mut transactions: impl Iterator, BlockExecutionError>>, state_hook: Box, - ) -> Result, BlockExecutionError> + ) -> Result<(BlockExecutionOutput, Vec
), BlockExecutionError> where DB: alloy_evm::Database, E: BlockExecutor>>, Transaction: SignedTransaction>, @@ -72,19 +75,46 @@ impl EngineApiMetrics { // be accessible. let wrapper = MeteredStateHook { metrics: self.executor.clone(), inner_hook: state_hook }; + let mut senders = Vec::new(); let mut executor = executor.with_state_hook(Some(Box::new(wrapper))); let f = || { - executor.apply_pre_execution_changes()?; - for tx in transactions { + let start = Instant::now(); + debug_span!(target: "engine::tree", "pre execution") + .entered() + .in_scope(|| executor.apply_pre_execution_changes())?; + self.executor.pre_execution_histogram.record(start.elapsed()); + + let exec_span = debug_span!(target: "engine::tree", "execution").entered(); + loop { + let start = Instant::now(); + let Some(tx) = transactions.next() else { break }; + self.executor.transaction_wait_histogram.record(start.elapsed()); + let tx = tx?; + senders.push(*tx.signer()); + let span = debug_span!(target: "engine::tree", "execute tx", tx_hash=?tx.tx().tx_hash()); - let _enter = span.enter(); + let enter = span.entered(); trace!(target: "engine::tree", "Executing transaction"); - executor.execute_transaction(tx)?; + let start = Instant::now(); + let gas_used = executor.execute_transaction(tx)?; + self.executor.transaction_execution_histogram.record(start.elapsed()); + + // record the tx gas used + enter.record("gas_used", gas_used); } - executor.finish().map(|(evm, result)| (evm.into_db(), result)) + drop(exec_span); + + let start = Instant::now(); + let result = debug_span!(target: "engine::tree", "finish") + .entered() + .in_scope(|| executor.finish()) + .map(|(evm, result)| (evm.into_db(), result)); + self.executor.post_execution_histogram.record(start.elapsed()); + + result }; // Use metered to execute and track timing/gas metrics @@ -95,7 +125,9 @@ impl EngineApiMetrics { })?; // merge transitions into bundle state - db.borrow_mut().merge_transitions(BundleRetention::Reverts); + debug_span!(target: "engine::tree", "merge transitions") + .entered() + .in_scope(|| db.borrow_mut().merge_transitions(BundleRetention::Reverts)); let output = BlockExecutionOutput { result, state: db.borrow_mut().take_bundle() }; // Update the metrics for the number of accounts, storage slots and bytecodes updated @@ -108,7 +140,7 @@ impl EngineApiMetrics { self.executor.storage_slots_updated_histogram.record(storage_slots as f64); self.executor.bytecodes_updated_histogram.record(bytecodes as f64); - Ok(output) + Ok((output, senders)) } } @@ -132,20 +164,20 @@ pub(crate) struct TreeMetrics { #[derive(Metrics)] #[metrics(scope = "consensus.engine.beacon")] pub(crate) struct EngineMetrics { + /// Engine API forkchoiceUpdated response type metrics + #[metric(skip)] + pub(crate) forkchoice_updated: ForkchoiceUpdatedMetrics, + /// Engine API newPayload response type metrics + #[metric(skip)] + pub(crate) new_payload: NewPayloadStatusMetrics, /// How many executed blocks are currently stored. pub(crate) executed_blocks: Gauge, /// How many already executed blocks were directly inserted into the tree. pub(crate) inserted_already_executed_blocks: Counter, /// The number of times the pipeline was run. pub(crate) pipeline_runs: Counter, - /// The total count of forkchoice updated messages received. - pub(crate) forkchoice_updated_messages: Counter, - /// The total count of forkchoice updated messages with payload received. 
- pub(crate) forkchoice_with_attributes_updated_messages: Counter, /// Newly arriving block hash is not present in executed blocks cache storage pub(crate) executed_new_block_cache_miss: Counter, - /// The total count of new payload messages received. - pub(crate) new_payload_messages: Counter, /// Histogram of persistence operation durations (in seconds) pub(crate) persistence_duration: Histogram, /// Tracks the how often we failed to deliver a newPayload response. @@ -160,6 +192,133 @@ pub(crate) struct EngineMetrics { pub(crate) block_insert_total_duration: Histogram, } +/// Metrics for engine forkchoiceUpdated responses. +#[derive(Metrics)] +#[metrics(scope = "consensus.engine.beacon")] +pub(crate) struct ForkchoiceUpdatedMetrics { + /// The total count of forkchoice updated messages received. + pub(crate) forkchoice_updated_messages: Counter, + /// The total count of forkchoice updated messages with payload received. + pub(crate) forkchoice_with_attributes_updated_messages: Counter, + /// The total count of forkchoice updated messages that we responded to with + /// [`Valid`](ForkchoiceStatus::Valid). + pub(crate) forkchoice_updated_valid: Counter, + /// The total count of forkchoice updated messages that we responded to with + /// [`Invalid`](ForkchoiceStatus::Invalid). + pub(crate) forkchoice_updated_invalid: Counter, + /// The total count of forkchoice updated messages that we responded to with + /// [`Syncing`](ForkchoiceStatus::Syncing). + pub(crate) forkchoice_updated_syncing: Counter, + /// The total count of forkchoice updated messages that were unsuccessful, i.e. we responded + /// with an error type that is not a [`PayloadStatusEnum`]. + pub(crate) forkchoice_updated_error: Counter, + /// Latency for the forkchoice updated calls. + pub(crate) forkchoice_updated_latency: Histogram, + /// Latency for the last forkchoice updated call. + pub(crate) forkchoice_updated_last: Gauge, + /// Time diff between new payload call response and the next forkchoice updated call request. + pub(crate) new_payload_forkchoice_updated_time_diff: Histogram, +} + +impl ForkchoiceUpdatedMetrics { + /// Increment the forkchoiceUpdated counter based on the given result + pub(crate) fn update_response_metrics( + &self, + start: Instant, + latest_new_payload_at: &mut Option, + has_attrs: bool, + result: &Result, ProviderError>, + ) { + let elapsed = start.elapsed(); + match result { + Ok(outcome) => match outcome.outcome.forkchoice_status() { + ForkchoiceStatus::Valid => self.forkchoice_updated_valid.increment(1), + ForkchoiceStatus::Invalid => self.forkchoice_updated_invalid.increment(1), + ForkchoiceStatus::Syncing => self.forkchoice_updated_syncing.increment(1), + }, + Err(_) => self.forkchoice_updated_error.increment(1), + } + self.forkchoice_updated_messages.increment(1); + if has_attrs { + self.forkchoice_with_attributes_updated_messages.increment(1); + } + self.forkchoice_updated_latency.record(elapsed); + self.forkchoice_updated_last.set(elapsed); + if let Some(latest_new_payload_at) = latest_new_payload_at.take() { + self.new_payload_forkchoice_updated_time_diff.record(start - latest_new_payload_at); + } + } +} + +/// Metrics for engine newPayload responses. +#[derive(Metrics)] +#[metrics(scope = "consensus.engine.beacon")] +pub(crate) struct NewPayloadStatusMetrics { + /// Finish time of the latest new payload call. + #[metric(skip)] + pub(crate) latest_at: Option, + /// The total count of new payload messages received. 
+ pub(crate) new_payload_messages: Counter, + /// The total count of new payload messages that we responded to with + /// [Valid](PayloadStatusEnum::Valid). + pub(crate) new_payload_valid: Counter, + /// The total count of new payload messages that we responded to with + /// [Invalid](PayloadStatusEnum::Invalid). + pub(crate) new_payload_invalid: Counter, + /// The total count of new payload messages that we responded to with + /// [Syncing](PayloadStatusEnum::Syncing). + pub(crate) new_payload_syncing: Counter, + /// The total count of new payload messages that we responded to with + /// [Accepted](PayloadStatusEnum::Accepted). + pub(crate) new_payload_accepted: Counter, + /// The total count of new payload messages that were unsuccessful, i.e. we responded with an + /// error type that is not a [`PayloadStatusEnum`]. + pub(crate) new_payload_error: Counter, + /// The total gas of valid new payload messages received. + pub(crate) new_payload_total_gas: Histogram, + /// The gas per second of valid new payload messages received. + pub(crate) new_payload_gas_per_second: Histogram, + /// The gas per second for the last new payload call. + pub(crate) new_payload_gas_per_second_last: Gauge, + /// Latency for the new payload calls. + pub(crate) new_payload_latency: Histogram, + /// Latency for the last new payload call. + pub(crate) new_payload_last: Gauge, +} + +impl NewPayloadStatusMetrics { + /// Increment the newPayload counter based on the given result + pub(crate) fn update_response_metrics( + &mut self, + start: Instant, + result: &Result, InsertBlockFatalError>, + gas_used: u64, + ) { + let finish = Instant::now(); + let elapsed = finish - start; + + self.latest_at = Some(finish); + match result { + Ok(outcome) => match outcome.outcome.status { + PayloadStatusEnum::Valid => { + self.new_payload_valid.increment(1); + self.new_payload_total_gas.record(gas_used as f64); + let gas_per_second = gas_used as f64 / elapsed.as_secs_f64(); + self.new_payload_gas_per_second.record(gas_per_second); + self.new_payload_gas_per_second_last.set(gas_per_second); + } + PayloadStatusEnum::Syncing => self.new_payload_syncing.increment(1), + PayloadStatusEnum::Accepted => self.new_payload_accepted.increment(1), + PayloadStatusEnum::Invalid { .. } => self.new_payload_invalid.increment(1), + }, + Err(_) => self.new_payload_error.increment(1), + } + self.new_payload_messages.increment(1); + self.new_payload_latency.record(elapsed); + self.new_payload_last.set(elapsed); + } +} + /// Metrics for non-execution related block validation. #[derive(Metrics)] #[metrics(scope = "sync.block_validation")] @@ -172,6 +331,10 @@ pub(crate) struct BlockValidationMetrics { pub(crate) state_root_duration: Gauge, /// Histogram for state root duration ie the time spent blocked waiting for the state root pub(crate) state_root_histogram: Histogram, + /// Histogram of deferred trie computation duration. + pub(crate) deferred_trie_compute_duration: Histogram, + /// Histogram of time spent waiting for deferred trie data to become available. 
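The per-response metrics added above classify every engine reply and, for valid payloads, derive throughput as gas used divided by wall-clock processing time. A simplified sketch of that bookkeeping, with a stand-in status enum and plain counters instead of the metrics handles:

```rust
use std::{
    thread,
    time::{Duration, Instant},
};

/// Stand-in for the payload status variants that drive the classification.
enum Status {
    Valid,
    Invalid,
    Syncing,
    Accepted,
}

/// Plain counters standing in for the metrics types.
#[derive(Default)]
struct NewPayloadCounters {
    valid: u64,
    invalid: u64,
    syncing: u64,
    accepted: u64,
    last_gas_per_second: f64,
}

impl NewPayloadCounters {
    /// Classifies one newPayload response; only valid payloads contribute to throughput.
    fn record(&mut self, start: Instant, status: Status, gas_used: u64) {
        let elapsed = start.elapsed();
        match status {
            Status::Valid => {
                self.valid += 1;
                // gas per second = gas used by the block / wall-clock processing time
                self.last_gas_per_second = gas_used as f64 / elapsed.as_secs_f64();
            }
            Status::Invalid => self.invalid += 1,
            Status::Syncing => self.syncing += 1,
            Status::Accepted => self.accepted += 1,
        }
    }
}

fn main() {
    let mut counters = NewPayloadCounters::default();

    let start = Instant::now();
    thread::sleep(Duration::from_millis(50)); // pretend the payload took ~50ms to process
    counters.record(start, Status::Valid, 15_000_000);

    counters.record(Instant::now(), Status::Syncing, 0);
    counters.record(Instant::now(), Status::Invalid, 0);
    counters.record(Instant::now(), Status::Accepted, 0);

    assert_eq!(counters.valid, 1);
    assert_eq!(counters.invalid + counters.syncing + counters.accepted, 3);
    println!("~{:.0} gas/s", counters.last_gas_per_second);
}
```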
+ pub(crate) deferred_trie_wait_duration: Histogram, /// Trie input computation duration pub(crate) trie_input_duration: Histogram, /// Payload conversion and validation latency diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 324e3375d2..d89dc4a6b9 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -14,7 +14,8 @@ use alloy_rpc_types_engine::{ }; use error::{InsertBlockError, InsertBlockFatalError}; use reth_chain_state::{ - CanonicalInMemoryState, ExecutedBlock, MemoryOverlayStateProvider, NewCanonicalChain, + CanonicalInMemoryState, ComputedTrieData, ExecutedBlock, MemoryOverlayStateProvider, + NewCanonicalChain, }; use reth_consensus::{Consensus, FullConsensus}; use reth_engine_primitives::{ @@ -38,6 +39,7 @@ use revm::state::EvmState; use state::TreeState; use std::{ fmt::Debug, + ops, sync::{ mpsc::{Receiver, RecvError, RecvTimeoutError, Sender}, Arc, @@ -53,7 +55,7 @@ use tracing::*; mod block_buffer; mod cached_state; pub mod error; -mod instrumented_state; +pub mod instrumented_state; mod invalid_headers; mod metrics; mod payload_processor; @@ -425,9 +427,13 @@ where match self.try_recv_engine_message() { Ok(Some(msg)) => { debug!(target: "engine::tree", %msg, "received new engine message"); - if let Err(fatal) = self.on_engine_message(msg) { - error!(target: "engine::tree", %fatal, "insert block fatal error"); - return + match self.on_engine_message(msg) { + Ok(ops::ControlFlow::Break(())) => return, + Ok(ops::ControlFlow::Continue(())) => {} + Err(fatal) => { + error!(target: "engine::tree", %fatal, "insert block fatal error"); + return + } } } Ok(None) => { @@ -453,7 +459,7 @@ where /// block request processing isn't blocked for a long time. fn on_downloaded( &mut self, - mut blocks: Vec>, + mut blocks: Vec>, ) -> Result, InsertBlockFatalError> { if blocks.is_empty() { // nothing to execute @@ -506,7 +512,6 @@ where payload: T::ExecutionData, ) -> Result, InsertBlockFatalError> { trace!(target: "engine::tree", "invoked new payload"); - self.metrics.engine.new_payload_messages.increment(1); // start timing for the new payload process let start = Instant::now(); @@ -615,7 +620,7 @@ where Err(error) => match error { InsertPayloadError::Block(error) => Ok(self.on_insert_block_error(error)?), InsertPayloadError::Payload(error) => { - Ok(self.on_new_payload_error(error, parent_hash)?) + Ok(self.on_new_payload_error(error, num_hash, parent_hash)?) } }, } @@ -634,8 +639,9 @@ where payload: T::ExecutionData, ) -> Result { let parent_hash = payload.parent_hash(); + let num_hash = payload.num_hash(); - match self.payload_validator.ensure_well_formed_payload(payload) { + match self.payload_validator.convert_payload_to_block(payload) { // if the block is well-formed, buffer it for later Ok(block) => { if let Err(error) = self.buffer_block(block) { @@ -644,7 +650,7 @@ where Ok(PayloadStatus::from_status(PayloadStatusEnum::Syncing)) } } - Err(error) => Ok(self.on_new_payload_error(error, parent_hash)?), + Err(error) => Ok(self.on_new_payload_error(error, num_hash, parent_hash)?), } } @@ -704,8 +710,8 @@ where // gather all blocks until new head number. while current_canonical_number > current_number { if let Some(block) = self.canonical_block_by_hash(old_hash)? 
{ - old_chain.push(block.clone()); old_hash = block.recovered_block().parent_hash(); + old_chain.push(block); current_canonical_number -= 1; } else { // This shouldn't happen as we're walking back the canonical chain @@ -925,48 +931,6 @@ where Ok(()) } - /// Determines if the given block is part of a fork by checking that these - /// conditions are true: - /// * walking back from the target hash to verify that the target hash is not part of an - /// extension of the canonical chain. - /// * walking back from the current head to verify that the target hash is not already part of - /// the canonical chain. - /// - /// The header is required as an arg, because we might be checking that the header is a fork - /// block before it's in the tree state and before it's in the database. - fn is_fork(&self, target: BlockWithParent) -> ProviderResult { - let target_hash = target.block.hash; - // verify that the given hash is not part of an extension of the canon chain. - let canonical_head = self.state.tree_state.canonical_head(); - let mut current_hash; - let mut current_block = target; - loop { - if current_block.block.hash == canonical_head.hash { - return Ok(false) - } - // We already passed the canonical head - if current_block.block.number <= canonical_head.number { - break - } - current_hash = current_block.parent; - - let Some(next_block) = self.sealed_header_by_hash(current_hash)? else { break }; - current_block = next_block.block_with_parent(); - } - - // verify that the given hash is not already part of canonical chain stored in memory - if self.canonical_in_memory_state.header_by_hash(target_hash).is_some() { - return Ok(false) - } - - // verify that the given hash is not already part of persisted canonical chain - if self.provider.block_number(target_hash)?.is_some() { - return Ok(false) - } - - Ok(true) - } - /// Invoked when we receive a new forkchoice update message. Calls into the blockchain tree /// to resolve chain forks and ensure that the Execution Layer is working with the latest valid /// chain. @@ -985,7 +949,7 @@ where trace!(target: "engine::tree", ?attrs, "invoked forkchoice update"); // Record metrics - self.record_forkchoice_metrics(&attrs); + self.record_forkchoice_metrics(); // Pre-validation of forkchoice state if let Some(early_result) = self.validate_forkchoice_state(state)? { @@ -1008,11 +972,7 @@ where } /// Records metrics for forkchoice updated calls - fn record_forkchoice_metrics(&self, attrs: &Option) { - self.metrics.engine.forkchoice_updated_messages.increment(1); - if attrs.is_some() { - self.metrics.engine.forkchoice_with_attributes_updated_messages.increment(1); - } + fn record_forkchoice_metrics(&self) { self.canonical_in_memory_state.on_forkchoice_update_received(); } @@ -1130,6 +1090,15 @@ where if self.engine_kind.is_opstack() || self.config.always_process_payload_attributes_on_canonical_head() { + // We need to effectively unwind the _canonical_ chain to the FCU's head, which is + // part of the canonical chain. We need to update the latest block state to reflect + // the canonical ancestor. This ensures that state providers and the transaction + // pool operate with the correct chain state after forkchoice update processing, and + // new payloads built on the reorg'd head will be added to the tree immediately. 
+ if self.config.unwind_canonical_header() { + self.update_latest_block_to_canonical_ancestor(&canonical_header)?; + } + if let Some(attr) = attrs { debug!(target: "engine::tree", head = canonical_header.number(), "handling payload attributes for canonical head"); // Clone only when we actually need to process the attributes @@ -1141,17 +1110,6 @@ where ); return Ok(Some(TreeOutcome::new(updated))); } - - // At this point, no alternative block has been triggered, so we need effectively - // unwind the _canonical_ chain to the FCU's head, which is part of the canonical - // chain. We need to update the latest block state to reflect the - // canonical ancestor. This ensures that state providers and the - // transaction pool operate with the correct chain state after - // forkchoice update processing. - - if self.config.unwind_canonical_header() { - self.update_latest_block_to_canonical_ancestor(&canonical_header)?; - } } // According to the Engine API specification, client software MAY skip an update of the @@ -1286,7 +1244,7 @@ where .map(|b| b.recovered_block().num_hash()) .expect("Checked non-empty persisting blocks"); - debug!(target: "engine::tree", blocks = ?blocks_to_persist.iter().map(|block| block.recovered_block().num_hash()).collect::>(), "Persisting blocks"); + debug!(target: "engine::tree", count=blocks_to_persist.len(), blocks = ?blocks_to_persist.iter().map(|block| block.recovered_block().num_hash()).collect::>(), "Persisting blocks"); let (tx, rx) = oneshot::channel(); let _ = self.persistence.save_blocks(blocks_to_persist, tx); @@ -1307,22 +1265,7 @@ where // Check if persistence has complete match rx.try_recv() { Ok(last_persisted_hash_num) => { - self.metrics.engine.persistence_duration.record(start_time.elapsed()); - let Some(BlockNumHash { - hash: last_persisted_block_hash, - number: last_persisted_block_number, - }) = last_persisted_hash_num - else { - // if this happened, then we persisted no blocks because we sent an - // empty vec of blocks - warn!(target: "engine::tree", "Persistence task completed but did not persist any blocks"); - return Ok(()) - }; - - debug!(target: "engine::tree", ?last_persisted_block_hash, ?last_persisted_block_number, "Finished persisting, calling finish"); - self.persistence_state - .finish(last_persisted_block_hash, last_persisted_block_number); - self.on_new_persisted_block()?; + self.on_persistence_complete(last_persisted_hash_num, start_time)?; } Err(TryRecvError::Closed) => return Err(TryRecvError::Closed.into()), Err(TryRecvError::Empty) => { @@ -1335,7 +1278,8 @@ where if let Some(new_tip_num) = self.find_disk_reorg()? { self.remove_blocks(new_tip_num) } else if self.should_persist() { - let blocks_to_persist = self.get_canonical_blocks_to_persist()?; + let blocks_to_persist = + self.get_canonical_blocks_to_persist(PersistTarget::Threshold)?; self.persist_blocks(blocks_to_persist); } } @@ -1343,11 +1287,72 @@ where Ok(()) } + /// Finishes termination by persisting all remaining blocks and signaling completion. + /// + /// This blocks until all persistence is complete. Always signals completion, + /// even if an error occurs. + fn finish_termination( + &mut self, + pending_termination: oneshot::Sender<()>, + ) -> Result<(), AdvancePersistenceError> { + trace!(target: "engine::tree", "finishing termination, persisting remaining blocks"); + let result = self.persist_until_complete(); + let _ = pending_termination.send(()); + result + } + + /// Persists all remaining blocks until none are left. 
+ fn persist_until_complete(&mut self) -> Result<(), AdvancePersistenceError> { + loop { + // Wait for any in-progress persistence to complete (blocking) + if let Some((rx, start_time, _action)) = self.persistence_state.rx.take() { + let result = rx.blocking_recv().map_err(|_| TryRecvError::Closed)?; + self.on_persistence_complete(result, start_time)?; + } + + let blocks_to_persist = self.get_canonical_blocks_to_persist(PersistTarget::Head)?; + + if blocks_to_persist.is_empty() { + debug!(target: "engine::tree", "persistence complete, signaling termination"); + return Ok(()) + } + + debug!(target: "engine::tree", count = blocks_to_persist.len(), "persisting remaining blocks before shutdown"); + self.persist_blocks(blocks_to_persist); + } + } + + /// Handles a completed persistence task. + fn on_persistence_complete( + &mut self, + last_persisted_hash_num: Option, + start_time: Instant, + ) -> Result<(), AdvancePersistenceError> { + self.metrics.engine.persistence_duration.record(start_time.elapsed()); + + let Some(BlockNumHash { + hash: last_persisted_block_hash, + number: last_persisted_block_number, + }) = last_persisted_hash_num + else { + // if this happened, then we persisted no blocks because we sent an empty vec of blocks + warn!(target: "engine::tree", "Persistence task completed but did not persist any blocks"); + return Ok(()) + }; + + debug!(target: "engine::tree", ?last_persisted_block_hash, ?last_persisted_block_number, elapsed=?start_time.elapsed(), "Finished persisting, calling finish"); + self.persistence_state.finish(last_persisted_block_hash, last_persisted_block_number); + self.on_new_persisted_block()?; + Ok(()) + } + /// Handles a message from the engine. + /// + /// Returns `ControlFlow::Break(())` if the engine should terminate. 
fn on_engine_message( &mut self, msg: FromEngine, N::Block>, - ) -> Result<(), InsertBlockFatalError> { + ) -> Result, InsertBlockFatalError> { match msg { FromEngine::Event(event) => match event { FromOrchestrator::BackfillSyncStarted => { @@ -1357,6 +1362,13 @@ where FromOrchestrator::BackfillSyncFinished(ctrl) => { self.on_backfill_sync_finished(ctrl)?; } + FromOrchestrator::Terminate { tx } => { + debug!(target: "engine::tree", "received terminate request"); + if let Err(err) = self.finish_termination(tx) { + error!(target: "engine::tree", %err, "Termination failed"); + } + return Ok(ops::ControlFlow::Break(())) + } }, FromEngine::Request(request) => { match request { @@ -1364,7 +1376,7 @@ where let block_num_hash = block.recovered_block().num_hash(); if block_num_hash.number <= self.state.tree_state.canonical_block_number() { // outdated block that can be skipped - return Ok(()) + return Ok(ops::ControlFlow::Continue(())) } debug!(target: "engine::tree", block=?block_num_hash, "inserting already executed block"); @@ -1380,6 +1392,7 @@ where } self.state.tree_state.insert_executed(block.clone()); + self.payload_validator.on_inserted_executed_block(block.clone()); self.metrics.engine.inserted_already_executed_blocks.increment(1); self.emit_event(EngineApiEvent::BeaconConsensus( ConsensusEngineEvent::CanonicalBlockAdded(block, now.elapsed()), @@ -1393,6 +1406,9 @@ where tx, version, } => { + let has_attrs = payload_attrs.is_some(); + + let start = Instant::now(); let mut output = self.on_forkchoice_updated(state, payload_attrs, version); @@ -1412,6 +1428,13 @@ where self.on_maybe_tree_event(res.event.take())?; } + self.metrics.engine.forkchoice_updated.update_response_metrics( + start, + &mut self.metrics.engine.new_payload.latest_at, + has_attrs, + &output, + ); + if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(Into::into)) { @@ -1419,11 +1442,18 @@ where .engine .failed_forkchoice_updated_response_deliveries .increment(1); - error!(target: "engine::tree", "Failed to send event: {err:?}"); + error!(target: "engine::tree", ?state, elapsed=?start.elapsed(), "Failed to send event: {err:?}"); } } BeaconEngineMessage::NewPayload { payload, tx } => { + let start = Instant::now(); + let gas_used = payload.gas_used(); + let num_hash = payload.num_hash(); let mut output = self.on_new_payload(payload); + self.metrics + .engine + .new_payload + .update_response_metrics(start, &output, gas_used); let maybe_event = output.as_mut().ok().and_then(|out| out.event.take()); @@ -1434,7 +1464,7 @@ where BeaconOnNewPayloadError::Internal(Box::new(e)) })) { - error!(target: "engine::tree", "Failed to send event: {err:?}"); + error!(target: "engine::tree", payload=?num_hash, elapsed=?start.elapsed(), "Failed to send event: {err:?}"); self.metrics .engine .failed_new_payload_response_deliveries @@ -1454,7 +1484,7 @@ where } } } - Ok(()) + Ok(ops::ControlFlow::Continue(())) } /// Invoked if the backfill sync has finished to target. 
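The termination path above follows a simple shape: the orchestrator sends a terminate event carrying an acknowledgement channel, the engine persists whatever is still in memory, acks, and the message handler returns `ControlFlow::Break` to stop the loop. A self-contained sketch of that shape, using `std::sync::mpsc` as a stand-in for the tokio oneshot channel and placeholder types throughout:

```rust
use std::{ops::ControlFlow, sync::mpsc};

/// Stand-in for the orchestrator -> engine messages relevant to shutdown.
enum Event {
    Block(u64),
    /// Graceful shutdown: persist everything, then ack through the channel.
    Terminate { ack: mpsc::Sender<()> },
}

struct Engine {
    pending: Vec<u64>,
    persisted: Vec<u64>,
}

impl Engine {
    /// Mirrors the `ControlFlow` shape of the message handler: `Break` stops the loop.
    fn on_event(&mut self, event: Event) -> ControlFlow<()> {
        match event {
            Event::Block(number) => {
                self.pending.push(number);
                ControlFlow::Continue(())
            }
            Event::Terminate { ack } => {
                // drain everything that is still in memory before shutting down
                self.persisted.append(&mut self.pending);
                // signal the caller that persistence finished, even if it was a no-op
                let _ = ack.send(());
                ControlFlow::Break(())
            }
        }
    }
}

fn main() {
    let (ack_tx, ack_rx) = mpsc::channel();
    let mut engine = Engine { pending: vec![], persisted: vec![] };

    let events = vec![Event::Block(1), Event::Block(2), Event::Terminate { ack: ack_tx }];
    for event in events {
        if engine.on_event(event).is_break() {
            break;
        }
    }

    ack_rx.recv().expect("engine acked termination");
    assert_eq!(engine.persisted, vec![1, 2]);
    println!("terminated after persisting {} blocks", engine.persisted.len());
}
```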
@@ -1574,6 +1604,32 @@ where return Ok(()) }; + // Check if there are more blocks to sync between current head and FCU target + if let Some(lowest_buffered) = + self.state.buffer.lowest_ancestor(&sync_target_state.head_block_hash) + { + let current_head_num = self.state.tree_state.current_canonical_head.number; + let target_head_num = lowest_buffered.number(); + + if let Some(distance) = self.distance_from_local_tip(current_head_num, target_head_num) + { + // There are blocks between current head and FCU target, download them + debug!( + target: "engine::tree", + %current_head_num, + %target_head_num, + %distance, + "Backfill complete, downloading remaining blocks to reach FCU target" + ); + + self.emit_event(EngineApiEvent::Download(DownloadRequest::BlockRange( + lowest_buffered.parent_hash(), + distance, + ))); + return Ok(()); + } + } + // try to close the gap by executing buffered blocks that are child blocks of the new head self.try_connect_buffered_blocks(self.state.tree_state.current_canonical_head) } @@ -1662,10 +1718,10 @@ where } /// Returns a batch of consecutive canonical blocks to persist in the range - /// `(last_persisted_number .. canonical_head - threshold]`. The expected - /// order is oldest -> newest. + /// `(last_persisted_number .. target]`. The expected order is oldest -> newest. fn get_canonical_blocks_to_persist( &self, + target: PersistTarget, ) -> Result>, AdvancePersistenceError> { // We will calculate the state root using the database, so we need to be sure there are no // changes @@ -1676,9 +1732,12 @@ where let last_persisted_number = self.persistence_state.last_persisted_block.number; let canonical_head_number = self.state.tree_state.canonical_block_number(); - // Persist only up to block buffer target - let target_number = - canonical_head_number.saturating_sub(self.config.memory_block_buffer_target()); + let target_number = match target { + PersistTarget::Head => canonical_head_number, + PersistTarget::Threshold => { + canonical_head_number.saturating_sub(self.config.memory_block_buffer_target()) + } + }; debug!( target: "engine::tree", @@ -1732,11 +1791,10 @@ where /// Return an [`ExecutedBlock`] from database or in-memory state by hash. /// - /// NOTE: This cannot fetch [`ExecutedBlock`]s for _finalized_ blocks, instead it can only - /// fetch [`ExecutedBlock`]s for _canonical_ blocks, or blocks from sidechains that the node - /// has in memory. - /// - /// For finalized blocks, this will return `None`. + /// Note: This function attempts to fetch the `ExecutedBlock` from either in-memory state + /// or the database. If the required historical data (such as trie change sets) has been + /// pruned for a given block, this operation will return an error. On archive nodes, it + /// can retrieve any block. fn canonical_block_by_hash(&self, hash: B256) -> ProviderResult>> { trace!(target: "engine::tree", ?hash, "Fetching executed block by hash"); // check memory first @@ -1756,12 +1814,17 @@ where let hashed_state = self.provider.hashed_post_state(execution_output.state()); let trie_updates = self.provider.get_block_trie_updates(block.number())?; - Ok(Some(ExecutedBlock { - recovered_block: Arc::new(RecoveredBlock::new_sealed(block, senders)), - execution_output: Arc::new(execution_output), - hashed_state: Arc::new(hashed_state), - trie_updates: Arc::new(trie_updates.into()), - })) + let sorted_hashed_state = Arc::new(hashed_state.into_sorted()); + let sorted_trie_updates = Arc::new(trie_updates); + // Skip building trie input and anchor for DB-loaded blocks. 
+ let trie_data = + ComputedTrieData::without_trie_input(sorted_hashed_state, sorted_trie_updates); + + Ok(Some(ExecutedBlock::new( + Arc::new(RecoveredBlock::new_sealed(block, senders)), + Arc::new(execution_output), + trie_data, + ))) } /// Return sealed block header from in-memory state or database by hash. @@ -1858,6 +1921,16 @@ where false } + /// Returns true if the given hash is part of the last received sync target fork choice update. + /// + /// See [`ForkchoiceStateTracker::sync_target_state`] + fn is_any_sync_target(&self, block_hash: B256) -> bool { + if let Some(target) = self.state.forkchoice_state_tracker.sync_target_state() { + return target.contains(block_hash) + } + false + } + /// Checks if the given `check` hash points to an invalid header, inserting the given `head` /// block into the invalid header cache if the `check` hash has a known invalid ancestor. /// @@ -1927,18 +2000,19 @@ where invalid: BlockWithParent, ) -> Result { let parent_hash = payload.parent_hash(); + let num_hash = payload.num_hash(); // Here we might have 2 cases // 1. the block is well formed and indeed links to an invalid header, meaning we should // remember it as invalid // 2. the block is not well formed (i.e block hash is incorrect), and we should just return // an error and forget it - let block = match self.payload_validator.ensure_well_formed_payload(payload) { + let block = match self.payload_validator.convert_payload_to_block(payload) { Ok(block) => block, - Err(error) => return Ok(self.on_new_payload_error(error, parent_hash)?), + Err(error) => return Ok(self.on_new_payload_error(error, num_hash, parent_hash)?), }; - Ok(self.on_invalid_new_payload(block.into_sealed_block(), invalid)?) + Ok(self.on_invalid_new_payload(block, invalid)?) } /// Checks if the given `head` points to an invalid header, which requires a specific response @@ -1962,13 +2036,13 @@ where /// Validate if block is correct and satisfies all the consensus rules that concern the header /// and block body itself. - fn validate_block(&self, block: &RecoveredBlock) -> Result<(), ConsensusError> { + fn validate_block(&self, block: &SealedBlock) -> Result<(), ConsensusError> { if let Err(e) = self.consensus.validate_header(block.sealed_header()) { error!(target: "engine::tree", ?block, "Failed to validate header {}: {e}", block.hash()); return Err(e) } - if let Err(e) = self.consensus.validate_block_pre_execution(block.sealed_block()) { + if let Err(e) = self.consensus.validate_block_pre_execution(block) { error!(target: "engine::tree", ?block, "Failed to validate block {}: {e}", block.hash()); return Err(e) } @@ -1996,9 +2070,12 @@ where match self.insert_block(child) { Ok(res) => { debug!(target: "engine::tree", child =?child_num_hash, ?res, "connected buffered block"); - if self.is_sync_target_head(child_num_hash.hash) && + if self.is_any_sync_target(child_num_hash.hash) && matches!(res, InsertPayloadOk::Inserted(BlockStatus::Valid)) { + debug!(target: "engine::tree", child =?child_num_hash, "connected sync target block"); + // we just inserted a block that we know is part of the canonical chain, so + // we can make it canonical self.make_canonical(child_num_hash.hash)?; } } @@ -2021,10 +2098,10 @@ where /// Pre-validates the block and inserts it into the buffer. 
fn buffer_block( &mut self, - block: RecoveredBlock, + block: SealedBlock, ) -> Result<(), InsertBlockError> { if let Err(err) = self.validate_block(&block) { - return Err(InsertBlockError::consensus_error(err, block.into_sealed_block())) + return Err(InsertBlockError::consensus_error(err, block)) } self.state.buffer.insert_block(block); Ok(()) @@ -2289,14 +2366,11 @@ where #[instrument(level = "debug", target = "engine::tree", skip_all, fields(block_hash = %block.hash(), block_num = %block.number()))] fn on_downloaded_block( &mut self, - block: RecoveredBlock, + block: SealedBlock, ) -> Result, InsertBlockFatalError> { let block_num_hash = block.num_hash(); let lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block_num_hash.hash); - if self - .check_invalid_ancestor_with_head(lowest_buffered_ancestor, block.sealed_block())? - .is_some() - { + if self.check_invalid_ancestor_with_head(lowest_buffered_ancestor, &block)?.is_some() { return Ok(None) } @@ -2307,11 +2381,15 @@ where // try to append the block match self.insert_block(block) { Ok(InsertPayloadOk::Inserted(BlockStatus::Valid)) => { - if self.is_sync_target_head(block_num_hash.hash) { - trace!(target: "engine::tree", "appended downloaded sync target block"); + // check if we just inserted a block that's part of sync targets, + // i.e. head, safe, or finalized + if let Some(sync_target) = self.state.forkchoice_state_tracker.sync_target_state() && + sync_target.contains(block_num_hash.hash) + { + debug!(target: "engine::tree", ?sync_target, "appended downloaded sync target block"); - // we just inserted the current sync target block, we can try to make it - // canonical + // we just inserted a block that we know is part of the canonical chain, so we + // can make it canonical return Ok(Some(TreeEvent::TreeAction(TreeAction::MakeCanonical { sync_target_head: block_num_hash.hash, }))) @@ -2360,13 +2438,13 @@ where payload.block_with_parent(), payload, |validator, payload, ctx| validator.validate_payload(payload, ctx), - |this, payload| Ok(this.payload_validator.ensure_well_formed_payload(payload)?), + |this, payload| Ok(this.payload_validator.convert_payload_to_block(payload)?), ) } fn insert_block( &mut self, - block: RecoveredBlock, + block: SealedBlock, ) -> Result> { self.insert_block_or_payload( block.block_with_parent(), @@ -2398,7 +2476,7 @@ where block_id: BlockWithParent, input: Input, execute: impl FnOnce(&mut V, Input, TreeCtx<'_, N>) -> Result, Err>, - convert_to_block: impl FnOnce(&mut Self, Input) -> Result, Err>, + convert_to_block: impl FnOnce(&mut Self, Input) -> Result, Err>, ) -> Result where Err: From>, @@ -2410,7 +2488,7 @@ where match self.sealed_header_by_hash(block_num_hash.hash) { Err(err) => { let block = convert_to_block(self, input)?; - return Err(InsertBlockError::new(block.into_sealed_block(), err.into()).into()); + return Err(InsertBlockError::new(block, err.into()).into()); } Ok(Some(_)) => { // We now assume that we already have this block in the tree. 
However, we need to @@ -2425,7 +2503,7 @@ where match self.state_provider_builder(block_id.parent) { Err(err) => { let block = convert_to_block(self, input)?; - return Err(InsertBlockError::new(block.into_sealed_block(), err.into()).into()); + return Err(InsertBlockError::new(block, err.into()).into()); } Ok(None) => { let block = convert_to_block(self, input)?; @@ -2449,17 +2527,13 @@ where Ok(Some(_)) => {} } - // determine whether we are on a fork chain - let is_fork = match self.is_fork(block_id) { - Err(err) => { - let block = convert_to_block(self, input)?; - return Err(InsertBlockError::new(block.into_sealed_block(), err.into()).into()); - } - Ok(is_fork) => is_fork, - }; + // determine whether we are on a fork chain by comparing the block number with the + // canonical head. This is a simple check that is sufficient for the event emission below. + // A block is considered a fork if its number is less than or equal to the canonical head, + // as this indicates there's already a canonical block at that height. + let is_fork = block_id.block.number <= self.state.tree_state.current_canonical_head.number; - let ctx = - TreeCtx::new(&mut self.state, &self.persistence_state, &self.canonical_in_memory_state); + let ctx = TreeCtx::new(&mut self.state, &self.canonical_in_memory_state); let start = Instant::now(); @@ -2535,9 +2609,10 @@ where fn on_new_payload_error( &mut self, error: NewPayloadError, + payload_num_hash: NumHash, parent_hash: B256, ) -> ProviderResult { - error!(target: "engine::tree", %error, "Invalid payload"); + error!(target: "engine::tree", payload=?payload_num_hash, %error, "Invalid payload"); // we need to convert the error to a payload status (response to the CL) let latest_valid_hash = @@ -2803,29 +2878,11 @@ pub enum InsertPayloadOk { Inserted(BlockStatus), } -/// Whether or not the blocks are currently persisting and the input block is a descendant. +/// Target for block persistence. #[derive(Debug, Clone, Copy)] -pub enum PersistingKind { - /// The blocks are not currently persisting. - NotPersisting, - /// The blocks are currently persisting but the input block is not a descendant. - PersistingNotDescendant, - /// The blocks are currently persisting and the input block is a descendant. - PersistingDescendant, -} - -impl PersistingKind { - /// Returns true if the parallel state root can be run. - /// - /// We only run the parallel state root if we are not currently persisting any blocks or - /// persisting blocks that are all ancestors of the one we are calculating the state root for. - pub const fn can_run_parallel_state_root(&self) -> bool { - matches!(self, Self::NotPersisting | Self::PersistingDescendant) - } - - /// Returns true if the blocks are currently being persisted and the input block is a - /// descendant. - pub const fn is_descendant(&self) -> bool { - matches!(self, Self::PersistingDescendant) - } +enum PersistTarget { + /// Persist up to `canonical_head - memory_block_buffer_target`. + Threshold, + /// Persist all blocks up to and including the canonical head. + Head, } diff --git a/crates/engine/tree/src/tree/payload_processor/bal.rs b/crates/engine/tree/src/tree/payload_processor/bal.rs new file mode 100644 index 0000000000..5bda2cdb36 --- /dev/null +++ b/crates/engine/tree/src/tree/payload_processor/bal.rs @@ -0,0 +1,318 @@ +//! BAL (Block Access List, EIP-7928) related functionality. 
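The new `bal_to_hashed_post_state` helper added below folds a Block Access List into a post state with a "last change wins" rule per field, consulting the state provider only for fields the BAL never touched. A simplified, hedged sketch of that merge rule using plain standard-library types (the `Account` and `AccountChanges` structs here are illustrative stand-ins, not the alloy/reth types used in the real function):

#[derive(Clone, Copy)]
struct Account {
    balance: u128,
    nonce: u64,
}

// Illustrative change list for one account: each entry is (tx_index, new_value).
struct AccountChanges {
    balance_changes: Vec<(usize, u128)>,
    nonce_changes: Vec<(usize, u64)>,
}

// Merge the changes, falling back to an (illustrative) provider lookup only when needed.
fn merge_changes(changes: &AccountChanges, lookup: impl Fn() -> Option<Account>) -> Account {
    // take the last change for each field, if any
    let balance = changes.balance_changes.last().map(|(_, b)| *b);
    let nonce = changes.nonce_changes.last().map(|(_, n)| *n);

    // only hit the (potentially expensive) provider when a field is missing
    let existing = if balance.is_none() || nonce.is_none() { lookup() } else { None };

    Account {
        balance: balance.unwrap_or_else(|| existing.map(|a| a.balance).unwrap_or(0)),
        nonce: nonce.unwrap_or_else(|| existing.map(|a| a.nonce).unwrap_or(0)),
    }
}

fn main() {
    let changes = AccountChanges {
        balance_changes: vec![(0, 100), (2, 300)],
        nonce_changes: vec![],
    };
    // nonce is untouched by the changes, so it comes from the fallback lookup
    let merged = merge_changes(&changes, || Some(Account { balance: 1, nonce: 42 }));
    assert_eq!(merged.balance, 300);
    assert_eq!(merged.nonce, 42);
}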
+
+use alloy_consensus::constants::KECCAK_EMPTY;
+use alloy_eip7928::BlockAccessList;
+use alloy_primitives::{keccak256, U256};
+use reth_primitives_traits::Account;
+use reth_provider::{AccountReader, ProviderError};
+use reth_trie::{HashedPostState, HashedStorage};
+
+/// Converts a Block Access List into a [`HashedPostState`] by extracting the final state
+/// of modified accounts and storage slots.
+pub fn bal_to_hashed_post_state<P>
( + bal: &BlockAccessList, + provider: &P, +) -> Result +where + P: AccountReader, +{ + let mut hashed_state = HashedPostState::with_capacity(bal.len()); + + for account_changes in bal { + let address = account_changes.address; + let hashed_address = keccak256(address); + + // Get the latest balance (last balance change if any) + let balance = account_changes.balance_changes.last().map(|change| change.post_balance); + + // Get the latest nonce (last nonce change if any) + let nonce = account_changes.nonce_changes.last().map(|change| change.new_nonce); + + // Get the latest code (last code change if any) + let code_hash = if let Some(code_change) = account_changes.code_changes.last() { + if code_change.new_code.is_empty() { + Some(Some(KECCAK_EMPTY)) + } else { + Some(Some(keccak256(&code_change.new_code))) + } + } else { + None + }; + + // Only fetch account from provider if we're missing any field + let existing_account = if balance.is_none() || nonce.is_none() || code_hash.is_none() { + provider.basic_account(&address)? + } else { + None + }; + + // Build the final account state + let account = Account { + balance: balance.unwrap_or_else(|| { + existing_account.as_ref().map(|acc| acc.balance).unwrap_or(U256::ZERO) + }), + nonce: nonce + .unwrap_or_else(|| existing_account.as_ref().map(|acc| acc.nonce).unwrap_or(0)), + bytecode_hash: code_hash.unwrap_or_else(|| { + existing_account.as_ref().and_then(|acc| acc.bytecode_hash).or(Some(KECCAK_EMPTY)) + }), + }; + + hashed_state.accounts.insert(hashed_address, Some(account)); + + // Process storage changes + if !account_changes.storage_changes.is_empty() { + let mut storage_map = HashedStorage::new(false); + + for slot_changes in &account_changes.storage_changes { + let hashed_slot = keccak256(slot_changes.slot); + + // Get the last change for this slot + if let Some(last_change) = slot_changes.changes.last() { + storage_map + .storage + .insert(hashed_slot, U256::from_be_bytes(last_change.new_value.0)); + } + } + + if !storage_map.storage.is_empty() { + hashed_state.storages.insert(hashed_address, storage_map); + } + } + } + + Ok(hashed_state) +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_eip7928::{ + AccountChanges, BalanceChange, CodeChange, NonceChange, SlotChanges, StorageChange, + }; + use alloy_primitives::{Address, Bytes, StorageKey, B256}; + use reth_revm::test_utils::StateProviderTest; + + #[test] + fn test_bal_to_hashed_post_state_basic() { + let provider = StateProviderTest::default(); + + let address = Address::random(); + let account_changes = AccountChanges { + address, + storage_changes: vec![], + storage_reads: vec![], + balance_changes: vec![BalanceChange::new(0, U256::from(100))], + nonce_changes: vec![NonceChange::new(0, 1)], + code_changes: vec![], + }; + + let bal = vec![account_changes]; + let result = bal_to_hashed_post_state(&bal, &provider).unwrap(); + + assert_eq!(result.accounts.len(), 1); + + let hashed_address = keccak256(address); + let account_opt = result.accounts.get(&hashed_address).unwrap(); + assert!(account_opt.is_some()); + + let account = account_opt.as_ref().unwrap(); + assert_eq!(account.balance, U256::from(100)); + assert_eq!(account.nonce, 1); + assert_eq!(account.bytecode_hash, Some(KECCAK_EMPTY)); + } + + #[test] + fn test_bal_with_storage_changes() { + let provider = StateProviderTest::default(); + + let address = Address::random(); + let slot = StorageKey::random(); + let value = B256::random(); + + let slot_changes = SlotChanges { slot, changes: vec![StorageChange::new(0, value)] 
}; + + let account_changes = AccountChanges { + address, + storage_changes: vec![slot_changes], + storage_reads: vec![], + balance_changes: vec![BalanceChange::new(0, U256::from(500))], + nonce_changes: vec![NonceChange::new(0, 2)], + code_changes: vec![], + }; + + let bal = vec![account_changes]; + let result = bal_to_hashed_post_state(&bal, &provider).unwrap(); + + let hashed_address = keccak256(address); + assert!(result.storages.contains_key(&hashed_address)); + + let storage = result.storages.get(&hashed_address).unwrap(); + let hashed_slot = keccak256(slot); + + let stored_value = storage.storage.get(&hashed_slot).unwrap(); + assert_eq!(*stored_value, U256::from_be_bytes(value.0)); + } + + #[test] + fn test_bal_with_code_change() { + let provider = StateProviderTest::default(); + + let address = Address::random(); + let code = Bytes::from(vec![0x60, 0x80, 0x60, 0x40]); // Some bytecode + + let account_changes = AccountChanges { + address, + storage_changes: vec![], + storage_reads: vec![], + balance_changes: vec![BalanceChange::new(0, U256::from(1000))], + nonce_changes: vec![NonceChange::new(0, 1)], + code_changes: vec![CodeChange::new(0, code.clone())], + }; + + let bal = vec![account_changes]; + let result = bal_to_hashed_post_state(&bal, &provider).unwrap(); + + let hashed_address = keccak256(address); + let account_opt = result.accounts.get(&hashed_address).unwrap(); + let account = account_opt.as_ref().unwrap(); + + let expected_code_hash = keccak256(&code); + assert_eq!(account.bytecode_hash, Some(expected_code_hash)); + } + + #[test] + fn test_bal_with_empty_code() { + let provider = StateProviderTest::default(); + + let address = Address::random(); + let empty_code = Bytes::default(); + + let account_changes = AccountChanges { + address, + storage_changes: vec![], + storage_reads: vec![], + balance_changes: vec![BalanceChange::new(0, U256::from(1000))], + nonce_changes: vec![NonceChange::new(0, 1)], + code_changes: vec![CodeChange::new(0, empty_code)], + }; + + let bal = vec![account_changes]; + let result = bal_to_hashed_post_state(&bal, &provider).unwrap(); + + let hashed_address = keccak256(address); + let account_opt = result.accounts.get(&hashed_address).unwrap(); + let account = account_opt.as_ref().unwrap(); + + assert_eq!(account.bytecode_hash, Some(KECCAK_EMPTY)); + } + + #[test] + fn test_bal_multiple_changes_takes_last() { + let provider = StateProviderTest::default(); + + let address = Address::random(); + + // Multiple balance changes - should take the last one + let account_changes = AccountChanges { + address, + storage_changes: vec![], + storage_reads: vec![], + balance_changes: vec![ + BalanceChange::new(0, U256::from(100)), + BalanceChange::new(1, U256::from(200)), + BalanceChange::new(2, U256::from(300)), + ], + nonce_changes: vec![ + NonceChange::new(0, 1), + NonceChange::new(1, 2), + NonceChange::new(2, 3), + ], + code_changes: vec![], + }; + + let bal = vec![account_changes]; + let result = bal_to_hashed_post_state(&bal, &provider).unwrap(); + + let hashed_address = keccak256(address); + let account_opt = result.accounts.get(&hashed_address).unwrap(); + let account = account_opt.as_ref().unwrap(); + + // Should have the last values + assert_eq!(account.balance, U256::from(300)); + assert_eq!(account.nonce, 3); + } + + #[test] + fn test_bal_uses_provider_for_missing_fields() { + let mut provider = StateProviderTest::default(); + + let address = Address::random(); + let code_hash = B256::random(); + let existing_account = + Account { balance: 
U256::from(999), nonce: 42, bytecode_hash: Some(code_hash) }; + provider.insert_account(address, existing_account, None, Default::default()); + + // Only change balance, nonce and code should come from provider + let account_changes = AccountChanges { + address, + storage_changes: vec![], + storage_reads: vec![], + balance_changes: vec![BalanceChange::new(0, U256::from(1500))], + nonce_changes: vec![], + code_changes: vec![], + }; + + let bal = vec![account_changes]; + let result = bal_to_hashed_post_state(&bal, &provider).unwrap(); + + let hashed_address = keccak256(address); + let account_opt = result.accounts.get(&hashed_address).unwrap(); + let account = account_opt.as_ref().unwrap(); + + // Balance should be updated + assert_eq!(account.balance, U256::from(1500)); + // Nonce and bytecode_hash should come from provider + assert_eq!(account.nonce, 42); + assert_eq!(account.bytecode_hash, Some(code_hash)); + } + + #[test] + fn test_bal_multiple_storage_changes_per_slot() { + let provider = StateProviderTest::default(); + + let address = Address::random(); + let slot = StorageKey::random(); + + // Multiple changes to the same slot - should take the last one + let slot_changes = SlotChanges { + slot, + changes: vec![ + StorageChange::new(0, B256::from(U256::from(100).to_be_bytes::<32>())), + StorageChange::new(1, B256::from(U256::from(200).to_be_bytes::<32>())), + StorageChange::new(2, B256::from(U256::from(300).to_be_bytes::<32>())), + ], + }; + + let account_changes = AccountChanges { + address, + storage_changes: vec![slot_changes], + storage_reads: vec![], + balance_changes: vec![BalanceChange::new(0, U256::from(100))], + nonce_changes: vec![NonceChange::new(0, 1)], + code_changes: vec![], + }; + + let bal = vec![account_changes]; + let result = bal_to_hashed_post_state(&bal, &provider).unwrap(); + + let hashed_address = keccak256(address); + let storage = result.storages.get(&hashed_address).unwrap(); + let hashed_slot = keccak256(slot); + + let stored_value = storage.storage.get(&hashed_slot).unwrap(); + + // Should have the last value + assert_eq!(*stored_value, U256::from(300)); + } +} diff --git a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs index b587a72139..ced00e9c39 100644 --- a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs @@ -1,10 +1,10 @@ //! Configured sparse trie enum for switching between serial and parallel implementations. 
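The `ConfiguredSparseTrie` changes below keep the module's existing pattern: an enum with a serial and a parallel variant that implements the shared trie interface by matching on itself and delegating to the inner implementation. A minimal, hedged sketch of that enum-dispatch pattern with toy types (the `Lookup` trait and structs are illustrative, not the real `SparseTrieInterface`):

use std::collections::{BTreeMap, HashMap};

// Illustrative shared interface; the real code implements `SparseTrieInterface`.
trait Lookup {
    fn get(&self, key: &str) -> Option<u64>;
}

struct Serial(BTreeMap<String, u64>);
struct Parallel(HashMap<String, u64>);

impl Lookup for Serial {
    fn get(&self, key: &str) -> Option<u64> {
        self.0.get(key).copied()
    }
}

impl Lookup for Parallel {
    fn get(&self, key: &str) -> Option<u64> {
        self.0.get(key).copied()
    }
}

// Enum dispatch: one concrete type that can hold either implementation and forwards
// every call via `match`, avoiding trait objects on the hot path.
enum Configured {
    Serial(Serial),
    Parallel(Parallel),
}

impl Lookup for Configured {
    fn get(&self, key: &str) -> Option<u64> {
        match self {
            Self::Serial(inner) => inner.get(key),
            Self::Parallel(inner) => inner.get(key),
        }
    }
}

fn main() {
    let trie = Configured::Serial(Serial([("a".to_string(), 1)].into_iter().collect()));
    assert_eq!(trie.get("a"), Some(1));
}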
use alloy_primitives::B256; -use reth_trie::{Nibbles, TrieNode}; +use reth_trie::{Nibbles, ProofTrieNode, TrieMasks, TrieNode}; use reth_trie_sparse::{ errors::SparseTrieResult, provider::TrieNodeProvider, LeafLookup, LeafLookupError, - RevealedSparseNode, SerialSparseTrie, SparseTrieInterface, SparseTrieUpdates, TrieMasks, + SerialSparseTrie, SparseTrieInterface, SparseTrieUpdates, }; use reth_trie_sparse_parallel::ParallelSparseTrie; use std::borrow::Cow; @@ -83,7 +83,7 @@ impl SparseTrieInterface for ConfiguredSparseTrie { } } - fn reveal_nodes(&mut self, nodes: Vec) -> SparseTrieResult<()> { + fn reveal_nodes(&mut self, nodes: Vec) -> SparseTrieResult<()> { match self { Self::Serial(trie) => trie.reveal_nodes(nodes), Self::Parallel(trie) => trie.reveal_nodes(nodes), diff --git a/crates/engine/tree/src/tree/payload_processor/executor.rs b/crates/engine/tree/src/tree/payload_processor/executor.rs index 28165d5e8f..8409a4a06b 100644 --- a/crates/engine/tree/src/tree/payload_processor/executor.rs +++ b/crates/engine/tree/src/tree/payload_processor/executor.rs @@ -29,7 +29,7 @@ impl WorkloadExecutor { /// Shorthand for [`Runtime::spawn_blocking`] #[track_caller] - pub(super) fn spawn_blocking(&self, func: F) -> JoinHandle + pub fn spawn_blocking(&self, func: F) -> JoinHandle where F: FnOnce() -> R + Send + 'static, R: Send + 'static, diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 7e54d8a38e..02ca39f277 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -13,6 +13,8 @@ use crate::tree::{ sparse_trie::SparseTrieTask, StateProviderBuilder, TreeConfig, }; +use alloy_eip7928::BlockAccessList; +use alloy_eips::eip1898::BlockWithParent; use alloy_evm::{block::StateChangeSource, ToTxEnv}; use alloy_primitives::B256; use crossbeam_channel::Sender as CrossbeamSender; @@ -20,18 +22,17 @@ use executor::WorkloadExecutor; use multiproof::{SparseTrieUpdate, *}; use parking_lot::RwLock; use prewarm::PrewarmMetrics; -use reth_engine_primitives::ExecutableTxIterator; +use rayon::prelude::*; use reth_evm::{ execute::{ExecutableTxFor, WithTxEnv}, - ConfigureEvm, EvmEnvFor, OnStateHook, SpecFor, TxEnvFor, + ConfigureEvm, EvmEnvFor, ExecutableTxIterator, ExecutableTxTuple, OnStateHook, SpecFor, + TxEnvFor, }; +use reth_execution_types::ExecutionOutcome; use reth_primitives_traits::NodePrimitives; use reth_provider::{BlockReader, DatabaseProviderROFactory, StateProviderFactory, StateReader}; use reth_revm::{db::BundleState, state::EvmState}; -use reth_trie::{ - hashed_cursor::HashedCursorFactory, prefix_set::TriePrefixSetsMut, - trie_cursor::TrieCursorFactory, -}; +use reth_trie::{hashed_cursor::HashedCursorFactory, trie_cursor::TrieCursorFactory}; use reth_trie_parallel::{ proof_task::{ProofTaskCtx, ProofWorkerHandle}, root::ParallelStateRootError, @@ -42,6 +43,7 @@ use reth_trie_sparse::{ }; use reth_trie_sparse_parallel::{ParallelSparseTrie, ParallelismThresholds}; use std::{ + collections::BTreeMap, sync::{ atomic::AtomicBool, mpsc::{self, channel}, @@ -49,8 +51,9 @@ use std::{ }, time::Instant, }; -use tracing::{debug, debug_span, instrument, warn}; +use tracing::{debug, debug_span, instrument, warn, Span}; +pub mod bal; mod configured_sparse_trie; pub mod executor; pub mod multiproof; @@ -90,6 +93,13 @@ pub const SPARSE_TRIE_MAX_NODES_SHRINK_CAPACITY: usize = 1_000_000; /// 144MB. 
pub const SPARSE_TRIE_MAX_VALUES_SHRINK_CAPACITY: usize = 1_000_000; +/// Type alias for [`PayloadHandle`] returned by payload processor spawn methods. +type IteratorPayloadHandle = PayloadHandle< + WithTxEnv, ::Tx>, + ::Error, + ::Receipt, +>; + /// Entrypoint for executing the payload. #[derive(Debug)] pub struct PayloadProcessor @@ -106,6 +116,8 @@ where cross_block_cache_size: u64, /// Whether transactions should not be executed on prewarming task. disable_transaction_prewarming: bool, + /// Whether state cache should be disable + disable_state_cache: bool, /// Determines how to configure the evm for execution. evm_config: Evm, /// Whether precompile cache should be disabled. @@ -130,6 +142,11 @@ where N: NodePrimitives, Evm: ConfigureEvm, { + /// Returns a reference to the workload executor driving payload tasks. + pub(super) const fn executor(&self) -> &WorkloadExecutor { + &self.executor + } + /// Creates a new payload processor. pub fn new( executor: WorkloadExecutor, @@ -144,6 +161,7 @@ where cross_block_cache_size: config.cross_block_cache_size(), disable_transaction_prewarming: config.disable_prewarming(), evm_config, + disable_state_cache: config.disable_state_cache(), precompile_cache_disabled: config.precompile_cache_disabled(), precompile_cache_map, sparse_state_trie: Arc::default(), @@ -190,7 +208,6 @@ where /// /// This returns a handle to await the final state root and to interact with the tasks (e.g. /// canceling) - #[allow(clippy::type_complexity)] #[instrument( level = "debug", target = "engine::tree::payload_processor", @@ -204,10 +221,8 @@ where provider_builder: StateProviderBuilder, multiproof_provider_factory: F, config: &TreeConfig, - ) -> Result< - PayloadHandle, I::Tx>, I::Error>, - (ParallelStateRootError, I, ExecutionEnv, StateProviderBuilder), - > + bal: Option>, + ) -> IteratorPayloadHandle where P: BlockReader + StateProviderFactory + StateReader + Clone + 'static, F: DatabaseProviderROFactory @@ -215,17 +230,43 @@ where + Send + 'static, { - let span = tracing::Span::current(); - let (to_sparse_trie, sparse_trie_rx) = channel(); + // start preparing transactions immediately + let (prewarm_rx, execution_rx, transaction_count_hint) = + self.spawn_tx_iterator(transactions); - // We rely on the cursor factory to provide whatever DB overlay is necessary to see a - // consistent view of the database, including the trie tables. Because of this there is no - // need for an overarching prefix set to invalidate any section of the trie tables, and so - // we use an empty prefix set. 
- let prefix_sets = Arc::new(TriePrefixSetsMut::default()); + let span = Span::current(); + let (to_sparse_trie, sparse_trie_rx) = channel(); + let (to_multi_proof, from_multi_proof) = crossbeam_channel::unbounded(); + + // Handle BAL-based optimization if available + let prewarm_handle = if let Some(bal) = bal { + // When BAL is present, skip spawning prewarm tasks entirely and send BAL to multiproof + debug!(target: "engine::tree::payload_processor", "BAL present, skipping prewarm tasks"); + + // Send BAL message immediately to MultiProofTask + let _ = to_multi_proof.send(MultiProofMessage::BlockAccessList(bal)); + + // Spawn minimal cache-only task without prewarming + self.spawn_caching_with( + env, + prewarm_rx, + transaction_count_hint, + provider_builder.clone(), + None, // Don't send proof targets when BAL is present + ) + } else { + // Normal path: spawn with full prewarming + self.spawn_caching_with( + env, + prewarm_rx, + transaction_count_hint, + provider_builder.clone(), + Some(to_multi_proof.clone()), + ) + }; // Create and spawn the storage proof task - let task_ctx = ProofTaskCtx::new(multiproof_provider_factory, prefix_sets); + let task_ctx = ProofTaskCtx::new(multiproof_provider_factory); let storage_worker_count = config.storage_worker_count(); let account_worker_count = config.account_worker_count(); let proof_handle = ProofWorkerHandle::new( @@ -239,26 +280,20 @@ where proof_handle.clone(), to_sparse_trie, config.multiproof_chunking_enabled().then_some(config.multiproof_chunk_size()), + to_multi_proof, + from_multi_proof, ); // wire the multiproof task to the prewarm task let to_multi_proof = Some(multi_proof_task.state_root_message_sender()); - let (prewarm_rx, execution_rx, transaction_count_hint) = - self.spawn_tx_iterator(transactions); - - let prewarm_handle = self.spawn_caching_with( - env, - prewarm_rx, - transaction_count_hint, - provider_builder, - to_multi_proof.clone(), - ); - // spawn multi-proof task + let parent_span = span.clone(); self.executor.spawn_blocking(move || { - let _enter = span.entered(); - multi_proof_task.run(); + let _enter = parent_span.entered(); + // Build a state provider for the multiproof task + let provider = provider_builder.build().expect("failed to build provider"); + multi_proof_task.run(provider); }); // wire the sparse trie to the state root response receiver @@ -267,12 +302,13 @@ where // Spawn the sparse trie task using any stored trie and parallel trie configuration. self.spawn_sparse_trie_task(sparse_trie_rx, proof_handle, state_root_tx); - Ok(PayloadHandle { + PayloadHandle { to_multi_proof, prewarm_handle, state_root: Some(state_root_rx), transactions: execution_rx, - }) + _span: span, + } } /// Spawns a task that exclusively handles cache prewarming for transaction execution. 
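In the spawn path above, the multiproof channel pair is now created by the caller, so a Block Access List can be queued on it before the `MultiProofTask` starts running. A hedged sketch of that caller-owned, pre-seeded channel pattern using `std::sync::mpsc` instead of the crossbeam channels used in the real code (the `Task` and `Msg` types are illustrative):

use std::sync::mpsc;
use std::thread;

// Illustrative message type; the real task receives `MultiProofMessage`s.
enum Msg {
    Seeded(String),
    Update(u64),
    Finished,
}

// Illustrative worker that drains messages handed to it by the caller.
struct Task {
    rx: mpsc::Receiver<Msg>,
}

impl Task {
    fn run(self) {
        while let Ok(msg) = self.rx.recv() {
            match msg {
                Msg::Seeded(s) => println!("got pre-seeded message: {s}"),
                Msg::Update(n) => println!("got update {n}"),
                Msg::Finished => break,
            }
        }
    }
}

fn main() {
    // The caller owns both ends of the channel...
    let (tx, rx) = mpsc::channel();

    // ...so it can enqueue a message (e.g. a Block Access List) before the task runs.
    tx.send(Msg::Seeded("block access list".to_string())).unwrap();

    let handle = thread::spawn(move || Task { rx }.run());

    tx.send(Msg::Update(1)).unwrap();
    tx.send(Msg::Finished).unwrap();
    handle.join().unwrap();
}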
@@ -284,7 +320,7 @@ where env: ExecutionEnv, transactions: I, provider_builder: StateProviderBuilder, - ) -> PayloadHandle, I::Tx>, I::Error> + ) -> IteratorPayloadHandle where P: BlockReader + StateProviderFactory + StateReader + Clone + 'static, { @@ -296,6 +332,7 @@ where prewarm_handle, state_root: None, transactions: execution_rx, + _span: Span::current(), } } @@ -309,21 +346,46 @@ where mpsc::Receiver, I::Tx>, I::Error>>, usize, ) { - // Get the transaction count for prewarming task - // Use upper bound if available (more accurate), otherwise use lower bound - let (lower, upper) = transactions.size_hint(); - let transaction_count_hint = upper.unwrap_or(lower); + let (transactions, convert) = transactions.into(); + let transactions = transactions.into_par_iter(); + let transaction_count_hint = transactions.len(); + let (ooo_tx, ooo_rx) = mpsc::channel(); let (prewarm_tx, prewarm_rx) = mpsc::channel(); let (execute_tx, execute_rx) = mpsc::channel(); + + // Spawn a task that `convert`s all transactions in parallel and sends them out-of-order. self.executor.spawn_blocking(move || { - for tx in transactions { - let tx = tx.map(|tx| WithTxEnv { tx_env: tx.to_tx_env(), tx }); - // only send Ok(_) variants to prewarming task + transactions.enumerate().for_each_with(ooo_tx, |ooo_tx, (idx, tx)| { + let tx = convert(tx); + let tx = tx.map(|tx| WithTxEnv { tx_env: tx.to_tx_env(), tx: Arc::new(tx) }); + // Only send Ok(_) variants to prewarming task. if let Ok(tx) = &tx { let _ = prewarm_tx.send(tx.clone()); } - let _ = execute_tx.send(tx); + let _ = ooo_tx.send((idx, tx)); + }); + }); + + // Spawn a task that processes out-of-order transactions from the task above and sends them + // to the execution task in order. + self.executor.spawn_blocking(move || { + let mut next_for_execution = 0; + let mut queue = BTreeMap::new(); + while let Ok((idx, tx)) = ooo_rx.recv() { + if next_for_execution == idx { + let _ = execute_tx.send(tx); + next_for_execution += 1; + + while let Some(entry) = queue.first_entry() && + *entry.key() == next_for_execution + { + let _ = execute_tx.send(entry.remove()); + next_for_execution += 1; + } + } else { + queue.insert(idx, tx); + } } }); @@ -338,7 +400,7 @@ where transaction_count_hint: usize, provider_builder: StateProviderBuilder, to_multi_proof: Option>, - ) -> CacheTaskHandle + ) -> CacheTaskHandle where P: BlockReader + StateProviderFactory + StateReader + Clone + 'static, { @@ -348,9 +410,15 @@ where transactions = mpsc::channel().1; } - let saved_cache = self.cache_for(env.parent_hash); - let cache = saved_cache.cache().clone(); - let cache_metrics = saved_cache.metrics().clone(); + let (saved_cache, cache, cache_metrics) = if self.disable_state_cache { + (None, None, None) + } else { + let saved_cache = self.cache_for(env.parent_hash); + let cache = saved_cache.cache().clone(); + let cache_metrics = saved_cache.metrics().clone(); + (Some(saved_cache), Some(cache), Some(cache_metrics)) + }; + // configure prewarming let prewarm_ctx = PrewarmContext { env, @@ -375,9 +443,7 @@ where // spawn pre-warm task { let to_prewarm_task = to_prewarm_task.clone(); - let span = debug_span!(target: "engine::tree::payload_processor", "prewarm task"); self.executor.spawn_blocking(move || { - let _enter = span.entered(); prewarm_task.run(transactions, to_prewarm_task); }); } @@ -441,7 +507,7 @@ where sparse_state_trie, ); - let span = tracing::Span::current(); + let span = Span::current(); self.executor.spawn_blocking(move || { let _enter = span.entered(); @@ -464,28 +530,86 @@ 
where cleared_sparse_trie.lock().replace(cleared_trie); }); } + + /// Updates the execution cache with the post-execution state from an inserted block. + /// + /// This is used when blocks are inserted directly (e.g., locally built blocks by sequencers) + /// to ensure the cache remains warm for subsequent block execution. + /// + /// The cache enables subsequent blocks to reuse account, storage, and bytecode data without + /// hitting the database, maintaining performance consistency. + pub(crate) fn on_inserted_executed_block( + &self, + block_with_parent: BlockWithParent, + bundle_state: &BundleState, + ) { + self.execution_cache.update_with_guard(|cached| { + if cached.as_ref().is_some_and(|c| c.executed_block_hash() != block_with_parent.parent) { + debug!( + target: "engine::caching", + parent_hash = %block_with_parent.parent, + "Cannot find cache for parent hash, skip updating cache with new state for inserted executed block", + ); + return; + } + + // Take existing cache (if any) or create fresh caches + let (caches, cache_metrics) = match cached.take() { + Some(existing) => { + existing.split() + } + None => ( + ExecutionCacheBuilder::default().build_caches(self.cross_block_cache_size), + CachedStateMetrics::zeroed(), + ), + }; + + // Insert the block's bundle state into cache + let new_cache = SavedCache::new(block_with_parent.block.hash, caches, cache_metrics); + if new_cache.cache().insert_state(bundle_state).is_err() { + *cached = None; + debug!(target: "engine::caching", "cleared execution cache on update error"); + return; + } + new_cache.update_metrics(); + + // Replace with the updated cache + *cached = Some(new_cache); + debug!(target: "engine::caching", ?block_with_parent, "Updated execution cache for inserted block"); + }); + } } /// Handle to all the spawned tasks. +/// +/// Generic over `R` (receipt type) to allow sharing `Arc>` with the +/// caching task without cloning the expensive `BundleState`. #[derive(Debug)] -pub struct PayloadHandle { +pub struct PayloadHandle { /// Channel for evm state updates to_multi_proof: Option>, // must include the receiver of the state root wired to the sparse trie - prewarm_handle: CacheTaskHandle, - /// Receiver for the state root - state_root: Option>>, + prewarm_handle: CacheTaskHandle, /// Stream of block transactions transactions: mpsc::Receiver>, + /// Receiver for the state root + state_root: Option>>, + /// Span for tracing + _span: Span, } -impl PayloadHandle { +impl PayloadHandle { /// Awaits the state root /// /// # Panics /// /// If payload processing was started without background tasks. 
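The reworked `spawn_tx_iterator` earlier in this diff converts transactions in parallel (so results arrive out of order) and restores the original order with an index-keyed `BTreeMap` before handing them to execution. A hedged sketch of that reorder-buffer pattern using plain threads instead of rayon (all names here are illustrative):

use std::collections::BTreeMap;
use std::sync::mpsc;
use std::thread;

fn main() {
    let inputs: Vec<u64> = (0..8).collect();

    let (ooo_tx, ooo_rx) = mpsc::channel::<(usize, u64)>();
    let (ordered_tx, ordered_rx) = mpsc::channel::<u64>();

    // Stage 1: convert items in parallel; results arrive out of order.
    let mut workers = Vec::new();
    for (idx, item) in inputs.into_iter().enumerate() {
        let ooo_tx = ooo_tx.clone();
        workers.push(thread::spawn(move || {
            let converted = item * 10; // stand-in for the expensive conversion
            let _ = ooo_tx.send((idx, converted));
        }));
    }
    drop(ooo_tx);

    // Stage 2: reorder by index so downstream consumers see the original order.
    let reorderer = thread::spawn(move || {
        let mut next = 0usize;
        let mut queue = BTreeMap::new();
        while let Ok((idx, item)) = ooo_rx.recv() {
            queue.insert(idx, item);
            // flush every consecutive index that is now available
            while let Some(item) = queue.remove(&next) {
                let _ = ordered_tx.send(item);
                next += 1;
            }
        }
    });

    let ordered: Vec<u64> = ordered_rx.iter().collect();
    assert_eq!(ordered, (0..8u64).map(|i| i * 10).collect::<Vec<u64>>());

    for w in workers {
        w.join().unwrap();
    }
    reorderer.join().unwrap();
}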
- #[instrument(level = "debug", target = "engine::tree::payload_processor", skip_all)] + #[instrument( + level = "debug", + target = "engine::tree::payload_processor", + name = "await_state_root", + skip_all + )] pub fn state_root(&mut self) -> Result { self.state_root .take() @@ -503,18 +627,18 @@ impl PayloadHandle { move |source: StateChangeSource, state: &EvmState| { if let Some(sender) = &to_multi_proof { - let _ = sender.send(MultiProofMessage::StateUpdate(source, state.clone())); + let _ = sender.send(MultiProofMessage::StateUpdate(source.into(), state.clone())); } } } /// Returns a clone of the caches used by prewarming - pub(super) fn caches(&self) -> StateExecutionCache { + pub(super) fn caches(&self) -> Option { self.prewarm_handle.cache.clone() } /// Returns a clone of the cache metrics used by prewarming - pub(super) fn cache_metrics(&self) -> CachedStateMetrics { + pub(super) fn cache_metrics(&self) -> Option { self.prewarm_handle.cache_metrics.clone() } @@ -527,9 +651,14 @@ impl PayloadHandle { /// Terminates the entire caching task. /// - /// If the [`BundleState`] is provided it will update the shared cache. - pub(super) fn terminate_caching(&mut self, block_output: Option<&BundleState>) { - self.prewarm_handle.terminate_caching(block_output) + /// If the [`ExecutionOutcome`] is provided it will update the shared cache using its + /// bundle state. Using `Arc` allows sharing with the main execution + /// path without cloning the expensive `BundleState`. + pub(super) fn terminate_caching( + &mut self, + execution_outcome: Option>>, + ) { + self.prewarm_handle.terminate_caching(execution_outcome) } /// Returns iterator yielding transactions from the stream. @@ -541,17 +670,20 @@ impl PayloadHandle { } /// Access to the spawned [`PrewarmCacheTask`]. +/// +/// Generic over `R` (receipt type) to allow sharing `Arc>` with the +/// prewarm task without cloning the expensive `BundleState`. #[derive(Debug)] -pub(crate) struct CacheTaskHandle { +pub(crate) struct CacheTaskHandle { /// The shared cache the task operates with. - cache: StateExecutionCache, + cache: Option, /// Metrics for the caches - cache_metrics: CachedStateMetrics, + cache_metrics: Option, /// Channel to the spawned prewarm task if any - to_prewarm_task: Option>, + to_prewarm_task: Option>>, } -impl CacheTaskHandle { +impl CacheTaskHandle { /// Terminates the pre-warming transaction processing. /// /// Note: This does not terminate the task yet. @@ -563,20 +695,25 @@ impl CacheTaskHandle { /// Terminates the entire pre-warming task. /// - /// If the [`BundleState`] is provided it will update the shared cache. - pub(super) fn terminate_caching(&mut self, block_output: Option<&BundleState>) { + /// If the [`ExecutionOutcome`] is provided it will update the shared cache using its + /// bundle state. Using `Arc` avoids cloning the expensive `BundleState`. 
+ pub(super) fn terminate_caching( + &mut self, + execution_outcome: Option>>, + ) { if let Some(tx) = self.to_prewarm_task.take() { - // Only clone when we have an active task and a state to send - let event = PrewarmTaskEvent::Terminate { block_output: block_output.cloned() }; + let event = PrewarmTaskEvent::Terminate { execution_outcome }; let _ = tx.send(event); } } } -impl Drop for CacheTaskHandle { +impl Drop for CacheTaskHandle { fn drop(&mut self) { - // Ensure we always terminate on drop - self.terminate_caching(None); + // Ensure we always terminate on drop - send None without needing Send + Sync bounds + if let Some(tx) = self.to_prewarm_task.take() { + let _ = tx.send(PrewarmTaskEvent::Terminate { execution_outcome: None }); + } } } @@ -629,6 +766,8 @@ impl ExecutionCache { cache .as_ref() + // Check `is_available()` to ensure no other tasks (e.g., prewarming) currently hold + // a reference to this cache. We can only reuse it when we have exclusive access. .filter(|c| c.executed_block_hash() == parent_hash && c.is_available()) .cloned() } @@ -696,6 +835,7 @@ mod tests { precompile_cache::PrecompileCacheMap, StateProviderBuilder, TreeConfig, }; + use alloy_eips::eip1898::{BlockNumHash, BlockWithParent}; use alloy_evm::block::StateChangeSource; use rand::Rng; use reth_chainspec::ChainSpec; @@ -709,6 +849,7 @@ mod tests { test_utils::create_test_provider_factory_with_chain_spec, ChainSpecProvider, HashingWriter, }; + use reth_revm::db::BundleState; use reth_testing_utils::generators; use reth_trie::{test_utils::state_root, HashedPostState}; use revm_primitives::{Address, HashMap, B256, KECCAK_EMPTY, U256}; @@ -786,6 +927,70 @@ mod tests { assert!(new_checkout.is_some(), "new checkout should succeed after release and update"); } + #[test] + fn on_inserted_executed_block_populates_cache() { + let payload_processor = PayloadProcessor::new( + WorkloadExecutor::default(), + EthEvmConfig::new(Arc::new(ChainSpec::default())), + &TreeConfig::default(), + PrecompileCacheMap::default(), + ); + + let parent_hash = B256::from([1u8; 32]); + let block_hash = B256::from([10u8; 32]); + let block_with_parent = BlockWithParent { + block: BlockNumHash { hash: block_hash, number: 1 }, + parent: parent_hash, + }; + let bundle_state = BundleState::default(); + + // Cache should be empty initially + assert!(payload_processor.execution_cache.get_cache_for(block_hash).is_none()); + + // Update cache with inserted block + payload_processor.on_inserted_executed_block(block_with_parent, &bundle_state); + + // Cache should now exist for the block hash + let cached = payload_processor.execution_cache.get_cache_for(block_hash); + assert!(cached.is_some()); + assert_eq!(cached.unwrap().executed_block_hash(), block_hash); + } + + #[test] + fn on_inserted_executed_block_skips_on_parent_mismatch() { + let payload_processor = PayloadProcessor::new( + WorkloadExecutor::default(), + EthEvmConfig::new(Arc::new(ChainSpec::default())), + &TreeConfig::default(), + PrecompileCacheMap::default(), + ); + + // Setup: populate cache with block 1 + let block1_hash = B256::from([1u8; 32]); + payload_processor + .execution_cache + .update_with_guard(|slot| *slot = Some(make_saved_cache(block1_hash))); + + // Try to insert block 3 with wrong parent (should skip and keep block 1's cache) + let wrong_parent = B256::from([99u8; 32]); + let block3_hash = B256::from([3u8; 32]); + let block_with_parent = BlockWithParent { + block: BlockNumHash { hash: block3_hash, number: 3 }, + parent: wrong_parent, + }; + let bundle_state = 
BundleState::default(); + + payload_processor.on_inserted_executed_block(block_with_parent, &bundle_state); + + // Cache should still be for block 1 (unchanged) + let cached = payload_processor.execution_cache.get_cache_for(block1_hash); + assert!(cached.is_some(), "Original cache should be preserved"); + + // Cache for block 3 should not exist + let cached3 = payload_processor.execution_cache.get_cache_for(block3_hash); + assert!(cached3.is_none(), "New block cache should not be created on mismatch"); + } + fn create_mock_state_updates(num_accounts: usize, updates_per_account: usize) -> Vec { let mut rng = generators::rng(); let all_addresses: Vec
<Address>
= (0..num_accounts).map(|_| rng.random()).collect(); @@ -895,19 +1100,17 @@ mod tests { let provider_factory = BlockchainProvider::new(factory).unwrap(); - let mut handle = - payload_processor - .spawn( - Default::default(), - core::iter::empty::< - Result, core::convert::Infallible>, - >(), - StateProviderBuilder::new(provider_factory.clone(), genesis_hash, None), - OverlayStateProviderFactory::new(provider_factory), - &TreeConfig::default(), - ) - .map_err(|(err, ..)| err) - .expect("failed to spawn payload processor"); + let mut handle = payload_processor.spawn( + Default::default(), + ( + Vec::, core::convert::Infallible>>::new(), + std::convert::identity, + ), + StateProviderBuilder::new(provider_factory.clone(), genesis_hash, None), + OverlayStateProviderFactory::new(provider_factory), + &TreeConfig::default(), + None, // No BAL for test + ); let mut state_hook = handle.state_hook(); diff --git a/crates/engine/tree/src/tree/payload_processor/multiproof.rs b/crates/engine/tree/src/tree/payload_processor/multiproof.rs index a000e7a5ad..5695787cf1 100644 --- a/crates/engine/tree/src/tree/payload_processor/multiproof.rs +++ b/crates/engine/tree/src/tree/payload_processor/multiproof.rs @@ -1,32 +1,74 @@ //! Multiproof task related functionality. +use crate::tree::payload_processor::bal::bal_to_hashed_post_state; +use alloy_eip7928::BlockAccessList; use alloy_evm::block::StateChangeSource; -use alloy_primitives::{ - keccak256, - map::{B256Set, HashSet}, - B256, -}; +use alloy_primitives::{keccak256, map::HashSet, B256}; use crossbeam_channel::{unbounded, Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; use dashmap::DashMap; use derive_more::derive::Deref; -use metrics::Histogram; +use metrics::{Gauge, Histogram}; use reth_metrics::Metrics; +use reth_provider::AccountReader; use reth_revm::state::EvmState; use reth_trie::{ - added_removed_keys::MultiAddedRemovedKeys, prefix_set::TriePrefixSetsMut, - updates::TrieUpdatesSorted, DecodedMultiProof, HashedPostState, HashedPostStateSorted, - HashedStorage, MultiProofTargets, TrieInput, + added_removed_keys::MultiAddedRemovedKeys, DecodedMultiProof, HashedPostState, HashedStorage, + MultiProofTargets, }; use reth_trie_parallel::{ proof::ParallelProof, proof_task::{ AccountMultiproofInput, ProofResultContext, ProofResultMessage, ProofWorkerHandle, - StorageProofInput, }, }; -use std::{collections::BTreeMap, ops::DerefMut, sync::Arc, time::Instant}; +use std::{collections::BTreeMap, mem, ops::DerefMut, sync::Arc, time::Instant}; use tracing::{debug, error, instrument, trace}; +/// Source of state changes, either from EVM execution or from a Block Access List. +#[derive(Clone, Copy)] +pub enum Source { + /// State changes from EVM execution. + Evm(StateChangeSource), + /// State changes from Block Access List (EIP-7928). + BlockAccessList, +} + +impl std::fmt::Debug for Source { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Evm(source) => source.fmt(f), + Self::BlockAccessList => f.write_str("BlockAccessList"), + } + } +} + +impl From for Source { + fn from(source: StateChangeSource) -> Self { + Self::Evm(source) + } +} + +/// Maximum number of targets to batch together for prefetch batching. +/// Prefetches are just proof requests (no state merging), so we allow a higher cap than state +/// updates +const PREFETCH_MAX_BATCH_TARGETS: usize = 512; + +/// Maximum number of prefetch messages to batch together. +/// Prevents excessive batching even with small messages. 
+const PREFETCH_MAX_BATCH_MESSAGES: usize = 16; + +/// Maximum number of targets to batch together for state updates. +/// Lower than prefetch because state updates require additional processing (hashing, state +/// partitioning) before dispatch. +const STATE_UPDATE_MAX_BATCH_TARGETS: usize = 64; + +/// Preallocation hint for state update batching to avoid repeated reallocations on small bursts. +const STATE_UPDATE_BATCH_PREALLOC: usize = 16; + +/// The default max targets, for limiting the number of account and storage proof targets to be +/// fetched by a single worker. If exceeded, chunking is forced regardless of worker availability. +const DEFAULT_MAX_TARGETS_FOR_CHUNKING: usize = 300; + /// A trie update that can be applied to sparse trie alongside the proofs for touched parts of the /// state. #[derive(Default, Debug)] @@ -56,42 +98,13 @@ impl SparseTrieUpdate { } } -/// Common configuration for multi proof tasks -#[derive(Debug, Clone, Default)] -pub(crate) struct MultiProofConfig { - /// The sorted collection of cached in-memory intermediate trie nodes that - /// can be reused for computation. - pub nodes_sorted: Arc, - /// The sorted in-memory overlay hashed state. - pub state_sorted: Arc, - /// The collection of prefix sets for the computation. Since the prefix sets _always_ - /// invalidate the in-memory nodes, not all keys from `state_sorted` might be present here, - /// if we have cached nodes for them. - pub prefix_sets: Arc, -} - -impl MultiProofConfig { - /// Creates a new state root config from the trie input. - /// - /// This returns a cleared [`TrieInput`] so that we can reuse any allocated space in the - /// [`TrieInput`]. - pub(crate) fn from_input(mut input: TrieInput) -> (TrieInput, Self) { - let config = Self { - nodes_sorted: Arc::new(input.nodes.drain_into_sorted()), - state_sorted: Arc::new(input.state.drain_into_sorted()), - prefix_sets: Arc::new(input.prefix_sets.clone()), - }; - (input.cleared(), config) - } -} - /// Messages used internally by the multi proof task. #[derive(Debug)] pub(super) enum MultiProofMessage { /// Prefetch proof targets PrefetchProofs(MultiProofTargets), /// New state update from transaction execution with its source - StateUpdate(StateChangeSource, EvmState), + StateUpdate(Source, EvmState), /// State update that can be applied to the sparse trie without any new proofs. /// /// It can be the case when all accounts and storage slots from the state update were already @@ -102,6 +115,11 @@ pub(super) enum MultiProofMessage { /// The state update that was used to calculate the proof state: HashedPostState, }, + /// Block Access List (EIP-7928; BAL) containing complete state changes for the block. + /// + /// When received, the task generates a single state update from the BAL and processes it. + /// No further messages are expected after receiving this variant. + BlockAccessList(Arc), /// Signals state update stream end. 
/// /// This is triggered by block execution, indicating that no additional state updates are @@ -147,11 +165,6 @@ impl ProofSequencer { while let Some(pending) = self.pending_proofs.remove(¤t_sequence) { consecutive_proofs.push(pending); current_sequence += 1; - - // if we don't have the next number, stop collecting - if !self.pending_proofs.contains_key(¤t_sequence) { - break; - } } self.next_to_deliver += consecutive_proofs.len() as u64; @@ -218,78 +231,10 @@ pub(crate) fn evm_state_to_hashed_post_state(update: EvmState) -> HashedPostStat hashed_state } -/// A pending multiproof task, either [`StorageMultiproofInput`] or [`MultiproofInput`]. -#[derive(Debug)] -enum PendingMultiproofTask { - /// A storage multiproof task input. - Storage(StorageMultiproofInput), - /// A regular multiproof task input. - Regular(MultiproofInput), -} - -impl PendingMultiproofTask { - /// Returns the proof sequence number of the task. - const fn proof_sequence_number(&self) -> u64 { - match self { - Self::Storage(input) => input.proof_sequence_number, - Self::Regular(input) => input.proof_sequence_number, - } - } - - /// Returns whether or not the proof targets are empty. - fn proof_targets_is_empty(&self) -> bool { - match self { - Self::Storage(input) => input.proof_targets.is_empty(), - Self::Regular(input) => input.proof_targets.is_empty(), - } - } - - /// Destroys the input and sends a [`MultiProofMessage::EmptyProof`] message to the sender. - fn send_empty_proof(self) { - match self { - Self::Storage(input) => input.send_empty_proof(), - Self::Regular(input) => input.send_empty_proof(), - } - } -} - -impl From for PendingMultiproofTask { - fn from(input: StorageMultiproofInput) -> Self { - Self::Storage(input) - } -} - -impl From for PendingMultiproofTask { - fn from(input: MultiproofInput) -> Self { - Self::Regular(input) - } -} - -/// Input parameters for dispatching a dedicated storage multiproof calculation. -#[derive(Debug)] -struct StorageMultiproofInput { - hashed_state_update: HashedPostState, - hashed_address: B256, - proof_targets: B256Set, - proof_sequence_number: u64, - state_root_message_sender: CrossbeamSender, - multi_added_removed_keys: Arc, -} - -impl StorageMultiproofInput { - /// Destroys the input and sends a [`MultiProofMessage::EmptyProof`] message to the sender. - fn send_empty_proof(self) { - let _ = self.state_root_message_sender.send(MultiProofMessage::EmptyProof { - sequence_number: self.proof_sequence_number, - state: self.hashed_state_update, - }); - } -} - /// Input parameters for dispatching a multiproof calculation. #[derive(Debug)] struct MultiproofInput { - source: Option, + source: Option, hashed_state_update: HashedPostState, proof_targets: MultiProofTargets, proof_sequence_number: u64, @@ -319,8 +264,6 @@ impl MultiproofInput { /// `ProofSequencer`. #[derive(Debug)] pub struct MultiproofManager { - /// Currently running calculations. - inflight: usize, /// Handle to the proof worker pools (storage and account). 
proof_worker_handle: ProofWorkerHandle, /// Cached storage proof roots for missed leaves; this maps @@ -349,8 +292,11 @@ impl MultiproofManager { proof_worker_handle: ProofWorkerHandle, proof_result_tx: CrossbeamSender, ) -> Self { + // Initialize the max worker gauges with the worker pool sizes + metrics.max_storage_workers.set(proof_worker_handle.total_storage_workers() as f64); + metrics.max_account_workers.set(proof_worker_handle.total_account_workers() as f64); + Self { - inflight: 0, metrics, proof_worker_handle, missed_leaves_storage_roots: Default::default(), @@ -359,93 +305,28 @@ impl MultiproofManager { } /// Dispatches a new multiproof calculation to worker pools. - fn dispatch(&mut self, input: PendingMultiproofTask) { + fn dispatch(&self, input: MultiproofInput) { // If there are no proof targets, we can just send an empty multiproof back immediately - if input.proof_targets_is_empty() { - debug!( - sequence_number = input.proof_sequence_number(), + if input.proof_targets.is_empty() { + trace!( + sequence_number = input.proof_sequence_number, "No proof targets, sending empty multiproof back immediately" ); input.send_empty_proof(); - return - } - - match input { - PendingMultiproofTask::Storage(storage_input) => { - self.dispatch_storage_proof(storage_input); - } - PendingMultiproofTask::Regular(multiproof_input) => { - self.dispatch_multiproof(multiproof_input); - } - } - } - - /// Dispatches a single storage proof calculation to worker pool. - fn dispatch_storage_proof(&mut self, storage_multiproof_input: StorageMultiproofInput) { - let StorageMultiproofInput { - hashed_state_update, - hashed_address, - proof_targets, - proof_sequence_number, - multi_added_removed_keys, - state_root_message_sender: _, - } = storage_multiproof_input; - - let storage_targets = proof_targets.len(); - - trace!( - target: "engine::tree::payload_processor::multiproof", - proof_sequence_number, - ?proof_targets, - storage_targets, - "Dispatching storage proof to workers" - ); - - let start = Instant::now(); - - // Create prefix set from targets - let prefix_set = reth_trie::prefix_set::PrefixSetMut::from( - proof_targets.iter().map(reth_trie::Nibbles::unpack), - ); - let prefix_set = prefix_set.freeze(); - - // Build computation input (data only) - let input = StorageProofInput::new( - hashed_address, - prefix_set, - proof_targets, - true, // with_branch_node_masks - Some(multi_added_removed_keys), - ); - - // Dispatch to storage worker - if let Err(e) = self.proof_worker_handle.dispatch_storage_proof( - input, - ProofResultContext::new( - self.proof_result_tx.clone(), - proof_sequence_number, - hashed_state_update, - start, - ), - ) { - error!(target: "engine::tree::payload_processor::multiproof", ?e, "Failed to dispatch storage proof"); return; } - self.inflight += 1; - self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); - self.metrics - .pending_storage_multiproofs_histogram - .record(self.proof_worker_handle.pending_storage_tasks() as f64); - self.metrics - .pending_account_multiproofs_histogram - .record(self.proof_worker_handle.pending_account_tasks() as f64); + self.dispatch_multiproof(input); } /// Signals that a multiproof calculation has finished. 
- fn on_calculation_complete(&mut self) { - self.inflight = self.inflight.saturating_sub(1); - self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); + fn on_calculation_complete(&self) { + self.metrics + .active_storage_workers_histogram + .record(self.proof_worker_handle.active_storage_workers() as f64); + self.metrics + .active_account_workers_histogram + .record(self.proof_worker_handle.active_account_workers() as f64); self.metrics .pending_storage_multiproofs_histogram .record(self.proof_worker_handle.pending_storage_tasks() as f64); @@ -455,7 +336,7 @@ impl MultiproofManager { } /// Dispatches a single multiproof calculation to worker pool. - fn dispatch_multiproof(&mut self, multiproof_input: MultiproofInput) { + fn dispatch_multiproof(&self, multiproof_input: MultiproofInput) { let MultiproofInput { source, hashed_state_update, @@ -506,8 +387,12 @@ impl MultiproofManager { return; } - self.inflight += 1; - self.metrics.inflight_multiproofs_histogram.record(self.inflight as f64); + self.metrics + .active_storage_workers_histogram + .record(self.proof_worker_handle.active_storage_workers() as f64); + self.metrics + .active_account_workers_histogram + .record(self.proof_worker_handle.active_account_workers() as f64); self.metrics .pending_storage_multiproofs_histogram .record(self.proof_worker_handle.pending_storage_tasks() as f64); @@ -520,8 +405,14 @@ impl MultiproofManager { #[derive(Metrics, Clone)] #[metrics(scope = "tree.root")] pub(crate) struct MultiProofTaskMetrics { - /// Histogram of inflight multiproofs. - pub inflight_multiproofs_histogram: Histogram, + /// Histogram of active storage workers processing proofs. + pub active_storage_workers_histogram: Histogram, + /// Histogram of active account workers processing proofs. + pub active_account_workers_histogram: Histogram, + /// Gauge for the maximum number of storage workers in the pool. + pub max_storage_workers: Gauge, + /// Gauge for the maximum number of account workers in the pool. + pub max_account_workers: Gauge, /// Histogram of pending storage multiproofs in the queue. pub pending_storage_multiproofs_histogram: Histogram, /// Histogram of pending account multiproofs in the queue. @@ -541,6 +432,11 @@ pub(crate) struct MultiProofTaskMetrics { /// Histogram of the number of state update proof target chunks. pub state_update_proof_chunks_histogram: Histogram, + /// Histogram of prefetch proof batch sizes (number of messages merged). + pub prefetch_batch_size_histogram: Histogram, + /// Histogram of state update batch sizes (number of messages merged). + pub state_update_batch_size_histogram: Histogram, + /// Histogram of proof calculation durations. 
pub proof_calculation_duration_histogram: Histogram, @@ -583,7 +479,6 @@ pub(crate) struct MultiProofTaskMetrics { /// ▼ │ /// ┌──────────────────────────────────────────────────────────────┐ │ /// │ MultiproofManager │ │ -/// │ - Tracks inflight calculations │ │ /// │ - Deduplicates against fetched_proof_targets │ │ /// │ - Routes to appropriate worker pool │ │ /// └──┬───────────────────────────────────────────────────────────┘ │ @@ -624,7 +519,6 @@ pub(crate) struct MultiProofTaskMetrics { /// /// - **[`MultiproofManager`]**: Calculation orchestrator /// - Decides between fast path ([`EmptyProof`]) and worker dispatch -/// - Tracks inflight calculations /// - Routes storage-only vs full multiproofs to appropriate workers /// - Records metrics for monitoring /// @@ -687,16 +581,22 @@ pub(super) struct MultiProofTask { multiproof_manager: MultiproofManager, /// multi proof task metrics metrics: MultiProofTaskMetrics, + /// If this number is exceeded and chunking is enabled, then this will override whether or not + /// there are any active workers and force chunking across workers. This is to prevent tasks + /// which are very long from hitting a single worker. + max_targets_for_chunking: usize, } impl MultiProofTask { - /// Creates a new multi proof task with the unified message channel + /// Creates a multiproof task with separate channels: control on `tx`/`rx`, proof results on + /// `proof_result_rx`. pub(super) fn new( proof_worker_handle: ProofWorkerHandle, to_sparse_trie: std::sync::mpsc::Sender, chunk_size: Option, + tx: CrossbeamSender, + rx: CrossbeamReceiver, ) -> Self { - let (tx, rx) = unbounded(); let (proof_result_tx, proof_result_rx) = unbounded(); let metrics = MultiProofTaskMetrics::default(); @@ -715,6 +615,7 @@ impl MultiProofTask { proof_result_tx, ), metrics, + max_targets_for_chunking: DEFAULT_MAX_TARGETS_FOR_CHUNKING, } } @@ -725,8 +626,13 @@ impl MultiProofTask { /// Handles request for proof prefetch. /// - /// Returns a number of proofs that were spawned. - #[instrument(level = "debug", target = "engine::tree::payload_processor::multiproof", skip_all, fields(accounts = targets.len()))] + /// Returns how many multiproof tasks were dispatched for the prefetch request. + #[instrument( + level = "debug", + target = "engine::tree::payload_processor::multiproof", + skip_all, + fields(accounts = targets.len(), chunks = 0) + )] fn on_prefetch_proof(&mut self, targets: MultiProofTargets) -> u64 { let proof_targets = self.get_prefetch_proof_targets(targets); self.fetched_proof_targets.extend_ref(&proof_targets); @@ -744,40 +650,33 @@ impl MultiProofTask { .prefetch_proof_targets_storages_histogram .record(proof_targets.values().map(|slots| slots.len()).sum::() as f64); - // Process proof targets in chunks. - let mut chunks = 0; - - // Only chunk if account or storage workers are available to take advantage of parallelism. 
- let should_chunk = - self.multiproof_manager.proof_worker_handle.has_available_account_workers() || - self.multiproof_manager.proof_worker_handle.has_available_storage_workers(); - - let mut dispatch = |proof_targets| { - self.multiproof_manager.dispatch( - MultiproofInput { + let chunking_len = proof_targets.chunking_length(); + let available_account_workers = + self.multiproof_manager.proof_worker_handle.available_account_workers(); + let available_storage_workers = + self.multiproof_manager.proof_worker_handle.available_storage_workers(); + let num_chunks = dispatch_with_chunking( + proof_targets, + chunking_len, + self.chunk_size, + self.max_targets_for_chunking, + available_account_workers, + available_storage_workers, + MultiProofTargets::chunks, + |proof_targets| { + self.multiproof_manager.dispatch(MultiproofInput { source: None, hashed_state_update: Default::default(), proof_targets, proof_sequence_number: self.proof_sequencer.next_sequence(), state_root_message_sender: self.tx.clone(), multi_added_removed_keys: Some(multi_added_removed_keys.clone()), - } - .into(), - ); - chunks += 1; - }; + }); + }, + ); + self.metrics.prefetch_proof_chunks_histogram.record(num_chunks as f64); - if should_chunk && let Some(chunk_size) = self.chunk_size { - for proof_targets_chunk in proof_targets.chunks(chunk_size) { - dispatch(proof_targets_chunk); - } - } else { - dispatch(proof_targets); - } - - self.metrics.prefetch_proof_chunks_histogram.record(chunks as f64); - - chunks + num_chunks as u64 } // Returns true if all state updates finished and all proofs processed. @@ -833,7 +732,7 @@ impl MultiProofTask { let Some(fetched_storage) = self.fetched_proof_targets.get(hashed_address) else { // this means the account has not been fetched yet, so we must fetch everything // associated with this account - continue + continue; }; let prev_target_storage_len = target_storage.len(); @@ -855,11 +754,27 @@ impl MultiProofTask { /// Handles state updates. /// - /// Returns a number of proofs that were spawned. - #[instrument(level = "debug", target = "engine::tree::payload_processor::multiproof", skip(self, update), fields(accounts = update.len()))] - fn on_state_update(&mut self, source: StateChangeSource, update: EvmState) -> u64 { + /// Returns how many proof dispatches were spawned (including an `EmptyProof` for already + /// fetched targets). + #[instrument( + level = "debug", + target = "engine::tree::payload_processor::multiproof", + skip(self, update), + fields(accounts = update.len(), chunks = 0) + )] + fn on_state_update(&mut self, source: Source, update: EvmState) -> u64 { let hashed_state_update = evm_state_to_hashed_post_state(update); + self.on_hashed_state_update(source, hashed_state_update) + } + /// Processes a hashed state update and dispatches multiproofs as needed. + /// + /// Returns the number of state updates dispatched (both `EmptyProof` and regular multiproofs). + fn on_hashed_state_update( + &mut self, + source: Source, + hashed_state_update: HashedPostState, + ) -> u64 { // Update removed keys based on the state update. self.multi_added_removed_keys.update_with_state(&hashed_state_update); @@ -882,58 +797,49 @@ impl MultiProofTask { // Clone+Arc MultiAddedRemovedKeys for sharing with the dispatched multiproof tasks let multi_added_removed_keys = Arc::new(self.multi_added_removed_keys.clone()); - // Process state updates in chunks. 
- let mut chunks = 0; - + let chunking_len = not_fetched_state_update.chunking_length(); let mut spawned_proof_targets = MultiProofTargets::default(); + let available_account_workers = + self.multiproof_manager.proof_worker_handle.available_account_workers(); + let available_storage_workers = + self.multiproof_manager.proof_worker_handle.available_storage_workers(); + let num_chunks = dispatch_with_chunking( + not_fetched_state_update, + chunking_len, + self.chunk_size, + self.max_targets_for_chunking, + available_account_workers, + available_storage_workers, + HashedPostState::chunks, + |hashed_state_update| { + let proof_targets = get_proof_targets( + &hashed_state_update, + &self.fetched_proof_targets, + &multi_added_removed_keys, + ); + spawned_proof_targets.extend_ref(&proof_targets); - // Only chunk if account or storage workers are available to take advantage of parallelism. - let should_chunk = - self.multiproof_manager.proof_worker_handle.has_available_account_workers() || - self.multiproof_manager.proof_worker_handle.has_available_storage_workers(); - - let mut dispatch = |hashed_state_update| { - let proof_targets = get_proof_targets( - &hashed_state_update, - &self.fetched_proof_targets, - &multi_added_removed_keys, - ); - spawned_proof_targets.extend_ref(&proof_targets); - - self.multiproof_manager.dispatch( - MultiproofInput { + self.multiproof_manager.dispatch(MultiproofInput { source: Some(source), hashed_state_update, proof_targets, proof_sequence_number: self.proof_sequencer.next_sequence(), state_root_message_sender: self.tx.clone(), multi_added_removed_keys: Some(multi_added_removed_keys.clone()), - } - .into(), - ); - - chunks += 1; - }; - - if should_chunk && let Some(chunk_size) = self.chunk_size { - for chunk in not_fetched_state_update.chunks(chunk_size) { - dispatch(chunk); - } - } else { - dispatch(not_fetched_state_update); - } - + }); + }, + ); self.metrics .state_update_proof_targets_accounts_histogram .record(spawned_proof_targets.len() as f64); self.metrics .state_update_proof_targets_storages_histogram .record(spawned_proof_targets.values().map(|slots| slots.len()).sum::() as f64); - self.metrics.state_update_proof_chunks_histogram.record(chunks as f64); + self.metrics.state_update_proof_chunks_histogram.record(num_chunks as f64); self.fetched_proof_targets.extend(spawned_proof_targets); - state_updates + chunks + state_updates + num_chunks as u64 } /// Handler for new proof calculated, aggregates all the existing sequential proofs. @@ -955,6 +861,281 @@ impl MultiProofTask { .filter(|proof| !proof.is_empty()) } + /// Processes a multiproof message, batching consecutive same-type messages. + /// + /// Drains queued messages of the same type and merges them into one batch before processing, + /// storing one pending message (different type or over-cap) to handle on the next iteration. + /// This preserves ordering without requeuing onto the channel. + /// + /// Returns `true` if done, `false` to continue. + fn process_multiproof_message
<P>
( + &mut self, + msg: MultiProofMessage, + ctx: &mut MultiproofBatchCtx, + batch_metrics: &mut MultiproofBatchMetrics, + provider: &P, + ) -> bool + where + P: AccountReader, + { + match msg { + // Prefetch proofs: batch consecutive prefetch requests up to target/message limits + MultiProofMessage::PrefetchProofs(targets) => { + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::PrefetchProofs"); + + if ctx.first_update_time.is_none() { + self.metrics + .first_update_wait_time_histogram + .record(ctx.start.elapsed().as_secs_f64()); + ctx.first_update_time = Some(Instant::now()); + debug!(target: "engine::tree::payload_processor::multiproof", "Started state root calculation"); + } + + let mut accumulated_count = targets.chunking_length(); + ctx.accumulated_prefetch_targets.clear(); + ctx.accumulated_prefetch_targets.push(targets); + + // Batch consecutive prefetch messages up to limits. + while accumulated_count < PREFETCH_MAX_BATCH_TARGETS && + ctx.accumulated_prefetch_targets.len() < PREFETCH_MAX_BATCH_MESSAGES + { + match self.rx.try_recv() { + Ok(MultiProofMessage::PrefetchProofs(next_targets)) => { + let next_count = next_targets.chunking_length(); + if accumulated_count + next_count > PREFETCH_MAX_BATCH_TARGETS { + ctx.pending_msg = + Some(MultiProofMessage::PrefetchProofs(next_targets)); + break; + } + accumulated_count += next_count; + ctx.accumulated_prefetch_targets.push(next_targets); + } + Ok(other_msg) => { + ctx.pending_msg = Some(other_msg); + break; + } + Err(_) => break, + } + } + + // Process all accumulated messages in a single batch + let num_batched = ctx.accumulated_prefetch_targets.len(); + self.metrics.prefetch_batch_size_histogram.record(num_batched as f64); + + // Merge all accumulated prefetch targets into a single dispatch payload. + // Use drain to preserve the buffer allocation. + let mut accumulated_iter = ctx.accumulated_prefetch_targets.drain(..); + let mut merged_targets = + accumulated_iter.next().expect("prefetch batch always has at least one entry"); + for next_targets in accumulated_iter { + merged_targets.extend(next_targets); + } + + let account_targets = merged_targets.len(); + let storage_targets = + merged_targets.values().map(|slots| slots.len()).sum::(); + batch_metrics.prefetch_proofs_requested += self.on_prefetch_proof(merged_targets); + trace!( + target: "engine::tree::payload_processor::multiproof", + account_targets, + storage_targets, + prefetch_proofs_requested = batch_metrics.prefetch_proofs_requested, + num_batched, + "Dispatched prefetch batch" + ); + + false + } + // State update: batch consecutive updates from the same source + MultiProofMessage::StateUpdate(source, update) => { + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::StateUpdate"); + + if ctx.first_update_time.is_none() { + self.metrics + .first_update_wait_time_histogram + .record(ctx.start.elapsed().as_secs_f64()); + ctx.first_update_time = Some(Instant::now()); + debug!(target: "engine::tree::payload_processor::multiproof", "Started state root calculation"); + } + + // Accumulate messages including the first one; reuse buffer to avoid allocations. + let mut accumulated_targets = estimate_evm_state_targets(&update); + ctx.accumulated_state_updates.clear(); + ctx.accumulated_state_updates.push((source, update)); + + // Batch consecutive state update messages up to target limit. 
+ while accumulated_targets < STATE_UPDATE_MAX_BATCH_TARGETS { + match self.rx.try_recv() { + Ok(MultiProofMessage::StateUpdate(next_source, next_update)) => { + let (batch_source, batch_update) = &ctx.accumulated_state_updates[0]; + if !can_batch_state_update( + *batch_source, + batch_update, + next_source, + &next_update, + ) { + ctx.pending_msg = + Some(MultiProofMessage::StateUpdate(next_source, next_update)); + break; + } + + let next_estimate = estimate_evm_state_targets(&next_update); + // Would exceed batch cap; leave pending to dispatch on next iteration. + if accumulated_targets + next_estimate > STATE_UPDATE_MAX_BATCH_TARGETS + { + ctx.pending_msg = + Some(MultiProofMessage::StateUpdate(next_source, next_update)); + break; + } + accumulated_targets += next_estimate; + ctx.accumulated_state_updates.push((next_source, next_update)); + } + Ok(other_msg) => { + ctx.pending_msg = Some(other_msg); + break; + } + Err(_) => break, + } + } + + // Process all accumulated messages in a single batch + let num_batched = ctx.accumulated_state_updates.len(); + self.metrics.state_update_batch_size_histogram.record(num_batched as f64); + + #[cfg(debug_assertions)] + { + let batch_source = ctx.accumulated_state_updates[0].0; + let batch_update = &ctx.accumulated_state_updates[0].1; + debug_assert!(ctx.accumulated_state_updates.iter().all(|(source, update)| { + can_batch_state_update(batch_source, batch_update, *source, update) + })); + } + + // Merge all accumulated updates into a single EvmState payload. + // Use drain to preserve the buffer allocation. + let mut accumulated_iter = ctx.accumulated_state_updates.drain(..); + let (mut batch_source, mut merged_update) = accumulated_iter + .next() + .expect("state update batch always has at least one entry"); + for (next_source, next_update) in accumulated_iter { + batch_source = next_source; + merged_update.extend(next_update); + } + + let batch_len = merged_update.len(); + batch_metrics.state_update_proofs_requested += + self.on_state_update(batch_source, merged_update); + trace!( + target: "engine::tree::payload_processor::multiproof", + ?batch_source, + len = batch_len, + state_update_proofs_requested = ?batch_metrics.state_update_proofs_requested, + num_batched, + "Dispatched state update batch" + ); + + false + } + // Process Block Access List (BAL) - complete state changes provided upfront + MultiProofMessage::BlockAccessList(bal) => { + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::BAL"); + + if ctx.first_update_time.is_none() { + self.metrics + .first_update_wait_time_histogram + .record(ctx.start.elapsed().as_secs_f64()); + ctx.first_update_time = Some(Instant::now()); + debug!(target: "engine::tree::payload_processor::multiproof", "Started state root calculation from BAL"); + } + + // Convert BAL to HashedPostState and process it + match bal_to_hashed_post_state(&bal, &provider) { + Ok(hashed_state) => { + debug!( + target: "engine::tree::payload_processor::multiproof", + accounts = hashed_state.accounts.len(), + storages = hashed_state.storages.len(), + "Processing BAL state update" + ); + + // Use BlockAccessList as source for BAL-derived state updates + batch_metrics.state_update_proofs_requested += + self.on_hashed_state_update(Source::BlockAccessList, hashed_state); + } + Err(err) => { + error!(target: "engine::tree::payload_processor::multiproof", ?err, "Failed to convert BAL to hashed state"); + return true; + } + } + + // Mark updates as finished since BAL provides complete state + 
ctx.updates_finished_time = Some(Instant::now()); + + // Check if we're done (might need to wait for proofs to complete) + if self.is_done( + batch_metrics.proofs_processed, + batch_metrics.state_update_proofs_requested, + batch_metrics.prefetch_proofs_requested, + ctx.updates_finished(), + ) { + debug!( + target: "engine::tree::payload_processor::multiproof", + "BAL processed and all proofs complete, ending calculation" + ); + return true; + } + false + } + // Signal that no more state updates will arrive + MultiProofMessage::FinishedStateUpdates => { + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::FinishedStateUpdates"); + + ctx.updates_finished_time = Some(Instant::now()); + + if self.is_done( + batch_metrics.proofs_processed, + batch_metrics.state_update_proofs_requested, + batch_metrics.prefetch_proofs_requested, + ctx.updates_finished(), + ) { + debug!( + target: "engine::tree::payload_processor::multiproof", + "State updates finished and all proofs processed, ending calculation" + ); + return true; + } + false + } + // Handle proof result with no trie nodes (state unchanged) + MultiProofMessage::EmptyProof { sequence_number, state } => { + trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::EmptyProof"); + + batch_metrics.proofs_processed += 1; + + if let Some(combined_update) = self.on_proof( + sequence_number, + SparseTrieUpdate { state, multiproof: Default::default() }, + ) { + let _ = self.to_sparse_trie.send(combined_update); + } + + if self.is_done( + batch_metrics.proofs_processed, + batch_metrics.state_update_proofs_requested, + batch_metrics.prefetch_proofs_requested, + ctx.updates_finished(), + ) { + debug!( + target: "engine::tree::payload_processor::multiproof", + "State updates finished and all proofs processed, ending calculation" + ); + return true; + } + false + } + } + } + /// Starts the main loop that handles all incoming messages, fetches proofs, applies them to the /// sparse trie, updates the sparse trie, and eventually returns the state root. /// @@ -986,137 +1167,43 @@ impl MultiProofTask { /// * Once this message is received, on every [`MultiProofMessage::EmptyProof`] and /// [`ProofResultMessage`], we check if all proofs have been processed and if there are any /// pending proofs in the proof sequencer left to be revealed. - /// 6. This task exits after all pending proofs are processed. + /// 6. While running, consecutive [`MultiProofMessage::PrefetchProofs`] and + /// [`MultiProofMessage::StateUpdate`] messages are batched to reduce redundant work; if a + /// different message type arrives mid-batch or a batch cap is reached, it is held as + /// `pending_msg` and processed on the next loop to preserve ordering. + /// 7. This task exits after all pending proofs are processed. #[instrument( level = "debug", name = "MultiProofTask::run", target = "engine::tree::payload_processor::multiproof", skip_all )] - pub(crate) fn run(mut self) { - // TODO convert those into fields - let mut prefetch_proofs_requested = 0; - let mut state_update_proofs_requested = 0; - let mut proofs_processed = 0; + pub(crate) fn run
<P>
(mut self, provider: P) + where + P: AccountReader, + { + let mut ctx = MultiproofBatchCtx::new(Instant::now()); + let mut batch_metrics = MultiproofBatchMetrics::default(); - let mut updates_finished = false; - - // Timestamp before the first state update or prefetch was received - let start = Instant::now(); - - // Timestamp when the first state update or prefetch was received - let mut first_update_time = None; - // Timestamp when state updates have finished - let mut updates_finished_time = None; - - loop { + // Main event loop; select_biased! prioritizes proof results over control messages. + // Labeled so inner match arms can `break 'main` once all work is complete. + 'main: loop { trace!(target: "engine::tree::payload_processor::multiproof", "entering main channel receiving loop"); - crossbeam_channel::select! { - recv(self.rx) -> message => { - match message { - Ok(msg) => match msg { - MultiProofMessage::PrefetchProofs(targets) => { - trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::PrefetchProofs"); + if let Some(msg) = ctx.pending_msg.take() { + if self.process_multiproof_message(msg, &mut ctx, &mut batch_metrics, &provider) { + break 'main; + } + continue; + } - if first_update_time.is_none() { - // record the wait time - self.metrics - .first_update_wait_time_histogram - .record(start.elapsed().as_secs_f64()); - first_update_time = Some(Instant::now()); - debug!(target: "engine::tree::payload_processor::multiproof", "Started state root calculation"); - } - - let account_targets = targets.len(); - let storage_targets = - targets.values().map(|slots| slots.len()).sum::(); - prefetch_proofs_requested += self.on_prefetch_proof(targets); - debug!( - target: "engine::tree::payload_processor::multiproof", - account_targets, - storage_targets, - prefetch_proofs_requested, - "Prefetching proofs" - ); - } - MultiProofMessage::StateUpdate(source, update) => { - trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::StateUpdate"); - - if first_update_time.is_none() { - // record the wait time - self.metrics - .first_update_wait_time_histogram - .record(start.elapsed().as_secs_f64()); - first_update_time = Some(Instant::now()); - debug!(target: "engine::tree::payload_processor::multiproof", "Started state root calculation"); - } - - let len = update.len(); - state_update_proofs_requested += self.on_state_update(source, update); - debug!( - target: "engine::tree::payload_processor::multiproof", - ?source, - len, - ?state_update_proofs_requested, - "Received new state update" - ); - } - MultiProofMessage::FinishedStateUpdates => { - trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::FinishedStateUpdates"); - - updates_finished = true; - updates_finished_time = Some(Instant::now()); - - if self.is_done( - proofs_processed, - state_update_proofs_requested, - prefetch_proofs_requested, - updates_finished, - ) { - debug!( - target: "engine::tree::payload_processor::multiproof", - "State updates finished and all proofs processed, ending calculation" - ); - break - } - } - MultiProofMessage::EmptyProof { sequence_number, state } => { - trace!(target: "engine::tree::payload_processor::multiproof", "processing MultiProofMessage::EmptyProof"); - - proofs_processed += 1; - - if let Some(combined_update) = self.on_proof( - sequence_number, - SparseTrieUpdate { state, multiproof: Default::default() }, - ) { - let _ = self.to_sparse_trie.send(combined_update); - } - - if 
self.is_done( - proofs_processed, - state_update_proofs_requested, - prefetch_proofs_requested, - updates_finished, - ) { - debug!( - target: "engine::tree::payload_processor::multiproof", - "State updates finished and all proofs processed, ending calculation" - ); - break - } - } - }, - Err(_) => { - error!(target: "engine::tree::payload_processor::multiproof", "State root related message channel closed unexpectedly"); - return - } - } - }, + // Use select_biased! to prioritize proof results over new requests. + // This prevents new work from starving completed proofs and keeps workers healthy. + crossbeam_channel::select_biased! { recv(self.proof_result_rx) -> proof_msg => { match proof_msg { Ok(proof_result) => { - proofs_processed += 1; + batch_metrics.proofs_processed += 1; self.metrics .proof_calculation_duration_histogram @@ -1127,10 +1214,10 @@ impl MultiProofTask { // Convert ProofResultMessage to SparseTrieUpdate match proof_result.result { Ok(proof_result_data) => { - debug!( + trace!( target: "engine::tree::payload_processor::multiproof", sequence = proof_result.sequence_number, - total_proofs = proofs_processed, + total_proofs = batch_metrics.proofs_processed, "Processing calculated proof from worker" ); @@ -1152,16 +1239,16 @@ impl MultiProofTask { } if self.is_done( - proofs_processed, - state_update_proofs_requested, - prefetch_proofs_requested, - updates_finished, + batch_metrics.proofs_processed, + batch_metrics.state_update_proofs_requested, + batch_metrics.prefetch_proofs_requested, + ctx.updates_finished(), ) { debug!( target: "engine::tree::payload_processor::multiproof", "State updates finished and all proofs processed, ending calculation" ); - break + break 'main } } Err(_) => { @@ -1169,27 +1256,42 @@ impl MultiProofTask { return } } + }, + recv(self.rx) -> message => { + let msg = match message { + Ok(m) => m, + Err(_) => { + error!(target: "engine::tree::payload_processor::multiproof", "State root related message channel closed unexpectedly"); + return + } + }; + + if self.process_multiproof_message(msg, &mut ctx, &mut batch_metrics, &provider) { + break 'main; + } } } } debug!( target: "engine::tree::payload_processor::multiproof", - total_updates = state_update_proofs_requested, - total_proofs = proofs_processed, - total_time = ?first_update_time.map(|t|t.elapsed()), - time_since_updates_finished = ?updates_finished_time.map(|t|t.elapsed()), + total_updates = batch_metrics.state_update_proofs_requested, + total_proofs = batch_metrics.proofs_processed, + total_time = ?ctx.first_update_time.map(|t|t.elapsed()), + time_since_updates_finished = ?ctx.updates_finished_time.map(|t|t.elapsed()), "All proofs processed, ending calculation" ); // update total metrics on finish - self.metrics.state_updates_received_histogram.record(state_update_proofs_requested as f64); - self.metrics.proofs_processed_histogram.record(proofs_processed as f64); - if let Some(total_time) = first_update_time.map(|t| t.elapsed()) { + self.metrics + .state_updates_received_histogram + .record(batch_metrics.state_update_proofs_requested as f64); + self.metrics.proofs_processed_histogram.record(batch_metrics.proofs_processed as f64); + if let Some(total_time) = ctx.first_update_time.map(|t| t.elapsed()) { self.metrics.multiproof_task_total_duration_histogram.record(total_time); } - if let Some(updates_finished_time) = updates_finished_time { + if let Some(updates_finished_time) = ctx.updates_finished_time { self.metrics .last_proof_wait_time_histogram 
.record(updates_finished_time.elapsed().as_secs_f64()); @@ -1197,6 +1299,60 @@ impl MultiProofTask { } } +/// Context for multiproof message batching loop. +/// +/// Contains processing state that persists across loop iterations. +/// +/// Used by `process_multiproof_message` to batch consecutive same-type messages received via +/// `try_recv` for efficient processing. +struct MultiproofBatchCtx { + /// Buffers a non-matching message type encountered during batching. + /// Processed first in next iteration to preserve ordering while allowing same-type + /// messages to batch. + pending_msg: Option, + /// Timestamp when the first state update or prefetch was received. + first_update_time: Option, + /// Timestamp before the first state update or prefetch was received. + start: Instant, + /// Timestamp when state updates finished. `Some` indicates all state updates have been + /// received. + updates_finished_time: Option, + /// Reusable buffer for accumulating prefetch targets during batching. + accumulated_prefetch_targets: Vec, + /// Reusable buffer for accumulating state updates during batching. + accumulated_state_updates: Vec<(Source, EvmState)>, +} + +impl MultiproofBatchCtx { + /// Creates a new batch context with the given start time. + fn new(start: Instant) -> Self { + Self { + pending_msg: None, + first_update_time: None, + start, + updates_finished_time: None, + accumulated_prefetch_targets: Vec::with_capacity(PREFETCH_MAX_BATCH_MESSAGES), + accumulated_state_updates: Vec::with_capacity(STATE_UPDATE_BATCH_PREALLOC), + } + } + + /// Returns `true` if all state updates have been received. + const fn updates_finished(&self) -> bool { + self.updates_finished_time.is_some() + } +} + +/// Counters for tracking proof requests and processing. +#[derive(Default)] +struct MultiproofBatchMetrics { + /// Number of proofs that have been processed. + proofs_processed: u64, + /// Number of state update proofs requested. + state_update_proofs_requested: u64, + /// Number of prefetch proofs requested. + prefetch_proofs_requested: u64, +} + /// Returns accounts only with those storages that were not already fetched, and /// if there are no such storages and the account itself was already fetched, the /// account shouldn't be included. @@ -1240,10 +1396,107 @@ fn get_proof_targets( targets } +/// Dispatches work items as a single unit or in chunks based on target size and worker +/// availability. +#[allow(clippy::too_many_arguments)] +fn dispatch_with_chunking( + items: T, + chunking_len: usize, + chunk_size: Option, + max_targets_for_chunking: usize, + available_account_workers: usize, + available_storage_workers: usize, + chunker: impl FnOnce(T, usize) -> I, + mut dispatch: impl FnMut(T), +) -> usize +where + I: IntoIterator, +{ + let should_chunk = chunking_len > max_targets_for_chunking || + available_account_workers > 1 || + available_storage_workers > 1; + + if should_chunk && + let Some(chunk_size) = chunk_size && + chunking_len > chunk_size + { + let mut num_chunks = 0usize; + for chunk in chunker(items, chunk_size) { + dispatch(chunk); + num_chunks += 1; + } + return num_chunks; + } + + dispatch(items); + 1 +} + +/// Checks whether two state updates can be merged in a batch. +/// +/// Transaction updates with the same transaction ID (`StateChangeSource::Transaction(id)`) +/// are safe to merge because they originate from the same logical execution and can be +/// coalesced to amortize proof work. 
+fn can_batch_state_update( + batch_source: Source, + batch_update: &EvmState, + next_source: Source, + next_update: &EvmState, +) -> bool { + if !same_source(batch_source, next_source) { + return false; + } + + match (batch_source, next_source) { + ( + Source::Evm(StateChangeSource::PreBlock(_)), + Source::Evm(StateChangeSource::PreBlock(_)), + ) | + ( + Source::Evm(StateChangeSource::PostBlock(_)), + Source::Evm(StateChangeSource::PostBlock(_)), + ) => batch_update == next_update, + _ => true, + } +} + +/// Checks whether two sources refer to the same origin. +fn same_source(lhs: Source, rhs: Source) -> bool { + match (lhs, rhs) { + ( + Source::Evm(StateChangeSource::Transaction(a)), + Source::Evm(StateChangeSource::Transaction(b)), + ) => a == b, + ( + Source::Evm(StateChangeSource::PreBlock(a)), + Source::Evm(StateChangeSource::PreBlock(b)), + ) => mem::discriminant(&a) == mem::discriminant(&b), + ( + Source::Evm(StateChangeSource::PostBlock(a)), + Source::Evm(StateChangeSource::PostBlock(b)), + ) => mem::discriminant(&a) == mem::discriminant(&b), + (Source::BlockAccessList, Source::BlockAccessList) => true, + _ => false, + } +} + +/// Estimates target count from `EvmState` for batching decisions. +fn estimate_evm_state_targets(state: &EvmState) -> usize { + state + .values() + .filter(|account| account.is_touched()) + .map(|account| { + let changed_slots = account.storage.iter().filter(|(_, v)| v.is_changed()).count(); + 1 + changed_slots + }) + .sum() +} + #[cfg(test)] mod tests { use super::*; - use alloy_primitives::map::B256Set; + use alloy_eip7928::{AccountChanges, BalanceChange}; + use alloy_primitives::{map::B256Set, Address}; use reth_provider::{ providers::OverlayStateProviderFactory, test_utils::create_test_provider_factory, BlockReader, DatabaseProviderFactory, PruneCheckpointReader, StageCheckpointReader, @@ -1252,7 +1505,7 @@ mod tests { use reth_trie::MultiProof; use reth_trie_parallel::proof_task::{ProofTaskCtx, ProofWorkerHandle}; use revm_primitives::{B256, U256}; - use std::sync::OnceLock; + use std::sync::{Arc, OnceLock}; use tokio::runtime::{Handle, Runtime}; /// Get a handle to the test runtime, creating it if necessary @@ -1276,11 +1529,12 @@ mod tests { { let rt_handle = get_test_runtime_handle(); let overlay_factory = OverlayStateProviderFactory::new(factory); - let task_ctx = ProofTaskCtx::new(overlay_factory, Default::default()); + let task_ctx = ProofTaskCtx::new(overlay_factory); let proof_handle = ProofWorkerHandle::new(rt_handle, task_ctx, 1, 1); let (to_sparse_trie, _receiver) = std::sync::mpsc::channel(); + let (tx, rx) = crossbeam_channel::unbounded(); - MultiProofTask::new(proof_handle, to_sparse_trie, Some(1)) + MultiProofTask::new(proof_handle, to_sparse_trie, Some(1), tx, rx) } #[test] @@ -1715,4 +1969,756 @@ mod tests { // only slots in the state update can be included, so slot3 should not appear assert!(!targets.contains_key(&addr)); } + + /// Verifies that consecutive prefetch proof messages are batched together. 
+ #[test] + fn test_prefetch_proofs_batching() { + let test_provider_factory = create_test_provider_factory(); + let mut task = create_test_state_root_task(test_provider_factory); + + // send multiple messages + let addr1 = B256::random(); + let addr2 = B256::random(); + let addr3 = B256::random(); + + let mut targets1 = MultiProofTargets::default(); + targets1.insert(addr1, HashSet::default()); + + let mut targets2 = MultiProofTargets::default(); + targets2.insert(addr2, HashSet::default()); + + let mut targets3 = MultiProofTargets::default(); + targets3.insert(addr3, HashSet::default()); + + let tx = task.state_root_message_sender(); + tx.send(MultiProofMessage::PrefetchProofs(targets1)).unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(targets2)).unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(targets3)).unwrap(); + + let proofs_requested = + if let Ok(MultiProofMessage::PrefetchProofs(targets)) = task.rx.recv() { + // simulate the batching logic + let mut merged_targets = targets; + let mut num_batched = 1; + while let Ok(MultiProofMessage::PrefetchProofs(next_targets)) = task.rx.try_recv() { + merged_targets.extend(next_targets); + num_batched += 1; + } + + assert_eq!(num_batched, 3); + assert_eq!(merged_targets.len(), 3); + assert!(merged_targets.contains_key(&addr1)); + assert!(merged_targets.contains_key(&addr2)); + assert!(merged_targets.contains_key(&addr3)); + + task.on_prefetch_proof(merged_targets) + } else { + panic!("Expected PrefetchProofs message"); + }; + + assert_eq!(proofs_requested, 1); + } + + /// Verifies that consecutive state update messages from the same source are batched together. + #[test] + fn test_state_update_batching() { + use alloy_evm::block::StateChangeSource; + use revm_state::Account; + + let test_provider_factory = create_test_provider_factory(); + let mut task = create_test_state_root_task(test_provider_factory); + + // create multiple state updates + let addr1 = alloy_primitives::Address::random(); + let addr2 = alloy_primitives::Address::random(); + + let mut update1 = EvmState::default(); + update1.insert( + addr1, + Account { + info: revm_state::AccountInfo { + balance: U256::from(100), + nonce: 1, + code_hash: Default::default(), + code: Default::default(), + }, + transaction_id: Default::default(), + storage: Default::default(), + status: revm_state::AccountStatus::Touched, + }, + ); + + let mut update2 = EvmState::default(); + update2.insert( + addr2, + Account { + info: revm_state::AccountInfo { + balance: U256::from(200), + nonce: 2, + code_hash: Default::default(), + code: Default::default(), + }, + transaction_id: Default::default(), + storage: Default::default(), + status: revm_state::AccountStatus::Touched, + }, + ); + + let source = StateChangeSource::Transaction(0); + + let tx = task.state_root_message_sender(); + tx.send(MultiProofMessage::StateUpdate(source.into(), update1.clone())).unwrap(); + tx.send(MultiProofMessage::StateUpdate(source.into(), update2.clone())).unwrap(); + + let proofs_requested = + if let Ok(MultiProofMessage::StateUpdate(_src, update)) = task.rx.recv() { + let mut merged_update = update; + let mut num_batched = 1; + + while let Ok(MultiProofMessage::StateUpdate(_next_source, next_update)) = + task.rx.try_recv() + { + merged_update.extend(next_update); + num_batched += 1; + } + + assert_eq!(num_batched, 2); + assert_eq!(merged_update.len(), 2); + assert!(merged_update.contains_key(&addr1)); + assert!(merged_update.contains_key(&addr2)); + + task.on_state_update(source.into(), merged_update) + } else { 
+ panic!("Expected StateUpdate message"); + }; + assert_eq!(proofs_requested, 1); + } + + /// Verifies that state updates from different sources are not batched together. + #[test] + fn test_state_update_batching_separates_sources() { + use alloy_evm::block::StateChangeSource; + use revm_state::Account; + + let test_provider_factory = create_test_provider_factory(); + let task = create_test_state_root_task(test_provider_factory); + + let addr_a1 = alloy_primitives::Address::random(); + let addr_b1 = alloy_primitives::Address::random(); + let addr_a2 = alloy_primitives::Address::random(); + + let create_state_update = |addr: alloy_primitives::Address, balance: u64| { + let mut state = EvmState::default(); + state.insert( + addr, + Account { + info: revm_state::AccountInfo { + balance: U256::from(balance), + nonce: 1, + code_hash: Default::default(), + code: Default::default(), + }, + transaction_id: Default::default(), + storage: Default::default(), + status: revm_state::AccountStatus::Touched, + }, + ); + state + }; + + let source_a = StateChangeSource::Transaction(1); + let source_b = StateChangeSource::Transaction(2); + + // Queue: A1 (immediate dispatch), B1 (batched), A2 (should become pending) + let tx = task.state_root_message_sender(); + tx.send(MultiProofMessage::StateUpdate(source_a.into(), create_state_update(addr_a1, 100))) + .unwrap(); + tx.send(MultiProofMessage::StateUpdate(source_b.into(), create_state_update(addr_b1, 200))) + .unwrap(); + tx.send(MultiProofMessage::StateUpdate(source_a.into(), create_state_update(addr_a2, 300))) + .unwrap(); + + let mut pending_msg: Option = None; + + if let Ok(MultiProofMessage::StateUpdate(first_source, _)) = task.rx.recv() { + assert!(same_source(first_source, source_a.into())); + + // Simulate batching loop for remaining messages + let mut accumulated_updates: Vec<(Source, EvmState)> = Vec::new(); + let mut accumulated_targets = 0usize; + + loop { + if accumulated_targets >= STATE_UPDATE_MAX_BATCH_TARGETS { + break; + } + match task.rx.try_recv() { + Ok(MultiProofMessage::StateUpdate(next_source, next_update)) => { + if let Some((batch_source, batch_update)) = accumulated_updates.first() && + !can_batch_state_update( + *batch_source, + batch_update, + next_source, + &next_update, + ) + { + pending_msg = + Some(MultiProofMessage::StateUpdate(next_source, next_update)); + break; + } + + let next_estimate = estimate_evm_state_targets(&next_update); + if next_estimate > STATE_UPDATE_MAX_BATCH_TARGETS { + pending_msg = + Some(MultiProofMessage::StateUpdate(next_source, next_update)); + break; + } + if accumulated_targets + next_estimate > STATE_UPDATE_MAX_BATCH_TARGETS && + !accumulated_updates.is_empty() + { + pending_msg = + Some(MultiProofMessage::StateUpdate(next_source, next_update)); + break; + } + accumulated_targets += next_estimate; + accumulated_updates.push((next_source, next_update)); + } + Ok(other_msg) => { + pending_msg = Some(other_msg); + break; + } + Err(_) => break, + } + } + + assert_eq!(accumulated_updates.len(), 1, "Should only batch matching sources"); + let batch_source = accumulated_updates[0].0; + assert!(same_source(batch_source, source_b.into())); + + let batch_source = accumulated_updates[0].0; + let mut merged_update = accumulated_updates.remove(0).1; + for (_, next_update) in accumulated_updates { + merged_update.extend(next_update); + } + + assert!(same_source(batch_source, source_b.into()), "Batch should use matching source"); + assert!(merged_update.contains_key(&addr_b1)); + 
assert!(!merged_update.contains_key(&addr_a1)); + assert!(!merged_update.contains_key(&addr_a2)); + } else { + panic!("Expected first StateUpdate"); + } + + match pending_msg { + Some(MultiProofMessage::StateUpdate(pending_source, pending_update)) => { + assert!(same_source(pending_source, source_a.into())); + assert!(pending_update.contains_key(&addr_a2)); + } + other => panic!("Expected pending StateUpdate with source_a, got {:?}", other), + } + } + + /// Verifies that pre-block updates only batch when their payloads are identical. + #[test] + fn test_pre_block_updates_require_payload_match_to_batch() { + use alloy_evm::block::{StateChangePreBlockSource, StateChangeSource}; + use revm_state::Account; + + let test_provider_factory = create_test_provider_factory(); + let task = create_test_state_root_task(test_provider_factory); + + let addr1 = alloy_primitives::Address::random(); + let addr2 = alloy_primitives::Address::random(); + let addr3 = alloy_primitives::Address::random(); + + let create_state_update = |addr: alloy_primitives::Address, balance: u64| { + let mut state = EvmState::default(); + state.insert( + addr, + Account { + info: revm_state::AccountInfo { + balance: U256::from(balance), + nonce: 1, + code_hash: Default::default(), + code: Default::default(), + }, + transaction_id: Default::default(), + storage: Default::default(), + status: revm_state::AccountStatus::Touched, + }, + ); + state + }; + + let source = StateChangeSource::PreBlock(StateChangePreBlockSource::BeaconRootContract); + + // Queue: first update dispatched immediately, next two should not merge + let tx = task.state_root_message_sender(); + tx.send(MultiProofMessage::StateUpdate(source.into(), create_state_update(addr1, 100))) + .unwrap(); + tx.send(MultiProofMessage::StateUpdate(source.into(), create_state_update(addr2, 200))) + .unwrap(); + tx.send(MultiProofMessage::StateUpdate(source.into(), create_state_update(addr3, 300))) + .unwrap(); + + let mut pending_msg: Option = None; + + if let Ok(MultiProofMessage::StateUpdate(first_source, first_update)) = task.rx.recv() { + assert!(same_source(first_source, source.into())); + assert!(first_update.contains_key(&addr1)); + + let mut accumulated_updates: Vec<(Source, EvmState)> = Vec::new(); + let mut accumulated_targets = 0usize; + + loop { + if accumulated_targets >= STATE_UPDATE_MAX_BATCH_TARGETS { + break; + } + match task.rx.try_recv() { + Ok(MultiProofMessage::StateUpdate(next_source, next_update)) => { + if let Some((batch_source, batch_update)) = accumulated_updates.first() && + !can_batch_state_update( + *batch_source, + batch_update, + next_source, + &next_update, + ) + { + pending_msg = + Some(MultiProofMessage::StateUpdate(next_source, next_update)); + break; + } + + let next_estimate = estimate_evm_state_targets(&next_update); + if next_estimate > STATE_UPDATE_MAX_BATCH_TARGETS { + pending_msg = + Some(MultiProofMessage::StateUpdate(next_source, next_update)); + break; + } + if accumulated_targets + next_estimate > STATE_UPDATE_MAX_BATCH_TARGETS && + !accumulated_updates.is_empty() + { + pending_msg = + Some(MultiProofMessage::StateUpdate(next_source, next_update)); + break; + } + accumulated_targets += next_estimate; + accumulated_updates.push((next_source, next_update)); + } + Ok(other_msg) => { + pending_msg = Some(other_msg); + break; + } + Err(_) => break, + } + } + + assert_eq!( + accumulated_updates.len(), + 1, + "Second pre-block update should not merge with a different payload" + ); + let (batched_source, batched_update) = 
accumulated_updates.remove(0); + assert!(same_source(batched_source, source.into())); + assert!(batched_update.contains_key(&addr2)); + assert!(!batched_update.contains_key(&addr3)); + + match pending_msg { + Some(MultiProofMessage::StateUpdate(_, pending_update)) => { + assert!(pending_update.contains_key(&addr3)); + } + other => panic!("Expected pending third pre-block update, got {:?}", other), + } + } else { + panic!("Expected first StateUpdate"); + } + } + + /// Verifies that different message types arriving mid-batch are not lost and preserve order. + #[test] + fn test_batching_preserves_ordering_with_different_message_type() { + use alloy_evm::block::StateChangeSource; + use revm_state::Account; + + let test_provider_factory = create_test_provider_factory(); + let task = create_test_state_root_task(test_provider_factory); + + let addr1 = B256::random(); + let addr2 = B256::random(); + let addr3 = B256::random(); + let state_addr1 = alloy_primitives::Address::random(); + let state_addr2 = alloy_primitives::Address::random(); + + // Create PrefetchProofs targets + let mut targets1 = MultiProofTargets::default(); + targets1.insert(addr1, HashSet::default()); + + let mut targets2 = MultiProofTargets::default(); + targets2.insert(addr2, HashSet::default()); + + let mut targets3 = MultiProofTargets::default(); + targets3.insert(addr3, HashSet::default()); + + // Create StateUpdate 1 + let mut state_update1 = EvmState::default(); + state_update1.insert( + state_addr1, + Account { + info: revm_state::AccountInfo { + balance: U256::from(100), + nonce: 1, + code_hash: Default::default(), + code: Default::default(), + }, + transaction_id: Default::default(), + storage: Default::default(), + status: revm_state::AccountStatus::Touched, + }, + ); + + // Create StateUpdate 2 + let mut state_update2 = EvmState::default(); + state_update2.insert( + state_addr2, + Account { + info: revm_state::AccountInfo { + balance: U256::from(200), + nonce: 2, + code_hash: Default::default(), + code: Default::default(), + }, + transaction_id: Default::default(), + storage: Default::default(), + status: revm_state::AccountStatus::Touched, + }, + ); + + let source = StateChangeSource::Transaction(42); + + // Queue: [PrefetchProofs1, PrefetchProofs2, StateUpdate1, StateUpdate2, PrefetchProofs3] + let tx = task.state_root_message_sender(); + tx.send(MultiProofMessage::PrefetchProofs(targets1)).unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(targets2)).unwrap(); + tx.send(MultiProofMessage::StateUpdate(source.into(), state_update1)).unwrap(); + tx.send(MultiProofMessage::StateUpdate(source.into(), state_update2)).unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(targets3.clone())).unwrap(); + + // Step 1: Receive and batch PrefetchProofs (should get targets1 + targets2) + let mut pending_msg: Option = None; + if let Ok(MultiProofMessage::PrefetchProofs(targets)) = task.rx.recv() { + let mut merged_targets = targets; + let mut num_batched = 1; + + loop { + match task.rx.try_recv() { + Ok(MultiProofMessage::PrefetchProofs(next_targets)) => { + merged_targets.extend(next_targets); + num_batched += 1; + } + Ok(other_msg) => { + // Store locally to preserve ordering (the fix) + pending_msg = Some(other_msg); + break; + } + Err(_) => break, + } + } + + // Should have batched exactly 2 PrefetchProofs (not 3!) 
+ assert_eq!(num_batched, 2, "Should batch only until different message type"); + assert_eq!(merged_targets.len(), 2); + assert!(merged_targets.contains_key(&addr1)); + assert!(merged_targets.contains_key(&addr2)); + assert!(!merged_targets.contains_key(&addr3), "addr3 should NOT be in first batch"); + } else { + panic!("Expected PrefetchProofs message"); + } + + // Step 2: The pending message should be StateUpdate1 (preserved ordering) + match pending_msg { + Some(MultiProofMessage::StateUpdate(_src, update)) => { + assert!(update.contains_key(&state_addr1), "Should be first StateUpdate"); + } + _ => panic!("StateUpdate1 was lost or reordered! The ordering fix is broken."), + } + + // Step 3: Next in channel should be StateUpdate2 + match task.rx.try_recv() { + Ok(MultiProofMessage::StateUpdate(_src, update)) => { + assert!(update.contains_key(&state_addr2), "Should be second StateUpdate"); + } + _ => panic!("StateUpdate2 was lost!"), + } + + // Step 4: Next in channel should be PrefetchProofs3 + match task.rx.try_recv() { + Ok(MultiProofMessage::PrefetchProofs(targets)) => { + assert_eq!(targets.len(), 1); + assert!(targets.contains_key(&addr3)); + } + _ => panic!("PrefetchProofs3 was lost!"), + } + } + + /// Verifies that a pending message is processed before the next loop iteration (ordering). + #[test] + fn test_pending_message_processed_before_next_iteration() { + use alloy_evm::block::StateChangeSource; + use revm_state::Account; + + let test_provider_factory = create_test_provider_factory(); + let test_provider = test_provider_factory.latest().unwrap(); + let mut task = create_test_state_root_task(test_provider_factory); + + // Queue: Prefetch1, StateUpdate, Prefetch2 + let prefetch_addr1 = B256::random(); + let prefetch_addr2 = B256::random(); + let mut prefetch1 = MultiProofTargets::default(); + prefetch1.insert(prefetch_addr1, HashSet::default()); + let mut prefetch2 = MultiProofTargets::default(); + prefetch2.insert(prefetch_addr2, HashSet::default()); + + let state_addr = alloy_primitives::Address::random(); + let mut state_update = EvmState::default(); + state_update.insert( + state_addr, + Account { + info: revm_state::AccountInfo { + balance: U256::from(42), + nonce: 1, + code_hash: Default::default(), + code: Default::default(), + }, + transaction_id: Default::default(), + storage: Default::default(), + status: revm_state::AccountStatus::Touched, + }, + ); + + let source = StateChangeSource::Transaction(99); + + let tx = task.state_root_message_sender(); + tx.send(MultiProofMessage::PrefetchProofs(prefetch1)).unwrap(); + tx.send(MultiProofMessage::StateUpdate(source.into(), state_update)).unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(prefetch2.clone())).unwrap(); + + let mut ctx = MultiproofBatchCtx::new(Instant::now()); + let mut batch_metrics = MultiproofBatchMetrics::default(); + + // First message: Prefetch1 batches; StateUpdate becomes pending. + let first = task.rx.recv().unwrap(); + assert!(matches!(first, MultiProofMessage::PrefetchProofs(_))); + assert!(!task.process_multiproof_message( + first, + &mut ctx, + &mut batch_metrics, + &test_provider + )); + let pending = ctx.pending_msg.take().expect("pending message captured"); + assert!(matches!(pending, MultiProofMessage::StateUpdate(_, _))); + + // Pending message should be handled before the next select loop. 
+ assert!(!task.process_multiproof_message( + pending, + &mut ctx, + &mut batch_metrics, + &test_provider + )); + + // Prefetch2 should now be in pending_msg (captured by StateUpdate's batching loop). + match ctx.pending_msg.take() { + Some(MultiProofMessage::PrefetchProofs(targets)) => { + assert_eq!(targets.len(), 1); + assert!(targets.contains_key(&prefetch_addr2)); + } + other => panic!("Expected remaining PrefetchProofs2 in pending_msg, got {:?}", other), + } + } + + /// Verifies that pending messages from a previous batch drain get full batching treatment. + #[test] + fn test_pending_messages_get_full_batching_treatment() { + // Queue: [Prefetch1, State1, State2, State3, Prefetch2] + // + // Expected behavior: + // 1. recv() → Prefetch1 + // 2. try_recv() → State1 is different type → pending = State1, break + // 3. Process Prefetch1 + // 4. Next iteration: pending = State1 → process with batching + // 5. try_recv() → State2 same type → merge + // 6. try_recv() → State3 same type → merge + // 7. try_recv() → Prefetch2 different type → pending = Prefetch2, break + // 8. Process merged State (1+2+3) + // + // Without the state-machine fix, State1 would be processed alone (no batching). + use alloy_evm::block::StateChangeSource; + use revm_state::Account; + + let test_provider_factory = create_test_provider_factory(); + let task = create_test_state_root_task(test_provider_factory); + + let prefetch_addr1 = B256::random(); + let prefetch_addr2 = B256::random(); + let state_addr1 = alloy_primitives::Address::random(); + let state_addr2 = alloy_primitives::Address::random(); + let state_addr3 = alloy_primitives::Address::random(); + + // Create Prefetch targets + let mut prefetch1 = MultiProofTargets::default(); + prefetch1.insert(prefetch_addr1, HashSet::default()); + + let mut prefetch2 = MultiProofTargets::default(); + prefetch2.insert(prefetch_addr2, HashSet::default()); + + // Create StateUpdates + let create_state_update = |addr: alloy_primitives::Address, balance: u64| { + let mut state = EvmState::default(); + state.insert( + addr, + Account { + info: revm_state::AccountInfo { + balance: U256::from(balance), + nonce: 1, + code_hash: Default::default(), + code: Default::default(), + }, + transaction_id: Default::default(), + storage: Default::default(), + status: revm_state::AccountStatus::Touched, + }, + ); + state + }; + + let source = StateChangeSource::Transaction(42); + + // Queue: [Prefetch1, State1, State2, State3, Prefetch2] + let tx = task.state_root_message_sender(); + tx.send(MultiProofMessage::PrefetchProofs(prefetch1.clone())).unwrap(); + tx.send(MultiProofMessage::StateUpdate( + source.into(), + create_state_update(state_addr1, 100), + )) + .unwrap(); + tx.send(MultiProofMessage::StateUpdate( + source.into(), + create_state_update(state_addr2, 200), + )) + .unwrap(); + tx.send(MultiProofMessage::StateUpdate( + source.into(), + create_state_update(state_addr3, 300), + )) + .unwrap(); + tx.send(MultiProofMessage::PrefetchProofs(prefetch2.clone())).unwrap(); + + // Simulate the state-machine loop behavior + let mut pending_msg: Option = None; + + // First iteration: recv() gets Prefetch1, drains until State1 + if let Ok(MultiProofMessage::PrefetchProofs(targets)) = task.rx.recv() { + let mut merged_targets = targets; + loop { + match task.rx.try_recv() { + Ok(MultiProofMessage::PrefetchProofs(next_targets)) => { + merged_targets.extend(next_targets); + } + Ok(other_msg) => { + pending_msg = Some(other_msg); + break; + } + Err(_) => break, + } + } + // Should have only 
Prefetch1 (State1 is different type) + assert_eq!(merged_targets.len(), 1); + assert!(merged_targets.contains_key(&prefetch_addr1)); + } else { + panic!("Expected PrefetchProofs"); + } + + // Pending should be State1 + assert!(matches!(pending_msg, Some(MultiProofMessage::StateUpdate(_, _)))); + + // Second iteration: process pending State1 WITH BATCHING + // This is the key test - the pending message should drain State2 and State3 + if let Some(MultiProofMessage::StateUpdate(_src, first_update)) = pending_msg.take() { + let mut merged_update = first_update; + let mut num_batched = 1; + + loop { + match task.rx.try_recv() { + Ok(MultiProofMessage::StateUpdate(_src, next_update)) => { + merged_update.extend(next_update); + num_batched += 1; + } + Ok(other_msg) => { + pending_msg = Some(other_msg); + break; + } + Err(_) => break, + } + } + + // THE KEY ASSERTION: pending State1 should have batched with State2 and State3 + assert_eq!( + num_batched, 3, + "Pending message should get full batching treatment and merge all 3 StateUpdates" + ); + assert_eq!(merged_update.len(), 3, "Should have all 3 addresses in merged update"); + assert!(merged_update.contains_key(&state_addr1)); + assert!(merged_update.contains_key(&state_addr2)); + assert!(merged_update.contains_key(&state_addr3)); + } else { + panic!("Expected pending StateUpdate"); + } + + // Pending should now be Prefetch2 + match pending_msg { + Some(MultiProofMessage::PrefetchProofs(targets)) => { + assert_eq!(targets.len(), 1); + assert!(targets.contains_key(&prefetch_addr2)); + } + _ => panic!("Prefetch2 was lost!"), + } + } + + /// Verifies that BAL messages are processed correctly and generate state updates. + #[test] + fn test_bal_message_processing() { + let test_provider_factory = create_test_provider_factory(); + let test_provider = test_provider_factory.latest().unwrap(); + let mut task = create_test_state_root_task(test_provider_factory); + + // Create a simple BAL with one account change + let account_address = Address::random(); + let account_changes = AccountChanges { + address: account_address, + balance_changes: vec![BalanceChange::new(0, U256::from(1000))], + nonce_changes: vec![], + code_changes: vec![], + storage_changes: vec![], + storage_reads: vec![], + }; + + let bal = Arc::new(vec![account_changes]); + + let mut ctx = MultiproofBatchCtx::new(Instant::now()); + let mut batch_metrics = MultiproofBatchMetrics::default(); + + let should_finish = task.process_multiproof_message( + MultiProofMessage::BlockAccessList(bal), + &mut ctx, + &mut batch_metrics, + &test_provider, + ); + + // BAL should mark updates as finished + assert!(ctx.updates_finished_time.is_some()); + + // Should have dispatched state update proofs + assert!(batch_metrics.state_update_proofs_requested > 0); + + // Should need to wait for the results of those proofs to arrive + assert!(!should_finish, "Should continue waiting for proofs"); + } } diff --git a/crates/engine/tree/src/tree/payload_processor/prewarm.rs b/crates/engine/tree/src/tree/payload_processor/prewarm.rs index 9815ea8122..75912f6f1b 100644 --- a/crates/engine/tree/src/tree/payload_processor/prewarm.rs +++ b/crates/engine/tree/src/tree/payload_processor/prewarm.rs @@ -27,10 +27,11 @@ use alloy_primitives::{keccak256, map::B256Set, B256}; use crossbeam_channel::Sender as CrossbeamSender; use metrics::{Counter, Gauge, Histogram}; use reth_evm::{execute::ExecutableTxFor, ConfigureEvm, Evm, EvmFor, SpecFor}; +use reth_execution_types::ExecutionOutcome; use reth_metrics::Metrics; use 
reth_primitives_traits::NodePrimitives; use reth_provider::{BlockReader, StateProviderFactory, StateReader}; -use reth_revm::{database::StateProviderDatabase, db::BundleState, state::EvmState}; +use reth_revm::{database::StateProviderDatabase, state::EvmState}; use reth_trie::MultiProofTargets; use std::{ sync::{ @@ -40,7 +41,7 @@ use std::{ }, time::Instant, }; -use tracing::{debug, debug_span, instrument, trace, warn}; +use tracing::{debug, debug_span, instrument, trace, warn, Span}; /// A wrapper for transactions that includes their index in the block. #[derive(Clone)] @@ -86,7 +87,9 @@ where /// Sender to emit evm state outcome messages, if any. to_multi_proof: Option>, /// Receiver for events produced by tx execution - actions_rx: Receiver, + actions_rx: Receiver>, + /// Parent span for tracing + parent_span: Span, } impl PrewarmCacheTask @@ -103,7 +106,7 @@ where to_multi_proof: Option>, transaction_count_hint: usize, max_concurrency: usize, - ) -> (Self, Sender) { + ) -> (Self, Sender>) { let (actions_tx, actions_rx) = channel(); trace!( @@ -122,6 +125,7 @@ where transaction_count_hint, to_multi_proof, actions_rx, + parent_span: Span::current(), }, actions_tx, ) @@ -132,21 +136,23 @@ where /// For Optimism chains, special handling is applied to the first transaction if it's a /// deposit transaction (type 0x7E/126) which sets critical metadata that affects all /// subsequent transactions in the block. - fn spawn_all(&self, pending: mpsc::Receiver, actions_tx: Sender) - where + fn spawn_all( + &self, + pending: mpsc::Receiver, + actions_tx: Sender>, + ) where Tx: ExecutableTxFor + Clone + Send + 'static, { let executor = self.executor.clone(); let ctx = self.ctx.clone(); let max_concurrency = self.max_concurrency; let transaction_count_hint = self.transaction_count_hint; - let span = tracing::Span::current(); + let span = Span::current(); self.executor.spawn_blocking(move || { let _enter = debug_span!(target: "engine::tree::payload_processor::prewarm", parent: span, "spawn_all").entered(); let (done_tx, done_rx) = mpsc::channel(); - let mut executing = 0usize; // When transaction_count_hint is 0, it means the count is unknown. In this case, spawn // max workers to handle potentially many transactions in parallel rather @@ -165,62 +171,44 @@ where handles.push(ctx.spawn_worker(i, &executor, actions_tx.clone(), done_tx.clone())); } + // Distribute transactions to workers let mut tx_index = 0usize; + while let Ok(tx) = pending.recv() { + // Stop distributing if termination was requested + if ctx.terminate_execution.load(Ordering::Relaxed) { + trace!( + target: "engine::tree::payload_processor::prewarm", + "Termination requested, stopping transaction distribution" + ); + break; + } - // Handle first transaction - special case for system transactions - if let Ok(first_tx) = pending.recv() { - // Move the transaction into the indexed wrapper to avoid an extra clone - let indexed_tx = IndexedTransaction { index: tx_index, tx: first_tx }; - // Compute metadata from the moved value - let tx_ref = indexed_tx.tx.tx(); - let is_system_tx = tx_ref.ty() > MAX_STANDARD_TX_TYPE; - let first_tx_hash = tx_ref.tx_hash(); + let indexed_tx = IndexedTransaction { index: tx_index, tx }; + let is_system_tx = indexed_tx.tx.tx().ty() > MAX_STANDARD_TX_TYPE; - // Check if this is a system transaction (type > 4) - // System transactions in the first position typically set critical metadata - // that affects all subsequent transactions (e.g., L1 block info, fees on L2s). 
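For illustration, a self-contained sketch of the distribution policy above, using only std channels and threads; `Tx`, the worker loop, and the constants are simplified stand-ins rather than the reth types. The first transaction is broadcast to every worker when it is a system transaction (type > 4), everything else is spread round-robin by index, and send errors are ignored because a worker may already have exited after observing the termination flag.

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc;
use std::thread;

#[derive(Clone, Debug)]
struct Tx {
    ty: u8,
    payload: u64,
}

const MAX_STANDARD_TX_TYPE: u8 = 4;

fn distribute(
    pending: mpsc::Receiver<Tx>,
    workers: &[mpsc::Sender<(usize, Tx)>],
    terminate: &AtomicBool,
) -> usize {
    let mut tx_index = 0usize;
    while let Ok(tx) = pending.recv() {
        // stop distributing once termination was requested
        if terminate.load(Ordering::Relaxed) {
            break;
        }
        if tx_index == 0 && tx.ty > MAX_STANDARD_TX_TYPE {
            // first system transaction: every worker needs its side effects
            for worker in workers {
                let _ = worker.send((tx_index, tx.clone()));
            }
        } else {
            // round-robin everything else; a disconnected worker is not fatal
            let _ = workers[tx_index % workers.len()].send((tx_index, tx));
        }
        tx_index += 1;
    }
    // reported back as `executed_transactions`
    tx_index
}

fn main() {
    let terminate = AtomicBool::new(false);
    let (in_tx, in_rx) = mpsc::channel();
    let mut senders = Vec::new();
    let mut handles = Vec::new();
    for id in 0..3usize {
        let (tx, rx) = mpsc::channel::<(usize, Tx)>();
        senders.push(tx);
        handles.push(thread::spawn(move || {
            for (index, tx) in rx {
                println!("worker {id} executes tx #{index} (type {})", tx.ty);
            }
        }));
    }
    // type 126 stands in for an L2 deposit/system transaction in the first slot
    in_tx.send(Tx { ty: 126, payload: 0 }).unwrap();
    for i in 1..=5 {
        in_tx.send(Tx { ty: 2, payload: i }).unwrap();
    }
    drop(in_tx);
    let executed = distribute(in_rx, &senders, &terminate);
    drop(senders);
    for handle in handles {
        handle.join().unwrap();
    }
    println!("distributed {executed} transactions");
}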
- if is_system_tx { - // Broadcast system transaction to all workers to ensure they have the - // critical state. This is particularly important for L2s like Optimism - // where the first deposit transaction contains essential block metadata. + // System transactions (type > 4) in the first position set critical metadata + // that affects all subsequent transactions (e.g., L1 block info on L2s). + // Broadcast the first system transaction to all workers to ensure they have + // the critical state. This is particularly important for L2s like Optimism + // where the first deposit transaction (type 126) contains essential block metadata. + if tx_index == 0 && is_system_tx { for handle in &handles { - if let Err(err) = handle.send(indexed_tx.clone()) { - warn!( - target: "engine::tree::payload_processor::prewarm", - tx_hash = %first_tx_hash, - error = %err, - "Failed to send deposit transaction to worker" - ); - } + // Ignore send errors: workers listen to terminate_execution and may + // exit early when signaled. Sending to a disconnected worker is + // possible and harmless and should happen at most once due to + // the terminate_execution check above. + let _ = handle.send(indexed_tx.clone()); } } else { - // Not a deposit, send to first worker via round-robin - if let Err(err) = handles[0].send(indexed_tx) { - warn!( - target: "engine::tree::payload_processor::prewarm", - task_idx = 0, - error = %err, - "Failed to send transaction to worker" - ); - } + // Round-robin distribution for all other transactions + let worker_idx = tx_index % workers_needed; + // Ignore send errors: workers listen to terminate_execution and may + // exit early when signaled. Sending to a disconnected worker is + // possible and harmless and should happen at most once due to + // the terminate_execution check above. + let _ = handles[worker_idx].send(indexed_tx); } - executing += 1; - tx_index += 1; - } - // Process remaining transactions with round-robin distribution - while let Ok(executable) = pending.recv() { - let indexed_tx = IndexedTransaction { index: tx_index, tx: executable }; - let task_idx = executing % workers_needed; - if let Err(err) = handles[task_idx].send(indexed_tx) { - warn!( - target: "engine::tree::payload_processor::prewarm", - task_idx, - error = %err, - "Failed to send transaction to worker" - ); - } - executing += 1; tx_index += 1; } @@ -230,7 +218,7 @@ where while done_rx.recv().is_ok() {} let _ = actions_tx - .send(PrewarmTaskEvent::FinishedTxExecution { executed_transactions: executing }); + .send(PrewarmTaskEvent::FinishedTxExecution { executed_transactions: tx_index }); }); } @@ -264,38 +252,43 @@ where /// /// This method is called from `run()` only after all execution tasks are complete. #[instrument(level = "debug", target = "engine::tree::payload_processor::prewarm", skip_all)] - fn save_cache(self, state: BundleState) { + fn save_cache(self, execution_outcome: Arc>) { let start = Instant::now(); let Self { execution_cache, ctx: PrewarmContext { env, metrics, saved_cache, .. }, .. 
} = self; let hash = env.hash; - debug!(target: "engine::caching", parent_hash=?hash, "Updating execution cache"); - // Perform all cache operations atomically under the lock - execution_cache.update_with_guard(|cached| { - // consumes the `SavedCache` held by the prewarming task, which releases its usage guard - let (caches, cache_metrics) = saved_cache.split(); - let new_cache = SavedCache::new(hash, caches, cache_metrics); + if let Some(saved_cache) = saved_cache { + debug!(target: "engine::caching", parent_hash=?hash, "Updating execution cache"); + // Perform all cache operations atomically under the lock + execution_cache.update_with_guard(|cached| { + // consumes the `SavedCache` held by the prewarming task, which releases its usage + // guard + let (caches, cache_metrics) = saved_cache.split(); + let new_cache = SavedCache::new(hash, caches, cache_metrics); - // Insert state into cache while holding the lock - if new_cache.cache().insert_state(&state).is_err() { - // Clear the cache on error to prevent having a polluted cache - *cached = None; - debug!(target: "engine::caching", "cleared execution cache on update error"); - return; - } + // Insert state into cache while holding the lock + // Access the BundleState through the shared ExecutionOutcome + if new_cache.cache().insert_state(execution_outcome.state()).is_err() { + // Clear the cache on error to prevent having a polluted cache + *cached = None; + debug!(target: "engine::caching", "cleared execution cache on update error"); + return; + } - new_cache.update_metrics(); + new_cache.update_metrics(); - // Replace the shared cache with the new one; the previous cache (if any) is dropped. - *cached = Some(new_cache); - }); + // Replace the shared cache with the new one; the previous cache (if any) is + // dropped. + *cached = Some(new_cache); + }); - let elapsed = start.elapsed(); - debug!(target: "engine::caching", parent_hash=?hash, elapsed=?elapsed, "Updated execution cache"); + let elapsed = start.elapsed(); + debug!(target: "engine::caching", parent_hash=?hash, elapsed=?elapsed, "Updated execution cache"); - metrics.cache_saving_duration.set(elapsed.as_secs_f64()); + metrics.cache_saving_duration.set(elapsed.as_secs_f64()); + } } /// Executes the task. @@ -303,20 +296,21 @@ where /// This will execute the transactions until all transactions have been processed or the task /// was cancelled. #[instrument( + parent = &self.parent_span, level = "debug", target = "engine::tree::payload_processor::prewarm", - name = "prewarm", + name = "prewarm and caching", skip_all )] pub(super) fn run( self, pending: mpsc::Receiver + Clone + Send + 'static>, - actions_tx: Sender, + actions_tx: Sender>, ) { // spawn execution tasks. 
self.spawn_all(pending, actions_tx); - let mut final_block_output = None; + let mut final_execution_outcome = None; let mut finished_execution = false; while let Ok(event) = self.actions_rx.recv() { match event { @@ -329,9 +323,9 @@ where // completed executing a set of transactions self.send_multi_proof_targets(proof_targets); } - PrewarmTaskEvent::Terminate { block_output } => { + PrewarmTaskEvent::Terminate { execution_outcome } => { trace!(target: "engine::tree::payload_processor::prewarm", "Received termination signal"); - final_block_output = Some(block_output); + final_execution_outcome = Some(execution_outcome); if finished_execution { // all tasks are done, we can exit, which will save caches and exit @@ -345,7 +339,7 @@ where finished_execution = true; - if final_block_output.is_some() { + if final_execution_outcome.is_some() { // all tasks are done, we can exit, which will save caches and exit break } @@ -355,9 +349,9 @@ where debug!(target: "engine::tree::payload_processor::prewarm", "Completed prewarm execution"); - // save caches and finish - if let Some(Some(state)) = final_block_output { - self.save_cache(state); + // save caches and finish using the shared ExecutionOutcome + if let Some(Some(execution_outcome)) = final_execution_outcome { + self.save_cache(execution_outcome); } } } @@ -371,7 +365,7 @@ where { pub(super) env: ExecutionEnv, pub(super) evm_config: Evm, - pub(super) saved_cache: SavedCache, + pub(super) saved_cache: Option, /// Provider to obtain the state pub(super) provider: StateProviderBuilder, pub(super) metrics: PrewarmMetrics, @@ -399,10 +393,10 @@ where metrics, terminate_execution, precompile_cache_disabled, - mut precompile_cache_map, + precompile_cache_map, } = self; - let state_provider = match provider.build() { + let mut state_provider = match provider.build() { Ok(provider) => provider, Err(err) => { trace!( @@ -415,10 +409,15 @@ where }; // Use the caches to create a new provider with caching - let caches = saved_cache.cache().clone(); - let cache_metrics = saved_cache.metrics().clone(); - let state_provider = - CachedStateProvider::new_with_caches(state_provider, caches, cache_metrics); + if let Some(saved_cache) = saved_cache { + let caches = saved_cache.cache().clone(); + let cache_metrics = saved_cache.metrics().clone(); + state_provider = Box::new( + CachedStateProvider::new(state_provider, caches, cache_metrics) + // ensure we pre-warm the cache + .prewarm(), + ); + } let state_provider = StateProviderDatabase::new(state_provider); @@ -450,8 +449,9 @@ where /// Accepts an [`mpsc::Receiver`] of transactions and a handle to prewarm task. Executes /// transactions and streams [`PrewarmTaskEvent::Outcome`] messages for each transaction. /// - /// Returns `None` if executing the transactions failed to a non Revert error. - /// Returns the touched+modified state of the transaction. + /// This function processes transactions sequentially from the receiver and emits outcome events + /// via the provided sender. Execution errors are logged and tracked but do not stop the batch + /// processing unless the task is explicitly cancelled. /// /// Note: There are no ordering guarantees; this does not reflect the state produced by /// sequential execution. 
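As a rough sketch of the shutdown handshake in `run()` above (std-only; `Outcome`, `Event`, and `save_cache` are placeholders, not the reth types): the loop keeps draining events and only saves the cache after it has seen both the terminate event, which hands over the outcome behind an `Arc` so the expensive state is never cloned, and the notification that all workers finished executing.

use std::sync::{mpsc, Arc};

#[derive(Debug)]
struct Outcome {
    state: Vec<u8>, // stands in for the expensive bundle state
}

enum Event {
    FinishedTxExecution { executed_transactions: usize },
    Terminate { outcome: Option<Arc<Outcome>> },
}

fn save_cache(outcome: &Outcome) {
    println!("saving cache from {} bytes of state", outcome.state.len());
}

fn run(events: mpsc::Receiver<Event>) {
    let mut final_outcome: Option<Option<Arc<Outcome>>> = None;
    let mut finished_execution = false;

    while let Ok(event) = events.recv() {
        match event {
            Event::Terminate { outcome } => {
                final_outcome = Some(outcome);
                if finished_execution {
                    break; // workers already reported done, safe to exit
                }
            }
            Event::FinishedTxExecution { executed_transactions } => {
                println!("executed {executed_transactions} transactions");
                finished_execution = true;
                if final_outcome.is_some() {
                    break; // terminate signal already arrived
                }
            }
        }
    }

    // only persist when a real outcome was handed over
    if let Some(Some(outcome)) = final_outcome {
        save_cache(&outcome);
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    tx.send(Event::FinishedTxExecution { executed_transactions: 3 }).unwrap();
    tx.send(Event::Terminate { outcome: Some(Arc::new(Outcome { state: vec![0u8; 1024] })) })
        .unwrap();
    run(rx);
}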
@@ -459,7 +459,7 @@ where fn transact_batch( self, txs: mpsc::Receiver>, - sender: Sender, + sender: Sender>, done_tx: Sender<()>, ) where Tx: ExecutableTxFor, @@ -471,7 +471,7 @@ where .entered(); txs.recv() } { - let _enter = + let enter = debug_span!(target: "engine::tree::payload_processor::prewarm", "prewarm tx", index, tx_hash=%tx.tx().tx_hash()) .entered(); @@ -503,7 +503,11 @@ where }; metrics.execution_duration.record(start.elapsed()); - drop(_enter); + // record some basic information about the transactions + enter.record("gas_used", res.result.gas_used()); + enter.record("is_success", res.result.is_success()); + + drop(enter); // If the task was cancelled, stop execution, send an empty result to notify the task, // and exit. @@ -536,11 +540,11 @@ where &self, idx: usize, executor: &WorkloadExecutor, - actions_tx: Sender, + actions_tx: Sender>, done_tx: Sender<()>, ) -> mpsc::Sender> where - Tx: ExecutableTxFor + Clone + Send + 'static, + Tx: ExecutableTxFor + Send + 'static, { let (tx, rx) = mpsc::channel(); let ctx = self.clone(); @@ -592,14 +596,18 @@ fn multiproof_targets_from_state(state: EvmState) -> (MultiProofTargets, usize) } /// The events the pre-warm task can handle. -pub(super) enum PrewarmTaskEvent { +/// +/// Generic over `R` (receipt type) to allow sharing `Arc>` with the main +/// execution path without cloning the expensive `BundleState`. +pub(super) enum PrewarmTaskEvent { /// Forcefully terminate all remaining transaction execution. TerminateTransactionExecution, /// Forcefully terminate the task on demand and update the shared cache with the given output /// before exiting. Terminate { - /// The final block state output. - block_output: Option, + /// The final execution outcome. Using `Arc` allows sharing with the main execution + /// path without cloning the expensive `BundleState`. + execution_outcome: Option>>, }, /// The outcome of a pre-warm task Outcome { diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs index 6302abde5f..70adbb6911 100644 --- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs @@ -166,8 +166,7 @@ where // Update storage slots with new values and calculate storage roots. let span = tracing::Span::current(); - let (tx, rx) = mpsc::channel(); - state + let results: Vec<_> = state .storages .into_iter() .map(|(address, storage)| (address, storage, trie.take_storage_trie(&address))) @@ -217,13 +216,7 @@ where SparseStateTrieResult::Ok((address, storage_trie)) }) - .for_each_init( - || tx.clone(), - |tx, result| { - let _ = tx.send(result); - }, - ); - drop(tx); + .collect(); // Defer leaf removals until after updates/additions, so that we don't delete an intermediate // branch node during a removal and then re-add that branch back during a later leaf addition. 
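The sparse-trie change above swaps the channel plumbing (`tx.send` inside `for_each_init`, then draining `rx`) for a plain `collect()`. A minimal rayon sketch of that shape, with toy types in place of the storage-trie ones:

use rayon::prelude::*;

type StorageRootResult = Result<(u64, String), String>;

fn compute_storage_root(address: u64) -> StorageRootResult {
    // placeholder for the per-account storage root work
    Ok((address, format!("root-of-{address}")))
}

fn main() -> Result<(), String> {
    let accounts: Vec<u64> = (0..8).collect();

    // parallel phase: one result per account, gathered straight into a Vec
    let results: Vec<StorageRootResult> =
        accounts.into_par_iter().map(compute_storage_root).collect();

    // sequential phase: fold the results back in, propagating the first error
    for result in results {
        let (address, root) = result?;
        println!("account {address} -> {root}");
    }
    Ok(())
}

Collecting keeps the parallel phase free of channel setup and lets the sequential phase propagate the first error with `?`, as in the updated account-trie loop.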
@@ -235,7 +228,7 @@ where let _enter = tracing::debug_span!(target: "engine::tree::payload_processor::sparse_trie", "account trie") .entered(); - for result in rx { + for result in results { let (address, storage_trie) = result?; trie.insert_storage_trie(address, storage_trie); diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index ecc475dd53..b89821c340 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -3,20 +3,20 @@ use crate::tree::{ cached_state::CachedStateProvider, error::{InsertBlockError, InsertBlockErrorKind, InsertPayloadError}, - executor::WorkloadExecutor, instrumented_state::InstrumentedStateProvider, - payload_processor::{multiproof::MultiProofConfig, PayloadProcessor}, - persistence_state::CurrentPersistenceAction, + payload_processor::{executor::WorkloadExecutor, PayloadProcessor}, precompile_cache::{CachedPrecompile, CachedPrecompileMetrics, PrecompileCacheMap}, sparse_trie::StateRootComputeOutcome, - EngineApiMetrics, EngineApiTreeState, ExecutionEnv, PayloadHandle, PersistenceState, - PersistingKind, StateProviderBuilder, StateProviderDatabase, TreeConfig, + EngineApiMetrics, EngineApiTreeState, ExecutionEnv, PayloadHandle, StateProviderBuilder, + StateProviderDatabase, TreeConfig, }; use alloy_consensus::transaction::Either; +use alloy_eip7928::BlockAccessList; use alloy_eips::{eip1898::BlockWithParent, NumHash}; use alloy_evm::Evm; -use alloy_primitives::{BlockNumber, B256}; -use reth_chain_state::{CanonicalInMemoryState, ExecutedBlock}; +use alloy_primitives::B256; +use rayon::prelude::*; +use reth_chain_state::{CanonicalInMemoryState, DeferredTrieData, ExecutedBlock}; use reth_consensus::{ConsensusError, FullConsensus}; use reth_engine_primitives::{ ConfigureEngineEvm, ExecutableTxIterator, ExecutionPayload, InvalidBlockHook, PayloadValidator, @@ -30,18 +30,26 @@ use reth_payload_primitives::{ BuiltPayload, InvalidPayloadAttributesError, NewPayloadError, PayloadTypes, }; use reth_primitives_traits::{ - AlloyBlockHeader, BlockTy, GotExpected, NodePrimitives, RecoveredBlock, SealedHeader, + AlloyBlockHeader, BlockBody, BlockTy, GotExpected, NodePrimitives, RecoveredBlock, SealedBlock, + SealedHeader, SignerRecoverable, }; use reth_provider::{ - providers::OverlayStateProviderFactory, BlockExecutionOutput, BlockNumReader, BlockReader, - DBProvider, DatabaseProviderFactory, ExecutionOutcome, HashedPostStateProvider, ProviderError, - PruneCheckpointReader, StageCheckpointReader, StateProvider, StateProviderFactory, StateReader, - StateRootProvider, TrieReader, + providers::OverlayStateProviderFactory, BlockExecutionOutput, BlockReader, + DatabaseProviderFactory, DatabaseProviderROFactory, ExecutionOutcome, HashedPostStateProvider, + ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProvider, + StateProviderFactory, StateReader, TrieReader, }; use reth_revm::db::State; -use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; +use reth_storage_errors::db::DatabaseError; +use reth_trie::{updates::TrieUpdates, HashedPostState, StateRoot, TrieInputSorted}; use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; -use std::{collections::HashMap, sync::Arc, time::Instant}; +use revm_primitives::Address; +use std::{ + collections::HashMap, + panic::{self, AssertUnwindSafe}, + sync::Arc, + time::Instant, +}; use tracing::{debug, debug_span, error, info, instrument, trace, warn}; /// Context providing 
access to tree state during validation. @@ -51,8 +59,6 @@ use tracing::{debug, debug_span, error, info, instrument, trace, warn}; pub struct TreeCtx<'a, N: NodePrimitives> { /// The engine API tree state state: &'a mut EngineApiTreeState, - /// Information about the current persistence state - persistence: &'a PersistenceState, /// Reference to the canonical in-memory state canonical_in_memory_state: &'a CanonicalInMemoryState, } @@ -61,7 +67,6 @@ impl<'a, N: NodePrimitives> std::fmt::Debug for TreeCtx<'a, N> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("TreeCtx") .field("state", &"EngineApiTreeState") - .field("persistence_info", &self.persistence) .field("canonical_in_memory_state", &self.canonical_in_memory_state) .finish() } @@ -71,10 +76,9 @@ impl<'a, N: NodePrimitives> TreeCtx<'a, N> { /// Creates a new tree context pub const fn new( state: &'a mut EngineApiTreeState, - persistence: &'a PersistenceState, canonical_in_memory_state: &'a CanonicalInMemoryState, ) -> Self { - Self { state, persistence, canonical_in_memory_state } + Self { state, canonical_in_memory_state } } /// Returns a reference to the engine tree state @@ -87,43 +91,10 @@ impl<'a, N: NodePrimitives> TreeCtx<'a, N> { self.state } - /// Returns a reference to the persistence info - pub const fn persistence(&self) -> &PersistenceState { - self.persistence - } - /// Returns a reference to the canonical in-memory state pub const fn canonical_in_memory_state(&self) -> &'a CanonicalInMemoryState { self.canonical_in_memory_state } - - /// Determines the persisting kind for the given block based on persistence info. - /// - /// Based on the given header it returns whether any conflicting persistence operation is - /// currently in progress. - /// - /// This is adapted from the `persisting_kind_for` method in `EngineApiTreeHandler`. - pub fn persisting_kind_for(&self, block: BlockWithParent) -> PersistingKind { - // Check that we're currently persisting. - let Some(action) = self.persistence().current_action() else { - return PersistingKind::NotPersisting - }; - // Check that the persistince action is saving blocks, not removing them. - let CurrentPersistenceAction::SavingBlocks { highest } = action else { - return PersistingKind::PersistingNotDescendant - }; - - // The block being validated can only be a descendant if its number is higher than - // the highest block persisting. Otherwise, it's likely a fork of a lower block. - if block.block.number > highest.number && - self.state().tree_state.is_descendant(*highest, block) - { - return PersistingKind::PersistingDescendant - } - - // In all other cases, the block is not a descendant. - PersistingKind::PersistingNotDescendant - } } /// A helper type that provides reusable payload validation logic for network-specific validators. @@ -159,8 +130,6 @@ where metrics: EngineApiMetrics, /// Validator for the payload. validator: V, - /// A cleared trie input, kept around to be reused so allocations can be minimized. - trie_input: Option, } impl BasicEngineValidator @@ -204,20 +173,20 @@ where invalid_block_hook, metrics: EngineApiMetrics::default(), validator, - trie_input: Default::default(), } } /// Converts a [`BlockOrPayload`] to a recovered block. 
+ #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] pub fn convert_to_block>>( &self, input: BlockOrPayload, - ) -> Result, NewPayloadError> + ) -> Result, NewPayloadError> where V: PayloadValidator, { match input { - BlockOrPayload::Payload(payload) => self.validator.ensure_well_formed_payload(payload), + BlockOrPayload::Payload(payload) => self.validator.convert_payload_to_block(payload), BlockOrPayload::Block(block) => Ok(block), } } @@ -241,21 +210,38 @@ where pub fn tx_iterator_for<'a, T: PayloadTypes>>( &'a self, input: &'a BlockOrPayload, - ) -> Result + 'a, NewPayloadError> + ) -> Result, NewPayloadError> where V: PayloadValidator, Evm: ConfigureEngineEvm, { match input { - BlockOrPayload::Payload(payload) => Ok(Either::Left( - self.evm_config + BlockOrPayload::Payload(payload) => { + let (iter, convert) = self + .evm_config .tx_iterator_for_payload(payload) .map_err(NewPayloadError::other)? - .map(|res| res.map(Either::Left)), - )), + .into(); + + let iter = Either::Left(iter.into_par_iter().map(Either::Left)); + let convert = move |tx| { + let Either::Left(tx) = tx else { unreachable!() }; + convert(tx).map(Either::Left).map_err(Either::Left) + }; + + // Box the closure to satisfy the `Fn` bound both here and in the branch below + Ok((iter, Box::new(convert) as Box _ + Send + Sync + 'static>)) + } BlockOrPayload::Block(block) => { - let transactions = block.clone_transactions_recovered().collect::>(); - Ok(Either::Right(transactions.into_iter().map(|tx| Ok(Either::Right(tx))))) + let iter = Either::Right( + block.body().clone_transactions().into_par_iter().map(Either::Right), + ); + let convert = move |tx: Either<_, N::SignedTx>| { + let Either::Right(tx) = tx else { unreachable!() }; + tx.try_into_recovered().map(Either::Right).map_err(Either::Right) + }; + + Ok((iter, Box::new(convert))) } } } @@ -302,7 +288,7 @@ where // Validate block consensus rules which includes header validation if let Err(consensus_err) = self.validate_block_inner(&block) { // Header validation error takes precedence over execution error - return Err(InsertBlockError::new(block.into_sealed_block(), consensus_err.into()).into()) + return Err(InsertBlockError::new(block, consensus_err.into()).into()) } // Also validate against the parent @@ -310,11 +296,11 @@ where self.consensus.validate_header_against_parent(block.sealed_header(), parent_block) { // Parent validation error takes precedence over execution error - return Err(InsertBlockError::new(block.into_sealed_block(), consensus_err.into()).into()) + return Err(InsertBlockError::new(block, consensus_err.into()).into()) } // No header validation errors, return the original execution error - Err(InsertBlockError::new(block.into_sealed_block(), execution_err).into()) + Err(InsertBlockError::new(block, execution_err).into()) } /// Validates a block that has already been converted from a payload. 
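A condensed sketch of the `tx_iterator_for` shape above, assuming rayon and a hand-rolled `Either` (the real code uses `alloy_consensus::transaction::Either` and returns a lazy parallel iterator; a `Vec` of items stands in here to keep the sketch short). Both branches yield the same item type plus a boxed conversion closure, so callers can recover or decode transactions in parallel without knowing which variant they got:

use rayon::prelude::*;

#[derive(Clone, Debug)]
enum Either<L, R> {
    Left(L),
    Right(R),
}

type RawPayloadTx = Vec<u8>;
type SignedTx = String;
type Converted = String;
type Convert =
    Box<dyn Fn(Either<RawPayloadTx, SignedTx>) -> Result<Converted, String> + Send + Sync>;

fn tx_iterator_for(
    from_payload: bool,
    payload_txs: Vec<RawPayloadTx>,
    block_txs: Vec<SignedTx>,
) -> (Vec<Either<RawPayloadTx, SignedTx>>, Convert) {
    if from_payload {
        let items: Vec<_> = payload_txs.into_par_iter().map(Either::Left).collect();
        let convert: Convert = Box::new(|tx| match tx {
            Either::Left(raw) => Ok(format!("decoded {} bytes", raw.len())),
            Either::Right(_) => unreachable!("payload branch only yields Left"),
        });
        (items, convert)
    } else {
        let items: Vec<_> = block_txs.into_par_iter().map(Either::Right).collect();
        let convert: Convert = Box::new(|tx| match tx {
            Either::Right(signed) => Ok(format!("recovered sender of {signed}")),
            Either::Left(_) => unreachable!("block branch only yields Right"),
        });
        (items, convert)
    }
}

fn main() {
    let (items, convert) = tx_iterator_for(false, vec![], vec!["tx-a".into(), "tx-b".into()]);
    let converted: Vec<_> = items.into_par_iter().map(|tx| convert(tx)).collect();
    println!("{converted:?}");
}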
@@ -350,9 +336,7 @@ where Ok(val) => val, Err(e) => { let block = self.convert_to_block(input)?; - return Err( - InsertBlockError::new(block.into_sealed_block(), e.into()).into() - ) + return Err(InsertBlockError::new(block, e.into()).into()) } } }; @@ -383,19 +367,20 @@ where else { // this is pre-validated in the tree return Err(InsertBlockError::new( - self.convert_to_block(input)?.into_sealed_block(), + self.convert_to_block(input)?, ProviderError::HeaderNotFound(parent_hash.into()).into(), ) .into()) }; - let state_provider = ensure_ok!(provider_builder.build()); + let mut state_provider = ensure_ok!(provider_builder.build()); drop(_enter); - // fetch parent block + // Fetch parent block. This goes to memory most of the time unless the parent block is + // beyond the in-memory buffer. let Some(parent_block) = ensure_ok!(self.sealed_header_by_hash(parent_hash, ctx.state())) else { return Err(InsertBlockError::new( - self.convert_to_block(input)?.into_sealed_block(), + self.convert_to_block(input)?, ProviderError::HeaderNotFound(parent_hash.into()).into(), ) .into()) @@ -408,63 +393,64 @@ where let env = ExecutionEnv { evm_env, hash: input.hash(), parent_hash: input.parent_hash() }; // Plan the strategy used for state root computation. - let state_root_plan = self.plan_state_root_computation(); - let strategy = state_root_plan.strategy; + let strategy = self.plan_state_root_computation(); debug!( target: "engine::tree::payload_validator", ?strategy, - "Deciding which state root algorithm to run" + "Decided which state root algorithm to run" ); - // use prewarming background task + // Get an iterator over the transactions in the payload let txs = self.tx_iterator_for(&input)?; + // Extract the BAL, if valid and available + let block_access_list = ensure_ok!(input + .block_access_list() + .transpose() + // Eventually gets converted to a `InsertBlockErrorKind::Other` + .map_err(Box::::from)) + .map(Arc::new); + // Spawn the appropriate processor based on strategy - let (mut handle, strategy) = ensure_ok!(self.spawn_payload_processor( + let mut handle = ensure_ok!(self.spawn_payload_processor( env.clone(), txs, provider_builder, parent_hash, ctx.state(), strategy, + block_access_list, )); // Use cached state provider before executing, used in execution after prewarming threads // complete - let state_provider = CachedStateProvider::new_with_caches( - state_provider, - handle.caches(), - handle.cache_metrics(), - ); + if let Some((caches, cache_metrics)) = handle.caches().zip(handle.cache_metrics()) { + state_provider = + Box::new(CachedStateProvider::new(state_provider, caches, cache_metrics)); + }; + + if self.config.state_provider_metrics() { + state_provider = Box::new(InstrumentedStateProvider::new(state_provider, "engine")); + } // Execute the block and handle any execution errors - let output = match if self.config.state_provider_metrics() { - let state_provider = InstrumentedStateProvider::from_state_provider(&state_provider); - let result = self.execute_block(&state_provider, env, &input, &mut handle); - state_provider.record_total_latency(); - result - } else { - self.execute_block(&state_provider, env, &input, &mut handle) - } { + let (output, senders) = match self.execute_block(state_provider, env, &input, &mut handle) { Ok(output) => output, Err(err) => return self.handle_execution_error(input, err, &parent_block), }; - // after executing the block we can stop executing transactions + // After executing the block we can stop prewarming transactions 
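The provider wiring above layers optional wrappers over a boxed trait object: the cached provider only when prewarm caches exist, then the instrumented provider only when `state_provider_metrics()` is set. A std-only sketch of that decorator layering, with simplified provider types standing in for the reth ones:

use std::collections::HashMap;

trait StateProvider {
    fn storage(&self, slot: u64) -> Option<u64>;
}

struct DatabaseProvider(HashMap<u64, u64>);

impl StateProvider for DatabaseProvider {
    fn storage(&self, slot: u64) -> Option<u64> {
        self.0.get(&slot).copied()
    }
}

struct CachedProvider {
    inner: Box<dyn StateProvider>,
}

impl StateProvider for CachedProvider {
    fn storage(&self, slot: u64) -> Option<u64> {
        // a real implementation would consult the shared cache first
        self.inner.storage(slot)
    }
}

struct InstrumentedProvider {
    inner: Box<dyn StateProvider>,
    label: &'static str,
}

impl StateProvider for InstrumentedProvider {
    fn storage(&self, slot: u64) -> Option<u64> {
        let value = self.inner.storage(slot);
        println!("[{}] storage({slot}) -> {value:?}", self.label);
        value
    }
}

fn build_provider(use_cache: bool, use_metrics: bool) -> Box<dyn StateProvider> {
    let mut provider: Box<dyn StateProvider> =
        Box::new(DatabaseProvider(HashMap::from([(1, 42)])));
    if use_cache {
        provider = Box::new(CachedProvider { inner: provider });
    }
    if use_metrics {
        provider = Box::new(InstrumentedProvider { inner: provider, label: "engine" });
    }
    provider
}

fn main() {
    let provider = build_provider(true, true);
    assert_eq!(provider.storage(1), Some(42));
}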
handle.stop_prewarming_execution(); - let block = self.convert_to_block(input)?; + let block = self.convert_to_block(input)?.with_senders(senders); let hashed_state = ensure_ok_post_block!( self.validate_post_execution(&block, &parent_block, &output, &mut ctx), block ); - debug!(target: "engine::tree::payload_validator", "Calculating block state root"); - let root_time = Instant::now(); - let mut maybe_state_root = None; match strategy { @@ -533,7 +519,7 @@ where } let (root, updates) = ensure_ok_post_block!( - state_provider.state_root_with_updates(hashed_state.clone()), + self.compute_state_root_serial(block.parent_hash(), &hashed_state, ctx.state()), block ); (root, updates, root_time.elapsed()) @@ -563,15 +549,14 @@ where .into()) } - // terminate prewarming task with good state output - handle.terminate_caching(Some(&output.state)); + // Create ExecutionOutcome and wrap in Arc for sharing with both the caching task + // and the deferred trie task. This avoids cloning the expensive BundleState. + let execution_outcome = Arc::new(ExecutionOutcome::from((output, block_num_hash.number))); - Ok(ExecutedBlock { - recovered_block: Arc::new(block), - execution_output: Arc::new(ExecutionOutcome::from((output, block_num_hash.number))), - hashed_state: Arc::new(hashed_state), - trie_updates: Arc::new(trie_output), - }) + // Terminate prewarming task with the shared execution outcome + handle.terminate_caching(Some(Arc::clone(&execution_outcome))); + + Ok(self.spawn_deferred_trie_task(block, execution_outcome, &ctx, hashed_state, trie_output)) } /// Return sealed block header from database or in-memory state by hash. @@ -592,13 +577,14 @@ where /// Validate if block is correct and satisfies all the consensus rules that concern the header /// and block body itself. - fn validate_block_inner(&self, block: &RecoveredBlock) -> Result<(), ConsensusError> { + #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] + fn validate_block_inner(&self, block: &SealedBlock) -> Result<(), ConsensusError> { if let Err(e) = self.consensus.validate_header(block.sealed_header()) { error!(target: "engine::tree::payload_validator", ?block, "Failed to validate header {}: {e}", block.hash()); return Err(e) } - if let Err(e) = self.consensus.validate_block_pre_execution(block.sealed_block()) { + if let Err(e) = self.consensus.validate_block_pre_execution(block) { error!(target: "engine::tree::payload_validator", ?block, "Failed to validate block {}: {e}", block.hash()); return Err(e) } @@ -613,10 +599,10 @@ where state_provider: S, env: ExecutionEnv, input: &BlockOrPayload, - handle: &mut PayloadHandle, Err>, - ) -> Result, InsertBlockErrorKind> + handle: &mut PayloadHandle, Err, N::Receipt>, + ) -> Result<(BlockExecutionOutput, Vec

), InsertBlockErrorKind> where - S: StateProvider, + S: StateProvider + Send, Err: core::error::Error + Send + Sync + 'static, V: PayloadValidator, T: PayloadTypes>, @@ -625,7 +611,7 @@ where debug!(target: "engine::tree::payload_validator", "Executing block"); let mut db = State::builder() - .with_database(StateProviderDatabase::new(&state_provider)) + .with_database(StateProviderDatabase::new(state_provider)) .with_bundle_update() .without_state_clear() .build(); @@ -654,7 +640,7 @@ where let execution_start = Instant::now(); let state_hook = Box::new(handle.state_hook()); - let output = self.metrics.execute_metered( + let (output, senders) = self.metrics.execute_metered( executor, handle.iter_transactions().map(|res| res.map_err(BlockExecutionError::other)), state_hook, @@ -662,7 +648,7 @@ where let execution_finish = Instant::now(); let execution_time = execution_finish.duration_since(execution_start); debug!(target: "engine::tree::payload_validator", elapsed = ?execution_time, "Executed block"); - Ok(output) + Ok((output, senders)) } /// Compute state root for the given hashed post state in parallel. @@ -671,8 +657,6 @@ where /// /// Returns `Ok(_)` if computed successfully. /// Returns `Err(_)` if error was encountered during computation. - /// `Err(ProviderError::ConsistentView(_))` can be safely ignored and fallback computation - /// should be used instead. #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] fn compute_state_root_parallel( &self, @@ -680,39 +664,65 @@ where hashed_state: &HashedPostState, state: &EngineApiTreeState, ) -> Result<(B256, TrieUpdates), ParallelStateRootError> { - let provider = self.provider.database_provider_ro()?; + let (mut input, block_hash) = self.compute_trie_input(parent_hash, state)?; - let (mut input, block_number) = - self.compute_trie_input(provider, parent_hash, state, None)?; + // Extend state overlay with current block's sorted state. + input.prefix_sets.extend(hashed_state.construct_prefix_sets()); + let sorted_hashed_state = hashed_state.clone_into_sorted(); + Arc::make_mut(&mut input.state).extend_ref(&sorted_hashed_state); - // Extend with block we are validating root for. - input.append_ref(hashed_state); - - // Convert the TrieInput into a MultProofConfig, since everything uses the sorted - // forms of the state/trie fields. - let (_, multiproof_config) = MultiProofConfig::from_input(input); + let TrieInputSorted { nodes, state, prefix_sets: prefix_sets_mut } = input; let factory = OverlayStateProviderFactory::new(self.provider.clone()) - .with_block_number(Some(block_number)) - .with_trie_overlay(Some(multiproof_config.nodes_sorted)) - .with_hashed_state_overlay(Some(multiproof_config.state_sorted)); + .with_block_hash(Some(block_hash)) + .with_trie_overlay(Some(nodes)) + .with_hashed_state_overlay(Some(state)); // The `hashed_state` argument is already taken into account as part of the overlay, but we // need to use the prefix sets which were generated from it to indicate to the // ParallelStateRoot which parts of the trie need to be recomputed. - let prefix_sets = Arc::into_inner(multiproof_config.prefix_sets) - .expect("MultiProofConfig was never cloned") - .freeze(); + let prefix_sets = prefix_sets_mut.freeze(); ParallelStateRoot::new(factory, prefix_sets).incremental_root_with_updates() } + /// Compute state root for the given hashed post state in serial. 
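Both state-root paths above extend the shared sorted overlay with the current block's state via `Arc::make_mut`, a copy-on-write step that only clones when the overlay is still shared. A small std-only illustration, with a toy `HashedState` in place of the sorted post state:

use std::collections::BTreeMap;
use std::sync::Arc;

#[derive(Clone, Debug, Default, PartialEq)]
struct HashedState(BTreeMap<u64, u64>);

impl HashedState {
    fn extend_ref(&mut self, other: &Self) {
        for (slot, value) in &other.0 {
            self.0.insert(*slot, *value); // newer values shadow older ones
        }
    }
}

fn extend_overlay(shared: &mut Arc<HashedState>, block_state: &HashedState) {
    // clones the inner state only if `shared` has more than one strong reference
    Arc::make_mut(shared).extend_ref(block_state);
}

fn main() {
    let base = Arc::new(HashedState(BTreeMap::from([(1, 10), (2, 20)])));
    let other_holder = Arc::clone(&base); // simulates another consumer of the overlay

    let mut overlay = base;
    extend_overlay(&mut overlay, &HashedState(BTreeMap::from([(2, 99)])));

    assert_eq!(overlay.0.get(&2), Some(&99)); // overlay sees the new value
    assert_eq!(other_holder.0.get(&2), Some(&20)); // the shared copy is untouched
}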
+ fn compute_state_root_serial( + &self, + parent_hash: B256, + hashed_state: &HashedPostState, + state: &EngineApiTreeState, + ) -> ProviderResult<(B256, TrieUpdates)> { + let (mut input, block_hash) = self.compute_trie_input(parent_hash, state)?; + + // Extend state overlay with current block's sorted state. + input.prefix_sets.extend(hashed_state.construct_prefix_sets()); + let sorted_hashed_state = hashed_state.clone_into_sorted(); + Arc::make_mut(&mut input.state).extend_ref(&sorted_hashed_state); + + let TrieInputSorted { nodes, state, .. } = input; + let prefix_sets = hashed_state.construct_prefix_sets(); + + let factory = OverlayStateProviderFactory::new(self.provider.clone()) + .with_block_hash(Some(block_hash)) + .with_trie_overlay(Some(nodes)) + .with_hashed_state_overlay(Some(state)); + + let provider = factory.database_provider_ro()?; + + Ok(StateRoot::new(&provider, &provider) + .with_prefix_sets(prefix_sets.freeze()) + .root_with_updates() + .map_err(Into::::into)?) + } + /// Validates the block after execution. /// /// This performs: /// - parent header validation /// - post-execution consensus validation /// - state-root based post-execution validation + #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] fn validate_post_execution>>( &self, block: &RecoveredBlock, @@ -732,21 +742,32 @@ where } // now validate against the parent + let _enter = debug_span!(target: "engine::tree::payload_validator", "validate_header_against_parent").entered(); if let Err(e) = self.consensus.validate_header_against_parent(block.sealed_header(), parent_block) { warn!(target: "engine::tree::payload_validator", ?block, "Failed to validate header {} against parent: {e}", block.hash()); return Err(e.into()) } + drop(_enter); + // Validate block post-execution rules + let _enter = + debug_span!(target: "engine::tree::payload_validator", "validate_block_post_execution") + .entered(); if let Err(err) = self.consensus.validate_block_post_execution(block, output) { // call post-block hook self.on_invalid_block(parent_block, block, output, None, ctx.state_mut()); return Err(err.into()) } + drop(_enter); + let _enter = + debug_span!(target: "engine::tree::payload_validator", "hashed_post_state").entered(); let hashed_state = self.provider.hashed_post_state(&output.state); + drop(_enter); + let _enter = debug_span!(target: "engine::tree::payload_validator", "validate_block_post_execution_with_hashed_state").entered(); if let Err(err) = self.validator.validate_block_post_execution_with_hashed_state(&hashed_state, block) { @@ -789,79 +810,46 @@ where parent_hash: B256, state: &EngineApiTreeState, strategy: StateRootStrategy, + block_access_list: Option>, ) -> Result< - ( - PayloadHandle< - impl ExecutableTxFor + use, - impl core::error::Error + Send + Sync + 'static + use, - >, - StateRootStrategy, - ), + PayloadHandle< + impl ExecutableTxFor + use, + impl core::error::Error + Send + Sync + 'static + use, + N::Receipt, + >, InsertBlockErrorKind, > { match strategy { StateRootStrategy::StateRootTask => { - // get allocated trie input if it exists - let allocated_trie_input = self.trie_input.take(); - // Compute trie input let trie_input_start = Instant::now(); - let (trie_input, block_number) = self.compute_trie_input( - self.provider.database_provider_ro()?, - parent_hash, - state, - allocated_trie_input, - )?; + let (trie_input, block_hash) = self.compute_trie_input(parent_hash, state)?; + // Create OverlayStateProviderFactory with sorted trie data for multiproofs + let 
TrieInputSorted { nodes, state, .. } = trie_input; + + let multiproof_provider_factory = + OverlayStateProviderFactory::new(self.provider.clone()) + .with_block_hash(Some(block_hash)) + .with_trie_overlay(Some(nodes)) + .with_hashed_state_overlay(Some(state)); + + // Record trie input duration including OverlayStateProviderFactory setup self.metrics .block_validation .trie_input_duration .record(trie_input_start.elapsed().as_secs_f64()); - // Convert the TrieInput into a MultProofConfig, since everything uses the sorted - // forms of the state/trie fields. - let (trie_input, multiproof_config) = MultiProofConfig::from_input(trie_input); - self.trie_input.replace(trie_input); - - // Create OverlayStateProviderFactory with the multiproof config, for use with - // multiproofs. - let multiproof_provider_factory = - OverlayStateProviderFactory::new(self.provider.clone()) - .with_block_number(Some(block_number)) - .with_trie_overlay(Some(multiproof_config.nodes_sorted)) - .with_hashed_state_overlay(Some(multiproof_config.state_sorted)); - - // Use state root task only if prefix sets are empty, otherwise proof generation is - // too expensive because it requires walking all paths in every proof. let spawn_start = Instant::now(); - let (handle, strategy) = match self.payload_processor.spawn( + + let handle = self.payload_processor.spawn( env, txs, provider_builder, multiproof_provider_factory, &self.config, - ) { - Ok(handle) => { - // Successfully spawned with state root task support - (handle, StateRootStrategy::StateRootTask) - } - Err((error, txs, env, provider_builder)) => { - // Failed to spawn proof workers, fallback to parallel state root - error!( - target: "engine::tree::payload_validator", - ?error, - "Failed to spawn proof workers, falling back to parallel state root" - ); - ( - self.payload_processor.spawn_cache_exclusive( - env, - txs, - provider_builder, - ), - StateRootStrategy::Parallel, - ) - } - }; + block_access_list, + ); // record prewarming initialization duration self.metrics @@ -869,9 +857,9 @@ where .spawn_payload_processor .record(spawn_start.elapsed().as_secs_f64()); - Ok((handle, strategy)) + Ok(handle) } - strategy @ (StateRootStrategy::Parallel | StateRootStrategy::Synchronous) => { + StateRootStrategy::Parallel | StateRootStrategy::Synchronous => { let start = Instant::now(); let handle = self.payload_processor.spawn_cache_exclusive(env, txs, provider_builder); @@ -882,7 +870,7 @@ where .spawn_payload_processor .record(start.elapsed().as_secs_f64()); - Ok((handle, strategy)) + Ok(handle) } } } @@ -919,23 +907,17 @@ where } /// Determines the state root computation strategy based on configuration. - #[instrument(level = "debug", target = "engine::tree::payload_validator", skip_all)] - fn plan_state_root_computation(&self) -> StateRootPlan { - let strategy = if self.config.state_root_fallback() { + /// + /// Note: Use state root task only if prefix sets are empty, otherwise proof generation is + /// too expensive because it requires walking all paths in every proof. + const fn plan_state_root_computation(&self) -> StateRootStrategy { + if self.config.state_root_fallback() { StateRootStrategy::Synchronous } else if self.config.use_state_root_task() { StateRootStrategy::StateRootTask } else { StateRootStrategy::Parallel - }; - - debug!( - target: "engine::tree::payload_validator", - ?strategy, - "Planned state root computation strategy" - ); - - StateRootPlan { strategy } + } } /// Called when an invalid block is encountered during validation. 
@@ -954,60 +936,173 @@ where self.invalid_block_hook.on_invalid_block(parent_header, block, output, trie_updates); } - /// Computes the trie input at the provided parent hash, as well as the block number of the - /// highest persisted ancestor. + /// Computes [`TrieInputSorted`] for the provided parent hash by combining database state + /// with in-memory overlays. /// - /// The goal of this function is to take in-memory blocks and generate a [`TrieInput`] that - /// serves as an overlay to the database blocks. + /// The goal of this function is to take in-memory blocks and generate a [`TrieInputSorted`] + /// that extends from the highest persisted ancestor up through the parent. This enables state + /// root computation and proof generation without requiring all blocks to be persisted + /// first. /// /// It works as follows: - /// 1. Collect in-memory blocks that are descendants of the provided parent hash using - /// [`crate::tree::TreeState::blocks_by_hash`]. - /// 2. If the persistence is in progress, and the block that we're computing the trie input for - /// is a descendant of the currently persisting blocks, we need to be sure that in-memory - /// blocks are not overlapping with the database blocks that may have been already persisted. - /// To do that, we're filtering out in-memory blocks that are lower than the highest database - /// block. - /// 3. Once in-memory blocks are collected and optionally filtered, we compute the - /// [`HashedPostState`] from them. + /// 1. Collect in-memory overlay blocks using [`crate::tree::TreeState::blocks_by_hash`]. This + /// returns the highest persisted ancestor hash (`block_hash`) and the list of in-memory + /// blocks building on top of it. + /// 2. Fast path: If the tip in-memory block's trie input is already anchored to `block_hash` + /// (its `anchor_hash` matches `block_hash`), reuse it directly. + /// 3. Slow path: Build a new [`TrieInputSorted`] by aggregating the overlay blocks (from oldest + /// to newest) on top of the database state at `block_hash`. #[instrument( level = "debug", target = "engine::tree::payload_validator", skip_all, fields(parent_hash) )] - fn compute_trie_input( + fn compute_trie_input( &self, - provider: TP, parent_hash: B256, state: &EngineApiTreeState, - allocated_trie_input: Option, - ) -> ProviderResult<(TrieInput, BlockNumber)> { - // get allocated trie input or use a default trie input - let mut input = allocated_trie_input.unwrap_or_default(); + ) -> ProviderResult<(TrieInputSorted, B256)> { + let wait_start = Instant::now(); + let (block_hash, blocks) = + state.tree_state.blocks_by_hash(parent_hash).unwrap_or_else(|| (parent_hash, vec![])); - let (historical, blocks) = state - .tree_state - .blocks_by_hash(parent_hash) - .map_or_else(|| (parent_hash.into(), vec![]), |(hash, blocks)| (hash.into(), blocks)); + // Fast path: if the tip block's anchor matches the persisted ancestor hash, reuse its + // TrieInput. This means the TrieInputSorted already aggregates all in-memory overlays + // from that ancestor, so we can avoid re-aggregation. 
+ if let Some(tip_block) = blocks.first() { + let data = tip_block.trie_data(); + if let (Some(anchor_hash), Some(trie_input)) = + (data.anchor_hash(), data.trie_input().cloned()) && + anchor_hash == block_hash + { + trace!(target: "engine::tree::payload_validator", %block_hash,"Reusing trie input with matching anchor hash"); + self.metrics + .block_validation + .deferred_trie_wait_duration + .record(wait_start.elapsed().as_secs_f64()); + return Ok(((*trie_input).clone(), block_hash)); + } + } if blocks.is_empty() { debug!(target: "engine::tree::payload_validator", "Parent found on disk"); } else { - debug!(target: "engine::tree::payload_validator", %historical, blocks = blocks.len(), "Parent found in memory"); + debug!(target: "engine::tree::payload_validator", historical = ?block_hash, blocks = blocks.len(), "Parent found in memory"); } - // Convert the historical block to the block number - let block_number = provider - .convert_hash_or_number(historical)? - .ok_or_else(|| ProviderError::BlockHashNotFound(historical.as_hash().unwrap()))?; + // Extend with contents of parent in-memory blocks directly in sorted form. + let input = Self::merge_overlay_trie_input(&blocks); - // Extend with contents of parent in-memory blocks. - input.extend_with_blocks( - blocks.iter().rev().map(|block| (block.hashed_state(), block.trie_updates())), + self.metrics + .block_validation + .deferred_trie_wait_duration + .record(wait_start.elapsed().as_secs_f64()); + Ok((input, block_hash)) + } + + /// Aggregates multiple in-memory blocks into a single [`TrieInputSorted`] by combining their + /// state changes. + /// + /// The input `blocks` vector is ordered newest -> oldest (see `TreeState::blocks_by_hash`). + /// We iterate it in reverse so we start with the oldest block's trie data and extend forward + /// toward the newest, ensuring newer state takes precedence. + fn merge_overlay_trie_input(blocks: &[ExecutedBlock]) -> TrieInputSorted { + let mut input = TrieInputSorted::default(); + let mut blocks_iter = blocks.iter().rev().peekable(); + + if let Some(first) = blocks_iter.next() { + let data = first.trie_data(); + input.state = data.hashed_state; + input.nodes = data.trie_updates; + + // Only clone and mutate if there are more in-memory blocks. + if blocks_iter.peek().is_some() { + let state_mut = Arc::make_mut(&mut input.state); + let nodes_mut = Arc::make_mut(&mut input.nodes); + for block in blocks_iter { + let data = block.trie_data(); + state_mut.extend_ref(data.hashed_state.as_ref()); + nodes_mut.extend_ref(data.trie_updates.as_ref()); + } + } + } + + input + } + + /// Spawns a background task to compute and sort trie data for the executed block. + /// + /// This function creates a [`DeferredTrieData`] handle with fallback inputs and spawns a + /// blocking task that calls `wait_cloned()` to: + /// 1. Sort the block's hashed state and trie updates + /// 2. Merge ancestor overlays and extend with the sorted data + /// 3. Create an [`AnchoredTrieInput`](reth_chain_state::AnchoredTrieInput) for efficient future + /// trie computations + /// 4. Cache the result so subsequent calls return immediately + /// + /// If the background task hasn't completed when `trie_data()` is called, `wait_cloned()` + /// computes from the stored inputs, eliminating deadlock risk and duplicate computation. 
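A std-only sketch of the overlay aggregation in `merge_overlay_trie_input` above: `blocks` arrives newest-first, so iterating in reverse starts from the oldest overlay, the first (oldest) block's `Arc`'d data is reused as-is, and `Arc::make_mut` only clones when further blocks need to be folded in, with newer entries shadowing older ones. The types here are simplified stand-ins:

use std::collections::BTreeMap;
use std::sync::Arc;

type State = BTreeMap<&'static str, u64>;

struct ExecutedBlock {
    hashed_state: Arc<State>,
}

fn merge_overlay(blocks: &[ExecutedBlock]) -> Arc<State> {
    let mut merged = Arc::new(State::default());
    let mut iter = blocks.iter().rev().peekable();

    if let Some(oldest) = iter.next() {
        merged = Arc::clone(&oldest.hashed_state);
        // only clone and mutate when there are more in-memory blocks to merge
        if iter.peek().is_some() {
            let state = Arc::make_mut(&mut merged);
            for block in iter {
                for (key, value) in block.hashed_state.iter() {
                    state.insert(*key, *value); // newer block wins on conflicts
                }
            }
        }
    }
    merged
}

fn main() {
    // newest first, as returned by TreeState::blocks_by_hash
    let blocks = vec![
        ExecutedBlock { hashed_state: Arc::new(BTreeMap::from([("a", 3)])) }, // newest
        ExecutedBlock { hashed_state: Arc::new(BTreeMap::from([("a", 2), ("b", 2)])) },
        ExecutedBlock { hashed_state: Arc::new(BTreeMap::from([("a", 1), ("c", 1)])) }, // oldest
    ];
    let merged = merge_overlay(&blocks);
    assert_eq!(merged.get("a"), Some(&3)); // newest value for "a"
    assert_eq!(merged.get("b"), Some(&2));
    assert_eq!(merged.get("c"), Some(&1));
}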
+ /// + /// The validation hot path can return immediately after state root verification, + /// while consumers (DB writes, overlay providers, proofs) get trie data either + /// from the completed task or via fallback computation. + fn spawn_deferred_trie_task( + &self, + block: RecoveredBlock, + execution_outcome: Arc>, + ctx: &TreeCtx<'_, N>, + hashed_state: HashedPostState, + trie_output: TrieUpdates, + ) -> ExecutedBlock { + // Capture parent hash and ancestor overlays for deferred trie input construction. + let (anchor_hash, overlay_blocks) = ctx + .state() + .tree_state + .blocks_by_hash(block.parent_hash()) + .unwrap_or_else(|| (block.parent_hash(), Vec::new())); + + // Collect lightweight ancestor trie data handles. We don't call trie_data() here; + // the merge and any fallback sorting happens in the compute_trie_input_task. + let ancestors: Vec = + overlay_blocks.iter().rev().map(|b| b.trie_data_handle()).collect(); + + // Create deferred handle with fallback inputs in case the background task hasn't completed. + let deferred_trie_data = DeferredTrieData::pending( + Arc::new(hashed_state), + Arc::new(trie_output), + anchor_hash, + ancestors, ); + let deferred_handle_task = deferred_trie_data.clone(); + let deferred_compute_duration = + self.metrics.block_validation.deferred_trie_compute_duration.clone(); - Ok((input, block_number)) + // Spawn background task to compute trie data. Calling `wait_cloned` will compute from + // the stored inputs and cache the result, so subsequent calls return immediately. + let compute_trie_input_task = move || { + let result = panic::catch_unwind(AssertUnwindSafe(|| { + let compute_start = Instant::now(); + let _ = deferred_handle_task.wait_cloned(); + deferred_compute_duration.record(compute_start.elapsed().as_secs_f64()); + })); + + if result.is_err() { + error!( + target: "engine::tree::payload_validator", + "Deferred trie task panicked; fallback computation will be used when trie data is accessed" + ); + } + }; + + // Spawn task that computes trie data asynchronously. + self.payload_processor.executor().spawn_blocking(compute_trie_input_task); + + ExecutedBlock::with_deferred_trie_data( + Arc::new(block), + execution_outcome, + deferred_trie_data, + ) } } @@ -1025,12 +1120,6 @@ enum StateRootStrategy { Synchronous, } -/// State root computation plan that captures strategy and required data. -struct StateRootPlan { - /// Strategy that should be attempted for computing the state root. - strategy: StateRootStrategy, -} - /// Type that validates the payloads processed by the engine. /// /// This provides the necessary functions for validating/executing payloads/blocks. @@ -1062,10 +1151,10 @@ pub trait EngineValidator< /// /// Implementers should ensure that the checks are done in the order that conforms with the /// engine-API specification. - fn ensure_well_formed_payload( + fn convert_payload_to_block( &self, payload: Types::ExecutionData, - ) -> Result, NewPayloadError>; + ) -> Result, NewPayloadError>; /// Validates a payload received from engine API. fn validate_payload( @@ -1077,9 +1166,15 @@ pub trait EngineValidator< /// Validates a block downloaded from the network. fn validate_block( &mut self, - block: RecoveredBlock, + block: SealedBlock, ctx: TreeCtx<'_, N>, ) -> ValidationOutcome; + + /// Hook called after an executed block is inserted directly into the tree. 
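The deferred-trie handle above can be pictured with a std-only sketch: the handle keeps the raw inputs plus a `OnceLock` for the computed result, a background task warms it up, and any consumer arriving first simply does the same computation itself via `get_or_init`, so the work happens at most once and nobody blocks on the spawned task. `DeferredData` and the sort stand in for the real sorting/merging, and the panic guard mirrors the `catch_unwind` around the spawned closure:

use std::panic::{self, AssertUnwindSafe};
use std::sync::{Arc, OnceLock};
use std::thread;

#[derive(Clone)]
struct DeferredData {
    // inputs kept around so the result can always be recomputed on demand
    unsorted: Arc<Vec<u64>>,
    sorted: Arc<OnceLock<Arc<Vec<u64>>>>,
}

impl DeferredData {
    fn pending(unsorted: Vec<u64>) -> Self {
        Self { unsorted: Arc::new(unsorted), sorted: Arc::new(OnceLock::new()) }
    }

    /// Returns the computed value, doing the (expensive) sort here if the background
    /// task has not produced it yet. The result is cached for every later caller.
    fn wait_cloned(&self) -> Arc<Vec<u64>> {
        Arc::clone(self.sorted.get_or_init(|| {
            let mut data = (*self.unsorted).clone();
            data.sort_unstable();
            Arc::new(data)
        }))
    }
}

fn main() {
    let deferred = DeferredData::pending(vec![3, 1, 2]);

    // background warm-up task; a panic here only means consumers fall back to
    // computing the value themselves when they first need it
    let handle_for_task = deferred.clone();
    let worker = thread::spawn(move || {
        let result = panic::catch_unwind(AssertUnwindSafe(|| {
            let _ = handle_for_task.wait_cloned();
        }));
        if result.is_err() {
            eprintln!("deferred task panicked; consumers will recompute on access");
        }
    });

    // the hot path does not wait on the worker: it either reuses the cached result
    // or computes it right here
    assert_eq!(*deferred.wait_cloned(), vec![1, 2, 3]);
    worker.join().unwrap();
}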
+ /// + /// This is invoked when blocks are inserted via `InsertExecutedBlock` (e.g., locally built + /// blocks by sequencers) to allow implementations to update internal state such as caches. + fn on_inserted_executed_block(&self, block: ExecutedBlock); } impl EngineValidator for BasicEngineValidator @@ -1105,11 +1200,11 @@ where self.validator.validate_payload_attributes_against_header(attr, header) } - fn ensure_well_formed_payload( + fn convert_payload_to_block( &self, payload: Types::ExecutionData, - ) -> Result, NewPayloadError> { - let block = self.validator.ensure_well_formed_payload(payload)?; + ) -> Result, NewPayloadError> { + let block = self.validator.convert_payload_to_block(payload)?; Ok(block) } @@ -1123,11 +1218,18 @@ where fn validate_block( &mut self, - block: RecoveredBlock, + block: SealedBlock, ctx: TreeCtx<'_, N>, ) -> ValidationOutcome { self.validate_block_with_state(BlockOrPayload::Block(block), ctx) } + + fn on_inserted_executed_block(&self, block: ExecutedBlock) { + self.payload_processor.on_inserted_executed_block( + block.recovered_block.block_with_parent(), + block.execution_output.state(), + ); + } } /// Enum representing either block or payload being validated. @@ -1136,7 +1238,7 @@ pub enum BlockOrPayload { /// Payload. Payload(T::ExecutionData), /// Block. - Block(RecoveredBlock::Primitives>>), + Block(SealedBlock::Primitives>>), } impl BlockOrPayload { @@ -1179,4 +1281,10 @@ impl BlockOrPayload { Self::Block(_) => "block", } } + + /// Returns the block access list if available. + pub const fn block_access_list(&self) -> Option> { + // TODO decode and return `BlockAccessList` + None + } } diff --git a/crates/engine/tree/src/tree/persistence_state.rs b/crates/engine/tree/src/tree/persistence_state.rs index bbb981a531..82a8078447 100644 --- a/crates/engine/tree/src/tree/persistence_state.rs +++ b/crates/engine/tree/src/tree/persistence_state.rs @@ -67,6 +67,7 @@ impl PersistenceState { /// Returns the current persistence action. If there is no persistence task in progress, then /// this returns `None`. + #[cfg(test)] pub(crate) fn current_action(&self) -> Option<&CurrentPersistenceAction> { self.rx.as_ref().map(|rx| &rx.2) } diff --git a/crates/engine/tree/src/tree/precompile_cache.rs b/crates/engine/tree/src/tree/precompile_cache.rs index 753922f66b..3754560f7d 100644 --- a/crates/engine/tree/src/tree/precompile_cache.rs +++ b/crates/engine/tree/src/tree/precompile_cache.rs @@ -1,50 +1,58 @@ //! Contains a precompile cache backed by `schnellru::LruMap` (LRU by length). use alloy_primitives::Bytes; -use parking_lot::Mutex; +use dashmap::DashMap; +use moka::policy::EvictionPolicy; use reth_evm::precompiles::{DynPrecompile, Precompile, PrecompileInput}; use revm::precompile::{PrecompileId, PrecompileOutput, PrecompileResult}; use revm_primitives::Address; -use schnellru::LruMap; -use std::{ - collections::HashMap, - hash::{Hash, Hasher}, - sync::Arc, -}; +use std::{hash::Hash, sync::Arc}; /// Default max cache size for [`PrecompileCache`] const MAX_CACHE_SIZE: u32 = 10_000; /// Stores caches for each precompile. 
#[derive(Debug, Clone, Default)] -pub struct PrecompileCacheMap(HashMap>) +pub struct PrecompileCacheMap(Arc>>) where - S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone; + S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone + 'static; impl PrecompileCacheMap where S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone + 'static, { - pub(crate) fn cache_for_address(&mut self, address: Address) -> PrecompileCache { + pub(crate) fn cache_for_address(&self, address: Address) -> PrecompileCache { + // Try just using `.get` first to avoid acquiring a write lock. + if let Some(cache) = self.0.get(&address) { + return cache.clone(); + } + // Otherwise, fallback to `.entry` and initialize the cache. + // + // This should be very rare as caches for all precompiles will be initialized as soon as + // first EVM is created. self.0.entry(address).or_default().clone() } } /// Cache for precompiles, for each input stores the result. -/// -/// [`LruMap`] requires a mutable reference on `get` since it updates the LRU order, -/// so we use a [`Mutex`] instead of an `RwLock`. #[derive(Debug, Clone)] -pub struct PrecompileCache(Arc, CacheEntry>>>) +pub struct PrecompileCache( + moka::sync::Cache, alloy_primitives::map::DefaultHashBuilder>, +) where - S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone; + S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone + 'static; impl Default for PrecompileCache where S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone + 'static, { fn default() -> Self { - Self(Arc::new(Mutex::new(LruMap::new(schnellru::ByLength::new(MAX_CACHE_SIZE))))) + Self( + moka::sync::CacheBuilder::new(MAX_CACHE_SIZE as u64) + .initial_capacity(MAX_CACHE_SIZE as usize) + .eviction_policy(EvictionPolicy::lru()) + .build_with_hasher(Default::default()), + ) } } @@ -52,63 +60,31 @@ impl PrecompileCache where S: Eq + Hash + std::fmt::Debug + Send + Sync + Clone + 'static, { - fn get(&self, key: &CacheKeyRef<'_, S>) -> Option { - self.0.lock().get(key).cloned() + fn get(&self, input: &[u8], spec: S) -> Option> { + self.0.get(input).filter(|e| e.spec == spec) } /// Inserts the given key and value into the cache, returning the new cache size. - fn insert(&self, key: CacheKey, value: CacheEntry) -> usize { - let mut cache = self.0.lock(); - cache.insert(key, value); - cache.len() - } -} - -/// Cache key, spec id and precompile call input. spec id is included in the key to account for -/// precompile repricing across fork activations. -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct CacheKey((S, Bytes)); - -impl CacheKey { - const fn new(spec_id: S, input: Bytes) -> Self { - Self((spec_id, input)) - } -} - -/// Cache key reference, used to avoid cloning the input bytes when looking up using a [`CacheKey`]. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct CacheKeyRef<'a, S>((S, &'a [u8])); - -impl<'a, S> CacheKeyRef<'a, S> { - const fn new(spec_id: S, input: &'a [u8]) -> Self { - Self((spec_id, input)) - } -} - -impl PartialEq> for CacheKeyRef<'_, S> { - fn eq(&self, other: &CacheKey) -> bool { - self.0 .0 == other.0 .0 && self.0 .1 == other.0 .1.as_ref() - } -} - -impl<'a, S: Hash> Hash for CacheKeyRef<'a, S> { - fn hash(&self, state: &mut H) { - self.0 .0.hash(state); - self.0 .1.hash(state); + fn insert(&self, input: Bytes, value: CacheEntry) -> usize { + self.0.insert(input, value); + self.0.entry_count() as usize } } /// Cache entry, precompile successful output. 
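A std-only sketch of the caching behavior introduced above (the real code uses `DashMap` for the per-address map and a moka LRU for each precompile's cache; plain `RwLock`/`Mutex` maps stand in here, and eviction is omitted). Entries are keyed by calldata alone and carry the spec they were produced under, so a hit recorded under a different fork, e.g. after a repricing, counts as a miss, and `cache_for_address` tries the cheap read path before taking the write lock:

use std::collections::HashMap;
use std::sync::{Arc, Mutex, RwLock};

type Address = [u8; 20];

#[derive(Clone, Debug, PartialEq, Eq)]
struct CacheEntry {
    output: Vec<u8>,
    gas_used: u64,
    spec: &'static str,
}

#[derive(Clone, Default)]
struct PrecompileCache(Arc<Mutex<HashMap<Vec<u8>, CacheEntry>>>);

impl PrecompileCache {
    fn get(&self, input: &[u8], spec: &'static str) -> Option<CacheEntry> {
        // an entry created under another spec does not count as a hit
        self.0.lock().unwrap().get(input).filter(|e| e.spec == spec).cloned()
    }

    fn insert(&self, input: Vec<u8>, entry: CacheEntry) -> usize {
        let mut cache = self.0.lock().unwrap();
        cache.insert(input, entry);
        cache.len()
    }
}

#[derive(Clone, Default)]
struct PrecompileCacheMap(Arc<RwLock<HashMap<Address, PrecompileCache>>>);

impl PrecompileCacheMap {
    fn cache_for_address(&self, address: Address) -> PrecompileCache {
        // cheap read path first; most calls find an existing cache
        if let Some(cache) = self.0.read().unwrap().get(&address) {
            return cache.clone();
        }
        // rare slow path: take the write lock and initialize
        self.0.write().unwrap().entry(address).or_default().clone()
    }
}

fn main() {
    let map = PrecompileCacheMap::default();
    let cache = map.cache_for_address([1u8; 20]);

    let entry = CacheEntry { output: b"ok".to_vec(), gas_used: 50, spec: "PRAGUE" };
    cache.insert(b"input".to_vec(), entry.clone());

    assert_eq!(cache.get(b"input", "PRAGUE"), Some(entry));
    assert_eq!(cache.get(b"input", "OSAKA"), None); // different fork -> miss
}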
#[derive(Debug, Clone, PartialEq, Eq)] -pub struct CacheEntry(PrecompileOutput); +pub struct CacheEntry { + output: PrecompileOutput, + spec: S, +} -impl CacheEntry { +impl CacheEntry { const fn gas_used(&self) -> u64 { - self.0.gas_used + self.output.gas_used } fn to_precompile_result(&self) -> PrecompileResult { - Ok(self.0.clone()) + Ok(self.output.clone()) } } @@ -190,9 +166,7 @@ where } fn call(&self, input: PrecompileInput<'_>) -> PrecompileResult { - let key = CacheKeyRef::new(self.spec_id.clone(), input.data); - - if let Some(entry) = &self.cache.get(&key) { + if let Some(entry) = &self.cache.get(input.data, self.spec_id.clone()) { self.increment_by_one_precompile_cache_hits(); if input.gas >= entry.gas_used() { return entry.to_precompile_result() @@ -204,8 +178,10 @@ where match &result { Ok(output) => { - let key = CacheKey::new(self.spec_id.clone(), Bytes::copy_from_slice(calldata)); - let size = self.cache.insert(key, CacheEntry(output.clone())); + let size = self.cache.insert( + Bytes::copy_from_slice(calldata), + CacheEntry { output: output.clone(), spec: self.spec_id.clone() }, + ); self.set_precompile_cache_size_metric(size as f64); self.increment_by_one_precompile_cache_misses(); } @@ -246,36 +222,22 @@ impl CachedPrecompileMetrics { #[cfg(test)] mod tests { - use std::hash::DefaultHasher; - use super::*; use reth_evm::{EthEvmFactory, Evm, EvmEnv, EvmFactory}; use reth_revm::db::EmptyDB; use revm::{context::TxEnv, precompile::PrecompileOutput}; use revm_primitives::hardfork::SpecId; - #[test] - fn test_cache_key_ref_hash() { - let key1 = CacheKey::new(SpecId::PRAGUE, b"test_input".into()); - let key2 = CacheKeyRef::new(SpecId::PRAGUE, b"test_input"); - assert!(PartialEq::eq(&key2, &key1)); - - let mut hasher = DefaultHasher::new(); - key1.hash(&mut hasher); - let hash1 = hasher.finish(); - - let mut hasher = DefaultHasher::new(); - key2.hash(&mut hasher); - let hash2 = hasher.finish(); - - assert_eq!(hash1, hash2); - } - #[test] fn test_precompile_cache_basic() { - let dyn_precompile: DynPrecompile = |_input: PrecompileInput<'_>| -> PrecompileResult { - Ok(PrecompileOutput { gas_used: 0, bytes: Bytes::default(), reverted: false }) - } + let dyn_precompile: DynPrecompile = (|_input: PrecompileInput<'_>| -> PrecompileResult { + Ok(PrecompileOutput { + gas_used: 0, + gas_refunded: 0, + bytes: Bytes::default(), + reverted: false, + }) + }) .into(); let cache = @@ -283,16 +245,16 @@ mod tests { let output = PrecompileOutput { gas_used: 50, + gas_refunded: 0, bytes: alloy_primitives::Bytes::copy_from_slice(b"cached_result"), reverted: false, }; - let key = CacheKey::new(SpecId::PRAGUE, b"test_input".into()); - let expected = CacheEntry(output); - cache.cache.insert(key, expected.clone()); + let input = b"test_input"; + let expected = CacheEntry { output, spec: SpecId::PRAGUE }; + cache.cache.insert(input.into(), expected.clone()); - let key = CacheKeyRef::new(SpecId::PRAGUE, b"test_input"); - let actual = cache.cache.get(&key).unwrap(); + let actual = cache.cache.get(input, SpecId::PRAGUE).unwrap(); assert_eq!(actual, expected); } @@ -306,7 +268,7 @@ mod tests { let address1 = Address::repeat_byte(1); let address2 = Address::repeat_byte(2); - let mut cache_map = PrecompileCacheMap::default(); + let cache_map = PrecompileCacheMap::default(); // create the first precompile with a specific output let precompile1: DynPrecompile = (PrecompileId::custom("custom"), { @@ -315,6 +277,7 @@ mod tests { Ok(PrecompileOutput { gas_used: 5000, + gas_refunded: 0, bytes: 
alloy_primitives::Bytes::copy_from_slice(b"output_from_precompile_1"), reverted: false, }) @@ -329,6 +292,7 @@ mod tests { Ok(PrecompileOutput { gas_used: 7000, + gas_refunded: 0, bytes: alloy_primitives::Bytes::copy_from_slice(b"output_from_precompile_2"), reverted: false, }) diff --git a/crates/engine/tree/src/tree/state.rs b/crates/engine/tree/src/tree/state.rs index a10d26e3f2..0a13207e66 100644 --- a/crates/engine/tree/src/tree/state.rs +++ b/crates/engine/tree/src/tree/state.rs @@ -1,7 +1,7 @@ //! Functionality related to tree state. use crate::engine::EngineApiKind; -use alloy_eips::{eip1898::BlockWithParent, BlockNumHash}; +use alloy_eips::BlockNumHash; use alloy_primitives::{ map::{HashMap, HashSet}, BlockNumber, B256, @@ -107,10 +107,6 @@ impl TreeState { self.blocks_by_number.entry(block_number).or_default().push(executed); self.parent_to_child.entry(parent_hash).or_default().insert(hash); - - for children in self.parent_to_child.values_mut() { - children.retain(|child| self.blocks_by_hash.contains_key(child)); - } } /// Remove single executed block by its hash. @@ -294,10 +290,37 @@ impl TreeState { } } + /// Updates the canonical head to the given block. + pub(crate) const fn set_canonical_head(&mut self, new_head: BlockNumHash) { + self.current_canonical_head = new_head; + } + + /// Returns the tracked canonical head. + pub(crate) const fn canonical_head(&self) -> &BlockNumHash { + &self.current_canonical_head + } + + /// Returns the block hash of the canonical head. + pub(crate) const fn canonical_block_hash(&self) -> B256 { + self.canonical_head().hash + } + + /// Returns the block number of the canonical head. + pub(crate) const fn canonical_block_number(&self) -> BlockNumber { + self.canonical_head().number + } +} + +#[cfg(test)] +impl TreeState { /// Determines if the second block is a descendant of the first block. /// /// If the two blocks are the same, this returns `false`. - pub(crate) fn is_descendant(&self, first: BlockNumHash, second: BlockWithParent) -> bool { + pub(crate) fn is_descendant( + &self, + first: BlockNumHash, + second: alloy_eips::eip1898::BlockWithParent, + ) -> bool { // If the second block's parent is the first block's hash, then it is a direct child // and we can return early. if second.parent == first.hash { @@ -330,26 +353,6 @@ impl TreeState { // Now the block numbers should be equal, so we compare hashes. current_block.recovered_block().parent_hash() == first.hash } - - /// Updates the canonical head to the given block. - pub(crate) const fn set_canonical_head(&mut self, new_head: BlockNumHash) { - self.current_canonical_head = new_head; - } - - /// Returns the tracked canonical head. - pub(crate) const fn canonical_head(&self) -> &BlockNumHash { - &self.current_canonical_head - } - - /// Returns the block hash of the canonical head. - pub(crate) const fn canonical_block_hash(&self) -> B256 { - self.canonical_head().hash - } - - /// Returns the block number of the canonical head. 
- pub(crate) const fn canonical_block_number(&self) -> BlockNumber { - self.canonical_head().number - } } #[cfg(test)] diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs index 7c40680c80..b1725bc36c 100644 --- a/crates/engine/tree/src/tree/tests.rs +++ b/crates/engine/tree/src/tree/tests.rs @@ -4,10 +4,10 @@ use crate::{ tree::{ payload_validator::{BasicEngineValidator, TreeCtx, ValidationOutcome}, persistence_state::CurrentPersistenceAction, - TreeConfig, + PersistTarget, TreeConfig, }, }; -use alloy_consensus::Header; + use alloy_eips::eip1898::BlockWithParent; use alloy_primitives::{ map::{HashMap, HashSet}, @@ -18,7 +18,7 @@ use alloy_rpc_types_engine::{ ExecutionData, ExecutionPayloadSidecar, ExecutionPayloadV1, ForkchoiceState, }; use assert_matches::assert_matches; -use reth_chain_state::{test_utils::TestBlockBuilder, BlockState}; +use reth_chain_state::{test_utils::TestBlockBuilder, BlockState, ComputedTrieData}; use reth_chainspec::{ChainSpec, HOLESKY, MAINNET}; use reth_engine_primitives::{EngineApiValidator, ForkchoiceStatus, NoopInvalidBlockHook}; use reth_ethereum_consensus::EthBeaconConsensus; @@ -27,7 +27,6 @@ use reth_ethereum_primitives::{Block, EthPrimitives}; use reth_evm_ethereum::MockEvmConfig; use reth_primitives_traits::Block as _; use reth_provider::{test_utils::MockEthProvider, ExecutionOutcome}; -use reth_trie::{updates::TrieUpdates, HashedPostState}; use std::{ collections::BTreeMap, str::FromStr, @@ -45,20 +44,17 @@ struct MockEngineValidator; impl reth_engine_primitives::PayloadValidator for MockEngineValidator { type Block = Block; - fn ensure_well_formed_payload( + fn convert_payload_to_block( &self, payload: ExecutionData, ) -> Result< - reth_primitives_traits::RecoveredBlock, + reth_primitives_traits::SealedBlock, reth_payload_primitives::NewPayloadError, > { - // For tests, convert the execution payload to a block let block = reth_ethereum_primitives::Block::try_from(payload.payload).map_err(|e| { reth_payload_primitives::NewPayloadError::Other(format!("{e:?}").into()) })?; - let sealed = block.seal_slow(); - - sealed.try_recover().map_err(|e| reth_payload_primitives::NewPayloadError::Other(e.into())) + Ok(block.seal_slow()) } } @@ -289,7 +285,8 @@ impl TestHarness { let fcu_state = self.fcu_state(block_hash); let (tx, rx) = oneshot::channel(); - self.tree + let _ = self + .tree .on_engine_message(FromEngine::Request( BeaconEngineMessage::ForkchoiceUpdated { state: fcu_state, @@ -336,15 +333,12 @@ impl TestHarness { fn persist_blocks(&self, blocks: Vec>) { let mut block_data: Vec<(B256, Block)> = Vec::with_capacity(blocks.len()); - let mut headers_data: Vec<(B256, Header)> = Vec::with_capacity(blocks.len()); for block in &blocks { block_data.push((block.hash(), block.clone_block())); - headers_data.push((block.hash(), block.header().clone())); } self.provider.extend_blocks(block_data); - self.provider.extend_headers(headers_data); } } @@ -403,7 +397,7 @@ impl ValidatorTestHarness { Self { harness, validator, metrics: TestMetrics::default() } } - /// Configure `PersistenceState` for specific `PersistingKind` scenarios + /// Configure `PersistenceState` for specific persistence scenarios fn start_persistence_operation(&mut self, action: CurrentPersistenceAction) { use tokio::sync::oneshot; @@ -428,11 +422,10 @@ impl ValidatorTestHarness { /// Call `validate_block_with_state` directly with block fn validate_block_direct( &mut self, - block: RecoveredBlock, + block: SealedBlock, ) -> ValidationOutcome { let ctx = 
TreeCtx::new( &mut self.harness.tree.state, - &self.harness.tree.persistence_state, &self.harness.tree.canonical_in_memory_state, ); let result = self.validator.validate_block(block, ctx); @@ -457,30 +450,30 @@ impl TestBlockFactory { } /// Create block that triggers consensus violation by corrupting state root - fn create_invalid_consensus_block(&mut self, parent_hash: B256) -> RecoveredBlock { + fn create_invalid_consensus_block(&mut self, parent_hash: B256) -> SealedBlock { let mut block = self.builder.generate_random_block(1, parent_hash).into_block(); // Corrupt state root to trigger consensus violation block.header.state_root = B256::random(); - block.seal_slow().try_recover().unwrap() + block.seal_slow() } /// Create block that triggers execution failure - fn create_invalid_execution_block(&mut self, parent_hash: B256) -> RecoveredBlock { + fn create_invalid_execution_block(&mut self, parent_hash: B256) -> SealedBlock { let mut block = self.builder.generate_random_block(1, parent_hash).into_block(); // Create transaction that will fail execution // This is simplified - in practice we'd create a transaction with insufficient gas, etc. block.header.gas_used = block.header.gas_limit + 1; // Gas used exceeds limit - block.seal_slow().try_recover().unwrap() + block.seal_slow() } /// Create valid block - fn create_valid_block(&mut self, parent_hash: B256) -> RecoveredBlock { + fn create_valid_block(&mut self, parent_hash: B256) -> SealedBlock { let block = self.builder.generate_random_block(1, parent_hash).into_block(); - block.seal_slow().try_recover().unwrap() + block.seal_slow() } } @@ -506,7 +499,7 @@ fn test_tree_persist_block_batch() { // process the message let msg = test_harness.tree.try_recv_engine_message().unwrap().unwrap(); - test_harness.tree.on_engine_message(msg).unwrap(); + let _ = test_harness.tree.on_engine_message(msg).unwrap(); // we now should receive the other batch let msg = test_harness.tree.try_recv_engine_message().unwrap().unwrap(); @@ -585,7 +578,7 @@ async fn test_engine_request_during_backfill() { .with_backfill_state(BackfillSyncState::Active); let (tx, rx) = oneshot::channel(); - test_harness + let _ = test_harness .tree .on_engine_message(FromEngine::Request( BeaconEngineMessage::ForkchoiceUpdated { @@ -630,7 +623,7 @@ fn test_disconnected_payload() { // ensure block is buffered let buffered = test_harness.tree.state.buffer.block(&hash).unwrap(); - assert_eq!(buffered.clone_sealed_block(), sealed_clone); + assert_eq!(*buffered, sealed_clone); } #[test] @@ -638,7 +631,7 @@ fn test_disconnected_block() { let s = include_str!("../../test-data/holesky/2.rlp"); let data = Bytes::from_str(s).unwrap(); let block = Block::decode(&mut data.as_ref()).unwrap(); - let sealed = block.seal_slow().try_recover().unwrap(); + let sealed = block.seal_slow(); let mut test_harness = TestHarness::new(HOLESKY.clone()); @@ -666,7 +659,7 @@ async fn test_holesky_payload() { TestHarness::new(HOLESKY.clone()).with_backfill_state(BackfillSyncState::Active); let (tx, rx) = oneshot::channel(); - test_harness + let _ = test_harness .tree .on_engine_message(FromEngine::Request( BeaconEngineMessage::NewPayload { @@ -826,23 +819,23 @@ fn test_tree_state_on_new_head_deep_fork() { let chain_a = test_block_builder.create_fork(&last_block, 10); let chain_b = test_block_builder.create_fork(&last_block, 10); + let empty_trie_data = ComputedTrieData::default; + for block in &chain_a { - test_harness.tree.state.tree_state.insert_executed(ExecutedBlock { - recovered_block: Arc::new(block.clone()), 
- execution_output: Arc::new(ExecutionOutcome::default()), - hashed_state: Arc::new(HashedPostState::default()), - trie_updates: Arc::new(TrieUpdates::default()), - }); + test_harness.tree.state.tree_state.insert_executed(ExecutedBlock::new( + Arc::new(block.clone()), + Arc::new(ExecutionOutcome::default()), + empty_trie_data(), + )); } test_harness.tree.state.tree_state.set_canonical_head(chain_a.last().unwrap().num_hash()); for block in &chain_b { - test_harness.tree.state.tree_state.insert_executed(ExecutedBlock { - recovered_block: Arc::new(block.clone()), - execution_output: Arc::new(ExecutionOutcome::default()), - hashed_state: Arc::new(HashedPostState::default()), - trie_updates: Arc::new(TrieUpdates::default()), - }); + test_harness.tree.state.tree_state.insert_executed(ExecutedBlock::new( + Arc::new(block.clone()), + Arc::new(ExecutionOutcome::default()), + empty_trie_data(), + )); } // for each block in chain_b, reorg to it and then back to canonical @@ -891,7 +884,8 @@ async fn test_get_canonical_blocks_to_persist() { .with_persistence_threshold(persistence_threshold) .with_memory_block_buffer_target(memory_block_buffer_target); - let blocks_to_persist = test_harness.tree.get_canonical_blocks_to_persist().unwrap(); + let blocks_to_persist = + test_harness.tree.get_canonical_blocks_to_persist(PersistTarget::Threshold).unwrap(); let expected_blocks_to_persist_length: usize = (canonical_head_number - memory_block_buffer_target - last_persisted_block_number) @@ -910,7 +904,8 @@ async fn test_get_canonical_blocks_to_persist() { assert!(test_harness.tree.state.tree_state.sealed_header_by_hash(&fork_block_hash).is_some()); - let blocks_to_persist = test_harness.tree.get_canonical_blocks_to_persist().unwrap(); + let blocks_to_persist = + test_harness.tree.get_canonical_blocks_to_persist(PersistTarget::Threshold).unwrap(); assert_eq!(blocks_to_persist.len(), expected_blocks_to_persist_length); // check that the fork block is not included in the blocks to persist @@ -989,7 +984,7 @@ async fn test_engine_tree_live_sync_transition_required_blocks_requested() { let backfill_tip_block = main_chain[(backfill_finished_block_number - 1) as usize].clone(); // add block to mock provider to enable persistence clean up. 
test_harness.provider.add_block(backfill_tip_block.hash(), backfill_tip_block.into_block()); - test_harness.tree.on_engine_message(FromEngine::Event(backfill_finished)).unwrap(); + let _ = test_harness.tree.on_engine_message(FromEngine::Event(backfill_finished)).unwrap(); let event = test_harness.from_tree_rx.recv().await.unwrap(); match event { @@ -999,9 +994,12 @@ async fn test_engine_tree_live_sync_transition_required_blocks_requested() { _ => panic!("Unexpected event: {event:#?}"), } - test_harness + let _ = test_harness .tree - .on_engine_message(FromEngine::DownloadedBlocks(vec![main_chain.last().unwrap().clone()])) + .on_engine_message(FromEngine::DownloadedBlocks(vec![main_chain + .last() + .unwrap() + .clone_sealed_block()])) .unwrap(); let event = test_harness.from_tree_rx.recv().await.unwrap(); @@ -1052,7 +1050,7 @@ async fn test_fcu_with_canonical_ancestor_updates_latest_block() { // Send FCU to the canonical ancestor let (tx, rx) = oneshot::channel(); - test_harness + let _ = test_harness .tree .on_engine_message(FromEngine::Request( BeaconEngineMessage::ForkchoiceUpdated { @@ -1133,7 +1131,7 @@ fn test_on_new_payload_canonical_insertion() { // Ensure block is buffered (like test_disconnected_payload) let buffered = test_harness.tree.state.buffer.block(&hash1).unwrap(); - assert_eq!(buffered.clone_sealed_block(), sealed1_clone, "Block should be buffered"); + assert_eq!(buffered.clone(), sealed1_clone, "Block should be buffered"); } /// Test that ensures payloads are rejected when linking to a known-invalid ancestor @@ -1240,11 +1238,7 @@ fn test_on_new_payload_backfill_buffering() { .expect("Block should be buffered during backfill sync"); // Verify the buffered block matches what we submitted - assert_eq!( - buffered_block.clone_sealed_block(), - sealed, - "Buffered block should match submitted payload" - ); + assert_eq!(*buffered_block, sealed, "Buffered block should match submitted payload"); } /// Test that captures the Engine-API rule where malformed payloads report latestValidHash = None @@ -1517,11 +1511,10 @@ mod check_invalid_ancestors_tests { // Create a genesis-like payload with parent_hash = B256::ZERO let mut test_block_builder = TestBlockBuilder::eth(); let genesis_block = test_block_builder.generate_random_block(0, B256::ZERO); - let (sealed_genesis, _) = genesis_block.split_sealed(); let genesis_payload = ExecutionData { payload: ExecutionPayloadV1::from_block_unchecked( - sealed_genesis.hash(), - &sealed_genesis.into_block(), + genesis_block.hash(), + &genesis_block.into_block(), ) .into(), sidecar: ExecutionPayloadSidecar::none(), @@ -1571,8 +1564,7 @@ mod check_invalid_ancestors_tests { // Intentionally corrupt the block to make it malformed // Modify the block after creation to make validation fail - let (sealed_block, _senders) = block.split_sealed(); - let unsealed_block = sealed_block.unseal(); + let unsealed_block = block.unseal(); // Create payload with wrong hash (this makes it malformed) let wrong_hash = B256::from([0xff; 32]); @@ -1600,13 +1592,9 @@ mod payload_execution_tests { // Create a valid payload let mut test_block_builder = TestBlockBuilder::eth(); let block = test_block_builder.generate_random_block(1, B256::ZERO); - let (sealed_block, _) = block.split_sealed(); let payload = ExecutionData { - payload: ExecutionPayloadV1::from_block_unchecked( - sealed_block.hash(), - &sealed_block.into_block(), - ) - .into(), + payload: ExecutionPayloadV1::from_block_unchecked(block.hash(), &block.into_block()) + .into(), sidecar: 
ExecutionPayloadSidecar::none(), }; @@ -1646,13 +1634,9 @@ mod payload_execution_tests { // Create a valid payload let mut test_block_builder = TestBlockBuilder::eth(); let block = test_block_builder.generate_random_block(1, B256::ZERO); - let (sealed_block, _) = block.split_sealed(); let payload = ExecutionData { - payload: ExecutionPayloadV1::from_block_unchecked( - sealed_block.hash(), - &sealed_block.into_block(), - ) - .into(), + payload: ExecutionPayloadV1::from_block_unchecked(block.hash(), &block.into_block()) + .into(), sidecar: ExecutionPayloadSidecar::none(), }; @@ -1674,8 +1658,7 @@ mod payload_execution_tests { let block = test_block_builder.generate_random_block(1, B256::ZERO); // Modify the block to make it malformed - let (sealed_block, _senders) = block.split_sealed(); - let mut unsealed_block = sealed_block.unseal(); + let mut unsealed_block = block.unseal(); // Corrupt the block by setting an invalid gas limit unsealed_block.header.gas_limit = 0; @@ -1695,7 +1678,6 @@ mod payload_execution_tests { #[cfg(test)] mod forkchoice_updated_tests { use super::*; - use alloy_primitives::Address; /// Test that validates the forkchoice state pre-validation logic #[tokio::test] @@ -1915,33 +1897,6 @@ mod forkchoice_updated_tests { assert!(fcu_result.payload_status.is_syncing(), "Should return syncing during backfill"); } - /// Test metrics recording in forkchoice updated - #[tokio::test] - async fn test_record_forkchoice_metrics() { - let chain_spec = MAINNET.clone(); - let test_harness = TestHarness::new(chain_spec); - - // Get initial metrics state by checking if metrics are recorded - // We can't directly get counter values, but we can verify the methods are called - - // Test without attributes - let attrs_none = None; - test_harness.tree.record_forkchoice_metrics(&attrs_none); - - // Test with attributes - let attrs_some = Some(alloy_rpc_types_engine::PayloadAttributes { - timestamp: 1000, - prev_randao: B256::random(), - suggested_fee_recipient: Address::random(), - withdrawals: None, - parent_beacon_block_root: None, - }); - test_harness.tree.record_forkchoice_metrics(&attrs_some); - - // We can't directly verify counter values since they're private metrics - // But we can verify the methods don't panic and execute successfully - } - /// Test edge case: FCU with invalid ancestor #[tokio::test] async fn test_fcu_with_invalid_ancestor() { @@ -1991,4 +1946,53 @@ mod forkchoice_updated_tests { .unwrap(); assert!(result.is_some(), "OpStack should handle canonical head"); } + + /// Test that engine termination persists all blocks and signals completion. 
+ #[test] + fn test_engine_termination_with_everything_persisted() { + let chain_spec = MAINNET.clone(); + let mut test_block_builder = TestBlockBuilder::eth().with_chain_spec((*chain_spec).clone()); + + // Create 10 blocks to persist + let blocks: Vec<_> = test_block_builder.get_executed_blocks(1..11).collect(); + let canonical_tip = blocks.last().unwrap().recovered_block().number; + let test_harness = TestHarness::new(chain_spec).with_blocks(blocks); + + // Create termination channel + let (terminate_tx, mut terminate_rx) = oneshot::channel(); + + let to_tree_tx = test_harness.to_tree_tx.clone(); + let action_rx = test_harness.action_rx; + + // Spawn tree in background thread + std::thread::Builder::new() + .name("Engine Task".to_string()) + .spawn(|| test_harness.tree.run()) + .unwrap(); + + // Send terminate request + to_tree_tx + .send(FromEngine::Event(FromOrchestrator::Terminate { tx: terminate_tx })) + .unwrap(); + + // Handle persistence actions until termination completes + let mut last_persisted_number = 0; + loop { + if terminate_rx.try_recv().is_ok() { + break; + } + + if let Ok(PersistenceAction::SaveBlocks(saved_blocks, sender)) = + action_rx.recv_timeout(std::time::Duration::from_millis(100)) + { + if let Some(last) = saved_blocks.last() { + last_persisted_number = last.recovered_block().number; + } + sender.send(saved_blocks.last().map(|b| b.recovered_block().num_hash())).unwrap(); + } + } + + // Ensure we persisted right to the tip + assert_eq!(last_persisted_number, canonical_tip); + } } diff --git a/crates/engine/util/Cargo.toml b/crates/engine/util/Cargo.toml index 58ee6ac255..ca7382c192 100644 --- a/crates/engine/util/Cargo.toml +++ b/crates/engine/util/Cargo.toml @@ -16,7 +16,7 @@ reth-primitives-traits.workspace = true reth-errors.workspace = true reth-chainspec.workspace = true reth-fs-util.workspace = true -reth-engine-primitives.workspace = true +reth-engine-primitives = { workspace = true, features = ["std"] } reth-engine-tree.workspace = true reth-evm.workspace = true reth-revm.workspace = true diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 7d84afc6d5..d247b2364f 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -253,7 +253,7 @@ where { // Ensure next payload is valid. let next_block = - payload_validator.ensure_well_formed_payload(next_payload).map_err(RethError::msg)?; + payload_validator.convert_payload_to_block(next_payload).map_err(RethError::msg)?; // Fetch reorg target block depending on its depth and its parent. 
let mut previous_hash = next_block.parent_hash(); diff --git a/crates/era-downloader/Cargo.toml b/crates/era-downloader/Cargo.toml index 54ae581813..190e533356 100644 --- a/crates/era-downloader/Cargo.toml +++ b/crates/era-downloader/Cargo.toml @@ -15,6 +15,7 @@ alloy-primitives.workspace = true # reth reth-fs-util.workspace = true +reth-era.workspace = true # http bytes.workspace = true diff --git a/crates/era-downloader/src/client.rs b/crates/era-downloader/src/client.rs index 36ed93e1e2..f8ffd55025 100644 --- a/crates/era-downloader/src/client.rs +++ b/crates/era-downloader/src/client.rs @@ -3,14 +3,18 @@ use bytes::Bytes; use eyre::{eyre, OptionExt}; use futures_util::{stream::StreamExt, Stream, TryStreamExt}; use reqwest::{Client, IntoUrl, Url}; +use reth_era::common::file_ops::EraFileType; use sha2::{Digest, Sha256}; use std::{future::Future, path::Path, str::FromStr}; use tokio::{ fs::{self, File}, io::{self, AsyncBufReadExt, AsyncRead, AsyncReadExt, AsyncWriteExt}, - join, try_join, + try_join, }; +/// Downloaded index page filename +const INDEX_HTML_FILE: &str = "index.html"; + /// Accesses the network over HTTP. pub trait HttpClient { /// Makes an HTTP GET request to `url`. Returns a stream of response body bytes. @@ -41,6 +45,7 @@ pub struct EraClient { client: Http, url: Url, folder: Box, + era_type: EraFileType, } impl EraClient { @@ -48,7 +53,8 @@ impl EraClient { /// Constructs [`EraClient`] using `client` to download from `url` into `folder`. pub fn new(client: Http, url: Url, folder: impl Into>) -> Self { - Self { client, url, folder: folder.into() } + let era_type = EraFileType::from_url(url.as_str()); + Self { client, url, folder: folder.into(), era_type } } /// Performs a GET request on `url` and stores the response body into a file located within @@ -92,9 +98,11 @@ impl EraClient { } } - self.assert_checksum(number, actual_checksum?) - .await - .map_err(|e| eyre!("{e} for {file_name} at {}", path.display()))?; + if self.era_type == EraFileType::Era1 { + self.assert_checksum(number, actual_checksum?) + .await + .map_err(|e| eyre!("{e} for {file_name} at {}", path.display()))?; + } } Ok(path.into_boxed_path()) @@ -145,9 +153,11 @@ impl EraClient { pub async fn files_count(&self) -> usize { let mut count = 0usize; + let file_extension = self.era_type.extension().trim_start_matches('.'); + if let Ok(mut dir) = fs::read_dir(&self.folder).await { while let Ok(Some(entry)) = dir.next_entry().await { - if entry.path().extension() == Some("era1".as_ref()) { + if entry.path().extension() == Some(file_extension.as_ref()) { count += 1; } } @@ -156,46 +166,35 @@ impl EraClient { count } - /// Fetches the list of ERA1 files from `url` and stores it in a file located within `folder`. + /// Fetches the list of ERA1/ERA files from `url` and stores it in a file located within + /// `folder`. + /// For era files, checksum.txt file does not exist, so the checksum verification is + /// skipped. 
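Only `.era1` downloads are checked against `checksums.txt`; a digest for that comparison can be computed by streaming the file through SHA-256. A minimal sketch, assuming the `sha2`, `tokio` and `eyre` dependencies already used in this crate, with `sha256_hex` as an illustrative name rather than the crate's own `checksum` helper:

use sha2::{Digest, Sha256};
use tokio::{fs::File, io::AsyncReadExt};

/// Hash a downloaded file in chunks and return the lowercase hex digest.
async fn sha256_hex(path: &std::path::Path) -> eyre::Result<String> {
    let mut file = File::open(path).await?;
    let mut hasher = Sha256::new();
    let mut buf = [0u8; 8192];
    loop {
        let n = file.read(&mut buf).await?;
        if n == 0 {
            break;
        }
        hasher.update(&buf[..n]);
    }
    Ok(hasher.finalize().iter().map(|b| format!("{b:02x}")).collect())
}

The resulting hex string is what gets compared against the expected entry parsed from `checksums.txt`; plain `.era` hosts publish no such file, which is why the verification paths above are gated on `EraFileType::Era1`.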
pub async fn fetch_file_list(&self) -> eyre::Result<()> { - let (mut index, mut checksums) = try_join!( - self.client.get(self.url.clone()), - self.client.get(self.url.clone().join(Self::CHECKSUMS)?), - )?; - - let index_path = self.folder.to_path_buf().join("index.html"); + let index_path = self.folder.to_path_buf().join(INDEX_HTML_FILE); let checksums_path = self.folder.to_path_buf().join(Self::CHECKSUMS); - let (mut index_file, mut checksums_file) = - try_join!(File::create(&index_path), File::create(&checksums_path))?; - - loop { - let (index, checksums) = join!(index.next(), checksums.next()); - let (index, checksums) = (index.transpose()?, checksums.transpose()?); - - if index.is_none() && checksums.is_none() { - break; - } - let index_file = &mut index_file; - let checksums_file = &mut checksums_file; - + // Only for era1, we download also checksums file + if self.era_type == EraFileType::Era1 { + let checksums_url = self.url.join(Self::CHECKSUMS)?; try_join!( - async move { - if let Some(index) = index { - io::copy(&mut index.as_ref(), index_file).await?; - } - Ok::<(), eyre::Error>(()) - }, - async move { - if let Some(checksums) = checksums { - io::copy(&mut checksums.as_ref(), checksums_file).await?; - } - Ok::<(), eyre::Error>(()) - }, + self.download_file_to_path(self.url.clone(), &index_path), + self.download_file_to_path(checksums_url, &checksums_path) )?; + } else { + // Download only index file + self.download_file_to_path(self.url.clone(), &index_path).await?; } - let file = File::open(&index_path).await?; + // Parse and extract era filenames from index.html + self.extract_era_filenames(&index_path).await?; + + Ok(()) + } + + /// Extracts ERA filenames from `index.html` and writes them to the index file + async fn extract_era_filenames(&self, index_path: &Path) -> eyre::Result<()> { + let file = File::open(index_path).await?; let reader = io::BufReader::new(file); let mut lines = reader.lines(); @@ -203,21 +202,36 @@ impl EraClient { let file = File::create(&path).await?; let mut writer = io::BufWriter::new(file); + let ext = self.era_type.extension(); + let ext_len = ext.len(); + while let Some(line) = lines.next_line().await? { - if let Some(j) = line.find(".era1") && + if let Some(j) = line.find(ext) && let Some(i) = line[..j].rfind(|c: char| !c.is_alphanumeric() && c != '-') { - let era = &line[i + 1..j + 5]; + let era = &line[i + 1..j + ext_len]; writer.write_all(era.as_bytes()).await?; writer.write_all(b"\n").await?; } } + writer.flush().await?; + Ok(()) + } + + // Helper to download a file to a specified path + async fn download_file_to_path(&self, url: Url, path: &Path) -> eyre::Result<()> { + let mut stream = self.client.get(url).await?; + let mut file = File::create(path).await?; + + while let Some(item) = stream.next().await.transpose()? { + io::copy(&mut item.as_ref(), &mut file).await?; + } Ok(()) } - /// Returns ERA1 file name that is ordered at `number`. + /// Returns ERA1/ERA file name that is ordered at `number`. 
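The index parsing above locates the expected extension in each line of the downloaded listing and then scans left for the first character that cannot appear in a file name. The same logic as a self-contained helper (illustrative name, not the crate's API):

/// Pull a name such as `mainnet-00000-4b363db9.era` out of one line of a
/// directory-listing page, given the expected extension (".era" or ".era1").
fn extract_file_name(line: &str, ext: &str) -> Option<String> {
    let j = line.find(ext)?;
    // Names consist of alphanumerics and '-', so anything else marks the left boundary.
    let i = line[..j].rfind(|c: char| !c.is_alphanumeric() && c != '-')?;
    Some(line[i + 1..j + ext.len()].to_owned())
}

// extract_file_name(r#"<a href="mainnet-00000-4b363db9.era">"#, ".era")
//     == Some("mainnet-00000-4b363db9.era".to_owned())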
pub async fn number_to_file_name(&self, number: usize) -> eyre::Result> { let path = self.folder.to_path_buf().join("index"); let file = File::open(&path).await?; @@ -235,18 +249,23 @@ impl EraClient { match File::open(path).await { Ok(file) => { - let number = self - .file_name_to_number(name) - .ok_or_else(|| eyre!("Cannot parse ERA number from {name}"))?; + if self.era_type == EraFileType::Era1 { + let number = self + .file_name_to_number(name) + .ok_or_else(|| eyre!("Cannot parse ERA number from {name}"))?; - let actual_checksum = checksum(file).await?; - let is_verified = self.verify_checksum(number, actual_checksum).await?; + let actual_checksum = checksum(file).await?; + let is_verified = self.verify_checksum(number, actual_checksum).await?; - if !is_verified { - fs::remove_file(path).await?; + if !is_verified { + fs::remove_file(path).await?; + } + + Ok(is_verified) + } else { + // For era files, we skip checksum verification, as checksum.txt does not exist + Ok(true) } - - Ok(is_verified) } Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(false), Err(e) => Err(e)?, diff --git a/crates/era-downloader/src/fs.rs b/crates/era-downloader/src/fs.rs index 19532f01cf..eaab1f3f4b 100644 --- a/crates/era-downloader/src/fs.rs +++ b/crates/era-downloader/src/fs.rs @@ -12,6 +12,8 @@ pub fn read_dir( start_from: BlockNumber, ) -> eyre::Result> + Send + Sync + 'static + Unpin> { let mut checksums = None; + + // read all the files in the given dir and also read the checksums file let mut entries = fs::read_dir(dir)? .filter_map(|entry| { (|| { @@ -29,6 +31,7 @@ pub fn read_dir( return Ok(Some((number, path.into_boxed_path()))); } } + if path.file_name() == Some("checksums.txt".as_ref()) { let file = fs::open(path)?; let reader = io::BufReader::new(file); @@ -43,9 +46,15 @@ pub fn read_dir( .collect::>>()?; let mut checksums = checksums.ok_or_eyre("Missing file `checksums.txt` in the `dir`")?; + let start_index = start_from as usize / BLOCKS_PER_FILE; + for _ in 0..start_index { + // skip the first entries in the checksums iterator so that both iters align + checksums.next().transpose()?.ok_or_eyre("Got less checksums than ERA files")?; + } + entries.sort_by(|(left, _), (right, _)| left.cmp(right)); - Ok(stream::iter(entries.into_iter().skip(start_from as usize / BLOCKS_PER_FILE).map( + Ok(stream::iter(entries.into_iter().skip_while(move |(n, _)| *n < start_index).map( move |(_, path)| { let expected_checksum = checksums.next().transpose()?.ok_or_eyre("Got less checksums than ERA files")?; diff --git a/crates/era-downloader/tests/it/checksums.rs b/crates/era-downloader/tests/it/checksums.rs index 630cbece5d..500b7ed338 100644 --- a/crates/era-downloader/tests/it/checksums.rs +++ b/crates/era-downloader/tests/it/checksums.rs @@ -60,22 +60,22 @@ impl HttpClient for FailingClient { ) -> eyre::Result> + Send + Sync + Unpin> { let url = url.into_url().unwrap(); - Ok(futures::stream::iter(vec![Ok(match url.to_string().as_str() { - "https://mainnet.era1.nimbus.team/" => Bytes::from_static(crate::NIMBUS), - "https://era1.ethportal.net/" => Bytes::from_static(crate::ETH_PORTAL), - "https://era.ithaca.xyz/era1/index.html" => Bytes::from_static(crate::ITHACA), + Ok(futures::stream::iter(vec![Ok(match url.as_str() { + "https://mainnet.era1.nimbus.team/" => Bytes::from_static(crate::ERA1_NIMBUS), + "https://era1.ethportal.net/" => Bytes::from_static(crate::ERA1_ETH_PORTAL), + "https://era.ithaca.xyz/era1/index.html" => Bytes::from_static(crate::ERA1_ITHACA), "https://mainnet.era1.nimbus.team/checksums.txt" 
| "https://era1.ethportal.net/checksums.txt" | "https://era.ithaca.xyz/era1/checksums.txt" => Bytes::from_static(CHECKSUMS), "https://era1.ethportal.net/mainnet-00000-5ec1ffb8.era1" | "https://mainnet.era1.nimbus.team/mainnet-00000-5ec1ffb8.era1" | "https://era.ithaca.xyz/era1/mainnet-00000-5ec1ffb8.era1" => { - Bytes::from_static(crate::MAINNET_0) + Bytes::from_static(crate::ERA1_MAINNET_0) } "https://era1.ethportal.net/mainnet-00001-a5364e9a.era1" | "https://mainnet.era1.nimbus.team/mainnet-00001-a5364e9a.era1" | "https://era.ithaca.xyz/era1/mainnet-00001-a5364e9a.era1" => { - Bytes::from_static(crate::MAINNET_1) + Bytes::from_static(crate::ERA1_MAINNET_1) } v => unimplemented!("Unexpected URL \"{v}\""), })])) diff --git a/crates/era-downloader/tests/it/download.rs b/crates/era-downloader/tests/it/download.rs index e7756bfede..bf6b956c69 100644 --- a/crates/era-downloader/tests/it/download.rs +++ b/crates/era-downloader/tests/it/download.rs @@ -10,7 +10,7 @@ use test_case::test_case; #[test_case("https://era1.ethportal.net/"; "ethportal")] #[test_case("https://era.ithaca.xyz/era1/index.html"; "ithaca")] #[tokio::test] -async fn test_getting_file_url_after_fetching_file_list(url: &str) { +async fn test_getting_era1_file_url_after_fetching_file_list(url: &str) { let base_url = Url::from_str(url).unwrap(); let folder = tempdir().unwrap(); let folder = folder.path(); @@ -48,3 +48,19 @@ async fn test_getting_file_after_fetching_file_list(url: &str) { let actual_count = client.files_count().await; assert_eq!(actual_count, expected_count); } + +#[test_case("https://mainnet.era.nimbus.team/"; "nimbus")] +#[tokio::test] +async fn test_getting_era_file_url_after_fetching_file_list(url: &str) { + let base_url = Url::from_str(url).unwrap(); + let folder = tempdir().unwrap(); + let folder = folder.path(); + let client = EraClient::new(StubClient, base_url.clone(), folder); + + client.fetch_file_list().await.unwrap(); + + let expected_url = Some(base_url.join("mainnet-00000-4b363db9.era").unwrap()); + let actual_url = client.url(0).await.unwrap(); + + assert_eq!(actual_url, expected_url); +} diff --git a/crates/era-downloader/tests/it/list.rs b/crates/era-downloader/tests/it/list.rs index 3940fa5d8b..3da10102c3 100644 --- a/crates/era-downloader/tests/it/list.rs +++ b/crates/era-downloader/tests/it/list.rs @@ -10,7 +10,7 @@ use test_case::test_case; #[test_case("https://era1.ethportal.net/"; "ethportal")] #[test_case("https://era.ithaca.xyz/era1/index.html"; "ithaca")] #[tokio::test] -async fn test_getting_file_name_after_fetching_file_list(url: &str) { +async fn test_getting_era1_file_name_after_fetching_file_list(url: &str) { let url = Url::from_str(url).unwrap(); let folder = tempdir().unwrap(); let folder = folder.path(); @@ -23,3 +23,19 @@ async fn test_getting_file_name_after_fetching_file_list(url: &str) { assert_eq!(actual, expected); } + +#[test_case("https://mainnet.era.nimbus.team/"; "nimbus")] +#[tokio::test] +async fn test_getting_era_file_name_after_fetching_file_list(url: &str) { + let url = Url::from_str(url).unwrap(); + let folder = tempdir().unwrap(); + let folder = folder.path(); + let client = EraClient::new(StubClient, url, folder); + + client.fetch_file_list().await.unwrap(); + + let actual = client.number_to_file_name(500).await.unwrap(); + let expected = Some("mainnet-00500-87109713.era".to_owned()); + + assert_eq!(actual, expected); +} diff --git a/crates/era-downloader/tests/it/main.rs b/crates/era-downloader/tests/it/main.rs index 526d3885bf..fb5f33ab71 100644 --- 
a/crates/era-downloader/tests/it/main.rs +++ b/crates/era-downloader/tests/it/main.rs @@ -13,12 +13,20 @@ use futures::Stream; use reqwest::IntoUrl; use reth_era_downloader::HttpClient; -pub(crate) const NIMBUS: &[u8] = include_bytes!("../res/nimbus.html"); -pub(crate) const ETH_PORTAL: &[u8] = include_bytes!("../res/ethportal.html"); -pub(crate) const ITHACA: &[u8] = include_bytes!("../res/ithaca.html"); -pub(crate) const CHECKSUMS: &[u8] = include_bytes!("../res/checksums.txt"); -pub(crate) const MAINNET_0: &[u8] = include_bytes!("../res/mainnet-00000-5ec1ffb8.era1"); -pub(crate) const MAINNET_1: &[u8] = include_bytes!("../res/mainnet-00001-a5364e9a.era1"); +pub(crate) const ERA1_NIMBUS: &[u8] = include_bytes!("../res/era1-nimbus.html"); +pub(crate) const ERA1_ETH_PORTAL: &[u8] = include_bytes!("../res/ethportal.html"); +pub(crate) const ERA1_ITHACA: &[u8] = include_bytes!("../res/era1-ithaca.html"); +pub(crate) const ERA1_CHECKSUMS: &[u8] = include_bytes!("../res/checksums.txt"); +pub(crate) const ERA1_MAINNET_0: &[u8] = + include_bytes!("../res/era1-files/mainnet-00000-5ec1ffb8.era1"); +pub(crate) const ERA1_MAINNET_1: &[u8] = + include_bytes!("../res/era1-files/mainnet-00001-a5364e9a.era1"); + +pub(crate) const ERA_NIMBUS: &[u8] = include_bytes!("../res/era-nimbus.html"); +pub(crate) const ERA_MAINNET_0: &[u8] = + include_bytes!("../res/era-files/mainnet-00000-4b363db9.era"); +pub(crate) const ERA_MAINNET_1: &[u8] = + include_bytes!("../res/era-files/mainnet-00001-40cf2f3c.era"); /// An HTTP client pre-programmed with canned answers to received calls. /// Panics if it receives an unknown call. @@ -32,23 +40,33 @@ impl HttpClient for StubClient { ) -> eyre::Result> + Send + Sync + Unpin> { let url = url.into_url().unwrap(); - Ok(futures::stream::iter(vec![Ok(match url.to_string().as_str() { - "https://mainnet.era1.nimbus.team/" => Bytes::from_static(NIMBUS), - "https://era1.ethportal.net/" => Bytes::from_static(ETH_PORTAL), - "https://era.ithaca.xyz/era1/index.html" => Bytes::from_static(ITHACA), + Ok(futures::stream::iter(vec![Ok(match url.as_str() { + // Era1 urls + "https://mainnet.era1.nimbus.team/" => Bytes::from_static(ERA1_NIMBUS), + "https://era1.ethportal.net/" => Bytes::from_static(ERA1_ETH_PORTAL), + "https://era.ithaca.xyz/era1/index.html" => Bytes::from_static(ERA1_ITHACA), "https://mainnet.era1.nimbus.team/checksums.txt" | "https://era1.ethportal.net/checksums.txt" | - "https://era.ithaca.xyz/era1/checksums.txt" => Bytes::from_static(CHECKSUMS), + "https://era.ithaca.xyz/era1/checksums.txt" => Bytes::from_static(ERA1_CHECKSUMS), "https://era1.ethportal.net/mainnet-00000-5ec1ffb8.era1" | "https://mainnet.era1.nimbus.team/mainnet-00000-5ec1ffb8.era1" | "https://era.ithaca.xyz/era1/mainnet-00000-5ec1ffb8.era1" => { - Bytes::from_static(MAINNET_0) + Bytes::from_static(ERA1_MAINNET_0) } "https://era1.ethportal.net/mainnet-00001-a5364e9a.era1" | "https://mainnet.era1.nimbus.team/mainnet-00001-a5364e9a.era1" | "https://era.ithaca.xyz/era1/mainnet-00001-a5364e9a.era1" => { - Bytes::from_static(MAINNET_1) + Bytes::from_static(ERA1_MAINNET_1) } + // Era urls + "https://mainnet.era.nimbus.team/" => Bytes::from_static(ERA_NIMBUS), + "https://mainnet.era.nimbus.team/mainnet-00000-4b363db9.era" => { + Bytes::from_static(ERA_MAINNET_0) + } + "https://mainnet.era.nimbus.team/mainnet-00001-40cf2f3c.era" => { + Bytes::from_static(ERA_MAINNET_1) + } + v => unimplemented!("Unexpected URL \"{v}\""), })])) } diff --git a/crates/era-downloader/tests/it/stream.rs 
b/crates/era-downloader/tests/it/stream.rs index eb7dc2da72..878e580789 100644 --- a/crates/era-downloader/tests/it/stream.rs +++ b/crates/era-downloader/tests/it/stream.rs @@ -34,7 +34,7 @@ async fn test_streaming_files_after_fetching_file_list(url: &str) { } #[tokio::test] -async fn test_streaming_files_after_fetching_file_list_into_missing_folder_fails() { +async fn test_streaming_era1_files_after_fetching_file_list_into_missing_folder_fails() { let base_url = Url::from_str("https://era.ithaca.xyz/era1/index.html").unwrap(); let folder = tempdir().unwrap().path().to_owned(); let client = EraClient::new(StubClient, base_url, folder); @@ -49,3 +49,20 @@ async fn test_streaming_files_after_fetching_file_list_into_missing_folder_fails assert_eq!(actual_error, expected_error); } + +#[tokio::test] +async fn test_streaming_era_files_after_fetching_file_list_into_missing_folder_fails() { + let base_url = Url::from_str("https://mainnet.era.nimbus.team").unwrap(); //TODO: change once ithaca host era files + let folder = tempdir().unwrap().path().to_owned(); + let client = EraClient::new(StubClient, base_url, folder); + + let mut stream = EraStream::new( + client, + EraStreamConfig::default().with_max_files(2).with_max_concurrent_downloads(1), + ); + + let actual_error = stream.next().await.unwrap().unwrap_err().to_string(); + let expected_error = "No such file or directory (os error 2)".to_owned(); + + assert_eq!(actual_error, expected_error); +} diff --git a/crates/era-downloader/tests/res/era-files/mainnet-00000-4b363db9.era b/crates/era-downloader/tests/res/era-files/mainnet-00000-4b363db9.era new file mode 100644 index 0000000000..f2ad6c76f0 --- /dev/null +++ b/crates/era-downloader/tests/res/era-files/mainnet-00000-4b363db9.era @@ -0,0 +1 @@ +c diff --git a/crates/era-downloader/tests/res/era-files/mainnet-00001-40cf2f3c.era b/crates/era-downloader/tests/res/era-files/mainnet-00001-40cf2f3c.era new file mode 100644 index 0000000000..4bcfe98e64 --- /dev/null +++ b/crates/era-downloader/tests/res/era-files/mainnet-00001-40cf2f3c.era @@ -0,0 +1 @@ +d diff --git a/crates/era-downloader/tests/res/era-nimbus.html b/crates/era-downloader/tests/res/era-nimbus.html new file mode 100644 index 0000000000..3420275cc7 --- /dev/null +++ b/crates/era-downloader/tests/res/era-nimbus.html @@ -0,0 +1,1593 @@ + +Index of / + +

+Index of /
+../
+mainnet-00000-4b363db9.era                         19-Apr-2023 08:01             1810141
+mainnet-00001-40cf2f3c.era                         19-Apr-2023 08:01            18580262
+mainnet-00002-74a3850f.era                         19-Apr-2023 08:01            19565485
+mainnet-00003-76b05bfe.era                         19-Apr-2023 08:01            18660406
+mainnet-00004-f82d21ce.era                         19-Apr-2023 08:01            20093454
+mainnet-00005-4ec633af.era                         19-Apr-2023 08:01            19983422
+mainnet-00006-eef40204.era                         19-Apr-2023 08:01            20641097
+mainnet-00007-ce8f6ccd.era                         19-Apr-2023 08:01            21459536
+mainnet-00008-f0d27f1e.era                         19-Apr-2023 08:01            22035110
+mainnet-00009-9ba3d167.era                         19-Apr-2023 08:01            22813416
+mainnet-00010-fde55062.era                         19-Apr-2023 08:01            23457891
+mainnet-00011-76e38a38.era                         19-Apr-2023 08:01            23862617
+mainnet-00012-7ef05aec.era                         19-Apr-2023 08:01            27179700
+mainnet-00013-691af8c1.era                         19-Apr-2023 08:01            24659214
+mainnet-00014-c17977ec.era                         19-Apr-2023 08:01            25148747
+mainnet-00015-d60a0e74.era                         19-Apr-2023 08:01            25855274
+mainnet-00016-36da78e2.era                         19-Apr-2023 08:01            26889835
+mainnet-00017-e3026c33.era                         19-Apr-2023 08:01            28599421
+mainnet-00018-8fadc278.era                         19-Apr-2023 08:01            29673899
+mainnet-00019-c3c42b77.era                         19-Apr-2023 08:01            30387949
+mainnet-00020-e49f5ec8.era                         19-Apr-2023 08:01            32311398
+mainnet-00021-98fcd379.era                         19-Apr-2023 08:01            31330610
+mainnet-00022-867c2320.era                         19-Apr-2023 08:01            31229986
+mainnet-00023-bbbf8551.era                         19-Apr-2023 08:01            30966415
+mainnet-00024-7e297816.era                         19-Apr-2023 08:01            31298813
+mainnet-00025-266413c7.era                         19-Apr-2023 08:01            32582497
+mainnet-00026-93997190.era                         19-Apr-2023 08:01            31353508
+mainnet-00027-c3e74ac5.era                         19-Apr-2023 08:01            32793045
+mainnet-00028-d390d828.era                         19-Apr-2023 08:01            32716855
+mainnet-00029-053c8d4f.era                         19-Apr-2023 08:01            35018552
+mainnet-00030-ab3e346d.era                         19-Apr-2023 08:01            33153620
+mainnet-00031-6bcae87f.era                         19-Apr-2023 08:01            33928188
+mainnet-00032-22d32f3f.era                         19-Apr-2023 08:01            34686488
+mainnet-00033-e7826225.era                         19-Apr-2023 08:01            35989306
+mainnet-00034-f464bad4.era                         19-Apr-2023 08:01            36129694
+mainnet-00035-e45c1e3e.era                         19-Apr-2023 08:01            36772095
+mainnet-00036-6a92d280.era                         19-Apr-2023 08:01            36576585
+mainnet-00037-9fe02a0d.era                         19-Apr-2023 08:01            38458478
+mainnet-00038-4ec47bd9.era                         19-Apr-2023 08:01            38371832
+mainnet-00039-dda536eb.era                         19-Apr-2023 08:01            40574896
+mainnet-00040-e67676e4.era                         19-Apr-2023 08:01            42854476
+mainnet-00041-20ca9f8a.era                         19-Apr-2023 08:01            41948268
+mainnet-00042-5c04d415.era                         19-Apr-2023 08:01            42616685
+mainnet-00043-98c36e1d.era                         19-Apr-2023 08:01            43400431
+mainnet-00044-d8066c38.era                         19-Apr-2023 08:01            47470998
+mainnet-00045-c8915969.era                         19-Apr-2023 08:01            55899512
+mainnet-00046-078aed7c.era                         19-Apr-2023 08:01            53341912
+mainnet-00047-8ad00df4.era                         19-Apr-2023 08:01            46838488
+mainnet-00048-d1fc0d45.era                         19-Apr-2023 08:01            47427770
+mainnet-00049-2d9b3eca.era                         19-Apr-2023 08:01            48335222
+mainnet-00050-649476e4.era                         19-Apr-2023 08:01            49923077
+mainnet-00051-7b310497.era                         19-Apr-2023 08:01            56562914
+mainnet-00052-7cbff20e.era                         19-Apr-2023 08:01            56649533
+mainnet-00053-9e690c2e.era                         19-Apr-2023 08:01            58669099
+mainnet-00054-f44d18d1.era                         19-Apr-2023 08:01            53642199
+mainnet-00055-3cdac49d.era                         19-Apr-2023 08:01            54674906
+mainnet-00056-9fbb8fc4.era                         19-Apr-2023 08:01            61488692
+mainnet-00057-93102c5a.era                         19-Apr-2023 08:01            63798327
+mainnet-00058-9af69b68.era                         19-Apr-2023 08:01            65633837
+mainnet-00059-c0b2123d.era                         19-Apr-2023 08:01            64031147
+mainnet-00060-202ccf6f.era                         19-Apr-2023 08:01            60016283
+mainnet-00061-bf7adcba.era                         19-Apr-2023 08:01            63529457
+mainnet-00062-54e19978.era                         19-Apr-2023 08:01            69520094
+mainnet-00063-1b090109.era                         19-Apr-2023 08:01            64288584
+mainnet-00064-07dbbdfc.era                         19-Apr-2023 08:01            62462786
+mainnet-00065-3d035c95.era                         19-Apr-2023 08:01            69824227
+mainnet-00066-c9d23f5b.era                         19-Apr-2023 08:01            69701623
+mainnet-00067-2b9c4ff2.era                         19-Apr-2023 08:01            64904839
+mainnet-00068-6c0c35c0.era                         19-Apr-2023 08:01            68077958
+mainnet-00069-fd2e135f.era                         19-Apr-2023 08:01            72932779
+mainnet-00070-df464014.era                         19-Apr-2023 08:01            73158530
+mainnet-00071-b0d5e5de.era                         19-Apr-2023 08:01            72433228
+mainnet-00072-8c994e45.era                         19-Apr-2023 08:01            66222825
+mainnet-00073-5913138f.era                         19-Apr-2023 08:01            67531788
+mainnet-00074-77c2f86c.era                         19-Apr-2023 08:01            66923751
+mainnet-00075-ea3f8b6a.era                         19-Apr-2023 08:01            64831808
+mainnet-00076-105da067.era                         19-Apr-2023 08:01            66729877
+mainnet-00077-7a1c1dbe.era                         19-Apr-2023 08:01            70441150
+mainnet-00078-f43226a1.era                         19-Apr-2023 08:01            70727253
+mainnet-00079-8065f8f5.era                         19-Apr-2023 08:01            67868774
+mainnet-00080-fc93a1b8.era                         19-Apr-2023 08:01            75110643
+mainnet-00081-64301fb7.era                         19-Apr-2023 08:01            71234420
+mainnet-00082-946e696f.era                         19-Apr-2023 08:01            68028413
+mainnet-00083-6e882135.era                         19-Apr-2023 08:01            65251306
+mainnet-00084-ea2f03fa.era                         19-Apr-2023 08:01            63653815
+mainnet-00085-400b6150.era                         19-Apr-2023 08:01            72612475
+mainnet-00086-ff3c5850.era                         19-Apr-2023 08:01            76913640
+mainnet-00087-0f2df0f3.era                         19-Apr-2023 08:01            72822474
+mainnet-00088-a5af5c4d.era                         19-Apr-2023 08:01            69817765
+mainnet-00089-779c474c.era                         19-Apr-2023 08:01            79883924
+mainnet-00090-56a5eb95.era                         19-Apr-2023 08:01            77689567
+mainnet-00091-c8820f5d.era                         19-Apr-2023 08:01            76266230
+mainnet-00092-8071bfe1.era                         19-Apr-2023 08:01            70279875
+mainnet-00093-162845ca.era                         19-Apr-2023 08:01            68271473
+mainnet-00094-ce2b7c1c.era                         19-Apr-2023 08:01            63122737
+mainnet-00095-89870019.era                         19-Apr-2023 08:01            63176783
+mainnet-00096-22d3666f.era                         19-Apr-2023 08:01            62245079
+mainnet-00097-0a12cee7.era                         19-Apr-2023 08:01            66697617
+mainnet-00098-6a323bf4.era                         19-Apr-2023 08:01            72118178
+mainnet-00099-d78a180f.era                         19-Apr-2023 08:01            75283283
+mainnet-00100-9e740416.era                         19-Apr-2023 08:01            79242147
+mainnet-00101-a2ca13d2.era                         19-Apr-2023 08:01            72687766
+mainnet-00102-42aba82e.era                         19-Apr-2023 08:01            66471367
+mainnet-00103-8b284964.era                         19-Apr-2023 08:01            58583319
+mainnet-00104-84866bce.era                         19-Apr-2023 08:01            60069517
+mainnet-00105-5973052c.era                         19-Apr-2023 08:01            57408918
+mainnet-00106-ec3b5974.era                         19-Apr-2023 08:01            60351153
+mainnet-00107-25cb4b21.era                         19-Apr-2023 08:01            60967209
+mainnet-00108-96efb79f.era                         19-Apr-2023 08:01            57472500
+mainnet-00109-40f9ed5f.era                         19-Apr-2023 08:01            63856387
+mainnet-00110-fc30c4dc.era                         19-Apr-2023 08:01            58268620
+mainnet-00111-31fa9e03.era                         19-Apr-2023 08:01            66131359
+mainnet-00112-b9c9c501.era                         19-Apr-2023 08:01            67909807
+mainnet-00113-6b00dbc2.era                         19-Apr-2023 08:01            70192442
+mainnet-00114-40543d05.era                         19-Apr-2023 08:01            67103740
+mainnet-00115-fe01e028.era                         19-Apr-2023 08:01            69615332
+mainnet-00116-9321f262.era                         19-Apr-2023 08:01            70471612
+mainnet-00117-eae18b3b.era                         19-Apr-2023 08:01            68864538
+mainnet-00118-ff07f2e3.era                         19-Apr-2023 08:01            66025557
+mainnet-00119-2d52ef48.era                         19-Apr-2023 08:01            63541726
+mainnet-00120-29f537a3.era                         19-Apr-2023 08:01            64973381
+mainnet-00121-0e6f3faa.era                         19-Apr-2023 08:01            65723355
+mainnet-00122-c8ea3c9f.era                         19-Apr-2023 08:01            64333665
+mainnet-00123-a05d338d.era                         19-Apr-2023 08:01            64879032
+mainnet-00124-961a3ce9.era                         19-Apr-2023 08:01            66202483
+mainnet-00125-5c5e9f9d.era                         19-Apr-2023 08:01            67110402
+mainnet-00126-0920fd3e.era                         19-Apr-2023 08:01            64992073
+mainnet-00127-348c85d7.era                         19-Apr-2023 08:01            68782028
+mainnet-00128-235f3c5d.era                         19-Apr-2023 08:01            69591376
+mainnet-00129-850f303e.era                         19-Apr-2023 08:01            69899092
+mainnet-00130-0f5754a0.era                         19-Apr-2023 08:01            68796462
+mainnet-00131-30551905.era                         19-Apr-2023 08:01            68312081
+mainnet-00132-15ca033c.era                         19-Apr-2023 08:01            71930760
+mainnet-00133-44e4d781.era                         19-Apr-2023 08:01            64469039
+mainnet-00134-24bb219e.era                         19-Apr-2023 08:01            67357374
+mainnet-00135-78db4c95.era                         19-Apr-2023 08:01            67484778
+mainnet-00136-154a8fe6.era                         19-Apr-2023 08:01            64252529
+mainnet-00137-2a63e504.era                         19-Apr-2023 08:01            66294089
+mainnet-00138-8f5c2cc7.era                         19-Apr-2023 08:01            69832669
+mainnet-00139-5188cfb6.era                         19-Apr-2023 08:01            70931933
+mainnet-00140-68d3319a.era                         19-Apr-2023 08:01            67676267
+mainnet-00141-9ab68992.era                         19-Apr-2023 08:01            68506361
+mainnet-00142-9daa1e68.era                         19-Apr-2023 08:01            67403565
+mainnet-00143-ea65b368.era                         19-Apr-2023 08:01            68620832
+mainnet-00144-f0e68195.era                         19-Apr-2023 08:01            70439651
+mainnet-00145-324a9fd9.era                         19-Apr-2023 08:01            74268694
+mainnet-00146-e61beaac.era                         19-Apr-2023 08:02            74104387
+mainnet-00147-3400eac8.era                         19-Apr-2023 08:02            74725735
+mainnet-00148-f2ffe12a.era                         19-Apr-2023 08:02            77682938
+mainnet-00149-941197c0.era                         19-Apr-2023 08:02            79924748
+mainnet-00150-e58cc6d1.era                         19-Apr-2023 08:02            77128487
+mainnet-00151-8b035db0.era                         19-Apr-2023 08:02            73665416
+mainnet-00152-7a1b47f8.era                         19-Apr-2023 08:02            75422717
+mainnet-00153-5b9f5d14.era                         19-Apr-2023 08:02            75991256
+mainnet-00154-5506aec2.era                         19-Apr-2023 08:02            76352743
+mainnet-00155-b187606b.era                         19-Apr-2023 08:02            80814087
+mainnet-00156-d1db3a39.era                         19-Apr-2023 08:02            81125701
+mainnet-00157-cb4a35ef.era                         19-Apr-2023 08:02            80031716
+mainnet-00158-86f4ee82.era                         19-Apr-2023 08:02            78296940
+mainnet-00159-471ea44e.era                         19-Apr-2023 08:02            80665182
+mainnet-00160-64fd0bc0.era                         19-Apr-2023 08:02            81653393
+mainnet-00161-59cfce4a.era                         19-Apr-2023 08:02            83520494
+mainnet-00162-b625abf6.era                         19-Apr-2023 08:02            81405682
+mainnet-00163-6a322a25.era                         19-Apr-2023 08:02            81356654
+mainnet-00164-b21c0ea4.era                         19-Apr-2023 08:02            77776375
+mainnet-00165-60fe50c9.era                         19-Apr-2023 08:02            81372959
+mainnet-00166-37f50120.era                         19-Apr-2023 08:02            81159848
+mainnet-00167-d06b616a.era                         19-Apr-2023 08:02            81433352
+mainnet-00168-591f41bd.era                         19-Apr-2023 08:02            81523847
+mainnet-00169-d6df2275.era                         19-Apr-2023 08:02            83528707
+mainnet-00170-e1aa3c1d.era                         19-Apr-2023 08:02            79656576
+mainnet-00171-b38e1a6b.era                         19-Apr-2023 08:02            81003140
+mainnet-00172-2c5d3b59.era                         19-Apr-2023 08:02            83870057
+mainnet-00173-ea310823.era                         19-Apr-2023 08:02            86845777
+mainnet-00174-f1dffe88.era                         19-Apr-2023 08:02            90177937
+mainnet-00175-b7a9160e.era                         19-Apr-2023 08:02            86840082
+mainnet-00176-8c05a49f.era                         19-Apr-2023 08:02            85199741
+mainnet-00177-1ea0ce2d.era                         19-Apr-2023 08:02            84875487
+mainnet-00178-0d0a5290.era                         19-Apr-2023 08:02            90506499
+mainnet-00179-529bdb65.era                         19-Apr-2023 08:02            88375904
+mainnet-00180-3d46655b.era                         19-Apr-2023 08:02            99823718
+mainnet-00181-5b2b8506.era                         19-Apr-2023 08:02            97431627
+mainnet-00182-389855a7.era                         19-Apr-2023 08:02            90997160
+mainnet-00183-7307be7c.era                         19-Apr-2023 08:02            92538927
+mainnet-00184-a0aa399e.era                         19-Apr-2023 08:02            98152875
+mainnet-00185-8e47493a.era                         19-Apr-2023 08:02            93287428
+mainnet-00186-82c6bfa8.era                         19-Apr-2023 08:02            93153188
+mainnet-00187-a05172bc.era                         19-Apr-2023 08:02            94047482
+mainnet-00188-80a8f609.era                         19-Apr-2023 08:02            96532497
+mainnet-00189-1969f732.era                         19-Apr-2023 08:02           102775171
+mainnet-00190-cf755024.era                         19-Apr-2023 08:02            97429581
+mainnet-00191-c4b273af.era                         19-Apr-2023 08:02           103605152
+mainnet-00192-02fe825e.era                         19-Apr-2023 08:02            98221273
+mainnet-00193-bf9be962.era                         19-Apr-2023 08:02            94606338
+mainnet-00194-73ba4d4b.era                         19-Apr-2023 08:02            92119107
+mainnet-00195-c6b10414.era                         19-Apr-2023 08:02            94695632
+mainnet-00196-a0c04a41.era                         19-Apr-2023 08:02            99263490
+mainnet-00197-e326d80f.era                         19-Apr-2023 08:02            90632581
+mainnet-00198-e4b20428.era                         19-Apr-2023 08:02            98177442
+mainnet-00199-1229bb7a.era                         19-Apr-2023 08:02           101276205
+mainnet-00200-1059dbf9.era                         19-Apr-2023 08:02           109459948
+mainnet-00201-7b7bcf26.era                         19-Apr-2023 08:02           103228424
+mainnet-00202-43ff90bf.era                         19-Apr-2023 08:02           100601822
+mainnet-00203-7ced6085.era                         19-Apr-2023 08:02           104361405
+mainnet-00204-aaf7f80a.era                         19-Apr-2023 08:02           102238704
+mainnet-00205-3e062302.era                         19-Apr-2023 08:02           102660932
+mainnet-00206-29475177.era                         19-Apr-2023 08:02           102893233
+mainnet-00207-1cec56ec.era                         19-Apr-2023 08:02           101029314
+mainnet-00208-09589075.era                         19-Apr-2023 08:02           104283853
+mainnet-00209-af8a2b2c.era                         19-Apr-2023 08:02           102192843
+mainnet-00210-53e9f2b8.era                         19-Apr-2023 08:02            98837926
+mainnet-00211-dd4773b5.era                         19-Apr-2023 08:02           100401650
+mainnet-00212-1f505eb5.era                         19-Apr-2023 08:02           106285285
+mainnet-00213-80ebec77.era                         19-Apr-2023 08:02           108087872
+mainnet-00214-0753b745.era                         19-Apr-2023 08:02           107119331
+mainnet-00215-8c624631.era                         19-Apr-2023 08:02           107194383
+mainnet-00216-67e04584.era                         19-Apr-2023 08:02           105244736
+mainnet-00217-529bb4d5.era                         19-Apr-2023 08:02           107292287
+mainnet-00218-918403ea.era                         19-Apr-2023 08:02           108463200
+mainnet-00219-e97456c3.era                         19-Apr-2023 08:02           109578337
+mainnet-00220-1b4181c7.era                         19-Apr-2023 08:02           109367454
+mainnet-00221-bbf1ea31.era                         19-Apr-2023 08:02           115692306
+mainnet-00222-f5c6fc10.era                         19-Apr-2023 08:02           116154257
+mainnet-00223-e3860536.era                         19-Apr-2023 08:02           117562693
+mainnet-00224-47162a88.era                         19-Apr-2023 08:02           121982248
+mainnet-00225-e4b0ee30.era                         19-Apr-2023 08:02           121410422
+mainnet-00226-474f9d41.era                         19-Apr-2023 08:02           120006785
+mainnet-00227-06b92185.era                         19-Apr-2023 08:02           117339637
+mainnet-00228-e5f13f42.era                         19-Apr-2023 08:02           122596886
+mainnet-00229-fc7ce170.era                         19-Apr-2023 08:02           123403174
+mainnet-00230-59598ac3.era                         19-Apr-2023 08:02           131368809
+mainnet-00231-2e78e382.era                         19-Apr-2023 08:02           143342472
+mainnet-00232-af149f76.era                         19-Apr-2023 08:02           122533494
+mainnet-00233-bad539a1.era                         19-Apr-2023 08:02           131088925
+mainnet-00234-911f5e67.era                         19-Apr-2023 08:02           124332893
+mainnet-00235-13d7f81a.era                         19-Apr-2023 08:02           127609612
+mainnet-00236-60ab620b.era                         19-Apr-2023 08:02           132286023
+mainnet-00237-8cd6d6f8.era                         19-Apr-2023 08:02           131961381
+mainnet-00238-dc7f6d7f.era                         19-Apr-2023 08:02           134331881
+mainnet-00239-e4cb8d62.era                         19-Apr-2023 08:02           138355516
+mainnet-00240-47e84e3b.era                         19-Apr-2023 08:02           140431003
+mainnet-00241-e1ba6894.era                         19-Apr-2023 08:02           140508194
+mainnet-00242-ff76e7b7.era                         19-Apr-2023 08:02           136578011
+mainnet-00243-8af7482e.era                         19-Apr-2023 08:02           135003688
+mainnet-00244-0ec22b52.era                         19-Apr-2023 08:02           126396110
+mainnet-00245-c084edd8.era                         19-Apr-2023 08:02           127944080
+mainnet-00246-0596ae6d.era                         19-Apr-2023 08:02           132953959
+mainnet-00247-e066d037.era                         19-Apr-2023 08:02           134959158
+mainnet-00248-859b3a5e.era                         19-Apr-2023 08:02           138410520
+mainnet-00249-28c20404.era                         19-Apr-2023 08:02           138654797
+mainnet-00250-1c525361.era                         19-Apr-2023 08:02           133548500
+mainnet-00251-51ca4809.era                         19-Apr-2023 08:02           130100495
+mainnet-00252-75a660ba.era                         19-Apr-2023 08:02           131712045
+mainnet-00253-87f32efb.era                         19-Apr-2023 08:02           136508760
+mainnet-00254-c2e7d484.era                         19-Apr-2023 08:02           135425750
+mainnet-00255-a7c46790.era                         19-Apr-2023 08:02           131551434
+mainnet-00256-2db6dfeb.era                         19-Apr-2023 08:02           131161922
+mainnet-00257-3dab898e.era                         19-Apr-2023 08:02           136207857
+mainnet-00258-8716b540.era                         19-Apr-2023 08:02           139220947
+mainnet-00259-0c5952dc.era                         19-Apr-2023 08:02           138595540
+mainnet-00260-c7e49e0d.era                         19-Apr-2023 08:03           138043147
+mainnet-00261-9fc5de06.era                         19-Apr-2023 08:03           140066104
+mainnet-00262-7e503b13.era                         19-Apr-2023 08:03           135104819
+mainnet-00263-6f24973b.era                         19-Apr-2023 08:03           131436160
+mainnet-00264-f67a09e3.era                         19-Apr-2023 08:03           137433017
+mainnet-00265-1468e348.era                         19-Apr-2023 08:03           136067719
+mainnet-00266-ad6fcf9c.era                         19-Apr-2023 08:03           138802586
+mainnet-00267-2a973e19.era                         19-Apr-2023 08:03           142173149
+mainnet-00268-80db539b.era                         19-Apr-2023 08:03           138784296
+mainnet-00269-463379f5.era                         19-Apr-2023 08:03           138787503
+mainnet-00270-3d352348.era                         19-Apr-2023 08:03           140145338
+mainnet-00271-99ea5dc6.era                         19-Apr-2023 08:03           142078088
+mainnet-00272-ae99a2a7.era                         19-Apr-2023 08:03           143222093
+mainnet-00273-4fc9d8ff.era                         19-Apr-2023 08:03           143392174
+mainnet-00274-7305fb17.era                         19-Apr-2023 08:03           142417127
+mainnet-00275-fb002049.era                         19-Apr-2023 08:03           141451380
+mainnet-00276-0ab34c16.era                         19-Apr-2023 08:03           142634264
+mainnet-00277-87c2d149.era                         19-Apr-2023 08:03           143249113
+mainnet-00278-e351c112.era                         19-Apr-2023 08:03           142725486
+mainnet-00279-fccc599d.era                         19-Apr-2023 08:03           143806302
+mainnet-00280-6df55667.era                         19-Apr-2023 08:03           143477409
+mainnet-00281-f17a7ec9.era                         19-Apr-2023 08:03           143444436
+mainnet-00282-49e08203.era                         19-Apr-2023 08:03           143611554
+mainnet-00283-409dab3d.era                         19-Apr-2023 08:03           143460220
+mainnet-00284-ccb80e2d.era                         19-Apr-2023 08:03           143031890
+mainnet-00285-74c3f8f9.era                         19-Apr-2023 08:03           143092162
+mainnet-00286-7a0086e7.era                         19-Apr-2023 08:03           143484675
+mainnet-00287-3848b932.era                         19-Apr-2023 08:03           143137543
+mainnet-00288-92c7bf45.era                         19-Apr-2023 08:03           142200815
+mainnet-00289-f2d57377.era                         19-Apr-2023 08:03           140465017
+mainnet-00290-f66bf737.era                         19-Apr-2023 08:03           132901777
+mainnet-00291-5cffd097.era                         19-Apr-2023 08:03           133799498
+mainnet-00292-b54d6727.era                         19-Apr-2023 08:03           140895436
+mainnet-00293-f9cbebe4.era                         19-Apr-2023 08:03           142635649
+mainnet-00294-a52fc044.era                         19-Apr-2023 08:03           142286481
+mainnet-00295-d764c311.era                         19-Apr-2023 08:03           140936625
+mainnet-00296-0b74d6bf.era                         19-Apr-2023 08:03           135067781
+mainnet-00297-2c312b09.era                         19-Apr-2023 08:03           135852621
+mainnet-00298-c3d2f59a.era                         19-Apr-2023 08:03           137782958
+mainnet-00299-9ca1bb8b.era                         19-Apr-2023 08:03           139210195
+mainnet-00300-23835f14.era                         19-Apr-2023 08:03           140775098
+mainnet-00301-94bdd692.era                         19-Apr-2023 08:03           141618192
+mainnet-00302-672d899b.era                         19-Apr-2023 08:03           141714545
+mainnet-00303-ef8bfe3a.era                         19-Apr-2023 08:03           141126500
+mainnet-00304-f423ed3c.era                         19-Apr-2023 08:03           140685485
+mainnet-00305-b8804abf.era                         19-Apr-2023 08:03           138826271
+mainnet-00306-9260a0c6.era                         19-Apr-2023 08:03           142131990
+mainnet-00307-deb43161.era                         19-Apr-2023 08:03           143429675
+mainnet-00308-cacc5c4c.era                         19-Apr-2023 08:03           142623937
+mainnet-00309-1f9d17d1.era                         19-Apr-2023 08:03           143304925
+mainnet-00310-9b2dd0f7.era                         19-Apr-2023 08:03           140975104
+mainnet-00311-74bc9b12.era                         19-Apr-2023 08:03           141169227
+mainnet-00312-039dbadf.era                         19-Apr-2023 08:03           143320041
+mainnet-00313-cb78de56.era                         19-Apr-2023 08:03           144544508
+mainnet-00314-aa045fb3.era                         19-Apr-2023 08:03           144207060
+mainnet-00315-86151ee0.era                         19-Apr-2023 08:03           144284174
+mainnet-00316-a768cb73.era                         19-Apr-2023 08:03           144308418
+mainnet-00317-c55ea3b8.era                         19-Apr-2023 08:03           144725081
+mainnet-00318-a6ca04a3.era                         19-Apr-2023 08:03           143601635
+mainnet-00319-8fa923d9.era                         19-Apr-2023 08:03           144570935
+mainnet-00320-c9dad55c.era                         19-Apr-2023 08:03           140119571
+mainnet-00321-3048f935.era                         19-Apr-2023 08:03           143827715
+mainnet-00322-70f99be0.era                         19-Apr-2023 08:03           145079590
+mainnet-00323-9c951b17.era                         19-Apr-2023 08:03           143859235
+mainnet-00324-41c96e46.era                         19-Apr-2023 08:03           145612087
+mainnet-00325-ee523bad.era                         19-Apr-2023 08:03           144200381
+mainnet-00326-81b666eb.era                         19-Apr-2023 08:03           144883081
+mainnet-00327-8a4ac4bb.era                         19-Apr-2023 08:03           144088378
+mainnet-00328-6a504821.era                         19-Apr-2023 08:03           145246261
+mainnet-00329-d880d95f.era                         19-Apr-2023 08:03           144544227
+mainnet-00330-c47672af.era                         19-Apr-2023 08:03           144395898
+mainnet-00331-92a3286e.era                         19-Apr-2023 08:03           144314761
+mainnet-00332-0e3d1e11.era                         19-Apr-2023 08:03           143410533
+mainnet-00333-1664895f.era                         19-Apr-2023 08:03           141165354
+mainnet-00334-bf5b55e0.era                         19-Apr-2023 08:03           140686127
+mainnet-00335-362a9895.era                         19-Apr-2023 08:03           142357964
+mainnet-00336-16d958db.era                         19-Apr-2023 08:03           143097863
+mainnet-00337-dffe144a.era                         19-Apr-2023 08:03           142880225
+mainnet-00338-fc13e70a.era                         19-Apr-2023 08:03           143369692
+mainnet-00339-50d6189f.era                         19-Apr-2023 08:03           140687599
+mainnet-00340-8e1fa202.era                         19-Apr-2023 08:03           142915993
+mainnet-00341-a9118dca.era                         19-Apr-2023 08:03           144257850
+mainnet-00342-bbf4f40a.era                         19-Apr-2023 08:03           142945729
+mainnet-00343-ae87bbfa.era                         19-Apr-2023 08:03           139730408
+mainnet-00344-97def269.era                         19-Apr-2023 08:03           144287893
+mainnet-00345-ee60fe7f.era                         19-Apr-2023 08:03           145247533
+mainnet-00346-afe75173.era                         19-Apr-2023 08:03           140383275
+mainnet-00347-ebe40c78.era                         19-Apr-2023 08:03           137305156
+mainnet-00348-31ed95ce.era                         19-Apr-2023 08:03           140187571
+mainnet-00349-1f8ba855.era                         19-Apr-2023 08:03           137702054
+mainnet-00350-f6b68695.era                         19-Apr-2023 08:04           142022093
+mainnet-00351-b1c0e38d.era                         19-Apr-2023 08:04           144745315
+mainnet-00352-9989d5e9.era                         19-Apr-2023 08:04           145697985
+mainnet-00353-5ae0ffb9.era                         19-Apr-2023 08:04           144158393
+mainnet-00354-ca0dc4e4.era                         19-Apr-2023 08:04           144468288
+mainnet-00355-43f765be.era                         19-Apr-2023 08:04           143115320
+mainnet-00356-f1bffc9a.era                         19-Apr-2023 08:04           142731615
+mainnet-00357-c542316b.era                         19-Apr-2023 08:04           139120413
+mainnet-00358-cc546403.era                         19-Apr-2023 08:04           135280520
+mainnet-00359-5054fa14.era                         19-Apr-2023 08:04           136014293
+mainnet-00360-08de2754.era                         19-Apr-2023 08:04           134986110
+mainnet-00361-c09d813e.era                         19-Apr-2023 08:04           142068273
+mainnet-00362-91268052.era                         19-Apr-2023 08:04           144734024
+mainnet-00363-b197ad33.era                         19-Apr-2023 08:04           145221207
+mainnet-00364-d873d778.era                         19-Apr-2023 08:04           143957528
+mainnet-00365-e905337e.era                         19-Apr-2023 08:04           144707364
+mainnet-00366-041623bf.era                         19-Apr-2023 08:04           141526500
+mainnet-00367-4027a2bc.era                         19-Apr-2023 08:04           138061319
+mainnet-00368-0962c836.era                         19-Apr-2023 08:04           144491489
+mainnet-00369-6dc5bdb8.era                         19-Apr-2023 08:04           146753546
+mainnet-00370-6ec40871.era                         19-Apr-2023 08:04           144573059
+mainnet-00371-d5b51b1f.era                         19-Apr-2023 08:04           142026136
+mainnet-00372-a40692c4.era                         19-Apr-2023 08:04           144712869
+mainnet-00373-c3baa5c6.era                         19-Apr-2023 08:04           142716858
+mainnet-00374-c79b0c6f.era                         19-Apr-2023 08:04           141000156
+mainnet-00375-f97aae3b.era                         19-Apr-2023 08:04           144907908
+mainnet-00376-d19f272a.era                         19-Apr-2023 08:04           146643752
+mainnet-00377-493ff9fb.era                         19-Apr-2023 08:04           147601635
+mainnet-00378-c0faf344.era                         19-Apr-2023 08:04           147740569
+mainnet-00379-93288ae1.era                         19-Apr-2023 08:04           147116558
+mainnet-00380-20c8d848.era                         19-Apr-2023 08:04           146790818
+mainnet-00381-d4a23bda.era                         19-Apr-2023 08:04           147056664
+mainnet-00382-563fafb7.era                         19-Apr-2023 08:04           145773378
+mainnet-00383-ac873f2c.era                         19-Apr-2023 08:04           145787364
+mainnet-00384-301dfcb3.era                         19-Apr-2023 08:04           143853946
+mainnet-00385-382ffae8.era                         19-Apr-2023 08:04           145489423
+mainnet-00386-93708f54.era                         19-Apr-2023 08:04           146854074
+mainnet-00387-14a9a844.era                         19-Apr-2023 08:04           147725465
+mainnet-00388-4fd885a2.era                         19-Apr-2023 08:04           143902764
+mainnet-00389-a7c98c34.era                         19-Apr-2023 08:04           142402218
+mainnet-00390-a364d119.era                         19-Apr-2023 08:04           142419416
+mainnet-00391-f403cdd3.era                         19-Apr-2023 08:04           145201759
+mainnet-00392-214f6b79.era                         19-Apr-2023 08:04           146785388
+mainnet-00393-8d4ee9ba.era                         19-Apr-2023 08:04           146040091
+mainnet-00394-3c3885bc.era                         19-Apr-2023 08:04           147195813
+mainnet-00395-9239d7ce.era                         19-Apr-2023 08:04           148416246
+mainnet-00396-019b8d1e.era                         19-Apr-2023 08:04           147350365
+mainnet-00397-2ada2dcf.era                         19-Apr-2023 08:04           148159111
+mainnet-00398-2539f1cb.era                         19-Apr-2023 08:04           148741299
+mainnet-00399-ea503937.era                         19-Apr-2023 08:04           148402948
+mainnet-00400-cebcae93.era                         19-Apr-2023 08:04           148454062
+mainnet-00401-904b72bd.era                         19-Apr-2023 08:04           149528573
+mainnet-00402-35c28170.era                         19-Apr-2023 08:04           150243706
+mainnet-00403-bb46bb18.era                         19-Apr-2023 08:04           148118111
+mainnet-00404-882a68f8.era                         19-Apr-2023 08:04           149981367
+mainnet-00405-b25162bf.era                         19-Apr-2023 08:04           149041765
+mainnet-00406-50991f74.era                         19-Apr-2023 08:04           147628707
+mainnet-00407-d485b3b4.era                         19-Apr-2023 08:04           149035343
+mainnet-00408-bce51f59.era                         19-Apr-2023 08:04           148863331
+mainnet-00409-12a8fedb.era                         19-Apr-2023 08:04           149293274
+mainnet-00410-273264a4.era                         19-Apr-2023 08:04           150353388
+mainnet-00411-344d3e70.era                         19-Apr-2023 08:04           150214222
+mainnet-00412-95698adb.era                         19-Apr-2023 08:04           150998512
+mainnet-00413-e622731a.era                         19-Apr-2023 08:04           152868638
+mainnet-00414-00314316.era                         19-Apr-2023 08:04           149856037
+mainnet-00415-9205cd0e.era                         19-Apr-2023 08:04           151302096
+mainnet-00416-211ee89e.era                         19-Apr-2023 08:04           147818765
+mainnet-00417-214f1587.era                         19-Apr-2023 08:04           146519980
+mainnet-00418-984b0fb0.era                         19-Apr-2023 08:04           145008254
+mainnet-00419-be35f9d3.era                         19-Apr-2023 08:04           143471753
+mainnet-00420-716e07c6.era                         19-Apr-2023 08:04           145488915
+mainnet-00421-2d964c44.era                         19-Apr-2023 08:04           145647054
+mainnet-00422-688ff302.era                         19-Apr-2023 08:04           149788343
+mainnet-00423-a8837094.era                         19-Apr-2023 08:04           148359905
+mainnet-00424-66592198.era                         19-Apr-2023 08:04           148839481
+mainnet-00425-f1224927.era                         19-Apr-2023 08:04           147991905
+mainnet-00426-e501e21e.era                         19-Apr-2023 08:04           149688020
+mainnet-00427-7c5a03e6.era                         19-Apr-2023 08:04           141917550
+mainnet-00428-e89501d6.era                         19-Apr-2023 08:04           141561621
+mainnet-00429-b07b7ecd.era                         19-Apr-2023 08:04           132041593
+mainnet-00430-9d576a91.era                         19-Apr-2023 08:04           130907692
+mainnet-00431-a6f08e88.era                         19-Apr-2023 08:04           130563397
+mainnet-00432-02eda247.era                         19-Apr-2023 08:04           141379377
+mainnet-00433-4a10be25.era                         19-Apr-2023 08:04           149681451
+mainnet-00434-daba5aa4.era                         19-Apr-2023 08:04           149216678
+mainnet-00435-46ac713f.era                         19-Apr-2023 08:04           146975846
+mainnet-00436-cf498157.era                         19-Apr-2023 08:04           139380529
+mainnet-00437-bd3d1515.era                         19-Apr-2023 08:04           140259423
+mainnet-00438-3a702b67.era                         19-Apr-2023 08:04           141864937
+mainnet-00439-2cb045ae.era                         19-Apr-2023 08:04           142790480
+mainnet-00440-a03195a1.era                         19-Apr-2023 08:04           143467511
+mainnet-00441-a652d5ff.era                         19-Apr-2023 08:05           144613205
+mainnet-00442-177df8c4.era                         19-Apr-2023 08:05           146002671
+mainnet-00443-9e30c504.era                         19-Apr-2023 08:05           146292377
+mainnet-00444-ad034c6b.era                         19-Apr-2023 08:05           145665055
+mainnet-00445-f92f372c.era                         19-Apr-2023 08:05           147829082
+mainnet-00446-c9ff4460.era                         19-Apr-2023 08:05           148983548
+mainnet-00447-93909b1c.era                         19-Apr-2023 08:05           151487652
+mainnet-00448-3e2020be.era                         19-Apr-2023 08:05           150375082
+mainnet-00449-420aa4e7.era                         19-Apr-2023 08:05           150551309
+mainnet-00450-0139a40c.era                         19-Apr-2023 08:05           152704757
+mainnet-00451-ab0430cb.era                         19-Apr-2023 08:05           152803994
+mainnet-00452-19970c07.era                         19-Apr-2023 08:05           150643202
+mainnet-00453-1d23ae80.era                         19-Apr-2023 08:05           150234390
+mainnet-00454-a203fd6a.era                         19-Apr-2023 08:05           149779940
+mainnet-00455-e83c7f19.era                         19-Apr-2023 08:05           151769936
+mainnet-00456-ed2ed157.era                         19-Apr-2023 08:05           152586963
+mainnet-00457-7647d780.era                         19-Apr-2023 08:05           150812062
+mainnet-00458-d83e98a0.era                         19-Apr-2023 08:05           150456529
+mainnet-00459-4c94657e.era                         19-Apr-2023 08:05           150180357
+mainnet-00460-b3d0336a.era                         19-Apr-2023 08:05           149390443
+mainnet-00461-b9cc17aa.era                         19-Apr-2023 08:05           150521363
+mainnet-00462-84b64267.era                         19-Apr-2023 08:05           150935934
+mainnet-00463-2c3e04f9.era                         19-Apr-2023 08:05           148737199
+mainnet-00464-ce81b446.era                         19-Apr-2023 08:05           148035817
+mainnet-00465-17169abc.era                         19-Apr-2023 08:05           151294041
+mainnet-00466-6f96819e.era                         19-Apr-2023 08:05           149965989
+mainnet-00467-d72171ba.era                         19-Apr-2023 08:05           150079467
+mainnet-00468-288eb7d2.era                         19-Apr-2023 08:05           149299506
+mainnet-00469-ac1bb583.era                         19-Apr-2023 08:05           149766225
+mainnet-00470-17db03ab.era                         19-Apr-2023 08:05           154542826
+mainnet-00471-d8736340.era                         19-Apr-2023 08:05           152828081
+mainnet-00472-c903f576.era                         19-Apr-2023 08:05           149920625
+mainnet-00473-85544b2c.era                         19-Apr-2023 08:05           143204985
+mainnet-00474-b5cb18bf.era                         19-Apr-2023 08:05           143883862
+mainnet-00475-1bcb0b75.era                         19-Apr-2023 08:05           149707407
+mainnet-00476-bd7f073e.era                         19-Apr-2023 08:05           148642578
+mainnet-00477-e687403d.era                         19-Apr-2023 08:05           148540204
+mainnet-00478-e352f77e.era                         19-Apr-2023 08:05           150408471
+mainnet-00479-1d77e810.era                         19-Apr-2023 08:05           149977377
+mainnet-00480-420ecbda.era                         19-Apr-2023 08:05           150153985
+mainnet-00481-3ca53867.era                         19-Apr-2023 08:05           150151322
+mainnet-00482-fa14e924.era                         19-Apr-2023 08:05           150942816
+mainnet-00483-816d1efe.era                         19-Apr-2023 08:05           150443511
+mainnet-00484-c1982d2d.era                         19-Apr-2023 08:05           149366089
+mainnet-00485-d6ea460b.era                         19-Apr-2023 08:05           149470782
+mainnet-00486-e3f47d77.era                         19-Apr-2023 08:05           147843144
+mainnet-00487-ca45547d.era                         19-Apr-2023 08:05           148553021
+mainnet-00488-6aef4d62.era                         19-Apr-2023 08:05           149505209
+mainnet-00489-c227388d.era                         19-Apr-2023 08:05           150393101
+mainnet-00490-b01d5e10.era                         19-Apr-2023 08:05           147078140
+mainnet-00491-05d38617.era                         19-Apr-2023 08:05           148069836
+mainnet-00492-ce9419e1.era                         19-Apr-2023 08:05           150646658
+mainnet-00493-e65f5d9d.era                         19-Apr-2023 08:05           151421913
+mainnet-00494-461696a2.era                         19-Apr-2023 08:05           152059901
+mainnet-00495-bb43374c.era                         19-Apr-2023 08:05           152803741
+mainnet-00496-3e322bc4.era                         19-Apr-2023 08:05           153827569
+mainnet-00497-6c371884.era                         19-Apr-2023 08:05           152946047
+mainnet-00498-37951b8a.era                         19-Apr-2023 08:05           150824865
+mainnet-00499-cf687cf0.era                         19-Apr-2023 08:05           151192378
+mainnet-00500-87109713.era                         19-Apr-2023 08:05           152823056
+mainnet-00501-5cc4a456.era                         19-Apr-2023 08:05           146391337
+mainnet-00502-10e603ed.era                         19-Apr-2023 08:05           148199375
+mainnet-00503-8ef7268b.era                         19-Apr-2023 08:05           151875291
+mainnet-00504-996ab41d.era                         19-Apr-2023 08:05           151763994
+mainnet-00505-f262525d.era                         19-Apr-2023 08:05           150677541
+mainnet-00506-a426c61e.era                         19-Apr-2023 08:05           149249452
+mainnet-00507-2fad0c94.era                         19-Apr-2023 08:05           150760026
+mainnet-00508-04f87fed.era                         19-Apr-2023 08:05           152027833
+mainnet-00509-d86d2672.era                         19-Apr-2023 08:05           151507444
+mainnet-00510-767cf672.era                         19-Apr-2023 08:05           151610239
+mainnet-00511-78a7fd5a.era                         19-Apr-2023 08:05           151887859
+mainnet-00512-99c76049.era                         19-Apr-2023 08:05           151009351
+mainnet-00513-35109a06.era                         19-Apr-2023 08:05           150910062
+mainnet-00514-2e322919.era                         19-Apr-2023 08:05           151409329
+mainnet-00515-b3354541.era                         19-Apr-2023 08:05           146896352
+mainnet-00516-aa99a4c4.era                         19-Apr-2023 08:05           144440605
+mainnet-00517-68961dbb.era                         19-Apr-2023 08:05           151542512
+mainnet-00518-4e267a3a.era                         19-Apr-2023 08:05           149956234
+mainnet-00519-11f02ea8.era                         19-Apr-2023 08:05           152191999
+mainnet-00520-fca505d1.era                         19-Apr-2023 08:05           150897624
+mainnet-00521-ba5a52a0.era                         19-Apr-2023 08:05           151664141
+mainnet-00522-8882f3b8.era                         19-Apr-2023 08:05           147405485
+mainnet-00523-ddc83067.era                         19-Apr-2023 08:05           145993937
+mainnet-00524-5068f888.era                         19-Apr-2023 08:05           147257515
+mainnet-00525-681ebfa9.era                         19-Apr-2023 08:05           148244705
+mainnet-00526-8483ee75.era                         19-Apr-2023 08:05           149990129
+mainnet-00527-051b3b7d.era                         19-Apr-2023 08:06           148358353
+mainnet-00528-e3733d57.era                         19-Apr-2023 08:06           147214678
+mainnet-00529-ac324fb0.era                         19-Apr-2023 08:06           147406673
+mainnet-00530-59213b31.era                         19-Apr-2023 08:06           144574735
+mainnet-00531-3106561c.era                         19-Apr-2023 08:06           141321005
+mainnet-00532-f09fb770.era                         19-Apr-2023 08:06           138975960
+mainnet-00533-d5b6eb9e.era                         19-Apr-2023 08:06           145204913
+mainnet-00534-b5fbeb2f.era                         19-Apr-2023 08:06           147933076
+mainnet-00535-b3845d23.era                         19-Apr-2023 08:06           149434998
+mainnet-00536-5d6d5372.era                         19-Apr-2023 08:06           149924559
+mainnet-00537-636a6c7b.era                         19-Apr-2023 08:06           148891119
+mainnet-00538-0db607c0.era                         19-Apr-2023 08:06           149941116
+mainnet-00539-eccb94a6.era                         19-Apr-2023 08:06           151451856
+mainnet-00540-d293c891.era                         19-Apr-2023 08:06           151222012
+mainnet-00541-baba46b8.era                         19-Apr-2023 08:06           149892687
+mainnet-00542-c9f98e60.era                         19-Apr-2023 08:06           149808275
+mainnet-00543-1bf67198.era                         19-Apr-2023 08:06           149872098
+mainnet-00544-ccb7f2e0.era                         19-Apr-2023 08:06           150620915
+mainnet-00545-89b6b06c.era                         19-Apr-2023 08:06           146478100
+mainnet-00546-4eef8f7f.era                         19-Apr-2023 08:06           140125331
+mainnet-00547-0166dac1.era                         19-Apr-2023 08:06           144301100
+mainnet-00548-fff2c048.era                         19-Apr-2023 08:06           148181648
+mainnet-00549-6da817b9.era                         19-Apr-2023 08:06           139007607
+mainnet-00550-22438f92.era                         19-Apr-2023 08:06           138354832
+mainnet-00551-fac0cbff.era                         19-Apr-2023 08:06           139046367
+mainnet-00552-e0f39697.era                         19-Apr-2023 08:06           140697882
+mainnet-00553-4ec1668f.era                         19-Apr-2023 08:06           139229765
+mainnet-00554-2af21c9d.era                         19-Apr-2023 08:06           140890557
+mainnet-00555-0206a1af.era                         19-Apr-2023 08:06           141196410
+mainnet-00556-c480a1a9.era                         19-Apr-2023 08:06           144861549
+mainnet-00557-690b98ad.era                         19-Apr-2023 08:06           133346780
+mainnet-00558-73deda10.era                         19-Apr-2023 08:06           129442464
+mainnet-00559-e15ed367.era                         19-Apr-2023 08:06           136700182
+mainnet-00560-224c677c.era                         19-Apr-2023 08:06           142617954
+mainnet-00561-15de525a.era                         19-Apr-2023 08:06           135296243
+mainnet-00562-e47bdb20.era                         19-Apr-2023 08:06           133406089
+mainnet-00563-f24d04c9.era                         19-Apr-2023 08:06           144994150
+mainnet-00564-dcd6c750.era                         19-Apr-2023 08:06           146784212
+mainnet-00565-2d7775ea.era                         19-Apr-2023 08:06           147304487
+mainnet-00566-19353898.era                         19-Apr-2023 08:06           137093662
+mainnet-00567-cd412b92.era                         19-Apr-2023 08:06           138876874
+mainnet-00568-c5b1968c.era                         19-Apr-2023 08:06           134999727
+mainnet-00569-ff2f4627.era                         19-Apr-2023 08:06           131216076
+mainnet-00570-891fb8fd.era                         19-Apr-2023 08:06           130411652
+mainnet-00571-a957e865.era                         19-Apr-2023 08:06           136919974
+mainnet-00572-67cc2c10.era                         19-Apr-2023 08:06           136993635
+mainnet-00573-c847a969.era                         19-Apr-2023 08:06           133719903
+mainnet-00574-3c0da77d.era                         19-Apr-2023 08:06           224997856
+mainnet-00575-abe0d5d9.era                         19-Apr-2023 08:06           495576057
+mainnet-00576-18473343.era                         19-Apr-2023 08:06           462703957
+mainnet-00577-3945c8e9.era                         19-Apr-2023 08:06           462701274
+mainnet-00578-1b2761b0.era                         19-Apr-2023 08:06           488690656
+mainnet-00579-505b6eca.era                         19-Apr-2023 08:06           516646616
+mainnet-00580-24505b1b.era                         19-Apr-2023 08:06           540829778
+mainnet-00581-5fed1297.era                         19-Apr-2023 08:06           525912796
+mainnet-00582-a55650a7.era                         19-Apr-2023 08:06           507902313
+mainnet-00583-51fb01b3.era                         19-Apr-2023 08:06           481257827
+mainnet-00584-b9214ce1.era                         19-Apr-2023 08:06           496943155
+mainnet-00585-125a6940.era                         19-Apr-2023 08:07           502140763
+mainnet-00586-865166e0.era                         19-Apr-2023 08:07           506779555
+mainnet-00587-6e183cad.era                         19-Apr-2023 08:07           509832473
+mainnet-00588-c8808368.era                         19-Apr-2023 08:07           503822671
+mainnet-00589-42803687.era                         19-Apr-2023 08:07           500850156
+mainnet-00590-2ed09275.era                         19-Apr-2023 08:07           494815765
+mainnet-00591-563fe434.era                         19-Apr-2023 08:07           495012154
+mainnet-00592-769bda94.era                         19-Apr-2023 08:07           508855516
+mainnet-00593-11494dbb.era                         19-Apr-2023 08:07           509268079
+mainnet-00594-38249b5c.era                         19-Apr-2023 08:07           502077066
+mainnet-00595-418d306c.era                         19-Apr-2023 08:07           432608543
+mainnet-00596-0c8f6483.era                         19-Apr-2023 08:07           412850034
+mainnet-00597-f8f5e231.era                         19-Apr-2023 08:07           448546679
+mainnet-00598-c1955d2f.era                         19-Apr-2023 08:07           445687915
+mainnet-00599-2de31eb5.era                         19-Apr-2023 08:07           487034006
+mainnet-00600-e031a37d.era                         19-Apr-2023 08:07           457977414
+mainnet-00601-336cddc5.era                         19-Apr-2023 08:07           433884715
+mainnet-00602-a9694dc1.era                         19-Apr-2023 08:07           456087586
+mainnet-00603-3f1f76df.era                         19-Apr-2023 08:07           479079484
+mainnet-00604-15aadeb5.era                         19-Apr-2023 08:07           498414286
+mainnet-00605-46375bab.era                         19-Apr-2023 08:07           511955699
+mainnet-00606-6ca5d9c2.era                         19-Apr-2023 08:07           486082684
+mainnet-00607-62b47765.era                         19-Apr-2023 08:07           449335714
+mainnet-00608-d6a7c702.era                         19-Apr-2023 08:07           482482451
+mainnet-00609-73a22c92.era                         19-Apr-2023 08:07           499823134
+mainnet-00610-85001de6.era                         19-Apr-2023 08:08           549047929
+mainnet-00611-48491d55.era                         19-Apr-2023 08:08           564984931
+mainnet-00612-b6190577.era                         19-Apr-2023 08:08           543066354
+mainnet-00613-b2392947.era                         19-Apr-2023 08:08           520443934
+mainnet-00614-01adaa2b.era                         19-Apr-2023 08:08           515626872
+mainnet-00615-a16e6eab.era                         19-Apr-2023 08:08           529367293
+mainnet-00616-fef77a50.era                         19-Apr-2023 08:08           528334097
+mainnet-00617-453b53bf.era                         19-Apr-2023 08:08           534903189
+mainnet-00618-6e7d7dd3.era                         19-Apr-2023 08:08           540347892
+mainnet-00619-e9e5ecbb.era                         19-Apr-2023 08:08           536071235
+mainnet-00620-fb989681.era                         19-Apr-2023 08:08           522349450
+mainnet-00621-1639ac56.era                         19-Apr-2023 08:08           562031375
+mainnet-00622-7bc85a3d.era                         19-Apr-2023 08:08           631897088
+mainnet-00623-69d46abc.era                         19-Apr-2023 08:08           647883493
+mainnet-00624-a53a4850.era                         19-Apr-2023 08:08           596921421
+mainnet-00625-26c77f05.era                         19-Apr-2023 08:08           574738057
+mainnet-00626-8b8dc42a.era                         19-Apr-2023 08:08           527717095
+mainnet-00627-f6bd90de.era                         19-Apr-2023 08:08           563400942
+mainnet-00628-5bff8aa6.era                         19-Apr-2023 08:08           541930570
+mainnet-00629-0b6885ea.era                         19-Apr-2023 08:08           522505585
+mainnet-00630-43736ddb.era                         19-Apr-2023 08:08           531768041
+mainnet-00631-dbbf2183.era                         19-Apr-2023 08:08           512125978
+mainnet-00632-b8b89228.era                         19-Apr-2023 08:08           560138303
+mainnet-00633-c5cdb62b.era                         19-Apr-2023 08:08           572780751
+mainnet-00634-0226338b.era                         19-Apr-2023 08:09           565225106
+mainnet-00635-b817d4a1.era                         19-Apr-2023 08:09           564228603
+mainnet-00636-dd28d082.era                         19-Apr-2023 08:09           528129901
+mainnet-00637-3bd698db.era                         19-Apr-2023 08:09           519734299
+mainnet-00638-357ca82b.era                         19-Apr-2023 08:09           507251686
+mainnet-00639-8c7ad2cb.era                         19-Apr-2023 08:09           524765203
+mainnet-00640-50649802.era                         19-Apr-2023 08:09           548440213
+mainnet-00641-7fc62116.era                         19-Apr-2023 08:09           553277962
+mainnet-00642-7f6f0637.era                         19-Apr-2023 08:09           537226667
+mainnet-00643-b6a63b71.era                         19-Apr-2023 08:09           527018749
+mainnet-00644-5f8f506f.era                         19-Apr-2023 08:09           499443483
+mainnet-00645-2188864c.era                         19-Apr-2023 08:09           521221458
+mainnet-00646-a8314126.era                         19-Apr-2023 08:09           535934162
+mainnet-00647-d4209a57.era                         19-Apr-2023 08:09           545777379
+mainnet-00648-ad59a24b.era                         19-Apr-2023 08:09           532293474
+mainnet-00649-b5d83959.era                         19-Apr-2023 08:09           582735939
+mainnet-00650-4f59b460.era                         19-Apr-2023 08:09           517927212
+mainnet-00651-302c1ff2.era                         19-Apr-2023 08:09           517587788
+mainnet-00652-c0c2fcc3.era                         19-Apr-2023 08:09           571705282
+mainnet-00653-2cddb94b.era                         19-Apr-2023 08:09           590869496
+mainnet-00654-f0dfb557.era                         19-Apr-2023 08:09           595213879
+mainnet-00655-3a14592b.era                         19-Apr-2023 08:09           580015510
+mainnet-00656-8f9e0955.era                         19-Apr-2023 08:09           570332276
+mainnet-00657-3e4f8b1a.era                         19-Apr-2023 08:09           513264967
+mainnet-00658-83771ab5.era                         19-Apr-2023 08:09           525798501
+mainnet-00659-74f019cd.era                         19-Apr-2023 08:09           588220728
+mainnet-00660-67813bd4.era                         19-Apr-2023 08:09           573837245
+mainnet-00661-b86f6535.era                         19-Apr-2023 08:09           586427151
+mainnet-00662-7015d148.era                         19-Apr-2023 08:09           541604335
+mainnet-00663-f9a7edf9.era                         19-Apr-2023 08:09           511863060
+mainnet-00664-1084e515.era                         19-Apr-2023 08:09           533555817
+mainnet-00665-dfb16823.era                         19-Apr-2023 08:10           534989343
+mainnet-00666-9175daf6.era                         19-Apr-2023 08:10           563532091
+mainnet-00667-16a5a652.era                         19-Apr-2023 08:10           554848282
+mainnet-00668-fb425f19.era                         19-Apr-2023 08:10           552626889
+mainnet-00669-08edc329.era                         19-Apr-2023 08:10           478996212
+mainnet-00670-b1cd7944.era                         19-Apr-2023 08:10           498479485
+mainnet-00671-c8db5713.era                         19-Apr-2023 08:10           537942060
+mainnet-00672-3a3d1895.era                         19-Apr-2023 08:10           566016132
+mainnet-00673-5b921f7d.era                         19-Apr-2023 08:10           569669917
+mainnet-00674-1d7aad1a.era                         19-Apr-2023 08:10           583186972
+mainnet-00675-f4170ce0.era                         19-Apr-2023 08:10           561058464
+mainnet-00676-079d079f.era                         19-Apr-2023 08:10           609422904
+mainnet-00677-06c9d971.era                         19-Apr-2023 08:10           608321167
+mainnet-00678-d5de9dd7.era                         19-Apr-2023 08:10           595027792
+mainnet-00679-e69f92b7.era                         19-Apr-2023 08:10           641362841
+mainnet-00680-23976593.era                         19-Apr-2023 08:10           638607520
+mainnet-00681-fdb5c50a.era                         19-Apr-2023 08:11           637711640
+mainnet-00682-477ffb23.era                         19-Apr-2023 08:11           626984513
+mainnet-00683-7546da48.era                         19-Apr-2023 08:11           626170878
+mainnet-00684-cfdf8415.era                         19-Apr-2023 08:11           547182052
+mainnet-00685-08ee0616.era                         19-Apr-2023 08:11           508113029
+mainnet-00686-d844b1a7.era                         19-Apr-2023 08:11           501837161
+mainnet-00687-9e3e819b.era                         19-Apr-2023 08:12           504437678
+mainnet-00688-7b9ed838.era                         19-Apr-2023 08:12           472423545
+mainnet-00689-20b01cfd.era                         19-Apr-2023 08:12           504885883
+mainnet-00690-35c5500a.era                         19-Apr-2023 08:12           525538491
+mainnet-00691-89e7ee6c.era                         19-Apr-2023 08:12           549451456
+mainnet-00692-0d6d6c62.era                         19-Apr-2023 08:12           537954528
+mainnet-00693-fae1772e.era                         19-Apr-2023 08:13           502106544
+mainnet-00694-9042daf2.era                         19-Apr-2023 08:13           500180256
+mainnet-00695-c41fe322.era                         19-Apr-2023 08:13           533667519
+mainnet-00696-955bb127.era                         19-Apr-2023 08:13           525940145
+mainnet-00697-90342a2c.era                         19-Apr-2023 08:13           553934927
+mainnet-00698-a3c2b085.era                         19-Apr-2023 08:13           553493293
+mainnet-00699-6ba2e1a0.era                         19-Apr-2023 08:14           533630560
+mainnet-00700-77a2ed91.era                         19-Apr-2023 08:14           531067147
+mainnet-00701-2e20b09f.era                         23-Oct-2024 08:52           541655768
+mainnet-00702-438ec0d0.era                         19-Apr-2023 08:14           564032434
+mainnet-00703-9fedf535.era                         19-Apr-2023 08:14           573063844
+mainnet-00704-7f2b27b3.era                         19-Apr-2023 08:14           614527103
+mainnet-00705-b4707b23.era                         19-Apr-2023 08:15           546234999
+mainnet-00706-1539f181.era                         19-Apr-2023 08:15           519406975
+mainnet-00707-c160b799.era                         23-Oct-2024 08:52           562424018
+mainnet-00708-61a7c610.era                         19-Apr-2023 08:15           576833673
+mainnet-00709-b93cbd7d.era                         23-Oct-2024 08:52           603511712
+mainnet-00710-3d5b2574.era                         19-Apr-2023 08:15           609292836
+mainnet-00711-363514ef.era                         19-Apr-2023 08:15           608722863
+mainnet-00712-0331da6c.era                         19-Apr-2023 08:15           595185240
+mainnet-00713-bd93ffab.era                         19-Apr-2023 08:16           636461431
+mainnet-00714-3e95b296.era                         19-Apr-2023 08:16           653322133
+mainnet-00715-8e899bb6.era                         19-Apr-2023 08:16           660796370
+mainnet-00716-6ed75e01.era                         19-Apr-2023 08:16           661301244
+mainnet-00717-66e0f690.era                         19-Apr-2023 08:16           664040750
+mainnet-00718-f89a9b7a.era                         19-Apr-2023 08:17           586820944
+mainnet-00719-a7400052.era                         23-Oct-2024 08:52           603755469
+mainnet-00720-b0e85a12.era                         19-Apr-2023 08:17           629426399
+mainnet-00721-650423f0.era                         19-Apr-2023 08:17           613986390
+mainnet-00722-7b469136.era                         19-Apr-2023 08:17           604280079
+mainnet-00723-f8c04369.era                         19-Apr-2023 08:17           595563889
+mainnet-00724-d31afe74.era                         19-Apr-2023 08:18           566112731
+mainnet-00725-8c727b38.era                         23-Oct-2024 08:52           553276601
+mainnet-00726-411eb77c.era                         19-Apr-2023 08:18           583149242
+mainnet-00727-73a7d7a3.era                         19-Apr-2023 08:18           601017473
+mainnet-00728-cbd21a79.era                         19-Apr-2023 08:18           613201260
+mainnet-00729-040e23ac.era                         19-Apr-2023 08:18           638976201
+mainnet-00730-e0ca27c6.era                         19-Apr-2023 08:19           594243162
+mainnet-00731-f32383b2.era                         23-Oct-2024 08:53           578920126
+mainnet-00732-262b0f21.era                         19-Apr-2023 08:19           622691357
+mainnet-00733-7fe3d657.era                         19-Apr-2023 08:19           659616995
+mainnet-00734-c13a4eee.era                         19-Apr-2023 08:19           670471994
+mainnet-00735-10e158b0.era                         19-Apr-2023 08:19           673008215
+mainnet-00736-32bcbf43.era                         19-Apr-2023 08:20           671270573
+mainnet-00737-9f6aeaf8.era                         19-Apr-2023 08:20           650370129
+mainnet-00738-b5d4ed5a.era                         19-Apr-2023 08:20           694578721
+mainnet-00739-846cfb2e.era                         19-Apr-2023 08:20           734879617
+mainnet-00740-e34eaf91.era                         19-Apr-2023 08:21           730321149
+mainnet-00741-52aa7e72.era                         19-Apr-2023 08:21           963813783
+mainnet-00742-7c475e71.era                         19-Apr-2023 08:21           770905783
+mainnet-00743-b64ecc18.era                         19-Apr-2023 08:21           721802029
+mainnet-00744-892c5327.era                         19-Apr-2023 08:22           757312802
+mainnet-00745-a2d9ed02.era                         19-Apr-2023 08:22           748437207
+mainnet-00746-f37e1ddf.era                         19-Apr-2023 08:22           748192145
+mainnet-00747-29066c92.era                         19-Apr-2023 08:23           781979215
+mainnet-00748-53908038.era                         19-Apr-2023 08:23           723698704
+mainnet-00749-684cd2fd.era                         19-Apr-2023 08:23           691045939
+mainnet-00750-7a7fd2fa.era                         19-Apr-2023 08:23           748106287
+mainnet-00751-4da2b768.era                         19-Apr-2023 08:24           773658982
+mainnet-00752-1a7df1c9.era                         19-Apr-2023 08:24           788092168
+mainnet-00753-9bcde975.era                         19-Apr-2023 08:24           785977479
+mainnet-00754-b82788ae.era                         19-Apr-2023 08:24           760093020
+mainnet-00755-717aa839.era                         19-Apr-2023 08:25           771476233
+mainnet-00756-0a25493c.era                         19-Apr-2023 08:25           783171376
+mainnet-00757-b67cf3b9.era                         19-Apr-2023 08:25           803741519
+mainnet-00758-a7552bde.era                         19-Apr-2023 08:26           786381467
+mainnet-00759-b17d8602.era                         23-Oct-2024 08:53           767495598
+mainnet-00760-346c731f.era                         08-May-2023 08:15           811732314
+mainnet-00761-3a46750f.era                         08-May-2023 08:15           769654991
+mainnet-00762-a9f3023b.era                         08-May-2023 08:15           799164656
+mainnet-00763-e5b74b59.era                         08-May-2023 08:15           819312199
+mainnet-00764-72c1ffea.era                         08-May-2023 08:15           785236015
+mainnet-00765-a619d810.era                         08-May-2023 08:16           769443931
+mainnet-00766-5eff964b.era                         08-May-2023 08:16           699339779
+mainnet-00767-d7e72c09.era                         08-May-2023 08:16           718356544
+mainnet-00768-62b1606b.era                         08-May-2023 08:16           685941561
+mainnet-00769-f5bd9239.era                         08-May-2023 08:16           741102370
+mainnet-00770-a8170629.era                         08-May-2023 08:16           732232248
+mainnet-00771-55405ee2.era                         08-May-2023 08:16           766719961
+mainnet-00772-ce838529.era                         08-May-2023 08:16           696550803
+mainnet-00773-d12bf8fe.era                         08-May-2023 08:16           638004868
+mainnet-00774-1c623f9f.era                         08-May-2023 08:17           701550521
+mainnet-00775-053da11e.era                         08-May-2023 08:17           637850206
+mainnet-00776-b15a45f0.era                         08-May-2023 08:17           711435225
+mainnet-00777-0a7de38a.era                         08-May-2023 08:17           696817462
+mainnet-00778-9429b8ba.era                         08-May-2023 08:17           640102478
+mainnet-00779-7db8d45b.era                         23-Oct-2024 08:53           660963999
+mainnet-00780-bb546fec.era                         23-Oct-2024 08:53           603726762
+mainnet-00781-6e8fa554.era                         23-Oct-2024 09:53           636061107
+mainnet-00782-886fa978.era                         26-Jun-2023 07:54           698085639
+mainnet-00783-0b819fcd.era                         22-May-2023 04:08           704508612
+mainnet-00784-11892522.era                         22-May-2023 04:08           747974111
+mainnet-00785-c976cc65.era                         22-May-2023 04:08           742100420
+mainnet-00786-2d6a0e03.era                         22-May-2023 04:08           716484326
+mainnet-00787-1b6c901d.era                         22-May-2023 04:08           733218903
+mainnet-00788-b468a0f5.era                         22-May-2023 04:08           790372282
+mainnet-00789-23095abc.era                         22-May-2023 04:08           791636791
+mainnet-00790-a0a71e45.era                         22-May-2023 04:08           739323435
+mainnet-00791-d5d807ad.era                         12-Jun-2023 04:21           736458814
+mainnet-00792-80e46420.era                         12-Jun-2023 04:21           741688861
+mainnet-00793-71fb8913.era                         12-Jun-2023 04:21           734708420
+mainnet-00794-d84d734d.era                         12-Jun-2023 04:21           727843645
+mainnet-00795-d2b2cac5.era                         12-Jun-2023 04:21           788850468
+mainnet-00796-2e644178.era                         12-Jun-2023 04:22           831325414
+mainnet-00797-61418b9a.era                         12-Jun-2023 04:22           826661366
+mainnet-00798-3a33e200.era                         12-Jun-2023 04:22           853766445
+mainnet-00799-4a58ec2a.era                         12-Jun-2023 04:22           787522763
+mainnet-00800-c6e8340a.era                         12-Jun-2023 04:22           815304803
+mainnet-00801-687f5501.era                         12-Jun-2023 04:22           808641205
+mainnet-00802-34f0c2a1.era                         12-Jun-2023 04:22           732331389
+mainnet-00803-358c03f1.era                         12-Jun-2023 04:22           792362847
+mainnet-00804-5b8ccf94.era                         12-Jun-2023 04:22           797548028
+mainnet-00805-eb7f02fa.era                         12-Jun-2023 04:22           843160589
+mainnet-00806-47ac87d9.era                         12-Jun-2023 04:22           827918117
+mainnet-00807-a4160018.era                         12-Jun-2023 04:22           776330252
+mainnet-00808-077bbbae.era                         26-Jun-2023 07:54           787782664
+mainnet-00809-fc590b46.era                         19-Jun-2023 07:25           774657962
+mainnet-00810-9fad0ce8.era                         19-Jun-2023 07:25           806249847
+mainnet-00811-d96da8b7.era                         19-Jun-2023 07:25           817295735
+mainnet-00812-f681f446.era                         19-Jun-2023 07:25           851551213
+mainnet-00813-a9f60881.era                         19-Jun-2023 07:25           847493844
+mainnet-00814-ee562d37.era                         19-Jun-2023 07:26           867699331
+mainnet-00815-b3c51faa.era                         26-Jun-2023 07:54           801853524
+mainnet-00816-c14b6750.era                         26-Jun-2023 07:54           826454169
+mainnet-00817-e24e816a.era                         26-Jun-2023 07:54           720958246
+mainnet-00818-ccbb3d6a.era                         26-Jun-2023 07:54           786693207
+mainnet-00819-ff60322a.era                         26-Jun-2023 07:54           843663424
+mainnet-00820-30a75a75.era                         26-Jun-2023 07:54           846784334
+mainnet-00821-0efa1511.era                         03-Jul-2023 06:02           834993174
+mainnet-00822-a21cddaf.era                         03-Jul-2023 06:02           794724029
+mainnet-00823-305cdcc3.era                         03-Jul-2023 06:02           791244731
+mainnet-00824-a45f43f3.era                         03-Jul-2023 06:03           830374545
+mainnet-00825-e20a0f4b.era                         24-Jul-2023 06:52           870819813
+mainnet-00826-f1765519.era                         24-Jul-2023 06:52           889904073
+mainnet-00827-dcef9fdb.era                         24-Jul-2023 06:53           849788323
+mainnet-00828-a511170d.era                         24-Jul-2023 06:53           847219888
+mainnet-00829-b32a05eb.era                         24-Jul-2023 06:53           800364820
+mainnet-00830-1855aad1.era                         24-Jul-2023 06:53           823425048
+mainnet-00831-c898b382.era                         24-Jul-2023 06:53           796463498
+mainnet-00832-59d05cb1.era                         24-Jul-2023 06:53           609795206
+mainnet-00833-6d17cb6a.era                         24-Jul-2023 06:53           743365995
+mainnet-00834-38769a0b.era                         24-Jul-2023 06:54           833196365
+mainnet-00835-0f60db4d.era                         24-Jul-2023 06:54           817385230
+mainnet-00836-5d08ee64.era                         24-Jul-2023 06:54           858996938
+mainnet-00837-12cdd8fb.era                         24-Jul-2023 06:54           842887915
+mainnet-00838-cb7a0ebe.era                         24-Jul-2023 06:54           861567126
+mainnet-00839-a1090041.era                         24-Jul-2023 06:54           886454606
+mainnet-00840-6c22994f.era                         24-Jul-2023 06:54           894801269
+mainnet-00841-5da55cc8.era                         24-Jul-2023 06:55           874944599
+mainnet-00842-6b3ce9b1.era                         24-Jul-2023 06:55           869624758
+mainnet-00843-34484521.era                         24-Jul-2023 06:55           878901464
+mainnet-00844-df554ead.era                         24-Jul-2023 06:55           888929443
+mainnet-00845-3cf3fd9c.era                         24-Jul-2023 06:55           865263984
+mainnet-00846-f15d5d20.era                         28-Aug-2023 05:54           800513291
+mainnet-00847-1750bf54.era                         28-Aug-2023 05:55           863504338
+mainnet-00848-e05afc8d.era                         28-Aug-2023 05:55           865939528
+mainnet-00849-ea0365ee.era                         28-Aug-2023 05:55           936527182
+mainnet-00850-8edb5d38.era                         28-Aug-2023 05:55           961064417
+mainnet-00851-9c79c091.era                         28-Aug-2023 05:55           960493868
+mainnet-00852-bc820b3a.era                         28-Aug-2023 05:56           920878392
+mainnet-00853-41123418.era                         28-Aug-2023 05:56           874193728
+mainnet-00854-95c53704.era                         28-Aug-2023 05:56          1013047460
+mainnet-00855-9ba9bed7.era                         28-Aug-2023 05:56          1002801327
+mainnet-00856-36db113f.era                         28-Aug-2023 05:56           930772310
+mainnet-00857-81495dc4.era                         28-Aug-2023 05:57           928767906
+mainnet-00858-61cad6c7.era                         28-Aug-2023 05:57          1024750253
+mainnet-00859-71624f28.era                         28-Aug-2023 05:57           948010475
+mainnet-00860-c23f7553.era                         28-Aug-2023 05:57           893907661
+mainnet-00861-071d021f.era                         28-Aug-2023 05:57          1032824345
+mainnet-00862-b91d3e3a.era                         28-Aug-2023 05:58          1027170593
+mainnet-00863-eb429e36.era                         28-Aug-2023 05:58          1016463348
+mainnet-00864-dc3b08a7.era                         28-Aug-2023 05:58           998284761
+mainnet-00865-c813618a.era                         28-Aug-2023 05:58           925250701
+mainnet-00866-25d91f5b.era                         28-Aug-2023 05:58           925584230
+mainnet-00867-252a551c.era                         28-Aug-2023 05:59          1004699688
+mainnet-00868-f1e84f1d.era                         28-Aug-2023 05:59          1113213581
+mainnet-00869-3f1d459f.era                         28-Aug-2023 05:59           988767048
+mainnet-00870-f2f2246b.era                         28-Aug-2023 05:59           992239416
+mainnet-00871-95e94b4f.era                         28-Aug-2023 06:00           973954184
+mainnet-00872-c4a496bb.era                         28-Aug-2023 06:00           989147665
+mainnet-00873-bd6c439c.era                         28-Aug-2023 06:00           993197217
+mainnet-00874-af19b48f.era                         28-Aug-2023 06:00           947911538
+mainnet-00875-0304307b.era                         28-Aug-2023 06:01           986503543
+mainnet-00876-03608796.era                         28-Aug-2023 06:01           977040266
+mainnet-00877-ce9dc617.era                         28-Aug-2023 06:01          1008605054
+mainnet-00878-2464e02b.era                         28-Aug-2023 06:01           948756741
+mainnet-00879-749540de.era                         04-Sep-2023 05:20           987031605
+mainnet-00880-b8a32060.era                         04-Sep-2023 05:21           975224999
+mainnet-00881-9a729fe9.era                         04-Sep-2023 05:21           960216853
+mainnet-00882-e883a202.era                         04-Sep-2023 05:21           998614925
+mainnet-00883-1902db49.era                         04-Sep-2023 05:21           931229313
+mainnet-00884-76aca2de.era                         04-Sep-2023 05:21           929360587
+mainnet-00885-707e06be.era                         11-Sep-2023 06:18           969228491
+mainnet-00886-c13b6b5d.era                         11-Sep-2023 06:18           939747038
+mainnet-00887-858429f8.era                         11-Sep-2023 06:18          1012058655
+mainnet-00888-1f9352e9.era                         11-Sep-2023 06:19           957342620
+mainnet-00889-49737ed7.era                         11-Sep-2023 06:19           980547424
+mainnet-00890-3916ff3c.era                         11-Sep-2023 06:19           991893360
+mainnet-00891-67728d98.era                         11-Sep-2023 06:19          1038889719
+mainnet-00892-b232c174.era                         18-Sep-2023 03:34          1086005596
+mainnet-00893-f90a505f.era                         18-Sep-2023 03:34          1082256932
+mainnet-00894-eabfbb06.era                         18-Sep-2023 03:34          1088774632
+mainnet-00895-79bf766b.era                         18-Sep-2023 03:34          1096573668
+mainnet-00896-173310df.era                         18-Sep-2023 03:35          1068353154
+mainnet-00897-5e9be9f3.era                         18-Sep-2023 03:35          1042973196
+mainnet-00898-2553521a.era                         25-Sep-2023 10:56          1067119795
+mainnet-00899-1c8b3d15.era                         25-Sep-2023 10:56          1089136275
+mainnet-00900-00d57a1e.era                         25-Sep-2023 10:56          1058179549
+mainnet-00901-79958936.era                         25-Sep-2023 10:57          1060800841
+mainnet-00902-ffaab08b.era                         25-Sep-2023 10:57          1014455552
+mainnet-00903-8f050474.era                         25-Sep-2023 10:57           994441909
+mainnet-00904-f604b6ff.era                         02-Oct-2023 01:52          1070009820
+mainnet-00905-c52945c7.era                         02-Oct-2023 01:53          1061634407
+mainnet-00906-061e9667.era                         02-Oct-2023 01:53          1117623482
+mainnet-00907-f9b9dd13.era                         02-Oct-2023 01:53          1108227037
+mainnet-00908-6f17480f.era                         02-Oct-2023 01:54          1083971925
+mainnet-00909-fc28b7d0.era                         02-Oct-2023 01:54          1104203793
+mainnet-00910-9c549b9e.era                         09-Oct-2023 07:35          1072357272
+mainnet-00911-0088390c.era                         09-Oct-2023 07:35          1017560114
+mainnet-00912-1e0ddb7e.era                         09-Oct-2023 07:35          1034319951
+mainnet-00913-82c83bad.era                         09-Oct-2023 07:36          1022458198
+mainnet-00914-fa6f3f38.era                         09-Oct-2023 07:36          1063728193
+mainnet-00915-48eea553.era                         09-Oct-2023 07:36          1051356032
+mainnet-00916-86c78c5b.era                         16-Oct-2023 06:29          1063966623
+mainnet-00917-2e0a1b0c.era                         16-Oct-2023 06:29          1069504396
+mainnet-00918-7b79051b.era                         16-Oct-2023 06:29          1066731226
+mainnet-00919-99b2052e.era                         16-Oct-2023 06:29           960708423
+mainnet-00920-43591dfc.era                         16-Oct-2023 06:30           947801041
+mainnet-00921-7a3c340e.era                         16-Oct-2023 06:30           933190266
+mainnet-00922-7377475a.era                         23-Oct-2023 08:24           981113494
+mainnet-00923-69445c1a.era                         23-Oct-2023 08:24           992049368
+mainnet-00924-cd9bfdda.era                         23-Oct-2023 08:24           989082425
+mainnet-00925-67f73440.era                         23-Oct-2023 08:24           958030180
+mainnet-00926-8d621baa.era                         23-Oct-2023 08:24           945233290
+mainnet-00927-9dda355f.era                         23-Oct-2023 08:24           974668748
+mainnet-00928-2d6ca1ee.era                         30-Oct-2023 02:05          1037368243
+mainnet-00929-db3e0a27.era                         30-Oct-2023 02:05          1034099267
+mainnet-00930-cfe42bfa.era                         30-Oct-2023 02:05          1012153045
+mainnet-00931-29738dd7.era                         30-Oct-2023 02:06           934396133
+mainnet-00932-ea471bbf.era                         30-Oct-2023 02:06           973080485
+mainnet-00933-7c28d556.era                         30-Oct-2023 02:06           986514501
+mainnet-00934-f54a6eb5.era                         06-Nov-2023 10:58           999590868
+mainnet-00935-88c8a776.era                         06-Nov-2023 10:58           962033821
+mainnet-00936-a31a161e.era                         06-Nov-2023 10:58          1022777754
+mainnet-00937-cf684168.era                         06-Nov-2023 10:58           999231170
+mainnet-00938-1c7dff58.era                         06-Nov-2023 10:58          1008648251
+mainnet-00939-17bc73f0.era                         06-Nov-2023 10:59          1062577401
+mainnet-00940-737e1682.era                         06-Nov-2023 10:59          1077754675
+mainnet-00941-491476a9.era                         13-Nov-2023 04:38          1038034285
+mainnet-00942-07a53155.era                         13-Nov-2023 04:38          1091532410
+mainnet-00943-48878dcc.era                         13-Nov-2023 04:38          1094341742
+mainnet-00944-9a036eae.era                         13-Nov-2023 04:38          1032689234
+mainnet-00945-5d2237ec.era                         13-Nov-2023 04:39           964070069
+mainnet-00946-c7db75bc.era                         13-Nov-2023 04:39          1012973886
+mainnet-00947-a6a6e278.era                         20-Nov-2023 02:21           899045300
+mainnet-00948-03585a93.era                         20-Nov-2023 02:21           967830037
+mainnet-00949-1e333c28.era                         20-Nov-2023 02:21          1015957385
+mainnet-00950-b3ae9cd0.era                         20-Nov-2023 02:21          1051942437
+mainnet-00951-9765163a.era                         20-Nov-2023 02:22          1095175148
+mainnet-00952-72cece82.era                         20-Nov-2023 02:22          1156790471
+mainnet-00953-0670fc06.era                         27-Nov-2023 05:26          1069401319
+mainnet-00954-505fa2ad.era                         27-Nov-2023 05:26          1039394824
+mainnet-00955-0d1d3fcb.era                         27-Nov-2023 05:26          1002735038
+mainnet-00956-9be3f767.era                         27-Nov-2023 05:27          1060109728
+mainnet-00957-338ee481.era                         27-Nov-2023 05:27          1116358753
+mainnet-00958-7325f013.era                         27-Nov-2023 05:27          1319322371
+mainnet-00959-1d52ab18.era                         04-Dec-2023 00:50          1156181406
+mainnet-00960-71d67d25.era                         04-Dec-2023 00:50           985678190
+mainnet-00961-e17aec30.era                         04-Dec-2023 00:50           939138939
+mainnet-00962-822fd3d5.era                         04-Dec-2023 00:51           920138170
+mainnet-00963-c2681306.era                         04-Dec-2023 00:51           921805727
+mainnet-00964-b3d63aab.era                         04-Dec-2023 00:51          1004504014
+mainnet-00965-211032a7.era                         11-Dec-2023 06:41          1000881013
+mainnet-00966-44a0d993.era                         11-Dec-2023 06:41          1059634868
+mainnet-00967-5cc56804.era                         11-Dec-2023 06:42          1072877088
+mainnet-00968-126d53bd.era                         11-Dec-2023 06:42          1011939882
+mainnet-00969-28edd9b3.era                         11-Dec-2023 06:42          1021787209
+mainnet-00970-e10ce061.era                         11-Dec-2023 06:42          1118532402
+mainnet-00971-ce26da07.era                         18-Dec-2023 00:23          1084072118
+mainnet-00972-0591765a.era                         18-Dec-2023 00:24          1154294323
+mainnet-00973-dd3c8ad9.era                         18-Dec-2023 00:24          1210776835
+mainnet-00974-b444ff6d.era                         18-Dec-2023 00:24          1093759377
+mainnet-00975-cbe02fd8.era                         18-Dec-2023 00:24          1261295440
+mainnet-00976-8c7535a1.era                         18-Dec-2023 00:25          1449783125
+mainnet-00977-f4e2ae07.era                         25-Dec-2023 05:37          1098889569
+mainnet-00978-07d55b2d.era                         25-Dec-2023 05:37          1230591248
+mainnet-00979-cd58b1ff.era                         25-Dec-2023 05:37          1119476799
+mainnet-00980-e5b66b9b.era                         25-Dec-2023 05:37          1123494252
+mainnet-00981-a46114eb.era                         25-Dec-2023 05:38          1096917394
+mainnet-00982-c1ac696b.era                         25-Dec-2023 05:38          1141784163
+mainnet-00983-49be149e.era                         25-Dec-2023 05:38          1051124967
+mainnet-00984-ff09ec7c.era                         01-Jan-2024 05:37          1001802855
+mainnet-00985-9b837cb4.era                         01-Jan-2024 05:37          1170029356
+mainnet-00986-8476c96b.era                         01-Jan-2024 05:38          1188343960
+mainnet-00987-942cbb5e.era                         01-Jan-2024 05:38          1071867821
+mainnet-00988-3e89d0df.era                         01-Jan-2024 05:38          1121008991
+mainnet-00989-98aed740.era                         01-Jan-2024 05:38          1149541746
+mainnet-00990-1b287ed5.era                         08-Jan-2024 09:58          1212922019
+mainnet-00991-b38b7ba6.era                         08-Jan-2024 09:58          1240474182
+mainnet-00992-67d3603a.era                         08-Jan-2024 09:58          1133538391
+mainnet-00993-a640b201.era                         08-Jan-2024 09:58          1158266109
+mainnet-00994-583f5d58.era                         08-Jan-2024 09:59          1186421664
+mainnet-00995-d773e72c.era                         08-Jan-2024 09:59          1274344444
+mainnet-00996-e2c4f84b.era                         15-Jan-2024 11:03          1173040509
+mainnet-00997-524f39b6.era                         15-Jan-2024 11:04          1132459955
+mainnet-00998-139c3883.era                         15-Jan-2024 11:04          1103865541
+mainnet-00999-e3244e9f.era                         15-Jan-2024 11:04          1104175515
+mainnet-01000-910e16c9.era                         15-Jan-2024 11:05          1147011072
+mainnet-01001-8299ddd3.era                         15-Jan-2024 11:05          1107449257
+mainnet-01002-59aa0262.era                         22-Jan-2024 08:51          1115920291
+mainnet-01003-b2c4b5dd.era                         22-Jan-2024 08:51          1116754609
+mainnet-01004-5a2289c2.era                         22-Jan-2024 08:52          1018162781
+mainnet-01005-f8b3b4ac.era                         22-Jan-2024 08:52           953066997
+mainnet-01006-8a6c02e1.era                         22-Jan-2024 08:52          1038274490
+mainnet-01007-0c1f2eed.era                         22-Jan-2024 08:52          1086183652
+mainnet-01008-eade9397.era                         29-Jan-2024 08:34          1076999731
+mainnet-01009-0316802b.era                         29-Jan-2024 08:34          1119043127
+mainnet-01010-0ea1ad78.era                         29-Jan-2024 08:34          1146914051
+mainnet-01011-aafe82a9.era                         29-Jan-2024 08:34          1064045432
+mainnet-01012-1c42bdef.era                         29-Jan-2024 08:35          1092291197
+mainnet-01013-9ff54825.era                         29-Jan-2024 08:35          1078983389
+mainnet-01014-e6c61a31.era                         29-Jan-2024 08:35          1142431766
+mainnet-01015-bfc01f9e.era                         05-Feb-2024 03:18          1153173765
+mainnet-01016-fc3f4ab7.era                         05-Feb-2024 03:18          1189501622
+mainnet-01017-8cd8ff7d.era                         05-Feb-2024 03:18          1123925962
+mainnet-01018-8c39c89c.era                         05-Feb-2024 03:18          1028958177
+mainnet-01019-ca20443e.era                         05-Feb-2024 03:19          1062677717
+mainnet-01020-50403439.era                         05-Feb-2024 03:19          1067064150
+mainnet-01021-d749f8c9.era                         12-Feb-2024 05:50          1165956249
+mainnet-01022-acdc946a.era                         12-Feb-2024 05:50          1054510834
+mainnet-01023-26670576.era                         12-Feb-2024 05:50          1003807420
+mainnet-01024-13bd2903.era                         12-Feb-2024 05:51           889313434
+mainnet-01025-19f6a400.era                         12-Feb-2024 05:51           929681985
+mainnet-01026-58b77a49.era                         12-Feb-2024 05:51          1019166538
+mainnet-01027-96b7bd78.era                         19-Feb-2024 08:58          1063375691
+mainnet-01028-50b3c58d.era                         19-Feb-2024 08:59          1233880239
+mainnet-01029-36c00fed.era                         19-Feb-2024 08:59          1248655386
+mainnet-01030-a8554c62.era                         19-Feb-2024 08:59          1102776535
+mainnet-01031-dbdc62f7.era                         19-Feb-2024 09:00          1174094509
+mainnet-01032-c8f1bcb1.era                         19-Feb-2024 09:00          1222359858
+mainnet-01033-63bf38fa.era                         26-Feb-2024 03:47          1189562472
+mainnet-01034-fbe9d06e.era                         26-Feb-2024 03:48          1257320716
+mainnet-01035-e94d1a72.era                         26-Feb-2024 03:48          1379355048
+mainnet-01036-118b7186.era                         26-Feb-2024 03:48          1192744870
+mainnet-01037-b28ab8ba.era                         26-Feb-2024 03:48          1263436212
+mainnet-01038-a78b7ddf.era                         26-Feb-2024 03:49          1359703022
+mainnet-01039-fde0699a.era                         04-Mar-2024 00:07          1370184165
+mainnet-01040-7d19530c.era                         04-Mar-2024 00:07          1225905386
+mainnet-01041-398d3b46.era                         04-Mar-2024 00:07          1337371389
+mainnet-01042-6364282f.era                         04-Mar-2024 00:07          1351185031
+mainnet-01043-a46193fd.era                         04-Mar-2024 00:07          1235985744
+mainnet-01044-f5ec475d.era                         04-Mar-2024 00:08          1248444632
+mainnet-01045-77d3f886.era                         11-Mar-2024 02:50          1177619028
+mainnet-01046-b3802cd3.era                         11-Mar-2024 02:51          1133430936
+mainnet-01047-0064fb20.era                         11-Mar-2024 02:51          1164750331
+mainnet-01048-808a4bf2.era                         11-Mar-2024 02:51          1088832842
+mainnet-01049-cc6db2c3.era                         11-Mar-2024 02:51          1057909023
+mainnet-01050-d1e77acc.era                         11-Mar-2024 02:51          1103115299
+mainnet-01051-7e12d77c.era                         18-Mar-2024 11:44          1075508844
+mainnet-01052-66aad1b6.era                         18-Mar-2024 11:44           996908733
+mainnet-01053-c4ee701c.era                         18-Mar-2024 11:45          1133066463
+mainnet-01054-b0075cb6.era                         18-Mar-2024 11:45           925022806
+mainnet-01055-ab2e55bf.era                         18-Mar-2024 11:45           736320894
+mainnet-01056-44385cac.era                         18-Mar-2024 11:45           774599828
+mainnet-01057-ac140e39.era                         18-Mar-2024 11:46           791355015
+mainnet-01058-dd2d45b7.era                         25-Mar-2024 08:06           754659888
+mainnet-01059-b2739649.era                         25-Mar-2024 08:07           806029022
+mainnet-01060-b72f0847.era                         25-Mar-2024 08:07           777128548
+mainnet-01061-36d851e1.era                         25-Mar-2024 08:07           806056001
+mainnet-01062-1ee83eae.era                         25-Mar-2024 08:07           824723494
+mainnet-01063-f6982971.era                         25-Mar-2024 08:07           833108880
+mainnet-01064-cba7f482.era                         01-Apr-2024 07:36           752937909
+mainnet-01065-1bfae1c4.era                         01-Apr-2024 07:36           709545628
+mainnet-01066-67f8cc77.era                         01-Apr-2024 07:37           700367434
+mainnet-01067-8ad9d021.era                         01-Apr-2024 07:37           718495314
+mainnet-01068-aea34cd4.era                         01-Apr-2024 07:37           722445322
+mainnet-01069-2a00f16a.era                         01-Apr-2024 07:37           696927002
+mainnet-01070-7616e3e2.era                         08-Apr-2024 03:51           661305125
+mainnet-01071-5304c6f7.era                         08-Apr-2024 03:51           681721953
+mainnet-01072-cb2a78df.era                         08-Apr-2024 03:52           660376625
+mainnet-01073-116cb348.era                         08-Apr-2024 03:52           667976000
+mainnet-01074-7c32320f.era                         08-Apr-2024 03:52           669149664
+mainnet-01075-9388a8c8.era                         08-Apr-2024 03:52           682192221
+mainnet-01076-7feb4130.era                         15-Apr-2024 06:45           676327136
+mainnet-01077-653370f5.era                         15-Apr-2024 06:45           665052760
+mainnet-01078-f5d64a2d.era                         15-Apr-2024 06:46           685971594
+mainnet-01079-92178e12.era                         15-Apr-2024 06:46           689813342
+mainnet-01080-3840e823.era                         15-Apr-2024 06:46           671252009
+mainnet-01081-fe751ced.era                         15-Apr-2024 06:46           672663854
+mainnet-01082-d14bce2a.era                         22-Apr-2024 08:17           684595860
+mainnet-01083-36c7d5b3.era                         22-Apr-2024 08:18           677460672
+mainnet-01084-18abd21f.era                         22-Apr-2024 08:18           656516511
+mainnet-01085-451ccf54.era                         22-Apr-2024 08:18           649935189
+mainnet-01086-902886db.era                         22-Apr-2024 08:18           648818639
+mainnet-01087-0d0e1330.era                         22-Apr-2024 08:19           638849696
+mainnet-01088-533d2a23.era                         29-Apr-2024 07:48           653432323
+mainnet-01089-93a1b601.era                         29-Apr-2024 07:49           639223723
+mainnet-01090-c4a3317c.era                         29-Apr-2024 07:49           643593872
+mainnet-01091-8e5210eb.era                         29-Apr-2024 07:49           659733631
+mainnet-01092-de317889.era                         29-Apr-2024 07:49           658703258
+mainnet-01093-b66a32cc.era                         29-Apr-2024 07:50           635378988
+mainnet-01094-1c6cb5a6.era                         29-Apr-2024 07:50           632085657
+mainnet-01095-0efdad09.era                         06-May-2024 08:44           601696789
+mainnet-01096-51bf07d3.era                         06-May-2024 08:44           667843969
+mainnet-01097-800c6f04.era                         06-May-2024 08:45           697226159
+mainnet-01098-52038cdd.era                         06-May-2024 08:45           637486045
+mainnet-01099-d2f087ab.era                         06-May-2024 08:45           659658736
+mainnet-01100-bd968114.era                         06-May-2024 08:45           641014331
+mainnet-01101-f2c1ae87.era                         13-May-2024 04:19           657663713
+mainnet-01102-e0f64801.era                         13-May-2024 04:20           669176240
+mainnet-01103-0ffe706a.era                         13-May-2024 04:20           650879139
+mainnet-01104-66ed6c0e.era                         13-May-2024 04:20           663051687
+mainnet-01105-ca7a5d0d.era                         13-May-2024 04:20           686841543
+mainnet-01106-a64ae290.era                         13-May-2024 04:21           619378837
+mainnet-01107-919c04f5.era                         20-May-2024 05:41           634477029
+mainnet-01108-db5b9be9.era                         20-May-2024 05:41           670717322
+mainnet-01109-28a2b519.era                         20-May-2024 05:41           661703006
+mainnet-01110-4ef3e14d.era                         20-May-2024 05:41           646417627
+mainnet-01111-a6faa027.era                         20-May-2024 05:42           643799941
+mainnet-01112-7045f707.era                         20-May-2024 05:42           634822727
+mainnet-01113-f38e96e1.era                         27-May-2024 04:56           629626563
+mainnet-01114-745ba36a.era                         27-May-2024 04:56           649652452
+mainnet-01115-1e8137f1.era                         27-May-2024 04:56           626563621
+mainnet-01116-40c11f73.era                         27-May-2024 04:57           646270036
+mainnet-01117-86a1e9f3.era                         27-May-2024 04:57           636281800
+mainnet-01118-8446391b.era                         27-May-2024 04:57           616328870
+mainnet-01119-9b6036f6.era                         03-Jun-2024 01:13           614347594
+mainnet-01120-bc33b830.era                         03-Jun-2024 01:14           516340226
+mainnet-01121-1b0f199a.era                         03-Jun-2024 01:14           523538313
+mainnet-01122-7365c3e8.era                         03-Jun-2024 01:14           527781479
+mainnet-01123-d026cb41.era                         03-Jun-2024 01:14           531013189
+mainnet-01124-6d94e6fd.era                         03-Jun-2024 01:14           525258226
+mainnet-01125-c344b70a.era                         10-Jun-2024 09:17           511847554
+mainnet-01126-630e4d70.era                         10-Jun-2024 09:17           523258708
+mainnet-01127-503d3141.era                         10-Jun-2024 09:18           520110861
+mainnet-01128-94aa3597.era                         10-Jun-2024 09:18           514680738
+mainnet-01129-8d1137bc.era                         10-Jun-2024 09:18           533373684
+mainnet-01130-70bf6a5c.era                         10-Jun-2024 09:18           540196002
+mainnet-01131-8aa44d86.era                         10-Jun-2024 09:18           527483662
+mainnet-01132-29970b3e.era                         17-Jun-2024 02:47           523725133
+mainnet-01133-7cbcb319.era                         17-Jun-2024 02:47           541541070
+mainnet-01134-7db2e27a.era                         17-Jun-2024 02:47           562506272
+mainnet-01135-84cd50a5.era                         17-Jun-2024 02:47           552204307
+mainnet-01136-569f04e1.era                         17-Jun-2024 02:48           542145875
+mainnet-01137-4bdc6ceb.era                         24-Jun-2024 09:11           530103758
+mainnet-01138-59d38f35.era                         24-Jun-2024 09:11           533616930
+mainnet-01139-97e78606.era                         24-Jun-2024 09:11           541950632
+mainnet-01140-f70d4869.era                         24-Jun-2024 09:12           545440235
+mainnet-01141-3a675f56.era                         24-Jun-2024 09:12           613182585
+mainnet-01142-fe5edaaa.era                         24-Jun-2024 09:12           545238653
+mainnet-01143-592c3728.era                         24-Jun-2024 09:12           519071817
+mainnet-01144-4c5f788a.era                         01-Jul-2024 00:39           544451793
+mainnet-01145-2756d4d2.era                         01-Jul-2024 00:39           537632071
+mainnet-01146-6959479b.era                         01-Jul-2024 00:39           546297936
+mainnet-01147-34a0baee.era                         01-Jul-2024 00:39           536273900
+mainnet-01148-551205ba.era                         01-Jul-2024 00:39           542295574
+mainnet-01149-90fb3502.era                         01-Jul-2024 00:40           514738750
+mainnet-01150-00aa0b36.era                         08-Jul-2024 03:46           521268023
+mainnet-01151-3aa871c3.era                         08-Jul-2024 03:46           525584362
+mainnet-01152-8e9d366a.era                         08-Jul-2024 03:46           521008287
+mainnet-01153-7665603b.era                         08-Jul-2024 03:46           544170610
+mainnet-01154-caeecd33.era                         08-Jul-2024 03:47           543122360
+mainnet-01155-23db87d4.era                         08-Jul-2024 03:47           512978887
+mainnet-01156-4786c4bc.era                         15-Jul-2024 03:33           525873269
+mainnet-01157-b4c73df5.era                         15-Jul-2024 03:33           524868204
+mainnet-01158-d19719f0.era                         15-Jul-2024 03:33           516751860
+mainnet-01159-8831acbd.era                         15-Jul-2024 03:33           524962479
+mainnet-01160-f6be8d9e.era                         15-Jul-2024 03:33           517287594
+mainnet-01161-bfe066df.era                         15-Jul-2024 03:34           515757018
+mainnet-01162-f387c372.era                         22-Jul-2024 00:09           538835657
+mainnet-01163-8433aafb.era                         22-Jul-2024 00:09           530313058
+mainnet-01164-9d014848.era                         22-Jul-2024 00:09           532985237
+mainnet-01165-9fbc60d2.era                         22-Jul-2024 00:09           533554844
+mainnet-01166-6e39e80c.era                         22-Jul-2024 00:10           525881605
+mainnet-01167-6610feb0.era                         22-Jul-2024 00:10           516969710
+mainnet-01168-d1cd2453.era                         29-Jul-2024 03:22           515827057
+mainnet-01169-d560f6b7.era                         29-Jul-2024 03:22           532758376
+mainnet-01170-e9018088.era                         29-Jul-2024 03:22           536228278
+mainnet-01171-a0885e7f.era                         29-Jul-2024 03:22           530304553
+mainnet-01172-dfdf959b.era                         29-Jul-2024 03:23           526140374
+mainnet-01173-4b4146f3.era                         29-Jul-2024 03:23           503233838
+mainnet-01174-c7cd4e84.era                         05-Aug-2024 05:27           503774924
+mainnet-01175-809f889c.era                         05-Aug-2024 05:28           522347812
+mainnet-01176-9de72b35.era                         05-Aug-2024 05:28           555634120
+mainnet-01177-c1897063.era                         05-Aug-2024 05:28           529343591
+mainnet-01178-49dbac0e.era                         05-Aug-2024 05:28           533915470
+mainnet-01179-0f0f90a4.era                         05-Aug-2024 05:28           534239576
+mainnet-01180-11a0ad82.era                         05-Aug-2024 05:29           512692069
+mainnet-01181-1b9fd83b.era                         12-Aug-2024 02:50           544589550
+mainnet-01182-9e3856d0.era                         12-Aug-2024 02:50           534654676
+mainnet-01183-595cb34b.era                         12-Aug-2024 02:50           520551561
+mainnet-01184-419044cb.era                         12-Aug-2024 02:50           520280614
+mainnet-01185-e6cfcca2.era                         12-Aug-2024 02:50           518934027
+mainnet-01186-c53bfdf9.era                         12-Aug-2024 02:51           496384503
+mainnet-01187-9bc14ac4.era                         19-Aug-2024 01:43           516388771
+mainnet-01188-a5ddb290.era                         19-Aug-2024 01:43           521884036
+mainnet-01189-3a26c876.era                         19-Aug-2024 01:43           540705955
+mainnet-01190-40f5ebfa.era                         19-Aug-2024 01:43           532503784
+mainnet-01191-61a46948.era                         19-Aug-2024 01:44           529324058
+mainnet-01192-941f360d.era                         19-Aug-2024 01:44           517036066
+mainnet-01193-413ffb33.era                         26-Aug-2024 05:30           551007340
+mainnet-01194-d3531f76.era                         26-Aug-2024 05:30           540890048
+mainnet-01195-47e5d536.era                         26-Aug-2024 05:30           525899104
+mainnet-01196-86d2ceb8.era                         26-Aug-2024 05:30           532120678
+mainnet-01197-93dfc40a.era                         26-Aug-2024 05:31           519303955
+mainnet-01198-7fa25a94.era                         26-Aug-2024 05:31           524389543
+mainnet-01199-5049e4ae.era                         02-Sep-2024 06:01           524249268
+mainnet-01200-4e63ffad.era                         02-Sep-2024 06:01           532869857
+mainnet-01201-d9ca43da.era                         02-Sep-2024 06:01           532060938
+mainnet-01202-5780d8ad.era                         02-Sep-2024 06:02           526098735
+mainnet-01203-9939c48f.era                         02-Sep-2024 06:02           524114507
+mainnet-01204-4621397b.era                         02-Sep-2024 06:02           500686660
+mainnet-01205-2550cacc.era                         09-Sep-2024 10:56           498991304
+mainnet-01206-343645e1.era                         09-Sep-2024 10:56           500950462
+mainnet-01207-4c36f600.era                         09-Sep-2024 10:56           515988706
+mainnet-01208-f34ed4d3.era                         09-Sep-2024 10:56           494922791
+mainnet-01209-22a39c65.era                         09-Sep-2024 10:57           514352398
+mainnet-01210-f8451c68.era                         09-Sep-2024 10:57           508333824
+mainnet-01211-1370f37f.era                         09-Sep-2024 10:57           506672973
+mainnet-01212-01268972.era                         16-Sep-2024 00:14           513177285
+mainnet-01213-011ff1c2.era                         16-Sep-2024 00:14           517325501
+mainnet-01214-6f717be4.era                         16-Sep-2024 00:14           513394492
+mainnet-01215-f7934ece.era                         16-Sep-2024 00:14           522925047
+mainnet-01216-1d0de988.era                         16-Sep-2024 00:15           521437045
+mainnet-01217-d1819216.era                         23-Sep-2024 09:04           516928102
+mainnet-01218-c4d201d4.era                         23-Sep-2024 09:04           526662767
+mainnet-01219-d82f4773.era                         23-Sep-2024 09:04           530528035
+mainnet-01220-c2eb4581.era                         23-Sep-2024 09:04           511189094
+mainnet-01221-f5353213.era                         23-Sep-2024 09:05           518598665
+mainnet-01222-2c5cc193.era                         23-Sep-2024 09:05           506732438
+mainnet-01223-8178f64b.era                         23-Sep-2024 09:05           509569001
+mainnet-01224-f30e85cd.era                         30-Sep-2024 11:37           504192678
+mainnet-01225-c9e90f34.era                         30-Sep-2024 11:37           501253711
+mainnet-01226-92be86f6.era                         30-Sep-2024 11:38           510124386
+mainnet-01227-497bcaf6.era                         30-Sep-2024 11:38           523961290
+mainnet-01228-f9381eb9.era                         30-Sep-2024 11:38           539758958
+mainnet-01229-a55063a3.era                         30-Sep-2024 11:38           539554804
+mainnet-01230-8d00c7e5.era                         07-Oct-2024 05:52           531327691
+mainnet-01231-58c274da.era                         07-Oct-2024 05:52           544205842
+mainnet-01232-f330b8d6.era                         07-Oct-2024 05:52           544514121
+mainnet-01233-53b80a54.era                         07-Oct-2024 05:52           543739427
+mainnet-01234-e97a19c9.era                         07-Oct-2024 05:53           537416447
+mainnet-01235-339204b8.era                         07-Oct-2024 05:53           510147622
+mainnet-01236-d5cfd238.era                         14-Oct-2024 11:39           512310651
+mainnet-01237-523a46c8.era                         14-Oct-2024 11:39           510561192
+mainnet-01238-63d410bd.era                         14-Oct-2024 11:39           507750978
+mainnet-01239-85077afb.era                         14-Oct-2024 11:40           514157553
+mainnet-01240-98ed1e81.era                         14-Oct-2024 11:40           513472871
+mainnet-01241-55252f8c.era                         14-Oct-2024 11:40           516565794
+mainnet-01242-dbecf326.era                         21-Oct-2024 04:43           512202461
+mainnet-01243-97e754b1.era                         21-Oct-2024 04:44           541157141
+mainnet-01244-d519e3a1.era                         21-Oct-2024 04:44           531181802
+mainnet-01245-8946bc4b.era                         21-Oct-2024 04:44           528295056
+mainnet-01246-952e7792.era                         21-Oct-2024 04:44           530989066
+mainnet-01247-6216c622.era                         21-Oct-2024 04:45           530583386
+mainnet-01248-faaeff0d.era                         23-Oct-2024 08:53           531889406
+mainnet-01249-23b3ce4f.era                         23-Oct-2024 09:15           540485342
+mainnet-01250-1df3dd00.era                         11-Nov-2024 02:42           527369240
+mainnet-01251-f7979d7c.era                         11-Nov-2024 02:42           519849366
+mainnet-01252-fd301068.era                         11-Nov-2024 02:42           539671179
+mainnet-01253-5c81a85e.era                         11-Nov-2024 02:42           520225292
+mainnet-01254-07b72fc2.era                         11-Nov-2024 02:42           489438274
+mainnet-01255-bc622b19.era                         11-Nov-2024 02:43           521904618
+mainnet-01256-ca7f996f.era                         11-Nov-2024 02:43           525840073
+mainnet-01257-caa30ab5.era                         11-Nov-2024 02:43           515452960
+mainnet-01258-f4f77fad.era                         11-Nov-2024 02:43           516313554
+mainnet-01259-a3365829.era                         11-Nov-2024 02:43           518067981
+mainnet-01260-e615f43c.era                         11-Nov-2024 02:44           531296276
+mainnet-01261-c092f651.era                         11-Nov-2024 02:44           549562097
+mainnet-01262-bcb8e013.era                         11-Nov-2024 02:44           533996323
+mainnet-01263-8274d1c5.era                         11-Nov-2024 02:44           547942816
+mainnet-01264-dcfa40c5.era                         11-Nov-2024 02:44           551316400
+mainnet-01265-e6064f87.era                         11-Nov-2024 02:45           543235949
+mainnet-01266-2f91212f.era                         11-Nov-2024 02:45           543702811
+mainnet-01267-e3ddc749.era                         18-Nov-2024 10:58           548089699
+mainnet-01268-ae094943.era                         18-Nov-2024 10:58           550180215
+mainnet-01269-af18ad45.era                         18-Nov-2024 10:58           545423537
+mainnet-01270-79fe06aa.era                         18-Nov-2024 10:59           543892466
+mainnet-01271-f43df910.era                         18-Nov-2024 10:59           545007035
+mainnet-01272-6cabb907.era                         18-Nov-2024 10:59           545999688
+mainnet-01273-53499c35.era                         25-Nov-2024 03:29           547941951
+mainnet-01274-42094f2a.era                         25-Nov-2024 03:29           545993070
+mainnet-01275-5dd68a9e.era                         25-Nov-2024 03:29           546352518
+mainnet-01276-c0474572.era                         25-Nov-2024 03:29           548203168
+mainnet-01277-18129ea3.era                         25-Nov-2024 03:29           561082149
+mainnet-01278-e9618417.era                         25-Nov-2024 03:30           554107010
+mainnet-01279-e84bfdac.era                         02-Dec-2024 04:37           557070417
+mainnet-01280-ccf8abdb.era                         02-Dec-2024 04:37           556983443
+mainnet-01281-e3ec741a.era                         02-Dec-2024 04:37           557638070
+mainnet-01282-316b4c12.era                         02-Dec-2024 04:37           566574873
+mainnet-01283-fc7fe135.era                         02-Dec-2024 04:37           554917172
+mainnet-01284-73c540a7.era                         02-Dec-2024 04:38           552828735
+mainnet-01285-274813ee.era                         23-Dec-2024 10:26           560002327
+mainnet-01286-e427d790.era                         23-Dec-2024 10:26           570673150
+mainnet-01287-8c566618.era                         23-Dec-2024 10:26           567255015
+mainnet-01288-970f9ef7.era                         23-Dec-2024 10:26           567912612
+mainnet-01289-68bbb574.era                         23-Dec-2024 10:27           567201986
+mainnet-01290-490e2c60.era                         23-Dec-2024 10:27           562055587
+mainnet-01291-724f5f50.era                         23-Dec-2024 10:27           565601793
+mainnet-01292-26ab701d.era                         23-Dec-2024 10:27           589837713
+mainnet-01293-9938cd29.era                         23-Dec-2024 10:28           562768281
+mainnet-01294-5009f134.era                         23-Dec-2024 10:28           568454475
+mainnet-01295-d8b9456e.era                         23-Dec-2024 10:28           557069963
+mainnet-01296-3d2e50fe.era                         23-Dec-2024 10:28           561916952
+mainnet-01297-4302ec9b.era                         23-Dec-2024 10:28           567810864
+mainnet-01298-65ac3ba4.era                         23-Dec-2024 10:29           566069376
+mainnet-01299-514a418b.era                         23-Dec-2024 10:29           584705352
+mainnet-01300-12b1cd58.era                         23-Dec-2024 10:29           568493215
+mainnet-01301-58ffae02.era                         23-Dec-2024 10:29           579609259
+mainnet-01302-ca8b3d23.era                         23-Dec-2024 10:30           567219105
+mainnet-01303-03d3c637.era                         23-Dec-2024 10:30           568036747
+mainnet-01304-597fb2d2.era                         30-Dec-2024 05:29           563733928
+mainnet-01305-e82f75eb.era                         30-Dec-2024 05:29           555917872
+mainnet-01306-e8862af0.era                         30-Dec-2024 05:29           543175566
+mainnet-01307-4ba10bd7.era                         30-Dec-2024 05:30           564966234
+mainnet-01308-41b2ae42.era                         30-Dec-2024 05:30           557444885
+mainnet-01309-b2fb856e.era                         30-Dec-2024 05:30           569689776
+mainnet-01310-717a92d9.era                         06-Jan-2025 04:29           575208992
+mainnet-01311-6d0f76d6.era                         06-Jan-2025 04:29           584106233
+mainnet-01312-0b3eef3d.era                         06-Jan-2025 04:29           480548102
+mainnet-01313-2e64295b.era                         06-Jan-2025 04:29           547496404
+mainnet-01314-8c677941.era                         06-Jan-2025 04:29           541972973
+mainnet-01315-e647d21d.era                         06-Jan-2025 04:30           537968123
+mainnet-01316-9d9bb53a.era                         13-Jan-2025 04:01           553204829
+mainnet-01317-92c1e9ab.era                         13-Jan-2025 04:01           555690541
+mainnet-01318-11ba37ba.era                         13-Jan-2025 04:01           552768393
+mainnet-01319-db8841f4.era                         13-Jan-2025 04:01           557195550
+mainnet-01320-59f1c8c0.era                         13-Jan-2025 04:02           550178409
+mainnet-01321-9d82e6dc.era                         13-Jan-2025 04:02           541631688
+mainnet-01322-6c292b86.era                         20-Jan-2025 07:02           561491048
+mainnet-01323-dbc11560.era                         20-Jan-2025 07:02           554842380
+mainnet-01324-c2221df7.era                         20-Jan-2025 07:02           567272046
+mainnet-01325-749bed39.era                         20-Jan-2025 07:02           581203487
+mainnet-01326-570a6902.era                         20-Jan-2025 07:03           569407995
+mainnet-01327-4ac78245.era                         20-Jan-2025 07:03           560130705
+mainnet-01328-362692bf.era                         27-Jan-2025 11:26           577840598
+mainnet-01329-a11e7e24.era                         27-Jan-2025 11:26           569096842
+mainnet-01330-32b046f2.era                         27-Jan-2025 11:27           571271651
+mainnet-01331-ab4b8cea.era                         27-Jan-2025 11:27           575520039
+mainnet-01332-0c621ccf.era                         27-Jan-2025 11:27           599223183
+mainnet-01333-201cd44e.era                         27-Jan-2025 11:28           599620747
+mainnet-01334-4a479a19.era                         27-Jan-2025 11:28           568687552
+mainnet-01335-381db7ff.era                         03-Feb-2025 11:38           589146849
+mainnet-01336-edbac0a0.era                         03-Feb-2025 11:38           559994615
+mainnet-01337-343a22ff.era                         03-Feb-2025 11:38           560642758
+mainnet-01338-7e85e340.era                         03-Feb-2025 11:39           560552238
+mainnet-01339-75d1c621.era                         03-Feb-2025 11:39           552870501
+mainnet-01340-429201f5.era                         03-Feb-2025 11:39           553881347
+mainnet-01341-91372206.era                         06-Apr-2025 12:52           584070656
+mainnet-01342-beed409a.era                         06-Apr-2025 12:53           613520760
+mainnet-01343-b6f6a159.era                         06-Apr-2025 12:53           596395049
+mainnet-01344-8f5487d6.era                         06-Apr-2025 12:53           603330358
+mainnet-01345-7cabd53f.era                         06-Apr-2025 12:53           575770089
+mainnet-01346-303a52b7.era                         06-Apr-2025 12:53           565215275
+mainnet-01347-3d3f2c2b.era                         06-Apr-2025 12:53           605011651
+mainnet-01348-f64fe31a.era                         06-Apr-2025 12:53           609676486
+mainnet-01349-6d74bea8.era                         06-Apr-2025 12:53           609390464
+mainnet-01350-62701c9b.era                         06-Apr-2025 12:53           620215784
+mainnet-01351-feaf38ed.era                         06-Apr-2025 12:54           599392384
+mainnet-01352-97b8f658.era                         06-Apr-2025 12:54           560595833
+mainnet-01353-4cd4ab3a.era                         06-Apr-2025 12:54           587156401
+mainnet-01354-797b839b.era                         06-Apr-2025 12:54           607490669
+mainnet-01355-7d84720c.era                         06-Apr-2025 12:54           583647685
+mainnet-01356-85bdb143.era                         06-Apr-2025 12:54           593565225
+mainnet-01357-8bf81664.era                         06-Apr-2025 12:54           635102447
+mainnet-01358-b6685f0a.era                         06-Apr-2025 12:54           558985768
+mainnet-01359-e79eb257.era                         06-Apr-2025 12:55           616124362
+mainnet-01360-8b6c330c.era                         06-Apr-2025 12:55           632658989
+mainnet-01361-8e5394d0.era                         06-Apr-2025 12:55           661831649
+mainnet-01362-fff3d653.era                         06-Apr-2025 12:55           658971016
+mainnet-01363-4548746f.era                         06-Apr-2025 12:55           686452559
+mainnet-01364-d6a5e2d9.era                         06-Apr-2025 12:55           665147593
+mainnet-01365-377bad7f.era                         06-Apr-2025 12:55           617378921
+mainnet-01366-d8771d2f.era                         06-Apr-2025 12:55           667725984
+mainnet-01367-e58a6300.era                         06-Apr-2025 12:56           635922575
+mainnet-01368-1ad08afe.era                         06-Apr-2025 12:56           638802018
+mainnet-01369-6eb67655.era                         06-Apr-2025 12:56           643699872
+mainnet-01370-de9fb9f0.era                         06-Apr-2025 12:56           593040714
+mainnet-01371-ab2dbf41.era                         06-Apr-2025 12:56           703847915
+mainnet-01372-8d09eb00.era                         06-Apr-2025 12:56           707898839
+mainnet-01373-7fdd8c83.era                         06-Apr-2025 12:56           667039238
+mainnet-01374-9fff40c4.era                         06-Apr-2025 12:57           629666777
+mainnet-01375-ecc5376f.era                         06-Apr-2025 12:57           628056691
+mainnet-01376-745e17d7.era                         06-Apr-2025 12:57           530928011
+mainnet-01377-60b83f19.era                         06-Apr-2025 12:57           590097206
+mainnet-01378-571bb72e.era                         06-Apr-2025 12:57           620967618
+mainnet-01379-a41cab48.era                         06-Apr-2025 12:57           642269678
+mainnet-01380-6e279ed9.era                         06-Apr-2025 12:57           613440985
+mainnet-01381-4005cb17.era                         06-Apr-2025 12:57           634050200
+mainnet-01382-5f9d6c83.era                         06-Apr-2025 12:57           540254112
+mainnet-01383-daa6e61b.era                         06-Apr-2025 12:58           578923306
+mainnet-01384-720d50f2.era                         06-Apr-2025 12:58           709219856
+mainnet-01385-cce9515a.era                         06-Apr-2025 12:58           651555256
+mainnet-01386-1dcb024a.era                         06-Apr-2025 12:58           668712337
+mainnet-01387-d94a5dfa.era                         06-Apr-2025 12:58           680710876
+mainnet-01388-dfc329a8.era                         06-Apr-2025 12:58           668537632
+mainnet-01389-dd7ff8fc.era                         06-Apr-2025 12:58           650269127
+mainnet-01390-427f2bda.era                         14-Apr-2025 09:54           679348752
+mainnet-01391-54f9e228.era                         14-Apr-2025 09:55           724231691
+mainnet-01392-bc55bc76.era                         14-Apr-2025 09:55           662825273
+mainnet-01393-35550d53.era                         14-Apr-2025 09:55           611592141
+mainnet-01394-cc3f3451.era                         14-Apr-2025 09:55           559951662
+mainnet-01395-d11df78c.era                         14-Apr-2025 09:55           528602637
+mainnet-01396-af34f6ac.era                         14-Apr-2025 09:55           636722722
+mainnet-01397-0e9fe959.era                         14-Apr-2025 09:55           624047192
+mainnet-01398-4737632a.era                         14-Apr-2025 09:55           625220963
+mainnet-01399-a2a5d13d.era                         14-Apr-2025 09:55           608433250
+mainnet-01400-1546988d.era                         14-Apr-2025 09:56           573436131
+mainnet-01401-95ccdec7.era                         14-Apr-2025 09:56           556839696
+mainnet-01402-89879954.era                         05-May-2025 08:01           587819452
+mainnet-01403-fa4d83f9.era                         05-May-2025 08:02           608654180
+mainnet-01404-bf0d1bbb.era                         05-May-2025 08:03           645193927
+mainnet-01405-af3f0440.era                         05-May-2025 08:03           768950346
+mainnet-01406-cf9da469.era                         05-May-2025 08:04           670386062
+mainnet-01407-2099e5d4.era                         05-May-2025 08:05           529249674
+mainnet-01408-ab7b93b7.era                         28-Apr-2025 01:51           545897544
+mainnet-01409-f68d5c35.era                         28-Apr-2025 01:51           611928991
+mainnet-01410-bf961c25.era                         28-Apr-2025 01:52           873112623
+mainnet-01411-92f93908.era                         28-Apr-2025 01:52           656827871
+mainnet-01412-7cd3a771.era                         28-Apr-2025 01:52           680537261
+mainnet-01413-01118cd1.era                         28-Apr-2025 01:52           656467793
+mainnet-01414-49e4139b.era                         06-May-2025 08:47           617730158
+mainnet-01415-f08713a7.era                         05-May-2025 06:33           649952485
+mainnet-01416-f420a3d7.era                         05-May-2025 06:33           645972098
+mainnet-01417-66e4d377.era                         05-May-2025 06:33           688283748
+mainnet-01418-a8467695.era                         05-May-2025 06:34           618649783
+mainnet-01419-5115573b.era                         05-May-2025 06:34           539700184
+mainnet-01420-38049ee3.era                         05-May-2025 06:34           559203395
+mainnet-01421-67215600.era                         12-May-2025 10:09           587519805
+mainnet-01422-566301aa.era                         12-May-2025 10:09           621356800
+mainnet-01423-d22ac6b8.era                         12-May-2025 10:10           501011043
+mainnet-01424-efb3fed0.era                         12-May-2025 10:10           547732826
+mainnet-01425-472ebfbc.era                         12-May-2025 10:10           508308324
+mainnet-01426-91c877bd.era                         12-May-2025 10:10           500999212
+mainnet-01427-a9a10326.era                         19-May-2025 05:31           511750853
+mainnet-01428-5d919b2e.era                         19-May-2025 05:31           518481386
+mainnet-01429-951203d8.era                         19-May-2025 05:31           515173938
+mainnet-01430-51f933fc.era                         19-May-2025 05:32           510685780
+mainnet-01431-fa942f94.era                         19-May-2025 05:32           506642295
+mainnet-01432-a0634156.era                         19-May-2025 05:32           477210982
+mainnet-01433-ed3ba647.era                         26-May-2025 02:13           506643773
+mainnet-01434-80a6e7c7.era                         26-May-2025 02:13           512992126
+mainnet-01435-12527fcc.era                         26-May-2025 02:14           508901306
+mainnet-01436-96e5270c.era                         26-May-2025 02:14           510014514
+mainnet-01437-4dd8dc46.era                         26-May-2025 02:14           505884905
+mainnet-01438-b7868426.era                         26-May-2025 02:14           485054203
+mainnet-01439-e67413f7.era                         02-Jun-2025 03:09           507507669
+mainnet-01440-5726ebd1.era                         02-Jun-2025 03:09           508670677
+mainnet-01441-8ed2e608.era                         02-Jun-2025 03:10           513917269
+mainnet-01442-91b4aac0.era                         02-Jun-2025 03:10           509790376
+mainnet-01443-8ac15c95.era                         02-Jun-2025 03:10           537040991
+mainnet-01444-a9085580.era                         02-Jun-2025 03:10           523360692
+mainnet-01445-11a035bf.era                         09-Jun-2025 04:38           511355933
+mainnet-01446-8710b8df.era                         09-Jun-2025 04:38           515325340
+mainnet-01447-042230e7.era                         09-Jun-2025 04:38           506901702
+mainnet-01448-7cded86d.era                         09-Jun-2025 04:38           514607852
+mainnet-01449-1aacbb08.era                         09-Jun-2025 04:39           509106002
+mainnet-01450-bf830a58.era                         09-Jun-2025 04:39           503644503
+mainnet-01451-c8ad2f2e.era                         16-Jun-2025 05:26           512352082
+mainnet-01452-cb2a3d0d.era                         16-Jun-2025 05:26           533889445
+mainnet-01453-96b64568.era                         16-Jun-2025 05:26           521220735
+mainnet-01454-b9341b25.era                         16-Jun-2025 05:27           524077240
+mainnet-01455-74a1f4c5.era                         16-Jun-2025 05:27           525673121
+mainnet-01456-08e96075.era                         16-Jun-2025 05:27           497184635
+mainnet-01457-3c8f6ef7.era                         23-Jun-2025 06:23           488037068
+mainnet-01458-5d746467.era                         23-Jun-2025 06:23           532077341
+mainnet-01459-41f6f1cf.era                         23-Jun-2025 06:23           531398644
+mainnet-01460-4aedf6f7.era                         23-Jun-2025 06:23           520741368
+mainnet-01461-9fc63162.era                         23-Jun-2025 06:23           521918826
+mainnet-01462-a65baa7d.era                         23-Jun-2025 06:24           504989708
+mainnet-01463-71cd98bb.era                         23-Jun-2025 06:24           502781197
+mainnet-01464-65d55b17.era                         30-Jun-2025 04:26           518142408
+mainnet-01465-ec236506.era                         30-Jun-2025 04:26           593335981
+mainnet-01466-1b573477.era                         30-Jun-2025 04:26           577705534
+mainnet-01467-373863fb.era                         30-Jun-2025 04:26           545465708
+mainnet-01468-3186aea5.era                         30-Jun-2025 04:27           509381337
+mainnet-01469-cb46aff4.era                         30-Jun-2025 04:27           491401479
+mainnet-01470-87d4bf6c.era                         07-Jul-2025 02:50           531057855
+mainnet-01471-c1909436.era                         07-Jul-2025 02:50           539567050
+mainnet-01472-6c492b68.era                         07-Jul-2025 02:50           532392791
+mainnet-01473-6a7890dc.era                         07-Jul-2025 02:50           537249038
+mainnet-01474-bd1b54bd.era                         07-Jul-2025 02:51           504579030
+mainnet-01475-0753f1d0.era                         07-Jul-2025 02:51           485973803
+mainnet-01476-ebebbd78.era                         14-Jul-2025 03:40           522532725
+mainnet-01477-3d63c65e.era                         14-Jul-2025 03:40           533358429
+mainnet-01478-6266ab2a.era                         14-Jul-2025 03:41           537264749
+mainnet-01479-19fbd7cf.era                         14-Jul-2025 03:41           541010085
+mainnet-01480-73549677.era                         14-Jul-2025 03:41           543304813
+mainnet-01481-3ea05fea.era                         14-Jul-2025 03:41           522750008
+mainnet-01482-5c2a345e.era                         21-Jul-2025 01:17           531863029
+mainnet-01483-3fc48cd4.era                         21-Jul-2025 01:17           537582790
+mainnet-01484-f8aa993b.era                         21-Jul-2025 01:17           537786130
+mainnet-01485-5c02747c.era                         21-Jul-2025 01:17           542329685
+mainnet-01486-ef245ecc.era                         21-Jul-2025 01:18           548678220
+mainnet-01487-c4bcbb92.era                         21-Jul-2025 01:18           532909638
+mainnet-01488-4283192d.era                         28-Jul-2025 04:49           551272646
+mainnet-01489-382b8330.era                         28-Jul-2025 04:49           619160048
+mainnet-01490-55e9e78e.era                         28-Jul-2025 04:49           616392325
+mainnet-01491-221c924e.era                         28-Jul-2025 04:50           617171399
+mainnet-01492-b7151c51.era                         28-Jul-2025 04:50           609245440
+mainnet-01493-76fa01be.era                         28-Jul-2025 04:50           555349101
+mainnet-01494-e6fa99c8.era                         04-Aug-2025 07:55           598647842
+mainnet-01495-57543dcf.era                         04-Aug-2025 07:55           614211341
+mainnet-01496-c23b15bb.era                         04-Aug-2025 07:55           589009100
+mainnet-01497-a07e2a40.era                         04-Aug-2025 07:55           572260943
+mainnet-01498-4c868960.era                         04-Aug-2025 07:56           629190715
+mainnet-01499-6a6a20ff.era                         04-Aug-2025 07:56           590685890
+mainnet-01500-057b6b26.era                         04-Aug-2025 07:56           556171743
+mainnet-01501-f6f87b12.era                         11-Aug-2025 08:49           597333684
+mainnet-01502-d4ffeca9.era                         11-Aug-2025 08:49           667039182
+mainnet-01503-dab29b31.era                         11-Aug-2025 08:49           625552613
+mainnet-01504-212f7e5d.era                         11-Aug-2025 08:49           639827455
+mainnet-01505-7dcbdfcc.era                         11-Aug-2025 08:50           630276257
+mainnet-01506-4781865b.era                         11-Aug-2025 08:50           617615852
+mainnet-01507-71a0d0ed.era                         18-Aug-2025 02:39           616329885
+mainnet-01508-25703831.era                         18-Aug-2025 02:40           626754368
+mainnet-01509-8e00415c.era                         18-Aug-2025 02:40           634867114
+mainnet-01510-2e4b9a11.era                         18-Aug-2025 02:40           641222043
+mainnet-01511-da173f04.era                         18-Aug-2025 02:40           614196467
+mainnet-01512-0a49b96e.era                         18-Aug-2025 02:41           592189452
+mainnet-01513-df34c479.era                         25-Aug-2025 09:53           613528021
+mainnet-01514-5edd8c1a.era                         25-Aug-2025 09:53           633913654
+mainnet-01515-11eb351b.era                         25-Aug-2025 09:53           617540268
+mainnet-01516-8670fdc7.era                         25-Aug-2025 09:54           608231671
+mainnet-01517-77bf4dd2.era                         25-Aug-2025 09:54           628744521
+mainnet-01518-fe90de5a.era                         25-Aug-2025 09:54           598496583
+mainnet-01519-ce776aa1.era                         01-Sep-2025 08:26           640576960
+mainnet-01520-32722bfb.era                         01-Sep-2025 08:27           717198343
+mainnet-01521-0e71ad4d.era                         01-Sep-2025 08:27           760259031
+mainnet-01522-50583e4f.era                         01-Sep-2025 08:27           675462440
+mainnet-01523-910ad536.era                         01-Sep-2025 08:28           613764353
+mainnet-01524-ea8f0b5e.era                         01-Sep-2025 08:28           575625087
+mainnet-01525-d62de99b.era                         08-Sep-2025 01:43           669359119
+mainnet-01526-391b5e5a.era                         08-Sep-2025 01:44           653964184
+mainnet-01527-01489b26.era                         08-Sep-2025 01:44           675542055
+mainnet-01528-e9bc5b30.era                         08-Sep-2025 01:44           711891996
+mainnet-01529-3798eb16.era                         08-Sep-2025 01:44           706257201
+mainnet-01530-0accb22b.era                         08-Sep-2025 01:45           653919202
+mainnet-01531-8d911990.era                         15-Sep-2025 07:55           650968739
+mainnet-01532-f43a2fb4.era                         15-Sep-2025 07:55           709114767
+mainnet-01533-ab0d4f67.era                         15-Sep-2025 07:55           668699214
+mainnet-01534-5f9378bf.era                         15-Sep-2025 07:56           676501449
+mainnet-01535-80360bd2.era                         15-Sep-2025 07:56           674695008
+mainnet-01536-0e0b783e.era                         15-Sep-2025 07:56           638043802
+mainnet-01537-96d14cf5.era                         15-Sep-2025 07:57           606845395
+mainnet-01538-2c9f9367.era                         22-Sep-2025 05:55           662375155
+mainnet-01539-c5fd437f.era                         22-Sep-2025 05:55           648392361
+mainnet-01540-3118fe4e.era                         22-Sep-2025 05:55           650881186
+mainnet-01541-44b4fcdd.era                         22-Sep-2025 05:56           643553635
+mainnet-01542-462370e1.era                         22-Sep-2025 05:56           650929390
+mainnet-01543-eb979c5c.era                         22-Sep-2025 05:56           580550163
+mainnet-01544-03c55703.era                         29-Sep-2025 04:05           635803552
+mainnet-01545-b9858945.era                         29-Sep-2025 04:06           660884266
+mainnet-01546-fbb1d2b1.era                         29-Sep-2025 04:06           642186847
+mainnet-01547-80f87b05.era                         29-Sep-2025 04:06           640847272
+mainnet-01548-296c79ff.era                         29-Sep-2025 04:06           665760701
+mainnet-01549-009bc358.era                         29-Sep-2025 04:07           611997495
+mainnet-01550-cee52de0.era                         06-Oct-2025 02:26           638819034
+mainnet-01551-4f9d2662.era                         06-Oct-2025 02:26           644511364
+mainnet-01552-39e1ab4f.era                         06-Oct-2025 02:26           652348099
+mainnet-01553-a4438806.era                         06-Oct-2025 02:26           642283660
+mainnet-01554-a1d21712.era                         06-Oct-2025 02:27           609334280
+mainnet-01555-0f6bb064.era                         06-Oct-2025 02:27           580638372
+mainnet-01556-4874cc8c.era                         13-Oct-2025 10:53           606856176
+mainnet-01557-169c265e.era                         13-Oct-2025 10:53           652740240
+mainnet-01558-d2a546b1.era                         13-Oct-2025 10:53           629692121
+mainnet-01559-0297ac02.era                         13-Oct-2025 10:54           628998706
+mainnet-01560-60670459.era                         13-Oct-2025 10:54           651533419
+mainnet-01561-1c0d3738.era                         13-Oct-2025 10:54           633349455
+mainnet-01562-19638cd7.era                         20-Oct-2025 08:51           630590339
+mainnet-01563-83425e17.era                         20-Oct-2025 08:52           634230055
+mainnet-01564-be3fb544.era                         20-Oct-2025 08:53           633587281
+mainnet-01565-70a30ac2.era                         20-Oct-2025 08:54           657461325
+mainnet-01566-889bd753.era                         20-Oct-2025 08:55           642401052
+mainnet-01567-5dab1697.era                         20-Oct-2025 08:55           617456928
+mainnet-01568-151718cf.era                         22-Oct-2025 13:10           586683223
+mainnet-01569-a8d582c2.era                         22-Oct-2025 13:10           589552271
+mainnet-01570-52db1f8f.era                         27-Oct-2025 05:26           611340268
+mainnet-01571-871da102.era                         27-Oct-2025 05:26           594561772
+mainnet-01572-e04875db.era                         27-Oct-2025 05:27           579162292
+mainnet-01573-75eddab6.era                         27-Oct-2025 05:27           522297441
+mainnet-01574-f80ba4cd.era                         03-Nov-2025 03:34           618565234
+mainnet-01575-1fc558d2.era                         03-Nov-2025 03:34           620994360
+mainnet-01576-e32a9cb4.era                         03-Nov-2025 03:34           617223135
+mainnet-01577-9c577cdb.era                         03-Nov-2025 03:34           649445555
+mainnet-01578-5f70f2e2.era                         03-Nov-2025 03:35           656459253
+mainnet-01579-ab2ba57f.era                         03-Nov-2025 03:35           568585524
+mainnet-01580-bedd6a6e.era                         10-Nov-2025 01:14           604540958
+mainnet-01581-82073d28.era                         10-Nov-2025 01:14           669364186
+mainnet-01582-f32c6826.era                         10-Nov-2025 01:15           636571133
+mainnet-01583-cbb3b80b.era                         10-Nov-2025 01:15           625837882
+mainnet-01584-357790d1.era                         10-Nov-2025 01:15           615795824
+mainnet-01585-99fd7574.era                         10-Nov-2025 01:15           645204626
+mainnet-01586-3a3b5e27.era                         10-Nov-2025 01:16           617120440
+

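The `.era` names listed above follow the `<network>-<era-number>-<hash>` scheme that the naming helpers introduced later in this diff (`EraFileId::to_file_name` together with `EraId` and `Era1Id`) generate. The snippet below is an illustrative sketch only, not part of the patch: it mirrors the inputs and expected file names from the unit tests added in `era/types/group.rs` and `era1/types/group.rs`, and assumes the new `reth_era::{common, era, era1}` module layout introduced by this change.

```rust
// Illustrative only: exercises the file-naming helpers added in this diff,
// using the same inputs and expected names as its unit tests.
use reth_era::{
    common::file_ops::EraFileId, // trait providing `to_file_name`
    era::types::group::EraId,
    era1::types::group::Era1Id,
};

fn main() {
    // Consensus-layer `.era` files: `<network>-<era-number>-<hash>.era`
    let era = EraId::new("mainnet", 0, 8192).with_hash([0x4b, 0x36, 0x3d, 0xb9]);
    assert_eq!(era.to_file_name(), "mainnet-00000-4b363db9.era");

    // Custom exports include the era count: `<network>-<era-number>-<era-count>-<hash>.era`.
    // Slots 8000..8500 straddle the 8192-slot boundary, so this file spans two eras.
    let spanning = EraId::new("mainnet", 8000, 500)
        .with_hash([0xab, 0xcd, 0xef, 0x12])
        .with_era_count();
    assert_eq!(spanning.to_file_name(), "mainnet-00000-00002-abcdef12.era");

    // Execution-layer `.era1` files use the same scheme via the shared `EraFileId` trait.
    let era1 = Era1Id::new("mainnet", 0, 8192)
        .with_hash([0x5e, 0xc1, 0xff, 0xb8])
        .with_era_count();
    assert_eq!(era1.to_file_name(), "mainnet-00000-00001-5ec1ffb8.era1");
}
```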
+ diff --git a/crates/era-downloader/tests/res/mainnet-00000-5ec1ffb8.era1 b/crates/era-downloader/tests/res/era1-files/mainnet-00000-5ec1ffb8.era1 similarity index 100% rename from crates/era-downloader/tests/res/mainnet-00000-5ec1ffb8.era1 rename to crates/era-downloader/tests/res/era1-files/mainnet-00000-5ec1ffb8.era1 diff --git a/crates/era-downloader/tests/res/mainnet-00001-a5364e9a.era1 b/crates/era-downloader/tests/res/era1-files/mainnet-00001-a5364e9a.era1 similarity index 100% rename from crates/era-downloader/tests/res/mainnet-00001-a5364e9a.era1 rename to crates/era-downloader/tests/res/era1-files/mainnet-00001-a5364e9a.era1 diff --git a/crates/era-downloader/tests/res/ithaca.html b/crates/era-downloader/tests/res/era1-ithaca.html similarity index 100% rename from crates/era-downloader/tests/res/ithaca.html rename to crates/era-downloader/tests/res/era1-ithaca.html diff --git a/crates/era-downloader/tests/res/nimbus.html b/crates/era-downloader/tests/res/era1-nimbus.html similarity index 100% rename from crates/era-downloader/tests/res/nimbus.html rename to crates/era-downloader/tests/res/era1-nimbus.html diff --git a/crates/era-utils/src/export.rs b/crates/era-utils/src/export.rs index 6ccdba2426..db8538d3c4 100644 --- a/crates/era-utils/src/export.rs +++ b/crates/era-utils/src/export.rs @@ -6,13 +6,17 @@ use alloy_consensus::BlockHeader; use alloy_primitives::{BlockNumber, B256, U256}; use eyre::{eyre, Result}; use reth_era::{ - e2s_types::IndexEntry, - era1_file::Era1Writer, - era1_types::{BlockIndex, Era1Id}, - era_file_ops::{EraFileId, StreamWriter}, - execution_types::{ - Accumulator, BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, - TotalDifficulty, MAX_BLOCKS_PER_ERA1, + common::file_ops::{EraFileId, StreamWriter}, + e2s::types::IndexEntry, + era1::{ + file::Era1Writer, + types::{ + execution::{ + Accumulator, BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, + TotalDifficulty, MAX_BLOCKS_PER_ERA1, + }, + group::{BlockIndex, Era1Id}, + }, }, }; use reth_fs_util as fs; @@ -146,6 +150,12 @@ where let era1_id = Era1Id::new(&config.network, start_block, block_count as u32) .with_hash(historical_root); + let era1_id = if config.max_blocks_per_file == MAX_BLOCKS_PER_ERA1 as u64 { + era1_id + } else { + era1_id.with_era_count() + }; + debug!("Final file name {}", era1_id.to_file_name()); let file_path = config.dir.join(era1_id.to_file_name()); let file = std::fs::File::create(&file_path)?; @@ -215,12 +225,12 @@ where writer.write_accumulator(&accumulator)?; writer.write_block_index(&block_index)?; writer.flush()?; - created_files.push(file_path.clone()); info!( target: "era::history::export", "Wrote ERA1 file: {file_path:?} with {blocks_written} blocks" ); + created_files.push(file_path); } } @@ -306,7 +316,7 @@ where #[cfg(test)] mod tests { use crate::ExportConfig; - use reth_era::execution_types::MAX_BLOCKS_PER_ERA1; + use reth_era::era1::types::execution::MAX_BLOCKS_PER_ERA1; use tempfile::tempdir; #[test] diff --git a/crates/era-utils/src/history.rs b/crates/era-utils/src/history.rs index 58d5e383c3..c3e58a060e 100644 --- a/crates/era-utils/src/history.rs +++ b/crates/era-utils/src/history.rs @@ -9,11 +9,12 @@ use reth_db_api::{ RawKey, RawTable, RawValue, }; use reth_era::{ - e2s_types::E2sError, - era1_file::{BlockTupleIterator, Era1Reader}, - era_file_ops::StreamReader, - execution_types::BlockTuple, - DecodeCompressed, + common::{decode::DecodeCompressedRlp, file_ops::StreamReader}, + e2s::error::E2sError, + era1::{ + 
file::{BlockTupleIterator, Era1Reader}, + types::execution::BlockTuple, + }, }; use reth_era_downloader::EraMeta; use reth_etl::Collector; @@ -115,7 +116,7 @@ where /// these stages that this work has already been done. Otherwise, there might be some conflict with /// database integrity. pub fn save_stage_checkpoints

( - provider: &P, + provider: P, from: BlockNumber, to: BlockNumber, processed: u64, @@ -251,7 +252,7 @@ where /// Extracts block headers and bodies from `iter` and appends them using `writer` and `provider`. /// -/// Adds on to `total_difficulty` and collects hash to height using `hash_collector`. +/// Collects hash to height using `hash_collector`. /// /// Skips all blocks below the [`start_bound`] of `block_numbers` and stops when reaching past the /// [`end_bound`] or the end of the file. @@ -308,7 +309,7 @@ where writer.append_header(&header, &hash)?; // Write bodies to database. - provider.append_block_bodies(vec![(header.number(), Some(body))])?; + provider.append_block_bodies(vec![(header.number(), Some(&body))])?; hash_collector.insert(hash, number)?; } diff --git a/crates/era-utils/tests/it/genesis.rs b/crates/era-utils/tests/it/genesis.rs index 0c35c458aa..700fb1f006 100644 --- a/crates/era-utils/tests/it/genesis.rs +++ b/crates/era-utils/tests/it/genesis.rs @@ -24,7 +24,7 @@ fn test_export_with_genesis_only() { assert!(file_path.exists(), "Exported file should exist on disk"); let file_name = file_path.file_name().unwrap().to_str().unwrap(); assert!( - file_name.starts_with("mainnet-00000-00001-"), + file_name.starts_with("mainnet-00000-"), "File should have correct prefix with era format" ); assert!(file_name.ends_with(".era1"), "File should have correct extension"); diff --git a/crates/era-utils/tests/it/history.rs b/crates/era-utils/tests/it/history.rs index 8e720f1001..2075722398 100644 --- a/crates/era-utils/tests/it/history.rs +++ b/crates/era-utils/tests/it/history.rs @@ -1,7 +1,7 @@ use crate::{ClientWithFakeIndex, ITHACA_ERA_INDEX_URL}; use reqwest::{Client, Url}; use reth_db_common::init::init_genesis; -use reth_era::execution_types::MAX_BLOCKS_PER_ERA1; +use reth_era::era1::types::execution::MAX_BLOCKS_PER_ERA1; use reth_era_downloader::{EraClient, EraStream, EraStreamConfig}; use reth_era_utils::{export, import, ExportConfig}; use reth_etl::Collector; diff --git a/crates/era-utils/tests/it/main.rs b/crates/era-utils/tests/it/main.rs index 94805c5b35..2e2ec0b055 100644 --- a/crates/era-utils/tests/it/main.rs +++ b/crates/era-utils/tests/it/main.rs @@ -32,7 +32,7 @@ impl HttpClient for ClientWithFakeIndex { ) -> eyre::Result> + Send + Sync + Unpin> { let url = url.into_url()?; - match url.to_string().as_str() { + match url.as_str() { ITHACA_ERA_INDEX_URL => { // Create a static stream without boxing let stream = diff --git a/crates/era/Cargo.toml b/crates/era/Cargo.toml index 09d5b8b918..194ed9e412 100644 --- a/crates/era/Cargo.toml +++ b/crates/era/Cargo.toml @@ -18,8 +18,6 @@ alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true -reth-ethereum-primitives.workspace = true - # compression and decompression snap.workspace = true @@ -32,6 +30,7 @@ eyre.workspace = true rand.workspace = true reqwest.workspace = true reth-era-downloader.workspace = true +reth-ethereum-primitives.workspace = true tempfile.workspace = true tokio = { workspace = true, features = ["sync", "macros", "time", "rt-multi-thread"] } test-case.workspace = true diff --git a/crates/era/src/common/decode.rs b/crates/era/src/common/decode.rs new file mode 100644 index 0000000000..17d27557c9 --- /dev/null +++ b/crates/era/src/common/decode.rs @@ -0,0 +1,10 @@ +//! Compressed data decoding utilities. 
+ +use crate::e2s::error::E2sError; +use alloy_rlp::Decodable; + +/// Extension trait for generic decoding from compressed data +pub trait DecodeCompressedRlp { + /// Decompress and decode the data into the given type + fn decode(&self) -> Result; +} diff --git a/crates/era/src/common/file_ops.rs b/crates/era/src/common/file_ops.rs new file mode 100644 index 0000000000..3938f9ebe8 --- /dev/null +++ b/crates/era/src/common/file_ops.rs @@ -0,0 +1,245 @@ +//! Era file format traits and I/O operations. + +use crate::e2s::{error::E2sError, types::Version}; +use std::{ + fs::File, + io::{Read, Seek, Write}, + path::Path, +}; + +/// Represents era file with generic content and identifier types +pub trait EraFileFormat: Sized { + /// Content group type + type EraGroup; + + /// The identifier type + type Id: EraFileId; + + /// Get the version + fn version(&self) -> &Version; + + /// Get the content group + fn group(&self) -> &Self::EraGroup; + + /// Get the file identifier + fn id(&self) -> &Self::Id; + + /// Create a new instance + fn new(group: Self::EraGroup, id: Self::Id) -> Self; +} + +/// Era file identifiers +pub trait EraFileId: Clone { + /// File type for this identifier + const FILE_TYPE: EraFileType; + + /// Number of items, slots for `era`, blocks for `era1`, per era + const ITEMS_PER_ERA: u64; + + /// Get the network name + fn network_name(&self) -> &str; + + /// Get the starting number (block or slot) + fn start_number(&self) -> u64; + + /// Get the count of items + fn count(&self) -> u32; + + /// Get the optional hash identifier + fn hash(&self) -> Option<[u8; 4]>; + + /// Whether to include era count in filename + fn include_era_count(&self) -> bool; + + /// Calculate era number + fn era_number(&self) -> u64 { + self.start_number() / Self::ITEMS_PER_ERA + } + + /// Calculate the number of eras spanned per file. + /// + /// If the user can decide how many slots/blocks per era file there are, we need to calculate + /// it. Most of the time it should be 1, but it can never be more than 2 eras per file + /// as there is a maximum of 8192 slots/blocks per era file. + fn era_count(&self) -> u64 { + if self.count() == 0 { + return 0; + } + let first_era = self.era_number(); + let last_number = self.start_number() + self.count() as u64 - 1; + let last_era = last_number / Self::ITEMS_PER_ERA; + last_era - first_era + 1 + } + + /// Convert to standardized file name. 
+ fn to_file_name(&self) -> String { + Self::FILE_TYPE.format_filename( + self.network_name(), + self.era_number(), + self.hash(), + self.include_era_count(), + self.era_count(), + ) + } +} + +/// [`StreamReader`] for reading era-format files +pub trait StreamReader: Sized { + /// The file type the reader produces + type File: EraFileFormat; + + /// The iterator type for streaming data + type Iterator; + + /// Create a new reader + fn new(reader: R) -> Self; + + /// Read and parse the complete file + fn read(self, network_name: String) -> Result; + + /// Get an iterator for streaming processing + fn iter(self) -> Self::Iterator; +} + +/// [`FileReader`] provides reading era file operations for era files +pub trait FileReader: StreamReader { + /// Opens and reads an era file from the given path + fn open>( + path: P, + network_name: impl Into, + ) -> Result { + let file = File::open(path).map_err(E2sError::Io)?; + let reader = Self::new(file); + reader.read(network_name.into()) + } +} + +/// [`StreamWriter`] for writing era-format files +pub trait StreamWriter: Sized { + /// The file type this writer handles + type File: EraFileFormat; + + /// Create a new writer + fn new(writer: W) -> Self; + + /// Writer version + fn write_version(&mut self) -> Result<(), E2sError>; + + /// Write a complete era file + fn write_file(&mut self, file: &Self::File) -> Result<(), E2sError>; + + /// Flush any buffered data + fn flush(&mut self) -> Result<(), E2sError>; +} + +/// [`StreamWriter`] provides writing file operations for era files +pub trait FileWriter { + /// Era file type the writer handles + type File: EraFileFormat; + + /// Creates a new file at the specified path and writes the era file to it + fn create>(path: P, file: &Self::File) -> Result<(), E2sError>; + + /// Creates a file in the directory using standardized era naming + fn create_with_id>(directory: P, file: &Self::File) -> Result<(), E2sError>; +} + +impl> FileWriter for T { + type File = T::File; + + /// Creates a new file at the specified path and writes the era file to it + fn create>(path: P, file: &Self::File) -> Result<(), E2sError> { + let file_handle = File::create(path).map_err(E2sError::Io)?; + let mut writer = Self::new(file_handle); + writer.write_file(file)?; + Ok(()) + } + + /// Creates a file in the directory using standardized era naming + fn create_with_id>(directory: P, file: &Self::File) -> Result<(), E2sError> { + let filename = file.id().to_file_name(); + let path = directory.as_ref().join(filename); + Self::create(path, file) + } +} + +/// Era file type identifier +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum EraFileType { + /// Consensus layer ERA file, `.era` + /// Contains beacon blocks and states + Era, + /// Execution layer ERA1 file, `.era1` + /// Contains execution blocks pre-merge + Era1, +} + +impl EraFileType { + /// Get the file extension for this type, dot included + pub const fn extension(&self) -> &'static str { + match self { + Self::Era => ".era", + Self::Era1 => ".era1", + } + } + + /// Detect file type from a filename + pub fn from_filename(filename: &str) -> Option { + if filename.ends_with(".era") { + Some(Self::Era) + } else if filename.ends_with(".era1") { + Some(Self::Era1) + } else { + None + } + } + + /// Generate era file name. 
+ /// + /// Standard format: `--.` + /// See also + /// + /// With era count (for custom exports): + /// `---.` + pub fn format_filename( + &self, + network_name: &str, + era_number: u64, + hash: Option<[u8; 4]>, + include_era_count: bool, + era_count: u64, + ) -> String { + let hash = format_hash(hash); + + if include_era_count { + format!( + "{}-{:05}-{:05}-{}{}", + network_name, + era_number, + era_count, + hash, + self.extension() + ) + } else { + format!("{}-{:05}-{}{}", network_name, era_number, hash, self.extension()) + } + } + + /// Detect file type from URL + /// By default, it assumes `Era` type + pub fn from_url(url: &str) -> Self { + if url.contains("era1") { + Self::Era1 + } else { + Self::Era + } + } +} + +/// Format hash as hex string, or placeholder if none +pub fn format_hash(hash: Option<[u8; 4]>) -> String { + match hash { + Some(h) => format!("{:02x}{:02x}{:02x}{:02x}", h[0], h[1], h[2], h[3]), + None => "00000000".to_string(), + } +} diff --git a/crates/era/src/common/mod.rs b/crates/era/src/common/mod.rs new file mode 100644 index 0000000000..3ad45dfdd8 --- /dev/null +++ b/crates/era/src/common/mod.rs @@ -0,0 +1,4 @@ +//! Common utilities and shared functionality. + +pub mod decode; +pub mod file_ops; diff --git a/crates/era/src/e2s/error.rs b/crates/era/src/e2s/error.rs new file mode 100644 index 0000000000..ccfbe7296c --- /dev/null +++ b/crates/era/src/e2s/error.rs @@ -0,0 +1,32 @@ +//! Error handling for e2s files operations + +use std::io; +use thiserror::Error; + +/// Error types for e2s file operations +#[derive(Error, Debug)] +pub enum E2sError { + /// IO error during file operations + #[error("IO error: {0}")] + Io(#[from] io::Error), + + /// Error during SSZ encoding/decoding + #[error("SSZ error: {0}")] + Ssz(String), + + /// Reserved field in header not zero + #[error("Reserved field in header not zero")] + ReservedNotZero, + + /// Error during snappy compression + #[error("Snappy compression error: {0}")] + SnappyCompression(String), + + /// Error during snappy decompression + #[error("Snappy decompression error: {0}")] + SnappyDecompression(String), + + /// Error during RLP encoding/decoding + #[error("RLP error: {0}")] + Rlp(String), +} diff --git a/crates/era/src/e2s_file.rs b/crates/era/src/e2s/file.rs similarity index 99% rename from crates/era/src/e2s_file.rs rename to crates/era/src/e2s/file.rs index e1b6989a0f..9c48add603 100644 --- a/crates/era/src/e2s_file.rs +++ b/crates/era/src/e2s/file.rs @@ -2,7 +2,10 @@ //! //! See also -use crate::e2s_types::{E2sError, Entry, Version}; +use crate::e2s::{ + error::E2sError, + types::{Entry, Version}, +}; use std::io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write}; /// A reader for `E2Store` files that wraps a [`BufReader`]. @@ -107,7 +110,7 @@ impl E2StoreWriter { #[cfg(test)] mod tests { use super::*; - use crate::e2s_types::{SLOT_INDEX, VERSION}; + use crate::e2s::types::{SLOT_INDEX, VERSION}; use std::io::Cursor; fn create_slot_index_data(starting_slot: u64, offsets: &[i64]) -> Vec { diff --git a/crates/era/src/e2s/mod.rs b/crates/era/src/e2s/mod.rs new file mode 100644 index 0000000000..d67190f475 --- /dev/null +++ b/crates/era/src/e2s/mod.rs @@ -0,0 +1,5 @@ +//! Core e2store primitives and file handling. 
+ +pub mod error; +pub mod file; +pub mod types; diff --git a/crates/era/src/e2s_types.rs b/crates/era/src/e2s/types.rs similarity index 90% rename from crates/era/src/e2s_types.rs rename to crates/era/src/e2s/types.rs index f14bfe56e8..dd0e9485da 100644 --- a/crates/era/src/e2s_types.rs +++ b/crates/era/src/e2s/types.rs @@ -8,9 +8,9 @@ //! An [`Entry`] is a complete record in the file, consisting of both a [`Header`] and its //! associated data +use crate::e2s::error::E2sError; use ssz_derive::{Decode, Encode}; use std::io::{self, Read, Write}; -use thiserror::Error; /// [`Version`] record: ['e', '2'] pub const VERSION: [u8; 2] = [0x65, 0x32]; @@ -21,34 +21,6 @@ pub const EMPTY: [u8; 2] = [0x00, 0x00]; /// `SlotIndex` record: ['i', '2'] pub const SLOT_INDEX: [u8; 2] = [0x69, 0x32]; -/// Error types for e2s file operations -#[derive(Error, Debug)] -pub enum E2sError { - /// IO error during file operations - #[error("IO error: {0}")] - Io(#[from] io::Error), - - /// Error during SSZ encoding/decoding - #[error("SSZ error: {0}")] - Ssz(String), - - /// Reserved field in header not zero - #[error("Reserved field in header not zero")] - ReservedNotZero, - - /// Error during snappy compression - #[error("Snappy compression error: {0}")] - SnappyCompression(String), - - /// Error during snappy decompression - #[error("Snappy decompression error: {0}")] - SnappyDecompression(String), - - /// Error during RLP encoding/decoding - #[error("RLP error: {0}")] - Rlp(String), -} - /// Header for TLV records in e2store files #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] pub struct Header { diff --git a/crates/era/src/era/file.rs b/crates/era/src/era/file.rs new file mode 100644 index 0000000000..1c1a20543f --- /dev/null +++ b/crates/era/src/era/file.rs @@ -0,0 +1,342 @@ +//! Represents a complete Era file +//! +//! The structure of an Era file follows the specification: +//! `Version | block* | era-state | other-entries* | slot-index(block)? | slot-index(state)` +//! +//! See also . + +use crate::{ + common::file_ops::{EraFileFormat, FileReader, StreamReader, StreamWriter}, + e2s::{ + error::E2sError, + file::{E2StoreReader, E2StoreWriter}, + types::{Entry, IndexEntry, Version, SLOT_INDEX}, + }, + era::types::{ + consensus::{ + CompressedBeaconState, CompressedSignedBeaconBlock, COMPRESSED_BEACON_STATE, + COMPRESSED_SIGNED_BEACON_BLOCK, + }, + group::{EraGroup, EraId, SlotIndex}, + }, +}; +use std::{ + fs::File, + io::{Read, Seek, Write}, +}; + +/// Era file interface +#[derive(Debug)] +pub struct EraFile { + /// Version record, must be the first record in the file + pub version: Version, + + /// Main content group of the Era file + pub group: EraGroup, + + /// File identifier + pub id: EraId, +} + +impl EraFileFormat for EraFile { + type EraGroup = EraGroup; + type Id = EraId; + + /// Create a new [`EraFile`] + fn new(group: EraGroup, id: EraId) -> Self { + Self { version: Version, group, id } + } + + fn version(&self) -> &Version { + &self.version + } + + fn group(&self) -> &Self::EraGroup { + &self.group + } + + fn id(&self) -> &Self::Id { + &self.id + } +} + +/// Reader for era files that builds on top of [`E2StoreReader`] +#[derive(Debug)] +pub struct EraReader { + reader: E2StoreReader, +} + +/// An iterator of [`BeaconBlockIterator`] streaming from [`E2StoreReader`]. 
+#[derive(Debug)] +pub struct BeaconBlockIterator { + reader: E2StoreReader, + state: Option, + other_entries: Vec, + block_slot_index: Option, + state_slot_index: Option, +} + +impl BeaconBlockIterator { + fn new(reader: E2StoreReader) -> Self { + Self { + reader, + state: None, + other_entries: Default::default(), + block_slot_index: None, + state_slot_index: None, + } + } +} + +impl Iterator for BeaconBlockIterator { + type Item = Result; + + fn next(&mut self) -> Option { + self.next_result().transpose() + } +} + +impl BeaconBlockIterator { + fn next_result(&mut self) -> Result, E2sError> { + loop { + let Some(entry) = self.reader.read_next_entry()? else { + return Ok(None); + }; + + match entry.entry_type { + COMPRESSED_SIGNED_BEACON_BLOCK => { + let block = CompressedSignedBeaconBlock::from_entry(&entry)?; + return Ok(Some(block)); + } + COMPRESSED_BEACON_STATE => { + if self.state.is_some() { + return Err(E2sError::Ssz("Multiple state entries found".to_string())); + } + self.state = Some(CompressedBeaconState::from_entry(&entry)?); + } + SLOT_INDEX => { + let slot_index = SlotIndex::from_entry(&entry)?; + // if we haven't seen the state yet, the slot index is for blocks, + // if we have seen the state, the slot index is for the state + if self.state.is_none() { + self.block_slot_index = Some(slot_index); + } else { + self.state_slot_index = Some(slot_index); + } + } + _ => { + self.other_entries.push(entry); + } + } + } + } +} + +impl StreamReader for EraReader { + type File = EraFile; + type Iterator = BeaconBlockIterator; + + /// Create a new [`EraReader`] + fn new(reader: R) -> Self { + Self { reader: E2StoreReader::new(reader) } + } + + /// Returns an iterator of [`BeaconBlockIterator`] streaming from `reader`. + fn iter(self) -> BeaconBlockIterator { + BeaconBlockIterator::new(self.reader) + } + + fn read(self, network_name: String) -> Result { + self.read_and_assemble(network_name) + } +} + +impl EraReader { + /// Reads and parses an era file from the underlying reader, assembling all components + /// into a complete [`EraFile`] with an [`EraId`] that includes the provided network name. + pub fn read_and_assemble(mut self, network_name: String) -> Result { + // Validate version entry + let _version_entry = match self.reader.read_version()? { + Some(entry) if entry.is_version() => entry, + Some(_) => return Err(E2sError::Ssz("First entry is not a Version entry".to_string())), + None => return Err(E2sError::Ssz("Empty Era file".to_string())), + }; + + let mut iter = self.iter(); + let blocks = (&mut iter).collect::, _>>()?; + + let BeaconBlockIterator { + state, other_entries, block_slot_index, state_slot_index, .. 
+ } = iter; + + let state = + state.ok_or_else(|| E2sError::Ssz("Era file missing state entry".to_string()))?; + + let state_slot_index = state_slot_index + .ok_or_else(|| E2sError::Ssz("Era file missing state slot index".to_string()))?; + + // Create appropriate `EraGroup`, genesis vs non-genesis + let mut group = if let Some(block_index) = block_slot_index { + EraGroup::with_block_index(blocks, state, block_index, state_slot_index) + } else { + EraGroup::new(blocks, state, state_slot_index) + }; + + // Add other entries + for entry in other_entries { + group.add_entry(entry); + } + + let (start_slot, slot_count) = group.slot_range(); + + let id = EraId::new(network_name, start_slot, slot_count); + + Ok(EraFile::new(group, id)) + } +} + +impl FileReader for EraReader {} + +/// Writer for Era files that builds on top of [`E2StoreWriter`] +#[derive(Debug)] +pub struct EraWriter { + writer: E2StoreWriter, + has_written_version: bool, + has_written_state: bool, + has_written_block_slot_index: bool, + has_written_state_slot_index: bool, +} + +impl StreamWriter for EraWriter { + type File = EraFile; + + /// Create a new [`EraWriter`] + fn new(writer: W) -> Self { + Self { + writer: E2StoreWriter::new(writer), + has_written_version: false, + has_written_state: false, + has_written_block_slot_index: false, + has_written_state_slot_index: false, + } + } + + /// Write the version entry + fn write_version(&mut self) -> Result<(), E2sError> { + if self.has_written_version { + return Ok(()); + } + + self.writer.write_version()?; + self.has_written_version = true; + Ok(()) + } + + fn write_file(&mut self, file: &Self::File) -> Result<(), E2sError> { + // Write version + self.write_version()?; + + // Write all blocks + for block in &file.group.blocks { + self.write_beacon_block(block)?; + } + + // Write state + self.write_beacon_state(&file.group.era_state)?; + + // Write other entries + for entry in &file.group.other_entries { + self.writer.write_entry(entry)?; + } + + // Write slot index + if let Some(ref block_index) = file.group.slot_index { + self.write_block_slot_index(block_index)?; + } + + // Write state index + self.write_state_slot_index(&file.group.state_slot_index)?; + + self.writer.flush()?; + Ok(()) + } + + /// Flush any buffered data to the underlying writer + fn flush(&mut self) -> Result<(), E2sError> { + self.writer.flush() + } +} + +impl EraWriter { + /// Write beacon block + pub fn write_beacon_block( + &mut self, + block: &CompressedSignedBeaconBlock, + ) -> Result<(), E2sError> { + self.ensure_version_written()?; + + // Ensure blocks are written before state/indices + if self.has_written_state || + self.has_written_block_slot_index || + self.has_written_state_slot_index + { + return Err(E2sError::Ssz("Cannot write blocks after state or indices".to_string())); + } + + let entry = block.to_entry(); + self.writer.write_entry(&entry)?; + Ok(()) + } + + // Write beacon state + fn write_beacon_state(&mut self, state: &CompressedBeaconState) -> Result<(), E2sError> { + self.ensure_version_written()?; + + if self.has_written_state { + return Err(E2sError::Ssz("State already written".to_string())); + } + + let entry = state.to_entry(); + self.writer.write_entry(&entry)?; + self.has_written_state = true; + Ok(()) + } + + /// Write the block slot index + pub fn write_block_slot_index(&mut self, slot_index: &SlotIndex) -> Result<(), E2sError> { + self.ensure_version_written()?; + + if self.has_written_block_slot_index { + return Err(E2sError::Ssz("Block slot index already 
written".to_string())); + } + + let entry = slot_index.to_entry(); + self.writer.write_entry(&entry)?; + self.has_written_block_slot_index = true; + + Ok(()) + } + + /// Write the state slot index + pub fn write_state_slot_index(&mut self, slot_index: &SlotIndex) -> Result<(), E2sError> { + self.ensure_version_written()?; + + if self.has_written_state_slot_index { + return Err(E2sError::Ssz("State slot index already written".to_string())); + } + + let entry = slot_index.to_entry(); + self.writer.write_entry(&entry)?; + self.has_written_state_slot_index = true; + + Ok(()) + } + + /// Helper to ensure version is written before any data + fn ensure_version_written(&mut self) -> Result<(), E2sError> { + if !self.has_written_version { + self.write_version()?; + } + Ok(()) + } +} diff --git a/crates/era/src/era/mod.rs b/crates/era/src/era/mod.rs new file mode 100644 index 0000000000..864f2ded31 --- /dev/null +++ b/crates/era/src/era/mod.rs @@ -0,0 +1,4 @@ +//! Core era primitives. + +pub mod file; +pub mod types; diff --git a/crates/era/src/consensus_types.rs b/crates/era/src/era/types/consensus.rs similarity index 73% rename from crates/era/src/consensus_types.rs rename to crates/era/src/era/types/consensus.rs index cdcc77ce57..8b138c286b 100644 --- a/crates/era/src/consensus_types.rs +++ b/crates/era/src/era/types/consensus.rs @@ -1,11 +1,67 @@ //! Consensus types for Era post-merge history files +//! +//! # Decoding +//! +//! This crate only handles compression/decompression. +//! To decode the SSZ data into concrete beacon types, use the [Lighthouse `types`](https://github.com/sigp/lighthouse/tree/stable/consensus/types) +//! crate or another SSZ-compatible library. +//! +//! # Examples +//! +//! ## Decoding a [`CompressedBeaconState`] +//! +//! ```ignore +//! use types::{BeaconState, ChainSpec, MainnetEthSpec}; +//! use reth_era::era::types::consensus::CompressedBeaconState; +//! +//! fn decode_state( +//! compressed_state: &CompressedBeaconState, +//! ) -> Result<(), Box> { +//! let spec = ChainSpec::mainnet(); +//! +//! // Decompress to get SSZ bytes +//! let ssz_bytes = compressed_state.decompress()?; +//! +//! // Decode with fork-aware method, chainSpec determines fork from slot in SSZ +//! let state = BeaconState::::from_ssz_bytes(&ssz_bytes, &spec) +//! .map_err(|e| format!("{:?}", e))?; +//! +//! println!("State slot: {}", state.slot()); +//! println!("Fork: {:?}", state.fork_name_unchecked()); +//! println!("Validators: {}", state.validators().len()); +//! println!("Finalized checkpoint: {:?}", state.finalized_checkpoint()); +//! Ok(()) +//! } +//! ``` +//! +//! ## Decoding a [`CompressedSignedBeaconBlock`] +//! +//! ```ignore +//! use consensus_types::{ForkName, ForkVersionDecode, MainnetEthSpec, SignedBeaconBlock}; +//! use reth_era::era::types::consensus::CompressedSignedBeaconBlock; +//! +//! // Decode using fork-aware decoding, fork must be known beforehand +//! fn decode_block( +//! compressed: &CompressedSignedBeaconBlock, +//! fork: ForkName, +//! ) -> Result<(), Box> { +//! // Decompress to get SSZ bytes +//! let ssz_bytes = compressed.decompress()?; +//! +//! let block = SignedBeaconBlock::::from_ssz_bytes_by_fork(&ssz_bytes, fork) +//! .map_err(|e| format!("{:?}", e))?; +//! +//! println!("Block slot: {}", block.message().slot()); +//! println!("Proposer index: {}", block.message().proposer_index()); +//! println!("Parent root: {:?}", block.message().parent_root()); +//! println!("State root: {:?}", block.message().state_root()); +//! +//! Ok(()) +//! } +//! 
``` -use crate::{ - e2s_types::{E2sError, Entry}, - DecodeCompressedSsz, -}; +use crate::e2s::{error::E2sError, types::Entry}; use snap::{read::FrameDecoder, write::FrameEncoder}; -use ssz::Decode; use std::io::{Read, Write}; /// `CompressedSignedBeaconBlock` record type: [0x01, 0x00] @@ -76,20 +132,6 @@ impl CompressedSignedBeaconBlock { Ok(Self { data: entry.data.clone() }) } - - /// Decode the compressed signed beacon block into ssz bytes - pub fn decode_to_ssz(&self) -> Result, E2sError> { - self.decompress() - } -} - -impl DecodeCompressedSsz for CompressedSignedBeaconBlock { - fn decode(&self) -> Result { - let ssz_bytes = self.decompress()?; - T::from_ssz_bytes(&ssz_bytes).map_err(|e| { - E2sError::Ssz(format!("Failed to decode SSZ data into target type: {e:?}")) - }) - } } /// Compressed beacon state @@ -154,20 +196,6 @@ impl CompressedBeaconState { Ok(Self { data: entry.data.clone() }) } - - /// Decode the compressed beacon state into ssz bytes - pub fn decode_to_ssz(&self) -> Result, E2sError> { - self.decompress() - } -} - -impl DecodeCompressedSsz for CompressedBeaconState { - fn decode(&self) -> Result { - let ssz_bytes = self.decompress()?; - T::from_ssz_bytes(&ssz_bytes).map_err(|e| { - E2sError::Ssz(format!("Failed to decode SSZ data into target type: {e:?}")) - }) - } } #[cfg(test)] @@ -203,7 +231,7 @@ mod tests { assert_eq!(entry.entry_type, COMPRESSED_SIGNED_BEACON_BLOCK); let recovered = CompressedSignedBeaconBlock::from_entry(&entry).unwrap(); - let recovered_ssz = recovered.decode_to_ssz().unwrap(); + let recovered_ssz = recovered.decompress().unwrap(); assert_eq!(recovered_ssz, ssz_data); } @@ -217,7 +245,7 @@ mod tests { assert_eq!(entry.entry_type, COMPRESSED_BEACON_STATE); let recovered = CompressedBeaconState::from_entry(&entry).unwrap(); - let recovered_ssz = recovered.decode_to_ssz().unwrap(); + let recovered_ssz = recovered.decompress().unwrap(); assert_eq!(recovered_ssz, ssz_data); } diff --git a/crates/era/src/era_types.rs b/crates/era/src/era/types/group.rs similarity index 69% rename from crates/era/src/era_types.rs rename to crates/era/src/era/types/group.rs index a50b6f1928..2536c0394c 100644 --- a/crates/era/src/era_types.rs +++ b/crates/era/src/era/types/group.rs @@ -1,12 +1,16 @@ -//! Era types for `.era` files +//! Era types for `.era` file content //! //! See also use crate::{ - consensus_types::{CompressedBeaconState, CompressedSignedBeaconBlock}, - e2s_types::{Entry, IndexEntry, SLOT_INDEX}, + common::file_ops::{EraFileId, EraFileType}, + e2s::types::{Entry, IndexEntry, SLOT_INDEX}, + era::types::consensus::{CompressedBeaconState, CompressedSignedBeaconBlock}, }; +/// Number of slots per historical root in ERA files +pub const SLOTS_PER_HISTORICAL_ROOT: u64 = 8192; + /// Era file content group /// /// Format: `Version | block* | era-state | other-entries* | slot-index(block)? | slot-index(state)` @@ -64,6 +68,28 @@ impl EraGroup { pub fn add_entry(&mut self, entry: Entry) { self.other_entries.push(entry); } + + /// Get the starting slot and slot count. 
+ pub const fn slot_range(&self) -> (u64, u32) { + if let Some(ref block_index) = self.slot_index { + // Non-genesis era: use block slot index + (block_index.starting_slot, block_index.slot_count() as u32) + } else { + // Genesis era: use state slot index, it should be slot 0 + // Genesis has only the genesis state, no blocks + (self.state_slot_index.starting_slot, 0) + } + } + + /// Get the starting slot number + pub const fn starting_slot(&self) -> u64 { + self.slot_range().0 + } + + /// Get the number of slots + pub const fn slot_count(&self) -> u32 { + self.slot_range().1 + } } /// [`SlotIndex`] records store offsets to data at specific slots @@ -122,11 +148,83 @@ impl IndexEntry for SlotIndex { } } +/// Era file identifier +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct EraId { + /// Network configuration name + pub network_name: String, + + /// First slot number in file + pub start_slot: u64, + + /// Number of slots in the file + pub slot_count: u32, + + /// Optional hash identifier for this file + /// First 4 bytes of the last historical root in the last state in the era file + pub hash: Option<[u8; 4]>, + + /// Whether to include era count in filename + /// It is used for custom exports when we don't use the max number of items per file + include_era_count: bool, +} + +impl EraId { + /// Create a new [`EraId`] + pub fn new(network_name: impl Into, start_slot: u64, slot_count: u32) -> Self { + Self { + network_name: network_name.into(), + start_slot, + slot_count, + hash: None, + include_era_count: false, + } + } + + /// Add a hash identifier to [`EraId`] + pub const fn with_hash(mut self, hash: [u8; 4]) -> Self { + self.hash = Some(hash); + self + } + + /// Include era count in filename, for custom slot-per-file exports + pub const fn with_era_count(mut self) -> Self { + self.include_era_count = true; + self + } +} + +impl EraFileId for EraId { + const FILE_TYPE: EraFileType = EraFileType::Era; + + const ITEMS_PER_ERA: u64 = SLOTS_PER_HISTORICAL_ROOT; + + fn network_name(&self) -> &str { + &self.network_name + } + + fn start_number(&self) -> u64 { + self.start_slot + } + + fn count(&self) -> u32 { + self.slot_count + } + + fn hash(&self) -> Option<[u8; 4]> { + self.hash + } + + fn include_era_count(&self) -> bool { + self.include_era_count + } +} + #[cfg(test)] mod tests { use super::*; use crate::{ - e2s_types::{Entry, IndexEntry}, + e2s::types::{Entry, IndexEntry}, test_utils::{create_beacon_block, create_beacon_state}, }; @@ -286,4 +384,40 @@ mod tests { let parsed_offset = index.offsets[0]; assert_eq!(parsed_offset, -1024); } + + #[test_case::test_case( + EraId::new("mainnet", 0, 8192).with_hash([0x4b, 0x36, 0x3d, 0xb9]), + "mainnet-00000-4b363db9.era"; + "Mainnet era 0" + )] + #[test_case::test_case( + EraId::new("mainnet", 8192, 8192).with_hash([0x40, 0xcf, 0x2f, 0x3c]), + "mainnet-00001-40cf2f3c.era"; + "Mainnet era 1" + )] + #[test_case::test_case( + EraId::new("mainnet", 0, 8192), + "mainnet-00000-00000000.era"; + "Without hash" + )] + fn test_era_id_file_naming(id: EraId, expected_file_name: &str) { + let actual_file_name = id.to_file_name(); + assert_eq!(actual_file_name, expected_file_name); + } + + // File naming with era-count, for custom exports + #[test_case::test_case( + EraId::new("mainnet", 0, 8192).with_hash([0x4b, 0x36, 0x3d, 0xb9]).with_era_count(), + "mainnet-00000-00001-4b363db9.era"; + "Mainnet era 0 with count" + )] + #[test_case::test_case( + EraId::new("mainnet", 8000, 500).with_hash([0xab, 0xcd, 0xef, 0x12]).with_era_count(), + 
"mainnet-00000-00002-abcdef12.era"; + "Spanning two eras with count" + )] + fn test_era_id_file_naming_with_era_count(id: EraId, expected_file_name: &str) { + let actual_file_name = id.to_file_name(); + assert_eq!(actual_file_name, expected_file_name); + } } diff --git a/crates/era/src/era/types/mod.rs b/crates/era/src/era/types/mod.rs new file mode 100644 index 0000000000..cf91adca54 --- /dev/null +++ b/crates/era/src/era/types/mod.rs @@ -0,0 +1,6 @@ +//! Era types primitives. +//! +//! See also + +pub mod consensus; +pub mod group; diff --git a/crates/era/src/era1_file.rs b/crates/era/src/era1/file.rs similarity index 94% rename from crates/era/src/era1_file.rs rename to crates/era/src/era1/file.rs index dc34ddef42..3f230e8ea6 100644 --- a/crates/era/src/era1_file.rs +++ b/crates/era/src/era1/file.rs @@ -6,13 +6,19 @@ //! See also . use crate::{ - e2s_file::{E2StoreReader, E2StoreWriter}, - e2s_types::{E2sError, Entry, IndexEntry, Version}, - era1_types::{BlockIndex, Era1Group, Era1Id, BLOCK_INDEX}, - era_file_ops::{EraFileFormat, FileReader, StreamReader, StreamWriter}, - execution_types::{ - self, Accumulator, BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, - TotalDifficulty, MAX_BLOCKS_PER_ERA1, + common::file_ops::{EraFileFormat, FileReader, StreamReader, StreamWriter}, + e2s::{ + error::E2sError, + file::{E2StoreReader, E2StoreWriter}, + types::{Entry, IndexEntry, Version}, + }, + era1::types::{ + execution::{ + Accumulator, BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, + TotalDifficulty, ACCUMULATOR, COMPRESSED_BODY, COMPRESSED_HEADER, COMPRESSED_RECEIPTS, + MAX_BLOCKS_PER_ERA1, TOTAL_DIFFICULTY, + }, + group::{BlockIndex, Era1Group, Era1Id, BLOCK_INDEX}, }, }; use alloy_primitives::BlockNumber; @@ -127,19 +133,19 @@ impl BlockTupleIterator { }; match entry.entry_type { - execution_types::COMPRESSED_HEADER => { + COMPRESSED_HEADER => { self.headers.push_back(CompressedHeader::from_entry(&entry)?); } - execution_types::COMPRESSED_BODY => { + COMPRESSED_BODY => { self.bodies.push_back(CompressedBody::from_entry(&entry)?); } - execution_types::COMPRESSED_RECEIPTS => { + COMPRESSED_RECEIPTS => { self.receipts.push_back(CompressedReceipts::from_entry(&entry)?); } - execution_types::TOTAL_DIFFICULTY => { + TOTAL_DIFFICULTY => { self.difficulties.push_back(TotalDifficulty::from_entry(&entry)?); } - execution_types::ACCUMULATOR => { + ACCUMULATOR => { if self.accumulator.is_some() { return Err(E2sError::Ssz("Multiple accumulator entries found".to_string())); } @@ -327,10 +333,7 @@ impl StreamWriter for Era1Writer { impl Era1Writer { /// Write a single block tuple - pub fn write_block( - &mut self, - block_tuple: &crate::execution_types::BlockTuple, - ) -> Result<(), E2sError> { + pub fn write_block(&mut self, block_tuple: &BlockTuple) -> Result<(), E2sError> { if !self.has_written_version { self.write_version()?; } @@ -403,13 +406,7 @@ impl Era1Writer { #[cfg(test)] mod tests { use super::*; - use crate::{ - era_file_ops::FileWriter, - execution_types::{ - Accumulator, BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, - TotalDifficulty, - }, - }; + use crate::common::file_ops::FileWriter; use alloy_primitives::{B256, U256}; use std::io::Cursor; use tempfile::tempdir; diff --git a/crates/era/src/era1/mod.rs b/crates/era/src/era1/mod.rs new file mode 100644 index 0000000000..de0803e721 --- /dev/null +++ b/crates/era/src/era1/mod.rs @@ -0,0 +1,4 @@ +//! Core era1 primitives and file handling. 
+ +pub mod file; +pub mod types; diff --git a/crates/era/src/execution_types.rs b/crates/era/src/era1/types/execution.rs similarity index 94% rename from crates/era/src/execution_types.rs rename to crates/era/src/era1/types/execution.rs index 6feb2873fb..86849419e1 100644 --- a/crates/era/src/execution_types.rs +++ b/crates/era/src/era1/types/execution.rs @@ -16,7 +16,7 @@ //! //! ```rust //! use alloy_consensus::Header; -//! use reth_era::{execution_types::CompressedHeader, DecodeCompressed}; +//! use reth_era::{common::decode::DecodeCompressedRlp, era1::types::execution::CompressedHeader}; //! //! let header = Header { number: 100, ..Default::default() }; //! // Compress the header: rlp encoding and Snappy compression @@ -24,7 +24,7 @@ //! // Decompressed and decode typed compressed header //! let decoded_header: Header = compressed.decode_header()?; //! assert_eq!(decoded_header.number, 100); -//! # Ok::<(), reth_era::e2s_types::E2sError>(()) +//! # Ok::<(), reth_era::e2s::error::E2sError>(()) //! ``` //! //! ## [`CompressedBody`] @@ -32,7 +32,7 @@ //! ```rust //! use alloy_consensus::{BlockBody, Header}; //! use alloy_primitives::Bytes; -//! use reth_era::{execution_types::CompressedBody, DecodeCompressed}; +//! use reth_era::{common::decode::DecodeCompressedRlp, era1::types::execution::CompressedBody}; //! use reth_ethereum_primitives::TransactionSigned; //! //! let body: BlockBody = BlockBody { @@ -46,34 +46,32 @@ //! let decoded_body: alloy_consensus::BlockBody = //! compressed_body.decode()?; //! assert_eq!(decoded_body.transactions.len(), 1); -//! # Ok::<(), reth_era::e2s_types::E2sError>(()) +//! # Ok::<(), reth_era::e2s::error::E2sError>(()) //! ``` //! //! ## [`CompressedReceipts`] //! //! ```rust -//! use alloy_consensus::ReceiptWithBloom; -//! use reth_era::{execution_types::CompressedReceipts, DecodeCompressed}; -//! use reth_ethereum_primitives::{Receipt, TxType}; -//! -//! let receipt = Receipt { -//! tx_type: TxType::Legacy, -//! success: true, -//! cumulative_gas_used: 21000, -//! logs: vec![], +//! use alloy_consensus::{Eip658Value, Receipt, ReceiptEnvelope, ReceiptWithBloom}; +//! use reth_era::{ +//! common::decode::DecodeCompressedRlp, era1::types::execution::CompressedReceipts, //! }; -//! let receipt_with_bloom = ReceiptWithBloom { receipt, logs_bloom: Default::default() }; +//! +//! let receipt = +//! Receipt { status: Eip658Value::Eip658(true), cumulative_gas_used: 21000, logs: vec![] }; +//! let receipt_with_bloom = ReceiptWithBloom::new(receipt, Default::default()); +//! let enveloped_receipt = ReceiptEnvelope::Legacy(receipt_with_bloom); //! // Compress the receipt: rlp encoding and snappy compression -//! let compressed_receipt_data = CompressedReceipts::from_encodable(&receipt_with_bloom)?; +//! let compressed_receipt_data = CompressedReceipts::from_encodable(&enveloped_receipt)?; //! // Get raw receipt by decoding and decompressing compressed and encoded receipt -//! let decompressed_receipt = compressed_receipt_data.decode::()?; -//! assert_eq!(decompressed_receipt.receipt.cumulative_gas_used, 21000); -//! # Ok::<(), reth_era::e2s_types::E2sError>(()) +//! let decompressed_receipt = compressed_receipt_data.decode::()?; +//! assert_eq!(decompressed_receipt.cumulative_gas_used(), 21000); +//! # Ok::<(), reth_era::e2s::error::E2sError>(()) //! 
`````` use crate::{ - e2s_types::{E2sError, Entry}, - DecodeCompressed, + common::decode::DecodeCompressedRlp, + e2s::{error::E2sError, types::Entry}, }; use alloy_consensus::{Block, BlockBody, Header}; use alloy_primitives::{B256, U256}; @@ -227,7 +225,7 @@ impl CompressedHeader { } } -impl DecodeCompressed for CompressedHeader { +impl DecodeCompressedRlp for CompressedHeader { fn decode(&self) -> Result { let decoder = SnappyRlpCodec::::new(); decoder.decode(&self.data) @@ -314,7 +312,7 @@ impl CompressedBody { } } -impl DecodeCompressed for CompressedBody { +impl DecodeCompressedRlp for CompressedBody { fn decode(&self) -> Result { let decoder = SnappyRlpCodec::::new(); decoder.decode(&self.data) @@ -405,7 +403,7 @@ impl CompressedReceipts { } } -impl DecodeCompressed for CompressedReceipts { +impl DecodeCompressedRlp for CompressedReceipts { fn decode(&self) -> Result { let decoder = SnappyRlpCodec::::new(); decoder.decode(&self.data) @@ -697,8 +695,8 @@ mod tests { .expect("Failed to compress receipt list"); // Decode the compressed receipts back - // Note: most likely the decoding for real era files will be done to reach - // `Vec`` + // Note: For real ERA1 files, use `Vec` before Era ~1520 or use + // `Vec` after this era let decoded_receipts: Vec = compressed_receipts.decode().expect("Failed to decode compressed receipt list"); diff --git a/crates/era/src/era1_types.rs b/crates/era/src/era1/types/group.rs similarity index 81% rename from crates/era/src/era1_types.rs rename to crates/era/src/era1/types/group.rs index ef239f3e16..4d3a049aa6 100644 --- a/crates/era/src/era1_types.rs +++ b/crates/era/src/era1/types/group.rs @@ -1,11 +1,11 @@ -//! Era1 types +//! Era1 group for era1 file content //! //! See also use crate::{ - e2s_types::{Entry, IndexEntry}, - era_file_ops::EraFileId, - execution_types::{Accumulator, BlockTuple, MAX_BLOCKS_PER_ERA1}, + common::file_ops::{EraFileId, EraFileType}, + e2s::types::{Entry, IndexEntry}, + era1::types::execution::{Accumulator, BlockTuple, MAX_BLOCKS_PER_ERA1}, }; use alloy_primitives::BlockNumber; @@ -105,6 +105,10 @@ pub struct Era1Id { /// Optional hash identifier for this file /// First 4 bytes of the last historical root in the last state in the era file pub hash: Option<[u8; 4]>, + + /// Whether to include era count in filename + /// It is used for custom exports when we don't use the max number of items per file + pub include_era_count: bool, } impl Era1Id { @@ -114,7 +118,13 @@ impl Era1Id { start_block: BlockNumber, block_count: u32, ) -> Self { - Self { network_name: network_name.into(), start_block, block_count, hash: None } + Self { + network_name: network_name.into(), + start_block, + block_count, + hash: None, + include_era_count: false, + } } /// Add a hash identifier to [`Era1Id`] @@ -123,21 +133,17 @@ impl Era1Id { self } - // Helper function to calculate the number of eras per era1 file, - // If the user can decide how many blocks per era1 file there are, we need to calculate it. - // Most of the time it should be 1, but it can never be more than 2 eras per file - // as there is a maximum of 8192 blocks per era1 file. 
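The rest of this hunk drops the era count from the default file name and re-adds it only through the new `with_era_count()` builder. A small sketch of the resulting names, mirroring the test cases further down in this hunk:

```rust
use reth_era::{common::file_ops::EraFileId, era1::types::group::Era1Id};

fn main() {
    // Default spec naming: `<network>-<era>-<hash>.era1`
    let id = Era1Id::new("mainnet", 0, 8192).with_hash([0x5e, 0xc1, 0xff, 0xb8]);
    assert_eq!(id.to_file_name(), "mainnet-00000-5ec1ffb8.era1");

    // Custom exports that use fewer blocks per file can opt back into the
    // era count: `<network>-<era>-<count>-<hash>.era1`
    let id = Era1Id::new("mainnet", 8000, 500)
        .with_hash([0xab, 0xcd, 0xef, 0x12])
        .with_era_count();
    assert_eq!(id.to_file_name(), "mainnet-00000-00002-abcdef12.era1");
}
```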
- const fn calculate_era_count(&self, first_era: u64) -> u64 { - // Calculate the actual last block number in the range - let last_block = self.start_block + self.block_count as u64 - 1; - // Find which era the last block belongs to - let last_era = last_block / MAX_BLOCKS_PER_ERA1 as u64; - // Count how many eras we span - last_era - first_era + 1 + /// Include era count in filename, for custom block-per-file exports + pub const fn with_era_count(mut self) -> Self { + self.include_era_count = true; + self } } impl EraFileId for Era1Id { + const FILE_TYPE: EraFileType = EraFileType::Era1; + + const ITEMS_PER_ERA: u64 = MAX_BLOCKS_PER_ERA1 as u64; fn network_name(&self) -> &str { &self.network_name } @@ -149,24 +155,13 @@ impl EraFileId for Era1Id { fn count(&self) -> u32 { self.block_count } - /// Convert to file name following the era file naming: - /// `---.era(1)` - /// - /// See also - fn to_file_name(&self) -> String { - // Find which era the first block belongs to - let era_number = self.start_block / MAX_BLOCKS_PER_ERA1 as u64; - let era_count = self.calculate_era_count(era_number); - if let Some(hash) = self.hash { - format!( - "{}-{:05}-{:05}-{:02x}{:02x}{:02x}{:02x}.era1", - self.network_name, era_number, era_count, hash[0], hash[1], hash[2], hash[3] - ) - } else { - // era spec format with placeholder hash when no hash available - // Format: `---00000000.era1` - format!("{}-{:05}-{:05}-00000000.era1", self.network_name, era_number, era_count) - } + + fn hash(&self) -> Option<[u8; 4]> { + self.hash + } + + fn include_era_count(&self) -> bool { + self.include_era_count } } @@ -174,8 +169,8 @@ impl EraFileId for Era1Id { mod tests { use super::*; use crate::{ + common::decode::DecodeCompressedRlp, test_utils::{create_sample_block, create_test_block_with_compressed_data}, - DecodeCompressed, }; use alloy_consensus::ReceiptWithBloom; use alloy_primitives::{B256, U256}; @@ -314,35 +309,51 @@ mod tests { #[test_case::test_case( Era1Id::new("mainnet", 0, 8192).with_hash([0x5e, 0xc1, 0xff, 0xb8]), - "mainnet-00000-00001-5ec1ffb8.era1"; + "mainnet-00000-5ec1ffb8.era1"; "Mainnet era 0" )] #[test_case::test_case( Era1Id::new("mainnet", 8192, 8192).with_hash([0x5e, 0xcb, 0x9b, 0xf9]), - "mainnet-00001-00001-5ecb9bf9.era1"; + "mainnet-00001-5ecb9bf9.era1"; "Mainnet era 1" )] #[test_case::test_case( Era1Id::new("sepolia", 0, 8192).with_hash([0x90, 0x91, 0x84, 0x72]), - "sepolia-00000-00001-90918472.era1"; + "sepolia-00000-90918472.era1"; "Sepolia era 0" )] #[test_case::test_case( Era1Id::new("sepolia", 155648, 8192).with_hash([0xfa, 0x77, 0x00, 0x19]), - "sepolia-00019-00001-fa770019.era1"; + "sepolia-00019-fa770019.era1"; "Sepolia era 19" )] #[test_case::test_case( Era1Id::new("mainnet", 1000, 100), - "mainnet-00000-00001-00000000.era1"; + "mainnet-00000-00000000.era1"; "ID without hash" )] #[test_case::test_case( Era1Id::new("sepolia", 101130240, 8192).with_hash([0xab, 0xcd, 0xef, 0x12]), - "sepolia-12345-00001-abcdef12.era1"; + "sepolia-12345-abcdef12.era1"; "Large block number era 12345" )] - fn test_era1id_file_naming(id: Era1Id, expected_file_name: &str) { + fn test_era1_id_file_naming(id: Era1Id, expected_file_name: &str) { + let actual_file_name = id.to_file_name(); + assert_eq!(actual_file_name, expected_file_name); + } + + // File naming with era-count, for custom exports + #[test_case::test_case( + Era1Id::new("mainnet", 0, 8192).with_hash([0x5e, 0xc1, 0xff, 0xb8]).with_era_count(), + "mainnet-00000-00001-5ec1ffb8.era1"; + "Mainnet era 0 with count" + )] + #[test_case::test_case( + 
Era1Id::new("mainnet", 8000, 500).with_hash([0xab, 0xcd, 0xef, 0x12]).with_era_count(), + "mainnet-00000-00002-abcdef12.era1"; + "Spanning two eras with count" + )] + fn test_era1_id_file_naming_with_era_count(id: Era1Id, expected_file_name: &str) { let actual_file_name = id.to_file_name(); assert_eq!(actual_file_name, expected_file_name); } diff --git a/crates/era/src/era1/types/mod.rs b/crates/era/src/era1/types/mod.rs new file mode 100644 index 0000000000..44568ddf79 --- /dev/null +++ b/crates/era/src/era1/types/mod.rs @@ -0,0 +1,6 @@ +//! Era1 types +//! +//! See also + +pub mod execution; +pub mod group; diff --git a/crates/era/src/era_file_ops.rs b/crates/era/src/era_file_ops.rs deleted file mode 100644 index 469d6b7835..0000000000 --- a/crates/era/src/era_file_ops.rs +++ /dev/null @@ -1,124 +0,0 @@ -//! Represents reading and writing operations' era file - -use crate::{e2s_types::Version, E2sError}; -use std::{ - fs::File, - io::{Read, Seek, Write}, - path::Path, -}; - -/// Represents era file with generic content and identifier types -pub trait EraFileFormat: Sized { - /// Content group type - type EraGroup; - - /// The identifier type - type Id: EraFileId; - - /// Get the version - fn version(&self) -> &Version; - - /// Get the content group - fn group(&self) -> &Self::EraGroup; - - /// Get the file identifier - fn id(&self) -> &Self::Id; - - /// Create a new instance - fn new(group: Self::EraGroup, id: Self::Id) -> Self; -} - -/// Era file identifiers -pub trait EraFileId: Clone { - /// Convert to standardized file name - fn to_file_name(&self) -> String; - - /// Get the network name - fn network_name(&self) -> &str; - - /// Get the starting number (block or slot) - fn start_number(&self) -> u64; - - /// Get the count of items - fn count(&self) -> u32; -} - -/// [`StreamReader`] for reading era-format files -pub trait StreamReader: Sized { - /// The file type the reader produces - type File: EraFileFormat; - - /// The iterator type for streaming data - type Iterator; - - /// Create a new reader - fn new(reader: R) -> Self; - - /// Read and parse the complete file - fn read(self, network_name: String) -> Result; - - /// Get an iterator for streaming processing - fn iter(self) -> Self::Iterator; -} - -/// [`FileReader`] provides reading era file operations for era files -pub trait FileReader: StreamReader { - /// Opens and reads an era file from the given path - fn open>( - path: P, - network_name: impl Into, - ) -> Result { - let file = File::open(path).map_err(E2sError::Io)?; - let reader = Self::new(file); - reader.read(network_name.into()) - } -} - -/// [`StreamWriter`] for writing era-format files -pub trait StreamWriter: Sized { - /// The file type this writer handles - type File: EraFileFormat; - - /// Create a new writer - fn new(writer: W) -> Self; - - /// Writer version - fn write_version(&mut self) -> Result<(), E2sError>; - - /// Write a complete era file - fn write_file(&mut self, file: &Self::File) -> Result<(), E2sError>; - - /// Flush any buffered data - fn flush(&mut self) -> Result<(), E2sError>; -} - -/// [`StreamWriter`] provides writing file operations for era files -pub trait FileWriter { - /// Era file type the writer handles - type File: EraFileFormat; - - /// Creates a new file at the specified path and writes the era file to it - fn create>(path: P, file: &Self::File) -> Result<(), E2sError>; - - /// Creates a file in the directory using standardized era naming - fn create_with_id>(directory: P, file: &Self::File) -> Result<(), E2sError>; -} - -impl> 
FileWriter for T { - type File = T::File; - - /// Creates a new file at the specified path and writes the era file to it - fn create>(path: P, file: &Self::File) -> Result<(), E2sError> { - let file_handle = File::create(path).map_err(E2sError::Io)?; - let mut writer = Self::new(file_handle); - writer.write_file(file)?; - Ok(()) - } - - /// Creates a file in the directory using standardized era naming - fn create_with_id>(directory: P, file: &Self::File) -> Result<(), E2sError> { - let filename = file.id().to_file_name(); - let path = directory.as_ref().join(filename); - Self::create(path, file) - } -} diff --git a/crates/era/src/lib.rs b/crates/era/src/lib.rs index fd0596e9df..2e4b755d76 100644 --- a/crates/era/src/lib.rs +++ b/crates/era/src/lib.rs @@ -12,29 +12,10 @@ //! - Era format: //! - Era1 format: -pub mod consensus_types; -pub mod e2s_file; -pub mod e2s_types; -pub mod era1_file; -pub mod era1_types; -pub mod era_file_ops; -pub mod era_types; -pub mod execution_types; +pub mod common; +pub mod e2s; +pub mod era; +pub mod era1; + #[cfg(test)] pub(crate) mod test_utils; - -use crate::e2s_types::E2sError; -use alloy_rlp::Decodable; -use ssz::Decode; - -/// Extension trait for generic decoding from compressed data -pub trait DecodeCompressed { - /// Decompress and decode the data into the given type - fn decode(&self) -> Result; -} - -/// Extension trait for generic decoding from compressed ssz data -pub trait DecodeCompressedSsz { - /// Decompress and decode the SSZ data into the given type - fn decode(&self) -> Result; -} diff --git a/crates/era/src/test_utils.rs b/crates/era/src/test_utils.rs index 96b2545be1..f5aab53f74 100644 --- a/crates/era/src/test_utils.rs +++ b/crates/era/src/test_utils.rs @@ -1,8 +1,8 @@ //! Utilities helpers to create era data structures for testing purposes. use crate::{ - consensus_types::{CompressedBeaconState, CompressedSignedBeaconBlock}, - execution_types::{ + era::types::consensus::{CompressedBeaconState, CompressedSignedBeaconBlock}, + era1::types::execution::{ BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, TotalDifficulty, }, }; diff --git a/crates/era/tests/it/dd.rs b/crates/era/tests/it/dd.rs deleted file mode 100644 index 769a398d6c..0000000000 --- a/crates/era/tests/it/dd.rs +++ /dev/null @@ -1,149 +0,0 @@ -//! Simple decoding and decompressing tests -//! 
for mainnet era1 files - -use alloy_consensus::{BlockBody, Header}; -use alloy_primitives::U256; -use reth_era::{ - e2s_types::IndexEntry, - era1_file::{Era1Reader, Era1Writer}, - era_file_ops::{StreamReader, StreamWriter}, - execution_types::CompressedBody, -}; -use reth_ethereum_primitives::TransactionSigned; -use std::io::Cursor; - -use crate::{open_test_file, Era1TestDownloader, ERA1_MAINNET_FILES_NAMES, MAINNET}; - -#[tokio::test(flavor = "multi_thread")] -#[ignore = "download intensive"] -async fn test_mainnet_era1_only_file_decompression_and_decoding() -> eyre::Result<()> { - let downloader = Era1TestDownloader::new().await.expect("Failed to create downloader"); - - for &filename in &ERA1_MAINNET_FILES_NAMES { - println!("\nTesting file: {filename}"); - let file = open_test_file(filename, &downloader, MAINNET).await?; - - // Test block decompression across different positions in the file - let test_block_indices = [ - 0, // First block - file.group.blocks.len() / 2, // Middle block - file.group.blocks.len() - 1, // Last block - ]; - - for &block_idx in &test_block_indices { - let block = &file.group.blocks[block_idx]; - let block_number = file.group.block_index.starting_number() + block_idx as u64; - - println!( - "\n Testing block {}, compressed body size: {} bytes", - block_number, - block.body.data.len() - ); - - // Test header decompression and decoding - let header_data = block.header.decompress()?; - assert!( - !header_data.is_empty(), - "Block {block_number} header decompression should produce non-empty data" - ); - - let header = block.header.decode_header()?; - assert_eq!( - header.number, block_number, - "Decoded header should have correct block number" - ); - println!("Header decompression and decoding successful"); - - // Test body decompression - let body_data = block.body.decompress()?; - assert!( - !body_data.is_empty(), - "Block {block_number} body decompression should produce non-empty data" - ); - println!("Body decompression successful ({} bytes)", body_data.len()); - - let decoded_body: BlockBody = - CompressedBody::decode_body_from_decompressed::( - &body_data, - ) - .expect("Failed to decode body"); - - println!( - "Body decoding successful: {} transactions, {} ommers, withdrawals: {}", - decoded_body.transactions.len(), - decoded_body.ommers.len(), - decoded_body.withdrawals.is_some() - ); - - // Test receipts decompression - let receipts_data = block.receipts.decompress()?; - assert!( - !receipts_data.is_empty(), - "Block {block_number} receipts decompression should produce non-empty data" - ); - println!("Receipts decompression successful ({} bytes)", receipts_data.len()); - - assert!( - block.total_difficulty.value > U256::ZERO, - "Block {block_number} should have non-zero difficulty" - ); - println!("Total difficulty verified: {}", block.total_difficulty.value); - } - - // Test round-trip serialization - println!("\n Testing data preservation roundtrip..."); - let mut buffer = Vec::new(); - { - let mut writer = Era1Writer::new(&mut buffer); - writer.write_file(&file)?; - } - - // Read back from buffer - let reader = Era1Reader::new(Cursor::new(&buffer)); - let read_back_file = reader.read(file.id.network_name.clone())?; - - // Verify basic properties are preserved - assert_eq!(file.id.network_name, read_back_file.id.network_name); - assert_eq!(file.id.start_block, read_back_file.id.start_block); - assert_eq!(file.group.blocks.len(), read_back_file.group.blocks.len()); - assert_eq!(file.group.accumulator.root, read_back_file.group.accumulator.root); - - // 
Test data preservation for some blocks - for &idx in &test_block_indices { - let original_block = &file.group.blocks[idx]; - let read_back_block = &read_back_file.group.blocks[idx]; - let block_number = file.group.block_index.starting_number() + idx as u64; - - println!("Block {block_number} details:"); - println!(" Header size: {} bytes", original_block.header.data.len()); - println!(" Body size: {} bytes", original_block.body.data.len()); - println!(" Receipts size: {} bytes", original_block.receipts.data.len()); - - // Test that decompressed data is identical - assert_eq!( - original_block.header.decompress()?, - read_back_block.header.decompress()?, - "Header data should be identical for block {block_number}" - ); - - assert_eq!( - original_block.body.decompress()?, - read_back_block.body.decompress()?, - "Body data should be identical for block {block_number}" - ); - - assert_eq!( - original_block.receipts.decompress()?, - read_back_block.receipts.decompress()?, - "Receipts data should be identical for block {block_number}" - ); - - assert_eq!( - original_block.total_difficulty.value, read_back_block.total_difficulty.value, - "Total difficulty should be identical for block {block_number}" - ); - } - } - - Ok(()) -} diff --git a/crates/era/tests/it/era/genesis.rs b/crates/era/tests/it/era/genesis.rs new file mode 100644 index 0000000000..66f64421b7 --- /dev/null +++ b/crates/era/tests/it/era/genesis.rs @@ -0,0 +1,37 @@ +//! Genesis block tests for `era1` files. +//! +//! These tests verify proper decompression and decoding of genesis blocks +//! from different networks. + +use crate::{EraTestDownloader, ERA_HOODI_FILES_NAMES, HOODI}; + +#[tokio::test(flavor = "multi_thread")] +#[ignore = "download intensive"] +async fn test_hoodi_genesis_era_decompression() -> eyre::Result<()> { + let downloader = EraTestDownloader::new().await?; + + let file = downloader.open_era_file(ERA_HOODI_FILES_NAMES[0], HOODI).await?; + + // Verify this is genesis era + assert!(file.group.is_genesis(), "First file should be genesis era"); + assert_eq!(file.group.starting_slot(), 0, "Genesis should start at slot 0"); + + // Genesis era has no blocks + assert_eq!(file.group.blocks.len(), 0, "Genesis era should have no blocks"); + + // Genesis should not have block slot index + assert!(file.group.slot_index.is_none(), "Genesis should not have block slot index"); + + // Test state decompression + let state_data = file.group.era_state.decompress()?; + assert!(!state_data.is_empty(), "Decompressed state should not be empty"); + + // Verify state slot index + assert_eq!( + file.group.state_slot_index.slot_count(), + 1, + "Genesis state index should have count of 1" + ); + + Ok(()) +} diff --git a/crates/era/tests/it/era/mod.rs b/crates/era/tests/it/era/mod.rs new file mode 100644 index 0000000000..a5bb431720 --- /dev/null +++ b/crates/era/tests/it/era/mod.rs @@ -0,0 +1,2 @@ +mod genesis; +mod roundtrip; diff --git a/crates/era/tests/it/era/roundtrip.rs b/crates/era/tests/it/era/roundtrip.rs new file mode 100644 index 0000000000..7234b43f71 --- /dev/null +++ b/crates/era/tests/it/era/roundtrip.rs @@ -0,0 +1,228 @@ +//! Roundtrip tests for `.era` files. +//! +//! These tests verify the full lifecycle of era files by: +//! - Reading files from their original source +//! - Decompressing their contents +//! - Re-compressing the data +//! - Writing the data back to a new file +//! - Confirming that all original data is preserved throughout the process +//! +//! +//! 
Only a couple of era files are downloaded from `https://mainnet.era.nimbus.team/` for mainnet +//! and `https://hoodi.era.nimbus.team/` for hoodi to keep the tests efficient. + +use reth_era::{ + common::file_ops::{EraFileFormat, StreamReader, StreamWriter}, + era::{ + file::{EraFile, EraReader, EraWriter}, + types::{ + consensus::{CompressedBeaconState, CompressedSignedBeaconBlock}, + group::{EraGroup, EraId}, + }, + }, +}; +use std::io::Cursor; + +use crate::{EraTestDownloader, HOODI, MAINNET}; + +// Helper function to test roundtrip compression/encoding for a specific file +async fn test_era_file_roundtrip( + downloader: &EraTestDownloader, + filename: &str, + network: &str, +) -> eyre::Result<()> { + println!("\nTesting roundtrip for file: {filename}"); + + let original_file = downloader.open_era_file(filename, network).await?; + + if original_file.group.is_genesis() { + println!("Genesis era detected, using special handling"); + assert_eq!(original_file.group.blocks.len(), 0, "Genesis should have no blocks"); + assert!( + original_file.group.slot_index.is_none(), + "Genesis should not have block slot index" + ); + + let state_data = original_file.group.era_state.decompress()?; + println!(" Genesis state decompressed: {} bytes", state_data.len()); + + // File roundtrip test + let mut buffer = Vec::new(); + { + let mut writer = EraWriter::new(&mut buffer); + writer.write_file(&original_file)?; + } + + let reader = EraReader::new(Cursor::new(&buffer)); + let roundtrip_file = reader.read(network.to_string())?; + + assert_eq!( + original_file.group.era_state.decompress()?, + roundtrip_file.group.era_state.decompress()?, + "Genesis state data should be identical after roundtrip" + ); + + println!("Genesis era verified successfully"); + return Ok(()); + } + + // non genesis start + let original_state_data = original_file.group.era_state.decompress()?; + + let mut buffer = Vec::new(); + { + let mut writer = EraWriter::new(&mut buffer); + writer.write_file(&original_file)?; + } + + // Read back from buffer + let reader = EraReader::new(Cursor::new(&buffer)); + let roundtrip_file = reader.read(network.to_string())?; + + assert_eq!( + original_file.id.network_name, roundtrip_file.id.network_name, + "Network name should match after roundtrip" + ); + assert_eq!( + original_file.id.start_slot, roundtrip_file.id.start_slot, + "Start slot should match after roundtrip" + ); + assert_eq!( + original_file.group.blocks.len(), + roundtrip_file.group.blocks.len(), + "Block count should match after roundtrip" + ); + + // Select a few blocks to test + let test_block_indices = [ + 0, // First block + original_file.group.blocks.len() / 2, // Middle block + original_file.group.blocks.len() - 1, // Last block + ]; + + // Test individual beacon blocks + for &block_idx in &test_block_indices { + let original_block = &original_file.group.blocks[block_idx]; + let roundtrip_block = &roundtrip_file.group.blocks[block_idx]; + + let original_block_data = original_block.decompress()?; + let roundtrip_block_data = roundtrip_block.decompress()?; + + // Verify file roundtrip preserves data + assert_eq!( + original_block_data, roundtrip_block_data, + "Block {block_idx} data should be identical after file roundtrip" + ); + + // Verify compression roundtrip + let recompressed_block = CompressedSignedBeaconBlock::from_ssz(&original_block_data)?; + let recompressed_block_data = recompressed_block.decompress()?; + + assert_eq!( + original_block_data, recompressed_block_data, + "Block {block_idx} should be identical after 
re-compression cycle" + ); + } + + let roundtrip_state_data = roundtrip_file.group.era_state.decompress()?; + + assert_eq!( + original_state_data, roundtrip_state_data, + "Era state data should be identical after roundtrip" + ); + + let recompressed_state = CompressedBeaconState::from_ssz(&roundtrip_state_data)?; + let recompressed_state_data = recompressed_state.decompress()?; + + assert_eq!( + original_state_data, recompressed_state_data, + "Era state data should be identical after re-compression cycle" + ); + + let recompressed_blocks: Vec = roundtrip_file + .group + .blocks + .iter() + .map(|block| { + let data = block.decompress()?; + CompressedSignedBeaconBlock::from_ssz(&data) + }) + .collect::, _>>()?; + + let new_group = if let Some(ref block_index) = roundtrip_file.group.slot_index { + EraGroup::with_block_index( + recompressed_blocks, + recompressed_state, + block_index.clone(), + roundtrip_file.group.state_slot_index.clone(), + ) + } else { + EraGroup::new( + recompressed_blocks, + recompressed_state, + roundtrip_file.group.state_slot_index, + ) + }; + + let (start_slot, slot_count) = new_group.slot_range(); + let new_file = EraFile::new(new_group, EraId::new(network, start_slot, slot_count)); + + let mut reconstructed_buffer = Vec::new(); + { + let mut writer = EraWriter::new(&mut reconstructed_buffer); + writer.write_file(&new_file)?; + } + + let reader = EraReader::new(Cursor::new(&reconstructed_buffer)); + let reconstructed_file = reader.read(network.to_string())?; + + assert_eq!( + original_file.group.blocks.len(), + reconstructed_file.group.blocks.len(), + "Block count should match after full reconstruction" + ); + + // Verify all reconstructed blocks match + for (idx, (orig, recon)) in + original_file.group.blocks.iter().zip(reconstructed_file.group.blocks.iter()).enumerate() + { + assert_eq!( + orig.decompress()?, + recon.decompress()?, + "Block {idx} should match after full reconstruction" + ); + } + + // Verify reconstructed state matches + assert_eq!( + original_state_data, + reconstructed_file.group.era_state.decompress()?, + "State should match after full reconstruction" + ); + + println!("File {filename} roundtrip successful"); + Ok(()) +} + +#[test_case::test_case("mainnet-00000-4b363db9.era"; "era_roundtrip_mainnet_0")] +#[test_case::test_case("mainnet-00178-0d0a5290.era"; "era_roundtrip_mainnet_178")] +#[test_case::test_case("mainnet-01070-7616e3e2.era"; "era_roundtrip_mainnet_1070")] +#[test_case::test_case("mainnet-01267-e3ddc749.era"; "era_roundtrip_mainnet_1267")] +#[test_case::test_case("mainnet-01592-d4dc8b98.era"; "era_roundtrip_mainnet_1592")] +#[tokio::test(flavor = "multi_thread")] +#[ignore = "download intensive"] +async fn test_roundtrip_compression_encoding_mainnet(filename: &str) -> eyre::Result<()> { + let downloader = EraTestDownloader::new().await?; + test_era_file_roundtrip(&downloader, filename, MAINNET).await +} + +#[test_case::test_case("hoodi-00000-212f13fc.era"; "era_roundtrip_hoodi_0")] +#[test_case::test_case("hoodi-00021-857e418b.era"; "era_roundtrip_hoodi_21")] +#[test_case::test_case("hoodi-00175-202aaa6d.era"; "era_roundtrip_hoodi_175")] +#[test_case::test_case("hoodi-00201-0d521fc8.era"; "era_roundtrip_hoodi_201")] +#[tokio::test(flavor = "multi_thread")] +#[ignore = "download intensive"] +async fn test_roundtrip_compression_encoding_hoodi(filename: &str) -> eyre::Result<()> { + let downloader = EraTestDownloader::new().await?; + test_era_file_roundtrip(&downloader, filename, HOODI).await +} diff --git 
a/crates/era/tests/it/genesis.rs b/crates/era/tests/it/era1/genesis.rs similarity index 93% rename from crates/era/tests/it/genesis.rs rename to crates/era/tests/it/era1/genesis.rs index 80869f97fa..0046c8bb89 100644 --- a/crates/era/tests/it/genesis.rs +++ b/crates/era/tests/it/era1/genesis.rs @@ -4,16 +4,16 @@ //! from different networks. use crate::{ - Era1TestDownloader, ERA1_MAINNET_FILES_NAMES, ERA1_SEPOLIA_FILES_NAMES, MAINNET, SEPOLIA, + EraTestDownloader, ERA1_MAINNET_FILES_NAMES, ERA1_SEPOLIA_FILES_NAMES, MAINNET, SEPOLIA, }; use alloy_consensus::{BlockBody, Header}; -use reth_era::{e2s_types::IndexEntry, execution_types::CompressedBody}; +use reth_era::{e2s::types::IndexEntry, era1::types::execution::CompressedBody}; use reth_ethereum_primitives::TransactionSigned; #[tokio::test(flavor = "multi_thread")] #[ignore = "download intensive"] async fn test_mainnet_genesis_block_decompression() -> eyre::Result<()> { - let downloader = Era1TestDownloader::new().await?; + let downloader = EraTestDownloader::new().await?; let file = downloader.open_era1_file(ERA1_MAINNET_FILES_NAMES[0], MAINNET).await?; @@ -65,7 +65,7 @@ async fn test_mainnet_genesis_block_decompression() -> eyre::Result<()> { #[tokio::test(flavor = "multi_thread")] #[ignore = "download intensive"] async fn test_sepolia_genesis_block_decompression() -> eyre::Result<()> { - let downloader = Era1TestDownloader::new().await?; + let downloader = EraTestDownloader::new().await?; let file = downloader.open_era1_file(ERA1_SEPOLIA_FILES_NAMES[0], SEPOLIA).await?; diff --git a/crates/era/tests/it/era1/mod.rs b/crates/era/tests/it/era1/mod.rs new file mode 100644 index 0000000000..a5bb431720 --- /dev/null +++ b/crates/era/tests/it/era1/mod.rs @@ -0,0 +1,2 @@ +mod genesis; +mod roundtrip; diff --git a/crates/era/tests/it/roundtrip.rs b/crates/era/tests/it/era1/roundtrip.rs similarity index 81% rename from crates/era/tests/it/roundtrip.rs rename to crates/era/tests/it/era1/roundtrip.rs index a78af34137..e86189e208 100644 --- a/crates/era/tests/it/roundtrip.rs +++ b/crates/era/tests/it/era1/roundtrip.rs @@ -6,28 +6,32 @@ //! - Re-encoding and recompressing the data //! - Writing the data back to a new file //! - Confirming that all original data is preserved throughout the process +//! +//! Only a couple of era1 files are downloaded from for mainnet +//! and for sepolia to keep the tests efficient. 
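The relocated integration tests above drive the readers and writers through the traits that now live in `common::file_ops`. A minimal sketch of that flow under the new paths (the file name here is only an example):

```rust
use reth_era::{
    common::file_ops::{FileReader, StreamReader, StreamWriter},
    e2s::error::E2sError,
    era1::file::{Era1Reader, Era1Writer},
};
use std::io::Cursor;

fn roundtrip_sketch() -> Result<(), E2sError> {
    // Open an .era1 file from disk; the network name becomes part of its id.
    let file = Era1Reader::open("mainnet-00000-5ec1ffb8.era1", "mainnet")?;

    // Write it back into a buffer and read it again.
    let mut buf = Vec::new();
    {
        let mut writer = Era1Writer::new(&mut buf);
        writer.write_file(&file)?;
    }
    let reread = Era1Reader::new(Cursor::new(&buf)).read("mainnet".to_string())?;
    assert_eq!(file.group.blocks.len(), reread.group.blocks.len());
    Ok(())
}
```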
-use alloy_consensus::{BlockBody, BlockHeader, Header, ReceiptWithBloom}; -use rand::{prelude::IndexedRandom, rng}; +use alloy_consensus::{BlockBody, BlockHeader, Header, ReceiptEnvelope}; use reth_era::{ - e2s_types::IndexEntry, - era1_file::{Era1File, Era1Reader, Era1Writer}, - era1_types::{Era1Group, Era1Id}, - era_file_ops::{EraFileFormat, StreamReader, StreamWriter}, - execution_types::{ - BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, TotalDifficulty, + common::file_ops::{EraFileFormat, StreamReader, StreamWriter}, + e2s::types::IndexEntry, + era1::{ + file::{Era1File, Era1Reader, Era1Writer}, + types::{ + execution::{ + BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, TotalDifficulty, + }, + group::{Era1Group, Era1Id}, + }, }, }; use reth_ethereum_primitives::TransactionSigned; use std::io::Cursor; -use crate::{ - Era1TestDownloader, ERA1_MAINNET_FILES_NAMES, ERA1_SEPOLIA_FILES_NAMES, MAINNET, SEPOLIA, -}; +use crate::{EraTestDownloader, MAINNET, SEPOLIA}; // Helper function to test roundtrip compression/encoding for a specific file -async fn test_file_roundtrip( - downloader: &Era1TestDownloader, +async fn test_era1_file_roundtrip( + downloader: &EraTestDownloader, filename: &str, network: &str, ) -> eyre::Result<()> { @@ -148,10 +152,9 @@ async fn test_file_roundtrip( ); // Decode receipts - let original_receipts_decoded = - original_block.receipts.decode::>()?; + let original_receipts_decoded = original_block.receipts.decode::>()?; let roundtrip_receipts_decoded = - roundtrip_block.receipts.decode::>()?; + roundtrip_block.receipts.decode::>()?; assert_eq!( original_receipts_decoded, roundtrip_receipts_decoded, @@ -252,35 +255,27 @@ async fn test_file_roundtrip( Ok(()) } +#[test_case::test_case("mainnet-00000-5ec1ffb8.era1"; "era1_roundtrip_mainnet_0")] +#[test_case::test_case("mainnet-00151-e322efe1.era1"; "era1_roundtrip_mainnet_151")] +#[test_case::test_case("mainnet-01367-d7efc68f.era1"; "era1_roundtrip_mainnet_1367")] +#[test_case::test_case("mainnet-01895-3f81607c.era1"; "era1_roundtrip_mainnet_1895")] #[tokio::test(flavor = "multi_thread")] #[ignore = "download intensive"] -async fn test_roundtrip_compression_encoding_mainnet() -> eyre::Result<()> { - let downloader = Era1TestDownloader::new().await?; +async fn test_roundtrip_compression_encoding_mainnet(filename: &str) -> eyre::Result<()> { + let downloader = EraTestDownloader::new().await?; + test_era1_file_roundtrip(&downloader, filename, MAINNET).await +} - let mut rng = rng(); +#[test_case::test_case("sepolia-00000-643a00f7.era1"; "era1_roundtrip_sepolia_0")] +#[test_case::test_case("sepolia-00074-0e81003c.era1"; "era1_roundtrip_sepolia_74")] +#[test_case::test_case("sepolia-00173-b6924da5.era1"; "era1_roundtrip_sepolia_173")] +#[test_case::test_case("sepolia-00182-a4f0a8a1.era1"; "era1_roundtrip_sepolia_182")] +#[tokio::test(flavor = "multi_thread")] +#[ignore = "download intensive"] +async fn test_roundtrip_compression_encoding_sepolia(filename: &str) -> eyre::Result<()> { + let downloader = EraTestDownloader::new().await?; - // pick 4 random files from the mainnet list - let sample_files: Vec<&str> = - ERA1_MAINNET_FILES_NAMES.choose_multiple(&mut rng, 4).copied().collect(); - - println!("Testing {} randomly selected mainnet files", sample_files.len()); - - for &filename in &sample_files { - test_file_roundtrip(&downloader, filename, MAINNET).await?; - } - - Ok(()) -} - -#[tokio::test(flavor = "multi_thread")] -#[ignore = "download intensive"] -async fn 
test_roundtrip_compression_encoding_sepolia() -> eyre::Result<()> { - let downloader = Era1TestDownloader::new().await?; - - // Test all Sepolia files - for &filename in &ERA1_SEPOLIA_FILES_NAMES { - test_file_roundtrip(&downloader, filename, SEPOLIA).await?; - } + test_era1_file_roundtrip(&downloader, filename, SEPOLIA).await?; Ok(()) } diff --git a/crates/era/tests/it/main.rs b/crates/era/tests/it/main.rs index 611862aa8e..3954ef860b 100644 --- a/crates/era/tests/it/main.rs +++ b/crates/era/tests/it/main.rs @@ -8,14 +8,15 @@ use reqwest::{Client, Url}; use reth_era::{ - e2s_types::E2sError, - era1_file::{Era1File, Era1Reader}, - era_file_ops::FileReader, + common::file_ops::{EraFileType, FileReader}, + e2s::error::E2sError, + era::file::{EraFile, EraReader}, + era1::file::{Era1File, Era1Reader}, }; use reth_era_downloader::EraClient; use std::{ collections::HashMap, - path::{Path, PathBuf}, + path::PathBuf, str::FromStr, sync::{Arc, Mutex}, }; @@ -23,9 +24,8 @@ use std::{ use eyre::{eyre, Result}; use tempfile::TempDir; -mod dd; -mod genesis; -mod roundtrip; +mod era; +mod era1; const fn main() {} @@ -33,7 +33,7 @@ const fn main() {} const MAINNET: &str = "mainnet"; /// Default mainnet url /// for downloading mainnet `.era1` files -const MAINNET_URL: &str = "https://era.ithaca.xyz/era1/"; +const ERA1_MAINNET_URL: &str = "https://era.ithaca.xyz/era1/"; /// Succinct list of mainnet files we want to download /// from @@ -54,7 +54,7 @@ const SEPOLIA: &str = "sepolia"; /// Default sepolia url /// for downloading sepolia `.era1` files -const SEPOLIA_URL: &str = "https://era.ithaca.xyz/sepolia-era1/"; +const ERA1_SEPOLIA_URL: &str = "https://era.ithaca.xyz/sepolia-era1/"; /// Succinct list of sepolia files we want to download /// from @@ -63,21 +63,56 @@ const ERA1_SEPOLIA_FILES_NAMES: [&str; 4] = [ "sepolia-00000-643a00f7.era1", "sepolia-00074-0e81003c.era1", "sepolia-00173-b6924da5.era1", - "sepolia-00182-a4f0a8a1.era1 ", + "sepolia-00182-a4f0a8a1.era1", ]; -/// Utility for downloading `.era1` files for tests -/// in a temporary directory -/// and caching them in memory +const HOODI: &str = "hoodi"; + +/// Default hoodi url +/// for downloading hoodi `.era` files +/// TODO: to replace with internal era files hosting url +const ERA_HOODI_URL: &str = "https://hoodi.era.nimbus.team/"; + +/// Succinct list of hoodi files we want to download +/// from //TODO: to replace with internal era files hosting url +/// for testing purposes +const ERA_HOODI_FILES_NAMES: [&str; 4] = [ + "hoodi-00000-212f13fc.era", + "hoodi-00021-857e418b.era", + "hoodi-00175-202aaa6d.era", + "hoodi-00201-0d521fc8.era", +]; + +/// Default mainnet url +/// for downloading mainnet `.era` files +//TODO: to replace with internal era files hosting url +const ERA_MAINNET_URL: &str = "https://mainnet.era.nimbus.team/"; + +/// Succinct list of mainnet files we want to download +/// from //TODO: to replace with internal era files hosting url +/// for testing purposes +const ERA_MAINNET_FILES_NAMES: [&str; 8] = [ + "mainnet-00000-4b363db9.era", + "mainnet-00178-0d0a5290.era", + "mainnet-00518-4e267a3a.era", + "mainnet-00780-bb546fec.era", + "mainnet-01070-7616e3e2.era", + "mainnet-01267-e3ddc749.era", + "mainnet-01581-82073d28.era", + "mainnet-01592-d4dc8b98.era", +]; + +/// Utility for downloading `.era` and `.era1` files for tests +/// in a temporary directory and caching them in memory #[derive(Debug)] -struct Era1TestDownloader { +struct EraTestDownloader { /// Temporary directory for storing downloaded files temp_dir: TempDir, /// 
Cache mapping file names to their paths file_cache: Arc>>, } -impl Era1TestDownloader { +impl EraTestDownloader { /// Create a new downloader instance with a temporary directory async fn new() -> Result { let temp_dir = @@ -97,29 +132,9 @@ impl Era1TestDownloader { } // check if the filename is supported - if !ERA1_MAINNET_FILES_NAMES.contains(&filename) && - !ERA1_SEPOLIA_FILES_NAMES.contains(&filename) - { - return Err(eyre!( - "Unknown file: {}. Only the following files are supported: {:?} or {:?}", - filename, - ERA1_MAINNET_FILES_NAMES, - ERA1_SEPOLIA_FILES_NAMES - )); - } - - // initialize the client and build url config - let url = match network { - MAINNET => MAINNET_URL, - SEPOLIA => SEPOLIA_URL, - _ => { - return Err(eyre!( - "Unknown network: {}. Only mainnet and sepolia are supported.", - network - )); - } - }; + self.validate_filename(filename, network)?; + let (url, _): (&str, &[&str]) = self.get_network_config(filename, network)?; let final_url = Url::from_str(url).map_err(|e| eyre!("Failed to parse URL: {}", e))?; let folder = self.temp_dir.path(); @@ -142,6 +157,7 @@ impl Era1TestDownloader { .download_to_file(file_url) .await .map_err(|e| eyre!("Failed to download file: {}", e))?; + // update the cache { let mut cache = self.file_cache.lock().unwrap(); @@ -151,24 +167,53 @@ impl Era1TestDownloader { Ok(downloaded_path.to_path_buf()) } - /// open .era1 file, downloading it if necessary + /// Validate that filename is in the supported list for the network + fn validate_filename(&self, filename: &str, network: &str) -> Result<()> { + let (_, supported_files) = self.get_network_config(filename, network)?; + + if !supported_files.contains(&filename) { + return Err(eyre!( + "Unknown file: '{}' for network '{}'. Supported files: {:?}", + filename, + network, + supported_files + )); + } + + Ok(()) + } + + /// Get network configuration, URL and supported files, based on network and file type + fn get_network_config( + &self, + filename: &str, + network: &str, + ) -> Result<(&'static str, &'static [&'static str])> { + let file_type = EraFileType::from_filename(filename) + .ok_or_else(|| eyre!("Unknown file extension for: {}", filename))?; + + match (network, file_type) { + (MAINNET, EraFileType::Era1) => Ok((ERA1_MAINNET_URL, &ERA1_MAINNET_FILES_NAMES[..])), + (MAINNET, EraFileType::Era) => Ok((ERA_MAINNET_URL, &ERA_MAINNET_FILES_NAMES[..])), + (SEPOLIA, EraFileType::Era1) => Ok((ERA1_SEPOLIA_URL, &ERA1_SEPOLIA_FILES_NAMES[..])), + (HOODI, EraFileType::Era) => Ok((ERA_HOODI_URL, &ERA_HOODI_FILES_NAMES[..])), + _ => Err(eyre!( + "Unsupported combination: network '{}' with file type '{:?}'", + network, + file_type + )), + } + } + + /// Open `.era1` file, downloading it if necessary async fn open_era1_file(&self, filename: &str, network: &str) -> Result { let path = self.download_file(filename, network).await?; Era1Reader::open(&path, network).map_err(|e| eyre!("Failed to open Era1 file: {e}")) } -} -/// Open a test file by name, -/// downloading only if it is necessary -async fn open_test_file( - file_path: &str, - downloader: &Era1TestDownloader, - network: &str, -) -> Result { - let filename = Path::new(file_path) - .file_name() - .and_then(|os_str| os_str.to_str()) - .ok_or_else(|| eyre!("Invalid file path: {}", file_path))?; - - downloader.open_era1_file(filename, network).await + /// Open `.era` file, downloading it if necessary + async fn open_era_file(&self, filename: &str, network: &str) -> Result { + let path = self.download_file(filename, network).await?; + 
EraReader::open(&path, network).map_err(|e| eyre!("Failed to open Era1 file: {e}")) + } } diff --git a/crates/ethereum/cli/Cargo.toml b/crates/ethereum/cli/Cargo.toml index 5dbb8bf4cd..3000f4c67b 100644 --- a/crates/ethereum/cli/Cargo.toml +++ b/crates/ethereum/cli/Cargo.toml @@ -23,13 +23,11 @@ reth-node-ethereum.workspace = true reth-node-metrics.workspace = true reth-rpc-server-types.workspace = true reth-tracing.workspace = true -reth-tracing-otlp.workspace = true reth-node-api.workspace = true # misc clap.workspace = true eyre.workspace = true -url.workspace = true tracing.workspace = true [dev-dependencies] @@ -37,9 +35,10 @@ tracing.workspace = true tempfile.workspace = true [features] -default = ["jemalloc", "otlp"] +default = [] otlp = ["reth-tracing/otlp", "reth-node-core/otlp"] +samply = ["reth-tracing/samply", "reth-node-core/samply"] dev = ["reth-cli-commands/arbitrary"] @@ -63,8 +62,23 @@ tracy-allocator = [] snmalloc = [] snmalloc-native = [] -min-error-logs = ["tracing/release_max_level_error"] -min-warn-logs = ["tracing/release_max_level_warn"] -min-info-logs = ["tracing/release_max_level_info"] -min-debug-logs = ["tracing/release_max_level_debug"] -min-trace-logs = ["tracing/release_max_level_trace"] +min-error-logs = [ + "tracing/release_max_level_error", + "reth-node-core/min-error-logs", +] +min-warn-logs = [ + "tracing/release_max_level_warn", + "reth-node-core/min-warn-logs", +] +min-info-logs = [ + "tracing/release_max_level_info", + "reth-node-core/min-info-logs", +] +min-debug-logs = [ + "tracing/release_max_level_debug", + "reth-node-core/min-debug-logs", +] +min-trace-logs = [ + "tracing/release_max_level_trace", + "reth-node-core/min-trace-logs", +] diff --git a/crates/ethereum/cli/src/app.rs b/crates/ethereum/cli/src/app.rs index b947d6df1d..b9561c9c44 100644 --- a/crates/ethereum/cli/src/app.rs +++ b/crates/ethereum/cli/src/app.rs @@ -3,7 +3,7 @@ use eyre::{eyre, Result}; use reth_chainspec::{ChainSpec, EthChainSpec, Hardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::{ - common::{CliComponentsBuilder, CliHeader, CliNodeTypes}, + common::{CliComponentsBuilder, CliNodeTypes, HeaderMut}, launcher::{FnLauncher, Launcher}, }; use reth_cli_runner::CliRunner; @@ -14,10 +14,7 @@ use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig, EthereumNo use reth_node_metrics::recorder::install_prometheus_recorder; use reth_rpc_server_types::RpcModuleValidator; use reth_tracing::{FileWorkerGuard, Layers}; -use reth_tracing_otlp::OtlpProtocol; use std::{fmt, sync::Arc}; -use tracing::info; -use url::Url; /// A wrapper around a parsed CLI that handles command execution. #[derive(Debug)] @@ -84,7 +81,7 @@ where ) -> Result<()>, ) -> Result<()> where - N: CliNodeTypes, ChainSpec: Hardforks>, + N: CliNodeTypes, ChainSpec: Hardforks>, C: ChainSpecParser, { let runner = match self.runner.take() { @@ -108,52 +105,10 @@ where /// Initializes tracing with the configured options. /// - /// If file logging is enabled, this function stores guard to the struct. - /// For gRPC OTLP, it requires tokio runtime context. + /// See [`Cli::init_tracing`] for more information. 
pub fn init_tracing(&mut self, runner: &CliRunner) -> Result<()> { if self.guard.is_none() { - let mut layers = self.layers.take().unwrap_or_default(); - - #[cfg(feature = "otlp")] - { - self.cli.traces.validate()?; - - if let Some(endpoint) = &self.cli.traces.otlp { - info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", endpoint); - self.init_otlp_export(&mut layers, endpoint, runner)?; - } - } - - self.guard = self.cli.logs.init_tracing_with_layers(layers)?; - info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.cli.logs.log_file_directory); - } - Ok(()) - } - - /// Initialize OTLP tracing export based on protocol type. - /// - /// For gRPC, `block_on` is required because tonic's channel initialization needs - /// a tokio runtime context, even though `with_span_layer` itself is not async. - #[cfg(feature = "otlp")] - fn init_otlp_export( - &self, - layers: &mut Layers, - endpoint: &Url, - runner: &CliRunner, - ) -> Result<()> { - let endpoint = endpoint.clone(); - let protocol = self.cli.traces.protocol; - let filter_level = self.cli.traces.otlp_filter.clone(); - - match protocol { - OtlpProtocol::Grpc => { - runner.block_on(async { - layers.with_span_layer("reth".to_string(), endpoint, filter_level, protocol) - })?; - } - OtlpProtocol::Http => { - layers.with_span_layer("reth".to_string(), endpoint, filter_level, protocol)?; - } + self.guard = self.cli.init_tracing(runner, self.layers.take().unwrap_or_default())?; } Ok(()) @@ -175,7 +130,7 @@ where C: ChainSpecParser, Ext: clap::Args + fmt::Debug, Rpc: RpcModuleValidator, - N: CliNodeTypes, ChainSpec: Hardforks>, + N: CliNodeTypes, ChainSpec: Hardforks>, { match cli.command { Commands::Node(command) => { @@ -199,7 +154,9 @@ where Commands::ImportEra(command) => runner.run_blocking_until_ctrl_c(command.execute::()), Commands::ExportEra(command) => runner.run_blocking_until_ctrl_c(command.execute::()), Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute::()), + Commands::Db(command) => { + runner.run_blocking_command_until_exit(|ctx| command.execute::(ctx)) + } Commands::Download(command) => runner.run_blocking_until_ctrl_c(command.execute::()), Commands::Stage(command) => { runner.run_command_until_exit(|ctx| command.execute::(ctx, components)) diff --git a/crates/ethereum/cli/src/interface.rs b/crates/ethereum/cli/src/interface.rs index 8d2b4ba62f..c5643f6f1a 100644 --- a/crates/ethereum/cli/src/interface.rs +++ b/crates/ethereum/cli/src/interface.rs @@ -8,7 +8,7 @@ use clap::{Parser, Subcommand}; use reth_chainspec::{ChainSpec, EthChainSpec, Hardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::{ - common::{CliComponentsBuilder, CliHeader, CliNodeTypes}, + common::{CliComponentsBuilder, CliNodeTypes, HeaderMut}, config_cmd, db, download, dump_genesis, export_era, import, import_era, init_cmd, init_state, launcher::FnLauncher, node::{self, NoArgs}, @@ -19,20 +19,20 @@ use reth_db::DatabaseEnv; use reth_node_api::NodePrimitives; use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{ - args::{LogArgs, TraceArgs}, + args::{LogArgs, OtlpInitStatus, TraceArgs}, version::version_metadata, }; use reth_node_metrics::recorder::install_prometheus_recorder; use reth_rpc_server_types::{DefaultRpcModuleValidator, RpcModuleValidator}; -use reth_tracing::FileWorkerGuard; +use reth_tracing::{FileWorkerGuard, Layers}; use std::{ffi::OsString, fmt, 
future::Future, marker::PhantomData, sync::Arc}; -use tracing::info; +use tracing::{info, warn}; /// The main reth cli interface. /// /// This is the entrypoint to the executable. #[derive(Debug, Parser)] -#[command(author, version =version_metadata().short_version.as_ref(), long_version = version_metadata().long_version.as_ref(), about = "Reth", long_about = None)] +#[command(author, name = version_metadata().name_client.as_ref(), version = version_metadata().short_version.as_ref(), long_version = version_metadata().long_version.as_ref(), about = "Reth", long_about = None)] pub struct Cli< C: ChainSpecParser = EthereumChainSpecParser, Ext: clap::Args + fmt::Debug = NoArgs, @@ -149,7 +149,7 @@ impl ) -> eyre::Result<()>, ) -> eyre::Result<()> where - N: CliNodeTypes, ChainSpec: Hardforks>, + N: CliNodeTypes, ChainSpec: Hardforks>, C: ChainSpecParser, { self.with_runner_and_components(CliRunner::try_default_runtime()?, components, launcher) @@ -197,7 +197,7 @@ impl ) -> eyre::Result<()>, ) -> eyre::Result<()> where - N: CliNodeTypes, ChainSpec: Hardforks>, + N: CliNodeTypes, ChainSpec: Hardforks>, C: ChainSpecParser, { // Add network name if available to the logs dir @@ -205,8 +205,7 @@ impl self.logs.log_file_directory = self.logs.log_file_directory.join(chain_spec.chain().to_string()); } - let _guard = self.init_tracing()?; - info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.logs.log_file_directory); + let _guard = self.init_tracing(&runner, Layers::new())?; // Install the prometheus recorder to be sure to record all metrics let _ = install_prometheus_recorder(); @@ -219,11 +218,27 @@ impl /// /// If file logging is enabled, this function returns a guard that must be kept alive to ensure /// that all logs are flushed to disk. + /// /// If an OTLP endpoint is specified, it will export metrics to the configured collector. 
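For binaries that embed `Cli` directly, tracing setup now threads a `CliRunner` through, because gRPC OTLP export has to be initialized inside a tokio runtime context; the rewritten signature follows just below. A hedged sketch, with the crate paths for `Cli` and `CliRunner` assumed:

```rust
use reth_cli_runner::CliRunner;
use reth_ethereum_cli::interface::Cli;
use reth_tracing::{FileWorkerGuard, Layers};

// Sketch only: the returned guard must be kept alive so file logs keep
// flushing for the lifetime of the process.
fn bootstrap_tracing(cli: &mut Cli) -> eyre::Result<Option<FileWorkerGuard>> {
    let runner = CliRunner::try_default_runtime()?;
    // Installs the log layers and, when the `otlp` feature and an endpoint
    // are configured, the OTLP span exporter.
    cli.init_tracing(&runner, Layers::new())
}
```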
- pub fn init_tracing(&self) -> eyre::Result> { - let layers = reth_tracing::Layers::new(); + pub fn init_tracing( + &mut self, + runner: &CliRunner, + mut layers: Layers, + ) -> eyre::Result> { + let otlp_status = runner.block_on(self.traces.init_otlp_tracing(&mut layers))?; let guard = self.logs.init_tracing_with_layers(layers)?; + info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.logs.log_file_directory); + match otlp_status { + OtlpInitStatus::Started(endpoint) => { + info!(target: "reth::cli", "Started OTLP {:?} tracing export to {endpoint}", self.traces.protocol); + } + OtlpInitStatus::NoFeature => { + warn!(target: "reth::cli", "Provided OTLP tracing arguments do not have effect, compile with the `otlp` feature") + } + OtlpInitStatus::Disabled => {} + } + Ok(guard) } } diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 5aef139303..9d89b4a73d 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -12,7 +12,7 @@ extern crate alloc; use alloc::{fmt::Debug, sync::Arc}; -use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::eip7840::BlobParams; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_consensus::{Consensus, ConsensusError, FullConsensus, HeaderValidator}; @@ -38,12 +38,25 @@ pub use validation::validate_block_post_execution; pub struct EthBeaconConsensus { /// Configuration chain_spec: Arc, + /// Maximum allowed extra data size in bytes + max_extra_data_size: usize, } impl EthBeaconConsensus { /// Create a new instance of [`EthBeaconConsensus`] pub const fn new(chain_spec: Arc) -> Self { - Self { chain_spec } + Self { chain_spec, max_extra_data_size: MAXIMUM_EXTRA_DATA_SIZE } + } + + /// Returns the maximum allowed extra data size. + pub const fn max_extra_data_size(&self) -> usize { + self.max_extra_data_size + } + + /// Sets the maximum allowed extra data size and returns the updated instance. + pub const fn with_max_extra_data_size(mut self, size: usize) -> Self { + self.max_extra_data_size = size; + self } /// Returns the chain spec associated with this consensus engine. @@ -125,7 +138,7 @@ where } } } - validate_header_extra_data(header)?; + validate_header_extra_data(header, self.max_extra_data_size)?; validate_header_gas(header)?; validate_header_base_fee(header, &self.chain_spec)?; diff --git a/crates/ethereum/consensus/src/validation.rs b/crates/ethereum/consensus/src/validation.rs index 71affffeb0..2c2eacddb3 100644 --- a/crates/ethereum/consensus/src/validation.rs +++ b/crates/ethereum/consensus/src/validation.rs @@ -87,9 +87,7 @@ fn verify_receipts( logs_bloom, expected_receipts_root, expected_logs_bloom, - )?; - - Ok(()) + ) } /// Compare the calculated receipts root with the expected receipts root, also compare diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index 45c1f6a31f..a471e34742 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -120,11 +120,11 @@ impl EthBuiltPayload { /// Try converting built payload into [`ExecutionPayloadEnvelopeV4`]. /// /// Returns an error if the payload contains non EIP-4844 sidecar. 
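Circling back to the `EthBeaconConsensus` hunk just above: the extra-data cap is now configurable instead of being hard-coded to `MAXIMUM_EXTRA_DATA_SIZE`. A small sketch, using the `reth_node_ethereum` re-export seen elsewhere in this patch:

```rust
use reth_chainspec::MAINNET;
use reth_node_ethereum::consensus::EthBeaconConsensus;

fn main() {
    // The cap defaults to alloy's MAXIMUM_EXTRA_DATA_SIZE; chains that allow
    // larger extra data can raise it.
    let consensus = EthBeaconConsensus::new(MAINNET.clone()).with_max_extra_data_size(64);
    assert_eq!(consensus.max_extra_data_size(), 64);
}
```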
- pub fn try_into_v4(self) -> Result { - Ok(ExecutionPayloadEnvelopeV4 { - execution_requests: self.requests.clone().unwrap_or_default(), - envelope_inner: self.try_into()?, - }) + pub fn try_into_v4( + mut self, + ) -> Result { + let execution_requests = self.requests.take().unwrap_or_default(); + Ok(ExecutionPayloadEnvelopeV4 { execution_requests, envelope_inner: self.try_into()? }) } /// Try converting built payload into [`ExecutionPayloadEnvelopeV5`]. @@ -424,6 +424,8 @@ pub fn payload_id(parent: &B256, attributes: &PayloadAttributes) -> PayloadId { } let out = hasher.finalize(); + + #[allow(deprecated)] // generic-array 0.14 deprecated PayloadId::new(out.as_slice()[..8].try_into().expect("sufficient length")) } diff --git a/crates/ethereum/evm/src/build.rs b/crates/ethereum/evm/src/build.rs index 85d4cae311..2530fc6179 100644 --- a/crates/ethereum/evm/src/build.rs +++ b/crates/ethereum/evm/src/build.rs @@ -5,7 +5,6 @@ use alloy_consensus::{ }; use alloy_eips::merge::BEACON_NONCE; use alloy_evm::{block::BlockExecutorFactory, eth::EthBlockExecutionCtx}; -use alloy_primitives::Bytes; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::execute::{BlockAssembler, BlockAssemblerInput, BlockExecutionError}; use reth_execution_types::BlockExecutionResult; @@ -17,14 +16,12 @@ use revm::context::Block as _; pub struct EthBlockAssembler { /// The chainspec. pub chain_spec: Arc, - /// Extra data to use for the blocks. - pub extra_data: Bytes, } impl EthBlockAssembler { /// Creates a new [`EthBlockAssembler`]. - pub fn new(chain_spec: Arc) -> Self { - Self { chain_spec, extra_data: Default::default() } + pub const fn new(chain_spec: Arc) -> Self { + Self { chain_spec } } } @@ -110,7 +107,7 @@ where gas_limit: evm_env.block_env.gas_limit(), difficulty: evm_env.block_env.difficulty(), gas_used: *gas_used, - extra_data: self.extra_data.clone(), + extra_data: ctx.extra_data, parent_beacon_block_root: ctx.parent_beacon_block_root, blob_gas_used: block_blob_gas_used, excess_blob_gas, diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index c0f8adc9c5..dbc686fe4f 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -19,32 +19,37 @@ extern crate alloc; use alloc::{borrow::Cow, sync::Arc}; use alloy_consensus::Header; -use alloy_eips::Decodable2718; -pub use alloy_evm::EthEvm; use alloy_evm::{ eth::{EthBlockExecutionCtx, EthBlockExecutorFactory}, EthEvmFactory, FromRecoveredTx, FromTxWithEncoded, }; -use alloy_primitives::{Bytes, U256}; -use alloy_rpc_types_engine::ExecutionData; use core::{convert::Infallible, fmt::Debug}; -use reth_chainspec::{ChainSpec, EthChainSpec, EthereumHardforks, MAINNET}; +use reth_chainspec::{ChainSpec, EthChainSpec, MAINNET}; use reth_ethereum_primitives::{Block, EthPrimitives, TransactionSigned}; use reth_evm::{ - eth::NextEvmEnvAttributes, precompiles::PrecompilesMap, ConfigureEngineEvm, ConfigureEvm, - EvmEnv, EvmEnvFor, EvmFactory, ExecutableTxIterator, ExecutionCtxFor, NextBlockEnvAttributes, - TransactionEnv, + eth::NextEvmEnvAttributes, precompiles::PrecompilesMap, ConfigureEvm, EvmEnv, EvmFactory, + NextBlockEnvAttributes, TransactionEnv, }; -use reth_primitives_traits::{ - constants::MAX_TX_GAS_LIMIT_OSAKA, SealedBlock, SealedHeader, SignedTransaction, TxTy, -}; -use reth_storage_errors::any::AnyError; -use revm::{ - context::{BlockEnv, CfgEnv}, - context_interface::block::BlobExcessGasAndPrice, - primitives::hardfork::SpecId, +use reth_primitives_traits::{SealedBlock, SealedHeader}; +use 
revm::{context::BlockEnv, primitives::hardfork::SpecId}; + +#[cfg(feature = "std")] +use reth_evm::{ConfigureEngineEvm, ExecutableTxIterator}; +#[allow(unused_imports)] +use { + alloy_eips::Decodable2718, + alloy_primitives::{Bytes, U256}, + alloy_rpc_types_engine::ExecutionData, + reth_chainspec::EthereumHardforks, + reth_evm::{EvmEnvFor, ExecutionCtxFor}, + reth_primitives_traits::{constants::MAX_TX_GAS_LIMIT_OSAKA, SignedTransaction, TxTy}, + reth_storage_errors::any::AnyError, + revm::context::CfgEnv, + revm::context_interface::block::BlobExcessGasAndPrice, }; +pub use alloy_evm::EthEvm; + mod config; use alloy_evm::eth::spec::EthExecutorSpec; pub use config::{revm_spec, revm_spec_by_timestamp_and_block_number}; @@ -116,12 +121,6 @@ impl EthEvmConfig { pub const fn chain_spec(&self) -> &Arc { self.executor_factory.spec() } - - /// Sets the extra data for the block assembler. - pub fn with_extra_data(mut self, extra_data: Bytes) -> Self { - self.block_assembler.extra_data = extra_data; - self - } } impl ConfigureEvm for EthEvmConfig @@ -193,6 +192,7 @@ where parent_beacon_block_root: block.header().parent_beacon_block_root, ommers: &block.body().ommers, withdrawals: block.body().withdrawals.as_ref().map(Cow::Borrowed), + extra_data: block.header().extra_data.clone(), }) } @@ -206,10 +206,12 @@ where parent_beacon_block_root: attributes.parent_beacon_block_root, ommers: &[], withdrawals: attributes.withdrawals.map(Cow::Owned), + extra_data: attributes.extra_data, }) } } +#[cfg(feature = "std")] impl ConfigureEngineEvm for EthEvmConfig where ChainSpec: EthExecutorSpec + EthChainSpec

+ Hardforks + 'static, @@ -282,6 +284,7 @@ where parent_beacon_block_root: payload.sidecar.parent_beacon_block_root(), ommers: &[], withdrawals: payload.payload.withdrawals().map(|w| Cow::Owned(w.clone().into())), + extra_data: payload.payload.as_v1().extra_data.clone(), }) } @@ -289,12 +292,15 @@ where &self, payload: &ExecutionData, ) -> Result, Self::Error> { - Ok(payload.payload.transactions().clone().into_iter().map(|tx| { + let txs = payload.payload.transactions().clone(); + let convert = |tx: Bytes| { let tx = TxTy::::decode_2718_exact(tx.as_ref()).map_err(AnyError::new)?; let signer = tx.try_recover().map_err(AnyError::new)?; Ok::<_, AnyError>(tx.with_signer(signer)) - })) + }; + + Ok((txs, convert)) } } diff --git a/crates/ethereum/hardforks/src/display.rs b/crates/ethereum/hardforks/src/display.rs index e40a117d26..3980a01b88 100644 --- a/crates/ethereum/hardforks/src/display.rs +++ b/crates/ethereum/hardforks/src/display.rs @@ -25,6 +25,8 @@ struct DisplayFork { activated_at: ForkCondition, /// An optional EIP (e.g. `EIP-1559`). eip: Option, + /// Optional metadata to display alongside the fork (e.g. blob parameters) + metadata: Option, } impl core::fmt::Display for DisplayFork { @@ -38,6 +40,9 @@ impl core::fmt::Display for DisplayFork { match self.activated_at { ForkCondition::Block(at) | ForkCondition::Timestamp(at) => { write!(f, "{name_with_eip:32} @{at}")?; + if let Some(metadata) = &self.metadata { + write!(f, " {metadata}")?; + } } ForkCondition::TTD { total_difficulty, .. } => { // All networks that have merged are finalized. @@ -45,6 +50,9 @@ impl core::fmt::Display for DisplayFork { f, "{name_with_eip:32} @{total_difficulty} (network is known to be merged)", )?; + if let Some(metadata) = &self.metadata { + write!(f, " {metadata}")?; + } } ForkCondition::Never => unreachable!(), } @@ -116,15 +124,17 @@ impl core::fmt::Display for DisplayHardforks { Ok(()) } - format( - "Pre-merge hard forks (block based)", - &self.pre_merge, - self.with_merge.is_empty(), - f, - )?; + if !self.pre_merge.is_empty() { + format( + "Pre-merge hard forks (block based)", + &self.pre_merge, + self.with_merge.is_empty(), + f, + )?; + } if self.with_merge.is_empty() { - if !self.post_merge.is_empty() { + if !self.pre_merge.is_empty() && !self.post_merge.is_empty() { // need an extra line here in case we don't have a merge block (optimism) writeln!(f)?; } @@ -145,25 +155,33 @@ impl DisplayHardforks { pub fn new<'a, I>(hardforks: I) -> Self where I: IntoIterator, + { + // Delegate to with_meta by mapping the iterator to include None for metadata + Self::with_meta(hardforks.into_iter().map(|(fork, condition)| (fork, condition, None))) + } + + /// Creates a new [`DisplayHardforks`] from an iterator of hardforks with optional metadata. 
+ pub fn with_meta<'a, I>(hardforks: I) -> Self + where + I: IntoIterator)>, { let mut pre_merge = Vec::new(); let mut with_merge = Vec::new(); let mut post_merge = Vec::new(); - for (fork, condition) in hardforks { - let mut display_fork = - DisplayFork { name: fork.name().to_string(), activated_at: condition, eip: None }; + for (fork, condition, metadata) in hardforks { + let display_fork = DisplayFork { + name: fork.name().to_string(), + activated_at: condition, + eip: None, + metadata, + }; match condition { ForkCondition::Block(_) => { pre_merge.push(display_fork); } - ForkCondition::TTD { activation_block_number, total_difficulty, fork_block } => { - display_fork.activated_at = ForkCondition::TTD { - activation_block_number, - fork_block, - total_difficulty, - }; + ForkCondition::TTD { .. } => { with_merge.push(display_fork); } ForkCondition::Timestamp(_) => { diff --git a/crates/ethereum/hardforks/src/hardforks/mod.rs b/crates/ethereum/hardforks/src/hardforks/mod.rs index 1c67c380d9..dad175e8f6 100644 --- a/crates/ethereum/hardforks/src/hardforks/mod.rs +++ b/crates/ethereum/hardforks/src/hardforks/mod.rs @@ -4,11 +4,7 @@ pub use dev::DEV_HARDFORKS; use crate::{ForkCondition, ForkFilter, ForkId, Hardfork, Head}; #[cfg(feature = "std")] use rustc_hash::FxHashMap; -#[cfg(feature = "std")] -use std::collections::hash_map::Entry; -#[cfg(not(feature = "std"))] -use alloc::collections::btree_map::Entry; use alloc::{boxed::Box, vec::Vec}; /// Generic trait over a set of ordered hardforks @@ -115,26 +111,74 @@ impl ChainHardforks { self.fork(fork).active_at_block(block_number) } - /// Inserts `fork` into list, updating with a new [`ForkCondition`] if it already exists. + /// Inserts a fork with the given [`ForkCondition`], maintaining forks in ascending order + /// based on the `Ord` implementation of [`ForkCondition`]. + /// + /// If the fork already exists (regardless of its current condition type), it will be removed + /// and re-inserted at the appropriate position based on the new condition. 
+ /// + /// # Ordering Behavior + /// + /// Forks are ordered according to [`ForkCondition`]'s `Ord` implementation: + /// - [`ForkCondition::Never`] comes first + /// - [`ForkCondition::Block`] ordered by block number + /// - [`ForkCondition::Timestamp`] ordered by timestamp value + /// - [`ForkCondition::TTD`] ordered by total difficulty + /// + /// # Example + /// + /// ```ignore + /// let mut forks = ChainHardforks::default(); + /// forks.insert(Fork::Frontier, ForkCondition::Block(0)); + /// forks.insert(Fork::Homestead, ForkCondition::Block(1_150_000)); + /// forks.insert(Fork::Cancun, ForkCondition::Timestamp(1710338135)); + /// + /// // Forks are ordered: Frontier (Block 0), Homestead (Block 1150000), Cancun (Timestamp) + /// ``` pub fn insert(&mut self, fork: H, condition: ForkCondition) { - match self.map.entry(fork.name()) { - Entry::Occupied(mut entry) => { - *entry.get_mut() = condition; - if let Some((_, inner)) = - self.forks.iter_mut().find(|(inner, _)| inner.name() == fork.name()) - { - *inner = condition; - } - } - Entry::Vacant(entry) => { - entry.insert(condition); - self.forks.push((Box::new(fork), condition)); - } + // Remove existing fork if it exists + self.remove(&fork); + + // Find the correct position based on ForkCondition's Ord implementation + let pos = self + .forks + .iter() + .position(|(_, existing_condition)| *existing_condition > condition) + .unwrap_or(self.forks.len()); + + self.map.insert(fork.name(), condition); + self.forks.insert(pos, (Box::new(fork), condition)); + } + + /// Extends the list with multiple forks, updating existing entries with new + /// [`ForkCondition`]s if they already exist. + /// + /// Each fork is inserted using [`Self::insert`], maintaining proper ordering based on + /// [`ForkCondition`]'s `Ord` implementation. + /// + /// # Example + /// + /// ```ignore + /// let mut forks = ChainHardforks::default(); + /// forks.extend([ + /// (Fork::Homestead, ForkCondition::Block(1_150_000)), + /// (Fork::Frontier, ForkCondition::Block(0)), + /// (Fork::Cancun, ForkCondition::Timestamp(1710338135)), + /// ]); + /// + /// // Forks will be automatically ordered: Frontier, Homestead, Cancun + /// ``` + pub fn extend( + &mut self, + forks: impl IntoIterator, + ) { + for (fork, condition) in forks { + self.insert(fork, condition); } } /// Removes `fork` from list. 
- pub fn remove(&mut self, fork: H) { + pub fn remove(&mut self, fork: &H) { self.forks.retain(|(inner_fork, _)| inner_fork.name() != fork.name()); self.map.remove(fork.name()); } @@ -157,3 +201,122 @@ impl From<[(T, ForkCondition); N]> for ChainHardfor ) } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_hardforks::hardfork; + + hardfork!(AHardfork { A1, A2, A3 }); + hardfork!(BHardfork { B1, B2 }); + + #[test] + fn add_hardforks() { + let mut forks = ChainHardforks::default(); + forks.insert(AHardfork::A1, ForkCondition::Block(1)); + forks.insert(BHardfork::B1, ForkCondition::Block(1)); + assert_eq!(forks.len(), 2); + forks.is_fork_active_at_block(AHardfork::A1, 1); + forks.is_fork_active_at_block(BHardfork::B1, 1); + } + + #[test] + fn insert_maintains_fork_order() { + let mut forks = ChainHardforks::default(); + + // Insert forks in random order + forks.insert(BHardfork::B1, ForkCondition::Timestamp(2000)); + forks.insert(AHardfork::A1, ForkCondition::Block(100)); + forks.insert(AHardfork::A2, ForkCondition::Block(50)); + forks.insert(BHardfork::B2, ForkCondition::Timestamp(1000)); + + assert_eq!(forks.len(), 4); + + let fork_list: Vec<_> = forks.forks_iter().collect(); + + // Verify ordering: Block conditions come before Timestamp conditions + // and within each type, they're ordered by value + assert_eq!(fork_list[0].0.name(), "A2"); + assert_eq!(fork_list[0].1, ForkCondition::Block(50)); + assert_eq!(fork_list[1].0.name(), "A1"); + assert_eq!(fork_list[1].1, ForkCondition::Block(100)); + assert_eq!(fork_list[2].0.name(), "B2"); + assert_eq!(fork_list[2].1, ForkCondition::Timestamp(1000)); + assert_eq!(fork_list[3].0.name(), "B1"); + assert_eq!(fork_list[3].1, ForkCondition::Timestamp(2000)); + } + + #[test] + fn insert_replaces_and_reorders_existing_fork() { + let mut forks = ChainHardforks::default(); + + // Insert initial forks + forks.insert(AHardfork::A1, ForkCondition::Block(100)); + forks.insert(BHardfork::B1, ForkCondition::Block(200)); + forks.insert(AHardfork::A2, ForkCondition::Timestamp(1000)); + + assert_eq!(forks.len(), 3); + + // Update A1 from Block to Timestamp - should move it after B1 + forks.insert(AHardfork::A1, ForkCondition::Timestamp(500)); + assert_eq!(forks.len(), 3); + + let fork_list: Vec<_> = forks.forks_iter().collect(); + + // Verify new ordering + assert_eq!(fork_list[0].0.name(), "B1"); + assert_eq!(fork_list[0].1, ForkCondition::Block(200)); + assert_eq!(fork_list[1].0.name(), "A1"); + assert_eq!(fork_list[1].1, ForkCondition::Timestamp(500)); + assert_eq!(fork_list[2].0.name(), "A2"); + assert_eq!(fork_list[2].1, ForkCondition::Timestamp(1000)); + + // Update A1 timestamp to move it after A2 + forks.insert(AHardfork::A1, ForkCondition::Timestamp(2000)); + assert_eq!(forks.len(), 3); + + let fork_list: Vec<_> = forks.forks_iter().collect(); + + assert_eq!(fork_list[0].0.name(), "B1"); + assert_eq!(fork_list[0].1, ForkCondition::Block(200)); + assert_eq!(fork_list[1].0.name(), "A2"); + assert_eq!(fork_list[1].1, ForkCondition::Timestamp(1000)); + assert_eq!(fork_list[2].0.name(), "A1"); + assert_eq!(fork_list[2].1, ForkCondition::Timestamp(2000)); + } + + #[test] + fn extend_maintains_order() { + let mut forks = ChainHardforks::default(); + + // Use extend to insert multiple forks at once in random order + forks.extend([ + (AHardfork::A1, ForkCondition::Block(100)), + (AHardfork::A2, ForkCondition::Timestamp(1000)), + ]); + forks.extend([(BHardfork::B1, ForkCondition::Timestamp(2000))]); + + assert_eq!(forks.len(), 3); + + let fork_list: 
Vec<_> = forks.forks_iter().collect(); + + // Verify ordering is maintained + assert_eq!(fork_list[0].0.name(), "A1"); + assert_eq!(fork_list[0].1, ForkCondition::Block(100)); + assert_eq!(fork_list[1].0.name(), "A2"); + assert_eq!(fork_list[1].1, ForkCondition::Timestamp(1000)); + assert_eq!(fork_list[2].0.name(), "B1"); + assert_eq!(fork_list[2].1, ForkCondition::Timestamp(2000)); + + // Extend again with an update to A2 + forks.extend([(AHardfork::A2, ForkCondition::Timestamp(3000))]); + assert_eq!(forks.len(), 3); + + let fork_list: Vec<_> = forks.forks_iter().collect(); + + assert_eq!(fork_list[0].0.name(), "A1"); + assert_eq!(fork_list[1].0.name(), "B1"); + assert_eq!(fork_list[2].0.name(), "A2"); + assert_eq!(fork_list[2].1, ForkCondition::Timestamp(3000)); + } +} diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 3c0efdb039..306bbf54fb 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -24,7 +24,7 @@ reth-provider.workspace = true reth-transaction-pool.workspace = true reth-network.workspace = true reth-evm.workspace = true -reth-evm-ethereum.workspace = true +reth-evm-ethereum = { workspace = true, features = ["std"] } reth-rpc.workspace = true reth-rpc-api.workspace = true reth-rpc-eth-api.workspace = true @@ -35,7 +35,7 @@ reth-chainspec.workspace = true reth-revm = { workspace = true, features = ["std"] } reth-rpc-eth-types.workspace = true reth-engine-local.workspace = true -reth-engine-primitives.workspace = true +reth-engine-primitives = { workspace = true, features = ["std"] } reth-payload-primitives.workspace = true # ethereum @@ -49,7 +49,7 @@ tokio.workspace = true # revm with required ethereum features # Note: this must be kept to ensure all features are properly enabled/forwarded -revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } +revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg", "memory_limit"] } # misc eyre.workspace = true @@ -60,6 +60,10 @@ reth-exex.workspace = true reth-node-core.workspace = true reth-e2e-test-utils.workspace = true reth-tasks.workspace = true +reth-testing-utils.workspace = true +reth-stages-types.workspace = true +tempfile.workspace = true +jsonrpsee-core.workspace = true alloy-primitives.workspace = true alloy-provider.workspace = true @@ -74,6 +78,9 @@ futures.workspace = true tokio.workspace = true serde_json.workspace = true rand.workspace = true +serde.workspace = true +alloy-rpc-types-trace.workspace = true +similar-asserts.workspace = true [features] default = [] @@ -82,7 +89,16 @@ asm-keccak = [ "reth-node-core/asm-keccak", "revm/asm-keccak", ] -js-tracer = ["reth-node-builder/js-tracer"] +keccak-cache-global = [ + "alloy-primitives/keccak-cache-global", + "reth-node-core/keccak-cache-global", +] +js-tracer = [ + "reth-node-builder/js-tracer", + "reth-rpc/js-tracer", + "reth-rpc-eth-api/js-tracer", + "reth-rpc-eth-types/js-tracer", +] test-utils = [ "reth-node-builder/test-utils", "reth-chainspec/test-utils", @@ -95,4 +111,5 @@ test-utils = [ "reth-evm/test-utils", "reth-primitives-traits/test-utils", "reth-evm-ethereum/test-utils", + "reth-stages-types/test-utils", ] diff --git a/crates/ethereum/node/src/engine.rs b/crates/ethereum/node/src/engine.rs index 441e05d1cc..f1b880ab25 100644 --- a/crates/ethereum/node/src/engine.rs +++ b/crates/ethereum/node/src/engine.rs @@ -14,7 +14,7 @@ use reth_payload_primitives::{ validate_execution_requests, validate_version_specific_fields, EngineApiMessageVersion, EngineObjectValidationError, 
NewPayloadError, PayloadOrAttributes, }; -use reth_primitives_traits::RecoveredBlock; +use reth_primitives_traits::SealedBlock; use std::sync::Arc; /// Validator for the ethereum engine API. @@ -43,12 +43,11 @@ where { type Block = Block; - fn ensure_well_formed_payload( + fn convert_payload_to_block( &self, payload: ExecutionData, - ) -> Result, NewPayloadError> { - let sealed_block = self.inner.ensure_well_formed_payload(payload)?; - sealed_block.try_recover().map_err(|e| NewPayloadError::Other(e.into())) + ) -> Result, NewPayloadError> { + self.inner.ensure_well_formed_payload(payload).map_err(Into::into) } } diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index fa81d70e61..cf409cce9c 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -32,15 +32,15 @@ use reth_node_builder::{ EngineValidatorBuilder, EthApiBuilder, EthApiCtx, Identity, PayloadValidatorBuilder, RethRpcAddOns, RpcAddOns, RpcHandle, }, - BuilderContext, DebugNode, Node, NodeAdapter, PayloadBuilderConfig, + BuilderContext, DebugNode, Node, NodeAdapter, }; use reth_payload_primitives::PayloadTypes; use reth_provider::{providers::ProviderFactoryBuilder, EthStorage}; use reth_rpc::{ eth::core::{EthApiFor, EthRpcConverterFor}, - ValidationApi, + TestingApi, ValidationApi, }; -use reth_rpc_api::servers::BlockSubmissionValidationApiServer; +use reth_rpc_api::servers::{BlockSubmissionValidationApiServer, TestingApiServer}; use reth_rpc_builder::{config::RethRpcServerConfig, middleware::RethRpcMiddleware}; use reth_rpc_eth_api::{ helpers::{ @@ -118,13 +118,14 @@ impl EthereumNode { /// use reth_chainspec::ChainSpecBuilder; /// use reth_db::open_db_read_only; /// use reth_node_ethereum::EthereumNode; - /// use reth_provider::providers::StaticFileProvider; + /// use reth_provider::providers::{RocksDBProvider, StaticFileProvider}; /// use std::sync::Arc; /// /// let factory = EthereumNode::provider_factory_builder() /// .db(Arc::new(open_db_read_only("db", Default::default()).unwrap())) /// .chainspec(ChainSpecBuilder::mainnet().build().into()) /// .static_file(StaticFileProvider::read_only("db/static_files", false).unwrap()) + /// .rocksdb_provider(RocksDBProvider::builder("db/rocksdb").build().unwrap()) /// .build_provider_factory(); /// ``` pub fn provider_factory_builder() -> ProviderFactoryBuilder { @@ -313,13 +314,25 @@ where .modules .merge_if_module_configured(RethRpcModule::Eth, eth_config.into_rpc())?; + // testing_buildBlockV1: only wire when the hidden testing module is explicitly + // requested on any transport. Default stays disabled to honor security guidance. 
+ let testing_api = TestingApi::new( + container.registry.eth_api().clone(), + container.registry.evm_config().clone(), + ) + .into_rpc(); + container + .modules + .merge_if_module_configured(RethRpcModule::Testing, testing_api)?; + Ok(()) }) .await } } -impl RethRpcAddOns for EthereumAddOns +impl RethRpcAddOns + for EthereumAddOns where N: FullNodeComponents< Types: NodeTypes< @@ -335,6 +348,7 @@ where EVB: EngineValidatorBuilder, EthApiError: FromEvmError, EvmFactoryFor: EvmFactory, + RpcMiddleware: RethRpcMiddleware, { type EthApi = EthB::EthApi; @@ -424,9 +438,7 @@ where type EVM = EthEvmConfig; async fn build_evm(self, ctx: &BuilderContext) -> eyre::Result { - let evm_config = EthEvmConfig::new(ctx.chain_spec()) - .with_extra_data(ctx.payload_builder_config().extra_data_bytes()); - Ok(evm_config) + Ok(EthEvmConfig::new(ctx.chain_spec())) } } @@ -453,6 +465,9 @@ where async fn build_pool(self, ctx: &BuilderContext) -> eyre::Result { let pool_config = ctx.pool_config(); + let blobs_disabled = ctx.config().txpool.disable_blobs_support || + ctx.config().txpool.blobpool_max_count == 0; + let blob_cache_size = if let Some(blob_cache_size) = pool_config.blob_cache_size { Some(blob_cache_size) } else { @@ -475,8 +490,9 @@ where let validator = TransactionValidationTaskExecutor::eth_builder(ctx.provider().clone()) .with_head_timestamp(ctx.head().timestamp) - .with_max_tx_input_bytes(ctx.config().txpool.max_tx_input_bytes) + .set_eip4844(!blobs_disabled) .kzg_settings(ctx.kzg_settings()?) + .with_max_tx_input_bytes(ctx.config().txpool.max_tx_input_bytes) .with_local_transactions_config(pool_config.local_transactions_config.clone()) .set_tx_fee_cap(ctx.config().rpc.rpc_tx_fee_cap) .with_max_tx_gas_limit(ctx.config().txpool.max_tx_gas_limit) diff --git a/crates/ethereum/node/src/payload.rs b/crates/ethereum/node/src/payload.rs index 405bae94d2..644078bc96 100644 --- a/crates/ethereum/node/src/payload.rs +++ b/crates/ethereum/node/src/payload.rs @@ -52,7 +52,10 @@ where ctx.provider().clone(), pool, evm_config, - EthereumBuilderConfig::new().with_gas_limit(gas_limit), + EthereumBuilderConfig::new() + .with_gas_limit(gas_limit) + .with_max_blobs_per_block(conf.max_blobs_per_block()) + .with_extra_data(conf.extra_data_bytes()), )) } } diff --git a/crates/ethereum/node/tests/e2e/custom_genesis.rs b/crates/ethereum/node/tests/e2e/custom_genesis.rs new file mode 100644 index 0000000000..6d1689655f --- /dev/null +++ b/crates/ethereum/node/tests/e2e/custom_genesis.rs @@ -0,0 +1,100 @@ +use crate::utils::eth_payload_attributes; +use alloy_genesis::Genesis; +use alloy_primitives::B256; +use reth_chainspec::{ChainSpecBuilder, MAINNET}; +use reth_e2e_test_utils::{setup, transaction::TransactionTestContext}; +use reth_node_ethereum::EthereumNode; +use reth_provider::{HeaderProvider, StageCheckpointReader}; +use reth_stages_types::StageId; +use std::sync::Arc; + +/// Tests that a node can initialize and advance with a custom genesis block number. 
+#[tokio::test] +async fn can_run_eth_node_with_custom_genesis_number() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + // Create genesis with custom block number (e.g., 1000) + let mut genesis: Genesis = + serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); + genesis.number = Some(1000); + genesis.parent_hash = Some(B256::random()); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(genesis) + .cancun_activated() + .build(), + ); + + let (mut nodes, _tasks, wallet) = + setup::(1, chain_spec, false, eth_payload_attributes).await?; + + let mut node = nodes.pop().unwrap(); + + // Verify stage checkpoints are initialized to genesis block number (1000) + for stage in StageId::ALL { + let checkpoint = node.inner.provider.get_stage_checkpoint(stage)?; + assert!(checkpoint.is_some(), "Stage {:?} checkpoint should exist", stage); + assert_eq!( + checkpoint.unwrap().block_number, + 1000, + "Stage {:?} checkpoint should be at genesis block 1000", + stage + ); + } + + // Advance the chain (block 1001) + let raw_tx = TransactionTestContext::transfer_tx_bytes(1, wallet.inner).await; + let tx_hash = node.rpc.inject_tx(raw_tx).await?; + let payload = node.advance_block().await?; + + let block_hash = payload.block().hash(); + let block_number = payload.block().number; + + // Verify we're at block 1001 (genesis + 1) + assert_eq!(block_number, 1001, "Block number should be 1001 after advancing from genesis 1000"); + + // Assert the block has been committed + node.assert_new_block(tx_hash, block_hash, block_number).await?; + + Ok(()) +} + +/// Tests that block queries respect custom genesis boundaries. +#[tokio::test] +async fn custom_genesis_block_query_boundaries() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let genesis_number = 5000u64; + + let mut genesis: Genesis = + serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); + genesis.number = Some(genesis_number); + genesis.parent_hash = Some(B256::random()); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(genesis) + .cancun_activated() + .build(), + ); + + let (mut nodes, _tasks, _wallet) = + setup::(1, chain_spec, false, eth_payload_attributes).await?; + + let node = nodes.pop().unwrap(); + + // Query genesis block should succeed + let genesis_header = node.inner.provider.header_by_number(genesis_number)?; + assert!(genesis_header.is_some(), "Genesis block at {} should exist", genesis_number); + + // Query blocks before genesis should return None + for block_num in [0, 1, genesis_number - 1] { + let header = node.inner.provider.header_by_number(block_num)?; + assert!(header.is_none(), "Block {} before genesis should not exist", block_num); + } + + Ok(()) +} diff --git a/crates/ethereum/node/tests/e2e/eth.rs b/crates/ethereum/node/tests/e2e/eth.rs index c427f5f0b3..1865890482 100644 --- a/crates/ethereum/node/tests/e2e/eth.rs +++ b/crates/ethereum/node/tests/e2e/eth.rs @@ -7,6 +7,7 @@ use reth_e2e_test_utils::{ use reth_node_builder::{NodeBuilder, NodeHandle}; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::EthereumNode; +use reth_provider::BlockNumReader; use reth_tasks::TaskManager; use std::sync::Arc; @@ -127,3 +128,55 @@ async fn test_failed_run_eth_node_with_no_auth_engine_api_over_ipc_opts() -> eyr Ok(()) } + +#[tokio::test] +async fn test_engine_graceful_shutdown() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let (mut nodes, 
_tasks, wallet) = setup::( + 1, + Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + ), + false, + eth_payload_attributes, + ) + .await?; + + let mut node = nodes.pop().unwrap(); + + let raw_tx = TransactionTestContext::transfer_tx_bytes(1, wallet.inner).await; + let tx_hash = node.rpc.inject_tx(raw_tx).await?; + let payload = node.advance_block().await?; + node.assert_new_block(tx_hash, payload.block().hash(), payload.block().number).await?; + + // Get block number before shutdown + let block_before = node.inner.provider.best_block_number()?; + assert_eq!(block_before, 1, "Expected 1 block before shutdown"); + + // Verify block is NOT yet persisted to database + let db_block_before = node.inner.provider.last_block_number()?; + assert_eq!(db_block_before, 0, "Block should not be persisted yet"); + + // Trigger graceful shutdown + let done_rx = node + .inner + .add_ons_handle + .engine_shutdown + .shutdown() + .expect("shutdown should return receiver"); + + tokio::time::timeout(std::time::Duration::from_secs(2), done_rx) + .await + .expect("shutdown timed out") + .expect("shutdown completion channel should not be closed"); + + let db_block = node.inner.provider.last_block_number()?; + assert_eq!(db_block, 1, "Database should have persisted block 1"); + + Ok(()) +} diff --git a/crates/ethereum/node/tests/e2e/main.rs b/crates/ethereum/node/tests/e2e/main.rs index 0ebee83cd5..9ed9c5b9a6 100644 --- a/crates/ethereum/node/tests/e2e/main.rs +++ b/crates/ethereum/node/tests/e2e/main.rs @@ -1,10 +1,12 @@ #![allow(missing_docs)] mod blobs; +mod custom_genesis; mod dev; mod eth; mod p2p; mod pool; +mod prestate; mod rpc; mod utils; diff --git a/crates/ethereum/node/tests/e2e/p2p.rs b/crates/ethereum/node/tests/e2e/p2p.rs index 34a4210538..74266b1675 100644 --- a/crates/ethereum/node/tests/e2e/p2p.rs +++ b/crates/ethereum/node/tests/e2e/p2p.rs @@ -1,10 +1,18 @@ use crate::utils::{advance_with_random_transactions, eth_payload_attributes}; +use alloy_consensus::{SignableTransaction, TxEip1559, TxEnvelope}; +use alloy_eips::Encodable2718; +use alloy_network::TxSignerSync; use alloy_provider::{Provider, ProviderBuilder}; -use rand::{rngs::StdRng, Rng, SeedableRng}; +use futures::future::JoinAll; +use rand::{rngs::StdRng, seq::IndexedRandom, Rng, SeedableRng}; use reth_chainspec::{ChainSpecBuilder, MAINNET}; -use reth_e2e_test_utils::{setup, setup_engine, transaction::TransactionTestContext}; +use reth_e2e_test_utils::{ + setup, setup_engine, setup_engine_with_connection, transaction::TransactionTestContext, + wallet::Wallet, +}; use reth_node_ethereum::EthereumNode; -use std::sync::Arc; +use reth_rpc_api::EthApiServer; +use std::{sync::Arc, time::Duration}; #[tokio::test] async fn can_sync() -> eyre::Result<()> { @@ -195,3 +203,94 @@ async fn test_reorg_through_backfill() -> eyre::Result<()> { Ok(()) } + +#[tokio::test(flavor = "multi_thread")] +async fn test_tx_propagation() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .prague_activated() + .build(), + ); + + // Setup wallet + let chain_id = chain_spec.chain().into(); + let wallet = Wallet::new(1).inner; + let mut nonce = 0; + let mut build_tx = || { + let mut tx = TxEip1559 { + chain_id, + max_priority_fee_per_gas: 
1_000_000_000, + max_fee_per_gas: 1_000_000_000, + gas_limit: 100_000, + nonce, + ..Default::default() + }; + nonce += 1; + let signature = wallet.sign_transaction_sync(&mut tx).unwrap(); + TxEnvelope::Eip1559(tx.into_signed(signature)) + }; + + // Setup 10 nodes + let (mut nodes, _tasks, _) = setup_engine_with_connection::( + 10, + chain_spec.clone(), + false, + Default::default(), + eth_payload_attributes, + false, + ) + .await?; + + // Connect all nodes to the first one + let (first, rest) = nodes.split_at_mut(1); + for node in rest { + node.connect(&mut first[0]).await; + } + + // Advance all nodes for 1 block so that they don't consider themselves unsynced + let tx = build_tx(); + nodes[0].rpc.inject_tx(tx.encoded_2718().into()).await?; + let payload = nodes[0].advance_block().await?; + nodes[1..] + .iter_mut() + .map(|node| async { + node.submit_payload(payload.clone()).await.unwrap(); + node.sync_to(payload.block().hash()).await.unwrap(); + }) + .collect::>() + .await; + + // Build and send transaction to first node + let tx = build_tx(); + let tx_hash = *tx.tx_hash(); + let _ = nodes[0].rpc.inject_tx(tx.encoded_2718().into()).await?; + + tokio::time::sleep(Duration::from_millis(100)).await; + + // Assert that all nodes have the transaction + for (i, node) in nodes.iter().enumerate() { + assert!( + node.rpc.inner.eth_api().transaction_by_hash(tx_hash).await?.is_some(), + "Node {i} should have the transaction" + ); + } + + // Build and send one more transaction to a random node + let tx = build_tx(); + let tx_hash = *tx.tx_hash(); + let _ = nodes.choose(&mut rand::rng()).unwrap().rpc.inject_tx(tx.encoded_2718().into()).await?; + + tokio::time::sleep(Duration::from_millis(100)).await; + + // Assert that all nodes have the transaction + for node in nodes { + assert!(node.rpc.inner.eth_api().transaction_by_hash(tx_hash).await?.is_some()); + } + + Ok(()) +} diff --git a/crates/ethereum/node/tests/e2e/prestate.rs b/crates/ethereum/node/tests/e2e/prestate.rs new file mode 100644 index 0000000000..6c66f09bf7 --- /dev/null +++ b/crates/ethereum/node/tests/e2e/prestate.rs @@ -0,0 +1,132 @@ +use alloy_eips::BlockId; +use alloy_genesis::{Genesis, GenesisAccount}; +use alloy_primitives::address; +use alloy_provider::ext::DebugApi; +use alloy_rpc_types_eth::{Transaction, TransactionRequest}; +use alloy_rpc_types_trace::geth::{ + AccountState, GethDebugTracingOptions, PreStateConfig, PreStateFrame, +}; +use eyre::{eyre, Result}; +use reth_chainspec::{ChainSpecBuilder, MAINNET}; +use reth_node_builder::{NodeBuilder, NodeHandle}; +use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; +use reth_node_ethereum::EthereumNode; +use reth_rpc_server_types::RpcModuleSelection; +use reth_tasks::TaskManager; +use serde::Deserialize; +use std::sync::Arc; + +const PRESTATE_SNAPSHOT: &str = + include_str!("../../../../../testing/prestate/tx-selfdestruct-prestate.json"); + +/// Replays the selfdestruct transaction via `debug_traceCall` and ensures Reth's prestate matches +/// Geth's captured snapshot. 
+// +#[tokio::test] +async fn debug_trace_call_matches_geth_prestate_snapshot() -> Result<()> { + reth_tracing::init_test_tracing(); + + let mut genesis: Genesis = MAINNET.genesis().clone(); + genesis.coinbase = address!("0x95222290dd7278aa3ddd389cc1e1d165cc4bafe5"); + + let exec = TaskManager::current(); + let exec = exec.executor(); + + let expected_frame = expected_snapshot_frame()?; + let prestate_mode = match &expected_frame { + PreStateFrame::Default(mode) => mode.clone(), + _ => return Err(eyre!("snapshot must contain default prestate frame")), + }; + + genesis.alloc.extend( + prestate_mode + .0 + .clone() + .into_iter() + .map(|(addr, state)| (addr, account_state_to_genesis(state))), + ); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(genesis) + .cancun_activated() + .prague_activated() + .build(), + ); + + let node_config = NodeConfig::test().with_chain(chain_spec).with_rpc( + RpcServerArgs::default() + .with_unused_ports() + .with_http() + .with_http_api(RpcModuleSelection::all_modules().into()), + ); + + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) + .testing_node(exec) + .node(EthereumNode::default()) + .launch() + .await?; + + let provider = node.rpc_server_handle().eth_http_provider().unwrap(); + + // + let tx = r#"{ + "type": "0x2", + "chainId": "0x1", + "nonce": "0x39af8", + "gas": "0x249f0", + "maxFeePerGas": "0xc6432e2d7", + "maxPriorityFeePerGas": "0x68889c2b", + "to": "0xc77ad0a71008d7094a62cfbd250a2eb2afdf2776", + "value": "0x0", + "accessList": [], + "input": "0xf3fef3a3000000000000000000000000dac17f958d2ee523a2206206994597c13d831ec700000000000000000000000000000000000000000000000000000000000f6b64", + "r": "0x40ab901a8262d5e6fe9b6513996cd5df412526580cab7410c13acc9dd9f6ec93", + "s": "0x6b76354c8cb1c1d6dbebfd555be9053170f02a648c4b36740e3fd7c6e9499572", + "yParity": "0x1", + "v": "0x1", + "hash": "0x391f4b6a382d3bcc3120adc2ea8c62003e604e487d97281129156fd284a1a89d", + "blockHash": "0xf9b77bcf8c69544304dff34129f3bdc71f00fdf766c1522ed6ac1382726ead82", + "blockNumber": "0x1294fd2", + "transactionIndex": "0x3a", + "from": "0xa7fb5ca286fc3fd67525629048a4de3ba24cba2e", + "gasPrice": "0x7c5bcc0e0" + }"#; + let tx = serde_json::from_str::(tx).unwrap(); + let request = TransactionRequest::from_recovered_transaction(tx.into_recovered()); + + let trace: PreStateFrame = provider + .debug_trace_call_prestate( + request, + BlockId::latest(), + GethDebugTracingOptions::prestate_tracer(PreStateConfig::default()).into(), + ) + .await?; + + similar_asserts::assert_eq!(trace, expected_frame); + + Ok(()) +} + +fn expected_snapshot_frame() -> Result { + #[derive(Deserialize)] + struct Snapshot { + result: serde_json::Value, + } + + let snapshot: Snapshot = serde_json::from_str(PRESTATE_SNAPSHOT)?; + Ok(serde_json::from_value(snapshot.result)?) 
+} + +fn account_state_to_genesis(value: AccountState) -> GenesisAccount { + let balance = value.balance.unwrap_or_default(); + let code = value.code.filter(|code| !code.is_empty()); + let storage = (!value.storage.is_empty()).then_some(value.storage); + + GenesisAccount::default() + .with_balance(balance) + .with_nonce(value.nonce) + .with_code(code) + .with_storage(storage) +} diff --git a/crates/ethereum/node/tests/e2e/rpc.rs b/crates/ethereum/node/tests/e2e/rpc.rs index f040f44dfd..c149580ca6 100644 --- a/crates/ethereum/node/tests/e2e/rpc.rs +++ b/crates/ethereum/node/tests/e2e/rpc.rs @@ -1,5 +1,6 @@ use crate::utils::eth_payload_attributes; use alloy_eips::{eip2718::Encodable2718, eip7910::EthConfig}; +use alloy_genesis::Genesis; use alloy_primitives::{Address, B256, U256}; use alloy_provider::{network::EthereumWallet, Provider, ProviderBuilder, SendableTx}; use alloy_rpc_types_beacon::relay::{ @@ -11,8 +12,16 @@ use alloy_rpc_types_eth::TransactionRequest; use rand::{rngs::StdRng, Rng, SeedableRng}; use reth_chainspec::{ChainSpecBuilder, EthChainSpec, MAINNET}; use reth_e2e_test_utils::setup_engine; +use reth_network::types::NatResolver; +use reth_node_builder::{NodeBuilder, NodeHandle}; +use reth_node_core::{ + args::{NetworkArgs, RpcServerArgs}, + node_config::NodeConfig, +}; use reth_node_ethereum::EthereumNode; use reth_payload_primitives::BuiltPayload; +use reth_rpc_api::servers::AdminApiServer; +use reth_tasks::TaskManager; use std::{ sync::Arc, time::{SystemTime, UNIX_EPOCH}, @@ -329,3 +338,41 @@ async fn test_eth_config() -> eyre::Result<()> { Ok(()) } + +// +#[tokio::test] +async fn test_admin_external_ip() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let exec = TaskManager::current(); + let exec = exec.executor(); + + // Chain spec with test allocs + let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); + let chain_spec = + Arc::new(ChainSpecBuilder::default().chain(MAINNET.chain).genesis(genesis).build()); + + let external_ip = "10.64.128.71".parse().unwrap(); + // Node setup + let node_config = NodeConfig::test() + .with_chain(chain_spec) + .with_network( + NetworkArgs::default().with_nat_resolver(NatResolver::ExternalIp(external_ip)), + ) + .with_unused_ports() + .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); + + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) + .testing_node(exec) + .node(EthereumNode::default()) + .launch() + .await?; + + let api = node.add_ons_handle.admin_api(); + + let info = api.node_info().await.unwrap(); + + assert_eq!(info.ip, external_ip); + + Ok(()) +} diff --git a/crates/ethereum/node/tests/it/main.rs b/crates/ethereum/node/tests/it/main.rs index 0f85adda31..7aeec57ada 100644 --- a/crates/ethereum/node/tests/it/main.rs +++ b/crates/ethereum/node/tests/it/main.rs @@ -2,5 +2,6 @@ mod builder; mod exex; +mod testing; const fn main() {} diff --git a/crates/ethereum/node/tests/it/testing.rs b/crates/ethereum/node/tests/it/testing.rs new file mode 100644 index 0000000000..6bd21a0879 --- /dev/null +++ b/crates/ethereum/node/tests/it/testing.rs @@ -0,0 +1,85 @@ +//! E2E tests for the testing RPC namespace. 
+ +use alloy_primitives::{Address, B256}; +use alloy_rpc_types_engine::ExecutionPayloadEnvelopeV4; +use jsonrpsee_core::client::ClientT; +use reth_db::test_utils::create_test_rw_db; +use reth_ethereum_engine_primitives::EthPayloadAttributes; +use reth_node_builder::{NodeBuilder, NodeConfig}; +use reth_node_core::{ + args::DatadirArgs, + dirs::{DataDirPath, MaybePlatformPath}, +}; +use reth_node_ethereum::{node::EthereumAddOns, EthereumNode}; +use reth_rpc_api::TestingBuildBlockRequestV1; +use reth_rpc_server_types::{RethRpcModule, RpcModuleSelection}; +use reth_tasks::TaskManager; +use std::str::FromStr; +use tempfile::tempdir; +use tokio::sync::oneshot; + +#[tokio::test(flavor = "multi_thread")] +async fn testing_rpc_build_block_works() -> eyre::Result<()> { + let tasks = TaskManager::current(); + let mut rpc_args = reth_node_core::args::RpcServerArgs::default().with_http(); + rpc_args.http_api = Some(RpcModuleSelection::from_iter([RethRpcModule::Testing])); + let tempdir = tempdir().expect("temp datadir"); + let datadir_args = DatadirArgs { + datadir: MaybePlatformPath::::from_str(tempdir.path().to_str().unwrap()) + .expect("valid datadir"), + static_files_path: Some(tempdir.path().join("static")), + rocksdb_path: Some(tempdir.path().join("rocksdb")), + }; + let config = NodeConfig::test().with_datadir_args(datadir_args).with_rpc(rpc_args); + let db = create_test_rw_db(); + + let (tx, rx): ( + oneshot::Sender>, + oneshot::Receiver>, + ) = oneshot::channel(); + + let builder = NodeBuilder::new(config) + .with_database(db) + .with_launch_context(tasks.executor()) + .with_types::() + .with_components(EthereumNode::components()) + .with_add_ons(EthereumAddOns::default()) + .on_rpc_started(move |ctx, handles| { + let Some(client) = handles.rpc.http_client() else { return Ok(()) }; + + let chain = ctx.config().chain.clone(); + let parent_block_hash = chain.genesis_hash(); + let payload_attributes = EthPayloadAttributes { + timestamp: chain.genesis().timestamp + 1, + prev_randao: B256::ZERO, + suggested_fee_recipient: Address::ZERO, + withdrawals: None, + parent_beacon_block_root: None, + }; + + let request = TestingBuildBlockRequestV1 { + parent_block_hash, + payload_attributes, + transactions: vec![], + extra_data: None, + }; + + tokio::spawn(async move { + let res: eyre::Result = + client.request("testing_buildBlockV1", [request]).await.map_err(Into::into); + let _ = tx.send(res); + }); + + Ok(()) + }); + + // Launch the node with the default engine launcher. + let launcher = builder.engine_api_launcher(); + let _node = builder.launch_with(launcher).await?; + + // Wait for the testing RPC call to return. 
+ let res = rx.await.expect("testing_buildBlockV1 response"); + assert!(res.is_ok(), "testing_buildBlockV1 failed: {:?}", res.err()); + + Ok(()) +} diff --git a/crates/ethereum/payload/Cargo.toml b/crates/ethereum/payload/Cargo.toml index 42d159fb84..4326d9b193 100644 --- a/crates/ethereum/payload/Cargo.toml +++ b/crates/ethereum/payload/Cargo.toml @@ -24,7 +24,7 @@ reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-basic-payload-builder.workspace = true reth-evm.workspace = true -reth-evm-ethereum.workspace = true +reth-evm-ethereum = { workspace = true, features = ["std"] } reth-errors.workspace = true reth-chainspec.workspace = true reth-payload-validator.workspace = true diff --git a/crates/ethereum/payload/src/config.rs b/crates/ethereum/payload/src/config.rs index 9b5de23312..6e429b61f0 100644 --- a/crates/ethereum/payload/src/config.rs +++ b/crates/ethereum/payload/src/config.rs @@ -1,4 +1,5 @@ use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT_30M; +use alloy_primitives::Bytes; use reth_primitives_traits::constants::GAS_LIMIT_BOUND_DIVISOR; /// Settings for the Ethereum builder. @@ -9,6 +10,12 @@ pub struct EthereumBuilderConfig { /// Waits for the first payload to be built if there is no payload built when the payload is /// being resolved. pub await_payload_on_missing: bool, + /// Maximum number of blobs to include per block (EIP-7872). + /// + /// If `None`, defaults to the protocol maximum. + pub max_blobs_per_block: Option, + /// Extra data for built blocks. + pub extra_data: Bytes, } impl Default for EthereumBuilderConfig { @@ -20,7 +27,12 @@ impl Default for EthereumBuilderConfig { impl EthereumBuilderConfig { /// Create new payload builder config. pub const fn new() -> Self { - Self { desired_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M, await_payload_on_missing: true } + Self { + desired_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M, + await_payload_on_missing: true, + max_blobs_per_block: None, + extra_data: Bytes::new(), + } } /// Set desired gas limit. @@ -35,6 +47,18 @@ impl EthereumBuilderConfig { self.await_payload_on_missing = await_payload_on_missing; self } + + /// Set the maximum number of blobs per block (EIP-7872). + pub const fn with_max_blobs_per_block(mut self, max_blobs_per_block: Option) -> Self { + self.max_blobs_per_block = max_blobs_per_block; + self + } + + /// Set the extra data for built blocks. 
+ pub fn with_extra_data(mut self, extra_data: Bytes) -> Self { + self.extra_data = extra_data; + self + } } impl EthereumBuilderConfig { diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 5b3eb9cfcb..b803e9a351 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -153,9 +153,9 @@ where let PayloadConfig { parent_header, attributes } = config; let state_provider = client.state_by_block_hash(parent_header.hash())?; - let state = StateProviderDatabase::new(&state_provider); + let state = StateProviderDatabase::new(state_provider.as_ref()); let mut db = - State::builder().with_database(cached_reads.as_db_mut(state)).with_bundle_update().build(); + State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); let mut builder = evm_config .builder_for_next_block( @@ -168,6 +168,7 @@ where gas_limit: builder_config.gas_limit(parent_header.gas_limit), parent_beacon_block_root: attributes.parent_beacon_block_root(), withdrawals: Some(attributes.withdrawals().clone()), + extra_data: builder_config.extra_data, }, ) .map_err(PayloadBuilderError::other)?; @@ -198,8 +199,15 @@ where let mut block_transactions_rlp_length = 0; let blob_params = chain_spec.blob_params_at_timestamp(attributes.timestamp); - let max_blob_count = - blob_params.as_ref().map(|params| params.max_blob_count).unwrap_or_default(); + let protocol_max_blob_count = + blob_params.as_ref().map(|params| params.max_blob_count).unwrap_or_else(Default::default); + + // Apply user-configured blob limit (EIP-7872) + // Per EIP-7872: if the minimum is zero, set it to one + let max_blob_count = builder_config + .max_blobs_per_block + .map(|user_limit| std::cmp::min(user_limit, protocol_max_blob_count).max(1)) + .unwrap_or(protocol_max_blob_count); let is_osaka = chain_spec.is_osaka_active_at_timestamp(attributes.timestamp); @@ -211,7 +219,7 @@ where // continue best_txs.mark_invalid( &pool_tx, - InvalidPoolTransactionError::ExceedsGasLimit(pool_tx.gas_limit(), block_gas_limit), + &InvalidPoolTransactionError::ExceedsGasLimit(pool_tx.gas_limit(), block_gas_limit), ); continue } @@ -232,7 +240,7 @@ where if is_osaka && estimated_block_size_with_tx > MAX_RLP_BLOCK_SIZE { best_txs.mark_invalid( &pool_tx, - InvalidPoolTransactionError::OversizedData { + &InvalidPoolTransactionError::OversizedData { size: estimated_block_size_with_tx, limit: MAX_RLP_BLOCK_SIZE, }, @@ -254,7 +262,7 @@ where trace!(target: "payload_builder", tx=?tx.hash(), ?block_blob_count, "skipping blob transaction because it would exceed the max blob count per block"); best_txs.mark_invalid( &pool_tx, - InvalidPoolTransactionError::Eip4844( + &InvalidPoolTransactionError::Eip4844( Eip4844PoolTransactionError::TooManyEip4844Blobs { have: block_blob_count + tx_blob_count, permitted: max_blob_count, @@ -287,7 +295,7 @@ where blob_tx_sidecar = match blob_sidecar_result { Ok(sidecar) => Some(sidecar), Err(error) => { - best_txs.mark_invalid(&pool_tx, InvalidPoolTransactionError::Eip4844(error)); + best_txs.mark_invalid(&pool_tx, &InvalidPoolTransactionError::Eip4844(error)); continue } }; @@ -307,7 +315,7 @@ where trace!(target: "payload_builder", %error, ?tx, "skipping invalid transaction and its descendants"); best_txs.mark_invalid( &pool_tx, - InvalidPoolTransactionError::Consensus( + &InvalidPoolTransactionError::Consensus( InvalidTransactionError::TxTypeNotSupported, ), ); @@ -350,7 +358,8 @@ where return Ok(BuildOutcome::Aborted { fees: total_fees, cached_reads }) } - let 
BlockBuilderOutcome { execution_result, block, .. } = builder.finish(&state_provider)?; + let BlockBuilderOutcome { execution_result, block, .. } = + builder.finish(state_provider.as_ref())?; let requests = chain_spec .is_prague_active_at_timestamp(attributes.timestamp) diff --git a/crates/ethereum/primitives/src/receipt.rs b/crates/ethereum/primitives/src/receipt.rs index cbe8b5b806..306eae8d75 100644 --- a/crates/ethereum/primitives/src/receipt.rs +++ b/crates/ethereum/primitives/src/receipt.rs @@ -2,15 +2,12 @@ use core::fmt::Debug; use alloc::vec::Vec; use alloy_consensus::{ - Eip2718EncodableReceipt, Eip658Value, ReceiptEnvelope, ReceiptWithBloom, RlpDecodableReceipt, - RlpEncodableReceipt, TxReceipt, TxType, Typed2718, -}; -use alloy_eips::{ - eip2718::{Eip2718Error, Eip2718Result, Encodable2718, IsTyped2718}, - Decodable2718, + Eip2718DecodableReceipt, Eip2718EncodableReceipt, Eip658Value, ReceiptEnvelope, + ReceiptWithBloom, RlpDecodableReceipt, RlpEncodableReceipt, TxReceipt, TxType, Typed2718, }; +use alloy_eips::eip2718::{Eip2718Error, Eip2718Result, Encodable2718, IsTyped2718}; use alloy_primitives::{Bloom, Log, B256}; -use alloy_rlp::{BufMut, Decodable, Encodable, Header}; +use alloy_rlp::{BufMut, Decodable, Encodable, Header, RlpDecodable, RlpEncodable}; use reth_primitives_traits::{proofs::ordered_trie_root_with_encoder, InMemorySize}; /// Helper trait alias with requirements for transaction type generic to be used within [`Receipt`]. @@ -50,7 +47,7 @@ pub type RpcReceipt = EthereumReceipt; /// Typed ethereum transaction receipt. /// Receipt containing result of transaction execution. -#[derive(Clone, Debug, PartialEq, Eq, Default)] +#[derive(Clone, Debug, PartialEq, Eq, Default, RlpEncodable, RlpDecodable)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[cfg_attr(feature = "reth-codec", reth_codecs::add_arbitrary_tests(compact, rlp))] @@ -141,44 +138,6 @@ impl Receipt { pub fn calculate_receipt_root_no_memo(receipts: &[Self]) -> B256 { ordered_trie_root_with_encoder(receipts, |r, buf| r.with_bloom_ref().encode_2718(buf)) } - - /// Returns length of RLP-encoded receipt fields without the given [`Bloom`] without an RLP - /// header - pub fn rlp_encoded_fields_length_without_bloom(&self) -> usize { - self.success.length() + self.cumulative_gas_used.length() + self.logs.length() - } - - /// RLP-encodes receipt fields without the given [`Bloom`] without an RLP header. - pub fn rlp_encode_fields_without_bloom(&self, out: &mut dyn BufMut) { - self.success.encode(out); - self.cumulative_gas_used.encode(out); - self.logs.encode(out); - } - - /// Returns RLP header for inner encoding. - pub fn rlp_header_inner_without_bloom(&self) -> Header { - Header { list: true, payload_length: self.rlp_encoded_fields_length_without_bloom() } - } - - /// RLP-decodes the receipt from the provided buffer. This does not expect a type byte or - /// network header. 
- pub fn rlp_decode_inner_without_bloom(buf: &mut &[u8], tx_type: T) -> alloy_rlp::Result { - let header = Header::decode(buf)?; - if !header.list { - return Err(alloy_rlp::Error::UnexpectedString); - } - - let remaining = buf.len(); - let success = Decodable::decode(buf)?; - let cumulative_gas_used = Decodable::decode(buf)?; - let logs = Decodable::decode(buf)?; - - if buf.len() + header.payload_length != remaining { - return Err(alloy_rlp::Error::UnexpectedLength); - } - - Ok(Self { tx_type, success, cumulative_gas_used, logs }) - } } impl Eip2718EncodableReceipt for Receipt { @@ -195,6 +154,16 @@ impl Eip2718EncodableReceipt for Receipt { } } +impl Eip2718DecodableReceipt for Receipt { + fn typed_decode_with_bloom(ty: u8, buf: &mut &[u8]) -> Eip2718Result> { + Ok(Self::rlp_decode_inner(buf, T::try_from(ty)?)?) + } + + fn fallback_decode_with_bloom(buf: &mut &[u8]) -> Eip2718Result> { + Ok(Self::rlp_decode_inner(buf, T::try_from(0)?)?) + } +} + impl RlpEncodableReceipt for Receipt { fn rlp_encoded_length_with_bloom(&self, bloom: &Bloom) -> usize { let mut len = self.eip2718_encoded_length_with_bloom(bloom); @@ -243,48 +212,6 @@ impl RlpDecodableReceipt for Receipt { } } -impl Encodable2718 for Receipt { - fn encode_2718_len(&self) -> usize { - (!self.tx_type.is_legacy() as usize) + - self.rlp_header_inner_without_bloom().length_with_payload() - } - - // encode the header - fn encode_2718(&self, out: &mut dyn BufMut) { - if !self.tx_type.is_legacy() { - out.put_u8(self.tx_type.ty()); - } - self.rlp_header_inner_without_bloom().encode(out); - self.rlp_encode_fields_without_bloom(out); - } -} - -impl Decodable2718 for Receipt { - fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result { - Ok(Self::rlp_decode_inner_without_bloom(buf, T::try_from(ty)?)?) - } - - fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result { - Ok(Self::rlp_decode_inner_without_bloom(buf, T::try_from(0)?)?) - } -} - -impl Encodable for Receipt { - fn encode(&self, out: &mut dyn BufMut) { - self.network_encode(out); - } - - fn length(&self) -> usize { - self.network_len() - } -} - -impl Decodable for Receipt { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - Ok(Self::network_decode(buf)?) 
- } -} - impl TxReceipt for EthereumReceipt where T: TxTy, @@ -331,10 +258,7 @@ impl IsTyped2718 for Receipt { impl InMemorySize for Receipt { fn size(&self) -> usize { - self.tx_type.size() + - core::mem::size_of::() + - core::mem::size_of::() + - self.logs.iter().map(|log| log.size()).sum::() + size_of::() + self.logs.iter().map(|log| log.size()).sum::() } } diff --git a/crates/ethereum/primitives/src/transaction.rs b/crates/ethereum/primitives/src/transaction.rs index f2ec4ad9cd..28782c2ac6 100644 --- a/crates/ethereum/primitives/src/transaction.rs +++ b/crates/ethereum/primitives/src/transaction.rs @@ -8,7 +8,7 @@ use alloy_consensus::{ TxLegacy, TxType, Typed2718, }; use alloy_eips::{ - eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, + eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718, IsTyped2718}, eip2930::AccessList, eip7702::SignedAuthorization, }; @@ -664,6 +664,12 @@ impl TxHashRef for TransactionSigned { } } +impl IsTyped2718 for TransactionSigned { + fn is_type(type_id: u8) -> bool { + ::is_type(type_id) + } +} + impl SignedTransaction for TransactionSigned {} #[cfg(test)] diff --git a/crates/ethereum/reth/Cargo.toml b/crates/ethereum/reth/Cargo.toml index 959b7c1b65..a24f39c1a7 100644 --- a/crates/ethereum/reth/Cargo.toml +++ b/crates/ethereum/reth/Cargo.toml @@ -79,7 +79,10 @@ arbitrary = [ "alloy-rpc-types-engine?/arbitrary", "reth-codecs?/arbitrary", ] - +keccak-cache-global = [ + "reth-node-ethereum?/keccak-cache-global", + "reth-node-core?/keccak-cache-global", +] test-utils = [ "reth-chainspec/test-utils", "reth-consensus?/test-utils", @@ -144,8 +147,26 @@ rpc = [ "dep:alloy-rpc-types-engine", ] tasks = ["dep:reth-tasks"] -js-tracer = ["rpc", "reth-rpc/js-tracer"] +jemalloc = [ + "reth-cli-util?/jemalloc", + "reth-ethereum-cli?/jemalloc", + "reth-node-core?/jemalloc", +] +js-tracer = [ + "rpc", + "reth-rpc/js-tracer", + "reth-node-builder?/js-tracer", + "reth-node-ethereum?/js-tracer", + "reth-rpc-eth-types?/js-tracer", +] network = ["dep:reth-network", "tasks", "dep:reth-network-api", "dep:reth-eth-wire"] +otlp = [ + "reth-ethereum-cli?/otlp", + "reth-node-core?/otlp", +] +portable = [ + "reth-revm?/portable", +] provider = ["storage-api", "tasks", "dep:reth-provider", "dep:reth-db", "dep:reth-codecs"] storage-api = ["dep:reth-storage-api"] trie = ["dep:reth-trie"] diff --git a/crates/evm/evm/Cargo.toml b/crates/evm/evm/Cargo.toml index 4bc8ef06db..ce75269838 100644 --- a/crates/evm/evm/Cargo.toml +++ b/crates/evm/evm/Cargo.toml @@ -32,6 +32,7 @@ auto_impl.workspace = true derive_more.workspace = true futures-util.workspace = true metrics = { workspace = true, optional = true } +rayon = { workspace = true, optional = true } [dev-dependencies] reth-ethereum-primitives.workspace = true @@ -40,6 +41,7 @@ reth-ethereum-forks.workspace = true [features] default = ["std"] std = [ + "dep:rayon", "reth-primitives-traits/std", "alloy-eips/std", "alloy-primitives/std", diff --git a/crates/evm/evm/src/engine.rs b/crates/evm/evm/src/engine.rs index 5b46a08617..5663745f45 100644 --- a/crates/evm/evm/src/engine.rs +++ b/crates/evm/evm/src/engine.rs @@ -1,4 +1,5 @@ use crate::{execute::ExecutableTxFor, ConfigureEvm, EvmEnvFor, ExecutionCtxFor}; +use rayon::prelude::*; /// [`ConfigureEvm`] extension providing methods for executing payloads. pub trait ConfigureEngineEvm: ConfigureEvm { @@ -18,22 +19,53 @@ pub trait ConfigureEngineEvm: ConfigureEvm { ) -> Result, Self::Error>; } -/// Iterator over executable transactions. 
-pub trait ExecutableTxIterator: - Iterator> + Send + 'static -{ +/// A helper trait representing a pair of a "raw" transactions iterator and a closure that can be +/// used to convert them to an executable transaction. This tuple is used in the engine to +/// parallelize heavy work like decoding or recovery. +pub trait ExecutableTxTuple: Into<(Self::IntoIter, Self::Convert)> + Send + 'static { + /// Raw transaction that can be converted to an [`ExecutableTxTuple::Tx`] + /// + /// This can be any type that can be converted to an [`ExecutableTxTuple::Tx`]. For example, + /// an unrecovered transaction or just the transaction bytes. + type RawTx: Send + Sync + 'static; /// The executable transaction type iterator yields. - type Tx: ExecutableTxFor + Clone + Send + 'static; + type Tx: Clone + Send + Sync + 'static; /// Errors that may occur while recovering or decoding transactions. type Error: core::error::Error + Send + Sync + 'static; + + /// Iterator over [`ExecutableTxTuple::Tx`]. + type IntoIter: IntoParallelIterator + + Send + + 'static; + /// Closure that can be used to convert a [`ExecutableTxTuple::RawTx`] to a + /// [`ExecutableTxTuple::Tx`]. This might involve heavy work like decoding or recovery + /// and will be parallelized in the engine. + type Convert: Fn(Self::RawTx) -> Result + Send + Sync + 'static; } -impl ExecutableTxIterator for T +impl ExecutableTxTuple for (I, F) where - Tx: ExecutableTxFor + Clone + Send + 'static, + RawTx: Send + Sync + 'static, + Tx: Clone + Send + Sync + 'static, Err: core::error::Error + Send + Sync + 'static, - T: Iterator> + Send + 'static, + I: IntoParallelIterator + Send + 'static, + F: Fn(RawTx) -> Result + Send + Sync + 'static, { + type RawTx = RawTx; type Tx = Tx; type Error = Err; + + type IntoIter = I; + type Convert = F; +} + +/// Iterator over executable transactions. +pub trait ExecutableTxIterator: + ExecutableTxTuple> +{ +} + +impl ExecutableTxIterator for T where + T: ExecutableTxTuple> +{ } diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index 76a9b07839..fca8f6241d 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -1,7 +1,7 @@ //! Traits for execution. use crate::{ConfigureEvm, Database, OnStateHook, TxEnvFor}; -use alloc::{boxed::Box, vec::Vec}; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::{BlockHeader, Header}; use alloy_eips::eip2718::WithEncoded; pub use alloy_evm::block::{BlockExecutor, BlockExecutorFactory}; @@ -447,7 +447,7 @@ impl ExecutorTx for Recovered ExecutorTx for WithTxEnv<<::Evm as Evm>::Tx, T> where - T: ExecutorTx, + T: ExecutorTx + Clone, Executor: BlockExecutor, <::Evm as Evm>::Tx: Clone, Self: RecoveredTx, @@ -457,7 +457,7 @@ where } fn into_recovered(self) -> Recovered { - self.tx.into_recovered() + Arc::unwrap_or_clone(self.tx).into_recovered() } } @@ -641,7 +641,7 @@ pub struct WithTxEnv { /// The transaction environment for EVM. pub tx_env: TxEnv, /// The recovered transaction. 
- pub tx: T, + pub tx: Arc, } impl> RecoveredTx for WithTxEnv { diff --git a/crates/evm/evm/src/lib.rs b/crates/evm/evm/src/lib.rs index e2101fd915..b370c15302 100644 --- a/crates/evm/evm/src/lib.rs +++ b/crates/evm/evm/src/lib.rs @@ -28,7 +28,7 @@ use alloy_evm::{ block::{BlockExecutorFactory, BlockExecutorFor}, precompiles::PrecompilesMap, }; -use alloy_primitives::{Address, B256}; +use alloy_primitives::{Address, Bytes, B256}; use core::{error::Error, fmt::Debug}; use execute::{BasicBlockExecutor, BlockAssembler, BlockBuilder}; use reth_execution_errors::BlockExecutionError; @@ -44,8 +44,10 @@ pub mod execute; mod aliases; pub use aliases::*; +#[cfg(feature = "std")] mod engine; -pub use engine::{ConfigureEngineEvm, ExecutableTxIterator}; +#[cfg(feature = "std")] +pub use engine::{ConfigureEngineEvm, ExecutableTxIterator, ExecutableTxTuple}; #[cfg(feature = "metrics")] pub mod metrics; @@ -59,8 +61,6 @@ pub use alloy_evm::{ *, }; -pub use alloy_evm::block::state_changes as state_change; - /// A complete configuration of EVM for Reth. /// /// This trait encapsulates complete configuration required for transaction execution and block @@ -501,6 +501,8 @@ pub struct NextBlockEnvAttributes { pub parent_beacon_block_root: Option, /// Withdrawals pub withdrawals: Option, + /// Optional extra data. + pub extra_data: Bytes, } /// Abstraction over transaction environment. diff --git a/crates/evm/evm/src/metrics.rs b/crates/evm/evm/src/metrics.rs index 3fa02c3265..a3afd331fc 100644 --- a/crates/evm/evm/src/metrics.rs +++ b/crates/evm/evm/src/metrics.rs @@ -17,6 +17,14 @@ pub struct ExecutorMetrics { /// The Histogram for amount of gas used. pub gas_used_histogram: Histogram, + /// The Histogram for amount of time taken to execute the pre-execution changes. + pub pre_execution_histogram: Histogram, + /// The Histogram for amount of time taken to wait for one transaction to be available. + pub transaction_wait_histogram: Histogram, + /// The Histogram for amount of time taken to execute one transaction. + pub transaction_execution_histogram: Histogram, + /// The Histogram for amount of time taken to execute the post-execution changes. + pub post_execution_histogram: Histogram, /// The Histogram for amount of time taken to execute blocks. pub execution_histogram: Histogram, /// The total amount of time it took to execute the latest block. diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index 49c3524729..25c8ba17ab 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -182,6 +182,11 @@ impl ExecutionOutcome { self.bundle.account(address).map(|a| a.info.as_ref().map(Into::into)) } + /// Returns the state [`BundleAccount`] for the given account. + pub fn account_state(&self, address: &Address) -> Option<&BundleAccount> { + self.bundle.account(address) + } + /// Get storage if value is known. /// /// This means that depending on status we can potentially return `U256::ZERO`. 
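
The `ExecutableTxTuple` pair introduced in `crates/evm/evm/src/engine.rs` above separates the raw transaction iterator from the conversion closure, so the engine can fan decoding and signer recovery out across a rayon pool (the Ethereum `tx_iterator` now returns `Ok((txs, convert))`). A minimal sketch of how such a pair might be consumed — illustrative only; `recover_all` and its bounds are hypothetical and not the engine's actual code:

```rust
use rayon::prelude::*;

/// Hypothetical consumer of an (iterator, convert) pair: runs the conversion closure
/// (e.g. EIP-2718 decoding + signer recovery) in parallel and short-circuits on the
/// first error.
fn recover_all<RawTx, Tx, Err>(
    txs: Vec<RawTx>,
    convert: impl Fn(RawTx) -> Result<Tx, Err> + Send + Sync,
) -> Result<Vec<Tx>, Err>
where
    RawTx: Send,
    Tx: Send,
    Err: Send,
{
    // `into_par_iter` distributes the raw transactions over the rayon thread pool;
    // collecting into `Result<Vec<_>, _>` stops at the first conversion failure.
    txs.into_par_iter().map(convert).collect()
}
```
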
diff --git a/crates/exex/exex/src/backfill/factory.rs b/crates/exex/exex/src/backfill/factory.rs index 29734b905e..d9a51bc47a 100644 --- a/crates/exex/exex/src/backfill/factory.rs +++ b/crates/exex/exex/src/backfill/factory.rs @@ -39,7 +39,7 @@ impl BackfillJobFactory { } /// Sets the prune modes - pub const fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { + pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { self.prune_modes = prune_modes; self } diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index e489a98abf..4d60929814 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -13,7 +13,7 @@ use reth_evm_ethereum::EthEvmConfig; use reth_node_api::NodePrimitives; use reth_primitives_traits::{Block as _, RecoveredBlock}; use reth_provider::{ - providers::ProviderNodeTypes, BlockWriter as _, ExecutionOutcome, LatestStateProviderRef, + providers::ProviderNodeTypes, BlockWriter as _, ExecutionOutcome, LatestStateProvider, ProviderFactory, }; use reth_revm::database::StateProviderDatabase; @@ -69,7 +69,7 @@ where // Execute the block to produce a block execution output let mut block_execution_output = EthEvmConfig::ethereum(chain_spec) - .batch_executor(StateProviderDatabase::new(LatestStateProviderRef::new(&provider))) + .batch_executor(StateProviderDatabase::new(LatestStateProvider::new(provider))) .execute(block)?; block_execution_output.state.reverts.sort(); @@ -203,8 +203,8 @@ where let provider = provider_factory.provider()?; let evm_config = EthEvmConfig::new(chain_spec); - let executor = evm_config - .batch_executor(StateProviderDatabase::new(LatestStateProviderRef::new(&provider))); + let executor = + evm_config.batch_executor(StateProviderDatabase::new(LatestStateProvider::new(provider))); let mut execution_outcome = executor.execute_batch(vec![&block1, &block2])?; execution_outcome.state_mut().reverts.sort(); diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index 99694f0a51..73e3a1d85f 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -1303,7 +1303,7 @@ mod tests { .try_recover() .unwrap(); let provider_rw = provider_factory.database_provider_rw().unwrap(); - provider_rw.insert_block(block.clone()).unwrap(); + provider_rw.insert_block(&block).unwrap(); provider_rw.commit().unwrap(); let provider = BlockchainProvider::new(provider_factory).unwrap(); diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index c6a54e647c..550b3da556 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -481,12 +481,12 @@ mod tests { &mut rng, genesis_block.number + 1, BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() }, - ); - let provider_rw = provider_factory.provider_rw()?; - provider_rw.insert_block(node_head_block.clone().try_recover()?)?; - provider_rw.commit()?; - + ) + .try_recover()?; let node_head = node_head_block.num_hash(); + let provider_rw = provider_factory.provider_rw()?; + provider_rw.insert_block(&node_head_block)?; + provider_rw.commit()?; let exex_head = ExExHead { block: BlockNumHash { number: genesis_block.number, hash: genesis_hash } }; @@ -613,7 +613,7 @@ mod tests { .try_recover()?; let node_head = node_head_block.num_hash(); let provider_rw = provider.database_provider_rw()?; - provider_rw.insert_block(node_head_block)?; + 
provider_rw.insert_block(&node_head_block)?; provider_rw.commit()?; let node_head_notification = ExExNotification::ChainCommitted { new: Arc::new( diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 0305da323d..8430ea5d91 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -20,7 +20,9 @@ use futures_util::FutureExt; use reth_chainspec::{ChainSpec, MAINNET}; use reth_consensus::test_utils::TestConsensus; use reth_db::{ - test_utils::{create_test_rw_db, create_test_static_files_dir, TempDatabase}, + test_utils::{ + create_test_rocksdb_dir, create_test_rw_db, create_test_static_files_dir, TempDatabase, + }, DatabaseEnv, }; use reth_db_common::init::init_genesis; @@ -50,7 +52,7 @@ use reth_node_ethereum::{ use reth_payload_builder::noop::NoopPayloadBuilderService; use reth_primitives_traits::{Block as _, RecoveredBlock}; use reth_provider::{ - providers::{BlockchainProvider, StaticFileProvider}, + providers::{BlockchainProvider, RocksDBProvider, StaticFileProvider}, BlockReader, EthStorage, ProviderFactory, }; use reth_tasks::TaskManager; @@ -239,12 +241,14 @@ pub async fn test_exex_context_with_chain_spec( let consensus = Arc::new(TestConsensus::default()); let (static_dir, _) = create_test_static_files_dir(); + let (rocksdb_dir, _) = create_test_rocksdb_dir(); let db = create_test_rw_db(); let provider_factory = ProviderFactory::>::new( db, chain_spec.clone(), StaticFileProvider::read_write(static_dir.keep()).expect("static file provider"), - ); + RocksDBProvider::builder(rocksdb_dir.keep()).build().unwrap(), + )?; let genesis_hash = init_genesis(&provider_factory)?; let provider = BlockchainProvider::new(provider_factory.clone())?; diff --git a/crates/exex/types/src/notification.rs b/crates/exex/types/src/notification.rs index cf0d758055..4bf2a8be32 100644 --- a/crates/exex/types/src/notification.rs +++ b/crates/exex/types/src/notification.rs @@ -95,17 +95,33 @@ pub(super) mod serde_bincode_compat { /// notification: ExExNotification, /// } /// ``` + /// + /// This enum mirrors [`super::ExExNotification`] but uses borrowed [`Chain`] types + /// instead of `Arc` for bincode compatibility. #[derive(Debug, Serialize, Deserialize)] - #[expect(missing_docs)] #[serde(bound = "")] #[expect(clippy::large_enum_variant)] pub enum ExExNotification<'a, N> where N: NodePrimitives, { - ChainCommitted { new: Chain<'a, N> }, - ChainReorged { old: Chain<'a, N>, new: Chain<'a, N> }, - ChainReverted { old: Chain<'a, N> }, + /// Chain got committed without a reorg, and only the new chain is returned. + ChainCommitted { + /// The new chain after commit. + new: Chain<'a, N>, + }, + /// Chain got reorged, and both the old and the new chains are returned. + ChainReorged { + /// The old chain before reorg. + old: Chain<'a, N>, + /// The new chain after reorg. + new: Chain<'a, N>, + }, + /// Chain got reverted, and only the old chain is returned. + ChainReverted { + /// The old chain before reversion. + old: Chain<'a, N>, + }, } impl<'a, N> From<&'a super::ExExNotification> for ExExNotification<'a, N> diff --git a/crates/metrics/src/common/mpsc.rs b/crates/metrics/src/common/mpsc.rs index 0b3d66ecb1..9d5c1c4848 100644 --- a/crates/metrics/src/common/mpsc.rs +++ b/crates/metrics/src/common/mpsc.rs @@ -42,14 +42,13 @@ pub struct UnboundedMeteredSender { } impl UnboundedMeteredSender { - /// Creates a new [`MeteredSender`] wrapping around the provided that updates metrics on send. 
- // #[derive(Debug)] + /// Creates a new [`UnboundedMeteredSender`] wrapping around the provided + /// [`mpsc::UnboundedSender`] that updates metrics on send. pub fn new(sender: mpsc::UnboundedSender, scope: &'static str) -> Self { Self { sender, metrics: MeteredSenderMetrics::new(scope) } } - /// Calls the underlying that updates metrics on send. - // #[derive(Debug)]'s `try_send`, incrementing the appropriate + /// Calls the underlying [`mpsc::UnboundedSender`]'s `send`, incrementing the appropriate /// metrics depending on the result. pub fn send(&self, message: T) -> Result<(), SendError> { match self.sender.send(message) { @@ -74,7 +73,7 @@ impl Clone for UnboundedMeteredSender { /// A wrapper type around [Receiver](mpsc::UnboundedReceiver) that updates metrics on receive. #[derive(Debug)] pub struct UnboundedMeteredReceiver { - /// The [Sender](mpsc::Sender) that this wraps around + /// The [Receiver](mpsc::UnboundedReceiver) that this wraps around receiver: mpsc::UnboundedReceiver, /// Holds metrics for this type metrics: MeteredReceiverMetrics, diff --git a/crates/net/banlist/Cargo.toml b/crates/net/banlist/Cargo.toml index 7afec48d48..f5f885da24 100644 --- a/crates/net/banlist/Cargo.toml +++ b/crates/net/banlist/Cargo.toml @@ -14,3 +14,6 @@ workspace = true [dependencies] # ethereum alloy-primitives.workspace = true + +# networking +ipnet.workspace = true diff --git a/crates/net/banlist/src/lib.rs b/crates/net/banlist/src/lib.rs index fb44500efe..402041ed2f 100644 --- a/crates/net/banlist/src/lib.rs +++ b/crates/net/banlist/src/lib.rs @@ -10,7 +10,7 @@ type PeerId = alloy_primitives::B512; -use std::{collections::HashMap, net::IpAddr, time::Instant}; +use std::{collections::HashMap, net::IpAddr, str::FromStr, time::Instant}; /// Determines whether or not the IP is globally routable. /// Should be replaced with [`IpAddr::is_global`](std::net::IpAddr::is_global) once it is stable. @@ -125,11 +125,14 @@ impl BanList { /// Bans the IP until the timestamp. /// /// This does not ban non-global IPs. + /// If the IP is already banned, the timeout will be updated to the new value. pub fn ban_ip_until(&mut self, ip: IpAddr, until: Instant) { self.ban_ip_with(ip, Some(until)); } - /// Bans the peer until the timestamp + /// Bans the peer until the timestamp. + /// + /// If the peer is already banned, the timeout will be updated to the new value. pub fn ban_peer_until(&mut self, node_id: PeerId, until: Instant) { self.ban_peer_with(node_id, Some(until)); } @@ -147,6 +150,8 @@ impl BanList { } /// Bans the peer indefinitely or until the given timeout. + /// + /// If the peer is already banned, the timeout will be updated to the new value. pub fn ban_peer_with(&mut self, node_id: PeerId, until: Option) { self.banned_peers.insert(node_id, until); } @@ -154,6 +159,7 @@ impl BanList { /// Bans the ip indefinitely or until the given timeout. /// /// This does not ban non-global IPs. + /// If the IP is already banned, the timeout will be updated to the new value. pub fn ban_ip_with(&mut self, ip: IpAddr, until: Option) { if is_global(&ip) { self.banned_ips.insert(ip, until); @@ -167,7 +173,7 @@ mod tests { #[test] fn can_ban_unban_peer() { - let peer = PeerId::random(); + let peer = PeerId::new([1; 64]); let mut banlist = BanList::default(); banlist.ban_peer(peer); assert!(banlist.is_banned_peer(&peer)); @@ -209,3 +215,161 @@ mod tests { assert!(!banlist.is_banned_ip(&ip)); } } + +/// IP filter for restricting network communication to specific IP ranges using CIDR notation. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct IpFilter { + /// List of allowed IP networks in CIDR notation. + /// If empty, all IPs are allowed. + allowed_networks: Vec, +} + +impl IpFilter { + /// Creates a new IP filter with the given CIDR networks. + /// + /// If the list is empty, all IPs will be allowed. + pub const fn new(allowed_networks: Vec) -> Self { + Self { allowed_networks } + } + + /// Creates an IP filter from a comma-separated list of CIDR networks. + /// + /// # Errors + /// + /// Returns an error if any of the CIDR strings cannot be parsed. + pub fn from_cidr_string(cidrs: &str) -> Result { + if cidrs.is_empty() { + return Ok(Self::allow_all()) + } + + let networks = cidrs + .split(',') + .map(|s| s.trim()) + .filter(|s| !s.is_empty()) + .map(ipnet::IpNet::from_str) + .collect::, _>>()?; + + Ok(Self::new(networks)) + } + + /// Creates a filter that allows all IPs. + pub const fn allow_all() -> Self { + Self { allowed_networks: Vec::new() } + } + + /// Checks if the given IP address is allowed by this filter. + /// + /// Returns `true` if the filter is empty (allows all) or if the IP is within + /// any of the allowed networks. + pub fn is_allowed(&self, ip: &IpAddr) -> bool { + // If no restrictions are set, allow all IPs + if self.allowed_networks.is_empty() { + return true + } + + // Check if the IP is within any of the allowed networks + self.allowed_networks.iter().any(|net| net.contains(ip)) + } + + /// Returns `true` if this filter has restrictions (i.e., not allowing all IPs). + pub const fn has_restrictions(&self) -> bool { + !self.allowed_networks.is_empty() + } + + /// Returns the list of allowed networks. + pub fn allowed_networks(&self) -> &[ipnet::IpNet] { + &self.allowed_networks + } +} + +impl Default for IpFilter { + fn default() -> Self { + Self::allow_all() + } +} + +#[cfg(test)] +mod ip_filter_tests { + use super::*; + + #[test] + fn test_allow_all_filter() { + let filter = IpFilter::allow_all(); + assert!(filter.is_allowed(&IpAddr::from([192, 168, 1, 1]))); + assert!(filter.is_allowed(&IpAddr::from([10, 0, 0, 1]))); + assert!(filter.is_allowed(&IpAddr::from([8, 8, 8, 8]))); + assert!(!filter.has_restrictions()); + } + + #[test] + fn test_single_network_filter() { + let filter = IpFilter::from_cidr_string("192.168.0.0/16").unwrap(); + assert!(filter.is_allowed(&IpAddr::from([192, 168, 1, 1]))); + assert!(filter.is_allowed(&IpAddr::from([192, 168, 255, 255]))); + assert!(!filter.is_allowed(&IpAddr::from([192, 169, 1, 1]))); + assert!(!filter.is_allowed(&IpAddr::from([10, 0, 0, 1]))); + assert!(filter.has_restrictions()); + } + + #[test] + fn test_multiple_networks_filter() { + let filter = IpFilter::from_cidr_string("192.168.0.0/16,10.0.0.0/8").unwrap(); + assert!(filter.is_allowed(&IpAddr::from([192, 168, 1, 1]))); + assert!(filter.is_allowed(&IpAddr::from([10, 5, 10, 20]))); + assert!(filter.is_allowed(&IpAddr::from([10, 255, 255, 255]))); + assert!(!filter.is_allowed(&IpAddr::from([172, 16, 0, 1]))); + assert!(!filter.is_allowed(&IpAddr::from([8, 8, 8, 8]))); + } + + #[test] + fn test_ipv6_filter() { + let filter = IpFilter::from_cidr_string("2001:db8::/32").unwrap(); + let ipv6_in_range: IpAddr = "2001:db8::1".parse().unwrap(); + let ipv6_out_range: IpAddr = "2001:db9::1".parse().unwrap(); + + assert!(filter.is_allowed(&ipv6_in_range)); + assert!(!filter.is_allowed(&ipv6_out_range)); + } + + #[test] + fn test_mixed_ipv4_ipv6_filter() { + let filter = IpFilter::from_cidr_string("192.168.0.0/16,2001:db8::/32").unwrap(); + + 
assert!(filter.is_allowed(&IpAddr::from([192, 168, 1, 1]))); + let ipv6_in_range: IpAddr = "2001:db8::1".parse().unwrap(); + assert!(filter.is_allowed(&ipv6_in_range)); + + assert!(!filter.is_allowed(&IpAddr::from([10, 0, 0, 1]))); + let ipv6_out_range: IpAddr = "2001:db9::1".parse().unwrap(); + assert!(!filter.is_allowed(&ipv6_out_range)); + } + + #[test] + fn test_empty_string() { + let filter = IpFilter::from_cidr_string("").unwrap(); + assert!(filter.is_allowed(&IpAddr::from([192, 168, 1, 1]))); + assert!(!filter.has_restrictions()); + } + + #[test] + fn test_invalid_cidr() { + assert!(IpFilter::from_cidr_string("invalid").is_err()); + assert!(IpFilter::from_cidr_string("192.168.0.0/33").is_err()); + assert!(IpFilter::from_cidr_string("192.168.0.0,10.0.0.0").is_err()); + } + + #[test] + fn test_whitespace_handling() { + let filter = IpFilter::from_cidr_string(" 192.168.0.0/16 , 10.0.0.0/8 ").unwrap(); + assert!(filter.is_allowed(&IpAddr::from([192, 168, 1, 1]))); + assert!(filter.is_allowed(&IpAddr::from([10, 0, 0, 1]))); + assert!(!filter.is_allowed(&IpAddr::from([172, 16, 0, 1]))); + } + + #[test] + fn test_single_ip_as_cidr() { + let filter = IpFilter::from_cidr_string("192.168.1.100/32").unwrap(); + assert!(filter.is_allowed(&IpAddr::from([192, 168, 1, 100]))); + assert!(!filter.is_allowed(&IpAddr::from([192, 168, 1, 101]))); + } +} diff --git a/crates/net/discv4/src/config.rs b/crates/net/discv4/src/config.rs index ebd1650298..f9002a8fde 100644 --- a/crates/net/discv4/src/config.rs +++ b/crates/net/discv4/src/config.rs @@ -17,8 +17,6 @@ use std::{ #[derive(Clone, Debug)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Discv4Config { - /// Whether to enable the incoming packet filter. Default: false. - pub enable_packet_filter: bool, /// Size of the channel buffer for outgoing messages. pub udp_egress_message_buffer: usize, /// Size of the channel buffer for incoming messages. @@ -26,7 +24,7 @@ pub struct Discv4Config { /// The number of allowed consecutive failures for `FindNode` requests. Default: 5. pub max_find_node_failures: u8, /// The interval to use when checking for expired nodes that need to be re-pinged. Default: - /// 10min. + /// 10 seconds. pub ping_interval: Duration, /// The duration of we consider a ping timed out. pub ping_expiration: Duration, @@ -41,10 +39,6 @@ pub struct Discv4Config { /// Provides a way to ban peers and ips. #[cfg_attr(feature = "serde", serde(skip))] pub ban_list: BanList, - /// Set the default duration for which nodes are banned for. This timeouts are checked every 5 - /// minutes, so the precision will be to the nearest 5 minutes. If set to `None`, bans from - /// the filter will last indefinitely. Default is 1 hour. - pub ban_duration: Option, /// Nodes to boot from. pub bootstrap_nodes: HashSet, /// Whether to randomly discover new peers. 
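// Illustrative only (assumed to live alongside the new `IpFilter` above): a
// hypothetical connection-gate check built on the API introduced in this change.
// Only `from_cidr_string` and `is_allowed` come from the diff; the helper names
// and addresses are made up for the example.
use std::net::IpAddr;

fn gate_inbound(filter: &IpFilter, remote: IpAddr) -> bool {
    // An empty filter means "no restrictions", so every peer passes.
    filter.is_allowed(&remote)
}

fn demo() {
    // Whitespace around entries is trimmed, as exercised by the tests above.
    let filter = IpFilter::from_cidr_string("10.0.0.0/8, 2001:db8::/32")
        .expect("valid CIDR list");
    assert!(gate_inbound(&filter, "10.1.2.3".parse().unwrap()));
    assert!(!gate_inbound(&filter, "8.8.8.8".parse().unwrap()));
    assert!(!gate_inbound(&filter, "2001:db9::1".parse().unwrap()));
}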
@@ -99,16 +93,15 @@ impl Discv4Config { /// Returns the corresponding [`ResolveNatInterval`], if a [`NatResolver`] and an interval was /// configured pub fn resolve_external_ip_interval(&self) -> Option { - let resolver = self.external_ip_resolver?; + let resolver = self.external_ip_resolver.clone()?; let interval = self.resolve_external_ip_interval?; - Some(ResolveNatInterval::interval(resolver, interval)) + Some(ResolveNatInterval::interval_at(resolver, tokio::time::Instant::now(), interval)) } } impl Default for Discv4Config { fn default() -> Self { Self { - enable_packet_filter: false, // This should be high enough to cover an entire recursive FindNode lookup which is // includes sending FindNode to nodes it discovered in the rounds using the concurrency // factor ALPHA @@ -126,7 +119,6 @@ impl Default for Discv4Config { lookup_interval: Duration::from_secs(20), ban_list: Default::default(), - ban_duration: Some(Duration::from_secs(60 * 60)), // 1 hour bootstrap_nodes: Default::default(), enable_dht_random_walk: true, enable_lookup: true, @@ -148,12 +140,6 @@ pub struct Discv4ConfigBuilder { } impl Discv4ConfigBuilder { - /// Whether to enable the incoming packet filter. - pub const fn enable_packet_filter(&mut self) -> &mut Self { - self.config.enable_packet_filter = true; - self - } - /// Sets the channel size for incoming messages pub const fn udp_ingress_message_buffer( &mut self, @@ -226,13 +212,13 @@ impl Discv4ConfigBuilder { self } - /// Whether to enforce expiration timestamps in messages. + /// Whether to enable EIP-868 pub const fn enable_eip868(&mut self, enable_eip868: bool) -> &mut Self { self.config.enable_eip868 = enable_eip868; self } - /// Whether to enable EIP-868 + /// Whether to enforce expiration timestamps in messages. pub const fn enforce_expiration_timestamps( &mut self, enforce_expiration_timestamps: bool, @@ -276,14 +262,6 @@ impl Discv4ConfigBuilder { self } - /// Set the default duration for which nodes are banned for. This timeouts are checked every 5 - /// minutes, so the precision will be to the nearest 5 minutes. If set to `None`, bans from - /// the filter will last indefinitely. Default is 1 hour. - pub const fn ban_duration(&mut self, ban_duration: Option) -> &mut Self { - self.config.ban_duration = ban_duration; - self - } - /// Adds a boot node pub fn add_boot_node(&mut self, node: NodeRecord) -> &mut Self { self.config.bootstrap_nodes.insert(node); @@ -297,10 +275,7 @@ impl Discv4ConfigBuilder { } /// Configures if and how the external IP of the node should be resolved. 
- pub const fn external_ip_resolver( - &mut self, - external_ip_resolver: Option, - ) -> &mut Self { + pub fn external_ip_resolver(&mut self, external_ip_resolver: Option) -> &mut Self { self.config.external_ip_resolver = external_ip_resolver; self } @@ -331,9 +306,29 @@ mod tests { .enable_lookup(true) .enable_dht_random_walk(true) .add_boot_nodes(HashSet::new()) - .ban_duration(None) .lookup_interval(Duration::from_secs(3)) .enable_lookup(true) .build(); } + + #[tokio::test] + async fn test_resolve_external_ip_interval_uses_interval_at() { + use reth_net_nat::NatResolver; + use std::net::{IpAddr, Ipv4Addr}; + + let ip_addr = IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)); + + // Create a config with external IP resolver + let mut builder = Discv4Config::builder(); + builder.external_ip_resolver(Some(NatResolver::ExternalIp(ip_addr))); + builder.resolve_external_ip_interval(Some(Duration::from_secs(60 * 5))); + let config = builder.build(); + + // Get the ResolveNatInterval + let mut interval = config.resolve_external_ip_interval().expect("should have interval"); + + // Test that first tick returns immediately (interval_at behavior) + let ip = interval.tick().await; + assert_eq!(ip, Some(ip_addr)); + } } diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 83106cbbe6..0daad65d5a 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -625,10 +625,13 @@ impl Discv4Service { self.lookup_interval = tokio::time::interval(duration); } - /// Sets the external Ip to the configured external IP if [`NatResolver::ExternalIp`]. + /// Sets the external Ip to the configured external IP if [`NatResolver::ExternalIp`] or + /// [`NatResolver::ExternalAddr`]. In the case of [`NatResolver::ExternalAddr`], it will return + /// the first IP address found for the domain associated with the discv4 UDP port. fn resolve_external_ip(&mut self) { if let Some(r) = &self.resolve_external_ip_interval && - let Some(external_ip) = r.resolver().as_external_ip() + let Some(external_ip) = + r.resolver().clone().as_external_ip(self.local_node_record.udp_port) { self.set_external_ip_addr(external_ip); } diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index ef2c69caed..030599a4cd 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -75,6 +75,11 @@ pub struct Discv5 { discovered_peer_filter: MustNotIncludeKeys, /// Metrics for underlying [`discv5::Discv5`] node and filtered discovered peers. metrics: Discv5Metrics, + /// Returns the _local_ [`NodeRecord`] this service was started with. + // Note: we must track this separately because the `discv5::Discv5` does not necessarily + // provide this via it's [`local_enr`](discv5::Discv5::local_ner()) This is intended for + // obtaining the port this service was launched at + local_node_record: NodeRecord, } impl Discv5 { @@ -83,7 +88,6 @@ impl Discv5 { //////////////////////////////////////////////////////////////////////////////////////////////// /// Adds the node to the table, if it is not already present. - #[expect(clippy::result_large_err)] pub fn add_node(&self, node_record: Enr) -> Result<(), Error> { let EnrCombinedKeyWrapper(enr) = node_record.into(); self.discv5.add_enr(enr).map_err(Error::AddNodeFailed) @@ -156,22 +160,29 @@ impl Discv5 { enr.try_into().ok() } + /// Returns the local [`Enr`] of the service. + pub fn local_enr(&self) -> Enr { + self.discv5.local_enr() + } + + /// The port the discv5 service is listening on. 
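// Self-contained illustration (not reth code) of the behaviour the new
// `test_resolve_external_ip_interval_uses_interval_at` test above relies on:
// `tokio::time::interval_at(now, period)` yields its first tick immediately,
// so the configured external IP is resolved at startup rather than only after
// the first full period has elapsed.
use std::time::Duration;

#[tokio::main]
async fn main() {
    let period = Duration::from_secs(300);
    let start = tokio::time::Instant::now();
    let mut interval = tokio::time::interval_at(start, period);

    // The first tick completes right away.
    interval.tick().await;
    assert!(start.elapsed() < Duration::from_secs(1));
    println!("first external-IP resolution can happen immediately");
}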
+ pub const fn local_port(&self) -> u16 { + self.local_node_record.udp_port + } + /// Spawns [`discv5::Discv5`]. Returns [`discv5::Discv5`] handle in reth compatible wrapper type /// [`Discv5`], a receiver of [`discv5::Event`]s from the underlying node, and the local /// [`Enr`](discv5::Enr) converted into the reth compatible [`NodeRecord`] type. pub async fn start( sk: &SecretKey, discv5_config: Config, - ) -> Result<(Self, mpsc::Receiver, NodeRecord), Error> { + ) -> Result<(Self, mpsc::Receiver), Error> { // // 1. make local enr from listen config // - let (enr, bc_enr, fork_key, rlpx_ip_mode) = build_local_enr(sk, &discv5_config); + let (enr, local_node_record, fork_key, rlpx_ip_mode) = build_local_enr(sk, &discv5_config); - trace!(target: "net::discv5", - ?enr, - "local ENR" - ); + trace!(target: "net::discv5", ?enr, "local ENR"); // // 2. start discv5 @@ -218,9 +229,15 @@ impl Discv5 { ); Ok(( - Self { discv5, rlpx_ip_mode, fork_key, discovered_peer_filter, metrics }, + Self { + discv5, + rlpx_ip_mode, + fork_key, + discovered_peer_filter, + metrics, + local_node_record, + }, discv5_updates, - bc_enr, )) } @@ -376,7 +393,6 @@ impl Discv5 { /// Returns the [`ForkId`] of the given [`Enr`](discv5::Enr) w.r.t. the local node's network /// stack, if field is set. - #[expect(clippy::result_large_err)] pub fn get_fork_id( &self, enr: &discv5::enr::Enr, @@ -677,6 +693,7 @@ pub async fn lookup( #[cfg(test)] mod test { + #![allow(deprecated)] use super::*; use ::enr::{CombinedKey, EnrKey}; use rand_08::thread_rng; @@ -700,12 +717,14 @@ mod test { fork_key: None, discovered_peer_filter: MustNotIncludeKeys::default(), metrics: Discv5Metrics::default(), + local_node_record: NodeRecord::new( + (Ipv4Addr::LOCALHOST, 30303).into(), + PeerId::random(), + ), } } - async fn start_discovery_node( - udp_port_discv5: u16, - ) -> (Discv5, mpsc::Receiver, NodeRecord) { + async fn start_discovery_node(udp_port_discv5: u16) -> (Discv5, mpsc::Receiver) { let secret_key = SecretKey::new(&mut thread_rng()); let discv5_addr: SocketAddr = format!("127.0.0.1:{udp_port_discv5}").parse().unwrap(); @@ -726,11 +745,11 @@ mod test { // rig test // rig node_1 - let (node_1, mut stream_1, _) = start_discovery_node(30344).await; + let (node_1, mut stream_1) = start_discovery_node(30344).await; let node_1_enr = node_1.with_discv5(|discv5| discv5.local_enr()); // rig node_2 - let (node_2, mut stream_2, _) = start_discovery_node(30355).await; + let (node_2, mut stream_2) = start_discovery_node(30355).await; let node_2_enr = node_2.with_discv5(|discv5| discv5.local_enr()); trace!(target: "net::discv5::test", diff --git a/crates/net/dns/src/lib.rs b/crates/net/dns/src/lib.rs index df597a755e..0fd5d9590f 100644 --- a/crates/net/dns/src/lib.rs +++ b/crates/net/dns/src/lib.rs @@ -268,9 +268,6 @@ impl DnsDiscoveryService { } DnsEntry::Link(link_entry) => { if kind.is_link() { - if let Some(tree) = self.trees.get_mut(&link) { - tree.resolved_links_mut().insert(hash, link_entry.clone()); - } self.sync_tree_with_link(link_entry) } else { debug!(target: "disc::dns",%link_entry, domain=%link.domain, ?hash, "resolved unexpected Link entry"); @@ -583,8 +580,14 @@ mod tests { // await recheck timeout tokio::time::sleep(config.recheck_interval).await; + let mut new_root = root.clone(); + new_root.sequence_number = new_root.sequence_number.saturating_add(1); + new_root.enr_root = "NEW_ENR_ROOT".to_string(); + new_root.sign(&secret_key).unwrap(); + resolver.insert(link.domain.clone(), new_root.to_string()); + let enr = 
Enr::empty(&secret_key).unwrap(); - resolver.insert(format!("{}.{}", root.enr_root.clone(), link.domain), enr.to_base64()); + resolver.insert(format!("{}.{}", new_root.enr_root.clone(), link.domain), enr.to_base64()); let event = poll_fn(|cx| service.poll(cx)).await; diff --git a/crates/net/dns/src/sync.rs b/crates/net/dns/src/sync.rs index 5b9453959d..43e7e9886a 100644 --- a/crates/net/dns/src/sync.rs +++ b/crates/net/dns/src/sync.rs @@ -2,10 +2,7 @@ use crate::tree::{LinkEntry, TreeRootEntry}; use enr::EnrKeyUnambiguous; use linked_hash_set::LinkedHashSet; use secp256k1::SecretKey; -use std::{ - collections::HashMap, - time::{Duration, Instant}, -}; +use std::time::{Duration, Instant}; /// A sync-able tree pub(crate) struct SyncTree { @@ -17,8 +14,6 @@ pub(crate) struct SyncTree { root_updated: Instant, /// The state of the tree sync progress. sync_state: SyncState, - /// Links contained in this tree - resolved_links: HashMap>, /// Unresolved links of the tree unresolved_links: LinkedHashSet, /// Unresolved nodes of the tree @@ -34,7 +29,6 @@ impl SyncTree { link, root_updated: Instant::now(), sync_state: SyncState::Pending, - resolved_links: Default::default(), unresolved_links: Default::default(), unresolved_nodes: Default::default(), } @@ -49,10 +43,6 @@ impl SyncTree { &self.link } - pub(crate) const fn resolved_links_mut(&mut self) -> &mut HashMap> { - &mut self.resolved_links - } - pub(crate) fn extend_children( &mut self, kind: ResolveKind, @@ -102,29 +92,30 @@ impl SyncTree { /// Updates the root and returns what changed pub(crate) fn update_root(&mut self, root: TreeRootEntry) { - let enr = root.enr_root == self.root.enr_root; - let link = root.link_root == self.root.link_root; + let enr_unchanged = root.enr_root == self.root.enr_root; + let link_unchanged = root.link_root == self.root.link_root; self.root = root; self.root_updated = Instant::now(); - let state = match (enr, link) { - (true, true) => { - self.unresolved_nodes.clear(); - self.unresolved_links.clear(); - SyncState::Pending - } - (true, _) => { + let state = match (enr_unchanged, link_unchanged) { + // both unchanged — no resync needed + (true, true) => return, + // only ENR changed + (false, true) => { self.unresolved_nodes.clear(); SyncState::Enr } - (_, true) => { + // only LINK changed + (true, false) => { self.unresolved_links.clear(); SyncState::Link } - _ => { - // unchanged - return + // both changed + (false, false) => { + self.unresolved_nodes.clear(); + self.unresolved_links.clear(); + SyncState::Pending } }; self.sync_state = state; @@ -132,6 +123,7 @@ impl SyncTree { } /// The action to perform by the service +#[derive(Debug)] pub(crate) enum SyncAction { UpdateRoot, Enr(String), @@ -160,3 +152,97 @@ impl ResolveKind { matches!(self, Self::Link) } } + +#[cfg(test)] +mod tests { + use super::*; + use enr::EnrKey; + use secp256k1::rand::thread_rng; + + fn base_root() -> TreeRootEntry { + // taken from existing tests to ensure valid formatting + let s = "enrtree-root:v1 e=QFT4PBCRX4XQCV3VUYJ6BTCEPU l=JGUFMSAGI7KZYB3P7IZW4S5Y3A seq=3 sig=3FmXuVwpa8Y7OstZTx9PIb1mt8FrW7VpDOFv4AaGCsZ2EIHmhraWhe4NxYhQDlw5MjeFXYMbJjsPeKlHzmJREQE"; + s.parse::().unwrap() + } + + fn make_tree() -> SyncTree { + let secret_key = SecretKey::new(&mut thread_rng()); + let link = + LinkEntry { domain: "nodes.example.org".to_string(), pubkey: secret_key.public() }; + SyncTree::new(base_root(), link) + } + + fn advance_to_active(tree: &mut SyncTree) { + // Move Pending -> (emit Link) -> Enr, then Enr -> (emit Enr) -> Active + let now = 
Instant::now(); + let timeout = Duration::from_secs(60 * 60 * 24); + let _ = tree.poll(now, timeout); + let _ = tree.poll(now, timeout); + } + + #[test] + fn update_root_unchanged_no_action_from_active() { + let mut tree = make_tree(); + let now = Instant::now(); + let timeout = Duration::from_secs(60 * 60 * 24); + advance_to_active(&mut tree); + + // same root -> no resync + let same = base_root(); + tree.update_root(same); + assert!(tree.poll(now, timeout).is_none()); + } + + #[test] + fn update_root_only_enr_changed_triggers_enr() { + let mut tree = make_tree(); + advance_to_active(&mut tree); + let mut new_root = base_root(); + new_root.enr_root = "NEW_ENR_ROOT".to_string(); + let now = Instant::now(); + let timeout = Duration::from_secs(60 * 60 * 24); + + tree.update_root(new_root.clone()); + match tree.poll(now, timeout) { + Some(SyncAction::Enr(hash)) => assert_eq!(hash, new_root.enr_root), + other => panic!("expected Enr action, got {:?}", other), + } + } + + #[test] + fn update_root_only_link_changed_triggers_link() { + let mut tree = make_tree(); + advance_to_active(&mut tree); + let mut new_root = base_root(); + new_root.link_root = "NEW_LINK_ROOT".to_string(); + let now = Instant::now(); + let timeout = Duration::from_secs(60 * 60 * 24); + + tree.update_root(new_root.clone()); + match tree.poll(now, timeout) { + Some(SyncAction::Link(hash)) => assert_eq!(hash, new_root.link_root), + other => panic!("expected Link action, got {:?}", other), + } + } + + #[test] + fn update_root_both_changed_triggers_link_then_enr() { + let mut tree = make_tree(); + advance_to_active(&mut tree); + let mut new_root = base_root(); + new_root.enr_root = "NEW_ENR_ROOT".to_string(); + new_root.link_root = "NEW_LINK_ROOT".to_string(); + let now = Instant::now(); + let timeout = Duration::from_secs(60 * 60 * 24); + + tree.update_root(new_root.clone()); + match tree.poll(now, timeout) { + Some(SyncAction::Link(hash)) => assert_eq!(hash, new_root.link_root), + other => panic!("expected first Link action, got {:?}", other), + } + match tree.poll(now, timeout) { + Some(SyncAction::Enr(hash)) => assert_eq!(hash, new_root.enr_root), + other => panic!("expected second Enr action, got {:?}", other), + } + } +} diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 57094813ee..6ef34b858b 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -51,7 +51,7 @@ thiserror.workspace = true tracing.workspace = true tempfile = { workspace = true, optional = true } -itertools.workspace = true +itertools = { workspace = true, optional = true } [dev-dependencies] async-compression = { workspace = true, features = ["gzip", "tokio"] } @@ -67,10 +67,12 @@ assert_matches.workspace = true tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } rand.workspace = true tempfile.workspace = true +alloy-rlp.workspace = true +itertools.workspace = true [features] default = [] -file-client = ["dep:async-compression", "dep:alloy-rlp"] +file-client = ["dep:async-compression", "dep:alloy-rlp", "dep:itertools"] test-utils = [ "tempfile", "reth-consensus/test-utils", diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 153f269fe4..5d6bd3cf7f 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -21,7 +21,6 @@ use std::{ cmp::Ordering, collections::BinaryHeap, fmt::Debug, - mem, ops::RangeInclusive, pin::Pin, sync::Arc, @@ -215,9 +214,7 @@ where /// 
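// Minimal standalone sketch of the resync decision implemented by the rewritten
// `update_root` above: compare the enr/link root hashes and decide which parts
// of the DNS tree need to be re-fetched. The enum and function names here are
// illustrative, not the real types.
#[derive(Debug, PartialEq)]
enum Resync {
    None,
    EnrOnly,
    LinkOnly,
    Full,
}

fn resync_needed(enr_unchanged: bool, link_unchanged: bool) -> Resync {
    match (enr_unchanged, link_unchanged) {
        (true, true) => Resync::None,
        (false, true) => Resync::EnrOnly,
        (true, false) => Resync::LinkOnly,
        (false, false) => Resync::Full,
    }
}

fn main() {
    assert_eq!(resync_needed(true, true), Resync::None);
    assert_eq!(resync_needed(false, true), Resync::EnrOnly);
    assert_eq!(resync_needed(false, false), Resync::Full);
}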
Adds a new response to the internal buffer fn buffer_bodies_response(&mut self, response: Vec>) { - // take into account capacity - let size = response.iter().map(BlockResponse::size).sum::() + - response.capacity() * mem::size_of::>(); + let size = response.iter().map(BlockResponse::size).sum::(); let response = OrderedBodiesResponse { resp: response, size }; let response_len = response.len(); diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index de3d8f8f1f..4d545aec17 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -481,18 +481,16 @@ impl FileReader { chunk: &mut Vec, chunk_byte_len: u64, ) -> Result { + let mut buffer = vec![0u8; 64 * 1024]; loop { if chunk.len() >= chunk_byte_len as usize { return Ok(true) } - let mut buffer = vec![0u8; 64 * 1024]; - match self.read(&mut buffer).await { Ok(0) => return Ok(!chunk.is_empty()), Ok(n) => { - buffer.truncate(n); - chunk.extend_from_slice(&buffer); + chunk.extend_from_slice(&buffer[..n]); } Err(e) => return Err(e.into()), } diff --git a/crates/net/downloaders/src/lib.rs b/crates/net/downloaders/src/lib.rs index 90d9709ebe..ed0a65ba95 100644 --- a/crates/net/downloaders/src/lib.rs +++ b/crates/net/downloaders/src/lib.rs @@ -13,6 +13,9 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg))] +#[cfg(any(test, feature = "test-utils"))] +use tempfile as _; + /// The collection of algorithms for downloading block bodies. pub mod bodies; diff --git a/crates/net/downloaders/src/test_utils/mod.rs b/crates/net/downloaders/src/test_utils/mod.rs index d945573b93..b3abf6ffd2 100644 --- a/crates/net/downloaders/src/test_utils/mod.rs +++ b/crates/net/downloaders/src/test_utils/mod.rs @@ -5,12 +5,9 @@ #[cfg(any(test, feature = "file-client"))] use crate::{bodies::test_utils::create_raw_bodies, file_codec::BlockFileCodec}; use alloy_primitives::B256; -use futures::SinkExt; use reth_ethereum_primitives::BlockBody; use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams}; -use std::{collections::HashMap, io::SeekFrom, ops::RangeInclusive}; -use tokio::{fs::File, io::AsyncSeekExt}; -use tokio_util::codec::FramedWrite; +use std::{collections::HashMap, ops::RangeInclusive}; mod bodies_client; pub use bodies_client::TestBodiesClient; @@ -42,6 +39,11 @@ pub(crate) fn generate_bodies( pub(crate) async fn generate_bodies_file( range: RangeInclusive, ) -> (tokio::fs::File, Vec, HashMap) { + use futures::SinkExt; + use std::io::SeekFrom; + use tokio::{fs::File, io::AsyncSeekExt}; + use tokio_util::codec::FramedWrite; + let (headers, bodies) = generate_bodies(range); let raw_block_bodies = create_raw_bodies(headers.iter().cloned(), &mut bodies.clone()); diff --git a/crates/net/ecies/Cargo.toml b/crates/net/ecies/Cargo.toml index a55e5fa7e8..75a4bc7897 100644 --- a/crates/net/ecies/Cargo.toml +++ b/crates/net/ecies/Cargo.toml @@ -25,9 +25,6 @@ pin-project.workspace = true tracing = { workspace = true, features = ["attributes"] } -# HeaderBytes -generic-array.workspace = true -typenum.workspace = true byteorder.workspace = true # crypto @@ -42,3 +39,6 @@ aes.workspace = true hmac.workspace = true block-padding.workspace = true cipher = { workspace = true, features = ["block-padding"] } + +[dev-dependencies] +tokio = { workspace = true, features = ["net", "rt", "macros"] } diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index dae5e50169..8d31fc9e10 100644 --- 
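// Standalone sketch (not the actual `FileReader`) of the buffer-reuse pattern
// applied in `file_client.rs` above: allocate the scratch buffer once outside
// the read loop and copy only the `n` bytes actually read, instead of
// reallocating and truncating a fresh buffer on every iteration.
use tokio::io::{AsyncRead, AsyncReadExt};

async fn fill_chunk<R: AsyncRead + Unpin>(
    reader: &mut R,
    chunk: &mut Vec<u8>,
    target_len: usize,
) -> std::io::Result<bool> {
    let mut buffer = vec![0u8; 64 * 1024];
    loop {
        if chunk.len() >= target_len {
            return Ok(true);
        }
        match reader.read(&mut buffer).await {
            // EOF: report whether anything was read at all.
            Ok(0) => return Ok(!chunk.is_empty()),
            Ok(n) => chunk.extend_from_slice(&buffer[..n]),
            Err(e) => return Err(e),
        }
    }
}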
a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -2,7 +2,7 @@ use crate::{ error::ECIESErrorImpl, - mac::{HeaderBytes, MAC}, + mac::MAC, util::{hmac_sha256, sha256}, ECIESError, }; @@ -312,7 +312,6 @@ impl ECIES { /// Create a new ECIES client with the given static secret key and remote peer ID. pub fn new_client(secret_key: SecretKey, remote_id: PeerId) -> Result { - // TODO(rand): use rng for nonce let mut rng = rng(); let nonce = B256::random(); let ephemeral_secret_key = SecretKey::new(&mut rng); @@ -639,7 +638,6 @@ impl ECIES { header[..3].copy_from_slice(&buf[..3]); header[3..6].copy_from_slice(&[194, 128, 128]); - let mut header = HeaderBytes::from(header); self.egress_aes.as_mut().unwrap().apply_keystream(&mut header); self.egress_mac.as_mut().unwrap().update_header(&header); let tag = self.egress_mac.as_mut().unwrap().digest(); @@ -660,7 +658,7 @@ impl ECIES { } let (header_bytes, mac_bytes) = split_at_mut(data, 16)?; - let header = HeaderBytes::from_mut_slice(header_bytes); + let header: &mut [u8; 16] = header_bytes.try_into().unwrap(); let mac = B128::from_slice(&mac_bytes[..16]); self.ingress_mac.as_mut().unwrap().update_header(header); @@ -670,11 +668,11 @@ impl ECIES { } self.ingress_aes.as_mut().unwrap().apply_keystream(header); - if header.as_slice().len() < 3 { + if header.len() < 3 { return Err(ECIESErrorImpl::InvalidHeader.into()) } - let body_size = usize::try_from(header.as_slice().read_uint::(3)?)?; + let body_size = usize::try_from((&header[..]).read_uint::(3)?)?; self.body_size = Some(body_size); diff --git a/crates/net/ecies/src/mac.rs b/crates/net/ecies/src/mac.rs index 03847d091e..fcccae7267 100644 --- a/crates/net/ecies/src/mac.rs +++ b/crates/net/ecies/src/mac.rs @@ -14,16 +14,7 @@ use alloy_primitives::{B128, B256}; use block_padding::NoPadding; use cipher::BlockEncrypt; use digest::KeyInit; -use generic_array::GenericArray; use sha3::{Digest, Keccak256}; -use typenum::U16; - -/// Type alias for a fixed-size array of 16 bytes used as headers. -/// -/// This type is defined as [`GenericArray`] and is commonly employed in Ethereum `RLPx` -/// protocol-related structures for headers. It represents 16 bytes of data used in various -/// cryptographic operations, such as MAC (Message Authentication Code) computation. -pub type HeaderBytes = GenericArray; /// [`Ethereum MAC`](https://github.com/ethereum/devp2p/blob/master/rlpx.md#mac) state. /// @@ -49,8 +40,8 @@ impl MAC { self.hasher.update(data) } - /// Accumulate the given [`HeaderBytes`] into the MAC's internal state. - pub fn update_header(&mut self, data: &HeaderBytes) { + /// Accumulate the given header bytes into the MAC's internal state. 
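// Standalone sketch of the plain-array header handling that replaces the
// `GenericArray`-based `HeaderBytes` alias above: a 16-byte sub-slice converts
// to `&mut [u8; 16]` via `TryInto`, so the extra generic-array/typenum
// dependencies are no longer needed. The helper name is illustrative.
fn header_mut(frame: &mut [u8]) -> Option<&mut [u8; 16]> {
    if frame.len() < 16 {
        return None;
    }
    let (header, _rest) = frame.split_at_mut(16);
    header.try_into().ok()
}

fn main() {
    let mut frame = [0u8; 32];
    let header = header_mut(&mut frame).expect("frame long enough");
    header[0] = 0xde;
    assert_eq!(frame[0], 0xde);
}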
+ pub fn update_header(&mut self, data: &[u8; 16]) { let aes = Aes256Enc::new_from_slice(self.secret.as_ref()).unwrap(); let mut encrypted = self.digest().0; diff --git a/crates/net/ecies/src/stream.rs b/crates/net/ecies/src/stream.rs index d99422f512..adf4dc7634 100644 --- a/crates/net/ecies/src/stream.rs +++ b/crates/net/ecies/src/stream.rs @@ -67,8 +67,7 @@ where secret_key: SecretKey, remote_id: PeerId, ) -> Result { - let ecies = ECIESCodec::new_client(secret_key, remote_id) - .map_err(|_| io::Error::other("invalid handshake"))?; + let ecies = ECIESCodec::new_client(secret_key, remote_id)?; let mut transport = ecies.framed(transport); diff --git a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index 1900cf004a..9855f6f6cb 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -169,7 +169,7 @@ impl NewPooledTransactionHashes { matches!(version, EthVersion::Eth67 | EthVersion::Eth66) } Self::Eth68(_) => { - matches!(version, EthVersion::Eth68 | EthVersion::Eth69) + matches!(version, EthVersion::Eth68 | EthVersion::Eth69 | EthVersion::Eth70) } } } diff --git a/crates/net/eth-wire-types/src/capability.rs b/crates/net/eth-wire-types/src/capability.rs index 3f39bed606..d35e4c17ee 100644 --- a/crates/net/eth-wire-types/src/capability.rs +++ b/crates/net/eth-wire-types/src/capability.rs @@ -100,6 +100,16 @@ impl Capability { Self::eth(EthVersion::Eth68) } + /// Returns the [`EthVersion::Eth69`] capability. + pub const fn eth_69() -> Self { + Self::eth(EthVersion::Eth69) + } + + /// Returns the [`EthVersion::Eth70`] capability. + pub const fn eth_70() -> Self { + Self::eth(EthVersion::Eth70) + } + /// Whether this is eth v66 protocol. #[inline] pub fn is_eth_v66(&self) -> bool { @@ -118,10 +128,26 @@ impl Capability { self.name == "eth" && self.version == 68 } + /// Whether this is eth v69. + #[inline] + pub fn is_eth_v69(&self) -> bool { + self.name == "eth" && self.version == 69 + } + + /// Whether this is eth v70. + #[inline] + pub fn is_eth_v70(&self) -> bool { + self.name == "eth" && self.version == 70 + } + /// Whether this is any eth version. #[inline] pub fn is_eth(&self) -> bool { - self.is_eth_v66() || self.is_eth_v67() || self.is_eth_v68() + self.is_eth_v66() || + self.is_eth_v67() || + self.is_eth_v68() || + self.is_eth_v69() || + self.is_eth_v70() } } @@ -141,7 +167,7 @@ impl From for Capability { #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for Capability { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let version = u.int_in_range(66..=69)?; // Valid eth protocol versions are 66-69 + let version = u.int_in_range(66..=70)?; // Valid eth protocol versions are 66-70 // Only generate valid eth protocol name for now since it's the only supported protocol Ok(Self::new_static("eth", version)) } @@ -155,9 +181,22 @@ pub struct Capabilities { eth_66: bool, eth_67: bool, eth_68: bool, + eth_69: bool, + eth_70: bool, } impl Capabilities { + /// Create a new instance from the given vec. + pub fn new(value: Vec) -> Self { + Self { + eth_66: value.iter().any(Capability::is_eth_v66), + eth_67: value.iter().any(Capability::is_eth_v67), + eth_68: value.iter().any(Capability::is_eth_v68), + eth_69: value.iter().any(Capability::is_eth_v69), + eth_70: value.iter().any(Capability::is_eth_v70), + inner: value, + } + } /// Returns all capabilities. 
#[inline] pub fn capabilities(&self) -> &[Capability] { @@ -173,7 +212,7 @@ impl Capabilities { /// Whether the peer supports `eth` sub-protocol. #[inline] pub const fn supports_eth(&self) -> bool { - self.eth_68 || self.eth_67 || self.eth_66 + self.eth_70 || self.eth_69 || self.eth_68 || self.eth_67 || self.eth_66 } /// Whether this peer supports eth v66 protocol. @@ -193,16 +232,23 @@ impl Capabilities { pub const fn supports_eth_v68(&self) -> bool { self.eth_68 } + + /// Whether this peer supports eth v69 protocol. + #[inline] + pub const fn supports_eth_v69(&self) -> bool { + self.eth_69 + } + + /// Whether this peer supports eth v70 protocol. + #[inline] + pub const fn supports_eth_v70(&self) -> bool { + self.eth_70 + } } impl From> for Capabilities { fn from(value: Vec) -> Self { - Self { - eth_66: value.iter().any(Capability::is_eth_v66), - eth_67: value.iter().any(Capability::is_eth_v67), - eth_68: value.iter().any(Capability::is_eth_v68), - inner: value, - } + Self::new(value) } } @@ -220,6 +266,8 @@ impl Decodable for Capabilities { eth_66: inner.iter().any(Capability::is_eth_v66), eth_67: inner.iter().any(Capability::is_eth_v67), eth_68: inner.iter().any(Capability::is_eth_v68), + eth_69: inner.iter().any(Capability::is_eth_v69), + eth_70: inner.iter().any(Capability::is_eth_v70), inner, }) } diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index 5f36115204..5d29d960bf 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -1,4 +1,4 @@ -//! Implements Ethereum wire protocol for versions 66, 67, and 68. +//! Implements Ethereum wire protocol for versions 66 through 70. //! Defines structs/enums for messages, request-response pairs, and broadcasts. //! Handles compatibility with [`EthVersion`]. //! @@ -8,13 +8,13 @@ use super::{ broadcast::NewBlockHashes, BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders, - GetNodeData, GetPooledTransactions, GetReceipts, NewPooledTransactionHashes66, + GetNodeData, GetPooledTransactions, GetReceipts, GetReceipts70, NewPooledTransactionHashes66, NewPooledTransactionHashes68, NodeData, PooledTransactions, Receipts, Status, StatusEth69, Transactions, }; use crate::{ status::StatusMessage, BlockRangeUpdate, EthNetworkPrimitives, EthVersion, NetworkPrimitives, - RawCapabilityMessage, Receipts69, SharedTransactions, + RawCapabilityMessage, Receipts69, Receipts70, SharedTransactions, }; use alloc::{boxed::Box, string::String, sync::Arc}; use alloy_primitives::{ @@ -111,13 +111,29 @@ impl ProtocolMessage { } EthMessage::NodeData(RequestPair::decode(buf)?) } - EthMessageID::GetReceipts => EthMessage::GetReceipts(RequestPair::decode(buf)?), - EthMessageID::Receipts => { - if version < EthVersion::Eth69 { - EthMessage::Receipts(RequestPair::decode(buf)?) + EthMessageID::GetReceipts => { + if version >= EthVersion::Eth70 { + EthMessage::GetReceipts70(RequestPair::decode(buf)?) } else { - // with eth69, receipts no longer include the bloom - EthMessage::Receipts69(RequestPair::decode(buf)?) + EthMessage::GetReceipts(RequestPair::decode(buf)?) + } + } + EthMessageID::Receipts => { + match version { + v if v >= EthVersion::Eth70 => { + // eth/70 continues to omit bloom filters and adds the + // `lastBlockIncomplete` flag, encoded as + // `[request-id, lastBlockIncomplete, [[receipt₁, receipt₂], ...]]`. + EthMessage::Receipts70(RequestPair::decode(buf)?) 
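// Illustrative check (assumed calling context, import paths assumed) using the
// new `Capabilities::new` constructor and the eth/69 and eth/70 accessors added
// above. Only the constructor and `supports_eth_v70` come from this change.
use reth_eth_wire_types::{Capabilities, Capability};

fn peer_speaks_eth70(shared: &[Capability]) -> bool {
    let caps = Capabilities::new(shared.to_vec());
    caps.supports_eth_v70()
}

fn main() {
    assert!(peer_speaks_eth70(&[Capability::eth_70()]));
    assert!(!peer_speaks_eth70(&[Capability::eth_68(), Capability::eth_69()]));
}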
+ } + EthVersion::Eth69 => { + // with eth69, receipts no longer include the bloom + EthMessage::Receipts69(RequestPair::decode(buf)?) + } + _ => { + // before eth69 we need to decode the bloom as well + EthMessage::Receipts(RequestPair::decode(buf)?) + } } } EthMessageID::BlockRangeUpdate => { @@ -205,6 +221,9 @@ impl From> for ProtocolBroadcastMes /// /// The `eth/69` announces the historical block range served by the node. Removes total difficulty /// information. And removes the Bloom field from receipts transferred over the protocol. +/// +/// The `eth/70` (EIP-7975) keeps the eth/69 status format and introduces partial receipts. +/// requests/responses. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum EthMessage { @@ -259,6 +278,12 @@ pub enum EthMessage { NodeData(RequestPair), /// Represents a `GetReceipts` request-response pair. GetReceipts(RequestPair), + /// Represents a `GetReceipts` request for eth/70. + /// + /// Note: Unlike earlier protocol versions, the eth/70 encoding for + /// `GetReceipts` in EIP-7975 inlines the request id. The type still wraps + /// a [`RequestPair`], but with a custom inline encoding. + GetReceipts70(RequestPair), /// Represents a Receipts request-response pair. #[cfg_attr( feature = "serde", @@ -271,6 +296,16 @@ pub enum EthMessage { serde(bound = "N::Receipt: serde::Serialize + serde::de::DeserializeOwned") )] Receipts69(RequestPair>), + /// Represents a Receipts request-response pair for eth/70. + #[cfg_attr( + feature = "serde", + serde(bound = "N::Receipt: serde::Serialize + serde::de::DeserializeOwned") + )] + /// + /// Note: The eth/70 encoding for `Receipts` in EIP-7975 inlines the + /// request id. The type still wraps a [`RequestPair`], but with a custom + /// inline encoding. + Receipts70(RequestPair>), /// Represents a `BlockRangeUpdate` message broadcast to the network. #[cfg_attr( feature = "serde", @@ -300,8 +335,8 @@ impl EthMessage { Self::PooledTransactions(_) => EthMessageID::PooledTransactions, Self::GetNodeData(_) => EthMessageID::GetNodeData, Self::NodeData(_) => EthMessageID::NodeData, - Self::GetReceipts(_) => EthMessageID::GetReceipts, - Self::Receipts(_) | Self::Receipts69(_) => EthMessageID::Receipts, + Self::GetReceipts(_) | Self::GetReceipts70(_) => EthMessageID::GetReceipts, + Self::Receipts(_) | Self::Receipts69(_) | Self::Receipts70(_) => EthMessageID::Receipts, Self::BlockRangeUpdate(_) => EthMessageID::BlockRangeUpdate, Self::Other(msg) => EthMessageID::Other(msg.id as u8), } @@ -314,6 +349,7 @@ impl EthMessage { Self::GetBlockBodies(_) | Self::GetBlockHeaders(_) | Self::GetReceipts(_) | + Self::GetReceipts70(_) | Self::GetPooledTransactions(_) | Self::GetNodeData(_) ) @@ -326,11 +362,40 @@ impl EthMessage { Self::PooledTransactions(_) | Self::Receipts(_) | Self::Receipts69(_) | + Self::Receipts70(_) | Self::BlockHeaders(_) | Self::BlockBodies(_) | Self::NodeData(_) ) } + + /// Converts the message types where applicable. + /// + /// This handles up/downcasting where appropriate, for example for different receipt request + /// types. + pub fn map_versioned(self, version: EthVersion) -> Self { + // For eth/70 peers we send `GetReceipts` using the new eth/70 + // encoding with `firstBlockReceiptIndex = 0`, while keeping the + // user-facing `PeerRequest` API unchanged. 
+ if version >= EthVersion::Eth70 { + return match self { + Self::GetReceipts(pair) => { + let RequestPair { request_id, message } = pair; + let req = RequestPair { + request_id, + message: GetReceipts70 { + first_block_receipt_index: 0, + block_hashes: message.0, + }, + }; + Self::GetReceipts70(req) + } + other => other, + } + } + + self + } } impl Encodable for EthMessage { @@ -351,8 +416,10 @@ impl Encodable for EthMessage { Self::GetNodeData(request) => request.encode(out), Self::NodeData(data) => data.encode(out), Self::GetReceipts(request) => request.encode(out), + Self::GetReceipts70(request) => request.encode(out), Self::Receipts(receipts) => receipts.encode(out), Self::Receipts69(receipt69) => receipt69.encode(out), + Self::Receipts70(receipt70) => receipt70.encode(out), Self::BlockRangeUpdate(block_range_update) => block_range_update.encode(out), Self::Other(unknown) => out.put_slice(&unknown.payload), } @@ -374,8 +441,10 @@ impl Encodable for EthMessage { Self::GetNodeData(request) => request.length(), Self::NodeData(data) => data.length(), Self::GetReceipts(request) => request.length(), + Self::GetReceipts70(request) => request.length(), Self::Receipts(receipts) => receipts.length(), Self::Receipts69(receipt69) => receipt69.length(), + Self::Receipts70(receipt70) => receipt70.length(), Self::BlockRangeUpdate(block_range_update) => block_range_update.length(), Self::Other(unknown) => unknown.length(), } diff --git a/crates/net/eth-wire-types/src/receipts.rs b/crates/net/eth-wire-types/src/receipts.rs index 416797c50e..3ddd936e04 100644 --- a/crates/net/eth-wire-types/src/receipts.rs +++ b/crates/net/eth-wire-types/src/receipts.rs @@ -17,6 +17,42 @@ pub struct GetReceipts( pub Vec, ); +/// Eth/70 `GetReceipts` request payload that supports partial receipt queries. +/// +/// When used with eth/70, the request id is carried by the surrounding +/// [`crate::message::RequestPair`], and the on-wire shape is the flattened list +/// `firstBlockReceiptIndex, [blockhash₁, ...]`. +/// +/// See also [eip-7975](https://eips.ethereum.org/EIPS/eip-7975) +#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] +pub struct GetReceipts70 { + /// Index into the receipts of the first requested block hash. + pub first_block_receipt_index: u64, + /// The block hashes to request receipts for. + pub block_hashes: Vec, +} + +impl alloy_rlp::Encodable for GetReceipts70 { + fn encode(&self, out: &mut dyn alloy_rlp::BufMut) { + self.first_block_receipt_index.encode(out); + self.block_hashes.encode(out); + } + + fn length(&self) -> usize { + self.first_block_receipt_index.length() + self.block_hashes.length() + } +} + +impl alloy_rlp::Decodable for GetReceipts70 { + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let first_block_receipt_index = u64::decode(buf)?; + let block_hashes = Vec::::decode(buf)?; + Ok(Self { first_block_receipt_index, block_hashes }) + } +} + /// The response to [`GetReceipts`], containing receipt lists that correspond to each block /// requested. #[derive(Clone, Debug, PartialEq, Eq, Default)] @@ -49,34 +85,22 @@ impl alloy_rlp::Decodable for Receipts { /// Eth/69 receipt response type that removes bloom filters from the protocol. /// /// This is effectively a subset of [`Receipts`]. 
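// Illustrative construction (not from the diff; import path assumed) of an
// eth/70 `GetReceipts70` follow-up request: after a response arrives with
// `lastBlockIncomplete = true` and, say, 120 receipts delivered for the last
// block hash, the requester re-asks for that block starting at the first
// receipt it does not have yet.
use alloy_primitives::B256;
use reth_eth_wire_types::GetReceipts70;

fn resume_request(incomplete_block: B256, receipts_already_received: u64) -> GetReceipts70 {
    GetReceipts70 {
        // Index of the first receipt still missing for `incomplete_block`.
        first_block_receipt_index: receipts_already_received,
        block_hashes: vec![incomplete_block],
    }
}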
-#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(rlp)] pub struct Receipts69(pub Vec>); -impl alloy_rlp::Encodable for Receipts69 { - #[inline] - fn encode(&self, out: &mut dyn alloy_rlp::BufMut) { - self.0.encode(out) - } - #[inline] - fn length(&self) -> usize { - self.0.length() - } -} - -impl alloy_rlp::Decodable for Receipts69 { - #[inline] - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - alloy_rlp::Decodable::decode(buf).map(Self) - } -} - impl Receipts69 { /// Encodes all receipts with the bloom filter. /// - /// Note: This is an expensive operation that recalculates the bloom for each receipt. + /// Eth/69 omits bloom filters on the wire, while some internal callers + /// (and legacy APIs) still operate on [`Receipts`] with + /// [`ReceiptWithBloom`]. This helper reconstructs the bloom locally from + /// each receipt's logs so the older API can be used on top of eth/69 data. + /// + /// Note: This is an expensive operation that recalculates the bloom for + /// every receipt. pub fn into_with_bloom(self) -> Receipts { Receipts( self.0 @@ -93,6 +117,68 @@ impl From> for Receipts { } } +/// Eth/70 `Receipts` response payload. +/// +/// This is used in conjunction with [`crate::message::RequestPair`] to encode the full wire +/// message `[request-id, lastBlockIncomplete, [[receipt₁, receipt₂], ...]]`. +#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] +pub struct Receipts70 { + /// Whether the receipts list for the last block is incomplete. + pub last_block_incomplete: bool, + /// Receipts grouped by block. + pub receipts: Vec>, +} + +impl alloy_rlp::Encodable for Receipts70 +where + T: alloy_rlp::Encodable, +{ + fn encode(&self, out: &mut dyn alloy_rlp::BufMut) { + self.last_block_incomplete.encode(out); + self.receipts.encode(out); + } + + fn length(&self) -> usize { + self.last_block_incomplete.length() + self.receipts.length() + } +} + +impl alloy_rlp::Decodable for Receipts70 +where + T: alloy_rlp::Decodable, +{ + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let last_block_incomplete = bool::decode(buf)?; + let receipts = Vec::>::decode(buf)?; + Ok(Self { last_block_incomplete, receipts }) + } +} + +impl Receipts70 { + /// Encodes all receipts with the bloom filter. + /// + /// Just like eth/69, eth/70 does not transmit bloom filters over the wire. + /// When higher layers still expect the older bloom-bearing [`Receipts`] + /// type, this helper converts the eth/70 payload into that shape by + /// recomputing the bloom locally from the contained receipts. + /// + /// Note: This is an expensive operation that recalculates the bloom for + /// every receipt. + pub fn into_with_bloom(self) -> Receipts { + // Reuse the eth/69 helper, since both variants carry the same + // receipt list shape (only eth/70 adds request metadata). 
+ Receipts69(self.receipts).into_with_bloom() + } +} + +impl From> for Receipts { + fn from(receipts: Receipts70) -> Self { + receipts.into_with_bloom() + } +} + #[cfg(test)] mod tests { use super::*; @@ -224,4 +310,89 @@ mod tests { } ); } + + #[test] + fn decode_receipts_69() { + let data = hex!("0xf9026605f90262f9025fc60201826590c0c7800183013cd9c0c702018301a2a5c0c7010183027a36c0c702018302e03ec0c7010183034646c0c702018303ac30c0c78001830483b8c0c702018304e9a2c0c780018305c17fc0c7020183062769c0c7800183068d71c0c702018306f35bc0c702018307cb77c0c701018308a382c0c7020183097ab6c0c78080830b0156c0c70101830b6740c0c70201830bcd48c0c70101830c32f6c0c70101830c98e0c0c70201830cfecac0c70201830d64b4c0c70280830dca9ec0c70101830e30a6c0c70201830f080dc0c70201830f6e15c0c78080830fd41dc0c702018310abbac0c701018310fdc2c0c7020183116370c0c780018311c95ac0c7010183122f44c0c701808312952ec0c7020183136c7dc0c70201831443c0c0c702018314a9c8c0c7020183150f94c0c7018083169634c0c7020183176d68c0c702808317d370c0c70201831838c4c0c701808319bf64c0c70201831a256cc0c78080831bac0cc0c70201831c11d8c0c70201831c77c2c0c78080831cdd34c0c70201831db57bc0c70101831e8d07c0c70101831ef2d3c0c70201831fcb37c0c70180832030e5c0c70201832096cfc0c701018320fcb9c0c70201832162c1c0c702018321c8abc0c7020183229ffac0c70201832305c6c0c7028083236bcec0c702808323d1d6c0c702018324a91cc0c7020183250f06c0c70201832574d2c0c7020183264c15c0c70201832723b6c0c70201832789a0c0c702018327ef8ac0c7020183285574c0c702018328bb40c0c702018329212ac0c7028083298714c0c70201832a5e4ec0c70201832ac438c0c70201832b9b72c0c70201832c017ac0"); + + let request = RequestPair::::decode(&mut &data[..]).unwrap(); + assert_eq!( + request.message.0[0][0], + Receipt { + tx_type: TxType::Eip1559, + success: true, + cumulative_gas_used: 26000, + logs: vec![], + } + ); + + let encoded = alloy_rlp::encode(&request); + assert_eq!(encoded, data); + } + + #[test] + fn encode_get_receipts70_inline_shape() { + let req = RequestPair { + request_id: 1111, + message: GetReceipts70 { + first_block_receipt_index: 0, + block_hashes: vec![ + hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), + hex!("00000000000000000000000000000000000000000000000000000000feedbeef").into(), + ], + }, + }; + + let mut out = vec![]; + req.encode(&mut out); + + let mut buf = out.as_slice(); + let header = alloy_rlp::Header::decode(&mut buf).unwrap(); + let payload_start = buf.len(); + let request_id = u64::decode(&mut buf).unwrap(); + let first_block_receipt_index = u64::decode(&mut buf).unwrap(); + let block_hashes = Vec::::decode(&mut buf).unwrap(); + + assert!(buf.is_empty(), "buffer not fully consumed"); + assert_eq!(request_id, 1111); + assert_eq!(first_block_receipt_index, 0); + assert_eq!(block_hashes.len(), 2); + // ensure payload length matches header + assert_eq!(payload_start - buf.len(), header.payload_length); + + let mut buf = out.as_slice(); + let decoded = RequestPair::::decode(&mut buf).unwrap(); + assert!(buf.is_empty(), "buffer not fully consumed on decode"); + assert_eq!(decoded, req); + } + + #[test] + fn encode_receipts70_inline_shape() { + let payload: Receipts70 = + Receipts70 { last_block_incomplete: true, receipts: vec![vec![Receipt::default()]] }; + + let resp = RequestPair { request_id: 7, message: payload }; + + let mut out = vec![]; + resp.encode(&mut out); + + let mut buf = out.as_slice(); + let header = alloy_rlp::Header::decode(&mut buf).unwrap(); + let payload_start = buf.len(); + let request_id = u64::decode(&mut buf).unwrap(); + let last_block_incomplete = bool::decode(&mut buf).unwrap(); + let 
receipts = Vec::>::decode(&mut buf).unwrap(); + + assert!(buf.is_empty(), "buffer not fully consumed"); + assert_eq!(payload_start - buf.len(), header.payload_length); + assert_eq!(request_id, 7); + assert!(last_block_incomplete); + assert_eq!(receipts.len(), 1); + assert_eq!(receipts[0].len(), 1); + + let mut buf = out.as_slice(); + let decoded = RequestPair::::decode(&mut buf).unwrap(); + assert!(buf.is_empty(), "buffer not fully consumed on decode"); + assert_eq!(decoded, resp); + } } diff --git a/crates/net/eth-wire-types/src/status.rs b/crates/net/eth-wire-types/src/status.rs index db363695c3..3a9a28e447 100644 --- a/crates/net/eth-wire-types/src/status.rs +++ b/crates/net/eth-wire-types/src/status.rs @@ -13,7 +13,7 @@ use reth_codecs_derive::add_arbitrary_tests; /// unsupported fields are stripped out. #[derive(Clone, Debug, PartialEq, Eq, Copy)] pub struct UnifiedStatus { - /// The eth protocol version (e.g. eth/66 to eth/69). + /// The eth protocol version (e.g. eth/66 to eth/70). pub version: EthVersion, /// The chain ID identifying the peer’s network. pub chain: Chain, @@ -157,7 +157,7 @@ impl StatusBuilder { self.status } - /// Sets the eth protocol version (e.g., eth/66, eth/69). + /// Sets the eth protocol version (e.g., eth/66, eth/70). pub const fn version(mut self, version: EthVersion) -> Self { self.status.version = version; self @@ -365,21 +365,21 @@ impl Debug for StatusEth69 { if f.alternate() { write!( f, - "Status {{\n\tversion: {:?},\n\tchain: {:?},\n\tblockhash: {},\n\tgenesis: {},\n\tforkid: {:X?}\n}}", - self.version, self.chain, hexed_blockhash, hexed_genesis, self.forkid + "StatusEth69 {{\n\tversion: {:?},\n\tchain: {:?},\n\tgenesis: {},\n\tforkid: {:X?},\n\tearliest: {},\n\tlatest: {},\n\tblockhash: {}\n}}", + self.version, self.chain, hexed_genesis, self.forkid, self.earliest, self.latest, hexed_blockhash ) } else { write!( f, - "Status {{ version: {:?}, chain: {:?}, blockhash: {}, genesis: {}, forkid: {:X?} }}", - self.version, self.chain, hexed_blockhash, hexed_genesis, self.forkid + "StatusEth69 {{ version: {:?}, chain: {:?}, genesis: {}, forkid: {:X?}, earliest: {}, latest: {}, blockhash: {} }}", + self.version, self.chain, hexed_genesis, self.forkid, self.earliest, self.latest, hexed_blockhash ) } } } -/// `StatusMessage` can store either the Legacy version (with TD) or the -/// eth/69 version (omits TD). +/// `StatusMessage` can store either the Legacy version (with TD), or the eth/69+/eth/70 version +/// (omits TD, includes block range). 
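// Purely illustrative (no such helper exists in the diff): what the
// `last_block_incomplete` flag carried by `Receipts70` above expresses from the
// responder's side. Receipts are stand-in numbers here and the budget is an
// assumed soft limit; the real serving policy lives elsewhere in the stack.
fn cap_response(per_block: &[Vec<u32>], budget: usize) -> (Vec<Vec<u32>>, bool) {
    let mut out = Vec::new();
    let mut remaining = budget;
    for block in per_block {
        if remaining == 0 {
            break;
        }
        if block.len() <= remaining {
            remaining -= block.len();
            out.push(block.clone());
        } else {
            // Budget exhausted mid-block: ship a prefix and tell the requester
            // that the last block's receipt list is incomplete.
            out.push(block[..remaining].to_vec());
            return (out, true);
        }
    }
    (out, false)
}

fn main() {
    let blocks = vec![vec![1, 2, 3], vec![4, 5, 6, 7]];
    let (resp, incomplete) = cap_response(&blocks, 5);
    assert_eq!(resp, vec![vec![1, 2, 3], vec![4, 5]]);
    assert!(incomplete);
}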
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum StatusMessage { @@ -546,6 +546,24 @@ mod tests { assert_eq!(unified_status, roundtripped_unified_status); } + #[test] + fn roundtrip_eth70() { + let unified_status = UnifiedStatus::builder() + .version(EthVersion::Eth70) + .chain(Chain::mainnet()) + .genesis(MAINNET_GENESIS_HASH) + .forkid(ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 0 }) + .blockhash(b256!("0xfeb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13d")) + .total_difficulty(None) + .earliest_block(Some(1)) + .latest_block(Some(2)) + .build(); + + let status_message = unified_status.into_message(); + let roundtripped_unified_status = UnifiedStatus::from_message(status_message); + assert_eq!(unified_status, roundtripped_unified_status); + } + #[test] fn encode_eth69_status_message() { let expected = hex!("f8544501a0d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3c684b715077d8083ed14f2840112a880a0feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13d"); diff --git a/crates/net/eth-wire-types/src/version.rs b/crates/net/eth-wire-types/src/version.rs index 8b2e3a424d..6553bd2e41 100644 --- a/crates/net/eth-wire-types/src/version.rs +++ b/crates/net/eth-wire-types/src/version.rs @@ -27,6 +27,8 @@ pub enum EthVersion { Eth68 = 68, /// The `eth` protocol version 69. Eth69 = 69, + /// The `eth` protocol version 70. + Eth70 = 70, } impl EthVersion { @@ -55,6 +57,11 @@ impl EthVersion { pub const fn is_eth69(&self) -> bool { matches!(self, Self::Eth69) } + + /// Returns true if the version is eth/70 + pub const fn is_eth70(&self) -> bool { + matches!(self, Self::Eth70) + } } /// RLP encodes `EthVersion` as a single byte (66-69). 
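// A minimal illustrative sketch of the conversions extended for eth/70 in the surrounding hunks;
// the helper name is made up, and it relies only on `EthVersion` behavior exercised by the tests
// below (`"70".parse()`, `is_eth70`, and `From<EthVersion> for &'static str`).
fn eth70_conversions_sketch() {
    // "70" now parses into the new variant ...
    let v: EthVersion = "70".parse().expect("eth/70 is a supported version string");
    assert!(v.is_eth70());
    // ... and converts back to the same string representation.
    let s: &'static str = v.into();
    assert_eq!(s, "70");
}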
@@ -96,6 +103,7 @@ impl TryFrom<&str> for EthVersion { "67" => Ok(Self::Eth67), "68" => Ok(Self::Eth68), "69" => Ok(Self::Eth69), + "70" => Ok(Self::Eth70), _ => Err(ParseVersionError(s.to_string())), } } @@ -120,6 +128,7 @@ impl TryFrom for EthVersion { 67 => Ok(Self::Eth67), 68 => Ok(Self::Eth68), 69 => Ok(Self::Eth69), + 70 => Ok(Self::Eth70), _ => Err(ParseVersionError(u.to_string())), } } @@ -149,6 +158,7 @@ impl From for &'static str { EthVersion::Eth67 => "67", EthVersion::Eth68 => "68", EthVersion::Eth69 => "69", + EthVersion::Eth70 => "70", } } } @@ -195,7 +205,7 @@ impl Decodable for ProtocolVersion { #[cfg(test)] mod tests { - use super::{EthVersion, ParseVersionError}; + use super::EthVersion; use alloy_rlp::{Decodable, Encodable, Error as RlpError}; use bytes::BytesMut; @@ -205,7 +215,7 @@ mod tests { assert_eq!(EthVersion::Eth67, EthVersion::try_from("67").unwrap()); assert_eq!(EthVersion::Eth68, EthVersion::try_from("68").unwrap()); assert_eq!(EthVersion::Eth69, EthVersion::try_from("69").unwrap()); - assert_eq!(Err(ParseVersionError("70".to_string())), EthVersion::try_from("70")); + assert_eq!(EthVersion::Eth70, EthVersion::try_from("70").unwrap()); } #[test] @@ -214,12 +224,18 @@ mod tests { assert_eq!(EthVersion::Eth67, "67".parse().unwrap()); assert_eq!(EthVersion::Eth68, "68".parse().unwrap()); assert_eq!(EthVersion::Eth69, "69".parse().unwrap()); - assert_eq!(Err(ParseVersionError("70".to_string())), "70".parse::()); + assert_eq!(EthVersion::Eth70, "70".parse().unwrap()); } #[test] fn test_eth_version_rlp_encode() { - let versions = [EthVersion::Eth66, EthVersion::Eth67, EthVersion::Eth68, EthVersion::Eth69]; + let versions = [ + EthVersion::Eth66, + EthVersion::Eth67, + EthVersion::Eth68, + EthVersion::Eth69, + EthVersion::Eth70, + ]; for version in versions { let mut encoded = BytesMut::new(); @@ -236,7 +252,7 @@ mod tests { (67_u8, Ok(EthVersion::Eth67)), (68_u8, Ok(EthVersion::Eth68)), (69_u8, Ok(EthVersion::Eth69)), - (70_u8, Err(RlpError::Custom("invalid eth version"))), + (70_u8, Ok(EthVersion::Eth70)), (65_u8, Err(RlpError::Custom("invalid eth version"))), ]; diff --git a/crates/net/eth-wire/src/capability.rs b/crates/net/eth-wire/src/capability.rs index 9b706a02cf..9691acb439 100644 --- a/crates/net/eth-wire/src/capability.rs +++ b/crates/net/eth-wire/src/capability.rs @@ -418,6 +418,8 @@ mod tests { Capability::new_static("eth", 66), Capability::new_static("eth", 67), Capability::new_static("eth", 68), + Capability::new_static("eth", 69), + Capability::new_static("eth", 70), ] .into(); @@ -425,6 +427,8 @@ mod tests { assert!(capabilities.supports_eth_v66()); assert!(capabilities.supports_eth_v67()); assert!(capabilities.supports_eth_v68()); + assert!(capabilities.supports_eth_v69()); + assert!(capabilities.supports_eth_v70()); } #[test] diff --git a/crates/net/eth-wire/src/hello.rs b/crates/net/eth-wire/src/hello.rs index 40deebb631..9cad7223a0 100644 --- a/crates/net/eth-wire/src/hello.rs +++ b/crates/net/eth-wire/src/hello.rs @@ -260,10 +260,11 @@ mod tests { assert_eq!(hello_encoded.len(), hello.length()); } + //TODO: add test for eth70 here once we have fully support it #[test] - fn test_default_protocols_include_eth69() { - // ensure that the default protocol list includes Eth69 as the latest version + fn test_default_protocols_still_include_eth69() { + // ensure that older eth/69 remains advertised for compatibility let secret_key = SecretKey::new(&mut rand_08::thread_rng()); let id = pk2id(&secret_key.public_key(SECP256K1)); let hello = 
HelloMessageWithProtocols::builder(id).build(); diff --git a/crates/net/eth-wire/src/p2pstream.rs b/crates/net/eth-wire/src/p2pstream.rs index e794795b1c..4c5e569394 100644 --- a/crates/net/eth-wire/src/p2pstream.rs +++ b/crates/net/eth-wire/src/p2pstream.rs @@ -101,8 +101,9 @@ where .or(Err(P2PStreamError::HandshakeError(P2PHandshakeError::Timeout)))? .ok_or(P2PStreamError::HandshakeError(P2PHandshakeError::NoResponse))??; - // let's check the compressed length first, we will need to check again once confirming - // that it contains snappy-compressed data (this will be the case for all non-p2p messages). + // Check that the uncompressed message length does not exceed the max payload size. + // Note: The first message (Hello/Disconnect) is not snappy compressed. We will check the + // decompressed length again for subsequent messages after the handshake. if first_message_bytes.len() > MAX_PAYLOAD_SIZE { return Err(P2PStreamError::MessageTooBig { message_size: first_message_bytes.len(), diff --git a/crates/net/nat/src/lib.rs b/crates/net/nat/src/lib.rs index e39889ae16..83b24f2ac5 100644 --- a/crates/net/nat/src/lib.rs +++ b/crates/net/nat/src/lib.rs @@ -19,7 +19,7 @@ pub use net_if::{NetInterfaceError, DEFAULT_NET_IF_NAME}; use std::{ fmt, future::{poll_fn, Future}, - net::{AddrParseError, IpAddr}, + net::{AddrParseError, IpAddr, ToSocketAddrs}, pin::Pin, str::FromStr, task::{Context, Poll}, @@ -38,7 +38,7 @@ const EXTERNAL_IP_APIS: &[&str] = &["https://ipinfo.io/ip", "https://icanhazip.com", "https://ifconfig.me"]; /// All builtin resolvers. -#[derive(Debug, Clone, Copy, Eq, PartialEq, Default, Hash)] +#[derive(Debug, Clone, Eq, PartialEq, Default, Hash)] #[cfg_attr(feature = "serde", derive(SerializeDisplay, DeserializeFromStr))] pub enum NatResolver { /// Resolve with any available resolver. @@ -50,6 +50,14 @@ pub enum NatResolver { PublicIp, /// Use the given [`IpAddr`] ExternalIp(IpAddr), + /// Use the given domain name as the external address to expose to peers. + /// This is behaving essentially the same as [`NatResolver::ExternalIp`], but supports domain + /// names. Domain names are resolved to IP addresses using the OS's resolver. The first IP + /// address found is used. + /// This may be useful in docker bridge networks where containers are usually queried by DNS + /// instead of direct IP addresses. + /// Note: the domain shouldn't include a port number. Only the IP address is resolved. + ExternalAddr(String), /// Resolve external IP via the network interface. NetIf, /// Resolve nothing @@ -62,10 +70,17 @@ impl NatResolver { external_addr_with(self).await } - /// Returns the external ip, if it is [`NatResolver::ExternalIp`] - pub const fn as_external_ip(self) -> Option { + /// Returns the fixed ip, if it is [`NatResolver::ExternalIp`] or [`NatResolver::ExternalAddr`]. + /// + /// In the case of [`NatResolver::ExternalAddr`], it will return the first IP address found for + /// the domain. 
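+    ///
+    /// Rough usage sketch (the host name is a placeholder):
+    /// `NatResolver::ExternalAddr("node.example.org".to_string()).as_external_ip(30303)` resolves
+    /// `"node.example.org:30303"` via [`ToSocketAddrs`] and returns the first resolved IP, or
+    /// `None` if resolution fails.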
+ pub fn as_external_ip(self, port: u16) -> Option { match self { Self::ExternalIp(ip) => Some(ip), + Self::ExternalAddr(domain) => format!("{domain}:{port}") + .to_socket_addrs() + .ok() + .and_then(|mut addrs| addrs.next().map(|addr| addr.ip())), _ => None, } } @@ -78,6 +93,7 @@ impl fmt::Display for NatResolver { Self::Upnp => f.write_str("upnp"), Self::PublicIp => f.write_str("publicip"), Self::ExternalIp(ip) => write!(f, "extip:{ip}"), + Self::ExternalAddr(domain) => write!(f, "extaddr:{domain}"), Self::NetIf => f.write_str("netif"), Self::None => f.write_str("none"), } @@ -106,12 +122,15 @@ impl FromStr for NatResolver { "publicip" | "public-ip" => Self::PublicIp, "netif" => Self::NetIf, s => { - let Some(ip) = s.strip_prefix("extip:") else { + if let Some(ip) = s.strip_prefix("extip:") { + Self::ExternalIp(ip.parse()?) + } else if let Some(domain) = s.strip_prefix("extaddr:") { + Self::ExternalAddr(domain.to_string()) + } else { return Err(ParseNatResolverError::UnknownVariant(format!( "Unknown Nat Resolver: {s}" - ))) - }; - Self::ExternalIp(ip.parse()?) + ))); + } } }; Ok(r) @@ -180,7 +199,7 @@ impl ResolveNatInterval { /// `None` if the attempt was unsuccessful. pub fn poll_tick(&mut self, cx: &mut Context<'_>) -> Poll> { if self.interval.poll_tick(cx).is_ready() { - self.future = Some(Box::pin(self.resolver.external_addr())); + self.future = Some(Box::pin(self.resolver.clone().external_addr())); } if let Some(mut fut) = self.future.take() { @@ -212,6 +231,9 @@ pub async fn external_addr_with(resolver: NatResolver) -> Option { ); }) .ok(), + NatResolver::ExternalAddr(domain) => { + domain.to_socket_addrs().ok().and_then(|mut addrs| addrs.next().map(|addr| addr.ip())) + } NatResolver::None => None, } } @@ -245,7 +267,7 @@ async fn resolve_external_ip_url(url: &str) -> Option { #[cfg(test)] mod tests { use super::*; - use std::net::Ipv4Addr; + use std::net::{Ipv4Addr, Ipv6Addr}; #[tokio::test] #[ignore] @@ -267,6 +289,18 @@ mod tests { dbg!(ip); } + #[test] + fn as_external_ip_test() { + let resolver = NatResolver::ExternalAddr("localhost".to_string()); + let ip = resolver.as_external_ip(30303).expect("localhost should be resolvable"); + + if ip.is_ipv4() { + assert_eq!(ip, IpAddr::V4(Ipv4Addr::LOCALHOST)); + } else { + assert_eq!(ip, IpAddr::V6(Ipv6Addr::LOCALHOST)); + } + } + #[test] fn test_from_str() { assert_eq!(NatResolver::Any, "any".parse().unwrap()); @@ -275,6 +309,6 @@ mod tests { let ip = NatResolver::ExternalIp(IpAddr::V4(Ipv4Addr::UNSPECIFIED)); let s = "extip:0.0.0.0"; assert_eq!(ip, s.parse().unwrap()); - assert_eq!(ip.to_string().as_str(), s); + assert_eq!(ip.to_string(), s); } } diff --git a/crates/net/network-api/src/events.rs b/crates/net/network-api/src/events.rs index 8a5c754149..44cd07aebb 100644 --- a/crates/net/network-api/src/events.rs +++ b/crates/net/network-api/src/events.rs @@ -3,8 +3,8 @@ use reth_eth_wire_types::{ message::RequestPair, BlockBodies, BlockHeaders, Capabilities, DisconnectReason, EthMessage, EthNetworkPrimitives, EthVersion, GetBlockBodies, GetBlockHeaders, GetNodeData, - GetPooledTransactions, GetReceipts, NetworkPrimitives, NodeData, PooledTransactions, Receipts, - Receipts69, UnifiedStatus, + GetPooledTransactions, GetReceipts, GetReceipts70, NetworkPrimitives, NodeData, + PooledTransactions, Receipts, Receipts69, Receipts70, UnifiedStatus, }; use reth_ethereum_forks::ForkId; use reth_network_p2p::error::{RequestError, RequestResult}; @@ -238,6 +238,15 @@ pub enum PeerRequest { /// The channel to send the response for receipts. 
response: oneshot::Sender>>, }, + /// Requests receipts from the peer using eth/70 (supports `firstBlockReceiptIndex`). + /// + /// The response should be sent through the channel. + GetReceipts70 { + /// The request for receipts. + request: GetReceipts70, + /// The channel to send the response for receipts. + response: oneshot::Sender>>, + }, } // === impl PeerRequest === @@ -257,6 +266,7 @@ impl PeerRequest { Self::GetNodeData { response, .. } => response.send(Err(err)).ok(), Self::GetReceipts { response, .. } => response.send(Err(err)).ok(), Self::GetReceipts69 { response, .. } => response.send(Err(err)).ok(), + Self::GetReceipts70 { response, .. } => response.send(Err(err)).ok(), }; } @@ -281,6 +291,9 @@ impl PeerRequest { Self::GetReceipts { request, .. } | Self::GetReceipts69 { request, .. } => { EthMessage::GetReceipts(RequestPair { request_id, message: request.clone() }) } + Self::GetReceipts70 { request, .. } => { + EthMessage::GetReceipts70(RequestPair { request_id, message: request.clone() }) + } } } diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs index 754463cb34..6117a99a30 100644 --- a/crates/net/network-api/src/lib.rs +++ b/crates/net/network-api/src/lib.rs @@ -112,11 +112,15 @@ pub trait PeersInfo: Send + Sync { #[auto_impl::auto_impl(&, Arc)] pub trait Peers: PeersInfo { /// Adds a peer to the peer set with TCP `SocketAddr`. + /// + /// If the peer already exists, then this will update its tracked info. fn add_peer(&self, peer: PeerId, tcp_addr: SocketAddr) { self.add_peer_kind(peer, PeerKind::Static, tcp_addr, None); } /// Adds a peer to the peer set with TCP and UDP `SocketAddr`. + /// + /// If the peer already exists, then this will update its tracked info. fn add_peer_with_udp(&self, peer: PeerId, tcp_addr: SocketAddr, udp_addr: SocketAddr) { self.add_peer_kind(peer, PeerKind::Static, tcp_addr, Some(udp_addr)); } @@ -137,6 +141,8 @@ pub trait Peers: PeersInfo { } /// Adds a peer to the known peer set, with the given kind. + /// + /// If the peer already exists, then this will update its tracked info. fn add_peer_kind( &self, peer: PeerId, diff --git a/crates/net/network-api/src/test_utils/peers_manager.rs b/crates/net/network-api/src/test_utils/peers_manager.rs index 44d9ba0243..f78cedf4c9 100644 --- a/crates/net/network-api/src/test_utils/peers_manager.rs +++ b/crates/net/network-api/src/test_utils/peers_manager.rs @@ -32,6 +32,9 @@ impl PeersHandle { } /// Adds a peer to the set. + /// + /// If the peer already exists, then this will update only the provided address, this is + /// equivalent to discovering a peer. pub fn add_peer(&self, peer_id: PeerId, addr: SocketAddr) { self.send(PeerCommand::Add(peer_id, addr)); } diff --git a/crates/net/network-types/src/peers/config.rs b/crates/net/network-types/src/peers/config.rs index 1fe685b0e8..29e4499b40 100644 --- a/crates/net/network-types/src/peers/config.rs +++ b/crates/net/network-types/src/peers/config.rs @@ -7,7 +7,7 @@ use std::{ time::Duration, }; -use reth_net_banlist::BanList; +use reth_net_banlist::{BanList, IpFilter}; use reth_network_peers::{NodeRecord, TrustedPeer}; use tracing::info; @@ -166,6 +166,12 @@ pub struct PeersConfig { /// This acts as an IP based rate limit. #[cfg_attr(feature = "serde", serde(default, with = "humantime_serde"))] pub incoming_ip_throttle_duration: Duration, + /// IP address filter for restricting network connections to specific IP ranges. + /// + /// Similar to geth's --netrestrict flag. 
If configured, only connections to/from + /// IPs within the specified CIDR ranges will be allowed. + #[cfg_attr(feature = "serde", serde(skip))] + pub ip_filter: IpFilter, } impl Default for PeersConfig { @@ -184,6 +190,7 @@ impl Default for PeersConfig { basic_nodes: Default::default(), max_backoff_count: 5, incoming_ip_throttle_duration: INBOUND_IP_THROTTLE_DURATION, + ip_filter: IpFilter::default(), } } } @@ -301,6 +308,12 @@ impl PeersConfig { Ok(self.with_basic_nodes(nodes)) } + /// Configure the IP filter for restricting network connections to specific IP ranges. + pub fn with_ip_filter(mut self, ip_filter: IpFilter) -> Self { + self.ip_filter = ip_filter; + self + } + /// Returns settings for testing #[cfg(any(test, feature = "test-utils"))] pub fn test() -> Self { diff --git a/crates/net/network-types/src/peers/mod.rs b/crates/net/network-types/src/peers/mod.rs index f352987501..d41882d494 100644 --- a/crates/net/network-types/src/peers/mod.rs +++ b/crates/net/network-types/src/peers/mod.rs @@ -25,7 +25,7 @@ pub struct Peer { /// The state of the connection, if any. pub state: PeerConnectionState, /// The [`ForkId`] that the peer announced via discovery. - pub fork_id: Option, + pub fork_id: Option>, /// Whether the entry should be removed after an existing session was terminated. pub remove_after_disconnect: bool, /// The kind of peer diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 54902ef478..cbe93a2386 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -66,6 +66,7 @@ tracing.workspace = true rustc-hash.workspace = true thiserror.workspace = true parking_lot.workspace = true +rayon.workspace = true rand.workspace = true rand_08.workspace = true secp256k1 = { workspace = true, features = ["global-context", "std", "recovery"] } diff --git a/crates/net/network/src/builder.rs b/crates/net/network/src/builder.rs index 3f36b1bdc8..97a342a869 100644 --- a/crates/net/network/src/builder.rs +++ b/crates/net/network/src/builder.rs @@ -1,7 +1,5 @@ //! Builder support for configuring the entire setup. -use std::fmt::Debug; - use crate::{ eth_requests::EthRequestHandler, transactions::{ @@ -77,15 +75,7 @@ impl NetworkBuilder { self, pool: Pool, transactions_manager_config: TransactionsManagerConfig, - ) -> NetworkBuilder< - TransactionsManager< - Pool, - N, - NetworkPolicies, - >, - Eth, - N, - > { + ) -> NetworkBuilder, Eth, N> { self.transactions_with_policy( pool, transactions_manager_config, @@ -94,19 +84,12 @@ impl NetworkBuilder { } /// Creates a new [`TransactionsManager`] and wires it to the network. - pub fn transactions_with_policy< - Pool: TransactionPool, - P: TransactionPropagationPolicy + Debug, - >( + pub fn transactions_with_policy( self, pool: Pool, transactions_manager_config: TransactionsManagerConfig, - propagation_policy: P, - ) -> NetworkBuilder< - TransactionsManager>, - Eth, - N, - > { + propagation_policy: impl TransactionPropagationPolicy, + ) -> NetworkBuilder, Eth, N> { let Self { mut network, request_handler, .. 
} = self; let (tx, rx) = mpsc::unbounded_channel(); network.set_transactions(tx); diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index c403bdcb55..93223fabcd 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -6,7 +6,7 @@ use crate::{ transactions::TransactionsManagerConfig, NetworkHandle, NetworkManager, }; -use alloy_primitives::B256; +use alloy_eips::BlockNumHash; use reth_chainspec::{ChainSpecProvider, EthChainSpec, Hardforks}; use reth_discv4::{Discv4Config, Discv4ConfigBuilder, NatResolver, DEFAULT_DISCOVERY_ADDRESS}; use reth_discv5::NetworkStackId; @@ -94,9 +94,9 @@ pub struct NetworkConfig { /// This can be overridden to support custom handshake logic via the /// [`NetworkConfigBuilder`]. pub handshake: Arc, - /// List of block hashes to check for required blocks. + /// List of block number-hash pairs to check for required blocks. /// If non-empty, peers that don't have these blocks will be filtered out. - pub required_block_hashes: Vec, + pub required_block_hashes: Vec, } // === impl NetworkConfig === @@ -225,7 +225,7 @@ pub struct NetworkConfigBuilder { /// . handshake: Arc, /// List of block hashes to check for required blocks. - required_block_hashes: Vec, + required_block_hashes: Vec, /// Optional network id network_id: Option, } @@ -433,7 +433,7 @@ impl NetworkConfigBuilder { pub fn external_ip_resolver(mut self, resolver: NatResolver) -> Self { self.discovery_v4_builder .get_or_insert_with(Discv4Config::builder) - .external_ip_resolver(Some(resolver)); + .external_ip_resolver(Some(resolver.clone())); self.nat = Some(resolver); self } @@ -484,7 +484,7 @@ impl NetworkConfigBuilder { } // Disable nat - pub const fn disable_nat(mut self) -> Self { + pub fn disable_nat(mut self) -> Self { self.nat = None; self } @@ -555,7 +555,7 @@ impl NetworkConfigBuilder { } /// Sets the required block hashes for peer filtering. - pub fn required_block_hashes(mut self, hashes: Vec) -> Self { + pub fn required_block_hashes(mut self, hashes: Vec) -> Self { self.required_block_hashes = hashes; self } @@ -579,7 +579,7 @@ impl NetworkConfigBuilder { } /// Sets the NAT resolver for external IP. - pub const fn add_nat(mut self, nat: Option) -> Self { + pub fn add_nat(mut self, nat: Option) -> Self { self.nat = nat; self } diff --git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index 6b95b1e3a6..3dc409e00b 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -25,7 +25,7 @@ use std::{ }; use tokio::{sync::mpsc, task::JoinHandle}; use tokio_stream::{wrappers::ReceiverStream, Stream}; -use tracing::trace; +use tracing::{debug, trace}; /// Default max capacity for cache of discovered peers. /// @@ -95,12 +95,15 @@ impl Discovery { // spawn the service let discv4_service = discv4_service.spawn(); + debug!(target:"net", ?discovery_v4_addr, "started discovery v4"); + Ok((Some(discv4), Some(discv4_updates), Some(discv4_service))) }; let discv5_future = async { let Some(config) = discv5_config else { return Ok::<_, NetworkError>((None, None)) }; - let (discv5, discv5_updates, _local_enr_discv5) = Discv5::start(&sk, config).await?; + let (discv5, discv5_updates) = Discv5::start(&sk, config).await?; + debug!(target:"net", discovery_v5_enr=? discv5.local_enr(), "started discovery v5"); Ok((Some(discv5), Some(discv5_updates.into()))) }; @@ -200,7 +203,6 @@ impl Discovery { } /// Add a node to the discv4 table. 
- #[expect(clippy::result_large_err)] pub(crate) fn add_discv5_node(&self, enr: Enr) -> Result<(), NetworkError> { if let Some(discv5) = &self.discv5 { discv5.add_node(enr).map_err(NetworkError::Discv5Error)?; diff --git a/crates/net/network/src/error.rs b/crates/net/network/src/error.rs index 96ba2ff85e..af3fbd1860 100644 --- a/crates/net/network/src/error.rs +++ b/crates/net/network/src/error.rs @@ -113,7 +113,22 @@ impl SessionError for EthStreamError { P2PHandshakeError::HelloNotInHandshake | P2PHandshakeError::NonHelloMessageInHandshake, )) => true, - Self::EthHandshakeError(err) => !matches!(err, EthHandshakeError::NoResponse), + Self::EthHandshakeError(err) => { + #[allow(clippy::match_same_arms)] + match err { + EthHandshakeError::NoResponse => { + // this happens when the conn simply stalled + false + } + EthHandshakeError::InvalidFork(_) => { + // this can occur when the remote or our node is running an outdated client, + // we shouldn't treat this as fatal, because the node can come back online + // with an updated version any time + false + } + _ => true, + } + } _ => false, } } @@ -144,7 +159,22 @@ impl SessionError for EthStreamError { P2PStreamError::MismatchedProtocolVersion { .. } ) } - Self::EthHandshakeError(err) => !matches!(err, EthHandshakeError::NoResponse), + Self::EthHandshakeError(err) => { + #[allow(clippy::match_same_arms)] + match err { + EthHandshakeError::NoResponse => { + // this happens when the conn simply stalled + false + } + EthHandshakeError::InvalidFork(_) => { + // this can occur when the remote or our node is running an outdated client, + // we shouldn't treat this as fatal, because the node can come back online + // with an updated version any time + false + } + _ => true, + } + } _ => false, } } @@ -196,6 +226,11 @@ impl SessionError for EthStreamError { P2PStreamError::PingerError(_) | P2PStreamError::Snap(_), ) => Some(BackoffKind::Medium), + Self::EthHandshakeError(EthHandshakeError::InvalidFork(_)) => { + // the remote can come back online after updating client version, so we can back off + // for a bit + Some(BackoffKind::Medium) + } _ => None, } } diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index 492bf8bd55..c110c5b11b 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -10,7 +10,8 @@ use alloy_rlp::Encodable; use futures::StreamExt; use reth_eth_wire::{ BlockBodies, BlockHeaders, EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, GetNodeData, - GetReceipts, HeadersDirection, NetworkPrimitives, NodeData, Receipts, Receipts69, + GetReceipts, GetReceipts70, HeadersDirection, NetworkPrimitives, NodeData, Receipts, + Receipts69, Receipts70, }; use reth_network_api::test_utils::PeersHandle; use reth_network_p2p::error::RequestResult; @@ -217,6 +218,69 @@ where let _ = response.send(Ok(Receipts69(receipts))); } + /// Handles partial responses for [`GetReceipts70`] queries. + /// + /// This will adhere to the soft limit but allow filling the last vec partially. 
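+    ///
+    /// Worked example (numbers are illustrative): for a request with
+    /// `first_block_receipt_index = 2` and two block hashes, the first two receipts of the first
+    /// block are skipped; if the soft byte limit is reached midway through a block, that block is
+    /// returned partially and `last_block_incomplete` is set to `true`.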
+ fn on_receipts70_request( + &self, + _peer_id: PeerId, + request: GetReceipts70, + response: oneshot::Sender>>, + ) { + self.metrics.eth_receipts_requests_received_total.increment(1); + + let GetReceipts70 { first_block_receipt_index, block_hashes } = request; + + let mut receipts = Vec::new(); + let mut total_bytes = 0usize; + let mut last_block_incomplete = false; + + for (idx, hash) in block_hashes.into_iter().enumerate() { + if idx >= MAX_RECEIPTS_SERVE { + break + } + + let Some(mut block_receipts) = + self.client.receipts_by_block(BlockHashOrNumber::Hash(hash)).unwrap_or_default() + else { + break + }; + + if idx == 0 && first_block_receipt_index > 0 { + let skip = first_block_receipt_index as usize; + if skip >= block_receipts.len() { + block_receipts.clear(); + } else { + block_receipts.drain(0..skip); + } + } + + let block_size = block_receipts.length(); + + if total_bytes + block_size <= SOFT_RESPONSE_LIMIT { + total_bytes += block_size; + receipts.push(block_receipts); + continue; + } + + let mut partial_block = Vec::new(); + for receipt in block_receipts { + let receipt_size = receipt.length(); + if total_bytes + receipt_size > SOFT_RESPONSE_LIMIT { + break; + } + total_bytes += receipt_size; + partial_block.push(receipt); + } + + receipts.push(partial_block); + last_block_incomplete = true; + break; + } + + let _ = response.send(Ok(Receipts70 { last_block_incomplete, receipts })); + } + #[inline] fn get_receipts_response(&self, request: GetReceipts, transform_fn: F) -> Vec> where @@ -285,6 +349,9 @@ where IncomingEthRequest::GetReceipts69 { peer_id, request, response } => { this.on_receipts69_request(peer_id, request, response) } + IncomingEthRequest::GetReceipts70 { peer_id, request, response } => { + this.on_receipts70_request(peer_id, request, response) + } } }, ); @@ -359,4 +426,15 @@ pub enum IncomingEthRequest { /// The channel sender for the response containing Receipts69. response: oneshot::Sender>>, }, + /// Request Receipts from the peer using eth/70. + /// + /// The response should be sent through the channel. + GetReceipts70 { + /// The ID of the peer to request receipts from. + peer_id: PeerId, + /// The specific receipts requested including the `firstBlockReceiptIndex`. + request: GetReceipts70, + /// The channel sender for the response containing Receipts70. + response: oneshot::Sender>>, + }, } diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index 6c14e99400..b494a13669 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -7,7 +7,9 @@ pub use client::FetchClient; use crate::{message::BlockRequest, session::BlockRangeInfo}; use alloy_primitives::B256; use futures::StreamExt; -use reth_eth_wire::{EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives}; +use reth_eth_wire::{ + Capabilities, EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives, +}; use reth_network_api::test_utils::PeersHandle; use reth_network_p2p::{ error::{EthResponseValidator, PeerRequestResult, RequestError, RequestResult}, @@ -29,7 +31,7 @@ use tokio::sync::{mpsc, mpsc::UnboundedSender, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; type InflightHeadersRequest = Request>>; -type InflightBodiesRequest = Request, PeerRequestResult>>; +type InflightBodiesRequest = Request<(), PeerRequestResult>>; /// Manages data fetching operations. 
/// @@ -80,6 +82,7 @@ impl StateFetcher { peer_id: PeerId, best_hash: B256, best_number: u64, + capabilities: Arc, timeout: Arc, range_info: Option, ) { @@ -89,6 +92,7 @@ impl StateFetcher { state: PeerState::Idle, best_hash, best_number, + capabilities, timeout, last_response_likely_bad: false, range_info, @@ -135,8 +139,9 @@ impl StateFetcher { /// Returns the _next_ idle peer that's ready to accept a request, /// prioritizing those with the lowest timeout/latency and those that recently responded with - /// adequate data. - fn next_best_peer(&self) -> Option { + /// adequate data. Additionally, if full blocks are required this prioritizes peers that have + /// full history available + fn next_best_peer(&self, requirement: BestPeerRequirements) -> Option { let mut idle = self.peers.iter().filter(|(_, peer)| peer.state.is_idle()); let mut best_peer = idle.next()?; @@ -148,7 +153,13 @@ impl StateFetcher { continue } - // replace best peer if this peer has better rtt + // replace best peer if this peer meets the requirements better + if maybe_better.1.is_better(best_peer.1, &requirement) { + best_peer = maybe_better; + continue + } + + // replace best peer if this peer has better rtt and both have same range quality if maybe_better.1.timeout() < best_peer.1.timeout() && !maybe_better.1.last_response_likely_bad { @@ -166,9 +177,13 @@ impl StateFetcher { return PollAction::NoRequests } - let Some(peer_id) = self.next_best_peer() else { return PollAction::NoPeersAvailable }; - let request = self.queued_requests.pop_front().expect("not empty"); + let Some(peer_id) = self.next_best_peer(request.best_peer_requirements()) else { + // need to put back the the request + self.queued_requests.push_front(request); + return PollAction::NoPeersAvailable + }; + let request = self.prepare_block_request(peer_id, request); PollAction::Ready(FetchAction::BlockRequest { peer_id, request }) @@ -237,7 +252,7 @@ impl StateFetcher { }) } DownloadRequest::GetBlockBodies { request, response, .. } => { - let inflight = Request { request: request.clone(), response }; + let inflight = Request { request: (), response }; self.inflight_bodies_requests.insert(peer_id, inflight); BlockRequest::GetBlockBodies(GetBlockBodies(request)) } @@ -341,6 +356,9 @@ struct Peer { best_hash: B256, /// Tracks the best number of the peer. best_number: u64, + /// Capabilities announced by the peer. + #[allow(dead_code)] + capabilities: Arc, /// Tracks the current timeout value we use for the peer. timeout: Arc, /// Tracks whether the peer has recently responded with a likely bad response. @@ -351,7 +369,6 @@ struct Peer { /// lowest timeout. last_response_likely_bad: bool, /// Tracks the range info for the peer. - #[allow(dead_code)] range_info: Option, } @@ -359,6 +376,72 @@ impl Peer { fn timeout(&self) -> u64 { self.timeout.load(Ordering::Relaxed) } + + /// Returns the earliest block number available from the peer. + fn earliest(&self) -> u64 { + self.range_info.as_ref().map_or(0, |info| info.earliest()) + } + + /// Returns true if the peer has the full history available. + fn has_full_history(&self) -> bool { + self.earliest() == 0 + } + + fn range(&self) -> Option> { + self.range_info.as_ref().map(|info| info.range()) + } + + /// Returns true if this peer has a better range than the other peer for serving the requested + /// range. + /// + /// A peer has a "better range" if: + /// 1. It can fully cover the requested range while the other cannot + /// 2. None can fully cover the range, but this peer has lower start value + /// 3. 
If a peer doesnt announce a range we assume it has full history, but check the other's + /// range and treat that as better if it can cover the range + fn has_better_range(&self, other: &Self, range: &RangeInclusive) -> bool { + let self_range = self.range(); + let other_range = other.range(); + + match (self_range, other_range) { + (Some(self_r), Some(other_r)) => { + // Check if each peer can fully cover the requested range + let self_covers = self_r.contains(range.start()) && self_r.contains(range.end()); + let other_covers = other_r.contains(range.start()) && other_r.contains(range.end()); + + #[allow(clippy::match_same_arms)] + match (self_covers, other_covers) { + (true, false) => true, // Only self covers the range + (false, true) => false, // Only other covers the range + (true, true) => false, // Both cover + (false, false) => { + // neither covers - prefer if peer has lower (better) start range + self_r.start() < other_r.start() + } + } + } + (Some(self_r), None) => { + // Self has range info, other doesn't (treated as full history with unknown latest) + // Self is better only if it covers the range + self_r.contains(range.start()) && self_r.contains(range.end()) + } + (None, Some(other_r)) => { + // Self has no range info (full history), other has range info + // Self is better only if other doesn't cover the range + !(other_r.contains(range.start()) && other_r.contains(range.end())) + } + (None, None) => false, // Neither has range info - no one is better + } + } + + /// Returns true if this peer is better than the other peer based on the given requirements. + fn is_better(&self, other: &Self, requirement: &BestPeerRequirements) -> bool { + match requirement { + BestPeerRequirements::None => false, + BestPeerRequirements::FullBlockRange(range) => self.has_better_range(other, range), + BestPeerRequirements::FullBlock => self.has_full_history() && !other.has_full_history(), + } + } } /// Tracks the state of an individual peer @@ -420,7 +503,6 @@ pub(crate) enum DownloadRequest { request: Vec, response: oneshot::Sender>>, priority: Priority, - #[allow(dead_code)] range_hint: Option>, }, } @@ -449,6 +531,20 @@ impl DownloadRequest { const fn is_normal_priority(&self) -> bool { self.get_priority().is_normal() } + + /// Returns the best peer requirements for this request. + fn best_peer_requirements(&self) -> BestPeerRequirements { + match self { + Self::GetBlockHeaders { .. } => BestPeerRequirements::None, + Self::GetBlockBodies { range_hint, .. } => { + if let Some(range) = range_hint { + BestPeerRequirements::FullBlockRange(range.clone()) + } else { + BestPeerRequirements::FullBlock + } + } + } + } } /// An action the syncer can emit. @@ -473,6 +569,16 @@ pub(crate) enum BlockResponseOutcome { BadResponse(PeerId, ReputationChangeKind), } +/// Additional requirements for how to rank peers during selection. +enum BestPeerRequirements { + /// No additional requirements + None, + /// Peer must have this block range available. + FullBlockRange(RangeInclusive), + /// Peer must have full range. 
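+    /// A peer counts as having full history when its announced `earliest` block is 0, or when it
+    /// announces no range at all.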
+ FullBlock, +} + #[cfg(test)] mod tests { use super::*; @@ -511,20 +617,35 @@ mod tests { // Add a few random peers let peer1 = B512::random(); let peer2 = B512::random(); - fetcher.new_active_peer(peer1, B256::random(), 1, Arc::new(AtomicU64::new(1)), None); - fetcher.new_active_peer(peer2, B256::random(), 2, Arc::new(AtomicU64::new(1)), None); + let capabilities = Arc::new(Capabilities::from(vec![])); + fetcher.new_active_peer( + peer1, + B256::random(), + 1, + Arc::clone(&capabilities), + Arc::new(AtomicU64::new(1)), + None, + ); + fetcher.new_active_peer( + peer2, + B256::random(), + 2, + Arc::clone(&capabilities), + Arc::new(AtomicU64::new(1)), + None, + ); - let first_peer = fetcher.next_best_peer().unwrap(); + let first_peer = fetcher.next_best_peer(BestPeerRequirements::None).unwrap(); assert!(first_peer == peer1 || first_peer == peer2); // Pending disconnect for first_peer fetcher.on_pending_disconnect(&first_peer); // first_peer now isn't idle, so we should get other peer - let second_peer = fetcher.next_best_peer().unwrap(); + let second_peer = fetcher.next_best_peer(BestPeerRequirements::None).unwrap(); assert!(first_peer == peer1 || first_peer == peer2); assert_ne!(first_peer, second_peer); // without idle peers, returns None fetcher.on_pending_disconnect(&second_peer); - assert_eq!(fetcher.next_best_peer(), None); + assert_eq!(fetcher.next_best_peer(BestPeerRequirements::None), None); } #[tokio::test] @@ -539,18 +660,40 @@ mod tests { let peer2_timeout = Arc::new(AtomicU64::new(300)); - fetcher.new_active_peer(peer1, B256::random(), 1, Arc::new(AtomicU64::new(30)), None); - fetcher.new_active_peer(peer2, B256::random(), 2, Arc::clone(&peer2_timeout), None); - fetcher.new_active_peer(peer3, B256::random(), 3, Arc::new(AtomicU64::new(50)), None); + let capabilities = Arc::new(Capabilities::from(vec![])); + fetcher.new_active_peer( + peer1, + B256::random(), + 1, + Arc::clone(&capabilities), + Arc::new(AtomicU64::new(30)), + None, + ); + fetcher.new_active_peer( + peer2, + B256::random(), + 2, + Arc::clone(&capabilities), + Arc::clone(&peer2_timeout), + None, + ); + fetcher.new_active_peer( + peer3, + B256::random(), + 3, + Arc::clone(&capabilities), + Arc::new(AtomicU64::new(50)), + None, + ); // Must always get peer1 (lowest timeout) - assert_eq!(fetcher.next_best_peer(), Some(peer1)); - assert_eq!(fetcher.next_best_peer(), Some(peer1)); + assert_eq!(fetcher.next_best_peer(BestPeerRequirements::None), Some(peer1)); + assert_eq!(fetcher.next_best_peer(BestPeerRequirements::None), Some(peer1)); // peer2's timeout changes below peer1's peer2_timeout.store(10, Ordering::Relaxed); // Then we get peer 2 always (now lowest) - assert_eq!(fetcher.next_best_peer(), Some(peer2)); - assert_eq!(fetcher.next_best_peer(), Some(peer2)); + assert_eq!(fetcher.next_best_peer(BestPeerRequirements::None), Some(peer2)); + assert_eq!(fetcher.next_best_peer(BestPeerRequirements::None), Some(peer2)); } #[tokio::test] @@ -609,6 +752,7 @@ mod tests { peer_id, Default::default(), Default::default(), + Arc::new(Capabilities::from(vec![])), Default::default(), None, ); @@ -639,4 +783,367 @@ mod tests { assert!(fetcher.peers[&peer_id].state.is_idle()); } + + #[test] + fn test_peer_is_better_none_requirement() { + let peer1 = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + best_number: 100, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: Some(BlockRangeInfo::new(0, 100, B256::random())), + }; + + 
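+        // A second peer with no announced range (`range_info: None`) and a slower timeout.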
let peer2 = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + best_number: 50, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(20)), + last_response_likely_bad: false, + range_info: None, + }; + + // With None requirement, is_better should always return false + assert!(!peer1.is_better(&peer2, &BestPeerRequirements::None)); + assert!(!peer2.is_better(&peer1, &BestPeerRequirements::None)); + } + + #[test] + fn test_peer_is_better_full_block_requirement() { + // Peer with full history (earliest = 0) + let peer_full = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + best_number: 100, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: Some(BlockRangeInfo::new(0, 100, B256::random())), + }; + + // Peer without full history (earliest = 50) + let peer_partial = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + best_number: 100, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: Some(BlockRangeInfo::new(50, 100, B256::random())), + }; + + // Peer without range info (treated as full history) + let peer_no_range = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + best_number: 100, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: None, + }; + + // Peer with full history is better than peer without + assert!(peer_full.is_better(&peer_partial, &BestPeerRequirements::FullBlock)); + assert!(!peer_partial.is_better(&peer_full, &BestPeerRequirements::FullBlock)); + + // Peer without range info (full history) is better than partial + assert!(peer_no_range.is_better(&peer_partial, &BestPeerRequirements::FullBlock)); + assert!(!peer_partial.is_better(&peer_no_range, &BestPeerRequirements::FullBlock)); + + // Both have full history - no improvement + assert!(!peer_full.is_better(&peer_no_range, &BestPeerRequirements::FullBlock)); + assert!(!peer_no_range.is_better(&peer_full, &BestPeerRequirements::FullBlock)); + } + + #[test] + fn test_peer_is_better_full_block_range_requirement() { + let range = RangeInclusive::new(40, 60); + + // Peer that covers the requested range + let peer_covers = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + best_number: 100, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: Some(BlockRangeInfo::new(0, 100, B256::random())), + }; + + // Peer that doesn't cover the range (earliest too high) + let peer_no_cover = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + best_number: 100, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: Some(BlockRangeInfo::new(70, 100, B256::random())), + }; + + // Peer that covers the requested range is better than one that doesn't + assert!(peer_covers + .is_better(&peer_no_cover, &BestPeerRequirements::FullBlockRange(range.clone()))); + assert!( + !peer_no_cover.is_better(&peer_covers, &BestPeerRequirements::FullBlockRange(range)) + ); + } + + #[test] + fn test_peer_is_better_both_cover_range() { + let range = RangeInclusive::new(30, 50); + + // Peer with full history that covers the range + let peer_full = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + 
best_number: 100, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: Some(BlockRangeInfo::new(0, 50, B256::random())), + }; + + // Peer without full history that also covers the range + let peer_partial = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + best_number: 100, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: Some(BlockRangeInfo::new(30, 50, B256::random())), + }; + + // When both cover the range, prefer none + assert!(!peer_full + .is_better(&peer_partial, &BestPeerRequirements::FullBlockRange(range.clone()))); + assert!(!peer_partial.is_better(&peer_full, &BestPeerRequirements::FullBlockRange(range))); + } + + #[test] + fn test_peer_is_better_lower_start() { + let range = RangeInclusive::new(30, 60); + + // Peer with full history that covers the range + let peer_full = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + best_number: 100, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: Some(BlockRangeInfo::new(0, 50, B256::random())), + }; + + // Peer without full history that also covers the range + let peer_partial = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + best_number: 100, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: Some(BlockRangeInfo::new(30, 50, B256::random())), + }; + + // When both cover the range, prefer lower start value + assert!(peer_full + .is_better(&peer_partial, &BestPeerRequirements::FullBlockRange(range.clone()))); + assert!(!peer_partial.is_better(&peer_full, &BestPeerRequirements::FullBlockRange(range))); + } + + #[test] + fn test_peer_is_better_neither_covers_range() { + let range = RangeInclusive::new(40, 60); + + // Peer with full history that doesn't cover the range (latest too low) + let peer_full = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + best_number: 30, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: Some(BlockRangeInfo::new(0, 30, B256::random())), + }; + + // Peer without full history that also doesn't cover the range + let peer_partial = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + best_number: 30, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: Some(BlockRangeInfo::new(10, 30, B256::random())), + }; + + // When neither covers the range, prefer full history + assert!(peer_full + .is_better(&peer_partial, &BestPeerRequirements::FullBlockRange(range.clone()))); + assert!(!peer_partial.is_better(&peer_full, &BestPeerRequirements::FullBlockRange(range))); + } + + #[test] + fn test_peer_is_better_no_range_info() { + let range = RangeInclusive::new(40, 60); + + // Peer with range info + let peer_with_range = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + best_number: 100, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: Some(BlockRangeInfo::new(30, 100, B256::random())), + }; + + // Peer without range info + let peer_no_range = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + 
best_number: 100, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: None, + }; + + // Peer without range info is not better (we prefer peers with known ranges) + assert!(!peer_no_range + .is_better(&peer_with_range, &BestPeerRequirements::FullBlockRange(range.clone()))); + + // Peer with range info is better than peer without + assert!( + peer_with_range.is_better(&peer_no_range, &BestPeerRequirements::FullBlockRange(range)) + ); + } + + #[test] + fn test_peer_is_better_one_peer_no_range_covers() { + let range = RangeInclusive::new(40, 60); + + // Peer with range info that covers the requested range + let peer_with_range_covers = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + best_number: 100, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: Some(BlockRangeInfo::new(30, 100, B256::random())), + }; + + // Peer without range info (treated as full history with unknown latest) + let peer_no_range = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + best_number: 100, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: None, + }; + + // Peer with range that covers is better than peer without range info + assert!(peer_with_range_covers + .is_better(&peer_no_range, &BestPeerRequirements::FullBlockRange(range.clone()))); + + // Peer without range info is not better when other covers + assert!(!peer_no_range + .is_better(&peer_with_range_covers, &BestPeerRequirements::FullBlockRange(range))); + } + + #[test] + fn test_peer_is_better_one_peer_no_range_doesnt_cover() { + let range = RangeInclusive::new(40, 60); + + // Peer with range info that does NOT cover the requested range (too high) + let peer_with_range_no_cover = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + best_number: 100, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: Some(BlockRangeInfo::new(70, 100, B256::random())), + }; + + // Peer without range info (treated as full history) + let peer_no_range = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + best_number: 100, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: None, + }; + + // Peer with range that doesn't cover is not better + assert!(!peer_with_range_no_cover + .is_better(&peer_no_range, &BestPeerRequirements::FullBlockRange(range.clone()))); + + // Peer without range info (full history) is better when other doesn't cover + assert!(peer_no_range + .is_better(&peer_with_range_no_cover, &BestPeerRequirements::FullBlockRange(range))); + } + + #[test] + fn test_peer_is_better_edge_cases() { + // Test exact range boundaries + let range = RangeInclusive::new(50, 100); + + // Peer that exactly covers the range + let peer_exact = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + best_number: 100, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: Some(BlockRangeInfo::new(50, 100, B256::random())), + }; + + // Peer that's one block short at the start + let peer_short_start = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + best_number: 100, + 
capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: Some(BlockRangeInfo::new(51, 100, B256::random())), + }; + + // Peer that's one block short at the end + let peer_short_end = Peer { + state: PeerState::Idle, + best_hash: B256::random(), + best_number: 100, + capabilities: Arc::new(Capabilities::new(vec![])), + timeout: Arc::new(AtomicU64::new(10)), + last_response_likely_bad: false, + range_info: Some(BlockRangeInfo::new(50, 99, B256::random())), + }; + + // Exact coverage is better than short coverage + assert!(peer_exact + .is_better(&peer_short_start, &BestPeerRequirements::FullBlockRange(range.clone()))); + assert!(peer_exact + .is_better(&peer_short_end, &BestPeerRequirements::FullBlockRange(range.clone()))); + + // Short coverage is not better than exact coverage + assert!(!peer_short_start + .is_better(&peer_exact, &BestPeerRequirements::FullBlockRange(range.clone()))); + assert!( + !peer_short_end.is_better(&peer_exact, &BestPeerRequirements::FullBlockRange(range)) + ); + } } diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs index a84168d384..ad63067a51 100644 --- a/crates/net/network/src/lib.rs +++ b/crates/net/network/src/lib.rs @@ -175,6 +175,7 @@ pub use reth_network_p2p as p2p; /// re-export types crates pub mod types { + pub use reth_discv4::NatResolver; pub use reth_eth_wire_types::*; pub use reth_network_types::*; } diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index c0a2934df7..52d8757a50 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -532,6 +532,13 @@ impl NetworkManager { response, }) } + PeerRequest::GetReceipts70 { request, response } => { + self.delegate_eth_request(IncomingEthRequest::GetReceipts70 { + peer_id, + request, + response, + }) + } PeerRequest::GetPooledTransactions { request, response } => { self.notify_tx_manager(NetworkTransactionEvent::GetPooledTransactions { peer_id, diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index 115939b161..58df7006e1 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -3,7 +3,7 @@ //! An `RLPx` stream is multiplexed via the prepended message-id of a framed message. //! Capabilities are exchanged via the `RLPx` `Hello` message as pairs of `(id, version)`, -use crate::types::Receipts69; +use crate::types::{Receipts69, Receipts70}; use alloy_consensus::{BlockHeader, ReceiptWithBloom}; use alloy_primitives::{Bytes, B256}; use futures::FutureExt; @@ -116,6 +116,11 @@ pub enum PeerResponse { /// The receiver channel for the response to a receipts request. response: oneshot::Receiver>>, }, + /// Represents a response to a request for receipts using eth/70. + Receipts70 { + /// The receiver channel for the response to a receipts request. + response: oneshot::Receiver>>, + }, } // === impl PeerResponse === @@ -151,6 +156,10 @@ impl PeerResponse { Self::Receipts69 { response } => { poll_request!(response, Receipts69, cx) } + Self::Receipts70 { response } => match ready!(response.poll_unpin(cx)) { + Ok(res) => PeerResponseResult::Receipts70(res), + Err(err) => PeerResponseResult::Receipts70(Err(err.into())), + }, }; Poll::Ready(res) } @@ -171,6 +180,8 @@ pub enum PeerResponseResult { Receipts(RequestResult>>>), /// Represents a result containing receipts or an error for eth/69. 
Receipts69(RequestResult>>), + /// Represents a result containing receipts or an error for eth/70. + Receipts70(RequestResult>), } // === impl PeerResponseResult === @@ -208,6 +219,13 @@ impl PeerResponseResult { Self::Receipts69(resp) => { to_message!(resp, Receipts69, id) } + Self::Receipts70(resp) => match resp { + Ok(res) => { + let request = RequestPair { request_id: id, message: res }; + Ok(EthMessage::Receipts70(request)) + } + Err(err) => Err(err), + }, } } @@ -220,6 +238,7 @@ impl PeerResponseResult { Self::NodeData(res) => res.as_ref().err(), Self::Receipts(res) => res.as_ref().err(), Self::Receipts69(res) => res.as_ref().err(), + Self::Receipts70(res) => res.as_ref().err(), } } diff --git a/crates/net/network/src/metrics.rs b/crates/net/network/src/metrics.rs index 241a778869..ba9efdff54 100644 --- a/crates/net/network/src/metrics.rs +++ b/crates/net/network/src/metrics.rs @@ -131,6 +131,8 @@ pub struct TransactionsManagerMetrics { /// capacity. Note, this is not a limit to the number of inflight requests, but a health /// measure. pub(crate) capacity_pending_pool_imports: Counter, + /// The time it took to prepare transactions for import. This is mostly sender recovery. + pub(crate) pool_import_prepare_duration: Histogram, /* ================ POLL DURATION ================ */ diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index cfc3d56cb2..93e14ec04e 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -232,13 +232,33 @@ impl PeersInfo for NetworkHandle { fn local_node_record(&self) -> NodeRecord { if let Some(discv4) = &self.inner.discv4 { + // Note: the discv4 services uses the same `nat` so we can directly return the node + // record here discv4.node_record() - } else if let Some(record) = self.inner.discv5.as_ref().and_then(|d| d.node_record()) { - record + } else if let Some(discv5) = self.inner.discv5.as_ref() { + // for disv5 we must check if we have an external ip configured + if let Some(external) = + self.inner.nat.clone().and_then(|nat| nat.as_external_ip(discv5.local_port())) + { + NodeRecord::new((external, discv5.local_port()).into(), *self.peer_id()) + } else { + // use the node record that discv5 tracks or use localhost + self.inner.discv5.as_ref().and_then(|d| d.node_record()).unwrap_or_else(|| { + NodeRecord::new( + (std::net::IpAddr::V4(std::net::Ipv4Addr::LOCALHOST), discv5.local_port()) + .into(), + *self.peer_id(), + ) + }) + } + // also use the tcp port + .with_tcp_port(self.inner.listener_address.lock().port()) } else { - let external_ip = self.inner.nat.and_then(|nat| nat.as_external_ip()); - let mut socket_addr = *self.inner.listener_address.lock(); + + let external_ip = + self.inner.nat.clone().and_then(|nat| nat.as_external_ip(socket_addr.port())); + if let Some(ip) = external_ip { // if able to resolve external ip, use it instead and also set the local address socket_addr.set_ip(ip) @@ -332,6 +352,9 @@ impl Peers for NetworkHandle { /// Sends a message to the [`NetworkManager`](crate::NetworkManager) to connect to the given /// peer. + /// + /// This will add a new entry for the given peer if it isn't tracked yet. + /// If it is tracked then the peer is updated with the given information. 
fn connect_peer_kind( &self, peer_id: PeerId, diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index d9ece3dd06..84066606b8 100644 --- a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -90,6 +90,8 @@ pub struct PeersManager { net_connection_state: NetworkConnectionState, /// How long to temporarily ban ip on an incoming connection attempt. incoming_ip_throttle_duration: Duration, + /// IP address filter for restricting network connections to specific IP ranges. + ip_filter: reth_net_banlist::IpFilter, } impl PeersManager { @@ -108,6 +110,7 @@ impl PeersManager { basic_nodes, max_backoff_count, incoming_ip_throttle_duration, + ip_filter, } = config; let (manager_tx, handle_rx) = mpsc::unbounded_channel(); let now = Instant::now(); @@ -138,6 +141,8 @@ impl PeersManager { }); } + trace!(target: "net::peers", trusted_peers=?trusted_peer_ids, "Initialized peers manager"); + Self { peers, trusted_peer_ids, @@ -161,6 +166,7 @@ impl PeersManager { max_backoff_count, net_connection_state: NetworkConnectionState::default(), incoming_ip_throttle_duration, + ip_filter, } } @@ -243,6 +249,12 @@ impl PeersManager { &mut self, addr: IpAddr, ) -> Result<(), InboundConnectionError> { + // Check if the IP is in the allowed ranges (netrestrict) + if !self.ip_filter.is_allowed(&addr) { + trace!(target: "net", ?addr, "Rejecting connection from IP not in allowed ranges"); + return Err(InboundConnectionError::IpBanned) + } + if self.ban_list.is_banned_ip(&addr) { return Err(InboundConnectionError::IpBanned) } @@ -457,6 +469,8 @@ impl PeersManager { /// reputation changes that can be attributed to network conditions. If the peer is a /// trusted peer, it will also be less strict with the reputation slashing. pub(crate) fn apply_reputation_change(&mut self, peer_id: &PeerId, rep: ReputationChangeKind) { + trace!(target: "net::peers", ?peer_id, reputation=?rep, "applying reputation change"); + let outcome = if let Some(peer) = self.peers.get_mut(peer_id) { // First check if we should reset the reputation if rep.is_reset() { @@ -526,6 +540,7 @@ impl PeersManager { pub(crate) fn on_active_session_gracefully_closed(&mut self, peer_id: PeerId) { match self.peers.entry(peer_id) { Entry::Occupied(mut entry) => { + trace!(target: "net::peers", ?peer_id, direction=?entry.get().state, "active session gracefully closed"); self.connection_info.decr_state(entry.get().state); if entry.get().remove_after_disconnect && !entry.get().is_trusted() { @@ -533,12 +548,23 @@ impl PeersManager { entry.remove(); self.queued_actions.push_back(PeerAction::PeerRemoved(peer_id)); } else { + let peer = entry.get_mut(); // reset the peer's state // we reset the backoff counter since we're able to establish a successful // session to that peer - entry.get_mut().severe_backoff_counter = 0; - entry.get_mut().state = PeerConnectionState::Idle; - return + peer.severe_backoff_counter = 0; + peer.state = PeerConnectionState::Idle; + + // but we're backing off slightly to avoid dialing the peer again right away, to + // give the remote time to also properly register the closed session and clean + // up and to avoid any issues with ip throttling on the remote in case this + // session was terminated right away. 
+ peer.backed_off = true; + self.backed_off_peers.insert( + peer_id, + std::time::Instant::now() + self.incoming_ip_throttle_duration, + ); + trace!(target: "net::peers", ?peer_id, kind=?peer.kind, duration=?self.incoming_ip_throttle_duration, "backing off on gracefully closed session"); } } Entry::Vacant(_) => return, @@ -550,6 +576,7 @@ impl PeersManager { /// Called when a _pending_ outbound connection is successful. pub(crate) fn on_active_outgoing_established(&mut self, peer_id: PeerId) { if let Some(peer) = self.peers.get_mut(&peer_id) { + trace!(target: "net::peers", ?peer_id, "established active outgoing connection"); self.connection_info.decr_state(peer.state); self.connection_info.inc_out(); peer.state = PeerConnectionState::Out; @@ -643,12 +670,14 @@ impl PeersManager { // have us registered as a trusted peer. let backoff = self.backoff_durations.low; backoff_until = Some(std::time::Instant::now() + backoff); + trace!(target: "net::peers", ?peer_id, ?backoff, "backing off trusted peer"); } else { // Increment peer.backoff_counter if kind.is_severe() { peer.severe_backoff_counter = peer.severe_backoff_counter.saturating_add(1); } + trace!(target: "net::peers", ?peer_id, ?kind, severe_backoff_counter=peer.severe_backoff_counter, "backing off basic peer"); let backoff_time = self.backoff_durations.backoff_until(kind, peer.severe_backoff_counter); @@ -679,6 +708,7 @@ impl PeersManager { // remove peer if it has been marked for removal if remove_peer { + trace!(target: "net", ?peer_id, "removed peer after exceeding backoff counter"); let (peer_id, _) = self.peers.remove_entry(peer_id).expect("peer must exist"); self.queued_actions.push_back(PeerAction::PeerRemoved(peer_id)); } else if let Some(backoff_until) = backoff_until { @@ -715,7 +745,7 @@ impl PeersManager { pub(crate) fn set_discovered_fork_id(&mut self, peer_id: PeerId, fork_id: ForkId) { if let Some(peer) = self.peers.get_mut(&peer_id) { trace!(target: "net::peers", ?peer_id, ?fork_id, "set discovered fork id"); - peer.fork_id = Some(fork_id); + peer.fork_id = Some(Box::new(fork_id)); } } @@ -723,12 +753,15 @@ impl PeersManager { /// /// If the peer already exists, then the address, kind and `fork_id` will be updated. pub(crate) fn add_peer(&mut self, peer_id: PeerId, addr: PeerAddr, fork_id: Option) { - self.add_peer_kind(peer_id, PeerKind::Basic, addr, fork_id) + self.add_peer_kind(peer_id, None, addr, fork_id) } /// Marks the given peer as trusted. pub(crate) fn add_trusted_peer_id(&mut self, peer_id: PeerId) { self.trusted_peer_ids.insert(peer_id); + if let Some(peer) = self.peers.get_mut(&peer_id) { + peer.kind = PeerKind::Trusted; + } } /// Called for a newly discovered trusted peer. @@ -736,30 +769,42 @@ impl PeersManager { /// If the peer already exists, then the address and kind will be updated. #[cfg_attr(not(test), expect(dead_code))] pub(crate) fn add_trusted_peer(&mut self, peer_id: PeerId, addr: PeerAddr) { - self.add_peer_kind(peer_id, PeerKind::Trusted, addr, None) + self.add_peer_kind(peer_id, Some(PeerKind::Trusted), addr, None) } /// Called for a newly discovered peer. /// /// If the peer already exists, then the address, kind and `fork_id` will be updated. 
+ /// If the peer exists and a [`PeerKind`] is provided then the peer's kind is updated pub(crate) fn add_peer_kind( &mut self, peer_id: PeerId, - kind: PeerKind, + kind: Option, addr: PeerAddr, fork_id: Option, ) { - if self.ban_list.is_banned(&peer_id, &addr.tcp().ip()) { + let ip_addr = addr.tcp().ip(); + + // Check if the IP is in the allowed ranges (netrestrict) + if !self.ip_filter.is_allowed(&ip_addr) { + trace!(target: "net", ?peer_id, ?ip_addr, "Skipping peer from IP not in allowed ranges"); + return + } + + if self.ban_list.is_banned(&peer_id, &ip_addr) { return } match self.peers.entry(peer_id) { Entry::Occupied(mut entry) => { let peer = entry.get_mut(); - peer.kind = kind; - peer.fork_id = fork_id; + peer.fork_id = fork_id.map(Box::new); peer.addr = addr; + if let Some(kind) = kind { + peer.kind = kind; + } + if peer.state.is_incoming() { // now that we have an actual discovered address, for that peer and not just the // ip of the incoming connection, we don't need to remove the peer after @@ -769,14 +814,15 @@ impl PeersManager { } Entry::Vacant(entry) => { trace!(target: "net::peers", ?peer_id, addr=?addr.tcp(), "discovered new node"); - let mut peer = Peer::with_kind(addr, kind); - peer.fork_id = fork_id; + let mut peer = Peer::with_kind(addr, kind.unwrap_or(PeerKind::Basic)); + peer.fork_id = fork_id.map(Box::new); entry.insert(peer); self.queued_actions.push_back(PeerAction::PeerAdded(peer_id)); } } - if kind.is_trusted() { + if kind.filter(|kind| kind.is_trusted()).is_some() { + // also track the peer in the peer id set self.trusted_peer_ids.insert(peer_id); } } @@ -830,7 +876,15 @@ impl PeersManager { addr: PeerAddr, fork_id: Option, ) { - if self.ban_list.is_banned(&peer_id, &addr.tcp().ip()) { + let ip_addr = addr.tcp().ip(); + + // Check if the IP is in the allowed ranges (netrestrict) + if !self.ip_filter.is_allowed(&ip_addr) { + trace!(target: "net", ?peer_id, ?ip_addr, "Skipping outbound connection to IP not in allowed ranges"); + return + } + + if self.ban_list.is_banned(&peer_id, &ip_addr) { return } @@ -838,7 +892,7 @@ impl PeersManager { Entry::Occupied(mut entry) => { let peer = entry.get_mut(); peer.kind = kind; - peer.fork_id = fork_id; + peer.fork_id = fork_id.map(Box::new); peer.addr = addr; if peer.state == PeerConnectionState::Idle { @@ -853,7 +907,7 @@ impl PeersManager { trace!(target: "net::peers", ?peer_id, addr=?addr.tcp(), "connects new node"); let mut peer = Peer::with_kind(addr, kind); peer.state = PeerConnectionState::PendingOut; - peer.fork_id = fork_id; + peer.fork_id = fork_id.map(Box::new); entry.insert(peer); self.connection_info.inc_pending_out(); self.queued_actions @@ -1903,6 +1957,22 @@ mod tests { } } + #[tokio::test] + async fn retain_trusted_status() { + let _socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 99)), 8008); + let trusted = PeerId::random(); + let mut peers = + PeersManager::new(PeersConfig::test().with_trusted_nodes(vec![TrustedPeer { + host: Host::Ipv4(Ipv4Addr::new(127, 0, 1, 2)), + tcp_port: 8008, + udp_port: 8008, + id: trusted, + }])); + let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); + peers.add_peer(trusted, PeerAddr::from_tcp(socket_addr), None); + assert!(peers.peers.get(&trusted).unwrap().is_trusted()); + } + #[tokio::test] async fn accept_incoming_trusted_unknown_peer_address() { let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 99)), 8008); @@ -2599,6 +2669,146 @@ mod tests { assert!(!peer.remove_after_disconnect); } + #[tokio::test] + 
async fn test_peer_reconnect_after_graceful_close_respects_throttle() { + let throttle_duration = Duration::from_millis(100); + let config = + PeersConfig { incoming_ip_throttle_duration: throttle_duration, ..PeersConfig::test() }; + let mut peers = PeersManager::new(config); + + let peer_id = PeerId::random(); + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8009); + + // Add as regular peer + peers.add_peer(peer_id, PeerAddr::from_tcp(addr), None); + + match event!(peers) { + PeerAction::PeerAdded(id) => assert_eq!(id, peer_id), + _ => unreachable!(), + } + + match event!(peers) { + PeerAction::Connect { .. } => {} + _ => unreachable!(), + } + + // Simulate outbound connection established + peers.on_active_outgoing_established(peer_id); + assert_eq!(peers.peers.get(&peer_id).unwrap().state, PeerConnectionState::Out); + + // Gracefully close the session + peers.on_active_session_gracefully_closed(peer_id); + + let peer = peers.peers.get(&peer_id).unwrap(); + assert_eq!(peer.state, PeerConnectionState::Idle); + assert!(peer.backed_off); + + // Verify the peer is in the backed_off_peers set + assert!(peers.backed_off_peers.contains_key(&peer_id)); + + // Immediately try to poll - should not trigger any actions yet + poll_fn(|cx| { + assert!(peers.poll(cx).is_pending()); + Poll::Ready(()) + }) + .await; + + // Peer should still be backed off + assert!(peers.backed_off_peers.contains_key(&peer_id)); + assert!(peers.peers.get(&peer_id).unwrap().backed_off); + + // Sleep for the throttle duration + tokio::time::sleep(throttle_duration).await; + + // After throttle duration, event! will poll until we get a Connect action + match event!(peers) { + PeerAction::Connect { peer_id: id, .. } => assert_eq!(id, peer_id), + _ => unreachable!(), + } + + // After connection is initiated, peer should no longer be backed off + assert!(!peers.backed_off_peers.contains_key(&peer_id)); + assert!(!peers.peers.get(&peer_id).unwrap().backed_off); + } + + #[tokio::test] + async fn test_backed_off_peer_can_accept_incoming_connection() { + let throttle_duration = Duration::from_millis(100); + let config = + PeersConfig { incoming_ip_throttle_duration: throttle_duration, ..PeersConfig::test() }; + let mut peers = PeersManager::new(config); + + let peer_id = PeerId::random(); + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8009); + + // Add as regular peer + peers.add_peer(peer_id, PeerAddr::from_tcp(addr), None); + + match event!(peers) { + PeerAction::PeerAdded(id) => assert_eq!(id, peer_id), + _ => unreachable!(), + } + + match event!(peers) { + PeerAction::Connect { .. 
} => {} + _ => unreachable!(), + } + + // Simulate outbound connection established + peers.on_active_outgoing_established(peer_id); + assert_eq!(peers.peers.get(&peer_id).unwrap().state, PeerConnectionState::Out); + + // Gracefully close the session - this will back off the peer + peers.on_active_session_gracefully_closed(peer_id); + + let peer = peers.peers.get(&peer_id).unwrap(); + assert_eq!(peer.state, PeerConnectionState::Idle); + assert!(peer.backed_off); + assert!(peers.backed_off_peers.contains_key(&peer_id)); + + // Now simulate an incoming connection from the backed-off peer + // First, handle the incoming pending session + assert!(peers.on_incoming_pending_session(addr.ip()).is_ok()); + assert_eq!(peers.connection_info.num_pending_in, 1); + + // Establish the incoming session + peers.on_incoming_session_established(peer_id, addr); + + // Peer should have been added to incoming connections + assert_eq!(peers.peers.get(&peer_id).unwrap().state, PeerConnectionState::In); + assert_eq!(peers.connection_info.num_inbound, 1); + + // Peer should still be backed off for outbound connections + assert!(peers.backed_off_peers.contains_key(&peer_id)); + assert!(peers.peers.get(&peer_id).unwrap().backed_off); + + // Verify we don't try to reconnect outbound while peer is backed off + poll_fn(|cx| { + assert!(peers.poll(cx).is_pending()); + Poll::Ready(()) + }) + .await; + + // No outbound connection should be attempted while backed off + assert_eq!(peers.peers.get(&peer_id).unwrap().state, PeerConnectionState::In); + + // After throttle duration, the backoff should be cleared + tokio::time::sleep(throttle_duration).await; + + poll_fn(|cx| { + let _ = peers.poll(cx); + Poll::Ready(()) + }) + .await; + + // Backoff should be cleared now + assert!(!peers.backed_off_peers.contains_key(&peer_id)); + assert!(!peers.peers.get(&peer_id).unwrap().backed_off); + + // Peer should still be in incoming state + assert_eq!(peers.peers.get(&peer_id).unwrap().state, PeerConnectionState::In); + } + #[tokio::test] async fn test_incoming_outgoing_already_connected() { let peer_id = PeerId::random(); @@ -2899,4 +3109,106 @@ mod tests { let updated_peer = manager.peers.get(&peer_id).unwrap(); assert_eq!(updated_peer.addr.tcp().ip(), updated_ip); } + + #[tokio::test] + async fn test_ip_filter_blocks_inbound_connection() { + use reth_net_banlist::IpFilter; + use std::net::IpAddr; + + // Create a filter that only allows 192.168.0.0/16 + let ip_filter = IpFilter::from_cidr_string("192.168.0.0/16").unwrap(); + let config = PeersConfig::test().with_ip_filter(ip_filter); + let mut peers = PeersManager::new(config); + + // Try to connect from an allowed IP + let allowed_ip: IpAddr = "192.168.1.100".parse().unwrap(); + assert!(peers.on_incoming_pending_session(allowed_ip).is_ok()); + + // Try to connect from a disallowed IP + let disallowed_ip: IpAddr = "10.0.0.1".parse().unwrap(); + assert!(peers.on_incoming_pending_session(disallowed_ip).is_err()); + } + + #[tokio::test] + async fn test_ip_filter_blocks_outbound_connection() { + use reth_net_banlist::IpFilter; + use std::net::SocketAddr; + + // Create a filter that only allows 192.168.0.0/16 + let ip_filter = IpFilter::from_cidr_string("192.168.0.0/16").unwrap(); + let config = PeersConfig::test().with_ip_filter(ip_filter); + let mut peers = PeersManager::new(config); + + let peer_id = PeerId::new([1; 64]); + + // Try to add a peer with an allowed IP + let allowed_addr: SocketAddr = "192.168.1.100:30303".parse().unwrap(); + peers.add_peer(peer_id, 
PeerAddr::from_tcp(allowed_addr), None); + assert!(peers.peers.contains_key(&peer_id)); + + // Try to add a peer with a disallowed IP + let peer_id2 = PeerId::new([2; 64]); + let disallowed_addr: SocketAddr = "10.0.0.1:30303".parse().unwrap(); + peers.add_peer(peer_id2, PeerAddr::from_tcp(disallowed_addr), None); + assert!(!peers.peers.contains_key(&peer_id2)); + } + + #[tokio::test] + async fn test_ip_filter_ipv6() { + use reth_net_banlist::IpFilter; + use std::net::IpAddr; + + // Create a filter that only allows IPv6 range 2001:db8::/32 + let ip_filter = IpFilter::from_cidr_string("2001:db8::/32").unwrap(); + let config = PeersConfig::test().with_ip_filter(ip_filter); + let mut peers = PeersManager::new(config); + + // Try to connect from an allowed IPv6 address + let allowed_ip: IpAddr = "2001:db8::1".parse().unwrap(); + assert!(peers.on_incoming_pending_session(allowed_ip).is_ok()); + + // Try to connect from a disallowed IPv6 address + let disallowed_ip: IpAddr = "2001:db9::1".parse().unwrap(); + assert!(peers.on_incoming_pending_session(disallowed_ip).is_err()); + } + + #[tokio::test] + async fn test_ip_filter_multiple_ranges() { + use reth_net_banlist::IpFilter; + use std::net::IpAddr; + + // Create a filter that allows multiple ranges + let ip_filter = IpFilter::from_cidr_string("192.168.0.0/16,10.0.0.0/8").unwrap(); + let config = PeersConfig::test().with_ip_filter(ip_filter); + let mut peers = PeersManager::new(config); + + // Try IPs from both allowed ranges + let ip1: IpAddr = "192.168.1.1".parse().unwrap(); + let ip2: IpAddr = "10.5.10.20".parse().unwrap(); + assert!(peers.on_incoming_pending_session(ip1).is_ok()); + assert!(peers.on_incoming_pending_session(ip2).is_ok()); + + // Try IP from disallowed range + let disallowed_ip: IpAddr = "172.16.0.1".parse().unwrap(); + assert!(peers.on_incoming_pending_session(disallowed_ip).is_err()); + } + + #[tokio::test] + async fn test_ip_filter_no_restriction() { + use reth_net_banlist::IpFilter; + use std::net::IpAddr; + + // Create a filter with no restrictions (allow all) + let ip_filter = IpFilter::allow_all(); + let config = PeersConfig::test().with_ip_filter(ip_filter); + let mut peers = PeersManager::new(config); + + // All IPs should be allowed + let ip1: IpAddr = "192.168.1.1".parse().unwrap(); + let ip2: IpAddr = "10.0.0.1".parse().unwrap(); + let ip3: IpAddr = "8.8.8.8".parse().unwrap(); + assert!(peers.on_incoming_pending_session(ip1).is_ok()); + assert!(peers.on_incoming_pending_session(ip2).is_ok()); + assert!(peers.on_incoming_pending_session(ip3).is_ok()); + } } diff --git a/crates/net/network/src/required_block_filter.rs b/crates/net/network/src/required_block_filter.rs index 9c831e2f5d..0245c0e728 100644 --- a/crates/net/network/src/required_block_filter.rs +++ b/crates/net/network/src/required_block_filter.rs @@ -3,7 +3,7 @@ //! This module provides functionality to filter out peers that don't have //! specific required blocks (primarily used for shadowfork testing). -use alloy_primitives::B256; +use alloy_eips::BlockNumHash; use futures::StreamExt; use reth_eth_wire_types::{GetBlockHeaders, HeadersDirection}; use reth_network_api::{ @@ -16,11 +16,13 @@ use tracing::{debug, info, trace}; /// /// This task listens for new peer sessions and checks if they have the required /// block hashes. Peers that don't have these blocks are banned. +/// +/// This type is mainly used to connect peers on shadow forks (e.g. 
mainnet shadowfork) pub struct RequiredBlockFilter { /// Network handle for listening to events and managing peer reputation. network: N, - /// List of block hashes that peers must have to be considered valid. - block_hashes: Vec, + /// List of block number-hash pairs that peers must have to be considered valid. + block_num_hashes: Vec, } impl RequiredBlockFilter @@ -28,8 +30,8 @@ where N: NetworkEventListenerProvider + Peers + Clone + Send + Sync + 'static, { /// Creates a new required block peer filter. - pub const fn new(network: N, block_hashes: Vec) -> Self { - Self { network, block_hashes } + pub const fn new(network: N, block_num_hashes: Vec) -> Self { + Self { network, block_num_hashes } } /// Spawns the required block peer filter task. @@ -37,12 +39,12 @@ where /// This task will run indefinitely, monitoring new peer sessions and filtering /// out peers that don't have the required blocks. pub fn spawn(self) { - if self.block_hashes.is_empty() { + if self.block_num_hashes.is_empty() { debug!(target: "net::filter", "No required block hashes configured, skipping peer filtering"); return; } - info!(target: "net::filter", "Starting required block peer filter with {} block hashes", self.block_hashes.len()); + info!(target: "net::filter", "Starting required block peer filter with {} block hashes", self.block_num_hashes.len()); tokio::spawn(async move { self.run().await; @@ -60,10 +62,18 @@ where // Spawn a task to check this peer's blocks let network = self.network.clone(); - let block_hashes = self.block_hashes.clone(); + let block_num_hashes = self.block_num_hashes.clone(); + let peer_block_number = info.status.latest_block.unwrap_or(0); tokio::spawn(async move { - Self::check_peer_blocks(network, peer_id, messages, block_hashes).await; + Self::check_peer_blocks( + network, + peer_id, + messages, + block_num_hashes, + peer_block_number, + ) + .await; }); } } @@ -74,9 +84,19 @@ where network: N, peer_id: reth_network_api::PeerId, messages: reth_network_api::PeerRequestSender>, - block_hashes: Vec, + block_num_hashes: Vec, + latest_peer_block: u64, ) { - for block_hash in block_hashes { + for block_num_hash in block_num_hashes { + // Skip if peer's block number is lower than required, peer might also be syncing and + // still on the same chain. 
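A short usage sketch for the `RequiredBlockFilter` above, generic over any handle that satisfies the bounds stated in this diff (`NetworkEventListenerProvider + Peers + Clone + Send + Sync + 'static`). The import path for the filter is an assumption based on the module location, and `spawn` must be called from within a tokio runtime since it uses `tokio::spawn`:

use alloy_eips::BlockNumHash;
use alloy_primitives::b256;
use reth_network::required_block_filter::RequiredBlockFilter; // assumed export path
use reth_network_api::{NetworkEventListenerProvider, Peers};

fn spawn_shadowfork_filter<N>(network: N)
where
    N: NetworkEventListenerProvider + Peers + Clone + Send + Sync + 'static,
{
    // peers must be able to serve this (number, hash) pair or they are banned;
    // peers whose announced head is still below the number are skipped, not penalized
    let required = vec![BlockNumHash::new(
        23_115_201,
        b256!("0x2222222222222222222222222222222222222222222222222222222222222222"),
    )];
    RequiredBlockFilter::new(network, required).spawn();
}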
+ if block_num_hash.number > 0 && latest_peer_block < block_num_hash.number { + debug!(target: "net::filter", "Skipping check for block {} - peer {} only at block {}", + block_num_hash.number, peer_id, latest_peer_block); + continue; + } + + let block_hash = block_num_hash.hash; trace!(target: "net::filter", "Checking if peer {} has block {}", peer_id, block_hash); // Create a request for block headers @@ -139,28 +159,35 @@ where #[cfg(test)] mod tests { use super::*; + use alloy_eips::BlockNumHash; use alloy_primitives::{b256, B256}; use reth_network_api::noop::NoopNetwork; #[test] fn test_required_block_filter_creation() { let network = NoopNetwork::default(); - let block_hashes = vec![ - b256!("0x1111111111111111111111111111111111111111111111111111111111111111"), - b256!("0x2222222222222222222222222222222222222222222222222222222222222222"), + let block_num_hashes = vec![ + BlockNumHash::new( + 0, + b256!("0x1111111111111111111111111111111111111111111111111111111111111111"), + ), + BlockNumHash::new( + 23115201, + b256!("0x2222222222222222222222222222222222222222222222222222222222222222"), + ), ]; - let filter = RequiredBlockFilter::new(network, block_hashes.clone()); - assert_eq!(filter.block_hashes.len(), 2); - assert_eq!(filter.block_hashes, block_hashes); + let filter = RequiredBlockFilter::new(network, block_num_hashes.clone()); + assert_eq!(filter.block_num_hashes.len(), 2); + assert_eq!(filter.block_num_hashes, block_num_hashes); } #[test] fn test_required_block_filter_empty_hashes_does_not_spawn() { let network = NoopNetwork::default(); - let block_hashes = vec![]; + let block_num_hashes = vec![]; - let filter = RequiredBlockFilter::new(network, block_hashes); + let filter = RequiredBlockFilter::new(network, block_num_hashes); // This should not panic and should exit early when spawn is called filter.spawn(); } @@ -170,10 +197,10 @@ mod tests { // This test would require a more complex setup with mock network components // For now, we ensure the basic structure is correct let network = NoopNetwork::default(); - let block_hashes = vec![B256::default()]; + let block_num_hashes = vec![BlockNumHash::new(0, B256::default())]; - let filter = RequiredBlockFilter::new(network, block_hashes); + let filter = RequiredBlockFilter::new(network, block_num_hashes); // Verify the filter can be created and basic properties are set - assert_eq!(filter.block_hashes.len(), 1); + assert_eq!(filter.block_num_hashes.len(), 1); } } diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index 0044c1f92e..0d405b903e 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -25,10 +25,10 @@ use futures::{stream::Fuse, SinkExt, StreamExt}; use metrics::Gauge; use reth_eth_wire::{ errors::{EthHandshakeError, EthStreamError}, - message::{EthBroadcastMessage, MessageError, RequestPair}, + message::{EthBroadcastMessage, MessageError}, Capabilities, DisconnectP2P, DisconnectReason, EthMessage, NetworkPrimitives, NewBlockPayload, }; -use reth_eth_wire_types::RawCapabilityMessage; +use reth_eth_wire_types::{message::RequestPair, RawCapabilityMessage}; use reth_metrics::common::mpsc::MeteredPollSender; use reth_network_api::PeerRequest; use reth_network_p2p::error::RequestError; @@ -237,16 +237,6 @@ impl ActiveSession { self.try_emit_broadcast(PeerMessage::PooledTransactions(msg.into())).into() } EthMessage::NewPooledTransactionHashes68(msg) => { - if msg.hashes.len() != msg.types.len() || msg.hashes.len() != msg.sizes.len() { - 
return OnIncomingMessageOutcome::BadMessage { - error: EthStreamError::TransactionHashesInvalidLenOfFields { - hashes_len: msg.hashes.len(), - types_len: msg.types.len(), - sizes_len: msg.sizes.len(), - }, - message: EthMessage::NewPooledTransactionHashes68(msg), - } - } self.try_emit_broadcast(PeerMessage::PooledTransactions(msg.into())).into() } EthMessage::GetBlockHeaders(req) => { @@ -280,12 +270,18 @@ impl ActiveSession { on_request!(req, Receipts, GetReceipts) } } + EthMessage::GetReceipts70(req) => { + on_request!(req, Receipts70, GetReceipts70) + } EthMessage::Receipts(resp) => { on_response!(resp, GetReceipts) } EthMessage::Receipts69(resp) => { on_response!(resp, GetReceipts69) } + EthMessage::Receipts70(resp) => { + on_response!(resp, GetReceipts70) + } EthMessage::BlockRangeUpdate(msg) => { // Validate that earliest <= latest according to the spec if msg.earliest > msg.latest { @@ -321,9 +317,9 @@ impl ActiveSession { /// Handle an internal peer request that will be sent to the remote. fn on_internal_peer_request(&mut self, request: PeerRequest, deadline: Instant) { let request_id = self.next_id(); - trace!(?request, peer_id=?self.remote_peer_id, ?request_id, "sending request to peer"); - let msg = request.create_request_message(request_id); + let msg = request.create_request_message(request_id).map_versioned(self.conn.version()); + self.queued_outgoing.push_back(msg.into()); let req = InflightRequest { request: RequestState::Waiting(request), diff --git a/crates/net/network/src/session/types.rs b/crates/net/network/src/session/types.rs index b73bfe3b99..02297c2146 100644 --- a/crates/net/network/src/session/types.rs +++ b/crates/net/network/src/session/types.rs @@ -11,7 +11,7 @@ use std::{ }, }; -/// Information about the range of blocks available from a peer. +/// Information about the range of full blocks available from a peer. /// /// This represents the announced `eth69` /// [`BlockRangeUpdate`] of a peer. @@ -45,12 +45,12 @@ impl BlockRangeInfo { RangeInclusive::new(earliest, latest) } - /// Returns the earliest block number available from the peer. + /// Returns the earliest full block number available from the peer. pub fn earliest(&self) -> u64 { self.inner.earliest.load(Ordering::Relaxed) } - /// Returns the latest block number available from the peer. + /// Returns the latest full block number available from the peer. pub fn latest(&self) -> u64 { self.inner.latest.load(Ordering::Relaxed) } @@ -60,6 +60,11 @@ impl BlockRangeInfo { *self.inner.latest_hash.read() } + /// Returns true if the peer has the full history available. + pub fn has_full_history(&self) -> bool { + self.earliest() == 0 + } + /// Updates the range information. pub fn update(&self, earliest: u64, latest: u64, latest_hash: B256) { self.inner.earliest.store(earliest, Ordering::Relaxed); diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 57d1a73198..84a3e86489 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -161,6 +161,7 @@ impl NetworkState { peer, status.blockhash, block_number, + Arc::clone(&capabilities), timeout, range_info, ); @@ -305,7 +306,7 @@ impl NetworkState { /// Adds a peer and its address with the given kind to the peerset. 
pub(crate) fn add_peer_kind(&mut self, peer_id: PeerId, kind: PeerKind, addr: PeerAddr) { - self.peers_manager.add_peer_kind(peer_id, kind, addr, None) + self.peers_manager.add_peer_kind(peer_id, Some(kind), addr, None) } /// Connects a peer and its address with the given kind diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index d246689954..aae1f7708e 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -399,13 +399,7 @@ pub struct Peer { #[pin] request_handler: Option>, #[pin] - transactions_manager: Option< - TransactionsManager< - Pool, - EthNetworkPrimitives, - NetworkPolicies, - >, - >, + transactions_manager: Option>, pool: Option, client: C, secret_key: SecretKey, diff --git a/crates/net/network/src/transactions/config.rs b/crates/net/network/src/transactions/config.rs index c34bbecd77..f6b76908df 100644 --- a/crates/net/network/src/transactions/config.rs +++ b/crates/net/network/src/transactions/config.rs @@ -1,4 +1,5 @@ -use std::{fmt::Debug, marker::PhantomData, str::FromStr}; +use core::fmt; +use std::{fmt::Debug, str::FromStr}; use super::{ PeerMetadata, DEFAULT_MAX_COUNT_TRANSACTIONS_SEEN_BY_PEER, @@ -9,11 +10,11 @@ use crate::transactions::constants::tx_fetcher::{ DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH, DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS, DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER, }; +use alloy_eips::eip2718::IsTyped2718; use alloy_primitives::B256; use derive_more::{Constructor, Display}; - use reth_eth_wire::NetworkPrimitives; -use reth_ethereum_primitives::TxType; +use reth_network_types::peers::kind::PeerKind; /// Configuration for managing transactions within the network. #[derive(Debug, Clone)] @@ -26,6 +27,9 @@ pub struct TransactionsManagerConfig { /// How new pending transactions are propagated. #[cfg_attr(feature = "serde", serde(default))] pub propagation_mode: TransactionPropagationMode, + /// Which peers we accept incoming transactions or announcements from. + #[cfg_attr(feature = "serde", serde(default))] + pub ingress_policy: TransactionIngressPolicy, } impl Default for TransactionsManagerConfig { @@ -34,6 +38,7 @@ impl Default for TransactionsManagerConfig { transaction_fetcher_config: TransactionFetcherConfig::default(), max_transactions_seen_by_peer_history: DEFAULT_MAX_COUNT_TRANSACTIONS_SEEN_BY_PEER, propagation_mode: TransactionPropagationMode::default(), + ingress_policy: TransactionIngressPolicy::default(), } } } @@ -122,17 +127,19 @@ impl Default for TransactionFetcherConfig { } /// A policy defining which peers pending transactions are gossiped to. -pub trait TransactionPropagationPolicy: Send + Sync + Unpin + 'static { +pub trait TransactionPropagationPolicy: + Send + Sync + Unpin + fmt::Debug + 'static +{ /// Filter a given peer based on the policy. /// /// This determines whether transactions can be propagated to this peer. - fn can_propagate(&self, peer: &mut PeerMetadata) -> bool; + fn can_propagate(&self, peer: &mut PeerMetadata) -> bool; /// A callback on the policy when a new peer session is established. - fn on_session_established(&mut self, peer: &mut PeerMetadata); + fn on_session_established(&mut self, peer: &mut PeerMetadata); /// A callback on the policy when a peer session is closed. - fn on_session_closed(&mut self, peer: &mut PeerMetadata); + fn on_session_closed(&mut self, peer: &mut PeerMetadata); } /// Determines which peers pending transactions are propagated to. 
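With the new `ingress_policy` field on `TransactionsManagerConfig`, restricting which peers may feed the pool becomes a one-line config change. A minimal sketch mirroring the integration test later in this diff; the import path follows the `reth_network::transactions::config` re-exports used there:

use reth_network::transactions::config::{TransactionIngressPolicy, TransactionsManagerConfig};

fn trusted_only_ingress() -> TransactionsManagerConfig {
    TransactionsManagerConfig {
        // drop full transactions and announcements coming from non-trusted peers;
        // outgoing propagation is governed separately by the propagation policy
        ingress_policy: TransactionIngressPolicy::Trusted,
        ..Default::default()
    }
}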
@@ -150,8 +157,8 @@ pub enum TransactionPropagationKind { None, } -impl TransactionPropagationPolicy for TransactionPropagationKind { - fn can_propagate(&self, peer: &mut PeerMetadata) -> bool { +impl TransactionPropagationPolicy for TransactionPropagationKind { + fn can_propagate(&self, peer: &mut PeerMetadata) -> bool { match self { Self::All => true, Self::Trusted => peer.peer_kind.is_trusted(), @@ -159,9 +166,9 @@ impl TransactionPropagationPolicy for TransactionPropagationKind { } } - fn on_session_established(&mut self, _peer: &mut PeerMetadata) {} + fn on_session_established(&mut self, _peer: &mut PeerMetadata) {} - fn on_session_closed(&mut self, _peer: &mut PeerMetadata) {} + fn on_session_closed(&mut self, _peer: &mut PeerMetadata) {} } impl FromStr for TransactionPropagationKind { @@ -177,6 +184,48 @@ impl FromStr for TransactionPropagationKind { } } +/// Determines which peers we will accept incoming transactions or announcements from. +#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Display)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub enum TransactionIngressPolicy { + /// Accept transactions from any peer. + #[default] + All, + /// Accept transactions only from trusted peers. + Trusted, + /// Drop all incoming transactions. + None, +} + +impl TransactionIngressPolicy { + /// Returns true if the ingress policy allows the provided peer kind. + pub const fn allows(&self, peer_kind: PeerKind) -> bool { + match self { + Self::All => true, + Self::Trusted => peer_kind.is_trusted(), + Self::None => false, + } + } + + /// Returns true if the ingress policy accepts transactions from any peer. + pub const fn allows_all(&self) -> bool { + matches!(self, Self::All) + } +} + +impl FromStr for TransactionIngressPolicy { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "All" | "all" => Ok(Self::All), + "Trusted" | "trusted" => Ok(Self::Trusted), + "None" | "none" => Ok(Self::None), + _ => Err(format!("Invalid transaction ingress policy: {s}")), + } + } +} + /// Defines the outcome of evaluating a transaction against an `AnnouncementFilteringPolicy`. /// /// Dictates how the `TransactionManager` should proceed on an announced transaction. @@ -195,87 +244,65 @@ pub enum AnnouncementAcceptance { /// A policy that defines how to handle incoming transaction announcements, /// particularly concerning transaction types and other announcement metadata. -pub trait AnnouncementFilteringPolicy: Send + Sync + Unpin + 'static { +pub trait AnnouncementFilteringPolicy: + Send + Sync + Unpin + fmt::Debug + 'static +{ /// Decides how to handle a transaction announcement based on its type, hash, and size. fn decide_on_announcement(&self, ty: u8, hash: &B256, size: usize) -> AnnouncementAcceptance; } /// A generic `AnnouncementFilteringPolicy` that enforces strict validation /// of transaction type based on a generic type `T`. 
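The `FromStr` impl above only accepts the lowercase or capitalized variant names, and `allows` keys off the peer's `PeerKind`. A small sketch; the `PeerKind` import path matches the one added to config.rs in this diff:

use reth_network::transactions::config::TransactionIngressPolicy;
use reth_network_types::peers::kind::PeerKind;

fn ingress_policy_demo() {
    let policy: TransactionIngressPolicy = "trusted".parse().expect("valid policy name");
    assert!(policy.allows(PeerKind::Trusted));
    assert!(!policy.allows(PeerKind::Basic));
    assert!(!policy.allows_all());

    // unknown names are rejected with a descriptive error
    assert!("quarantined".parse::<TransactionIngressPolicy>().is_err());
}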
-#[derive(Debug, Clone)] -pub struct TypedStrictFilter + Debug + Send + Sync + 'static>(PhantomData); +#[derive(Debug, Clone, Default)] +#[non_exhaustive] +pub struct TypedStrictFilter; -impl + Debug + Send + Sync + 'static> Default for TypedStrictFilter { - fn default() -> Self { - Self(PhantomData) - } -} - -impl AnnouncementFilteringPolicy for TypedStrictFilter -where - T: TryFrom + Debug + Send + Sync + Unpin + 'static, - >::Error: Debug, -{ +impl AnnouncementFilteringPolicy for TypedStrictFilter { fn decide_on_announcement(&self, ty: u8, hash: &B256, size: usize) -> AnnouncementAcceptance { - match T::try_from(ty) { - Ok(_valid_type) => AnnouncementAcceptance::Accept, - Err(e) => { - tracing::trace!(target: "net::tx::policy::strict_typed", - type_param = %std::any::type_name::(), - %ty, - %size, - %hash, - error = ?e, - "Invalid or unrecognized transaction type byte. Rejecting entry and recommending peer penalization." - ); - AnnouncementAcceptance::Reject { penalize_peer: true } - } + if N::PooledTransaction::is_type(ty) { + AnnouncementAcceptance::Accept + } else { + tracing::trace!(target: "net::tx::policy::strict_typed", + %ty, + %size, + %hash, + "Invalid or unrecognized transaction type byte. Rejecting entry and recommending peer penalization." + ); + AnnouncementAcceptance::Reject { penalize_peer: true } } } } /// Type alias for a `TypedStrictFilter`. This is the default strict announcement filter. -pub type StrictEthAnnouncementFilter = TypedStrictFilter; +pub type StrictEthAnnouncementFilter = TypedStrictFilter; /// An [`AnnouncementFilteringPolicy`] that permissively handles unknown type bytes /// based on a given type `T` using `T::try_from(u8)`. /// /// If `T::try_from(ty)` succeeds, the announcement is accepted. Otherwise, it's ignored. -#[derive(Debug, Clone)] -pub struct TypedRelaxedFilter + Debug + Send + Sync + 'static>(PhantomData); +#[derive(Debug, Clone, Default)] +#[non_exhaustive] +pub struct TypedRelaxedFilter; -impl + Debug + Send + Sync + 'static> Default for TypedRelaxedFilter { - fn default() -> Self { - Self(PhantomData) - } -} - -impl AnnouncementFilteringPolicy for TypedRelaxedFilter -where - T: TryFrom + Debug + Send + Sync + Unpin + 'static, - >::Error: Debug, -{ +impl AnnouncementFilteringPolicy for TypedRelaxedFilter { fn decide_on_announcement(&self, ty: u8, hash: &B256, size: usize) -> AnnouncementAcceptance { - match T::try_from(ty) { - Ok(_valid_type) => AnnouncementAcceptance::Accept, - Err(e) => { - tracing::trace!(target: "net::tx::policy::relaxed_typed", - type_param = %std::any::type_name::(), - %ty, - %size, - %hash, - error = ?e, - "Unknown transaction type byte. Ignoring entry." - ); - AnnouncementAcceptance::Ignore - } + if N::PooledTransaction::is_type(ty) { + AnnouncementAcceptance::Accept + } else { + tracing::trace!(target: "net::tx::policy::relaxed_typed", + %ty, + %size, + %hash, + "Unknown transaction type byte. Ignoring entry." + ); + AnnouncementAcceptance::Ignore } } } /// Type alias for `TypedRelaxedFilter`. This filter accepts known Ethereum transaction types and /// ignores unknown ones without penalizing the peer. 
-pub type RelaxedEthAnnouncementFilter = TypedRelaxedFilter; +pub type RelaxedEthAnnouncementFilter = TypedRelaxedFilter; #[cfg(test)] mod tests { diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index a112e8cac8..8237716a8b 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -423,7 +423,7 @@ impl TransactionFetcher { &mut self, peers: &HashMap>, has_capacity_wrt_pending_pool_imports: impl Fn(usize) -> bool, - ) { + ) -> bool { let mut hashes_to_request = RequestTxHashes::with_capacity( DEFAULT_MARGINAL_COUNT_HASHES_GET_POOLED_TRANSACTIONS_REQUEST, ); @@ -440,7 +440,7 @@ impl TransactionFetcher { budget_find_idle_fallback_peer, ) else { // no peers are idle or budget is depleted - return + return false }; peer_id @@ -449,7 +449,7 @@ impl TransactionFetcher { ); // peer should always exist since `is_session_active` already checked - let Some(peer) = peers.get(&peer_id) else { return }; + let Some(peer) = peers.get(&peer_id) else { return false }; let conn_eth_version = peer.version; // fill the request with more hashes pending fetch that have been announced by the peer. @@ -493,7 +493,10 @@ impl TransactionFetcher { ); self.buffer_hashes(failed_to_request_hashes, Some(peer_id)); + return false } + + true } /// Filters out hashes that have been seen before. For hashes that have already been seen, the diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index f4ef42523d..8200cfe8eb 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -1,6 +1,7 @@ //! Transactions management for the p2p network. use alloy_consensus::transaction::TxHashRef; +use rayon::iter::{IntoParallelIterator, ParallelIterator}; /// Aggregation on configurable parameters for [`TransactionsManager`]. pub mod config; @@ -8,19 +9,19 @@ pub mod config; pub mod constants; /// Component responsible for fetching transactions from [`NewPooledTransactionHashes`]. pub mod fetcher; -/// Defines the [`TransactionPolicies`] trait for aggregating transaction-related policies. +/// Defines the traits for transaction-related policies. 
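Because the announcement filter is now held behind a boxed trait object (see the policy.rs changes later in this diff) and, as the reworked impls above suggest, the trait is generic over the network primitives, a custom filter is only a few lines. A hedged sketch; the exact `N: NetworkPrimitives` bound is inferred since the generics are not fully visible here, and `MaxSizeFilter` is purely illustrative:

use alloy_primitives::B256;
use reth_eth_wire::NetworkPrimitives;
use reth_network::transactions::config::{AnnouncementAcceptance, AnnouncementFilteringPolicy};

/// Ignores (without penalizing the peer) any announced transaction above a size cap.
#[derive(Debug, Clone, Default)]
struct MaxSizeFilter {
    max_size: usize,
}

impl<N: NetworkPrimitives> AnnouncementFilteringPolicy<N> for MaxSizeFilter {
    fn decide_on_announcement(&self, _ty: u8, _hash: &B256, size: usize) -> AnnouncementAcceptance {
        if size <= self.max_size {
            AnnouncementAcceptance::Accept
        } else {
            AnnouncementAcceptance::Ignore
        }
    }
}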
pub mod policy; pub use self::constants::{ tx_fetcher::DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, }; -use config::{AnnouncementAcceptance, StrictEthAnnouncementFilter, TransactionPropagationKind}; +use config::AnnouncementAcceptance; pub use config::{ - AnnouncementFilteringPolicy, TransactionFetcherConfig, TransactionPropagationMode, - TransactionPropagationPolicy, TransactionsManagerConfig, + AnnouncementFilteringPolicy, TransactionFetcherConfig, TransactionIngressPolicy, + TransactionPropagationMode, TransactionPropagationPolicy, TransactionsManagerConfig, }; -use policy::{NetworkPolicies, TransactionPolicies}; +use policy::NetworkPolicies; pub(crate) use fetcher::{FetchEvent, TransactionFetcher}; @@ -35,6 +36,7 @@ use crate::{ metrics::{ AnnouncedTxTypesMetrics, TransactionsManagerMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE, }, + transactions::config::{StrictEthAnnouncementFilter, TransactionPropagationKind}, NetworkHandle, TxTypesCounter, }; use alloy_primitives::{TxHash, B256}; @@ -98,8 +100,6 @@ pub struct TransactionsHandle { manager_tx: mpsc::UnboundedSender>, } -/// Implementation of the `TransactionsHandle` API for use in testnet via type -/// [`PeerHandle`](crate::test_utils::PeerHandle). impl TransactionsHandle { fn send(&self, cmd: TransactionsCommand) { let _ = self.manager_tx.send(cmd); @@ -243,7 +243,7 @@ impl TransactionsHandle { /// propagate new transactions over the network. /// /// It can be configured with different policies for transaction propagation and announcement -/// filtering. See [`NetworkPolicies`] and [`TransactionPolicies`] for more details. +/// filtering. See [`NetworkPolicies`] for more details. /// /// ## Network Transaction Processing /// @@ -280,14 +280,7 @@ impl TransactionsHandle { /// Rate limiting via reputation, bad transaction isolation, peer scoring. #[derive(Debug)] #[must_use = "Manager does nothing unless polled."] -pub struct TransactionsManager< - Pool, - N: NetworkPrimitives = EthNetworkPrimitives, - PBundle: TransactionPolicies = NetworkPolicies< - TransactionPropagationKind, - StrictEthAnnouncementFilter, - >, -> { +pub struct TransactionsManager { /// Access to the transaction pool. pool: Pool, /// Network access. @@ -344,20 +337,14 @@ pub struct TransactionsManager< /// How the `TransactionsManager` is configured. config: TransactionsManagerConfig, /// Network Policies - policies: PBundle, + policies: NetworkPolicies, /// `TransactionsManager` metrics metrics: TransactionsManagerMetrics, /// `AnnouncedTxTypes` metrics announced_tx_types_metrics: AnnouncedTxTypesMetrics, } -impl - TransactionsManager< - Pool, - N, - NetworkPolicies, - > -{ +impl TransactionsManager { /// Sets up a new instance. /// /// Note: This expects an existing [`NetworkManager`](crate::NetworkManager) instance. @@ -372,14 +359,15 @@ impl pool, from_network, transactions_manager_config, - NetworkPolicies::default(), + NetworkPolicies::new( + TransactionPropagationKind::default(), + StrictEthAnnouncementFilter::default(), + ), ) } } -impl - TransactionsManager -{ +impl TransactionsManager { /// Sets up a new instance with given the settings. /// /// Note: This expects an existing [`NetworkManager`](crate::NetworkManager) instance. 
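Since the manager now stores a concrete `NetworkPolicies` bundle instead of a `PBundle` type parameter, callers build the bundle once and hand it to `with_policy`. A hedged sketch of the construction, assuming `NetworkPolicies` stays generic over the network primitives as the boxed trait objects in policy.rs later in this diff suggest:

use reth_eth_wire::EthNetworkPrimitives;
use reth_network::transactions::{
    config::{StrictEthAnnouncementFilter, TransactionPropagationKind},
    policy::NetworkPolicies,
};

fn trusted_propagation_policies() -> NetworkPolicies<EthNetworkPrimitives> {
    // propagate pending transactions to trusted peers only and keep the strict
    // (type-checked) announcement filter that `TransactionsManager::new` also uses
    NetworkPolicies::new(
        TransactionPropagationKind::Trusted,
        StrictEthAnnouncementFilter::default(),
    )
}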
@@ -388,7 +376,7 @@ impl pool: Pool, from_network: mpsc::UnboundedReceiver>, transactions_manager_config: TransactionsManagerConfig, - policies: PBundle, + policies: NetworkPolicies, ) -> Self { let network_events = network.event_listener(); @@ -508,7 +496,9 @@ impl } /// Runs an operation to fetch hashes that are cached in [`TransactionFetcher`]. - fn on_fetch_hashes_pending_fetch(&mut self) { + /// + /// Returns `true` if a request was sent. + fn on_fetch_hashes_pending_fetch(&mut self) -> bool { // try drain transaction hashes pending fetch let info = &self.pending_pool_imports_info; let max_pending_pool_imports = info.max_pending_pool_imports; @@ -516,7 +506,7 @@ impl |divisor| info.has_capacity(max_pending_pool_imports / divisor); self.transaction_fetcher - .on_fetch_pending_hashes(&self.peers, has_capacity_wrt_pending_pool_imports); + .on_fetch_pending_hashes(&self.peers, has_capacity_wrt_pending_pool_imports) } fn on_request_error(&self, peer_id: PeerId, req_err: RequestError) { @@ -559,9 +549,7 @@ impl } } -impl - TransactionsManager -{ +impl TransactionsManager { /// Processes a batch import results. fn on_batch_import_result(&mut self, batch_results: Vec>) { for res in batch_results { @@ -828,16 +816,13 @@ impl } } -impl TransactionsManager +impl TransactionsManager where Pool: TransactionPool + Unpin + 'static, - N: NetworkPrimitives< BroadcastedTransaction: SignedTransaction, PooledTransaction: SignedTransaction, > + Unpin, - - PBundle: TransactionPolicies, Pool::Transaction: PoolTransaction, { @@ -875,9 +860,8 @@ where peer_id: PeerId, propagation_mode: PropagationMode, ) -> Option { - trace!(target: "net::tx", ?peer_id, "Propagating transactions to peer"); - let peer = self.peers.get_mut(&peer_id)?; + trace!(target: "net::tx", ?peer_id, "Propagating transactions to peer"); let mut propagated = PropagatedTransactions::default(); // filter all transactions unknown to the peer @@ -1282,10 +1266,25 @@ where } } + /// Returns true if the ingress policy allows processing messages from the given peer. + fn accepts_incoming_from(&self, peer_id: &PeerId) -> bool { + if self.config.ingress_policy.allows_all() { + return true; + } + let Some(peer) = self.peers.get(peer_id) else { + return false; + }; + self.config.ingress_policy.allows(peer.peer_kind()) + } + /// Handles dedicated transaction events related to the `eth` protocol. 
fn on_network_tx_event(&mut self, event: NetworkTransactionEvent) { match event { NetworkTransactionEvent::IncomingTransactions { peer_id, msg } => { + if !self.accepts_incoming_from(&peer_id) { + trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), policy=?self.config.ingress_policy, "Ignoring full transactions from peer blocked by ingress policy"); + return; + } // ensure we didn't receive any blob transactions as these are disallowed to be // broadcasted in full @@ -1306,6 +1305,10 @@ where } } NetworkTransactionEvent::IncomingPooledTransactionHashes { peer_id, msg } => { + if !self.accepts_incoming_from(&peer_id) { + trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), policy=?self.config.ingress_policy, "Ignoring transaction hashes from peer blocked by ingress policy"); + return; + } self.on_new_pooled_transaction_hashes(peer_id, msg) } NetworkTransactionEvent::GetPooledTransactions { peer_id, request, response } => { @@ -1335,6 +1338,8 @@ where let Some(peer) = self.peers.get_mut(&peer_id) else { return }; let mut transactions = transactions.0; + let start = Instant::now(); + // mark the transactions as received self.transaction_fetcher .remove_hashes_from_transaction_fetcher(transactions.iter().map(|tx| tx.tx_hash())); @@ -1363,13 +1368,31 @@ where // tracks the quality of the given transactions let mut has_bad_transactions = false; - // 2. filter out transactions that are invalid or already pending import pre-size to avoid - // reallocations - let mut new_txs = Vec::with_capacity(transactions.len()); - for tx in transactions { - // recover transaction - let tx = match tx.try_into_recovered() { - Ok(tx) => tx, + // Remove known and invalid transactions + transactions.retain(|tx| { + if let Entry::Occupied(mut entry) = self.transactions_by_peers.entry(*tx.tx_hash()) { + entry.get_mut().insert(peer_id); + return false + } + if self.bad_imports.contains(tx.tx_hash()) { + trace!(target: "net::tx", + peer_id=format!("{peer_id:#}"), + hash=%tx.tx_hash(), + client_version=%peer.client_version, + "received a known bad transaction from peer" + ); + has_bad_transactions = true; + return false; + } + true + }); + + let txs_len = transactions.len(); + + let new_txs = transactions + .into_par_iter() + .filter_map(|tx| match tx.try_into_recovered() { + Ok(tx) => Some(Pool::Transaction::from_pooled(tx)), Err(badtx) => { trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), @@ -1377,37 +1400,17 @@ where client_version=%peer.client_version, "failed ecrecovery for transaction" ); - has_bad_transactions = true; - continue + None } - }; + }) + .collect::>(); - match self.transactions_by_peers.entry(*tx.tx_hash()) { - Entry::Occupied(mut entry) => { - // transaction was already inserted - entry.get_mut().insert(peer_id); - } - Entry::Vacant(entry) => { - if self.bad_imports.contains(tx.tx_hash()) { - trace!(target: "net::tx", - peer_id=format!("{peer_id:#}"), - hash=%tx.tx_hash(), - client_version=%peer.client_version, - "received a known bad transaction from peer" - ); - has_bad_transactions = true; - } else { - // this is a new transaction that should be imported into the pool + has_bad_transactions |= new_txs.len() != txs_len; - let pool_transaction = Pool::Transaction::from_pooled(tx); - new_txs.push(pool_transaction); - - entry.insert(HashSet::from([peer_id])); - } - } - } + // Record the transactions as seen by the peer + for tx in &new_txs { + self.transactions_by_peers.insert(*tx.hash(), HashSet::from([peer_id])); } - new_txs.shrink_to_fit(); // 3. 
import new transactions as a batch to minimize lock contention on the underlying // pool @@ -1456,6 +1459,8 @@ where if num_already_seen_by_peer > 0 { self.report_already_seen(peer_id); } + + self.metrics.pool_import_prepare_duration.record(start.elapsed()); } /// Processes a [`FetchEvent`]. @@ -1491,8 +1496,7 @@ impl< BroadcastedTransaction: SignedTransaction, PooledTransaction: SignedTransaction, > + Unpin, - PBundle: TransactionPolicies + Unpin, - > Future for TransactionsManager + > Future for TransactionsManager where Pool::Transaction: PoolTransaction, @@ -1534,7 +1538,19 @@ where SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE, ) { Poll::Ready(count) => { - count == SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE + if count == SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE { + // we filled the entire buffer capacity and need to try again on the next poll + // immediately + true + } else { + // try once more, because mostlikely the channel is now empty and the waker is + // registered if this is pending, if we filled additional hashes, we poll again + // on the next iteration + let limit = + SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE - + new_txs.len(); + this.pending_transactions.poll_recv_many(cx, &mut new_txs, limit).is_ready() + } } Poll::Pending => false, }; @@ -1542,25 +1558,6 @@ where this.on_new_pending_transactions(new_txs); } - // Advance inflight fetch requests (flush transaction fetcher and queue for - // import to pool). - // - // The smallest decodable transaction is an empty legacy transaction, 10 bytes - // (2 MiB / 10 bytes > 200k transactions). - // - // Since transactions aren't validated until they are inserted into the pool, - // this can potentially queue >200k transactions for insertion to pool. More - // if the message size is bigger than the soft limit on a `PooledTransactions` - // response which is 2 MiB. - let maybe_more_tx_fetch_events = metered_poll_nested_stream_with_budget!( - poll_durations.acc_fetch_events, - "net::tx", - "Transaction fetch events stream", - DEFAULT_BUDGET_TRY_DRAIN_STREAM, - this.transaction_fetcher.poll_next_unpin(cx), - |event| this.on_fetch_event(event), - ); - // Advance incoming transaction events (stream new txns/announcements from // network manager and queue for import to pool/fetch txns). // @@ -1584,6 +1581,25 @@ where |event| this.on_network_tx_event(event), ); + // Advance inflight fetch requests (flush transaction fetcher and queue for + // import to pool). + // + // The smallest decodable transaction is an empty legacy transaction, 10 bytes + // (2 MiB / 10 bytes > 200k transactions). + // + // Since transactions aren't validated until they are inserted into the pool, + // this can potentially queue >200k transactions for insertion to pool. More + // if the message size is bigger than the soft limit on a `PooledTransactions` + // response which is 2 MiB. + let mut maybe_more_tx_fetch_events = metered_poll_nested_stream_with_budget!( + poll_durations.acc_fetch_events, + "net::tx", + "Transaction fetch events stream", + DEFAULT_BUDGET_TRY_DRAIN_STREAM, + this.transaction_fetcher.poll_next_unpin(cx), + |event| this.on_fetch_event(event), + ); + // Advance pool imports (flush txns to pool). // // Note, this is done in batches. A batch is filled from one `Transactions` @@ -1613,8 +1629,10 @@ where // Sends at most one request. 
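The import path now recovers senders in parallel with rayon before touching any manager state, which is what the new `pool_import_prepare_duration` histogram measures. A standalone, hedged sketch of the same pattern for any `SignedTransaction`; the `Recovered` and trait import paths are assumptions:

use rayon::iter::{IntoParallelIterator, ParallelIterator};
use reth_primitives_traits::{Recovered, SignedTransaction};

/// Recovers the signer of every transaction in parallel, dropping entries whose
/// signature fails to recover (the manager counts those as bad transactions).
fn recover_senders<T: SignedTransaction>(txs: Vec<T>) -> Vec<Recovered<T>> {
    txs.into_par_iter().filter_map(|tx| tx.try_into_recovered().ok()).collect()
}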
duration_metered_exec!( { - if this.has_capacity_for_fetching_pending_hashes() { - this.on_fetch_hashes_pending_fetch(); + if this.has_capacity_for_fetching_pending_hashes() && + this.on_fetch_hashes_pending_fetch() + { + maybe_more_tx_fetch_events = true; } }, poll_durations.acc_pending_fetch @@ -1905,7 +1923,9 @@ impl PooledTransactionsHashesBuilder { fn new(version: EthVersion) -> Self { match version { EthVersion::Eth66 | EthVersion::Eth67 => Self::Eth66(Default::default()), - EthVersion::Eth68 | EthVersion::Eth69 => Self::Eth68(Default::default()), + EthVersion::Eth68 | EthVersion::Eth69 | EthVersion::Eth70 => { + Self::Eth68(Default::default()) + } } } @@ -2896,11 +2916,7 @@ mod tests { let network_handle = network_manager.handle().clone(); let network_service_handle = tokio::spawn(network_manager); - let mut tx_manager = TransactionsManager::< - TestPool, - EthNetworkPrimitives, - NetworkPolicies, - >::with_policy( + let mut tx_manager = TransactionsManager::::with_policy( network_handle.clone(), pool.clone(), from_network_rx, diff --git a/crates/net/network/src/transactions/policy.rs b/crates/net/network/src/transactions/policy.rs index c25b9d9b41..0fbadae037 100644 --- a/crates/net/network/src/transactions/policy.rs +++ b/crates/net/network/src/transactions/policy.rs @@ -1,78 +1,49 @@ use crate::transactions::config::{AnnouncementFilteringPolicy, TransactionPropagationPolicy}; +use reth_eth_wire::NetworkPrimitives; use std::fmt::Debug; -/// A bundle of policies that control the behavior of network components like -/// the [`TransactionsManager`](super::TransactionsManager). -/// -/// This trait allows for different collections of policies to be used interchangeably. -pub trait TransactionPolicies: Send + Sync + Debug + 'static { - /// The type of the policy used for transaction propagation. - type Propagation: TransactionPropagationPolicy; - /// The type of the policy used for filtering transaction announcements. - type Announcement: AnnouncementFilteringPolicy; - - /// Returns a reference to the transaction propagation policy. - fn propagation_policy(&self) -> &Self::Propagation; - - /// Returns a mutable reference to the transaction propagation policy. - fn propagation_policy_mut(&mut self) -> &mut Self::Propagation; - - /// Returns a reference to the announcement filtering policy. - fn announcement_filter(&self) -> &Self::Announcement; -} - /// A container that bundles specific implementations of transaction-related policies, /// -/// This struct implements the [`TransactionPolicies`] trait, providing a complete set of -/// policies required by components like the [`TransactionsManager`](super::TransactionsManager). -/// It holds a specific [`TransactionPropagationPolicy`] and an -/// [`AnnouncementFilteringPolicy`]. -#[derive(Debug, Clone, Default)] -pub struct NetworkPolicies { - propagation: P, - announcement: A, +/// This struct provides a complete set of policies required by components like the +/// [`TransactionsManager`](super::TransactionsManager). It holds a specific +/// [`TransactionPropagationPolicy`] and an [`AnnouncementFilteringPolicy`]. +#[derive(Debug)] +pub struct NetworkPolicies { + propagation: Box>, + announcement: Box>, } -impl NetworkPolicies { +impl NetworkPolicies { /// Creates a new bundle of network policies. 
- pub const fn new(propagation: P, announcement: A) -> Self { - Self { propagation, announcement } + pub fn new( + propagation: impl TransactionPropagationPolicy, + announcement: impl AnnouncementFilteringPolicy, + ) -> Self { + Self { propagation: Box::new(propagation), announcement: Box::new(announcement) } } /// Returns a new `NetworkPolicies` bundle with the `TransactionPropagationPolicy` replaced. - pub fn with_propagation(self, new_propagation: NewP) -> NetworkPolicies - where - NewP: TransactionPropagationPolicy, - { - NetworkPolicies::new(new_propagation, self.announcement) + pub fn with_propagation(self, new_propagation: impl TransactionPropagationPolicy) -> Self { + Self { propagation: Box::new(new_propagation), announcement: self.announcement } } /// Returns a new `NetworkPolicies` bundle with the `AnnouncementFilteringPolicy` replaced. - pub fn with_announcement(self, new_announcement: NewA) -> NetworkPolicies - where - NewA: AnnouncementFilteringPolicy, - { - NetworkPolicies::new(self.propagation, new_announcement) - } -} - -impl TransactionPolicies for NetworkPolicies -where - P: TransactionPropagationPolicy + Debug, - A: AnnouncementFilteringPolicy + Debug, -{ - type Propagation = P; - type Announcement = A; - - fn propagation_policy(&self) -> &Self::Propagation { - &self.propagation - } - - fn propagation_policy_mut(&mut self) -> &mut Self::Propagation { - &mut self.propagation - } - - fn announcement_filter(&self) -> &Self::Announcement { - &self.announcement + pub fn with_announcement(self, new_announcement: impl AnnouncementFilteringPolicy) -> Self { + Self { propagation: self.propagation, announcement: Box::new(new_announcement) } + } + + /// Returns a reference to the transaction propagation policy. + pub fn propagation_policy(&self) -> &dyn TransactionPropagationPolicy { + &*self.propagation + } + + /// Returns a mutable reference to the transaction propagation policy. + pub fn propagation_policy_mut(&mut self) -> &mut dyn TransactionPropagationPolicy { + &mut *self.propagation + } + + /// Returns a reference to the announcement filtering policy. 
+ pub fn announcement_filter(&self) -> &dyn AnnouncementFilteringPolicy { + &*self.announcement } } diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index 1a3371a907..d11c6b9541 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -736,3 +736,51 @@ async fn test_connect_peer_in_different_network_should_fail() { let removed_peer_id = event_stream.peer_removed().await.unwrap(); assert_eq!(removed_peer_id, *peer_handle.peer_id()); } + +#[tokio::test(flavor = "multi_thread")] +async fn test_reconnect_trusted() { + reth_tracing::init_test_tracing(); + + let net = Testnet::create(2).await; + + let mut handles = net.handles(); + let handle0 = handles.next().unwrap(); + let handle1 = handles.next().unwrap(); + + drop(handles); + let _handle = net.spawn(); + + let mut listener0 = NetworkEventStream::new(handle0.event_listener()); + + // Connect the two peers + handle0.add_peer(*handle1.peer_id(), handle1.local_addr()); + handle1.add_peer(*handle0.peer_id(), handle0.local_addr()); + let peer = listener0.next_session_established().await.unwrap(); + assert_eq!(peer, *handle1.peer_id()); + assert_eq!(handle0.num_connected_peers(), 1); + + // Add handle1 as a trusted peer + handle0.add_trusted_peer(*handle1.peer_id(), handle1.local_addr()); + + // Trigger disconnect from handle0 + handle0.disconnect_peer(*handle1.peer_id()); + + // Wait for the session to close + let (peer, reason) = listener0.next_session_closed().await.unwrap(); + assert_eq!(peer, *handle1.peer_id()); + assert_eq!(handle0.num_connected_peers(), 0); + println!("Disconnect reason: {:?}", reason); + + // Await that handle1 (trusted peer) reconnects automatically + let reconnect_result = + tokio::time::timeout(Duration::from_secs(60), listener0.next_session_established()).await; + + match reconnect_result { + Ok(Some(peer)) => { + assert_eq!(peer, *handle1.peer_id()); + assert_eq!(handle0.num_connected_peers(), 1); + } + Ok(None) => panic!("Event stream ended without reconnection"), + Err(_) => panic!("Trusted peer did not reconnect in time"), + } +} diff --git a/crates/net/network/tests/it/txgossip.rs b/crates/net/network/tests/it/txgossip.rs index ed1c2f925d..d0f192cff5 100644 --- a/crates/net/network/tests/it/txgossip.rs +++ b/crates/net/network/tests/it/txgossip.rs @@ -5,7 +5,9 @@ use futures::StreamExt; use reth_ethereum_primitives::TransactionSigned; use reth_network::{ test_utils::{NetworkEventStream, Testnet}, - transactions::config::TransactionPropagationKind, + transactions::config::{ + TransactionIngressPolicy, TransactionPropagationKind, TransactionsManagerConfig, + }, NetworkEvent, NetworkEventListenerProvider, Peers, }; use reth_network_api::{events::PeerEvent, PeerKind, PeersInfo}; @@ -123,6 +125,73 @@ async fn test_tx_propagation_policy_trusted_only() { assert!(buff.contains(&outcome_1.hash)); } +#[tokio::test(flavor = "multi_thread")] +async fn test_tx_ingress_policy_trusted_only() { + reth_tracing::init_test_tracing(); + + let provider = MockEthProvider::default(); + + let tx_manager_config = TransactionsManagerConfig { + ingress_policy: TransactionIngressPolicy::Trusted, + ..Default::default() + }; + + let net = Testnet::create_with(2, provider.clone()).await; + let net = net.with_eth_pool_config(tx_manager_config); + + let handle = net.spawn(); + + // connect all the peers + handle.connect_peers().await; + + let peer_0_handle = &handle.peers()[0]; + let peer_1_handle = &handle.peers()[1]; + + let mut peer0_tx_listener = 
peer_0_handle.pool().unwrap().pending_transactions_listener(); + + let mut tx_gen = TransactionGenerator::new(rand::rng()); + let tx = tx_gen.gen_eip1559_pooled(); + + // ensure the sender has balance + let sender = tx.sender(); + provider.add_account(sender, ExtendedAccount::new(0, U256::from(100_000_000))); + + // insert the tx in peer1's pool + let outcome_0 = peer_1_handle.pool().unwrap().add_external_transaction(tx).await.unwrap(); + + // ensure tx is not accepted by peer0 + peer0_tx_listener.try_recv().expect_err("Empty"); + + let mut event_stream_0 = NetworkEventStream::new(peer_0_handle.network().event_listener()); + let mut event_stream_1 = NetworkEventStream::new(peer_1_handle.network().event_listener()); + + // disconnect peer1 from peer0 + peer_0_handle.network().remove_peer(*peer_1_handle.peer_id(), PeerKind::Static); + join!(event_stream_0.next_session_closed(), event_stream_1.next_session_closed()); + + // re register peer1 as trusted + peer_0_handle.network().add_trusted_peer(*peer_1_handle.peer_id(), peer_1_handle.local_addr()); + join!(event_stream_0.next_session_established(), event_stream_1.next_session_established()); + + let mut tx_gen = TransactionGenerator::new(rand::rng()); + let tx = tx_gen.gen_eip1559_pooled(); + + // ensure the sender has balance + let sender = tx.sender(); + provider.add_account(sender, ExtendedAccount::new(0, U256::from(100_000_000))); + + // insert pending tx in peer1's pool + let outcome_1 = peer_1_handle.pool().unwrap().add_external_transaction(tx).await.unwrap(); + + // ensure peer0 now receives both pending txs from peer1 (the blocked one and the new one) + let mut buff = Vec::with_capacity(2); + buff.push(peer0_tx_listener.recv().await.unwrap()); + buff.push(peer0_tx_listener.recv().await.unwrap()); + + assert!(buff.contains(&outcome_0.hash)); + assert!(buff.contains(&outcome_1.hash)); +} + #[tokio::test(flavor = "multi_thread")] async fn test_4844_tx_gossip_penalization() { reth_tracing::init_test_tracing(); diff --git a/crates/net/p2p/src/bodies/downloader.rs b/crates/net/p2p/src/bodies/downloader.rs index f11473daa9..db0a80c5eb 100644 --- a/crates/net/p2p/src/bodies/downloader.rs +++ b/crates/net/p2p/src/bodies/downloader.rs @@ -13,9 +13,7 @@ pub type BodyDownloaderResult = DownloadResult>>; /// A downloader represents a distinct strategy for submitting requests to download block bodies, /// while a [`BodiesClient`][crate::bodies::client::BodiesClient] represents a client capable of /// fulfilling these requests. 
-pub trait BodyDownloader: - Send + Sync + Stream> + Unpin -{ +pub trait BodyDownloader: Send + Stream> + Unpin { /// The Block type this downloader supports type Block: Block + 'static; diff --git a/crates/net/p2p/src/bodies/response.rs b/crates/net/p2p/src/bodies/response.rs index 772fe6cbbd..2c53a651aa 100644 --- a/crates/net/p2p/src/bodies/response.rs +++ b/crates/net/p2p/src/bodies/response.rs @@ -37,6 +37,14 @@ where Self::Empty(_) => None, } } + + /// Return the reference to the response body + pub const fn body(&self) -> Option<&B::Body> { + match self { + Self::Full(block) => Some(block.body()), + Self::Empty(_) => None, + } + } } impl InMemorySize for BlockResponse { diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index c1224d35e5..f61e940262 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -15,7 +15,6 @@ workspace = true ## reth reth-chain-state.workspace = true reth-chainspec.workspace = true -reth-cli-util.workspace = true reth-config.workspace = true reth-consensus-debug-client.workspace = true reth-consensus.workspace = true @@ -77,6 +76,7 @@ secp256k1 = { workspace = true, features = ["global-context", "std", "recovery"] ## misc aquamarine.workspace = true eyre.workspace = true +parking_lot.workspace = true jsonrpsee.workspace = true fdlimit.workspace = true rayon.workspace = true @@ -95,7 +95,11 @@ reth-evm-ethereum = { workspace = true, features = ["test-utils"] } [features] default = [] -js-tracer = ["reth-rpc/js-tracer"] +js-tracer = [ + "reth-rpc/js-tracer", + "reth-node-ethereum/js-tracer", + "reth-rpc-eth-types/js-tracer", +] test-utils = [ "reth-db/test-utils", "reth-chain-state/test-utils", diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 8f01f251b5..130e5eb675 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -12,7 +12,6 @@ use crate::{ use alloy_eips::eip4844::env_settings::EnvKzgSettings; use futures::Future; use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; -use reth_cli_util::get_secret_key; use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; use reth_exex::ExExContext; use reth_network::{ @@ -36,7 +35,7 @@ use reth_provider::{ use reth_tasks::TaskExecutor; use reth_transaction_pool::{PoolConfig, PoolTransaction, TransactionPool}; use secp256k1::SecretKey; -use std::{fmt::Debug, sync::Arc}; +use std::sync::Arc; use tracing::{info, trace, warn}; pub mod add_ons; @@ -331,6 +330,11 @@ impl WithLaunchContext> { pub const fn config(&self) -> &NodeConfig { self.builder.config() } + + /// Returns a mutable reference to the node builder's config. + pub const fn config_mut(&mut self) -> &mut NodeConfig { + self.builder.config_mut() + } } impl WithLaunchContext> @@ -452,6 +456,11 @@ where &self.builder.config } + /// Returns a mutable reference to the node builder's config. + pub const fn config_mut(&mut self) -> &mut NodeConfig<::ChainSpec> { + &mut self.builder.config + } + /// Returns a reference to node's database. pub const fn db(&self) -> &T::DB { &self.builder.adapter.database @@ -524,6 +533,27 @@ where } /// Modifies the addons with the given closure. + /// + /// This method provides access to methods on the addons type that don't have + /// direct builder methods. It's useful for advanced configuration scenarios + /// where you need to call addon-specific methods. 
+ /// + /// # Examples + /// + /// ```rust,ignore + /// use tower::layer::util::Identity; + /// + /// let builder = NodeBuilder::new(config) + /// .with_types::() + /// .with_components(EthereumNode::components()) + /// .with_add_ons(EthereumAddOns::default()) + /// .map_add_ons(|addons| addons.with_rpc_middleware(Identity::default())); + /// ``` + /// + /// # See also + /// + /// - [`NodeAddOns`] trait for available addon types + /// - [`crate::NodeBuilderWithComponents::extend_rpc_modules`] for RPC module configuration pub fn map_add_ons(self, f: F) -> Self where F: FnOnce(AO) -> AO, @@ -570,10 +600,10 @@ where /// .extend_rpc_modules(|ctx| { /// // Access node components, so they can used by the CustomApi /// let pool = ctx.pool().clone(); - /// + /// /// // Add custom RPC namespace /// ctx.modules.merge_configured(CustomApi { pool }.into_rpc())?; - /// + /// /// Ok(()) /// }) /// .build()?; @@ -729,6 +759,11 @@ impl BuilderContext { &self.config_container.config } + /// Returns a mutable reference to the config of the node. + pub const fn config_mut(&mut self) -> &mut NodeConfig<::ChainSpec> { + &mut self.config_container.config + } + /// Returns the loaded reh.toml config. pub const fn reth_config(&self) -> &reth_config::Config { &self.config_container.toml_config @@ -817,15 +852,15 @@ impl BuilderContext { > + Unpin + 'static, Node::Provider: BlockReaderFor, - Policy: TransactionPropagationPolicy + Debug, + Policy: TransactionPropagationPolicy, { let (handle, network, txpool, eth) = builder .transactions_with_policy(pool, tx_config, propagation_policy) .request_handler(self.provider().clone()) .split_with_handle(); - self.executor.spawn_critical("p2p txpool", Box::pin(txpool)); - self.executor.spawn_critical("p2p eth request handler", Box::pin(eth)); + self.executor.spawn_critical_blocking("p2p txpool", Box::pin(txpool)); + self.executor.spawn_critical_blocking("p2p eth request handler", Box::pin(eth)); let default_peers_path = self.config().datadir().known_peers(); let known_peers_file = self.config().network.persistent_peers_file(default_peers_path); @@ -854,9 +889,7 @@ impl BuilderContext { /// Get the network secret from the given data dir fn network_secret(&self, data_dir: &ChainPath) -> eyre::Result { - let network_secret_path = - self.config().network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); - let secret_key = get_secret_key(&network_secret_path)?; + let secret_key = self.config().network.secret_key(data_dir.p2p_secret())?; Ok(secret_key) } diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index f60b56d57e..21bd897a40 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -235,6 +235,27 @@ where } /// Modifies the addons with the given closure. + /// + /// This method provides access to methods on the addons type that don't have + /// direct builder methods. It's useful for advanced configuration scenarios + /// where you need to call addon-specific methods. 
+ /// + /// # Examples + /// + /// ```rust,ignore + /// use tower::layer::util::Identity; + /// + /// let builder = NodeBuilder::new(config) + /// .with_types::() + /// .with_components(EthereumNode::components()) + /// .with_add_ons(EthereumAddOns::default()) + /// .map_add_ons(|addons| addons.with_rpc_middleware(Identity::default())); + /// ``` + /// + /// # See also + /// + /// - [`NodeAddOns`] trait for available addon types + /// - [`crate::NodeBuilderWithComponents::extend_rpc_modules`] for RPC module configuration pub fn map_add_ons(mut self, f: F) -> Self where F: FnOnce(AO) -> AO, diff --git a/crates/node/builder/src/components/pool.rs b/crates/node/builder/src/components/pool.rs index a261f02c75..bb88c54b9f 100644 --- a/crates/node/builder/src/components/pool.rs +++ b/crates/node/builder/src/components/pool.rs @@ -6,8 +6,8 @@ use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::EthereumHardforks; use reth_node_api::{NodeTypes, TxTy}; use reth_transaction_pool::{ - blobstore::DiskFileBlobStore, CoinbaseTipOrdering, PoolConfig, PoolTransaction, SubPoolLimit, - TransactionPool, TransactionValidationTaskExecutor, TransactionValidator, + blobstore::DiskFileBlobStore, BlobStore, CoinbaseTipOrdering, PoolConfig, PoolTransaction, + SubPoolLimit, TransactionPool, TransactionValidationTaskExecutor, TransactionValidator, }; use std::{collections::HashSet, future::Future}; @@ -133,31 +133,49 @@ where V::Transaction: PoolTransaction> + reth_transaction_pool::EthPoolTransaction, { + /// Consume the type and build the [`reth_transaction_pool::Pool`] with the given config and blob + /// store. + pub fn build( + self, + blob_store: BS, + pool_config: PoolConfig, + ) -> reth_transaction_pool::Pool< + TransactionValidationTaskExecutor, + CoinbaseTipOrdering, + BS, + > + where + BS: BlobStore, + { + let TxPoolBuilder { validator, .. } = self; + reth_transaction_pool::Pool::new( + validator, + CoinbaseTipOrdering::default(), + blob_store, + pool_config, + ) + } + /// Build the transaction pool and spawn its maintenance tasks. /// This method creates the blob store, builds the pool, and spawns maintenance tasks. - pub fn build_and_spawn_maintenance_task( + pub fn build_and_spawn_maintenance_task( self, - blob_store: DiskFileBlobStore, + blob_store: BS, pool_config: PoolConfig, ) -> eyre::Result< reth_transaction_pool::Pool< TransactionValidationTaskExecutor, CoinbaseTipOrdering, - DiskFileBlobStore, + BS, >, - > { - // Destructure self to avoid partial move issues - let TxPoolBuilder { ctx, validator, ..
} = self; - - let transaction_pool = reth_transaction_pool::Pool::new( - validator, - CoinbaseTipOrdering::default(), - blob_store, - pool_config.clone(), - ); - + > + where + BS: BlobStore, + { + let ctx = self.ctx; + let transaction_pool = self.build(blob_store, pool_config); // Spawn maintenance tasks using standalone functions - spawn_maintenance_tasks(ctx, transaction_pool.clone(), &pool_config)?; + spawn_maintenance_tasks(ctx, transaction_pool.clone(), transaction_pool.config())?; Ok(transaction_pool) } diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 92e3a7aa81..7f0e944c92 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -34,16 +34,15 @@ use crate::{ hooks::OnComponentInitializedHook, BuilderContext, ExExLauncher, NodeAdapter, PrimitivesTy, }; -use alloy_consensus::BlockHeader as _; use alloy_eips::eip2124::Head; use alloy_primitives::{BlockNumber, B256}; use eyre::Context; use rayon::ThreadPoolBuilder; -use reth_chainspec::{Chain, EthChainSpec, EthereumHardfork, EthereumHardforks}; +use reth_chainspec::{Chain, EthChainSpec, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; use reth_consensus::noop::NoopConsensus; use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; -use reth_db_common::init::{init_genesis, InitStorageError}; +use reth_db_common::init::{init_genesis_with_settings, InitStorageError}; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_engine_local::MiningMode; use reth_evm::{noop::NoopEvmConfig, ConfigureEvm}; @@ -66,9 +65,9 @@ use reth_node_metrics::{ version::VersionInfo, }; use reth_provider::{ - providers::{NodeTypesForProvider, ProviderNodeTypes, StaticFileProvider}, - BlockHashReader, BlockNumReader, BlockReaderIdExt, ProviderError, ProviderFactory, - ProviderResult, StageCheckpointReader, StaticFileProviderFactory, + providers::{NodeTypesForProvider, ProviderNodeTypes, RocksDBProvider, StaticFileProvider}, + BlockHashReader, BlockNumReader, ProviderError, ProviderFactory, ProviderResult, + StageCheckpointReader, StaticFileProviderBuilder, StaticFileProviderFactory, }; use reth_prune::{PruneModes, PrunerBuilder}; use reth_rpc_builder::config::RethRpcServerConfig; @@ -166,11 +165,14 @@ impl LaunchContext { // Update the config with the command line arguments toml_config.peers.trusted_nodes_only = config.network.trusted_only; + // Merge static file CLI arguments with config file, giving priority to CLI + toml_config.static_files = config.static_files.merge_with_config(toml_config.static_files); + Ok(toml_config) } /// Save prune config to the toml file if node is a full node or has custom pruning CLI - /// arguments. + /// arguments. Also migrates deprecated prune config values to new defaults. 
fn save_pruning_config( reth_config: &mut reth_config::Config, config: &NodeConfig, @@ -179,15 +181,22 @@ impl LaunchContext { where ChainSpec: EthChainSpec + reth_chainspec::EthereumHardforks, { + let mut should_save = reth_config.prune.segments.migrate(); + if let Some(prune_config) = config.prune_config() { if reth_config.prune != prune_config { reth_config.set_prune_config(prune_config); - info!(target: "reth::cli", "Saving prune config to toml file"); - reth_config.save(config_path.as_ref())?; + should_save = true; } } else if !reth_config.prune.is_default() { warn!(target: "reth::cli", "Pruning configuration is present in the config file, but no CLI arguments are provided. Using config from file."); } + + if should_save { + info!(target: "reth::cli", "Saving prune config to toml file"); + reth_config.save(config_path.as_ref())?; + } + Ok(()) } @@ -406,14 +415,13 @@ impl LaunchContextWith, Evm: ConfigureEvm + 'static, { + // Validate static files configuration + let static_files_config = &self.toml_config().static_files; + static_files_config.validate()?; + + // Apply per-segment blocks_per_file configuration + let static_file_provider = + StaticFileProviderBuilder::read_write(self.data_dir().static_files())? + .with_metrics() + .with_blocks_per_file_for_segments(static_files_config.as_blocks_per_file_map()) + .with_genesis_block_number(self.chain_spec().genesis().number.unwrap_or_default()) + .build()?; + + // Initialize RocksDB provider with metrics, statistics, and default tables + let rocksdb_provider = RocksDBProvider::builder(self.data_dir().rocksdb()) + .with_default_tables() + .with_metrics() + .with_statistics() + .build()?; + let factory = ProviderFactory::new( self.right().clone(), self.chain_spec(), - StaticFileProvider::read_write(self.data_dir().static_files())?, - ) - .with_prune_modes(self.prune_modes()) - .with_static_files_metrics(); - - let has_receipt_pruning = self.toml_config().prune.has_receipts_pruning(); + static_file_provider, + rocksdb_provider, + )? + .with_prune_modes(self.prune_modes()); // Check for consistency between database and static files. If it fails, it unwinds to // the first block that's consistent between database and static files. - if let Some(unwind_target) = factory - .static_file_provider() - .check_consistency(&factory.provider()?, has_receipt_pruning)? + if let Some(unwind_target) = + factory.static_file_provider().check_consistency(&factory.provider()?)? { // Highly unlikely to happen, and given its destructive nature, it's better to panic // instead. @@ -583,7 +607,6 @@ where let listen_addr = self.node_config().metrics.prometheus; if let Some(addr) = listen_addr { - info!(target: "reth::cli", "Starting metrics endpoint at {}", addr); let config = MetricServerConfig::new( addr, VersionInfo { @@ -620,13 +643,19 @@ where /// Convenience function to [`Self::init_genesis`] pub fn with_genesis(self) -> Result { - init_genesis(self.provider_factory())?; + init_genesis_with_settings( + self.provider_factory(), + self.node_config().static_files.to_settings(), + )?; Ok(self) } /// Write the genesis block and state if it has not already been written pub fn init_genesis(&self) -> Result { - init_genesis(self.provider_factory()) + init_genesis_with_settings( + self.provider_factory(), + self.node_config().static_files.to_settings(), + ) } /// Creates a new `WithMeteredProvider` container and attaches it to the @@ -909,28 +938,44 @@ where /// /// A target block hash if the pipeline is inconsistent, otherwise `None`. 
pub fn check_pipeline_consistency(&self) -> ProviderResult> { + // We skip the era stage if it's not enabled + let era_enabled = self.era_import_source().is_some(); + let mut all_stages = + StageId::ALL.into_iter().filter(|id| era_enabled || id != &StageId::Era); + + // Get the expected first stage based on config. + let first_stage = all_stages.next().expect("there must be at least one stage"); + // If no target was provided, check if the stages are congruent - check if the // checkpoint of the last stage matches the checkpoint of the first. let first_stage_checkpoint = self .blockchain_db() - .get_stage_checkpoint(*StageId::ALL.first().unwrap())? + .get_stage_checkpoint(first_stage)? .unwrap_or_default() .block_number; - // Skip the first stage as we've already retrieved it and comparing all other checkpoints - // against it. - for stage_id in StageId::ALL.iter().skip(1) { + // Compare all other stages against the first + for stage_id in all_stages { let stage_checkpoint = self .blockchain_db() - .get_stage_checkpoint(*stage_id)? + .get_stage_checkpoint(stage_id)? .unwrap_or_default() .block_number; // If the checkpoint of any stage is less than the checkpoint of the first stage, // retrieve and return the block hash of the latest header and use it as the target. + debug!( + target: "consensus::engine", + first_stage_id = %first_stage, + first_stage_checkpoint, + stage_id = %stage_id, + stage_checkpoint = stage_checkpoint, + "Checking stage against first stage", + ); if stage_checkpoint < first_stage_checkpoint { debug!( target: "consensus::engine", + first_stage_id = %first_stage, first_stage_checkpoint, inconsistent_stage_id = %stage_id, inconsistent_stage_checkpoint = stage_checkpoint, @@ -945,40 +990,6 @@ where Ok(None) } - /// Expire the pre-merge transactions if the node is configured to do so and the chain has a - /// merge block. - /// - /// If the node is configured to prune pre-merge transactions and it has synced past the merge - /// block, it will delete the pre-merge transaction static files if they still exist. - pub fn expire_pre_merge_transactions(&self) -> eyre::Result<()> - where - T: FullNodeTypes, - { - if self.node_config().pruning.bodies_pre_merge && - let Some(merge_block) = self - .chain_spec() - .ethereum_fork_activation(EthereumHardfork::Paris) - .block_number() - { - // Ensure we only expire transactions after we synced past the merge block. - let Some(latest) = self.blockchain_db().latest_header()? else { return Ok(()) }; - if latest.number() > merge_block { - let provider = self.blockchain_db().static_file_provider(); - if provider - .get_lowest_transaction_static_file_block() - .is_some_and(|lowest| lowest < merge_block) - { - info!(target: "reth::cli", merge_block, "Expiring pre-merge transactions"); - provider.delete_transactions_below(merge_block)?; - } else { - debug!(target: "reth::cli", merge_block, "No pre-merge transactions to expire"); - } - } - } - - Ok(()) - } - /// Returns the metrics sender. 
pub fn sync_metrics_tx(&self) -> UnboundedSender { self.right().db_provider_container.metrics_sender.clone() @@ -1207,7 +1218,6 @@ mod tests { storage_history_before: None, bodies_pre_merge: false, bodies_distance: None, - #[expect(deprecated)] receipts_log_filter: None, bodies_before: None, }, diff --git a/crates/node/builder/src/launch/debug.rs b/crates/node/builder/src/launch/debug.rs index f5e9745cdd..a623a825ad 100644 --- a/crates/node/builder/src/launch/debug.rs +++ b/crates/node/builder/src/launch/debug.rs @@ -7,7 +7,7 @@ use reth_chainspec::EthChainSpec; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, RpcBlockProvider}; use reth_engine_local::LocalMiner; use reth_node_api::{ - BlockTy, FullNodeComponents, PayloadAttrTy, PayloadAttributesBuilder, PayloadTypes, + BlockTy, FullNodeComponents, HeaderTy, PayloadAttrTy, PayloadAttributesBuilder, PayloadTypes, }; use std::{ future::{Future, IntoFuture}, @@ -73,9 +73,7 @@ pub trait DebugNode: Node { /// be constructed during local mining. fn local_payload_attributes_builder( chain_spec: &Self::ChainSpec, - ) -> impl PayloadAttributesBuilder< - <::Payload as PayloadTypes>::PayloadAttributes, - >; + ) -> impl PayloadAttributesBuilder<::PayloadAttributes, HeaderTy>; } /// Node launcher with support for launching various debugging utilities. @@ -120,7 +118,7 @@ where inner: L, target: Target, local_payload_attributes_builder: - Option>>>, + Option, HeaderTy>>>, map_attributes: Option) -> PayloadAttrTy + Send + Sync>>, } @@ -133,7 +131,7 @@ where { pub fn with_payload_attributes_builder( self, - builder: impl PayloadAttributesBuilder>, + builder: impl PayloadAttributesBuilder, HeaderTy>, ) -> Self { Self { inner: self.inner, @@ -229,7 +227,7 @@ where } else { let local = N::Types::local_payload_attributes_builder(&chain_spec); let builder = if let Some(f) = map_attributes { - Either::Left(move |block_number| f(local.build(block_number))) + Either::Left(move |parent| f(local.build(&parent))) } else { Either::Right(local) }; diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 93309b65b1..e77842acb9 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -3,7 +3,7 @@ use crate::{ common::{Attached, LaunchContextWith, WithConfigs}, hooks::NodeHooks, - rpc::{EngineValidatorAddOn, EngineValidatorBuilder, RethRpcAddOns, RpcHandle}, + rpc::{EngineShutdown, EngineValidatorAddOn, EngineValidatorBuilder, RethRpcAddOns, RpcHandle}, setup::build_networked_pipeline, AddOns, AddOnsContext, FullNode, LaunchContext, LaunchNode, NodeAdapter, NodeBuilderWithComponents, NodeComponents, NodeComponentsBuilder, NodeHandle, NodeTypesAdapter, @@ -13,6 +13,7 @@ use futures::{stream_select, StreamExt}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_engine_service::service::{ChainEvent, EngineService}; use reth_engine_tree::{ + chain::FromOrchestrator, engine::{EngineApiRequest, EngineRequestHandler}, tree::TreeConfig, }; @@ -31,7 +32,7 @@ use reth_node_core::{ use reth_node_events::node; use reth_provider::{ providers::{BlockchainProvider, NodeTypesForProvider}, - BlockNumReader, + BlockNumReader, MetadataProvider, }; use reth_tasks::TaskExecutor; use reth_tokio_util::EventSender; @@ -39,6 +40,7 @@ use reth_tracing::tracing::{debug, error, info}; use std::{future::Future, pin::Pin, sync::Arc}; use tokio::sync::{mpsc::unbounded_channel, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; +use tracing::warn; /// The 
engine node launcher. #[derive(Debug)] @@ -98,8 +100,24 @@ impl EngineNodeLauncher { .with_adjusted_configs() // Create the provider factory .with_provider_factory::<_, >::Evm>().await? - .inspect(|_| { + .inspect(|ctx| { info!(target: "reth::cli", "Database opened"); + match ctx.provider_factory().storage_settings() { + Ok(settings) => { + info!( + target: "reth::cli", + ?settings, + "Storage settings" + ); + }, + Err(err) => { + warn!( + target: "reth::cli", + ?err, + "Failed to get storage settings" + ); + }, + } }) .with_prometheus_server().await? .inspect(|this| { @@ -117,9 +135,6 @@ impl EngineNodeLauncher { })? .with_components(components_builder, on_component_initialized).await?; - // Try to expire pre-merge transaction history if configured - ctx.expire_pre_merge_transactions()?; - // spawn exexs if any let maybe_exex_manager_handle = ctx.launch_exex(installed_exex).await?; @@ -246,8 +261,16 @@ impl EngineNodeLauncher { )), ); - let RpcHandle { rpc_server_handles, rpc_registry, engine_events, beacon_engine_handle } = - add_ons.launch_add_ons(add_ons_ctx).await?; + let RpcHandle { + rpc_server_handles, + rpc_registry, + engine_events, + beacon_engine_handle, + engine_shutdown: _, + } = add_ons.launch_add_ons(add_ons_ctx).await?; + + // Create engine shutdown handle + let (engine_shutdown, mut shutdown_rx) = EngineShutdown::new(); // Run consensus engine to completion let initial_target = ctx.initial_backfill_target()?; @@ -264,12 +287,16 @@ impl EngineNodeLauncher { let provider = ctx.blockchain_db().clone(); let (exit, rx) = oneshot::channel(); let terminate_after_backfill = ctx.terminate_after_initial_backfill(); + let startup_sync_state_idle = ctx.node_config().debug.startup_sync_state_idle; info!(target: "reth::cli", "Starting consensus engine"); ctx.task_executor().spawn_critical("consensus engine", Box::pin(async move { if let Some(initial_target) = initial_target { debug!(target: "reth::cli", %initial_target, "start backfill sync"); + // network_handle's sync state is already initialized at Syncing engine_service.orchestrator_mut().start_backfill_sync(initial_target); + } else if startup_sync_state_idle { + network_handle.update_sync_state(SyncState::Idle); } let mut res = Ok(()); @@ -277,10 +304,18 @@ impl EngineNodeLauncher { // advance the chain and await payloads built locally to add into the engine api tree handler to prevent re-execution if that block is received as payload from the CL loop { tokio::select! 
{ + shutdown_req = &mut shutdown_rx => { + if let Ok(req) = shutdown_req { + debug!(target: "reth::cli", "received engine shutdown request"); + engine_service.orchestrator_mut().handler_mut().handler_mut().on_event( + FromOrchestrator::Terminate { tx: req.done_tx }.into() + ); + } + } payload = built_payloads.select_next_some() => { if let Some(executed_block) = payload.executed_block() { - debug!(target: "reth::cli", block=?executed_block.recovered_block().num_hash(), "inserting built payload"); - engine_service.orchestrator_mut().handler_mut().handler_mut().on_event(EngineApiRequest::InsertExecutedBlock(executed_block).into()); + debug!(target: "reth::cli", block=?executed_block.recovered_block.num_hash(), "inserting built payload"); + engine_service.orchestrator_mut().handler_mut().handler_mut().on_event(EngineApiRequest::InsertExecutedBlock(executed_block.into_executed_payload()).into()); } } event = engine_service.next() => { @@ -292,6 +327,9 @@ impl EngineNodeLauncher { debug!(target: "reth::cli", "Terminating after initial backfill"); break } + if startup_sync_state_idle { + network_handle.update_sync_state(SyncState::Idle); + } } ChainEvent::BackfillSyncStarted => { network_handle.update_sync_state(SyncState::Syncing); @@ -345,6 +383,7 @@ impl EngineNodeLauncher { rpc_registry, engine_events, beacon_engine_handle, + engine_shutdown, }, }; // Notify on node started diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index ed0a3fb64d..360019e0ea 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -11,6 +11,7 @@ use crate::{ use alloy_rpc_types::engine::ClientVersionV1; use alloy_rpc_types_engine::ExecutionData; use jsonrpsee::{core::middleware::layer::Either, RpcModule}; +use parking_lot::Mutex; use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks, Hardforks}; use reth_node_api::{ @@ -23,7 +24,10 @@ use reth_node_core::{ version::{version_metadata, CLIENT_CODE}, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadStore}; -use reth_rpc::eth::{core::EthRpcConverterFor, DevSigner, EthApiTypes, FullEthApiServer}; +use reth_rpc::{ + eth::{core::EthRpcConverterFor, DevSigner, EthApiTypes, FullEthApiServer}, + AdminApi, +}; use reth_rpc_api::{eth::helpers::EthTransactions, IntoEngineApiRpcModule}; use reth_rpc_builder::{ auth::{AuthRpcModule, AuthServerHandle}, @@ -38,7 +42,9 @@ use std::{ fmt::{self, Debug}, future::Future, ops::{Deref, DerefMut}, + sync::Arc, }; +use tokio::sync::oneshot; /// Contains the handles to the spawned RPC servers. /// @@ -329,6 +335,8 @@ pub struct RpcHandle { pub engine_events: EventSender::Primitives>>, /// Handle to the beacon consensus engine. pub beacon_engine_handle: ConsensusEngineHandle<::Payload>, + /// Handle to trigger engine shutdown. + pub engine_shutdown: EngineShutdown, } impl Clone for RpcHandle { @@ -338,6 +346,7 @@ impl Clone for RpcHandle RpcHandle { ) -> &EventSender::Primitives>> { &self.engine_events } + + /// Returns the `EthApi` instance of the rpc server. + pub const fn eth_api(&self) -> &EthApi { + self.rpc_registry.registry.eth_api() + } + + /// Returns an instance of the [`AdminApi`] for the rpc server. + pub fn admin_api( + &self, + ) -> AdminApi::ChainSpec, Node::Pool> + where + ::ChainSpec: EthereumHardforks, + { + self.rpc_registry.registry.admin_api() + } } /// Handle returned when only the regular RPC server (HTTP/WS/IPC) is launched. 
@@ -938,6 +963,7 @@ where rpc_registry: registry, engine_events, beacon_engine_handle: engine_handle, + engine_shutdown: EngineShutdown::default(), }) } @@ -973,7 +999,12 @@ where ); let eth_config = config.rpc.eth_config().max_batch_size(config.txpool.max_batch_size()); - let ctx = EthApiCtx { components: &node, config: eth_config, cache }; + let ctx = EthApiCtx { + components: &node, + config: eth_config, + cache, + engine_handle: beacon_engine_handle.clone(), + }; let eth_api = eth_api_builder.build_eth_api(ctx).await?; let auth_config = config.rpc.auth_server_config(jwt_secret)?; @@ -987,7 +1018,7 @@ where .with_executor(Box::new(node.task_executor().clone())) .with_evm_config(node.evm_config().clone()) .with_consensus(node.consensus().clone()) - .build_with_auth_server(module_config, engine_api, eth_api); + .build_with_auth_server(module_config, engine_api, eth_api, engine_events.clone()); // in dev mode we generate 20 random dev-signer accounts if config.dev.dev { @@ -1137,6 +1168,8 @@ pub struct EthApiCtx<'a, N: FullNodeTypes> { pub config: EthConfig, /// Cache for eth state pub cache: EthStateCache>, + /// Handle to the beacon consensus engine + pub engine_handle: ConsensusEngineHandle<::Payload>, } impl<'a, N: FullNodeComponents>> @@ -1154,18 +1187,17 @@ impl<'a, N: FullNodeComponents: Default + Send + 'static { /// The Ethapi implementation this builder will build. - type EthApi: EthApiTypes - + FullEthApiServer - + Unpin - + 'static; + type EthApi: FullEthApiServer; /// Builds the [`EthApiServer`](reth_rpc_api::eth::EthApiServer) from the given context. fn build_eth_api( @@ -1357,6 +1389,7 @@ where version: version_metadata().cargo_pkg_version.to_string(), commit: version_metadata().vergen_git_sha.to_string(), }; + Ok(EngineApi::new( ctx.node.provider().clone(), ctx.config.chain.clone(), @@ -1368,6 +1401,7 @@ where EngineCapabilities::default(), engine_validator, ctx.config.engine.accept_execution_requests_hash, + ctx.node.network().clone(), )) } } @@ -1402,3 +1436,48 @@ impl IntoEngineApiRpcModule for NoopEngineApi { RpcModule::new(()) } } + +/// Handle to trigger graceful engine shutdown. +/// +/// This handle can be used to request a graceful shutdown of the engine, +/// which will persist all remaining in-memory blocks before terminating. +#[derive(Clone, Debug)] +pub struct EngineShutdown { + /// Channel to send shutdown signal. + tx: Arc>>>, +} + +impl EngineShutdown { + /// Creates a new [`EngineShutdown`] handle and returns the receiver. + pub fn new() -> (Self, oneshot::Receiver) { + let (tx, rx) = oneshot::channel(); + (Self { tx: Arc::new(Mutex::new(Some(tx))) }, rx) + } + + /// Requests a graceful engine shutdown. + /// + /// All remaining in-memory blocks will be persisted before the engine terminates. + /// + /// Returns a receiver that resolves when shutdown is complete. + /// Returns `None` if shutdown was already triggered. + pub fn shutdown(&self) -> Option> { + let mut guard = self.tx.lock(); + let tx = guard.take()?; + let (done_tx, done_rx) = oneshot::channel(); + let _ = tx.send(EngineShutdownRequest { done_tx }); + Some(done_rx) + } +} + +impl Default for EngineShutdown { + fn default() -> Self { + Self { tx: Arc::new(Mutex::new(None)) } + } +} + +/// Request to shutdown the engine. +#[derive(Debug)] +pub struct EngineShutdownRequest { + /// Channel to signal shutdown completion. 
+ pub done_tx: oneshot::Sender<()>, +} diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index b1a472bd9f..e4a3ef7120 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -19,6 +19,7 @@ reth-cli-util.workspace = true reth-db = { workspace = true, features = ["mdbx"] } reth-storage-errors.workspace = true reth-storage-api = { workspace = true, features = ["std", "db-api"] } +reth-provider.workspace = true reth-network = { workspace = true, features = ["serde"] } reth-network-p2p.workspace = true reth-rpc-eth-types.workspace = true @@ -30,6 +31,7 @@ reth-config = { workspace = true, features = ["serde"] } reth-discv4.workspace = true reth-discv5.workspace = true reth-net-nat.workspace = true +reth-net-banlist.workspace = true reth-network-peers.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true @@ -45,15 +47,16 @@ alloy-eips.workspace = true # misc eyre.workspace = true -clap = { workspace = true, features = ["derive", "env"] } +clap = { workspace = true, features = ["derive", "env", "string"] } humantime.workspace = true rand.workspace = true derive_more.workspace = true toml.workspace = true serde.workspace = true strum = { workspace = true, features = ["derive"] } +thiserror.workspace = true url.workspace = true - +ipnet.workspace = true # io dirs-next.workspace = true shellexpand.workspace = true @@ -77,8 +80,15 @@ tokio.workspace = true # Features for vergen to generate correct env vars jemalloc = ["reth-cli-util/jemalloc"] asm-keccak = ["alloy-primitives/asm-keccak"] -# Feature to enable opentelemetry export +keccak-cache-global = ["alloy-primitives/keccak-cache-global"] otlp = ["reth-tracing/otlp"] +samply = ["reth-tracing/samply"] + +min-error-logs = ["tracing/release_max_level_error"] +min-warn-logs = ["tracing/release_max_level_warn"] +min-info-logs = ["tracing/release_max_level_info"] +min-debug-logs = ["tracing/release_max_level_debug"] +min-trace-logs = ["tracing/release_max_level_trace"] [build-dependencies] vergen = { workspace = true, features = ["build", "cargo", "emit_and_set"] } diff --git a/crates/node/core/src/args/database.rs b/crates/node/core/src/args/database.rs index 6f1d3bfc71..298669e198 100644 --- a/crates/node/core/src/args/database.rs +++ b/crates/node/core/src/args/database.rs @@ -25,9 +25,26 @@ pub struct DatabaseArgs { /// NFS volume. #[arg(long = "db.exclusive")] pub exclusive: Option, - /// Maximum database size (e.g., 4TB, 8MB) + /// Maximum database size (e.g., 4TB, 8TB). + /// + /// This sets the "map size" of the database. If the database grows beyond this + /// limit, the node will stop with an "environment map size limit reached" error. + /// + /// The default value is 8TB. #[arg(long = "db.max-size", value_parser = parse_byte_size)] pub max_size: Option, + /// Database page size (e.g., 4KB, 8KB, 16KB). + /// + /// Specifies the page size used by the MDBX database. + /// + /// The page size determines the maximum database size. + /// MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum + /// database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + /// + /// WARNING: This setting is only configurable at database creation; changing + /// it later requires re-syncing. 
+ #[arg(long = "db.page-size", value_parser = parse_byte_size)] + pub page_size: Option, /// Database growth step (e.g., 4GB, 4KB) #[arg(long = "db.growth-step", value_parser = parse_byte_size)] pub growth_step: Option, @@ -68,6 +85,7 @@ impl DatabaseArgs { .with_exclusive(self.exclusive) .with_max_read_transaction_duration(max_read_transaction_duration) .with_geometry_max_size(self.max_size) + .with_geometry_page_size(self.page_size) .with_growth_step(self.growth_step) .with_max_readers(self.max_readers) .with_sync_mode(self.sync_mode) @@ -300,6 +318,41 @@ mod tests { assert!(result.is_err()); } + #[test] + fn test_command_parser_with_valid_page_size_from_str() { + let cmd = CommandParser::::try_parse_from(["reth", "--db.page-size", "8KB"]) + .unwrap(); + assert_eq!(cmd.args.page_size, Some(KILOBYTE * 8)); + + let cmd = CommandParser::::try_parse_from(["reth", "--db.page-size", "1MB"]) + .unwrap(); + assert_eq!(cmd.args.page_size, Some(MEGABYTE)); + + // Test with spaces + let cmd = + CommandParser::::try_parse_from(["reth", "--db.page-size", "16 KB"]) + .unwrap(); + assert_eq!(cmd.args.page_size, Some(KILOBYTE * 16)); + + // Test with just a number (bytes) + let cmd = CommandParser::::try_parse_from(["reth", "--db.page-size", "4096"]) + .unwrap(); + assert_eq!(cmd.args.page_size, Some(KILOBYTE * 4)); + } + + #[test] + fn test_command_parser_with_invalid_page_size() { + // Invalid text + let result = + CommandParser::::try_parse_from(["reth", "--db.page-size", "invalid"]); + assert!(result.is_err()); + + // Invalid unit + let result = + CommandParser::::try_parse_from(["reth", "--db.page-size", "7 ZB"]); + assert!(result.is_err()); + } + #[test] fn test_possible_values() { // Initialize the LogLevelValueParser diff --git a/crates/node/core/src/args/datadir_args.rs b/crates/node/core/src/args/datadir_args.rs index cb0590f177..a968334ae3 100644 --- a/crates/node/core/src/args/datadir_args.rs +++ b/crates/node/core/src/args/datadir_args.rs @@ -27,6 +27,10 @@ pub struct DatadirArgs { verbatim_doc_comment )] pub static_files_path: Option, + + /// The absolute path to store `RocksDB` database in. + #[arg(long = "datadir.rocksdb", value_name = "PATH", verbatim_doc_comment)] + pub rocksdb_path: Option, } impl DatadirArgs { diff --git a/crates/node/core/src/args/debug.rs b/crates/node/core/src/args/debug.rs index 13d7685b05..dce3aa784e 100644 --- a/crates/node/core/src/args/debug.rs +++ b/crates/node/core/src/args/debug.rs @@ -101,6 +101,13 @@ pub struct DebugArgs { /// Example: `nodename:secret@host:port` #[arg(long = "ethstats", help_heading = "Debug")] pub ethstats: Option, + + /// Set the node to idle state when the backfill is not running. + /// + /// This makes the `eth_syncing` RPC return "Idle" when the node has just started or finished + /// the backfill, but did not yet receive any new blocks. 
+ #[arg(long = "debug.startup-sync-state-idle", help_heading = "Debug")] + pub startup_sync_state_idle: bool, } impl Default for DebugArgs { @@ -119,6 +126,7 @@ impl Default for DebugArgs { invalid_block_hook: Some(InvalidBlockSelection::default()), healthy_node_rpc_url: None, ethstats: None, + startup_sync_state_idle: false, } } } @@ -349,6 +357,17 @@ mod tests { assert_eq!(args, default_args); } + #[test] + fn test_parse_invalid_block_args_none() { + let expected_args = DebugArgs { + invalid_block_hook: Some(InvalidBlockSelection::from(vec![])), + ..Default::default() + }; + let args = + CommandParser::::parse_from(["reth", "--debug.invalid-block-hook", ""]).args; + assert_eq!(args, expected_args); + } + #[test] fn test_parse_invalid_block_args() { let expected_args = DebugArgs { diff --git a/crates/node/core/src/args/engine.rs b/crates/node/core/src/args/engine.rs index 29535f2c1d..a860e62e45 100644 --- a/crates/node/core/src/args/engine.rs +++ b/crates/node/core/src/args/engine.rs @@ -1,27 +1,217 @@ //! clap [Args](clap::Args) for engine purposes -use clap::Args; +use clap::{builder::Resettable, Args}; use reth_engine_primitives::{TreeConfig, DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE}; +use std::sync::OnceLock; use crate::node_config::{ DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_PERSISTENCE_THRESHOLD, DEFAULT_RESERVED_CPU_CORES, }; +/// Global static engine defaults +static ENGINE_DEFAULTS: OnceLock = OnceLock::new(); + +/// Default values for engine that can be customized +/// +/// Global defaults can be set via [`DefaultEngineValues::try_init`]. +#[derive(Debug, Clone)] +pub struct DefaultEngineValues { + persistence_threshold: u64, + memory_block_buffer_target: u64, + legacy_state_root_task_enabled: bool, + state_cache_disabled: bool, + prewarming_disabled: bool, + parallel_sparse_trie_disabled: bool, + state_provider_metrics: bool, + cross_block_cache_size: u64, + state_root_task_compare_updates: bool, + accept_execution_requests_hash: bool, + multiproof_chunking_enabled: bool, + multiproof_chunk_size: usize, + reserved_cpu_cores: usize, + precompile_cache_disabled: bool, + state_root_fallback: bool, + always_process_payload_attributes_on_canonical_head: bool, + allow_unwind_canonical_header: bool, + storage_worker_count: Option, + account_worker_count: Option, +} + +impl DefaultEngineValues { + /// Initialize the global engine defaults with this configuration + pub fn try_init(self) -> Result<(), Self> { + ENGINE_DEFAULTS.set(self) + } + + /// Get a reference to the global engine defaults + pub fn get_global() -> &'static Self { + ENGINE_DEFAULTS.get_or_init(Self::default) + } + + /// Set the default persistence threshold + pub const fn with_persistence_threshold(mut self, v: u64) -> Self { + self.persistence_threshold = v; + self + } + + /// Set the default memory block buffer target + pub const fn with_memory_block_buffer_target(mut self, v: u64) -> Self { + self.memory_block_buffer_target = v; + self + } + + /// Set whether to enable legacy state root task by default + pub const fn with_legacy_state_root_task_enabled(mut self, v: bool) -> Self { + self.legacy_state_root_task_enabled = v; + self + } + + /// Set whether to disable state cache by default + pub const fn with_state_cache_disabled(mut self, v: bool) -> Self { + self.state_cache_disabled = v; + self + } + + /// Set whether to disable prewarming by default + pub const fn with_prewarming_disabled(mut self, v: bool) -> Self { + self.prewarming_disabled = v; + self + } + + /// Set whether to 
disable parallel sparse trie by default + pub const fn with_parallel_sparse_trie_disabled(mut self, v: bool) -> Self { + self.parallel_sparse_trie_disabled = v; + self + } + + /// Set whether to enable state provider metrics by default + pub const fn with_state_provider_metrics(mut self, v: bool) -> Self { + self.state_provider_metrics = v; + self + } + + /// Set the default cross-block cache size in MB + pub const fn with_cross_block_cache_size(mut self, v: u64) -> Self { + self.cross_block_cache_size = v; + self + } + + /// Set whether to compare state root task updates by default + pub const fn with_state_root_task_compare_updates(mut self, v: bool) -> Self { + self.state_root_task_compare_updates = v; + self + } + + /// Set whether to accept execution requests hash by default + pub const fn with_accept_execution_requests_hash(mut self, v: bool) -> Self { + self.accept_execution_requests_hash = v; + self + } + + /// Set whether to enable multiproof chunking by default + pub const fn with_multiproof_chunking_enabled(mut self, v: bool) -> Self { + self.multiproof_chunking_enabled = v; + self + } + + /// Set the default multiproof chunk size + pub const fn with_multiproof_chunk_size(mut self, v: usize) -> Self { + self.multiproof_chunk_size = v; + self + } + + /// Set the default number of reserved CPU cores + pub const fn with_reserved_cpu_cores(mut self, v: usize) -> Self { + self.reserved_cpu_cores = v; + self + } + + /// Set whether to disable precompile cache by default + pub const fn with_precompile_cache_disabled(mut self, v: bool) -> Self { + self.precompile_cache_disabled = v; + self + } + + /// Set whether to enable state root fallback by default + pub const fn with_state_root_fallback(mut self, v: bool) -> Self { + self.state_root_fallback = v; + self + } + + /// Set whether to always process payload attributes on canonical head by default + pub const fn with_always_process_payload_attributes_on_canonical_head( + mut self, + v: bool, + ) -> Self { + self.always_process_payload_attributes_on_canonical_head = v; + self + } + + /// Set whether to allow unwinding canonical header by default + pub const fn with_allow_unwind_canonical_header(mut self, v: bool) -> Self { + self.allow_unwind_canonical_header = v; + self + } + + /// Set the default storage worker count + pub const fn with_storage_worker_count(mut self, v: Option) -> Self { + self.storage_worker_count = v; + self + } + + /// Set the default account worker count + pub const fn with_account_worker_count(mut self, v: Option) -> Self { + self.account_worker_count = v; + self + } +} + +impl Default for DefaultEngineValues { + fn default() -> Self { + Self { + persistence_threshold: DEFAULT_PERSISTENCE_THRESHOLD, + memory_block_buffer_target: DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, + legacy_state_root_task_enabled: false, + state_cache_disabled: false, + prewarming_disabled: false, + parallel_sparse_trie_disabled: false, + state_provider_metrics: false, + cross_block_cache_size: DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB, + state_root_task_compare_updates: false, + accept_execution_requests_hash: false, + multiproof_chunking_enabled: true, + multiproof_chunk_size: DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE, + reserved_cpu_cores: DEFAULT_RESERVED_CPU_CORES, + precompile_cache_disabled: false, + state_root_fallback: false, + always_process_payload_attributes_on_canonical_head: false, + allow_unwind_canonical_header: false, + storage_worker_count: None, + account_worker_count: None, + } + } +} + /// Parameters for configuring the engine driver. 
#[derive(Debug, Clone, Args, PartialEq, Eq)] #[command(next_help_heading = "Engine")] pub struct EngineArgs { - /// Configure persistence threshold for engine experimental. - #[arg(long = "engine.persistence-threshold", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] + /// Configure persistence threshold for the engine. This determines how many canonical blocks + /// must be in-memory, ahead of the last persisted block, before flushing canonical blocks to + /// disk again. + /// + /// To persist blocks as fast as the node receives them, set this value to zero. This will + /// cause more frequent DB writes. + #[arg(long = "engine.persistence-threshold", default_value_t = DefaultEngineValues::get_global().persistence_threshold)] pub persistence_threshold: u64, /// Configure the target number of blocks to keep in memory. - #[arg(long = "engine.memory-block-buffer-target", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] + #[arg(long = "engine.memory-block-buffer-target", default_value_t = DefaultEngineValues::get_global().memory_block_buffer_target)] pub memory_block_buffer_target: u64, /// Enable legacy state root - #[arg(long = "engine.legacy-state-root", default_value = "false")] + #[arg(long = "engine.legacy-state-root", default_value_t = DefaultEngineValues::get_global().legacy_state_root_task_enabled)] pub legacy_state_root_task_enabled: bool, /// CAUTION: This CLI flag has no effect anymore, use --engine.disable-caching-and-prewarming @@ -30,8 +220,12 @@ pub struct EngineArgs { #[deprecated] pub caching_and_prewarming_enabled: bool, + /// Disable state cache + #[arg(long = "engine.disable-state-cache", default_value_t = DefaultEngineValues::get_global().state_cache_disabled)] + pub state_cache_disabled: bool, + /// Disable parallel prewarming - #[arg(long = "engine.disable-prewarming", alias = "engine.disable-caching-and-prewarming")] + #[arg(long = "engine.disable-prewarming", alias = "engine.disable-caching-and-prewarming", default_value_t = DefaultEngineValues::get_global().prewarming_disabled)] pub prewarming_disabled: bool, /// CAUTION: This CLI flag has no effect anymore, use --engine.disable-parallel-sparse-trie @@ -41,38 +235,38 @@ pub struct EngineArgs { pub parallel_sparse_trie_enabled: bool, /// Disable the parallel sparse trie in the engine. - #[arg(long = "engine.disable-parallel-sparse-trie", default_value = "false")] + #[arg(long = "engine.disable-parallel-sparse-trie", default_value_t = DefaultEngineValues::get_global().parallel_sparse_trie_disabled)] pub parallel_sparse_trie_disabled: bool, /// Enable state provider latency metrics. This allows the engine to collect and report stats /// about how long state provider calls took during execution, but this does introduce slight /// overhead to state provider calls. - #[arg(long = "engine.state-provider-metrics", default_value = "false")] + #[arg(long = "engine.state-provider-metrics", default_value_t = DefaultEngineValues::get_global().state_provider_metrics)] pub state_provider_metrics: bool, /// Configure the size of cross-block cache in megabytes - #[arg(long = "engine.cross-block-cache-size", default_value_t = DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB)] + #[arg(long = "engine.cross-block-cache-size", default_value_t = DefaultEngineValues::get_global().cross_block_cache_size)] pub cross_block_cache_size: u64, /// Enable comparing trie updates from the state root task to the trie updates from the regular /// state root calculation. 
- #[arg(long = "engine.state-root-task-compare-updates")] + #[arg(long = "engine.state-root-task-compare-updates", default_value_t = DefaultEngineValues::get_global().state_root_task_compare_updates)] pub state_root_task_compare_updates: bool, /// Enables accepting requests hash instead of an array of requests in `engine_newPayloadV4`. - #[arg(long = "engine.accept-execution-requests-hash")] + #[arg(long = "engine.accept-execution-requests-hash", default_value_t = DefaultEngineValues::get_global().accept_execution_requests_hash)] pub accept_execution_requests_hash: bool, /// Whether multiproof task should chunk proof targets. - #[arg(long = "engine.multiproof-chunking", default_value = "true")] + #[arg(long = "engine.multiproof-chunking", default_value_t = DefaultEngineValues::get_global().multiproof_chunking_enabled)] pub multiproof_chunking_enabled: bool, /// Multiproof task chunk size for proof targets. - #[arg(long = "engine.multiproof-chunk-size", default_value_t = DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE)] + #[arg(long = "engine.multiproof-chunk-size", default_value_t = DefaultEngineValues::get_global().multiproof_chunk_size)] pub multiproof_chunk_size: usize, /// Configure the number of reserved CPU cores for non-reth processes - #[arg(long = "engine.reserved-cpu-cores", default_value_t = DEFAULT_RESERVED_CPU_CORES)] + #[arg(long = "engine.reserved-cpu-cores", default_value_t = DefaultEngineValues::get_global().reserved_cpu_cores)] pub reserved_cpu_cores: usize, /// CAUTION: This CLI flag has no effect anymore, use --engine.disable-precompile-cache @@ -82,11 +276,11 @@ pub struct EngineArgs { pub precompile_cache_enabled: bool, /// Disable precompile cache - #[arg(long = "engine.disable-precompile-cache", default_value = "false")] + #[arg(long = "engine.disable-precompile-cache", default_value_t = DefaultEngineValues::get_global().precompile_cache_disabled)] pub precompile_cache_disabled: bool, /// Enable state root fallback, useful for testing - #[arg(long = "engine.state-root-fallback", default_value = "false")] + #[arg(long = "engine.state-root-fallback", default_value_t = DefaultEngineValues::get_global().state_root_fallback)] pub state_root_fallback: bool, /// Always process payload attributes and begin a payload build process even if @@ -96,51 +290,73 @@ pub struct EngineArgs { /// Note: This is a no-op on OP Stack. #[arg( long = "engine.always-process-payload-attributes-on-canonical-head", - default_value = "false" + default_value_t = DefaultEngineValues::get_global().always_process_payload_attributes_on_canonical_head )] pub always_process_payload_attributes_on_canonical_head: bool, /// Allow unwinding canonical header to ancestor during forkchoice updates. /// See `TreeConfig::unwind_canonical_header` for more details. - #[arg(long = "engine.allow-unwind-canonical-header", default_value = "false")] + #[arg(long = "engine.allow-unwind-canonical-header", default_value_t = DefaultEngineValues::get_global().allow_unwind_canonical_header)] pub allow_unwind_canonical_header: bool, /// Configure the number of storage proof workers in the Tokio blocking pool. /// If not specified, defaults to 2x available parallelism, clamped between 2 and 64. - #[arg(long = "engine.storage-worker-count")] + #[arg(long = "engine.storage-worker-count", default_value = Resettable::from(DefaultEngineValues::get_global().storage_worker_count.map(|v| v.to_string().into())))] pub storage_worker_count: Option, /// Configure the number of account proof workers in the Tokio blocking pool. 
/// If not specified, defaults to the same count as storage workers. - #[arg(long = "engine.account-worker-count")] + #[arg(long = "engine.account-worker-count", default_value = Resettable::from(DefaultEngineValues::get_global().account_worker_count.map(|v| v.to_string().into())))] pub account_worker_count: Option, } #[allow(deprecated)] impl Default for EngineArgs { fn default() -> Self { + let DefaultEngineValues { + persistence_threshold, + memory_block_buffer_target, + legacy_state_root_task_enabled, + state_cache_disabled, + prewarming_disabled, + parallel_sparse_trie_disabled, + state_provider_metrics, + cross_block_cache_size, + state_root_task_compare_updates, + accept_execution_requests_hash, + multiproof_chunking_enabled, + multiproof_chunk_size, + reserved_cpu_cores, + precompile_cache_disabled, + state_root_fallback, + always_process_payload_attributes_on_canonical_head, + allow_unwind_canonical_header, + storage_worker_count, + account_worker_count, + } = DefaultEngineValues::get_global().clone(); Self { - persistence_threshold: DEFAULT_PERSISTENCE_THRESHOLD, - memory_block_buffer_target: DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, - legacy_state_root_task_enabled: false, - state_root_task_compare_updates: false, + persistence_threshold, + memory_block_buffer_target, + legacy_state_root_task_enabled, + state_root_task_compare_updates, caching_and_prewarming_enabled: true, - prewarming_disabled: false, + state_cache_disabled, + prewarming_disabled, parallel_sparse_trie_enabled: true, - parallel_sparse_trie_disabled: false, - state_provider_metrics: false, - cross_block_cache_size: DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB, - accept_execution_requests_hash: false, - multiproof_chunking_enabled: true, - multiproof_chunk_size: DEFAULT_MULTIPROOF_TASK_CHUNK_SIZE, - reserved_cpu_cores: DEFAULT_RESERVED_CPU_CORES, + parallel_sparse_trie_disabled, + state_provider_metrics, + cross_block_cache_size, + accept_execution_requests_hash, + multiproof_chunking_enabled, + multiproof_chunk_size, + reserved_cpu_cores, precompile_cache_enabled: true, - precompile_cache_disabled: false, - state_root_fallback: false, - always_process_payload_attributes_on_canonical_head: false, - allow_unwind_canonical_header: false, - storage_worker_count: None, - account_worker_count: None, + precompile_cache_disabled, + state_root_fallback, + always_process_payload_attributes_on_canonical_head, + allow_unwind_canonical_header, + storage_worker_count, + account_worker_count, } } } @@ -152,6 +368,7 @@ impl EngineArgs { .with_persistence_threshold(self.persistence_threshold) .with_memory_block_buffer_target(self.memory_block_buffer_target) .with_legacy_state_root(self.legacy_state_root_task_enabled) + .without_state_cache(self.state_cache_disabled) .without_prewarming(self.prewarming_disabled) .with_disable_parallel_sparse_trie(self.parallel_sparse_trie_disabled) .with_state_provider_metrics(self.state_provider_metrics) @@ -197,4 +414,66 @@ mod tests { let args = CommandParser::::parse_from(["reth"]).args; assert_eq!(args, default_args); } + + #[test] + #[allow(deprecated)] + fn engine_args() { + let args = EngineArgs { + persistence_threshold: 100, + memory_block_buffer_target: 50, + legacy_state_root_task_enabled: true, + caching_and_prewarming_enabled: true, + state_cache_disabled: true, + prewarming_disabled: true, + parallel_sparse_trie_enabled: true, + parallel_sparse_trie_disabled: true, + state_provider_metrics: true, + cross_block_cache_size: 256, + state_root_task_compare_updates: true, + accept_execution_requests_hash: 
true, + multiproof_chunking_enabled: true, + multiproof_chunk_size: 512, + reserved_cpu_cores: 4, + precompile_cache_enabled: true, + precompile_cache_disabled: true, + state_root_fallback: true, + always_process_payload_attributes_on_canonical_head: true, + allow_unwind_canonical_header: true, + storage_worker_count: Some(16), + account_worker_count: Some(8), + }; + + let parsed_args = CommandParser::::parse_from([ + "reth", + "--engine.persistence-threshold", + "100", + "--engine.memory-block-buffer-target", + "50", + "--engine.legacy-state-root", + "--engine.disable-state-cache", + "--engine.disable-prewarming", + "--engine.disable-parallel-sparse-trie", + "--engine.state-provider-metrics", + "--engine.cross-block-cache-size", + "256", + "--engine.state-root-task-compare-updates", + "--engine.accept-execution-requests-hash", + "--engine.multiproof-chunking", + "--engine.multiproof-chunk-size", + "512", + "--engine.reserved-cpu-cores", + "4", + "--engine.disable-precompile-cache", + "--engine.state-root-fallback", + "--engine.always-process-payload-attributes-on-canonical-head", + "--engine.allow-unwind-canonical-header", + "--engine.storage-worker-count", + "16", + "--engine.account-worker-count", + "8", + ]) + .args; + + assert_eq!(parsed_args, args); + } } diff --git a/crates/node/core/src/args/error.rs b/crates/node/core/src/args/error.rs new file mode 100644 index 0000000000..163c063cd7 --- /dev/null +++ b/crates/node/core/src/args/error.rs @@ -0,0 +1,22 @@ +use std::num::ParseIntError; + +/// Error while parsing a `ReceiptsLogPruneConfig` +#[derive(thiserror::Error, Debug)] +#[expect(clippy::enum_variant_names)] +pub(crate) enum ReceiptsLogError { + /// The format of the filter is invalid. + #[error("invalid filter format: {0}")] + InvalidFilterFormat(String), + /// Address is invalid. + #[error("address is invalid: {0}")] + InvalidAddress(String), + /// The prune mode is not one of full, distance, before. + #[error("prune mode is invalid: {0}")] + InvalidPruneMode(String), + /// The distance value supplied is invalid. + #[error("distance is invalid: {0}")] + InvalidDistance(ParseIntError), + /// The block number supplied is invalid. + #[error("block number is invalid: {0}")] + InvalidBlockNumber(ParseIntError), +} diff --git a/crates/node/core/src/args/mod.rs b/crates/node/core/src/args/mod.rs index 17584a913c..2872886950 100644 --- a/crates/node/core/src/args/mod.rs +++ b/crates/node/core/src/args/mod.rs @@ -6,7 +6,7 @@ pub use network::{DiscoveryArgs, NetworkArgs}; /// RpcServerArg struct for configuring the RPC mod rpc_server; -pub use rpc_server::RpcServerArgs; +pub use rpc_server::{DefaultRpcServerArgs, RpcServerArgs}; /// `RpcStateCacheArgs` struct for configuring RPC state cache mod rpc_state_cache; @@ -26,7 +26,7 @@ pub use log::{ColorMode, LogArgs, Verbosity}; /// `TraceArgs` for tracing and spans support mod trace; -pub use trace::TraceArgs; +pub use trace::{OtlpInitStatus, TraceArgs}; /// `MetricArgs` to configure metrics. 
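// Editor's sketch (not part of the patch): the `Default for EngineArgs` impl above
// destructures the shared defaults struct field-by-field instead of copying values one
// at a time. With a toy pair of types, the benefit is visible: adding a field to
// `Defaults` is a compile error until `Args::default()` is updated to consume it.
// All names below are hypothetical illustrations, not reth APIs.

#[derive(Clone)]
struct Defaults {
    persistence_threshold: u64,
    worker_count: Option<usize>,
}

struct Args {
    persistence_threshold: u64,
    worker_count: Option<usize>,
    // CLI-only toggle without a shared default.
    verbose: bool,
}

impl Default for Args {
    fn default() -> Self {
        // Exhaustive destructuring: a new field in `Defaults` fails to compile here.
        let Defaults { persistence_threshold, worker_count } =
            Defaults { persistence_threshold: 2, worker_count: None };
        Self { persistence_threshold, worker_count, verbose: false }
    }
}

fn main() {
    let args = Args::default();
    assert_eq!(args.persistence_threshold, 2);
    assert!(args.worker_count.is_none());
    assert!(!args.verbose);
}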
mod metric; @@ -34,7 +34,7 @@ pub use metric::MetricArgs; /// `PayloadBuilderArgs` struct for configuring the payload builder mod payload_builder; -pub use payload_builder::PayloadBuilderArgs; +pub use payload_builder::{DefaultPayloadBuilderValues, PayloadBuilderArgs}; /// Stage related arguments mod stage; @@ -46,7 +46,7 @@ pub use gas_price_oracle::GasPriceOracleArgs; /// TxPoolArgs for configuring the transaction pool mod txpool; -pub use txpool::TxPoolArgs; +pub use txpool::{DefaultTxPoolValues, TxPoolArgs}; /// DevArgs for configuring the dev testnet mod dev; @@ -66,7 +66,7 @@ pub use benchmark_args::BenchmarkArgs; /// EngineArgs for configuring the engine mod engine; -pub use engine::EngineArgs; +pub use engine::{DefaultEngineValues, EngineArgs}; /// `RessArgs` for configuring ress subprotocol. mod ress_args; @@ -76,4 +76,9 @@ pub use ress_args::RessArgs; mod era; pub use era::{DefaultEraHost, EraArgs, EraSourceArgs}; +/// `StaticFilesArgs` for configuring static files. +mod static_files; +pub use static_files::StaticFilesArgs; + +mod error; pub mod types; diff --git a/crates/node/core/src/args/network.rs b/crates/node/core/src/args/network.rs index 52ff52b1ce..619a79bb81 100644 --- a/crates/node/core/src/args/network.rs +++ b/crates/node/core/src/args/network.rs @@ -1,5 +1,6 @@ //! clap [Args](clap::Args) for network related arguments. +use alloy_eips::BlockNumHash; use alloy_primitives::B256; use std::{ net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, @@ -10,16 +11,18 @@ use std::{ use crate::version::version_metadata; use clap::Args; use reth_chainspec::EthChainSpec; +use reth_cli_util::{get_secret_key, load_secret_key::SecretKeyError}; use reth_config::Config; use reth_discv4::{NodeRecord, DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT}; use reth_discv5::{ discv5::ListenConfig, DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_PORT, DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, DEFAULT_SECONDS_LOOKUP_INTERVAL, }; +use reth_net_banlist::IpFilter; use reth_net_nat::{NatResolver, DEFAULT_NET_IF_NAME}; use reth_network::{ transactions::{ - config::TransactionPropagationKind, + config::{TransactionIngressPolicy, TransactionPropagationKind}, constants::{ tx_fetcher::{ DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH, DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS, @@ -33,10 +36,11 @@ use reth_network::{ DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, }, - HelloMessageWithProtocols, NetworkConfigBuilder, NetworkPrimitives, SessionsConfig, + HelloMessageWithProtocols, NetworkConfigBuilder, NetworkPrimitives, }; use reth_network_peers::{mainnet_nodes, TrustedPeer}; use secp256k1::SecretKey; +use std::str::FromStr; use tracing::error; /// Parameters for configuring the network more granularity via CLI @@ -81,9 +85,16 @@ pub struct NetworkArgs { /// /// This will also deterministically set the peer ID. If not specified, it will be set in the /// data dir for the chain being used. - #[arg(long, value_name = "PATH")] + #[arg(long, value_name = "PATH", conflicts_with = "p2p_secret_key_hex")] pub p2p_secret_key: Option, + /// Hex encoded secret key to use for this node. + /// + /// This will also deterministically set the peer ID. Cannot be used together with + /// `--p2p-secret-key`. + #[arg(long, value_name = "HEX", conflicts_with = "p2p_secret_key")] + pub p2p_secret_key_hex: Option, + /// Do not persist peers. 
#[arg(long, verbatim_doc_comment)] pub no_persist_peers: bool, @@ -100,14 +111,26 @@ pub struct NetworkArgs { #[arg(long = "port", value_name = "PORT", default_value_t = DEFAULT_DISCOVERY_PORT)] pub port: u16, - /// Maximum number of outbound requests. default: 100 + /// Maximum number of outbound peers. default: 100 #[arg(long)] pub max_outbound_peers: Option, - /// Maximum number of inbound requests. default: 30 + /// Maximum number of inbound peers. default: 30 #[arg(long)] pub max_inbound_peers: Option, + /// Maximum number of total peers (inbound + outbound). + /// + /// Splits peers using approximately 2:1 inbound:outbound ratio. Cannot be used together with + /// `--max-outbound-peers` or `--max-inbound-peers`. + #[arg( + long, + value_name = "COUNT", + conflicts_with = "max_outbound_peers", + conflicts_with = "max_inbound_peers" + )] + pub max_peers: Option, + /// Max concurrent `GetPooledTransactions` requests. #[arg(long = "max-tx-reqs", value_name = "COUNT", default_value_t = DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS, verbatim_doc_comment)] pub max_concurrent_tx_requests: u32, @@ -162,6 +185,12 @@ pub struct NetworkArgs { #[arg(long = "tx-propagation-policy", default_value_t = TransactionPropagationKind::All)] pub tx_propagation_policy: TransactionPropagationKind, + /// Transaction ingress policy + /// + /// Determines which peers' transactions are accepted over P2P. + #[arg(long = "tx-ingress-policy", default_value_t = TransactionIngressPolicy::All)] + pub tx_ingress_policy: TransactionIngressPolicy, + /// Disable transaction pool gossip /// /// Disables gossiping of transactions in the mempool to peers. This can be omitted for @@ -180,14 +209,24 @@ pub struct NetworkArgs { )] pub propagation_mode: TransactionPropagationMode, - /// Comma separated list of required block hashes. + /// Comma separated list of required block hashes or block number=hash pairs. /// Peers that don't have these blocks will be filtered out. - #[arg(long = "required-block-hashes", value_delimiter = ',')] - pub required_block_hashes: Vec, + /// Format: hash or `block_number=hash` (e.g., 23115201=0x1234...) + #[arg(long = "required-block-hashes", value_delimiter = ',', value_parser = parse_block_num_hash)] + pub required_block_hashes: Vec, /// Optional network ID to override the chain specification's network ID for P2P connections #[arg(long)] pub network_id: Option, + + /// Restrict network communication to the given IP networks (CIDR masks). + /// + /// Comma separated list of CIDR network specifications. + /// Only peers with IP addresses within these ranges will be allowed to connect. + /// + /// Example: --netrestrict "192.168.0.0/16,10.0.0.0/8" + #[arg(long, value_name = "NETRESTRICT")] + pub netrestrict: Option, } impl NetworkArgs { @@ -218,6 +257,34 @@ impl NetworkArgs { bootnodes.into_iter().filter_map(|node| node.resolve_blocking().ok()).collect() }) } + + /// Returns the max inbound peers (2:1 ratio). + pub fn resolved_max_inbound_peers(&self) -> Option { + if let Some(max_peers) = self.max_peers { + if max_peers == 0 { + Some(0) + } else { + let outbound = (max_peers / 3).max(1); + Some(max_peers.saturating_sub(outbound)) + } + } else { + self.max_inbound_peers + } + } + + /// Returns the max outbound peers (1:2 ratio). 
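// Editor's sketch (not part of the patch): the --max-peers split computed by
// resolved_max_inbound_peers / resolved_max_outbound_peers above, written as a
// standalone function with a few worked values. `split_max_peers` is a hypothetical
// name used only for illustration.

fn split_max_peers(max_peers: usize) -> (usize, usize) {
    if max_peers == 0 {
        return (0, 0);
    }
    // Outbound gets roughly a third (but at least 1); inbound gets the rest.
    let outbound = (max_peers / 3).max(1);
    let inbound = max_peers.saturating_sub(outbound);
    (outbound, inbound)
}

fn main() {
    assert_eq!(split_max_peers(90), (30, 60)); // --max-peers 90 => 30 outbound / 60 inbound
    assert_eq!(split_max_peers(3), (1, 2));
    assert_eq!(split_max_peers(1), (1, 0)); // outbound is never rounded down to 0
    assert_eq!(split_max_peers(0), (0, 0));
}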
+ pub fn resolved_max_outbound_peers(&self) -> Option { + if let Some(max_peers) = self.max_peers { + if max_peers == 0 { + Some(0) + } else { + Some((max_peers / 3).max(1)) + } + } else { + self.max_outbound_peers + } + } + /// Configures and returns a `TransactionsManagerConfig` based on the current settings. pub const fn transactions_manager_config(&self) -> TransactionsManagerConfig { TransactionsManagerConfig { @@ -230,6 +297,7 @@ impl NetworkArgs { ), max_transactions_seen_by_peer_history: self.max_seen_tx_history, propagation_mode: self.propagation_mode, + ingress_policy: self.tx_ingress_policy, } } @@ -258,20 +326,20 @@ impl NetworkArgs { let peers_file = self.peers_file.clone().unwrap_or(default_peers_file); // Configure peer connections + let ip_filter = self.ip_filter().unwrap_or_default(); let peers_config = config - .peers - .clone() - .with_max_inbound_opt(self.max_inbound_peers) - .with_max_outbound_opt(self.max_outbound_peers); + .peers_config_with_basic_nodes_from_file( + self.persistent_peers_file(peers_file).as_deref(), + ) + .with_max_inbound_opt(self.resolved_max_inbound_peers()) + .with_max_outbound_opt(self.resolved_max_outbound_peers()) + .with_ip_filter(ip_filter); // Configure basic network stack NetworkConfigBuilder::::new(secret_key) - .peer_config(config.peers_config_with_basic_nodes_from_file( - self.persistent_peers_file(peers_file).as_deref(), - )) - .external_ip_resolver(self.nat) + .external_ip_resolver(self.nat.clone()) .sessions_config( - SessionsConfig::default().with_upscaled_event_buffer(peers_config.max_peers()), + config.sessions.clone().with_upscaled_event_buffer(peers_config.max_peers()), ) .peer_config(peers_config) .boot_nodes(chain_bootnodes.clone()) @@ -309,6 +377,12 @@ impl NetworkArgs { self.no_persist_peers.not().then_some(peers_file) } + /// Configures the [`DiscoveryArgs`]. + pub const fn with_discovery(mut self, discovery: DiscoveryArgs) -> Self { + self.discovery = discovery; + self + } + /// Sets the p2p port to zero, to allow the OS to assign a random unused port when /// the network components bind to a socket. pub const fn with_unused_p2p_port(mut self) -> Self { @@ -324,6 +398,12 @@ impl NetworkArgs { self } + /// Configures the [`NatResolver`] + pub fn with_nat_resolver(mut self, nat: NatResolver) -> Self { + self.nat = nat; + self + } + /// Change networking port numbers based on the instance number, if provided. /// Ports are updated to `previous_value + instance - 1` /// @@ -344,6 +424,36 @@ impl NetworkArgs { ) .await } + + /// Load the p2p secret key from the provided options. + /// + /// If `p2p_secret_key_hex` is provided, it will be used directly. + /// If `p2p_secret_key` is provided, it will be loaded from the file. + /// If neither is provided, the `default_secret_key_path` will be used. + pub fn secret_key( + &self, + default_secret_key_path: PathBuf, + ) -> Result { + if let Some(b256) = &self.p2p_secret_key_hex { + // Use the B256 value directly (already validated as 32 bytes) + SecretKey::from_slice(b256.as_slice()).map_err(SecretKeyError::SecretKeyDecodeError) + } else { + // Load from file (either provided path or default) + let secret_key_path = self.p2p_secret_key.clone().unwrap_or(default_secret_key_path); + get_secret_key(&secret_key_path) + } + } + + /// Creates an IP filter from the netrestrict argument. + /// + /// Returns an error if the CIDR format is invalid. 
+ pub fn ip_filter(&self) -> Result { + if let Some(netrestrict) = &self.netrestrict { + IpFilter::from_cidr_string(netrestrict) + } else { + Ok(IpFilter::allow_all()) + } + } } impl Default for NetworkArgs { @@ -357,12 +467,14 @@ impl Default for NetworkArgs { peers_file: None, identity: version_metadata().p2p_client_version.to_string(), p2p_secret_key: None, + p2p_secret_key_hex: None, no_persist_peers: false, nat: NatResolver::Any, addr: DEFAULT_DISCOVERY_ADDR, port: DEFAULT_DISCOVERY_PORT, max_outbound_peers: None, max_inbound_peers: None, + max_peers: None, max_concurrent_tx_requests: DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS, max_concurrent_tx_requests_per_peer: DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER, soft_limit_byte_size_pooled_transactions_response: @@ -373,10 +485,12 @@ impl Default for NetworkArgs { max_capacity_cache_txns_pending_fetch: DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH, net_if: None, tx_propagation_policy: TransactionPropagationKind::default(), + tx_ingress_policy: TransactionIngressPolicy::default(), disable_tx_gossip: false, propagation_mode: TransactionPropagationMode::Sqrt, required_block_hashes: vec![], network_id: None, + netrestrict: None, } } } @@ -542,6 +656,12 @@ impl DiscoveryArgs { self } + /// Set the discovery V5 port + pub const fn with_discv5_port(mut self, port: u16) -> Self { + self.discv5_port = port; + self + } + /// Change networking port numbers based on the instance number. /// Ports are updated to `previous_value + instance - 1` /// @@ -576,10 +696,32 @@ impl Default for DiscoveryArgs { } } +/// Parse a block number=hash pair or just a hash into `BlockNumHash` +fn parse_block_num_hash(s: &str) -> Result { + if let Some((num_str, hash_str)) = s.split_once('=') { + let number = num_str.parse().map_err(|_| format!("Invalid block number: {}", num_str))?; + let hash = B256::from_str(hash_str).map_err(|_| format!("Invalid hash: {}", hash_str))?; + Ok(BlockNumHash::new(number, hash)) + } else { + // For backward compatibility, treat as hash-only with number 0 + let hash = B256::from_str(s).map_err(|_| format!("Invalid hash: {}", s))?; + Ok(BlockNumHash::new(0, hash)) + } +} + #[cfg(test)] mod tests { use super::*; use clap::Parser; + use reth_chainspec::MAINNET; + use reth_config::Config; + use reth_network_peers::NodeRecord; + use secp256k1::SecretKey; + use std::{ + fs, + time::{SystemTime, UNIX_EPOCH}, + }; + /// A helper type to parse Args more easily #[derive(Parser)] struct CommandParser { @@ -640,10 +782,11 @@ mod tests { let tests = vec![0, 10]; for retries in tests { + let retries_str = retries.to_string(); let args = CommandParser::::parse_from([ "reth", "--dns-retries", - retries.to_string().as_str(), + retries_str.as_str(), ]) .args; @@ -657,6 +800,96 @@ mod tests { assert!(args.disable_tx_gossip); } + #[test] + fn parse_max_peers_flag() { + let args = CommandParser::::parse_from(["reth", "--max-peers", "90"]).args; + + assert_eq!(args.max_peers, Some(90)); + assert_eq!(args.max_outbound_peers, None); + assert_eq!(args.max_inbound_peers, None); + assert_eq!(args.resolved_max_outbound_peers(), Some(30)); + assert_eq!(args.resolved_max_inbound_peers(), Some(60)); + } + + #[test] + fn max_peers_conflicts_with_outbound() { + let result = CommandParser::::try_parse_from([ + "reth", + "--max-peers", + "90", + "--max-outbound-peers", + "50", + ]); + assert!( + result.is_err(), + "Should fail when both --max-peers and --max-outbound-peers are used" + ); + } + + #[test] + fn max_peers_conflicts_with_inbound() { + let result = 
CommandParser::::try_parse_from([ + "reth", + "--max-peers", + "90", + "--max-inbound-peers", + "30", + ]); + assert!( + result.is_err(), + "Should fail when both --max-peers and --max-inbound-peers are used" + ); + } + + #[test] + fn max_peers_split_calculation() { + let args = CommandParser::::parse_from(["reth", "--max-peers", "90"]).args; + + assert_eq!(args.max_peers, Some(90)); + assert_eq!(args.resolved_max_outbound_peers(), Some(30)); + assert_eq!(args.resolved_max_inbound_peers(), Some(60)); + } + + #[test] + fn max_peers_small_values() { + let args1 = CommandParser::::parse_from(["reth", "--max-peers", "1"]).args; + assert_eq!(args1.resolved_max_outbound_peers(), Some(1)); + assert_eq!(args1.resolved_max_inbound_peers(), Some(0)); + + let args2 = CommandParser::::parse_from(["reth", "--max-peers", "2"]).args; + assert_eq!(args2.resolved_max_outbound_peers(), Some(1)); + assert_eq!(args2.resolved_max_inbound_peers(), Some(1)); + + let args3 = CommandParser::::parse_from(["reth", "--max-peers", "3"]).args; + assert_eq!(args3.resolved_max_outbound_peers(), Some(1)); + assert_eq!(args3.resolved_max_inbound_peers(), Some(2)); + } + + #[test] + fn resolved_peers_without_max_peers() { + let args = CommandParser::::parse_from([ + "reth", + "--max-outbound-peers", + "75", + "--max-inbound-peers", + "15", + ]) + .args; + + assert_eq!(args.max_peers, None); + assert_eq!(args.resolved_max_outbound_peers(), Some(75)); + assert_eq!(args.resolved_max_inbound_peers(), Some(15)); + } + + #[test] + fn resolved_peers_with_defaults() { + let args = CommandParser::::parse_from(["reth"]).args; + + assert_eq!(args.max_peers, None); + assert_eq!(args.resolved_max_outbound_peers(), None); + assert_eq!(args.resolved_max_inbound_peers(), None); + } + #[test] fn network_args_default_sanity_test() { let default_args = NetworkArgs::default(); @@ -670,17 +903,21 @@ mod tests { let args = CommandParser::::parse_from([ "reth", "--required-block-hashes", - "0x1111111111111111111111111111111111111111111111111111111111111111,0x2222222222222222222222222222222222222222222222222222222222222222", + "0x1111111111111111111111111111111111111111111111111111111111111111,23115201=0x2222222222222222222222222222222222222222222222222222222222222222", ]) .args; assert_eq!(args.required_block_hashes.len(), 2); + // First hash without block number (should default to 0) + assert_eq!(args.required_block_hashes[0].number, 0); assert_eq!( - args.required_block_hashes[0].to_string(), + args.required_block_hashes[0].hash.to_string(), "0x1111111111111111111111111111111111111111111111111111111111111111" ); + // Second with block number=hash format + assert_eq!(args.required_block_hashes[1].number, 23115201); assert_eq!( - args.required_block_hashes[1].to_string(), + args.required_block_hashes[1].hash.to_string(), "0x2222222222222222222222222222222222222222222222222222222222222222" ); } @@ -690,4 +927,175 @@ mod tests { let args = CommandParser::::parse_from(["reth"]).args; assert!(args.required_block_hashes.is_empty()); } + + #[test] + fn test_parse_block_num_hash() { + // Test hash only format + let result = parse_block_num_hash( + "0x1111111111111111111111111111111111111111111111111111111111111111", + ); + assert!(result.is_ok()); + assert_eq!(result.unwrap().number, 0); + + // Test block_number=hash format + let result = parse_block_num_hash( + "23115201=0x2222222222222222222222222222222222222222222222222222222222222222", + ); + assert!(result.is_ok()); + assert_eq!(result.unwrap().number, 23115201); + + // Test invalid formats + 
assert!(parse_block_num_hash("invalid").is_err()); + assert!(parse_block_num_hash( + "abc=0x1111111111111111111111111111111111111111111111111111111111111111" + ) + .is_err()); + } + + #[test] + fn parse_p2p_secret_key_hex() { + let hex = "4c0883a69102937d6231471b5dbb6204fe512961708279f8c5c58b3b9c4e8b8f"; + let args = + CommandParser::::parse_from(["reth", "--p2p-secret-key-hex", hex]).args; + + let expected: B256 = hex.parse().unwrap(); + assert_eq!(args.p2p_secret_key_hex, Some(expected)); + assert_eq!(args.p2p_secret_key, None); + } + + #[test] + fn parse_p2p_secret_key_hex_with_0x_prefix() { + let hex = "0x4c0883a69102937d6231471b5dbb6204fe512961708279f8c5c58b3b9c4e8b8f"; + let args = + CommandParser::::parse_from(["reth", "--p2p-secret-key-hex", hex]).args; + + let expected: B256 = hex.parse().unwrap(); + assert_eq!(args.p2p_secret_key_hex, Some(expected)); + assert_eq!(args.p2p_secret_key, None); + } + + #[test] + fn test_p2p_secret_key_and_hex_are_mutually_exclusive() { + let result = CommandParser::::try_parse_from([ + "reth", + "--p2p-secret-key", + "/path/to/key", + "--p2p-secret-key-hex", + "4c0883a69102937d6231471b5dbb6204fe512961708279f8c5c58b3b9c4e8b8f", + ]); + + assert!(result.is_err()); + } + + #[test] + fn test_secret_key_method_with_hex() { + let hex = "4c0883a69102937d6231471b5dbb6204fe512961708279f8c5c58b3b9c4e8b8f"; + let args = + CommandParser::::parse_from(["reth", "--p2p-secret-key-hex", hex]).args; + + let temp_dir = std::env::temp_dir(); + let default_path = temp_dir.join("default_key"); + let secret_key = args.secret_key(default_path).unwrap(); + + // Verify the secret key matches the hex input + assert_eq!(alloy_primitives::hex::encode(secret_key.secret_bytes()), hex); + } + + #[test] + fn parse_netrestrict_single_network() { + let args = + CommandParser::::parse_from(["reth", "--netrestrict", "192.168.0.0/16"]) + .args; + + assert_eq!(args.netrestrict, Some("192.168.0.0/16".to_string())); + + let ip_filter = args.ip_filter().unwrap(); + assert!(ip_filter.has_restrictions()); + assert!(ip_filter.is_allowed(&"192.168.1.1".parse().unwrap())); + assert!(!ip_filter.is_allowed(&"10.0.0.1".parse().unwrap())); + } + + #[test] + fn parse_netrestrict_multiple_networks() { + let args = CommandParser::::parse_from([ + "reth", + "--netrestrict", + "192.168.0.0/16,10.0.0.0/8", + ]) + .args; + + assert_eq!(args.netrestrict, Some("192.168.0.0/16,10.0.0.0/8".to_string())); + + let ip_filter = args.ip_filter().unwrap(); + assert!(ip_filter.has_restrictions()); + assert!(ip_filter.is_allowed(&"192.168.1.1".parse().unwrap())); + assert!(ip_filter.is_allowed(&"10.5.10.20".parse().unwrap())); + assert!(!ip_filter.is_allowed(&"172.16.0.1".parse().unwrap())); + } + + #[test] + fn parse_netrestrict_ipv6() { + let args = + CommandParser::::parse_from(["reth", "--netrestrict", "2001:db8::/32"]) + .args; + + let ip_filter = args.ip_filter().unwrap(); + assert!(ip_filter.has_restrictions()); + assert!(ip_filter.is_allowed(&"2001:db8::1".parse().unwrap())); + assert!(!ip_filter.is_allowed(&"2001:db9::1".parse().unwrap())); + } + + #[test] + fn netrestrict_not_set() { + let args = CommandParser::::parse_from(["reth"]).args; + assert_eq!(args.netrestrict, None); + + let ip_filter = args.ip_filter().unwrap(); + assert!(!ip_filter.has_restrictions()); + assert!(ip_filter.is_allowed(&"192.168.1.1".parse().unwrap())); + assert!(ip_filter.is_allowed(&"10.0.0.1".parse().unwrap())); + } + + #[test] + fn netrestrict_invalid_cidr() { + let args = + CommandParser::::parse_from(["reth", "--netrestrict", 
"invalid-cidr"]) + .args; + + assert!(args.ip_filter().is_err()); + } + + #[test] + fn network_config_preserves_basic_nodes_from_peers_file() { + let enode = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; + let unique = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_nanos(); + + let peers_file = std::env::temp_dir().join(format!("reth_peers_test_{}.json", unique)); + fs::write(&peers_file, format!("[\"{}\"]", enode)).expect("write peers file"); + + // Build NetworkArgs with peers_file set and no_persist_peers=false + let args = NetworkArgs { + peers_file: Some(peers_file.clone()), + no_persist_peers: false, + ..Default::default() + }; + + // Build the network config using a deterministic secret key + let secret_key = SecretKey::from_byte_array(&[1u8; 32]).unwrap(); + let builder = args.network_config::( + &Config::default(), + MAINNET.clone(), + secret_key, + peers_file.clone(), + ); + + let net_cfg = builder.build_with_noop_provider(MAINNET.clone()); + + // Assert basic_nodes contains our node + let node: NodeRecord = enode.parse().unwrap(); + assert!(net_cfg.peers_config.basic_nodes.contains(&node)); + + // Cleanup + let _ = fs::remove_file(&peers_file); + } } diff --git a/crates/node/core/src/args/payload_builder.rs b/crates/node/core/src/args/payload_builder.rs index f751bcc070..d9ce66499f 100644 --- a/crates/node/core/src/args/payload_builder.rs +++ b/crates/node/core/src/args/payload_builder.rs @@ -1,23 +1,94 @@ use crate::{cli::config::PayloadBuilderConfig, version::default_extra_data}; use alloy_consensus::constants::MAXIMUM_EXTRA_DATA_SIZE; -use alloy_eips::merge::SLOT_DURATION; use clap::{ builder::{RangedU64ValueParser, TypedValueParser}, Arg, Args, Command, }; -use reth_cli_util::{parse_duration_from_secs, parse_duration_from_secs_or_ms}; -use std::{borrow::Cow, ffi::OsStr, time::Duration}; +use reth_cli_util::{ + parse_duration_from_secs, parse_duration_from_secs_or_ms, + parsers::format_duration_as_secs_or_ms, +}; +use std::{borrow::Cow, ffi::OsStr, sync::OnceLock, time::Duration}; + +/// Global static payload builder defaults +static PAYLOAD_BUILDER_DEFAULTS: OnceLock = OnceLock::new(); + +/// Default values for payload builder that can be customized +/// +/// Global defaults can be set via [`DefaultPayloadBuilderValues::try_init`]. 
+#[derive(Debug, Clone)] +pub struct DefaultPayloadBuilderValues { + /// Default extra data for blocks + extra_data: String, + /// Default interval between payload builds in seconds + interval: String, + /// Default deadline for payload builds in seconds + deadline: String, + /// Default maximum number of concurrent payload building tasks + max_payload_tasks: usize, +} + +impl DefaultPayloadBuilderValues { + /// Initialize the global payload builder defaults with this configuration + pub fn try_init(self) -> Result<(), Self> { + PAYLOAD_BUILDER_DEFAULTS.set(self) + } + + /// Get a reference to the global payload builder defaults + pub fn get_global() -> &'static Self { + PAYLOAD_BUILDER_DEFAULTS.get_or_init(Self::default) + } + + /// Set the default extra data + pub fn with_extra_data(mut self, v: impl Into) -> Self { + self.extra_data = v.into(); + self + } + + /// Set the default interval in seconds + pub fn with_interval(mut self, v: Duration) -> Self { + self.interval = format_duration_as_secs_or_ms(v); + self + } + + /// Set the default deadline in seconds + pub fn with_deadline(mut self, v: u64) -> Self { + self.deadline = format!("{}", v); + self + } + + /// Set the default maximum payload tasks + pub const fn with_max_payload_tasks(mut self, v: usize) -> Self { + self.max_payload_tasks = v; + self + } +} + +impl Default for DefaultPayloadBuilderValues { + fn default() -> Self { + Self { + extra_data: default_extra_data(), + interval: "1".to_string(), + deadline: "12".to_string(), + max_payload_tasks: 3, + } + } +} /// Parameters for configuring the Payload Builder #[derive(Debug, Clone, Args, PartialEq, Eq)] #[command(next_help_heading = "Builder")] pub struct PayloadBuilderArgs { /// Block extra data set by the payload builder. - #[arg(long = "builder.extradata", value_parser = ExtraDataValueParser::default(), default_value_t = default_extra_data())] + #[arg( + long = "builder.extradata", + value_parser = ExtraDataValueParser::default(), + default_value_t = DefaultPayloadBuilderValues::get_global().extra_data.clone() + )] pub extra_data: String, /// Target gas limit for built blocks. - #[arg(long = "builder.gaslimit", value_name = "GAS_LIMIT")] + #[arg(long = "builder.gaslimit", alias = "miner.gaslimit", value_name = "GAS_LIMIT")] pub gas_limit: Option, /// The interval at which the job should build a new payload after the last. @@ -25,26 +96,46 @@ pub struct PayloadBuilderArgs { /// Interval is specified in seconds or in milliseconds if the value ends with `ms`: /// * `50ms` -> 50 milliseconds /// * `1` -> 1 second - #[arg(long = "builder.interval", value_parser = parse_duration_from_secs_or_ms, default_value = "1", value_name = "DURATION")] + #[arg( + long = "builder.interval", + value_parser = parse_duration_from_secs_or_ms, + default_value = DefaultPayloadBuilderValues::get_global().interval.as_str(), + value_name = "DURATION" + )] pub interval: Duration, /// The deadline for when the payload builder job should resolve. - #[arg(long = "builder.deadline", value_parser = parse_duration_from_secs, default_value = "12", value_name = "SECONDS")] + #[arg( + long = "builder.deadline", + value_parser = parse_duration_from_secs, + default_value = DefaultPayloadBuilderValues::get_global().deadline.as_str(), + value_name = "SECONDS" + )] pub deadline: Duration, /// Maximum number of tasks to spawn for building a payload. 
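// Editor's sketch (not part of the patch): how a downstream binary could install custom
// payload-builder defaults before clap parses any arguments, using the builder-style
// setters and `try_init` defined above. The import path is assumed; the call has to run
// before the first `get_global()` (i.e. before the CLI is built), otherwise the built-in
// defaults are already locked in by the OnceLock.
use std::time::Duration;

use reth_node_core::args::DefaultPayloadBuilderValues;

fn install_payload_builder_defaults() {
    let defaults = DefaultPayloadBuilderValues::default()
        .with_extra_data("my-builder")
        .with_interval(Duration::from_millis(250))
        .with_deadline(4)
        .with_max_payload_tasks(8);

    // Returns Err(self) if the defaults were already initialized; ignored here.
    let _ = defaults.try_init();
}

fn main() {
    install_payload_builder_defaults();
}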
- #[arg(long = "builder.max-tasks", default_value = "3", value_parser = RangedU64ValueParser::::new().range(1..))] + #[arg( + long = "builder.max-tasks", + value_parser = RangedU64ValueParser::::new().range(1..), + default_value_t = DefaultPayloadBuilderValues::get_global().max_payload_tasks + )] pub max_payload_tasks: usize, + + /// Maximum number of blobs to include per block. + #[arg(long = "builder.max-blobs", value_name = "COUNT")] + pub max_blobs_per_block: Option, } impl Default for PayloadBuilderArgs { fn default() -> Self { + let defaults = DefaultPayloadBuilderValues::get_global(); Self { - extra_data: default_extra_data(), - interval: Duration::from_secs(1), + extra_data: defaults.extra_data.clone(), + interval: parse_duration_from_secs_or_ms(defaults.interval.as_str()).unwrap(), gas_limit: None, - deadline: SLOT_DURATION, - max_payload_tasks: 3, + deadline: Duration::from_secs(defaults.deadline.parse().unwrap()), + max_payload_tasks: defaults.max_payload_tasks, + max_blobs_per_block: None, } } } @@ -69,6 +160,10 @@ impl PayloadBuilderConfig for PayloadBuilderArgs { fn max_payload_tasks(&self) -> usize { self.max_payload_tasks } + + fn max_blobs_per_block(&self) -> Option { + self.max_blobs_per_block + } } #[derive(Clone, Debug, Default)] diff --git a/crates/node/core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs index 2ff67446bb..ed79ee529c 100644 --- a/crates/node/core/src/args/pruning.rs +++ b/crates/node/core/src/args/pruning.rs @@ -1,13 +1,15 @@ //! Pruning and full node arguments -use std::ops::Not; - -use crate::primitives::EthereumHardfork; -use alloy_primitives::BlockNumber; +use crate::{args::error::ReceiptsLogError, primitives::EthereumHardfork}; +use alloy_primitives::{Address, BlockNumber}; use clap::{builder::RangedU64ValueParser, Args}; use reth_chainspec::EthereumHardforks; use reth_config::config::PruneConfig; -use reth_prune_types::{PruneMode, PruneModes, MINIMUM_PRUNING_DISTANCE}; +use reth_prune_types::{ + PruneMode, PruneModes, ReceiptsLogPruneConfig, MERKLE_CHANGESETS_RETENTION_BLOCKS, + MINIMUM_PRUNING_DISTANCE, +}; +use std::{collections::BTreeMap, ops::Not}; /// Parameters for pruning and full node #[derive(Debug, Clone, Args, PartialEq, Eq, Default)] @@ -60,15 +62,12 @@ pub struct PruningArgs { /// Prune receipts before the specified block number. The specified block number is not pruned. #[arg(long = "prune.receipts.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["receipts_full", "receipts_pre_merge", "receipts_distance"])] pub receipts_before: Option, - /// Receipts Log Filter - #[arg( - long = "prune.receipts-log-filter", - alias = "prune.receiptslogfilter", - value_name = "FILTER_CONFIG", - hide = true - )] - #[deprecated] - pub receipts_log_filter: Option, + // Receipts Log Filter + /// Configure receipts log filter. Format: + /// <`address`>:<`prune_mode`>... where <`prune_mode`> can be 'full', 'distance:<`blocks`>', or + /// 'before:<`block_number`>' + #[arg(long = "prune.receiptslogfilter", value_name = "FILTER_CONFIG", conflicts_with_all = &["receipts_full", "receipts_pre_merge", "receipts_distance", "receipts_before"], value_parser = parse_receipts_log_filter)] + pub receipts_log_filter: Option, // Account History /// Prunes all account history. 
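// Editor's sketch (not part of the patch): what a --prune.receiptslogfilter value maps
// to. Each comma-separated entry is <address>:<mode>, where <mode> is one of `full`,
// `distance:<blocks>` or `before:<block_number>`; the parsed result is a per-address
// PruneMode map, mirroring the parse_receipts_log_filter tests further below.
use alloy_primitives::address;
use reth_prune_types::{PruneMode, ReceiptsLogPruneConfig};

fn example_filter() -> ReceiptsLogPruneConfig {
    // Equivalent to passing:
    //   --prune.receiptslogfilter 0x..01:full,0x..02:distance:1000,0x..03:before:5000000
    let mut config = ReceiptsLogPruneConfig::default();
    config.0.insert(address!("0x0000000000000000000000000000000000000001"), PruneMode::Full);
    config.0.insert(address!("0x0000000000000000000000000000000000000002"), PruneMode::Distance(1000));
    config.0.insert(address!("0x0000000000000000000000000000000000000003"), PruneMode::Before(5_000_000));
    config
}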
@@ -131,11 +130,12 @@ impl PruningArgs { receipts: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), account_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), storage_history: Some(PruneMode::Distance(MINIMUM_PRUNING_DISTANCE)), - // TODO: set default to pre-merge block if available - bodies_history: None, - merkle_changesets: PruneMode::Distance(MINIMUM_PRUNING_DISTANCE), - #[expect(deprecated)] - receipts_log_filter: (), + bodies_history: chain_spec + .ethereum_fork_activation(EthereumHardfork::Paris) + .block_number() + .map(PruneMode::Before), + merkle_changesets: PruneMode::Distance(MERKLE_CHANGESETS_RETENTION_BLOCKS), + receipts_log_filter: Default::default(), }, } } @@ -162,14 +162,13 @@ impl PruningArgs { if let Some(mode) = self.storage_history_prune_mode() { config.segments.storage_history = Some(mode); } - - // Log warning if receipts_log_filter is set (deprecated feature) - #[expect(deprecated)] - if self.receipts_log_filter.is_some() { - tracing::warn!( - target: "reth::cli", - "The --prune.receiptslogfilter flag is deprecated and has no effect. It will be removed in a future release." - ); + if let Some(receipt_logs) = + self.receipts_log_filter.as_ref().filter(|c| !c.is_empty()).cloned() + { + config.segments.receipts_log_filter = receipt_logs; + // need to remove the receipts segment filter entirely because that takes precedence + // over the logs filter + config.segments.receipts.take(); } config.is_default().not().then_some(config) @@ -257,3 +256,141 @@ impl PruningArgs { } } } + +/// Parses `,` separated pruning info into [`ReceiptsLogPruneConfig`]. +pub(crate) fn parse_receipts_log_filter( + value: &str, +) -> Result { + let mut config = BTreeMap::new(); + // Split out each of the filters. + let filters = value.split(','); + for filter in filters { + let parts: Vec<&str> = filter.split(':').collect(); + if parts.len() < 2 { + return Err(ReceiptsLogError::InvalidFilterFormat(filter.to_string())); + } + // Parse the address + let address = parts[0] + .parse::
() + .map_err(|_| ReceiptsLogError::InvalidAddress(parts[0].to_string()))?; + + // Parse the prune mode + let prune_mode = match parts[1] { + "full" => PruneMode::Full, + s if s.starts_with("distance") => { + if parts.len() < 3 { + return Err(ReceiptsLogError::InvalidFilterFormat(filter.to_string())); + } + let distance = + parts[2].parse::().map_err(ReceiptsLogError::InvalidDistance)?; + PruneMode::Distance(distance) + } + s if s.starts_with("before") => { + if parts.len() < 3 { + return Err(ReceiptsLogError::InvalidFilterFormat(filter.to_string())); + } + let block_number = + parts[2].parse::().map_err(ReceiptsLogError::InvalidBlockNumber)?; + PruneMode::Before(block_number) + } + _ => return Err(ReceiptsLogError::InvalidPruneMode(parts[1].to_string())), + }; + config.insert(address, prune_mode); + } + Ok(ReceiptsLogPruneConfig(config)) +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::address; + use clap::Parser; + + /// A helper type to parse Args more easily + #[derive(Parser)] + struct CommandParser { + #[command(flatten)] + args: T, + } + + #[test] + fn pruning_args_sanity_check() { + let args = CommandParser::::parse_from([ + "reth", + "--prune.receiptslogfilter", + "0x0000000000000000000000000000000000000003:before:5000000", + ]) + .args; + let mut config = ReceiptsLogPruneConfig::default(); + config.0.insert( + address!("0x0000000000000000000000000000000000000003"), + PruneMode::Before(5000000), + ); + assert_eq!(args.receipts_log_filter, Some(config)); + } + + #[test] + fn parse_receiptslogfilter() { + let default_args = PruningArgs::default(); + let args = CommandParser::::parse_from(["reth"]).args; + assert_eq!(args, default_args); + } + + #[test] + fn test_parse_receipts_log_filter() { + let filter1 = "0x0000000000000000000000000000000000000001:full"; + let filter2 = "0x0000000000000000000000000000000000000002:distance:1000"; + let filter3 = "0x0000000000000000000000000000000000000003:before:5000000"; + let filters = [filter1, filter2, filter3].join(","); + + // Args can be parsed. + let result = parse_receipts_log_filter(&filters); + assert!(result.is_ok()); + let config = result.unwrap(); + assert_eq!(config.0.len(), 3); + + // Check that the args were parsed correctly. 
+ let addr1: Address = "0x0000000000000000000000000000000000000001".parse().unwrap(); + let addr2: Address = "0x0000000000000000000000000000000000000002".parse().unwrap(); + let addr3: Address = "0x0000000000000000000000000000000000000003".parse().unwrap(); + + assert_eq!(config.0.get(&addr1), Some(&PruneMode::Full)); + assert_eq!(config.0.get(&addr2), Some(&PruneMode::Distance(1000))); + assert_eq!(config.0.get(&addr3), Some(&PruneMode::Before(5000000))); + } + + #[test] + fn test_parse_receipts_log_filter_invalid_filter_format() { + let result = parse_receipts_log_filter("invalid_format"); + assert!(matches!(result, Err(ReceiptsLogError::InvalidFilterFormat(_)))); + } + + #[test] + fn test_parse_receipts_log_filter_invalid_address() { + let result = parse_receipts_log_filter("invalid_address:full"); + assert!(matches!(result, Err(ReceiptsLogError::InvalidAddress(_)))); + } + + #[test] + fn test_parse_receipts_log_filter_invalid_prune_mode() { + let result = + parse_receipts_log_filter("0x0000000000000000000000000000000000000000:invalid_mode"); + assert!(matches!(result, Err(ReceiptsLogError::InvalidPruneMode(_)))); + } + + #[test] + fn test_parse_receipts_log_filter_invalid_distance() { + let result = parse_receipts_log_filter( + "0x0000000000000000000000000000000000000000:distance:invalid_distance", + ); + assert!(matches!(result, Err(ReceiptsLogError::InvalidDistance(_)))); + } + + #[test] + fn test_parse_receipts_log_filter_invalid_block_number() { + let result = parse_receipts_log_filter( + "0x0000000000000000000000000000000000000000:before:invalid_block", + ); + assert!(matches!(result, Err(ReceiptsLogError::InvalidBlockNumber(_)))); + } +} diff --git a/crates/node/core/src/args/rpc_server.rs b/crates/node/core/src/args/rpc_server.rs index 58a1c388e4..4d73abf7ad 100644 --- a/crates/node/core/src/args/rpc_server.rs +++ b/crates/node/core/src/args/rpc_server.rs @@ -7,7 +7,7 @@ use crate::args::{ use alloy_primitives::Address; use alloy_rpc_types_engine::JwtSecret; use clap::{ - builder::{PossibleValue, RangedU64ValueParser, TypedValueParser}, + builder::{PossibleValue, RangedU64ValueParser, Resettable, TypedValueParser}, Arg, Args, Command, }; use rand::Rng; @@ -19,12 +19,16 @@ use std::{ ffi::OsStr, net::{IpAddr, Ipv4Addr}, path::PathBuf, + sync::OnceLock, time::Duration, }; use url::Url; use super::types::MaxOr; +/// Global static RPC server defaults +static RPC_SERVER_DEFAULTS: OnceLock = OnceLock::new(); + /// Default max number of subscriptions per connection. pub(crate) const RPC_DEFAULT_MAX_SUBS_PER_CONN: u32 = 1024; @@ -37,76 +41,442 @@ pub(crate) const RPC_DEFAULT_MAX_REQUEST_SIZE_MB: u32 = 15; pub(crate) const RPC_DEFAULT_MAX_RESPONSE_SIZE_MB: u32 = 160; /// Default number of incoming connections. +/// +/// This restricts how many active connections (http, ws) the server accepts. +/// Once exceeded, the server can reject new connections. pub(crate) const RPC_DEFAULT_MAX_CONNECTIONS: u32 = 500; +/// Default values for RPC server that can be customized +/// +/// Global defaults can be set via [`DefaultRpcServerArgs::try_init`]. 
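// Editor's sketch (not part of the patch): overriding the global RPC defaults in a
// custom binary before the CLI is parsed, using the with_* setters defined below. The
// import path is assumed; as with the other Default*Values types in this change,
// try_init only succeeds if it runs before the first get_global() call.
use std::net::{IpAddr, Ipv4Addr};

use reth_node_core::args::DefaultRpcServerArgs;

fn install_rpc_defaults() {
    let defaults = DefaultRpcServerArgs::default()
        // Serve HTTP and WS by default, with HTTP bound to all interfaces.
        .with_http(true)
        .with_http_addr(IpAddr::V4(Ipv4Addr::UNSPECIFIED))
        .with_http_port(8545)
        .with_ws(true)
        .with_ws_port(8546);

    // Err(self) means the defaults were already initialized elsewhere.
    let _ = defaults.try_init();
}

fn main() {
    install_rpc_defaults();
}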
+#[derive(Debug, Clone)] +pub struct DefaultRpcServerArgs { + http: bool, + http_addr: IpAddr, + http_port: u16, + http_disable_compression: bool, + http_api: Option, + http_corsdomain: Option, + ws: bool, + ws_addr: IpAddr, + ws_port: u16, + ws_allowed_origins: Option, + ws_api: Option, + ipcdisable: bool, + ipcpath: String, + ipc_socket_permissions: Option, + auth_addr: IpAddr, + auth_port: u16, + auth_jwtsecret: Option, + auth_ipc: bool, + auth_ipc_path: String, + disable_auth_server: bool, + rpc_jwtsecret: Option, + rpc_max_request_size: MaxU32, + rpc_max_response_size: MaxU32, + rpc_max_subscriptions_per_connection: MaxU32, + rpc_max_connections: MaxU32, + rpc_max_tracing_requests: usize, + rpc_max_blocking_io_requests: usize, + rpc_max_trace_filter_blocks: u64, + rpc_max_blocks_per_filter: ZeroAsNoneU64, + rpc_max_logs_per_response: ZeroAsNoneU64, + rpc_gas_cap: u64, + rpc_evm_memory_limit: u64, + rpc_tx_fee_cap: u128, + rpc_max_simulate_blocks: u64, + rpc_eth_proof_window: u64, + rpc_proof_permits: usize, + rpc_pending_block: PendingBlockKind, + rpc_forwarder: Option, + builder_disallow: Option>, + rpc_state_cache: RpcStateCacheArgs, + gas_price_oracle: GasPriceOracleArgs, + rpc_send_raw_transaction_sync_timeout: Duration, +} + +impl DefaultRpcServerArgs { + /// Initialize the global RPC server defaults with this configuration + pub fn try_init(self) -> Result<(), Self> { + RPC_SERVER_DEFAULTS.set(self) + } + + /// Get a reference to the global RPC server defaults + pub fn get_global() -> &'static Self { + RPC_SERVER_DEFAULTS.get_or_init(Self::default) + } + + /// Set the default HTTP enabled state + pub const fn with_http(mut self, v: bool) -> Self { + self.http = v; + self + } + + /// Set the default HTTP address + pub const fn with_http_addr(mut self, v: IpAddr) -> Self { + self.http_addr = v; + self + } + + /// Set the default HTTP port + pub const fn with_http_port(mut self, v: u16) -> Self { + self.http_port = v; + self + } + + /// Set whether to disable HTTP compression by default + pub const fn with_http_disable_compression(mut self, v: bool) -> Self { + self.http_disable_compression = v; + self + } + + /// Set the default HTTP API modules + pub fn with_http_api(mut self, v: Option) -> Self { + self.http_api = v; + self + } + + /// Set the default HTTP CORS domain + pub fn with_http_corsdomain(mut self, v: Option) -> Self { + self.http_corsdomain = v; + self + } + + /// Set the default WS enabled state + pub const fn with_ws(mut self, v: bool) -> Self { + self.ws = v; + self + } + + /// Set the default WS address + pub const fn with_ws_addr(mut self, v: IpAddr) -> Self { + self.ws_addr = v; + self + } + + /// Set the default WS port + pub const fn with_ws_port(mut self, v: u16) -> Self { + self.ws_port = v; + self + } + + /// Set the default WS allowed origins + pub fn with_ws_allowed_origins(mut self, v: Option) -> Self { + self.ws_allowed_origins = v; + self + } + + /// Set the default WS API modules + pub fn with_ws_api(mut self, v: Option) -> Self { + self.ws_api = v; + self + } + + /// Set whether to disable IPC by default + pub const fn with_ipcdisable(mut self, v: bool) -> Self { + self.ipcdisable = v; + self + } + + /// Set the default IPC path + pub fn with_ipcpath(mut self, v: String) -> Self { + self.ipcpath = v; + self + } + + /// Set the default IPC socket permissions + pub fn with_ipc_socket_permissions(mut self, v: Option) -> Self { + self.ipc_socket_permissions = v; + self + } + + /// Set the default auth server address + pub const fn with_auth_addr(mut self, 
v: IpAddr) -> Self { + self.auth_addr = v; + self + } + + /// Set the default auth server port + pub const fn with_auth_port(mut self, v: u16) -> Self { + self.auth_port = v; + self + } + + /// Set the default auth JWT secret path + pub fn with_auth_jwtsecret(mut self, v: Option) -> Self { + self.auth_jwtsecret = v; + self + } + + /// Set the default auth IPC enabled state + pub const fn with_auth_ipc(mut self, v: bool) -> Self { + self.auth_ipc = v; + self + } + + /// Set the default auth IPC path + pub fn with_auth_ipc_path(mut self, v: String) -> Self { + self.auth_ipc_path = v; + self + } + + /// Set whether to disable the auth server by default + pub const fn with_disable_auth_server(mut self, v: bool) -> Self { + self.disable_auth_server = v; + self + } + + /// Set the default RPC JWT secret + pub const fn with_rpc_jwtsecret(mut self, v: Option) -> Self { + self.rpc_jwtsecret = v; + self + } + + /// Set the default max request size + pub const fn with_rpc_max_request_size(mut self, v: MaxU32) -> Self { + self.rpc_max_request_size = v; + self + } + + /// Set the default max response size + pub const fn with_rpc_max_response_size(mut self, v: MaxU32) -> Self { + self.rpc_max_response_size = v; + self + } + + /// Set the default max subscriptions per connection + pub const fn with_rpc_max_subscriptions_per_connection(mut self, v: MaxU32) -> Self { + self.rpc_max_subscriptions_per_connection = v; + self + } + + /// Set the default max connections + pub const fn with_rpc_max_connections(mut self, v: MaxU32) -> Self { + self.rpc_max_connections = v; + self + } + + /// Set the default max tracing requests + pub const fn with_rpc_max_tracing_requests(mut self, v: usize) -> Self { + self.rpc_max_tracing_requests = v; + self + } + + /// Set the default max blocking IO requests + pub const fn with_rpc_max_blocking_io_requests(mut self, v: usize) -> Self { + self.rpc_max_blocking_io_requests = v; + self + } + + /// Set the default max trace filter blocks + pub const fn with_rpc_max_trace_filter_blocks(mut self, v: u64) -> Self { + self.rpc_max_trace_filter_blocks = v; + self + } + + /// Set the default max blocks per filter + pub const fn with_rpc_max_blocks_per_filter(mut self, v: ZeroAsNoneU64) -> Self { + self.rpc_max_blocks_per_filter = v; + self + } + + /// Set the default max logs per response + pub const fn with_rpc_max_logs_per_response(mut self, v: ZeroAsNoneU64) -> Self { + self.rpc_max_logs_per_response = v; + self + } + + /// Set the default gas cap + pub const fn with_rpc_gas_cap(mut self, v: u64) -> Self { + self.rpc_gas_cap = v; + self + } + + /// Set the default EVM memory limit + pub const fn with_rpc_evm_memory_limit(mut self, v: u64) -> Self { + self.rpc_evm_memory_limit = v; + self + } + + /// Set the default tx fee cap + pub const fn with_rpc_tx_fee_cap(mut self, v: u128) -> Self { + self.rpc_tx_fee_cap = v; + self + } + + /// Set the default max simulate blocks + pub const fn with_rpc_max_simulate_blocks(mut self, v: u64) -> Self { + self.rpc_max_simulate_blocks = v; + self + } + + /// Set the default eth proof window + pub const fn with_rpc_eth_proof_window(mut self, v: u64) -> Self { + self.rpc_eth_proof_window = v; + self + } + + /// Set the default proof permits + pub const fn with_rpc_proof_permits(mut self, v: usize) -> Self { + self.rpc_proof_permits = v; + self + } + + /// Set the default pending block kind + pub const fn with_rpc_pending_block(mut self, v: PendingBlockKind) -> Self { + self.rpc_pending_block = v; + self + } + + /// Set the default RPC forwarder + 
pub fn with_rpc_forwarder(mut self, v: Option) -> Self { + self.rpc_forwarder = v; + self + } + + /// Set the default builder disallow addresses + pub fn with_builder_disallow(mut self, v: Option>) -> Self { + self.builder_disallow = v; + self + } + + /// Set the default RPC state cache args + pub const fn with_rpc_state_cache(mut self, v: RpcStateCacheArgs) -> Self { + self.rpc_state_cache = v; + self + } + + /// Set the default gas price oracle args + pub const fn with_gas_price_oracle(mut self, v: GasPriceOracleArgs) -> Self { + self.gas_price_oracle = v; + self + } + + /// Set the default send raw transaction sync timeout + pub const fn with_rpc_send_raw_transaction_sync_timeout(mut self, v: Duration) -> Self { + self.rpc_send_raw_transaction_sync_timeout = v; + self + } +} + +impl Default for DefaultRpcServerArgs { + fn default() -> Self { + Self { + http: false, + http_addr: Ipv4Addr::LOCALHOST.into(), + http_port: constants::DEFAULT_HTTP_RPC_PORT, + http_disable_compression: false, + http_api: None, + http_corsdomain: None, + ws: false, + ws_addr: Ipv4Addr::LOCALHOST.into(), + ws_port: constants::DEFAULT_WS_RPC_PORT, + ws_allowed_origins: None, + ws_api: None, + ipcdisable: false, + ipcpath: constants::DEFAULT_IPC_ENDPOINT.to_string(), + ipc_socket_permissions: None, + auth_addr: Ipv4Addr::LOCALHOST.into(), + auth_port: constants::DEFAULT_AUTH_PORT, + auth_jwtsecret: None, + auth_ipc: false, + auth_ipc_path: constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string(), + disable_auth_server: false, + rpc_jwtsecret: None, + rpc_max_request_size: RPC_DEFAULT_MAX_REQUEST_SIZE_MB.into(), + rpc_max_response_size: RPC_DEFAULT_MAX_RESPONSE_SIZE_MB.into(), + rpc_max_subscriptions_per_connection: RPC_DEFAULT_MAX_SUBS_PER_CONN.into(), + rpc_max_connections: RPC_DEFAULT_MAX_CONNECTIONS.into(), + rpc_max_tracing_requests: constants::default_max_tracing_requests(), + rpc_max_blocking_io_requests: constants::DEFAULT_MAX_BLOCKING_IO_REQUEST, + rpc_max_trace_filter_blocks: constants::DEFAULT_MAX_TRACE_FILTER_BLOCKS, + rpc_max_blocks_per_filter: constants::DEFAULT_MAX_BLOCKS_PER_FILTER.into(), + rpc_max_logs_per_response: (constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64).into(), + rpc_gas_cap: constants::gas_oracle::RPC_DEFAULT_GAS_CAP, + rpc_evm_memory_limit: (1 << 32) - 1, + rpc_tx_fee_cap: constants::DEFAULT_TX_FEE_CAP_WEI, + rpc_max_simulate_blocks: constants::DEFAULT_MAX_SIMULATE_BLOCKS, + rpc_eth_proof_window: constants::DEFAULT_ETH_PROOF_WINDOW, + rpc_proof_permits: constants::DEFAULT_PROOF_PERMITS, + rpc_pending_block: PendingBlockKind::Full, + rpc_forwarder: None, + builder_disallow: None, + rpc_state_cache: RpcStateCacheArgs::default(), + gas_price_oracle: GasPriceOracleArgs::default(), + rpc_send_raw_transaction_sync_timeout: + constants::RPC_DEFAULT_SEND_RAW_TX_SYNC_TIMEOUT_SECS, + } + } +} + /// Parameters for configuring the rpc more granularity via CLI #[derive(Debug, Clone, Args, PartialEq, Eq)] #[command(next_help_heading = "RPC")] pub struct RpcServerArgs { /// Enable the HTTP-RPC server - #[arg(long, default_value_if("dev", "true", "true"))] + #[arg(long, default_value_if("dev", "true", "true"), default_value_t = DefaultRpcServerArgs::get_global().http)] pub http: bool, /// Http server address to listen on - #[arg(long = "http.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))] + #[arg(long = "http.addr", default_value_t = DefaultRpcServerArgs::get_global().http_addr)] pub http_addr: IpAddr, /// Http server port to listen on - #[arg(long = "http.port", default_value_t = 
constants::DEFAULT_HTTP_RPC_PORT)] + #[arg(long = "http.port", default_value_t = DefaultRpcServerArgs::get_global().http_port)] pub http_port: u16, /// Disable compression for HTTP responses - #[arg(long = "http.disable-compression", default_value_t = false)] + #[arg(long = "http.disable-compression", default_value_t = DefaultRpcServerArgs::get_global().http_disable_compression)] pub http_disable_compression: bool, /// Rpc Modules to be configured for the HTTP server - #[arg(long = "http.api", value_parser = RpcModuleSelectionValueParser::default())] + #[arg(long = "http.api", value_parser = RpcModuleSelectionValueParser::default(), default_value = Resettable::from(DefaultRpcServerArgs::get_global().http_api.as_ref().map(|v| v.to_string().into())))] pub http_api: Option, /// Http Corsdomain to allow request from - #[arg(long = "http.corsdomain")] + #[arg(long = "http.corsdomain", default_value = Resettable::from(DefaultRpcServerArgs::get_global().http_corsdomain.as_ref().map(|v| v.to_string().into())))] pub http_corsdomain: Option, /// Enable the WS-RPC server - #[arg(long)] + #[arg(long, default_value_t = DefaultRpcServerArgs::get_global().ws)] pub ws: bool, /// Ws server address to listen on - #[arg(long = "ws.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))] + #[arg(long = "ws.addr", default_value_t = DefaultRpcServerArgs::get_global().ws_addr)] pub ws_addr: IpAddr, /// Ws server port to listen on - #[arg(long = "ws.port", default_value_t = constants::DEFAULT_WS_RPC_PORT)] + #[arg(long = "ws.port", default_value_t = DefaultRpcServerArgs::get_global().ws_port)] pub ws_port: u16, /// Origins from which to accept `WebSocket` requests - #[arg(id = "ws.origins", long = "ws.origins", alias = "ws.corsdomain")] + #[arg(id = "ws.origins", long = "ws.origins", alias = "ws.corsdomain", default_value = Resettable::from(DefaultRpcServerArgs::get_global().ws_allowed_origins.as_ref().map(|v| v.to_string().into())))] pub ws_allowed_origins: Option, /// Rpc Modules to be configured for the WS server - #[arg(long = "ws.api", value_parser = RpcModuleSelectionValueParser::default())] + #[arg(long = "ws.api", value_parser = RpcModuleSelectionValueParser::default(), default_value = Resettable::from(DefaultRpcServerArgs::get_global().ws_api.as_ref().map(|v| v.to_string().into())))] pub ws_api: Option, /// Disable the IPC-RPC server - #[arg(long)] + #[arg(long, default_value_t = DefaultRpcServerArgs::get_global().ipcdisable)] pub ipcdisable: bool, /// Filename for IPC socket/pipe within the datadir - #[arg(long, default_value_t = constants::DEFAULT_IPC_ENDPOINT.to_string())] + #[arg(long, default_value_t = DefaultRpcServerArgs::get_global().ipcpath.clone())] pub ipcpath: String, /// Set the permissions for the IPC socket file, in octal format. /// /// If not specified, the permissions will be set by the system's umask. 
- #[arg(long = "ipc.permissions")] + #[arg(long = "ipc.permissions", default_value = Resettable::from(DefaultRpcServerArgs::get_global().ipc_socket_permissions.as_ref().map(|v| v.to_string().into())))] pub ipc_socket_permissions: Option, /// Auth server address to listen on - #[arg(long = "authrpc.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))] + #[arg(long = "authrpc.addr", default_value_t = DefaultRpcServerArgs::get_global().auth_addr)] pub auth_addr: IpAddr, /// Auth server port to listen on - #[arg(long = "authrpc.port", default_value_t = constants::DEFAULT_AUTH_PORT)] + #[arg(long = "authrpc.port", default_value_t = DefaultRpcServerArgs::get_global().auth_port)] pub auth_port: u16, /// Path to a JWT secret to use for the authenticated engine-API RPC server. @@ -115,22 +485,22 @@ pub struct RpcServerArgs { /// /// If no path is provided, a secret will be generated and stored in the datadir under /// `//jwt.hex`. For mainnet this would be `~/.reth/mainnet/jwt.hex` by default. - #[arg(long = "authrpc.jwtsecret", value_name = "PATH", global = true, required = false)] + #[arg(long = "authrpc.jwtsecret", value_name = "PATH", global = true, required = false, default_value = Resettable::from(DefaultRpcServerArgs::get_global().auth_jwtsecret.as_ref().map(|v| v.to_string_lossy().into())))] pub auth_jwtsecret: Option, /// Enable auth engine API over IPC - #[arg(long)] + #[arg(long, default_value_t = DefaultRpcServerArgs::get_global().auth_ipc)] pub auth_ipc: bool, /// Filename for auth IPC socket/pipe within the datadir - #[arg(long = "auth-ipc.path", default_value_t = constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string())] + #[arg(long = "auth-ipc.path", default_value_t = DefaultRpcServerArgs::get_global().auth_ipc_path.clone())] pub auth_ipc_path: String, /// Disable the auth/engine API server. /// /// This will prevent the authenticated engine-API server from starting. Use this if you're /// running a node that doesn't need to serve engine API requests. - #[arg(long = "disable-auth-server", alias = "disable-engine-api")] + #[arg(long = "disable-auth-server", alias = "disable-engine-api", default_value_t = DefaultRpcServerArgs::get_global().disable_auth_server)] pub disable_auth_server: bool, /// Hex encoded JWT secret to authenticate the regular RPC server(s), see `--http.api` and @@ -138,23 +508,23 @@ pub struct RpcServerArgs { /// /// This is __not__ used for the authenticated engine-API RPC server, see /// `--authrpc.jwtsecret`. - #[arg(long = "rpc.jwtsecret", value_name = "HEX", global = true, required = false)] + #[arg(long = "rpc.jwtsecret", value_name = "HEX", global = true, required = false, default_value = Resettable::from(DefaultRpcServerArgs::get_global().rpc_jwtsecret.as_ref().map(|v| format!("{:?}", v).into())))] pub rpc_jwtsecret: Option, /// Set the maximum RPC request payload size for both HTTP and WS in megabytes. - #[arg(long = "rpc.max-request-size", alias = "rpc-max-request-size", default_value_t = RPC_DEFAULT_MAX_REQUEST_SIZE_MB.into())] + #[arg(long = "rpc.max-request-size", alias = "rpc-max-request-size", default_value_t = DefaultRpcServerArgs::get_global().rpc_max_request_size)] pub rpc_max_request_size: MaxU32, /// Set the maximum RPC response payload size for both HTTP and WS in megabytes. 
- #[arg(long = "rpc.max-response-size", alias = "rpc-max-response-size", visible_alias = "rpc.returndata.limit", default_value_t = RPC_DEFAULT_MAX_RESPONSE_SIZE_MB.into())] + #[arg(long = "rpc.max-response-size", alias = "rpc-max-response-size", visible_alias = "rpc.returndata.limit", default_value_t = DefaultRpcServerArgs::get_global().rpc_max_response_size)] pub rpc_max_response_size: MaxU32, /// Set the maximum concurrent subscriptions per connection. - #[arg(long = "rpc.max-subscriptions-per-connection", alias = "rpc-max-subscriptions-per-connection", default_value_t = RPC_DEFAULT_MAX_SUBS_PER_CONN.into())] + #[arg(long = "rpc.max-subscriptions-per-connection", alias = "rpc-max-subscriptions-per-connection", default_value_t = DefaultRpcServerArgs::get_global().rpc_max_subscriptions_per_connection)] pub rpc_max_subscriptions_per_connection: MaxU32, /// Maximum number of RPC server connections. - #[arg(long = "rpc.max-connections", alias = "rpc-max-connections", value_name = "COUNT", default_value_t = RPC_DEFAULT_MAX_CONNECTIONS.into())] + #[arg(long = "rpc.max-connections", alias = "rpc-max-connections", value_name = "COUNT", default_value_t = DefaultRpcServerArgs::get_global().rpc_max_connections)] pub rpc_max_connections: MaxU32, /// Maximum number of concurrent tracing requests. @@ -163,19 +533,27 @@ pub struct RpcServerArgs { /// Tracing requests are generally CPU bound. /// Choosing a value that is higher than the available CPU cores can have a negative impact on /// the performance of the node and affect the node's ability to maintain sync. - #[arg(long = "rpc.max-tracing-requests", alias = "rpc-max-tracing-requests", value_name = "COUNT", default_value_t = constants::default_max_tracing_requests())] + #[arg(long = "rpc.max-tracing-requests", alias = "rpc-max-tracing-requests", value_name = "COUNT", default_value_t = DefaultRpcServerArgs::get_global().rpc_max_tracing_requests)] pub rpc_max_tracing_requests: usize, + /// Maximum number of concurrent blocking IO requests. + /// + /// Blocking IO requests include `eth_call`, `eth_estimateGas`, and similar methods that + /// require EVM execution. These are spawned as blocking tasks to avoid blocking the async + /// runtime. + #[arg(long = "rpc.max-blocking-io-requests", alias = "rpc-max-blocking-io-requests", value_name = "COUNT", default_value_t = DefaultRpcServerArgs::get_global().rpc_max_blocking_io_requests)] + pub rpc_max_blocking_io_requests: usize, + /// Maximum number of blocks for `trace_filter` requests. - #[arg(long = "rpc.max-trace-filter-blocks", alias = "rpc-max-trace-filter-blocks", value_name = "COUNT", default_value_t = constants::DEFAULT_MAX_TRACE_FILTER_BLOCKS)] + #[arg(long = "rpc.max-trace-filter-blocks", alias = "rpc-max-trace-filter-blocks", value_name = "COUNT", default_value_t = DefaultRpcServerArgs::get_global().rpc_max_trace_filter_blocks)] pub rpc_max_trace_filter_blocks: u64, /// Maximum number of blocks that could be scanned per filter request. (0 = entire chain) - #[arg(long = "rpc.max-blocks-per-filter", alias = "rpc-max-blocks-per-filter", value_name = "COUNT", default_value_t = ZeroAsNoneU64::new(constants::DEFAULT_MAX_BLOCKS_PER_FILTER))] + #[arg(long = "rpc.max-blocks-per-filter", alias = "rpc-max-blocks-per-filter", value_name = "COUNT", default_value_t = DefaultRpcServerArgs::get_global().rpc_max_blocks_per_filter)] pub rpc_max_blocks_per_filter: ZeroAsNoneU64, /// Maximum number of logs that can be returned in a single response. 
(0 = no limit) - #[arg(long = "rpc.max-logs-per-response", alias = "rpc-max-logs-per-response", value_name = "COUNT", default_value_t = ZeroAsNoneU64::new(constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64))] + #[arg(long = "rpc.max-logs-per-response", alias = "rpc-max-logs-per-response", value_name = "COUNT", default_value_t = DefaultRpcServerArgs::get_global().rpc_max_logs_per_response)] pub rpc_max_logs_per_response: ZeroAsNoneU64, /// Maximum gas limit for `eth_call` and call tracing RPC methods. @@ -184,10 +562,20 @@ pub struct RpcServerArgs { alias = "rpc-gascap", value_name = "GAS_CAP", value_parser = MaxOr::new(RangedU64ValueParser::::new().range(1..)), - default_value_t = constants::gas_oracle::RPC_DEFAULT_GAS_CAP + default_value_t = DefaultRpcServerArgs::get_global().rpc_gas_cap )] pub rpc_gas_cap: u64, + /// Maximum memory the EVM can allocate per RPC request. + #[arg( + long = "rpc.evm-memory-limit", + alias = "rpc-evm-memory-limit", + value_name = "MEMORY_LIMIT", + value_parser = MaxOr::new(RangedU64ValueParser::::new().range(1..)), + default_value_t = DefaultRpcServerArgs::get_global().rpc_evm_memory_limit + )] + pub rpc_evm_memory_limit: u64, + /// Maximum eth transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap) #[arg( long = "rpc.txfeecap", @@ -202,7 +590,7 @@ pub struct RpcServerArgs { #[arg( long = "rpc.max-simulate-blocks", value_name = "BLOCKS_COUNT", - default_value_t = constants::DEFAULT_MAX_SIMULATE_BLOCKS + default_value_t = DefaultRpcServerArgs::get_global().rpc_max_simulate_blocks )] pub rpc_max_simulate_blocks: u64, @@ -211,7 +599,7 @@ pub struct RpcServerArgs { /// configured number of blocks from current tip (up to `tip - window`). #[arg( long = "rpc.eth-proof-window", - default_value_t = constants::DEFAULT_ETH_PROOF_WINDOW, + default_value_t = DefaultRpcServerArgs::get_global().rpc_eth_proof_window, value_parser = RangedU64ValueParser::::new().range(..=constants::MAX_ETH_PROOF_WINDOW) )] pub rpc_eth_proof_window: u64, @@ -233,7 +621,7 @@ pub struct RpcServerArgs { /// Path to file containing disallowed addresses, json-encoded list of strings. Block /// validation API will reject blocks containing transactions from these addresses. - #[arg(long = "builder.disallow", value_name = "PATH", value_parser = reth_cli_util::parsers::read_json_from_file::>)] + #[arg(long = "builder.disallow", value_name = "PATH", value_parser = reth_cli_util::parsers::read_json_from_file::>, default_value = Resettable::from(DefaultRpcServerArgs::get_global().builder_disallow.as_ref().map(|v| format!("{:?}", v).into())))] pub builder_disallow: Option>, /// State cache configuration. 
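All of these `default_value_t` / `default_value` expressions are evaluated when clap builds the command, so a binary embedding these args can only customize the defaults by installing them before parsing starts. The `DefaultRpcServerArgs` builder itself is not part of this hunk; the sketch below assumes it mirrors the `DefaultTxPoolValues` type added later in this diff (`with_*` builders plus `try_init`), so treat the method names as assumptions:

```rust
// Hedged sketch: override the global RPC defaults before any CLI parsing.
// `with_http`, `with_http_port`, and `try_init` are assumed by analogy with
// DefaultTxPoolValues below; they are not confirmed by this hunk.
fn install_rpc_defaults() {
    let defaults = DefaultRpcServerArgs::default()
        .with_http(true)        // assumed builder: enable --http by default
        .with_http_port(9545);  // assumed builder: change the default HTTP port

    // First initialization wins; Err(self) just means defaults were set earlier.
    let _ = defaults.try_init();
}

// Call this at the very top of main(), before clap runs; otherwise the first
// `get_global()` access falls back to the built-in defaults.
```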
@@ -377,48 +765,93 @@ impl RpcServerArgs { impl Default for RpcServerArgs { fn default() -> Self { + let DefaultRpcServerArgs { + http, + http_addr, + http_port, + http_disable_compression, + http_api, + http_corsdomain, + ws, + ws_addr, + ws_port, + ws_allowed_origins, + ws_api, + ipcdisable, + ipcpath, + ipc_socket_permissions, + auth_addr, + auth_port, + auth_jwtsecret, + auth_ipc, + auth_ipc_path, + disable_auth_server, + rpc_jwtsecret, + rpc_max_request_size, + rpc_max_response_size, + rpc_max_subscriptions_per_connection, + rpc_max_connections, + rpc_max_tracing_requests, + rpc_max_blocking_io_requests, + rpc_max_trace_filter_blocks, + rpc_max_blocks_per_filter, + rpc_max_logs_per_response, + rpc_gas_cap, + rpc_evm_memory_limit, + rpc_tx_fee_cap, + rpc_max_simulate_blocks, + rpc_eth_proof_window, + rpc_proof_permits, + rpc_pending_block, + rpc_forwarder, + builder_disallow, + rpc_state_cache, + gas_price_oracle, + rpc_send_raw_transaction_sync_timeout, + } = DefaultRpcServerArgs::get_global().clone(); Self { - http: false, - http_addr: Ipv4Addr::LOCALHOST.into(), - http_port: constants::DEFAULT_HTTP_RPC_PORT, - http_disable_compression: false, - http_api: None, - http_corsdomain: None, - ws: false, - ws_addr: Ipv4Addr::LOCALHOST.into(), - ws_port: constants::DEFAULT_WS_RPC_PORT, - ws_allowed_origins: None, - ws_api: None, - ipcdisable: false, - ipcpath: constants::DEFAULT_IPC_ENDPOINT.to_string(), - ipc_socket_permissions: None, - auth_addr: Ipv4Addr::LOCALHOST.into(), - auth_port: constants::DEFAULT_AUTH_PORT, - auth_jwtsecret: None, - auth_ipc: false, - auth_ipc_path: constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string(), - disable_auth_server: false, - rpc_jwtsecret: None, - rpc_max_request_size: RPC_DEFAULT_MAX_REQUEST_SIZE_MB.into(), - rpc_max_response_size: RPC_DEFAULT_MAX_RESPONSE_SIZE_MB.into(), - rpc_max_subscriptions_per_connection: RPC_DEFAULT_MAX_SUBS_PER_CONN.into(), - rpc_max_connections: RPC_DEFAULT_MAX_CONNECTIONS.into(), - rpc_max_tracing_requests: constants::default_max_tracing_requests(), - rpc_max_trace_filter_blocks: constants::DEFAULT_MAX_TRACE_FILTER_BLOCKS, - rpc_max_blocks_per_filter: constants::DEFAULT_MAX_BLOCKS_PER_FILTER.into(), - rpc_max_logs_per_response: (constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64).into(), - rpc_gas_cap: constants::gas_oracle::RPC_DEFAULT_GAS_CAP, - rpc_tx_fee_cap: constants::DEFAULT_TX_FEE_CAP_WEI, - rpc_max_simulate_blocks: constants::DEFAULT_MAX_SIMULATE_BLOCKS, - rpc_eth_proof_window: constants::DEFAULT_ETH_PROOF_WINDOW, - rpc_pending_block: PendingBlockKind::Full, - gas_price_oracle: GasPriceOracleArgs::default(), - rpc_state_cache: RpcStateCacheArgs::default(), - rpc_proof_permits: constants::DEFAULT_PROOF_PERMITS, - rpc_forwarder: None, - builder_disallow: Default::default(), - rpc_send_raw_transaction_sync_timeout: - constants::RPC_DEFAULT_SEND_RAW_TX_SYNC_TIMEOUT_SECS, + http, + http_addr, + http_port, + http_disable_compression, + http_api, + http_corsdomain, + ws, + ws_addr, + ws_port, + ws_allowed_origins, + ws_api, + ipcdisable, + ipcpath, + ipc_socket_permissions, + auth_addr, + auth_port, + auth_jwtsecret, + auth_ipc, + auth_ipc_path, + disable_auth_server, + rpc_jwtsecret, + rpc_max_request_size, + rpc_max_response_size, + rpc_max_subscriptions_per_connection, + rpc_max_connections, + rpc_max_tracing_requests, + rpc_max_blocking_io_requests, + rpc_max_trace_filter_blocks, + rpc_max_blocks_per_filter, + rpc_max_logs_per_response, + rpc_gas_cap, + rpc_evm_memory_limit, + rpc_tx_fee_cap, + rpc_max_simulate_blocks, + 
rpc_eth_proof_window, + rpc_proof_permits, + rpc_pending_block, + rpc_forwarder, + builder_disallow, + rpc_state_cache, + gas_price_oracle, + rpc_send_raw_transaction_sync_timeout, } } } @@ -531,4 +964,159 @@ mod tests { let expected = 1_000_000_000_000_000_000u128; assert_eq!(args.rpc_tx_fee_cap, expected); // 1 ETH default cap } + + #[test] + fn test_rpc_server_args() { + let args = RpcServerArgs { + http: true, + http_addr: "127.0.0.1".parse().unwrap(), + http_port: 8545, + http_disable_compression: false, + http_api: Some(RpcModuleSelection::try_from_selection(["eth", "admin"]).unwrap()), + http_corsdomain: Some("*".to_string()), + ws: true, + ws_addr: "127.0.0.1".parse().unwrap(), + ws_port: 8546, + ws_allowed_origins: Some("*".to_string()), + ws_api: Some(RpcModuleSelection::try_from_selection(["eth", "admin"]).unwrap()), + ipcdisable: false, + ipcpath: "reth.ipc".to_string(), + ipc_socket_permissions: Some("0o666".to_string()), + auth_addr: "127.0.0.1".parse().unwrap(), + auth_port: 8551, + auth_jwtsecret: Some(std::path::PathBuf::from("/tmp/jwt.hex")), + auth_ipc: false, + auth_ipc_path: "engine.ipc".to_string(), + disable_auth_server: false, + rpc_jwtsecret: Some( + JwtSecret::from_hex( + "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", + ) + .unwrap(), + ), + rpc_max_request_size: 15u32.into(), + rpc_max_response_size: 160u32.into(), + rpc_max_subscriptions_per_connection: 1024u32.into(), + rpc_max_connections: 500u32.into(), + rpc_max_tracing_requests: 16, + rpc_max_blocking_io_requests: 256, + rpc_max_trace_filter_blocks: 4000, + rpc_max_blocks_per_filter: 1000u64.into(), + rpc_max_logs_per_response: 10000u64.into(), + rpc_gas_cap: 50_000_000, + rpc_evm_memory_limit: 256, + rpc_tx_fee_cap: 2_000_000_000_000_000_000u128, + rpc_max_simulate_blocks: 256, + rpc_eth_proof_window: 100_000, + rpc_proof_permits: 16, + rpc_pending_block: PendingBlockKind::Full, + rpc_forwarder: Some("http://localhost:8545".parse().unwrap()), + builder_disallow: None, + rpc_state_cache: RpcStateCacheArgs { + max_blocks: 5000, + max_receipts: 2000, + max_headers: 1000, + max_concurrent_db_requests: 512, + }, + gas_price_oracle: GasPriceOracleArgs { + blocks: 20, + ignore_price: 2, + max_price: 500_000_000_000, + percentile: 60, + default_suggested_fee: None, + }, + rpc_send_raw_transaction_sync_timeout: std::time::Duration::from_secs(30), + }; + + let parsed_args = CommandParser::::parse_from([ + "reth", + "--http", + "--http.addr", + "127.0.0.1", + "--http.port", + "8545", + "--http.api", + "eth,admin", + "--http.corsdomain", + "*", + "--ws", + "--ws.addr", + "127.0.0.1", + "--ws.port", + "8546", + "--ws.origins", + "*", + "--ws.api", + "eth,admin", + "--ipcpath", + "reth.ipc", + "--ipc.permissions", + "0o666", + "--authrpc.addr", + "127.0.0.1", + "--authrpc.port", + "8551", + "--authrpc.jwtsecret", + "/tmp/jwt.hex", + "--auth-ipc.path", + "engine.ipc", + "--rpc.jwtsecret", + "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", + "--rpc.max-request-size", + "15", + "--rpc.max-response-size", + "160", + "--rpc.max-subscriptions-per-connection", + "1024", + "--rpc.max-connections", + "500", + "--rpc.max-tracing-requests", + "16", + "--rpc.max-blocking-io-requests", + "256", + "--rpc.max-trace-filter-blocks", + "4000", + "--rpc.max-blocks-per-filter", + "1000", + "--rpc.max-logs-per-response", + "10000", + "--rpc.gascap", + "50000000", + "--rpc.evm-memory-limit", + "256", + "--rpc.txfeecap", + "2.0", + "--rpc.max-simulate-blocks", + "256", + "--rpc.eth-proof-window", + 
"100000", + "--rpc.proof-permits", + "16", + "--rpc.pending-block", + "full", + "--rpc.forwarder", + "http://localhost:8545", + "--rpc-cache.max-blocks", + "5000", + "--rpc-cache.max-receipts", + "2000", + "--rpc-cache.max-headers", + "1000", + "--rpc-cache.max-concurrent-db-requests", + "512", + "--gpo.blocks", + "20", + "--gpo.ignoreprice", + "2", + "--gpo.maxprice", + "500000000000", + "--gpo.percentile", + "60", + "--rpc.send-raw-transaction-sync-timeout", + "30s", + ]) + .args; + + assert_eq!(parsed_args, args); + } } diff --git a/crates/node/core/src/args/static_files.rs b/crates/node/core/src/args/static_files.rs new file mode 100644 index 0000000000..5f7a1510ee --- /dev/null +++ b/crates/node/core/src/args/static_files.rs @@ -0,0 +1,71 @@ +//! clap [Args](clap::Args) for static files configuration + +use clap::Args; +use reth_config::config::{BlocksPerFileConfig, StaticFilesConfig}; +use reth_provider::StorageSettings; + +/// Parameters for static files configuration +#[derive(Debug, Args, PartialEq, Eq, Default, Clone, Copy)] +#[command(next_help_heading = "Static Files")] +pub struct StaticFilesArgs { + /// Number of blocks per file for the headers segment. + #[arg(long = "static-files.blocks-per-file.headers")] + pub blocks_per_file_headers: Option, + + /// Number of blocks per file for the transactions segment. + #[arg(long = "static-files.blocks-per-file.transactions")] + pub blocks_per_file_transactions: Option, + + /// Number of blocks per file for the receipts segment. + #[arg(long = "static-files.blocks-per-file.receipts")] + pub blocks_per_file_receipts: Option, + + /// Number of blocks per file for the transaction senders segment. + #[arg(long = "static-files.blocks-per-file.transaction-senders")] + pub blocks_per_file_transaction_senders: Option, + + /// Store receipts in static files instead of the database. + /// + /// When enabled, receipts will be written to static files on disk instead of the database. + /// + /// Note: This setting can only be configured at genesis initialization. Once + /// the node has been initialized, changing this flag requires re-syncing from scratch. + #[arg(long = "static-files.receipts")] + pub receipts: bool, + + /// Store transaction senders in static files instead of the database. + /// + /// When enabled, transaction senders will be written to static files on disk instead of the + /// database. + /// + /// Note: This setting can only be configured at genesis initialization. Once + /// the node has been initialized, changing this flag requires re-syncing from scratch. + #[arg(long = "static-files.transaction-senders")] + pub transaction_senders: bool, +} + +impl StaticFilesArgs { + /// Merges the CLI arguments with an existing [`StaticFilesConfig`], giving priority to CLI + /// args. + pub fn merge_with_config(&self, config: StaticFilesConfig) -> StaticFilesConfig { + StaticFilesConfig { + blocks_per_file: BlocksPerFileConfig { + headers: self.blocks_per_file_headers.or(config.blocks_per_file.headers), + transactions: self + .blocks_per_file_transactions + .or(config.blocks_per_file.transactions), + receipts: self.blocks_per_file_receipts.or(config.blocks_per_file.receipts), + transaction_senders: self + .blocks_per_file_transaction_senders + .or(config.blocks_per_file.transaction_senders), + }, + } + } + + /// Converts the static files arguments into [`StorageSettings`]. 
+ pub const fn to_settings(&self) -> StorageSettings { + StorageSettings::legacy() + .with_receipts_in_static_files(self.receipts) + .with_transaction_senders_in_static_files(self.transaction_senders) + } +} diff --git a/crates/node/core/src/args/trace.rs b/crates/node/core/src/args/trace.rs index 5b5e21502d..b94b83f433 100644 --- a/crates/node/core/src/args/trace.rs +++ b/crates/node/core/src/args/trace.rs @@ -2,7 +2,7 @@ use clap::Parser; use eyre::WrapErr; -use reth_tracing::tracing_subscriber::EnvFilter; +use reth_tracing::{tracing_subscriber::EnvFilter, Layers}; use reth_tracing_otlp::OtlpProtocol; use url::Url; @@ -61,6 +61,40 @@ pub struct TraceArgs { help_heading = "Tracing" )] pub otlp_filter: EnvFilter, + + /// Service name to use for OTLP tracing export. + /// + /// This name will be used to identify the service in distributed tracing systems + /// like Jaeger or Zipkin. Useful for differentiating between multiple reth instances. + /// + /// Set via `OTEL_SERVICE_NAME` environment variable. Defaults to "reth" if not specified. + #[arg( + long = "tracing-otlp.service-name", + env = "OTEL_SERVICE_NAME", + global = true, + value_name = "NAME", + default_value = "reth", + hide = true, + help_heading = "Tracing" + )] + pub service_name: String, + + /// Trace sampling ratio to control the percentage of traces to export. + /// + /// Valid range: 0.0 to 1.0 + /// - 1.0, default: Sample all traces + /// - 0.01: Sample 1% of traces + /// - 0.0: Disable sampling + /// + /// Example: --tracing-otlp.sample-ratio=0.0. + #[arg( + long = "tracing-otlp.sample-ratio", + env = "OTEL_TRACES_SAMPLER_ARG", + global = true, + value_name = "RATIO", + help_heading = "Tracing" + )] + pub sample_ratio: Option, } impl Default for TraceArgs { @@ -69,20 +103,66 @@ impl Default for TraceArgs { otlp: None, protocol: OtlpProtocol::Http, otlp_filter: EnvFilter::from_default_env(), + sample_ratio: None, + service_name: "reth".to_string(), } } } impl TraceArgs { - /// Validate the configuration - pub fn validate(&mut self) -> eyre::Result<()> { - if let Some(url) = &mut self.otlp { - self.protocol.validate_endpoint(url)?; + /// Initialize OTLP tracing with the given layers and runner. + /// + /// This method handles OTLP tracing initialization based on the configured options, + /// including validation, protocol selection, and feature flag checking. + /// + /// Returns the initialization status to allow callers to log appropriate messages. + /// + /// Note: even though this function is async, it does not actually perform any async operations. + /// It's needed only to be able to initialize the gRPC transport of OTLP tracing that needs to + /// be called inside a tokio runtime context. + pub async fn init_otlp_tracing( + &mut self, + _layers: &mut Layers, + ) -> eyre::Result { + if let Some(endpoint) = self.otlp.as_mut() { + self.protocol.validate_endpoint(endpoint)?; + + #[cfg(feature = "otlp")] + { + { + let config = reth_tracing_otlp::OtlpConfig::new( + self.service_name.clone(), + endpoint.clone(), + self.protocol, + self.sample_ratio, + )?; + + _layers.with_span_layer(config.clone(), self.otlp_filter.clone())?; + + Ok(OtlpInitStatus::Started(config.endpoint().clone())) + } + } + #[cfg(not(feature = "otlp"))] + { + Ok(OtlpInitStatus::NoFeature) + } + } else { + Ok(OtlpInitStatus::Disabled) } - Ok(()) } } +/// Status of OTLP tracing initialization. +#[derive(Debug)] +pub enum OtlpInitStatus { + /// OTLP tracing was successfully started with the given endpoint. 
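For callers, the trace.rs change below replaces the old `validate()` with an async initializer that also installs the OTLP span layer and reports what happened. A rough usage sketch; the `setup_tracing` wrapper, the `Layers::default()` constructor, and the collector URL are assumptions or placeholders, while the method and enum variants come from this diff:

```rust
// Hedged sketch of consuming init_otlp_tracing at node startup.
async fn setup_tracing(mut trace_args: TraceArgs) -> eyre::Result<()> {
    let mut layers = Layers::default(); // assumed: Layers can be constructed empty
    trace_args.otlp = Some("http://localhost:4318/v1/traces".parse()?); // placeholder collector
    trace_args.sample_ratio = Some(0.1); // keep roughly 10% of traces

    // Must run inside a tokio runtime so the gRPC exporter can be constructed.
    match trace_args.init_otlp_tracing(&mut layers).await? {
        OtlpInitStatus::Started(endpoint) => tracing::info!(%endpoint, "OTLP tracing started"),
        OtlpInitStatus::Disabled => tracing::debug!("OTLP tracing disabled"),
        OtlpInitStatus::NoFeature => tracing::warn!("OTLP endpoint set but the `otlp` feature is off"),
    }
    Ok(())
}
```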
+ Started(Url), + /// OTLP tracing is disabled (no endpoint configured). + Disabled, + /// OTLP arguments provided but feature is not compiled. + NoFeature, +} + // Parses an OTLP endpoint url. fn parse_otlp_endpoint(arg: &str) -> eyre::Result { Url::parse(arg).wrap_err("Invalid URL for OTLP trace output") diff --git a/crates/node/core/src/args/txpool.rs b/crates/node/core/src/args/txpool.rs index 2ab604be16..8c826e1afb 100644 --- a/crates/node/core/src/args/txpool.rs +++ b/crates/node/core/src/args/txpool.rs @@ -3,8 +3,8 @@ use crate::cli::config::RethTransactionPoolConfig; use alloy_eips::eip1559::{ETHEREUM_BLOCK_GAS_LIMIT_30M, MIN_PROTOCOL_BASE_FEE}; use alloy_primitives::Address; -use clap::Args; -use reth_cli_util::parse_duration_from_secs_or_ms; +use clap::{builder::Resettable, Args}; +use reth_cli_util::{parse_duration_from_secs_or_ms, parsers::format_duration_as_secs_or_ms}; use reth_transaction_pool::{ blobstore::disk::DEFAULT_MAX_CACHED_BLOBS, maintain::MAX_QUEUED_TRANSACTION_LIFETIME, @@ -15,126 +15,401 @@ use reth_transaction_pool::{ REPLACE_BLOB_PRICE_BUMP, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, }; -use std::time::Duration; +use std::{path::PathBuf, sync::OnceLock, time::Duration}; + +/// Global static transaction pool defaults +static TXPOOL_DEFAULTS: OnceLock = OnceLock::new(); + +/// Default values for transaction pool that can be customized +/// +/// Global defaults can be set via [`DefaultTxPoolValues::try_init`]. +#[derive(Debug, Clone)] +pub struct DefaultTxPoolValues { + pending_max_count: usize, + pending_max_size: usize, + basefee_max_count: usize, + basefee_max_size: usize, + queued_max_count: usize, + queued_max_size: usize, + blobpool_max_count: usize, + blobpool_max_size: usize, + blob_cache_size: Option, + disable_blobs_support: bool, + max_account_slots: usize, + price_bump: u128, + minimal_protocol_basefee: u64, + minimum_priority_fee: Option, + enforced_gas_limit: u64, + max_tx_gas_limit: Option, + blob_transaction_price_bump: u128, + max_tx_input_bytes: usize, + max_cached_entries: u32, + no_locals: bool, + locals: Vec
, + no_local_transactions_propagation: bool, + additional_validation_tasks: usize, + pending_tx_listener_buffer_size: usize, + new_tx_listener_buffer_size: usize, + max_new_pending_txs_notifications: usize, + max_queued_lifetime: Duration, + transactions_backup_path: Option, + disable_transactions_backup: bool, + max_batch_size: usize, +} + +impl DefaultTxPoolValues { + /// Initialize the global transaction pool defaults with this configuration + pub fn try_init(self) -> Result<(), Self> { + TXPOOL_DEFAULTS.set(self) + } + + /// Get a reference to the global transaction pool defaults + pub fn get_global() -> &'static Self { + TXPOOL_DEFAULTS.get_or_init(Self::default) + } + + /// Set the default pending sub-pool max transaction count + pub const fn with_pending_max_count(mut self, v: usize) -> Self { + self.pending_max_count = v; + self + } + + /// Set the default pending sub-pool max size in MB + pub const fn with_pending_max_size(mut self, v: usize) -> Self { + self.pending_max_size = v; + self + } + + /// Set the default basefee sub-pool max transaction count + pub const fn with_basefee_max_count(mut self, v: usize) -> Self { + self.basefee_max_count = v; + self + } + + /// Set the default basefee sub-pool max size in MB + pub const fn with_basefee_max_size(mut self, v: usize) -> Self { + self.basefee_max_size = v; + self + } + + /// Set the default queued sub-pool max transaction count + pub const fn with_queued_max_count(mut self, v: usize) -> Self { + self.queued_max_count = v; + self + } + + /// Set the default queued sub-pool max size in MB + pub const fn with_queued_max_size(mut self, v: usize) -> Self { + self.queued_max_size = v; + self + } + + /// Set the default blobpool max transaction count + pub const fn with_blobpool_max_count(mut self, v: usize) -> Self { + self.blobpool_max_count = v; + self + } + + /// Set the default blobpool max size in MB + pub const fn with_blobpool_max_size(mut self, v: usize) -> Self { + self.blobpool_max_size = v; + self + } + + /// Set the default blob cache size + pub const fn with_blob_cache_size(mut self, v: Option) -> Self { + self.blob_cache_size = v; + self + } + + /// Set whether to disable blob transaction support by default + pub const fn with_disable_blobs_support(mut self, v: bool) -> Self { + self.disable_blobs_support = v; + self + } + + /// Set the default max account slots + pub const fn with_max_account_slots(mut self, v: usize) -> Self { + self.max_account_slots = v; + self + } + + /// Set the default price bump percentage + pub const fn with_price_bump(mut self, v: u128) -> Self { + self.price_bump = v; + self + } + + /// Set the default minimal protocol base fee + pub const fn with_minimal_protocol_basefee(mut self, v: u64) -> Self { + self.minimal_protocol_basefee = v; + self + } + + /// Set the default minimum priority fee + pub const fn with_minimum_priority_fee(mut self, v: Option) -> Self { + self.minimum_priority_fee = v; + self + } + + /// Set the default enforced gas limit + pub const fn with_enforced_gas_limit(mut self, v: u64) -> Self { + self.enforced_gas_limit = v; + self + } + + /// Set the default max transaction gas limit + pub const fn with_max_tx_gas_limit(mut self, v: Option) -> Self { + self.max_tx_gas_limit = v; + self + } + + /// Set the default blob transaction price bump + pub const fn with_blob_transaction_price_bump(mut self, v: u128) -> Self { + self.blob_transaction_price_bump = v; + self + } + + /// Set the default max transaction input bytes + pub const fn with_max_tx_input_bytes(mut self, v: 
usize) -> Self { + self.max_tx_input_bytes = v; + self + } + + /// Set the default max cached entries + pub const fn with_max_cached_entries(mut self, v: u32) -> Self { + self.max_cached_entries = v; + self + } + + /// Set whether to disable local transaction exemptions by default + pub const fn with_no_locals(mut self, v: bool) -> Self { + self.no_locals = v; + self + } + + /// Set the default local addresses + pub fn with_locals(mut self, v: Vec
) -> Self { + self.locals = v; + self + } + + /// Set whether to disable local transaction propagation by default + pub const fn with_no_local_transactions_propagation(mut self, v: bool) -> Self { + self.no_local_transactions_propagation = v; + self + } + + /// Set the default additional validation tasks + pub const fn with_additional_validation_tasks(mut self, v: usize) -> Self { + self.additional_validation_tasks = v; + self + } + + /// Set the default pending transaction listener buffer size + pub const fn with_pending_tx_listener_buffer_size(mut self, v: usize) -> Self { + self.pending_tx_listener_buffer_size = v; + self + } + + /// Set the default new transaction listener buffer size + pub const fn with_new_tx_listener_buffer_size(mut self, v: usize) -> Self { + self.new_tx_listener_buffer_size = v; + self + } + + /// Set the default max new pending transactions notifications + pub const fn with_max_new_pending_txs_notifications(mut self, v: usize) -> Self { + self.max_new_pending_txs_notifications = v; + self + } + + /// Set the default max queued lifetime + pub const fn with_max_queued_lifetime(mut self, v: Duration) -> Self { + self.max_queued_lifetime = v; + self + } + + /// Set the default transactions backup path + pub fn with_transactions_backup_path(mut self, v: Option) -> Self { + self.transactions_backup_path = v; + self + } + + /// Set whether to disable transaction backup by default + pub const fn with_disable_transactions_backup(mut self, v: bool) -> Self { + self.disable_transactions_backup = v; + self + } + + /// Set the default max batch size + pub const fn with_max_batch_size(mut self, v: usize) -> Self { + self.max_batch_size = v; + self + } +} + +impl Default for DefaultTxPoolValues { + fn default() -> Self { + Self { + pending_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, + pending_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, + basefee_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, + basefee_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, + queued_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, + queued_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, + blobpool_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, + blobpool_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, + blob_cache_size: None, + disable_blobs_support: false, + max_account_slots: TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, + price_bump: DEFAULT_PRICE_BUMP, + minimal_protocol_basefee: MIN_PROTOCOL_BASE_FEE, + minimum_priority_fee: None, + enforced_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M, + max_tx_gas_limit: None, + blob_transaction_price_bump: REPLACE_BLOB_PRICE_BUMP, + max_tx_input_bytes: DEFAULT_MAX_TX_INPUT_BYTES, + max_cached_entries: DEFAULT_MAX_CACHED_BLOBS, + no_locals: false, + locals: Vec::new(), + no_local_transactions_propagation: false, + additional_validation_tasks: DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS, + pending_tx_listener_buffer_size: PENDING_TX_LISTENER_BUFFER_SIZE, + new_tx_listener_buffer_size: NEW_TX_LISTENER_BUFFER_SIZE, + max_new_pending_txs_notifications: MAX_NEW_PENDING_TXS_NOTIFICATIONS, + max_queued_lifetime: MAX_QUEUED_TRANSACTION_LIFETIME, + transactions_backup_path: None, + disable_transactions_backup: false, + max_batch_size: 1, + } + } +} /// Parameters for debugging purposes #[derive(Debug, Clone, Args, PartialEq, Eq)] #[command(next_help_heading = "TxPool")] pub struct TxPoolArgs { /// Max number of transaction in the pending sub-pool. 
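Because these are process-wide `OnceLock` defaults, a node built on top of these args can shift the baseline txpool behaviour without forking the arg structs, provided it installs its values before argument parsing. A short sketch using only builders that appear above; the values chosen are arbitrary examples:

```rust
use std::time::Duration;

// Hedged sketch: install custom txpool defaults before clap runs.
fn install_txpool_defaults() {
    let defaults = DefaultTxPoolValues::default()
        .with_max_account_slots(32)                               // more slots per sender
        .with_max_queued_lifetime(Duration::from_secs(3 * 3600))  // 3h queued lifetime
        .with_disable_blobs_support(true);                        // e.g. a chain without EIP-4844

    // First initialization wins; Err(self) just means defaults were set earlier.
    let _ = defaults.try_init();
}
```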
- #[arg(long = "txpool.pending-max-count", alias = "txpool.pending_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)] + #[arg(long = "txpool.pending-max-count", alias = "txpool.pending_max_count", default_value_t = DefaultTxPoolValues::get_global().pending_max_count)] pub pending_max_count: usize, /// Max size of the pending sub-pool in megabytes. - #[arg(long = "txpool.pending-max-size", alias = "txpool.pending_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] + #[arg(long = "txpool.pending-max-size", alias = "txpool.pending_max_size", default_value_t = DefaultTxPoolValues::get_global().pending_max_size)] pub pending_max_size: usize, /// Max number of transaction in the basefee sub-pool - #[arg(long = "txpool.basefee-max-count", alias = "txpool.basefee_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)] + #[arg(long = "txpool.basefee-max-count", alias = "txpool.basefee_max_count", default_value_t = DefaultTxPoolValues::get_global().basefee_max_count)] pub basefee_max_count: usize, /// Max size of the basefee sub-pool in megabytes. - #[arg(long = "txpool.basefee-max-size", alias = "txpool.basefee_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] + #[arg(long = "txpool.basefee-max-size", alias = "txpool.basefee_max_size", default_value_t = DefaultTxPoolValues::get_global().basefee_max_size)] pub basefee_max_size: usize, /// Max number of transaction in the queued sub-pool - #[arg(long = "txpool.queued-max-count", alias = "txpool.queued_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)] + #[arg(long = "txpool.queued-max-count", alias = "txpool.queued_max_count", default_value_t = DefaultTxPoolValues::get_global().queued_max_count)] pub queued_max_count: usize, /// Max size of the queued sub-pool in megabytes. - #[arg(long = "txpool.queued-max-size", alias = "txpool.queued_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] + #[arg(long = "txpool.queued-max-size", alias = "txpool.queued_max_size", default_value_t = DefaultTxPoolValues::get_global().queued_max_size)] pub queued_max_size: usize, /// Max number of transaction in the blobpool - #[arg(long = "txpool.blobpool-max-count", alias = "txpool.blobpool_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)] + #[arg(long = "txpool.blobpool-max-count", alias = "txpool.blobpool_max_count", default_value_t = DefaultTxPoolValues::get_global().blobpool_max_count)] pub blobpool_max_count: usize, /// Max size of the blobpool in megabytes. - #[arg(long = "txpool.blobpool-max-size", alias = "txpool.blobpool_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] + #[arg(long = "txpool.blobpool-max-size", alias = "txpool.blobpool_max_size", default_value_t = DefaultTxPoolValues::get_global().blobpool_max_size)] pub blobpool_max_size: usize, /// Max number of entries for the in memory cache of the blob store. 
- #[arg(long = "txpool.blob-cache-size", alias = "txpool.blob_cache_size")] + #[arg(long = "txpool.blob-cache-size", alias = "txpool.blob_cache_size", default_value = Resettable::from(DefaultTxPoolValues::get_global().blob_cache_size.map(|v| v.to_string().into())))] pub blob_cache_size: Option, + /// Disable EIP-4844 blob transaction support + #[arg(long = "txpool.disable-blobs-support", alias = "txpool.disable_blobs_support", default_value_t = DefaultTxPoolValues::get_global().disable_blobs_support, conflicts_with_all = ["blobpool_max_count", "blobpool_max_size", "blob_cache_size", "blob_transaction_price_bump"])] + pub disable_blobs_support: bool, + /// Max number of executable transaction slots guaranteed per account - #[arg(long = "txpool.max-account-slots", alias = "txpool.max_account_slots", default_value_t = TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER)] + #[arg(long = "txpool.max-account-slots", alias = "txpool.max_account_slots", default_value_t = DefaultTxPoolValues::get_global().max_account_slots)] pub max_account_slots: usize, /// Price bump (in %) for the transaction pool underpriced check. - #[arg(long = "txpool.pricebump", default_value_t = DEFAULT_PRICE_BUMP)] + #[arg(long = "txpool.pricebump", default_value_t = DefaultTxPoolValues::get_global().price_bump)] pub price_bump: u128, /// Minimum base fee required by the protocol. - #[arg(long = "txpool.minimal-protocol-fee", default_value_t = MIN_PROTOCOL_BASE_FEE)] + #[arg(long = "txpool.minimal-protocol-fee", default_value_t = DefaultTxPoolValues::get_global().minimal_protocol_basefee)] pub minimal_protocol_basefee: u64, /// Minimum priority fee required for transaction acceptance into the pool. /// Transactions with priority fee below this value will be rejected. - #[arg(long = "txpool.minimum-priority-fee")] + #[arg(long = "txpool.minimum-priority-fee", default_value = Resettable::from(DefaultTxPoolValues::get_global().minimum_priority_fee.map(|v| v.to_string().into())))] pub minimum_priority_fee: Option, /// The default enforced gas limit for transactions entering the pool - #[arg(long = "txpool.gas-limit", default_value_t = ETHEREUM_BLOCK_GAS_LIMIT_30M)] + #[arg(long = "txpool.gas-limit", default_value_t = DefaultTxPoolValues::get_global().enforced_gas_limit)] pub enforced_gas_limit: u64, /// Maximum gas limit for individual transactions. Transactions exceeding this limit will be /// rejected by the transaction pool - #[arg(long = "txpool.max-tx-gas")] + #[arg(long = "txpool.max-tx-gas", default_value = Resettable::from(DefaultTxPoolValues::get_global().max_tx_gas_limit.map(|v| v.to_string().into())))] pub max_tx_gas_limit: Option, /// Price bump percentage to replace an already existing blob transaction - #[arg(long = "blobpool.pricebump", default_value_t = REPLACE_BLOB_PRICE_BUMP)] + #[arg(long = "blobpool.pricebump", default_value_t = DefaultTxPoolValues::get_global().blob_transaction_price_bump)] pub blob_transaction_price_bump: u128, /// Max size in bytes of a single transaction allowed to enter the pool - #[arg(long = "txpool.max-tx-input-bytes", alias = "txpool.max_tx_input_bytes", default_value_t = DEFAULT_MAX_TX_INPUT_BYTES)] + #[arg(long = "txpool.max-tx-input-bytes", alias = "txpool.max_tx_input_bytes", default_value_t = DefaultTxPoolValues::get_global().max_tx_input_bytes)] pub max_tx_input_bytes: usize, /// The maximum number of blobs to keep in the in memory blob cache. 
- #[arg(long = "txpool.max-cached-entries", alias = "txpool.max_cached_entries", default_value_t = DEFAULT_MAX_CACHED_BLOBS)] + #[arg(long = "txpool.max-cached-entries", alias = "txpool.max_cached_entries", default_value_t = DefaultTxPoolValues::get_global().max_cached_entries)] pub max_cached_entries: u32, /// Flag to disable local transaction exemptions. - #[arg(long = "txpool.nolocals")] + #[arg(long = "txpool.nolocals", default_value_t = DefaultTxPoolValues::get_global().no_locals)] pub no_locals: bool, /// Flag to allow certain addresses as local. - #[arg(long = "txpool.locals")] + #[arg(long = "txpool.locals", default_values = DefaultTxPoolValues::get_global().locals.iter().map(ToString::to_string))] pub locals: Vec
, /// Flag to toggle local transaction propagation. - #[arg(long = "txpool.no-local-transactions-propagation")] + #[arg(long = "txpool.no-local-transactions-propagation", default_value_t = DefaultTxPoolValues::get_global().no_local_transactions_propagation)] pub no_local_transactions_propagation: bool, + /// Number of additional transaction validation tasks to spawn. - #[arg(long = "txpool.additional-validation-tasks", alias = "txpool.additional_validation_tasks", default_value_t = DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS)] + #[arg(long = "txpool.additional-validation-tasks", alias = "txpool.additional_validation_tasks", default_value_t = DefaultTxPoolValues::get_global().additional_validation_tasks)] pub additional_validation_tasks: usize, /// Maximum number of pending transactions from the network to buffer - #[arg(long = "txpool.max-pending-txns", alias = "txpool.max_pending_txns", default_value_t = PENDING_TX_LISTENER_BUFFER_SIZE)] + #[arg(long = "txpool.max-pending-txns", alias = "txpool.max_pending_txns", default_value_t = DefaultTxPoolValues::get_global().pending_tx_listener_buffer_size)] pub pending_tx_listener_buffer_size: usize, /// Maximum number of new transactions to buffer - #[arg(long = "txpool.max-new-txns", alias = "txpool.max_new_txns", default_value_t = NEW_TX_LISTENER_BUFFER_SIZE)] + #[arg(long = "txpool.max-new-txns", alias = "txpool.max_new_txns", default_value_t = DefaultTxPoolValues::get_global().new_tx_listener_buffer_size)] pub new_tx_listener_buffer_size: usize, /// How many new pending transactions to buffer and send to in progress pending transaction /// iterators. - #[arg(long = "txpool.max-new-pending-txs-notifications", alias = "txpool.max-new-pending-txs-notifications", default_value_t = MAX_NEW_PENDING_TXS_NOTIFICATIONS)] + #[arg(long = "txpool.max-new-pending-txs-notifications", alias = "txpool.max-new-pending-txs-notifications", default_value_t = DefaultTxPoolValues::get_global().max_new_pending_txs_notifications)] pub max_new_pending_txs_notifications: usize, /// Maximum amount of time non-executable transaction are queued. - #[arg(long = "txpool.lifetime", value_parser = parse_duration_from_secs_or_ms, default_value = "10800", value_name = "DURATION")] + #[arg(long = "txpool.lifetime", value_parser = parse_duration_from_secs_or_ms, value_name = "DURATION", default_value = format_duration_as_secs_or_ms(DefaultTxPoolValues::get_global().max_queued_lifetime))] pub max_queued_lifetime: Duration, /// Path to store the local transaction backup at, to survive node restarts. - #[arg(long = "txpool.transactions-backup", alias = "txpool.journal", value_name = "PATH")] - pub transactions_backup_path: Option, + #[arg(long = "txpool.transactions-backup", alias = "txpool.journal", value_name = "PATH", default_value = Resettable::from(DefaultTxPoolValues::get_global().transactions_backup_path.as_ref().map(|v| v.to_string_lossy().into())))] + pub transactions_backup_path: Option, /// Disables transaction backup to disk on node shutdown. 
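Switching the `--txpool.lifetime` default from the hard-coded string `"10800"` to `format_duration_as_secs_or_ms(...)` only works if the formatter emits something the companion parser accepts. A hedged property check; the seconds interpretation of a bare integer is grounded by the `txpool_args` test below, while the round-trip behaviour and the test name are assumptions from the function names:

```rust
use std::time::Duration;

#[test]
fn lifetime_default_round_trips() {
    // Bare integers parse as seconds ("--txpool.lifetime 7200" -> 7200s in the test below).
    assert_eq!(parse_duration_from_secs_or_ms("7200").unwrap(), Duration::from_secs(7200));

    // Assumed: the formatter round-trips through the parser, so whatever default
    // is rendered into --help can also be parsed back unchanged.
    let rendered = format_duration_as_secs_or_ms(MAX_QUEUED_TRANSACTION_LIFETIME);
    assert_eq!(parse_duration_from_secs_or_ms(&rendered).unwrap(), MAX_QUEUED_TRANSACTION_LIFETIME);
}
```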
#[arg( long = "txpool.disable-transactions-backup", alias = "txpool.disable-journal", - conflicts_with = "transactions_backup_path" + conflicts_with = "transactions_backup_path", + default_value_t = DefaultTxPoolValues::get_global().disable_transactions_backup )] pub disable_transactions_backup: bool, /// Max batch size for transaction pool insertions - #[arg(long = "txpool.max-batch-size", default_value_t = 1)] + #[arg(long = "txpool.max-batch-size", default_value_t = DefaultTxPoolValues::get_global().max_batch_size)] pub max_batch_size: usize, } @@ -158,36 +433,69 @@ impl TxPoolArgs { impl Default for TxPoolArgs { fn default() -> Self { + let DefaultTxPoolValues { + pending_max_count, + pending_max_size, + basefee_max_count, + basefee_max_size, + queued_max_count, + queued_max_size, + blobpool_max_count, + blobpool_max_size, + blob_cache_size, + disable_blobs_support, + max_account_slots, + price_bump, + minimal_protocol_basefee, + minimum_priority_fee, + enforced_gas_limit, + max_tx_gas_limit, + blob_transaction_price_bump, + max_tx_input_bytes, + max_cached_entries, + no_locals, + locals, + no_local_transactions_propagation, + additional_validation_tasks, + pending_tx_listener_buffer_size, + new_tx_listener_buffer_size, + max_new_pending_txs_notifications, + max_queued_lifetime, + transactions_backup_path, + disable_transactions_backup, + max_batch_size, + } = DefaultTxPoolValues::get_global().clone(); Self { - pending_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, - pending_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, - basefee_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, - basefee_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, - queued_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, - queued_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, - blobpool_max_count: TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, - blobpool_max_size: TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, - blob_cache_size: None, - max_account_slots: TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, - price_bump: DEFAULT_PRICE_BUMP, - minimal_protocol_basefee: MIN_PROTOCOL_BASE_FEE, - minimum_priority_fee: None, - enforced_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M, - max_tx_gas_limit: None, - blob_transaction_price_bump: REPLACE_BLOB_PRICE_BUMP, - max_tx_input_bytes: DEFAULT_MAX_TX_INPUT_BYTES, - max_cached_entries: DEFAULT_MAX_CACHED_BLOBS, - no_locals: false, - locals: Default::default(), - no_local_transactions_propagation: false, - additional_validation_tasks: DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS, - pending_tx_listener_buffer_size: PENDING_TX_LISTENER_BUFFER_SIZE, - new_tx_listener_buffer_size: NEW_TX_LISTENER_BUFFER_SIZE, - max_new_pending_txs_notifications: MAX_NEW_PENDING_TXS_NOTIFICATIONS, - max_queued_lifetime: MAX_QUEUED_TRANSACTION_LIFETIME, - transactions_backup_path: None, - disable_transactions_backup: false, - max_batch_size: 1, + pending_max_count, + pending_max_size, + basefee_max_count, + basefee_max_size, + queued_max_count, + queued_max_size, + blobpool_max_count, + blobpool_max_size, + blob_cache_size, + disable_blobs_support, + max_account_slots, + price_bump, + minimal_protocol_basefee, + minimum_priority_fee, + enforced_gas_limit, + max_tx_gas_limit, + blob_transaction_price_bump, + max_tx_input_bytes, + max_cached_entries, + no_locals, + locals, + no_local_transactions_propagation, + additional_validation_tasks, + pending_tx_listener_buffer_size, + new_tx_listener_buffer_size, + max_new_pending_txs_notifications, + max_queued_lifetime, + transactions_backup_path, + disable_transactions_backup, + max_batch_size, } } } @@ -195,6 +503,7 @@ 
impl Default for TxPoolArgs { impl RethTransactionPoolConfig for TxPoolArgs { /// Returns transaction pool configuration. fn pool_config(&self) -> PoolConfig { + let default_config = PoolConfig::default(); PoolConfig { local_transactions_config: LocalTransactionConfig { no_exemptions: self.no_locals, @@ -230,7 +539,7 @@ impl RethTransactionPoolConfig for TxPoolArgs { new_tx_listener_buffer_size: self.new_tx_listener_buffer_size, max_new_pending_txs_notifications: self.max_new_pending_txs_notifications, max_queued_lifetime: self.max_queued_lifetime, - ..Default::default() + max_inflight_delegated_slot_limit: default_config.max_inflight_delegated_slot_limit, } } @@ -243,6 +552,7 @@ impl RethTransactionPoolConfig for TxPoolArgs { #[cfg(test)] mod tests { use super::*; + use alloy_primitives::address; use clap::Parser; /// A helper type to parse Args more easily @@ -259,17 +569,6 @@ mod tests { assert_eq!(args, default_args); } - #[test] - fn txpool_parse_locals() { - let args = CommandParser::::parse_from([ - "reth", - "--txpool.locals", - "0x0000000000000000000000000000000000000000", - ]) - .args; - assert_eq!(args.locals, vec![Address::ZERO]); - } - #[test] fn txpool_parse_max_tx_lifetime() { // Test with a custom duration @@ -289,4 +588,106 @@ mod tests { assert!(result.is_err(), "Expected an error for invalid duration"); } + + #[test] + fn txpool_args() { + let args = TxPoolArgs { + pending_max_count: 1000, + pending_max_size: 200, + basefee_max_count: 2000, + basefee_max_size: 300, + queued_max_count: 3000, + queued_max_size: 400, + blobpool_max_count: 4000, + blobpool_max_size: 500, + blob_cache_size: Some(100), + disable_blobs_support: false, + max_account_slots: 20, + price_bump: 15, + minimal_protocol_basefee: 1000000000, + minimum_priority_fee: Some(2000000000), + enforced_gas_limit: 40000000, + max_tx_gas_limit: Some(50000000), + blob_transaction_price_bump: 25, + max_tx_input_bytes: 131072, + max_cached_entries: 200, + no_locals: true, + locals: vec![ + address!("0x0000000000000000000000000000000000000001"), + address!("0x0000000000000000000000000000000000000002"), + ], + no_local_transactions_propagation: true, + additional_validation_tasks: 4, + pending_tx_listener_buffer_size: 512, + new_tx_listener_buffer_size: 256, + max_new_pending_txs_notifications: 128, + max_queued_lifetime: Duration::from_secs(7200), + transactions_backup_path: Some(PathBuf::from("/tmp/txpool-backup")), + disable_transactions_backup: false, + max_batch_size: 10, + }; + + let parsed_args = CommandParser::::parse_from([ + "reth", + "--txpool.pending-max-count", + "1000", + "--txpool.pending-max-size", + "200", + "--txpool.basefee-max-count", + "2000", + "--txpool.basefee-max-size", + "300", + "--txpool.queued-max-count", + "3000", + "--txpool.queued-max-size", + "400", + "--txpool.blobpool-max-count", + "4000", + "--txpool.blobpool-max-size", + "500", + "--txpool.blob-cache-size", + "100", + "--txpool.max-account-slots", + "20", + "--txpool.pricebump", + "15", + "--txpool.minimal-protocol-fee", + "1000000000", + "--txpool.minimum-priority-fee", + "2000000000", + "--txpool.gas-limit", + "40000000", + "--txpool.max-tx-gas", + "50000000", + "--blobpool.pricebump", + "25", + "--txpool.max-tx-input-bytes", + "131072", + "--txpool.max-cached-entries", + "200", + "--txpool.nolocals", + "--txpool.locals", + "0x0000000000000000000000000000000000000001", + "--txpool.locals", + "0x0000000000000000000000000000000000000002", + "--txpool.no-local-transactions-propagation", + "--txpool.additional-validation-tasks", + 
"4", + "--txpool.max-pending-txns", + "512", + "--txpool.max-new-txns", + "256", + "--txpool.max-new-pending-txs-notifications", + "128", + "--txpool.lifetime", + "7200", + "--txpool.transactions-backup", + "/tmp/txpool-backup", + "--txpool.max-batch-size", + "10", + ]) + .args; + + assert_eq!(parsed_args, args); + } } diff --git a/crates/node/core/src/cli/config.rs b/crates/node/core/src/cli/config.rs index 8c29c4745e..4d5a485b59 100644 --- a/crates/node/core/src/cli/config.rs +++ b/crates/node/core/src/cli/config.rs @@ -35,6 +35,11 @@ pub trait PayloadBuilderConfig { /// Maximum number of tasks to spawn for building a payload. fn max_payload_tasks(&self) -> usize; + /// Maximum number of blobs to include per block (EIP-7872). + /// + /// If `None`, defaults to the protocol maximum. + fn max_blobs_per_block(&self) -> Option; + /// Returns the configured gas limit if set, or a chain-specific default. fn gas_limit_for(&self, chain: Chain) -> u64 { if let Some(limit) = self.gas_limit() { diff --git a/crates/node/core/src/dirs.rs b/crates/node/core/src/dirs.rs index 4f8507c4e6..a21cde2280 100644 --- a/crates/node/core/src/dirs.rs +++ b/crates/node/core/src/dirs.rs @@ -301,6 +301,18 @@ impl ChainPath { } } + /// Returns the path to the `RocksDB` database directory for this chain. + /// + /// `//rocksdb` + pub fn rocksdb(&self) -> PathBuf { + let datadir_args = &self.2; + if let Some(rocksdb_path) = &datadir_args.rocksdb_path { + rocksdb_path.clone() + } else { + self.data_dir().join("rocksdb") + } + } + /// Returns the path to the reth p2p secret key for this chain. /// /// `//discovery-secret` diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index c69593adf0..1d5b1700cb 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -3,7 +3,7 @@ use crate::{ args::{ DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, NetworkArgs, PayloadBuilderArgs, - PruningArgs, RpcServerArgs, TxPoolArgs, + PruningArgs, RpcServerArgs, StaticFilesArgs, TxPoolArgs, }, dirs::{ChainPath, DataDirPath}, utils::get_single_header, @@ -147,6 +147,9 @@ pub struct NodeConfig { /// All ERA import related arguments with --era prefix pub era: EraArgs, + + /// All static files related arguments + pub static_files: StaticFilesArgs, } impl NodeConfig { @@ -177,6 +180,7 @@ impl NodeConfig { datadir: DatadirArgs::default(), engine: EngineArgs::default(), era: EraArgs::default(), + static_files: StaticFilesArgs::default(), } } @@ -233,6 +237,46 @@ impl NodeConfig { self } + /// Set the [`ChainSpec`] for the node and converts the type to that chainid. + pub fn map_chain(self, chain: impl Into>) -> NodeConfig { + let Self { + datadir, + config, + metrics, + instance, + network, + rpc, + txpool, + builder, + debug, + db, + dev, + pruning, + engine, + era, + static_files, + .. + } = self; + NodeConfig { + datadir, + config, + chain: chain.into(), + metrics, + instance, + network, + rpc, + txpool, + builder, + debug, + db, + dev, + pruning, + engine, + era, + static_files, + } + } + /// Set the metrics address for the node pub fn with_metrics(mut self, metrics: MetricArgs) -> Self { self.metrics = metrics; @@ -427,6 +471,12 @@ impl NodeConfig { self } + /// Disables all discovery services for the node. + pub const fn with_disabled_discovery(mut self) -> Self { + self.network.discovery.disable_discovery = true; + self + } + /// Effectively disables the RPC state cache by setting the cache sizes to `0`. 
/// /// By setting the cache sizes to 0, caching of newly executed or fetched blocks will be @@ -493,6 +543,7 @@ impl NodeConfig { pruning: self.pruning, engine: self.engine, era: self.era, + static_files: self.static_files, } } @@ -533,6 +584,7 @@ impl Clone for NodeConfig { datadir: self.datadir.clone(), engine: self.engine.clone(), era: self.era.clone(), + static_files: self.static_files, } } } diff --git a/crates/node/ethstats/src/ethstats.rs b/crates/node/ethstats/src/ethstats.rs index 7592e93ae9..df87060d77 100644 --- a/crates/node/ethstats/src/ethstats.rs +++ b/crates/node/ethstats/src/ethstats.rs @@ -118,7 +118,7 @@ where "Successfully connected to EthStats server at {}", self.credentials.host ); let conn: ConnWrapper = ConnWrapper::new(ws_stream); - *self.conn.write().await = Some(conn.clone()); + *self.conn.write().await = Some(conn); self.login().await?; Ok(()) } @@ -208,7 +208,7 @@ where active: true, syncing: self.network.is_syncing(), peers: self.network.num_connected_peers() as u64, - gas_price: 0, // TODO + gas_price: self.pool.block_info().pending_basefee, uptime: 100, }, }; @@ -558,24 +558,32 @@ where // Start the read loop in a separate task let read_handle = { - let conn = self.conn.clone(); + let conn_arc = self.conn.clone(); let message_tx = message_tx.clone(); let shutdown_tx = shutdown_tx.clone(); tokio::spawn(async move { loop { - let conn = conn.read().await; - if let Some(conn) = conn.as_ref() { + let conn_guard = conn_arc.read().await; + if let Some(conn) = conn_guard.as_ref() { match conn.read_json().await { Ok(msg) => { if message_tx.send(msg).await.is_err() { break; } } - Err(e) => { - debug!(target: "ethstats", "Read error: {}", e); - break; - } + Err(e) => match e { + crate::error::ConnectionError::Serialization(err) => { + debug!(target: "ethstats", "JSON parse error from stats server: {}", err); + } + other => { + debug!(target: "ethstats", "Read error: {}", other); + drop(conn_guard); + if let Some(conn) = conn_arc.write().await.take() { + let _ = conn.close().await; + } + } + }, } } else { sleep(RECONNECT_INTERVAL).await; @@ -658,10 +666,12 @@ where } // Handle reconnection - _ = reconnect_interval.tick(), if self.conn.read().await.is_none() => { - match self.connect().await { - Ok(_) => info!(target: "ethstats", "Reconnected successfully"), - Err(e) => debug!(target: "ethstats", "Reconnect failed: {}", e), + _ = reconnect_interval.tick() => { + if self.conn.read().await.is_none() { + match self.connect().await { + Ok(_) => info!(target: "ethstats", "Reconnected successfully"), + Err(e) => debug!(target: "ethstats", "Reconnect failed: {}", e), + } } } } diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 02c7709819..4a577e7411 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -5,9 +5,7 @@ use alloy_consensus::{constants::GWEI_TO_WEI, BlockHeader}; use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::ForkchoiceState; use futures::Stream; -use reth_engine_primitives::{ - ConsensusEngineEvent, ConsensusEngineLiveSyncProgress, ForkchoiceStatus, -}; +use reth_engine_primitives::{ConsensusEngineEvent, ForkchoiceStatus}; use reth_network_api::PeersInfo; use reth_primitives_traits::{format_gas, format_gas_throughput, BlockBody, NodePrimitives}; use reth_prune_types::PrunerEvent; @@ -233,20 +231,6 @@ impl NodeState { self.safe_block_hash = Some(safe_block_hash); self.finalized_block_hash = Some(finalized_block_hash); } - ConsensusEngineEvent::LiveSyncProgress(live_sync_progress) 
=> { - match live_sync_progress { - ConsensusEngineLiveSyncProgress::DownloadingBlocks { - remaining_blocks, - target, - } => { - info!( - remaining_blocks, - target_block_hash=?target, - "Live sync in progress, downloading blocks" - ); - } - } - } ConsensusEngineEvent::CanonicalBlockAdded(executed, elapsed) => { let block = executed.sealed_block(); let mut full = block.gas_used() as f64 * 100.0 / block.gas_limit() as f64; @@ -596,6 +580,8 @@ impl Display for Eta { f, "{}", humantime::format_duration(Duration::from_secs(remaining.as_secs())) + .to_string() + .replace(' ', "") ) } } @@ -621,6 +607,6 @@ mod tests { } .to_string(); - assert_eq!(eta, "13m 37s"); + assert_eq!(eta, "13m37s"); } } diff --git a/crates/node/metrics/src/server.rs b/crates/node/metrics/src/server.rs index d7beb6c3a1..26e9a918fa 100644 --- a/crates/node/metrics/src/server.rs +++ b/crates/node/metrics/src/server.rs @@ -119,6 +119,8 @@ impl MetricServer { .await .wrap_err("Could not bind to address")?; + tracing::info!(target: "reth::cli", "Starting metrics endpoint at {}", listener.local_addr().unwrap()); + task_executor.spawn_with_graceful_shutdown_signal(|mut signal| { Box::pin(async move { loop { diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 3733227a3a..c9d2a2f3d3 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -12,7 +12,7 @@ exclude.workspace = true reth-cli-util.workspace = true reth-optimism-cli.workspace = true reth-optimism-rpc.workspace = true -reth-optimism-node = { workspace = true, features = ["js-tracer"] } +reth-optimism-node.workspace = true reth-optimism-chainspec.workspace = true reth-optimism-consensus.workspace = true reth-optimism-evm.workspace = true @@ -27,14 +27,23 @@ tracing.workspace = true workspace = true [features] -default = ["jemalloc", "reth-optimism-evm/portable"] +default = ["jemalloc", "otlp", "reth-optimism-evm/portable", "js-tracer", "keccak-cache-global", "asm-keccak"] + +otlp = ["reth-optimism-cli/otlp"] +samply = ["reth-optimism-cli/samply"] + +js-tracer = [ + "reth-optimism-node/js-tracer", +] jemalloc = ["reth-cli-util/jemalloc", "reth-optimism-cli/jemalloc"] jemalloc-prof = ["reth-cli-util/jemalloc-prof"] tracy-allocator = ["reth-cli-util/tracy-allocator"] asm-keccak = ["reth-optimism-cli/asm-keccak", "reth-optimism-node/asm-keccak"] - +keccak-cache-global = [ + "reth-optimism-node/keccak-cache-global", +] dev = [ "reth-optimism-cli/dev", "reth-optimism-primitives/arbitrary", diff --git a/crates/optimism/chainspec/Cargo.toml b/crates/optimism/chainspec/Cargo.toml index 5520116470..a4ef9263b1 100644 --- a/crates/optimism/chainspec/Cargo.toml +++ b/crates/optimism/chainspec/Cargo.toml @@ -48,6 +48,7 @@ op-alloy-consensus.workspace = true [dev-dependencies] reth-chainspec = { workspace = true, features = ["test-utils"] } +alloy-op-hardforks.workspace = true [features] default = ["std"] @@ -85,4 +86,5 @@ serde = [ "reth-optimism-primitives/serde", "reth-primitives-traits/serde", "op-alloy-consensus/serde", + "alloy-op-hardforks/serde", ] diff --git a/crates/optimism/chainspec/res/superchain-configs.tar b/crates/optimism/chainspec/res/superchain-configs.tar index da035a32da..2ed30f474b 100644 Binary files a/crates/optimism/chainspec/res/superchain-configs.tar and b/crates/optimism/chainspec/res/superchain-configs.tar differ diff --git a/crates/optimism/chainspec/res/superchain_registry_commit b/crates/optimism/chainspec/res/superchain_registry_commit index 70808136d1..239646ec04 100644 --- 
a/crates/optimism/chainspec/res/superchain_registry_commit +++ b/crates/optimism/chainspec/res/superchain_registry_commit @@ -1 +1 @@ -d56233c1e5254fc2fd769d5b33269502a1fe9ef8 +59e22d265b7a423b7f51a67a722471a6f3c3cc39 diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 30d90e64c9..a91102c4f8 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -70,7 +70,7 @@ use reth_chainspec::{ }; use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition}; use reth_network_peers::NodeRecord; -use reth_optimism_primitives::ADDRESS_L2_TO_L1_MESSAGE_PASSER; +use reth_optimism_primitives::L2_TO_L1_MESSAGE_PASSER_ADDRESS; use reth_primitives_traits::{sync::LazyLock, SealedHeader}; /// Chain spec builder for a OP stack chain. @@ -499,7 +499,7 @@ pub fn make_op_genesis_header(genesis: &Genesis, hardforks: &ChainHardforks) -> // If Isthmus is active, overwrite the withdrawals root with the storage root of predeploy // `L2ToL1MessagePasser.sol` if hardforks.fork(OpHardfork::Isthmus).active_at_timestamp(header.timestamp) && - let Some(predeploy) = genesis.alloc.get(&ADDRESS_L2_TO_L1_MESSAGE_PASSER) && + let Some(predeploy) = genesis.alloc.get(&L2_TO_L1_MESSAGE_PASSER_ADDRESS) && let Some(storage) = &predeploy.storage { header.withdrawals_root = @@ -519,7 +519,11 @@ pub fn make_op_genesis_header(genesis: &Genesis, hardforks: &ChainHardforks) -> mod tests { use alloc::string::{String, ToString}; use alloy_genesis::{ChainConfig, Genesis}; - use alloy_primitives::b256; + use alloy_op_hardforks::{ + BASE_MAINNET_JOVIAN_TIMESTAMP, BASE_SEPOLIA_JOVIAN_TIMESTAMP, OP_MAINNET_JOVIAN_TIMESTAMP, + OP_SEPOLIA_JOVIAN_TIMESTAMP, + }; + use alloy_primitives::{b256, hex}; use reth_chainspec::{test_fork_ids, BaseFeeParams, BaseFeeParamsKind}; use reth_ethereum_forks::{EthereumHardfork, ForkCondition, ForkHash, ForkId, Head}; use reth_optimism_forks::{OpHardfork, OpHardforks}; @@ -611,13 +615,20 @@ mod tests { // Isthmus ( Head { number: 0, timestamp: 1746806401, ..Default::default() }, - ForkId { hash: ForkHash([0x86, 0x72, 0x8b, 0x4e]), next: 0 }, /* TODO: update timestamp when Jovian is planned */ + ForkId { + hash: ForkHash([0x86, 0x72, 0x8b, 0x4e]), + next: BASE_MAINNET_JOVIAN_TIMESTAMP, + }, + ), + // Jovian + ( + Head { + number: 0, + timestamp: BASE_MAINNET_JOVIAN_TIMESTAMP, + ..Default::default() + }, + BASE_MAINNET.hardfork_fork_id(OpHardfork::Jovian).unwrap(), ), - // // Jovian - // ( - // Head { number: 0, timestamp: u64::MAX, ..Default::default() }, /* TODO: - // update timestamp when Jovian is planned */ ForkId { hash: - // ForkHash([0xef, 0x0e, 0x58, 0x33]), next: 0 }, ), ], ); } @@ -670,13 +681,20 @@ mod tests { // Isthmus ( Head { number: 0, timestamp: 1744905600, ..Default::default() }, - ForkId { hash: ForkHash([0x6c, 0x62, 0x5e, 0xe1]), next: 0 }, /* TODO: update timestamp when Jovian is planned */ + ForkId { + hash: ForkHash([0x6c, 0x62, 0x5e, 0xe1]), + next: OP_SEPOLIA_JOVIAN_TIMESTAMP, + }, + ), + // Jovian + ( + Head { + number: 0, + timestamp: OP_SEPOLIA_JOVIAN_TIMESTAMP, + ..Default::default() + }, + OP_SEPOLIA.hardfork_fork_id(OpHardfork::Jovian).unwrap(), ), - // // Jovian - // ( - // Head { number: 0, timestamp: u64::MAX, ..Default::default() }, /* TODO: - // update timestamp when Jovian is planned */ ForkId { hash: - // ForkHash([0x04, 0x2a, 0x5c, 0x14]), next: 0 }, ), ], ); } @@ -739,13 +757,20 @@ mod tests { // Isthmus ( Head { number: 105235063, timestamp: 1746806401, ..Default::default() }, - ForkId 
{ hash: ForkHash([0x37, 0xbe, 0x75, 0x8f]), next: 0 }, /* TODO: update timestamp when Jovian is planned */ + ForkId { + hash: ForkHash([0x37, 0xbe, 0x75, 0x8f]), + next: OP_MAINNET_JOVIAN_TIMESTAMP, + }, ), // Jovian - // ( - // Head { number: 105235063, timestamp: u64::MAX, ..Default::default() }, /* - // TODO: update timestamp when Jovian is planned */ ForkId { - // hash: ForkHash([0x26, 0xce, 0xa1, 0x75]), next: 0 }, ), + ( + Head { + number: 105235063, + timestamp: OP_MAINNET_JOVIAN_TIMESTAMP, + ..Default::default() + }, + OP_MAINNET.hardfork_fork_id(OpHardfork::Jovian).unwrap(), + ), ], ); } @@ -798,13 +823,20 @@ mod tests { // Isthmus ( Head { number: 0, timestamp: 1744905600, ..Default::default() }, - ForkId { hash: ForkHash([0x06, 0x0a, 0x4d, 0x1d]), next: 0 }, /* TODO: update timestamp when Jovian is planned */ + ForkId { + hash: ForkHash([0x06, 0x0a, 0x4d, 0x1d]), + next: BASE_SEPOLIA_JOVIAN_TIMESTAMP, + }, + ), + // Jovian + ( + Head { + number: 0, + timestamp: BASE_SEPOLIA_JOVIAN_TIMESTAMP, + ..Default::default() + }, + BASE_SEPOLIA.hardfork_fork_id(OpHardfork::Jovian).unwrap(), ), - // // Jovian - // ( - // Head { number: 0, timestamp: u64::MAX, ..Default::default() }, /* TODO: - // update timestamp when Jovian is planned */ ForkId { hash: - // ForkHash([0xcd, 0xfd, 0x39, 0x99]), next: 0 }, ), ], ); } @@ -848,7 +880,7 @@ mod tests { #[test] fn latest_base_mainnet_fork_id() { assert_eq!( - ForkId { hash: ForkHash([0x86, 0x72, 0x8b, 0x4e]), next: 0 }, + ForkId { hash: ForkHash(hex!("1cfeafc9")), next: 0 }, BASE_MAINNET.latest_fork_id() ) } @@ -857,7 +889,7 @@ mod tests { fn latest_base_mainnet_fork_id_with_builder() { let base_mainnet = OpChainSpecBuilder::base_mainnet().build(); assert_eq!( - ForkId { hash: ForkHash([0x86, 0x72, 0x8b, 0x4e]), next: 0 }, + ForkId { hash: ForkHash(hex!("1cfeafc9")), next: 0 }, base_mainnet.latest_fork_id() ) } diff --git a/crates/optimism/chainspec/src/superchain/chain_specs.rs b/crates/optimism/chainspec/src/superchain/chain_specs.rs index 1547082eca..8a794221ea 100644 --- a/crates/optimism/chainspec/src/superchain/chain_specs.rs +++ b/crates/optimism/chainspec/src/superchain/chain_specs.rs @@ -45,6 +45,7 @@ create_superchain_specs!( ("settlus-sepolia", "sepolia"), ("shape", "mainnet"), ("shape", "sepolia"), + ("silent-data-mainnet", "mainnet"), ("snax", "mainnet"), ("soneium", "mainnet"), ("soneium-minato", "sepolia"), diff --git a/crates/optimism/chainspec/src/superchain/configs.rs b/crates/optimism/chainspec/src/superchain/configs.rs index 53b30a2f5d..b0ebc0fb84 100644 --- a/crates/optimism/chainspec/src/superchain/configs.rs +++ b/crates/optimism/chainspec/src/superchain/configs.rs @@ -87,8 +87,18 @@ fn read_file( #[cfg(test)] mod tests { use super::*; - use crate::superchain::Superchain; - use reth_optimism_primitives::ADDRESS_L2_TO_L1_MESSAGE_PASSER; + use crate::{generated_chain_value_parser, superchain::Superchain, SUPPORTED_CHAINS}; + use alloy_chains::NamedChain; + use alloy_op_hardforks::{ + OpHardfork, BASE_MAINNET_CANYON_TIMESTAMP, BASE_MAINNET_ECOTONE_TIMESTAMP, + BASE_MAINNET_ISTHMUS_TIMESTAMP, BASE_MAINNET_JOVIAN_TIMESTAMP, + BASE_SEPOLIA_CANYON_TIMESTAMP, BASE_SEPOLIA_ECOTONE_TIMESTAMP, + BASE_SEPOLIA_ISTHMUS_TIMESTAMP, BASE_SEPOLIA_JOVIAN_TIMESTAMP, OP_MAINNET_CANYON_TIMESTAMP, + OP_MAINNET_ECOTONE_TIMESTAMP, OP_MAINNET_ISTHMUS_TIMESTAMP, OP_MAINNET_JOVIAN_TIMESTAMP, + OP_SEPOLIA_CANYON_TIMESTAMP, OP_SEPOLIA_ECOTONE_TIMESTAMP, OP_SEPOLIA_ISTHMUS_TIMESTAMP, + OP_SEPOLIA_JOVIAN_TIMESTAMP, + }; + use 
reth_optimism_primitives::L2_TO_L1_MESSAGE_PASSER_ADDRESS; use tar_no_std::TarArchiveRef; #[test] @@ -96,7 +106,7 @@ mod tests { let genesis = read_superchain_genesis("unichain", "mainnet").unwrap(); assert_eq!(genesis.config.chain_id, 130); assert_eq!(genesis.timestamp, 1730748359); - assert!(genesis.alloc.contains_key(&ADDRESS_L2_TO_L1_MESSAGE_PASSER)); + assert!(genesis.alloc.contains_key(&L2_TO_L1_MESSAGE_PASSER_ADDRESS)); } #[test] @@ -104,7 +114,7 @@ mod tests { let genesis = read_superchain_genesis("funki", "mainnet").unwrap(); assert_eq!(genesis.config.chain_id, 33979); assert_eq!(genesis.timestamp, 1721211095); - assert!(genesis.alloc.contains_key(&ADDRESS_L2_TO_L1_MESSAGE_PASSER)); + assert!(genesis.alloc.contains_key(&L2_TO_L1_MESSAGE_PASSER_ADDRESS)); } #[test] @@ -150,4 +160,139 @@ mod tests { ); } } + + #[test] + fn test_hardfork_timestamps() { + for &chain in SUPPORTED_CHAINS { + let metadata = generated_chain_value_parser(chain).unwrap(); + + match metadata.chain().named() { + Some(NamedChain::Optimism) => { + assert_eq!( + metadata.hardforks.get(OpHardfork::Jovian).unwrap().as_timestamp().unwrap(), + OP_MAINNET_JOVIAN_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Isthmus) + .unwrap() + .as_timestamp() + .unwrap(), + OP_MAINNET_ISTHMUS_TIMESTAMP + ); + + assert_eq!( + metadata.hardforks.get(OpHardfork::Canyon).unwrap().as_timestamp().unwrap(), + OP_MAINNET_CANYON_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Ecotone) + .unwrap() + .as_timestamp() + .unwrap(), + OP_MAINNET_ECOTONE_TIMESTAMP + ); + } + Some(NamedChain::OptimismSepolia) => { + assert_eq!( + metadata.hardforks.get(OpHardfork::Jovian).unwrap().as_timestamp().unwrap(), + OP_SEPOLIA_JOVIAN_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Isthmus) + .unwrap() + .as_timestamp() + .unwrap(), + OP_SEPOLIA_ISTHMUS_TIMESTAMP + ); + + assert_eq!( + metadata.hardforks.get(OpHardfork::Canyon).unwrap().as_timestamp().unwrap(), + OP_SEPOLIA_CANYON_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Ecotone) + .unwrap() + .as_timestamp() + .unwrap(), + OP_SEPOLIA_ECOTONE_TIMESTAMP + ); + } + Some(NamedChain::Base) => { + assert_eq!( + metadata.hardforks.get(OpHardfork::Jovian).unwrap().as_timestamp().unwrap(), + BASE_MAINNET_JOVIAN_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Isthmus) + .unwrap() + .as_timestamp() + .unwrap(), + BASE_MAINNET_ISTHMUS_TIMESTAMP + ); + + assert_eq!( + metadata.hardforks.get(OpHardfork::Canyon).unwrap().as_timestamp().unwrap(), + BASE_MAINNET_CANYON_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Ecotone) + .unwrap() + .as_timestamp() + .unwrap(), + BASE_MAINNET_ECOTONE_TIMESTAMP + ); + } + Some(NamedChain::BaseSepolia) => { + assert_eq!( + metadata.hardforks.get(OpHardfork::Jovian).unwrap().as_timestamp().unwrap(), + BASE_SEPOLIA_JOVIAN_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Isthmus) + .unwrap() + .as_timestamp() + .unwrap(), + BASE_SEPOLIA_ISTHMUS_TIMESTAMP + ); + + assert_eq!( + metadata.hardforks.get(OpHardfork::Canyon).unwrap().as_timestamp().unwrap(), + BASE_SEPOLIA_CANYON_TIMESTAMP + ); + + assert_eq!( + metadata + .hardforks + .get(OpHardfork::Ecotone) + .unwrap() + .as_timestamp() + .unwrap(), + BASE_SEPOLIA_ECOTONE_TIMESTAMP + ); + } + _ => {} + } + } + } } diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index eb32004533..062d66dfd4 100644 --- 
a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -44,7 +44,6 @@ reth-optimism-evm.workspace = true reth-cli-runner.workspace = true reth-node-builder = { workspace = true, features = ["op"] } reth-tracing.workspace = true -reth-tracing-otlp.workspace = true # eth alloy-eips.workspace = true @@ -56,7 +55,6 @@ alloy-rlp.workspace = true futures-util.workspace = true derive_more.workspace = true serde.workspace = true -url.workspace = true clap = { workspace = true, features = ["derive", "env"] } tokio = { workspace = true, features = ["sync", "macros", "time", "rt-multi-thread"] } @@ -76,10 +74,11 @@ reth-stages = { workspace = true, features = ["test-utils"] } reth-optimism-chainspec = { workspace = true, features = ["std", "superchain-configs"] } [features] -default = ["otlp"] +default = [] # Opentelemtry feature to activate metrics export otlp = ["reth-tracing/otlp", "reth-node-core/otlp"] +samply = ["reth-tracing/samply", "reth-node-core/samply"] asm-keccak = [ "alloy-primitives/asm-keccak", @@ -107,5 +106,4 @@ serde = [ "reth-optimism-primitives/serde", "reth-primitives-traits/serde", "reth-optimism-chainspec/serde", - "url/serde", ] diff --git a/crates/optimism/cli/src/app.rs b/crates/optimism/cli/src/app.rs index 8567c2b7e5..a23873daad 100644 --- a/crates/optimism/cli/src/app.rs +++ b/crates/optimism/cli/src/app.rs @@ -3,16 +3,15 @@ use eyre::{eyre, Result}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::launcher::Launcher; use reth_cli_runner::CliRunner; +use reth_node_core::args::OtlpInitStatus; use reth_node_metrics::recorder::install_prometheus_recorder; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::OpBeaconConsensus; use reth_optimism_node::{OpExecutorProvider, OpNode}; use reth_rpc_server_types::RpcModuleValidator; use reth_tracing::{FileWorkerGuard, Layers}; -use reth_tracing_otlp::OtlpProtocol; use std::{fmt, sync::Arc}; -use tracing::info; -use url::Url; +use tracing::{info, warn}; /// A wrapper around a parsed CLI that handles command execution. #[derive(Debug)] @@ -99,7 +98,9 @@ where runner.run_blocking_until_ctrl_c(command.execute::()) } Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute::()), + Commands::Db(command) => { + runner.run_blocking_command_until_exit(|ctx| command.execute::(ctx)) + } Commands::Stage(command) => { runner.run_command_until_exit(|ctx| command.execute::(ctx, components)) } @@ -122,47 +123,20 @@ where if self.guard.is_none() { let mut layers = self.layers.take().unwrap_or_default(); - #[cfg(feature = "otlp")] - { - self.cli.traces.validate()?; - if let Some(endpoint) = &self.cli.traces.otlp { - info!(target: "reth::cli", "Starting OTLP tracing export to {:?}", endpoint); - self.init_otlp_export(&mut layers, endpoint, runner)?; - } - } + let otlp_status = runner.block_on(self.cli.traces.init_otlp_tracing(&mut layers))?; self.guard = self.cli.logs.init_tracing_with_layers(layers)?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.cli.logs.log_file_directory); - } - Ok(()) - } - - /// Initialize OTLP tracing export based on protocol type. - /// - /// For gRPC, `block_on` is required because tonic's channel initialization needs - /// a tokio runtime context, even though `with_span_layer` itself is not async. 
- #[cfg(feature = "otlp")] - fn init_otlp_export( - &self, - layers: &mut Layers, - endpoint: &Url, - runner: &CliRunner, - ) -> Result<()> { - let endpoint = endpoint.clone(); - let protocol = self.cli.traces.protocol; - let level_filter = self.cli.traces.otlp_filter.clone(); - - match protocol { - OtlpProtocol::Grpc => { - runner.block_on(async { - layers.with_span_layer("reth".to_string(), endpoint, level_filter, protocol) - })?; - } - OtlpProtocol::Http => { - layers.with_span_layer("reth".to_string(), endpoint, level_filter, protocol)?; + match otlp_status { + OtlpInitStatus::Started(endpoint) => { + info!(target: "reth::cli", "Started OTLP {:?} tracing export to {endpoint}", self.cli.traces.protocol); + } + OtlpInitStatus::NoFeature => { + warn!(target: "reth::cli", "Provided OTLP tracing arguments do not have effect, compile with the `otlp` feature") + } + OtlpInitStatus::Disabled => {} } } - Ok(()) } } diff --git a/crates/optimism/cli/src/commands/init_state.rs b/crates/optimism/cli/src/commands/init_state.rs index 950f60193f..93de398675 100644 --- a/crates/optimism/cli/src/commands/init_state.rs +++ b/crates/optimism/cli/src/commands/init_state.rs @@ -3,14 +3,14 @@ use alloy_consensus::Header; use clap::Parser; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, CliHeader, CliNodeTypes, Environment}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment}; use reth_db_common::init::init_from_state_dump; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_primitives::{ bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH}, OpPrimitives, }; -use reth_primitives_traits::SealedHeader; +use reth_primitives_traits::{header::HeaderMut, SealedHeader}; use reth_provider::{ BlockNumReader, DBProvider, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index 1655b92d6e..52fdcc2ddd 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -62,7 +62,7 @@ use reth_node_metrics as _; /// /// This is the entrypoint to the executable. 
#[derive(Debug, Parser)] -#[command(author, version = version_metadata().short_version.as_ref(), long_version = version_metadata().long_version.as_ref(), about = "Reth", long_about = None)] +#[command(author, name = version_metadata().name_client.as_ref(), version = version_metadata().short_version.as_ref(), long_version = version_metadata().long_version.as_ref(), about = "Reth", long_about = None)] pub struct Cli< Spec: ChainSpecParser = OpChainSpecParser, Ext: clap::Args + fmt::Debug = RollupArgs, diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 25e11be9ac..a54cf05f2f 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -12,15 +12,17 @@ extern crate alloc; use alloc::{format, sync::Arc}; -use alloy_consensus::{BlockHeader as _, EMPTY_OMMER_ROOT_HASH}; +use alloy_consensus::{ + constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader as _, EMPTY_OMMER_ROOT_HASH, +}; use alloy_primitives::B64; use core::fmt::Debug; use reth_chainspec::EthChainSpec; use reth_consensus::{Consensus, ConsensusError, FullConsensus, HeaderValidator}; use reth_consensus_common::validation::{ - validate_against_parent_4844, validate_against_parent_eip1559_base_fee, - validate_against_parent_hash_number, validate_against_parent_timestamp, validate_cancun_gas, - validate_header_base_fee, validate_header_extra_data, validate_header_gas, + validate_against_parent_eip1559_base_fee, validate_against_parent_hash_number, + validate_against_parent_timestamp, validate_cancun_gas, validate_header_base_fee, + validate_header_extra_data, validate_header_gas, }; use reth_execution_types::BlockExecutionResult; use reth_optimism_forks::OpHardforks; @@ -46,12 +48,25 @@ pub use error::OpConsensusError; pub struct OpBeaconConsensus { /// Configuration chain_spec: Arc, + /// Maximum allowed extra data size in bytes + max_extra_data_size: usize, } impl OpBeaconConsensus { /// Create a new instance of [`OpBeaconConsensus`] pub const fn new(chain_spec: Arc) -> Self { - Self { chain_spec } + Self { chain_spec, max_extra_data_size: MAXIMUM_EXTRA_DATA_SIZE } + } + + /// Returns the maximum allowed extra data size. + pub const fn max_extra_data_size(&self) -> usize { + self.max_extra_data_size + } + + /// Sets the maximum allowed extra data size and returns the updated instance. + pub const fn with_max_extra_data_size(mut self, size: usize) -> Self { + self.max_extra_data_size = size; + self } } @@ -166,7 +181,7 @@ where // is greater than its parent timestamp. // validate header extra data for all networks post merge - validate_header_extra_data(header)?; + validate_header_extra_data(header, self.max_extra_data_size)?; validate_header_gas(header)?; validate_header_base_fee(header, &self.chain_spec) } @@ -188,9 +203,32 @@ where &self.chain_spec, )?; - // ensure that the blob gas fields for this block - if let Some(blob_params) = self.chain_spec.blob_params_at_timestamp(header.timestamp()) { - validate_against_parent_4844(header.header(), parent.header(), blob_params)?; + // Ensure that the blob gas fields for this block are correctly set. + // In the op-stack, the excess blob gas is always 0 for all blocks after ecotone. + // The blob gas used and the excess blob gas should both be set after ecotone. + // After Jovian, the blob gas used contains the current DA footprint. 
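+        // Illustrative summary of the checks that follow (no additional rules are added here):
+        //   - post-Ecotone, pre-Jovian: `blob_gas_used` must be present and equal 0, e.g. a
+        //     header carrying `blob_gas_used == 7` is rejected with
+        //     `BlobGasUsedDiff { got: 7, expected: 0 }`
+        //   - post-Jovian: `blob_gas_used` holds the DA footprint and may be non-zero
+        //   - in both cases `excess_blob_gas` must be present and equal 0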
+ if self.chain_spec.is_ecotone_active_at_timestamp(header.timestamp()) { + let blob_gas_used = header.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; + + // Before Jovian and after ecotone, the blob gas used should be 0. + if !self.chain_spec.is_jovian_active_at_timestamp(header.timestamp()) && + blob_gas_used != 0 + { + return Err(ConsensusError::BlobGasUsedDiff(GotExpected { + got: blob_gas_used, + expected: 0, + })); + } + + let excess_blob_gas = + header.excess_blob_gas().ok_or(ConsensusError::ExcessBlobGasMissing)?; + if excess_blob_gas != 0 { + return Err(ConsensusError::ExcessBlobGasDiff { + diff: GotExpected { got: excess_blob_gas, expected: 0 }, + parent_excess_blob_gas: parent.excess_blob_gas().unwrap_or(0), + parent_blob_gas_used: parent.blob_gas_used().unwrap_or(0), + }) + } } Ok(()) @@ -203,12 +241,15 @@ mod tests { use alloy_consensus::{BlockBody, Eip658Value, Header, Receipt, TxEip7702, TxReceipt}; use alloy_eips::{eip4895::Withdrawals, eip7685::Requests}; - use alloy_primitives::{Address, Bytes, Signature, U256}; - use op_alloy_consensus::OpTypedTransaction; - use reth_consensus::{Consensus, ConsensusError, FullConsensus}; + use alloy_primitives::{Address, Bytes, Log, Signature, U256}; + use op_alloy_consensus::{ + encode_holocene_extra_data, encode_jovian_extra_data, OpTypedTransaction, + }; + use reth_chainspec::BaseFeeParams; + use reth_consensus::{Consensus, ConsensusError, FullConsensus, HeaderValidator}; use reth_optimism_chainspec::{OpChainSpec, OpChainSpecBuilder, OP_MAINNET}; use reth_optimism_primitives::{OpPrimitives, OpReceipt, OpTransactionSigned}; - use reth_primitives_traits::{proofs, GotExpected, RecoveredBlock, SealedBlock}; + use reth_primitives_traits::{proofs, GotExpected, RecoveredBlock, SealedBlock, SealedHeader}; use reth_provider::BlockExecutionResult; use crate::OpBeaconConsensus; @@ -326,7 +367,7 @@ mod tests { let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec)); - let receipt = OpReceipt::Eip7702(Receipt { + let receipt = OpReceipt::Eip7702(Receipt:: { status: Eip658Value::success(), cumulative_gas_used: GAS_USED, logs: vec![], @@ -395,7 +436,7 @@ mod tests { let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec)); - let receipt = OpReceipt::Eip7702(Receipt { + let receipt = OpReceipt::Eip7702(Receipt:: { status: Eip658Value::success(), cumulative_gas_used: GAS_USED, logs: vec![], @@ -410,7 +451,9 @@ mod tests { )), gas_used: GAS_USED, timestamp: u64::MAX, - receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)), + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref( + &receipt.with_bloom_ref(), + )), logs_bloom: receipt.bloom(), ..Default::default() }; @@ -452,4 +495,301 @@ mod tests { }) ); } + + #[test] + fn test_header_min_base_fee_validation() { + const MIN_BASE_FEE: u64 = 1000; + + let chain_spec = OpChainSpecBuilder::default() + .jovian_activated() + .genesis(OP_MAINNET.genesis.clone()) + .chain(OP_MAINNET.chain) + .build(); + + // create a tx + let transaction = mock_tx(0); + + let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec)); + + let receipt = OpReceipt::Eip7702(Receipt:: { + status: Eip658Value::success(), + cumulative_gas_used: 0, + logs: vec![], + }); + + let parent = Header { + number: 0, + base_fee_per_gas: Some(MIN_BASE_FEE / 10), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(0), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + 
&transaction, + )), + gas_used: 0, + timestamp: u64::MAX - 1, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref( + &receipt.with_bloom_ref(), + )), + logs_bloom: receipt.bloom(), + extra_data: encode_jovian_extra_data( + Default::default(), + BaseFeeParams::optimism(), + MIN_BASE_FEE, + ) + .unwrap(), + ..Default::default() + }; + let parent = SealedHeader::seal_slow(parent); + + let header = Header { + number: 1, + base_fee_per_gas: Some(MIN_BASE_FEE), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(0), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref( + &receipt.with_bloom_ref(), + )), + logs_bloom: receipt.bloom(), + parent_hash: parent.hash(), + ..Default::default() + }; + let header = SealedHeader::seal_slow(header); + + let result = beacon_consensus.validate_header_against_parent(&header, &parent); + + assert!(result.is_ok()); + } + + #[test] + fn test_header_min_base_fee_validation_failure() { + const MIN_BASE_FEE: u64 = 1000; + + let chain_spec = OpChainSpecBuilder::default() + .jovian_activated() + .genesis(OP_MAINNET.genesis.clone()) + .chain(OP_MAINNET.chain) + .build(); + + // create a tx + let transaction = mock_tx(0); + + let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec)); + + let receipt = OpReceipt::Eip7702(Receipt:: { + status: Eip658Value::success(), + cumulative_gas_used: 0, + logs: vec![], + }); + + let parent = Header { + number: 0, + base_fee_per_gas: Some(MIN_BASE_FEE / 10), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(0), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX - 1, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref( + &receipt.with_bloom_ref(), + )), + logs_bloom: receipt.bloom(), + extra_data: encode_jovian_extra_data( + Default::default(), + BaseFeeParams::optimism(), + MIN_BASE_FEE, + ) + .unwrap(), + ..Default::default() + }; + let parent = SealedHeader::seal_slow(parent); + + let header = Header { + number: 1, + base_fee_per_gas: Some(MIN_BASE_FEE - 1), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(0), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref( + &receipt.with_bloom_ref(), + )), + logs_bloom: receipt.bloom(), + parent_hash: parent.hash(), + ..Default::default() + }; + let header = SealedHeader::seal_slow(header); + + let result = beacon_consensus.validate_header_against_parent(&header, &parent); + + assert!(result.is_err()); + assert_eq!( + result.unwrap_err(), + ConsensusError::BaseFeeDiff(GotExpected { + got: MIN_BASE_FEE - 1, + expected: MIN_BASE_FEE, + }) + ); + } + + #[test] + fn test_header_da_footprint_validation() { + const MIN_BASE_FEE: u64 = 100_000; + const DA_FOOTPRINT: u64 = GAS_LIMIT - 1; + const GAS_LIMIT: u64 = 100_000_000; + + let chain_spec = OpChainSpecBuilder::default() + .jovian_activated() + .genesis(OP_MAINNET.genesis.clone()) + .chain(OP_MAINNET.chain) + .build(); + + // create a tx + let transaction = mock_tx(0); + + let beacon_consensus = 
OpBeaconConsensus::new(Arc::new(chain_spec)); + + let receipt = OpReceipt::Eip7702(Receipt:: { + status: Eip658Value::success(), + cumulative_gas_used: 0, + logs: vec![], + }); + + let parent = Header { + number: 0, + base_fee_per_gas: Some(MIN_BASE_FEE), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(DA_FOOTPRINT), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX - 1, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref( + &receipt.with_bloom_ref(), + )), + logs_bloom: receipt.bloom(), + extra_data: encode_jovian_extra_data( + Default::default(), + BaseFeeParams::optimism(), + MIN_BASE_FEE, + ) + .unwrap(), + gas_limit: GAS_LIMIT, + ..Default::default() + }; + let parent = SealedHeader::seal_slow(parent); + + let header = Header { + number: 1, + base_fee_per_gas: Some(MIN_BASE_FEE + MIN_BASE_FEE / 10), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(DA_FOOTPRINT), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref( + &receipt.with_bloom_ref(), + )), + logs_bloom: receipt.bloom(), + parent_hash: parent.hash(), + ..Default::default() + }; + let header = SealedHeader::seal_slow(header); + + let result = beacon_consensus.validate_header_against_parent(&header, &parent); + + assert!(result.is_ok()); + } + + #[test] + fn test_header_isthmus_validation() { + const MIN_BASE_FEE: u64 = 100_000; + const DA_FOOTPRINT: u64 = GAS_LIMIT - 1; + const GAS_LIMIT: u64 = 100_000_000; + + let chain_spec = OpChainSpecBuilder::default() + .isthmus_activated() + .genesis(OP_MAINNET.genesis.clone()) + .chain(OP_MAINNET.chain) + .build(); + + // create a tx + let transaction = mock_tx(0); + + let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec)); + + let receipt = OpReceipt::Eip7702(Receipt:: { + status: Eip658Value::success(), + cumulative_gas_used: 0, + logs: vec![], + }); + + let parent = Header { + number: 0, + base_fee_per_gas: Some(MIN_BASE_FEE), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(DA_FOOTPRINT), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX - 1, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref( + &receipt.with_bloom_ref(), + )), + logs_bloom: receipt.bloom(), + extra_data: encode_holocene_extra_data(Default::default(), BaseFeeParams::optimism()) + .unwrap(), + gas_limit: GAS_LIMIT, + ..Default::default() + }; + let parent = SealedHeader::seal_slow(parent); + + let header = Header { + number: 1, + base_fee_per_gas: Some(MIN_BASE_FEE - 2 * MIN_BASE_FEE / 100), + withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), + blob_gas_used: Some(DA_FOOTPRINT), + excess_blob_gas: Some(0), + transactions_root: proofs::calculate_transaction_root(std::slice::from_ref( + &transaction, + )), + gas_used: 0, + timestamp: u64::MAX, + receipts_root: proofs::calculate_receipt_root(std::slice::from_ref( + &receipt.with_bloom_ref(), + )), + logs_bloom: receipt.bloom(), + parent_hash: parent.hash(), + ..Default::default() + }; + let header = SealedHeader::seal_slow(header); + + let result = 
beacon_consensus.validate_header_against_parent(&header, &parent); + + assert!(result.is_err()); + assert_eq!( + result.unwrap_err(), + ConsensusError::BlobGasUsedDiff(GotExpected { got: DA_FOOTPRINT, expected: 0 }) + ); + } } diff --git a/crates/optimism/consensus/src/validation/isthmus.rs b/crates/optimism/consensus/src/validation/isthmus.rs index 4703e10869..f35f4ea69a 100644 --- a/crates/optimism/consensus/src/validation/isthmus.rs +++ b/crates/optimism/consensus/src/validation/isthmus.rs @@ -2,17 +2,14 @@ use crate::OpConsensusError; use alloy_consensus::BlockHeader; -use alloy_primitives::{address, Address, B256}; +use alloy_primitives::B256; use alloy_trie::EMPTY_ROOT_HASH; +use reth_optimism_primitives::L2_TO_L1_MESSAGE_PASSER_ADDRESS; use reth_storage_api::{errors::ProviderResult, StorageRootProvider}; use reth_trie_common::HashedStorage; use revm::database::BundleState; use tracing::warn; -/// The L2 contract `L2ToL1MessagePasser`, stores commitments to withdrawal transactions. -pub const ADDRESS_L2_TO_L1_MESSAGE_PASSER: Address = - address!("0x4200000000000000000000000000000000000016"); - /// Verifies that `withdrawals_root` (i.e. `l2tol1-msg-passer` storage root since Isthmus) field is /// set in block header. pub fn ensure_withdrawals_storage_root_is_some( @@ -35,7 +32,7 @@ pub fn withdrawals_root( withdrawals_root_prehashed( state_updates .state() - .get(&ADDRESS_L2_TO_L1_MESSAGE_PASSER) + .get(&L2_TO_L1_MESSAGE_PASSER_ADDRESS) .map(|acc| { HashedStorage::from_plain_storage( acc.status, @@ -55,7 +52,7 @@ pub fn withdrawals_root_prehashed( hashed_storage_updates: HashedStorage, state: DB, ) -> ProviderResult { - state.storage_root(ADDRESS_L2_TO_L1_MESSAGE_PASSER, hashed_storage_updates) + state.storage_root(L2_TO_L1_MESSAGE_PASSER_ADDRESS, hashed_storage_updates) } /// Verifies block header field `withdrawals_root` against storage root of @@ -138,7 +135,6 @@ mod test { use reth_db_common::init::init_genesis; use reth_optimism_chainspec::OpChainSpecBuilder; use reth_optimism_node::OpNode; - use reth_optimism_primitives::ADDRESS_L2_TO_L1_MESSAGE_PASSER; use reth_provider::{ providers::BlockchainProvider, test_utils::create_test_provider_factory_with_node_types, StateWriter, @@ -150,7 +146,7 @@ mod test { #[test] fn l2tol1_message_passer_no_withdrawals() { - let hashed_address = keccak256(ADDRESS_L2_TO_L1_MESSAGE_PASSER); + let hashed_address = keccak256(L2_TO_L1_MESSAGE_PASSER_ADDRESS); // create account storage let init_storage = HashedStorage::from_iter( diff --git a/crates/optimism/consensus/src/validation/mod.rs b/crates/optimism/consensus/src/validation/mod.rs index 8509a97e7a..c17e8429c8 100644 --- a/crates/optimism/consensus/src/validation/mod.rs +++ b/crates/optimism/consensus/src/validation/mod.rs @@ -202,12 +202,15 @@ mod tests { use reth_optimism_primitives::OpReceipt; use std::sync::Arc; - const JOVIAN_TIMESTAMP: u64 = 1900000000; + const HOLOCENE_TIMESTAMP: u64 = 1700000000; + const ISTHMUS_TIMESTAMP: u64 = 1750000000; + const JOVIAN_TIMESTAMP: u64 = 1800000000; const BLOCK_TIME_SECONDS: u64 = 2; fn holocene_chainspec() -> Arc { let mut hardforks = BASE_SEPOLIA_HARDFORKS.clone(); - hardforks.insert(OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1800000000)); + hardforks + .insert(OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(HOLOCENE_TIMESTAMP)); Arc::new(OpChainSpec { inner: ChainSpec { chain: BASE_SEPOLIA.inner.chain, @@ -227,7 +230,7 @@ mod tests { chainspec .inner .hardforks - .insert(OpHardfork::Isthmus.boxed(), 
ForkCondition::Timestamp(1800000000)); + .insert(OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(ISTHMUS_TIMESTAMP)); chainspec } @@ -236,7 +239,7 @@ mod tests { chainspec .inner .hardforks - .insert(OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(1900000000)); + .insert(OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(JOVIAN_TIMESTAMP)); chainspec } @@ -264,14 +267,14 @@ mod tests { base_fee_per_gas: Some(1), gas_used: 15763614, gas_limit: 144000000, - timestamp: 1800000003, + timestamp: HOLOCENE_TIMESTAMP + 3, extra_data: Bytes::from_static(&[0, 0, 0, 0, 0, 0, 0, 0, 0]), ..Default::default() }; let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee( &op_chain_spec, &parent, - 1800000005, + HOLOCENE_TIMESTAMP + 5, ); assert_eq!( base_fee.unwrap(), @@ -286,14 +289,14 @@ mod tests { gas_used: 15763614, gas_limit: 144000000, extra_data: Bytes::from_static(&[0, 0, 0, 0, 8, 0, 0, 0, 8]), - timestamp: 1800000003, + timestamp: HOLOCENE_TIMESTAMP + 3, ..Default::default() }; let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee( &holocene_chainspec(), &parent, - 1800000005, + HOLOCENE_TIMESTAMP + 5, ); assert_eq!( base_fee.unwrap(), diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index f2dce0a9ba..724f8555e0 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -75,5 +75,9 @@ std = [ "op-alloy-rpc-types-engine/std", "reth-storage-errors/std", ] -portable = ["reth-revm/portable"] -rpc = ["reth-rpc-eth-api"] +portable = [ + "reth-revm/portable", + "op-revm/portable", + "revm/portable", +] +rpc = ["reth-rpc-eth-api", "reth-optimism-primitives/serde", "reth-optimism-primitives/reth-codec", "alloy-evm/rpc"] diff --git a/crates/optimism/evm/src/config.rs b/crates/optimism/evm/src/config.rs index 47ed2853d0..1f1068c40d 100644 --- a/crates/optimism/evm/src/config.rs +++ b/crates/optimism/evm/src/config.rs @@ -1,8 +1,7 @@ pub use alloy_op_evm::{ spec as revm_spec, spec_by_timestamp_after_bedrock as revm_spec_by_timestamp_after_bedrock, }; - -use alloy_consensus::BlockHeader; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; use revm::primitives::{Address, Bytes, B256}; /// Context relevant for execution of a next block w.r.t OP. 
@@ -23,7 +22,7 @@ pub struct OpNextBlockEnvAttributes { } #[cfg(feature = "rpc")] -impl reth_rpc_eth_api::helpers::pending_block::BuildPendingEnv +impl reth_rpc_eth_api::helpers::pending_block::BuildPendingEnv for OpNextBlockEnvAttributes { fn build_pending_env(parent: &crate::SealedHeader) -> Self { @@ -37,3 +36,16 @@ impl reth_rpc_eth_api::helpers::pending_block::BuildPendingEnv for OpNextBlockEnvAttributes { + fn from(base: OpFlashblockPayloadBase) -> Self { + Self { + timestamp: base.timestamp, + suggested_fee_recipient: base.fee_recipient, + prev_randao: base.prev_randao, + gas_limit: base.gas_limit, + parent_beacon_block_root: Some(base.parent_beacon_block_root), + extra_data: base.extra_data, + } + } +} diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index e5df16ee2e..1805cc4036 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -13,32 +13,38 @@ extern crate alloc; use alloc::sync::Arc; use alloy_consensus::{BlockHeader, Header}; -use alloy_eips::Decodable2718; use alloy_evm::{EvmFactory, FromRecoveredTx, FromTxWithEncoded}; use alloy_op_evm::block::{receipt_builder::OpReceiptBuilder, OpTxEnv}; -use alloy_primitives::U256; use core::fmt::Debug; use op_alloy_consensus::EIP1559ParamError; -use op_alloy_rpc_types_engine::OpExecutionData; use op_revm::{OpSpecId, OpTransaction}; use reth_chainspec::EthChainSpec; use reth_evm::{ - eth::NextEvmEnvAttributes, precompiles::PrecompilesMap, ConfigureEngineEvm, ConfigureEvm, - EvmEnv, EvmEnvFor, ExecutableTxIterator, ExecutionCtxFor, TransactionEnv, + eth::NextEvmEnvAttributes, precompiles::PrecompilesMap, ConfigureEvm, EvmEnv, TransactionEnv, }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::{DepositReceipt, OpPrimitives}; -use reth_primitives_traits::{ - NodePrimitives, SealedBlock, SealedHeader, SignedTransaction, TxTy, WithEncoded, -}; -use reth_storage_errors::any::AnyError; -use revm::{ - context::{BlockEnv, CfgEnv, TxEnv}, - context_interface::block::BlobExcessGasAndPrice, - primitives::hardfork::SpecId, +use reth_primitives_traits::{NodePrimitives, SealedBlock, SealedHeader, SignedTransaction}; +use revm::context::{BlockEnv, TxEnv}; + +#[allow(unused_imports)] +use { + alloy_eips::Decodable2718, + alloy_primitives::{Bytes, U256}, + op_alloy_rpc_types_engine::OpExecutionData, + reth_evm::{EvmEnvFor, ExecutionCtxFor}, + reth_primitives_traits::{TxTy, WithEncoded}, + reth_storage_errors::any::AnyError, + revm::{ + context::CfgEnv, context_interface::block::BlobExcessGasAndPrice, + primitives::hardfork::SpecId, + }, }; +#[cfg(feature = "std")] +use reth_evm::{ConfigureEngineEvm, ExecutableTxIterator}; + mod config; pub use config::{revm_spec, revm_spec_by_timestamp_after_bedrock, OpNextBlockEnvAttributes}; mod execute; @@ -200,6 +206,7 @@ where } } +#[cfg(feature = "std")] impl ConfigureEngineEvm for OpEvmConfig where ChainSpec: EthChainSpec
+ OpHardforks, @@ -265,12 +272,15 @@ where &self, payload: &OpExecutionData, ) -> Result, Self::Error> { - Ok(payload.payload.transactions().clone().into_iter().map(|encoded| { + let transactions = payload.payload.transactions().clone(); + let convert = |encoded: Bytes| { let tx = TxTy::::decode_2718_exact(encoded.as_ref()) .map_err(AnyError::new)?; let signer = tx.try_recover().map_err(AnyError::new)?; Ok::<_, AnyError>(WithEncoded::new(encoded, tx.with_signer(signer))) - })) + }; + + Ok((transactions, convert)) } } @@ -483,14 +493,14 @@ mod tests { block2.set_hash(block2_hash); // Create a random receipt object, receipt1 - let receipt1 = OpReceipt::Legacy(Receipt { + let receipt1 = OpReceipt::Legacy(Receipt:: { cumulative_gas_used: 46913, logs: vec![], status: true.into(), }); // Create another random receipt object, receipt2 - let receipt2 = OpReceipt::Legacy(Receipt { + let receipt2 = OpReceipt::Legacy(Receipt:: { cumulative_gas_used: 1325345, logs: vec![], status: true.into(), @@ -541,7 +551,7 @@ mod tests { ); // Create a Receipts object with a vector of receipt vectors - let receipts = vec![vec![Some(OpReceipt::Legacy(Receipt { + let receipts = vec![vec![Some(OpReceipt::Legacy(Receipt:: { cumulative_gas_used: 46913, logs: vec![], status: true.into(), @@ -599,7 +609,7 @@ mod tests { #[test] fn test_block_number_to_index() { // Create a Receipts object with a vector of receipt vectors - let receipts = vec![vec![Some(OpReceipt::Legacy(Receipt { + let receipts = vec![vec![Some(OpReceipt::Legacy(Receipt:: { cumulative_gas_used: 46913, logs: vec![], status: true.into(), @@ -630,7 +640,7 @@ mod tests { #[test] fn test_get_logs() { // Create a Receipts object with a vector of receipt vectors - let receipts = vec![vec![OpReceipt::Legacy(Receipt { + let receipts = vec![vec![OpReceipt::Legacy(Receipt:: { cumulative_gas_used: 46913, logs: vec![Log::::default()], status: true.into(), @@ -658,7 +668,7 @@ mod tests { #[test] fn test_receipts_by_block() { // Create a Receipts object with a vector of receipt vectors - let receipts = vec![vec![Some(OpReceipt::Legacy(Receipt { + let receipts = vec![vec![Some(OpReceipt::Legacy(Receipt:: { cumulative_gas_used: 46913, logs: vec![Log::::default()], status: true.into(), @@ -682,7 +692,7 @@ mod tests { // Assert that the receipts for block number 123 match the expected receipts assert_eq!( receipts_by_block, - vec![&Some(OpReceipt::Legacy(Receipt { + vec![&Some(OpReceipt::Legacy(Receipt:: { cumulative_gas_used: 46913, logs: vec![Log::::default()], status: true.into(), @@ -693,7 +703,7 @@ mod tests { #[test] fn test_receipts_len() { // Create a Receipts object with a vector of receipt vectors - let receipts = vec![vec![Some(OpReceipt::Legacy(Receipt { + let receipts = vec![vec![Some(OpReceipt::Legacy(Receipt:: { cumulative_gas_used: 46913, logs: vec![Log::::default()], status: true.into(), @@ -738,7 +748,7 @@ mod tests { #[test] fn test_revert_to() { // Create a random receipt object - let receipt = OpReceipt::Legacy(Receipt { + let receipt = OpReceipt::Legacy(Receipt:: { cumulative_gas_used: 46913, logs: vec![], status: true.into(), @@ -783,7 +793,7 @@ mod tests { #[test] fn test_extend_execution_outcome() { // Create a Receipt object with specific attributes. 
- let receipt = OpReceipt::Legacy(Receipt { + let receipt = OpReceipt::Legacy(Receipt:: { cumulative_gas_used: 46913, logs: vec![], status: true.into(), @@ -823,7 +833,7 @@ mod tests { #[test] fn test_split_at_execution_outcome() { // Create a random receipt object - let receipt = OpReceipt::Legacy(Receipt { + let receipt = OpReceipt::Legacy(Receipt:: { cumulative_gas_used: 46913, logs: vec![], status: true.into(), diff --git a/crates/optimism/flashblocks/Cargo.toml b/crates/optimism/flashblocks/Cargo.toml index 977e28d37e..e0754aab95 100644 --- a/crates/optimism/flashblocks/Cargo.toml +++ b/crates/optimism/flashblocks/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] # reth reth-optimism-primitives = { workspace = true, features = ["serde"] } -reth-optimism-evm.workspace = true reth-chain-state = { workspace = true, features = ["serde"] } reth-primitives-traits = { workspace = true, features = ["serde"] } reth-engine-primitives = { workspace = true, features = ["std"] } @@ -30,15 +29,16 @@ reth-metrics.workspace = true # alloy alloy-eips = { workspace = true, features = ["serde"] } -alloy-serde.workspace = true alloy-primitives = { workspace = true, features = ["serde"] } alloy-rpc-types-engine = { workspace = true, features = ["serde"] } alloy-consensus.workspace = true +# op-alloy +op-alloy-rpc-types-engine = { workspace = true, features = ["k256"] } + # io tokio.workspace = true tokio-tungstenite = { workspace = true, features = ["rustls-tls-native-roots"] } -serde.workspace = true serde_json.workspace = true url.workspace = true futures-util.workspace = true @@ -57,3 +57,4 @@ derive_more.workspace = true [dev-dependencies] test-case.workspace = true alloy-consensus.workspace = true +op-alloy-consensus.workspace = true diff --git a/crates/optimism/flashblocks/src/cache.rs b/crates/optimism/flashblocks/src/cache.rs new file mode 100644 index 0000000000..9aeed3435e --- /dev/null +++ b/crates/optimism/flashblocks/src/cache.rs @@ -0,0 +1,482 @@ +//! Sequence cache management for flashblocks. +//! +//! The `SequenceManager` maintains a ring buffer of recently completed flashblock sequences +//! and intelligently selects which sequence to build based on the local chain tip. + +use crate::{ + sequence::{FlashBlockPendingSequence, SequenceExecutionOutcome}, + worker::BuildArgs, + FlashBlock, FlashBlockCompleteSequence, PendingFlashBlock, +}; +use alloy_eips::eip2718::WithEncoded; +use alloy_primitives::B256; +use reth_primitives_traits::{NodePrimitives, Recovered, SignedTransaction}; +use reth_revm::cached::CachedReads; +use ringbuffer::{AllocRingBuffer, RingBuffer}; +use tokio::sync::broadcast; +use tracing::*; + +/// Maximum number of cached sequences in the ring buffer. +const CACHE_SIZE: usize = 3; +/// 200 ms flashblock time. +pub(crate) const FLASHBLOCK_BLOCK_TIME: u64 = 200; + +/// Manages flashblock sequences with caching support. 
+/// +/// This struct handles: +/// - Tracking the current pending sequence +/// - Caching completed sequences in a fixed-size ring buffer +/// - Finding the best sequence to build based on local chain tip +/// - Broadcasting completed sequences to subscribers +#[derive(Debug)] +pub(crate) struct SequenceManager { + /// Current pending sequence being built up from incoming flashblocks + pending: FlashBlockPendingSequence, + /// Cached recovered transactions for the pending sequence + pending_transactions: Vec>>, + /// Ring buffer of recently completed sequences bundled with their decoded transactions (FIFO, + /// size 3) + completed_cache: AllocRingBuffer<(FlashBlockCompleteSequence, Vec>>)>, + /// Broadcast channel for completed sequences + block_broadcaster: broadcast::Sender, + /// Whether to compute state roots when building blocks + compute_state_root: bool, +} + +impl SequenceManager { + /// Creates a new sequence manager. + pub(crate) fn new(compute_state_root: bool) -> Self { + let (block_broadcaster, _) = broadcast::channel(128); + Self { + pending: FlashBlockPendingSequence::new(), + pending_transactions: Vec::new(), + completed_cache: AllocRingBuffer::new(CACHE_SIZE), + block_broadcaster, + compute_state_root, + } + } + + /// Returns the sender half of the flashblock sequence broadcast channel. + pub(crate) const fn block_sequence_broadcaster( + &self, + ) -> &broadcast::Sender { + &self.block_broadcaster + } + + /// Gets a subscriber to the flashblock sequences produced. + pub(crate) fn subscribe_block_sequence(&self) -> crate::FlashBlockCompleteSequenceRx { + self.block_broadcaster.subscribe() + } + + /// Inserts a new flashblock into the pending sequence. + /// + /// When a flashblock with index 0 arrives (indicating a new block), the current + /// pending sequence is finalized, cached, and broadcast immediately. If the sequence + /// is later built on top of local tip, `on_build_complete()` will broadcast again + /// with computed `state_root`. + /// + /// Transactions are recovered once and cached for reuse during block building. + pub(crate) fn insert_flashblock(&mut self, flashblock: FlashBlock) -> eyre::Result<()> { + // If this starts a new block, finalize and cache the previous sequence BEFORE inserting + if flashblock.index == 0 && self.pending.count() > 0 { + let completed = self.pending.finalize()?; + let block_number = completed.block_number(); + let parent_hash = completed.payload_base().parent_hash; + + trace!( + target: "flashblocks", + block_number, + %parent_hash, + cache_size = self.completed_cache.len(), + "Caching completed flashblock sequence" + ); + + // Broadcast immediately to consensus client (even without state_root) + // This ensures sequences are forwarded during catch-up even if not buildable on tip. + // ConsensusClient checks execution_outcome and skips newPayload if state_root is zero. + if self.block_broadcaster.receiver_count() > 0 { + let _ = self.block_broadcaster.send(completed.clone()); + } + + // Bundle completed sequence with its decoded transactions and push to cache + // Ring buffer automatically evicts oldest entry when full + let txs = std::mem::take(&mut self.pending_transactions); + self.completed_cache.push((completed, txs)); + + // ensure cache is wiped on new flashblock + let _ = self.pending.take_cached_reads(); + } + + self.pending_transactions + .extend(flashblock.recover_transactions().collect::, _>>()?); + self.pending.insert(flashblock); + Ok(()) + } + + /// Returns the current pending sequence for inspection. 
+ pub(crate) const fn pending(&self) -> &FlashBlockPendingSequence { + &self.pending + } + + /// Finds the next sequence to build and returns ready-to-use `BuildArgs`. + /// + /// Priority order: + /// 1. Current pending sequence (if parent matches local tip) + /// 2. Cached sequence with exact parent match + /// + /// Returns None if nothing is buildable right now. + pub(crate) fn next_buildable_args( + &mut self, + local_tip_hash: B256, + local_tip_timestamp: u64, + ) -> Option>>>> { + // Try to find a buildable sequence: (base, last_fb, transactions, cached_state, + // source_name) + let (base, last_flashblock, transactions, cached_state, source_name) = + // Priority 1: Try current pending sequence + if let Some(base) = self.pending.payload_base().filter(|b| b.parent_hash == local_tip_hash) { + let cached_state = self.pending.take_cached_reads().map(|r| (base.parent_hash, r)); + let last_fb = self.pending.last_flashblock()?; + let transactions = self.pending_transactions.clone(); + (base, last_fb, transactions, cached_state, "pending") + } + // Priority 2: Try cached sequence with exact parent match + else if let Some((cached, txs)) = self.completed_cache.iter().find(|(c, _)| c.payload_base().parent_hash == local_tip_hash) { + let base = cached.payload_base().clone(); + let last_fb = cached.last(); + let transactions = txs.clone(); + let cached_state = None; + (base, last_fb, transactions, cached_state, "cached") + } else { + return None; + }; + + // Auto-detect when to compute state root: only if the builder didn't provide it (sent + // B256::ZERO) and we're near the expected final flashblock index. + // + // Background: Each block period receives multiple flashblocks at regular intervals. + // The sequencer sends an initial "base" flashblock at index 0 when a new block starts, + // then subsequent flashblocks are produced every FLASHBLOCK_BLOCK_TIME intervals (200ms). + // + // Examples with different block times: + // - Base (2s blocks): expect 2000ms / 200ms = 10 intervals → Flashblocks: index 0 (base) + // + indices 1-10 = potentially 11 total + // + // - Unichain (1s blocks): expect 1000ms / 200ms = 5 intervals → Flashblocks: index 0 (base) + // + indices 1-5 = potentially 6 total + // + // Why compute at N-1 instead of N: + // 1. Timing variance in flashblock producing time may mean only N flashblocks were produced + // instead of N+1 (missing the final one). Computing at N-1 ensures we get the state root + // for most common cases. + // + // 2. The +1 case (index 0 base + N intervals): If all N+1 flashblocks do arrive, we'll + // still calculate state root for flashblock N, which sacrifices a little performance but + // still ensures correctness for common cases. + // + // Note: Pathological cases may result in fewer flashblocks than expected (e.g., builder + // downtime, flashblock execution exceeding timing budget). When this occurs, we won't + // compute the state root, causing FlashblockConsensusClient to lack precomputed state for + // engine_newPayload. This is safe: we still have op-node as backstop to maintain + // chain progression. 
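+        // Illustrative numbers for the computation below (assuming a 2s chain and that
+        // state-root computation is enabled):
+        //   local_tip_timestamp = 1_000, base.timestamp = 1_002
+        //   block_time_ms = (1_002 - 1_000) * 1000 = 2_000
+        //   expected_final_flashblock = 2_000 / 200 = 10
+        //   => the state root is computed once last_flashblock.index >= 9 and the
+        //      builder left diff.state_root zeroed.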
+ let block_time_ms = (base.timestamp - local_tip_timestamp) * 1000; + let expected_final_flashblock = block_time_ms / FLASHBLOCK_BLOCK_TIME; + let compute_state_root = self.compute_state_root && + last_flashblock.diff.state_root.is_zero() && + last_flashblock.index >= expected_final_flashblock.saturating_sub(1); + + trace!( + target: "flashblocks", + block_number = base.block_number, + source = source_name, + flashblock_index = last_flashblock.index, + expected_final_flashblock, + compute_state_root_enabled = self.compute_state_root, + state_root_is_zero = last_flashblock.diff.state_root.is_zero(), + will_compute_state_root = compute_state_root, + "Building from flashblock sequence" + ); + + Some(BuildArgs { + base, + transactions, + cached_state, + last_flashblock_index: last_flashblock.index, + last_flashblock_hash: last_flashblock.diff.block_hash, + compute_state_root, + }) + } + + /// Records the result of building a sequence and re-broadcasts with execution outcome. + /// + /// Updates execution outcome and cached reads. For cached sequences (already broadcast + /// once during finalize), this broadcasts again with the computed `state_root`, allowing + /// the consensus client to submit via `engine_newPayload`. + pub(crate) fn on_build_complete( + &mut self, + parent_hash: B256, + result: Option<(PendingFlashBlock, CachedReads)>, + ) { + let Some((computed_block, cached_reads)) = result else { + return; + }; + + // Extract execution outcome + let execution_outcome = computed_block.computed_state_root().map(|state_root| { + SequenceExecutionOutcome { block_hash: computed_block.block().hash(), state_root } + }); + + // Update pending sequence with execution results + if self.pending.payload_base().is_some_and(|base| base.parent_hash == parent_hash) { + self.pending.set_execution_outcome(execution_outcome); + self.pending.set_cached_reads(cached_reads); + trace!( + target: "flashblocks", + block_number = self.pending.block_number(), + has_computed_state_root = execution_outcome.is_some(), + "Updated pending sequence with build results" + ); + } + // Check if this completed sequence in cache and broadcast with execution outcome + else if let Some((cached, _)) = self + .completed_cache + .iter_mut() + .find(|(c, _)| c.payload_base().parent_hash == parent_hash) + { + // Only re-broadcast if we computed new information (state_root was missing). + // If sequencer already provided state_root, we already broadcast in insert_flashblock, + // so skip re-broadcast to avoid duplicate FCU calls. 
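+            // E.g. a sequence that was broadcast during finalize without an outcome: if this
+            // build produced one (`execution_outcome` is Some) and none was recorded on the
+            // cached copy yet, it is re-broadcast below with the computed state root.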
+ let needs_rebroadcast = + execution_outcome.is_some() && cached.execution_outcome().is_none(); + + cached.set_execution_outcome(execution_outcome); + + if needs_rebroadcast && self.block_broadcaster.receiver_count() > 0 { + trace!( + target: "flashblocks", + block_number = cached.block_number(), + "Re-broadcasting sequence with computed state_root" + ); + let _ = self.block_broadcaster.send(cached.clone()); + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::TestFlashBlockFactory; + use alloy_primitives::B256; + use op_alloy_consensus::OpTxEnvelope; + + #[test] + fn test_sequence_manager_new() { + let manager: SequenceManager = SequenceManager::new(true); + assert_eq!(manager.pending().count(), 0); + } + + #[test] + fn test_insert_flashblock_creates_pending_sequence() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0).unwrap(); + + assert_eq!(manager.pending().count(), 1); + assert_eq!(manager.pending().block_number(), Some(100)); + } + + #[test] + fn test_insert_flashblock_caches_completed_sequence() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Build first sequence + let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0.clone()).unwrap(); + + let fb1 = factory.flashblock_after(&fb0).build(); + manager.insert_flashblock(fb1).unwrap(); + + // Insert new base (index 0) which should finalize and cache previous sequence + let fb2 = factory.flashblock_for_next_block(&fb0).build(); + manager.insert_flashblock(fb2).unwrap(); + + // New sequence should be pending + assert_eq!(manager.pending().count(), 1); + assert_eq!(manager.pending().block_number(), Some(101)); + assert_eq!(manager.completed_cache.len(), 1); + let (cached_sequence, _txs) = manager.completed_cache.get(0).unwrap(); + assert_eq!(cached_sequence.block_number(), 100); + } + + #[test] + fn test_next_buildable_args_returns_none_when_empty() { + let mut manager: SequenceManager = SequenceManager::new(true); + let local_tip_hash = B256::random(); + let local_tip_timestamp = 1000; + + let args = manager.next_buildable_args(local_tip_hash, local_tip_timestamp); + assert!(args.is_none()); + } + + #[test] + fn test_next_buildable_args_matches_pending_parent() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).build(); + let parent_hash = fb0.base.as_ref().unwrap().parent_hash; + manager.insert_flashblock(fb0).unwrap(); + + let args = manager.next_buildable_args(parent_hash, 1000000); + assert!(args.is_some()); + + let build_args = args.unwrap(); + assert_eq!(build_args.last_flashblock_index, 0); + } + + #[test] + fn test_next_buildable_args_returns_none_when_parent_mismatch() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0).unwrap(); + + // Use different parent hash + let wrong_parent = B256::random(); + let args = manager.next_buildable_args(wrong_parent, 1000000); + assert!(args.is_none()); + } + + #[test] + fn test_next_buildable_args_prefers_pending_over_cached() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Create and finalize first sequence + let fb0 
= factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0.clone()).unwrap(); + + // Create new sequence (finalizes previous) + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + let parent_hash = fb1.base.as_ref().unwrap().parent_hash; + manager.insert_flashblock(fb1).unwrap(); + + // Request with first sequence's parent (should find cached) + let args = manager.next_buildable_args(parent_hash, 1000000); + assert!(args.is_some()); + } + + #[test] + fn test_next_buildable_args_finds_cached_sequence() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Build and cache first sequence + let fb0 = factory.flashblock_at(0).build(); + let parent_hash = fb0.base.as_ref().unwrap().parent_hash; + manager.insert_flashblock(fb0.clone()).unwrap(); + + // Start new sequence to finalize first + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + manager.insert_flashblock(fb1.clone()).unwrap(); + + // Clear pending by starting another sequence + let fb2 = factory.flashblock_for_next_block(&fb1).build(); + manager.insert_flashblock(fb2).unwrap(); + + // Request first sequence's parent - should find in cache + let args = manager.next_buildable_args(parent_hash, 1000000); + assert!(args.is_some()); + } + + #[test] + fn test_compute_state_root_logic_near_expected_final() { + let mut manager: SequenceManager = SequenceManager::new(true); + let block_time = 2u64; + let factory = TestFlashBlockFactory::new().with_block_time(block_time); + + // Create sequence with zero state root (needs computation) + let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); + let parent_hash = fb0.base.as_ref().unwrap().parent_hash; + let base_timestamp = fb0.base.as_ref().unwrap().timestamp; + manager.insert_flashblock(fb0.clone()).unwrap(); + + // Add flashblocks up to expected final index (2000ms / 200ms = 10) + for i in 1..=9 { + let fb = factory.flashblock_after(&fb0).index(i).state_root(B256::ZERO).build(); + manager.insert_flashblock(fb).unwrap(); + } + + // Request with proper timing - should compute state root for index 9 + let args = manager.next_buildable_args(parent_hash, base_timestamp - block_time); + assert!(args.is_some()); + assert!(args.unwrap().compute_state_root); + } + + #[test] + fn test_no_compute_state_root_when_provided_by_sequencer() { + let mut manager: SequenceManager = SequenceManager::new(true); + let block_time = 2u64; + let factory = TestFlashBlockFactory::new().with_block_time(block_time); + + // Create sequence with non-zero state root (provided by sequencer) + let fb0 = factory.flashblock_at(0).state_root(B256::random()).build(); + let parent_hash = fb0.base.as_ref().unwrap().parent_hash; + let base_timestamp = fb0.base.as_ref().unwrap().timestamp; + manager.insert_flashblock(fb0).unwrap(); + + let args = manager.next_buildable_args(parent_hash, base_timestamp - block_time); + assert!(args.is_some()); + assert!(!args.unwrap().compute_state_root); + } + + #[test] + fn test_no_compute_state_root_when_disabled() { + let mut manager: SequenceManager = SequenceManager::new(false); + let block_time = 2u64; + let factory = TestFlashBlockFactory::new().with_block_time(block_time); + + // Create sequence with zero state root (needs computation) + let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); + let parent_hash = fb0.base.as_ref().unwrap().parent_hash; + let base_timestamp = fb0.base.as_ref().unwrap().timestamp; + manager.insert_flashblock(fb0.clone()).unwrap(); + + // Add 
flashblocks up to expected final index (2000ms / 200ms = 10) + for i in 1..=9 { + let fb = factory.flashblock_after(&fb0).index(i).state_root(B256::ZERO).build(); + manager.insert_flashblock(fb).unwrap(); + } + + // Request with proper timing - state root computation is disabled, so it must not be requested + let args = manager.next_buildable_args(parent_hash, base_timestamp - block_time); + assert!(args.is_some()); + assert!(!args.unwrap().compute_state_root); + } + + #[test] + fn test_cache_ring_buffer_evicts_oldest() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Fill cache with 4 sequences (cache size is 3, so oldest should be evicted) + let mut last_fb = factory.flashblock_at(0).build(); + manager.insert_flashblock(last_fb.clone()).unwrap(); + + for _ in 0..3 { + last_fb = factory.flashblock_for_next_block(&last_fb).build(); + manager.insert_flashblock(last_fb.clone()).unwrap(); + } + + // The first sequence should have been evicted, so we can't build it + let first_parent = factory.flashblock_at(0).build().base.unwrap().parent_hash; + let args = manager.next_buildable_args(first_parent, 1000000); + // Should not find it (evicted from ring buffer) + assert!(args.is_none()); + } +} diff --git a/crates/optimism/flashblocks/src/consensus.rs b/crates/optimism/flashblocks/src/consensus.rs index 60314d2f6c..0b502c0738 100644 --- a/crates/optimism/flashblocks/src/consensus.rs +++ b/crates/optimism/flashblocks/src/consensus.rs @@ -1,86 +1,458 @@ -use crate::FlashBlockCompleteSequenceRx; +use crate::{FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx}; use alloy_primitives::B256; +use alloy_rpc_types_engine::PayloadStatusEnum; +use op_alloy_rpc_types_engine::OpExecutionData; use reth_engine_primitives::ConsensusEngineHandle; use reth_optimism_payload_builder::OpPayloadTypes; -use reth_payload_primitives::EngineApiMessageVersion; -use ringbuffer::{AllocRingBuffer, RingBuffer}; -use tracing::warn; +use reth_payload_primitives::{EngineApiMessageVersion, ExecutionPayload, PayloadTypes}; +use tracing::*; -/// Consensus client that sends FCUs and new payloads using blocks from a [`FlashBlockService`] +/// Consensus client that sends FCUs and new payloads using blocks from a [`FlashBlockService`]. +/// +/// This client receives completed flashblock sequences and: +/// - Attempts to submit `engine_newPayload` if `state_root` is available (non-zero) +/// - Always sends `engine_forkChoiceUpdated` to drive chain forward /// /// [`FlashBlockService`]: crate::FlashBlockService #[derive(Debug)] -pub struct FlashBlockConsensusClient { +pub struct FlashBlockConsensusClient
<P>
+where + P: PayloadTypes, +{ /// Handle to execution client. - engine_handle: ConsensusEngineHandle<OpPayloadTypes>, + engine_handle: ConsensusEngineHandle<P>
, + /// Receiver for completed flashblock sequences from `FlashBlockService`. sequence_receiver: FlashBlockCompleteSequenceRx, } -impl FlashBlockConsensusClient { +impl
<P> FlashBlockConsensusClient<P>
+where + P: PayloadTypes, + P::ExecutionData: for<'a> TryFrom<&'a FlashBlockCompleteSequence, Error: std::fmt::Display>, +{ /// Create a new `FlashBlockConsensusClient` with the given Op engine and sequence receiver. pub const fn new( - engine_handle: ConsensusEngineHandle<OpPayloadTypes>, + engine_handle: ConsensusEngineHandle<P>
, sequence_receiver: FlashBlockCompleteSequenceRx, ) -> eyre::Result { Ok(Self { engine_handle, sequence_receiver }) } - /// Get previous block hash using previous block hash buffer. If it isn't available (buffer - /// started more recently than `offset`), return default zero hash - fn get_previous_block_hash( - &self, - previous_block_hashes: &AllocRingBuffer, - offset: usize, - ) -> B256 { - *previous_block_hashes - .len() - .checked_sub(offset) - .and_then(|index| previous_block_hashes.get(index)) - .unwrap_or_default() + /// Attempts to submit a new payload to the engine. + /// + /// The `TryFrom` conversion will fail if `execution_outcome.state_root` is `B256::ZERO`, + /// in which case this returns the `parent_hash` instead to drive the chain forward. + /// + /// Returns the block hash to use for FCU (either the new block or parent). + async fn submit_new_payload(&self, sequence: &FlashBlockCompleteSequence) -> B256 { + let payload = match P::ExecutionData::try_from(sequence) { + Ok(payload) => payload, + Err(err) => { + trace!(target: "flashblocks", %err, "Failed payload conversion, using parent hash"); + return sequence.payload_base().parent_hash; + } + }; + + let block_number = payload.block_number(); + let block_hash = payload.block_hash(); + match self.engine_handle.new_payload(payload).await { + Ok(result) => { + debug!( + target: "flashblocks", + flashblock_count = sequence.count(), + block_number, + %block_hash, + ?result, + "Submitted engine_newPayload", + ); + + if let PayloadStatusEnum::Invalid { validation_error } = result.status { + debug!( + target: "flashblocks", + block_number, + %block_hash, + %validation_error, + "Payload validation error", + ); + }; + } + Err(err) => { + error!( + target: "flashblocks", + %err, + block_number, + "Failed to submit new payload", + ); + } + } + + block_hash } - /// Spawn the client to start sending FCUs and new payloads by periodically fetching recent - /// blocks. - pub async fn run(mut self) { - let mut previous_block_hashes = AllocRingBuffer::new(64); + /// Submit a forkchoice update to the engine. + async fn submit_forkchoice_update( + &self, + head_block_hash: B256, + sequence: &FlashBlockCompleteSequence, + ) { + let block_number = sequence.block_number(); + let safe_hash = sequence.payload_base().parent_hash; + let finalized_hash = sequence.payload_base().parent_hash; + let fcu_state = alloy_rpc_types_engine::ForkchoiceState { + head_block_hash, + safe_block_hash: safe_hash, + finalized_block_hash: finalized_hash, + }; - loop { - match self.sequence_receiver.recv().await { - Ok(sequence) => { - let block_hash = sequence.payload_base().parent_hash; - previous_block_hashes.push(block_hash); - - if sequence.state_root().is_none() { - warn!("Missing state root for the complete sequence") - } - - // Load previous block hashes. We're using (head - 32) and (head - 64) as the - // safe and finalized block hashes. 
- let safe_block_hash = self.get_previous_block_hash(&previous_block_hashes, 32); - let finalized_block_hash = - self.get_previous_block_hash(&previous_block_hashes, 64); - - let state = alloy_rpc_types_engine::ForkchoiceState { - head_block_hash: block_hash, - safe_block_hash, - finalized_block_hash, - }; - - // Send FCU - let _ = self - .engine_handle - .fork_choice_updated(state, None, EngineApiMessageVersion::V3) - .await; - } - Err(err) => { - warn!( - target: "consensus::flashblock-client", - %err, - "error while fetching flashblock completed sequence" - ); - break; - } + match self + .engine_handle + .fork_choice_updated(fcu_state, None, EngineApiMessageVersion::V5) + .await + { + Ok(result) => { + debug!( + target: "flashblocks", + flashblock_count = sequence.count(), + block_number, + %head_block_hash, + %safe_hash, + %finalized_hash, + ?result, + "Submitted engine_forkChoiceUpdated", + ) + } + Err(err) => { + error!( + target: "flashblocks", + %err, + block_number, + %head_block_hash, + %safe_hash, + %finalized_hash, + "Failed to submit fork choice update", + ); } } } + + /// Runs the consensus client loop. + /// + /// Continuously receives completed flashblock sequences and submits them to the execution + /// engine: + /// 1. Attempts `engine_newPayload` (only if `state_root` is available) + /// 2. Always sends `engine_forkChoiceUpdated` to drive chain forward + pub async fn run(mut self) { + loop { + let Ok(sequence) = self.sequence_receiver.recv().await else { + continue; + }; + + // Returns block_hash for FCU: + // - If state_root is available: submits newPayload and returns the new block's hash + // - If state_root is zero: skips newPayload and returns parent_hash (no progress yet) + let block_hash = self.submit_new_payload(&sequence).await; + + self.submit_forkchoice_update(block_hash, &sequence).await; + } + } +} + +impl TryFrom<&FlashBlockCompleteSequence> for OpExecutionData { + type Error = &'static str; + + fn try_from(sequence: &FlashBlockCompleteSequence) -> Result { + let mut data = Self::from_flashblocks_unchecked(sequence); + + // If execution outcome is available, use the computed state_root and block_hash. + // FlashBlockService computes these when building sequences on top of the local tip. + if let Some(execution_outcome) = sequence.execution_outcome() { + let payload = data.payload.as_v1_mut(); + payload.state_root = execution_outcome.state_root; + payload.block_hash = execution_outcome.block_hash; + } + + // Only proceed if we have a valid state_root (non-zero). 
+ if data.payload.as_v1_mut().state_root == B256::ZERO { + return Err("No state_root available for payload"); + } + + Ok(data) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{sequence::SequenceExecutionOutcome, test_utils::TestFlashBlockFactory}; + + mod op_execution_data_conversion { + use super::*; + + #[test] + fn test_try_from_fails_with_zero_state_root() { + // When execution_outcome is None, state_root remains zero and conversion fails + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); + + let sequence = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); + + let result = OpExecutionData::try_from(&sequence); + assert!(result.is_err()); + assert_eq!(result.unwrap_err(), "No state_root available for payload"); + } + + #[test] + fn test_try_from_succeeds_with_execution_outcome() { + // When execution_outcome has state_root, conversion succeeds + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); + + let execution_outcome = SequenceExecutionOutcome { + block_hash: B256::random(), + state_root: B256::random(), // Non-zero + }; + + let sequence = + FlashBlockCompleteSequence::new(vec![fb0], Some(execution_outcome)).unwrap(); + + let result = OpExecutionData::try_from(&sequence); + assert!(result.is_ok()); + + let mut data = result.unwrap(); + assert_eq!(data.payload.as_v1_mut().state_root, execution_outcome.state_root); + assert_eq!(data.payload.as_v1_mut().block_hash, execution_outcome.block_hash); + } + + #[test] + fn test_try_from_succeeds_with_provided_state_root() { + // When sequencer provides non-zero state_root, conversion succeeds + let factory = TestFlashBlockFactory::new(); + let provided_state_root = B256::random(); + let fb0 = factory.flashblock_at(0).state_root(provided_state_root).build(); + + let sequence = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); + + let result = OpExecutionData::try_from(&sequence); + assert!(result.is_ok()); + + let mut data = result.unwrap(); + assert_eq!(data.payload.as_v1_mut().state_root, provided_state_root); + } + + #[test] + fn test_try_from_execution_outcome_overrides_provided_state_root() { + // execution_outcome takes precedence over sequencer-provided state_root + let factory = TestFlashBlockFactory::new(); + let provided_state_root = B256::random(); + let fb0 = factory.flashblock_at(0).state_root(provided_state_root).build(); + + let execution_outcome = SequenceExecutionOutcome { + block_hash: B256::random(), + state_root: B256::random(), // Different from provided + }; + + let sequence = + FlashBlockCompleteSequence::new(vec![fb0], Some(execution_outcome)).unwrap(); + + let result = OpExecutionData::try_from(&sequence); + assert!(result.is_ok()); + + let mut data = result.unwrap(); + // Should use execution_outcome, not the provided state_root + assert_eq!(data.payload.as_v1_mut().state_root, execution_outcome.state_root); + assert_ne!(data.payload.as_v1_mut().state_root, provided_state_root); + } + + #[test] + fn test_try_from_with_multiple_flashblocks() { + // Test conversion with sequence of multiple flashblocks + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); + let fb1 = factory.flashblock_after(&fb0).state_root(B256::ZERO).build(); + let fb2 = factory.flashblock_after(&fb1).state_root(B256::ZERO).build(); + + let execution_outcome = + SequenceExecutionOutcome { block_hash: B256::random(), state_root: 
B256::random() }; + + let sequence = + FlashBlockCompleteSequence::new(vec![fb0, fb1, fb2], Some(execution_outcome)) + .unwrap(); + + let result = OpExecutionData::try_from(&sequence); + assert!(result.is_ok()); + + let mut data = result.unwrap(); + assert_eq!(data.payload.as_v1_mut().state_root, execution_outcome.state_root); + assert_eq!(data.payload.as_v1_mut().block_hash, execution_outcome.block_hash); + } + } + + mod consensus_client_creation { + use super::*; + use tokio::sync::broadcast; + + #[test] + fn test_new_creates_client() { + let (engine_tx, _) = tokio::sync::mpsc::unbounded_channel(); + let engine_handle = ConsensusEngineHandle::::new(engine_tx); + + let (_, sequence_rx) = broadcast::channel(1); + + let result = FlashBlockConsensusClient::new(engine_handle, sequence_rx); + assert!(result.is_ok()); + } + } + + mod submit_new_payload_behavior { + use super::*; + + #[test] + fn test_submit_new_payload_returns_parent_hash_when_no_state_root() { + // When conversion fails (no state_root), should return parent_hash + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); + let parent_hash = fb0.base.as_ref().unwrap().parent_hash; + + let sequence = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); + + // Verify conversion would fail + let conversion_result = OpExecutionData::try_from(&sequence); + assert!(conversion_result.is_err()); + + // In the actual run loop, submit_new_payload would return parent_hash + assert_eq!(sequence.payload_base().parent_hash, parent_hash); + } + + #[test] + fn test_submit_new_payload_returns_block_hash_when_state_root_available() { + // When conversion succeeds, should return the new block's hash + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); + + let execution_outcome = + SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; + + let sequence = + FlashBlockCompleteSequence::new(vec![fb0], Some(execution_outcome)).unwrap(); + + // Verify conversion succeeds + let conversion_result = OpExecutionData::try_from(&sequence); + assert!(conversion_result.is_ok()); + + let mut data = conversion_result.unwrap(); + assert_eq!(data.payload.as_v1_mut().block_hash, execution_outcome.block_hash); + } + } + + mod forkchoice_update_behavior { + use super::*; + + #[test] + fn test_forkchoice_state_uses_parent_hash_for_safe_and_finalized() { + // Both safe_hash and finalized_hash should be set to parent_hash + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let parent_hash = fb0.base.as_ref().unwrap().parent_hash; + + let sequence = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); + + // Verify the expected forkchoice state + assert_eq!(sequence.payload_base().parent_hash, parent_hash); + } + + #[test] + fn test_forkchoice_update_with_new_block_hash() { + // When newPayload succeeds, FCU should use the new block's hash as head + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); + + let execution_outcome = + SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; + + let sequence = + FlashBlockCompleteSequence::new(vec![fb0], Some(execution_outcome)).unwrap(); + + // The head_block_hash for FCU would be execution_outcome.block_hash + assert_eq!( + sequence.execution_outcome().unwrap().block_hash, + execution_outcome.block_hash + ); + } + + #[test] + fn 
test_forkchoice_update_with_parent_hash_when_no_state_root() { + // When newPayload is skipped (no state_root), FCU should use parent_hash as head + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); + let parent_hash = fb0.base.as_ref().unwrap().parent_hash; + + let sequence = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); + + // The head_block_hash for FCU would be parent_hash (fallback) + assert_eq!(sequence.payload_base().parent_hash, parent_hash); + } + } + + mod run_loop_logic { + use super::*; + + #[test] + fn test_run_loop_processes_sequence_with_state_root() { + // Scenario: Sequence with state_root should trigger both newPayload and FCU + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); + + let execution_outcome = + SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; + + let sequence = + FlashBlockCompleteSequence::new(vec![fb0], Some(execution_outcome)).unwrap(); + + // Verify sequence is ready for newPayload + let conversion = OpExecutionData::try_from(&sequence); + assert!(conversion.is_ok()); + } + + #[test] + fn test_run_loop_processes_sequence_without_state_root() { + // Scenario: Sequence without state_root should skip newPayload but still do FCU + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); + + let sequence = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); + + // Verify sequence cannot be converted (newPayload will be skipped) + let conversion = OpExecutionData::try_from(&sequence); + assert!(conversion.is_err()); + + // But FCU should still happen with parent_hash + assert!(sequence.payload_base().parent_hash != B256::ZERO); + } + + #[test] + fn test_run_loop_handles_multiple_sequences() { + // Multiple sequences should be processed independently + let factory = TestFlashBlockFactory::new(); + + // Sequence 1: With state_root + let fb0_seq1 = factory.flashblock_at(0).state_root(B256::ZERO).build(); + let outcome1 = + SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; + let seq1 = + FlashBlockCompleteSequence::new(vec![fb0_seq1.clone()], Some(outcome1)).unwrap(); + + // Sequence 2: Without state_root (for next block) + let fb0_seq2 = factory.flashblock_for_next_block(&fb0_seq1).build(); + let seq2 = FlashBlockCompleteSequence::new(vec![fb0_seq2], None).unwrap(); + + // Both should be valid sequences + assert_eq!(seq1.block_number(), 100); + assert_eq!(seq2.block_number(), 101); + + // seq1 can be converted + assert!(OpExecutionData::try_from(&seq1).is_ok()); + // seq2 cannot be converted + assert!(OpExecutionData::try_from(&seq2).is_err()); + } + } } diff --git a/crates/optimism/flashblocks/src/lib.rs b/crates/optimism/flashblocks/src/lib.rs index d36ddb21fc..fe77dc18a8 100644 --- a/crates/optimism/flashblocks/src/lib.rs +++ b/crates/optimism/flashblocks/src/lib.rs @@ -8,23 +8,33 @@ #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -pub use payload::{ - ExecutionPayloadBaseV1, ExecutionPayloadFlashblockDeltaV1, FlashBlock, FlashBlockDecoder, - Metadata, -}; -pub use service::{FlashBlockBuildInfo, FlashBlockService}; -pub use ws::{WsConnect, WsFlashBlockStream}; +use reth_primitives_traits::NodePrimitives; +use std::sync::Arc; + +// Included to enable serde feature for OpReceipt type used transitively +use reth_optimism_primitives as _; mod 
consensus; pub use consensus::FlashBlockConsensusClient; + mod payload; -pub use payload::PendingFlashBlock; +pub use payload::{FlashBlock, PendingFlashBlock}; + mod sequence; pub use sequence::{FlashBlockCompleteSequence, FlashBlockPendingSequence}; mod service; +pub use service::{FlashBlockBuildInfo, FlashBlockService}; + mod worker; + +mod cache; + +#[cfg(test)] +mod test_utils; + mod ws; +pub use ws::{FlashBlockDecoder, WsConnect, WsFlashBlockStream}; /// Receiver of the most recent [`PendingFlashBlock`] built out of [`FlashBlock`]s. /// @@ -37,5 +47,37 @@ pub type PendingBlockRx = tokio::sync::watch::Receiver; +/// Receiver of received [`FlashBlock`]s from the (websocket) subscription. +/// +/// [`FlashBlock`]: crate::FlashBlock +pub type FlashBlockRx = tokio::sync::broadcast::Receiver>; + /// Receiver that signals whether a [`FlashBlock`] is currently being built. pub type InProgressFlashBlockRx = tokio::sync::watch::Receiver>; + +/// Container for all flashblocks-related listeners. +/// +/// Groups together the channels for flashblock-related updates. +#[derive(Debug)] +pub struct FlashblocksListeners { + /// Receiver of the most recent executed [`PendingFlashBlock`] built out of [`FlashBlock`]s. + pub pending_block_rx: PendingBlockRx, + /// Subscription channel of the complete sequences of [`FlashBlock`]s built. + pub flashblocks_sequence: tokio::sync::broadcast::Sender, + /// Receiver that signals whether a [`FlashBlock`] is currently being built. + pub in_progress_rx: InProgressFlashBlockRx, + /// Subscription channel for received flashblocks from the (websocket) connection. + pub received_flashblocks: tokio::sync::broadcast::Sender>, +} + +impl FlashblocksListeners { + /// Creates a new [`FlashblocksListeners`] with the given channels. + pub const fn new( + pending_block_rx: PendingBlockRx, + flashblocks_sequence: tokio::sync::broadcast::Sender, + in_progress_rx: InProgressFlashBlockRx, + received_flashblocks: tokio::sync::broadcast::Sender>, + ) -> Self { + Self { pending_block_rx, flashblocks_sequence, in_progress_rx, received_flashblocks } + } +} diff --git a/crates/optimism/flashblocks/src/payload.rs b/crates/optimism/flashblocks/src/payload.rs index da81ada016..c7031c1856 100644 --- a/crates/optimism/flashblocks/src/payload.rs +++ b/crates/optimism/flashblocks/src/payload.rs @@ -1,141 +1,11 @@ use alloy_consensus::BlockHeader; -use alloy_eips::eip4895::Withdrawal; -use alloy_primitives::{bytes, Address, Bloom, Bytes, B256, U256}; -use alloy_rpc_types_engine::PayloadId; +use alloy_primitives::B256; use derive_more::Deref; -use reth_optimism_evm::OpNextBlockEnvAttributes; -use reth_optimism_primitives::OpReceipt; use reth_primitives_traits::NodePrimitives; use reth_rpc_eth_types::PendingBlock; -use serde::{Deserialize, Serialize}; -use std::collections::BTreeMap; -/// Represents a Flashblock, a real-time block-like structure emitted by the Base L2 chain. -/// -/// A Flashblock provides a snapshot of a block’s effects before finalization, -/// allowing faster insight into state transitions, balance changes, and logs. -/// It includes a diff of the block’s execution and associated metadata. -/// -/// See: [Base Flashblocks Documentation](https://docs.base.org/chain/flashblocks) -#[derive(Default, Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] -pub struct FlashBlock { - /// The unique payload ID as assigned by the execution engine for this block. - pub payload_id: PayloadId, - /// A sequential index that identifies the order of this Flashblock. 
- pub index: u64, - /// A subset of block header fields. - pub base: Option, - /// The execution diff representing state transitions and transactions. - pub diff: ExecutionPayloadFlashblockDeltaV1, - /// Additional metadata about the block such as receipts and balances. - pub metadata: Metadata, -} - -impl FlashBlock { - /// Returns the block number of this flashblock. - pub const fn block_number(&self) -> u64 { - self.metadata.block_number - } - - /// Returns the first parent hash of this flashblock. - pub fn parent_hash(&self) -> Option { - Some(self.base.as_ref()?.parent_hash) - } -} - -/// A trait for decoding flashblocks from bytes. -pub trait FlashBlockDecoder: Send + 'static { - /// Decodes `bytes` into a [`FlashBlock`]. - fn decode(&self, bytes: bytes::Bytes) -> eyre::Result; -} - -/// Default implementation of the decoder. -impl FlashBlockDecoder for () { - fn decode(&self, bytes: bytes::Bytes) -> eyre::Result { - FlashBlock::decode(bytes) - } -} - -/// Provides metadata about the block that may be useful for indexing or analysis. -#[derive(Default, Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] -pub struct Metadata { - /// The number of the block in the L2 chain. - pub block_number: u64, - /// A map of addresses to their updated balances after the block execution. - /// This represents balance changes due to transactions, rewards, or system transfers. - pub new_account_balances: BTreeMap, - /// Execution receipts for all transactions in the block. - /// Contains logs, gas usage, and other EVM-level metadata. - pub receipts: BTreeMap, -} - -/// Represents the base configuration of an execution payload that remains constant -/// throughout block construction. This includes fundamental block properties like -/// parent hash, block number, and other header fields that are determined at -/// block creation and cannot be modified. -#[derive(Clone, Debug, Eq, PartialEq, Default, Deserialize, Serialize)] -pub struct ExecutionPayloadBaseV1 { - /// Ecotone parent beacon block root - pub parent_beacon_block_root: B256, - /// The parent hash of the block. - pub parent_hash: B256, - /// The fee recipient of the block. - pub fee_recipient: Address, - /// The previous randao of the block. - pub prev_randao: B256, - /// The block number. - #[serde(with = "alloy_serde::quantity")] - pub block_number: u64, - /// The gas limit of the block. - #[serde(with = "alloy_serde::quantity")] - pub gas_limit: u64, - /// The timestamp of the block. - #[serde(with = "alloy_serde::quantity")] - pub timestamp: u64, - /// The extra data of the block. - pub extra_data: Bytes, - /// The base fee per gas of the block. - pub base_fee_per_gas: U256, -} - -/// Represents the modified portions of an execution payload within a flashblock. -/// This structure contains only the fields that can be updated during block construction, -/// such as state root, receipts, logs, and new transactions. Other immutable block fields -/// like parent hash and block number are excluded since they remain constant throughout -/// the block's construction. -#[derive(Clone, Debug, Eq, PartialEq, Default, Deserialize, Serialize)] -pub struct ExecutionPayloadFlashblockDeltaV1 { - /// The state root of the block. - pub state_root: B256, - /// The receipts root of the block. - pub receipts_root: B256, - /// The logs bloom of the block. - pub logs_bloom: Bloom, - /// The gas used of the block. - #[serde(with = "alloy_serde::quantity")] - pub gas_used: u64, - /// The block hash of the block. 
- pub block_hash: B256, - /// The transactions of the block. - pub transactions: Vec, - /// Array of [`Withdrawal`] enabled with V2 - pub withdrawals: Vec, - /// The withdrawals root of the block. - pub withdrawals_root: B256, -} - -impl From for OpNextBlockEnvAttributes { - fn from(value: ExecutionPayloadBaseV1) -> Self { - Self { - timestamp: value.timestamp, - suggested_fee_recipient: value.fee_recipient, - prev_randao: value.prev_randao, - gas_limit: value.gas_limit, - parent_beacon_block_root: Some(value.parent_beacon_block_root), - extra_data: value.extra_data, - } - } -} +/// Type alias for the Optimism flashblock payload. +pub type FlashBlock = op_alloy_rpc_types_engine::OpFlashblockPayload; /// The pending block built with all received Flashblocks alongside the metadata for the last added /// Flashblock. @@ -168,3 +38,206 @@ impl PendingFlashBlock { self.has_computed_state_root.then_some(self.pending.block().state_root()) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_flashblock_serde_roundtrip() { + let raw = r#"{ + "diff": { + "block_hash": "0x2d902e3fcb5bd57e0bf878cbbda1386e7fb8968d518912d58678a35e58261c46", + "gas_used": "0x2907796", + "logs_bloom": "0x5c21065292452cfcd5175abfee20e796773da578307356043ba4f62692aca01204e8908f97ab9df43f1e9c57f586b1c9a7df8b66ffa7746dfeeb538617fea5eb75ad87f8b6653f597d86814dc5ad6de404e5a48aeffcc4b1e170c2bdbc7a334936c66166ba0faa6517597b676ef65c588342756f280f7d610aa3ed35c5d877449bfacbdb9b40d98c457f974ab264ec40e4edd6e9fab4c0cb794bf75f10ea20dab75a1f9fd1c441d4c365d1476841e8593f1d1b9a1c52919a0fcf9fc5eef2ef82fe80971a72d1cde1cb195db4806058a229e88acfddfe1a1308adb6f69afa3aaf67f4bd49e93e9f9532ea30bd891a8ff08de61fb645bec678db816950b47fcef0", + "receipts_root": "0x2c4203e9aa87258627bf23ab4d5f9d92da30285ea11dc0b3e140a5a8d4b63e26", + "state_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactions": [ + "0x02f8c2822105830b0c58840b677c0f840c93fb5a834c4b4094d599955d17a1378651e76557ffc406c71300fcb080b851020026000100271000c8e9d514f85b57b70de033e841d788ab4df1acd691802acc26dcd13fb9e38fa8e10001004e2000c8e9d55bd42770e29cb76904377ffdb22737fc9f5eb36fde875fcbfa687b1c3023c080a07e8486ab3db9f07588a3f37bd8ffb9b349ba9bb738a2500d78a4583e1e54a6f9a068d0b3c729a6777c81dd49bd0c2dc3a079f0ceed4e778fbfe79176e8b70d68d8", + 
"0xf90fae820248840158a3c58307291a94bbbfd134e9b44bfb5123898ba36b01de7ab93d9880b90f443087505600000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000012000000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f403000000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f40300000000000000000000000000000000000000000000000000000000000000001000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda0291300000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000004b2ee6f00000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000018000000000000000000000000000000000000000000000000000000000000003600000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000044095ea7b30000000000000000000000000000000000001ff3684f28c67538d4d072c227340000000000000000000000000000000000000000000000000000000004b2ee6f00000000000000000000000000000000000000000000000000000000000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e22200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001243b2253c8000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000000001000000000000000000000000f70da97812cb96acdf810712aa562db8dfa3dbef000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000133f4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ff3684f28c67538d4d072c2273400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000007e42213bc0b000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000004b1ba7b000000000000000000000000ea758cac6115309b325c582fd0782d79e350217700000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000007041fff991f000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca0000000000000000000000000000000000000000000000000000000004b06d9200000000000000000000000000000000000000000000000000000000000000a0d311e79cd2099f6f1f060704000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000300000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000058000000000000000000000000000000000000000000000000000000000000000e4c1fb425e000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000004b1ba7b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000069073bb900000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003c438c9c147000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000002710000000000000000000000000ba12222222228d8ba445958a75a0704d566bf2c800000000000000000000000000000000000000000000000000000000000001c400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000002e4945bcec9000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001200000000000000000000000000000000000000000000000000000000000000220000000000000000000000000ea758cac6115309b325c582fd0782d79e35021770000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000069073bb9000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000208f360baf899845441eccdc46525e26bb8860752a0002000000000000000001cd000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000004b1ba7b00000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda02913000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca00000000000000000000000000000000000000000000000000000000000000027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008434ee90ca000000000000000000000000f5c4f3dc02c3fb9279495a8fef7b0741da956157000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca0000000000000000000000000000000000000000000000000000000004b1a7880000000000000000000000000000000000000000000000000000000000002710000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000008000000000000000000000000000000000000000000000000000000000000001243b2253c8000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca000000000000000000000000000000000000000000000000000000000000000100000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f403000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002887696e8edbbcbd7306955512ff6f2d8426403eef4762157da3e9c5a89d78f682422da0c8d8b1aa1c9bfd1fe1e4a10c6123caa2fe582294aa5798c54546faa4c09590a9a012a1c78fca9cfefd281c1e44682de3c4420299da5cf2ae498f67d7de7dcf166c", + "0x02f8f582210582a649831db02984026c1a34833d090094f2cb4e685946beecbc9ce5f318b68edc583bcfa080b88600000000000069073af31c4289d066d04f33681f6686155c8243dff963557765630a39bdd8c54e6b7dbe5d4b689e9d536608db03163882cf005f7b5813e41d2fdec75161c8470a410c4c9201000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e200000000000c080a088fd1a2b2e5891109afc3845b2c8b0ca76ea8306190dcb80a703a2451f7bab25a0718ae373e36c8ddb2b934ca936ed824db22c0625cfea29be3d408ff41787fc8c", + "0x02f9030b822105830536f9830f58ab84025c6b93833d090094c90d989d809e26b2d95fb72eb3288fef72af8c2f80b9029a00000000000069073af31c3d4d0646e102b6f958428cd8ed562efa6efb234f629b5f6ca52a15fd2e33aea76eb64fb04cae81b3e5b769dbdc681dcfd4b7a802a2cacdf1ccb65276a722c67607000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e200000000000010206777762d3eb91810b15526c2c9102864d722ef7a9ed24e77271c1dcbf0fdcba68138800000000010698c8f03094a9e65ccedc14c40130e4a5dd0ce14fb12ea58cbeac11f662b458b9271000000000000003045a9ad2bb92b0b3e5c571fdd5125114e04e02be1a0bb80000000001036e55486ea6b8691ba58224f3cae35505add86c372710000000000003681d6e4b0b020656ca04956ddaf76add7ef022f60dac00000000010003028be0fcdd7cf0b53b7b82b8f6ea8586d07c53359f2710000000000006c30e25679d5c77b257ac3a61ad08603b11e7afe77ac9222a5386c27d08b6b6c3ea6000000000010696d4b53a38337a5733179751781178a2613306063c511b78cd02684739288c0a01f400000000000002020d028b2d7a29d2e57efc6405a1dce1437180e3ce27100000000001068a71465e76d736564b0c90f5cf3d0d7b69c461c36f69250ae27dbead147cc8f80bb80000000000000206354def8b7e6b2ee04bf85c00f5e79f173d0b76d5017bab3a90c7ba62e1722699000000000000010245f3ad9e63f629be6e278cc4cf34d3b0a79a4a0b27100000000000010404b154dbcd3c75580382c2353082df4390613d93c627120000000001011500cc7d9c2b460720a48cc7444d7e7dfe43f6050bb80a03000000015c8dec5f0eedf1f8934815ef8fb8cb8198eac6520bb80a030000010286f3dd3b4d08de718d7909b0fdc16f4cbdf94ef527100000000000c001a0d4c12f6433ff6ea0573633364c030d8b46ed5764494f80eb434f27060c39f315a034df82c4ac185a666280d578992feee0c05fc75d93e3e2286726c85fba1bb0a0", + "0x02f8f68221058305c7b3830f4ef58401a5485d832dc6c094f2cb4e685946beecbc9ce5f318b68edc583bcfa080b88600000000000069073af31b777ac6b2082fc399fde92a814114b7896ca0b0503106910ea099d5e32c93bfc0013ed2850534c3f8583ab7276414416c0d15ac021126f6cb6ca1ed091ddc01eb01000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e200000000000c080a09694b95dc893bed698ede415c188db3530ccc98a01d79bb9f11d783de7dddde9a0275b0165ab21ea0e6f721c624aa2270a3f98276ca0c95381d90e3f9d434b4881", + 
"0x02f8f682210583034573830f4ef58401a5485d832dc6c094f2cb4e685946beecbc9ce5f318b68edc583bcfa080b88600000000000069073af31c970da8f2adb8bafe6d254ec4428f8342508e169f75e8450f6ff8488813dfa638395e16787966f01731fddffd0e7352cde07fd24bba283bd27f1828fb2a0c700701000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e200000000000c080a00181afe4bedab67692a9c1ff30a89fde6b3d3c8407a47a2777efcd6bdc0c39d2a022d6a4219e72eebdbc5d31ae998243ccec1b192c5c7c586308ccddb4838cd631", + "0x02f8c1822105830b0cfd830f4ed084013bce1b834c4b4094d599955d17a1378651e76557ffc406c71300fcb080b851020026000100271000c8e9d514f85b57b70de033e841d788ab4df1acd691802acc26dcd13fb9e38fa8e10001004e2000c8e9d55bd42770e29cb76904377ffdb22737fc9f5eb36fde875fcbfa687b1c3023c001a0d87c4e16986db55b8846bccfe7bca824b75216e72d8f92369c46681800285cb2a00ec53251be3c2a0d19884747d123ddb0ada3c0a917b21882e297e95c2294d52a", + "0x02f901d58221058306361d830f4240840163efbc8301546194833589fcd6edb6e08f4c7c32d4f71b54bda0291380b90164cf092995000000000000000000000000d723d9f752c19faf88a5fd2111a38d0cc5d395b00000000000000000000000000b55712de2ce8f93be30d53c03d48ea275cd14d000000000000000000000000000000000000000000000000000000000000003e8000000000000000000000000000000000000000000000000000000006907385e0000000000000000000000000000000000000000000000000000000069073be2bef9866b70d0bb74d8763996eb5967b1b24cd48f7801f94ad80cb49431df6b1d00000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000417c9c2382c6c3f029aa3dcbf1df075366fae7bc9fba7f3729713e0bf4d518951f5340350208db96af23686d9985ce552e3588244456a23ca99ecbcae779ea11e71c00000000000000000000000000000000000000000000000000000000000000c080a0b1090c8c67ca9a49ba3591c72c8851f187bbfc39b1920dff2f6c0157ed1ada39a0265b7f704f4c1b5c2c5ca57f1a4040e1e48878c9ad5f2cca9c4e6669d12989f2", + "0x02f8c1822105830b0c98830f424084013bc18b834c4b4094d599955d17a1378651e76557ffc406c71300fcb080b851020026000100271000c8e9d514f85b57b70de033e841d788ab4df1acd691802acc26dcd13fb9e38fa8e10001004e2000c8e9d55bd42770e29cb76904377ffdb22737fc9f5eb36fde875fcbfa687b1c3023c001a080a96d18ae46b58d9a470846a05b394ab4a49a2e379de1941205684e1ac291f9a01e6d4d2c6bab5bf8b89f1df2d6beb85d9f1b3f3be73ca2b72e4ad2d9da0d12d2", + "0x02f901d48221058231e0830f4240840163efbc8301544d94833589fcd6edb6e08f4c7c32d4f71b54bda0291380b90164cf0929950000000000000000000000001de8dbc2409c4bbf14445b0d404bb894f0c6cff70000000000000000000000008d8fa42584a727488eeb0e29405ad794a105bb9b0000000000000000000000000000000000000000000000000000000000002710000000000000000000000000000000000000000000000000000000006907385d0000000000000000000000000000000000000000000000000000000069073af16b129c414484e011621c44e0b32451fdbd69e63ef4919f427dde08c16cb199b100000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000041ae0a4b618c30f0e5d92d7fe99bb435413b2201711427699fd285f69666396cee76199d4e901cfb298612cb3b8ad06178cefb4136a8bc1be07c01b5fea80e5ec11b00000000000000000000000000000000000000000000000000000000000000c080a0af315068084aae367f00263dbd872908bbb9ceaefd6b792fc48dd357e6bdf8afa01e7f0e5913570394b9648939ef71fc5ac34fe320a2757ec388316731a335e69f", + 
"0x02f9022f82210583052d0b830f423f84025c5527833d090094c90d989d809e26b2d95fb72eb3288fef72af8c2f80b901be00000000000069073af31cf0f932cecc8c4c6ffffa554a63e8fba251434483ed3903966d2ba5a70121618a1c45bd9ee158192ab8d7e12ce0f447f2848a48aedaa89e0efa8637bb931745de05000202b6e39c63c7e4ebc01d51f845dfc9cff3f5adf9ef2710000000000103cd1f9777571493aeacb7eae45cd30a226d3e612d4e2000000000000003045a9ad2bb92b0b3e5c571fdd5125114e04e02be1a0bb80000000001036e55486ea6b8691ba58224f3cae35505add86c372710000000000003681d6e4b0b020656ca04956ddaf76add7ef022f60dac0000000001010206777762d3eb91810b15526c2c9102864d722ef7a9ed24e77271c1dcbf0fdcba68138800000000010698c8f03094a9e65ccedc14c40130e4a5dd0ce14fb12ea58cbeac11f662b458b9271000000000000002005554419ccd0293d9383901f461c7c3e0c66e925f0bb80000000001028eb9437532fac8d6a7870f3f887b7978d20355fc271000000000000003035d28f920c9d23100e4a38b2ba2d8ae617c3b261501f4000000000102bc51db8aec659027ae0b0e468c0735418161a7800bb8000000000003dbc6998296caa1652a810dc8d3baf4a8294330f100500000000000c080a040000b130b1759df897a9573691a3d1cafacc6d95d0db1826f275afc30e2ff63a0400a7514f8d5383970c4412205ec8e9c6ca06acea504acabd2d3c36e9cb5003d" + ], + "withdrawals": [], + "withdrawals_root": "0x81864c23f426ad807d66c9fdde33213e1fdbac06c1b751d279901d1ce13670ac" + }, + "index": 10, + "metadata": { + "block_number": 37646058, + "new_account_balances": { + "0x000000000022d473030f116ddee9f6b43ac78ba3": "0x0", + "0x0000000071727de22e5e9d8baf0edac6f37da032": "0x23281e39594556899", + "0x0000f90827f1c53a10cb7a02335b175320002935": "0x0", + "0x000f3df6d732807ef1319fb7b8bb8522d0beac02": "0x0" + }, + "receipts": { + "0x1a766690fd6d0febffc488f12fbd7385c43fbe1e07113a1316f22f176355297e": { + "Legacy": { + "cumulativeGasUsed": "0x2868d76", + "logs": [ + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x0000000000000000000000000000000000000000000000000000000004b2ee6f", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x00000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f4030", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222" + ] + }, + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x0000000000000000000000000000000000000000000000000000000004b2ee6f", + "topics": [ + "0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "0x0000000000000000000000000000000000001ff3684f28c67538d4d072c22734" + ] + }, + { + "address": "0xf5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "data": "0x000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda02913000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000044095ea7b30000000000000000000000000000000000001ff3684f28c67538d4d072c227340000000000000000000000000000000000000000000000000000000004b2ee6f00000000000000000000000000000000000000000000000000000000", + "topics": [ + "0x93485dcd31a905e3ffd7b012abe3438fa8fa77f98ddc9f50e879d3fa7ccdc324" + ] + }, + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x00000000000000000000000000000000000000000000000000000000000133f4", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "0x000000000000000000000000f70da97812cb96acdf810712aa562db8dfa3dbef" + ] + }, + { + "address": 
"0xf5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "data": "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e2220000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001243b2253c8000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000000001000000000000000000000000f70da97812cb96acdf810712aa562db8dfa3dbef000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000133f400000000000000000000000000000000000000000000000000000000", + "topics": [ + "0x93485dcd31a905e3ffd7b012abe3438fa8fa77f98ddc9f50e879d3fa7ccdc324" + ] + }, + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1ba7b", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "0x000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177" + ] + }, + { + "address": "0x8f360baf899845441eccdc46525e26bb8860752a", + "data": "0x00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000001957cc57b7a9959c0000000000000000000000000000000000000000000000001957cc57b7a9959800000000000000000000000000000000000000000000000444e308096a22c339000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000092458cc3a866f04600000000000000000000000000000000000000000000000025f3e27916e84b59000", + "topics": [ + "0x4e1d56f7310a8c32b2267f756b19ba65019b4890068ce114a25009abe54de5ba" + ] + }, + { + "address": "0xba12222222228d8ba445958a75a0704d566bf2c8", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1ba7b0000000000000000000000000000000000000000000000000000000004b1a44c", + "topics": [ + "0x2170c741c41531aec20e7c107c24eecfdd15e69c9bb0a8dd37b1840b9e0b207b", + "0x8f360baf899845441eccdc46525e26bb8860752a0002000000000000000001cd", + "0x000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "0x000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca" + ] + }, + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1ba7b", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177", + "0x000000000000000000000000ba12222222228d8ba445958a75a0704d566bf2c8" + ] + }, + { + "address": "0xd9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1a44c", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000ba12222222228d8ba445958a75a0704d566bf2c8", + "0x000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177" + ] + }, + { + "address": "0xd9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1a44c", + "topics": [ + 
"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222" + ] + }, + { + "address": "0xf5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "data": "0x0000000000000000000000000000000000001ff3684f28c67538d4d072c227340000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007e42213bc0b000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000004b1ba7b000000000000000000000000ea758cac6115309b325c582fd0782d79e350217700000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000007041fff991f000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca0000000000000000000000000000000000000000000000000000000004b06d9200000000000000000000000000000000000000000000000000000000000000a0d311e79cd2099f6f1f0607040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000058000000000000000000000000000000000000000000000000000000000000000e4c1fb425e000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000004b1ba7b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000069073bb900000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003c438c9c147000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda029130000000000000000000000000000000000000000000000000000000000002710000000000000000000000000ba12222222228d8ba445958a75a0704d566bf2c800000000000000000000000000000000000000000000000000000000000001c400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000002e4945bcec9000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001200000000000000000000000000000000000000000000000000000000000000220000000000000000000000000ea758cac6115309b325c582fd0782d79e35021770000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ea758cac6115309b325c582fd0782d79e3502177000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000069073bb9000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000208f360baf899845441eccdc46525e26bb8860752a0002000000000000000001cd000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000004b1ba7b0000000000000000000000000
0000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000833589fcd6edb6e08f4c7c32d4f71b54bda02913000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca00000000000000000000000000000000000000000000000000000000000000027fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008434ee90ca000000000000000000000000f5c4f3dc02c3fb9279495a8fef7b0741da956157000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca0000000000000000000000000000000000000000000000000000000004b1a7880000000000000000000000000000000000000000000000000000000000002710000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "topics": [ + "0x93485dcd31a905e3ffd7b012abe3438fa8fa77f98ddc9f50e879d3fa7ccdc324" + ] + }, + { + "address": "0xd9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca", + "data": "0x0000000000000000000000000000000000000000000000000000000004b1a44c", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "0x00000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f4030" + ] + }, + { + "address": "0xf5042e6ffac5a625d4e7848e0b01373d8eb9e222", + "data": "0x000000000000000000000000f5042e6ffac5a625d4e7848e0b01373d8eb9e2220000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001243b2253c8000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000001000000000000000000000000d9aaec86b65d86f6a7b5b1b0c42ffa531710b6ca000000000000000000000000000000000000000000000000000000000000000100000000000000000000000001c2c79343de52f99538cd2cbbd67ba0813f40300000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "topics": [ + "0x93485dcd31a905e3ffd7b012abe3438fa8fa77f98ddc9f50e879d3fa7ccdc324" + ] + } + ], + "status": "0x1" + } + }, + "0x2cd6b4825b5ee40b703c947e15630336dceda97825b70412da54ccc27f484496": { + "Eip1559": { + "cumulativeGasUsed": "0x28cca69", + "logs": [ + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x", + "topics": [ + "0x98de503528ee59b575ef0c0a2576a82497bfc029a5685b209e9ec333479b10a5", + "0x000000000000000000000000d723d9f752c19faf88a5fd2111a38d0cc5d395b0", + "0xbef9866b70d0bb74d8763996eb5967b1b24cd48f7801f94ad80cb49431df6b1d" + ] + }, + { + "address": "0x833589fcd6edb6e08f4c7c32d4f71b54bda02913", + "data": "0x00000000000000000000000000000000000000000000000000000000000003e8", + "topics": [ + "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", + "0x000000000000000000000000d723d9f752c19faf88a5fd2111a38d0cc5d395b0", + "0x0000000000000000000000000b55712de2ce8f93be30d53c03d48ea275cd14d0" + ] + } + ], + "status": "0x1" + } + } + } + }, + 
"payload_id": "0x0316ecb1aa1671b5" +}"#; + + let flashblock: FlashBlock = serde_json::from_str(raw).expect("deserialize"); + let serialized = serde_json::to_string(&flashblock).expect("serialize"); + let roundtrip: FlashBlock = serde_json::from_str(&serialized).expect("roundtrip"); + + assert_eq!(flashblock, roundtrip); + } +} diff --git a/crates/optimism/flashblocks/src/sequence.rs b/crates/optimism/flashblocks/src/sequence.rs index fff4bd84a4..abf9e6d514 100644 --- a/crates/optimism/flashblocks/src/sequence.rs +++ b/crates/optimism/flashblocks/src/sequence.rs @@ -1,41 +1,61 @@ -use crate::{ExecutionPayloadBaseV1, FlashBlock, FlashBlockCompleteSequenceRx}; -use alloy_eips::eip2718::WithEncoded; -use alloy_primitives::B256; +use crate::{FlashBlock, FlashBlockCompleteSequenceRx}; +use alloy_primitives::{Bytes, B256}; use alloy_rpc_types_engine::PayloadId; use core::mem; use eyre::{bail, OptionExt}; -use reth_primitives_traits::{Recovered, SignedTransaction}; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; +use reth_revm::cached::CachedReads; use std::{collections::BTreeMap, ops::Deref}; use tokio::sync::broadcast; -use tracing::{debug, trace, warn}; +use tracing::*; /// The size of the broadcast channel for completed flashblock sequences. const FLASHBLOCK_SEQUENCE_CHANNEL_SIZE: usize = 128; -/// An ordered B-tree keeping the track of a sequence of [`FlashBlock`]s by their indices. -#[derive(Debug)] -pub struct FlashBlockPendingSequence { - /// tracks the individual flashblocks in order - /// - /// With a blocktime of 2s and flashblock tick-rate of 200ms plus one extra flashblock per new - /// pending block, we expect 11 flashblocks per slot. - inner: BTreeMap>, - /// Broadcasts flashblocks to subscribers. - block_broadcaster: broadcast::Sender, - /// Optional properly computed state root for the current sequence. - state_root: Option, +/// Outcome from executing a flashblock sequence. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct SequenceExecutionOutcome { + /// The block hash of the executed pending block + pub block_hash: B256, + /// Properly computed state root + pub state_root: B256, } -impl FlashBlockPendingSequence -where - T: SignedTransaction, -{ +/// An ordered B-tree keeping the track of a sequence of [`FlashBlock`]s by their indices. +#[derive(Debug)] +pub struct FlashBlockPendingSequence { + /// tracks the individual flashblocks in order + inner: BTreeMap, + /// Broadcasts flashblocks to subscribers. + block_broadcaster: broadcast::Sender, + /// Optional execution outcome from building the current sequence. + execution_outcome: Option, + /// Cached state reads for the current block. + /// Current `PendingFlashBlock` is built out of a sequence of `FlashBlocks`, and executed again + /// when fb received on top of the same block. Avoid redundant I/O across multiple + /// executions within the same block. + cached_reads: Option, +} + +impl FlashBlockPendingSequence { /// Create a new pending sequence. pub fn new() -> Self { // Note: if the channel is full, send will not block but rather overwrite the oldest // messages. Order is preserved. let (tx, _) = broadcast::channel(FLASHBLOCK_SEQUENCE_CHANNEL_SIZE); - Self { inner: BTreeMap::new(), block_broadcaster: tx, state_root: None } + Self { + inner: BTreeMap::new(), + block_broadcaster: tx, + execution_outcome: None, + cached_reads: None, + } + } + + /// Returns the sender half of the [`FlashBlockCompleteSequence`] channel. 
+ pub const fn block_sequence_broadcaster( + &self, + ) -> &broadcast::Sender { + &self.block_broadcaster } /// Gets a subscriber to the flashblock sequences produced. @@ -43,91 +63,55 @@ where self.block_broadcaster.subscribe() } - // Clears the state and broadcasts the blocks produced to subscribers. - fn clear_and_broadcast_blocks(&mut self) { - let flashblocks = mem::take(&mut self.inner); - - // If there are any subscribers, send the flashblocks to them. - if self.block_broadcaster.receiver_count() > 0 { - let flashblocks = match FlashBlockCompleteSequence::new( - flashblocks.into_iter().map(|block| block.1.into()).collect(), - self.state_root, - ) { - Ok(flashblocks) => flashblocks, - Err(err) => { - debug!(target: "flashblocks", error = ?err, "Failed to create full flashblock complete sequence"); - return; - } - }; - - // Note: this should only ever fail if there are no receivers. This can happen if - // there is a race condition between the clause right above and this - // one. We can simply warn the user and continue. - if let Err(err) = self.block_broadcaster.send(flashblocks) { - warn!(target: "flashblocks", error = ?err, "Failed to send flashblocks to subscribers"); - } - } - } - /// Inserts a new block into the sequence. /// /// A [`FlashBlock`] with index 0 resets the set. - pub fn insert(&mut self, flashblock: FlashBlock) -> eyre::Result<()> { + pub fn insert(&mut self, flashblock: FlashBlock) { if flashblock.index == 0 { - trace!(number=%flashblock.block_number(), "Tracking new flashblock sequence"); - - // Flash block at index zero resets the whole state. - self.clear_and_broadcast_blocks(); - - self.inner.insert(flashblock.index, PreparedFlashBlock::new(flashblock)?); - return Ok(()) + trace!(target: "flashblocks", number=%flashblock.block_number(), "Tracking new flashblock sequence"); + self.inner.insert(flashblock.index, flashblock); + return; } // only insert if we previously received the same block and payload, assume we received // index 0 - let same_block = self.block_number() == Some(flashblock.metadata.block_number); + let same_block = self.block_number() == Some(flashblock.block_number()); let same_payload = self.payload_id() == Some(flashblock.payload_id); if same_block && same_payload { - trace!(number=%flashblock.block_number(), index = %flashblock.index, block_count = self.inner.len() ,"Received followup flashblock"); - self.inner.insert(flashblock.index, PreparedFlashBlock::new(flashblock)?); + trace!(target: "flashblocks", number=%flashblock.block_number(), index = %flashblock.index, block_count = self.inner.len() ,"Received followup flashblock"); + self.inner.insert(flashblock.index, flashblock); } else { - trace!(number=%flashblock.block_number(), index = %flashblock.index, current=?self.block_number() ,"Ignoring untracked flashblock following"); + trace!(target: "flashblocks", number=%flashblock.block_number(), index = %flashblock.index, current=?self.block_number() ,"Ignoring untracked flashblock following"); } - - Ok(()) } - /// Set state root - pub const fn set_state_root(&mut self, state_root: Option) { - self.state_root = state_root; + /// Set execution outcome from building the flashblock sequence + pub const fn set_execution_outcome( + &mut self, + execution_outcome: Option, + ) { + self.execution_outcome = execution_outcome; } - /// Iterator over sequence of executable transactions. - /// - /// A flashblocks is not ready if there's missing previous flashblocks, i.e. 
there's a gap in - /// the sequence - /// - /// Note: flashblocks start at `index 0`. - pub fn ready_transactions(&self) -> impl Iterator>> + '_ { - self.inner - .values() - .enumerate() - .take_while(|(idx, block)| { - // flashblock index 0 is the first flashblock - block.block().index == *idx as u64 - }) - .flat_map(|(_, block)| block.txs.clone()) + /// Set cached reads for this sequence + pub fn set_cached_reads(&mut self, cached_reads: CachedReads) { + self.cached_reads = Some(cached_reads); + } + + /// Removes the cached reads for this sequence + pub const fn take_cached_reads(&mut self) -> Option { + self.cached_reads.take() } /// Returns the first block number pub fn block_number(&self) -> Option { - Some(self.inner.values().next()?.block().metadata.block_number) + Some(self.inner.values().next()?.block_number()) } /// Returns the payload base of the first tracked flashblock. - pub fn payload_base(&self) -> Option { - self.inner.values().next()?.block().base.clone() + pub fn payload_base(&self) -> Option { + self.inner.values().next()?.base.clone() } /// Returns the number of tracked flashblocks. @@ -137,35 +121,55 @@ where /// Returns the reference to the last flashblock. pub fn last_flashblock(&self) -> Option<&FlashBlock> { - self.inner.last_key_value().map(|(_, b)| &b.block) + self.inner.last_key_value().map(|(_, b)| b) } /// Returns the current/latest flashblock index in the sequence pub fn index(&self) -> Option { - Some(self.inner.values().last()?.block().index) + Some(self.inner.values().last()?.index) } /// Returns the payload id of the first tracked flashblock in the current sequence. pub fn payload_id(&self) -> Option { - Some(self.inner.values().next()?.block().payload_id) + Some(self.inner.values().next()?.payload_id) + } + + /// Finalizes the current pending sequence and returns it as a complete sequence. + /// + /// Clears the internal state and returns an error if the sequence is empty or validation fails. + pub fn finalize(&mut self) -> eyre::Result { + if self.inner.is_empty() { + bail!("Cannot finalize empty flashblock sequence"); + } + + let flashblocks = mem::take(&mut self.inner); + let execution_outcome = mem::take(&mut self.execution_outcome); + self.cached_reads = None; + + FlashBlockCompleteSequence::new(flashblocks.into_values().collect(), execution_outcome) + } + + /// Returns an iterator over all flashblocks in the sequence. + pub fn flashblocks(&self) -> impl Iterator { + self.inner.values() } } -impl Default for FlashBlockPendingSequence -where - T: SignedTransaction, -{ +impl Default for FlashBlockPendingSequence { fn default() -> Self { Self::new() } } /// A complete sequence of flashblocks, often corresponding to a full block. -/// Ensure invariants of a complete flashblocks sequence. +/// +/// Ensures invariants of a complete flashblocks sequence. +/// If this entire sequence of flashblocks was executed on top of latest block, this also includes +/// the execution outcome with block hash and state root. #[derive(Debug, Clone)] pub struct FlashBlockCompleteSequence { inner: Vec, - /// Optional state root for the current sequence - state_root: Option, + /// Optional execution outcome from building the flashblock sequence + execution_outcome: Option, } impl FlashBlockCompleteSequence { @@ -174,7 +178,10 @@ impl FlashBlockCompleteSequence { /// * vector is not empty /// * first flashblock have the base payload /// * sequence of flashblocks is sound (successive index from 0, same payload id, ...) 
- pub fn new(blocks: Vec, state_root: Option) -> eyre::Result { + pub fn new( + blocks: Vec, + execution_outcome: Option, + ) -> eyre::Result { let first_block = blocks.first().ok_or_eyre("No flashblocks in sequence")?; // Ensure that first flashblock have base @@ -184,21 +191,21 @@ impl FlashBlockCompleteSequence { if !blocks.iter().enumerate().all(|(idx, block)| { idx == block.index as usize && block.payload_id == first_block.payload_id && - block.metadata.block_number == first_block.metadata.block_number + block.block_number() == first_block.block_number() }) { bail!("Flashblock inconsistencies detected in sequence"); } - Ok(Self { inner: blocks, state_root }) + Ok(Self { inner: blocks, execution_outcome }) } /// Returns the block number pub fn block_number(&self) -> u64 { - self.inner.first().unwrap().metadata.block_number + self.inner.first().unwrap().block_number() } /// Returns the payload base of the first flashblock. - pub fn payload_base(&self) -> &ExecutionPayloadBaseV1 { + pub fn payload_base(&self) -> &OpFlashblockPayloadBase { self.inner.first().unwrap().base.as_ref().unwrap() } @@ -212,9 +219,22 @@ impl FlashBlockCompleteSequence { self.inner.last().unwrap() } - /// Returns the state root for the current sequence - pub const fn state_root(&self) -> Option { - self.state_root + /// Returns the execution outcome of the sequence. + pub const fn execution_outcome(&self) -> Option { + self.execution_outcome + } + + /// Updates execution outcome of the sequence. + pub const fn set_execution_outcome( + &mut self, + execution_outcome: Option, + ) { + self.execution_outcome = execution_outcome; + } + + /// Returns all transactions from all flashblocks in the sequence + pub fn all_transactions(&self) -> Vec { + self.inner.iter().flat_map(|fb| fb.diff.transactions.iter().cloned()).collect() } } @@ -226,169 +246,437 @@ impl Deref for FlashBlockCompleteSequence { } } -impl TryFrom> for FlashBlockCompleteSequence { +impl TryFrom for FlashBlockCompleteSequence { type Error = eyre::Error; - fn try_from(sequence: FlashBlockPendingSequence) -> Result { - Self::new( - sequence.inner.into_values().map(|block| block.block().clone()).collect::>(), - sequence.state_root, - ) - } -} - -#[derive(Debug)] -struct PreparedFlashBlock { - /// The prepared transactions, ready for execution - txs: Vec>>, - /// The tracked flashblock - block: FlashBlock, -} - -impl PreparedFlashBlock { - const fn block(&self) -> &FlashBlock { - &self.block - } -} - -impl From> for FlashBlock { - fn from(val: PreparedFlashBlock) -> Self { - val.block - } -} - -impl PreparedFlashBlock -where - T: SignedTransaction, -{ - /// Creates a flashblock that is ready for execution by preparing all transactions - /// - /// Returns an error if decoding or signer recovery fails. 
- fn new(block: FlashBlock) -> eyre::Result { - let mut txs = Vec::with_capacity(block.diff.transactions.len()); - for encoded in block.diff.transactions.iter().cloned() { - let tx = T::decode_2718_exact(encoded.as_ref())?; - let signer = tx.try_recover()?; - let tx = WithEncoded::new(encoded, tx.with_signer(signer)); - txs.push(tx); - } - - Ok(Self { txs, block }) - } -} - -impl Deref for PreparedFlashBlock { - type Target = FlashBlock; - - fn deref(&self) -> &Self::Target { - &self.block + fn try_from(sequence: FlashBlockPendingSequence) -> Result { + Self::new(sequence.inner.into_values().collect(), sequence.execution_outcome) } } #[cfg(test)] mod tests { use super::*; - use crate::ExecutionPayloadFlashblockDeltaV1; - use alloy_consensus::{ - transaction::SignerRecoverable, EthereumTxEnvelope, EthereumTypedTransaction, TxEip1559, - }; - use alloy_eips::Encodable2718; - use alloy_primitives::{hex, Signature, TxKind, U256}; + use crate::test_utils::TestFlashBlockFactory; - #[test] - fn test_sequence_stops_before_gap() { - let mut sequence = FlashBlockPendingSequence::new(); - let tx = EthereumTxEnvelope::new_unhashed( - EthereumTypedTransaction::::Eip1559(TxEip1559 { - chain_id: 4, - nonce: 26u64, - max_priority_fee_per_gas: 1500000000, - max_fee_per_gas: 1500000013, - gas_limit: 21_000u64, - to: TxKind::Call(hex!("61815774383099e24810ab832a5b2a5425c154d5").into()), - value: U256::from(3000000000000000000u64), - input: Default::default(), - access_list: Default::default(), - }), - Signature::new( - U256::from_be_bytes(hex!( - "59e6b67f48fb32e7e570dfb11e042b5ad2e55e3ce3ce9cd989c7e06e07feeafd" - )), - U256::from_be_bytes(hex!( - "016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469" - )), - true, - ), - ); - let tx = Recovered::new_unchecked(tx.clone(), tx.recover_signer_unchecked().unwrap()); + mod pending_sequence_insert { + use super::*; - sequence - .insert(FlashBlock { - payload_id: Default::default(), - index: 0, - base: None, - diff: ExecutionPayloadFlashblockDeltaV1 { - transactions: vec![tx.encoded_2718().into()], - ..Default::default() - }, - metadata: Default::default(), - }) - .unwrap(); + #[test] + fn test_insert_index_zero_creates_new_sequence() { + let mut sequence = FlashBlockPendingSequence::new(); + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let payload_id = fb0.payload_id; - sequence - .insert(FlashBlock { - payload_id: Default::default(), - index: 2, - base: None, - diff: Default::default(), - metadata: Default::default(), - }) - .unwrap(); + sequence.insert(fb0); - let actual_txs: Vec<_> = sequence.ready_transactions().collect(); - let expected_txs = vec![WithEncoded::new(tx.encoded_2718().into(), tx)]; - - assert_eq!(actual_txs, expected_txs); - } - - #[test] - fn test_sequence_sends_flashblocks_to_subscribers() { - let mut sequence = FlashBlockPendingSequence::>::new(); - let mut subscriber = sequence.subscribe_block_sequence(); - - for idx in 0..10 { - sequence - .insert(FlashBlock { - payload_id: Default::default(), - index: idx, - base: Some(ExecutionPayloadBaseV1::default()), - diff: Default::default(), - metadata: Default::default(), - }) - .unwrap(); + assert_eq!(sequence.count(), 1); + assert_eq!(sequence.block_number(), Some(100)); + assert_eq!(sequence.payload_id(), Some(payload_id)); } - assert_eq!(sequence.count(), 10); + #[test] + fn test_insert_followup_same_block_and_payload() { + let mut sequence = FlashBlockPendingSequence::new(); + let factory = TestFlashBlockFactory::new(); - // Then we 
don't receive anything until we insert a new flashblock - let no_flashblock = subscriber.try_recv(); - assert!(no_flashblock.is_err()); + let fb0 = factory.flashblock_at(0).build(); + sequence.insert(fb0.clone()); - // Let's insert a new flashblock with index 0 - sequence - .insert(FlashBlock { - payload_id: Default::default(), - index: 0, - base: Some(ExecutionPayloadBaseV1::default()), - diff: Default::default(), - metadata: Default::default(), - }) - .unwrap(); + let fb1 = factory.flashblock_after(&fb0).build(); + sequence.insert(fb1.clone()); - let flashblocks = subscriber.try_recv().unwrap(); - assert_eq!(flashblocks.count(), 10); + let fb2 = factory.flashblock_after(&fb1).build(); + sequence.insert(fb2); - for (idx, block) in flashblocks.iter().enumerate() { - assert_eq!(block.index, idx as u64); + assert_eq!(sequence.count(), 3); + assert_eq!(sequence.index(), Some(2)); + } + + #[test] + fn test_insert_ignores_different_block_number() { + let mut sequence = FlashBlockPendingSequence::new(); + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).build(); + sequence.insert(fb0.clone()); + + // Try to insert followup with different block number + let fb1 = factory.flashblock_after(&fb0).block_number(101).build(); + sequence.insert(fb1); + + assert_eq!(sequence.count(), 1); + assert_eq!(sequence.block_number(), Some(100)); + } + + #[test] + fn test_insert_ignores_different_payload_id() { + let mut sequence = FlashBlockPendingSequence::new(); + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).build(); + let payload_id1 = fb0.payload_id; + sequence.insert(fb0.clone()); + + // Try to insert followup with different payload_id + let payload_id2 = alloy_rpc_types_engine::PayloadId::new([2u8; 8]); + let fb1 = factory.flashblock_after(&fb0).payload_id(payload_id2).build(); + sequence.insert(fb1); + + assert_eq!(sequence.count(), 1); + assert_eq!(sequence.payload_id(), Some(payload_id1)); + } + + #[test] + fn test_insert_maintains_btree_order() { + let mut sequence = FlashBlockPendingSequence::new(); + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).build(); + sequence.insert(fb0.clone()); + + let fb2 = factory.flashblock_after(&fb0).index(2).build(); + sequence.insert(fb2); + + let fb1 = factory.flashblock_after(&fb0).build(); + sequence.insert(fb1); + + let indices: Vec = sequence.flashblocks().map(|fb| fb.index).collect(); + assert_eq!(indices, vec![0, 1, 2]); + } + } + + mod pending_sequence_finalize { + use super::*; + + #[test] + fn test_finalize_empty_sequence_fails() { + let mut sequence = FlashBlockPendingSequence::new(); + let result = sequence.finalize(); + + assert!(result.is_err()); + assert_eq!( + result.unwrap_err().to_string(), + "Cannot finalize empty flashblock sequence" + ); + } + + #[test] + fn test_finalize_clears_pending_state() { + let mut sequence = FlashBlockPendingSequence::new(); + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).build(); + sequence.insert(fb0); + + assert_eq!(sequence.count(), 1); + + let _complete = sequence.finalize().unwrap(); + + // After finalize, sequence should be empty + assert_eq!(sequence.count(), 0); + assert_eq!(sequence.block_number(), None); + } + + #[test] + fn test_finalize_preserves_execution_outcome() { + let mut sequence = FlashBlockPendingSequence::new(); + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).build(); + sequence.insert(fb0); + + let outcome = + 
SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; + sequence.set_execution_outcome(Some(outcome)); + + let complete = sequence.finalize().unwrap(); + + assert_eq!(complete.execution_outcome(), Some(outcome)); + } + + #[test] + fn test_finalize_clears_cached_reads() { + let mut sequence = FlashBlockPendingSequence::new(); + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).build(); + sequence.insert(fb0); + + let cached_reads = CachedReads::default(); + sequence.set_cached_reads(cached_reads); + assert!(sequence.take_cached_reads().is_some()); + + let _complete = sequence.finalize().unwrap(); + + // Cached reads should be cleared + assert!(sequence.take_cached_reads().is_none()); + } + + #[test] + fn test_finalize_multiple_times_after_refill() { + let mut sequence = FlashBlockPendingSequence::new(); + let factory = TestFlashBlockFactory::new(); + + // First sequence + let fb0 = factory.flashblock_at(0).build(); + sequence.insert(fb0); + + let complete1 = sequence.finalize().unwrap(); + assert_eq!(complete1.count(), 1); + + // Add new sequence for next block + let fb1 = factory.flashblock_for_next_block(&complete1.last().clone()).build(); + sequence.insert(fb1); + + let complete2 = sequence.finalize().unwrap(); + assert_eq!(complete2.count(), 1); + assert_eq!(complete2.block_number(), 101); + } + } + + mod complete_sequence_invariants { + use super::*; + + #[test] + fn test_new_empty_sequence_fails() { + let result = FlashBlockCompleteSequence::new(vec![], None); + assert!(result.is_err()); + assert_eq!(result.unwrap_err().to_string(), "No flashblocks in sequence"); + } + + #[test] + fn test_new_requires_base_at_index_zero() { + let factory = TestFlashBlockFactory::new(); + // Use builder() with index 1 first to create a flashblock, then change its index to 0 + // to bypass the auto-base creation logic + let mut fb0_no_base = factory.flashblock_at(1).build(); + fb0_no_base.index = 0; + fb0_no_base.base = None; + + let result = FlashBlockCompleteSequence::new(vec![fb0_no_base], None); + assert!(result.is_err()); + assert_eq!(result.unwrap_err().to_string(), "Flashblock at index 0 has no base"); + } + + #[test] + fn test_new_validates_successive_indices() { + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).build(); + // Skip index 1, go straight to 2 + let fb2 = factory.flashblock_after(&fb0).index(2).build(); + + let result = FlashBlockCompleteSequence::new(vec![fb0, fb2], None); + assert!(result.is_err()); + assert_eq!( + result.unwrap_err().to_string(), + "Flashblock inconsistencies detected in sequence" + ); + } + + #[test] + fn test_new_validates_same_block_number() { + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).build(); + let fb1 = factory.flashblock_after(&fb0).block_number(101).build(); + + let result = FlashBlockCompleteSequence::new(vec![fb0, fb1], None); + assert!(result.is_err()); + assert_eq!( + result.unwrap_err().to_string(), + "Flashblock inconsistencies detected in sequence" + ); + } + + #[test] + fn test_new_validates_same_payload_id() { + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).build(); + let payload_id2 = alloy_rpc_types_engine::PayloadId::new([2u8; 8]); + let fb1 = factory.flashblock_after(&fb0).payload_id(payload_id2).build(); + + let result = FlashBlockCompleteSequence::new(vec![fb0, fb1], None); + assert!(result.is_err()); + assert_eq!( + result.unwrap_err().to_string(), + "Flashblock 
inconsistencies detected in sequence" + ); + } + + #[test] + fn test_new_valid_single_flashblock() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + + let result = FlashBlockCompleteSequence::new(vec![fb0], None); + assert!(result.is_ok()); + + let complete = result.unwrap(); + assert_eq!(complete.count(), 1); + assert_eq!(complete.block_number(), 100); + } + + #[test] + fn test_new_valid_multiple_flashblocks() { + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).build(); + let fb1 = factory.flashblock_after(&fb0).build(); + let fb2 = factory.flashblock_after(&fb1).build(); + + let result = FlashBlockCompleteSequence::new(vec![fb0, fb1, fb2], None); + assert!(result.is_ok()); + + let complete = result.unwrap(); + assert_eq!(complete.count(), 3); + assert_eq!(complete.last().index, 2); + } + + #[test] + fn test_all_transactions_aggregates_correctly() { + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory + .flashblock_at(0) + .transactions(vec![Bytes::from_static(&[1, 2, 3]), Bytes::from_static(&[4, 5, 6])]) + .build(); + + let fb1 = factory + .flashblock_after(&fb0) + .transactions(vec![Bytes::from_static(&[7, 8, 9])]) + .build(); + + let complete = FlashBlockCompleteSequence::new(vec![fb0, fb1], None).unwrap(); + let all_txs = complete.all_transactions(); + + assert_eq!(all_txs.len(), 3); + assert_eq!(all_txs[0], Bytes::from_static(&[1, 2, 3])); + assert_eq!(all_txs[1], Bytes::from_static(&[4, 5, 6])); + assert_eq!(all_txs[2], Bytes::from_static(&[7, 8, 9])); + } + + #[test] + fn test_payload_base_returns_first_block_base() { + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).build(); + let fb1 = factory.flashblock_after(&fb0).build(); + + let complete = FlashBlockCompleteSequence::new(vec![fb0.clone(), fb1], None).unwrap(); + + assert_eq!(complete.payload_base().block_number, fb0.base.unwrap().block_number); + } + + #[test] + fn test_execution_outcome_mutation() { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + + let mut complete = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); + assert!(complete.execution_outcome().is_none()); + + let outcome = + SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; + complete.set_execution_outcome(Some(outcome)); + + assert_eq!(complete.execution_outcome(), Some(outcome)); + } + + #[test] + fn test_deref_provides_vec_access() { + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).build(); + let fb1 = factory.flashblock_after(&fb0).build(); + + let complete = FlashBlockCompleteSequence::new(vec![fb0, fb1], None).unwrap(); + + // Use deref to access Vec methods + assert_eq!(complete.len(), 2); + assert!(!complete.is_empty()); + } + } + + mod sequence_conversion { + use super::*; + + #[test] + fn test_try_from_pending_to_complete_valid() { + let mut pending = FlashBlockPendingSequence::new(); + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).build(); + pending.insert(fb0); + + let complete: Result = pending.try_into(); + assert!(complete.is_ok()); + assert_eq!(complete.unwrap().count(), 1); + } + + #[test] + fn test_try_from_pending_to_complete_empty_fails() { + let pending = FlashBlockPendingSequence::new(); + + let complete: Result = pending.try_into(); + assert!(complete.is_err()); + } + + #[test] + fn test_try_from_preserves_execution_outcome() { + let mut pending = 
FlashBlockPendingSequence::new(); + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).build(); + pending.insert(fb0); + + let outcome = + SequenceExecutionOutcome { block_hash: B256::random(), state_root: B256::random() }; + pending.set_execution_outcome(Some(outcome)); + + let complete: FlashBlockCompleteSequence = pending.try_into().unwrap(); + assert_eq!(complete.execution_outcome(), Some(outcome)); + } + } + + mod pending_sequence_helpers { + use super::*; + + #[test] + fn test_last_flashblock_returns_highest_index() { + let mut sequence = FlashBlockPendingSequence::new(); + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).build(); + sequence.insert(fb0.clone()); + + let fb1 = factory.flashblock_after(&fb0).build(); + sequence.insert(fb1); + + let last = sequence.last_flashblock().unwrap(); + assert_eq!(last.index, 1); + } + + #[test] + fn test_subscribe_block_sequence_channel() { + let sequence = FlashBlockPendingSequence::new(); + let mut rx = sequence.subscribe_block_sequence(); + + // Spawn a task that sends a complete sequence + let tx = sequence.block_sequence_broadcaster().clone(); + std::thread::spawn(move || { + let factory = TestFlashBlockFactory::new(); + let fb0 = factory.flashblock_at(0).build(); + let complete = FlashBlockCompleteSequence::new(vec![fb0], None).unwrap(); + let _ = tx.send(complete); + }); + + // Should receive the broadcast + let received = rx.blocking_recv(); + assert!(received.is_ok()); + assert_eq!(received.unwrap().count(), 1); } } } diff --git a/crates/optimism/flashblocks/src/service.rs b/crates/optimism/flashblocks/src/service.rs index 7e442470d9..4eed74683f 100644 --- a/crates/optimism/flashblocks/src/service.rs +++ b/crates/optimism/flashblocks/src/service.rs @@ -1,34 +1,20 @@ use crate::{ - sequence::FlashBlockPendingSequence, - worker::{BuildArgs, FlashBlockBuilder}, - ExecutionPayloadBaseV1, FlashBlock, FlashBlockCompleteSequenceRx, InProgressFlashBlockRx, - PendingFlashBlock, + cache::SequenceManager, worker::FlashBlockBuilder, FlashBlock, FlashBlockCompleteSequence, + FlashBlockCompleteSequenceRx, InProgressFlashBlockRx, PendingFlashBlock, }; -use alloy_eips::eip2718::WithEncoded; use alloy_primitives::B256; use futures_util::{FutureExt, Stream, StreamExt}; -use metrics::Histogram; -use reth_chain_state::{CanonStateNotification, CanonStateNotifications, CanonStateSubscriptions}; +use metrics::{Gauge, Histogram}; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; use reth_evm::ConfigureEvm; use reth_metrics::Metrics; -use reth_primitives_traits::{ - AlloyBlockHeader, BlockTy, HeaderTy, NodePrimitives, ReceiptTy, Recovered, -}; +use reth_primitives_traits::{AlloyBlockHeader, BlockTy, HeaderTy, NodePrimitives, ReceiptTy}; use reth_revm::cached::CachedReads; use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; use reth_tasks::TaskExecutor; -use std::{ - pin::Pin, - task::{ready, Context, Poll}, - time::Instant, -}; -use tokio::{ - pin, - sync::{oneshot, watch}, -}; -use tracing::{debug, trace, warn}; - -pub(crate) const FB_STATE_ROOT_FROM_INDEX: usize = 9; +use std::{sync::Arc, time::Instant}; +use tokio::sync::{oneshot, watch}; +use tracing::*; /// The `FlashBlockService` maintains an in-memory [`PendingFlashBlock`] built out of a sequence of /// [`FlashBlock`]s. 
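A minimal sketch (editorial aside, not part of the diff) of how a downstream consumer might drain the completed-sequence broadcast returned by `subscribe_block_sequence()`. It assumes `FlashBlockCompleteSequenceRx` is the `tokio::sync::broadcast::Receiver<FlashBlockCompleteSequence>` alias implied by the sender type shown above; because the channel overwrites its oldest entries once `FLASHBLOCK_SEQUENCE_CHANNEL_SIZE` is exceeded, a reader has to tolerate lag rather than assume it observes every sequence:

// Illustrative only: a consumer of the flashblock sequence broadcast.
async fn consume_sequences(mut rx: FlashBlockCompleteSequenceRx) {
    loop {
        match rx.recv().await {
            Ok(sequence) => {
                // Invariants enforced in `FlashBlockCompleteSequence::new` guarantee a
                // non-empty, gap-free sequence whose first flashblock carries the base.
                let number = sequence.block_number();
                let txs = sequence.all_transactions();
                tracing::trace!(target: "flashblocks", number, tx_count = txs.len(), "received complete sequence");
            }
            // The sender overwrote entries this receiver had not read yet; continue from the newest.
            Err(tokio::sync::broadcast::error::RecvError::Lagged(skipped)) => {
                tracing::warn!(target: "flashblocks", skipped, "lagged behind flashblock sequences");
            }
            Err(tokio::sync::broadcast::error::RecvError::Closed) => break,
        }
    }
}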
@@ -36,28 +22,216 @@ pub(crate) const FB_STATE_ROOT_FROM_INDEX: usize = 9; pub struct FlashBlockService< N: NodePrimitives, S, - EvmConfig: ConfigureEvm, + EvmConfig: ConfigureEvm + Unpin>, Provider, > { - rx: S, - current: Option>, - blocks: FlashBlockPendingSequence, - rebuild: bool, - builder: FlashBlockBuilder, - canon_receiver: CanonStateNotifications, - spawner: TaskExecutor, - job: Option>, - /// Cached state reads for the current block. - /// Current `PendingFlashBlock` is built out of a sequence of `FlashBlocks`, and executed again - /// when fb received on top of the same block. Avoid redundant I/O across multiple - /// executions within the same block. - cached_state: Option<(B256, CachedReads)>, - /// Signals when a block build is in progress + /// Incoming flashblock stream. + incoming_flashblock_rx: S, + /// Signals when a block build is in progress. in_progress_tx: watch::Sender>, + /// Broadcast channel to forward received flashblocks from the subscription. + received_flashblocks_tx: tokio::sync::broadcast::Sender>, + + /// Executes flashblock sequences to build pending blocks. + builder: FlashBlockBuilder, + /// Task executor for spawning block build jobs. + spawner: TaskExecutor, + /// Currently running block build job with start time and result receiver. + job: Option>, + /// Manages flashblock sequences with caching and intelligent build selection. + sequences: SequenceManager, + /// `FlashBlock` service's metrics metrics: FlashBlockServiceMetrics, - /// Enable state root calculation from flashblock with index [`FB_STATE_ROOT_FROM_INDEX`] - compute_state_root: bool, +} + +impl FlashBlockService +where + N: NodePrimitives, + S: Stream> + Unpin + 'static, + EvmConfig: ConfigureEvm + Unpin> + + Clone + + 'static, + Provider: StateProviderFactory + + BlockReaderIdExt< + Header = HeaderTy, + Block = BlockTy, + Transaction = N::SignedTx, + Receipt = ReceiptTy, + > + Unpin + + Clone + + 'static, +{ + /// Constructs a new `FlashBlockService` that receives [`FlashBlock`]s from `rx` stream. + pub fn new( + incoming_flashblock_rx: S, + evm_config: EvmConfig, + provider: Provider, + spawner: TaskExecutor, + compute_state_root: bool, + ) -> Self { + let (in_progress_tx, _) = watch::channel(None); + let (received_flashblocks_tx, _) = tokio::sync::broadcast::channel(128); + Self { + incoming_flashblock_rx, + in_progress_tx, + received_flashblocks_tx, + builder: FlashBlockBuilder::new(evm_config, provider), + spawner, + job: None, + sequences: SequenceManager::new(compute_state_root), + metrics: FlashBlockServiceMetrics::default(), + } + } + + /// Returns the sender half to the received flashblocks. + pub const fn flashblocks_broadcaster( + &self, + ) -> &tokio::sync::broadcast::Sender> { + &self.received_flashblocks_tx + } + + /// Returns the sender half to the flashblock sequence. + pub const fn block_sequence_broadcaster( + &self, + ) -> &tokio::sync::broadcast::Sender { + self.sequences.block_sequence_broadcaster() + } + + /// Returns a subscriber to the flashblock sequence. + pub fn subscribe_block_sequence(&self) -> FlashBlockCompleteSequenceRx { + self.sequences.subscribe_block_sequence() + } + + /// Returns a receiver that signals when a flashblock is being built. + pub fn subscribe_in_progress(&self) -> InProgressFlashBlockRx { + self.in_progress_tx.subscribe() + } + + /// Drives the service and sends new blocks to the receiver. + /// + /// This loop: + /// 1. Checks if any build job has completed and processes results + /// 2. 
Receives and batches all immediately available flashblocks + /// 3. Attempts to build a block from the complete sequence + /// + /// Note: this should be spawned + pub async fn run(mut self, tx: watch::Sender>>) { + loop { + tokio::select! { + // Event 1: job exists, listen to job results + Some(result) = async { + match self.job.as_mut() { + Some((_, rx)) => rx.await.ok(), + None => std::future::pending().await, + } + } => { + let (start_time, _) = self.job.take().unwrap(); + let _ = self.in_progress_tx.send(None); + + match result { + Ok(Some((pending, cached_reads))) => { + let parent_hash = pending.parent_hash(); + self.sequences + .on_build_complete(parent_hash, Some((pending.clone(), cached_reads))); + + let elapsed = start_time.elapsed(); + self.metrics.execution_duration.record(elapsed.as_secs_f64()); + + let _ = tx.send(Some(pending)); + } + Ok(None) => { + trace!(target: "flashblocks", "Build job returned None"); + } + Err(err) => { + warn!(target: "flashblocks", %err, "Build job failed"); + } + } + } + + // Event 2: New flashblock arrives (batch process all ready flashblocks) + result = self.incoming_flashblock_rx.next() => { + match result { + Some(Ok(flashblock)) => { + // Process first flashblock + self.process_flashblock(flashblock); + + // Batch process all other immediately available flashblocks + while let Some(result) = self.incoming_flashblock_rx.next().now_or_never().flatten() { + match result { + Ok(fb) => self.process_flashblock(fb), + Err(err) => warn!(target: "flashblocks", %err, "Error receiving flashblock"), + } + } + + self.try_start_build_job(); + } + Some(Err(err)) => { + warn!(target: "flashblocks", %err, "Error receiving flashblock"); + } + None => { + warn!(target: "flashblocks", "Flashblock stream ended"); + break; + } + } + } + } + } + } + + /// Processes a single flashblock: notifies subscribers, records metrics, and inserts into + /// sequence. + fn process_flashblock(&mut self, flashblock: FlashBlock) { + self.notify_received_flashblock(&flashblock); + + if flashblock.index == 0 { + self.metrics.last_flashblock_length.record(self.sequences.pending().count() as f64); + } + + if let Err(err) = self.sequences.insert_flashblock(flashblock) { + trace!(target: "flashblocks", %err, "Failed to insert flashblock"); + } + } + + /// Notifies all subscribers about the received flashblock. + fn notify_received_flashblock(&self, flashblock: &FlashBlock) { + if self.received_flashblocks_tx.receiver_count() > 0 { + let _ = self.received_flashblocks_tx.send(Arc::new(flashblock.clone())); + } + } + + /// Attempts to build a block if no job is currently running and a buildable sequence exists. 
+ fn try_start_build_job(&mut self) { + if self.job.is_some() { + return; // Already building + } + + let Some(latest) = self.builder.provider().latest_header().ok().flatten() else { + return; + }; + + let Some(args) = self.sequences.next_buildable_args(latest.hash(), latest.timestamp()) + else { + return; // Nothing buildable + }; + + // Spawn build job + let fb_info = FlashBlockBuildInfo { + parent_hash: args.base.parent_hash, + index: args.last_flashblock_index, + block_number: args.base.block_number, + }; + self.metrics.current_block_height.set(fb_info.block_number as f64); + self.metrics.current_index.set(fb_info.index as f64); + let _ = self.in_progress_tx.send(Some(fb_info)); + + let (tx, rx) = oneshot::channel(); + let builder = self.builder.clone(); + self.spawner.spawn_blocking(Box::pin(async move { + let _ = tx.send(builder.execute(args)); + })); + self.job = Some((Instant::now(), rx)); + } } /// Information for a flashblock currently built @@ -71,279 +245,6 @@ pub struct FlashBlockBuildInfo { pub block_number: u64, } -impl FlashBlockService -where - N: NodePrimitives, - S: Stream> + Unpin + 'static, - EvmConfig: ConfigureEvm + Unpin> - + Clone - + 'static, - Provider: StateProviderFactory - + CanonStateSubscriptions - + BlockReaderIdExt< - Header = HeaderTy, - Block = BlockTy, - Transaction = N::SignedTx, - Receipt = ReceiptTy, - > + Unpin - + Clone - + 'static, -{ - /// Constructs a new `FlashBlockService` that receives [`FlashBlock`]s from `rx` stream. - pub fn new(rx: S, evm_config: EvmConfig, provider: Provider, spawner: TaskExecutor) -> Self { - let (in_progress_tx, _) = watch::channel(None); - Self { - rx, - current: None, - blocks: FlashBlockPendingSequence::new(), - canon_receiver: provider.subscribe_to_canonical_state(), - builder: FlashBlockBuilder::new(evm_config, provider), - rebuild: false, - spawner, - job: None, - cached_state: None, - in_progress_tx, - metrics: FlashBlockServiceMetrics::default(), - compute_state_root: false, - } - } - - /// Enable state root calculation from flashblock - pub const fn compute_state_root(mut self, enable_state_root: bool) -> Self { - self.compute_state_root = enable_state_root; - self - } - - /// Returns a subscriber to the flashblock sequence. - pub fn subscribe_block_sequence(&self) -> FlashBlockCompleteSequenceRx { - self.blocks.subscribe_block_sequence() - } - - /// Returns a receiver that signals when a flashblock is being built. - pub fn subscribe_in_progress(&self) -> InProgressFlashBlockRx { - self.in_progress_tx.subscribe() - } - - /// Drives the services and sends new blocks to the receiver - /// - /// Note: this should be spawned - pub async fn run(mut self, tx: tokio::sync::watch::Sender>>) { - while let Some(block) = self.next().await { - if let Ok(block) = block.inspect_err(|e| tracing::error!("{e}")) { - let _ = tx.send(block).inspect_err(|e| tracing::error!("{e}")); - } - } - - warn!("Flashblock service has stopped"); - } - - /// Returns the [`BuildArgs`] made purely out of [`FlashBlock`]s that were received earlier. - /// - /// Returns `None` if the flashblock have no `base` or the base is not a child block of latest. 
- fn build_args( - &mut self, - ) -> Option< - BuildArgs< - impl IntoIterator>> - + use, - >, - > { - let Some(base) = self.blocks.payload_base() else { - trace!( - flashblock_number = ?self.blocks.block_number(), - count = %self.blocks.count(), - "Missing flashblock payload base" - ); - - return None - }; - - // attempt an initial consecutive check - if let Some(latest) = self.builder.provider().latest_header().ok().flatten() && - latest.hash() != base.parent_hash - { - trace!(flashblock_parent=?base.parent_hash, flashblock_number=base.block_number, local_latest=?latest.num_hash(), "Skipping non consecutive build attempt"); - return None - } - - let Some(last_flashblock) = self.blocks.last_flashblock() else { - trace!(flashblock_number = ?self.blocks.block_number(), count = %self.blocks.count(), "Missing last flashblock"); - return None - }; - - // Check if state root must be computed - let compute_state_root = - self.compute_state_root && self.blocks.index() >= Some(FB_STATE_ROOT_FROM_INDEX as u64); - - Some(BuildArgs { - base, - transactions: self.blocks.ready_transactions().collect::>(), - cached_state: self.cached_state.take(), - last_flashblock_index: last_flashblock.index, - last_flashblock_hash: last_flashblock.diff.block_hash, - compute_state_root, - }) - } - - /// Takes out `current` [`PendingFlashBlock`] if `state` is not preceding it. - fn on_new_tip(&mut self, state: CanonStateNotification) -> Option> { - let tip = state.tip_checked()?; - let tip_hash = tip.hash(); - let current = self.current.take_if(|current| current.parent_hash() != tip_hash); - - // Prefill the cache with state from the new canonical tip, similar to payload/basic - let mut cached = CachedReads::default(); - let committed = state.committed(); - let new_execution_outcome = committed.execution_outcome(); - for (addr, acc) in new_execution_outcome.bundle_accounts_iter() { - if let Some(info) = acc.info.clone() { - // Pre-cache existing accounts and their storage (only changed accounts/storage) - let storage = - acc.storage.iter().map(|(key, slot)| (*key, slot.present_value)).collect(); - cached.insert_account(addr, info, storage); - } - } - self.cached_state = Some((tip_hash, cached)); - - current - } -} - -impl Stream for FlashBlockService -where - N: NodePrimitives, - S: Stream> + Unpin + 'static, - EvmConfig: ConfigureEvm + Unpin> - + Clone - + 'static, - Provider: StateProviderFactory - + CanonStateSubscriptions - + BlockReaderIdExt< - Header = HeaderTy, - Block = BlockTy, - Transaction = N::SignedTx, - Receipt = ReceiptTy, - > + Unpin - + Clone - + 'static, -{ - type Item = eyre::Result>>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.get_mut(); - - loop { - // drive pending build job to completion - let result = match this.job.as_mut() { - Some((now, rx)) => { - let result = ready!(rx.poll_unpin(cx)); - result.ok().map(|res| (*now, res)) - } - None => None, - }; - // reset job - this.job.take(); - // No build in progress - let _ = this.in_progress_tx.send(None); - - if let Some((now, result)) = result { - match result { - Ok(Some((new_pending, cached_reads))) => { - // update state root of the current sequence - this.blocks.set_state_root(new_pending.computed_state_root()); - - // built a new pending block - this.current = Some(new_pending.clone()); - // cache reads - this.cached_state = Some((new_pending.parent_hash(), cached_reads)); - this.rebuild = false; - - let elapsed = now.elapsed(); - this.metrics.execution_duration.record(elapsed.as_secs_f64()); - 
trace!( - parent_hash = %new_pending.block().parent_hash(), - block_number = new_pending.block().number(), - flash_blocks = this.blocks.count(), - ?elapsed, - "Built new block with flashblocks" - ); - - return Poll::Ready(Some(Ok(Some(new_pending)))); - } - Ok(None) => { - // nothing to do because tracked flashblock doesn't attach to latest - } - Err(err) => { - // we can ignore this error - debug!(%err, "failed to execute flashblock"); - } - } - } - - // consume new flashblocks while they're ready - while let Poll::Ready(Some(result)) = this.rx.poll_next_unpin(cx) { - match result { - Ok(flashblock) => { - if flashblock.index == 0 { - this.metrics.last_flashblock_length.record(this.blocks.count() as f64); - } - match this.blocks.insert(flashblock) { - Ok(_) => this.rebuild = true, - Err(err) => debug!(%err, "Failed to prepare flashblock"), - } - } - Err(err) => return Poll::Ready(Some(Err(err))), - } - } - - // update on new head block - if let Poll::Ready(Ok(state)) = { - let fut = this.canon_receiver.recv(); - pin!(fut); - fut.poll_unpin(cx) - } && let Some(current) = this.on_new_tip(state) - { - trace!( - parent_hash = %current.block().parent_hash(), - block_number = current.block().number(), - "Clearing current flashblock on new canonical block" - ); - - return Poll::Ready(Some(Ok(None))) - } - - if !this.rebuild && this.current.is_some() { - return Poll::Pending - } - - // try to build a block on top of latest - if let Some(args) = this.build_args() { - let now = Instant::now(); - - let fb_info = FlashBlockBuildInfo { - parent_hash: args.base.parent_hash, - index: args.last_flashblock_index, - block_number: args.base.block_number, - }; - // Signal that a flashblock build has started with build metadata - let _ = this.in_progress_tx.send(Some(fb_info)); - let (tx, rx) = oneshot::channel(); - let builder = this.builder.clone(); - - this.spawner.spawn_blocking(async move { - let _ = tx.send(builder.execute(args)); - }); - this.job.replace((now, rx)); - - // continue and poll the spawned job - continue - } - - return Poll::Pending - } - } -} - type BuildJob = (Instant, oneshot::Receiver, CachedReads)>>>); @@ -354,4 +255,8 @@ struct FlashBlockServiceMetrics { last_flashblock_length: Histogram, /// The duration applying flashblock state changes in seconds. execution_duration: Histogram, + /// Current block height. + current_block_height: Gauge, + /// Current flashblock index. + current_index: Gauge, } diff --git a/crates/optimism/flashblocks/src/test_utils.rs b/crates/optimism/flashblocks/src/test_utils.rs new file mode 100644 index 0000000000..1c2da1f7c8 --- /dev/null +++ b/crates/optimism/flashblocks/src/test_utils.rs @@ -0,0 +1,340 @@ +//! Test utilities for flashblocks. +//! +//! Provides a factory for creating test flashblocks with automatic timestamp management. +//! +//! # Examples +//! +//! ## Simple: Create a flashblock sequence for the same block +//! +//! ```ignore +//! let factory = FlashBlockTestFactory::new(2); // 2 second block time +//! let fb0 = factory.flashblock_at(0).build(); +//! let fb1 = factory.flashblock_after(&fb0).build(); +//! let fb2 = factory.flashblock_after(&fb1).build(); +//! ``` +//! +//! ## Create flashblocks with transactions +//! +//! ```ignore +//! let factory = FlashBlockTestFactory::new(2); +//! let fb0 = factory.flashblock_at(0).build(); +//! let txs = vec![Bytes::from_static(&[1, 2, 3])]; +//! let fb1 = factory.flashblock_after(&fb0).transactions(txs).build(); +//! ``` +//! +//! ## Test across multiple blocks (timestamps auto-increment) +//! +//! 
```ignore +//! let factory = FlashBlockTestFactory::new(2); // 2 second blocks +//! +//! // Block 100 at timestamp 1000000 +//! let fb0 = factory.flashblock_at(0).build(); +//! let fb1 = factory.flashblock_after(&fb0).build(); +//! +//! // Block 101 at timestamp 1000002 (auto-incremented by block_time) +//! let fb2 = factory.flashblock_for_next_block(&fb1).build(); +//! let fb3 = factory.flashblock_after(&fb2).build(); +//! ``` +//! +//! ## Full control with builder +//! +//! ```ignore +//! let factory = FlashBlockTestFactory::new(1); +//! let fb = factory.custom() +//! .block_number(100) +//! .parent_hash(specific_hash) +//! .state_root(computed_root) +//! .transactions(txs) +//! .build(); +//! ``` + +use crate::FlashBlock; +use alloy_primitives::{Address, Bloom, Bytes, B256, U256}; +use alloy_rpc_types_engine::PayloadId; +use op_alloy_rpc_types_engine::{ + OpFlashblockPayloadBase, OpFlashblockPayloadDelta, OpFlashblockPayloadMetadata, +}; + +/// Factory for creating test flashblocks with automatic timestamp management. +/// +/// Tracks `block_time` to automatically increment timestamps when creating new blocks. +/// Returns builders that can be further customized before calling `build()`. +/// +/// # Examples +/// +/// ```ignore +/// let factory = TestFlashBlockFactory::new(2); // 2 second block time +/// let fb0 = factory.flashblock_at(0).build(); +/// let fb1 = factory.flashblock_after(&fb0).build(); +/// let fb2 = factory.flashblock_for_next_block(&fb1).build(); // timestamp auto-increments +/// ``` +#[derive(Debug)] +pub(crate) struct TestFlashBlockFactory { + /// Block time in seconds (used to auto-increment timestamps) + block_time: u64, + /// Starting timestamp for the first block + base_timestamp: u64, + /// Current block number being tracked + current_block_number: u64, +} + +impl TestFlashBlockFactory { + /// Creates a new builder with the specified block time in seconds. + /// + /// # Arguments + /// + /// * `block_time` - Time between blocks in seconds (e.g., 2 for 2-second blocks) + /// + /// # Examples + /// + /// ```ignore + /// let factory = TestFlashBlockFactory::new(2); // 2 second blocks + /// let factory_fast = TestFlashBlockFactory::new(1); // 1 second blocks + /// ``` + pub(crate) fn new() -> Self { + Self { block_time: 2, base_timestamp: 1_000_000, current_block_number: 100 } + } + + pub(crate) fn with_block_time(mut self, block_time: u64) -> Self { + self.block_time = block_time; + self + } + + /// Creates a builder for a flashblock at the specified index (within the current block). + /// + /// Returns a builder with index set, allowing further customization before building. + /// + /// # Examples + /// + /// ```ignore + /// let factory = TestFlashBlockFactory::new(2); + /// let fb0 = factory.flashblock_at(0).build(); // Simple usage + /// let fb1 = factory.flashblock_at(1).state_root(specific_root).build(); // Customize + /// ``` + pub(crate) fn flashblock_at(&self, index: u64) -> TestFlashBlockBuilder { + self.builder().index(index).block_number(self.current_block_number) + } + + /// Creates a builder for a flashblock following the previous one in the same sequence. + /// + /// Automatically increments the index and maintains `block_number` and `payload_id`. + /// Returns a builder allowing further customization. 
+ /// + /// # Examples + /// + /// ```ignore + /// let factory = TestFlashBlockFactory::new(2); + /// let fb0 = factory.flashblock_at(0).build(); + /// let fb1 = factory.flashblock_after(&fb0).build(); // Simple + /// let fb2 = factory.flashblock_after(&fb1).transactions(txs).build(); // With txs + /// ``` + pub(crate) fn flashblock_after(&self, previous: &FlashBlock) -> TestFlashBlockBuilder { + let parent_hash = + previous.base.as_ref().map(|b| b.parent_hash).unwrap_or(previous.diff.block_hash); + + self.builder() + .index(previous.index + 1) + .block_number(previous.metadata.block_number) + .payload_id(previous.payload_id) + .parent_hash(parent_hash) + .timestamp(previous.base.as_ref().map(|b| b.timestamp).unwrap_or(self.base_timestamp)) + } + + /// Creates a builder for a flashblock for the next block, starting a new sequence at index 0. + /// + /// Increments block number, uses previous `block_hash` as `parent_hash`, generates new + /// `payload_id`, and automatically increments the timestamp by `block_time`. + /// Returns a builder allowing further customization. + /// + /// # Examples + /// + /// ```ignore + /// let factory = TestFlashBlockFactory::new(2); // 2 second blocks + /// let fb0 = factory.flashblock_at(0).build(); // Block 100, timestamp 1000000 + /// let fb1 = factory.flashblock_for_next_block(&fb0).build(); // Block 101, timestamp 1000002 + /// let fb2 = factory.flashblock_for_next_block(&fb1).transactions(txs).build(); // Customize + /// ``` + pub(crate) fn flashblock_for_next_block(&self, previous: &FlashBlock) -> TestFlashBlockBuilder { + let prev_timestamp = + previous.base.as_ref().map(|b| b.timestamp).unwrap_or(self.base_timestamp); + + self.builder() + .index(0) + .block_number(previous.metadata.block_number + 1) + .payload_id(PayloadId::new(B256::random().0[0..8].try_into().unwrap())) + .parent_hash(previous.diff.block_hash) + .timestamp(prev_timestamp + self.block_time) + } + + /// Returns a custom builder for full control over flashblock creation. + /// + /// Use this when the convenience methods don't provide enough control. + /// + /// # Examples + /// + /// ```ignore + /// let factory = TestFlashBlockFactory::new(2); + /// let fb = factory.builder() + /// .index(5) + /// .block_number(200) + /// .parent_hash(specific_hash) + /// .state_root(computed_root) + /// .build(); + /// ``` + pub(crate) fn builder(&self) -> TestFlashBlockBuilder { + TestFlashBlockBuilder { + index: 0, + block_number: self.current_block_number, + payload_id: PayloadId::new([1u8; 8]), + parent_hash: B256::random(), + timestamp: self.base_timestamp, + base: None, + block_hash: B256::random(), + state_root: B256::ZERO, + receipts_root: B256::ZERO, + logs_bloom: Bloom::default(), + gas_used: 0, + transactions: vec![], + withdrawals: vec![], + withdrawals_root: B256::ZERO, + blob_gas_used: None, + } + } +} + +/// Custom builder for creating test flashblocks with full control. +/// +/// Created via [`TestFlashBlockFactory::builder()`]. +#[derive(Debug)] +pub(crate) struct TestFlashBlockBuilder { + index: u64, + block_number: u64, + payload_id: PayloadId, + parent_hash: B256, + timestamp: u64, + base: Option, + block_hash: B256, + state_root: B256, + receipts_root: B256, + logs_bloom: Bloom, + gas_used: u64, + transactions: Vec, + withdrawals: Vec, + withdrawals_root: B256, + blob_gas_used: Option, +} + +impl TestFlashBlockBuilder { + /// Sets the flashblock index. + pub(crate) fn index(mut self, index: u64) -> Self { + self.index = index; + self + } + + /// Sets the block number. 
+ pub(crate) fn block_number(mut self, block_number: u64) -> Self { + self.block_number = block_number; + self + } + + /// Sets the payload ID. + pub(crate) fn payload_id(mut self, payload_id: PayloadId) -> Self { + self.payload_id = payload_id; + self + } + + /// Sets the parent hash. + pub(crate) fn parent_hash(mut self, parent_hash: B256) -> Self { + self.parent_hash = parent_hash; + self + } + + /// Sets the timestamp. + pub(crate) fn timestamp(mut self, timestamp: u64) -> Self { + self.timestamp = timestamp; + self + } + + /// Sets the base payload. Automatically created for index 0 if not set. + #[allow(dead_code)] + pub(crate) fn base(mut self, base: OpFlashblockPayloadBase) -> Self { + self.base = Some(base); + self + } + + /// Sets the block hash in the diff. + #[allow(dead_code)] + pub(crate) fn block_hash(mut self, block_hash: B256) -> Self { + self.block_hash = block_hash; + self + } + + /// Sets the state root in the diff. + #[allow(dead_code)] + pub(crate) fn state_root(mut self, state_root: B256) -> Self { + self.state_root = state_root; + self + } + + /// Sets the receipts root in the diff. + #[allow(dead_code)] + pub(crate) fn receipts_root(mut self, receipts_root: B256) -> Self { + self.receipts_root = receipts_root; + self + } + + /// Sets the transactions in the diff. + pub(crate) fn transactions(mut self, transactions: Vec) -> Self { + self.transactions = transactions; + self + } + + /// Sets the gas used in the diff. + #[allow(dead_code)] + pub(crate) fn gas_used(mut self, gas_used: u64) -> Self { + self.gas_used = gas_used; + self + } + + /// Builds the flashblock. + /// + /// If index is 0 and no base was explicitly set, creates a default base. + pub(crate) fn build(mut self) -> FlashBlock { + // Auto-create base for index 0 if not set + if self.index == 0 && self.base.is_none() { + self.base = Some(OpFlashblockPayloadBase { + parent_hash: self.parent_hash, + parent_beacon_block_root: B256::random(), + fee_recipient: Address::default(), + prev_randao: B256::random(), + block_number: self.block_number, + gas_limit: 30_000_000, + timestamp: self.timestamp, + extra_data: Default::default(), + base_fee_per_gas: U256::from(1_000_000_000u64), + }); + } + + FlashBlock { + index: self.index, + payload_id: self.payload_id, + base: self.base, + diff: OpFlashblockPayloadDelta { + block_hash: self.block_hash, + state_root: self.state_root, + receipts_root: self.receipts_root, + logs_bloom: self.logs_bloom, + gas_used: self.gas_used, + transactions: self.transactions, + withdrawals: self.withdrawals, + withdrawals_root: self.withdrawals_root, + blob_gas_used: self.blob_gas_used, + }, + metadata: OpFlashblockPayloadMetadata { + block_number: self.block_number, + receipts: Default::default(), + new_account_balances: Default::default(), + }, + } + } +} diff --git a/crates/optimism/flashblocks/src/worker.rs b/crates/optimism/flashblocks/src/worker.rs index 8cf7777f6a..7d9ab860a5 100644 --- a/crates/optimism/flashblocks/src/worker.rs +++ b/crates/optimism/flashblocks/src/worker.rs @@ -1,7 +1,8 @@ -use crate::{ExecutionPayloadBaseV1, PendingFlashBlock}; +use crate::PendingFlashBlock; use alloy_eips::{eip2718::WithEncoded, BlockNumberOrTag}; use alloy_primitives::B256; -use reth_chain_state::{CanonStateSubscriptions, ExecutedBlock}; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; +use reth_chain_state::{ComputedTrieData, ExecutedBlock}; use reth_errors::RethError; use reth_evm::{ execute::{BlockBuilder, BlockBuilderOutcome}, @@ -38,7 +39,7 @@ impl FlashBlockBuilder { } 
pub(crate) struct BuildArgs { - pub(crate) base: ExecutionPayloadBaseV1, + pub(crate) base: OpFlashblockPayloadBase, pub(crate) transactions: I, pub(crate) cached_state: Option<(B256, CachedReads)>, pub(crate) last_flashblock_index: u64, @@ -49,9 +50,8 @@ pub(crate) struct BuildArgs { impl FlashBlockBuilder where N: NodePrimitives, - EvmConfig: ConfigureEvm + Unpin>, + EvmConfig: ConfigureEvm + Unpin>, Provider: StateProviderFactory - + CanonStateSubscriptions + BlockReaderIdExt< Header = HeaderTy, Block = BlockTy, @@ -60,14 +60,14 @@ where > + Unpin, { /// Returns the [`PendingFlashBlock`] made purely out of transactions and - /// [`ExecutionPayloadBaseV1`] in `args`. + /// [`OpFlashblockPayloadBase`] in `args`. /// /// Returns `None` if the flashblock doesn't attach to the latest header. pub(crate) fn execute>>>( &self, mut args: BuildArgs, ) -> eyre::Result, CachedReads)>> { - trace!("Attempting new pending block from flashblocks"); + trace!(target: "flashblocks", "Attempting new pending block from flashblocks"); let latest = self .provider @@ -76,7 +76,7 @@ where let latest_hash = latest.hash(); if args.base.parent_hash != latest_hash { - trace!(flashblock_parent = ?args.base.parent_hash, local_latest=?latest.num_hash(),"Skipping non consecutive flashblock"); + trace!(target: "flashblocks", flashblock_parent = ?args.base.parent_hash, local_latest=?latest.num_hash(),"Skipping non consecutive flashblock"); // doesn't attach to the latest block return Ok(None) } @@ -106,6 +106,7 @@ where // if the real state root should be computed let BlockBuilderOutcome { execution_result, block, hashed_state, .. } = if args.compute_state_root { + trace!(target: "flashblocks", "Computing block state root"); builder.finish(&state_provider)? } else { builder.finish(NoopProvider::default())? @@ -120,12 +121,14 @@ where let pending_block = PendingBlock::with_executed_block( Instant::now() + Duration::from_secs(1), - ExecutedBlock { - recovered_block: block.into(), - execution_output: Arc::new(execution_outcome), - hashed_state: Arc::new(hashed_state), - trie_updates: Arc::default(), - }, + ExecutedBlock::new( + block.into(), + Arc::new(execution_outcome), + ComputedTrieData::without_trie_input( + Arc::new(hashed_state.into_sorted()), + Arc::default(), + ), + ), ); let pending_flashblock = PendingFlashBlock::new( pending_block, diff --git a/crates/optimism/flashblocks/src/ws/decoding.rs b/crates/optimism/flashblocks/src/ws/decoding.rs index 267f79cf19..64d96dc5e3 100644 --- a/crates/optimism/flashblocks/src/ws/decoding.rs +++ b/crates/optimism/flashblocks/src/ws/decoding.rs @@ -1,52 +1,29 @@ -use crate::{ExecutionPayloadBaseV1, ExecutionPayloadFlashblockDeltaV1, FlashBlock, Metadata}; +use crate::FlashBlock; use alloy_primitives::bytes::Bytes; -use alloy_rpc_types_engine::PayloadId; -use serde::{Deserialize, Serialize}; -use std::{fmt::Debug, io}; +use std::io; -/// Internal helper for decoding -#[derive(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] -struct FlashblocksPayloadV1 { - /// The payload id of the flashblock - pub payload_id: PayloadId, - /// The index of the flashblock in the block - pub index: u64, - /// The base execution payload configuration - #[serde(skip_serializing_if = "Option::is_none")] - pub base: Option, - /// The delta/diff containing modified portions of the execution payload - pub diff: ExecutionPayloadFlashblockDeltaV1, - /// Additional metadata associated with the flashblock - pub metadata: serde_json::Value, +/// A trait for decoding flashblocks from bytes. 
+pub trait FlashBlockDecoder: Send + 'static { + /// Decodes `bytes` into a [`FlashBlock`]. + fn decode(&self, bytes: Bytes) -> eyre::Result; } -impl FlashBlock { - /// Decodes `bytes` into [`FlashBlock`]. - /// - /// This function is specific to the Base Optimism websocket encoding. - /// - /// It is assumed that the `bytes` are encoded in JSON and optionally compressed using brotli. - /// Whether the `bytes` is compressed or not is determined by looking at the first - /// non ascii-whitespace character. - pub(crate) fn decode(bytes: Bytes) -> eyre::Result { - let bytes = try_parse_message(bytes)?; - - let payload: FlashblocksPayloadV1 = serde_json::from_slice(&bytes) - .map_err(|e| eyre::eyre!("failed to parse message: {e}"))?; - - let metadata: Metadata = serde_json::from_value(payload.metadata) - .map_err(|e| eyre::eyre!("failed to parse message metadata: {e}"))?; - - Ok(Self { - payload_id: payload.payload_id, - index: payload.index, - base: payload.base, - diff: payload.diff, - metadata, - }) +/// Default implementation of the decoder. +impl FlashBlockDecoder for () { + fn decode(&self, bytes: Bytes) -> eyre::Result { + decode_flashblock(bytes) } } +pub(crate) fn decode_flashblock(bytes: Bytes) -> eyre::Result { + let bytes = crate::ws::decoding::try_parse_message(bytes)?; + + let payload: FlashBlock = + serde_json::from_slice(&bytes).map_err(|e| eyre::eyre!("failed to parse message: {e}"))?; + + Ok(payload) +} + /// Maps `bytes` into a potentially different [`Bytes`]. /// /// If the bytes start with a "{" character, prepended by any number of ASCII-whitespaces, diff --git a/crates/optimism/flashblocks/src/ws/mod.rs b/crates/optimism/flashblocks/src/ws/mod.rs index 2b82089931..651d83c916 100644 --- a/crates/optimism/flashblocks/src/ws/mod.rs +++ b/crates/optimism/flashblocks/src/ws/mod.rs @@ -1,4 +1,6 @@ pub use stream::{WsConnect, WsFlashBlockStream}; mod decoding; +pub use decoding::FlashBlockDecoder; + mod stream; diff --git a/crates/optimism/flashblocks/src/ws/stream.rs b/crates/optimism/flashblocks/src/ws/stream.rs index 64cf6f718e..e46fd6d747 100644 --- a/crates/optimism/flashblocks/src/ws/stream.rs +++ b/crates/optimism/flashblocks/src/ws/stream.rs @@ -1,4 +1,4 @@ -use crate::{FlashBlock, FlashBlockDecoder}; +use crate::{ws::FlashBlockDecoder, FlashBlock}; use futures_util::{ stream::{SplitSink, SplitStream}, FutureExt, Sink, Stream, StreamExt, @@ -126,7 +126,9 @@ where } Ok(Message::Ping(bytes)) => this.ping(bytes), Ok(Message::Close(frame)) => this.close(frame), - Ok(msg) => debug!("Received unexpected message: {:?}", msg), + Ok(msg) => { + debug!(target: "flashblocks", "Received unexpected message: {:?}", msg) + } Err(err) => return Poll::Ready(Some(Err(err.into()))), } } @@ -238,7 +240,6 @@ impl WsConnect for WsConnector { #[cfg(test)] mod tests { use super::*; - use crate::ExecutionPayloadBaseV1; use alloy_primitives::bytes::Bytes; use brotli::enc::BrotliEncoderParams; use std::{future, iter}; @@ -449,23 +450,7 @@ mod tests { } fn flashblock() -> FlashBlock { - FlashBlock { - payload_id: Default::default(), - index: 0, - base: Some(ExecutionPayloadBaseV1 { - parent_beacon_block_root: Default::default(), - parent_hash: Default::default(), - fee_recipient: Default::default(), - prev_randao: Default::default(), - block_number: 0, - gas_limit: 0, - timestamp: 0, - extra_data: Default::default(), - base_fee_per_gas: Default::default(), - }), - diff: Default::default(), - metadata: Default::default(), - } + Default::default() } 
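// --- Illustrative aside (not part of this diff) ------------------------------------
// The `FlashBlockDecoder` trait introduced above lets the websocket stream swap in an
// alternative decoder, while the unit type `()` keeps the default JSON/brotli path via
// `decode_flashblock`. A minimal sketch of a crate-internal decoder that counts decoded
// frames before delegating to that default path; `CountingDecoder` is a hypothetical
// name, and because `decode_flashblock` is `pub(crate)` this assumes the impl lives in
// the same crate as the decoding module.
use alloy_primitives::bytes::Bytes;
use std::sync::atomic::{AtomicU64, Ordering};

#[derive(Debug, Default)]
struct CountingDecoder {
    // Number of flashblock frames successfully decoded so far.
    decoded: AtomicU64,
}

impl FlashBlockDecoder for CountingDecoder {
    fn decode(&self, bytes: Bytes) -> eyre::Result<FlashBlock> {
        // Delegate to the default decoder, which transparently handles both plain JSON
        // frames and brotli-compressed frames.
        let flashblock = decode_flashblock(bytes)?;
        self.decoded.fetch_add(1, Ordering::Relaxed);
        Ok(flashblock)
    }
}
// ------------------------------------------------------------------------------------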
#[test_case::test_case(to_json_message(Message::Binary); "json binary")] diff --git a/crates/optimism/hardforks/src/lib.rs b/crates/optimism/hardforks/src/lib.rs index 85152c5974..202194c63a 100644 --- a/crates/optimism/hardforks/src/lib.rs +++ b/crates/optimism/hardforks/src/lib.rs @@ -18,6 +18,10 @@ extern crate alloc; +use alloy_op_hardforks::{ + BASE_MAINNET_JOVIAN_TIMESTAMP, BASE_SEPOLIA_JOVIAN_TIMESTAMP, OP_MAINNET_JOVIAN_TIMESTAMP, + OP_SEPOLIA_JOVIAN_TIMESTAMP, +}; // Re-export alloy-op-hardforks types. pub use alloy_op_hardforks::{OpHardfork, OpHardforks}; @@ -28,6 +32,7 @@ use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition, Hardf /// Dev hardforks pub static DEV_HARDFORKS: LazyLock = LazyLock::new(|| { + const JOVIAN_TIMESTAMP: ForkCondition = ForkCondition::Timestamp(1761840000); ChainHardforks::new(vec![ (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)), (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), @@ -58,7 +63,7 @@ pub static DEV_HARDFORKS: LazyLock = LazyLock::new(|| { (OpHardfork::Granite.boxed(), ForkCondition::Timestamp(0)), (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(0)), (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(0)), - // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(0)), + (OpHardfork::Jovian.boxed(), JOVIAN_TIMESTAMP), ]) }); @@ -97,8 +102,7 @@ pub static OP_MAINNET_HARDFORKS: LazyLock = LazyLock::new(|| { (OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1736445601)), (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(1746806401)), (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1746806401)), - // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(u64::MAX)), /* TODO: Update - // timestamp when Jovian is planned */ + (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(OP_MAINNET_JOVIAN_TIMESTAMP)), ]) }); /// Optimism Sepolia list of hardforks. 
@@ -136,8 +140,7 @@ pub static OP_SEPOLIA_HARDFORKS: LazyLock = LazyLock::new(|| { (OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1732633200)), (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(1744905600)), (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1744905600)), - // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(u64::MAX)), /* TODO: Update - // timestamp when Jovian is planned */ + (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(OP_SEPOLIA_JOVIAN_TIMESTAMP)), ]) }); @@ -176,8 +179,7 @@ pub static BASE_SEPOLIA_HARDFORKS: LazyLock = LazyLock::new(|| { (OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1732633200)), (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(1744905600)), (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1744905600)), - // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(u64::MAX)), /* TODO: Update - // timestamp when Jovian is planned */ + (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(BASE_SEPOLIA_JOVIAN_TIMESTAMP)), ]) }); @@ -216,7 +218,6 @@ pub static BASE_MAINNET_HARDFORKS: LazyLock = LazyLock::new(|| { (OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1736445601)), (EthereumHardfork::Prague.boxed(), ForkCondition::Timestamp(1746806401)), (OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(1746806401)), - // (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(u64::MAX)), /* TODO: Update - // timestamp when Jovian is planned */ + (OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(BASE_MAINNET_JOVIAN_TIMESTAMP)), ]) }); diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 162700ac0a..7e79b3a886 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -34,7 +34,7 @@ reth-rpc-api.workspace = true # op-reth reth-optimism-payload-builder.workspace = true -reth-optimism-evm = { workspace = true, features = ["rpc"] } +reth-optimism-evm = { workspace = true, features = ["std", "rpc"] } reth-optimism-rpc.workspace = true reth-optimism-storage.workspace = true reth-optimism-txpool.workspace = true @@ -45,7 +45,7 @@ reth-optimism-primitives = { workspace = true, features = ["serde", "serde-binco # revm with required optimism features # Note: this must be kept to ensure all features are properly enabled/forwarded -revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } +revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg", "memory_limit"] } op-revm.workspace = true # ethereum @@ -80,8 +80,10 @@ reth-payload-util.workspace = true reth-revm = { workspace = true, features = ["std"] } reth-rpc.workspace = true reth-rpc-eth-types.workspace = true +reth-stages-types.workspace = true alloy-network.workspace = true +alloy-op-hardforks.workspace = true futures.workspace = true op-alloy-network.workspace = true @@ -93,7 +95,17 @@ asm-keccak = [ "reth-node-core/asm-keccak", "revm/asm-keccak", ] -js-tracer = ["reth-node-builder/js-tracer"] +keccak-cache-global = [ + "alloy-primitives/keccak-cache-global", + "reth-node-core/keccak-cache-global", + "reth-optimism-node/keccak-cache-global", +] +js-tracer = [ + "reth-node-builder/js-tracer", + "reth-optimism-node/js-tracer", + "reth-rpc/js-tracer", + "reth-rpc-eth-types/js-tracer", +] test-utils = [ "reth-tasks", "reth-e2e-test-utils", @@ -113,6 +125,7 @@ test-utils = [ "reth-optimism-primitives/arbitrary", "reth-primitives-traits/test-utils", "reth-trie-common/test-utils", + "reth-stages-types/test-utils", ] reth-codec = ["reth-optimism-primitives/reth-codec"] 
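// --- Illustrative aside (not part of this diff) ------------------------------------
// With the Jovian entries above wired to the upstream timestamp constants instead of
// commented-out placeholders, activation can be checked the same way as for earlier OP
// hardforks. A minimal sketch, assuming `ChainHardforks::fork` and
// `ForkCondition::active_at_timestamp` (reth-ethereum-forks) behave here as they do for
// the other forks in these lists.
use alloy_op_hardforks::OP_MAINNET_JOVIAN_TIMESTAMP;
use reth_optimism_forks::{OpHardfork, OP_MAINNET_HARDFORKS};

fn jovian_is_scheduled_on_op_mainnet() {
    // Look up the fork condition registered for Jovian on OP Mainnet.
    let condition = OP_MAINNET_HARDFORKS.fork(OpHardfork::Jovian);
    // Inactive one second before the scheduled timestamp, active at and after it.
    assert!(!condition.active_at_timestamp(OP_MAINNET_JOVIAN_TIMESTAMP - 1));
    assert!(condition.active_at_timestamp(OP_MAINNET_JOVIAN_TIMESTAMP));
}
// ------------------------------------------------------------------------------------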
diff --git a/crates/optimism/node/src/args.rs b/crates/optimism/node/src/args.rs index 4e9bb2ce7c..abddb76109 100644 --- a/crates/optimism/node/src/args.rs +++ b/crates/optimism/node/src/args.rs @@ -72,8 +72,16 @@ pub struct RollupArgs { /// /// If given, the flashblocks are received to build pending block. All request with "pending" /// block tag will use the pending state based on flashblocks. - #[arg(long)] + #[arg(long, alias = "websocket-url")] pub flashblocks_url: Option, + + /// Enable flashblock consensus client to drive the chain forward + /// + /// When enabled, the flashblock consensus client will process flashblock sequences and submit + /// them to the engine API to advance the chain. + /// Requires `flashblocks_url` to be set. + #[arg(long, default_value_t = false, requires = "flashblocks_url")] + pub flashblock_consensus: bool, } impl Default for RollupArgs { @@ -90,6 +98,7 @@ impl Default for RollupArgs { historical_rpc: None, min_suggested_priority_fee: 1_000_000, flashblocks_url: None, + flashblock_consensus: false, } } } diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index af018d6f27..92bd5c4b8d 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -18,7 +18,7 @@ use reth_node_api::{ use reth_optimism_consensus::isthmus; use reth_optimism_forks::OpHardforks; use reth_optimism_payload_builder::{OpExecutionPayloadValidator, OpPayloadTypes}; -use reth_optimism_primitives::{OpBlock, ADDRESS_L2_TO_L1_MESSAGE_PASSER}; +use reth_optimism_primitives::{OpBlock, L2_TO_L1_MESSAGE_PASSER_ADDRESS}; use reth_primitives_traits::{Block, RecoveredBlock, SealedBlock, SignedTransaction}; use reth_provider::StateProviderFactory; use reth_trie_common::{HashedPostState, KeyHasher}; @@ -76,7 +76,7 @@ pub struct OpEngineValidator { impl OpEngineValidator { /// Instantiates a new validator. 
pub fn new(chain_spec: Arc, provider: P) -> Self { - let hashed_addr_l2tol1_msg_passer = KH::hash_key(ADDRESS_L2_TO_L1_MESSAGE_PASSER); + let hashed_addr_l2tol1_msg_passer = KH::hash_key(L2_TO_L1_MESSAGE_PASSER_ADDRESS); Self { inner: OpExecutionPayloadValidator::new(chain_spec), provider, @@ -121,15 +121,6 @@ where { type Block = alloy_consensus::Block; - fn ensure_well_formed_payload( - &self, - payload: OpExecutionData, - ) -> Result, NewPayloadError> { - let sealed_block = - self.inner.ensure_well_formed_payload(payload).map_err(NewPayloadError::other)?; - sealed_block.try_recover().map_err(|e| NewPayloadError::Other(e.into())) - } - fn validate_block_post_execution_with_hashed_state( &self, state_updates: &HashedPostState, @@ -159,6 +150,13 @@ where Ok(()) } + + fn convert_payload_to_block( + &self, + payload: OpExecutionData, + ) -> Result, NewPayloadError> { + self.inner.ensure_well_formed_payload(payload).map_err(NewPayloadError::other) + } } impl EngineApiValidator for OpEngineValidator @@ -301,23 +299,16 @@ mod test { use super::*; use crate::engine; + use alloy_op_hardforks::BASE_SEPOLIA_JOVIAN_TIMESTAMP; use alloy_primitives::{b64, Address, B256, B64}; use alloy_rpc_types_engine::PayloadAttributes; - use reth_chainspec::{ChainSpec, ForkCondition, Hardfork}; + use reth_chainspec::ChainSpec; use reth_optimism_chainspec::{OpChainSpec, BASE_SEPOLIA}; - use reth_optimism_forks::OpHardfork; use reth_provider::noop::NoopProvider; use reth_trie_common::KeccakKeyHasher; - const JOVIAN_TIMESTAMP: u64 = 1744909000; - fn get_chainspec() -> Arc { - let mut base_sepolia_spec = BASE_SEPOLIA.inner.clone(); - - // TODO: Remove this once we know the Jovian timestamp - base_sepolia_spec - .hardforks - .insert(OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(JOVIAN_TIMESTAMP)); + let base_sepolia_spec = BASE_SEPOLIA.inner.clone(); Arc::new(OpChainSpec { inner: ChainSpec { @@ -429,7 +420,8 @@ mod test { fn test_well_formed_attributes_jovian_valid() { let validator = OpEngineValidator::new::(get_chainspec(), NoopProvider::default()); - let attributes = get_attributes(Some(b64!("0000000000000000")), Some(1), JOVIAN_TIMESTAMP); + let attributes = + get_attributes(Some(b64!("0000000000000000")), Some(1), BASE_SEPOLIA_JOVIAN_TIMESTAMP); let result = as EngineApiValidator< OpEngineTypes, @@ -444,7 +436,7 @@ mod test { fn test_malformed_attributes_jovian_with_eip_1559_params_none() { let validator = OpEngineValidator::new::(get_chainspec(), NoopProvider::default()); - let attributes = get_attributes(None, Some(1), JOVIAN_TIMESTAMP); + let attributes = get_attributes(None, Some(1), BASE_SEPOLIA_JOVIAN_TIMESTAMP); let result = as EngineApiValidator< OpEngineTypes, @@ -474,7 +466,8 @@ mod test { fn test_malformed_attributes_post_jovian_with_min_base_fee_none() { let validator = OpEngineValidator::new::(get_chainspec(), NoopProvider::default()); - let attributes = get_attributes(Some(b64!("0000000000000000")), None, JOVIAN_TIMESTAMP); + let attributes = + get_attributes(Some(b64!("0000000000000000")), None, BASE_SEPOLIA_JOVIAN_TIMESTAMP); let result = as EngineApiValidator< OpEngineTypes, diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index ebad4e6699..dd68ab8a8e 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -39,7 +39,7 @@ use reth_optimism_evm::{OpEvmConfig, OpRethReceiptBuilder}; use reth_optimism_forks::OpHardforks; use reth_optimism_payload_builder::{ builder::OpPayloadTransactions, - config::{OpBuilderConfig, OpDAConfig}, + 
config::{OpBuilderConfig, OpDAConfig, OpGasLimitConfig}, OpAttributes, OpBuiltPayload, OpPayloadPrimitives, }; use reth_optimism_primitives::{DepositReceipt, OpPrimitives}; @@ -118,6 +118,10 @@ pub struct OpNode { /// /// By default no throttling is applied. pub da_config: OpDAConfig, + /// Gas limit configuration for the OP builder. + /// Used to control the gas limit of the blocks produced by the OP builder.(configured by the + /// batcher via the `miner_` api) + pub gas_limit_config: OpGasLimitConfig, } /// A [`ComponentsBuilder`] with its generic arguments set to a stack of Optimism specific builders. @@ -133,7 +137,11 @@ pub type OpNodeComponentBuilder = ComponentsBu impl OpNode { /// Creates a new instance of the Optimism node type. pub fn new(args: RollupArgs) -> Self { - Self { args, da_config: OpDAConfig::default() } + Self { + args, + da_config: OpDAConfig::default(), + gas_limit_config: OpGasLimitConfig::default(), + } } /// Configure the data availability configuration for the OP builder. @@ -142,6 +150,12 @@ impl OpNode { self } + /// Configure the gas limit configuration for the OP builder. + pub fn with_gas_limit_config(mut self, gas_limit_config: OpGasLimitConfig) -> Self { + self.gas_limit_config = gas_limit_config; + self + } + /// Returns the components for the given [`RollupArgs`]. pub fn components(&self) -> OpNodeComponentBuilder where @@ -161,7 +175,9 @@ impl OpNode { ) .executor(OpExecutorBuilder::default()) .payload(BasicPayloadServiceBuilder::new( - OpPayloadBuilder::new(compute_pending_block).with_da_config(self.da_config.clone()), + OpPayloadBuilder::new(compute_pending_block) + .with_da_config(self.da_config.clone()) + .with_gas_limit_config(self.gas_limit_config.clone()), )) .network(OpNetworkBuilder::new(disable_txpool_gossip, !discovery_v4)) .consensus(OpConsensusBuilder::default()) @@ -173,10 +189,12 @@ impl OpNode { .with_sequencer(self.args.sequencer.clone()) .with_sequencer_headers(self.args.sequencer_headers.clone()) .with_da_config(self.da_config.clone()) + .with_gas_limit_config(self.gas_limit_config.clone()) .with_enable_tx_conditional(self.args.enable_tx_conditional) .with_min_suggested_priority_fee(self.args.min_suggested_priority_fee) .with_historical_rpc(self.args.historical_rpc.clone()) .with_flashblocks(self.args.flashblocks_url.clone()) + .with_flashblock_consensus(self.args.flashblock_consensus) } /// Instantiates the [`ProviderFactoryBuilder`] for an opstack node. @@ -200,13 +218,14 @@ impl OpNode { /// use reth_db::open_db_read_only; /// use reth_optimism_chainspec::OpChainSpecBuilder; /// use reth_optimism_node::OpNode; - /// use reth_provider::providers::StaticFileProvider; + /// use reth_provider::providers::{RocksDBProvider, StaticFileProvider}; /// use std::sync::Arc; /// /// let factory = OpNode::provider_factory_builder() /// .db(Arc::new(open_db_read_only("db", Default::default()).unwrap())) /// .chainspec(OpChainSpecBuilder::base_mainnet().build().into()) /// .static_file(StaticFileProvider::read_only("db/static_files", false).unwrap()) + /// .rocksdb_provider(RocksDBProvider::builder("db/rocksdb").build().unwrap()) /// .build_provider_factory(); /// ``` pub fn provider_factory_builder() -> ProviderFactoryBuilder { @@ -286,6 +305,8 @@ pub struct OpAddOns< pub rpc_add_ons: RpcAddOns, /// Data availability configuration for the OP builder. pub da_config: OpDAConfig, + /// Gas limit configuration for the OP builder. 
+ pub gas_limit_config: OpGasLimitConfig, /// Sequencer client, configured to forward submitted transactions to sequencer of given OP /// network. pub sequencer_url: Option, @@ -306,9 +327,11 @@ where EthB: EthApiBuilder, { /// Creates a new instance from components. + #[allow(clippy::too_many_arguments)] pub const fn new( rpc_add_ons: RpcAddOns, da_config: OpDAConfig, + gas_limit_config: OpGasLimitConfig, sequencer_url: Option, sequencer_headers: Vec, historical_rpc: Option, @@ -318,6 +341,7 @@ where Self { rpc_add_ons, da_config, + gas_limit_config, sequencer_url, sequencer_headers, historical_rpc, @@ -368,6 +392,7 @@ where let Self { rpc_add_ons, da_config, + gas_limit_config, sequencer_url, sequencer_headers, historical_rpc, @@ -378,6 +403,7 @@ where OpAddOns::new( rpc_add_ons.with_engine_api(engine_api_builder), da_config, + gas_limit_config, sequencer_url, sequencer_headers, historical_rpc, @@ -394,6 +420,7 @@ where let Self { rpc_add_ons, da_config, + gas_limit_config, sequencer_url, sequencer_headers, enable_tx_conditional, @@ -404,6 +431,7 @@ where OpAddOns::new( rpc_add_ons.with_payload_validator(payload_validator_builder), da_config, + gas_limit_config, sequencer_url, sequencer_headers, historical_rpc, @@ -423,6 +451,7 @@ where let Self { rpc_add_ons, da_config, + gas_limit_config, sequencer_url, sequencer_headers, enable_tx_conditional, @@ -433,6 +462,7 @@ where OpAddOns::new( rpc_add_ons.with_rpc_middleware(rpc_middleware), da_config, + gas_limit_config, sequencer_url, sequencer_headers, historical_rpc, @@ -496,6 +526,7 @@ where let Self { rpc_add_ons, da_config, + gas_limit_config, sequencer_url, sequencer_headers, enable_tx_conditional, @@ -536,7 +567,7 @@ where Box::new(ctx.node.task_executor().clone()), builder, ); - let miner_ext = OpMinerExtApi::new(da_config); + let miner_ext = OpMinerExtApi::new(da_config, gas_limit_config); let sequencer_client = if let Some(url) = sequencer_url { Some(SequencerClient::new_with_headers(url, sequencer_headers).await?) @@ -559,7 +590,7 @@ where modules.merge_if_module_configured(RethRpcModule::Debug, debug_ext.into_rpc())?; // extend the miner namespace if configured in the regular http server - modules.merge_if_module_configured( + modules.add_or_replace_if_module_configured( RethRpcModule::Miner, miner_ext.clone().into_rpc(), )?; @@ -652,6 +683,8 @@ pub struct OpAddOnsBuilder { historical_rpc: Option, /// Data availability configuration for the OP builder. da_config: Option, + /// Gas limit configuration for the OP builder. + gas_limit_config: Option, /// Enable transaction conditionals. enable_tx_conditional: bool, /// Marker for network types. @@ -664,6 +697,8 @@ pub struct OpAddOnsBuilder { tokio_runtime: Option, /// A URL pointing to a secure websocket service that streams out flashblocks. flashblocks_url: Option, + /// Enable flashblock consensus client to drive chain forward. + flashblock_consensus: bool, } impl Default for OpAddOnsBuilder { @@ -673,12 +708,14 @@ impl Default for OpAddOnsBuilder { sequencer_headers: Vec::new(), historical_rpc: None, da_config: None, + gas_limit_config: None, enable_tx_conditional: false, min_suggested_priority_fee: 1_000_000, _nt: PhantomData, rpc_middleware: Identity::new(), tokio_runtime: None, flashblocks_url: None, + flashblock_consensus: false, } } } @@ -702,6 +739,12 @@ impl OpAddOnsBuilder { self } + /// Configure the gas limit configuration for the OP payload builder. 
+ pub fn with_gas_limit_config(mut self, gas_limit_config: OpGasLimitConfig) -> Self { + self.gas_limit_config = Some(gas_limit_config); + self + } + /// Configure if transaction conditional should be enabled. pub const fn with_enable_tx_conditional(mut self, enable_tx_conditional: bool) -> Self { self.enable_tx_conditional = enable_tx_conditional; @@ -735,11 +778,13 @@ impl OpAddOnsBuilder { sequencer_headers, historical_rpc, da_config, + gas_limit_config, enable_tx_conditional, min_suggested_priority_fee, tokio_runtime, _nt, flashblocks_url, + flashblock_consensus, .. } = self; OpAddOnsBuilder { @@ -747,12 +792,14 @@ impl OpAddOnsBuilder { sequencer_headers, historical_rpc, da_config, + gas_limit_config, enable_tx_conditional, min_suggested_priority_fee, _nt, rpc_middleware, tokio_runtime, flashblocks_url, + flashblock_consensus, } } @@ -761,6 +808,12 @@ impl OpAddOnsBuilder { self.flashblocks_url = flashblocks_url; self } + + /// With a flashblock consensus client to drive chain forward. + pub const fn with_flashblock_consensus(mut self, flashblock_consensus: bool) -> Self { + self.flashblock_consensus = flashblock_consensus; + self + } } impl OpAddOnsBuilder { @@ -779,12 +832,14 @@ impl OpAddOnsBuilder { sequencer_url, sequencer_headers, da_config, + gas_limit_config, enable_tx_conditional, min_suggested_priority_fee, historical_rpc, rpc_middleware, tokio_runtime, flashblocks_url, + flashblock_consensus, .. } = self; @@ -794,7 +849,8 @@ impl OpAddOnsBuilder { .with_sequencer(sequencer_url.clone()) .with_sequencer_headers(sequencer_headers.clone()) .with_min_suggested_priority_fee(min_suggested_priority_fee) - .with_flashblocks(flashblocks_url), + .with_flashblocks(flashblocks_url) + .with_flashblock_consensus(flashblock_consensus), PVB::default(), EB::default(), EVB::default(), @@ -802,6 +858,7 @@ impl OpAddOnsBuilder { ) .with_tokio_runtime(tokio_runtime), da_config.unwrap_or_default(), + gas_limit_config.unwrap_or_default(), sequencer_url, sequencer_headers, historical_rpc, @@ -1006,13 +1063,21 @@ pub struct OpPayloadBuilder { /// This data availability configuration specifies constraints for the payload builder /// when assembling payloads pub da_config: OpDAConfig, + /// Gas limit configuration for the OP builder. + /// This is used to configure gas limit related constraints for the payload builder. + pub gas_limit_config: OpGasLimitConfig, } impl OpPayloadBuilder { /// Create a new instance with the given `compute_pending_block` flag and data availability /// config. pub fn new(compute_pending_block: bool) -> Self { - Self { compute_pending_block, best_transactions: (), da_config: OpDAConfig::default() } + Self { + compute_pending_block, + best_transactions: (), + da_config: OpDAConfig::default(), + gas_limit_config: OpGasLimitConfig::default(), + } } /// Configure the data availability configuration for the OP payload builder. @@ -1020,14 +1085,20 @@ impl OpPayloadBuilder { self.da_config = da_config; self } + + /// Configure the gas limit configuration for the OP payload builder. + pub fn with_gas_limit_config(mut self, gas_limit_config: OpGasLimitConfig) -> Self { + self.gas_limit_config = gas_limit_config; + self + } } impl OpPayloadBuilder { /// Configures the type responsible for yielding the transactions that should be included in the /// payload. pub fn with_transactions(self, best_transactions: T) -> OpPayloadBuilder { - let Self { compute_pending_block, da_config, .. 
} = self; - OpPayloadBuilder { compute_pending_block, best_transactions, da_config } + let Self { compute_pending_block, da_config, gas_limit_config, .. } = self; + OpPayloadBuilder { compute_pending_block, best_transactions, da_config, gas_limit_config } } } @@ -1068,7 +1139,10 @@ where pool, ctx.provider().clone(), evm_config, - OpBuilderConfig { da_config: self.da_config.clone() }, + OpBuilderConfig { + da_config: self.da_config.clone(), + gas_limit_config: self.gas_limit_config.clone(), + }, ) .with_transactions(self.best_transactions.clone()) .set_compute_pending_block(self.compute_pending_block); @@ -1110,7 +1184,8 @@ impl OpNetworkBuilder { Node: FullNodeTypes>, NetworkP: NetworkPrimitives, { - let Self { disable_txpool_gossip, disable_discovery_v4, .. } = self.clone(); + let disable_txpool_gossip = self.disable_txpool_gossip; + let disable_discovery_v4 = self.disable_discovery_v4; let args = &ctx.config().network; let network_builder = ctx .network_config_builder()? diff --git a/crates/optimism/node/src/rpc.rs b/crates/optimism/node/src/rpc.rs index db811a7f92..e2a8e5c489 100644 --- a/crates/optimism/node/src/rpc.rs +++ b/crates/optimism/node/src/rpc.rs @@ -13,7 +13,7 @@ //! components::ComponentsBuilder, //! hooks::OnComponentInitializedHook, //! rpc::{EthApiBuilder, EthApiCtx}, -//! LaunchContext, NodeConfig, RethFullAdapter, +//! ConsensusEngineHandle, LaunchContext, NodeConfig, RethFullAdapter, //! }; //! use reth_optimism_chainspec::OP_SEPOLIA; //! use reth_optimism_evm::OpEvmConfig; @@ -67,7 +67,14 @@ //! config.cache, //! node.task_executor().clone(), //! ); -//! let ctx = EthApiCtx { components: node.node_adapter(), config, cache }; +//! // Create a dummy beacon engine handle for offline mode +//! let (tx, _) = tokio::sync::mpsc::unbounded_channel(); +//! let ctx = EthApiCtx { +//! components: node.node_adapter(), +//! config, +//! cache, +//! engine_handle: ConsensusEngineHandle::new(tx), +//! }; //! let eth_api = OpEthApiBuilder::::default().build_eth_api(ctx).await.unwrap(); //! //! // build `trace` namespace API @@ -139,6 +146,7 @@ where EngineCapabilities::new(OP_ENGINE_CAPABILITIES.iter().copied()), engine_validator, ctx.config.engine.accept_execution_requests_hash, + ctx.node.network().clone(), ); Ok(OpEngineApi::new(inner)) diff --git a/crates/optimism/node/tests/e2e-testsuite/testsuite.rs b/crates/optimism/node/tests/e2e-testsuite/testsuite.rs index 75dff49c14..b031b3a826 100644 --- a/crates/optimism/node/tests/e2e-testsuite/testsuite.rs +++ b/crates/optimism/node/tests/e2e-testsuite/testsuite.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{Address, B256}; +use alloy_primitives::{Address, B256, B64}; use eyre::Result; use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_e2e_test_utils::testsuite::{ @@ -53,3 +53,48 @@ async fn test_testsuite_op_assert_mine_block() -> Result<()> { Ok(()) } + +#[tokio::test] +async fn test_testsuite_op_assert_mine_block_isthmus_activated() -> Result<()> { + reth_tracing::init_test_tracing(); + + let setup = Setup::default() + .with_chain_spec(Arc::new( + OpChainSpecBuilder::default() + .chain(OP_MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .isthmus_activated() + .build() + .into(), + )) + .with_network(NetworkSetup::single_node()); + + let test = + TestBuilder::new().with_setup(setup).with_action(AssertMineBlock::::new( + 0, + vec![], + Some(B256::ZERO), + // TODO: refactor once we have actions to generate payload attributes. 
+ OpPayloadAttributes { + payload_attributes: alloy_rpc_types_engine::PayloadAttributes { + timestamp: std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_secs(), + prev_randao: B256::random(), + suggested_fee_recipient: Address::random(), + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }, + transactions: None, + no_tx_pool: None, + eip_1559_params: Some(B64::ZERO), + min_base_fee: None, + gas_limit: Some(30_000_000), + }, + )); + + test.run::().await?; + + Ok(()) +} diff --git a/crates/optimism/node/tests/it/custom_genesis.rs b/crates/optimism/node/tests/it/custom_genesis.rs new file mode 100644 index 0000000000..da19456650 --- /dev/null +++ b/crates/optimism/node/tests/it/custom_genesis.rs @@ -0,0 +1,123 @@ +//! Tests for custom genesis block number support. + +use alloy_consensus::BlockHeader; +use alloy_genesis::Genesis; +use alloy_primitives::B256; +use reth_chainspec::EthChainSpec; +use reth_db::test_utils::create_test_rw_db_with_path; +use reth_e2e_test_utils::{ + node::NodeTestContext, transaction::TransactionTestContext, wallet::Wallet, +}; +use reth_node_builder::{EngineNodeLauncher, Node, NodeBuilder, NodeConfig}; +use reth_node_core::args::DatadirArgs; +use reth_optimism_chainspec::OpChainSpecBuilder; +use reth_optimism_node::{utils::optimism_payload_attributes, OpNode}; +use reth_provider::{providers::BlockchainProvider, HeaderProvider, StageCheckpointReader}; +use reth_stages_types::StageId; +use std::sync::Arc; +use tokio::sync::Mutex; + +/// Tests that an OP node can initialize with a custom genesis block number. +#[tokio::test] +async fn test_op_node_custom_genesis_number() { + reth_tracing::init_test_tracing(); + + let genesis_number = 1000; + + // Create genesis with custom block number (1000) + let mut genesis: Genesis = + serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); + genesis.number = Some(genesis_number); + genesis.parent_hash = Some(B256::random()); + + let chain_spec = + Arc::new(OpChainSpecBuilder::base_mainnet().genesis(genesis).ecotone_activated().build()); + + let wallet = Arc::new(Mutex::new(Wallet::default().with_chain_id(chain_spec.chain().into()))); + + // Configure and launch the node + let config = NodeConfig::new(chain_spec.clone()).with_datadir_args(DatadirArgs { + datadir: reth_db::test_utils::tempdir_path().into(), + ..Default::default() + }); + let db = create_test_rw_db_with_path( + config + .datadir + .datadir + .unwrap_or_chain_default(config.chain.chain(), config.datadir.clone()) + .db(), + ); + let tasks = reth_tasks::TaskManager::current(); + let node_handle = NodeBuilder::new(config.clone()) + .with_database(db) + .with_types_and_provider::>() + .with_components(OpNode::default().components()) + .with_add_ons(OpNode::new(Default::default()).add_ons()) + .launch_with_fn(|builder| { + let launcher = EngineNodeLauncher::new( + tasks.executor(), + builder.config.datadir(), + Default::default(), + ); + builder.launch_with(launcher) + }) + .await + .expect("Failed to launch node"); + + let mut node = + NodeTestContext::new(node_handle.node, optimism_payload_attributes).await.unwrap(); + + // Verify stage checkpoints are initialized to genesis block number (1000) + for stage in StageId::ALL { + let checkpoint = node.inner.provider.get_stage_checkpoint(stage).unwrap(); + assert!(checkpoint.is_some(), "Stage {:?} checkpoint should exist", stage); + assert_eq!( + checkpoint.unwrap().block_number, + 1000, + "Stage {:?} checkpoint should be at genesis block 1000", + 
stage + ); + } + + // Query genesis block should succeed + let genesis_header = node.inner.provider.header_by_number(genesis_number).unwrap(); + assert!(genesis_header.is_some(), "Genesis block at {} should exist", genesis_number); + + // Query blocks before genesis should return None + for block_num in [0, 1, genesis_number - 1] { + let header = node.inner.provider.header_by_number(block_num).unwrap(); + assert!(header.is_none(), "Block {} before genesis should not exist", block_num); + } + + // Advance the chain with a single block + let _ = wallet; // wallet available for future use + let block_payloads = node + .advance(1, |_| { + Box::pin({ + let value = wallet.clone(); + async move { + let mut wallet = value.lock().await; + let tx_fut = TransactionTestContext::optimism_l1_block_info_tx( + wallet.chain_id, + wallet.inner.clone(), + wallet.inner_nonce, + ); + wallet.inner_nonce += 1; + + tx_fut.await + } + }) + }) + .await + .unwrap(); + + assert_eq!(block_payloads.len(), 1); + let block = block_payloads.first().unwrap().block(); + + // Verify the new block is at 1001 (genesis 1000 + 1) + assert_eq!( + block.number(), + 1001, + "Block number should be 1001 after advancing from genesis 100" + ); +} diff --git a/crates/optimism/node/tests/it/main.rs b/crates/optimism/node/tests/it/main.rs index fbd49d4c1c..87fa15a829 100644 --- a/crates/optimism/node/tests/it/main.rs +++ b/crates/optimism/node/tests/it/main.rs @@ -4,4 +4,8 @@ mod builder; mod priority; +mod rpc; + +mod custom_genesis; + const fn main() {} diff --git a/crates/optimism/node/tests/it/rpc.rs b/crates/optimism/node/tests/it/rpc.rs new file mode 100644 index 0000000000..8869975ea9 --- /dev/null +++ b/crates/optimism/node/tests/it/rpc.rs @@ -0,0 +1,41 @@ +//! RPC integration tests. + +use reth_network::types::NatResolver; +use reth_node_builder::{NodeBuilder, NodeHandle}; +use reth_node_core::{ + args::{NetworkArgs, RpcServerArgs}, + node_config::NodeConfig, +}; +use reth_optimism_chainspec::BASE_MAINNET; +use reth_optimism_node::OpNode; +use reth_rpc_api::servers::AdminApiServer; +use reth_tasks::TaskManager; + +// +#[tokio::test] +async fn test_admin_external_ip() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let exec = TaskManager::current(); + let exec = exec.executor(); + + let external_ip = "10.64.128.71".parse().unwrap(); + // Node setup + let node_config = NodeConfig::test() + .map_chain(BASE_MAINNET.clone()) + .with_network( + NetworkArgs::default().with_nat_resolver(NatResolver::ExternalIp(external_ip)), + ) + .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); + + let NodeHandle { node, node_exit_future: _ } = + NodeBuilder::new(node_config).testing_node(exec).node(OpNode::default()).launch().await?; + + let api = node.add_ons_handle.admin_api(); + + let info = api.node_info().await.unwrap(); + + assert_eq!(info.ip, external_ip); + + Ok(()) +} diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index e75075a12c..0674ed7cf7 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -25,7 +25,6 @@ reth-payload-builder-primitives.workspace = true reth-payload-util.workspace = true reth-payload-primitives = { workspace = true, features = ["op"] } reth-basic-payload-builder.workspace = true -reth-chain-state.workspace = true reth-payload-validator.workspace = true # op-reth @@ -52,3 +51,4 @@ tracing.workspace = true thiserror.workspace = true sha2.workspace = true serde.workspace = true +either.workspace = true diff --git 
a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 05f33d3b69..99bc07065a 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -1,9 +1,7 @@ //! Optimism payload builder implementation. use crate::{ - config::{OpBuilderConfig, OpDAConfig}, - error::OpPayloadBuilderError, - payload::OpBuiltPayload, - OpAttributes, OpPayloadBuilderAttributes, OpPayloadPrimitives, + config::OpBuilderConfig, error::OpPayloadBuilderError, payload::OpBuiltPayload, OpAttributes, + OpPayloadBuilderAttributes, OpPayloadPrimitives, }; use alloy_consensus::{BlockHeader, Transaction, Typed2718}; use alloy_evm::Evm as AlloyEvm; @@ -11,7 +9,6 @@ use alloy_primitives::{B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_engine::PayloadId; use reth_basic_payload_builder::*; -use reth_chain_state::ExecutedBlock; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_evm::{ block::BlockExecutorFor, @@ -23,14 +20,14 @@ use reth_evm::{ }; use reth_execution_types::ExecutionOutcome; use reth_optimism_forks::OpHardforks; -use reth_optimism_primitives::{transaction::OpTransaction, ADDRESS_L2_TO_L1_MESSAGE_PASSER}; +use reth_optimism_primitives::{transaction::OpTransaction, L2_TO_L1_MESSAGE_PASSER_ADDRESS}; use reth_optimism_txpool::{ estimated_da_size::DataAvailabilitySized, interop::{is_valid_interop, MaybeInteropTransaction}, OpPooledTx, }; use reth_payload_builder_primitives::PayloadBuilderError; -use reth_payload_primitives::{BuildNextEnv, PayloadBuilderAttributes}; +use reth_payload_primitives::{BuildNextEnv, BuiltPayloadExecutedBlock, PayloadBuilderAttributes}; use reth_payload_util::{BestPayloadTransactions, NoopPayloadTransactions, PayloadTransactions}; use reth_primitives_traits::{ HeaderTy, NodePrimitives, SealedHeader, SealedHeaderFor, SignedTransaction, TxTy, @@ -187,7 +184,7 @@ where let ctx = OpPayloadBuilderCtx { evm_config: self.evm_config.clone(), - da_config: self.config.da_config.clone(), + builder_config: self.config.clone(), chain_spec: self.client.chain_spec(), config, cancel, @@ -223,7 +220,7 @@ where let config = PayloadConfig { parent_header: Arc::new(parent), attributes }; let ctx = OpPayloadBuilderCtx { evm_config: self.evm_config.clone(), - da_config: self.config.da_config.clone(), + builder_config: self.config.clone(), chain_spec: self.client.chain_spec(), config, cancel: Default::default(), @@ -386,11 +383,12 @@ impl OpBuilder<'_, Txs> { ); // create the executed block data - let executed: ExecutedBlock = ExecutedBlock { + let executed: BuiltPayloadExecutedBlock = BuiltPayloadExecutedBlock { recovered_block: Arc::new(block), execution_output: Arc::new(execution_outcome), - hashed_state: Arc::new(hashed_state), - trie_updates: Arc::new(trie_updates), + // Keep unsorted; conversion to sorted happens when needed downstream + hashed_state: either::Either::Left(Arc::new(hashed_state)), + trie_updates: either::Either::Left(Arc::new(trie_updates)), }; let no_tx_pool = ctx.attributes().no_tx_pool(); @@ -437,7 +435,7 @@ impl OpBuilder<'_, Txs> { if ctx.chain_spec.is_isthmus_active_at_timestamp(ctx.attributes().timestamp()) { // force load `L2ToL1MessagePasser.sol` so l2 withdrawals root can be computed even if // no l2 withdrawals in block - _ = db.load_cache_account(ADDRESS_L2_TO_L1_MESSAGE_PASSER)?; + _ = db.load_cache_account(L2_TO_L1_MESSAGE_PASSER_ADDRESS)?; } let ExecutionWitnessRecord { hashed_state, codes, keys, lowest_block_number: _ } = @@ -550,8 +548,8 @@ pub struct 
OpPayloadBuilderCtx< > { /// The type that knows how to perform system calls and configure the evm. pub evm_config: Evm, - /// The DA config for the payload builder - pub da_config: OpDAConfig, + /// Additional config for the builder/sequencer, e.g. DA and gas limit + pub builder_config: OpBuilderConfig, /// The chainspec pub chain_spec: Arc, /// How to build the payload. @@ -684,9 +682,14 @@ where Builder: BlockBuilder, <::Evm as AlloyEvm>::DB: Database, { - let block_gas_limit = builder.evm_mut().block().gas_limit(); - let block_da_limit = self.da_config.max_da_block_size(); - let tx_da_limit = self.da_config.max_da_tx_size(); + let mut block_gas_limit = builder.evm_mut().block().gas_limit(); + if let Some(gas_limit_config) = self.builder_config.gas_limit_config.gas_limit() { + // If a gas limit is configured, use that limit as target if it's smaller, otherwise use + // the block's actual gas limit. + block_gas_limit = gas_limit_config.min(block_gas_limit); + }; + let block_da_limit = self.builder_config.da_config.max_da_block_size(); + let tx_da_limit = self.builder_config.da_config.max_da_tx_size(); let base_fee = builder.evm_mut().block().basefee(); while let Some(tx) = best_txs.next(()) { diff --git a/crates/optimism/payload/src/config.rs b/crates/optimism/payload/src/config.rs index 469bfc9fe3..c79ee0ece4 100644 --- a/crates/optimism/payload/src/config.rs +++ b/crates/optimism/payload/src/config.rs @@ -7,12 +7,14 @@ use std::sync::{atomic::AtomicU64, Arc}; pub struct OpBuilderConfig { /// Data availability configuration for the OP builder. pub da_config: OpDAConfig, + /// Gas limit configuration for the OP builder. + pub gas_limit_config: OpGasLimitConfig, } impl OpBuilderConfig { /// Creates a new OP builder configuration with the given data availability configuration. - pub const fn new(da_config: OpDAConfig) -> Self { - Self { da_config } + pub const fn new(da_config: OpDAConfig, gas_limit_config: OpGasLimitConfig) -> Self { + Self { da_config, gas_limit_config } } /// Returns the Data Availability configuration for the OP builder, if it has configured @@ -100,6 +102,40 @@ struct OpDAConfigInner { max_da_block_size: AtomicU64, } +/// Contains the Gas Limit configuration for the OP builder. +/// +/// This type is shareable and can be used to update the Gas Limit configuration for the OP payload +/// builder. +#[derive(Debug, Clone, Default)] +pub struct OpGasLimitConfig { + /// Gas limit for a transaction + /// + /// 0 means use the default gas limit. + gas_limit: Arc, +} + +impl OpGasLimitConfig { + /// Creates a new Gas Limit configuration with the given maximum gas limit. + pub fn new(max_gas_limit: u64) -> Self { + let this = Self::default(); + this.set_gas_limit(max_gas_limit); + this + } + /// Returns the gas limit for a transaction, if any. + pub fn gas_limit(&self) -> Option { + let val = self.gas_limit.load(std::sync::atomic::Ordering::Relaxed); + if val == 0 { + None + } else { + Some(val) + } + } + /// Sets the gas limit for a transaction. 0 means use the default gas limit. 
+ pub fn set_gas_limit(&self, gas_limit: u64) { + self.gas_limit.store(gas_limit, std::sync::atomic::Ordering::Relaxed); + } +} + #[cfg(test)] mod tests { use super::*; @@ -122,4 +158,14 @@ mod tests { let config = OpBuilderConfig::default(); assert!(config.constrained_da_config().is_none()); } + + #[test] + fn test_gas_limit() { + let gas_limit = OpGasLimitConfig::default(); + assert_eq!(gas_limit.gas_limit(), None); + gas_limit.set_gas_limit(50000); + assert_eq!(gas_limit.gas_limit(), Some(50000)); + gas_limit.set_gas_limit(0); + assert_eq!(gas_limit.gas_limit(), None); + } } diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 6f530acd85..3f7b3d401e 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -16,12 +16,13 @@ use op_alloy_consensus::{encode_holocene_extra_data, encode_jovian_extra_data, E use op_alloy_rpc_types_engine::{ OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4, OpExecutionPayloadV4, }; -use reth_chain_state::ExecutedBlock; use reth_chainspec::EthChainSpec; use reth_optimism_evm::OpNextBlockEnvAttributes; use reth_optimism_forks::OpHardforks; use reth_payload_builder::{EthPayloadBuilderAttributes, PayloadBuilderError}; -use reth_payload_primitives::{BuildNextEnv, BuiltPayload, PayloadBuilderAttributes}; +use reth_payload_primitives::{ + BuildNextEnv, BuiltPayload, BuiltPayloadExecutedBlock, PayloadBuilderAttributes, +}; use reth_primitives_traits::{ NodePrimitives, SealedBlock, SealedHeader, SignedTransaction, WithEncoded, }; @@ -176,7 +177,7 @@ pub struct OpBuiltPayload { /// Sealed block pub(crate) block: Arc>, /// Block execution data for the payload, if any. - pub(crate) executed_block: Option>, + pub(crate) executed_block: Option>, /// The fees of the block pub(crate) fees: U256, } @@ -189,7 +190,7 @@ impl OpBuiltPayload { id: PayloadId, block: Arc>, fees: U256, - executed_block: Option>, + executed_block: Option>, ) -> Self { Self { id, block, fees, executed_block } } @@ -226,7 +227,7 @@ impl BuiltPayload for OpBuiltPayload { self.fees } - fn executed_block(&self) -> Option> { + fn executed_block(&self) -> Option> { self.executed_block.clone() } @@ -342,6 +343,9 @@ where /// Generates the payload id for the configured payload from the [`OpPayloadAttributes`]. /// /// Returns an 8-byte identifier by hashing the payload components with sha256 hash. +/// +/// Note: This must be updated whenever the [`OpPayloadAttributes`] changes for a hardfork. 
+/// See also pub fn payload_id_optimism( parent: &B256, attributes: &OpPayloadAttributes, @@ -387,8 +391,14 @@ pub fn payload_id_optimism( hasher.update(eip_1559_params.as_slice()); } + if let Some(min_base_fee) = attributes.min_base_fee { + hasher.update(min_base_fee.to_be_bytes()); + } + let mut out = hasher.finalize(); out[0] = payload_version; + + #[allow(deprecated)] // generic-array 0.14 deprecated PayloadId::new(out.as_slice()[..8].try_into().expect("sufficient length")) } @@ -473,6 +483,37 @@ mod tests { ); } + #[test] + fn test_payload_id_parity_op_geth_jovian() { + // + let expected = + PayloadId::new(FixedBytes::<8>::from_str("0x046c65ffc4d659ec").unwrap().into()); + let attrs = OpPayloadAttributes { + payload_attributes: PayloadAttributes { + timestamp: 1728933301, + prev_randao: b256!("0x9158595abbdab2c90635087619aa7042bbebe47642dfab3c9bfb934f6b082765"), + suggested_fee_recipient: address!("0x4200000000000000000000000000000000000011"), + withdrawals: Some([].into()), + parent_beacon_block_root: b256!("0x8fe0193b9bf83cb7e5a08538e494fecc23046aab9a497af3704f4afdae3250ff").into(), + }, + transactions: Some([bytes!("7ef8f8a0dc19cfa777d90980e4875d0a548a881baaa3f83f14d1bc0d3038bc329350e54194deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e20000f424000000000000000000000000300000000670d6d890000000000000125000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000014bf9181db6e381d4384bbf69c48b0ee0eed23c6ca26143c6d2544f9d39997a590000000000000000000000007f83d659683caf2767fd3c720981d51f5bc365bc")].into()), + no_tx_pool: None, + gas_limit: Some(30000000), + eip_1559_params: None, + min_base_fee: Some(100), + }; + + // Reth's `PayloadId` should match op-geth's `PayloadId`. 
This fails + assert_eq!( + expected, + payload_id_optimism( + &b256!("0x3533bf30edaf9505d0810bf475cbe4e5f4b9889904b9845e83efdeab4e92eb1e"), + &attrs, + EngineApiMessageVersion::V4 as u8 + ) + ); + } + #[test] fn test_get_extra_data_post_holocene() { let attributes: OpPayloadBuilderAttributes = diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index 30257049c2..ef83fe3ddb 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -14,8 +14,6 @@ workspace = true [dependencies] # reth reth-primitives-traits = { workspace = true, features = ["op"] } -reth-codecs = { workspace = true, optional = true, features = ["op"] } -reth-zstd-compressors = { workspace = true, optional = true } # ethereum alloy-primitives.workspace = true @@ -27,17 +25,15 @@ alloy-rlp.workspace = true op-alloy-consensus.workspace = true # codec -bytes = { workspace = true, optional = true } -modular-bitfield = { workspace = true, optional = true } serde = { workspace = true, optional = true } serde_with = { workspace = true, optional = true } -# test -arbitrary = { workspace = true, features = ["derive"], optional = true } - [dev-dependencies] reth-codecs = { workspace = true, features = ["test-utils", "op"] } +bytes.workspace = true +modular-bitfield.workspace = true +reth-zstd-compressors.workspace = true rand.workspace = true arbitrary.workspace = true rstest.workspace = true @@ -53,41 +49,35 @@ secp256k1 = { workspace = true, features = ["rand"] } default = ["std"] std = [ "reth-primitives-traits/std", - "reth-codecs?/std", "alloy-consensus/std", "alloy-primitives/std", "serde?/std", - "bytes?/std", "alloy-rlp/std", - "reth-zstd-compressors?/std", "op-alloy-consensus/std", "serde_json/std", "serde_with?/std", "alloy-eips/std", "secp256k1/std", + "bytes/std", + "reth-zstd-compressors/std", ] alloy-compat = ["op-alloy-consensus/alloy-compat"] reth-codec = [ - "dep:reth-codecs", "std", "reth-primitives-traits/reth-codec", - "reth-codecs?/op", - "dep:bytes", - "dep:modular-bitfield", - "dep:reth-zstd-compressors", ] serde = [ "dep:serde", "reth-primitives-traits/serde", "alloy-primitives/serde", "alloy-consensus/serde", - "bytes?/serde", - "reth-codecs?/serde", "op-alloy-consensus/serde", "alloy-eips/serde", "rand/serde", "rand_08/serde", "secp256k1/serde", + "bytes/serde", + "reth-codecs/serde", ] serde-bincode-compat = [ "serde", @@ -99,11 +89,10 @@ serde-bincode-compat = [ ] arbitrary = [ "std", - "dep:arbitrary", "reth-primitives-traits/arbitrary", - "reth-codecs?/arbitrary", "op-alloy-consensus/arbitrary", "alloy-consensus/arbitrary", "alloy-primitives/arbitrary", "alloy-eips/arbitrary", + "reth-codecs/arbitrary", ] diff --git a/crates/optimism/primitives/src/lib.rs b/crates/optimism/primitives/src/lib.rs index 8100d70c91..0664732f2a 100644 --- a/crates/optimism/primitives/src/lib.rs +++ b/crates/optimism/primitives/src/lib.rs @@ -13,14 +13,15 @@ extern crate alloc; pub mod bedrock; -pub mod predeploys; -pub use predeploys::ADDRESS_L2_TO_L1_MESSAGE_PASSER; +// Re-export predeploys from op-alloy-consensus +pub use op_alloy_consensus::L2_TO_L1_MESSAGE_PASSER_ADDRESS; pub mod transaction; pub use transaction::*; mod receipt; -pub use receipt::{DepositReceipt, OpReceipt}; +pub use op_alloy_consensus::OpReceipt; +pub use receipt::DepositReceipt; /// Optimism-specific block type. 
pub type OpBlock = alloy_consensus::Block; @@ -44,6 +45,6 @@ impl reth_primitives_traits::NodePrimitives for OpPrimitives { /// Bincode-compatible serde implementations. #[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { - pub use super::receipt::serde_bincode_compat::*; - pub use op_alloy_consensus::serde_bincode_compat::*; + pub use super::receipt::serde_bincode_compat::OpReceipt as LocalOpReceipt; + pub use op_alloy_consensus::serde_bincode_compat::OpReceipt; } diff --git a/crates/optimism/primitives/src/predeploys.rs b/crates/optimism/primitives/src/predeploys.rs deleted file mode 100644 index fe52b4f1cb..0000000000 --- a/crates/optimism/primitives/src/predeploys.rs +++ /dev/null @@ -1,8 +0,0 @@ -//! Addresses of OP pre-deploys. -// todo: move to op-alloy - -use alloy_primitives::{address, Address}; - -/// The L2 contract `L2ToL1MessagePasser`, stores commitments to withdrawal transactions. -pub const ADDRESS_L2_TO_L1_MESSAGE_PASSER: Address = - address!("0x4200000000000000000000000000000000000016"); diff --git a/crates/optimism/primitives/src/receipt.rs b/crates/optimism/primitives/src/receipt.rs index 74f21eab11..1ed7cde2c9 100644 --- a/crates/optimism/primitives/src/receipt.rs +++ b/crates/optimism/primitives/src/receipt.rs @@ -9,409 +9,9 @@ use alloy_eips::{ }; use alloy_primitives::{Bloom, Log}; use alloy_rlp::{BufMut, Decodable, Encodable, Header}; -use op_alloy_consensus::{OpDepositReceipt, OpTxType}; +use op_alloy_consensus::{OpDepositReceipt, OpReceipt, OpTxType}; use reth_primitives_traits::InMemorySize; -/// Typed ethereum transaction receipt. -/// Receipt containing result of transaction execution. -#[derive(Clone, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[cfg_attr(feature = "reth-codec", reth_codecs::add_arbitrary_tests(rlp))] -pub enum OpReceipt { - /// Legacy receipt - Legacy(Receipt), - /// EIP-2930 receipt - Eip2930(Receipt), - /// EIP-1559 receipt - Eip1559(Receipt), - /// EIP-7702 receipt - Eip7702(Receipt), - /// Deposit receipt - Deposit(OpDepositReceipt), -} - -impl OpReceipt { - /// Returns [`OpTxType`] of the receipt. - pub const fn tx_type(&self) -> OpTxType { - match self { - Self::Legacy(_) => OpTxType::Legacy, - Self::Eip2930(_) => OpTxType::Eip2930, - Self::Eip1559(_) => OpTxType::Eip1559, - Self::Eip7702(_) => OpTxType::Eip7702, - Self::Deposit(_) => OpTxType::Deposit, - } - } - - /// Returns inner [`Receipt`], - pub const fn as_receipt(&self) -> &Receipt { - match self { - Self::Legacy(receipt) | - Self::Eip2930(receipt) | - Self::Eip1559(receipt) | - Self::Eip7702(receipt) => receipt, - Self::Deposit(receipt) => &receipt.inner, - } - } - - /// Returns a mutable reference to the inner [`Receipt`], - pub const fn as_receipt_mut(&mut self) -> &mut Receipt { - match self { - Self::Legacy(receipt) | - Self::Eip2930(receipt) | - Self::Eip1559(receipt) | - Self::Eip7702(receipt) => receipt, - Self::Deposit(receipt) => &mut receipt.inner, - } - } - - /// Consumes this and returns the inner [`Receipt`]. - pub fn into_receipt(self) -> Receipt { - match self { - Self::Legacy(receipt) | - Self::Eip2930(receipt) | - Self::Eip1559(receipt) | - Self::Eip7702(receipt) => receipt, - Self::Deposit(receipt) => receipt.inner, - } - } - - /// Returns length of RLP-encoded receipt fields with the given [`Bloom`] without an RLP header. 
- pub fn rlp_encoded_fields_length(&self, bloom: &Bloom) -> usize { - match self { - Self::Legacy(receipt) | - Self::Eip2930(receipt) | - Self::Eip1559(receipt) | - Self::Eip7702(receipt) => receipt.rlp_encoded_fields_length_with_bloom(bloom), - Self::Deposit(receipt) => receipt.rlp_encoded_fields_length_with_bloom(bloom), - } - } - - /// RLP-encodes receipt fields with the given [`Bloom`] without an RLP header. - pub fn rlp_encode_fields(&self, bloom: &Bloom, out: &mut dyn BufMut) { - match self { - Self::Legacy(receipt) | - Self::Eip2930(receipt) | - Self::Eip1559(receipt) | - Self::Eip7702(receipt) => receipt.rlp_encode_fields_with_bloom(bloom, out), - Self::Deposit(receipt) => receipt.rlp_encode_fields_with_bloom(bloom, out), - } - } - - /// Returns RLP header for inner encoding. - pub fn rlp_header_inner(&self, bloom: &Bloom) -> Header { - Header { list: true, payload_length: self.rlp_encoded_fields_length(bloom) } - } - - /// Returns RLP header for inner encoding without bloom. - pub fn rlp_header_inner_without_bloom(&self) -> Header { - Header { list: true, payload_length: self.rlp_encoded_fields_length_without_bloom() } - } - - /// RLP-decodes the receipt from the provided buffer. This does not expect a type byte or - /// network header. - pub fn rlp_decode_inner( - buf: &mut &[u8], - tx_type: OpTxType, - ) -> alloy_rlp::Result> { - match tx_type { - OpTxType::Legacy => { - let ReceiptWithBloom { receipt, logs_bloom } = - RlpDecodableReceipt::rlp_decode_with_bloom(buf)?; - Ok(ReceiptWithBloom { receipt: Self::Legacy(receipt), logs_bloom }) - } - OpTxType::Eip2930 => { - let ReceiptWithBloom { receipt, logs_bloom } = - RlpDecodableReceipt::rlp_decode_with_bloom(buf)?; - Ok(ReceiptWithBloom { receipt: Self::Eip2930(receipt), logs_bloom }) - } - OpTxType::Eip1559 => { - let ReceiptWithBloom { receipt, logs_bloom } = - RlpDecodableReceipt::rlp_decode_with_bloom(buf)?; - Ok(ReceiptWithBloom { receipt: Self::Eip1559(receipt), logs_bloom }) - } - OpTxType::Eip7702 => { - let ReceiptWithBloom { receipt, logs_bloom } = - RlpDecodableReceipt::rlp_decode_with_bloom(buf)?; - Ok(ReceiptWithBloom { receipt: Self::Eip7702(receipt), logs_bloom }) - } - OpTxType::Deposit => { - let ReceiptWithBloom { receipt, logs_bloom } = - RlpDecodableReceipt::rlp_decode_with_bloom(buf)?; - Ok(ReceiptWithBloom { receipt: Self::Deposit(receipt), logs_bloom }) - } - } - } - - /// RLP-encodes receipt fields without an RLP header. - pub fn rlp_encode_fields_without_bloom(&self, out: &mut dyn BufMut) { - match self { - Self::Legacy(receipt) | - Self::Eip2930(receipt) | - Self::Eip1559(receipt) | - Self::Eip7702(receipt) => { - receipt.status.encode(out); - receipt.cumulative_gas_used.encode(out); - receipt.logs.encode(out); - } - Self::Deposit(receipt) => { - receipt.inner.status.encode(out); - receipt.inner.cumulative_gas_used.encode(out); - receipt.inner.logs.encode(out); - if let Some(nonce) = receipt.deposit_nonce { - nonce.encode(out); - } - if let Some(version) = receipt.deposit_receipt_version { - version.encode(out); - } - } - } - } - - /// Returns length of RLP-encoded receipt fields without an RLP header. 
- pub fn rlp_encoded_fields_length_without_bloom(&self) -> usize { - match self { - Self::Legacy(receipt) | - Self::Eip2930(receipt) | - Self::Eip1559(receipt) | - Self::Eip7702(receipt) => { - receipt.status.length() + - receipt.cumulative_gas_used.length() + - receipt.logs.length() - } - Self::Deposit(receipt) => { - receipt.inner.status.length() + - receipt.inner.cumulative_gas_used.length() + - receipt.inner.logs.length() + - receipt.deposit_nonce.map_or(0, |nonce| nonce.length()) + - receipt.deposit_receipt_version.map_or(0, |version| version.length()) - } - } - } - - /// RLP-decodes the receipt from the provided buffer without bloom. - pub fn rlp_decode_inner_without_bloom( - buf: &mut &[u8], - tx_type: OpTxType, - ) -> alloy_rlp::Result { - let header = Header::decode(buf)?; - if !header.list { - return Err(alloy_rlp::Error::UnexpectedString); - } - - let remaining = buf.len(); - let status = Decodable::decode(buf)?; - let cumulative_gas_used = Decodable::decode(buf)?; - let logs = Decodable::decode(buf)?; - - let mut deposit_nonce = None; - let mut deposit_receipt_version = None; - - // For deposit receipts, try to decode nonce and version if they exist - if tx_type == OpTxType::Deposit && buf.len() + header.payload_length > remaining { - deposit_nonce = Some(Decodable::decode(buf)?); - if buf.len() + header.payload_length > remaining { - deposit_receipt_version = Some(Decodable::decode(buf)?); - } - } - - if buf.len() + header.payload_length != remaining { - return Err(alloy_rlp::Error::UnexpectedLength); - } - - match tx_type { - OpTxType::Legacy => Ok(Self::Legacy(Receipt { status, cumulative_gas_used, logs })), - OpTxType::Eip2930 => Ok(Self::Eip2930(Receipt { status, cumulative_gas_used, logs })), - OpTxType::Eip1559 => Ok(Self::Eip1559(Receipt { status, cumulative_gas_used, logs })), - OpTxType::Eip7702 => Ok(Self::Eip7702(Receipt { status, cumulative_gas_used, logs })), - OpTxType::Deposit => Ok(Self::Deposit(OpDepositReceipt { - inner: Receipt { status, cumulative_gas_used, logs }, - deposit_nonce, - deposit_receipt_version, - })), - } - } -} - -impl Eip2718EncodableReceipt for OpReceipt { - fn eip2718_encoded_length_with_bloom(&self, bloom: &Bloom) -> usize { - !self.tx_type().is_legacy() as usize + self.rlp_header_inner(bloom).length_with_payload() - } - - fn eip2718_encode_with_bloom(&self, bloom: &Bloom, out: &mut dyn BufMut) { - if !self.tx_type().is_legacy() { - out.put_u8(self.tx_type() as u8); - } - self.rlp_header_inner(bloom).encode(out); - self.rlp_encode_fields(bloom, out); - } -} - -impl RlpEncodableReceipt for OpReceipt { - fn rlp_encoded_length_with_bloom(&self, bloom: &Bloom) -> usize { - let mut len = self.eip2718_encoded_length_with_bloom(bloom); - if !self.tx_type().is_legacy() { - len += Header { - list: false, - payload_length: self.eip2718_encoded_length_with_bloom(bloom), - } - .length(); - } - - len - } - - fn rlp_encode_with_bloom(&self, bloom: &Bloom, out: &mut dyn BufMut) { - if !self.tx_type().is_legacy() { - Header { list: false, payload_length: self.eip2718_encoded_length_with_bloom(bloom) } - .encode(out); - } - self.eip2718_encode_with_bloom(bloom, out); - } -} - -impl RlpDecodableReceipt for OpReceipt { - fn rlp_decode_with_bloom(buf: &mut &[u8]) -> alloy_rlp::Result> { - let header_buf = &mut &**buf; - let header = Header::decode(header_buf)?; - - // Legacy receipt, reuse initial buffer without advancing - if header.list { - return Self::rlp_decode_inner(buf, OpTxType::Legacy) - } - - // Otherwise, advance the buffer and try decoding type 
flag followed by receipt - *buf = *header_buf; - - let remaining = buf.len(); - let tx_type = OpTxType::decode(buf)?; - let this = Self::rlp_decode_inner(buf, tx_type)?; - - if buf.len() + header.payload_length != remaining { - return Err(alloy_rlp::Error::UnexpectedLength); - } - - Ok(this) - } -} - -impl Encodable2718 for OpReceipt { - fn encode_2718_len(&self) -> usize { - !self.tx_type().is_legacy() as usize + - self.rlp_header_inner_without_bloom().length_with_payload() - } - - fn encode_2718(&self, out: &mut dyn BufMut) { - if !self.tx_type().is_legacy() { - out.put_u8(self.tx_type() as u8); - } - self.rlp_header_inner_without_bloom().encode(out); - self.rlp_encode_fields_without_bloom(out); - } -} - -impl Decodable2718 for OpReceipt { - fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result { - Ok(Self::rlp_decode_inner_without_bloom(buf, OpTxType::try_from(ty)?)?) - } - - fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result { - Ok(Self::rlp_decode_inner_without_bloom(buf, OpTxType::Legacy)?) - } -} - -impl Encodable for OpReceipt { - fn encode(&self, out: &mut dyn BufMut) { - self.network_encode(out); - } - - fn length(&self) -> usize { - self.network_len() - } -} - -impl Decodable for OpReceipt { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - Ok(Self::network_decode(buf)?) - } -} - -impl TxReceipt for OpReceipt { - type Log = Log; - - fn status_or_post_state(&self) -> Eip658Value { - self.as_receipt().status_or_post_state() - } - - fn status(&self) -> bool { - self.as_receipt().status() - } - - fn bloom(&self) -> Bloom { - self.as_receipt().bloom() - } - - fn cumulative_gas_used(&self) -> u64 { - self.as_receipt().cumulative_gas_used() - } - - fn logs(&self) -> &[Log] { - self.as_receipt().logs() - } - - fn into_logs(self) -> Vec { - match self { - Self::Legacy(receipt) | - Self::Eip2930(receipt) | - Self::Eip1559(receipt) | - Self::Eip7702(receipt) => receipt.logs, - Self::Deposit(receipt) => receipt.inner.logs, - } - } -} - -impl Typed2718 for OpReceipt { - fn ty(&self) -> u8 { - self.tx_type().into() - } -} - -impl IsTyped2718 for OpReceipt { - fn is_type(type_id: u8) -> bool { - ::is_type(type_id) - } -} - -impl InMemorySize for OpReceipt { - fn size(&self) -> usize { - self.as_receipt().size() - } -} - -impl From for OpReceipt { - fn from(envelope: op_alloy_consensus::OpReceiptEnvelope) -> Self { - match envelope { - op_alloy_consensus::OpReceiptEnvelope::Legacy(receipt) => Self::Legacy(receipt.receipt), - op_alloy_consensus::OpReceiptEnvelope::Eip2930(receipt) => { - Self::Eip2930(receipt.receipt) - } - op_alloy_consensus::OpReceiptEnvelope::Eip1559(receipt) => { - Self::Eip1559(receipt.receipt) - } - op_alloy_consensus::OpReceiptEnvelope::Eip7702(receipt) => { - Self::Eip7702(receipt.receipt) - } - op_alloy_consensus::OpReceiptEnvelope::Deposit(receipt) => { - Self::Deposit(OpDepositReceipt { - deposit_nonce: receipt.receipt.deposit_nonce, - deposit_receipt_version: receipt.receipt.deposit_receipt_version, - inner: receipt.receipt.inner, - }) - } - } - } -} - /// Trait for deposit receipt. pub trait DepositReceipt: reth_primitives_traits::Receipt { /// Converts a `Receipt` into a mutable Optimism deposit receipt. 
@@ -437,100 +37,6 @@ impl DepositReceipt for OpReceipt { } } -#[cfg(feature = "reth-codec")] -mod compact { - use super::*; - use alloc::borrow::Cow; - use reth_codecs::Compact; - - #[derive(reth_codecs::CompactZstd)] - #[reth_zstd( - compressor = reth_zstd_compressors::RECEIPT_COMPRESSOR, - decompressor = reth_zstd_compressors::RECEIPT_DECOMPRESSOR - )] - struct CompactOpReceipt<'a> { - tx_type: OpTxType, - success: bool, - cumulative_gas_used: u64, - #[expect(clippy::owned_cow)] - logs: Cow<'a, Vec>, - deposit_nonce: Option, - deposit_receipt_version: Option, - } - - impl<'a> From<&'a OpReceipt> for CompactOpReceipt<'a> { - fn from(receipt: &'a OpReceipt) -> Self { - Self { - tx_type: receipt.tx_type(), - success: receipt.status(), - cumulative_gas_used: receipt.cumulative_gas_used(), - logs: Cow::Borrowed(&receipt.as_receipt().logs), - deposit_nonce: if let OpReceipt::Deposit(receipt) = receipt { - receipt.deposit_nonce - } else { - None - }, - deposit_receipt_version: if let OpReceipt::Deposit(receipt) = receipt { - receipt.deposit_receipt_version - } else { - None - }, - } - } - } - - impl From> for OpReceipt { - fn from(receipt: CompactOpReceipt<'_>) -> Self { - let CompactOpReceipt { - tx_type, - success, - cumulative_gas_used, - logs, - deposit_nonce, - deposit_receipt_version, - } = receipt; - - let inner = - Receipt { status: success.into(), cumulative_gas_used, logs: logs.into_owned() }; - - match tx_type { - OpTxType::Legacy => Self::Legacy(inner), - OpTxType::Eip2930 => Self::Eip2930(inner), - OpTxType::Eip1559 => Self::Eip1559(inner), - OpTxType::Eip7702 => Self::Eip7702(inner), - OpTxType::Deposit => Self::Deposit(OpDepositReceipt { - inner, - deposit_nonce, - deposit_receipt_version, - }), - } - } - } - - impl Compact for OpReceipt { - fn to_compact(&self, buf: &mut B) -> usize - where - B: bytes::BufMut + AsMut<[u8]>, - { - CompactOpReceipt::from(self).to_compact(buf) - } - - fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (receipt, buf) = CompactOpReceipt::from_compact(buf, len); - (receipt.into(), buf) - } - } - - #[cfg(test)] - #[test] - fn test_ensure_backwards_compatibility() { - use reth_codecs::{test_utils::UnusedBits, validate_bitflag_backwards_compat}; - - assert_eq!(CompactOpReceipt::bitflag_encoded_bytes(), 2); - validate_bitflag_backwards_compat!(CompactOpReceipt<'_>, UnusedBits::NotZero); - } -} - #[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] pub(super) mod serde_bincode_compat { use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -540,17 +46,21 @@ pub(super) mod serde_bincode_compat { /// /// Intended to use with the [`serde_with::serde_as`] macro in the following way: /// ```rust - /// use reth_optimism_primitives::{serde_bincode_compat, OpReceipt}; + /// use reth_optimism_primitives::OpReceipt; + /// use reth_primitives_traits::serde_bincode_compat::SerdeBincodeCompat; /// use serde::{de::DeserializeOwned, Deserialize, Serialize}; /// use serde_with::serde_as; /// /// #[serde_as] /// #[derive(Serialize, Deserialize)] /// struct Data { - /// #[serde_as(as = "serde_bincode_compat::OpReceipt<'_>")] + /// #[serde_as( + /// as = "reth_primitives_traits::serde_bincode_compat::BincodeReprFor<'_, OpReceipt>" + /// )] /// receipt: OpReceipt, /// } /// ``` + #[allow(rustdoc::private_doc_tests)] #[derive(Debug, Serialize, Deserialize)] pub enum OpReceipt<'a> { /// Legacy receipt @@ -609,18 +119,6 @@ pub(super) mod serde_bincode_compat { } } - impl reth_primitives_traits::serde_bincode_compat::SerdeBincodeCompat for 
super::OpReceipt { - type BincodeRepr<'a> = OpReceipt<'a>; - - fn as_repr(&self) -> Self::BincodeRepr<'_> { - self.into() - } - - fn from_repr(repr: Self::BincodeRepr<'_>) -> Self { - repr.into() - } - } - #[cfg(test)] mod tests { use crate::{receipt::serde_bincode_compat, OpReceipt}; @@ -678,7 +176,7 @@ mod tests { let mut data = Vec::with_capacity(expected.length()); let receipt = ReceiptWithBloom { - receipt: OpReceipt::Legacy(Receipt { + receipt: OpReceipt::Legacy(Receipt:: { status: Eip658Value::Eip658(false), cumulative_gas_used: 0x1, logs: vec![Log::new_unchecked( @@ -709,7 +207,7 @@ mod tests { // EIP658Receipt let expected = ReceiptWithBloom { - receipt: OpReceipt::Legacy(Receipt { + receipt: OpReceipt::Legacy(Receipt:: { status: Eip658Value::Eip658(false), cumulative_gas_used: 0x1, logs: vec![Log::new_unchecked( @@ -737,7 +235,7 @@ mod tests { // Deposit Receipt (post-regolith) let expected = ReceiptWithBloom { receipt: OpReceipt::Deposit(OpDepositReceipt { - inner: Receipt { + inner: Receipt:: { status: Eip658Value::Eip658(true), cumulative_gas_used: 46913, logs: vec![], @@ -762,10 +260,10 @@ mod tests { "b901117ef9010d0182b741b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0833d3bbf01" ); - // Deposit Receipt (post-regolith) + // Deposit Receipt (post-canyon) let expected = ReceiptWithBloom { receipt: OpReceipt::Deposit(OpDepositReceipt { - inner: Receipt { + inner: Receipt:: { status: Eip658Value::Eip658(true), cumulative_gas_used: 46913, logs: vec![], @@ -786,7 +284,7 @@ mod tests { #[test] fn gigantic_receipt() { - let receipt = OpReceipt::Legacy(Receipt { + let receipt = OpReceipt::Legacy(Receipt:: { status: Eip658Value::Eip658(true), cumulative_gas_used: 16747627, logs: vec![ @@ -816,7 +314,7 @@ mod tests { #[test] fn test_encode_2718_length() { let receipt = ReceiptWithBloom { - receipt: OpReceipt::Eip1559(Receipt { + receipt: OpReceipt::Eip1559(Receipt:: { status: Eip658Value::Eip658(true), cumulative_gas_used: 21000, logs: vec![], @@ -833,7 +331,7 @@ mod tests { // Test for legacy receipt as well let legacy_receipt = ReceiptWithBloom { - receipt: OpReceipt::Legacy(Receipt { + receipt: OpReceipt::Legacy(Receipt:: { status: Eip658Value::Eip658(true), cumulative_gas_used: 21000, logs: vec![], diff --git a/crates/optimism/primitives/src/transaction/signed.rs b/crates/optimism/primitives/src/transaction/signed.rs index 820cc11271..fc2f63abd8 100644 --- a/crates/optimism/primitives/src/transaction/signed.rs +++ b/crates/optimism/primitives/src/transaction/signed.rs @@ -9,7 +9,7 @@ use alloy_consensus::{ Typed2718, }; use alloy_eips::{ - eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, + eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718, IsTyped2718}, eip2930::AccessList, eip7702::SignedAuthorization, }; @@ -148,6 +148,12 @@ impl TxHashRef for OpTransactionSigned { } } +impl IsTyped2718 for OpTransactionSigned { + fn is_type(type_id: u8) -> bool { + ::is_type(type_id) + } +} + impl SignedTransaction for OpTransactionSigned { fn recalculate_hash(&self) -> B256 { 
keccak256(self.encoded_2718()) diff --git a/crates/optimism/reth/Cargo.toml b/crates/optimism/reth/Cargo.toml index 384eca45b8..f18f0e10db 100644 --- a/crates/optimism/reth/Cargo.toml +++ b/crates/optimism/reth/Cargo.toml @@ -74,7 +74,10 @@ arbitrary = [ "reth-eth-wire?/arbitrary", "reth-codecs?/arbitrary", ] - +keccak-cache-global = [ + "reth-optimism-node?/keccak-cache-global", + "reth-node-core?/keccak-cache-global", +] test-utils = [ "reth-chainspec/test-utils", "reth-consensus?/test-utils", @@ -126,8 +129,27 @@ rpc = [ "dep:reth-optimism-rpc", ] tasks = ["dep:reth-tasks"] -js-tracer = ["rpc", "reth-rpc/js-tracer"] +jemalloc = [ + "reth-cli-util?/jemalloc", + "reth-node-core?/jemalloc", + "reth-optimism-cli?/jemalloc", +] +js-tracer = [ + "rpc", + "reth-rpc/js-tracer", + "reth-node-builder?/js-tracer", + "reth-optimism-node?/js-tracer", + "reth-rpc-eth-types?/js-tracer", +] network = ["dep:reth-network", "tasks", "dep:reth-network-api", "dep:reth-eth-wire"] +otlp = [ + "reth-node-core?/otlp", + "reth-optimism-cli?/otlp", +] +portable = [ + "reth-optimism-evm?/portable", + "reth-revm?/portable", +] provider = ["storage-api", "tasks", "dep:reth-provider", "dep:reth-db", "dep:reth-codecs"] pool = ["dep:reth-transaction-pool"] storage-api = ["dep:reth-storage-api"] diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index acbc491f64..5d926caf15 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -28,7 +28,6 @@ reth-node-builder.workspace = true reth-chainspec.workspace = true reth-chain-state.workspace = true reth-rpc-engine-api.workspace = true -reth-rpc-convert.workspace = true # op-reth reth-optimism-evm.workspace = true @@ -84,6 +83,7 @@ metrics.workspace = true [dev-dependencies] reth-optimism-chainspec.workspace = true +alloy-op-hardforks.workspace = true [features] client = [ diff --git a/crates/optimism/rpc/src/error.rs b/crates/optimism/rpc/src/error.rs index 40d34ef7cc..b457ce9d9c 100644 --- a/crates/optimism/rpc/src/error.rs +++ b/crates/optimism/rpc/src/error.rs @@ -1,6 +1,7 @@ //! RPC errors specific to OP. use alloy_json_rpc::ErrorPayload; +use alloy_primitives::Bytes; use alloy_rpc_types_eth::{error::EthRpcErrorCode, BlockError}; use alloy_transport::{RpcError, TransportErrorKind}; use jsonrpsee_types::error::{INTERNAL_ERROR_CODE, INVALID_PARAMS_CODE}; @@ -8,7 +9,10 @@ use op_revm::{OpHaltReason, OpTransactionError}; use reth_evm::execute::ProviderError; use reth_optimism_evm::OpBlockExecutionError; use reth_rpc_eth_api::{AsEthApiError, EthTxEnvError, TransactionConversionError}; -use reth_rpc_eth_types::{error::api::FromEvmHalt, EthApiError}; +use reth_rpc_eth_types::{ + error::api::{FromEvmHalt, FromRevert}, + EthApiError, +}; use reth_rpc_server_types::result::{internal_rpc_err, rpc_err}; use revm::context_interface::result::{EVMError, InvalidTransaction}; use std::{convert::Infallible, fmt::Display}; @@ -67,6 +71,9 @@ pub enum OpInvalidTransactionError { /// A deposit transaction halted post-regolith #[error("deposit transaction halted after regolith")] HaltedDepositPostRegolith, + /// The encoded transaction was missing during evm execution. + #[error("missing enveloped transaction bytes")] + MissingEnvelopedTx, /// Transaction conditional errors. 
#[error(transparent)] TxConditionalErr(#[from] TxConditionalErr), @@ -76,7 +83,8 @@ impl From for jsonrpsee_types::error::ErrorObject<'st fn from(err: OpInvalidTransactionError) -> Self { match err { OpInvalidTransactionError::DepositSystemTxPostRegolith | - OpInvalidTransactionError::HaltedDepositPostRegolith => { + OpInvalidTransactionError::HaltedDepositPostRegolith | + OpInvalidTransactionError::MissingEnvelopedTx => { rpc_err(EthRpcErrorCode::TransactionRejected.code(), err.to_string(), None) } OpInvalidTransactionError::TxConditionalErr(_) => err.into(), @@ -93,6 +101,7 @@ impl TryFrom for OpInvalidTransactionError { Ok(Self::DepositSystemTxPostRegolith) } OpTransactionError::HaltedDepositPostRegolith => Ok(Self::HaltedDepositPostRegolith), + OpTransactionError::MissingEnvelopedTx => Ok(Self::MissingEnvelopedTx), OpTransactionError::Base(err) => Err(err), } } @@ -189,6 +198,12 @@ impl FromEvmHalt for OpEthApiError { } } +impl FromRevert for OpEthApiError { + fn from_revert(output: Bytes) -> Self { + Self::Eth(EthApiError::from_revert(output)) + } +} + impl From for OpEthApiError { fn from(value: TransactionConversionError) -> Self { Self::Eth(EthApiError::from(value)) diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index 4e853984ac..db96bda83f 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -35,4 +35,9 @@ where fn max_simulate_blocks(&self) -> u64 { self.inner.eth_api.max_simulate_blocks() } + + #[inline] + fn evm_memory_limit(&self) -> u64 { + self.inner.eth_api.evm_memory_limit() + } } diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 04887d98f4..16389b5e91 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -16,6 +16,7 @@ use alloy_consensus::BlockHeader; use alloy_primitives::{B256, U256}; use eyre::WrapErr; use op_alloy_network::Optimism; +use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; use reqwest::Url; use reth_chainspec::{EthereumHardforks, Hardforks}; @@ -23,8 +24,9 @@ use reth_evm::ConfigureEvm; use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy, NodeTypes}; use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; use reth_optimism_flashblocks::{ - ExecutionPayloadBaseV1, FlashBlockBuildInfo, FlashBlockCompleteSequenceRx, FlashBlockService, - InProgressFlashBlockRx, PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, + FlashBlockBuildInfo, FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx, + FlashBlockConsensusClient, FlashBlockRx, FlashBlockService, FlashblocksListeners, + PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, }; use reth_rpc::eth::core::EthApiInner; use reth_rpc_eth_api::{ @@ -35,10 +37,8 @@ use reth_rpc_eth_api::{ EthApiTypes, FromEvmError, FullEthApiServer, RpcConvert, RpcConverter, RpcNodeCore, RpcNodeCoreExt, RpcTypes, }; -use reth_rpc_eth_types::{ - EthStateCache, FeeHistoryCache, GasPriceOracle, PendingBlock, PendingBlockEnvOrigin, -}; -use reth_storage_api::ProviderHeader; +use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle, PendingBlock}; +use reth_storage_api::{BlockReaderIdExt, ProviderHeader}; use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, TaskSpawner, @@ -85,21 +85,22 @@ impl OpEthApi { eth_api: EthApiNodeBackend, sequencer_client: Option, min_suggested_priority_fee: U256, - pending_block_rx: Option>, - flashblock_rx: Option, - in_progress_rx: Option, + 
flashblocks: Option>, ) -> Self { let inner = Arc::new(OpEthApiInner { eth_api, sequencer_client, min_suggested_priority_fee, - pending_block_rx, - flashblock_rx, - in_progress_rx, + flashblocks, }); Self { inner } } + /// Build a [`OpEthApi`] using [`OpEthApiBuilder`]. + pub const fn builder() -> OpEthApiBuilder { + OpEthApiBuilder::new() + } + /// Returns a reference to the [`EthApiNodeBackend`]. pub fn eth_api(&self) -> &EthApiNodeBackend { self.inner.eth_api() @@ -111,17 +112,22 @@ impl OpEthApi { /// Returns a cloned pending block receiver, if any. pub fn pending_block_rx(&self) -> Option> { - self.inner.pending_block_rx.clone() + self.inner.flashblocks.as_ref().map(|f| f.pending_block_rx.clone()) } - /// Returns a flashblock receiver, if any, by resubscribing to it. - pub fn flashblock_rx(&self) -> Option { - self.inner.flashblock_rx.as_ref().map(|rx| rx.resubscribe()) + /// Returns a new subscription to received flashblocks. + pub fn subscribe_received_flashblocks(&self) -> Option { + self.inner.flashblocks.as_ref().map(|f| f.received_flashblocks.subscribe()) + } + + /// Returns a new subscription to flashblock sequences. + pub fn subscribe_flashblock_sequence(&self) -> Option { + self.inner.flashblocks.as_ref().map(|f| f.flashblocks_sequence.subscribe()) } /// Returns information about the flashblock currently being built, if any. fn flashblock_build_info(&self) -> Option { - self.inner.in_progress_rx.as_ref().and_then(|rx| *rx.borrow()) + self.inner.flashblocks.as_ref().and_then(|f| *f.in_progress_rx.borrow()) } /// Extracts pending block if it matches the expected parent hash. @@ -133,17 +139,14 @@ impl OpEthApi { block.filter(|b| b.block().parent_hash() == parent_hash).map(|b| b.pending.clone()) } - /// Build a [`OpEthApi`] using [`OpEthApiBuilder`]. - pub const fn builder() -> OpEthApiBuilder { - OpEthApiBuilder::new() - } - /// Awaits a fresh flashblock if one is being built, otherwise returns current. async fn flashblock( &self, parent_hash: B256, ) -> eyre::Result>> { - let Some(rx) = self.inner.pending_block_rx.as_ref() else { return Ok(None) }; + let Some(rx) = self.inner.flashblocks.as_ref().map(|f| &f.pending_block_rx) else { + return Ok(None) + }; // Check if a flashblock is being built if let Some(build_info) = self.flashblock_build_info() { @@ -174,27 +177,25 @@ impl OpEthApi { OpEthApiError: FromEvmError, Rpc: RpcConvert, { - let pending = self.pending_block_env_and_cfg()?; - let parent = match pending.origin { - PendingBlockEnvOrigin::ActualPending(..) => return Ok(None), - PendingBlockEnvOrigin::DerivedFromLatest(parent) => parent, + let Some(latest) = self.provider().latest_header()? 
else { + return Ok(None); }; - self.flashblock(parent.hash()).await + self.flashblock(latest.hash()).await } } impl EthApiTypes for OpEthApi where N: RpcNodeCore, - Rpc: RpcConvert, + Rpc: RpcConvert, { type Error = OpEthApiError; type NetworkTypes = Rpc::Network; type RpcConvert = Rpc; - fn tx_resp_builder(&self) -> &Self::RpcConvert { - self.inner.eth_api.tx_resp_builder() + fn converter(&self) -> &Self::RpcConvert { + self.inner.eth_api.converter() } } @@ -244,7 +245,7 @@ where impl EthApiSpec for OpEthApi where N: RpcNodeCore, - Rpc: RpcConvert, + Rpc: RpcConvert, { #[inline] fn starting_block(&self) -> U256 { @@ -255,7 +256,7 @@ where impl SpawnBlocking for OpEthApi where N: RpcNodeCore, - Rpc: RpcConvert, + Rpc: RpcConvert, { #[inline] fn io_task_spawner(&self) -> impl TaskSpawner { @@ -271,6 +272,11 @@ where fn tracing_task_guard(&self) -> &BlockingTaskGuard { self.inner.eth_api.blocking_task_guard() } + + #[inline] + fn blocking_io_task_guard(&self) -> &Arc { + self.inner.eth_api.blocking_io_request_semaphore() + } } impl LoadFee for OpEthApi @@ -310,7 +316,7 @@ where impl EthState for OpEthApi where N: RpcNodeCore, - Rpc: RpcConvert, + Rpc: RpcConvert, Self: LoadPendingBlock, { #[inline] @@ -331,7 +337,7 @@ impl Trace for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, - Rpc: RpcConvert, + Rpc: RpcConvert, { } @@ -352,16 +358,10 @@ pub struct OpEthApiInner { /// /// See also min_suggested_priority_fee: U256, - /// Pending block receiver. + /// Flashblocks listeners. /// - /// If set, then it provides current pending block based on received Flashblocks. - pending_block_rx: Option>, - /// Flashblocks receiver. - /// - /// If set, then it provides sequences of flashblock built. - flashblock_rx: Option, - /// Receiver that signals when a flashblock is being built - in_progress_rx: Option, + /// If set, provides receivers for pending blocks, flashblock sequences, and build status. + flashblocks: Option>, } impl fmt::Debug for OpEthApiInner { @@ -405,6 +405,12 @@ pub struct OpEthApiBuilder { /// /// [flashblocks]: reth_optimism_flashblocks flashblocks_url: Option, + /// Enable flashblock consensus client to drive the chain forward. + /// + /// When enabled, flashblock sequences are submitted to the engine API via + /// `newPayload` and `forkchoiceUpdated` calls, advancing the canonical chain state. + /// Requires `flashblocks_url` to be set. + flashblock_consensus: bool, /// Marker for network types. 
_nt: PhantomData, } @@ -416,6 +422,7 @@ impl Default for OpEthApiBuilder { sequencer_headers: Vec::new(), min_suggested_priority_fee: 1_000_000, flashblocks_url: None, + flashblock_consensus: false, _nt: PhantomData, } } @@ -429,6 +436,7 @@ impl OpEthApiBuilder { sequencer_headers: Vec::new(), min_suggested_priority_fee: 1_000_000, flashblocks_url: None, + flashblock_consensus: false, _nt: PhantomData, } } @@ -456,6 +464,12 @@ impl OpEthApiBuilder { self.flashblocks_url = flashblocks_url; self } + + /// With flashblock consensus client enabled to drive chain forward + pub const fn with_flashblock_consensus(mut self, flashblock_consensus: bool) -> Self { + self.flashblock_consensus = flashblock_consensus; + self + } } impl EthApiBuilder for OpEthApiBuilder @@ -463,10 +477,18 @@ where N: FullNodeComponents< Evm: ConfigureEvm< NextBlockEnvCtx: BuildPendingEnv> - + From + + From + Unpin, >, - Types: NodeTypes, + Types: NodeTypes< + ChainSpec: Hardforks + EthereumHardforks, + Payload: reth_node_api::PayloadTypes< + ExecutionData: for<'a> TryFrom< + &'a FlashBlockCompleteSequence, + Error: std::fmt::Display, + >, + >, + >, >, NetworkT: RpcTypes, OpRpcConvert: RpcConvert, @@ -481,6 +503,7 @@ where sequencer_headers, min_suggested_priority_fee, flashblocks_url, + flashblock_consensus, .. } = self; let rpc_converter = @@ -497,28 +520,43 @@ where None }; - let (pending_block_rx, flashblock_rx, in_progress_rx) = - if let Some(ws_url) = flashblocks_url { - info!(target: "reth:cli", %ws_url, "Launching flashblocks service"); + let flashblocks = if let Some(ws_url) = flashblocks_url { + info!(target: "reth:cli", %ws_url, "Launching flashblocks service"); - let (tx, pending_rx) = watch::channel(None); - let stream = WsFlashBlockStream::new(ws_url); - let service = FlashBlockService::new( - stream, - ctx.components.evm_config().clone(), - ctx.components.provider().clone(), - ctx.components.task_executor().clone(), - ); + let (tx, pending_rx) = watch::channel(None); + let stream = WsFlashBlockStream::new(ws_url); + let service = FlashBlockService::new( + stream, + ctx.components.evm_config().clone(), + ctx.components.provider().clone(), + ctx.components.task_executor().clone(), + // enable state root calculation if flashblock_consensus is enabled. 
+ flashblock_consensus, + ); - let flashblock_rx = service.subscribe_block_sequence(); - let in_progress_rx = service.subscribe_in_progress(); + let flashblocks_sequence = service.block_sequence_broadcaster().clone(); + let received_flashblocks = service.flashblocks_broadcaster().clone(); + let in_progress_rx = service.subscribe_in_progress(); + ctx.components.task_executor().spawn(Box::pin(service.run(tx))); - ctx.components.task_executor().spawn(Box::pin(service.run(tx))); + if flashblock_consensus { + info!(target: "reth::cli", "Launching FlashBlockConsensusClient"); + let flashblock_client = FlashBlockConsensusClient::new( + ctx.engine_handle.clone(), + flashblocks_sequence.subscribe(), + )?; + ctx.components.task_executor().spawn(Box::pin(flashblock_client.run())); + } - (Some(pending_rx), Some(flashblock_rx), Some(in_progress_rx)) - } else { - (None, None, None) - }; + Some(FlashblocksListeners::new( + pending_rx, + flashblocks_sequence, + in_progress_rx, + received_flashblocks, + )) + } else { + None + }; let eth_api = ctx.eth_api_builder().with_rpc_converter(rpc_converter).build_inner(); @@ -526,9 +564,7 @@ where eth_api, sequencer_client, U256::from(min_suggested_priority_fee), - pending_block_rx, - flashblock_rx, - in_progress_rx, + flashblocks, )) } } diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 151668f403..bf351d7de1 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -6,22 +6,19 @@ use alloy_eips::BlockNumberOrTag; use reth_chain_state::BlockState; use reth_rpc_eth_api::{ helpers::{pending_block::PendingEnvBuilder, LoadPendingBlock, SpawnBlocking}, - FromEvmError, RpcConvert, RpcNodeCore, + FromEvmError, RpcConvert, RpcNodeCore, RpcNodeCoreExt, }; use reth_rpc_eth_types::{ block::BlockAndReceipts, builder::config::PendingBlockKind, error::FromEthApiError, EthApiError, PendingBlock, }; -use reth_storage_api::{ - BlockReader, BlockReaderIdExt, ReceiptProvider, StateProviderBox, StateProviderFactory, -}; -use std::sync::Arc; +use reth_storage_api::{BlockReaderIdExt, StateProviderBox, StateProviderFactory}; impl LoadPendingBlock for OpEthApi where N: RpcNodeCore, OpEthApiError: FromEvmError, - Rpc: RpcConvert, + Rpc: RpcConvert, { #[inline] fn pending_block(&self) -> &tokio::sync::Mutex>> { @@ -38,33 +35,6 @@ where self.inner.eth_api.pending_block_kind() } - /// Returns the locally built pending block - async fn local_pending_block( - &self, - ) -> Result>, Self::Error> { - if let Ok(Some(pending)) = self.pending_flashblock().await { - return Ok(Some(pending.into_block_and_receipts())); - } - - // See: - let latest = self - .provider() - .latest_header()? - .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; - let block_id = latest.hash().into(); - let block = self - .provider() - .recovered_block(block_id, Default::default())? - .ok_or(EthApiError::HeaderNotFound(block_id.into()))?; - - let receipts = self - .provider() - .receipts_by_block(block_id)? - .ok_or(EthApiError::ReceiptsNotFound(block_id.into()))?; - - Ok(Some(BlockAndReceipts { block: Arc::new(block), receipts: Arc::new(receipts) })) - } - /// Returns a [`StateProviderBox`] on a mem-pool built pending block overlaying latest. 
async fn local_pending_state(&self) -> Result, Self::Error> where @@ -83,4 +53,27 @@ where Ok(Some(Box::new(state.state_provider(latest_historical)) as StateProviderBox)) } + + /// Returns the locally built pending block + async fn local_pending_block( + &self, + ) -> Result>, Self::Error> { + if let Ok(Some(pending)) = self.pending_flashblock().await { + return Ok(Some(pending.into_block_and_receipts())); + } + + // See: + let latest = self + .provider() + .latest_header()? + .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; + + let latest = self + .cache() + .get_block_and_receipts(latest.hash()) + .await + .map_err(Self::Error::from_eth_err)? + .map(|(block, receipts)| BlockAndReceipts { block, receipts }); + Ok(latest) + } } diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 5d1e8e2979..e86aa61567 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -1,16 +1,16 @@ //! Loads and formats OP receipt RPC response. use crate::{eth::RpcNodeCore, OpEthApi, OpEthApiError}; -use alloy_consensus::{BlockHeader, Receipt, TxReceipt}; +use alloy_consensus::{BlockHeader, Receipt, ReceiptWithBloom, TxReceipt}; use alloy_eips::eip2718::Encodable2718; use alloy_rpc_types_eth::{Log, TransactionReceipt}; -use op_alloy_consensus::{OpReceiptEnvelope, OpTransaction}; +use op_alloy_consensus::{OpReceipt, OpTransaction}; use op_alloy_rpc_types::{L1BlockInfo, OpTransactionReceipt, OpTransactionReceiptFields}; -use reth_chainspec::ChainSpecProvider; +use op_revm::estimate_tx_compressed_size; +use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_node_api::NodePrimitives; use reth_optimism_evm::RethL1BlockInfo; use reth_optimism_forks::OpHardforks; -use reth_optimism_primitives::OpReceipt; use reth_primitives_traits::SealedBlock; use reth_rpc_eth_api::{ helpers::LoadReceipt, @@ -74,9 +74,11 @@ where let mut l1_block_info = match reth_optimism_evm::extract_l1_info(block.body()) { Ok(l1_block_info) => l1_block_info, Err(err) => { + let genesis_number = + self.provider.chain_spec().genesis().number.unwrap_or_default(); // If it is the genesis block (i.e. block number is 0), there is no L1 info, so // we return an empty l1_block_info. - if block.header().number() == 0 { + if block.header().number() == genesis_number { return Ok(vec![]); } return Err(err.into()); @@ -269,7 +271,7 @@ impl OpReceiptFieldsBuilder { #[derive(Debug)] pub struct OpReceiptBuilder { /// Core receipt, has all the fields of an L1 receipt and is the basis for the OP receipt. - pub core_receipt: TransactionReceipt>, + pub core_receipt: TransactionReceipt>>, /// Additional OP receipt fields. 
pub op_receipt_fields: OpTransactionReceiptFields, } @@ -287,29 +289,35 @@ impl OpReceiptBuilder { let timestamp = input.meta.timestamp; let block_number = input.meta.block_number; let tx_signed = *input.tx.inner(); - let core_receipt = build_receipt(input, None, |receipt, next_log_index, meta| { + let mut core_receipt = build_receipt(input, None, |receipt, next_log_index, meta| { let map_logs = move |receipt: alloy_consensus::Receipt| { let Receipt { status, cumulative_gas_used, logs } = receipt; let logs = Log::collect_for_receipt(next_log_index, meta, logs); Receipt { status, cumulative_gas_used, logs } }; - match receipt { - OpReceipt::Legacy(receipt) => { - OpReceiptEnvelope::Legacy(map_logs(receipt).into_with_bloom()) - } - OpReceipt::Eip2930(receipt) => { - OpReceiptEnvelope::Eip2930(map_logs(receipt).into_with_bloom()) - } - OpReceipt::Eip1559(receipt) => { - OpReceiptEnvelope::Eip1559(map_logs(receipt).into_with_bloom()) - } - OpReceipt::Eip7702(receipt) => { - OpReceiptEnvelope::Eip7702(map_logs(receipt).into_with_bloom()) - } - OpReceipt::Deposit(receipt) => { - OpReceiptEnvelope::Deposit(receipt.map_inner(map_logs).into_with_bloom()) - } - } + let mapped_receipt: OpReceipt = match receipt { + OpReceipt::Legacy(receipt) => OpReceipt::Legacy(map_logs(receipt)), + OpReceipt::Eip2930(receipt) => OpReceipt::Eip2930(map_logs(receipt)), + OpReceipt::Eip1559(receipt) => OpReceipt::Eip1559(map_logs(receipt)), + OpReceipt::Eip7702(receipt) => OpReceipt::Eip7702(map_logs(receipt)), + OpReceipt::Deposit(receipt) => OpReceipt::Deposit(receipt.map_inner(map_logs)), + }; + mapped_receipt.into_with_bloom() + }); + + // In jovian, we're using the blob gas used field to store the current da + // footprint's value. + // We're computing the jovian blob gas used before building the receipt since the inputs get + // consumed by the `build_receipt` function. + chain_spec.is_jovian_active_at_timestamp(timestamp).then(|| { + // Estimate the size of the transaction in bytes and multiply by the DA + // footprint gas scalar. + // Jovian specs: `https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/jovian/exec-engine.md#da-footprint-block-limit` + let da_size = estimate_tx_compressed_size(tx_signed.encoded_2718().as_slice()) + .saturating_div(1_000_000) + .saturating_mul(l1_block_info.da_footprint_gas_scalar.unwrap_or_default().into()); + + core_receipt.blob_gas_used = Some(da_size); }); let op_receipt_fields = OpReceiptFieldsBuilder::new(timestamp, block_number) @@ -333,11 +341,16 @@ impl OpReceiptBuilder { #[cfg(test)] mod test { use super::*; - use alloy_consensus::{Block, BlockBody}; - use alloy_primitives::{hex, U256}; + use alloy_consensus::{transaction::TransactionMeta, Block, BlockBody, Eip658Value, TxEip7702}; + use alloy_op_hardforks::{ + OpChainHardforks, OP_MAINNET_ISTHMUS_TIMESTAMP, OP_MAINNET_JOVIAN_TIMESTAMP, + }; + use alloy_primitives::{hex, Address, Bytes, Signature, U256}; + use op_alloy_consensus::OpTypedTransaction; use op_alloy_network::eip2718::Decodable2718; use reth_optimism_chainspec::{BASE_MAINNET, OP_MAINNET}; - use reth_optimism_primitives::OpTransactionSigned; + use reth_optimism_primitives::{OpPrimitives, OpTransactionSigned}; + use reth_primitives_traits::Recovered; /// OP Mainnet transaction at index 0 in block 124665056. 
/// @@ -567,4 +580,144 @@ mod test { assert_eq!(operator_fee_constant, None, "incorrect operator fee constant"); assert_eq!(da_footprint_gas_scalar, None, "incorrect da footprint gas scalar"); } + + #[test] + fn da_footprint_gas_scalar_included_in_receipt_post_jovian() { + const DA_FOOTPRINT_GAS_SCALAR: u16 = 10; + + let tx = TxEip7702 { + chain_id: 1u64, + nonce: 0, + max_fee_per_gas: 0x28f000fff, + max_priority_fee_per_gas: 0x28f000fff, + gas_limit: 10, + to: Address::default(), + value: U256::from(3_u64), + input: Bytes::from(vec![1, 2]), + access_list: Default::default(), + authorization_list: Default::default(), + }; + + let signature = Signature::new(U256::default(), U256::default(), true); + + let tx = OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature); + + let mut l1_block_info = op_revm::L1BlockInfo { + da_footprint_gas_scalar: Some(DA_FOOTPRINT_GAS_SCALAR), + ..Default::default() + }; + + let op_hardforks = OpChainHardforks::op_mainnet(); + + let receipt = OpReceiptFieldsBuilder::new(OP_MAINNET_JOVIAN_TIMESTAMP, u64::MAX) + .l1_block_info(&op_hardforks, &tx, &mut l1_block_info) + .expect("should parse revm l1 info") + .build(); + + assert_eq!(receipt.l1_block_info.da_footprint_gas_scalar, Some(DA_FOOTPRINT_GAS_SCALAR)); + } + + #[test] + fn blob_gas_used_included_in_receipt_post_jovian() { + const DA_FOOTPRINT_GAS_SCALAR: u16 = 100; + let tx = TxEip7702 { + chain_id: 1u64, + nonce: 0, + max_fee_per_gas: 0x28f000fff, + max_priority_fee_per_gas: 0x28f000fff, + gas_limit: 10, + to: Address::default(), + value: U256::from(3_u64), + access_list: Default::default(), + authorization_list: Default::default(), + input: Bytes::from(vec![0; 1_000_000]), + }; + + let signature = Signature::new(U256::default(), U256::default(), true); + + let tx = OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature); + + let mut l1_block_info = op_revm::L1BlockInfo { + da_footprint_gas_scalar: Some(DA_FOOTPRINT_GAS_SCALAR), + ..Default::default() + }; + + let op_hardforks = OpChainHardforks::op_mainnet(); + + let op_receipt = OpReceiptBuilder::new( + &op_hardforks, + ConvertReceiptInput:: { + tx: Recovered::new_unchecked(&tx, Address::default()), + receipt: OpReceipt::Eip7702(Receipt { + status: Eip658Value::Eip658(true), + cumulative_gas_used: 100, + logs: vec![], + }), + gas_used: 100, + next_log_index: 0, + meta: TransactionMeta { + timestamp: OP_MAINNET_JOVIAN_TIMESTAMP, + ..Default::default() + }, + }, + &mut l1_block_info, + ) + .unwrap(); + + let expected_blob_gas_used = estimate_tx_compressed_size(tx.encoded_2718().as_slice()) + .saturating_div(1_000_000) + .saturating_mul(DA_FOOTPRINT_GAS_SCALAR.into()); + + assert_eq!(op_receipt.core_receipt.blob_gas_used, Some(expected_blob_gas_used)); + } + + #[test] + fn blob_gas_used_not_included_in_receipt_post_isthmus() { + const DA_FOOTPRINT_GAS_SCALAR: u16 = 100; + let tx = TxEip7702 { + chain_id: 1u64, + nonce: 0, + max_fee_per_gas: 0x28f000fff, + max_priority_fee_per_gas: 0x28f000fff, + gas_limit: 10, + to: Address::default(), + value: U256::from(3_u64), + access_list: Default::default(), + authorization_list: Default::default(), + input: Bytes::from(vec![0; 1_000_000]), + }; + + let signature = Signature::new(U256::default(), U256::default(), true); + + let tx = OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature); + + let mut l1_block_info = op_revm::L1BlockInfo { + da_footprint_gas_scalar: Some(DA_FOOTPRINT_GAS_SCALAR), + ..Default::default() + }; + + let op_hardforks = 
OpChainHardforks::op_mainnet(); + + let op_receipt = OpReceiptBuilder::new( + &op_hardforks, + ConvertReceiptInput:: { + tx: Recovered::new_unchecked(&tx, Address::default()), + receipt: OpReceipt::Eip7702(Receipt { + status: Eip658Value::Eip658(true), + cumulative_gas_used: 100, + logs: vec![], + }), + gas_used: 100, + next_log_index: 0, + meta: TransactionMeta { + timestamp: OP_MAINNET_ISTHMUS_TIMESTAMP, + ..Default::default() + }, + }, + &mut l1_block_info, + ) + .unwrap(); + + assert_eq!(op_receipt.core_receipt.blob_gas_used, None); + } } diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 37c05815a6..5dee6e14c5 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -1,27 +1,24 @@ //! Loads and formats OP transaction RPC response. use crate::{OpEthApi, OpEthApiError, SequencerClient}; -use alloy_consensus::TxReceipt as _; use alloy_primitives::{Bytes, B256}; use alloy_rpc_types_eth::TransactionInfo; use futures::StreamExt; use op_alloy_consensus::{transaction::OpTransactionInfo, OpTransaction}; use reth_chain_state::CanonStateSubscriptions; use reth_optimism_primitives::DepositReceipt; -use reth_primitives_traits::{BlockBody, SignedTransaction, SignerRecoverable}; -use reth_rpc_convert::transaction::ConvertReceiptInput; +use reth_primitives_traits::{ + BlockBody, Recovered, SignedTransaction, SignerRecoverable, WithEncoded, +}; use reth_rpc_eth_api::{ - helpers::{ - receipt::calculate_gas_used_and_next_log_index, spec::SignersForRpc, EthTransactions, - LoadReceipt, LoadTransaction, - }, + helpers::{spec::SignersForRpc, EthTransactions, LoadReceipt, LoadTransaction, SpawnBlocking}, try_into_op_tx_info, EthApiTypes as _, FromEthApiError, FromEvmError, RpcConvert, RpcNodeCore, RpcReceipt, TxInfoMapper, }; -use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; -use reth_storage_api::{errors::ProviderError, ReceiptProvider}; +use reth_rpc_eth_types::{EthApiError, TransactionSource}; +use reth_storage_api::{errors::ProviderError, ProviderTx, ReceiptProvider, TransactionsProvider}; use reth_transaction_pool::{ - AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool, + AddedTransactionOutcome, PoolPooledTx, PoolTransaction, TransactionOrigin, TransactionPool, }; use std::{ fmt::{Debug, Formatter}, @@ -44,11 +41,11 @@ where self.inner.eth_api.send_raw_transaction_sync_timeout() } - /// Decodes and recovers the transaction and submits it to the pool. - /// - /// Returns the hash of the transaction. - async fn send_raw_transaction(&self, tx: Bytes) -> Result { - let recovered = recover_raw_transaction(&tx)?; + async fn send_transaction( + &self, + tx: WithEncoded>>, + ) -> Result { + let (tx, recovered) = tx.split(); // broadcast raw transaction to subscribers if there is any. 
self.eth_api().broadcast_raw_transaction(tx.clone()); @@ -88,21 +85,35 @@ where fn send_raw_transaction_sync( &self, tx: Bytes, - ) -> impl Future, Self::Error>> + Send - where - Self: LoadReceipt + 'static, - { + ) -> impl Future, Self::Error>> + Send { let this = self.clone(); let timeout_duration = self.send_raw_transaction_sync_timeout(); async move { let mut canonical_stream = this.provider().canonical_state_stream(); let hash = EthTransactions::send_raw_transaction(&this, tx).await?; - let flashblock_rx = this.pending_block_rx(); - let mut flashblock_stream = flashblock_rx.map(WatchStream::new); + let mut flashblock_stream = this.pending_block_rx().map(WatchStream::new); tokio::time::timeout(timeout_duration, async { loop { tokio::select! { + biased; + // check if the tx was preconfirmed in a new flashblock + flashblock = async { + if let Some(stream) = &mut flashblock_stream { + stream.next().await + } else { + futures::future::pending().await + } + } => { + if let Some(flashblock) = flashblock.flatten() { + // if flashblocks are supported, attempt to find id from the pending block + if let Some(receipt) = flashblock + .find_and_convert_transaction_receipt(hash, this.converter()) + { + return receipt; + } + } + } // Listen for regular canonical block updates for inclusion canonical_notification = canonical_stream.next() => { if let Some(notification) = canonical_notification { @@ -118,23 +129,6 @@ where break; } } - // check if the tx was preconfirmed in a new flashblock - _flashblock_update = async { - if let Some(ref mut stream) = flashblock_stream { - stream.next().await - } else { - futures::future::pending().await - } - } => { - // Check flashblocks for faster confirmation (Optimism-specific) - if let Ok(Some(pending_block)) = this.pending_flashblock().await { - let block_and_receipts = pending_block.into_block_and_receipts(); - if block_and_receipts.block.body().contains_transaction(&hash) - && let Some(receipt) = this.transaction_receipt(hash).await? { - return Ok(receipt); - } - } - } } } Err(Self::Error::from_eth_err(EthApiError::TransactionConfirmationTimeout { @@ -168,42 +162,11 @@ where if tx_receipt.is_none() { // if flashblocks are supported, attempt to find id from the pending block - if let Ok(Some(pending_block)) = this.pending_flashblock().await { - let block_and_receipts = pending_block.into_block_and_receipts(); - if let Some((tx, receipt)) = - block_and_receipts.find_transaction_and_receipt_by_hash(hash) - { - // Build tx receipt from pending block and receipts directly inline. - // This avoids canonical cache lookup that would be done by the - // `build_transaction_receipt` which would result in a block not found - // issue. See: https://github.com/paradigmxyz/reth/issues/18529 - let meta = tx.meta(); - let all_receipts = &block_and_receipts.receipts; - - let (gas_used, next_log_index) = - calculate_gas_used_and_next_log_index(meta.index, all_receipts); - - return Ok(Some( - this.tx_resp_builder() - .convert_receipts_with_block( - vec![ConvertReceiptInput { - tx: tx - .tx() - .clone() - .try_into_recovered_unchecked() - .map_err(Self::Error::from_eth_err)? - .as_recovered_ref(), - gas_used: receipt.cumulative_gas_used() - gas_used, - receipt: receipt.clone(), - next_log_index, - meta, - }], - block_and_receipts.sealed_block(), - )? 
- .pop() - .unwrap(), - )) - } + if let Ok(Some(pending_block)) = this.pending_flashblock().await && + let Some(Ok(receipt)) = pending_block + .find_and_convert_transaction_receipt(hash, this.converter()) + { + return Ok(Some(receipt)); } } let Some((tx, meta, receipt)) = tx_receipt else { return Ok(None) }; @@ -218,6 +181,53 @@ where OpEthApiError: FromEvmError, Rpc: RpcConvert, { + async fn transaction_by_hash( + &self, + hash: B256, + ) -> Result>>, Self::Error> { + // 1. Try to find the transaction on disk (historical blocks) + if let Some((tx, meta)) = self + .spawn_blocking_io(move |this| { + this.provider() + .transaction_by_hash_with_meta(hash) + .map_err(Self::Error::from_eth_err) + }) + .await? + { + let transaction = tx + .try_into_recovered_unchecked() + .map_err(|_| EthApiError::InvalidTransactionSignature)?; + + return Ok(Some(TransactionSource::Block { + transaction, + index: meta.index, + block_hash: meta.block_hash, + block_number: meta.block_number, + base_fee: meta.base_fee, + })); + } + + // 2. check flashblocks (sequencer preconfirmations) + if let Ok(Some(pending_block)) = self.pending_flashblock().await && + let Some(indexed_tx) = pending_block.block().find_indexed(hash) + { + let meta = indexed_tx.meta(); + return Ok(Some(TransactionSource::Block { + transaction: indexed_tx.recovered_tx().cloned(), + index: meta.index, + block_hash: meta.block_hash, + block_number: meta.block_number, + base_fee: meta.base_fee, + })); + } + + // 3. check local pool + if let Some(tx) = self.pool().get(&hash).map(|tx| tx.transaction.clone_into_consensus()) { + return Ok(Some(TransactionSource::Pool(tx))); + } + + Ok(None) + } } impl OpEthApi diff --git a/crates/optimism/rpc/src/historical.rs b/crates/optimism/rpc/src/historical.rs index 736d962b6d..6037da4fe7 100644 --- a/crates/optimism/rpc/src/historical.rs +++ b/crates/optimism/rpc/src/historical.rs @@ -5,8 +5,9 @@ use alloy_eips::BlockId; use alloy_json_rpc::{RpcRecv, RpcSend}; use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_client::RpcClient; +use jsonrpsee::BatchResponseBuilder; use jsonrpsee_core::{ - middleware::{Batch, Notification, RpcServiceT}, + middleware::{Batch, BatchEntry, Notification, RpcServiceT}, server::MethodResponse, }; use jsonrpsee_types::{Params, Request}; @@ -122,8 +123,14 @@ impl HistoricalRpcService { impl RpcServiceT for HistoricalRpcService where - S: RpcServiceT + Send + Sync + Clone + 'static, - + S: RpcServiceT< + MethodResponse = MethodResponse, + BatchResponse = MethodResponse, + NotificationResponse = MethodResponse, + > + Send + + Sync + + Clone + + 'static, P: BlockReaderIdExt + TransactionsProvider + Send + Sync + Clone + 'static, { type MethodResponse = S::MethodResponse; @@ -145,8 +152,64 @@ where }) } - fn batch<'a>(&self, req: Batch<'a>) -> impl Future + Send + 'a { - self.inner.batch(req) + fn batch<'a>( + &self, + mut req: Batch<'a>, + ) -> impl Future + Send + 'a { + let this = self.clone(); + let historical = self.historical.clone(); + + async move { + let mut needs_forwarding = false; + for entry in req.iter_mut() { + if let Ok(BatchEntry::Call(call)) = entry && + historical.should_forward_request(call) + { + needs_forwarding = true; + break; + } + } + + if !needs_forwarding { + // no call needs to be forwarded and we can simply perform this batch request + return this.inner.batch(req).await; + } + + // the entire response is checked above so we can assume that these don't exceed + let mut batch_rp = BatchResponseBuilder::new_with_limit(usize::MAX); + let mut got_notification = 
false; + + for batch_entry in req { + match batch_entry { + Ok(BatchEntry::Call(req)) => { + let rp = this.call(req).await; + if let Err(err) = batch_rp.append(rp) { + return err; + } + } + Ok(BatchEntry::Notification(n)) => { + got_notification = true; + this.notification(n).await; + } + Err(err) => { + let (err, id) = err.into_parts(); + let rp = MethodResponse::error(id, err); + if let Err(err) = batch_rp.append(rp) { + return err; + } + } + } + } + + // If the batch is empty and we got a notification, we return an empty response. + if batch_rp.is_empty() && got_notification { + MethodResponse::notification() + } + // An empty batch is regarded as an invalid request here. + else { + MethodResponse::from_batch(batch_rp.finish()) + } + } } fn notification<'a>( @@ -171,21 +234,23 @@ impl

<P> HistoricalRpcInner<P>
where P: BlockReaderIdExt + TransactionsProvider + Send + Sync + Clone, { - /// Checks if a request should be forwarded to the historical endpoint and returns - /// the response if it was forwarded. - async fn maybe_forward_request(&self, req: &Request<'_>) -> Option { - let should_forward = match req.method_name() { + /// Checks if a request should be forwarded to the historical endpoint (synchronous check). + fn should_forward_request(&self, req: &Request<'_>) -> bool { + match req.method_name() { "debug_traceTransaction" | "eth_getTransactionByHash" | "eth_getTransactionReceipt" | "eth_getRawTransactionByHash" => self.should_forward_transaction(req), method => self.should_forward_block_request(method, req), - }; + } + } - if should_forward { + /// Checks if a request should be forwarded to the historical endpoint and returns + /// the response if it was forwarded. + async fn maybe_forward_request(&self, req: &Request<'_>) -> Option { + if self.should_forward_request(req) { return self.forward_to_historical(req).await } - None } diff --git a/crates/optimism/rpc/src/miner.rs b/crates/optimism/rpc/src/miner.rs index a4de556ea1..f8780f37e8 100644 --- a/crates/optimism/rpc/src/miner.rs +++ b/crates/optimism/rpc/src/miner.rs @@ -4,7 +4,7 @@ use alloy_primitives::U64; use jsonrpsee_core::{async_trait, RpcResult}; pub use op_alloy_rpc_jsonrpsee::traits::MinerApiExtServer; use reth_metrics::{metrics::Gauge, Metrics}; -use reth_optimism_payload_builder::config::OpDAConfig; +use reth_optimism_payload_builder::config::{OpDAConfig, OpGasLimitConfig}; use tracing::debug; /// Miner API extension for OP, exposes settings for the data availability configuration via the @@ -12,14 +12,15 @@ use tracing::debug; #[derive(Debug, Clone)] pub struct OpMinerExtApi { da_config: OpDAConfig, + gas_limit_config: OpGasLimitConfig, metrics: OpMinerMetrics, } impl OpMinerExtApi { /// Instantiate the miner API extension with the given, sharable data availability /// configuration. - pub fn new(da_config: OpDAConfig) -> Self { - Self { da_config, metrics: OpMinerMetrics::default() } + pub fn new(da_config: OpDAConfig, gas_limit_config: OpGasLimitConfig) -> Self { + Self { da_config, gas_limit_config, metrics: OpMinerMetrics::default() } } } @@ -35,6 +36,13 @@ impl MinerApiExtServer for OpMinerExtApi { Ok(true) } + + async fn set_gas_limit(&self, gas_limit: U64) -> RpcResult { + debug!(target: "rpc", "Setting gas limit: {}", gas_limit); + self.gas_limit_config.set_gas_limit(gas_limit.to()); + self.metrics.set_gas_limit(gas_limit.to()); + Ok(true) + } } /// Optimism miner metrics @@ -45,6 +53,8 @@ pub struct OpMinerMetrics { max_da_tx_size: Gauge, /// Max DA block size set on the miner max_da_block_size: Gauge, + /// Gas limit set on the miner + gas_limit: Gauge, } impl OpMinerMetrics { @@ -59,4 +69,10 @@ impl OpMinerMetrics { pub fn set_max_da_block_size(&self, size: u64) { self.max_da_block_size.set(size as f64); } + + /// Sets the gas limit gauge value + #[inline] + pub fn set_gas_limit(&self, gas_limit: u64) { + self.gas_limit.set(gas_limit as f64); + } } diff --git a/crates/optimism/txpool/src/supervisor/client.rs b/crates/optimism/txpool/src/supervisor/client.rs index b362fae2e1..a49704ac50 100644 --- a/crates/optimism/txpool/src/supervisor/client.rs +++ b/crates/optimism/txpool/src/supervisor/client.rs @@ -1,7 +1,6 @@ //! 
This is our custom implementation of validator struct use crate::{ - interop::MaybeInteropTransaction, supervisor::{ metrics::SupervisorMetrics, parse_access_list_items_to_inbox_entries, ExecutingDescriptor, InteropTxValidatorError, @@ -139,8 +138,7 @@ impl SupervisorClient { where InputIter: IntoIterator + Send + 'a, InputIter::IntoIter: Send + 'a, - TItem: - MaybeInteropTransaction + PoolTransaction + Transaction + Clone + Send + Sync + 'static, + TItem: PoolTransaction + Transaction + Send, { stream::iter(txs_to_revalidate.into_iter().map(move |tx_item| { let client_for_async_task = self.clone(); diff --git a/crates/optimism/txpool/src/validator.rs b/crates/optimism/txpool/src/validator.rs index fd4710b8a4..8a715fc47c 100644 --- a/crates/optimism/txpool/src/validator.rs +++ b/crates/optimism/txpool/src/validator.rs @@ -88,7 +88,8 @@ impl OpTransactionValidator { impl OpTransactionValidator where - Client: ChainSpecProvider + StateProviderFactory + BlockReaderIdExt, + Client: + ChainSpecProvider + StateProviderFactory + BlockReaderIdExt + Sync, Tx: EthPoolTransaction + OpPooledTx, { /// Create a new [`OpTransactionValidator`]. @@ -177,7 +178,7 @@ where &self, origin: TransactionOrigin, transaction: Tx, - state: &mut Option>, + state: &mut Option>, ) -> TransactionValidationOutcome { if transaction.is_eip4844() { return TransactionValidationOutcome::Invalid( @@ -289,7 +290,8 @@ where impl TransactionValidator for OpTransactionValidator where - Client: ChainSpecProvider + StateProviderFactory + BlockReaderIdExt, + Client: + ChainSpecProvider + StateProviderFactory + BlockReaderIdExt + Sync, Tx: EthPoolTransaction + OpPooledTx, { type Transaction = Tx; diff --git a/crates/payload/basic/src/better_payload_emitter.rs b/crates/payload/basic/src/better_payload_emitter.rs index a6fcaa08ec..216995ed9b 100644 --- a/crates/payload/basic/src/better_payload_emitter.rs +++ b/crates/payload/basic/src/better_payload_emitter.rs @@ -3,8 +3,8 @@ use reth_payload_builder::PayloadBuilderError; use std::sync::Arc; use tokio::sync::broadcast; -/// Emits events when a better payload is built. Delegates the actual payload building -/// to an inner [`PayloadBuilder`]. +/// Emits events when a payload is built (both `Better` and `Freeze` outcomes). +/// Delegates the actual payload building to an inner [`PayloadBuilder`]. #[derive(Debug, Clone)] pub struct BetterPayloadEmitter { better_payloads_tx: broadcast::Sender>, @@ -16,7 +16,8 @@ where PB: PayloadBuilder, { /// Create a new [`BetterPayloadEmitter`] with the given inner payload builder. - /// Owns the sender half of a broadcast channel that emits the better payloads. + /// Owns the sender half of a broadcast channel that emits payloads when they are built + /// (for both `Better` and `Freeze` outcomes). pub const fn new( better_payloads_tx: broadcast::Sender>, inner: PB, @@ -38,9 +39,14 @@ where args: BuildArguments, ) -> Result, PayloadBuilderError> { match self.inner.try_build(args) { - Ok(BuildOutcome::Better { payload, cached_reads }) => { - let _ = self.better_payloads_tx.send(Arc::new(payload.clone())); - Ok(BuildOutcome::Better { payload, cached_reads }) + Ok(res) => { + // Emit payload for both Better and Freeze outcomes, as both represent valid + // payloads that should be available to subscribers (e.g., for + // insertion into engine service). 
+ if let Some(payload) = res.payload().cloned() { + let _ = self.better_payloads_tx.send(Arc::new(payload)); + } + Ok(res) } res => res, } diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index aa2b1f6680..84a4f64a4e 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -706,7 +706,7 @@ pub enum BuildOutcome { } impl BuildOutcome { - /// Consumes the type and returns the payload if the outcome is `Better`. + /// Consumes the type and returns the payload if the outcome is `Better` or `Freeze`. pub fn into_payload(self) -> Option { match self { Self::Better { payload, .. } | Self::Freeze(payload) => Some(payload), @@ -714,11 +714,24 @@ impl BuildOutcome { } } + /// Consumes the type and returns the payload if the outcome is `Better` or `Freeze`. + pub const fn payload(&self) -> Option<&Payload> { + match self { + Self::Better { payload, .. } | Self::Freeze(payload) => Some(payload), + _ => None, + } + } + /// Returns true if the outcome is `Better`. pub const fn is_better(&self) -> bool { matches!(self, Self::Better { .. }) } + /// Returns true if the outcome is `Freeze`. + pub const fn is_frozen(&self) -> bool { + matches!(self, Self::Freeze { .. }) + } + /// Returns true if the outcome is `Aborted`. pub const fn is_aborted(&self) -> bool { matches!(self, Self::Aborted { .. }) diff --git a/crates/payload/basic/src/stack.rs b/crates/payload/basic/src/stack.rs index 22792c9bbc..ba5c927b9f 100644 --- a/crates/payload/basic/src/stack.rs +++ b/crates/payload/basic/src/stack.rs @@ -194,16 +194,16 @@ where &self, args: BuildArguments, ) -> Result, PayloadBuilderError> { - match args.config.attributes { - Either::Left(ref left_attr) => { + let BuildArguments { cached_reads, config, cancel, best_payload } = args; + let PayloadConfig { parent_header, attributes } = config; + + match attributes { + Either::Left(left_attr) => { let left_args: BuildArguments = BuildArguments { - cached_reads: args.cached_reads.clone(), - config: PayloadConfig { - parent_header: args.config.parent_header.clone(), - attributes: left_attr.clone(), - }, - cancel: args.cancel.clone(), - best_payload: args.best_payload.clone().and_then(|payload| { + cached_reads, + config: PayloadConfig { parent_header, attributes: left_attr }, + cancel, + best_payload: best_payload.and_then(|payload| { if let Either::Left(p) = payload { Some(p) } else { @@ -211,18 +211,14 @@ where } }), }; - self.left.try_build(left_args).map(|out| out.map_payload(Either::Left)) } - Either::Right(ref right_attr) => { + Either::Right(right_attr) => { let right_args = BuildArguments { - cached_reads: args.cached_reads.clone(), - config: PayloadConfig { - parent_header: args.config.parent_header.clone(), - attributes: right_attr.clone(), - }, - cancel: args.cancel.clone(), - best_payload: args.best_payload.clone().and_then(|payload| { + cached_reads, + config: PayloadConfig { parent_header, attributes: right_attr }, + cancel, + best_payload: best_payload.and_then(|payload| { if let Either::Right(p) = payload { Some(p) } else { @@ -230,7 +226,6 @@ where } }), }; - self.right.try_build(right_args).map(|out| out.map_payload(Either::Right)) } } @@ -240,19 +235,13 @@ where &self, config: PayloadConfig>, ) -> Result { - match config.attributes { - Either::Left(left_attr) => { - let left_config = PayloadConfig { - parent_header: config.parent_header.clone(), - attributes: left_attr, - }; + match config { + PayloadConfig { parent_header, attributes: Either::Left(left_attr) } => { + let left_config = 
PayloadConfig { parent_header, attributes: left_attr }; self.left.build_empty_payload(left_config).map(Either::Left) } - Either::Right(right_attr) => { - let right_config = PayloadConfig { - parent_header: config.parent_header.clone(), - attributes: right_attr, - }; + PayloadConfig { parent_header, attributes: Either::Right(right_attr) } => { + let right_config = PayloadConfig { parent_header, attributes: right_attr }; self.right.build_empty_payload(right_config).map(Either::Right) } } diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index f3f1b03ab2..507b302651 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -441,7 +441,7 @@ where this.metrics.inc_initiated_jobs(); new_job = true; this.payload_jobs.push((job, id)); - this.payload_events.send(Events::Attributes(attr.clone())).ok(); + this.payload_events.send(Events::Attributes(attr)).ok(); } Err(err) => { this.metrics.inc_failed_jobs(); diff --git a/crates/payload/primitives/Cargo.toml b/crates/payload/primitives/Cargo.toml index 670727e3c6..32a5476a80 100644 --- a/crates/payload/primitives/Cargo.toml +++ b/crates/payload/primitives/Cargo.toml @@ -17,8 +17,11 @@ reth-primitives-traits.workspace = true reth-chainspec.workspace = true reth-errors.workspace = true reth-chain-state.workspace = true +reth-execution-types.workspace = true +reth-trie-common.workspace = true # alloy +alloy-consensus.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["serde"] } @@ -38,6 +41,8 @@ assert_matches.workspace = true default = ["std"] std = [ "reth-chainspec/std", + "reth-execution-types/std", + "reth-trie-common/std", "alloy-eips/std", "alloy-primitives/std", "alloy-rpc-types-engine/std", @@ -46,6 +51,7 @@ std = [ "thiserror/std", "reth-primitives-traits/std", "either/std", + "alloy-consensus/std", ] op = [ "dep:op-alloy-rpc-types-engine", diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index ca3cccda88..7072f288cd 100644 --- a/crates/payload/primitives/src/lib.rs +++ b/crates/payload/primitives/src/lib.rs @@ -13,7 +13,6 @@ extern crate alloc; -use crate::alloc::string::ToString; use alloy_primitives::Bytes; use reth_chainspec::EthereumHardforks; use reth_primitives_traits::{NodePrimitives, SealedBlock}; @@ -26,8 +25,8 @@ pub use error::{ mod traits; pub use traits::{ - BuildNextEnv, BuiltPayload, PayloadAttributes, PayloadAttributesBuilder, - PayloadBuilderAttributes, + BuildNextEnv, BuiltPayload, BuiltPayloadExecutedBlock, PayloadAttributes, + PayloadAttributesBuilder, PayloadBuilderAttributes, }; mod payload; @@ -73,11 +72,14 @@ pub trait PayloadTypes: Send + Sync + Unpin + core::fmt::Debug + Clone + 'static /// * If V4, this ensures that the payload timestamp is within the Prague timestamp. /// * If V5, this ensures that the payload timestamp is within the Osaka timestamp. /// +/// Additionally, it ensures that `engine_getPayloadV4` is not used for an Osaka payload. +/// /// Otherwise, this will return [`EngineObjectValidationError::UnsupportedFork`]. 
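Callers on the `engine_getPayload` code path are expected to pass the new `MessageValidationKind::GetPayload` argument to the function defined below. A minimal sketch of such a call, written as if inside this crate; `chain_spec` and `timestamp` are assumed inputs and the helper name is illustrative, since the actual engine-API call sites are not part of this hunk:

```rust
// Illustrative helper, not part of this diff: shows the extra `kind` argument
// that a getPayload V4 handler is expected to pass after this change.
fn ensure_get_payload_v4_fork(
    chain_spec: impl EthereumHardforks,
    timestamp: u64,
) -> Result<(), EngineObjectValidationError> {
    // Returns `UnsupportedFork` if `timestamp` falls within Osaka, per the new V4 rule.
    validate_payload_timestamp(
        chain_spec,
        EngineApiMessageVersion::V4,
        timestamp,
        MessageValidationKind::GetPayload,
    )
}
```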
pub fn validate_payload_timestamp( chain_spec: impl EthereumHardforks, version: EngineApiMessageVersion, timestamp: u64, + kind: MessageValidationKind, ) -> Result<(), EngineObjectValidationError> { let is_cancun = chain_spec.is_cancun_active_at_timestamp(timestamp); if version.is_v2() && is_cancun { @@ -158,6 +160,11 @@ pub fn validate_payload_timestamp( return Err(EngineObjectValidationError::UnsupportedFork) } + // `engine_getPayloadV4` MUST reject payloads with a timestamp >= Osaka. + if version.is_v4() && kind == MessageValidationKind::GetPayload && is_osaka { + return Err(EngineObjectValidationError::UnsupportedFork) + } + Ok(()) } @@ -302,7 +309,7 @@ pub fn validate_parent_beacon_block_root_presence( // // 2. Client software **MUST** return `-38005: Unsupported fork` error if the `timestamp` of the // payload does not fall within the time frame of the Cancun fork. - validate_payload_timestamp(chain_spec, version, timestamp)?; + validate_payload_timestamp(chain_spec, version, timestamp, validation_kind)?; Ok(()) } @@ -314,9 +321,14 @@ pub fn validate_parent_beacon_block_root_presence( #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum MessageValidationKind { /// We are validating fields of a payload attributes. + /// This corresponds to `engine_forkchoiceUpdated`. PayloadAttributes, /// We are validating fields of a payload. + /// This corresponds to `engine_newPayload`. Payload, + /// We are validating a built payload. + /// This corresponds to `engine_getPayload`. + GetPayload, } impl MessageValidationKind { @@ -327,7 +339,9 @@ impl MessageValidationKind { error: VersionSpecificValidationError, ) -> EngineObjectValidationError { match self { - Self::Payload => EngineObjectValidationError::Payload(error), + // Both NewPayload and GetPayload errors are treated as generic Payload validation + // errors + Self::Payload | Self::GetPayload => EngineObjectValidationError::Payload(error), Self::PayloadAttributes => EngineObjectValidationError::PayloadAttributes(error), } } @@ -463,21 +477,19 @@ pub fn validate_execution_requests(requests: &[Bytes]) -> Result<(), EngineObjec let mut last_request_type = None; for request in requests { if request.len() <= 1 { - return Err(EngineObjectValidationError::InvalidParams( - "EmptyExecutionRequest".to_string().into(), - )) + return Err(EngineObjectValidationError::InvalidParams("EmptyExecutionRequest".into())) } let request_type = request[0]; if Some(request_type) < last_request_type { return Err(EngineObjectValidationError::InvalidParams( - "OutOfOrderExecutionRequest".to_string().into(), + "OutOfOrderExecutionRequest".into(), )) } if Some(request_type) == last_request_type { return Err(EngineObjectValidationError::InvalidParams( - "DuplicatedExecutionRequestType".to_string().into(), + "DuplicatedExecutionRequestType".into(), )) } @@ -490,12 +502,41 @@ pub fn validate_execution_requests(requests: &[Bytes]) -> Result<(), EngineObjec mod tests { use super::*; use assert_matches::assert_matches; + use reth_chainspec::{ChainSpecBuilder, EthereumHardfork, ForkCondition}; #[test] fn version_ord() { assert!(EngineApiMessageVersion::V4 > EngineApiMessageVersion::V3); } + #[test] + fn validate_osaka_get_payload_restrictions() { + // Osaka activates at timestamp 1000 + let osaka_activation = 1000; + let chain_spec = ChainSpecBuilder::mainnet() + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) + .with_fork(EthereumHardfork::Osaka, ForkCondition::Timestamp(osaka_activation)) + .build(); + + // Osaka is Active + V4 + GetPayload + let res = 
validate_payload_timestamp( + &chain_spec, + EngineApiMessageVersion::V4, + osaka_activation, + MessageValidationKind::GetPayload, + ); + assert_matches!(res, Err(EngineObjectValidationError::UnsupportedFork)); + + // Osaka is Active + V4 + Payload (NewPayload) + let res = validate_payload_timestamp( + &chain_spec, + EngineApiMessageVersion::V4, + osaka_activation, + MessageValidationKind::Payload, + ); + assert_matches!(res, Ok(())); + } + #[test] fn execution_requests_validation() { assert_matches!(validate_execution_requests(&[]), Ok(())); diff --git a/crates/payload/primitives/src/payload.rs b/crates/payload/primitives/src/payload.rs index 709a37768f..d50f6ffd05 100644 --- a/crates/payload/primitives/src/payload.rs +++ b/crates/payload/primitives/src/payload.rs @@ -3,7 +3,7 @@ use crate::{MessageValidationKind, PayloadAttributes}; use alloc::vec::Vec; use alloy_eips::{eip1898::BlockWithParent, eip4895::Withdrawal, eip7685::Requests, BlockNumHash}; -use alloy_primitives::B256; +use alloy_primitives::{Bytes, B256}; use alloy_rpc_types_engine::ExecutionData; use core::fmt::Debug; use serde::{de::DeserializeOwned, Serialize}; @@ -40,6 +40,11 @@ pub trait ExecutionPayload: /// Returns `None` for pre-Shanghai blocks. fn withdrawals(&self) -> Option<&Vec>; + /// Returns the access list included in this payload. + /// + /// Returns `None` for pre-Amsterdam blocks. + fn block_access_list(&self) -> Option<&Bytes>; + /// Returns the beacon block root associated with this payload. /// /// Returns `None` for pre-merge payloads. @@ -50,6 +55,9 @@ pub trait ExecutionPayload: /// Returns the total gas consumed by all transactions in this block. fn gas_used(&self) -> u64; + + /// Returns the number of transactions in the payload. + fn transaction_count(&self) -> usize; } impl ExecutionPayload for ExecutionData { @@ -69,6 +77,10 @@ impl ExecutionPayload for ExecutionData { self.payload.withdrawals() } + fn block_access_list(&self) -> Option<&Bytes> { + None + } + fn parent_beacon_block_root(&self) -> Option { self.sidecar.parent_beacon_block_root() } @@ -80,6 +92,10 @@ impl ExecutionPayload for ExecutionData { fn gas_used(&self) -> u64 { self.payload.as_v1().gas_used } + + fn transaction_count(&self) -> usize { + self.payload.as_v1().transactions.len() + } } /// A unified type for handling both execution payloads and payload attributes. @@ -172,6 +188,10 @@ impl ExecutionPayload for op_alloy_rpc_types_engine::OpExecutionData { self.payload.as_v2().map(|p| &p.withdrawals) } + fn block_access_list(&self) -> Option<&Bytes> { + None + } + fn parent_beacon_block_root(&self) -> Option { self.sidecar.parent_beacon_block_root() } @@ -183,6 +203,10 @@ impl ExecutionPayload for op_alloy_rpc_types_engine::OpExecutionData { fn gas_used(&self) -> u64 { self.payload.as_v1().gas_used } + + fn transaction_count(&self) -> usize { + self.payload.as_v1().transactions.len() + } } /// Extended functionality for Ethereum execution payloads diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index 160956afa2..726122743e 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -1,7 +1,7 @@ //! Core traits for working with execution payloads. 
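The `ExecutionPayload` accessors added in `payload.rs` above (`transaction_count` and `block_access_list`) can be consumed generically. Below is a hedged sketch of such a consumer; the re-export path of `ExecutionPayload` is assumed and the function itself is illustrative only:

```rust
use reth_payload_primitives::ExecutionPayload; // re-export path assumed

// Illustrative only: works for any payload type, e.g. `ExecutionData` or
// `OpExecutionData`, using only the accessors shown in this diff.
fn summarize_payload(payload: &impl ExecutionPayload) -> String {
    format!(
        "txs: {}, gas used: {}, withdrawals: {}, has block access list: {}",
        payload.transaction_count(),
        payload.gas_used(),
        payload.withdrawals().map_or(0, |w| w.len()),
        payload.block_access_list().is_some(),
    )
}
```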
use crate::PayloadBuilderError; -use alloc::{boxed::Box, vec::Vec}; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, eip7685::Requests, @@ -9,8 +9,64 @@ use alloy_eips::{ use alloy_primitives::{Address, B256, U256}; use alloy_rpc_types_engine::{PayloadAttributes as EthPayloadAttributes, PayloadId}; use core::fmt; -use reth_chain_state::ExecutedBlock; -use reth_primitives_traits::{NodePrimitives, SealedBlock, SealedHeader}; +use either::Either; +use reth_chain_state::ComputedTrieData; +use reth_execution_types::ExecutionOutcome; +use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; +use reth_trie_common::{ + updates::{TrieUpdates, TrieUpdatesSorted}, + HashedPostState, HashedPostStateSorted, +}; + +/// Represents an executed block for payload building purposes. +/// +/// This type captures the complete execution state of a built block, +/// including the recovered block, execution outcome, hashed state, and trie updates. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct BuiltPayloadExecutedBlock { + /// Recovered Block + pub recovered_block: Arc>, + /// Block's execution outcome. + pub execution_output: Arc>, + /// Block's hashed state. + /// + /// Supports both unsorted and sorted variants so payload builders can avoid cloning in order + /// to convert from one to the other when it's not necessary. + pub hashed_state: Either, Arc>, + /// Trie updates that result from calculating the state root for the block. + /// + /// Supports both unsorted and sorted variants so payload builders can avoid cloning in order + /// to convert from one to the other when it's not necessary. + pub trie_updates: Either, Arc>, +} + +impl BuiltPayloadExecutedBlock { + /// Converts this into an [`reth_chain_state::ExecutedBlock`]. + /// + /// Ensures hashed state and trie updates are in their sorted representations + /// as required by `reth_chain_state::ExecutedBlock`. + pub fn into_executed_payload(self) -> reth_chain_state::ExecutedBlock { + let hashed_state = match self.hashed_state { + // Convert unsorted to sorted + Either::Left(unsorted) => Arc::new(Arc::unwrap_or_clone(unsorted).into_sorted()), + // Already sorted + Either::Right(sorted) => sorted, + }; + + let trie_updates = match self.trie_updates { + // Convert unsorted to sorted + Either::Left(unsorted) => Arc::new(Arc::unwrap_or_clone(unsorted).into_sorted()), + // Already sorted + Either::Right(sorted) => sorted, + }; + + reth_chain_state::ExecutedBlock::new( + self.recovered_block, + self.execution_output, + ComputedTrieData::without_trie_input(hashed_state, trie_updates), + ) + } +} /// Represents a successfully built execution payload (block). /// @@ -30,7 +86,7 @@ pub trait BuiltPayload: Send + Sync + fmt::Debug { /// Returns the complete execution result including state updates. /// /// Returns `None` if execution data is not available or not tracked. - fn executed_block(&self) -> Option> { + fn executed_block(&self) -> Option> { None } @@ -141,40 +197,44 @@ impl PayloadAttributes for op_alloy_rpc_types_engine::OpPayloadAttributes { /// /// Enables different strategies for generating payload attributes based on /// contextual information. Useful for testing and specialized building. -pub trait PayloadAttributesBuilder: Send + Sync + 'static { +pub trait PayloadAttributesBuilder: + Send + Sync + 'static +{ /// Constructs new payload attributes for the given timestamp. - fn build(&self, timestamp: u64) -> Attributes; + fn build(&self, parent: &SealedHeader
<Header>
) -> Attributes; } -impl PayloadAttributesBuilder for F +impl PayloadAttributesBuilder for F where - F: Fn(u64) -> Attributes + Send + Sync + 'static, + Header: Clone, + F: Fn(SealedHeader
) -> Attributes + Send + Sync + 'static, { - fn build(&self, timestamp: u64) -> Attributes { - self(timestamp) + fn build(&self, parent: &SealedHeader
) -> Attributes { + self(parent.clone()) } } -impl PayloadAttributesBuilder for either::Either +impl PayloadAttributesBuilder for Either where - L: PayloadAttributesBuilder, - R: PayloadAttributesBuilder, + L: PayloadAttributesBuilder, + R: PayloadAttributesBuilder, { - fn build(&self, timestamp: u64) -> Attributes { + fn build(&self, parent: &SealedHeader
) -> Attributes { match self { - Self::Left(l) => l.build(timestamp), - Self::Right(r) => r.build(timestamp), + Self::Left(l) => l.build(parent), + Self::Right(r) => r.build(parent), } } } -impl PayloadAttributesBuilder - for Box> +impl PayloadAttributesBuilder + for Box> where + Header: 'static, Attributes: 'static, { - fn build(&self, timestamp: u64) -> Attributes { - self.as_ref().build(timestamp) + fn build(&self, parent: &SealedHeader
) -> Attributes { + self.as_ref().build(parent) } } diff --git a/crates/payload/util/src/traits.rs b/crates/payload/util/src/traits.rs index 7d076d3687..7480055b58 100644 --- a/crates/payload/util/src/traits.rs +++ b/crates/payload/util/src/traits.rs @@ -19,8 +19,11 @@ pub trait PayloadTransactions { ctx: (), ) -> Option; - /// Exclude descendants of the transaction with given sender and nonce from the iterator, - /// because this transaction won't be included in the block. + /// Marks the transaction identified by `sender` and `nonce` as invalid for this iterator. + /// + /// Implementations must ensure that subsequent transactions returned from this iterator do not + /// depend on this transaction. For example, they may choose to stop yielding any further + /// transactions from this sender in the current iteration. fn mark_invalid(&mut self, sender: Address, nonce: u64); } @@ -46,6 +49,9 @@ impl PayloadTransactions for NoopPayloadTransactions { /// Wrapper struct that allows to convert `BestTransactions` (used in tx pool) to /// `PayloadTransactions` (used in block composition). +/// +/// Note: `mark_invalid` for this type filters out all further transactions from the given sender +/// in the current iteration, mirroring the semantics of `BestTransactions::mark_invalid`. #[derive(Debug)] pub struct BestPayloadTransactions where diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index 4dc9a67e88..17ea1c67e3 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -198,6 +198,47 @@ pub trait BlockBody: .collect() }) } + + /// Returns an iterator over `Recovered<&Transaction>` for all transactions in the block body. + /// + /// This method recovers signers and returns an iterator without cloning transactions, + /// making it more efficient than [`BlockBody::recover_transactions`] when owned values are not + /// required. + /// + /// # Errors + /// + /// Returns an error if any transaction's signature is invalid. + fn recover_transactions_ref( + &self, + ) -> Result> + '_, RecoveryError> { + let signers = self.recover_signers()?; + Ok(self + .transactions() + .iter() + .zip(signers) + .map(|(tx, signer)| Recovered::new_unchecked(tx, signer))) + } + + /// Returns an iterator over `Recovered<&Transaction>` for all transactions in the block body + /// _without ensuring that the signature has a low `s` value_. + /// + /// This method recovers signers and returns an iterator without cloning transactions, + /// making it more efficient than recovering with owned transactions when owned values are not + /// required. + /// + /// # Errors + /// + /// Returns an error if any transaction's signature is invalid. 
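Both the checked helper above and the unchecked variant defined below pair each borrowed transaction with its recovered signer without cloning transaction bodies. A minimal usage sketch, assuming `BlockBody` and `RecoveryError` are in scope as they are in this module:

```rust
use alloy_primitives::Address;

// Sketch only: collect senders via the borrowing iterator; no transaction is cloned.
fn senders_without_cloning<B: BlockBody>(body: &B) -> Result<Vec<Address>, RecoveryError> {
    Ok(body.recover_transactions_ref()?.map(|recovered| recovered.signer()).collect())
}
```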
+ fn recover_transactions_unchecked_ref( + &self, + ) -> Result> + '_, RecoveryError> { + let signers = self.recover_signers_unchecked()?; + Ok(self + .transactions() + .iter() + .zip(signers) + .map(|(tx, signer)| Recovered::new_unchecked(tx, signer))) + } } impl BlockBody for alloy_consensus::BlockBody diff --git a/crates/primitives-traits/src/block/recovered.rs b/crates/primitives-traits/src/block/recovered.rs index d6bba9d112..7265cced2d 100644 --- a/crates/primitives-traits/src/block/recovered.rs +++ b/crates/primitives-traits/src/block/recovered.rs @@ -218,7 +218,7 @@ impl RecoveredBlock { /// A safer variant of [`Self::new_sealed`] that checks if the number of senders is equal to /// the number of transactions in the block and recovers the senders from the transactions, if - /// not using [`SignedTransaction::recover_signer_unchecked`](crate::transaction::signed::SignedTransaction) + /// not using [`SignedTransaction::recover_signer`](crate::transaction::signed::SignedTransaction) /// to recover the senders. /// /// Returns an error if any of the transactions fail to recover the sender. @@ -472,7 +472,7 @@ impl Default for RecoveredBlock { impl InMemorySize for RecoveredBlock { #[inline] fn size(&self) -> usize { - self.block.size() + self.senders.len() * core::mem::size_of::
<Address>() + self.block.size() + self.senders.capacity() * core::mem::size_of::<Address>
() } } @@ -668,7 +668,7 @@ mod rpc_compat { { /// Converts the block into an RPC [`Block`] with the given [`BlockTransactionsKind`]. /// - /// The `tx_resp_builder` closure transforms each transaction into the desired response + /// The `converter` closure transforms each transaction into the desired response /// type. /// /// `header_builder` transforms the block header into RPC representation. It takes the @@ -677,7 +677,7 @@ mod rpc_compat { pub fn into_rpc_block( self, kind: BlockTransactionsKind, - tx_resp_builder: F, + converter: F, header_builder: impl FnOnce(SealedHeader, usize) -> Result, ) -> Result, E> where @@ -688,9 +688,7 @@ mod rpc_compat { { match kind { BlockTransactionsKind::Hashes => self.into_rpc_block_with_tx_hashes(header_builder), - BlockTransactionsKind::Full => { - self.into_rpc_block_full(tx_resp_builder, header_builder) - } + BlockTransactionsKind::Full => self.into_rpc_block_full(converter, header_builder), } } @@ -699,7 +697,7 @@ mod rpc_compat { /// For transaction hashes, only necessary parts are cloned for efficiency. /// For full transactions, the entire block is cloned. /// - /// The `tx_resp_builder` closure transforms each transaction into the desired response + /// The `converter` closure transforms each transaction into the desired response /// type. /// /// `header_builder` transforms the block header into RPC representation. It takes the @@ -708,7 +706,7 @@ mod rpc_compat { pub fn clone_into_rpc_block( &self, kind: BlockTransactionsKind, - tx_resp_builder: F, + converter: F, header_builder: impl FnOnce(SealedHeader, usize) -> Result, ) -> Result, E> where @@ -720,7 +718,7 @@ mod rpc_compat { match kind { BlockTransactionsKind::Hashes => self.to_rpc_block_with_tx_hashes(header_builder), BlockTransactionsKind::Full => { - self.clone().into_rpc_block_full(tx_resp_builder, header_builder) + self.clone().into_rpc_block_full(converter, header_builder) } } } @@ -769,10 +767,10 @@ mod rpc_compat { /// Converts the block into an RPC [`Block`] with full transaction objects. /// /// Returns [`BlockTransactions::Full`] with complete transaction data. - /// The `tx_resp_builder` closure transforms each transaction with its metadata. + /// The `converter` closure transforms each transaction with its metadata. pub fn into_rpc_block_full( self, - tx_resp_builder: F, + converter: F, header_builder: impl FnOnce(SealedHeader, usize) -> Result, ) -> Result, E> where @@ -803,7 +801,7 @@ mod rpc_compat { index: Some(idx as u64), }; - tx_resp_builder(Recovered::new_unchecked(tx, sender), tx_info) + converter(Recovered::new_unchecked(tx, sender), tx_info) }) .collect::, E>>()?; diff --git a/crates/primitives-traits/src/block/sealed.rs b/crates/primitives-traits/src/block/sealed.rs index 5c43178146..4ae6e4cdc7 100644 --- a/crates/primitives-traits/src/block/sealed.rs +++ b/crates/primitives-traits/src/block/sealed.rs @@ -179,7 +179,7 @@ impl SealedBlock { /// Recovers all senders from the transactions in the block. /// - /// Returns `None` if any of the transactions fail to recover the sender. + /// Returns an error if any of the transactions fail to recover the sender. 
pub fn senders(&self) -> Result, RecoveryError> { self.body().recover_signers() } diff --git a/crates/primitives-traits/src/constants/gas_units.rs b/crates/primitives-traits/src/constants/gas_units.rs index e311e34d0a..986f207034 100644 --- a/crates/primitives-traits/src/constants/gas_units.rs +++ b/crates/primitives-traits/src/constants/gas_units.rs @@ -10,10 +10,14 @@ pub const MEGAGAS: u64 = KILOGAS * 1_000; /// Represents one Gigagas, or `1_000_000_000` gas. pub const GIGAGAS: u64 = MEGAGAS * 1_000; +/// Represents one Teragas, or `1_000_000_000_000` gas. +pub const TERAGAS: u64 = GIGAGAS * 1_000; + /// Returns a formatted gas throughput log, showing either: /// * "Kgas/s", or 1,000 gas per second /// * "Mgas/s", or 1,000,000 gas per second /// * "Ggas/s", or 1,000,000,000 gas per second +/// * "Tgas/s", or 1,000,000,000,000 gas per second /// /// Depending on the magnitude of the gas throughput. pub fn format_gas_throughput(gas: u64, execution_duration: Duration) -> String { @@ -22,8 +26,10 @@ pub fn format_gas_throughput(gas: u64, execution_duration: Duration) -> String { format!("{:.2}Kgas/second", gas_per_second / KILOGAS as f64) } else if gas_per_second < GIGAGAS as f64 { format!("{:.2}Mgas/second", gas_per_second / MEGAGAS as f64) - } else { + } else if gas_per_second < TERAGAS as f64 { format!("{:.2}Ggas/second", gas_per_second / GIGAGAS as f64) + } else { + format!("{:.2}Tgas/second", gas_per_second / TERAGAS as f64) } } @@ -31,6 +37,7 @@ pub fn format_gas_throughput(gas: u64, execution_duration: Duration) -> String { /// * "Kgas", or 1,000 gas /// * "Mgas", or 1,000,000 gas /// * "Ggas", or 1,000,000,000 gas +/// * "Tgas", or 1,000,000,000,000 gas /// /// Depending on the magnitude of gas. pub fn format_gas(gas: u64) -> String { @@ -39,8 +46,10 @@ pub fn format_gas(gas: u64) -> String { format!("{:.2}Kgas", gas / KILOGAS as f64) } else if gas < GIGAGAS as f64 { format!("{:.2}Mgas", gas / MEGAGAS as f64) - } else { + } else if gas < TERAGAS as f64 { format!("{:.2}Ggas", gas / GIGAGAS as f64) + } else { + format!("{:.2}Tgas", gas / TERAGAS as f64) } } @@ -65,6 +74,10 @@ mod tests { let gas = 100_000_000_000; let gas_unit = format_gas(gas); assert_eq!(gas_unit, "100.00Ggas"); + + let gas = 100_000_000_000_000; + let gas_unit = format_gas(gas); + assert_eq!(gas_unit, "100.00Tgas"); } #[test] diff --git a/crates/primitives-traits/src/header/header_mut.rs b/crates/primitives-traits/src/header/header_mut.rs new file mode 100644 index 0000000000..d0eec1a8ff --- /dev/null +++ b/crates/primitives-traits/src/header/header_mut.rs @@ -0,0 +1,52 @@ +//! Mutable header utilities. + +use crate::BlockHeader; +use alloy_consensus::Header; +use alloy_primitives::{BlockHash, BlockNumber, B256, U256}; + +/// A helper trait for [`Header`]s that allows for mutable access to the headers values. +/// +/// This allows for modifying the header for testing and mocking purposes. +pub trait HeaderMut: BlockHeader { + /// Updates the parent block hash. + fn set_parent_hash(&mut self, hash: BlockHash); + + /// Updates the block number. + fn set_block_number(&mut self, number: BlockNumber); + + /// Updates the block's timestamp. + fn set_timestamp(&mut self, number: BlockNumber); + + /// Updates the block state root. + fn set_state_root(&mut self, state_root: B256); + + /// Updates the block difficulty. + fn set_difficulty(&mut self, difficulty: U256); + + /// Updates the block number (alias for CLI compatibility). 
+ fn set_number(&mut self, number: u64) { + self.set_block_number(number); + } +} + +impl HeaderMut for Header { + fn set_parent_hash(&mut self, hash: BlockHash) { + self.parent_hash = hash; + } + + fn set_block_number(&mut self, number: BlockNumber) { + self.number = number; + } + + fn set_timestamp(&mut self, number: BlockNumber) { + self.timestamp = number; + } + + fn set_state_root(&mut self, state_root: B256) { + self.state_root = state_root; + } + + fn set_difficulty(&mut self, difficulty: U256) { + self.difficulty = difficulty; + } +} diff --git a/crates/primitives-traits/src/header/mod.rs b/crates/primitives-traits/src/header/mod.rs index 198b9cb3c8..e251cf4773 100644 --- a/crates/primitives-traits/src/header/mod.rs +++ b/crates/primitives-traits/src/header/mod.rs @@ -1,6 +1,9 @@ mod sealed; pub use sealed::{Header, SealedHeader, SealedHeaderFor}; +mod header_mut; +pub use header_mut::HeaderMut; + #[cfg(any(test, feature = "test-utils", feature = "arbitrary"))] pub mod test_utils; diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index bcf69813f9..fa393d73ee 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -94,7 +94,7 @@ impl SealedHeader { *self.hash_ref() } - /// This is the inverse of [`Header::seal_slow`] which returns the raw header and hash. + /// This is the inverse of [`Self::seal_slow`] which returns the raw header and hash. pub fn split(self) -> (H, BlockHash) { let hash = self.hash(); (self.header, hash) diff --git a/crates/primitives-traits/src/header/test_utils.rs b/crates/primitives-traits/src/header/test_utils.rs index 893f432359..efd8f2771f 100644 --- a/crates/primitives-traits/src/header/test_utils.rs +++ b/crates/primitives-traits/src/header/test_utils.rs @@ -1,45 +1,12 @@ //! Test utilities for the block header. -use crate::BlockHeader; use alloy_consensus::Header; -use alloy_primitives::{BlockHash, BlockNumber, B256, U256}; +use alloy_primitives::B256; use proptest::{arbitrary::any, prop_compose}; use proptest_arbitrary_interop::arb; -/// A helper trait for [`Header`]s that allows for mutable access to the headers values. -/// -/// This allows for modifying the header for testing purposes. -pub trait TestHeader: BlockHeader { - /// Updates the parent block hash. - fn set_parent_hash(&mut self, hash: BlockHash); - - /// Updates the block number. - fn set_block_number(&mut self, number: BlockNumber); - - /// Updates the block state root. - fn set_state_root(&mut self, state_root: B256); - - /// Updates the block difficulty. - fn set_difficulty(&mut self, difficulty: U256); -} - -impl TestHeader for Header { - fn set_parent_hash(&mut self, hash: BlockHash) { - self.parent_hash = hash - } - - fn set_block_number(&mut self, number: BlockNumber) { - self.number = number; - } - - fn set_state_root(&mut self, state_root: B256) { - self.state_root = state_root; - } - - fn set_difficulty(&mut self, difficulty: U256) { - self.difficulty = difficulty; - } -} +/// Re-export `HeaderMut` for backward compatibility in tests. +pub use super::HeaderMut as TestHeader; /// Generates a header which is valid __with respect to past and future forks__. This means, for /// example, that if the withdrawals root is present, the base fee per gas is also present. 
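Since `HeaderMut` is now a regular (non test-only) trait, it can be used anywhere a header needs to be adjusted for mocking. A short sketch, assuming the trait is reachable from `reth_primitives_traits` as the module re-export above suggests; all values are placeholders:

```rust
use alloy_consensus::Header;
use alloy_primitives::{B256, U256};
use reth_primitives_traits::HeaderMut; // re-export path assumed

// Sketch: build a child header for a test fixture via the `HeaderMut` setters.
fn make_child_header(parent_hash: B256, parent_number: u64) -> Header {
    let mut header = Header::default();
    header.set_parent_hash(parent_hash);
    header.set_block_number(parent_number + 1);
    header.set_timestamp(12); // arbitrary placeholder
    header.set_state_root(B256::ZERO);
    header.set_difficulty(U256::ZERO);
    header
}
```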
diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 67df9637fa..18fb6292bd 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -148,6 +148,7 @@ pub use block::{ Block, FullBlock, RecoveredBlock, SealedBlock, }; +#[cfg(test)] mod withdrawal; pub use alloy_eips::eip2718::WithEncoded; @@ -156,13 +157,14 @@ pub mod crypto; mod error; pub use error::{GotExpected, GotExpectedBoxed}; +#[cfg(test)] mod log; pub use alloy_primitives::{logs_bloom, Log, LogData}; pub mod proofs; mod storage; -pub use storage::StorageEntry; +pub use storage::{StorageEntry, ValueWithSubKey}; pub mod sync; diff --git a/crates/primitives-traits/src/serde_bincode_compat.rs b/crates/primitives-traits/src/serde_bincode_compat.rs index 217ad5ff33..5169f945b8 100644 --- a/crates/primitives-traits/src/serde_bincode_compat.rs +++ b/crates/primitives-traits/src/serde_bincode_compat.rs @@ -345,4 +345,17 @@ mod block_bincode { repr.into() } } + + #[cfg(feature = "op")] + impl super::SerdeBincodeCompat for op_alloy_consensus::OpReceipt { + type BincodeRepr<'a> = op_alloy_consensus::serde_bincode_compat::OpReceipt<'a>; + + fn as_repr(&self) -> Self::BincodeRepr<'_> { + self.into() + } + + fn from_repr(repr: Self::BincodeRepr<'_>) -> Self { + repr.into() + } + } } diff --git a/crates/primitives-traits/src/size.rs b/crates/primitives-traits/src/size.rs index 82c8b5d9c4..e2343cfb95 100644 --- a/crates/primitives-traits/src/size.rs +++ b/crates/primitives-traits/src/size.rs @@ -148,6 +148,18 @@ mod op { } } + impl InMemorySize for op_alloy_consensus::OpReceipt { + fn size(&self) -> usize { + match self { + Self::Legacy(receipt) | + Self::Eip2930(receipt) | + Self::Eip1559(receipt) | + Self::Eip7702(receipt) => receipt.size(), + Self::Deposit(receipt) => receipt.size(), + } + } + } + impl InMemorySize for op_alloy_consensus::OpTypedTransaction { fn size(&self) -> usize { match self { diff --git a/crates/primitives-traits/src/storage.rs b/crates/primitives-traits/src/storage.rs index c6b9b1e11c..4383f03cf9 100644 --- a/crates/primitives-traits/src/storage.rs +++ b/crates/primitives-traits/src/storage.rs @@ -1,5 +1,17 @@ use alloy_primitives::{B256, U256}; +/// Trait for `DupSort` table values that contain a subkey. +/// +/// This trait allows extracting the subkey from a value during database iteration, +/// enabling proper range queries and filtering on `DupSort` tables. +pub trait ValueWithSubKey { + /// The type of the subkey. + type SubKey; + + /// Extract the subkey from the value. + fn get_subkey(&self) -> Self::SubKey; +} + /// Account storage entry. /// /// `key` is the subkey when used as a value in the `StorageChangeSets` table. @@ -21,6 +33,14 @@ impl StorageEntry { } } +impl ValueWithSubKey for StorageEntry { + type SubKey = B256; + + fn get_subkey(&self) -> Self::SubKey { + self.key + } +} + impl From<(B256, U256)> for StorageEntry { fn from((key, value): (B256, U256)) -> Self { Self { key, value } diff --git a/crates/primitives-traits/src/transaction/error.rs b/crates/primitives-traits/src/transaction/error.rs index b87405e4ab..1b01f83a75 100644 --- a/crates/primitives-traits/src/transaction/error.rs +++ b/crates/primitives-traits/src/transaction/error.rs @@ -66,6 +66,17 @@ pub enum InvalidTransactionError { GasLimitTooHigh, } +impl InvalidTransactionError { + /// Returns true if this is [`InvalidTransactionError::NonceNotConsistent`] and the + /// transaction's nonce is lower than the state's. 
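The helper defined just below reduces to `tx < state` on the `NonceNotConsistent` fields. A tiny illustration of the distinction it draws, assuming the `u64` nonce fields shown in this variant:

```rust
// Sketch only: "too low" means the account nonce has already passed this
// transaction's nonce, so the transaction can never become valid again.
fn nonce_error_examples() {
    let too_low = InvalidTransactionError::NonceNotConsistent { tx: 1, state: 5 };
    assert!(too_low.is_nonce_too_low());

    let nonce_gap = InvalidTransactionError::NonceNotConsistent { tx: 9, state: 5 };
    assert!(!nonce_gap.is_nonce_too_low()); // ahead of state: a gap, not "too low"
}
```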
+ pub fn is_nonce_too_low(&self) -> bool { + match self { + Self::NonceNotConsistent { tx, state } => tx < state, + _ => false, + } + } +} + /// Represents error variants that can happen when trying to convert a transaction to pooled /// transaction. #[derive(Debug, Clone, Eq, PartialEq, derive_more::Display, derive_more::Error)] diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index 08a6758d8d..a6212a6c68 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -6,7 +6,7 @@ use alloy_consensus::{ transaction::{Recovered, RlpEcdsaEncodableTx, SignerRecoverable, TxHashRef}, EthereumTxEnvelope, SignableTransaction, }; -use alloy_eips::eip2718::{Decodable2718, Encodable2718}; +use alloy_eips::eip2718::{Decodable2718, Encodable2718, IsTyped2718}; use alloy_primitives::{keccak256, Address, Signature, B256}; use alloy_rlp::{Decodable, Encodable}; use core::hash::Hash; @@ -46,6 +46,7 @@ pub trait SignedTransaction: + InMemorySize + SignerRecoverable + TxHashRef + + IsTyped2718 { /// Returns whether this transaction type can be __broadcasted__ as full transaction over the /// network. diff --git a/crates/primitives/benches/validate_blob_tx.rs b/crates/primitives/benches/validate_blob_tx.rs index 9708bc9a30..be958ea2ad 100644 --- a/crates/primitives/benches/validate_blob_tx.rs +++ b/crates/primitives/benches/validate_blob_tx.rs @@ -42,14 +42,13 @@ fn validate_blob_tx( blob_sidecar.blobs.extend(blob_sidecar_ext.blobs); blob_sidecar.proofs.extend(blob_sidecar_ext.proofs); blob_sidecar.commitments.extend(blob_sidecar_ext.commitments); - - if blob_sidecar.blobs.len() > num_blobs as usize { - blob_sidecar.blobs.truncate(num_blobs as usize); - blob_sidecar.proofs.truncate(num_blobs as usize); - blob_sidecar.commitments.truncate(num_blobs as usize); - } } + // ensure exactly num_blobs blobs + blob_sidecar.blobs.truncate(num_blobs as usize); + blob_sidecar.proofs.truncate(num_blobs as usize); + blob_sidecar.commitments.truncate(num_blobs as usize); + tx.blob_versioned_hashes = blob_sidecar.versioned_hashes().collect(); (tx, blob_sidecar) diff --git a/crates/prune/prune/Cargo.toml b/crates/prune/prune/Cargo.toml index 615a793bb8..ff4d47054e 100644 --- a/crates/prune/prune/Cargo.toml +++ b/crates/prune/prune/Cargo.toml @@ -21,9 +21,11 @@ reth-tokio-util.workspace = true reth-config.workspace = true reth-prune-types.workspace = true reth-primitives-traits.workspace = true +reth-stages-types.workspace = true reth-static-file-types.workspace = true # ethereum +alloy-consensus.workspace = true alloy-eips.workspace = true # metrics diff --git a/crates/prune/prune/src/builder.rs b/crates/prune/prune/src/builder.rs index f61aa6bd46..41a496bd92 100644 --- a/crates/prune/prune/src/builder.rs +++ b/crates/prune/prune/src/builder.rs @@ -7,7 +7,7 @@ use reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::StaticFileProvider, BlockReader, ChainStateBlockReader, DBProvider, DatabaseProviderFactory, NodePrimitivesProvider, PruneCheckpointReader, PruneCheckpointWriter, - StaticFileProviderFactory, + StageCheckpointReader, StaticFileProviderFactory, StorageSettingsCache, }; use reth_prune_types::PruneModes; use std::time::Duration; @@ -43,7 +43,7 @@ impl PrunerBuilder { } /// Sets the configuration for every part of the data that can be pruned. 
- pub const fn segments(mut self, segments: PruneModes) -> Self { + pub fn segments(mut self, segments: PruneModes) -> Self { self.segments = segments; self } @@ -80,6 +80,8 @@ impl PrunerBuilder { + PruneCheckpointReader + BlockReader + ChainStateBlockReader + + StorageSettingsCache + + StageCheckpointReader + StaticFileProviderFactory< Primitives: NodePrimitives, >, @@ -112,7 +114,9 @@ impl PrunerBuilder { + BlockReader + ChainStateBlockReader + PruneCheckpointWriter - + PruneCheckpointReader, + + PruneCheckpointReader + + StorageSettingsCache + + StageCheckpointReader, { let segments = SegmentSet::::from_components(static_file_provider, self.segments); diff --git a/crates/prune/prune/src/db_ext.rs b/crates/prune/prune/src/db_ext.rs index 63ab87c446..ee1b3cec94 100644 --- a/crates/prune/prune/src/db_ext.rs +++ b/crates/prune/prune/src/db_ext.rs @@ -1,14 +1,14 @@ use crate::PruneLimiter; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW, RangeWalker}, - table::{Table, TableRow}, - transaction::DbTxMut, + table::{DupSort, Table, TableRow}, + transaction::{DbTx, DbTxMut}, DatabaseError, }; use std::{fmt::Debug, ops::RangeBounds}; use tracing::debug; -pub(crate) trait DbTxPruneExt: DbTxMut { +pub(crate) trait DbTxPruneExt: DbTxMut + DbTx { /// Prune the table for the specified pre-sorted key iterator. /// /// Returns number of rows pruned. @@ -19,11 +19,12 @@ pub(crate) trait DbTxPruneExt: DbTxMut { mut delete_callback: impl FnMut(TableRow), ) -> Result<(usize, bool), DatabaseError> { let mut cursor = self.cursor_write::()?; - let mut keys = keys.into_iter(); + let mut keys = keys.into_iter().peekable(); let mut deleted_entries = 0; - for key in &mut keys { + let mut done = true; + while keys.peek().is_some() { if limiter.is_limit_reached() { debug!( target: "providers::db", @@ -33,9 +34,11 @@ pub(crate) trait DbTxPruneExt: DbTxMut { table = %T::NAME, "Pruning limit reached" ); + done = false; break } + let key = keys.next().expect("peek() said Some"); let row = cursor.seek_exact(key)?; if let Some(row) = row { cursor.delete_current()?; @@ -45,7 +48,6 @@ pub(crate) trait DbTxPruneExt: DbTxMut { } } - let done = keys.next().is_none(); Ok((deleted_entries, done)) } @@ -121,6 +123,207 @@ pub(crate) trait DbTxPruneExt: DbTxMut { Ok(false) } + + /// Prune a DUPSORT table for the specified key range. + /// + /// Returns number of rows pruned. 
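The `prune_table_with_iterator` rework above switches to a peekable iterator so that hitting the limit no longer consumes a key that is never pruned, and `done` is now derived from whether any keys remain. The standalone sketch below uses only `std` types to illustrate the pattern; the names are illustrative and unrelated to reth's API:

```rust
// Illustrative only: peek before consuming so an early exit does not swallow an item.
fn take_until_limit(keys: Vec<u64>, limit: usize) -> (Vec<u64>, bool) {
    let mut keys = keys.into_iter().peekable();
    let mut taken = Vec::new();
    let mut done = true;

    while keys.peek().is_some() {
        if taken.len() >= limit {
            // Limit reached with keys still pending: not done, and the pending key
            // stays available for the next run because it was only peeked.
            done = false;
            break;
        }
        taken.push(keys.next().expect("peek() said Some"));
    }

    (taken, done)
}
```

For example, `take_until_limit(vec![1, 2, 3], 2)` yields `([1, 2], false)` and leaves key `3` for the next run, while `take_until_limit(vec![1, 2], 5)` yields `([1, 2], true)`.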
+ fn prune_dupsort_table_with_range( + &self, + keys: impl RangeBounds + Clone + Debug, + limiter: &mut PruneLimiter, + mut delete_callback: impl FnMut(TableRow), + ) -> Result<(usize, bool), DatabaseError> { + let starting_entries = self.entries::()?; + let mut cursor = self.cursor_dup_write::()?; + let mut walker = cursor.walk_range(keys)?; + + let done = loop { + if limiter.is_limit_reached() { + debug!( + target: "providers::db", + ?limiter, + deleted_entries_limit = %limiter.is_deleted_entries_limit_reached(), + time_limit = %limiter.is_time_limit_reached(), + table = %T::NAME, + "Pruning limit reached" + ); + break false + } + + let Some(res) = walker.next() else { break true }; + let row = res?; + + walker.delete_current_duplicates()?; + limiter.increment_deleted_entries_count(); + delete_callback(row); + }; + + debug!( + target: "providers::db", + table=?T::NAME, + cursor_current=?cursor.current(), + "done walking", + ); + + let ending_entries = self.entries::()?; + + Ok((starting_entries - ending_entries, done)) + } } -impl DbTxPruneExt for Tx where Tx: DbTxMut {} +impl DbTxPruneExt for Tx where Tx: DbTxMut + DbTx {} + +#[cfg(test)] +mod tests { + use super::DbTxPruneExt; + use crate::PruneLimiter; + use reth_db_api::tables; + use reth_primitives_traits::SignerRecoverable; + use reth_provider::{DBProvider, DatabaseProviderFactory}; + use reth_stages::test_utils::{StorageKind, TestStageDB}; + use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams}; + use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }; + + struct CountingIter { + data: Vec, + calls: Arc, + } + + impl CountingIter { + fn new(data: Vec, calls: Arc) -> Self { + Self { data, calls } + } + } + + struct CountingIntoIter { + inner: std::vec::IntoIter, + calls: Arc, + } + + impl Iterator for CountingIntoIter { + type Item = u64; + fn next(&mut self) -> Option { + let res = self.inner.next(); + self.calls.fetch_add(1, Ordering::SeqCst); + res + } + } + + impl IntoIterator for CountingIter { + type Item = u64; + type IntoIter = CountingIntoIter; + fn into_iter(self) -> Self::IntoIter { + CountingIntoIter { inner: self.data.into_iter(), calls: self.calls } + } + } + + #[test] + fn prune_table_with_iterator_early_exit_does_not_overconsume() { + let db = TestStageDB::default(); + let mut rng = generators::rng(); + + let blocks = random_block_range( + &mut rng, + 1..=3, + BlockRangeParams { + parent: Some(alloy_primitives::B256::ZERO), + tx_count: 2..3, + ..Default::default() + }, + ); + db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks"); + + let mut tx_senders = Vec::new(); + for block in &blocks { + tx_senders.reserve_exact(block.transaction_count()); + for transaction in &block.body().transactions { + tx_senders.push(( + tx_senders.len() as u64, + transaction.recover_signer().expect("recover signer"), + )); + } + } + let total = tx_senders.len(); + db.insert_transaction_senders(tx_senders).expect("insert transaction senders"); + + let provider = db.factory.database_provider_rw().unwrap(); + + let calls = Arc::new(AtomicUsize::new(0)); + let keys: Vec = (0..total as u64).collect(); + let counting_iter = CountingIter::new(keys, calls.clone()); + + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(2); + + let (pruned, done) = provider + .tx_ref() + .prune_table_with_iterator::( + counting_iter, + &mut limiter, + |_| {}, + ) + .expect("prune"); + + assert_eq!(pruned, 2); + assert!(!done); + assert_eq!(calls.load(Ordering::SeqCst), pruned + 1); 
+ + provider.commit().expect("commit"); + assert_eq!(db.table::().unwrap().len(), total - 2); + } + + #[test] + fn prune_table_with_iterator_consumes_to_end_reports_done() { + let db = TestStageDB::default(); + let mut rng = generators::rng(); + + let blocks = random_block_range( + &mut rng, + 1..=2, + BlockRangeParams { + parent: Some(alloy_primitives::B256::ZERO), + tx_count: 1..2, + ..Default::default() + }, + ); + db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks"); + + let mut tx_senders = Vec::new(); + for block in &blocks { + for transaction in &block.body().transactions { + tx_senders.push(( + tx_senders.len() as u64, + transaction.recover_signer().expect("recover signer"), + )); + } + } + let total = tx_senders.len(); + db.insert_transaction_senders(tx_senders).expect("insert transaction senders"); + + let provider = db.factory.database_provider_rw().unwrap(); + + let calls = Arc::new(AtomicUsize::new(0)); + let keys: Vec = (0..total as u64).collect(); + let counting_iter = CountingIter::new(keys, calls.clone()); + + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(usize::MAX); + + let (pruned, done) = provider + .tx_ref() + .prune_table_with_iterator::( + counting_iter, + &mut limiter, + |_| {}, + ) + .expect("prune"); + + assert_eq!(pruned, total); + assert!(done); + assert_eq!(calls.load(Ordering::SeqCst), total + 1); + + provider.commit().expect("commit"); + assert_eq!(db.table::().unwrap().len(), 0); + } +} diff --git a/crates/prune/prune/src/limiter.rs b/crates/prune/prune/src/limiter.rs index d347ecddbd..a32e6ab243 100644 --- a/crates/prune/prune/src/limiter.rs +++ b/crates/prune/prune/src/limiter.rs @@ -96,7 +96,7 @@ impl PruneLimiter { /// Returns the number of deleted entries left before the limit is reached. pub fn deleted_entries_limit_left(&self) -> Option { - self.deleted_entries_limit.as_ref().map(|limit| limit.limit - limit.deleted) + self.deleted_entries_limit.as_ref().map(|limit| limit.limit.saturating_sub(limit.deleted)) } /// Returns the limit on the number of deleted entries (rows in the database). 
@@ -411,4 +411,35 @@ mod tests { sleep(Duration::new(0, 10_000_000)); // 10 milliseconds assert!(limiter.is_limit_reached(), "Limit should be reached when time limit is reached"); } + + #[test] + fn test_deleted_entries_limit_left_saturation_and_normal() { + // less than limit → no saturation + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10); + limiter.increment_deleted_entries_count_by(3); + assert_eq!(limiter.deleted_entries_limit_left(), Some(7)); + + // equal to limit → saturates to 0 + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(3); + limiter.increment_deleted_entries_count_by(3); + assert_eq!(limiter.deleted_entries_limit_left(), Some(0)); + + // overrun past limit → saturates to 0 + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10); + limiter.increment_deleted_entries_count_by(12); + assert_eq!(limiter.deleted_entries_limit_left(), Some(0)); + + // lowering limit via set → saturates to 0 if below deleted + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(20); + limiter.increment_deleted_entries_count_by(15); + let limiter = limiter.set_deleted_entries_limit(10); + assert_eq!(limiter.deleted_entries_limit_left(), Some(0)); + + // lowering limit via floor → saturates to 0 if below deleted + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(15); + limiter.increment_deleted_entries_count_by(14); + let denominator = NonZeroUsize::new(8).unwrap(); + let limiter = limiter.floor_deleted_entries_limit_to_multiple_of(denominator); + assert_eq!(limiter.deleted_entries_limit_left(), Some(0)); + } } diff --git a/crates/prune/prune/src/pruner.rs b/crates/prune/prune/src/pruner.rs index 4ef060774b..b700d61028 100644 --- a/crates/prune/prune/src/pruner.rs +++ b/crates/prune/prune/src/pruner.rs @@ -8,8 +8,10 @@ use alloy_primitives::BlockNumber; use reth_exex_types::FinishedExExHeight; use reth_provider::{ DBProvider, DatabaseProviderFactory, PruneCheckpointReader, PruneCheckpointWriter, + StageCheckpointReader, }; use reth_prune_types::{PruneProgress, PrunedSegmentInfo, PrunerOutput}; +use reth_stages_types::StageId; use reth_tokio_util::{EventSender, EventStream}; use std::time::{Duration, Instant}; use tokio::sync::watch; @@ -100,7 +102,7 @@ where impl Pruner where - Provider: PruneCheckpointReader + PruneCheckpointWriter, + Provider: PruneCheckpointReader + PruneCheckpointWriter + StageCheckpointReader, { /// Listen for events on the pruner. pub fn events(&self) -> EventStream { @@ -200,6 +202,19 @@ where .transpose()? .flatten() { + // Check if segment has a required stage that must be finished first + if let Some(required_stage) = segment.required_stage() && + !is_stage_finished(provider, required_stage)? + { + debug!( + target: "pruner", + segment = ?segment.segment(), + ?required_stage, + "Segment's required stage not finished, skipping" + ); + continue + } + debug!( target: "pruner", segment = ?segment.segment(), @@ -318,7 +333,9 @@ where impl Pruner where - PF: DatabaseProviderFactory, + PF: DatabaseProviderFactory< + ProviderRW: PruneCheckpointWriter + PruneCheckpointReader + StageCheckpointReader, + >, { /// Run the pruner. This will only prune data up to the highest finished ExEx height, if there /// are no ExExes. @@ -333,6 +350,19 @@ where } } +/// Checks if the given stage has caught up with the `Finish` stage. +/// +/// Returns `true` if the stage checkpoint is >= the Finish stage checkpoint. 
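Note that the helper defined below compares two `Option<BlockNumber>` values, so the standard `Option` ordering applies: `None` is less than any `Some`, and `None >= None` holds. In practice the segment runs when neither checkpoint exists yet, and is skipped when only the `Finish` checkpoint is present. A small illustration of that ordering:

```rust
// Illustrative only: the `Option` ordering that `is_stage_finished` relies on.
fn option_ordering_examples() {
    let missing: Option<u64> = None;
    assert!(missing >= None);         // neither checkpoint exists: treated as finished
    assert!(!(missing >= Some(10)));  // stage never ran but Finish exists: not finished
    assert!(Some(10) >= missing);     // stage ran, Finish checkpoint missing: finished
    assert!(Some(5u64) < Some(10));   // stage behind Finish: not finished
}
```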
+fn is_stage_finished( + provider: &Provider, + stage_id: StageId, +) -> Result { + let stage_checkpoint = provider.get_stage_checkpoint(stage_id)?.map(|c| c.block_number); + let finish_checkpoint = provider.get_stage_checkpoint(StageId::Finish)?.map(|c| c.block_number); + + Ok(stage_checkpoint >= finish_checkpoint) +} + #[cfg(test)] mod tests { use crate::Pruner; diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index 43be33a75d..04eaaceed1 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -4,16 +4,56 @@ mod user; use crate::{PruneLimiter, PrunerError}; use alloy_primitives::{BlockNumber, TxNumber}; -use reth_provider::{errors::provider::ProviderResult, BlockReader, PruneCheckpointWriter}; -use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; +use reth_provider::{ + errors::provider::ProviderResult, BlockReader, PruneCheckpointWriter, StaticFileProviderFactory, +}; +use reth_prune_types::{ + PruneCheckpoint, PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutput, + SegmentOutputCheckpoint, +}; +use reth_stages_types::StageId; +use reth_static_file_types::StaticFileSegment; pub use set::SegmentSet; use std::{fmt::Debug, ops::RangeInclusive}; use tracing::error; pub use user::{ - AccountHistory, MerkleChangeSets, Receipts as UserReceipts, SenderRecovery, StorageHistory, - TransactionLookup, + AccountHistory, Bodies, MerkleChangeSets, Receipts as UserReceipts, ReceiptsByLogs, + SenderRecovery, StorageHistory, TransactionLookup, }; +/// Prunes data from static files for a given segment. +/// +/// This is a generic helper function used by both receipts and bodies pruning +/// when data is stored in static files. +pub(crate) fn prune_static_files( + provider: &Provider, + input: PruneInput, + segment: StaticFileSegment, +) -> Result +where + Provider: StaticFileProviderFactory, +{ + let deleted_headers = + provider.static_file_provider().delete_segment_below_block(segment, input.to_block + 1)?; + + if deleted_headers.is_empty() { + return Ok(SegmentOutput::done()) + } + + let tx_ranges = deleted_headers.iter().filter_map(|header| header.tx_range()); + + let pruned = tx_ranges.clone().map(|range| range.len()).sum::() as usize; + + Ok(SegmentOutput { + progress: PruneProgress::Finished, + pruned, + checkpoint: Some(SegmentOutputCheckpoint { + block_number: Some(input.to_block), + tx_number: tx_ranges.map(|range| range.end()).max(), + }), + }) +} + /// A segment represents a pruning of some portion of the data. /// /// Segments are called from [`Pruner`](crate::Pruner) with the following lifecycle: @@ -45,6 +85,14 @@ pub trait Segment: Debug + Send + Sync { { provider.save_prune_checkpoint(self.segment(), checkpoint) } + + /// Returns the stage this segment depends on, if any. + /// + /// If this returns `Some(stage_id)`, the pruner will skip this segment if the stage + /// has not yet caught up with the `Finish` stage checkpoint. + fn required_stage(&self) -> Option { + None + } } /// Segment pruning input, see [`Segment::prune`]. 
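With the new `required_stage` hook, a segment can declare a stage dependency and the pruner will skip it until that stage has caught up with `Finish`. Below is a hedged sketch of a hypothetical segment using the hook, modeled on the `Bodies` segment added later in this diff; the struct and its stage choice are illustrative, not part of this change:

```rust
use crate::{
    segments::{self, PruneInput, Segment},
    PrunerError,
};
use reth_provider::{BlockReader, StaticFileProviderFactory};
use reth_prune_types::{PruneMode, PrunePurpose, PruneSegment, SegmentOutput};
use reth_stages_types::StageId;
use reth_static_file_types::StaticFileSegment;

/// Hypothetical example: prunes transaction static files, but only once the
/// `SenderRecovery` stage has caught up with the `Finish` stage.
#[derive(Debug)]
struct GatedBodies {
    mode: PruneMode,
}

impl<Provider> Segment<Provider> for GatedBodies
where
    Provider: StaticFileProviderFactory + BlockReader,
{
    fn segment(&self) -> PruneSegment {
        PruneSegment::Bodies
    }

    fn mode(&self) -> Option<PruneMode> {
        Some(self.mode)
    }

    fn purpose(&self) -> PrunePurpose {
        PrunePurpose::User
    }

    fn required_stage(&self) -> Option<StageId> {
        // The pruner skips this segment until SenderRecovery >= Finish.
        Some(StageId::SenderRecovery)
    }

    fn prune(&self, provider: &Provider, input: PruneInput) -> Result<SegmentOutput, PrunerError> {
        segments::prune_static_files(provider, input, StaticFileSegment::Transactions)
    }
}
```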
@@ -187,7 +235,7 @@ mod tests { for block in &blocks { provider_rw .insert_block( - block.clone().try_recover().expect("failed to seal block with senders"), + &block.clone().try_recover().expect("failed to seal block with senders"), ) .expect("failed to insert block"); } @@ -225,7 +273,7 @@ mod tests { for block in &blocks { provider_rw .insert_block( - block.clone().try_recover().expect("failed to seal block with senders"), + &block.clone().try_recover().expect("failed to seal block with senders"), ) .expect("failed to insert block"); } @@ -271,7 +319,7 @@ mod tests { for block in &blocks { provider_rw .insert_block( - block.clone().try_recover().expect("failed to seal block with senders"), + &block.clone().try_recover().expect("failed to seal block with senders"), ) .expect("failed to insert block"); } @@ -307,7 +355,7 @@ mod tests { for block in &blocks { provider_rw .insert_block( - block.clone().try_recover().expect("failed to seal block with senders"), + &block.clone().try_recover().expect("failed to seal block with senders"), ) .expect("failed to insert block"); } diff --git a/crates/prune/prune/src/segments/receipts.rs b/crates/prune/prune/src/segments/receipts.rs index 68a1255201..30915f89b2 100644 --- a/crates/prune/prune/src/segments/receipts.rs +++ b/crates/prune/prune/src/segments/receipts.rs @@ -3,15 +3,21 @@ //! - [`crate::segments::user::Receipts`] is responsible for pruning receipts according to the //! user-configured settings (for example, on a full node or with a custom prune config) -use crate::{db_ext::DbTxPruneExt, segments::PruneInput, PrunerError}; +use crate::{ + db_ext::DbTxPruneExt, + segments::{self, PruneInput}, + PrunerError, +}; use reth_db_api::{table::Value, tables, transaction::DbTxMut}; use reth_primitives_traits::NodePrimitives; use reth_provider::{ - errors::provider::ProviderResult, BlockReader, DBProvider, NodePrimitivesProvider, - PruneCheckpointWriter, TransactionsProvider, + errors::provider::ProviderResult, BlockReader, DBProvider, EitherWriter, + NodePrimitivesProvider, PruneCheckpointWriter, StaticFileProviderFactory, StorageSettingsCache, + TransactionsProvider, }; use reth_prune_types::{PruneCheckpoint, PruneSegment, SegmentOutput, SegmentOutputCheckpoint}; -use tracing::trace; +use reth_static_file_types::StaticFileSegment; +use tracing::{debug, trace}; pub(crate) fn prune( provider: &Provider, @@ -21,8 +27,17 @@ where Provider: DBProvider + TransactionsProvider + BlockReader + + StorageSettingsCache + + StaticFileProviderFactory + NodePrimitivesProvider>, { + if EitherWriter::receipts_destination(provider).is_static_file() { + debug!(target: "pruner", "Pruning receipts from static files."); + return segments::prune_static_files(provider, input, StaticFileSegment::Receipts) + } + debug!(target: "pruner", "Pruning receipts from database."); + + // Original database implementation for when receipts are not on static files (old nodes) let tx_range = match input.get_next_tx_num_range(provider)? { Some(range) => range, None => { @@ -46,7 +61,7 @@ where trace!(target: "pruner", %pruned, %done, "Pruned receipts"); let last_pruned_block = provider - .transaction_block(last_pruned_transaction)? + .block_by_transaction_id(last_pruned_transaction)? .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))? // If there's more receipts to prune, set the checkpoint block number to previous, // so we could finish pruning its receipts on the next run. 
@@ -98,8 +113,14 @@ mod tests { use std::ops::Sub; #[test] - fn prune() { - let db = TestStageDB::default(); + fn prune_legacy() { + let mut db = TestStageDB::default(); + // Configure the factory to use database for receipts by enabling receipt pruning. + // This ensures EitherWriter::receipts_destination returns Database instead of StaticFile. + db.factory = db.factory.with_prune_modes(reth_prune_types::PruneModes { + receipts: Some(PruneMode::Full), + ..Default::default() + }); let mut rng = generators::rng(); let blocks = random_block_range( diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index 4538773d7d..479ab4f25b 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -1,13 +1,13 @@ use crate::segments::{ - AccountHistory, MerkleChangeSets, Segment, SenderRecovery, StorageHistory, TransactionLookup, - UserReceipts, + user::ReceiptsByLogs, AccountHistory, Bodies, MerkleChangeSets, Segment, SenderRecovery, + StorageHistory, TransactionLookup, UserReceipts, }; use alloy_eips::eip2718::Encodable2718; use reth_db_api::{table::Value, transaction::DbTxMut}; use reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::StaticFileProvider, BlockReader, ChainStateBlockReader, DBProvider, - PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, + PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, StorageSettingsCache, }; use reth_prune_types::PruneModes; @@ -51,7 +51,8 @@ where + PruneCheckpointWriter + PruneCheckpointReader + BlockReader - + ChainStateBlockReader, + + ChainStateBlockReader + + StorageSettingsCache, { /// Creates a [`SegmentSet`] from an existing components, such as [`StaticFileProvider`] and /// [`PruneModes`]. @@ -59,19 +60,20 @@ where _static_file_provider: StaticFileProvider, prune_modes: PruneModes, ) -> Self { - #[expect(deprecated)] let PruneModes { sender_recovery, transaction_lookup, receipts, account_history, storage_history, - bodies_history: _, + bodies_history, merkle_changesets, - receipts_log_filter: (), + receipts_log_filter, } = prune_modes; Self::default() + // Bodies - run first since file deletion is fast + .segment_opt(bodies_history.map(Bodies::new)) // Merkle changesets .segment(MerkleChangeSets::new(merkle_changesets)) // Account history @@ -80,6 +82,11 @@ where .segment_opt(storage_history.map(StorageHistory::new)) // User receipts .segment_opt(receipts.map(UserReceipts::new)) + // Receipts by logs + .segment_opt( + (!receipts_log_filter.is_empty()) + .then(|| ReceiptsByLogs::new(receipts_log_filter.clone())), + ) // Transaction lookup .segment_opt(transaction_lookup.map(TransactionLookup::new)) // Sender recovery diff --git a/crates/prune/prune/src/segments/user/bodies.rs b/crates/prune/prune/src/segments/user/bodies.rs new file mode 100644 index 0000000000..4d0dca41b1 --- /dev/null +++ b/crates/prune/prune/src/segments/user/bodies.rs @@ -0,0 +1,305 @@ +use crate::{ + segments::{self, PruneInput, Segment}, + PrunerError, +}; +use reth_provider::{BlockReader, StaticFileProviderFactory}; +use reth_prune_types::{PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; +use reth_static_file_types::StaticFileSegment; + +/// Segment responsible for pruning transactions in static files. +/// +/// This segment is controlled by the `bodies_history` configuration. +#[derive(Debug)] +pub struct Bodies { + mode: PruneMode, +} + +impl Bodies { + /// Creates a new [`Bodies`] segment with the given prune mode. 
+ pub const fn new(mode: PruneMode) -> Self { + Self { mode } + } +} + +impl Segment for Bodies +where + Provider: StaticFileProviderFactory + BlockReader, +{ + fn segment(&self) -> PruneSegment { + PruneSegment::Bodies + } + + fn mode(&self) -> Option { + Some(self.mode) + } + + fn purpose(&self) -> PrunePurpose { + PrunePurpose::User + } + + fn prune(&self, provider: &Provider, input: PruneInput) -> Result { + segments::prune_static_files(provider, input, StaticFileSegment::Transactions) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::Pruner; + use alloy_primitives::BlockNumber; + use reth_exex_types::FinishedExExHeight; + use reth_provider::{ + test_utils::{create_test_provider_factory, MockNodeTypesWithDB}, + ProviderFactory, StaticFileWriter, + }; + use reth_prune_types::{PruneMode, PruneProgress, PruneSegment}; + use reth_static_file_types::{ + SegmentHeader, SegmentRangeInclusive, StaticFileSegment, DEFAULT_BLOCKS_PER_STATIC_FILE, + }; + + /// Creates empty static file jars at 500k block intervals up to the tip block. + /// + /// Each jar contains sequential transaction ranges for testing deletion logic. + fn setup_static_file_jars(provider: &P, tip_block: u64) { + let num_jars = (tip_block + 1) / DEFAULT_BLOCKS_PER_STATIC_FILE; + let txs_per_jar = 1000; + let static_file_provider = provider.static_file_provider(); + + let mut writer = + static_file_provider.latest_writer(StaticFileSegment::Transactions).unwrap(); + + for jar_idx in 0..num_jars { + let block_start = jar_idx * DEFAULT_BLOCKS_PER_STATIC_FILE; + let block_end = ((jar_idx + 1) * DEFAULT_BLOCKS_PER_STATIC_FILE - 1).min(tip_block); + + let tx_start = jar_idx * txs_per_jar; + let tx_end = tx_start + txs_per_jar - 1; + + *writer.user_header_mut() = SegmentHeader::new( + SegmentRangeInclusive::new(block_start, block_end), + Some(SegmentRangeInclusive::new(block_start, block_end)), + Some(SegmentRangeInclusive::new(tx_start, tx_end)), + StaticFileSegment::Transactions, + ); + + writer.inner().set_dirty(); + writer.commit().expect("commit empty jar"); + + if jar_idx < num_jars - 1 { + writer.increment_block(block_end + 1).expect("increment block"); + } + } + + static_file_provider.initialize_index().expect("initialize index"); + } + + struct PruneTestCase { + prune_mode: PruneMode, + expected_pruned: usize, + expected_lowest_block: Option, + } + + fn run_prune_test( + factory: &ProviderFactory, + finished_exex_height_rx: &tokio::sync::watch::Receiver, + test_case: PruneTestCase, + tip: BlockNumber, + ) { + let bodies = Bodies::new(test_case.prune_mode); + let segments: Vec>> = vec![Box::new(bodies)]; + + let mut pruner = Pruner::new_with_factory( + factory.clone(), + segments, + 5, + 10000, + None, + finished_exex_height_rx.clone(), + ); + + let result = pruner.run(tip).expect("pruner run"); + + assert_eq!(result.progress, PruneProgress::Finished); + assert_eq!(result.segments.len(), 1); + + let (segment, output) = &result.segments[0]; + assert_eq!(*segment, PruneSegment::Bodies); + assert_eq!(output.pruned, test_case.expected_pruned); + + let static_provider = factory.static_file_provider(); + assert_eq!( + static_provider.get_lowest_range_end(StaticFileSegment::Transactions), + test_case.expected_lowest_block + ); + assert_eq!( + static_provider.get_highest_static_file_block(StaticFileSegment::Transactions), + Some(tip) + ); + } + + #[test] + fn bodies_prune_through_pruner() { + let factory = create_test_provider_factory(); + let tip = 2_499_999; + setup_static_file_jars(&factory, tip); + + let (_, 
finished_exex_height_rx) = tokio::sync::watch::channel(FinishedExExHeight::NoExExs); + + let test_cases = vec![ + // Test 1: PruneMode::Before(750_000) → deletes jar 1 (0-499_999) + PruneTestCase { + prune_mode: PruneMode::Before(750_000), + expected_pruned: 1000, + expected_lowest_block: Some(999_999), + }, + // Test 2: PruneMode::Before(850_000) → no deletion (jar 2: 500_000-999_999 contains + // target) + PruneTestCase { + prune_mode: PruneMode::Before(850_000), + expected_pruned: 0, + expected_lowest_block: Some(999_999), + }, + // Test 3: PruneMode::Before(1_599_999) → deletes jar 2 (500_000-999_999) and jar 3 + // (1_000_000-1_499_999) + PruneTestCase { + prune_mode: PruneMode::Before(1_599_999), + expected_pruned: 2000, + expected_lowest_block: Some(1_999_999), + }, + // Test 4: PruneMode::Distance(500_000) with tip=2_499_999 → deletes jar 4 + // (1_500_000-1_999_999) + PruneTestCase { + prune_mode: PruneMode::Distance(500_000), + expected_pruned: 1000, + expected_lowest_block: Some(2_499_999), + }, + // Test 5: PruneMode::Before(2_300_000) → no deletion (jar 5: 2_000_000-2_499_999 + // contains target) + PruneTestCase { + prune_mode: PruneMode::Before(2_300_000), + expected_pruned: 0, + expected_lowest_block: Some(2_499_999), + }, + ]; + + for test_case in test_cases { + run_prune_test(&factory, &finished_exex_height_rx, test_case, tip); + } + } + + #[test] + fn min_block_updated_on_sync() { + // Regression test: update_index must update min_block to prevent stale values + // that can cause pruner to incorrectly delete static files when PruneMode::Before(0) is + // used. + + struct MinBlockTestCase { + // Block range + initial_range: Option, + updated_range: SegmentRangeInclusive, + // Min block + expected_before_update: Option, + expected_after_update: BlockNumber, + // Test delete_segment_below_block with this value + delete_below_block: BlockNumber, + // Expected number of deleted segments + expected_deleted: usize, + } + + let test_cases = vec![ + // Test 1: Empty initial state (None) -> syncs to block 100 + MinBlockTestCase { + initial_range: None, + updated_range: SegmentRangeInclusive::new(0, 100), + expected_before_update: None, + expected_after_update: 100, + delete_below_block: 1, + expected_deleted: 0, + }, + // Test 2: Genesis state [0..=0] -> syncs to block 100 (eg. 
op-reth node after op-reth + // init-state) + MinBlockTestCase { + initial_range: Some(SegmentRangeInclusive::new(0, 0)), + updated_range: SegmentRangeInclusive::new(0, 100), + expected_before_update: Some(0), + expected_after_update: 100, + delete_below_block: 1, + expected_deleted: 0, + }, + // Test 3: Existing state [0..=50] -> syncs to block 200 + MinBlockTestCase { + initial_range: Some(SegmentRangeInclusive::new(0, 50)), + updated_range: SegmentRangeInclusive::new(0, 200), + expected_before_update: Some(50), + expected_after_update: 200, + delete_below_block: 150, + expected_deleted: 0, + }, + ]; + + for ( + idx, + MinBlockTestCase { + initial_range, + updated_range, + expected_before_update, + expected_after_update, + delete_below_block, + expected_deleted, + }, + ) in test_cases.into_iter().enumerate() + { + let factory = create_test_provider_factory(); + let static_provider = factory.static_file_provider(); + + let mut writer = + static_provider.latest_writer(StaticFileSegment::Transactions).unwrap(); + + // Set up initial state if provided + if let Some(initial_range) = initial_range { + *writer.user_header_mut() = SegmentHeader::new( + // Expected block range needs to have a fixed size that's determined by the + // provider itself + static_provider + .find_fixed_range(StaticFileSegment::Transactions, initial_range.start()), + Some(initial_range), + Some(initial_range), + StaticFileSegment::Transactions, + ); + writer.inner().set_dirty(); + writer.commit().unwrap(); + static_provider.initialize_index().unwrap(); + } + + // Verify initial state + assert_eq!( + static_provider.get_lowest_range_end(StaticFileSegment::Transactions), + expected_before_update, + "Test case {}: Initial min_block mismatch", + idx + ); + + // Update to new block and tx ranges + writer.user_header_mut().set_block_range(updated_range.start(), updated_range.end()); + writer.user_header_mut().set_tx_range(updated_range.start(), updated_range.end()); + writer.inner().set_dirty(); + writer.commit().unwrap(); // update_index is called inside + + // Verify min_block was updated (not stuck at stale value) + assert_eq!( + static_provider.get_lowest_range_end(StaticFileSegment::Transactions), + Some(expected_after_update), + "Test case {}: min_block should be updated to {} (not stuck at stale value)", + idx, + expected_after_update + ); + + // Verify delete_segment_below_block behaves correctly with updated min_block + let deleted = static_provider + .delete_segment_below_block(StaticFileSegment::Transactions, delete_below_block) + .unwrap(); + + assert_eq!(deleted.len(), expected_deleted); + } + } +} diff --git a/crates/prune/prune/src/segments/user/merkle_change_sets.rs b/crates/prune/prune/src/segments/user/merkle_change_sets.rs index 89cc4567b7..c02d752fcd 100644 --- a/crates/prune/prune/src/segments/user/merkle_change_sets.rs +++ b/crates/prune/prune/src/segments/user/merkle_change_sets.rs @@ -13,6 +13,7 @@ use reth_provider::{ use reth_prune_types::{ PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, }; +use reth_stages_types::StageId; use tracing::{instrument, trace}; #[derive(Debug)] @@ -47,6 +48,10 @@ where PrunePurpose::User } + fn required_stage(&self) -> Option { + Some(StageId::MerkleChangeSets) + } + #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] fn prune(&self, provider: &Provider, input: PruneInput) -> Result { let Some(block_range) = input.get_next_block_range() else { @@ -66,10 +71,9 @@ where let mut 
last_storages_pruned_block = None; let (storages_pruned, done) = - provider.tx_ref().prune_table_with_range::( + provider.tx_ref().prune_dupsort_table_with_range::( storage_range, &mut limiter, - |_| false, |(BlockNumberHashedAddress((block_number, _)), _)| { last_storages_pruned_block = Some(block_number); }, @@ -85,10 +89,9 @@ where .unwrap_or(block_range_end); let (accounts_pruned, done) = - provider.tx_ref().prune_table_with_range::( + provider.tx_ref().prune_dupsort_table_with_range::( block_range, &mut limiter, - |_| false, |row| last_accounts_pruned_block = row.0, )?; diff --git a/crates/prune/prune/src/segments/user/mod.rs b/crates/prune/prune/src/segments/user/mod.rs index bdbc27f22f..b993d3f261 100644 --- a/crates/prune/prune/src/segments/user/mod.rs +++ b/crates/prune/prune/src/segments/user/mod.rs @@ -1,14 +1,18 @@ mod account_history; +mod bodies; mod history; mod merkle_change_sets; mod receipts; +mod receipts_by_logs; mod sender_recovery; mod storage_history; mod transaction_lookup; pub use account_history::AccountHistory; +pub use bodies::Bodies; pub use merkle_change_sets::MerkleChangeSets; pub use receipts::Receipts; +pub use receipts_by_logs::ReceiptsByLogs; pub use sender_recovery::SenderRecovery; pub use storage_history::StorageHistory; pub use transaction_lookup::TransactionLookup; diff --git a/crates/prune/prune/src/segments/user/receipts.rs b/crates/prune/prune/src/segments/user/receipts.rs index 03faddc1d5..9f193b4ca3 100644 --- a/crates/prune/prune/src/segments/user/receipts.rs +++ b/crates/prune/prune/src/segments/user/receipts.rs @@ -6,7 +6,7 @@ use reth_db_api::{table::Value, transaction::DbTxMut}; use reth_primitives_traits::NodePrimitives; use reth_provider::{ errors::provider::ProviderResult, BlockReader, DBProvider, NodePrimitivesProvider, - PruneCheckpointWriter, TransactionsProvider, + PruneCheckpointWriter, StaticFileProviderFactory, StorageSettingsCache, TransactionsProvider, }; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; use tracing::instrument; @@ -28,6 +28,8 @@ where + PruneCheckpointWriter + TransactionsProvider + BlockReader + + StorageSettingsCache + + StaticFileProviderFactory + NodePrimitivesProvider>, { fn segment(&self) -> PruneSegment { diff --git a/crates/prune/prune/src/segments/user/receipts_by_logs.rs b/crates/prune/prune/src/segments/user/receipts_by_logs.rs new file mode 100644 index 0000000000..9e57bd2411 --- /dev/null +++ b/crates/prune/prune/src/segments/user/receipts_by_logs.rs @@ -0,0 +1,362 @@ +use crate::{ + db_ext::DbTxPruneExt, + segments::{PruneInput, Segment}, + PrunerError, +}; +use alloy_consensus::TxReceipt; +use reth_db_api::{table::Value, tables, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; +use reth_provider::{ + BlockReader, DBProvider, NodePrimitivesProvider, PruneCheckpointWriter, TransactionsProvider, +}; +use reth_prune_types::{ + PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, ReceiptsLogPruneConfig, SegmentOutput, + MINIMUM_PRUNING_DISTANCE, +}; +use tracing::{instrument, trace}; +#[derive(Debug)] +pub struct ReceiptsByLogs { + config: ReceiptsLogPruneConfig, +} + +impl ReceiptsByLogs { + pub const fn new(config: ReceiptsLogPruneConfig) -> Self { + Self { config } + } +} + +impl Segment for ReceiptsByLogs +where + Provider: DBProvider + + PruneCheckpointWriter + + TransactionsProvider + + BlockReader + + NodePrimitivesProvider>, +{ + fn segment(&self) -> PruneSegment { + PruneSegment::ContractLogs + } + + fn mode(&self) -> Option { 
+ None + } + + fn purpose(&self) -> PrunePurpose { + PrunePurpose::User + } + + #[instrument(target = "pruner", skip(self, provider), ret(level = "trace"))] + fn prune(&self, provider: &Provider, input: PruneInput) -> Result { + // Contract log filtering removes every receipt possible except the ones in the list. So, + // for the other receipts it's as if they had a `PruneMode::Distance()` of + // `MINIMUM_PRUNING_DISTANCE`. + let to_block = PruneMode::Distance(MINIMUM_PRUNING_DISTANCE) + .prune_target_block(input.to_block, PruneSegment::ContractLogs, PrunePurpose::User)? + .map(|(bn, _)| bn) + .unwrap_or_default(); + + // Get status checkpoint from latest run + let mut last_pruned_block = + input.previous_checkpoint.and_then(|checkpoint| checkpoint.block_number); + + let initial_last_pruned_block = last_pruned_block; + + let mut from_tx_number = match initial_last_pruned_block { + Some(block) => provider + .block_body_indices(block)? + .map(|block| block.last_tx_num() + 1) + .unwrap_or(0), + None => 0, + }; + + // Figure out what receipts have already been pruned, so we can have an accurate + // `address_filter` + let address_filter = self.config.group_by_block(input.to_block, last_pruned_block)?; + + // Splits all transactions in different block ranges. Each block range will have its own + // filter address list and will check it while going through the table + // + // Example: + // For an `address_filter` such as: + // { block9: [a1, a2], block20: [a3, a4, a5] } + // + // The following structures will be created in the exact order as showed: + // `block_ranges`: [ + // (block0, block8, 0 addresses), + // (block9, block19, 2 addresses), + // (block20, to_block, 5 addresses) + // ] + // `filtered_addresses`: [a1, a2, a3, a4, a5] + // + // The first range will delete all receipts between block0 - block8 + // The second range will delete all receipts between block9 - 19, except the ones with + // emitter logs from these addresses: [a1, a2]. + // The third range will delete all receipts between block20 - to_block, except the ones with + // emitter logs from these addresses: [a1, a2, a3, a4, a5] + let mut block_ranges = vec![]; + let mut blocks_iter = address_filter.iter().peekable(); + let mut filtered_addresses = vec![]; + + while let Some((start_block, addresses)) = blocks_iter.next() { + filtered_addresses.extend_from_slice(addresses); + + // This will clear all receipts before the first appearance of a contract log or since + // the block after the last pruned one. + if block_ranges.is_empty() { + let init = last_pruned_block.map(|b| b + 1).unwrap_or_default(); + if init < *start_block { + block_ranges.push((init, *start_block - 1, 0)); + } + } + + let end_block = + blocks_iter.peek().map(|(next_block, _)| *next_block - 1).unwrap_or(to_block); + + // Addresses in lower block ranges, are still included in the inclusion list for future + // ranges. + block_ranges.push((*start_block, end_block, filtered_addresses.len())); + } + + trace!( + target: "pruner", + ?block_ranges, + ?filtered_addresses, + "Calculated block ranges and filtered addresses", + ); + + let mut limiter = input.limiter; + + let mut done = true; + let mut pruned = 0; + let mut last_pruned_transaction = None; + for (start_block, end_block, num_addresses) in block_ranges { + let block_range = start_block..=end_block; + + // Calculate the transaction range from this block range + let tx_range_end = match provider.block_body_indices(end_block)? 
{ + Some(body) => body.last_tx_num(), + None => { + trace!( + target: "pruner", + ?block_range, + "No receipts to prune." + ); + continue + } + }; + let tx_range = from_tx_number..=tx_range_end; + + // Delete receipts, except the ones in the inclusion list + let mut last_skipped_transaction = 0; + let deleted; + (deleted, done) = provider.tx_ref().prune_table_with_range::::Receipt, + >>( + tx_range, + &mut limiter, + |(tx_num, receipt)| { + let skip = num_addresses > 0 && + receipt.logs().iter().any(|log| { + filtered_addresses[..num_addresses].contains(&&log.address) + }); + + if skip { + last_skipped_transaction = *tx_num; + } + skip + }, + |row| last_pruned_transaction = Some(row.0), + )?; + + trace!(target: "pruner", %deleted, %done, ?block_range, "Pruned receipts"); + + pruned += deleted; + + // For accurate checkpoints we need to know that we have checked every transaction. + // Example: we reached the end of the range, and the last receipt is supposed to skip + // its deletion. + let last_pruned_transaction = *last_pruned_transaction + .insert(last_pruned_transaction.unwrap_or_default().max(last_skipped_transaction)); + + last_pruned_block = Some( + provider + .block_by_transaction_id(last_pruned_transaction)? + .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))? + // If there's more receipts to prune, set the checkpoint block number to + // previous, so we could finish pruning its receipts on the + // next run. + .saturating_sub(if done { 0 } else { 1 }), + ); + + if limiter.is_limit_reached() { + done &= end_block == to_block; + break + } + + from_tx_number = last_pruned_transaction + 1; + } + + // If there are contracts using `PruneMode::Distance(_)` there will be receipts before + // `to_block` that become eligible to be pruned in future runs. Therefore, our checkpoint is + // not actually `to_block`, but the `lowest_block_with_distance` from any contract. + // This ensures that in future pruner runs we can prune all these receipts between the + // previous `lowest_block_with_distance` and the new one using + // `get_next_tx_num_range_from_checkpoint`. + // + // Only applies if we were able to prune everything intended for this run, otherwise the + // checkpoint is the `last_pruned_block`. + let prune_mode_block = self + .config + .lowest_block_with_distance(input.to_block, initial_last_pruned_block)? 
+ .unwrap_or(to_block); + + provider.save_prune_checkpoint( + PruneSegment::ContractLogs, + PruneCheckpoint { + block_number: Some(prune_mode_block.min(last_pruned_block.unwrap_or(u64::MAX))), + tx_number: last_pruned_transaction, + prune_mode: PruneMode::Before(prune_mode_block), + }, + )?; + + let progress = limiter.progress(done); + + Ok(SegmentOutput { progress, pruned, checkpoint: None }) + } +} + +#[cfg(test)] +mod tests { + use crate::segments::{user::ReceiptsByLogs, PruneInput, PruneLimiter, Segment}; + use alloy_primitives::B256; + use assert_matches::assert_matches; + use reth_db_api::{cursor::DbCursorRO, tables, transaction::DbTx}; + use reth_primitives_traits::InMemorySize; + use reth_provider::{BlockReader, DBProvider, DatabaseProviderFactory, PruneCheckpointReader}; + use reth_prune_types::{PruneMode, PruneSegment, ReceiptsLogPruneConfig}; + use reth_stages::test_utils::{StorageKind, TestStageDB}; + use reth_testing_utils::generators::{ + self, random_block_range, random_eoa_account, random_log, random_receipt, BlockRangeParams, + }; + use std::collections::BTreeMap; + + #[test] + fn prune_receipts_by_logs() { + reth_tracing::init_test_tracing(); + + let db = TestStageDB::default(); + let mut rng = generators::rng(); + + let tip = 20000; + let blocks = [ + random_block_range( + &mut rng, + 0..=100, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 1..5, ..Default::default() }, + ), + random_block_range( + &mut rng, + (100 + 1)..=(tip - 100), + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() }, + ), + random_block_range( + &mut rng, + (tip - 100 + 1)..=tip, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 1..5, ..Default::default() }, + ), + ] + .concat(); + db.insert_blocks(blocks.iter(), StorageKind::Database(None)).expect("insert blocks"); + + let mut receipts = Vec::new(); + + let (deposit_contract_addr, _) = random_eoa_account(&mut rng); + for block in &blocks { + receipts.reserve_exact(block.body().size()); + for (txi, transaction) in block.body().transactions.iter().enumerate() { + let mut receipt = random_receipt(&mut rng, transaction, Some(1), None); + receipt.logs.push(random_log( + &mut rng, + (txi == (block.transaction_count() - 1)).then_some(deposit_contract_addr), + Some(1), + )); + receipts.push((receipts.len() as u64, receipt)); + } + } + db.insert_receipts(receipts).expect("insert receipts"); + + assert_eq!( + db.table::().unwrap().len(), + blocks.iter().map(|block| block.transaction_count()).sum::() + ); + assert_eq!( + db.table::().unwrap().len(), + db.table::().unwrap().len() + ); + + let run_prune = || { + let provider = db.factory.database_provider_rw().unwrap(); + + let prune_before_block: usize = 20; + let prune_mode = PruneMode::Before(prune_before_block as u64); + let receipts_log_filter = + ReceiptsLogPruneConfig(BTreeMap::from([(deposit_contract_addr, prune_mode)])); + + let limiter = PruneLimiter::default().set_deleted_entries_limit(10); + + let result = ReceiptsByLogs::new(receipts_log_filter).prune( + &provider, + PruneInput { + previous_checkpoint: db + .factory + .provider() + .unwrap() + .get_prune_checkpoint(PruneSegment::ContractLogs) + .unwrap(), + to_block: tip, + limiter, + }, + ); + provider.commit().expect("commit"); + + assert_matches!(result, Ok(_)); + let output = result.unwrap(); + + let (pruned_block, pruned_tx) = db + .factory + .provider() + .unwrap() + .get_prune_checkpoint(PruneSegment::ContractLogs) + .unwrap() + .map(|checkpoint| (checkpoint.block_number.unwrap(), 
checkpoint.tx_number.unwrap())) + .unwrap_or_default(); + + // All receipts are in the end of the block + let unprunable = pruned_block.saturating_sub(prune_before_block as u64 - 1); + + assert_eq!( + db.table::().unwrap().len(), + blocks.iter().map(|block| block.transaction_count()).sum::() - + ((pruned_tx + 1) - unprunable) as usize + ); + + output.progress.is_finished() + }; + + while !run_prune() {} + + let provider = db.factory.provider().unwrap(); + let mut cursor = provider.tx_ref().cursor_read::().unwrap(); + let walker = cursor.walk(None).unwrap(); + for receipt in walker { + let (tx_num, receipt) = receipt.unwrap(); + + // Either we only find our contract, or the receipt is part of the unprunable receipts + // set by tip - 128 + assert!( + receipt.logs.iter().any(|l| l.address == deposit_contract_addr) || + provider.block_by_transaction_id(tx_num).unwrap().unwrap() > tip - 128, + ); + } + } +} diff --git a/crates/prune/prune/src/segments/user/sender_recovery.rs b/crates/prune/prune/src/segments/user/sender_recovery.rs index 9fbad8c428..9569532e8b 100644 --- a/crates/prune/prune/src/segments/user/sender_recovery.rs +++ b/crates/prune/prune/src/segments/user/sender_recovery.rs @@ -61,7 +61,7 @@ where trace!(target: "pruner", %pruned, %done, "Pruned transaction senders"); let last_pruned_block = provider - .transaction_block(last_pruned_transaction)? + .block_by_transaction_id(last_pruned_transaction)? .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))? // If there's more transaction senders to prune, set the checkpoint block number to // previous, so we could finish pruning its transaction senders on the next run. diff --git a/crates/prune/prune/src/segments/user/transaction_lookup.rs b/crates/prune/prune/src/segments/user/transaction_lookup.rs index fed90d84f2..74e0e29647 100644 --- a/crates/prune/prune/src/segments/user/transaction_lookup.rs +++ b/crates/prune/prune/src/segments/user/transaction_lookup.rs @@ -122,7 +122,7 @@ where let last_pruned_transaction = last_pruned_transaction.unwrap_or(tx_range_end); let last_pruned_block = provider - .transaction_block(last_pruned_transaction)? + .block_by_transaction_id(last_pruned_transaction)? .ok_or(PrunerError::InconsistentData("Block for transaction is not found"))? 
// If there's more transaction lookup entries to prune, set the checkpoint block number // to previous, so we could finish pruning its transaction lookup entries on the next diff --git a/crates/prune/types/Cargo.toml b/crates/prune/types/Cargo.toml index b60621b331..30adbb14d9 100644 --- a/crates/prune/types/Cargo.toml +++ b/crates/prune/types/Cargo.toml @@ -16,6 +16,7 @@ reth-codecs = { workspace = true, optional = true } alloy-primitives.workspace = true derive_more.workspace = true +strum = { workspace = true, features = ["derive"] } thiserror.workspace = true modular-bitfield = { workspace = true, optional = true } @@ -42,6 +43,7 @@ std = [ "serde?/std", "serde_json/std", "thiserror/std", + "strum/std", ] test-utils = [ "std", diff --git a/crates/prune/types/src/lib.rs b/crates/prune/types/src/lib.rs index a588693892..8233a3487b 100644 --- a/crates/prune/types/src/lib.rs +++ b/crates/prune/types/src/lib.rs @@ -18,6 +18,10 @@ mod pruner; mod segment; mod target; +use alloc::{collections::BTreeMap, vec::Vec}; +use alloy_primitives::{Address, BlockNumber}; +use core::ops::Deref; + pub use checkpoint::PruneCheckpoint; pub use event::PrunerEvent; pub use mode::PruneMode; @@ -26,4 +30,304 @@ pub use pruner::{ SegmentOutputCheckpoint, }; pub use segment::{PrunePurpose, PruneSegment, PruneSegmentError}; -pub use target::{PruneModes, UnwindTargetPrunedError, MINIMUM_PRUNING_DISTANCE}; +pub use target::{ + PruneModes, UnwindTargetPrunedError, MERKLE_CHANGESETS_RETENTION_BLOCKS, + MINIMUM_PRUNING_DISTANCE, +}; + +/// Configuration for pruning receipts not associated with logs emitted by the specified contracts. +#[derive(Debug, Clone, PartialEq, Eq, Default)] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] +pub struct ReceiptsLogPruneConfig(pub BTreeMap); + +impl ReceiptsLogPruneConfig { + /// Checks if the configuration is empty + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Given the `tip` block number, consolidates the structure so it can easily be queried for + /// filtering across a range of blocks. + /// + /// Example: + /// + /// `{ addrA: Before(872), addrB: Before(500), addrC: Distance(128) }` + /// + /// for `tip: 1000`, gets transformed to a map such as: + /// + /// `{ 500: [addrB], 872: [addrA, addrC] }` + /// + /// The [`BlockNumber`] key of the new map should be viewed as `PruneMode::Before(block)`, which + /// makes the previous result equivalent to + /// + /// `{ Before(500): [addrB], Before(872): [addrA, addrC] }` + pub fn group_by_block( + &self, + tip: BlockNumber, + pruned_block: Option, + ) -> Result>, PruneSegmentError> { + let mut map = BTreeMap::new(); + let base_block = pruned_block.unwrap_or_default() + 1; + + for (address, mode) in &self.0 { + // Getting `None`, means that there is nothing to prune yet, so we need it to include in + // the BTreeMap (block = 0), otherwise it will be excluded. + // Reminder that this BTreeMap works as an inclusion list that excludes (prunes) all + // other receipts. + // + // Reminder, that we increment because the [`BlockNumber`] key of the new map should be + // viewed as `PruneMode::Before(block)` + let block = base_block.max( + mode.prune_target_block(tip, PruneSegment::ContractLogs, PrunePurpose::User)? + .map(|(block, _)| block) + .unwrap_or_default() + + 1, + ); + + map.entry(block).or_insert_with(Vec::new).push(address) + } + Ok(map) + } + + /// Returns the lowest block where we start filtering logs which use `PruneMode::Distance(_)`. 
+ pub fn lowest_block_with_distance( + &self, + tip: BlockNumber, + pruned_block: Option, + ) -> Result, PruneSegmentError> { + let pruned_block = pruned_block.unwrap_or_default(); + let mut lowest = None; + + for mode in self.values() { + if mode.is_distance() && + let Some((block, _)) = + mode.prune_target_block(tip, PruneSegment::ContractLogs, PrunePurpose::User)? + { + lowest = Some(lowest.unwrap_or(u64::MAX).min(block)); + } + } + + Ok(lowest.map(|lowest| lowest.max(pruned_block))) + } +} + +impl Deref for ReceiptsLogPruneConfig { + type Target = BTreeMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_group_by_block_empty_config() { + let config = ReceiptsLogPruneConfig(BTreeMap::new()); + let tip = 1000; + let pruned_block = None; + + let result = config.group_by_block(tip, pruned_block).unwrap(); + assert!(result.is_empty(), "The result should be empty when the config is empty"); + } + + #[test] + fn test_group_by_block_single_entry() { + let mut config_map = BTreeMap::new(); + let address = Address::new([1; 20]); + let prune_mode = PruneMode::Before(500); + config_map.insert(address, prune_mode); + + let config = ReceiptsLogPruneConfig(config_map); + // Big tip to have something to prune for the target block + let tip = 3000000; + let pruned_block = Some(400); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect one entry with block 500 and the corresponding address + assert_eq!(result.len(), 1); + assert_eq!(result[&500], vec![&address], "Address should be grouped under block 500"); + + // Tip smaller than the target block, so that we have nothing to prune for the block + let tip = 300; + let pruned_block = Some(400); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect one entry with block 400 and the corresponding address + assert_eq!(result.len(), 1); + assert_eq!(result[&401], vec![&address], "Address should be grouped under block 400"); + } + + #[test] + fn test_group_by_block_multiple_entries() { + let mut config_map = BTreeMap::new(); + let address1 = Address::new([1; 20]); + let address2 = Address::new([2; 20]); + let prune_mode1 = PruneMode::Before(600); + let prune_mode2 = PruneMode::Before(800); + config_map.insert(address1, prune_mode1); + config_map.insert(address2, prune_mode2); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 900000; + let pruned_block = Some(400); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect two entries: one for block 600 and another for block 800 + assert_eq!(result.len(), 2); + assert_eq!(result[&600], vec![&address1], "Address1 should be grouped under block 600"); + assert_eq!(result[&800], vec![&address2], "Address2 should be grouped under block 800"); + } + + #[test] + fn test_group_by_block_with_distance_prune_mode() { + let mut config_map = BTreeMap::new(); + let address = Address::new([1; 20]); + let prune_mode = PruneMode::Distance(100000); + config_map.insert(address, prune_mode); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 100100; + // Pruned block is smaller than the target block + let pruned_block = Some(50); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect the entry to be grouped under block 100 (tip - distance) + assert_eq!(result.len(), 1); + assert_eq!(result[&101], vec![&address], "Address should be grouped under block 100"); + + let tip = 100100; + // Pruned block is larger than the target 
block + let pruned_block = Some(800); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect the entry to be grouped under block 800 which is larger than tip - distance + assert_eq!(result.len(), 1); + assert_eq!(result[&801], vec![&address], "Address should be grouped under block 800"); + } + + #[test] + fn test_lowest_block_with_distance_empty_config() { + let config = ReceiptsLogPruneConfig(BTreeMap::new()); + let tip = 1000; + let pruned_block = None; + + let result = config.lowest_block_with_distance(tip, pruned_block).unwrap(); + assert_eq!(result, None, "The result should be None when the config is empty"); + } + + #[test] + fn test_lowest_block_with_distance_no_distance_mode() { + let mut config_map = BTreeMap::new(); + let address = Address::new([1; 20]); + let prune_mode = PruneMode::Before(500); + config_map.insert(address, prune_mode); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 1000; + let pruned_block = None; + + let result = config.lowest_block_with_distance(tip, pruned_block).unwrap(); + assert_eq!(result, None, "The result should be None when there are no Distance modes"); + } + + #[test] + fn test_lowest_block_with_distance_single_entry() { + let mut config_map = BTreeMap::new(); + let address = Address::new([1; 20]); + let prune_mode = PruneMode::Distance(100000); + config_map.insert(address, prune_mode); + + let config = ReceiptsLogPruneConfig(config_map); + + let tip = 100100; + let pruned_block = Some(400); + + // Expect the lowest block to be 400 as 400 > 100100 - 100000 (tip - distance) + assert_eq!( + config.lowest_block_with_distance(tip, pruned_block).unwrap(), + Some(400), + "The lowest block should be 400" + ); + + let tip = 100100; + let pruned_block = Some(50); + + // Expect the lowest block to be 100 as 100 > 50 (pruned block) + assert_eq!( + config.lowest_block_with_distance(tip, pruned_block).unwrap(), + Some(100), + "The lowest block should be 100" + ); + } + + #[test] + fn test_lowest_block_with_distance_multiple_entries_last() { + let mut config_map = BTreeMap::new(); + let address1 = Address::new([1; 20]); + let address2 = Address::new([2; 20]); + let prune_mode1 = PruneMode::Distance(100100); + let prune_mode2 = PruneMode::Distance(100300); + config_map.insert(address1, prune_mode1); + config_map.insert(address2, prune_mode2); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 200300; + let pruned_block = Some(100); + + // The lowest block should be 200300 - 100300 = 100000: + // - First iteration will return 100200 => 200300 - 100100 = 100200 + // - Second iteration will return 100000 => 200300 - 100300 = 100000 < 100200 + // - Final result is 100000 + assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(100000)); + } + + #[test] + fn test_lowest_block_with_distance_multiple_entries_first() { + let mut config_map = BTreeMap::new(); + let address1 = Address::new([1; 20]); + let address2 = Address::new([2; 20]); + let prune_mode1 = PruneMode::Distance(100400); + let prune_mode2 = PruneMode::Distance(100300); + config_map.insert(address1, prune_mode1); + config_map.insert(address2, prune_mode2); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 200300; + let pruned_block = Some(100); + + // The lowest block should be 200300 - 100400 = 99900: + // - First iteration, lowest block is 200300 - 100400 = 99900 + // - Second iteration, lowest block is still 99900 < 200300 - 100300 = 100000 + // - Final result is 99900 + 
assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(99900)); + } + + #[test] + fn test_lowest_block_with_distance_multiple_entries_pruned_block() { + let mut config_map = BTreeMap::new(); + let address1 = Address::new([1; 20]); + let address2 = Address::new([2; 20]); + let prune_mode1 = PruneMode::Distance(100400); + let prune_mode2 = PruneMode::Distance(100300); + config_map.insert(address1, prune_mode1); + config_map.insert(address2, prune_mode2); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 200300; + let pruned_block = Some(100000); + + // The lowest block should be 100000 because: + // - Lowest is 200300 - 100400 = 99900 < 200300 - 100300 = 100000 + // - Lowest is compared to the pruned block 100000: 100000 > 99900 + // - Finally the lowest block is 100000 + assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(100000)); + } +} diff --git a/crates/prune/types/src/segment.rs b/crates/prune/types/src/segment.rs index cfc812a1a0..7922f906c2 100644 --- a/crates/prune/types/src/segment.rs +++ b/crates/prune/types/src/segment.rs @@ -1,7 +1,8 @@ #![allow(deprecated)] // necessary to all defining deprecated `PruneSegment` variants -use crate::MINIMUM_PRUNING_DISTANCE; +use crate::{MERKLE_CHANGESETS_RETENTION_BLOCKS, MINIMUM_PRUNING_DISTANCE}; use derive_more::Display; +use strum::{EnumIter, IntoEnumIterator}; use thiserror::Error; /// Segment of the data that can be pruned. @@ -9,7 +10,7 @@ use thiserror::Error; /// VERY IMPORTANT NOTE: new variants must be added to the end of this enum, and old variants which /// are no longer used must not be removed from this enum. The variant index is encoded directly /// when writing to the `PruneCheckpoint` table, so changing the order here will corrupt the table. -#[derive(Debug, Display, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)] +#[derive(Debug, Display, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, EnumIter)] #[cfg_attr(test, derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] @@ -28,14 +29,18 @@ pub enum PruneSegment { /// Prune segment responsible for the `StorageChangeSets` and `StoragesHistory` tables. StorageHistory, #[deprecated = "Variant indexes cannot be changed"] + #[strum(disabled)] /// Prune segment responsible for the `CanonicalHeaders`, `Headers` tables. Headers, #[deprecated = "Variant indexes cannot be changed"] + #[strum(disabled)] /// Prune segment responsible for the `Transactions` table. Transactions, /// Prune segment responsible for all rows in `AccountsTrieChangeSets` and /// `StoragesTrieChangeSets` table. MerkleChangeSets, + /// Prune segment responsible for bodies (transactions in static files). + Bodies, } #[cfg(test)] @@ -47,6 +52,14 @@ impl Default for PruneSegment { } impl PruneSegment { + /// Returns an iterator over all variants of [`PruneSegment`]. + /// + /// Excludes deprecated variants that are no longer used, but can still be found in the + /// database. + pub fn variants() -> impl Iterator { + Self::iter() + } + /// Returns minimum number of blocks to keep in the database for this segment. 
pub const fn min_blocks(&self, purpose: PrunePurpose) -> u64 { match self { @@ -55,8 +68,9 @@ impl PruneSegment { Self::ContractLogs | Self::AccountHistory | Self::StorageHistory | - Self::MerkleChangeSets | + Self::Bodies | Self::Receipts => MINIMUM_PRUNING_DISTANCE, + Self::MerkleChangeSets => MERKLE_CHANGESETS_RETENTION_BLOCKS, #[expect(deprecated)] #[expect(clippy::match_same_arms)] Self::Headers | Self::Transactions => 0, @@ -102,3 +116,20 @@ pub enum PruneSegmentError { #[error("the configuration provided for {0} is invalid")] Configuration(PruneSegment), } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_prune_segment_iter_excludes_deprecated() { + let segments: Vec = PruneSegment::variants().collect(); + + // Verify deprecated variants are not included derived iter + #[expect(deprecated)] + { + assert!(!segments.contains(&PruneSegment::Headers)); + assert!(!segments.contains(&PruneSegment::Transactions)); + } + } +} diff --git a/crates/prune/types/src/target.rs b/crates/prune/types/src/target.rs index bb61c006cd..df72347988 100644 --- a/crates/prune/types/src/target.rs +++ b/crates/prune/types/src/target.rs @@ -2,7 +2,7 @@ use alloy_primitives::BlockNumber; use derive_more::Display; use thiserror::Error; -use crate::{PruneCheckpoint, PruneMode, PruneSegment}; +use crate::{PruneCheckpoint, PruneMode, PruneSegment, ReceiptsLogPruneConfig}; /// Minimum distance from the tip necessary for the node to work correctly: /// 1. Minimum 2 epochs (32 blocks per epoch) required to handle any reorg according to the @@ -36,9 +36,13 @@ pub enum HistoryType { StorageHistory, } +/// Default number of blocks to retain for merkle changesets. +/// This is used by both the `MerkleChangeSets` stage and the pruner segment. +pub const MERKLE_CHANGESETS_RETENTION_BLOCKS: u64 = 128; + /// Default pruning mode for merkle changesets const fn default_merkle_changesets_mode() -> PruneMode { - PruneMode::Distance(MINIMUM_PRUNING_DISTANCE) + PruneMode::Distance(MERKLE_CHANGESETS_RETENTION_BLOCKS) } /// Pruning configuration for every segment of the data that can be pruned. @@ -91,18 +95,18 @@ pub struct PruneModes { pub bodies_history: Option, /// Merkle Changesets pruning configuration for `AccountsTrieChangeSets` and /// `StoragesTrieChangeSets`. + #[cfg_attr(any(test, feature = "serde"), serde(default = "default_merkle_changesets_mode"))] + pub merkle_changesets: PruneMode, + /// Receipts pruning configuration by retaining only those receipts that contain logs emitted + /// by the specified addresses, discarding others. This setting is overridden by `receipts`. + /// + /// The [`BlockNumber`](`crate::BlockNumber`) represents the starting block from which point + /// onwards the receipts are preserved. #[cfg_attr( any(test, feature = "serde"), - serde( - default = "default_merkle_changesets_mode", - deserialize_with = "deserialize_prune_mode_with_min_blocks::" - ) + serde(skip_serializing_if = "ReceiptsLogPruneConfig::is_empty") )] - pub merkle_changesets: PruneMode, - /// Receipts log filtering has been deprecated and will be removed in a future release. 
- #[deprecated] - #[cfg_attr(any(test, feature = "serde"), serde(skip))] - pub receipts_log_filter: (), + pub receipts_log_filter: ReceiptsLogPruneConfig, } impl Default for PruneModes { @@ -115,15 +119,14 @@ impl Default for PruneModes { storage_history: None, bodies_history: None, merkle_changesets: default_merkle_changesets_mode(), - #[expect(deprecated)] - receipts_log_filter: (), + receipts_log_filter: ReceiptsLogPruneConfig::default(), } } } impl PruneModes { /// Sets pruning to all targets. - pub const fn all() -> Self { + pub fn all() -> Self { Self { sender_recovery: Some(PruneMode::Full), transaction_lookup: Some(PruneMode::Full), @@ -132,14 +135,29 @@ impl PruneModes { storage_history: Some(PruneMode::Full), bodies_history: Some(PruneMode::Full), merkle_changesets: PruneMode::Full, - #[expect(deprecated)] - receipts_log_filter: (), + receipts_log_filter: Default::default(), } } /// Returns whether there is any kind of receipt pruning configuration. - pub const fn has_receipts_pruning(&self) -> bool { - self.receipts.is_some() + pub fn has_receipts_pruning(&self) -> bool { + self.receipts.is_some() || !self.receipts_log_filter.is_empty() + } + + /// Migrates deprecated prune mode values to their new defaults. + /// + /// Returns `true` if any migration was performed. + /// + /// Currently migrates: + /// - `merkle_changesets`: `Distance(n)` where `n < 128` or `n == 10064` -> `Distance(128)` + pub const fn migrate(&mut self) -> bool { + if let PruneMode::Distance(d) = self.merkle_changesets && + (d < MERKLE_CHANGESETS_RETENTION_BLOCKS || d == MINIMUM_PRUNING_DISTANCE) + { + self.merkle_changesets = PruneMode::Distance(MERKLE_CHANGESETS_RETENTION_BLOCKS); + return true; + } + false } /// Returns an error if we can't unwind to the targeted block because the target block is @@ -191,28 +209,6 @@ impl PruneModes { } } -/// Deserializes [`PruneMode`] and validates that the value is not less than the const -/// generic parameter `MIN_BLOCKS`. This parameter represents the number of blocks that needs to be -/// left in database after the pruning. -/// -/// 1. For [`PruneMode::Full`], it fails if `MIN_BLOCKS > 0`. -/// 2. For [`PruneMode::Distance`], it fails if `distance < MIN_BLOCKS + 1`. `+ 1` is needed because -/// `PruneMode::Distance(0)` means that we leave zero blocks from the latest, meaning we have one -/// block in the database. -#[cfg(any(test, feature = "serde"))] -fn deserialize_prune_mode_with_min_blocks< - 'de, - const MIN_BLOCKS: u64, - D: serde::Deserializer<'de>, ->( - deserializer: D, -) -> Result { - use serde::Deserialize; - let prune_mode = PruneMode::deserialize(deserializer)?; - serde_deserialize_validate::(&prune_mode)?; - Ok(prune_mode) -} - /// Deserializes [`Option`] and validates that the value is not less than the const /// generic parameter `MIN_BLOCKS`. This parameter represents the number of blocks that needs to be /// left in database after the pruning. diff --git a/crates/ress/protocol/src/lib.rs b/crates/ress/protocol/src/lib.rs index 50db2a3191..82820cc5a3 100644 --- a/crates/ress/protocol/src/lib.rs +++ b/crates/ress/protocol/src/lib.rs @@ -1,5 +1,30 @@ -//! `ress` protocol is an `RLPx` subprotocol for stateless nodes. -//! following [RLPx specs](https://github.com/ethereum/devp2p/blob/master/rlpx.md) +//! RESS protocol for stateless Ethereum nodes. +//! +//! Enables stateless nodes to fetch execution witnesses, bytecode, and block data from +//! stateful peers for minimal on-disk state with full execution capability. +//! +//! ## Node Types +//! +//! 
- **Stateless**: Minimal state, requests data on-demand +//! - **Stateful**: Full Ethereum nodes providing state data +//! +//! Valid connections: Stateless ↔ Stateless ✅, Stateless ↔ Stateful ✅, Stateful ↔ Stateful ❌ +//! +//! ## Messages +//! +//! - `NodeType (0x00)`: Handshake +//! - `GetHeaders/Headers (0x01/0x02)`: Block headers +//! - `GetBlockBodies/BlockBodies (0x03/0x04)`: Block bodies +//! - `GetBytecode/Bytecode (0x05/0x06)`: Contract bytecode +//! - `GetWitness/Witness (0x07/0x08)`: Execution witnesses +//! +//! ## Flow +//! +//! 1. Exchange `NodeType` for compatibility +//! 2. Download ancestor blocks via headers/bodies +//! 3. For new payloads: request witness → get missing bytecode → execute +//! +//! Protocol version: `ress/1` #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", diff --git a/crates/ress/protocol/src/types.rs b/crates/ress/protocol/src/types.rs index 45ef3604a7..2c474ca79d 100644 --- a/crates/ress/protocol/src/types.rs +++ b/crates/ress/protocol/src/types.rs @@ -1,7 +1,16 @@ use alloy_primitives::bytes::{Buf, BufMut}; use alloy_rlp::{Decodable, Encodable}; -/// Node type variant. +/// Represents the type of node in the RESS protocol. +/// +/// This enum is used during the handshake phase to identify whether a peer is a stateless +/// or stateful node. The node type determines which connections are valid: +/// - Stateless ↔ Stateless: valid +/// - Stateless ↔ Stateful: valid +/// - Stateful ↔ Stateful: invalid +/// +/// Use [`is_valid_connection`](Self::is_valid_connection) to check if a connection between +/// two node types is allowed. #[repr(u8)] #[derive(PartialEq, Eq, Copy, Clone, Debug)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] diff --git a/crates/ress/provider/src/lib.rs b/crates/ress/provider/src/lib.rs index d986eb9e95..3a0b83673a 100644 --- a/crates/ress/provider/src/lib.rs +++ b/crates/ress/provider/src/lib.rs @@ -156,11 +156,11 @@ where // NOTE: there might be a race condition where target ancestor hash gets evicted from the // database. let witness_state_provider = self.provider.state_by_block_hash(ancestor_hash)?; - let mut trie_input = TrieInput::default(); - for block in executed_ancestors.into_iter().rev() { - let trie_updates = block.trie_updates.as_ref(); - trie_input.append_cached_ref(trie_updates, &block.hashed_state); - } + let bundles: Vec<_> = + executed_ancestors.iter().rev().map(|block| block.trie_data()).collect(); + let trie_input = TrieInput::from_blocks_sorted( + bundles.iter().map(|data| (data.hashed_state.as_ref(), data.trie_updates.as_ref())), + ); let mut hashed_state = db.into_state(); hashed_state.extend(record.hashed_state); diff --git a/crates/ress/provider/src/pending_state.rs b/crates/ress/provider/src/pending_state.rs index f536acdb60..5196448733 100644 --- a/crates/ress/provider/src/pending_state.rs +++ b/crates/ress/provider/src/pending_state.rs @@ -123,8 +123,7 @@ pub async fn maintain_pending_state
( } // ignore ConsensusEngineEvent::CanonicalChainCommitted(_, _) | - ConsensusEngineEvent::BlockReceived(_) | - ConsensusEngineEvent::LiveSyncProgress(_) => (), + ConsensusEngineEvent::BlockReceived(_) => (), } } } diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 488a685b38..92036e3908 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -68,3 +68,4 @@ optional-checks = [ "optional-eip3607", "optional-no-base-fee", ] +memory_limit = ["revm/memory_limit"] diff --git a/crates/revm/src/cached.rs b/crates/revm/src/cached.rs index bf4bd6d5d1..a802b5a0e1 100644 --- a/crates/revm/src/cached.rs +++ b/crates/revm/src/cached.rs @@ -24,6 +24,7 @@ use revm::{bytecode::Bytecode, state::AccountInfo, Database, DatabaseRef}; /// let db = cached_reads.as_db_mut(db); /// // this is `Database` and can be used to build a payload, it never commits to `CachedReads` or the underlying database, but all reads from the underlying database are cached in `CachedReads`. /// // Subsequent payload build attempts can use cached reads and avoid hitting the underlying database. +/// // Note: `cached_reads` must outlive `db` to satisfy lifetime requirements. /// let state = State::builder().with_database(db).build(); /// } /// ``` @@ -71,6 +72,10 @@ impl CachedReads { } /// A [Database] that caches reads inside [`CachedReads`]. +/// +/// The lifetime parameter `'a` is tied to the lifetime of the underlying [`CachedReads`] instance. +/// This ensures that the cache remains valid for the entire duration this wrapper is used. +/// The original [`CachedReads`] must outlive this wrapper to prevent use-after-free. #[derive(Debug)] pub struct CachedReadsDbMut<'a, DB> { /// The cache of reads. @@ -146,11 +151,11 @@ impl Database for CachedReadsDbMut<'_, DB> { } fn block_hash(&mut self, number: u64) -> Result { - let code = match self.cached.block_hashes.entry(number) { + let hash = match self.cached.block_hashes.entry(number) { Entry::Occupied(entry) => *entry.get(), Entry::Vacant(entry) => *entry.insert(self.db.block_hash_ref(number)?), }; - Ok(code) + Ok(hash) } } @@ -158,6 +163,11 @@ impl Database for CachedReadsDbMut<'_, DB> { /// /// This is intended to be used as the [`DatabaseRef`] for /// `revm::db::State` for repeated payload build jobs. +/// +/// The lifetime parameter `'a` matches the lifetime of the underlying [`CachedReadsDbMut`], +/// which in turn is tied to the [`CachedReads`] cache. [`RefCell`] is used here to provide +/// interior mutability for the [`DatabaseRef`] trait (which requires `&self`), while the +/// lifetime ensures the cache remains valid throughout the wrapper's usage. #[derive(Debug)] pub struct CachedReadsDBRef<'a, DB> { /// The inner cache reads db mut. diff --git a/crates/revm/src/database.rs b/crates/revm/src/database.rs index 6b829c3d73..a4d74ff262 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -9,7 +9,7 @@ use revm::{bytecode::Bytecode, state::AccountInfo, Database, DatabaseRef}; /// A helper trait responsible for providing state necessary for EVM execution. /// /// This serves as the data layer for [`Database`]. -pub trait EvmStateProvider: Send + Sync { +pub trait EvmStateProvider { /// Get basic account information. /// /// Returns [`None`] if the account doesn't exist. 
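To make the lifetime and `RefCell` comments added to the cached-reads wrappers concrete, here is a minimal sketch of the same shape: a mutable cache wrapper plus a `&self` facade that uses interior mutability. All types here (a `CachedReads` holding only a block-hash map, a closure standing in for the backing database) are illustrative and are not revm's `Database`/`DatabaseRef` traits.

```rust
// Sketch of the RefCell pattern described in the doc comments above: a mutable
// read-cache exposed behind a `&self` lookup API. Types are illustrative only.
use std::{cell::RefCell, collections::HashMap};

#[derive(Default)]
struct CachedReads {
    block_hashes: HashMap<u64, [u8; 32]>,
}

/// Mutable wrapper: caches lookups against an underlying source (here a closure).
struct CachedReadsDbMut<'a, F> {
    cached: &'a mut CachedReads,
    lookup: F,
}

impl<F: Fn(u64) -> [u8; 32]> CachedReadsDbMut<'_, F> {
    fn block_hash(&mut self, number: u64) -> [u8; 32] {
        if let Some(hash) = self.cached.block_hashes.get(&number) {
            return *hash;
        }
        let hash = (self.lookup)(number);
        self.cached.block_hashes.insert(number, hash);
        hash
    }
}

/// Read-only facade: `RefCell` provides interior mutability so callers holding only
/// `&self` can still populate the cache, mirroring a `&self`-based ref trait.
struct CachedReadsDbRef<'a, F> {
    inner: RefCell<CachedReadsDbMut<'a, F>>,
}

impl<F: Fn(u64) -> [u8; 32]> CachedReadsDbRef<'_, F> {
    fn block_hash_ref(&self, number: u64) -> [u8; 32] {
        self.inner.borrow_mut().block_hash(number)
    }
}

fn main() {
    let mut cache = CachedReads::default();
    {
        let db = CachedReadsDbRef {
            inner: RefCell::new(CachedReadsDbMut {
                cached: &mut cache,
                lookup: |n: u64| [n as u8; 32],
            }),
        };
        // First call hits the backing source, second call is served from the cache.
        assert_eq!(db.block_hash_ref(7), [7u8; 32]);
        assert_eq!(db.block_hash_ref(7), [7u8; 32]);
    }
    // The cache outlives the wrappers, as the lifetime relationship requires.
    assert_eq!(cache.block_hashes.len(), 1);
}
```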
diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index 75431b915a..58cd2f29e8 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -248,7 +248,7 @@ where } } -impl std::fmt::Debug for IpcServer { +impl std::fmt::Debug for IpcServer { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("IpcServer") .field("endpoint", &self.endpoint) diff --git a/crates/rpc/ipc/src/server/rpc_service.rs b/crates/rpc/ipc/src/server/rpc_service.rs index f7fcdace4c..05c6e93efc 100644 --- a/crates/rpc/ipc/src/server/rpc_service.rs +++ b/crates/rpc/ipc/src/server/rpc_service.rs @@ -107,18 +107,13 @@ impl RpcServiceT for RpcService { fn batch<'a>(&self, req: Batch<'a>) -> impl Future + Send + 'a { let entries: Vec<_> = req.into_iter().collect(); - - let mut got_notif = false; let mut batch_response = BatchResponseBuilder::new_with_limit(self.max_response_body_size); let mut pending_calls: FuturesOrdered<_> = entries .into_iter() .filter_map(|v| match v { Ok(BatchEntry::Call(call)) => Some(Either::Right(self.call(call))), - Ok(BatchEntry::Notification(_n)) => { - got_notif = true; - None - } + Ok(BatchEntry::Notification(_n)) => None, Err(_err) => Some(Either::Left(async { MethodResponse::error(Id::Null, ErrorObject::from(ErrorCode::InvalidRequest)) })), diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index 7d170d342f..e2c2d00155 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -35,9 +35,11 @@ alloy-serde.workspace = true alloy-rpc-types-beacon.workspace = true alloy-rpc-types-engine.workspace = true alloy-genesis.workspace = true +serde = { workspace = true, features = ["derive"] } # misc jsonrpsee = { workspace = true, features = ["server", "macros"] } +serde_json.workspace = true [features] client = [ @@ -45,3 +47,8 @@ client = [ "jsonrpsee/async-client", "reth-rpc-eth-api/client", ] + +[dev-dependencies] +serde_json = { workspace = true } +tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } +jsonrpsee = { workspace = true, features = ["client", "async-client", "http-client"] } diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 5dd7401782..268c06fd00 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -3,7 +3,7 @@ use alloy_genesis::ChainConfig; use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, Bytes, B256}; use alloy_rpc_types_debug::ExecutionWitness; -use alloy_rpc_types_eth::{Block, Bundle, StateContext}; +use alloy_rpc_types_eth::{Bundle, StateContext}; use alloy_rpc_types_trace::geth::{ BlockTraceResult, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, TraceResult, }; @@ -38,7 +38,7 @@ pub trait DebugApi { /// Returns an array of recent bad blocks that the client has seen on the network. #[method(name = "getBadBlocks")] - async fn bad_blocks(&self) -> RpcResult>; + async fn bad_blocks(&self) -> RpcResult>; /// Returns the structured logs created during the execution of EVM between two blocks /// (excluding start) as a JSON object. @@ -222,7 +222,7 @@ pub trait DebugApi { /// Returns the raw value of a key stored in the database. #[method(name = "dbGet")] - async fn debug_db_get(&self, key: String) -> RpcResult<()>; + async fn debug_db_get(&self, key: String) -> RpcResult>; /// Retrieves the state that corresponds to the block number and returns a list of accounts /// (including storage and code). 
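The simplified `batch` handling in `rpc_service.rs` above keeps the calls-as-ordered-futures pattern while silently dropping notifications. A hedged sketch of that dispatch shape, with a toy `BatchEntry` enum and string responses in place of jsonrpsee's types (assumes `futures` and `tokio` with the `macros`/`rt` features as dev-dependencies):

```rust
// Sketch of the batch dispatch pattern: calls become futures, notifications are
// dropped, malformed entries become immediate error responses, and FuturesOrdered
// preserves the original ordering. All names here are simplified stand-ins.
use futures::{future::Either, stream::FuturesOrdered, StreamExt};

enum BatchEntry {
    Call(u64),
    Notification,
    Invalid,
}

#[tokio::main]
async fn main() {
    let entries =
        vec![BatchEntry::Call(1), BatchEntry::Notification, BatchEntry::Invalid, BatchEntry::Call(2)];

    let pending: FuturesOrdered<_> = entries
        .into_iter()
        .filter_map(|entry| match entry {
            // Real calls run asynchronously.
            BatchEntry::Call(id) => Some(Either::Right(async move { format!("result:{id}") })),
            // Notifications expect no response and are simply skipped.
            BatchEntry::Notification => None,
            // Malformed entries get an immediate error response.
            BatchEntry::Invalid => {
                Some(Either::Left(async { "error:invalid request".to_string() }))
            }
        })
        .collect();

    let responses: Vec<String> = pending.collect().await;
    assert_eq!(responses, ["result:1", "error:invalid request", "result:2"]);
}
```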
diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index bf097eec2f..175825fe92 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -240,6 +240,18 @@ pub trait EngineApi { &self, versioned_hashes: Vec, ) -> RpcResult>>; + + /// Fetch blobs for the consensus layer from the blob store. + /// + /// Returns a response of the same length as the request. Missing or older-version blobs are + /// returned as `null` elements. + /// + /// Returns `null` if syncing. + #[method(name = "getBlobsV3")] + async fn get_blobs_v3( + &self, + versioned_hashes: Vec, + ) -> RpcResult>>>; } /// A subset of the ETH rpc interface: diff --git a/crates/rpc/rpc-api/src/lib.rs b/crates/rpc/rpc-api/src/lib.rs index 89e21c80b0..9c3a4baa03 100644 --- a/crates/rpc/rpc-api/src/lib.rs +++ b/crates/rpc/rpc-api/src/lib.rs @@ -25,11 +25,14 @@ mod net; mod otterscan; mod reth; mod rpc; +mod testing; mod trace; mod txpool; mod validation; mod web3; +pub use testing::{TestingBuildBlockRequestV1, TESTING_BUILD_BLOCK_V1}; + /// re-export of all server traits pub use servers::*; @@ -45,6 +48,7 @@ pub mod servers { otterscan::OtterscanServer, reth::RethApiServer, rpc::RpcApiServer, + testing::TestingApiServer, trace::TraceApiServer, txpool::TxPoolApiServer, validation::BlockSubmissionValidationApiServer, @@ -75,6 +79,7 @@ pub mod clients { otterscan::OtterscanClient, reth::RethApiClient, rpc::RpcApiServer, + testing::TestingApiClient, trace::TraceApiClient, txpool::TxPoolApiClient, validation::BlockSubmissionValidationApiClient, diff --git a/crates/rpc/rpc-api/src/testing.rs b/crates/rpc/rpc-api/src/testing.rs new file mode 100644 index 0000000000..f49380058e --- /dev/null +++ b/crates/rpc/rpc-api/src/testing.rs @@ -0,0 +1,45 @@ +//! Testing namespace for building a block in a single call. +//! +//! This follows the `testing_buildBlockV1` specification. **Highly sensitive:** +//! testing-only, powerful enough to include arbitrary transactions; must stay +//! disabled by default and never be exposed on public-facing RPC without an +//! explicit operator flag. + +use alloy_primitives::{Bytes, B256}; +use alloy_rpc_types_engine::{ + ExecutionPayloadEnvelopeV5, PayloadAttributes as EthPayloadAttributes, +}; +use jsonrpsee::proc_macros::rpc; +use serde::{Deserialize, Serialize}; + +/// Capability string for `testing_buildBlockV1`. +pub const TESTING_BUILD_BLOCK_V1: &str = "testing_buildBlockV1"; + +/// Request payload for `testing_buildBlockV1`. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TestingBuildBlockRequestV1 { + /// Parent block hash of the block to build. + pub parent_block_hash: B256, + /// Payload attributes (Cancun version). + pub payload_attributes: EthPayloadAttributes, + /// Raw signed transactions to force-include in order. + pub transactions: Vec, + /// Optional extra data for the block header. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub extra_data: Option, +} + +/// Testing RPC interface for building a block in a single call. +#[cfg_attr(not(feature = "client"), rpc(server, namespace = "testing"))] +#[cfg_attr(feature = "client", rpc(server, client, namespace = "testing"))] +pub trait TestingApi { + /// Builds a block using the provided parent, payload attributes, and transactions. 
+ /// + /// See + #[method(name = "buildBlockV1")] + async fn build_block_v1( + &self, + request: TestingBuildBlockRequestV1, + ) -> jsonrpsee::core::RpcResult; +} diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index e7178405b3..93bfd8acbd 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -17,6 +17,7 @@ reth-primitives-traits.workspace = true reth-ipc.workspace = true reth-chainspec.workspace = true reth-consensus.workspace = true +reth-engine-primitives.workspace = true reth-network-api.workspace = true reth-node-core.workspace = true reth-rpc.workspace = true @@ -26,6 +27,7 @@ reth-rpc-layer.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } +reth-tokio-util.workspace = true reth-transaction-pool.workspace = true reth-storage-api.workspace = true reth-chain-state.workspace = true @@ -63,7 +65,6 @@ reth-rpc-api = { workspace = true, features = ["client"] } reth-rpc-engine-api.workspace = true reth-tracing.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } -reth-engine-primitives.workspace = true reth-node-ethereum.workspace = true alloy-primitives.workspace = true diff --git a/crates/rpc/rpc-builder/src/config.rs b/crates/rpc/rpc-builder/src/config.rs index 011e24d468..1acd6744ed 100644 --- a/crates/rpc/rpc-builder/src/config.rs +++ b/crates/rpc/rpc-builder/src/config.rs @@ -94,6 +94,7 @@ impl RethRpcServerConfig for RpcServerArgs { fn eth_config(&self) -> EthConfig { EthConfig::default() .max_tracing_requests(self.rpc_max_tracing_requests) + .max_blocking_io_requests(self.rpc_max_blocking_io_requests) .max_trace_filter_blocks(self.rpc_max_trace_filter_blocks) .max_blocks_per_filter(self.rpc_max_blocks_per_filter.unwrap_or_max()) .max_logs_per_response(self.rpc_max_logs_per_response.unwrap_or_max() as usize) @@ -105,6 +106,7 @@ impl RethRpcServerConfig for RpcServerArgs { .proof_permits(self.rpc_proof_permits) .pending_block_kind(self.rpc_pending_block) .raw_tx_forwarder(self.rpc_forwarder.clone()) + .rpc_evm_memory_limit(self.rpc_evm_memory_limit) } fn flashbots_config(&self) -> ValidationApiConfig { @@ -137,7 +139,7 @@ impl RethRpcServerConfig for RpcServerArgs { fn transport_rpc_module_config(&self) -> TransportRpcModuleConfig { let mut config = TransportRpcModuleConfig::default() - .with_config(RpcModuleConfig::new(self.eth_config(), self.flashbots_config())); + .with_config(RpcModuleConfig::new(self.eth_config())); if self.http { config = config.with_http( @@ -189,6 +191,13 @@ impl RethRpcServerConfig for RpcServerArgs { ); } + if self.ws_api.is_some() && !self.ws { + warn!( + target: "reth::cli", + "The --ws.api flag is set but --ws is not enabled. WS RPC API will not be exposed." 
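// Editor's sketch, not part of the patch: the wire shape implied by the
// `TestingBuildBlockRequestV1` struct above (camelCase field names, `extraData` omitted
// when unset). All values are placeholders; the request carries the parent hash, the
// payload attributes, and the raw signed transactions to force-include in order.
use alloy_primitives::{Bytes, B256};
use alloy_rpc_types_engine::PayloadAttributes;
use reth_rpc_api::TestingBuildBlockRequestV1;

fn example_request() -> serde_json::Value {
    let request = TestingBuildBlockRequestV1 {
        parent_block_hash: B256::ZERO,
        payload_attributes: PayloadAttributes {
            timestamp: 1_700_000_000,
            prev_randao: B256::ZERO,
            suggested_fee_recipient: Default::default(),
            withdrawals: Some(vec![]),
            parent_beacon_block_root: Some(B256::ZERO),
        },
        transactions: vec![Bytes::from_static(&[0x02])], // raw signed txs, included in order
        extra_data: None,                                // serialized as a missing `extraData` key
    };
    // Top-level keys: "parentBlockHash", "payloadAttributes", "transactions".
    serde_json::to_value(&request).expect("request serializes")
}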
+ ); + } + if self.http { let socket_address = SocketAddr::new(self.http_addr, self.http_port); config = config diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 6bd4223f60..6fd882dc50 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -32,12 +32,13 @@ use jsonrpsee::{ }; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_consensus::{ConsensusError, FullConsensus}; +use reth_engine_primitives::ConsensusEngineEvent; use reth_evm::ConfigureEvm; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; use reth_primitives_traits::{NodePrimitives, TxTy}; use reth_rpc::{ AdminApi, DebugApi, EngineEthApi, EthApi, EthApiBuilder, EthBundle, MinerApi, NetApi, - OtterscanApi, RPCApi, RethApi, TraceApi, TxPoolApi, ValidationApiConfig, Web3Api, + OtterscanApi, RPCApi, RethApi, TraceApi, TxPoolApi, Web3Api, }; use reth_rpc_api::servers::*; use reth_rpc_eth_api::{ @@ -46,16 +47,18 @@ use reth_rpc_eth_api::{ TraceExt, }, node::RpcNodeCoreAdapter, - EthApiServer, EthApiTypes, FullEthApiServer, RpcBlock, RpcConvert, RpcConverter, RpcHeader, - RpcNodeCore, RpcReceipt, RpcTransaction, RpcTxReq, + EthApiServer, EthApiTypes, FullEthApiServer, FullEthApiTypes, RpcBlock, RpcConvert, + RpcConverter, RpcHeader, RpcNodeCore, RpcReceipt, RpcTransaction, RpcTxReq, }; use reth_rpc_eth_types::{receipt::EthReceiptConverter, EthConfig, EthSubscriptionIdProvider}; use reth_rpc_layer::{AuthLayer, Claims, CompressionLayer, JwtAuthValidator, JwtSecret}; +pub use reth_rpc_server_types::RethRpcModule; use reth_storage_api::{ - AccountReader, BlockReader, ChangeSetReader, FullRpcProvider, ProviderBlock, + AccountReader, BlockReader, ChangeSetReader, FullRpcProvider, NodePrimitivesProvider, StateProviderFactory, }; use reth_tasks::{pool::BlockingTaskGuard, TaskSpawner, TokioTaskExecutor}; +use reth_tokio_util::EventSender; use reth_transaction_pool::{noop::NoopTransactionPool, TransactionPool}; use serde::{Deserialize, Serialize}; use std::{ @@ -74,7 +77,7 @@ use jsonrpsee::server::ServerConfigBuilder; pub use reth_ipc::server::{ Builder as IpcServerBuilder, RpcServiceBuilder as IpcRpcServiceBuilder, }; -pub use reth_rpc_server_types::{constants, RethRpcModule, RpcModuleSelection}; +pub use reth_rpc_server_types::{constants, RpcModuleSelection}; pub use tower::layer::util::{Identity, Stack}; /// Auth server utilities. @@ -99,7 +102,7 @@ pub use eth::EthHandlers; // Rpc server metrics mod metrics; use crate::middleware::RethRpcMiddleware; -pub use metrics::{MeteredRequestFuture, RpcRequestMetricsService}; +pub use metrics::{MeteredBatchRequestsFuture, MeteredRequestFuture, RpcRequestMetricsService}; use reth_chain_state::CanonStateSubscriptions; use reth_rpc::eth::sim_bundle::EthSimBundle; @@ -326,6 +329,7 @@ where module_config: TransportRpcModuleConfig, engine: impl IntoEngineApiRpcModule, eth: EthApi, + engine_events: EventSender>, ) -> ( TransportRpcModules, AuthRpcModule, @@ -334,16 +338,10 @@ where where EthApi: FullEthApiServer, { - let Self { provider, pool, network, executor, consensus, evm_config, .. 
} = self; - let config = module_config.config.clone().unwrap_or_default(); - let mut registry = RpcRegistryInner::new( - provider, pool, network, executor, consensus, config, evm_config, eth, - ); - + let mut registry = self.into_registry(config, eth, engine_events); let modules = registry.create_transport_rpc_modules(module_config); - let auth_module = registry.create_auth_module(engine); (modules, auth_module, registry) @@ -357,12 +355,23 @@ where self, config: RpcModuleConfig, eth: EthApi, + engine_events: EventSender>, ) -> RpcRegistryInner where - EthApi: EthApiTypes + 'static, + EthApi: FullEthApiServer, { let Self { provider, pool, network, executor, consensus, evm_config, .. } = self; - RpcRegistryInner::new(provider, pool, network, executor, consensus, config, evm_config, eth) + RpcRegistryInner::new( + provider, + pool, + network, + executor, + consensus, + config, + evm_config, + eth, + engine_events, + ) } /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can @@ -371,27 +380,17 @@ where self, module_config: TransportRpcModuleConfig, eth: EthApi, + engine_events: EventSender>, ) -> TransportRpcModules<()> where EthApi: FullEthApiServer, { let mut modules = TransportRpcModules::default(); - let Self { provider, pool, network, executor, consensus, evm_config, .. } = self; - if !module_config.is_empty() { let TransportRpcModuleConfig { http, ws, ipc, config } = module_config.clone(); - let mut registry = RpcRegistryInner::new( - provider, - pool, - network, - executor, - consensus, - config.unwrap_or_default(), - evm_config, - eth, - ); + let mut registry = self.into_registry(config.unwrap_or_default(), eth, engine_events); modules.config = module_config; modules.http = registry.maybe_module(http.as_ref()); @@ -414,8 +413,6 @@ impl Default for RpcModuleBuilder { pub struct RpcModuleConfig { /// `eth` namespace settings eth: EthConfig, - /// `flashbots` namespace settings - flashbots: ValidationApiConfig, } // === impl RpcModuleConfig === @@ -427,8 +424,8 @@ impl RpcModuleConfig { } /// Returns a new RPC module config given the eth namespace config - pub const fn new(eth: EthConfig, flashbots: ValidationApiConfig) -> Self { - Self { eth, flashbots } + pub const fn new(eth: EthConfig) -> Self { + Self { eth } } /// Get a reference to the eth namespace config @@ -446,7 +443,6 @@ impl RpcModuleConfig { #[derive(Clone, Debug, Default)] pub struct RpcModuleConfigBuilder { eth: Option, - flashbots: Option, } // === impl RpcModuleConfigBuilder === @@ -458,16 +454,10 @@ impl RpcModuleConfigBuilder { self } - /// Configures a custom flashbots namespace config - pub fn flashbots(mut self, flashbots: ValidationApiConfig) -> Self { - self.flashbots = Some(flashbots); - self - } - /// Consumes the type and creates the [`RpcModuleConfig`] pub fn build(self) -> RpcModuleConfig { - let Self { eth, flashbots } = self; - RpcModuleConfig { eth: eth.unwrap_or_default(), flashbots: flashbots.unwrap_or_default() } + let Self { eth } = self; + RpcModuleConfig { eth: eth.unwrap_or_default() } } /// Get a reference to the eth namespace config, if any @@ -487,16 +477,8 @@ impl RpcModuleConfigBuilder { } /// A Helper type the holds instances of the configured modules. 
-#[derive(Debug, Clone)] -#[expect(dead_code)] // Consensus generic, might be useful in the future -pub struct RpcRegistryInner< - Provider: BlockReader, - Pool, - Network, - EthApi: EthApiTypes, - EvmConfig, - Consensus, -> { +#[derive(Debug)] +pub struct RpcRegistryInner { provider: Provider, pool: Pool, network: Network, @@ -511,6 +493,9 @@ pub struct RpcRegistryInner< modules: HashMap, /// eth config settings eth_config: EthConfig, + /// Notification channel for engine API events + engine_events: + EventSender::Primitives>>, } // === impl RpcRegistryInner === @@ -527,7 +512,7 @@ where + 'static, Pool: Send + Sync + Clone + 'static, Network: Clone + 'static, - EthApi: EthApiTypes + 'static, + EthApi: FullEthApiTypes + 'static, EvmConfig: ConfigureEvm, { /// Creates a new, empty instance. @@ -541,6 +526,9 @@ where config: RpcModuleConfig, evm_config: EvmConfig, eth_api: EthApi, + engine_events: EventSender< + ConsensusEngineEvent<::Primitives>, + >, ) -> Self where EvmConfig: ConfigureEvm, @@ -560,14 +548,14 @@ where blocking_pool_guard, eth_config: config.eth, evm_config, + engine_events, } } } -impl - RpcRegistryInner +impl + RpcRegistryInner where - Provider: BlockReader, EthApi: EthApiTypes, { /// Returns a reference to the installed [`EthApi`]. @@ -595,6 +583,11 @@ where &self.provider } + /// Returns a reference to the evm config + pub const fn evm_config(&self) -> &Evm { + &self.evm_config + } + /// Returns all installed methods pub fn methods(&self) -> Vec { self.modules.values().cloned().collect() @@ -706,8 +699,7 @@ where /// If called outside of the tokio runtime. See also [`Self::eth_api`] pub fn register_debug(&mut self) -> &mut Self where - EthApi: EthApiSpec + EthTransactions + TraceExt, - EvmConfig::Primitives: NodePrimitives>, + EthApi: EthTransactions + TraceExt, { let debug_api = self.debug_api(); self.modules.insert(RethRpcModule::Debug, debug_api.into_rpc().into()); @@ -814,8 +806,16 @@ where /// # Panics /// /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn debug_api(&self) -> DebugApi { - DebugApi::new(self.eth_api().clone(), self.blocking_pool_guard.clone()) + pub fn debug_api(&self) -> DebugApi + where + EthApi: FullEthApiTypes, + { + DebugApi::new( + self.eth_api().clone(), + self.blocking_pool_guard.clone(), + self.tasks(), + self.engine_events.new_listener(), + ) } /// Instantiates `NetApi` @@ -933,11 +933,14 @@ where ) .into_rpc() .into(), - RethRpcModule::Debug => { - DebugApi::new(eth_api.clone(), self.blocking_pool_guard.clone()) - .into_rpc() - .into() - } + RethRpcModule::Debug => DebugApi::new( + eth_api.clone(), + self.blocking_pool_guard.clone(), + &*self.executor, + self.engine_events.new_listener(), + ) + .into_rpc() + .into(), RethRpcModule::Eth => { // merge all eth handlers let mut module = eth_api.clone().into_rpc(); @@ -968,7 +971,7 @@ where RethRpcModule::Web3 => Web3Api::new(self.network.clone()).into_rpc().into(), RethRpcModule::Txpool => TxPoolApi::new( self.eth.api.pool().clone(), - dyn_clone::clone(self.eth.api.tx_resp_builder()), + dyn_clone::clone(self.eth.api.converter()), ) .into_rpc() .into(), @@ -986,18 +989,18 @@ where .into_rpc() .into() } - // only relevant for Ethereum and configured in `EthereumAddOns` - // implementation - // TODO: can we get rid of this here? 
- // Custom modules are not handled here - they should be registered via - // extend_rpc_modules - RethRpcModule::Flashbots | RethRpcModule::Other(_) => Default::default(), RethRpcModule::Miner => MinerApi::default().into_rpc().into(), RethRpcModule::Mev => { EthSimBundle::new(eth_api.clone(), self.blocking_pool_guard.clone()) .into_rpc() .into() } + // these are implementation specific and need to be handled during + // initialization and should be registered via extend_rpc_modules in the + // nodebuilder rpc addon stack + RethRpcModule::Flashbots | + RethRpcModule::Testing | + RethRpcModule::Other(_) => Default::default(), }) .clone() }) @@ -1005,6 +1008,33 @@ where } } +impl Clone + for RpcRegistryInner +where + EthApi: EthApiTypes, + Provider: Clone, + Pool: Clone, + Network: Clone, + EvmConfig: Clone, + Consensus: Clone, +{ + fn clone(&self) -> Self { + Self { + provider: self.provider.clone(), + pool: self.pool.clone(), + network: self.network.clone(), + executor: self.executor.clone(), + evm_config: self.evm_config.clone(), + consensus: self.consensus.clone(), + eth: self.eth.clone(), + blocking_pool_guard: self.blocking_pool_guard.clone(), + modules: self.modules.clone(), + eth_config: self.eth_config.clone(), + engine_events: self.engine_events.clone(), + } + } +} + /// A builder type for configuring and launching the servers that will handle RPC requests. /// /// Supported server transports are: @@ -1665,6 +1695,27 @@ impl TransportRpcModules { Ok(()) } + /// Merge the given [`Methods`] in all configured transport modules if the given + /// [`RethRpcModule`] is configured for the transport, using a closure to lazily + /// create the methods only when needed. + /// + /// The closure is only called if at least one transport has the module configured. + /// Fails if any of the methods in the closure result is present already. + pub fn merge_if_module_configured_with( + &mut self, + module: RethRpcModule, + f: F, + ) -> Result<(), RegisterMethodError> + where + F: FnOnce() -> Methods, + { + // Early return if module not configured for any transport + if !self.module_config().contains_any(&module) { + return Ok(()); + } + self.merge_if_module_configured(module, f()) + } + /// Merge the given [Methods] in the configured http methods. /// /// Fails if any of the methods in other is present already. 
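// Editor's sketch, not part of the patch: the call pattern the comment above points at for
// implementation-specific namespaces (`flashbots`, `testing`, ...). The closure, and thus
// the potentially expensive module construction, only runs when the namespace is enabled
// for at least one transport. `build_validation_methods` is a hypothetical constructor.
fn register_flashbots(modules: &mut reth_rpc_builder::TransportRpcModules) {
    modules
        .merge_if_module_configured_with(reth_rpc_builder::RethRpcModule::Flashbots, || {
            build_validation_methods()
        })
        .expect("flashbots methods do not collide");
}

// Hypothetical stand-in for the real `flashbots` module construction.
fn build_validation_methods() -> jsonrpsee::Methods {
    jsonrpsee::RpcModule::new(()).into()
}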
@@ -2291,7 +2342,7 @@ mod tests { $( let val: RethRpcModule = $s.parse().unwrap(); assert_eq!(val, $v); - assert_eq!(val.to_string().as_str(), $s); + assert_eq!(val.to_string(), $s); )* }; } @@ -2593,4 +2644,38 @@ mod tests { assert!(ipc.method("eth_existing").is_none()); assert!(ipc.method("eth_new").is_none()); } + + #[test] + fn test_merge_if_module_configured_with_lazy_evaluation() { + // Create a config that enables RethRpcModule::Eth for HTTP only + let config = TransportRpcModuleConfig::default().with_http([RethRpcModule::Eth]); + + let mut modules = + TransportRpcModules { config, http: Some(RpcModule::new(())), ws: None, ipc: None }; + + // Track whether closure was called + let mut closure_called = false; + + // Test with configured module - closure should be called + let result = modules.merge_if_module_configured_with(RethRpcModule::Eth, || { + closure_called = true; + let mut methods = RpcModule::new(()); + methods.register_method("eth_test", |_, _, _| "test").unwrap(); + methods.into() + }); + + assert!(result.is_ok()); + assert!(closure_called, "Closure should be called when module is configured"); + assert!(modules.http.as_ref().unwrap().method("eth_test").is_some()); + + // Reset and test with unconfigured module - closure should NOT be called + closure_called = false; + let result = modules.merge_if_module_configured_with(RethRpcModule::Debug, || { + closure_called = true; + RpcModule::new(()).into() + }); + + assert!(result.is_ok()); + assert!(!closure_called, "Closure should NOT be called when module is not configured"); + } } diff --git a/crates/rpc/rpc-builder/src/metrics.rs b/crates/rpc/rpc-builder/src/metrics.rs index f32d90ed09..56bb9a313c 100644 --- a/crates/rpc/rpc-builder/src/metrics.rs +++ b/crates/rpc/rpc-builder/src/metrics.rs @@ -62,7 +62,7 @@ impl RpcRequestMetrics { Self::new(module, RpcTransport::WebSocket) } - /// Creates a new instance of the metrics layer for Ws. + /// Creates a new instance of the metrics layer for Ipc. pub(crate) fn ipc(module: &RpcModule<()>) -> Self { Self::new(module, RpcTransport::Ipc) } @@ -127,7 +127,20 @@ where } fn batch<'a>(&self, req: Batch<'a>) -> impl Future + Send + 'a { - self.inner.batch(req) + self.metrics.inner.connection_metrics.batches_started_total.increment(1); + + for batch_entry in req.iter().flatten() { + let method_name = batch_entry.method_name(); + if let Some(call_metrics) = self.metrics.inner.call_metrics.get(method_name) { + call_metrics.started_total.increment(1); + } + } + + MeteredBatchRequestsFuture { + fut: self.inner.batch(req), + started_at: Instant::now(), + metrics: self.metrics.clone(), + } } fn notification<'a>( @@ -194,6 +207,42 @@ impl> Future for MeteredRequestFuture { } } +/// Response future to update the metrics for a batch of request/response pairs. 
+#[pin_project::pin_project] +pub struct MeteredBatchRequestsFuture { + #[pin] + fut: F, + /// time when the batch request started + started_at: Instant, + /// metrics for the batch + metrics: RpcRequestMetrics, +} + +impl std::fmt::Debug for MeteredBatchRequestsFuture { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str("MeteredBatchRequestsFuture") + } +} + +impl Future for MeteredBatchRequestsFuture +where + F: Future, +{ + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let res = this.fut.poll(cx); + + if res.is_ready() { + let elapsed = this.started_at.elapsed().as_secs_f64(); + this.metrics.inner.connection_metrics.batches_finished_total.increment(1); + this.metrics.inner.connection_metrics.batch_response_time_seconds.record(elapsed); + } + res + } +} + /// The transport protocol used for the RPC connection. #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub(crate) enum RpcTransport { @@ -232,6 +281,12 @@ struct RpcServerConnectionMetrics { requests_finished_total: Counter, /// Response for a single request/response pair request_time_seconds: Histogram, + /// The number of batch requests started + batches_started_total: Counter, + /// The number of batch requests finished + batches_finished_total: Counter, + /// Response time for a batch request + batch_response_time_seconds: Histogram, } /// Metrics for the RPC calls diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index 601fd78960..6be4d5d965 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -1694,3 +1694,47 @@ async fn test_eth_fee_history_raw() { ) .await; } + +#[tokio::test(flavor = "multi_thread")] +async fn test_debug_db_get() { + reth_tracing::init_test_tracing(); + + let handle = launch_http(vec![RethRpcModule::Debug]).await; + let client = handle.http_client().unwrap(); + + let valid_test_cases = [ + "0x630000000000000000000000000000000000000000000000000000000000000000", + "c00000000000000000000000000000000", + ]; + + for key in valid_test_cases { + DebugApiClient::<()>::debug_db_get(&client, key.into()).await.unwrap(); + } + + // Invalid test cases + let test_cases = [ + ("0x0000", "Key must be 33 bytes, got 2"), + ("00", "Key must be 33 bytes, got 2"), + ( + "0x000000000000000000000000000000000000000000000000000000000000000000", + "Key prefix must be 0x63", + ), + ("000000000000000000000000000000000", "Key prefix must be 0x63"), + ("0xc0000000000000000000000000000000000000000000000000000000000000000", "Invalid hex key"), + ]; + + let match_error_msg = |err: jsonrpsee::core::client::Error, expected: String| -> bool { + match err { + jsonrpsee::core::client::Error::Call(error_obj) => { + error_obj.code() == ErrorCode::InvalidParams.code() && + error_obj.message() == expected + } + _ => false, + } + }; + + for (key, expected) in test_cases { + let err = DebugApiClient::<()>::debug_db_get(&client, key.into()).await.unwrap_err(); + assert!(match_error_msg(err, expected.into())); + } +} diff --git a/crates/rpc/rpc-builder/tests/it/middleware.rs b/crates/rpc/rpc-builder/tests/it/middleware.rs index 9a70356bca..36c8ca1732 100644 --- a/crates/rpc/rpc-builder/tests/it/middleware.rs +++ b/crates/rpc/rpc-builder/tests/it/middleware.rs @@ -9,6 +9,7 @@ use reth_ethereum_primitives::TransactionSigned; use reth_rpc_builder::{RpcServerConfig, TransportRpcModuleConfig}; use reth_rpc_eth_api::EthApiClient; use 
reth_rpc_server_types::RpcModuleSelection; +use reth_tokio_util::EventSender; use std::{ future::Future, sync::{ @@ -73,8 +74,11 @@ where async fn test_rpc_middleware() { let builder = test_rpc_builder(); let eth_api = builder.bootstrap_eth_api(); - let modules = - builder.build(TransportRpcModuleConfig::set_http(RpcModuleSelection::All), eth_api); + let modules = builder.build( + TransportRpcModuleConfig::set_http(RpcModuleSelection::All), + eth_api, + EventSender::new(1), + ); let mylayer = MyMiddlewareLayer::default(); diff --git a/crates/rpc/rpc-builder/tests/it/startup.rs b/crates/rpc/rpc-builder/tests/it/startup.rs index 24aa1d9327..eb84fb648a 100644 --- a/crates/rpc/rpc-builder/tests/it/startup.rs +++ b/crates/rpc/rpc-builder/tests/it/startup.rs @@ -7,6 +7,7 @@ use reth_rpc_builder::{ RpcServerConfig, TransportRpcModuleConfig, }; use reth_rpc_server_types::RethRpcModule; +use reth_tokio_util::EventSender; use crate::utils::{ launch_http, launch_http_ws_same_port, launch_ws, test_address, test_rpc_builder, @@ -27,8 +28,11 @@ async fn test_http_addr_in_use() { let addr = handle.http_local_addr().unwrap(); let builder = test_rpc_builder(); let eth_api = builder.bootstrap_eth_api(); - let server = - builder.build(TransportRpcModuleConfig::set_http(vec![RethRpcModule::Admin]), eth_api); + let server = builder.build( + TransportRpcModuleConfig::set_http(vec![RethRpcModule::Admin]), + eth_api, + EventSender::new(1), + ); let result = RpcServerConfig::http(Default::default()).with_http_address(addr).start(&server).await; let err = result.unwrap_err(); @@ -41,8 +45,11 @@ async fn test_ws_addr_in_use() { let addr = handle.ws_local_addr().unwrap(); let builder = test_rpc_builder(); let eth_api = builder.bootstrap_eth_api(); - let server = - builder.build(TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin]), eth_api); + let server = builder.build( + TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin]), + eth_api, + EventSender::new(1), + ); let result = RpcServerConfig::ws(Default::default()).with_ws_address(addr).start(&server).await; let err = result.unwrap_err(); assert!(is_addr_in_use_kind(&err, ServerKind::WS(addr)), "{err}"); @@ -64,6 +71,7 @@ async fn test_launch_same_port_different_modules() { TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin]) .with_http(vec![RethRpcModule::Eth]), eth_api, + EventSender::new(1), ); let addr = test_address(); let res = RpcServerConfig::ws(Default::default()) @@ -87,6 +95,7 @@ async fn test_launch_same_port_same_cors() { TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Eth]) .with_http(vec![RethRpcModule::Eth]), eth_api, + EventSender::new(1), ); let addr = test_address(); let res = RpcServerConfig::ws(Default::default()) @@ -108,6 +117,7 @@ async fn test_launch_same_port_different_cors() { TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Eth]) .with_http(vec![RethRpcModule::Eth]), eth_api, + EventSender::new(1), ); let addr = test_address(); let res = RpcServerConfig::ws(Default::default()) diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index 673a1f79fc..b41951b722 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -4,6 +4,7 @@ use reth_consensus::noop::NoopConsensus; use reth_engine_primitives::ConsensusEngineHandle; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_ethereum_primitives::EthPrimitives; +use reth_tokio_util::EventSender; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use 
reth_evm_ethereum::EthEvmConfig; @@ -53,6 +54,7 @@ pub async fn launch_auth(secret: JwtSecret) -> AuthServerHandle { EngineCapabilities::default(), EthereumEngineValidator::new(MAINNET.clone()), false, + NoopNetwork::default(), ); let module = AuthRpcModule::new(engine_api); module.start_server(config).await.unwrap() @@ -62,7 +64,8 @@ pub async fn launch_auth(secret: JwtSecret) -> AuthServerHandle { pub async fn launch_http(modules: impl Into) -> RpcServerHandle { let builder = test_rpc_builder(); let eth_api = builder.bootstrap_eth_api(); - let server = builder.build(TransportRpcModuleConfig::set_http(modules), eth_api); + let server = + builder.build(TransportRpcModuleConfig::set_http(modules), eth_api, EventSender::new(1)); RpcServerConfig::http(Default::default()) .with_http_address(test_address()) .start(&server) @@ -74,7 +77,8 @@ pub async fn launch_http(modules: impl Into) -> RpcServerHan pub async fn launch_ws(modules: impl Into) -> RpcServerHandle { let builder = test_rpc_builder(); let eth_api = builder.bootstrap_eth_api(); - let server = builder.build(TransportRpcModuleConfig::set_ws(modules), eth_api); + let server = + builder.build(TransportRpcModuleConfig::set_ws(modules), eth_api, EventSender::new(1)); RpcServerConfig::ws(Default::default()) .with_ws_address(test_address()) .start(&server) @@ -87,8 +91,11 @@ pub async fn launch_http_ws(modules: impl Into) -> RpcServer let builder = test_rpc_builder(); let eth_api = builder.bootstrap_eth_api(); let modules = modules.into(); - let server = builder - .build(TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules), eth_api); + let server = builder.build( + TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules), + eth_api, + EventSender::new(1), + ); RpcServerConfig::ws(Default::default()) .with_ws_address(test_address()) .with_ws_address(test_address()) @@ -104,8 +111,11 @@ pub async fn launch_http_ws_same_port(modules: impl Into) -> let builder = test_rpc_builder(); let modules = modules.into(); let eth_api = builder.bootstrap_eth_api(); - let server = builder - .build(TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules), eth_api); + let server = builder.build( + TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules), + eth_api, + EventSender::new(1), + ); let addr = test_address(); RpcServerConfig::ws(Default::default()) .with_ws_address(addr) diff --git a/crates/rpc/rpc-convert/Cargo.toml b/crates/rpc/rpc-convert/Cargo.toml index af43e9c54a..53b8d0541e 100644 --- a/crates/rpc/rpc-convert/Cargo.toml +++ b/crates/rpc/rpc-convert/Cargo.toml @@ -25,16 +25,13 @@ alloy-signer.workspace = true alloy-consensus.workspace = true alloy-network.workspace = true alloy-json-rpc.workspace = true +alloy-evm = { workspace = true, features = ["rpc"] } # optimism op-alloy-consensus = { workspace = true, optional = true } op-alloy-rpc-types = { workspace = true, optional = true } op-alloy-network = { workspace = true, optional = true } reth-optimism-primitives = { workspace = true, optional = true } -op-revm = { workspace = true, optional = true } - -# revm -revm-context.workspace = true # io jsonrpsee-types.workspace = true @@ -56,7 +53,7 @@ op = [ "dep:op-alloy-network", "dep:reth-optimism-primitives", "dep:reth-storage-api", - "dep:op-revm", "reth-evm/op", "reth-primitives-traits/op", + "alloy-evm/op", ] diff --git a/crates/rpc/rpc-convert/src/fees.rs b/crates/rpc/rpc-convert/src/fees.rs deleted file mode 100644 index 46f8fc8c20..0000000000 --- a/crates/rpc/rpc-convert/src/fees.rs +++ 
/dev/null @@ -1,281 +0,0 @@ -use alloy_primitives::{B256, U256}; -use std::cmp::min; -use thiserror::Error; - -/// Helper type for representing the fees of a `TransactionRequest` -#[derive(Debug)] -pub struct CallFees { - /// EIP-1559 priority fee - pub max_priority_fee_per_gas: Option, - /// Unified gas price setting - /// - /// Will be the configured `basefee` if unset in the request - /// - /// `gasPrice` for legacy, - /// `maxFeePerGas` for EIP-1559 - pub gas_price: U256, - /// Max Fee per Blob gas for EIP-4844 transactions - pub max_fee_per_blob_gas: Option, -} - -impl CallFees { - /// Ensures the fields of a `TransactionRequest` are not conflicting. - /// - /// # EIP-4844 transactions - /// - /// Blob transactions have an additional fee parameter `maxFeePerBlobGas`. - /// If the `maxFeePerBlobGas` or `blobVersionedHashes` are set we treat it as an EIP-4844 - /// transaction. - /// - /// Note: Due to the `Default` impl of [`BlockEnv`] (Some(0)) this assumes the `block_blob_fee` - /// is always `Some` - /// - /// ## Notable design decisions - /// - /// For compatibility reasons, this contains several exceptions when fee values are validated: - /// - If both `maxFeePerGas` and `maxPriorityFeePerGas` are set to `0` they are treated as - /// missing values, bypassing fee checks wrt. `baseFeePerGas`. - /// - /// This mirrors geth's behaviour when transaction requests are executed: - /// - /// [`BlockEnv`]: revm_context::BlockEnv - pub fn ensure_fees( - call_gas_price: Option, - call_max_fee: Option, - call_priority_fee: Option, - block_base_fee: U256, - blob_versioned_hashes: Option<&[B256]>, - max_fee_per_blob_gas: Option, - block_blob_fee: Option, - ) -> Result { - /// Get the effective gas price of a transaction as specfified in EIP-1559 with relevant - /// checks. 
- fn get_effective_gas_price( - max_fee_per_gas: Option, - max_priority_fee_per_gas: Option, - block_base_fee: U256, - ) -> Result { - match max_fee_per_gas { - Some(max_fee) => { - let max_priority_fee_per_gas = max_priority_fee_per_gas.unwrap_or(U256::ZERO); - - // only enforce the fee cap if provided input is not zero - if !(max_fee.is_zero() && max_priority_fee_per_gas.is_zero()) && - max_fee < block_base_fee - { - // `base_fee_per_gas` is greater than the `max_fee_per_gas` - return Err(CallFeesError::FeeCapTooLow) - } - if max_fee < max_priority_fee_per_gas { - return Err( - // `max_priority_fee_per_gas` is greater than the `max_fee_per_gas` - CallFeesError::TipAboveFeeCap, - ) - } - // ref - Ok(min( - max_fee, - block_base_fee - .checked_add(max_priority_fee_per_gas) - .ok_or(CallFeesError::TipVeryHigh)?, - )) - } - None => Ok(block_base_fee - .checked_add(max_priority_fee_per_gas.unwrap_or(U256::ZERO)) - .ok_or(CallFeesError::TipVeryHigh)?), - } - } - - let has_blob_hashes = - blob_versioned_hashes.as_ref().map(|blobs| !blobs.is_empty()).unwrap_or(false); - - match (call_gas_price, call_max_fee, call_priority_fee, max_fee_per_blob_gas) { - (gas_price, None, None, None) => { - // either legacy transaction or no fee fields are specified - // when no fields are specified, set gas price to zero - let gas_price = gas_price.unwrap_or(U256::ZERO); - Ok(Self { - gas_price, - max_priority_fee_per_gas: None, - max_fee_per_blob_gas: has_blob_hashes.then_some(block_blob_fee).flatten(), - }) - } - (None, max_fee_per_gas, max_priority_fee_per_gas, None) => { - // request for eip-1559 transaction - let effective_gas_price = get_effective_gas_price( - max_fee_per_gas, - max_priority_fee_per_gas, - block_base_fee, - )?; - let max_fee_per_blob_gas = has_blob_hashes.then_some(block_blob_fee).flatten(); - - Ok(Self { - gas_price: effective_gas_price, - max_priority_fee_per_gas, - max_fee_per_blob_gas, - }) - } - (None, max_fee_per_gas, max_priority_fee_per_gas, Some(max_fee_per_blob_gas)) => { - // request for eip-4844 transaction - let effective_gas_price = get_effective_gas_price( - max_fee_per_gas, - max_priority_fee_per_gas, - block_base_fee, - )?; - // Ensure blob_hashes are present - if !has_blob_hashes { - // Blob transaction but no blob hashes - return Err(CallFeesError::BlobTransactionMissingBlobHashes) - } - - Ok(Self { - gas_price: effective_gas_price, - max_priority_fee_per_gas, - max_fee_per_blob_gas: Some(max_fee_per_blob_gas), - }) - } - _ => { - // this fallback covers incompatible combinations of fields - Err(CallFeesError::ConflictingFeeFieldsInRequest) - } - } - } -} - -/// Error coming from decoding and validating transaction request fees. -#[derive(Debug, Error)] -pub enum CallFeesError { - /// Thrown when a call or transaction request (`eth_call`, `eth_estimateGas`, - /// `eth_sendTransaction`) contains conflicting fields (legacy, EIP-1559) - #[error("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")] - ConflictingFeeFieldsInRequest, - /// Thrown post London if the transaction's fee is less than the base fee of the block - #[error("max fee per gas less than block base fee")] - FeeCapTooLow, - /// Thrown to ensure no one is able to specify a transaction with a tip higher than the total - /// fee cap. - #[error("max priority fee per gas higher than max fee per gas")] - TipAboveFeeCap, - /// A sanity error to avoid huge numbers specified in the tip field. 
- #[error("max priority fee per gas higher than 2^256-1")] - TipVeryHigh, - /// Blob transaction has no versioned hashes - #[error("blob transaction missing blob hashes")] - BlobTransactionMissingBlobHashes, -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_consensus::constants::GWEI_TO_WEI; - - #[test] - fn test_ensure_0_fallback() { - let CallFees { gas_price, .. } = - CallFees::ensure_fees(None, None, None, U256::from(99), None, None, Some(U256::ZERO)) - .unwrap(); - assert!(gas_price.is_zero()); - } - - #[test] - fn test_ensure_max_fee_0_exception() { - let CallFees { gas_price, .. } = - CallFees::ensure_fees(None, Some(U256::ZERO), None, U256::from(99), None, None, None) - .unwrap(); - assert!(gas_price.is_zero()); - } - - #[test] - fn test_blob_fees() { - let CallFees { gas_price, max_fee_per_blob_gas, .. } = - CallFees::ensure_fees(None, None, None, U256::from(99), None, None, Some(U256::ZERO)) - .unwrap(); - assert!(gas_price.is_zero()); - assert_eq!(max_fee_per_blob_gas, None); - - let CallFees { gas_price, max_fee_per_blob_gas, .. } = CallFees::ensure_fees( - None, - None, - None, - U256::from(99), - Some(&[B256::from(U256::ZERO)]), - None, - Some(U256::from(99)), - ) - .unwrap(); - assert!(gas_price.is_zero()); - assert_eq!(max_fee_per_blob_gas, Some(U256::from(99))); - } - - #[test] - fn test_eip_1559_fees() { - let CallFees { gas_price, .. } = CallFees::ensure_fees( - None, - Some(U256::from(25 * GWEI_TO_WEI)), - Some(U256::from(15 * GWEI_TO_WEI)), - U256::from(15 * GWEI_TO_WEI), - None, - None, - Some(U256::ZERO), - ) - .unwrap(); - assert_eq!(gas_price, U256::from(25 * GWEI_TO_WEI)); - - let CallFees { gas_price, .. } = CallFees::ensure_fees( - None, - Some(U256::from(25 * GWEI_TO_WEI)), - Some(U256::from(5 * GWEI_TO_WEI)), - U256::from(15 * GWEI_TO_WEI), - None, - None, - Some(U256::ZERO), - ) - .unwrap(); - assert_eq!(gas_price, U256::from(20 * GWEI_TO_WEI)); - - let CallFees { gas_price, .. 
} = CallFees::ensure_fees( - None, - Some(U256::from(30 * GWEI_TO_WEI)), - Some(U256::from(30 * GWEI_TO_WEI)), - U256::from(15 * GWEI_TO_WEI), - None, - None, - Some(U256::ZERO), - ) - .unwrap(); - assert_eq!(gas_price, U256::from(30 * GWEI_TO_WEI)); - - let call_fees = CallFees::ensure_fees( - None, - Some(U256::from(30 * GWEI_TO_WEI)), - Some(U256::from(31 * GWEI_TO_WEI)), - U256::from(15 * GWEI_TO_WEI), - None, - None, - Some(U256::ZERO), - ); - assert!(call_fees.is_err()); - - let call_fees = CallFees::ensure_fees( - None, - Some(U256::from(5 * GWEI_TO_WEI)), - Some(U256::from(GWEI_TO_WEI)), - U256::from(15 * GWEI_TO_WEI), - None, - None, - Some(U256::ZERO), - ); - assert!(call_fees.is_err()); - - let call_fees = CallFees::ensure_fees( - None, - Some(U256::MAX), - Some(U256::MAX), - U256::from(5 * GWEI_TO_WEI), - None, - None, - Some(U256::ZERO), - ); - assert!(call_fees.is_err()); - } -} diff --git a/crates/rpc/rpc-convert/src/lib.rs b/crates/rpc/rpc-convert/src/lib.rs index 9844b17b60..0d33251ce0 100644 --- a/crates/rpc/rpc-convert/src/lib.rs +++ b/crates/rpc/rpc-convert/src/lib.rs @@ -11,19 +11,19 @@ #![cfg_attr(docsrs, feature(doc_cfg))] pub mod block; -mod fees; pub mod receipt; mod rpc; pub mod transaction; pub use block::TryFromBlockResponse; -pub use fees::{CallFees, CallFeesError}; pub use receipt::TryFromReceiptResponse; pub use rpc::*; pub use transaction::{ - EthTxEnvError, IntoRpcTx, RpcConvert, RpcConverter, TransactionConversionError, - TryFromTransactionResponse, TryIntoSimTx, TxInfoMapper, + RpcConvert, RpcConverter, TransactionConversionError, TryFromTransactionResponse, TryIntoSimTx, + TxInfoMapper, }; +pub use alloy_evm::rpc::{CallFees, CallFeesError, EthTxEnvError, TryIntoTxEnv}; + #[cfg(feature = "op")] pub use transaction::op::*; diff --git a/crates/rpc/rpc-convert/src/receipt.rs b/crates/rpc/rpc-convert/src/receipt.rs index 5f37c1cad5..2da79a9b53 100644 --- a/crates/rpc/rpc-convert/src/receipt.rs +++ b/crates/rpc/rpc-convert/src/receipt.rs @@ -35,7 +35,7 @@ impl TryFromReceiptResponse for reth_optimism_primit fn from_receipt_response( receipt_response: op_alloy_rpc_types::OpTransactionReceipt, ) -> Result { - Ok(receipt_response.inner.inner.map_logs(Into::into).into()) + Ok(receipt_response.inner.inner.into_components().0.map_logs(Into::into)) } } @@ -70,14 +70,17 @@ mod tests { #[cfg(feature = "op")] #[test] fn test_try_from_receipt_response_optimism() { - use op_alloy_consensus::OpReceiptEnvelope; + use alloy_consensus::ReceiptWithBloom; + use op_alloy_consensus::OpReceipt; use op_alloy_network::Optimism; use op_alloy_rpc_types::OpTransactionReceipt; - use reth_optimism_primitives::OpReceipt; let op_receipt = OpTransactionReceipt { inner: alloy_rpc_types_eth::TransactionReceipt { - inner: OpReceiptEnvelope::Eip1559(Default::default()), + inner: ReceiptWithBloom { + receipt: OpReceipt::Eip1559(Default::default()), + logs_bloom: Default::default(), + }, transaction_hash: Default::default(), transaction_index: None, block_hash: None, diff --git a/crates/rpc/rpc-convert/src/rpc.rs b/crates/rpc/rpc-convert/src/rpc.rs index cf67bc11ad..7fe3ae8e19 100644 --- a/crates/rpc/rpc-convert/src/rpc.rs +++ b/crates/rpc/rpc-convert/src/rpc.rs @@ -90,13 +90,14 @@ impl SignableTxRequest ) -> Result { let mut tx = self.build_typed_tx().map_err(|_| SignTxRequestError::InvalidTransactionRequest)?; - let signature = signer.sign_transaction(&mut tx).await?; - // sanity check + // sanity check: deposit transactions must not be signed by the user if tx.is_deposit() { return 
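// Editor's sketch, not part of the patch: the EIP-1559 fee resolution formerly implemented
// in the deleted `fees.rs` and now re-exported from `alloy_evm::rpc` (see the `lib.rs` hunk
// above). The signature is assumed to be unchanged by the move. With a 25 gwei fee cap, a
// 5 gwei tip and a 15 gwei base fee, the effective price is min(cap, base + tip) = 20 gwei.
use alloy_primitives::U256;
use reth_rpc_convert::CallFees;

fn effective_gas_price_example() {
    const GWEI: u128 = 1_000_000_000;
    let fees = CallFees::ensure_fees(
        None,                        // no legacy gasPrice
        Some(U256::from(25 * GWEI)), // maxFeePerGas
        Some(U256::from(5 * GWEI)),  // maxPriorityFeePerGas
        U256::from(15 * GWEI),       // block base fee
        None,                        // no blob versioned hashes
        None,                        // no maxFeePerBlobGas
        Some(U256::ZERO),            // block blob fee
    )
    .expect("fee fields are consistent");
    assert_eq!(fees.gas_price, U256::from(20 * GWEI));
}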
Err(SignTxRequestError::InvalidTransactionRequest); } + let signature = signer.sign_transaction(&mut tx).await?; + Ok(tx.into_signed(signature).into()) } } diff --git a/crates/rpc/rpc-convert/src/transaction.rs b/crates/rpc/rpc-convert/src/transaction.rs index 6766ec43fb..1036e15199 100644 --- a/crates/rpc/rpc-convert/src/transaction.rs +++ b/crates/rpc/rpc-convert/src/transaction.rs @@ -1,30 +1,21 @@ //! Compatibility functions for rpc `Transaction` type. use crate::{ - fees::{CallFees, CallFeesError}, - RpcHeader, RpcReceipt, RpcTransaction, RpcTxReq, RpcTypes, SignableTxRequest, + RpcHeader, RpcReceipt, RpcTransaction, RpcTxReq, RpcTypes, SignableTxRequest, TryIntoTxEnv, }; use alloy_consensus::{ error::ValueError, transaction::Recovered, EthereumTxEnvelope, Sealable, TxEip4844, }; use alloy_network::Network; -use alloy_primitives::{Address, TxKind, U256}; -use alloy_rpc_types_eth::{ - request::{TransactionInputError, TransactionRequest}, - Transaction, TransactionInfo, -}; +use alloy_primitives::{Address, U256}; +use alloy_rpc_types_eth::{request::TransactionRequest, Transaction, TransactionInfo}; use core::error; use dyn_clone::DynClone; -use reth_evm::{ - revm::context_interface::{either::Either, Block}, - BlockEnvFor, ConfigureEvm, EvmEnvFor, TxEnvFor, -}; +use reth_evm::{BlockEnvFor, ConfigureEvm, EvmEnvFor, TxEnvFor}; use reth_primitives_traits::{ BlockTy, HeaderTy, NodePrimitives, SealedBlock, SealedHeader, SealedHeaderFor, TransactionMeta, TxTy, }; -use revm_context::{BlockEnv, CfgEnv, TxEnv}; use std::{convert::Infallible, error::Error, fmt::Debug, marker::PhantomData}; -use thiserror::Error; /// Input for [`RpcConvert::convert_receipts`]. #[derive(Debug, Clone)] @@ -462,7 +453,7 @@ where tx_req: TxReq, evm_env: &EvmEnvFor, ) -> Result, Self::Error> { - tx_req.try_into_tx_env(&evm_env.cfg_env, &evm_env.block_env) + tx_req.try_into_tx_env(evm_env) } } @@ -491,127 +482,17 @@ where } } -/// Converts `self` into `T`. -/// -/// Should create an executable transaction environment using [`TransactionRequest`]. -pub trait TryIntoTxEnv { - /// An associated error that can occur during the conversion. - type Err; - - /// Performs the conversion. - fn try_into_tx_env( - self, - cfg_env: &CfgEnv, - block_env: &BlockEnv, - ) -> Result; -} - -/// An Ethereum specific transaction environment error than can occur during conversion from -/// [`TransactionRequest`]. -#[derive(Debug, Error)] -pub enum EthTxEnvError { - /// Error while decoding or validating transaction request fees. - #[error(transparent)] - CallFees(#[from] CallFeesError), - /// Both data and input fields are set and not equal. 
- #[error(transparent)] - Input(#[from] TransactionInputError), -} - -impl TryIntoTxEnv for TransactionRequest { - type Err = EthTxEnvError; - - fn try_into_tx_env( - self, - cfg_env: &CfgEnv, - block_env: &BlockEnv, - ) -> Result { - // Ensure that if versioned hashes are set, they're not empty - if self.blob_versioned_hashes.as_ref().is_some_and(|hashes| hashes.is_empty()) { - return Err(CallFeesError::BlobTransactionMissingBlobHashes.into()) - } - - let tx_type = self.minimal_tx_type() as u8; - - let Self { - from, - to, - gas_price, - max_fee_per_gas, - max_priority_fee_per_gas, - gas, - value, - input, - nonce, - access_list, - chain_id, - blob_versioned_hashes, - max_fee_per_blob_gas, - authorization_list, - transaction_type: _, - sidecar: _, - } = self; - - let CallFees { max_priority_fee_per_gas, gas_price, max_fee_per_blob_gas } = - CallFees::ensure_fees( - gas_price.map(U256::from), - max_fee_per_gas.map(U256::from), - max_priority_fee_per_gas.map(U256::from), - U256::from(block_env.basefee), - blob_versioned_hashes.as_deref(), - max_fee_per_blob_gas.map(U256::from), - block_env.blob_gasprice().map(U256::from), - )?; - - let gas_limit = gas.unwrap_or( - // Use maximum allowed gas limit. The reason for this - // is that both Erigon and Geth use pre-configured gas cap even if - // it's possible to derive the gas limit from the block: - // - block_env.gas_limit, - ); - - let chain_id = chain_id.unwrap_or(cfg_env.chain_id); - - let caller = from.unwrap_or_default(); - - let nonce = nonce.unwrap_or_default(); - - let env = TxEnv { - tx_type, - gas_limit, - nonce, - caller, - gas_price: gas_price.saturating_to(), - gas_priority_fee: max_priority_fee_per_gas.map(|v| v.saturating_to()), - kind: to.unwrap_or(TxKind::Create), - value: value.unwrap_or_default(), - data: input.try_into_unique_input().map_err(EthTxEnvError::from)?.unwrap_or_default(), - chain_id: Some(chain_id), - access_list: access_list.unwrap_or_default(), - // EIP-4844 fields - blob_hashes: blob_versioned_hashes.unwrap_or_default(), - max_fee_per_blob_gas: max_fee_per_blob_gas - .map(|v| v.saturating_to()) - .unwrap_or_default(), - // EIP-7702 fields - authorization_list: authorization_list - .unwrap_or_default() - .into_iter() - .map(Either::Left) - .collect(), - }; - - Ok(env) - } -} - /// Conversion into transaction RPC response failed. -#[derive(Debug, Clone, Error)] -#[error("Failed to convert transaction into RPC response: {0}")] -pub struct TransactionConversionError(String); +#[derive(Debug, thiserror::Error)] +pub enum TransactionConversionError { + /// Required fields are missing from the transaction request. + #[error("Failed to convert transaction into RPC response: {0}")] + FromTxReq(String), + /// Other conversion errors. + #[error("{0}")] + Other(String), +} /// Generic RPC response object converter for `Evm` and network `Network`. /// /// The main purpose of this struct is to provide an implementation of [`RpcConvert`] for generic @@ -950,7 +831,7 @@ where Ok(self .sim_tx_converter .convert_sim_tx(request) - .map_err(|e| TransactionConversionError(e.to_string()))?) + .map_err(|e| TransactionConversionError::FromTxReq(e.to_string()))?) 
} fn tx_env( @@ -990,13 +871,12 @@ where pub mod op { use super::*; use alloy_consensus::SignableTransaction; - use alloy_primitives::{Address, Bytes, Signature}; + use alloy_signer::Signature; use op_alloy_consensus::{ transaction::{OpDepositInfo, OpTransactionInfo}, OpTxEnvelope, }; use op_alloy_rpc_types::OpTransactionRequest; - use op_revm::OpTransaction; use reth_optimism_primitives::DepositReceipt; use reth_primitives_traits::SignedTransaction; use reth_storage_api::{errors::ProviderError, ReceiptProvider}; @@ -1054,22 +934,6 @@ pub mod op { Ok(tx.into_signed(signature).into()) } } - - impl TryIntoTxEnv> for OpTransactionRequest { - type Err = EthTxEnvError; - - fn try_into_tx_env( - self, - cfg_env: &CfgEnv, - block_env: &BlockEnv, - ) -> Result, Self::Err> { - Ok(OpTransaction { - base: self.as_ref().clone().try_into_tx_env(cfg_env, block_env)?, - enveloped_tx: Some(Bytes::new()), - deposit: Default::default(), - }) - } - } } /// Trait for converting network transaction responses to primitive transaction types. @@ -1146,8 +1010,6 @@ mod transaction_response_tests { #[cfg(feature = "op")] mod op { use super::*; - use crate::transaction::TryIntoTxEnv; - use revm_context::{BlockEnv, CfgEnv}; #[test] fn test_optimism_transaction_conversion() { @@ -1180,23 +1042,5 @@ mod transaction_response_tests { assert!(result.is_ok()); } - - #[test] - fn test_op_into_tx_env() { - use op_alloy_rpc_types::OpTransactionRequest; - use op_revm::{transaction::OpTxTr, OpSpecId}; - use revm_context::Transaction; - - let s = r#"{"from":"0x0000000000000000000000000000000000000000","to":"0x6d362b9c3ab68c0b7c79e8a714f1d7f3af63655f","input":"0x1626ba7ec8ee0d506e864589b799a645ddb88b08f5d39e8049f9f702b3b61fa15e55fc73000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000550000002d6db27c52e3c11c1cf24072004ac75cba49b25bf45f513902e469755e1f3bf2ca8324ad16930b0a965c012a24bb1101f876ebebac047bd3b6bf610205a27171eaaeffe4b5e5589936f4e542d637b627311b0000000000000000000000","data":"0x1626ba7ec8ee0d506e864589b799a645ddb88b08f5d39e8049f9f702b3b61fa15e55fc73000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000550000002d6db27c52e3c11c1cf24072004ac75cba49b25bf45f513902e469755e1f3bf2ca8324ad16930b0a965c012a24bb1101f876ebebac047bd3b6bf610205a27171eaaeffe4b5e5589936f4e542d637b627311b0000000000000000000000","chainId":"0x7a69"}"#; - - let req: OpTransactionRequest = serde_json::from_str(s).unwrap(); - - let cfg = CfgEnv::::default(); - let block_env = BlockEnv::default(); - let tx_env = req.try_into_tx_env(&cfg, &block_env).unwrap(); - assert_eq!(tx_env.gas_limit(), block_env.gas_limit); - assert_eq!(tx_env.gas_price(), 0); - assert!(tx_env.enveloped_tx().unwrap().is_empty()); - } } } diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 825eb485fc..2702a40419 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -23,6 +23,7 @@ reth-tasks.workspace = true reth-engine-primitives.workspace = true reth-transaction-pool.workspace = true reth-primitives-traits.workspace = true +reth-network-api.workspace = true # ethereum alloy-eips.workspace = true @@ -43,7 +44,6 @@ jsonrpsee-types.workspace = true serde.workspace = true thiserror.workspace = true tracing.workspace = true -parking_lot.workspace = true [dev-dependencies] reth-ethereum-engine-primitives.workspace = true diff --git 
a/crates/rpc/rpc-engine-api/src/capabilities.rs b/crates/rpc/rpc-engine-api/src/capabilities.rs index 67a5a1b72d..1e95d7ed1c 100644 --- a/crates/rpc/rpc-engine-api/src/capabilities.rs +++ b/crates/rpc/rpc-engine-api/src/capabilities.rs @@ -19,6 +19,7 @@ pub const CAPABILITIES: &[&str] = &[ "engine_getPayloadBodiesByRangeV1", "engine_getBlobsV1", "engine_getBlobsV2", + "engine_getBlobsV3", ]; // The list of all supported Engine capabilities available over the engine endpoint. diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 6aeadeecba..8db352f7b4 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -16,13 +16,13 @@ use alloy_rpc_types_engine::{ }; use async_trait::async_trait; use jsonrpsee_core::{server::RpcModule, RpcResult}; -use parking_lot::Mutex; use reth_chainspec::EthereumHardforks; use reth_engine_primitives::{ConsensusEngineHandle, EngineApiValidator, EngineTypes}; +use reth_network_api::NetworkInfo; use reth_payload_builder::PayloadStore; use reth_payload_primitives::{ - validate_payload_timestamp, EngineApiMessageVersion, ExecutionPayload, PayloadOrAttributes, - PayloadTypes, + validate_payload_timestamp, EngineApiMessageVersion, MessageValidationKind, + PayloadOrAttributes, PayloadTypes, }; use reth_primitives_traits::{Block, BlockBody}; use reth_rpc_api::{EngineApiServer, IntoEngineApiRpcModule}; @@ -95,7 +95,9 @@ where capabilities: EngineCapabilities, validator: Validator, accept_execution_requests_hash: bool, + network: impl NetworkInfo + 'static, ) -> Self { + let is_syncing = Arc::new(move || network.is_syncing()); let inner = Arc::new(EngineApiInner { provider, chain_spec, @@ -107,8 +109,8 @@ where capabilities, tx_pool, validator, - latest_new_payload_response: Mutex::new(None), accept_execution_requests_hash, + is_syncing, }); Self { inner } } @@ -147,12 +149,7 @@ where .validator .validate_version_specific_fields(EngineApiMessageVersion::V1, payload_or_attrs)?; - Ok(self - .inner - .beacon_consensus - .new_payload(payload) - .await - .inspect(|_| self.inner.on_new_payload_response())?) + Ok(self.inner.beacon_consensus.new_payload(payload).await?) } /// Metered version of `new_payload_v1`. @@ -161,12 +158,9 @@ where payload: PayloadT::ExecutionData, ) -> EngineApiResult { let start = Instant::now(); - let gas_used = payload.gas_used(); - let res = Self::new_payload_v1(self, payload).await; let elapsed = start.elapsed(); self.inner.metrics.latency.new_payload_v1.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); res } @@ -183,12 +177,7 @@ where self.inner .validator .validate_version_specific_fields(EngineApiMessageVersion::V2, payload_or_attrs)?; - Ok(self - .inner - .beacon_consensus - .new_payload(payload) - .await - .inspect(|_| self.inner.on_new_payload_response())?) + Ok(self.inner.beacon_consensus.new_payload(payload).await?) } /// Metered version of `new_payload_v2`. 
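// Editor's sketch, not part of the patch: how a consumer is expected to use the new
// `engine_getBlobsV3` entry added to `CAPABILITIES` above. The method should only be
// called if it shows up in the `engine_exchangeCapabilities` response, and per the API
// doc the outer `Option` is `None` while the node is syncing, while the inner vector
// mirrors the request length with `None` entries for missing or older-version blobs.
fn supports_get_blobs_v3(advertised_capabilities: &[String]) -> bool {
    advertised_capabilities.iter().any(|cap| cap == "engine_getBlobsV3")
}

fn count_returned_blobs<T>(requested: usize, response: Option<Vec<Option<T>>>) -> Option<usize> {
    let entries = response?; // `None` => node is syncing, fall back to another source
    debug_assert_eq!(entries.len(), requested, "response mirrors the request length");
    Some(entries.iter().filter(|entry| entry.is_some()).count())
}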
@@ -197,12 +186,9 @@ where payload: PayloadT::ExecutionData, ) -> EngineApiResult { let start = Instant::now(); - let gas_used = payload.gas_used(); - let res = Self::new_payload_v2(self, payload).await; let elapsed = start.elapsed(); self.inner.metrics.latency.new_payload_v2.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); res } @@ -220,12 +206,7 @@ where .validator .validate_version_specific_fields(EngineApiMessageVersion::V3, payload_or_attrs)?; - Ok(self - .inner - .beacon_consensus - .new_payload(payload) - .await - .inspect(|_| self.inner.on_new_payload_response())?) + Ok(self.inner.beacon_consensus.new_payload(payload).await?) } /// Metrics version of `new_payload_v3` @@ -234,12 +215,10 @@ where payload: PayloadT::ExecutionData, ) -> RpcResult { let start = Instant::now(); - let gas_used = payload.gas_used(); let res = Self::new_payload_v3(self, payload).await; let elapsed = start.elapsed(); self.inner.metrics.latency.new_payload_v3.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); Ok(res?) } @@ -257,12 +236,7 @@ where .validator .validate_version_specific_fields(EngineApiMessageVersion::V4, payload_or_attrs)?; - Ok(self - .inner - .beacon_consensus - .new_payload(payload) - .await - .inspect(|_| self.inner.on_new_payload_response())?) + Ok(self.inner.beacon_consensus.new_payload(payload).await?) } /// Metrics version of `new_payload_v4` @@ -271,13 +245,10 @@ where payload: PayloadT::ExecutionData, ) -> RpcResult { let start = Instant::now(); - let gas_used = payload.gas_used(); - let res = Self::new_payload_v4(self, payload).await; let elapsed = start.elapsed(); self.inner.metrics.latency.new_payload_v4.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); Ok(res?) } @@ -320,7 +291,6 @@ where let start = Instant::now(); let res = Self::fork_choice_updated_v1(self, state, payload_attrs).await; self.inner.metrics.latency.fork_choice_updated_v1.record(start.elapsed()); - self.inner.metrics.fcu_response.update_response_metrics(&res); res } @@ -346,7 +316,6 @@ where let start = Instant::now(); let res = Self::fork_choice_updated_v2(self, state, payload_attrs).await; self.inner.metrics.latency.fork_choice_updated_v2.record(start.elapsed()); - self.inner.metrics.fcu_response.update_response_metrics(&res); res } @@ -372,7 +341,6 @@ where let start = Instant::now(); let res = Self::fork_choice_updated_v3(self, state, payload_attrs).await; self.inner.metrics.latency.fork_choice_updated_v3.record(start.elapsed()); - self.inner.metrics.fcu_response.update_response_metrics(&res); res } @@ -399,9 +367,15 @@ where where EngineT::BuiltPayload: TryInto, { - // validate timestamp according to engine rules + // Validate timestamp according to engine rules + // Enforces Osaka restrictions on `getPayloadV4`. let timestamp = self.get_payload_timestamp(payload_id).await?; - validate_payload_timestamp(&self.inner.chain_spec, version, timestamp)?; + validate_payload_timestamp( + &self.inner.chain_spec, + version, + timestamp, + MessageValidationKind::GetPayload, + )?; // Now resolve the payload self.get_built_payload(payload_id).await?.try_into().map_err(|_| { @@ -493,7 +467,7 @@ where /// Returns the most recent version of the payload that is available in the corresponding /// payload build process at the time of receiving this call. 
/// - /// See also + /// See also /// /// Note: /// > Provider software MAY stop the corresponding build process after serving this call. @@ -694,9 +668,9 @@ where hashes: Vec, ) -> EngineApiResult { let start = Instant::now(); - let res = Self::get_payload_bodies_by_hash_v1(self, hashes); + let res = Self::get_payload_bodies_by_hash_v1(self, hashes).await; self.inner.metrics.latency.get_payload_bodies_by_hash_v1.record(start.elapsed()); - res.await + res } /// Validates the `engine_forkchoiceUpdated` payload attributes and executes the forkchoice @@ -718,8 +692,6 @@ where state: ForkchoiceState, payload_attrs: Option, ) -> EngineApiResult { - self.inner.record_elapsed_time_on_fcu(); - if let Some(ref attrs) = payload_attrs { let attr_validation_res = self.inner.validator.ensure_well_formed_attributes(version, attrs); @@ -824,6 +796,35 @@ where .map_err(|err| EngineApiError::Internal(Box::new(err))) } + fn get_blobs_v3( + &self, + versioned_hashes: Vec, + ) -> EngineApiResult>>> { + // Check if Osaka fork is active + let current_timestamp = + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap_or_default().as_secs(); + if !self.inner.chain_spec.is_osaka_active_at_timestamp(current_timestamp) { + return Err(EngineApiError::EngineObjectValidationError( + reth_payload_primitives::EngineObjectValidationError::UnsupportedFork, + )); + } + + if versioned_hashes.len() > MAX_BLOB_LIMIT { + return Err(EngineApiError::BlobRequestTooLarge { len: versioned_hashes.len() }) + } + + // Spec requires returning `null` if syncing. + if (*self.inner.is_syncing)() { + return Ok(None) + } + + self.inner + .tx_pool + .get_blobs_for_versioned_hashes_v3(&versioned_hashes) + .map(Some) + .map_err(|err| EngineApiError::Internal(Box::new(err))) + } + /// Metered version of `get_blobs_v2`. pub fn get_blobs_v2_metered( &self, @@ -859,6 +860,27 @@ where res } + + /// Metered version of `get_blobs_v3`. + pub fn get_blobs_v3_metered( + &self, + versioned_hashes: Vec, + ) -> EngineApiResult>>> { + let hashes_len = versioned_hashes.len(); + let start = Instant::now(); + let res = Self::get_blobs_v3(self, versioned_hashes); + self.inner.metrics.latency.get_blobs_v3.record(start.elapsed()); + + if let Ok(Some(blobs)) = &res { + let blobs_found = blobs.iter().flatten().count(); + let blobs_missed = hashes_len - blobs_found; + + self.inner.metrics.blob_metrics.blob_count.increment(blobs_found as u64); + self.inner.metrics.blob_metrics.blob_misses.increment(blobs_missed as u64); + } + + res + } } // This is the concrete ethereum engine API implementation. @@ -965,7 +987,7 @@ where Ok(self.fork_choice_updated_v2_metered(fork_choice_state, payload_attributes).await?) } - /// Handler for `engine_forkchoiceUpdatedV2` + /// Handler for `engine_forkchoiceUpdatedV3` /// /// See also async fn fork_choice_updated_v3( @@ -1131,6 +1153,14 @@ where trace!(target: "rpc::engine", "Serving engine_getBlobsV2"); Ok(self.get_blobs_v2_metered(versioned_hashes)?) } + + async fn get_blobs_v3( + &self, + versioned_hashes: Vec, + ) -> RpcResult>>> { + trace!(target: "rpc::engine", "Serving engine_getBlobsV3"); + Ok(self.get_blobs_v3_metered(versioned_hashes)?) + } } impl IntoEngineApiRpcModule @@ -1186,29 +1216,9 @@ struct EngineApiInner>, accept_execution_requests_hash: bool, -} - -impl - EngineApiInner -where - PayloadT: PayloadTypes, -{ - /// Tracks the elapsed time between the new payload response and the received forkchoice update - /// request. 
- fn record_elapsed_time_on_fcu(&self) { - if let Some(start_time) = self.latest_new_payload_response.lock().take() { - let elapsed_time = start_time.elapsed(); - self.metrics.latency.new_payload_forkchoice_updated_time_diff.record(elapsed_time); - } - } - - /// Updates the timestamp for the latest new payload response. - fn on_new_payload_response(&self) { - self.latest_new_payload_response.lock().replace(Instant::now()); - } + /// Returns `true` if the node is currently syncing. + is_syncing: Arc bool + Send + Sync>, } #[cfg(test)] @@ -1216,10 +1226,13 @@ mod tests { use super::*; use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; use assert_matches::assert_matches; - use reth_chainspec::{ChainSpec, MAINNET}; + use reth_chainspec::{ChainSpec, ChainSpecBuilder, MAINNET}; use reth_engine_primitives::BeaconEngineMessage; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_ethereum_primitives::Block; + use reth_network_api::{ + noop::NoopNetwork, EthProtocolInfo, NetworkError, NetworkInfo, NetworkStatus, + }; use reth_node_ethereum::EthereumEngineValidator; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_provider::test_utils::MockEthProvider; @@ -1260,6 +1273,7 @@ mod tests { EngineCapabilities::default(), EthereumEngineValidator::new(chain_spec.clone()), false, + NoopNetwork::default(), ); let handle = EngineApiTestHandle { chain_spec, provider, from_api: engine_rx }; (handle, api) @@ -1301,6 +1315,76 @@ mod tests { assert_matches!(handle.from_api.recv().await, Some(BeaconEngineMessage::NewPayload { .. })); } + #[derive(Clone)] + struct TestNetworkInfo { + syncing: bool, + } + + impl NetworkInfo for TestNetworkInfo { + fn local_addr(&self) -> std::net::SocketAddr { + (std::net::Ipv4Addr::UNSPECIFIED, 0).into() + } + + async fn network_status(&self) -> Result { + #[allow(deprecated)] + Ok(NetworkStatus { + client_version: "test".to_string(), + protocol_version: 5, + eth_protocol_info: EthProtocolInfo { + network: 1, + difficulty: None, + genesis: Default::default(), + config: Default::default(), + head: Default::default(), + }, + capabilities: vec![], + }) + } + + fn chain_id(&self) -> u64 { + 1 + } + + fn is_syncing(&self) -> bool { + self.syncing + } + + fn is_initially_syncing(&self) -> bool { + self.syncing + } + } + + #[tokio::test] + async fn get_blobs_v3_returns_null_when_syncing() { + let chain_spec: Arc = + Arc::new(ChainSpecBuilder::mainnet().osaka_activated().build()); + let provider = Arc::new(MockEthProvider::default()); + let payload_store = spawn_test_payload_service::(); + let (to_engine, _engine_rx) = unbounded_channel::>(); + + let api = EngineApi::new( + provider, + chain_spec.clone(), + ConsensusEngineHandle::new(to_engine), + payload_store.into(), + NoopTransactionPool::default(), + Box::::default(), + ClientVersionV1 { + code: ClientCode::RH, + name: "Reth".to_string(), + version: "v0.0.0-test".to_string(), + commit: "test".to_string(), + }, + EngineCapabilities::default(), + EthereumEngineValidator::new(chain_spec), + false, + TestNetworkInfo { syncing: true }, + ); + + let res = api.get_blobs_v3_metered(vec![B256::ZERO]); + assert_matches!(res, Ok(None)); + } + // tests covering `engine_getPayloadBodiesByRange` and `engine_getPayloadBodiesByHash` mod get_payload_bodies { use super::*; diff --git a/crates/rpc/rpc-engine-api/src/metrics.rs b/crates/rpc/rpc-engine-api/src/metrics.rs index 95156e490b..d2ec2d3e08 100644 --- a/crates/rpc/rpc-engine-api/src/metrics.rs +++ b/crates/rpc/rpc-engine-api/src/metrics.rs @@ -1,8 +1,4 @@ 
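A rough sketch of the syncing gate introduced above, outside of reth's types: the constructor accepts any `NetworkInfo` implementation but stores only its `is_syncing` check as an `Arc`'d closure, and `engine_getBlobsV3` consults that closure to return `null` while the node is syncing. `NetworkLike`, `StubNetwork`, and `BlobApi` below are illustrative stand-ins, not reth APIs.

```rust
use std::sync::Arc;

/// Illustrative stand-in for the part of `NetworkInfo` that matters here.
trait NetworkLike: Send + Sync + 'static {
    fn is_syncing(&self) -> bool;
}

struct StubNetwork {
    syncing: bool,
}

impl NetworkLike for StubNetwork {
    fn is_syncing(&self) -> bool {
        self.syncing
    }
}

/// Mirrors the new inner state: the network handle is captured once as a closure,
/// like `is_syncing: Arc<dyn Fn() -> bool + Send + Sync>` in `EngineApiInner`.
struct BlobApi {
    is_syncing: Arc<dyn Fn() -> bool + Send + Sync>,
}

impl BlobApi {
    fn new(network: impl NetworkLike) -> Self {
        Self { is_syncing: Arc::new(move || network.is_syncing()) }
    }

    /// Spec behaviour sketched with fake blob data: `None` (JSON `null`) while
    /// syncing, otherwise one entry per requested hash.
    fn blobs(&self, hashes: &[u64]) -> Option<Vec<Option<u64>>> {
        if (*self.is_syncing)() {
            return None;
        }
        Some(hashes.iter().copied().map(Some).collect())
    }
}

fn main() {
    assert_eq!(BlobApi::new(StubNetwork { syncing: true }).blobs(&[1, 2]), None);
    assert_eq!(
        BlobApi::new(StubNetwork { syncing: false }).blobs(&[1, 2]),
        Some(vec![Some(1), Some(2)])
    );
}
```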
-use std::time::Duration; - -use crate::EngineApiError; -use alloy_rpc_types_engine::{ForkchoiceUpdated, PayloadStatus, PayloadStatusEnum}; -use metrics::{Counter, Gauge, Histogram}; +use metrics::{Counter, Histogram}; use reth_metrics::Metrics; /// All beacon consensus engine metrics @@ -10,10 +6,6 @@ use reth_metrics::Metrics; pub(crate) struct EngineApiMetrics { /// Engine API latency metrics pub(crate) latency: EngineApiLatencyMetrics, - /// Engine API forkchoiceUpdated response type metrics - pub(crate) fcu_response: ForkchoiceUpdatedResponseMetrics, - /// Engine API newPayload response type metrics - pub(crate) new_payload_response: NewPayloadStatusResponseMetrics, /// Blob-related metrics pub(crate) blob_metrics: BlobMetrics, } @@ -36,8 +28,6 @@ pub(crate) struct EngineApiLatencyMetrics { pub(crate) fork_choice_updated_v2: Histogram, /// Latency for `engine_forkchoiceUpdatedV3` pub(crate) fork_choice_updated_v3: Histogram, - /// Time diff between `engine_newPayloadV*` and the next FCU - pub(crate) new_payload_forkchoice_updated_time_diff: Histogram, /// Latency for `engine_getPayloadV1` pub(crate) get_payload_v1: Histogram, /// Latency for `engine_getPayloadV2` @@ -56,58 +46,8 @@ pub(crate) struct EngineApiLatencyMetrics { pub(crate) get_blobs_v1: Histogram, /// Latency for `engine_getBlobsV2` pub(crate) get_blobs_v2: Histogram, -} - -/// Metrics for engine API forkchoiceUpdated responses. -#[derive(Metrics)] -#[metrics(scope = "engine.rpc")] -pub(crate) struct ForkchoiceUpdatedResponseMetrics { - /// The total count of forkchoice updated messages received. - pub(crate) forkchoice_updated_messages: Counter, - /// The total count of forkchoice updated messages that we responded to with - /// [`Invalid`](alloy_rpc_types_engine::PayloadStatusEnum#Invalid). - pub(crate) forkchoice_updated_invalid: Counter, - /// The total count of forkchoice updated messages that we responded to with - /// [`Valid`](alloy_rpc_types_engine::PayloadStatusEnum#Valid). - pub(crate) forkchoice_updated_valid: Counter, - /// The total count of forkchoice updated messages that we responded to with - /// [`Syncing`](alloy_rpc_types_engine::PayloadStatusEnum#Syncing). - pub(crate) forkchoice_updated_syncing: Counter, - /// The total count of forkchoice updated messages that we responded to with - /// [`Accepted`](alloy_rpc_types_engine::PayloadStatusEnum#Accepted). - pub(crate) forkchoice_updated_accepted: Counter, - /// The total count of forkchoice updated messages that were unsuccessful, i.e. we responded - /// with an error type that is not a [`PayloadStatusEnum`]. - pub(crate) forkchoice_updated_error: Counter, -} - -/// Metrics for engine API newPayload responses. -#[derive(Metrics)] -#[metrics(scope = "engine.rpc")] -pub(crate) struct NewPayloadStatusResponseMetrics { - /// The total count of new payload messages received. - pub(crate) new_payload_messages: Counter, - /// The total count of new payload messages that we responded to with - /// [Invalid](alloy_rpc_types_engine::PayloadStatusEnum#Invalid). - pub(crate) new_payload_invalid: Counter, - /// The total count of new payload messages that we responded to with - /// [Valid](alloy_rpc_types_engine::PayloadStatusEnum#Valid). - pub(crate) new_payload_valid: Counter, - /// The total count of new payload messages that we responded to with - /// [Syncing](alloy_rpc_types_engine::PayloadStatusEnum#Syncing). 
- pub(crate) new_payload_syncing: Counter, - /// The total count of new payload messages that we responded to with - /// [Accepted](alloy_rpc_types_engine::PayloadStatusEnum#Accepted). - pub(crate) new_payload_accepted: Counter, - /// The total count of new payload messages that were unsuccessful, i.e. we responded with an - /// error type that is not a [`PayloadStatusEnum`]. - pub(crate) new_payload_error: Counter, - /// The total gas of valid new payload messages received. - pub(crate) new_payload_total_gas: Histogram, - /// The gas per second of valid new payload messages received. - pub(crate) new_payload_gas_per_second: Histogram, - /// Latency for the last `engine_newPayloadV*` call - pub(crate) new_payload_last: Gauge, + /// Latency for `engine_getBlobsV3` + pub(crate) get_blobs_v3: Histogram, } #[derive(Metrics)] @@ -126,48 +66,3 @@ pub(crate) struct BlobMetrics { /// Number of times getBlobsV2 responded with “miss” pub(crate) get_blobs_requests_failure_total: Counter, } - -impl NewPayloadStatusResponseMetrics { - /// Increment the newPayload counter based on the given rpc result - pub(crate) fn update_response_metrics( - &self, - result: &Result, - gas_used: u64, - time: Duration, - ) { - self.new_payload_last.set(time); - match result { - Ok(status) => match status.status { - PayloadStatusEnum::Valid => { - self.new_payload_valid.increment(1); - self.new_payload_total_gas.record(gas_used as f64); - self.new_payload_gas_per_second.record(gas_used as f64 / time.as_secs_f64()); - } - PayloadStatusEnum::Syncing => self.new_payload_syncing.increment(1), - PayloadStatusEnum::Accepted => self.new_payload_accepted.increment(1), - PayloadStatusEnum::Invalid { .. } => self.new_payload_invalid.increment(1), - }, - Err(_) => self.new_payload_error.increment(1), - } - self.new_payload_messages.increment(1); - } -} - -impl ForkchoiceUpdatedResponseMetrics { - /// Increment the forkchoiceUpdated counter based on the given rpc result - pub(crate) fn update_response_metrics( - &self, - result: &Result, - ) { - match result { - Ok(status) => match status.payload_status.status { - PayloadStatusEnum::Valid => self.forkchoice_updated_valid.increment(1), - PayloadStatusEnum::Syncing => self.forkchoice_updated_syncing.increment(1), - PayloadStatusEnum::Accepted => self.forkchoice_updated_accepted.increment(1), - PayloadStatusEnum::Invalid { .. 
} => self.forkchoice_updated_invalid.increment(1), - }, - Err(_) => self.forkchoice_updated_error.increment(1), - } - self.forkchoice_updated_messages.increment(1); - } -} diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index 88a7f05932..830e8cf83a 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # reth -revm = { workspace = true, features = ["optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee", "optional_fee_charge"] } +revm = { workspace = true, features = ["optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee", "optional_fee_charge", "memory_limit"] } reth-chain-state.workspace = true revm-inspectors.workspace = true reth-primitives-traits = { workspace = true, features = ["rpc-compat"] } diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index 40f19c8622..6ca5585858 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -18,7 +18,7 @@ use alloy_serde::JsonStorageKey; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth_primitives_traits::TxTy; use reth_rpc_convert::RpcTxReq; -use reth_rpc_eth_types::FillTransactionResult; +use reth_rpc_eth_types::FillTransaction; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use tracing::trace; @@ -242,7 +242,7 @@ pub trait EthApi< /// Fills the defaults on a given unsigned transaction. #[method(name = "fillTransaction")] - async fn fill_transaction(&self, request: TxReq) -> RpcResult>; + async fn fill_transaction(&self, request: TxReq) -> RpcResult>; /// Simulate arbitrary number of transactions at an arbitrary blockchain index, with the /// optionality of state overrides @@ -550,8 +550,9 @@ where trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionByHash"); Ok(EthTransactions::transaction_by_hash(self, hash) .await? - .map(|tx| tx.into_transaction(self.tx_resp_builder())) - .transpose()?) + .map(|tx| tx.into_transaction(self.converter())) + .transpose() + .map_err(T::Error::from)?) } /// Handler for: `eth_getRawTransactionByBlockHashAndIndex` @@ -703,7 +704,7 @@ where async fn fill_transaction( &self, request: RpcTxReq, - ) -> RpcResult>> { + ) -> RpcResult>> { trace!(target: "rpc::eth", ?request, "Serving eth_fillTransaction"); Ok(EthTransactions::fill_transaction(self, request).await?) } @@ -827,7 +828,7 @@ where /// Handler for: `eth_sendTransaction` async fn send_transaction(&self, request: RpcTxReq) -> RpcResult { trace!(target: "rpc::eth", ?request, "Serving eth_sendTransaction"); - Ok(EthTransactions::send_transaction(self, request).await?) + Ok(EthTransactions::send_transaction_request(self, request).await?) } /// Handler for: `eth_sendRawTransaction` diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 17e4b000b3..753d13b9a8 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -30,9 +30,7 @@ pub type BlockAndReceiptsResult = Result< /// Block related functions for the [`EthApiServer`](crate::EthApiServer) trait in the /// `eth_` namespace. -pub trait EthBlocks: - LoadBlock> -{ +pub trait EthBlocks: LoadBlock> { /// Returns the block header for the given block id. 
fn rpc_block_header( &self, @@ -61,8 +59,8 @@ pub trait EthBlocks: let block = block.clone_into_rpc_block( full.into(), - |tx, tx_info| self.tx_resp_builder().fill(tx, tx_info), - |header, size| self.tx_resp_builder().convert_header(header, size), + |tx, tx_info| self.converter().fill(tx, tx_info), + |header, size| self.converter().convert_header(header, size), )?; Ok(Some(block)) } @@ -76,7 +74,11 @@ pub trait EthBlocks: block_id: BlockId, ) -> impl Future, Self::Error>> + Send { async move { + // If no pending block from provider, build the pending block locally. if block_id.is_pending() { + if let Some(pending) = self.local_pending_block().await? { + return Ok(Some(pending.block.body().transaction_count())); + } // Pending block can be fetched directly without need for caching return Ok(self .provider() @@ -156,10 +158,10 @@ pub trait EthBlocks: }) .collect::>(); - return self - .tx_resp_builder() + return Ok(self + .converter() .convert_receipts_with_block(inputs, block.sealed_block()) - .map(Some) + .map(Some)?) } Ok(None) @@ -258,7 +260,7 @@ pub trait EthBlocks: alloy_consensus::Block::::uncle(header); let size = block.length(); let header = self - .tx_resp_builder() + .converter() .convert_header(SealedHeader::new_unhashed(block.header), size)?; Ok(Block { uncles: vec![], diff --git a/crates/rpc/rpc-eth-api/src/helpers/blocking_task.rs b/crates/rpc/rpc-eth-api/src/helpers/blocking_task.rs index 886ff63914..c174cd9bde 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/blocking_task.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/blocking_task.rs @@ -7,18 +7,29 @@ use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, TaskSpawner, }; -use tokio::sync::{oneshot, AcquireError, OwnedSemaphorePermit}; +use std::sync::Arc; +use tokio::sync::{oneshot, AcquireError, OwnedSemaphorePermit, Semaphore}; use crate::EthApiTypes; -/// Executes code on a blocking thread. +/// Helpers for spawning blocking operations. +/// +/// Operations can be blocking because they require lots of CPU work and/or IO. +/// +/// This differentiates between workloads that are primarily CPU bound and heavier in general (such +/// as tracing tasks) and tasks that have a more balanced profile (io and cpu), such as `eth_call` +/// and alike. +/// +/// This provides access to semaphores that permit how many of those are permitted concurrently. +/// It's expected that tracing related tasks are configured with a lower threshold, because not only +/// are they CPU heavy but they can also accumulate more memory for the traces. pub trait SpawnBlocking: EthApiTypes + Clone + Send + Sync + 'static { /// Returns a handle for spawning IO heavy blocking tasks. /// /// Runtime access in default trait method implementations. fn io_task_spawner(&self) -> impl TaskSpawner; - /// Returns a handle for spawning CPU heavy blocking tasks. + /// Returns a handle for spawning __CPU heavy__ blocking tasks, such as tracing requests. /// /// Thread pool access in default trait method implementations. fn tracing_task_pool(&self) -> &BlockingTaskPool; @@ -26,21 +37,121 @@ pub trait SpawnBlocking: EthApiTypes + Clone + Send + Sync + 'static { /// Returns handle to semaphore for pool of CPU heavy blocking tasks. fn tracing_task_guard(&self) -> &BlockingTaskGuard; + /// Returns handle to semaphore for blocking IO tasks. + /// + /// This semaphore is used to limit concurrent blocking IO operations like `eth_call`, + /// `eth_estimateGas`, and similar methods that require EVM execution. 
+ fn blocking_io_task_guard(&self) -> &Arc; + + /// Acquires a permit from the tracing task semaphore. + /// + /// This should be used for __CPU heavy__ operations like `debug_traceTransaction`, + /// `debug_traceCall`, and similar tracing methods. These tasks are typically: + /// - Primarily CPU bound with intensive computation + /// - Can accumulate significant memory for trace results + /// - Expected to have lower concurrency limits than general blocking IO tasks + /// + /// For blocking IO tasks like `eth_call` or `eth_estimateGas`, use + /// [`acquire_owned_blocking_io`](Self::acquire_owned_blocking_io) instead. + /// /// See also [`Semaphore::acquire_owned`](`tokio::sync::Semaphore::acquire_owned`). - fn acquire_owned( + fn acquire_owned_tracing( &self, ) -> impl Future> + Send { self.tracing_task_guard().clone().acquire_owned() } + /// Acquires multiple permits from the tracing task semaphore. + /// + /// This should be used for particularly heavy tracing operations that require more resources + /// than a standard trace. The permit count should reflect the expected resource consumption + /// relative to a standard tracing operation. + /// + /// Like [`acquire_owned_tracing`](Self::acquire_owned_tracing), this is specifically for + /// CPU-intensive tracing tasks, not general blocking IO operations. + /// /// See also [`Semaphore::acquire_many_owned`](`tokio::sync::Semaphore::acquire_many_owned`). - fn acquire_many_owned( + fn acquire_many_owned_tracing( &self, n: u32, ) -> impl Future> + Send { self.tracing_task_guard().clone().acquire_many_owned(n) } + /// Acquires a permit from the blocking IO request semaphore. + /// + /// This should be used for operations like `eth_call`, `eth_estimateGas`, and similar methods + /// that require EVM execution and are spawned as blocking tasks. + /// + /// See also [`Semaphore::acquire_owned`](`tokio::sync::Semaphore::acquire_owned`). + fn acquire_owned_blocking_io( + &self, + ) -> impl Future> + Send { + self.blocking_io_task_guard().clone().acquire_owned() + } + + /// Acquires multiple permits from the blocking IO request semaphore. + /// + /// This should be used for operations that may require more resources than a single permit + /// allows. + /// + /// See also [`Semaphore::acquire_many_owned`](`tokio::sync::Semaphore::acquire_many_owned`). + fn acquire_many_owned_blocking_io( + &self, + n: u32, + ) -> impl Future> + Send { + self.blocking_io_task_guard().clone().acquire_many_owned(n) + } + + /// Acquires permits from the blocking IO request semaphore based on a calculated weight. + /// + /// The weight determines the maximum number of concurrent requests of this type that can run. + /// For example, if the semaphore has 256 total permits and `weight=10`, then at most 10 + /// concurrent requests of this type are allowed. + /// + /// The permits acquired per request is calculated as `total_permits / weight`, with an + /// adjustment: if this result is even, we add 1 to ensure that `weight - 1` permits are + /// always available for other tasks, preventing complete semaphore exhaustion. 
+ /// + /// This should be used to explicitly limit concurrent requests based on their expected + /// resource consumption: + /// + /// - **Block range queries**: Higher weight for larger ranges (fewer concurrent requests) + /// - **Complex calls**: Higher weight for expensive operations + /// - **Batch operations**: Higher weight for larger batches + /// - **Historical queries**: Higher weight for deeper history lookups + /// + /// # Examples + /// + /// ```ignore + /// // For a heavy request, use higher weight to limit concurrency + /// let weight = 20; // Allow at most 20 concurrent requests of this type + /// let _permit = self.acquire_weighted_blocking_io(weight).await?; + /// ``` + /// + /// This helps prevent resource exhaustion from concurrent expensive operations while allowing + /// many cheap operations to run in parallel. + /// + /// See also [`Semaphore::acquire_many_owned`](`tokio::sync::Semaphore::acquire_many_owned`). + fn acquire_weighted_blocking_io( + &self, + weight: u32, + ) -> impl Future> + Send { + let guard = self.blocking_io_task_guard(); + let total_permits = guard.available_permits().max(1) as u32; + let weight = weight.max(1); + let mut permits_to_acquire = (total_permits / weight).max(1); + + // If total_permits divides evenly by weight, add 1 to ensure that when `weight` + // concurrent requests are running, at least `weight - 1` permits remain available + // for other tasks + if total_permits.is_multiple_of(weight) { + permits_to_acquire += 1; + } + + guard.clone().acquire_many_owned(permits_to_acquire) + } + /// Executes the future on a new blocking task. /// /// Note: This is expected for futures that are dominated by blocking IO operations, for tracing diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 7eb10c1053..d3790251e5 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -25,21 +25,18 @@ use reth_evm::{ }; use reth_node_api::BlockBody; use reth_primitives_traits::Recovered; -use reth_revm::{database::StateProviderDatabase, db::State}; +use reth_revm::{cancelled::CancelOnDrop, database::StateProviderDatabase, db::State}; use reth_rpc_convert::{RpcConvert, RpcTxReq}; use reth_rpc_eth_types::{ - cache::db::{StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, - error::{api::FromEvmHalt, ensure_success, FromEthApiError}, + cache::db::StateProviderTraitObjWrapper, + error::{AsEthApiError, FromEthApiError}, simulate::{self, EthSimulateError}, - EthApiError, RevertError, StateCacheDb, + EthApiError, StateCacheDb, }; -use reth_storage_api::{BlockIdReader, ProviderTx}; +use reth_storage_api::{BlockIdReader, ProviderTx, StateProviderBox}; use revm::{ context::Block, - context_interface::{ - result::{ExecutionResult, ResultAndState}, - Transaction, - }, + context_interface::{result::ResultAndState, Transaction}, Database, DatabaseCommit, }; use revm_inspectors::{access_list::AccessListInspector, transfer::TransferInspector}; @@ -92,10 +89,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA self.recovered_block(block).await?.ok_or(EthApiError::HeaderNotFound(block))?; let mut parent = base_block.sealed_header().clone(); - let this = self.clone(); - self.spawn_with_state_at_block(block, move |state| { - let mut db = - State::builder().with_database(StateProviderDatabase::new(state)).build(); + self.spawn_with_state_at_block(block, move |this, mut db| { let mut blocks: Vec>> = 
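A worked example of the permit arithmetic described above, assuming the guard's semaphore currently has all 256 of its permits free; `permits_for_weight` is an illustrative helper, not a reth API.

```rust
/// Mirrors the weight-based acquisition sketched above: each request takes
/// `total_permits / weight` permits (at least 1), plus one extra permit when
/// `total_permits` divides evenly by `weight`.
fn permits_for_weight(total_permits: u32, weight: u32) -> u32 {
    let total_permits = total_permits.max(1);
    let weight = weight.max(1);
    let mut permits = (total_permits / weight).max(1);
    // Same adjustment as the `is_multiple_of` branch in the trait method above.
    if total_permits % weight == 0 {
        permits += 1;
    }
    permits
}

fn main() {
    // 256 permits, weight 10: 256 / 10 = 25 permits per request, so at most
    // 256 / 25 = 10 such requests can hold permits at the same time.
    let p = permits_for_weight(256, 10);
    assert_eq!(p, 25);
    assert_eq!(256 / p, 10);

    // 256 permits, weight 16: 256 divides evenly by 16, so 17 permits per request;
    // at most 15 requests fit (15 * 17 = 255), leaving one permit free instead of
    // saturating the semaphore completely.
    let p = permits_for_weight(256, 16);
    assert_eq!(p, 17);
    assert_eq!(256 / p, 15);
    assert_eq!(256 - (256 / p) * p, 1);
}
```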
Vec::with_capacity(block_state_calls.len()); for block in block_state_calls { @@ -165,6 +159,13 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA .context_for_next_block(&parent, this.next_env_attributes(&parent)?) .map_err(RethError::other) .map_err(Self::Error::from_eth_err)?; + let map_err = |e: EthApiError| -> Self::Error { + match e.as_simulate_error() { + Some(sim_err) => Self::Error::from_eth_err(EthApiError::other(sim_err)), + None => Self::Error::from_eth_err(e), + } + }; + let (result, results) = if trace_transfers { // prepare inspector to capture transfer inside the evm so they are recorded // and included in logs @@ -178,8 +179,9 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA calls, default_gas_limit, chain_id, - this.tx_resp_builder(), - )? + this.converter(), + ) + .map_err(map_err)? } else { let evm = this.evm_config().evm_with_env(&mut db, evm_env); let builder = this.evm_config().create_block_builder(evm, &parent, ctx); @@ -188,17 +190,18 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA calls, default_gas_limit, chain_id, - this.tx_resp_builder(), - )? + this.converter(), + ) + .map_err(map_err)? }; parent = result.block.clone_sealed_header(); - let block = simulate::build_simulated_block( + let block = simulate::build_simulated_block::( result.block, results, return_full_transactions.into(), - this.tx_resp_builder(), + this.converter(), )?; blocks.push(block); @@ -218,10 +221,11 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA overrides: EvmOverrides, ) -> impl Future> + Send { async move { + let _permit = self.acquire_owned_blocking_io().await; let res = self.transact_call_at(request, block_number.unwrap_or_default(), overrides).await?; - ensure_success(res.result) + Self::Error::ensure_success(res.result) } } @@ -280,11 +284,8 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA replay_block_txs = false; } - let this = self.clone(); - self.spawn_with_state_at_block(at.into(), move |state| { + self.spawn_with_state_at_block(at, move |this, mut db| { let mut all_results = Vec::with_capacity(bundles.len()); - let mut db = - State::builder().with_database(StateProviderDatabase::new(state)).build(); if replay_block_txs { // only need to replay the transactions in the block if not all transactions are @@ -334,7 +335,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA }, )?; - match ensure_success::<_, Self::Error>(res.result) { + match Self::Error::ensure_success(res.result) { Ok(output) => { bundle_results .push(EthCallResponse { value: Some(output), error: None }); @@ -432,46 +433,22 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let result = this.inspect(&mut db, evm_env.clone(), tx_env.clone(), &mut inspector)?; let access_list = inspector.into_access_list(); + let gas_used = result.result.gas_used(); tx_env.set_access_list(access_list.clone()); - match result.result { - ExecutionResult::Halt { reason, gas_used } => { - let error = - Some(Self::Error::from_evm_halt(reason, tx_env.gas_limit()).to_string()); - return Ok(AccessListResult { - access_list, - gas_used: U256::from(gas_used), - error, - }) - } - ExecutionResult::Revert { output, gas_used } => { - let error = Some(RevertError::new(output).to_string()); - return Ok(AccessListResult { - access_list, - gas_used: U256::from(gas_used), - error, - }) - } - ExecutionResult::Success 
{ .. } => {} - }; + if let Err(err) = Self::Error::ensure_success(result.result) { + return Ok(AccessListResult { + access_list, + gas_used: U256::from(gas_used), + error: Some(err.to_string()), + }); + } // transact again to get the exact gas used - let gas_limit = tx_env.gas_limit(); let result = this.transact(&mut db, evm_env, tx_env)?; - let res = match result.result { - ExecutionResult::Halt { reason, gas_used } => { - let error = Some(Self::Error::from_evm_halt(reason, gas_limit).to_string()); - AccessListResult { access_list, gas_used: U256::from(gas_used), error } - } - ExecutionResult::Revert { output, gas_used } => { - let error = Some(RevertError::new(output).to_string()); - AccessListResult { access_list, gas_used: U256::from(gas_used), error } - } - ExecutionResult::Success { gas_used, .. } => { - AccessListResult { access_list, gas_used: U256::from(gas_used), error: None } - } - }; + let gas_used = result.result.gas_used(); + let error = Self::Error::ensure_success(result.result).err().map(|e| e.to_string()); - Ok(res) + Ok(AccessListResult { access_list, gas_used: U256::from(gas_used), error }) }) } } @@ -493,6 +470,9 @@ pub trait Call: /// Returns the maximum number of blocks accepted for `eth_simulateV1`. fn max_simulate_blocks(&self) -> u64; + /// Returns the maximum memory the EVM can allocate per RPC request. + fn evm_memory_limit(&self) -> u64; + /// Returns the max gas limit that the caller can afford given a transaction environment. fn caller_gas_allowance( &self, @@ -511,13 +491,11 @@ pub trait Call: ) -> impl Future> + Send where R: Send + 'static, - F: FnOnce(Self, StateProviderTraitObjWrapper<'_>) -> Result - + Send - + 'static, + F: FnOnce(Self, StateProviderBox) -> Result + Send + 'static, { self.spawn_blocking_io_fut(move |this| async move { let state = this.state_at_block_id(at).await?; - f(this, StateProviderTraitObjWrapper(&state)) + f(this, state) }) } @@ -558,6 +536,11 @@ pub trait Call: } /// Executes the call request at the given [`BlockId`]. + /// + /// This spawns a new task that obtains the state for the given [`BlockId`] and then transacts + /// the call [`Self::transact`]. If the future is dropped before the (blocking) transact + /// call is invoked, then the task is cancelled early, (for example if the request is terminated + /// early client-side). 
fn transact_call_at( &self, request: RpcTxReq<::Network>, @@ -567,25 +550,42 @@ pub trait Call: where Self: LoadPendingBlock, { - let this = self.clone(); - self.spawn_with_call_at(request, at, overrides, move |db, evm_env, tx_env| { - this.transact(db, evm_env, tx_env) - }) + async move { + let guard = CancelOnDrop::default(); + let cancel = guard.clone(); + let this = self.clone(); + + let res = self + .spawn_with_call_at(request, at, overrides, move |db, evm_env, tx_env| { + if cancel.is_cancelled() { + // callsite dropped the guard + return Err(EthApiError::InternalEthError.into()) + } + this.transact(db, evm_env, tx_env) + }) + .await; + drop(guard); + res + } } /// Executes the closure with the state that corresponds to the given [`BlockId`] on a new task fn spawn_with_state_at_block( &self, - at: BlockId, + at: impl Into, f: F, ) -> impl Future> + Send where - F: FnOnce(StateProviderTraitObjWrapper<'_>) -> Result + Send + 'static, + F: FnOnce(Self, StateCacheDb) -> Result + Send + 'static, R: Send + 'static, { + let at = at.into(); self.spawn_blocking_io_fut(move |this| async move { let state = this.state_at_block_id(at).await?; - f(StateProviderTraitObjWrapper(&state)) + let db = State::builder() + .with_database(StateProviderDatabase::new(StateProviderTraitObjWrapper(state))) + .build(); + f(this, db) }) } @@ -614,7 +614,7 @@ pub trait Call: where Self: LoadPendingBlock, F: FnOnce( - StateCacheDbRefMutWrapper<'_, '_>, + &mut StateCacheDb, EvmEnvFor, TxEnvFor, ) -> Result @@ -624,17 +624,11 @@ pub trait Call: { async move { let (evm_env, at) = self.evm_env_at(at).await?; - let this = self.clone(); - self.spawn_blocking_io_fut(move |_| async move { - let state = this.state_at_block_id(at).await?; - let mut db = State::builder() - .with_database(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))) - .build(); - + self.spawn_with_state_at_block(at, move |this, mut db| { let (evm_env, tx_env) = this.prepare_call_env(evm_env, request, &mut db, overrides)?; - f(StateCacheDbRefMutWrapper(&mut db), evm_env, tx_env) + f(&mut db, evm_env, tx_env) }) .await } @@ -659,7 +653,7 @@ pub trait Call: F: FnOnce( TransactionInfo, ResultAndState>, - StateCacheDb<'_>, + StateCacheDb, ) -> Result + Send + 'static, @@ -678,10 +672,7 @@ pub trait Call: // block the transaction is included in let parent_block = block.parent_hash(); - let this = self.clone(); - self.spawn_with_state_at_block(parent_block.into(), move |state| { - let mut db = - State::builder().with_database(StateProviderDatabase::new(state)).build(); + self.spawn_with_state_at_block(parent_block, move |this, mut db| { let block_txs = block.transactions_recovered(); // replay all transactions prior to the targeted transaction @@ -748,7 +739,7 @@ pub trait Call: request.as_mut().set_nonce(nonce); } - Ok(self.tx_resp_builder().tx_env(request, evm_env)?) + Ok(self.converter().tx_env(request, evm_env)?) } /// Prepares the [`reth_evm::EvmEnv`] for execution of calls. @@ -811,6 +802,8 @@ pub trait Call: // evm_env.cfg_env.disable_fee_charge = true; + evm_env.cfg_env.memory_limit = self.evm_memory_limit(); + // set nonce to None so that the correct nonce is chosen by the EVM request.as_mut().take_nonce(); diff --git a/crates/rpc/rpc-eth-api/src/helpers/config.rs b/crates/rpc/rpc-eth-api/src/helpers/config.rs index c4014e6f20..fd07651672 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/config.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/config.rs @@ -1,7 +1,10 @@ //! Loads chain configuration. 
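A minimal sketch of the drop-cancellation flow that `transact_call_at` now follows: the RPC future holds a guard, a clone travels into the spawned blocking closure, and the closure bails out if the guard was already dropped by the time it runs. `DropFlag` below is a hypothetical stand-in with assumed set-flag-on-drop semantics; the type actually used above is `CancelOnDrop` from `reth_revm::cancelled`.

```rust
use std::sync::{
    atomic::{AtomicBool, Ordering},
    Arc,
};
use std::thread;
use std::time::Duration;

/// Hypothetical stand-in for the cancellation guard: dropping any handle sets a
/// shared flag that clones can observe via `is_cancelled`.
#[derive(Default, Clone)]
struct DropFlag(Arc<AtomicBool>);

impl DropFlag {
    fn is_cancelled(&self) -> bool {
        self.0.load(Ordering::Relaxed)
    }
}

impl Drop for DropFlag {
    fn drop(&mut self) {
        self.0.store(true, Ordering::Relaxed);
    }
}

fn main() {
    // Caller side: hold a guard, hand a clone to the blocking task.
    let guard = DropFlag::default();
    let cancel = guard.clone();

    let worker = thread::spawn(move || {
        // Give the caller a chance to go away first (simulating a dropped RPC future).
        thread::sleep(Duration::from_millis(50));
        if cancel.is_cancelled() {
            return Err("caller went away, skip the expensive EVM call");
        }
        Ok("transacted")
    });

    // Simulate the RPC future being dropped before the blocking work starts.
    drop(guard);

    assert_eq!(worker.join().unwrap(), Err("caller went away, skip the expensive EVM call"));
}
```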
-use alloy_consensus::Header; -use alloy_eips::eip7910::{EthConfig, EthForkConfig, SystemContract}; +use alloy_consensus::BlockHeader; +use alloy_eips::{ + eip7840::BlobParams, + eip7910::{EthConfig, EthForkConfig, SystemContract}, +}; use alloy_evm::precompiles::Precompile; use alloy_primitives::Address; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; @@ -9,6 +12,7 @@ use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks, Hardfor use reth_errors::{ProviderError, RethError}; use reth_evm::{precompiles::PrecompilesMap, ConfigureEvm, Evm}; use reth_node_api::NodePrimitives; +use reth_primitives_traits::header::HeaderMut; use reth_revm::db::EmptyDB; use reth_rpc_eth_types::EthApiError; use reth_storage_api::BlockReaderIdExt; @@ -35,9 +39,9 @@ pub struct EthConfigHandler { impl EthConfigHandler where Provider: ChainSpecProvider - + BlockReaderIdExt
+ + BlockReaderIdExt + 'static, - Evm: ConfigureEvm> + 'static, + Evm: ConfigureEvm> + 'static, { /// Creates a new [`EthConfigHandler`]. pub const fn new(provider: Provider, evm_config: Evm) -> Self { @@ -45,12 +49,11 @@ where } /// Returns fork config for specific timestamp. - /// Returns [`None`] if no blob params were found for this fork. fn build_fork_config_at( &self, timestamp: u64, precompiles: BTreeMap, - ) -> Option { + ) -> EthForkConfig { let chain_spec = self.provider.chain_spec(); let mut system_contracts = BTreeMap::::default(); @@ -71,14 +74,17 @@ where .0 .into(); - Some(EthForkConfig { + EthForkConfig { activation_time: timestamp, - blob_schedule: chain_spec.blob_params_at_timestamp(timestamp)?, + blob_schedule: chain_spec + .blob_params_at_timestamp(timestamp) + // no blob support, so we set this to original cancun values as defined in eip-4844 + .unwrap_or(BlobParams::cancun()), chain_id: chain_spec.chain().id(), fork_id, precompiles, system_contracts, - }) + } } fn config(&self) -> Result { @@ -100,22 +106,20 @@ where let (current_fork_idx, current_fork_timestamp) = fork_timestamps .iter() - .position(|ts| &latest.timestamp < ts) + .position(|ts| &latest.timestamp() < ts) .and_then(|idx| idx.checked_sub(1)) .or_else(|| fork_timestamps.len().checked_sub(1)) .and_then(|idx| fork_timestamps.get(idx).map(|ts| (idx, *ts))) .ok_or_else(|| RethError::msg("no active timestamp fork found"))?; - let current = self - .build_fork_config_at(current_fork_timestamp, current_precompiles) - .ok_or_else(|| RethError::msg("no fork config for current fork"))?; + let current = self.build_fork_config_at(current_fork_timestamp, current_precompiles); let mut config = EthConfig { current, next: None, last: None }; if let Some(next_fork_timestamp) = fork_timestamps.get(current_fork_idx + 1).copied() { let fake_header = { let mut header = latest.clone(); - header.timestamp = next_fork_timestamp; + header.set_timestamp(next_fork_timestamp); header }; let next_precompiles = evm_to_precompiles_map( @@ -124,7 +128,7 @@ where .map_err(RethError::other)?, ); - config.next = self.build_fork_config_at(next_fork_timestamp, next_precompiles); + config.next = Some(self.build_fork_config_at(next_fork_timestamp, next_precompiles)); } else { // If there is no fork scheduled, there is no "last" or "final" fork scheduled. return Ok(config); @@ -133,7 +137,7 @@ where let last_fork_timestamp = fork_timestamps.last().copied().unwrap(); let fake_header = { let mut header = latest; - header.timestamp = last_fork_timestamp; + header.set_timestamp(last_fork_timestamp); header }; let last_precompiles = evm_to_precompiles_map( @@ -142,7 +146,7 @@ where .map_err(RethError::other)?, ); - config.last = self.build_fork_config_at(last_fork_timestamp, last_precompiles); + config.last = Some(self.build_fork_config_at(last_fork_timestamp, last_precompiles)); Ok(config) } @@ -151,9 +155,9 @@ where impl EthConfigApiServer for EthConfigHandler where Provider: ChainSpecProvider - + BlockReaderIdExt
+ + BlockReaderIdExt + 'static, - Evm: ConfigureEvm> + 'static, + Evm: ConfigureEvm> + 'static, { fn config(&self) -> RpcResult { Ok(self.config().map_err(EthApiError::from)?) diff --git a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs index 6c14f96049..fa9dec303a 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -10,17 +10,23 @@ use futures::Future; use reth_chainspec::MIN_TRANSACTION_GAS; use reth_errors::ProviderError; use reth_evm::{ConfigureEvm, Database, Evm, EvmEnvFor, EvmFor, TransactionEnv, TxEnvFor}; -use reth_revm::{database::StateProviderDatabase, db::State}; +use reth_revm::{ + database::{EvmStateProvider, StateProviderDatabase}, + db::State, +}; use reth_rpc_convert::{RpcConvert, RpcTxReq}; use reth_rpc_eth_types::{ - error::{api::FromEvmHalt, FromEvmError}, - EthApiError, RevertError, RpcInvalidTransactionError, + error::{ + api::{FromEvmHalt, FromRevert}, + FromEvmError, + }, + EthApiError, RpcInvalidTransactionError, }; use reth_rpc_server_types::constants::gas_oracle::{CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO}; -use reth_storage_api::StateProvider; use revm::{ context::Block, context_interface::{result::ExecutionResult, Transaction}, + primitives::KECCAK_EMPTY, }; use tracing::trace; @@ -45,7 +51,7 @@ pub trait EstimateCall: Call { state_override: Option, ) -> Result where - S: StateProvider, + S: EvmStateProvider, { // Disabled because eth_estimateGas is sometimes used with eoa senders // See @@ -92,10 +98,14 @@ pub trait EstimateCall: Call { // Check if this is a basic transfer (no input data to account with no code) let is_basic_transfer = if tx_env.input().is_empty() && - let TxKind::Call(to) = tx_env.kind() && - let Ok(code) = db.database.account_code(&to) + let TxKind::Call(to) = tx_env.kind() { - code.map(|code| code.is_empty()).unwrap_or(true) + match db.database.basic_account(&to) { + Ok(Some(account)) => { + account.bytecode_hash.is_none() || account.bytecode_hash == Some(KECCAK_EMPTY) + } + _ => true, + } } else { false }; @@ -175,7 +185,7 @@ pub trait EstimateCall: Call { Self::map_out_of_gas_err(&mut evm, tx_env, max_gas_limit) } else { // the transaction did revert - Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err()) + Err(Self::Error::from_revert(output)) } } }; @@ -319,7 +329,7 @@ pub trait EstimateCall: Call { } ExecutionResult::Revert { output, .. } => { // reverted again after bumping the limit - Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err()) + Err(Self::Error::from_revert(output)) } ExecutionResult::Halt { reason, .. } => { Err(Self::Error::from_evm_halt(reason, req_gas_limit)) diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index b0d736981c..e3e513c9ce 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -110,7 +110,8 @@ pub trait EthFees: // increasing and 0 <= p <= 100 // Note: The types used ensure that the percentiles are never < 0 if let Some(percentiles) = &reward_percentiles && - percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.) 
+ (percentiles.iter().any(|p| *p < 0.0 || *p > 100.0) || + percentiles.windows(2).any(|w| w[0] > w[1])) { return Err(EthApiError::InvalidRewardPercentiles.into()) } diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 1dda44d090..830d066a09 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -8,7 +8,7 @@ use alloy_eips::eip7840::BlobParams; use alloy_primitives::{B256, U256}; use alloy_rpc_types_eth::BlockNumberOrTag; use futures::Future; -use reth_chain_state::{BlockState, ExecutedBlock}; +use reth_chain_state::{BlockState, ComputedTrieData, ExecutedBlock}; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_errors::{BlockExecutionError, BlockValidationError, ProviderError, RethError}; use reth_evm::{ @@ -235,7 +235,7 @@ pub trait LoadPendingBlock: .provider() .history_by_block_hash(parent.hash()) .map_err(Self::Error::from_eth_err)?; - let state = StateProviderDatabase::new(&state_provider); + let state = StateProviderDatabase::new(state_provider); let mut db = State::builder().with_database(state).with_bundle_update().build(); let mut builder = self @@ -276,7 +276,7 @@ pub trait LoadPendingBlock: // the iterator before we can continue best_txs.mark_invalid( &pool_tx, - InvalidPoolTransactionError::ExceedsGasLimit( + &InvalidPoolTransactionError::ExceedsGasLimit( pool_tx.gas_limit(), block_gas_limit, ), @@ -290,7 +290,7 @@ pub trait LoadPendingBlock: // transactions from the iteratorbefore we can continue best_txs.mark_invalid( &pool_tx, - InvalidPoolTransactionError::Consensus( + &InvalidPoolTransactionError::Consensus( InvalidTransactionError::TxTypeNotSupported, ), ); @@ -311,7 +311,7 @@ pub trait LoadPendingBlock: // for regular transactions above. 
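A small self-check of the tightened `eth_feeHistory` reward-percentile rule from `helpers/fee.rs` above: every value must lie in `[0, 100]` and the sequence must be non-decreasing. `valid_reward_percentiles` is an illustrative helper, not a reth function.

```rust
/// Mirrors the fee-history validation above.
fn valid_reward_percentiles(percentiles: &[f64]) -> bool {
    !(percentiles.iter().any(|p| *p < 0.0 || *p > 100.0) ||
        percentiles.windows(2).any(|w| w[0] > w[1]))
}

fn main() {
    assert!(valid_reward_percentiles(&[5.0, 25.0, 50.0, 50.0, 95.0]));
    // Out of range: the old pairwise-only check (`w[0] > 100.`) never inspected the
    // last element, so a trailing 101.0 used to slip through.
    assert!(!valid_reward_percentiles(&[5.0, 101.0]));
    // Not non-decreasing.
    assert!(!valid_reward_percentiles(&[50.0, 25.0]));
}
```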
best_txs.mark_invalid( &pool_tx, - InvalidPoolTransactionError::ExceedsGasLimit( + &InvalidPoolTransactionError::ExceedsGasLimit( tx_blob_gas, blob_params.max_blob_gas_per_block(), ), @@ -332,7 +332,7 @@ pub trait LoadPendingBlock: // descendants best_txs.mark_invalid( &pool_tx, - InvalidPoolTransactionError::Consensus( + &InvalidPoolTransactionError::Consensus( InvalidTransactionError::TxTypeNotSupported, ), ); @@ -369,12 +369,14 @@ pub trait LoadPendingBlock: vec![execution_result.requests], ); - Ok(ExecutedBlock { - recovered_block: block.into(), - execution_output: Arc::new(execution_outcome), - hashed_state: Arc::new(hashed_state), - trie_updates: Arc::new(trie_updates), - }) + Ok(ExecutedBlock::new( + block.into(), + Arc::new(execution_outcome), + ComputedTrieData::without_trie_input( + Arc::new(hashed_state.into_sorted()), + Arc::new(trie_updates.into_sorted()), + ), + )) } } @@ -416,8 +418,29 @@ impl BuildPendingEnv for NextBlockEnvAttributes { suggested_fee_recipient: parent.beneficiary(), prev_randao: B256::random(), gas_limit: parent.gas_limit(), - parent_beacon_block_root: parent.parent_beacon_block_root().map(|_| B256::ZERO), + parent_beacon_block_root: parent.parent_beacon_block_root(), withdrawals: parent.withdrawals_root().map(|_| Default::default()), + extra_data: parent.extra_data().clone(), } } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::Header; + use alloy_primitives::B256; + use reth_primitives_traits::SealedHeader; + + #[test] + fn pending_env_keeps_parent_beacon_root() { + let mut header = Header::default(); + let beacon_root = B256::repeat_byte(0x42); + header.parent_beacon_block_root = Some(beacon_root); + let sealed = SealedHeader::new(header, B256::ZERO); + + let attrs = NextBlockEnvAttributes::build_pending_env(&sealed); + + assert_eq!(attrs.parent_beacon_block_root, Some(beacon_root)); + } +} diff --git a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs index 58c3e8897d..ee44f9cc44 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs @@ -6,41 +6,16 @@ use alloy_consensus::{transaction::TransactionMeta, TxReceipt}; use futures::Future; use reth_primitives_traits::SignerRecoverable; use reth_rpc_convert::{transaction::ConvertReceiptInput, RpcConvert}; -use reth_rpc_eth_types::{error::FromEthApiError, EthApiError}; +use reth_rpc_eth_types::{ + error::FromEthApiError, utils::calculate_gas_used_and_next_log_index, EthApiError, +}; use reth_storage_api::{ProviderReceipt, ProviderTx}; -/// Calculates the gas used and next log index for a transaction at the given index -pub fn calculate_gas_used_and_next_log_index( - tx_index: u64, - all_receipts: &[impl TxReceipt], -) -> (u64, usize) { - let mut gas_used = 0; - let mut next_log_index = 0; - - if tx_index > 0 { - for receipt in all_receipts.iter().take(tx_index as usize) { - gas_used = receipt.cumulative_gas_used(); - next_log_index += receipt.logs().len(); - } - } - - (gas_used, next_log_index) -} - /// Assembles transaction receipt data w.r.t to network. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` receipts RPC methods. 
pub trait LoadReceipt: - EthApiTypes< - RpcConvert: RpcConvert< - Primitives = Self::Primitives, - Error = Self::Error, - Network = Self::NetworkTypes, - >, - Error: FromEthApiError, - > + RpcNodeCoreExt - + Send - + Sync + EthApiTypes> + RpcNodeCoreExt + Send + Sync { /// Helper method for `eth_getBlockReceipts` and `eth_getTransactionReceipt`. fn build_transaction_receipt( @@ -63,7 +38,7 @@ pub trait LoadReceipt: calculate_gas_used_and_next_log_index(meta.index, &all_receipts); Ok(self - .tx_resp_builder() + .converter() .convert_receipts(vec![ConvertReceiptInput { tx: tx .try_into_recovered_unchecked() diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index 1b3dbfcdee..9f55bae972 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -16,7 +16,8 @@ use reth_rpc_eth_types::{ error::FromEvmError, EthApiError, PendingBlockEnv, RpcInvalidTransactionError, }; use reth_storage_api::{ - BlockIdReader, BlockNumReader, StateProvider, StateProviderBox, StateProviderFactory, + BlockIdReader, BlockNumReader, BlockReaderIdExt, StateProvider, StateProviderBox, + StateProviderFactory, }; use reth_transaction_pool::TransactionPool; @@ -96,7 +97,7 @@ pub trait EthState: LoadState + SpawnBlocking { { Ok(async move { let _permit = self - .acquire_owned() + .acquire_owned_tracing() .await .map_err(RethError::other) .map_err(EthApiError::Internal)?; @@ -273,21 +274,20 @@ pub trait LoadState: let PendingBlockEnv { evm_env, origin } = self.pending_block_env_and_cfg()?; Ok((evm_env, origin.state_block_id())) } else { - // Use cached values if there is no pending block - let block_hash = RpcNodeCore::provider(self) - .block_hash_for_id(at) + // we can assume that the blockid will be predominantly `Latest` (e.g. for + // `eth_call`) and if requested by number or hash we can quickly fetch just the + // header + let header = RpcNodeCore::provider(self) + .sealed_header_by_id(at) .map_err(Self::Error::from_eth_err)? - .ok_or(EthApiError::HeaderNotFound(at))?; - - let header = - self.cache().get_header(block_hash).await.map_err(Self::Error::from_eth_err)?; + .ok_or_else(|| EthApiError::HeaderNotFound(at))?; let evm_env = self .evm_config() .evm_env(&header) .map_err(RethError::other) .map_err(Self::Error::from_eth_err)?; - Ok((evm_env, block_hash.into())) + Ok((evm_env, header.hash().into())) } } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 30ba12165e..20440725a8 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -1,6 +1,6 @@ //! Loads a pending block from database. Helper trait for `eth_` call and trace RPC methods. 
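The `calculate_gas_used_and_next_log_index` helper now imported from `reth_rpc_eth_types::utils` keeps the behaviour of the function removed above; a self-contained sketch with a toy receipt type and a small worked case:

```rust
/// Toy receipt shape for the illustration: cumulative gas used so far and the logs emitted.
struct Receipt {
    cumulative_gas_used: u64,
    logs: Vec<&'static str>,
}

/// Same computation as the removed helper: for the transaction at `tx_index`, the gas used
/// before it equals the cumulative gas of the previous receipt, and its first log gets the
/// index right after all earlier logs.
fn gas_used_and_next_log_index(tx_index: u64, receipts: &[Receipt]) -> (u64, usize) {
    let mut gas_used = 0;
    let mut next_log_index = 0;
    for receipt in receipts.iter().take(tx_index as usize) {
        gas_used = receipt.cumulative_gas_used;
        next_log_index += receipt.logs.len();
    }
    (gas_used, next_log_index)
}

fn main() {
    let receipts = vec![
        Receipt { cumulative_gas_used: 21_000, logs: vec!["a"] },
        Receipt { cumulative_gas_used: 63_000, logs: vec!["b", "c"] },
        Receipt { cumulative_gas_used: 100_000, logs: vec![] },
    ];

    // Third transaction in the block: the two transactions before it used 63_000 gas in
    // total and emitted 3 logs, so its receipt reports gas_used = 100_000 - 63_000 and
    // its first log gets index 3.
    assert_eq!(gas_used_and_next_log_index(2, &receipts), (63_000, 3));

    // First transaction: nothing precedes it.
    assert_eq!(gas_used_and_next_log_index(0, &receipts), (0, 0));
}
```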
-use super::{Call, LoadBlock, LoadPendingBlock, LoadState, LoadTransaction}; +use super::{Call, LoadBlock, LoadState, LoadTransaction}; use crate::FromEvmError; use alloy_consensus::{transaction::TxHashRef, BlockHeader}; use alloy_primitives::B256; @@ -14,17 +14,14 @@ use reth_evm::{ }; use reth_primitives_traits::{BlockBody, Recovered, RecoveredBlock}; use reth_revm::{database::StateProviderDatabase, db::State}; -use reth_rpc_eth_types::{ - cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, - EthApiError, -}; +use reth_rpc_eth_types::{cache::db::StateCacheDb, EthApiError}; use reth_storage_api::{ProviderBlock, ProviderTx}; use revm::{context::Block, context_interface::result::ResultAndState, DatabaseCommit}; use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; use std::sync::Arc; /// Executes CPU heavy tasks. -pub trait Trace: LoadState> { +pub trait Trace: LoadState> + Call { /// Executes the [`TxEnvFor`] with [`reth_evm::EvmEnv`] against the given [Database] without /// committing state changes. fn inspect( @@ -58,7 +55,6 @@ pub trait Trace: LoadState> { f: F, ) -> impl Future> + Send where - Self: Call, R: Send + 'static, F: FnOnce( TracingInspector, @@ -91,19 +87,16 @@ pub trait Trace: LoadState> { f: F, ) -> impl Future> + Send where - Self: LoadPendingBlock + Call, F: FnOnce( TracingInspector, ResultAndState>, - StateCacheDb<'_>, + StateCacheDb, ) -> Result + Send + 'static, R: Send + 'static, { - let this = self.clone(); - self.spawn_with_state_at_block(at, move |state| { - let mut db = State::builder().with_database(StateProviderDatabase::new(state)).build(); + self.spawn_with_state_at_block(at, move |this, mut db| { let mut inspector = TracingInspector::new(config); let res = this.inspect(&mut db, evm_env, tx_env, &mut inspector)?; f(inspector, res, db) @@ -126,12 +119,12 @@ pub trait Trace: LoadState> { f: F, ) -> impl Future, Self::Error>> + Send where - Self: LoadPendingBlock + LoadTransaction + Call, + Self: LoadTransaction, F: FnOnce( TransactionInfo, TracingInspector, ResultAndState>, - StateCacheDb<'_>, + StateCacheDb, ) -> Result + Send + 'static, @@ -156,17 +149,16 @@ pub trait Trace: LoadState> { f: F, ) -> impl Future, Self::Error>> + Send where - Self: LoadPendingBlock + LoadTransaction + Call, + Self: LoadTransaction, F: FnOnce( TransactionInfo, Insp, ResultAndState>, - StateCacheDb<'_>, + StateCacheDb, ) -> Result + Send + 'static, - Insp: - for<'a, 'b> InspectorFor> + Send + 'static, + Insp: for<'a> InspectorFor + Send + 'static, R: Send + 'static, { async move { @@ -182,10 +174,7 @@ pub trait Trace: LoadState> { // block the transaction is included in let parent_block = block.parent_hash(); - let this = self.clone(); - self.spawn_with_state_at_block(parent_block.into(), move |state| { - let mut db = - State::builder().with_database(StateProviderDatabase::new(state)).build(); + self.spawn_with_state_at_block(parent_block, move |this, mut db| { let block_txs = block.transactions_recovered(); this.apply_pre_execution_changes(&block, &mut db, &evm_env)?; @@ -194,12 +183,7 @@ pub trait Trace: LoadState> { this.replay_transactions_until(&mut db, evm_env.clone(), block_txs, *tx.tx_hash())?; let tx_env = this.evm_config().tx_env(tx); - let res = this.inspect( - StateCacheDbRefMutWrapper(&mut db), - evm_env, - tx_env, - &mut inspector, - )?; + let res = this.inspect(&mut db, evm_env, tx_env, &mut inspector)?; f(tx_info, inspector, res, db) }) .await @@ -228,7 +212,7 @@ pub trait Trace: LoadState> { TracingCtx< '_, 
Recovered<&ProviderTx>, - EvmFor, TracingInspector>, + EvmFor, >, ) -> Result + Send @@ -269,13 +253,13 @@ pub trait Trace: LoadState> { TracingCtx< '_, Recovered<&ProviderTx>, - EvmFor, Insp>, + EvmFor, >, ) -> Result + Send + 'static, Setup: FnMut() -> Insp + Send + 'static, - Insp: Clone + for<'a, 'b> InspectorFor>, + Insp: Clone + for<'a> InspectorFor, R: Send + 'static, { async move { @@ -296,21 +280,14 @@ pub trait Trace: LoadState> { } // replay all transactions of the block - self.spawn_blocking_io_fut(move |this| async move { - // we need to get the state of the parent block because we're replaying this block - // on top of its parent block's state - let state_at = block.parent_hash(); + // we need to get the state of the parent block because we're replaying this block + // on top of its parent block's state + self.spawn_with_state_at_block(block.parent_hash(), move |this, mut db| { let block_hash = block.hash(); let block_number = evm_env.block_env.number().saturating_to(); let base_fee = evm_env.block_env.basefee(); - // now get the state - let state = this.state_at_block_id(state_at.into()).await?; - let mut db = State::builder() - .with_database(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))) - .build(); - this.apply_pre_execution_changes(&block, &mut db, &evm_env)?; // prepare transactions, we do everything upfront to reduce time spent with open @@ -328,7 +305,7 @@ pub trait Trace: LoadState> { let results = this .evm_config() .evm_factory() - .create_tracer(StateCacheDbRefMutWrapper(&mut db), evm_env, inspector_setup()) + .create_tracer(&mut db, evm_env, inspector_setup()) .try_trace_many(block.transactions_recovered().take(max_transactions), |ctx| { let tx_info = TransactionInfo { hash: Some(*ctx.tx.tx_hash()), @@ -375,7 +352,7 @@ pub trait Trace: LoadState> { TracingCtx< '_, Recovered<&ProviderTx>, - EvmFor, TracingInspector>, + EvmFor, >, ) -> Result + Send @@ -415,13 +392,13 @@ pub trait Trace: LoadState> { TracingCtx< '_, Recovered<&ProviderTx>, - EvmFor, Insp>, + EvmFor, >, ) -> Result + Send + 'static, Setup: FnMut() -> Insp + Send + 'static, - Insp: Clone + for<'a, 'b> InspectorFor>, + Insp: Clone + for<'a> InspectorFor, R: Send + 'static, { self.trace_block_until_with_inspector(block_id, block, None, insp_setup, f) diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index d2e0b5f943..4256a55230 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -19,18 +19,19 @@ use alloy_rpc_types_eth::{BlockNumberOrTag, TransactionInfo}; use futures::{Future, StreamExt}; use reth_chain_state::CanonStateSubscriptions; use reth_node_api::BlockBody; -use reth_primitives_traits::{RecoveredBlock, SignedTransaction, TxTy}; -use reth_rpc_convert::{transaction::RpcConvert, RpcTxReq}; +use reth_primitives_traits::{Recovered, RecoveredBlock, SignedTransaction, TxTy, WithEncoded}; +use reth_rpc_convert::{transaction::RpcConvert, RpcTxReq, TransactionConversionError}; use reth_rpc_eth_types::{ - utils::binary_search, EthApiError, EthApiError::TransactionConfirmationTimeout, - FillTransactionResult, SignError, TransactionSource, + utils::{binary_search, recover_raw_transaction}, + EthApiError::{self, TransactionConfirmationTimeout}, + FillTransaction, SignError, TransactionSource, }; use reth_storage_api::{ BlockNumReader, BlockReaderIdExt, ProviderBlock, ProviderReceipt, ProviderTx, ReceiptProvider, TransactionsProvider, }; use 
reth_transaction_pool::{ - AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool, + AddedTransactionOutcome, PoolPooledTx, PoolTransaction, TransactionOrigin, TransactionPool, }; use std::{sync::Arc, time::Duration}; @@ -76,6 +77,17 @@ pub trait EthTransactions: LoadTransaction { fn send_raw_transaction( &self, tx: Bytes, + ) -> impl Future> + Send { + async move { + let recovered = recover_raw_transaction::>(&tx)?; + self.send_transaction(WithEncoded::new(tx, recovered)).await + } + } + + /// Submits the transaction to the pool. + fn send_transaction( + &self, + tx: WithEncoded>>, ) -> impl Future> + Send; /// Decodes and recovers the transaction and submits it to the pool. @@ -278,7 +290,7 @@ pub trait EthTransactions: LoadTransaction { }; return Ok(Some( - self.tx_resp_builder().fill(tx.clone().with_signer(*signer), tx_info)?, + self.converter().fill(tx.clone().with_signer(*signer), tx_info)?, )) } } @@ -304,13 +316,11 @@ pub trait EthTransactions: LoadTransaction { RpcNodeCore::pool(self).get_transaction_by_sender_and_nonce(sender, nonce) { let transaction = tx.transaction.clone_into_consensus(); - return Ok(Some(self.tx_resp_builder().fill_pending(transaction)?)); + return Ok(Some(self.converter().fill_pending(transaction)?)); } - // Check if the sender is a contract - if !self.get_code(sender, None).await?.is_empty() { - return Ok(None); - } + // Note: we can't optimize for contracts (account with code) and cannot shortcircuit if + // the address has code, because with 7702 EOAs can also have code let highest = self.transaction_count(sender, None).await?.saturating_to::(); @@ -354,7 +364,7 @@ pub trait EthTransactions: LoadTransaction { base_fee: base_fee_per_gas, index: Some(index as u64), }; - self.tx_resp_builder().fill(tx.clone().with_signer(*signer), tx_info) + Ok(self.converter().fill(tx.clone().with_signer(*signer), tx_info)?) }) }) .ok_or(EthApiError::HeaderNotFound(block_id))? @@ -386,7 +396,7 @@ pub trait EthTransactions: LoadTransaction { /// Signs transaction with a matching signer, if any and submits the transaction to the pool. /// Returns the hash of the signed transaction. - fn send_transaction( + fn send_transaction_request( &self, mut request: RpcTxReq, ) -> impl Future> + Send @@ -423,7 +433,9 @@ pub trait EthTransactions: LoadTransaction { <::Pool as TransactionPool>::Transaction::try_from_consensus( transaction, ) - .map_err(|_| EthApiError::TransactionConversionError)?; + .map_err(|e| { + Self::Error::from_eth_err(TransactionConversionError::Other(e.to_string())) + })?; // submit the transaction to the pool with a `Local` origin let AddedTransactionOutcome { hash, .. 
} = self @@ -440,7 +452,7 @@ pub trait EthTransactions: LoadTransaction { fn fill_transaction( &self, mut request: RpcTxReq, - ) -> impl Future>, Self::Error>> + Send + ) -> impl Future>, Self::Error>> + Send where Self: EthApiSpec + LoadBlock + EstimateCall + LoadFee, { @@ -497,11 +509,11 @@ pub trait EthTransactions: LoadTransaction { } } - let tx = self.tx_resp_builder().build_simulate_v1_transaction(request)?; + let tx = self.converter().build_simulate_v1_transaction(request)?; let raw = tx.encoded_2718().into(); - Ok(FillTransactionResult { raw, tx }) + Ok(FillTransaction { raw, tx }) } } @@ -599,45 +611,37 @@ pub trait LoadTransaction: SpawnBlocking + FullEthApiTypes + RpcNodeCoreExt { > + Send { async move { // Try to find the transaction on disk - let mut resp = self + if let Some((tx, meta)) = self .spawn_blocking_io(move |this| { - match this - .provider() + this.provider() .transaction_by_hash_with_meta(hash) - .map_err(Self::Error::from_eth_err)? - { - None => Ok(None), - Some((tx, meta)) => { - // Note: we assume this transaction is valid, because it's mined (or - // part of pending block) and already. We don't need to - // check for pre EIP-2 because this transaction could be pre-EIP-2. - let transaction = tx - .try_into_recovered_unchecked() - .map_err(|_| EthApiError::InvalidTransactionSignature)?; - - let tx = TransactionSource::Block { - transaction, - index: meta.index, - block_hash: meta.block_hash, - block_number: meta.block_number, - base_fee: meta.base_fee, - }; - Ok(Some(tx)) - } - } + .map_err(Self::Error::from_eth_err) }) - .await?; + .await? + { + // Note: we assume this transaction is valid, because it's mined (or + // part of pending block) and already. We don't need to + // check for pre EIP-2 because this transaction could be pre-EIP-2. + let transaction = tx + .try_into_recovered_unchecked() + .map_err(|_| EthApiError::InvalidTransactionSignature)?; - if resp.is_none() { - // tx not found on disk, check pool - if let Some(tx) = - self.pool().get(&hash).map(|tx| tx.transaction.clone().into_consensus()) - { - resp = Some(TransactionSource::Pool(tx.into())); - } + return Ok(Some(TransactionSource::Block { + transaction, + index: meta.index, + block_hash: meta.block_hash, + block_number: meta.block_number, + base_fee: meta.base_fee, + })); } - Ok(resp) + // tx not found on disk, check pool + if let Some(tx) = self.pool().get(&hash).map(|tx| tx.transaction.clone_into_consensus()) + { + return Ok(Some(TransactionSource::Pool(tx.into()))); + } + + Ok(None) } } diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index ed4fcfa5c8..b4b23e0168 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -5,10 +5,7 @@ use alloy_rpc_types_eth::Block; use reth_rpc_convert::{RpcConvert, SignableTxRequest}; pub use reth_rpc_convert::{RpcTransaction, RpcTxReq, RpcTypes}; use reth_storage_api::ProviderTx; -use std::{ - error::Error, - fmt::{self}, -}; +use std::error::Error; /// Network specific `eth` API types. /// @@ -23,16 +20,17 @@ pub trait EthApiTypes: Send + Sync + Clone { type Error: Into> + FromEthApiError + AsEthApiError + + From<::Error> + Error + Send + Sync; /// Blockchain primitive types, specific to network, e.g. block and transaction. type NetworkTypes: RpcTypes; /// Conversion methods for transaction RPC type. - type RpcConvert: Send + Sync + fmt::Debug; + type RpcConvert: RpcConvert; /// Returns reference to transaction response builder. 
- fn tx_resp_builder(&self) -> &Self::RpcConvert; + fn converter(&self) -> &Self::RpcConvert; } /// Adapter for network specific block type. @@ -55,11 +53,7 @@ where NetworkTypes: RpcTypes< TransactionRequest: SignableTxRequest>, >, - RpcConvert: RpcConvert< - Primitives = Self::Primitives, - Network = Self::NetworkTypes, - Error = RpcError, - >, + RpcConvert: RpcConvert, >, { } @@ -70,11 +64,7 @@ impl FullEthApiTypes for T where NetworkTypes: RpcTypes< TransactionRequest: SignableTxRequest>, >, - RpcConvert: RpcConvert< - Primitives = ::Primitives, - Network = Self::NetworkTypes, - Error = RpcError, - >, + RpcConvert: RpcConvert, > { } diff --git a/crates/rpc/rpc-eth-types/src/builder/config.rs b/crates/rpc/rpc-eth-types/src/builder/config.rs index 47f15ae5ae..7b09a3144a 100644 --- a/crates/rpc/rpc-eth-types/src/builder/config.rs +++ b/crates/rpc/rpc-eth-types/src/builder/config.rs @@ -8,9 +8,10 @@ use crate::{ }; use reqwest::Url; use reth_rpc_server_types::constants::{ - default_max_tracing_requests, DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_BLOCKS_PER_FILTER, - DEFAULT_MAX_LOGS_PER_RESPONSE, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_MAX_TRACE_FILTER_BLOCKS, - DEFAULT_PROOF_PERMITS, RPC_DEFAULT_SEND_RAW_TX_SYNC_TIMEOUT_SECS, + default_max_tracing_requests, DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_BLOCKING_IO_REQUEST, + DEFAULT_MAX_BLOCKS_PER_FILTER, DEFAULT_MAX_LOGS_PER_RESPONSE, DEFAULT_MAX_SIMULATE_BLOCKS, + DEFAULT_MAX_TRACE_FILTER_BLOCKS, DEFAULT_PROOF_PERMITS, + RPC_DEFAULT_SEND_RAW_TX_SYNC_TIMEOUT_SECS, }; use serde::{Deserialize, Serialize}; @@ -68,6 +69,15 @@ pub struct EthConfig { pub eth_proof_window: u64, /// The maximum number of tracing calls that can be executed in concurrently. pub max_tracing_requests: usize, + /// The maximum number of blocking IO calls that can be executed in concurrently. + /// + /// Requests such as `eth_call`, `eth_estimateGas` and alike require evm execution, which is + /// considered blocking since it's usually more heavy on the IO side but also CPU constrained. + /// It is expected that these are spawned as short lived blocking tokio tasks. This config + /// determines how many can be spawned concurrently, to avoid a build up in the tokio's + /// blocking pool queue since there's only a limited number of threads available. This setting + /// restricts how many tasks are spawned concurrently. + pub max_blocking_io_requests: usize, /// Maximum number of blocks for `trace_filter` requests. pub max_trace_filter_blocks: u64, /// Maximum number of blocks that could be scanned per filter request in `eth_getLogs` calls. @@ -95,6 +105,8 @@ pub struct EthConfig { pub raw_tx_forwarder: ForwardConfig, /// Timeout duration for `send_raw_transaction_sync` RPC method. pub send_raw_transaction_sync_timeout: Duration, + /// Maximum memory the EVM can allocate per RPC request. 
+ pub rpc_evm_memory_limit: u64, } impl EthConfig { @@ -114,6 +126,7 @@ impl Default for EthConfig { gas_oracle: GasPriceOracleConfig::default(), eth_proof_window: DEFAULT_ETH_PROOF_WINDOW, max_tracing_requests: default_max_tracing_requests(), + max_blocking_io_requests: DEFAULT_MAX_BLOCKING_IO_REQUEST, max_trace_filter_blocks: DEFAULT_MAX_TRACE_FILTER_BLOCKS, max_blocks_per_filter: DEFAULT_MAX_BLOCKS_PER_FILTER, max_logs_per_response: DEFAULT_MAX_LOGS_PER_RESPONSE, @@ -126,6 +139,7 @@ impl Default for EthConfig { pending_block_kind: PendingBlockKind::Full, raw_tx_forwarder: ForwardConfig::default(), send_raw_transaction_sync_timeout: RPC_DEFAULT_SEND_RAW_TX_SYNC_TIMEOUT_SECS, + rpc_evm_memory_limit: (1 << 32) - 1, } } } @@ -149,6 +163,12 @@ impl EthConfig { self } + /// Configures the maximum number of blocking IO requests + pub const fn max_blocking_io_requests(mut self, max_requests: usize) -> Self { + self.max_blocking_io_requests = max_requests; + self + } + /// Configures the maximum block length to scan per `eth_getLogs` request pub const fn max_blocks_per_filter(mut self, max_blocks: u64) -> Self { self.max_blocks_per_filter = max_blocks; @@ -216,6 +236,12 @@ impl EthConfig { self.send_raw_transaction_sync_timeout = timeout; self } + + /// Configures the maximum memory the EVM can allocate per RPC request. + pub const fn rpc_evm_memory_limit(mut self, memory_limit: u64) -> Self { + self.rpc_evm_memory_limit = memory_limit; + self + } } /// Config for the filter @@ -263,7 +289,7 @@ impl Default for EthFilterConfig { max_blocks_per_filter: None, max_logs_per_response: None, // 5min - stale_filter_ttl: Duration::from_secs(5 * 60), + stale_filter_ttl: DEFAULT_STALE_FILTER_TTL, } } } diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index 8209af0fa5..09e1b3db3c 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -4,25 +4,24 @@ use alloy_primitives::{Address, B256, U256}; use reth_errors::ProviderResult; -use reth_revm::{database::StateProviderDatabase, DatabaseRef}; -use reth_storage_api::{BytecodeReader, HashedPostStateProvider, StateProvider}; +use reth_revm::database::StateProviderDatabase; +use reth_storage_api::{BytecodeReader, HashedPostStateProvider, StateProvider, StateProviderBox}; use reth_trie::{HashedStorage, MultiProofTargets}; -use revm::{ - database::{BundleState, State}, - primitives::HashMap, - state::{AccountInfo, Bytecode}, - Database, DatabaseCommit, -}; +use revm::database::{BundleState, State}; /// Helper alias type for the state's [`State`] -pub type StateCacheDb<'a> = State>>; +pub type StateCacheDb = State>; /// Hack to get around 'higher-ranked lifetime error', see /// +/// +/// Apparently, when dealing with our RPC code, compiler is struggling to prove lifetimes around +/// [`StateProvider`] trait objects. This type is a workaround which should help the compiler to +/// understand that there are no lifetimes involved. 
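// Illustrative sketch, not part of the patch: with the wrapper now owning a
// `StateProviderBox`, an owned `StateCacheDb` can be built without borrowing, which is
// what lets helpers such as `spawn_with_state_at_block` hand their closure an owned
// database. Assumes `State`, `StateProviderDatabase` and `StateProviderBox` are in scope
// as in this module.
fn build_cache_db(state: StateProviderBox) -> StateCacheDb {
    State::builder()
        .with_database(StateProviderDatabase::new(StateProviderTraitObjWrapper(state)))
        .build()
}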
#[expect(missing_debug_implementations)] -pub struct StateProviderTraitObjWrapper<'a>(pub &'a dyn StateProvider); +pub struct StateProviderTraitObjWrapper(pub StateProviderBox); -impl reth_storage_api::StateRootProvider for StateProviderTraitObjWrapper<'_> { +impl reth_storage_api::StateRootProvider for StateProviderTraitObjWrapper { fn state_root( &self, hashed_state: reth_trie::HashedPostState, @@ -52,7 +51,7 @@ impl reth_storage_api::StateRootProvider for StateProviderTraitObjWrapper<'_> { } } -impl reth_storage_api::StorageRootProvider for StateProviderTraitObjWrapper<'_> { +impl reth_storage_api::StorageRootProvider for StateProviderTraitObjWrapper { fn storage_root( &self, address: Address, @@ -80,7 +79,7 @@ impl reth_storage_api::StorageRootProvider for StateProviderTraitObjWrapper<'_> } } -impl reth_storage_api::StateProofProvider for StateProviderTraitObjWrapper<'_> { +impl reth_storage_api::StateProofProvider for StateProviderTraitObjWrapper { fn proof( &self, input: reth_trie::TrieInput, @@ -107,7 +106,7 @@ impl reth_storage_api::StateProofProvider for StateProviderTraitObjWrapper<'_> { } } -impl reth_storage_api::AccountReader for StateProviderTraitObjWrapper<'_> { +impl reth_storage_api::AccountReader for StateProviderTraitObjWrapper { fn basic_account( &self, address: &Address, @@ -116,7 +115,7 @@ impl reth_storage_api::AccountReader for StateProviderTraitObjWrapper<'_> { } } -impl reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper<'_> { +impl reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper { fn block_hash( &self, block_number: alloy_primitives::BlockNumber, @@ -140,13 +139,13 @@ impl reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper<'_> { } } -impl HashedPostStateProvider for StateProviderTraitObjWrapper<'_> { +impl HashedPostStateProvider for StateProviderTraitObjWrapper { fn hashed_post_state(&self, bundle_state: &BundleState) -> reth_trie::HashedPostState { self.0.hashed_post_state(bundle_state) } } -impl StateProvider for StateProviderTraitObjWrapper<'_> { +impl StateProvider for StateProviderTraitObjWrapper { fn storage( &self, account: Address, @@ -171,7 +170,7 @@ impl StateProvider for StateProviderTraitObjWrapper<'_> { } } -impl BytecodeReader for StateProviderTraitObjWrapper<'_> { +impl BytecodeReader for StateProviderTraitObjWrapper { fn bytecode_by_hash( &self, code_hash: &B256, @@ -179,58 +178,3 @@ impl BytecodeReader for StateProviderTraitObjWrapper<'_> { self.0.bytecode_by_hash(code_hash) } } - -/// Hack to get around 'higher-ranked lifetime error', see -/// -pub struct StateCacheDbRefMutWrapper<'a, 'b>(pub &'b mut StateCacheDb<'a>); - -impl<'a, 'b> core::fmt::Debug for StateCacheDbRefMutWrapper<'a, 'b> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("StateCacheDbRefMutWrapper").finish_non_exhaustive() - } -} - -impl<'a> Database for StateCacheDbRefMutWrapper<'a, '_> { - type Error = as Database>::Error; - fn basic(&mut self, address: Address) -> Result, Self::Error> { - self.0.basic(address) - } - - fn code_by_hash(&mut self, code_hash: B256) -> Result { - self.0.code_by_hash(code_hash) - } - - fn storage(&mut self, address: Address, index: U256) -> Result { - self.0.storage(address, index) - } - - fn block_hash(&mut self, number: u64) -> Result { - self.0.block_hash(number) - } -} - -impl<'a> DatabaseRef for StateCacheDbRefMutWrapper<'a, '_> { - type Error = as Database>::Error; - - fn basic_ref(&self, address: Address) -> Result, Self::Error> { - 
self.0.basic_ref(address) - } - - fn code_by_hash_ref(&self, code_hash: B256) -> Result { - self.0.code_by_hash_ref(code_hash) - } - - fn storage_ref(&self, address: Address, index: U256) -> Result { - self.0.storage_ref(address, index) - } - - fn block_hash_ref(&self, number: u64) -> Result { - self.0.block_hash_ref(number) - } -} - -impl DatabaseCommit for StateCacheDbRefMutWrapper<'_, '_> { - fn commit(&mut self, changes: HashMap) { - self.0.commit(changes) - } -} diff --git a/crates/rpc/rpc-eth-types/src/cache/multi_consumer.rs b/crates/rpc/rpc-eth-types/src/cache/multi_consumer.rs index dec5dcb09a..3ce52ee5a4 100644 --- a/crates/rpc/rpc-eth-types/src/cache/multi_consumer.rs +++ b/crates/rpc/rpc-eth-types/src/cache/multi_consumer.rs @@ -92,7 +92,7 @@ where /// /// Can fail if the element is rejected by the limiter or if we fail to grow an empty map. /// - /// See [`Schnellru::insert`](LruMap::insert) for more info. + /// See [`LruMap::insert`] for more info. pub fn insert<'a>(&mut self, key: L::KeyToInsert<'a>, value: V) -> bool where L::KeyToInsert<'a>: Hash + PartialEq, diff --git a/crates/rpc/rpc-eth-types/src/error/api.rs b/crates/rpc/rpc-eth-types/src/error/api.rs index 03641d067e..744314ecb0 100644 --- a/crates/rpc/rpc-eth-types/src/error/api.rs +++ b/crates/rpc/rpc-eth-types/src/error/api.rs @@ -1,10 +1,11 @@ //! Helper traits to wrap generic l1 errors, in network specific error type configured in //! `reth_rpc_eth_api::EthApiTypes`. -use crate::EthApiError; +use crate::{simulate::EthSimulateError, EthApiError, RevertError}; +use alloy_primitives::Bytes; use reth_errors::ProviderError; use reth_evm::{ConfigureEvm, EvmErrorFor, HaltReasonFor}; -use revm::context_interface::result::HaltReason; +use revm::{context::result::ExecutionResult, context_interface::result::HaltReason}; use super::RpcInvalidTransactionError; @@ -73,6 +74,32 @@ pub trait AsEthApiError { false } + + /// Returns [`EthSimulateError`] if this error maps to a simulate-specific error code. + fn as_simulate_error(&self) -> Option { + let err = self.as_err()?; + match err { + EthApiError::InvalidTransaction(tx_err) => match tx_err { + RpcInvalidTransactionError::NonceTooLow { tx, state } => { + Some(EthSimulateError::NonceTooLow { tx: *tx, state: *state }) + } + RpcInvalidTransactionError::NonceTooHigh => Some(EthSimulateError::NonceTooHigh), + RpcInvalidTransactionError::FeeCapTooLow => { + Some(EthSimulateError::BaseFeePerGasTooLow) + } + RpcInvalidTransactionError::GasTooLow => Some(EthSimulateError::IntrinsicGasTooLow), + RpcInvalidTransactionError::InsufficientFunds { cost, balance } => { + Some(EthSimulateError::InsufficientFunds { cost: *cost, balance: *balance }) + } + RpcInvalidTransactionError::SenderNoEOA => Some(EthSimulateError::SenderNotEOA), + RpcInvalidTransactionError::MaxInitCodeSizeExceeded => { + Some(EthSimulateError::MaxInitCodeSizeExceeded) + } + _ => None, + }, + _ => None, + } + } } impl AsEthApiError for EthApiError { @@ -83,17 +110,28 @@ impl AsEthApiError for EthApiError { /// Helper trait to convert from revm errors. pub trait FromEvmError: - From> + FromEvmHalt> + From> + FromEvmHalt> + FromRevert { /// Converts from EVM error to this type. fn from_evm_err(err: EvmErrorFor) -> Self { err.into() } + + /// Ensures the execution result is successful or returns an error, + fn ensure_success(result: ExecutionResult>) -> Result { + match result { + ExecutionResult::Success { output, .. } => Ok(output.into_data()), + ExecutionResult::Revert { output, .. 
} => Err(Self::from_revert(output)), + ExecutionResult::Halt { reason, gas_used } => { + Err(Self::from_evm_halt(reason, gas_used)) + } + } + } } impl FromEvmError for T where - T: From> + FromEvmHalt>, + T: From> + FromEvmHalt> + FromRevert, Evm: ConfigureEvm, { } @@ -109,3 +147,17 @@ impl FromEvmHalt for EthApiError { RpcInvalidTransactionError::halt(halt, gas_limit).into() } } + +/// Helper trait to construct errors from unexpected reverts. +pub trait FromRevert { + /// Constructs an error from revert bytes. + /// + /// This is only invoked when revert was unexpected (`eth_call`, `eth_estimateGas`, etc). + fn from_revert(output: Bytes) -> Self; +} + +impl FromRevert for EthApiError { + fn from_revert(output: Bytes) -> Self { + RpcInvalidTransactionError::Revert(RevertError::new(output)).into() + } +} diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index ef65e4ccc2..2a7e5141c3 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -1,7 +1,6 @@ //! Implementation specific Errors for the `eth_` namespace. pub mod api; -use crate::error::api::FromEvmHalt; use alloy_eips::BlockId; use alloy_evm::{call::CallError, overrides::StateOverrideError}; use alloy_primitives::{Address, Bytes, B256, U256}; @@ -21,9 +20,9 @@ use reth_transaction_pool::error::{ PoolError, PoolErrorKind, PoolTransactionError, }; use revm::context_interface::result::{ - EVMError, ExecutionResult, HaltReason, InvalidHeader, InvalidTransaction, OutOfGasError, + EVMError, HaltReason, InvalidHeader, InvalidTransaction, OutOfGasError, }; -use revm_inspectors::tracing::MuxError; +use revm_inspectors::tracing::{DebugInspectorError, MuxError}; use std::convert::Infallible; use tokio::sync::oneshot::error::RecvError; @@ -165,8 +164,8 @@ pub enum EthApiError { #[error("Invalid bytecode: {0}")] InvalidBytecode(String), /// Error encountered when converting a transaction type - #[error("Transaction conversion error")] - TransactionConversionError, + #[error(transparent)] + TransactionConversionError(#[from] TransactionConversionError), /// Error thrown when tracing with a muxTracer fails #[error(transparent)] MuxTracerError(#[from] MuxError), @@ -274,7 +273,7 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { EthApiError::Signing(_) | EthApiError::BothStateAndStateDiffInOverride(_) | EthApiError::InvalidTracerConfig | - EthApiError::TransactionConversionError | + EthApiError::TransactionConversionError(_) | EthApiError::InvalidRewardPercentiles | EthApiError::InvalidBytecode(_) => invalid_params_rpc_err(error.to_string()), EthApiError::InvalidTransaction(err) => err.into(), @@ -337,12 +336,6 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { } } -impl From for EthApiError { - fn from(_: TransactionConversionError) -> Self { - Self::TransactionConversionError - } -} - impl From> for EthApiError where E: Into, @@ -414,6 +407,25 @@ impl From for EthApiError { } } +impl From> for EthApiError +where + Err: core::error::Error + Send + Sync + 'static, +{ + fn from(error: DebugInspectorError) -> Self { + match error { + DebugInspectorError::InvalidTracerConfig => Self::InvalidTracerConfig, + DebugInspectorError::UnsupportedTracer => Self::Unsupported("unsupported tracer"), + DebugInspectorError::JsTracerNotEnabled => { + Self::Unsupported("JS Tracer is not enabled") + } + DebugInspectorError::MuxInspector(err) => err.into(), + DebugInspectorError::Database(err) => Self::Internal(RethError::other(err)), + 
#[cfg(feature = "js-tracer")] + DebugInspectorError::JsInspector(err) => err.into(), + } + } +} + impl From for EthApiError { fn from(error: RethError) -> Self { match error { @@ -619,6 +631,9 @@ pub enum RpcInvalidTransactionError { /// Contains the gas limit. #[error("out of gas: gas exhausted during memory expansion: {0}")] MemoryOutOfGas(u64), + /// Memory limit was exceeded during memory expansion. + #[error("out of memory: memory limit exceeded during memory expansion")] + MemoryLimitOutOfGas, /// Gas limit was exceeded during precompile execution. /// Contains the gas limit. #[error("out of gas: gas exhausted during precompiled contract execution: {0}")] @@ -723,7 +738,8 @@ impl RpcInvalidTransactionError { OutOfGasError::Basic | OutOfGasError::ReentrancySentry => { Self::BasicOutOfGas(gas_limit) } - OutOfGasError::Memory | OutOfGasError::MemoryLimit => Self::MemoryOutOfGas(gas_limit), + OutOfGasError::Memory => Self::MemoryOutOfGas(gas_limit), + OutOfGasError::MemoryLimit => Self::MemoryLimitOutOfGas, OutOfGasError::Precompile => Self::PrecompileOutOfGas(gas_limit), OutOfGasError::InvalidOperand => Self::InvalidOperandOutOfGas(gas_limit), } @@ -1070,20 +1086,6 @@ pub enum SignError { NoChainId, } -/// Converts the evm [`ExecutionResult`] into a result where `Ok` variant is the output bytes if it -/// is [`ExecutionResult::Success`]. -pub fn ensure_success + FromEthApiError>( - result: ExecutionResult, -) -> Result { - match result { - ExecutionResult::Success { output, .. } => Ok(output.into_data()), - ExecutionResult::Revert { output, .. } => { - Err(Error::from_eth_err(RpcInvalidTransactionError::Revert(RevertError::new(output)))) - } - ExecutionResult::Halt { reason, gas_used } => Err(Error::from_evm_halt(reason, gas_used)), - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/crates/rpc/rpc-eth-types/src/lib.rs b/crates/rpc/rpc-eth-types/src/lib.rs index 7378ad9962..8d829aebf4 100644 --- a/crates/rpc/rpc-eth-types/src/lib.rs +++ b/crates/rpc/rpc-eth-types/src/lib.rs @@ -23,6 +23,7 @@ pub mod transaction; pub mod tx_forward; pub mod utils; +pub use alloy_rpc_types_eth::FillTransaction; pub use builder::config::{EthConfig, EthFilterConfig}; pub use cache::{ config::EthStateCacheConfig, db::StateCacheDb, multi_consumer::MultiConsumerLruCache, @@ -35,5 +36,5 @@ pub use gas_oracle::{ }; pub use id_provider::EthSubscriptionIdProvider; pub use pending_block::{PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; -pub use transaction::{FillTransactionResult, TransactionSource}; +pub use transaction::TransactionSource; pub use tx_forward::ForwardConfig; diff --git a/crates/rpc/rpc-eth-types/src/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs index 1d93de4bb1..6562c3043d 100644 --- a/crates/rpc/rpc-eth-types/src/logs_utils.rs +++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs @@ -11,6 +11,7 @@ use reth_errors::ProviderError; use reth_primitives_traits::{BlockBody, RecoveredBlock, SignedTransaction}; use reth_storage_api::{BlockReader, ProviderBlock}; use std::sync::Arc; +use thiserror::Error; /// Returns all matching of a block's receipts when the transaction hashes are known. pub fn matching_block_logs_with_tx_hashes<'a, I, R>( @@ -147,30 +148,40 @@ where /// Computes the block range based on the filter range and current block numbers. /// -/// This returns `(min(best,from), min(best,to))`. +/// Returns an error for invalid ranges rather than silently clamping values. 
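// Minimal call-site sketch for the new contract: invalid ranges are surfaced as errors
// instead of being clamped, so callers propagate them. The handler shape below is an
// assumption; `from_block`, `to_block`, `start_block` and `info` come from the filter
// request and the provider as before.
let best = info.best_number;
let (from, to) = get_filter_block_range(from_block, to_block, start_block, info)?;
debug_assert!(from <= to && to <= best);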
pub fn get_filter_block_range( from_block: Option, to_block: Option, start_block: u64, info: ChainInfo, -) -> (u64, u64) { - let mut from_block_number = start_block; - let mut to_block_number = info.best_number; +) -> Result<(u64, u64), FilterBlockRangeError> { + let from_block_number = from_block.unwrap_or(start_block); + let to_block_number = to_block.unwrap_or(info.best_number); - // if a `from_block` argument is provided then the `from_block_number` is the converted value or - // the start block if the converted value is larger than the start block, since `from_block` - // can't be a future block: `min(head, from_block)` - if let Some(filter_from_block) = from_block { - from_block_number = start_block.min(filter_from_block) + // from > to is an invalid range + if from_block_number > to_block_number { + return Err(FilterBlockRangeError::InvalidBlockRange); } - // upper end of the range is the converted `to_block` argument, restricted by the best block: - // `min(best_number,to_block_number)` - if let Some(filter_to_block) = to_block { - to_block_number = info.best_number.min(filter_to_block); + // we cannot query blocks that don't exist yet + if to_block_number > info.best_number { + return Err(FilterBlockRangeError::BlockRangeExceedsHead); } - (from_block_number, to_block_number) + Ok((from_block_number, to_block_number)) +} + +/// Errors for filter block range validation. +/// +/// See also . +#[derive(Debug, Clone, Copy, PartialEq, Eq, Error)] +pub enum FilterBlockRangeError { + /// `from_block > to_block` + #[error("invalid block range params")] + InvalidBlockRange, + /// Block range extends beyond current head + #[error("block range extends beyond current head block")] + BlockRangeExceedsHead, } #[cfg(test)] @@ -184,44 +195,73 @@ mod tests { let from = 14000000u64; let to = 14000100u64; let info = ChainInfo { best_number: 15000000, ..Default::default() }; - let range = get_filter_block_range(Some(from), Some(to), info.best_number, info); + let range = get_filter_block_range(Some(from), Some(to), info.best_number, info).unwrap(); assert_eq!(range, (from, to)); } - #[test] - fn test_log_range_higher() { - let from = 15000001u64; - let to = 15000002u64; - let info = ChainInfo { best_number: 15000000, ..Default::default() }; - let range = get_filter_block_range(Some(from), Some(to), info.best_number, info); - assert_eq!(range, (info.best_number, info.best_number)); - } - #[test] fn test_log_range_from() { let from = 14000000u64; let info = ChainInfo { best_number: 15000000, ..Default::default() }; - let range = get_filter_block_range(Some(from), None, info.best_number, info); + let range = get_filter_block_range(Some(from), None, 0, info).unwrap(); assert_eq!(range, (from, info.best_number)); } #[test] fn test_log_range_to() { let to = 14000000u64; + let start_block = 0u64; let info = ChainInfo { best_number: 15000000, ..Default::default() }; - let range = get_filter_block_range(None, Some(to), info.best_number, info); - assert_eq!(range, (info.best_number, to)); + let range = get_filter_block_range(None, Some(to), start_block, info).unwrap(); + assert_eq!(range, (start_block, to)); + } + + #[test] + fn test_log_range_higher_error() { + // Range extends beyond head -> should error instead of clamping + let from = 15000001u64; + let to = 15000002u64; + let info = ChainInfo { best_number: 15000000, ..Default::default() }; + let err = get_filter_block_range(Some(from), Some(to), info.best_number, info).unwrap_err(); + assert_eq!(err, FilterBlockRangeError::BlockRangeExceedsHead); + } + + 
#[test] + fn test_log_range_to_below_start_error() { + // to_block < start_block, default from -> invalid range + let to = 14000000u64; + let info = ChainInfo { best_number: 15000000, ..Default::default() }; + let err = get_filter_block_range(None, Some(to), info.best_number, info).unwrap_err(); + assert_eq!(err, FilterBlockRangeError::InvalidBlockRange); } #[test] fn test_log_range_empty() { let info = ChainInfo { best_number: 15000000, ..Default::default() }; - let range = get_filter_block_range(None, None, info.best_number, info); + let range = get_filter_block_range(None, None, info.best_number, info).unwrap(); // no range given -> head assert_eq!(range, (info.best_number, info.best_number)); } + #[test] + fn test_invalid_block_range_error() { + let from = 100; + let to = 50; + let info = ChainInfo { best_number: 150, ..Default::default() }; + let err = get_filter_block_range(Some(from), Some(to), 0, info).unwrap_err(); + assert_eq!(err, FilterBlockRangeError::InvalidBlockRange); + } + + #[test] + fn test_block_range_exceeds_head_error() { + let from = 100; + let to = 200; + let info = ChainInfo { best_number: 150, ..Default::default() }; + let err = get_filter_block_range(Some(from), Some(to), 0, info).unwrap_err(); + assert_eq!(err, FilterBlockRangeError::BlockRangeExceedsHead); + } + #[test] fn parse_log_from_only() { let s = r#"{"fromBlock":"0xf47a42","address":["0x7de93682b9b5d80d45cd371f7a14f74d49b0914c","0x0f00392fcb466c0e4e4310d81b941e07b4d5a079","0xebf67ab8cff336d3f609127e8bbf8bd6dd93cd81"],"topics":["0x0559884fd3a460db3073b7fc896cc77986f16e378210ded43186175bf646fc5f"]}"#; @@ -242,7 +282,8 @@ mod tests { to_block.and_then(alloy_rpc_types_eth::BlockNumberOrTag::as_number), start_block, info, - ); + ) + .unwrap(); assert_eq!(from_block_number, 16022082); assert_eq!(to_block_number, best_number); } diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index 45f50ea82c..3150fffdc5 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -4,17 +4,18 @@ use std::{sync::Arc, time::Instant}; -use crate::block::BlockAndReceipts; -use alloy_consensus::BlockHeader; +use crate::{block::BlockAndReceipts, utils::calculate_gas_used_and_next_log_index}; +use alloy_consensus::{BlockHeader, TxReceipt}; use alloy_eips::{BlockId, BlockNumberOrTag}; -use alloy_primitives::{BlockHash, B256}; +use alloy_primitives::{BlockHash, TxHash, B256}; use derive_more::Constructor; use reth_chain_state::{BlockState, ExecutedBlock}; use reth_ethereum_primitives::Receipt; use reth_evm::{ConfigureEvm, EvmEnvFor}; use reth_primitives_traits::{ - Block, BlockTy, NodePrimitives, ReceiptTy, RecoveredBlock, SealedHeader, + Block, BlockTy, IndexedTx, NodePrimitives, ReceiptTy, RecoveredBlock, SealedHeader, }; +use reth_rpc_convert::{transaction::ConvertReceiptInput, RpcConvert, RpcTypes}; /// Configured [`reth_evm::EvmEnv`] for a pending block. #[derive(Debug, Clone, Constructor)] @@ -129,6 +130,52 @@ impl PendingBlock { pub fn parent_hash(&self) -> BlockHash { self.executed_block.recovered_block().parent_hash() } + + /// Finds a transaction by hash and returns it along with its corresponding receipt. + /// + /// Returns `None` if the transaction is not found in this block. 
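// Hypothetical handler sketch: a receipt lookup can consult the locally built pending
// block before falling back to storage. `pending_block()` is an assumed accessor here;
// `converter()` is the renamed `EthApiTypes` accessor, and the helper yields `None`
// when the hash is not part of the pending block.
if let Some(pending) = self.pending_block() {
    if let Some(receipt) =
        pending.find_and_convert_transaction_receipt(tx_hash, self.converter()).transpose()?
    {
        return Ok(Some(receipt));
    }
}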
+ pub fn find_transaction_and_receipt_by_hash( + &self, + tx_hash: TxHash, + ) -> Option<(IndexedTx<'_, N::Block>, &N::Receipt)> { + let indexed_tx = self.executed_block.recovered_block().find_indexed(tx_hash)?; + let receipt = self.receipts.get(indexed_tx.index())?; + Some((indexed_tx, receipt)) + } + + /// Returns the rpc transaction receipt for the given transaction hash if it exists. + /// + /// This uses the given converter to turn [`Self::find_transaction_and_receipt_by_hash`] into + /// the rpc format. + pub fn find_and_convert_transaction_receipt( + &self, + tx_hash: TxHash, + converter: &C, + ) -> Option::Receipt, C::Error>> + where + C: RpcConvert, + { + let (tx, receipt) = self.find_transaction_and_receipt_by_hash(tx_hash)?; + let meta = tx.meta(); + let all_receipts = &self.receipts; + + let (gas_used, next_log_index) = + calculate_gas_used_and_next_log_index(meta.index, all_receipts); + + converter + .convert_receipts_with_block( + vec![ConvertReceiptInput { + tx: tx.recovered_tx(), + gas_used: receipt.cumulative_gas_used() - gas_used, + receipt: receipt.clone(), + next_log_index, + meta, + }], + self.executed_block.sealed_block(), + ) + .map(|mut receipts| receipts.pop()) + .transpose() + } } impl From> for BlockState { diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index ec63443da3..7f122723fa 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -1,11 +1,8 @@ //! Utilities for serving `eth_simulateV1` use crate::{ - error::{ - api::{FromEthApiError, FromEvmHalt}, - ToRpcError, - }, - EthApiError, RevertError, + error::{api::FromEthApiError, FromEvmError, ToRpcError}, + EthApiError, }; use alloy_consensus::{transaction::TxHashRef, BlockHeader, Transaction as _}; use alloy_eips::eip2718::WithEncoded; @@ -17,7 +14,7 @@ use alloy_rpc_types_eth::{ use jsonrpsee_types::ErrorObject; use reth_evm::{ execute::{BlockBuilder, BlockBuilderOutcome, BlockExecutor}, - Evm, + Evm, HaltReasonFor, }; use reth_primitives_traits::{BlockBody as _, BlockTy, NodePrimitives, Recovered, RecoveredBlock}; use reth_rpc_convert::{RpcBlock, RpcConvert, RpcTxReq}; @@ -26,7 +23,7 @@ use reth_storage_api::noop::NoopProvider; use revm::{ context::Block, context_interface::result::ExecutionResult, - primitives::{Address, Bytes, TxKind}, + primitives::{Address, Bytes, TxKind, U256}, Database, }; @@ -39,12 +36,67 @@ pub enum EthSimulateError { /// Max gas limit for entire operation exceeded. #[error("Client adjustable limit reached")] GasLimitReached, + /// Block number in sequence did not increase. + #[error("Block number in sequence did not increase")] + BlockNumberInvalid, + /// Block timestamp in sequence did not increase or stay the same. + #[error("Block timestamp in sequence did not increase")] + BlockTimestampInvalid, + /// Transaction nonce is too low. + #[error("nonce too low: next nonce {state}, tx nonce {tx}")] + NonceTooLow { + /// Transaction nonce. + tx: u64, + /// Current state nonce. + state: u64, + }, + /// Transaction nonce is too high. + #[error("nonce too high")] + NonceTooHigh, + /// Transaction's baseFeePerGas is too low. + #[error("max fee per gas less than block base fee")] + BaseFeePerGasTooLow, + /// Not enough gas provided to pay for intrinsic gas. + #[error("intrinsic gas too low")] + IntrinsicGasTooLow, + /// Insufficient funds to pay for gas fees and value. 
+ #[error("insufficient funds for gas * price + value: have {balance} want {cost}")] + InsufficientFunds { + /// Transaction cost. + cost: U256, + /// Sender balance. + balance: U256, + }, + /// Sender is not an EOA. + #[error("sender is not an EOA")] + SenderNotEOA, + /// Max init code size exceeded. + #[error("max initcode size exceeded")] + MaxInitCodeSizeExceeded, + /// `MovePrecompileToAddress` referenced itself in replacement. + #[error("MovePrecompileToAddress referenced itself")] + PrecompileSelfReference, + /// Multiple `MovePrecompileToAddress` referencing the same address. + #[error("Multiple MovePrecompileToAddress referencing the same address")] + PrecompileDuplicateAddress, } impl EthSimulateError { - const fn error_code(&self) -> i32 { + /// Returns the JSON-RPC error code for a `eth_simulateV1` error. + pub const fn error_code(&self) -> i32 { match self { + Self::NonceTooLow { .. } => -38010, + Self::NonceTooHigh => -38011, + Self::BaseFeePerGasTooLow => -38012, + Self::IntrinsicGasTooLow => -38013, + Self::InsufficientFunds { .. } => -38014, Self::BlockGasLimitExceeded => -38015, + Self::BlockNumberInvalid => -38020, + Self::BlockTimestampInvalid => -38021, + Self::PrecompileSelfReference => -38022, + Self::PrecompileDuplicateAddress => -38023, + Self::SenderNotEOA => -38024, + Self::MaxInitCodeSizeExceeded => -38025, Self::GasLimitReached => -38026, } } @@ -68,7 +120,7 @@ pub fn execute_transactions( calls: Vec>, default_gas_limit: u64, chain_id: u64, - tx_resp_builder: &T, + converter: &T, ) -> Result< ( BlockBuilderOutcome, @@ -92,7 +144,7 @@ where builder.evm().block().basefee(), chain_id, builder.evm_mut().db_mut(), - tx_resp_builder, + converter, )?; // Create transaction with an empty envelope. // The effect for a layer-2 execution client is that it does not charge L1 cost. @@ -120,7 +172,7 @@ pub fn resolve_transaction( block_base_fee_per_gas: u64, chain_id: u64, db: &mut DB, - tx_resp_builder: &T, + converter: &T, ) -> Result, EthApiError> where DB::Error: Into, @@ -178,22 +230,26 @@ where } } - let tx = tx_resp_builder - .build_simulate_v1_transaction(tx) - .map_err(|e| EthApiError::other(e.into()))?; + let tx = + converter.build_simulate_v1_transaction(tx).map_err(|e| EthApiError::other(e.into()))?; Ok(Recovered::new_unchecked(tx, from)) } /// Handles outputs of the calls execution and builds a [`SimulatedBlock`]. 
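// Small sketch of how the simulate-specific codes are meant to compose (with
// `AsEthApiError` in scope; the fallback value is an assumption for illustration):
// prefer the eth_simulateV1 code, e.g. -38014 for insufficient funds, and fall back to
// a generic server error otherwise.
fn simulate_error_code(err: &EthApiError) -> i32 {
    err.as_simulate_error().map(|e| e.error_code()).unwrap_or(-32000)
}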
-pub fn build_simulated_block( +pub fn build_simulated_block( block: RecoveredBlock>, - results: Vec>, + results: Vec>>, txs_kind: BlockTransactionsKind, - tx_resp_builder: &T, -) -> Result>, T::Error> + converter: &T, +) -> Result>, Err> where - T: RpcConvert>, + Err: std::error::Error + + FromEthApiError + + FromEvmError + + From + + Into>, + T: RpcConvert, { let mut calls: Vec = Vec::with_capacity(results.len()); @@ -201,7 +257,7 @@ where for (index, (result, tx)) in results.into_iter().zip(block.body().transactions()).enumerate() { let call = match result { ExecutionResult::Halt { reason, gas_used } => { - let error = T::Error::from_evm_halt(reason, tx.gas_limit()); + let error = Err::from_evm_halt(reason, tx.gas_limit()); SimCallResult { return_data: Bytes::new(), error: Some(SimulateError { @@ -214,12 +270,12 @@ where } } ExecutionResult::Revert { output, gas_used } => { - let error = RevertError::new(output.clone()); + let error = Err::from_revert(output.clone()); SimCallResult { return_data: output, error: Some(SimulateError { - code: error.error_code(), message: error.to_string(), + code: error.into().code(), }), gas_used, status: false, @@ -254,8 +310,8 @@ where let block = block.into_rpc_block( txs_kind, - |tx, tx_info| tx_resp_builder.fill(tx, tx_info), - |header, size| tx_resp_builder.convert_header(header, size), + |tx, tx_info| converter.fill(tx, tx_info), + |header, size| converter.convert_header(header, size), )?; Ok(SimulatedBlock { inner: block, calls }) } diff --git a/crates/rpc/rpc-eth-types/src/transaction.rs b/crates/rpc/rpc-eth-types/src/transaction.rs index 3d099f0118..de3323d61e 100644 --- a/crates/rpc/rpc-eth-types/src/transaction.rs +++ b/crates/rpc/rpc-eth-types/src/transaction.rs @@ -2,21 +2,11 @@ //! //! Transaction wrapper that labels transaction with its origin. -use alloy_primitives::{Bytes, B256}; +use alloy_primitives::B256; use alloy_rpc_types_eth::TransactionInfo; use reth_ethereum_primitives::TransactionSigned; use reth_primitives_traits::{NodePrimitives, Recovered, SignedTransaction}; use reth_rpc_convert::{RpcConvert, RpcTransaction}; -use serde::{Deserialize, Serialize}; - -/// Response type for `eth_fillTransaction` RPC method. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct FillTransactionResult { - /// RLP-encoded transaction bytes - pub raw: Bytes, - /// Filled transaction object - pub tx: T, -} /// Represents from where a transaction was fetched. #[derive(Debug, Clone, Eq, PartialEq)] diff --git a/crates/rpc/rpc-eth-types/src/utils.rs b/crates/rpc/rpc-eth-types/src/utils.rs index 69f9833af5..5df161d33b 100644 --- a/crates/rpc/rpc-eth-types/src/utils.rs +++ b/crates/rpc/rpc-eth-types/src/utils.rs @@ -1,9 +1,28 @@ //! Commonly used code snippets use super::{EthApiError, EthResult}; +use alloy_consensus::TxReceipt; use reth_primitives_traits::{Recovered, SignedTransaction}; use std::future::Future; +/// Calculates the gas used and next log index for a transaction at the given index +pub fn calculate_gas_used_and_next_log_index( + tx_index: u64, + all_receipts: &[impl TxReceipt], +) -> (u64, usize) { + let mut gas_used = 0; + let mut next_log_index = 0; + + if tx_index > 0 { + for receipt in all_receipts.iter().take(tx_index as usize) { + gas_used = receipt.cumulative_gas_used(); + next_log_index += receipt.logs().len(); + } + } + + (gas_used, next_log_index) +} + /// Recovers a [`SignedTransaction`] from an enveloped encoded byte stream. 
/// /// This is a helper function that returns the appropriate RPC-specific error if the input data is @@ -64,10 +83,10 @@ where /// Calculates the blob gas used ratio for a block, accounting for the case where /// `max_blob_gas_per_block` is zero. /// -/// Returns `0.0` if `blob_gas_used` is `0`, otherwise returns the ratio +/// Returns `0.0` if `max_blob_gas_per_block` is `0`, otherwise returns the ratio /// `blob_gas_used/max_blob_gas_per_block`. pub fn checked_blob_gas_used_ratio(blob_gas_used: u64, max_blob_gas_per_block: u64) -> f64 { - if blob_gas_used == 0 { + if max_blob_gas_per_block == 0 { 0.0 } else { blob_gas_used as f64 / max_blob_gas_per_block as f64 @@ -105,6 +124,8 @@ mod tests { fn test_checked_blob_gas_used_ratio() { // No blob gas used, max blob gas per block is 0 assert_eq!(checked_blob_gas_used_ratio(0, 0), 0.0); + // Blob gas used is non-zero, max blob gas per block is 0 (division by zero protection) + assert_eq!(checked_blob_gas_used_ratio(50, 0), 0.0); // Blob gas used is zero, max blob gas per block is non-zero assert_eq!(checked_blob_gas_used_ratio(0, 100), 0.0); // Blob gas used is non-zero, max blob gas per block is non-zero diff --git a/crates/rpc/rpc-server-types/src/constants.rs b/crates/rpc/rpc-server-types/src/constants.rs index 8861af7b54..acf5294fe9 100644 --- a/crates/rpc/rpc-server-types/src/constants.rs +++ b/crates/rpc/rpc-server-types/src/constants.rs @@ -18,6 +18,20 @@ pub const DEFAULT_MAX_LOGS_PER_RESPONSE: usize = 20_000; /// The default maximum number of blocks for `trace_filter` requests. pub const DEFAULT_MAX_TRACE_FILTER_BLOCKS: u64 = 100; +/// Setting for how many concurrent (heavier) _blocking_ IO requests are allowed. +/// +/// What is considered a blocking IO request can depend on the RPC method. In general anything that +/// requires IO is considered blocking and should be spawned as blocking. This setting is however, +/// primarily intended for heavier blocking requests that require evm execution for example, +/// `eth_call` and alike. This is intended to be used with a semaphore that must be acquired before +/// a new task is spawned to avoid unnecessary pooling if the number of inflight requests exceeds +/// the available threads in the pool. +/// +/// tokio's blocking pool, has a default of 512 and could grow unbounded, since requests like +/// `eth_call` also require a lot of cpu which will occupy the thread, we can set this to a lower +/// value. +pub const DEFAULT_MAX_BLOCKING_IO_REQUEST: usize = 256; + /// The default maximum number tracing requests we're allowing concurrently. /// Tracing is mostly CPU bound so we're limiting the number of concurrent requests to something /// lower that the number of cores, in order to minimize the impact on the rest of the system. 
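// Self-contained sketch of the pattern this constant is sized for (the names below are
// hypothetical, not the reth API): a semaphore with DEFAULT_MAX_BLOCKING_IO_REQUEST
// permits is acquired before each EVM-heavy request is spawned onto tokio's blocking
// pool, so excess requests wait on the semaphore instead of piling up in the pool's
// queue.
use std::sync::Arc;
use tokio::sync::Semaphore;

async fn run_gated<T: Send + 'static>(
    limiter: Arc<Semaphore>,
    work: impl FnOnce() -> T + Send + 'static,
) -> T {
    // Hold the permit for the lifetime of the blocking task.
    let permit = limiter.acquire_owned().await.expect("semaphore closed");
    tokio::task::spawn_blocking(move || {
        let _permit = permit;
        work()
    })
    .await
    .expect("blocking task panicked")
}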
diff --git a/crates/rpc/rpc-server-types/src/module.rs b/crates/rpc/rpc-server-types/src/module.rs index db9268d5d6..d41e10bb73 100644 --- a/crates/rpc/rpc-server-types/src/module.rs +++ b/crates/rpc/rpc-server-types/src/module.rs @@ -240,7 +240,7 @@ impl From> for RpcModuleSelection { impl From<[RethRpcModule; N]> for RpcModuleSelection { fn from(s: [RethRpcModule; N]) -> Self { - Self::Selection(s.iter().cloned().collect()) + Self::Selection(s.into_iter().collect()) } } @@ -323,6 +323,8 @@ pub enum RethRpcModule { Miner, /// `mev_` module Mev, + /// `testing_` module + Testing, /// Custom RPC module not part of the standard set #[strum(default)] #[serde(untagged)] @@ -347,6 +349,7 @@ impl RethRpcModule { Self::Flashbots, Self::Miner, Self::Mev, + Self::Testing, ]; /// Returns the number of standard variants (excludes Other) @@ -406,6 +409,7 @@ impl AsRef for RethRpcModule { Self::Flashbots => "flashbots", Self::Miner => "miner", Self::Mev => "mev", + Self::Testing => "testing", } } } @@ -428,6 +432,7 @@ impl FromStr for RethRpcModule { "flashbots" => Self::Flashbots, "miner" => Self::Miner, "mev" => Self::Mev, + "testing" => Self::Testing, // Any unknown module becomes Other other => Self::Other(other.to_string()), }) diff --git a/crates/rpc/rpc-server-types/src/result.rs b/crates/rpc/rpc-server-types/src/result.rs index 769f1cd9c0..e2170dcfe0 100644 --- a/crates/rpc/rpc-server-types/src/result.rs +++ b/crates/rpc/rpc-server-types/src/result.rs @@ -65,7 +65,7 @@ macro_rules! impl_to_rpc_result { } #[inline] - fn map_internal_err<'a, F, M>(self, op: F) -> jsonrpsee_core::RpcResult + fn map_internal_err(self, op: F) -> jsonrpsee_core::RpcResult where F: FnOnce($err) -> M, M: Into, diff --git a/crates/rpc/rpc-testing-util/src/trace.rs b/crates/rpc/rpc-testing-util/src/trace.rs index 8f71d1c455..79aceebe06 100644 --- a/crates/rpc/rpc-testing-util/src/trace.rs +++ b/crates/rpc/rpc-testing-util/src/trace.rs @@ -71,7 +71,7 @@ pub trait TraceApiExt { /// Returns a new stream that yields the traces the opcodes for the given blocks. /// - /// See also [`StreamExt::buffered`]. + /// See also [`StreamExt::buffer_unordered`]. 
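// Self-contained illustration of the `buffered` -> `buffer_unordered` swap referenced
// above: `buffered` preserves input order, while `buffer_unordered` yields results in
// completion order, which is why the opcode-gas stream makes no ordering guarantee.
use futures::{stream, StreamExt};

async fn demo() {
    // Up to two requests in flight; output order follows completion order, not 1, 2, 3.
    let out: Vec<u64> = stream::iter(1u64..=3)
        .map(|i| async move { i })
        .buffer_unordered(2)
        .collect()
        .await;
    assert_eq!(out.len(), 3);
}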
fn trace_block_opcode_gas_unordered( &self, params: I, @@ -301,7 +301,7 @@ impl + Sync> TraceApiExt for T { Err(err) => Err((err, block)), } })) - .buffered(n); + .buffer_unordered(n); TraceBlockOpcodeGasStream { stream: Box::pin(stream) } } @@ -583,6 +583,7 @@ mod tests { use super::*; use alloy_eips::BlockNumberOrTag; use alloy_rpc_types_trace::filter::TraceFilterMode; + use futures::future::join; use jsonrpsee::http_client::HttpClientBuilder; const fn assert_is_stream(_: &St) {} @@ -748,4 +749,41 @@ mod tests { assert_is_stream(&stream); let _opcodes = stream.next().await.unwrap(); } + + #[tokio::test(flavor = "multi_thread")] + #[ignore] + async fn compare_block_stream() { + let client_a = HttpClientBuilder::default().build("http://localhost:8545").unwrap(); + let client_b = HttpClientBuilder::default().build("http://localhost:8544").unwrap(); + let blocks = 0u64..=1681464; + let mut stream_a = client_a.trace_block_buffered(blocks.clone(), 2); + let mut stream_b = client_b.trace_block_buffered(blocks, 2); + + let mut count = 0; + loop { + let (res_a, res_b) = join(stream_a.next(), stream_b.next()).await; + + if res_a.is_none() && res_b.is_none() { + break; + } + + match (res_a, res_b) { + (Some(Ok(res_a)), Some(Ok(res_b))) => { + if res_a != res_b { + println!("Received different trace results: {res_a:?}, res_b: {res_b:?}"); + } + } + (res_a, res_b) => { + println!("Received different responses: {res_a:?}, res_b: {res_b:?}"); + } + } + + if count % 1000 == 0 { + println!("Blocks traced: {count}"); + } + + count += 1; + } + println!("Total blocks traced: {count}"); + } } diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index e028e47448..80c89e6027 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -38,6 +38,8 @@ reth-rpc-server-types.workspace = true reth-network-types.workspace = true reth-consensus.workspace = true reth-consensus-common.workspace = true +reth-ethereum-primitives.workspace = true +reth-ethereum-engine-primitives.workspace = true reth-node-api.workspace = true reth-trie-common.workspace = true @@ -63,7 +65,7 @@ alloy-rpc-types-txpool.workspace = true alloy-rpc-types-admin.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["kzg"] } alloy-serde.workspace = true -revm = { workspace = true, features = ["optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee"] } +revm = { workspace = true, features = ["optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee", "memory_limit"] } revm-primitives = { workspace = true, features = ["serde"] } # rpc @@ -106,4 +108,8 @@ rand.workspace = true jsonrpsee = { workspace = true, features = ["client"] } [features] -js-tracer = ["revm-inspectors/js-tracer", "reth-rpc-eth-types/js-tracer"] +js-tracer = [ + "revm-inspectors/js-tracer", + "reth-rpc-eth-types/js-tracer", + "reth-rpc-eth-api/js-tracer", +] diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 99b37a09d9..29afd85757 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -5,58 +5,84 @@ use alloy_consensus::{ use alloy_eips::{eip2718::Encodable2718, BlockId, BlockNumberOrTag}; use alloy_evm::env::BlockEnvironment; use alloy_genesis::ChainConfig; -use alloy_primitives::{uint, Address, Bytes, B256}; +use alloy_primitives::{hex::decode, uint, Address, Bytes, B256}; use alloy_rlp::{Decodable, Encodable}; +use alloy_rpc_types::BlockTransactionsKind; use alloy_rpc_types_debug::ExecutionWitness; -use alloy_rpc_types_eth::{ - state::EvmOverrides, Block as 
RpcBlock, BlockError, Bundle, StateContext, TransactionInfo, -}; +use alloy_rpc_types_eth::{state::EvmOverrides, BlockError, Bundle, StateContext}; use alloy_rpc_types_trace::geth::{ - call::FlatCallFrame, BlockTraceResult, FourByteFrame, GethDebugBuiltInTracerType, - GethDebugTracerType, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, - NoopFrame, TraceResult, + BlockTraceResult, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, TraceResult, }; use async_trait::async_trait; +use futures::Stream; use jsonrpsee::core::RpcResult; +use parking_lot::RwLock; use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; +use reth_engine_primitives::ConsensusEngineEvent; use reth_errors::RethError; -use reth_evm::{execute::Executor, ConfigureEvm, EvmEnvFor, TxEnvFor}; -use reth_primitives_traits::{Block as _, BlockBody, ReceiptWithBloom, RecoveredBlock}; -use reth_revm::{database::StateProviderDatabase, db::State, witness::ExecutionWitnessRecord}; +use reth_evm::{execute::Executor, ConfigureEvm, EvmEnvFor}; +use reth_primitives_traits::{ + Block as BlockTrait, BlockBody, BlockTy, ReceiptWithBloom, RecoveredBlock, +}; +use reth_revm::{db::State, witness::ExecutionWitnessRecord}; use reth_rpc_api::DebugApiServer; use reth_rpc_convert::RpcTxReq; use reth_rpc_eth_api::{ helpers::{EthTransactions, TraceExt}, - EthApiTypes, FromEthApiError, RpcNodeCore, + FromEthApiError, RpcConvert, RpcNodeCore, }; -use reth_rpc_eth_types::{EthApiError, StateCacheDb}; +use reth_rpc_eth_types::EthApiError; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use reth_storage_api::{ BlockIdReader, BlockReaderIdExt, HeaderProvider, ProviderBlock, ReceiptProviderIdExt, StateProofProvider, StateProviderFactory, StateRootProvider, TransactionVariant, }; -use reth_tasks::pool::BlockingTaskGuard; +use reth_tasks::{pool::BlockingTaskGuard, TaskSpawner}; use reth_trie_common::{updates::TrieUpdates, HashedPostState}; -use revm::{context::Block, context_interface::Transaction, state::EvmState, DatabaseCommit}; -use revm_inspectors::tracing::{ - FourByteInspector, MuxInspector, TracingInspector, TracingInspectorConfig, TransactionContext, -}; -use std::sync::Arc; +use revm::DatabaseCommit; +use revm_inspectors::tracing::{DebugInspector, TransactionContext}; +use serde::{Deserialize, Serialize}; +use std::{collections::VecDeque, sync::Arc}; use tokio::sync::{AcquireError, OwnedSemaphorePermit}; +use tokio_stream::StreamExt; /// `debug` API implementation. /// /// This type provides the functionality for handling `debug` related requests. 
-pub struct DebugApi { +pub struct DebugApi { inner: Arc>, } -// === impl DebugApi === - -impl DebugApi { +impl DebugApi +where + Eth: RpcNodeCore, +{ /// Create a new instance of the [`DebugApi`] - pub fn new(eth_api: Eth, blocking_task_guard: BlockingTaskGuard) -> Self { - let inner = Arc::new(DebugApiInner { eth_api, blocking_task_guard }); + pub fn new( + eth_api: Eth, + blocking_task_guard: BlockingTaskGuard, + executor: impl TaskSpawner, + mut stream: impl Stream> + Send + Unpin + 'static, + ) -> Self { + let bad_block_store = BadBlockStore::default(); + let inner = Arc::new(DebugApiInner { + eth_api, + blocking_task_guard, + bad_block_store: bad_block_store.clone(), + }); + + // Spawn a task caching bad blocks + executor.spawn(Box::pin(async move { + while let Some(event) = stream.next().await { + if let ConsensusEngineEvent::InvalidBlock(block) = event && + let Ok(recovered) = + RecoveredBlock::try_recover_sealed(block.as_ref().clone()) + { + bad_block_store.insert(recovered); + } + } + })); + Self { inner } } @@ -64,9 +90,7 @@ impl DebugApi { pub fn eth_api(&self) -> &Eth { &self.inner.eth_api } -} -impl DebugApi { /// Access the underlying provider. pub fn provider(&self) -> &Eth::Provider { self.inner.eth_api.provider() @@ -77,7 +101,7 @@ impl DebugApi { impl DebugApi where - Eth: EthApiTypes + TraceExt + 'static, + Eth: TraceExt, { /// Acquires a permit to execute a tracing call. async fn acquire_trace_permit(&self) -> Result { @@ -91,43 +115,44 @@ where evm_env: EvmEnvFor, opts: GethDebugTracingOptions, ) -> Result, Eth::Error> { - // replay all transactions of the block - let this = self.clone(); self.eth_api() - .spawn_with_state_at_block(block.parent_hash().into(), move |state| { + .spawn_with_state_at_block(block.parent_hash(), move |eth_api, mut db| { let mut results = Vec::with_capacity(block.body().transactions().len()); - let mut db = - State::builder().with_database(StateProviderDatabase::new(state)).build(); - this.eth_api().apply_pre_execution_changes(&block, &mut db, &evm_env)?; + eth_api.apply_pre_execution_changes(&block, &mut db, &evm_env)?; let mut transactions = block.transactions_recovered().enumerate().peekable(); - let mut inspector = None; + let mut inspector = DebugInspector::new(opts).map_err(Eth::Error::from_eth_err)?; while let Some((index, tx)) = transactions.next() { let tx_hash = *tx.tx_hash(); + let tx_env = eth_api.evm_config().tx_env(tx); - let tx_env = this.eth_api().evm_config().tx_env(tx); - - let (result, state_changes) = this.trace_transaction( - &opts, - evm_env.clone(), - tx_env, + let res = eth_api.inspect( &mut db, - Some(TransactionContext { - block_hash: Some(block.hash()), - tx_hash: Some(tx_hash), - tx_index: Some(index), - }), + evm_env.clone(), + tx_env.clone(), &mut inspector, )?; - - inspector = inspector.map(|insp| insp.fused()); + let result = inspector + .get_result( + Some(TransactionContext { + block_hash: Some(block.hash()), + tx_hash: Some(tx_hash), + tx_index: Some(index), + }), + &tx_env, + &evm_env.block_env, + &res, + &mut db, + ) + .map_err(Eth::Error::from_eth_err)?; results.push(TraceResult::Success { result, tx_hash: Some(tx_hash) }); if transactions.peek().is_some() { + inspector.fuse().map_err(Eth::Error::from_eth_err)?; // need to apply the state changes of this transaction before executing the // next transaction - db.commit(state_changes) + db.commit(res.state) } } @@ -219,42 +244,43 @@ where let state_at: BlockId = block.parent_hash().into(); let block_hash = block.hash(); - let this = self.clone(); 
self.eth_api() - .spawn_with_state_at_block(state_at, move |state| { + .spawn_with_state_at_block(state_at, move |eth_api, mut db| { let block_txs = block.transactions_recovered(); // configure env for the target transaction let tx = transaction.into_recovered(); - let mut db = - State::builder().with_database(StateProviderDatabase::new(state)).build(); - - this.eth_api().apply_pre_execution_changes(&block, &mut db, &evm_env)?; + eth_api.apply_pre_execution_changes(&block, &mut db, &evm_env)?; // replay all transactions prior to the targeted transaction - let index = this.eth_api().replay_transactions_until( + let index = eth_api.replay_transactions_until( &mut db, evm_env.clone(), block_txs, *tx.tx_hash(), )?; - let tx_env = this.eth_api().evm_config().tx_env(&tx); + let tx_env = eth_api.evm_config().tx_env(&tx); - this.trace_transaction( - &opts, - evm_env, - tx_env, - &mut db, - Some(TransactionContext { - block_hash: Some(block_hash), - tx_index: Some(index), - tx_hash: Some(*tx.tx_hash()), - }), - &mut None, - ) - .map(|(trace, _)| trace) + let mut inspector = DebugInspector::new(opts).map_err(Eth::Error::from_eth_err)?; + let res = + eth_api.inspect(&mut db, evm_env.clone(), tx_env.clone(), &mut inspector)?; + let trace = inspector + .get_result( + Some(TransactionContext { + block_hash: Some(block_hash), + tx_index: Some(index), + tx_hash: Some(*tx.tx_hash()), + }), + &tx_env, + &evm_env.block_env, + &res, + &mut db, + ) + .map_err(Eth::Error::from_eth_err)?; + + Ok(trace) }) .await } @@ -262,6 +288,10 @@ where /// The `debug_traceCall` method lets you run an `eth_call` within the context of the given /// block execution using the final state of parent block as the base. /// + /// If `tx_index` is provided in opts, the call will be traced at the state after executing + /// transactions up to the specified index within the block (0-indexed). + /// If not provided, then uses the post-state (default behavior). + /// /// Differences compare to `eth_call`: /// - `debug_traceCall` executes with __enabled__ basefee check, `eth_call` does not: pub async fn debug_trace_call( @@ -272,217 +302,99 @@ where ) -> Result { let at = block_id.unwrap_or_default(); let GethDebugTracingCallOptions { - tracing_options, state_overrides, block_overrides, .. + tracing_options, + state_overrides, + block_overrides, + tx_index, } = opts; let overrides = EvmOverrides::new(state_overrides, block_overrides.map(Box::new)); - let GethDebugTracingOptions { config, tracer, tracer_config, .. 
} = tracing_options; - let this = self.clone(); - if let Some(tracer) = tracer { - #[allow(unreachable_patterns)] - return match tracer { - GethDebugTracerType::BuiltInTracer(tracer) => match tracer { - GethDebugBuiltInTracerType::FourByteTracer => { - let mut inspector = FourByteInspector::default(); - let inspector = self - .eth_api() - .spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| { - this.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?; - Ok(inspector) - }) - .await?; - Ok(FourByteFrame::from(&inspector).into()) - } - GethDebugBuiltInTracerType::CallTracer => { - let call_config = tracer_config - .into_call_config() - .map_err(|_| EthApiError::InvalidTracerConfig)?; - - let mut inspector = TracingInspector::new( - TracingInspectorConfig::from_geth_call_config(&call_config), - ); - - let frame = self - .eth_api() - .spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| { - let gas_limit = tx_env.gas_limit(); - let res = - this.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?; - let frame = inspector - .with_transaction_gas_limit(gas_limit) - .into_geth_builder() - .geth_call_traces(call_config, res.result.gas_used()); - Ok(frame.into()) - }) - .await?; - Ok(frame) - } - GethDebugBuiltInTracerType::PreStateTracer => { - let prestate_config = tracer_config - .into_pre_state_config() - .map_err(|_| EthApiError::InvalidTracerConfig)?; - let mut inspector = TracingInspector::new( - TracingInspectorConfig::from_geth_prestate_config(&prestate_config), - ); - - let frame = self - .eth_api() - .spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| { - // wrapper is hack to get around 'higher-ranked lifetime error', - // see - let db = db.0; - - let gas_limit = tx_env.gas_limit(); - let res = this.eth_api().inspect( - &mut *db, - evm_env, - tx_env, - &mut inspector, - )?; - let frame = inspector - .with_transaction_gas_limit(gas_limit) - .into_geth_builder() - .geth_prestate_traces(&res, &prestate_config, db) - .map_err(Eth::Error::from_eth_err)?; - Ok(frame) - }) - .await?; - Ok(frame.into()) - } - GethDebugBuiltInTracerType::NoopTracer => Ok(NoopFrame::default().into()), - GethDebugBuiltInTracerType::MuxTracer => { - let mux_config = tracer_config - .into_mux_config() - .map_err(|_| EthApiError::InvalidTracerConfig)?; - - let mut inspector = MuxInspector::try_from_config(mux_config) - .map_err(Eth::Error::from_eth_err)?; - - let frame = self - .inner - .eth_api - .spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| { - // wrapper is hack to get around 'higher-ranked lifetime error', see - // - let db = db.0; - - let tx_info = TransactionInfo { - block_number: Some(evm_env.block_env.number().saturating_to()), - base_fee: Some(evm_env.block_env.basefee()), - hash: None, - block_hash: None, - index: None, - }; - - let res = this.eth_api().inspect( - &mut *db, - evm_env, - tx_env, - &mut inspector, - )?; - let frame = inspector - .try_into_mux_frame(&res, db, tx_info) - .map_err(Eth::Error::from_eth_err)?; - Ok(frame.into()) - }) - .await?; - Ok(frame) - } - GethDebugBuiltInTracerType::FlatCallTracer => { - let flat_call_config = tracer_config - .into_flat_call_config() - .map_err(|_| EthApiError::InvalidTracerConfig)?; - - let mut inspector = TracingInspector::new( - TracingInspectorConfig::from_flat_call_config(&flat_call_config), - ); - - let frame: FlatCallFrame = self - .inner - .eth_api - .spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| { - let gas_limit = tx_env.gas_limit(); - 
this.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?; - let tx_info = TransactionInfo::default(); - let frame: FlatCallFrame = inspector - .with_transaction_gas_limit(gas_limit) - .into_parity_builder() - .into_localized_transaction_traces(tx_info); - Ok(frame) - }) - .await?; - - Ok(frame.into()) - } - _ => { - // Note: this match is non-exhaustive in case we need to add support for - // additional tracers - Err(EthApiError::Unsupported("unsupported tracer").into()) - } - }, - #[cfg(not(feature = "js-tracer"))] - GethDebugTracerType::JsTracer(_) => { - Err(EthApiError::Unsupported("JS Tracer is not enabled").into()) - } - #[cfg(feature = "js-tracer")] - GethDebugTracerType::JsTracer(code) => { - let config = tracer_config.into_json(); - - let (_, at) = self.eth_api().evm_env_at(at).await?; - - let res = self - .eth_api() - .spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| { - // wrapper is hack to get around 'higher-ranked lifetime error', see - // - let db = db.0; - - let mut inspector = - revm_inspectors::tracing::js::JsInspector::new(code, config) - .map_err(Eth::Error::from_eth_err)?; - let res = this.eth_api().inspect( - &mut *db, - evm_env.clone(), - tx_env.clone(), - &mut inspector, - )?; - inspector - .json_result(res, &tx_env, &evm_env.block_env, db) - .map_err(Eth::Error::from_eth_err) - }) - .await?; - - Ok(GethTrace::JS(res)) - } - _ => { - // Note: this match is non-exhaustive in case we need to add support for - // additional tracers - Err(EthApiError::Unsupported("unsupported tracer").into()) - } - } + // Check if we need to replay transactions for a specific tx_index + if let Some(tx_idx) = tx_index { + return self + .debug_trace_call_at_tx_index(call, at, tx_idx as usize, tracing_options, overrides) + .await; } - // default structlog tracer - let inspector_config = TracingInspectorConfig::from_geth_config(&config); - - let mut inspector = TracingInspector::new(inspector_config); - - let (res, tx_gas_limit, inspector) = self - .eth_api() + let this = self.clone(); + self.eth_api() .spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| { - let gas_limit = tx_env.gas_limit(); - let res = this.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?; - Ok((res, gas_limit, inspector)) + let mut inspector = + DebugInspector::new(tracing_options).map_err(Eth::Error::from_eth_err)?; + let res = this.eth_api().inspect( + &mut *db, + evm_env.clone(), + tx_env.clone(), + &mut inspector, + )?; + let trace = inspector + .get_result(None, &tx_env, &evm_env.block_env, &res, db) + .map_err(Eth::Error::from_eth_err)?; + Ok(trace) }) - .await?; - let gas_used = res.result.gas_used(); - let return_value = res.result.into_output().unwrap_or_default(); - let frame = inspector - .with_transaction_gas_limit(tx_gas_limit) - .into_geth_builder() - .geth_traces(gas_used, return_value, config); + .await + } - Ok(frame.into()) + /// Helper method to execute `debug_trace_call` at a specific transaction index within a block. + /// This replays transactions up to the specified index, then executes the trace call in that + /// state. + async fn debug_trace_call_at_tx_index( + &self, + call: RpcTxReq, + block_id: BlockId, + tx_index: usize, + tracing_options: GethDebugTracingOptions, + overrides: EvmOverrides, + ) -> Result { + // Get the target block to check transaction count + let block = self + .eth_api() + .recovered_block(block_id) + .await? 
+ .ok_or(EthApiError::HeaderNotFound(block_id))?; + + if tx_index >= block.transaction_count() { + // tx_index out of bounds + return Err(EthApiError::InvalidParams(format!( + "tx_index {} out of bounds for block with {} transactions", + tx_index, + block.transaction_count() + )) + .into()) + } + + let (evm_env, _) = self.eth_api().evm_env_at(block.hash().into()).await?; + + // execute after the parent block, replaying `tx_index` transactions + let state_at = block.parent_hash(); + + self.eth_api() + .spawn_with_state_at_block(state_at, move |eth_api, mut db| { + // 1. apply pre-execution changes + eth_api.apply_pre_execution_changes(&block, &mut db, &evm_env)?; + + // 2. replay the required number of transactions + for tx in block.transactions_recovered().take(tx_index) { + let tx_env = eth_api.evm_config().tx_env(tx); + let res = eth_api.transact(&mut db, evm_env.clone(), tx_env)?; + db.commit(res.state); + } + + // 3. now execute the trace call on this state + let (evm_env, tx_env) = + eth_api.prepare_call_env(evm_env, call, &mut db, overrides)?; + + let mut inspector = + DebugInspector::new(tracing_options).map_err(Eth::Error::from_eth_err)?; + let res = + eth_api.inspect(&mut db, evm_env.clone(), tx_env.clone(), &mut inspector)?; + let trace = inspector + .get_result(None, &tx_env, &evm_env.block_env, &res, &mut db) + .map_err(Eth::Error::from_eth_err)?; + + Ok(trace) + }) + .await } /// The `debug_traceCallMany` method lets you run an `eth_callMany` within the context of the @@ -527,14 +439,10 @@ where replay_block_txs = false; } - let this = self.clone(); - self.eth_api() - .spawn_with_state_at_block(at.into(), move |state| { + .spawn_with_state_at_block(at, move |eth_api, mut db| { // the outer vec for the bundles let mut all_bundles = Vec::with_capacity(bundles.len()); - let mut db = - State::builder().with_database(StateProviderDatabase::new(state)).build(); if replay_block_txs { // only need to replay the transactions in the block if not all transactions are @@ -543,20 +451,21 @@ where // Execute all transactions until index for tx in transactions { - let tx_env = this.eth_api().evm_config().tx_env(tx); - let res = this.eth_api().transact(&mut db, evm_env.clone(), tx_env)?; + let tx_env = eth_api.evm_config().tx_env(tx); + let res = eth_api.transact(&mut db, evm_env.clone(), tx_env)?; db.commit(res.state); } } // Trace all bundles let mut bundles = bundles.into_iter().peekable(); + let mut inspector = DebugInspector::new(tracing_options.clone()) + .map_err(Eth::Error::from_eth_err)?; while let Some(bundle) = bundles.next() { let mut results = Vec::with_capacity(bundle.transactions.len()); let Bundle { transactions, block_override } = bundle; let block_overrides = block_override.map(Box::new); - let mut inspector = None; let mut transactions = transactions.into_iter().peekable(); while let Some(tx) = transactions.next() { @@ -564,28 +473,24 @@ where let state_overrides = state_overrides.take(); let overrides = EvmOverrides::new(state_overrides, block_overrides.clone()); - let (evm_env, tx_env) = this.eth_api().prepare_call_env( - evm_env.clone(), - tx, - &mut db, - overrides, - )?; + let (evm_env, tx_env) = + eth_api.prepare_call_env(evm_env.clone(), tx, &mut db, overrides)?; - let (trace, state) = this.trace_transaction( - &tracing_options, - evm_env, - tx_env, + let res = eth_api.inspect( &mut db, - None, + evm_env.clone(), + tx_env.clone(), &mut inspector, )?; - - inspector = inspector.map(|insp| insp.fused()); + let trace = inspector + .get_result(None, &tx_env, 
&evm_env.block_env, &res, &mut db) + .map_err(Eth::Error::from_eth_err)?; // If there is more transactions, commit the database // If there is no transactions, but more bundles, commit to the database too if transactions.peek().is_some() || bundles.peek().is_some() { - db.commit(state); + inspector.fuse().map_err(Eth::Error::from_eth_err)?; + db.commit(res.state); } results.push(trace); } @@ -639,14 +544,12 @@ where &self, block: Arc>>, ) -> Result { - let this = self.clone(); let block_number = block.header().number(); let (mut exec_witness, lowest_block_number) = self .eth_api() - .spawn_with_state_at_block(block.parent_hash().into(), move |state_provider| { - let db = StateProviderDatabase::new(&state_provider); - let block_executor = this.eth_api().evm_config().executor(db); + .spawn_with_state_at_block(block.parent_hash(), move |eth_api, mut db| { + let block_executor = eth_api.evm_config().executor(&mut db); let mut witness_record = ExecutionWitnessRecord::default(); @@ -659,7 +562,9 @@ where let ExecutionWitnessRecord { hashed_state, codes, keys, lowest_block_number } = witness_record; - let state = state_provider + let state = db + .database + .0 .witness(Default::default(), hashed_state) .map_err(EthApiError::from)?; Ok(( @@ -710,191 +615,6 @@ where .map(|b| b.original_bytes())) } - /// Executes the configured transaction with the environment on the given database. - /// - /// It optionally takes fused inspector ([`TracingInspector::fused`]) to avoid re-creating the - /// inspector for each transaction. This is useful when tracing multiple transactions in a - /// block. This is only useful for block tracing which uses the same tracer for all transactions - /// in the block. - /// - /// Caution: If the inspector is provided then `opts.tracer_config` is ignored. - /// - /// Returns the trace frame and the state that got updated after executing the transaction. - /// - /// Note: this does not apply any state overrides if they're configured in the `opts`. - /// - /// Caution: this is blocking and should be performed on a blocking task. - fn trace_transaction( - &self, - opts: &GethDebugTracingOptions, - evm_env: EvmEnvFor, - tx_env: TxEnvFor, - db: &mut StateCacheDb<'_>, - transaction_context: Option, - fused_inspector: &mut Option, - ) -> Result<(GethTrace, EvmState), Eth::Error> { - let GethDebugTracingOptions { config, tracer, tracer_config, .. 
} = opts; - - let tx_info = TransactionInfo { - hash: transaction_context.as_ref().map(|c| c.tx_hash).unwrap_or_default(), - index: transaction_context - .as_ref() - .map(|c| c.tx_index.map(|i| i as u64)) - .unwrap_or_default(), - block_hash: transaction_context.as_ref().map(|c| c.block_hash).unwrap_or_default(), - block_number: Some(evm_env.block_env.number().saturating_to()), - base_fee: Some(evm_env.block_env.basefee()), - }; - - if let Some(tracer) = tracer { - #[allow(unreachable_patterns)] - return match tracer { - GethDebugTracerType::BuiltInTracer(tracer) => match tracer { - GethDebugBuiltInTracerType::FourByteTracer => { - let mut inspector = FourByteInspector::default(); - let res = self.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?; - return Ok((FourByteFrame::from(&inspector).into(), res.state)) - } - GethDebugBuiltInTracerType::CallTracer => { - let call_config = tracer_config - .clone() - .into_call_config() - .map_err(|_| EthApiError::InvalidTracerConfig)?; - - let mut inspector = fused_inspector.get_or_insert_with(|| { - TracingInspector::new(TracingInspectorConfig::from_geth_call_config( - &call_config, - )) - }); - - let gas_limit = tx_env.gas_limit(); - let res = self.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?; - - inspector.set_transaction_gas_limit(gas_limit); - - let frame = inspector - .geth_builder() - .geth_call_traces(call_config, res.result.gas_used()); - - return Ok((frame.into(), res.state)) - } - GethDebugBuiltInTracerType::PreStateTracer => { - let prestate_config = tracer_config - .clone() - .into_pre_state_config() - .map_err(|_| EthApiError::InvalidTracerConfig)?; - - let mut inspector = fused_inspector.get_or_insert_with(|| { - TracingInspector::new( - TracingInspectorConfig::from_geth_prestate_config(&prestate_config), - ) - }); - let gas_limit = tx_env.gas_limit(); - let res = - self.eth_api().inspect(&mut *db, evm_env, tx_env, &mut inspector)?; - - inspector.set_transaction_gas_limit(gas_limit); - let frame = inspector - .geth_builder() - .geth_prestate_traces(&res, &prestate_config, db) - .map_err(Eth::Error::from_eth_err)?; - - return Ok((frame.into(), res.state)) - } - GethDebugBuiltInTracerType::NoopTracer => { - Ok((NoopFrame::default().into(), Default::default())) - } - GethDebugBuiltInTracerType::MuxTracer => { - let mux_config = tracer_config - .clone() - .into_mux_config() - .map_err(|_| EthApiError::InvalidTracerConfig)?; - - let mut inspector = MuxInspector::try_from_config(mux_config) - .map_err(Eth::Error::from_eth_err)?; - - let res = - self.eth_api().inspect(&mut *db, evm_env, tx_env, &mut inspector)?; - let frame = inspector - .try_into_mux_frame(&res, db, tx_info) - .map_err(Eth::Error::from_eth_err)?; - return Ok((frame.into(), res.state)) - } - GethDebugBuiltInTracerType::FlatCallTracer => { - let flat_call_config = tracer_config - .clone() - .into_flat_call_config() - .map_err(|_| EthApiError::InvalidTracerConfig)?; - - let mut inspector = TracingInspector::new( - TracingInspectorConfig::from_flat_call_config(&flat_call_config), - ); - - let gas_limit = tx_env.gas_limit(); - let res = self.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?; - let frame: FlatCallFrame = inspector - .with_transaction_gas_limit(gas_limit) - .into_parity_builder() - .into_localized_transaction_traces(tx_info); - - return Ok((frame.into(), res.state)); - } - _ => { - // Note: this match is non-exhaustive in case we need to add support for - // additional tracers - Err(EthApiError::Unsupported("unsupported tracer").into()) - } 
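// Illustrative sketch (not part of the patch): the per-tracer match being removed here is
// replaced by a single DebugInspector that is built once from the tracing options, asked
// for a result after every transaction, and reset with fuse() before the next one. The
// standalone example below mimics that construct-once / get_result / fuse lifecycle with
// made-up tracer variants; it does not reflect the real revm-inspectors API.
enum TracerKind {
    CallCount,
    GasOnly,
}

struct UnifiedInspector {
    kind: TracerKind,
    calls_seen: u64,
    gas_used: u64,
}

impl UnifiedInspector {
    fn new(kind: TracerKind) -> Self {
        Self { kind, calls_seen: 0, gas_used: 0 }
    }

    // record a (fake) execution observation for the current transaction
    fn observe(&mut self, calls: u64, gas: u64) {
        self.calls_seen += calls;
        self.gas_used += gas;
    }

    // produce the per-transaction frame from whatever the tracer collected
    fn get_result(&self) -> String {
        match self.kind {
            TracerKind::CallCount => format!("calls={}", self.calls_seen),
            TracerKind::GasOnly => format!("gas={}", self.gas_used),
        }
    }

    // clear per-transaction state so the same instance can trace the next transaction
    fn fuse(&mut self) {
        self.calls_seen = 0;
        self.gas_used = 0;
    }
}

fn main() {
    let txs = [(3u64, 21_000u64), (1, 50_000)];
    let mut inspector = UnifiedInspector::new(TracerKind::CallCount);
    let mut results = Vec::new();

    let mut iter = txs.iter().peekable();
    while let Some((calls, gas)) = iter.next() {
        inspector.observe(*calls, *gas);
        results.push(inspector.get_result());
        // only reset if another transaction follows, mirroring the block-tracing loop
        if iter.peek().is_some() {
            inspector.fuse();
        }
    }
    assert_eq!(results, vec!["calls=3".to_string(), "calls=1".to_string()]);
}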
- }, - #[cfg(not(feature = "js-tracer"))] - GethDebugTracerType::JsTracer(_) => { - Err(EthApiError::Unsupported("JS Tracer is not enabled").into()) - } - #[cfg(feature = "js-tracer")] - GethDebugTracerType::JsTracer(code) => { - let config = tracer_config.clone().into_json(); - let mut inspector = - revm_inspectors::tracing::js::JsInspector::with_transaction_context( - code.clone(), - config, - transaction_context.unwrap_or_default(), - ) - .map_err(Eth::Error::from_eth_err)?; - let res = self.eth_api().inspect( - &mut *db, - evm_env.clone(), - tx_env.clone(), - &mut inspector, - )?; - - let state = res.state.clone(); - let result = inspector - .json_result(res, &tx_env, &evm_env.block_env, db) - .map_err(Eth::Error::from_eth_err)?; - Ok((GethTrace::JS(result), state)) - } - _ => { - // Note: this match is non-exhaustive in case we need to add support for - // additional tracers - Err(EthApiError::Unsupported("unsupported tracer").into()) - } - } - } - - // default structlog tracer - let mut inspector = fused_inspector.get_or_insert_with(|| { - let inspector_config = TracingInspectorConfig::from_geth_config(config); - TracingInspector::new(inspector_config) - }); - let gas_limit = tx_env.gas_limit(); - let res = self.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?; - let gas_used = res.result.gas_used(); - let return_value = res.result.into_output().unwrap_or_default(); - inspector.set_transaction_gas_limit(gas_limit); - let frame = inspector.geth_builder().geth_traces(gas_used, return_value, *config); - - Ok((frame.into(), res.state)) - } - /// Returns the state root of the `HashedPostState` on top of the state for the given block with /// trie updates. async fn debug_state_root_with_updates( @@ -918,7 +638,7 @@ where #[async_trait] impl DebugApiServer> for DebugApi where - Eth: EthApiTypes + EthTransactions + TraceExt + 'static, + Eth: EthTransactions + TraceExt, { /// Handler for `debug_getRawHeader` async fn raw_header(&self, block_id: BlockId) -> RpcResult { @@ -968,7 +688,7 @@ where /// Handler for `debug_getRawTransactions` /// Returns the bytes of the transaction for the given hash. async fn raw_transactions(&self, block_id: BlockId) -> RpcResult> { - let block = self + let block: RecoveredBlock> = self .provider() .block_with_senders_by_id(block_id, TransactionVariant::NoHash) .to_rpc_result()? 
@@ -989,8 +709,36 @@ where } /// Handler for `debug_getBadBlocks` - async fn bad_blocks(&self) -> RpcResult> { - Ok(vec![]) + async fn bad_blocks(&self) -> RpcResult> { + let blocks = self.inner.bad_block_store.all(); + let mut bad_blocks = Vec::with_capacity(blocks.len()); + + #[derive(Serialize, Deserialize)] + struct BadBlockSerde { + block: T, + hash: B256, + rlp: Bytes, + } + + for block in blocks { + let rlp = alloy_rlp::encode(block.sealed_block()).into(); + let hash = block.hash(); + + let block = block + .clone_into_rpc_block( + BlockTransactionsKind::Full, + |tx, tx_info| self.eth_api().converter().fill(tx, tx_info), + |header, size| self.eth_api().converter().convert_header(header, size), + ) + .map_err(|err| Eth::Error::from(err).into())?; + + let bad_block = serde_json::to_value(BadBlockSerde { block, hash, rlp }) + .map_err(|err| EthApiError::other(internal_rpc_err(err.to_string())))?; + + bad_blocks.push(bad_block); + } + + Ok(bad_blocks) } /// Handler for `debug_traceChain` @@ -1143,8 +891,38 @@ where Ok(()) } - async fn debug_db_get(&self, _key: String) -> RpcResult<()> { - Ok(()) + /// `debug_db_get` - database key lookup + /// + /// Currently supported: + /// * Contract bytecode associated with a code hash. The key format is: `<0x63>` + /// * Prefix byte: 0x63 (required) + /// * Code hash: 32 bytes + /// Must be provided as either: + /// * Hex string: "0x63..." (66 hex characters after 0x) + /// * Raw byte string: raw byte string (33 bytes) + /// See Geth impl: + async fn debug_db_get(&self, key: String) -> RpcResult> { + let key_bytes = if key.starts_with("0x") { + decode(&key).map_err(|_| EthApiError::InvalidParams("Invalid hex key".to_string()))? + } else { + key.into_bytes() + }; + + if key_bytes.len() != 33 { + return Err(EthApiError::InvalidParams(format!( + "Key must be 33 bytes, got {}", + key_bytes.len() + )) + .into()); + } + if key_bytes[0] != 0x63 { + return Err(EthApiError::InvalidParams("Key prefix must be 0x63".to_string()).into()); + } + + let code_hash = B256::from_slice(&key_bytes[1..33]); + + // No block ID is provided, so it defaults to the latest block + self.debug_code_by_hash(code_hash, None).await.map_err(Into::into) } async fn debug_dump_block(&self, _number: BlockId) -> RpcResult<()> { @@ -1323,21 +1101,66 @@ where } } -impl std::fmt::Debug for DebugApi { +impl std::fmt::Debug for DebugApi { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("DebugApi").finish_non_exhaustive() } } -impl Clone for DebugApi { +impl Clone for DebugApi { fn clone(&self) -> Self { Self { inner: Arc::clone(&self.inner) } } } -struct DebugApiInner { +struct DebugApiInner { /// The implementation of `eth` API eth_api: Eth, // restrict the number of concurrent calls to blocking calls blocking_task_guard: BlockingTaskGuard, + /// Cache for bad blocks. + bad_block_store: BadBlockStore>, +} + +/// A bounded, deduplicating store of recently observed bad blocks. +#[derive(Clone, Debug)] +struct BadBlockStore { + inner: Arc>>>>, + limit: usize, +} + +impl BadBlockStore { + /// Creates a new store with the given capacity. + fn new(limit: usize) -> Self { + Self { inner: Arc::new(RwLock::new(VecDeque::with_capacity(limit))), limit } + } + + /// Inserts a recovered block, keeping only the most recent `limit` entries and deduplicating + /// by block hash. 
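// Illustrative sketch (not part of the patch): the debug_db_get handler added above
// accepts a 33-byte key, the 0x63 prefix byte followed by a 32-byte code hash, supplied
// either as a 0x-prefixed hex string or as raw bytes. The standalone parser below performs
// the same checks before the hash would be used for a bytecode lookup; it assumes the
// `hex` crate for decoding.
fn parse_code_key(key: &str) -> Result<[u8; 32], String> {
    // hex form: strip "0x" and decode; raw form: take the bytes as-is
    let key_bytes: Vec<u8> = if let Some(stripped) = key.strip_prefix("0x") {
        hex::decode(stripped).map_err(|_| "Invalid hex key".to_string())?
    } else {
        key.as_bytes().to_vec()
    };

    if key_bytes.len() != 33 {
        return Err(format!("Key must be 33 bytes, got {}", key_bytes.len()));
    }
    if key_bytes[0] != 0x63 {
        return Err("Key prefix must be 0x63".to_string());
    }

    let mut code_hash = [0u8; 32];
    code_hash.copy_from_slice(&key_bytes[1..33]);
    Ok(code_hash)
}

fn main() {
    // 0x63 prefix followed by 32 bytes of 0x11
    let key = format!("0x63{}", "11".repeat(32));
    assert_eq!(parse_code_key(&key).unwrap(), [0x11; 32]);
    assert!(parse_code_key("0x00").is_err());
}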
+ fn insert(&self, block: RecoveredBlock) { + let hash = block.hash(); + let mut guard = self.inner.write(); + + // skip if we already recorded this bad block , and keep original ordering + if guard.iter().any(|b| b.hash() == hash) { + return; + } + guard.push_back(Arc::new(block)); + + while guard.len() > self.limit { + guard.pop_front(); + } + } + + /// Returns all cached bad blocks ordered from newest to oldest. + fn all(&self) -> Vec>> { + let guard = self.inner.read(); + guard.iter().rev().cloned().collect() + } +} + +impl Default for BadBlockStore { + fn default() -> Self { + Self::new(64) + } } diff --git a/crates/rpc/rpc/src/eth/builder.rs b/crates/rpc/rpc/src/eth/builder.rs index c34d268d64..9642ca97be 100644 --- a/crates/rpc/rpc/src/eth/builder.rs +++ b/crates/rpc/rpc/src/eth/builder.rs @@ -15,7 +15,8 @@ use reth_rpc_eth_types::{ FeeHistoryCacheConfig, ForwardConfig, GasCap, GasPriceOracle, GasPriceOracleConfig, }; use reth_rpc_server_types::constants::{ - DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_PROOF_PERMITS, + DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_BLOCKING_IO_REQUEST, DEFAULT_MAX_SIMULATE_BLOCKS, + DEFAULT_PROOF_PERMITS, }; use reth_tasks::{pool::BlockingTaskPool, TaskSpawner, TokioTaskExecutor}; use std::{sync::Arc, time::Duration}; @@ -41,9 +42,11 @@ pub struct EthApiBuilder { task_spawner: Box, next_env: NextEnv, max_batch_size: usize, + max_blocking_io_requests: usize, pending_block_kind: PendingBlockKind, raw_tx_forwarder: ForwardConfig, send_raw_transaction_sync_timeout: Duration, + evm_memory_limit: u64, } impl @@ -91,9 +94,11 @@ impl EthApiBuilder { task_spawner, next_env, max_batch_size, + max_blocking_io_requests, pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } = self; EthApiBuilder { components, @@ -111,9 +116,11 @@ impl EthApiBuilder { task_spawner, next_env, max_batch_size, + max_blocking_io_requests, pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } } } @@ -142,9 +149,11 @@ where eth_state_cache_config: Default::default(), next_env: Default::default(), max_batch_size: 1, + max_blocking_io_requests: DEFAULT_MAX_BLOCKING_IO_REQUEST, pending_block_kind: PendingBlockKind::Full, raw_tx_forwarder: ForwardConfig::default(), send_raw_transaction_sync_timeout: Duration::from_secs(30), + evm_memory_limit: (1 << 32) - 1, } } } @@ -180,9 +189,11 @@ where gas_oracle_config, next_env, max_batch_size, + max_blocking_io_requests, pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } = self; EthApiBuilder { components, @@ -200,9 +211,11 @@ where gas_oracle_config, next_env, max_batch_size, + max_blocking_io_requests, pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } } @@ -227,9 +240,11 @@ where gas_oracle_config, next_env: _, max_batch_size, + max_blocking_io_requests, pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } = self; EthApiBuilder { components, @@ -247,9 +262,11 @@ where gas_oracle_config, next_env, max_batch_size, + max_blocking_io_requests, pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } } @@ -327,6 +344,12 @@ where self } + /// Sets the maximum number of concurrent blocking IO requests. 
+ pub const fn max_blocking_io_requests(mut self, max_blocking_io_requests: usize) -> Self { + self.max_blocking_io_requests = max_blocking_io_requests; + self + } + /// Sets the pending block kind pub const fn pending_block_kind(mut self, pending_block_kind: PendingBlockKind) -> Self { self.pending_block_kind = pending_block_kind; @@ -474,9 +497,11 @@ where task_spawner, next_env, max_batch_size, + max_blocking_io_requests, pending_block_kind, raw_tx_forwarder, send_raw_transaction_sync_timeout, + evm_memory_limit, } = self; let provider = components.provider().clone(); @@ -514,9 +539,11 @@ where rpc_converter, next_env, max_batch_size, + max_blocking_io_requests, pending_block_kind, raw_tx_forwarder.forwarder_client(), send_raw_transaction_sync_timeout, + evm_memory_limit, ) } @@ -541,4 +568,10 @@ where self.send_raw_transaction_sync_timeout = timeout; self } + + /// Sets the maximum memory the EVM can allocate per RPC request. + pub const fn evm_memory_limit(mut self, memory_limit: u64) -> Self { + self.evm_memory_limit = memory_limit; + self + } } diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index d49b5486d3..1f7712dc24 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -8,7 +8,6 @@ use alloy_rpc_types_mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTra use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_evm::{ConfigureEvm, Evm}; -use reth_revm::{database::StateProviderDatabase, State}; use reth_rpc_eth_api::{ helpers::{Call, EthTransactions, LoadPendingBlock}, EthCallBundleApiServer, FromEthApiError, FromEvmError, @@ -114,9 +113,7 @@ where .chain_spec() .blob_params_at_timestamp(evm_env.block_env.timestamp().saturating_to()) .unwrap_or_else(BlobParams::cancun); - if transactions.iter().filter_map(|tx| tx.blob_gas_used()).sum::() > - blob_params.max_blob_gas_per_block() - { + if blob_gas_used > blob_params.max_blob_gas_per_block() { return Err(EthApiError::InvalidParams( EthBundleError::Eip4844BlobGasExceeded(blob_params.max_blob_gas_per_block()) .to_string(), @@ -144,13 +141,10 @@ where // use the block number of the request evm_env.block_env.inner_mut().number = U256::from(block_number); - let eth_api = self.eth_api().clone(); - self.eth_api() - .spawn_with_state_at_block(at, move |state| { + .spawn_with_state_at_block(at, move |eth_api, db| { let coinbase = evm_env.block_env.beneficiary(); let basefee = evm_env.block_env.basefee(); - let db = State::builder().with_database(StateProviderDatabase::new(state)).build(); let initial_coinbase = db .basic_ref(coinbase) diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index d2e5cf124e..04216f49fd 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -33,7 +33,7 @@ use reth_transaction_pool::{ blobstore::BlobSidecarConverter, noop::NoopTransactionPool, AddedTransactionOutcome, BatchTxProcessor, BatchTxRequest, TransactionPool, }; -use tokio::sync::{broadcast, mpsc, Mutex}; +use tokio::sync::{broadcast, mpsc, Mutex, Semaphore}; const DEFAULT_BROADCAST_CAPACITY: usize = 2000; @@ -152,9 +152,11 @@ where proof_permits: usize, rpc_converter: Rpc, max_batch_size: usize, + max_blocking_io_requests: usize, pending_block_kind: PendingBlockKind, raw_tx_forwarder: ForwardConfig, send_raw_transaction_sync_timeout: Duration, + evm_memory_limit: u64, ) -> Self { let inner = EthApiInner::new( components, @@ -170,9 +172,11 @@ where rpc_converter, (), max_batch_size, + 
max_blocking_io_requests, pending_block_kind, raw_tx_forwarder.forwarder_client(), send_raw_transaction_sync_timeout, + evm_memory_limit, ); Self { inner: Arc::new(inner) } @@ -182,14 +186,14 @@ where impl EthApiTypes for EthApi where N: RpcNodeCore, - Rpc: RpcConvert, + Rpc: RpcConvert, { type Error = EthApiError; type NetworkTypes = Rpc::Network; type RpcConvert = Rpc; - fn tx_resp_builder(&self) -> &Self::RpcConvert { - &self.tx_resp_builder + fn converter(&self) -> &Self::RpcConvert { + &self.converter } } @@ -245,7 +249,7 @@ where impl SpawnBlocking for EthApi where N: RpcNodeCore, - Rpc: RpcConvert, + Rpc: RpcConvert, { #[inline] fn io_task_spawner(&self) -> impl TaskSpawner { @@ -261,6 +265,11 @@ where fn tracing_task_guard(&self) -> &BlockingTaskGuard { self.inner.blocking_task_guard() } + + #[inline] + fn blocking_io_task_guard(&self) -> &std::sync::Arc { + self.inner.blocking_io_request_semaphore() + } } /// Container type `EthApi` @@ -294,6 +303,9 @@ pub struct EthApiInner { /// Guard for getproof calls blocking_task_guard: BlockingTaskGuard, + /// Semaphore to limit concurrent blocking IO requests (`eth_call`, `eth_estimateGas`, etc.) + blocking_io_request_semaphore: Arc, + /// Transaction broadcast channel raw_tx_sender: broadcast::Sender, @@ -301,7 +313,7 @@ pub struct EthApiInner { raw_tx_forwarder: Option, /// Converter for RPC types. - tx_resp_builder: Rpc, + converter: Rpc, /// Builder for pending block environment. next_env_builder: Box>, @@ -318,6 +330,9 @@ pub struct EthApiInner { /// Blob sidecar converter blob_sidecar_converter: BlobSidecarConverter, + + /// Maximum memory the EVM can allocate per RPC request. + evm_memory_limit: u64, } impl EthApiInner @@ -338,12 +353,14 @@ where fee_history_cache: FeeHistoryCache>, task_spawner: Box, proof_permits: usize, - tx_resp_builder: Rpc, + converter: Rpc, next_env: impl PendingEnvBuilder, max_batch_size: usize, + max_blocking_io_requests: usize, pending_block_kind: PendingBlockKind, raw_tx_forwarder: Option, send_raw_transaction_sync_timeout: Duration, + evm_memory_limit: u64, ) -> Self { let signers = parking_lot::RwLock::new(Default::default()); // get the block number of the latest block @@ -378,14 +395,16 @@ where blocking_task_pool, fee_history_cache, blocking_task_guard: BlockingTaskGuard::new(proof_permits), + blocking_io_request_semaphore: Arc::new(Semaphore::new(max_blocking_io_requests)), raw_tx_sender, raw_tx_forwarder, - tx_resp_builder, + converter, next_env_builder: Box::new(next_env), tx_batch_sender, pending_block_kind, send_raw_transaction_sync_timeout, blob_sidecar_converter: BlobSidecarConverter::new(), + evm_memory_limit, } } } @@ -403,8 +422,8 @@ where /// Returns a handle to the transaction response builder. #[inline] - pub const fn tx_resp_builder(&self) -> &Rpc { - &self.tx_resp_builder + pub const fn converter(&self) -> &Rpc { + &self.converter } /// Returns a handle to data in memory. @@ -433,6 +452,8 @@ where } /// Returns a handle to the blocking thread pool. + /// + /// This is intended for tasks that are CPU bound. #[inline] pub const fn blocking_task_pool(&self) -> &BlockingTaskPool { &self.blocking_task_pool @@ -518,7 +539,7 @@ where /// Returns the transaction batch sender #[inline] - const fn tx_batch_sender( + pub const fn tx_batch_sender( &self, ) -> &mpsc::UnboundedSender::Transaction>> { &self.tx_batch_sender @@ -563,6 +584,18 @@ where pub const fn blob_sidecar_converter(&self) -> &BlobSidecarConverter { &self.blob_sidecar_converter } + + /// Returns the EVM memory limit. 
+ #[inline] + pub const fn evm_memory_limit(&self) -> u64 { + self.evm_memory_limit + } + + /// Returns a reference to the blocking IO request semaphore. + #[inline] + pub const fn blocking_io_request_semaphore(&self) -> &Arc { + &self.blocking_io_request_semaphore + } } #[cfg(test)] diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 01b6a94158..735f789f20 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -1,6 +1,7 @@ //! `eth_` `Filter` RPC handler implementation use alloy_consensus::BlockHeader; +use alloy_eips::BlockNumberOrTag; use alloy_primitives::{Sealable, TxHash}; use alloy_rpc_types_eth::{ BlockNumHash, Filter, FilterBlockOption, FilterChanges, FilterId, Log, @@ -17,6 +18,7 @@ use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_errors::ProviderError; use reth_primitives_traits::{NodePrimitives, SealedHeader}; use reth_rpc_eth_api::{ + helpers::{EthBlocks, LoadReceipt}, EngineEthFilter, EthApiTypes, EthFilterApiServer, FullEthApiTypes, QueryLimits, RpcConvert, RpcNodeCoreExt, RpcTransaction, }; @@ -48,7 +50,11 @@ use tracing::{debug, error, trace}; impl EngineEthFilter for EthFilter where - Eth: FullEthApiTypes + RpcNodeCoreExt + 'static, + Eth: FullEthApiTypes + + RpcNodeCoreExt + + LoadReceipt + + EthBlocks + + 'static, { /// Returns logs matching given filter object, no query limits fn logs( @@ -193,7 +199,11 @@ where impl EthFilter where - Eth: FullEthApiTypes + RpcNodeCoreExt + 'static, + Eth: FullEthApiTypes + + RpcNodeCoreExt + + LoadReceipt + + EthBlocks + + 'static, { /// Access the underlying provider. fn provider(&self) -> &Eth::Provider { @@ -257,7 +267,7 @@ where .map(|num| self.provider().convert_block_number(num)) .transpose()? .flatten(); - logs_utils::get_filter_block_range(from, to, start_block, info) + logs_utils::get_filter_block_range(from, to, start_block, info)? 
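// Illustrative sketch (not part of the patch): the builder and core changes above thread a
// max_blocking_io_requests value into EthApiInner, where it sizes a tokio Semaphore that
// gates blocking IO work (eth_call, eth_estimateGas, range lookups). The example below
// shows the acquire-permit-then-spawn_blocking pattern in isolation; reth routes this
// through its SpawnBlocking helpers rather than calling tokio directly.
use std::sync::Arc;
use tokio::sync::Semaphore;

async fn run_blocking_io<T, F>(semaphore: Arc<Semaphore>, work: F) -> T
where
    F: FnOnce() -> T + Send + 'static,
    T: Send + 'static,
{
    // wait until one of the limited IO slots is free
    let _permit = semaphore.acquire_owned().await.expect("semaphore not closed");
    // run the blocking work off the async runtime; the permit is released on drop
    tokio::task::spawn_blocking(work).await.expect("blocking task panicked")
}

#[tokio::main]
async fn main() {
    // allow at most 2 blocking IO requests at a time
    let semaphore = Arc::new(Semaphore::new(2));

    let handles: Vec<_> = (0..4)
        .map(|i| {
            let semaphore = semaphore.clone();
            tokio::spawn(run_blocking_io(semaphore, move || i * 10))
        })
        .collect();

    for handle in handles {
        println!("result: {}", handle.await.unwrap());
    }
}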
} FilterBlockOption::AtBlockHash(_) => { // blockHash is equivalent to fromBlock = toBlock = the block number with @@ -288,11 +298,12 @@ where /// Handler for `eth_getFilterLogs` pub async fn filter_logs(&self, id: FilterId) -> Result, EthFilterError> { let filter = { - let filters = self.inner.active_filters.inner.lock().await; - if let FilterKind::Log(ref filter) = - filters.get(&id).ok_or_else(|| EthFilterError::FilterNotFound(id.clone()))?.kind - { - *filter.clone() + let mut filters = self.inner.active_filters.inner.lock().await; + let filter = + filters.get_mut(&id).ok_or_else(|| EthFilterError::FilterNotFound(id.clone()))?; + if let FilterKind::Log(ref inner_filter) = filter.kind { + filter.last_poll_timestamp = Instant::now(); + *inner_filter.clone() } else { // Not a log filter return Err(EthFilterError::FilterNotFound(id)) @@ -315,7 +326,7 @@ where #[async_trait] impl EthFilterApiServer> for EthFilter where - Eth: FullEthApiTypes + RpcNodeCoreExt + 'static, + Eth: FullEthApiTypes + RpcNodeCoreExt + LoadReceipt + EthBlocks + 'static, { /// Handler for `eth_newFilter` async fn new_filter(&self, filter: Filter) -> RpcResult { @@ -348,7 +359,7 @@ where let stream = self.pool().new_pending_pool_transactions_listener(); let full_txs_receiver = FullTransactionsReceiver::new( stream, - dyn_clone::clone(self.inner.eth_api.tx_resp_builder()), + dyn_clone::clone(self.inner.eth_api.converter()), ); FilterKind::PendingTransaction(PendingTransactionKind::FullTransaction(Arc::new( full_txs_receiver, @@ -356,8 +367,6 @@ where } }; - //let filter = FilterKind::PendingTransaction(transaction_kind); - // Install the filter and propagate any errors self.inner.install_filter(transaction_kind).await } @@ -434,6 +443,8 @@ impl EthFilterInner where Eth: RpcNodeCoreExt + EthApiTypes + + LoadReceipt + + EthBlocks + 'static, { /// Access the underlying provider. @@ -454,23 +465,24 @@ where ) -> Result, EthFilterError> { match filter.block_option { FilterBlockOption::AtBlockHash(block_hash) => { - // for all matching logs in the block - // get the block header with the hash - let header = self - .provider() - .header_by_hash_or_number(block_hash.into())? - .ok_or_else(|| ProviderError::HeaderNotFound(block_hash.into()))?; + // First try to get cached block and receipts, as it's likely they're already cached + let Some((receipts, maybe_block)) = + self.eth_cache().get_receipts_and_maybe_block(block_hash).await? + else { + return Err(ProviderError::HeaderNotFound(block_hash.into()).into()) + }; + + // Get header - from cached block if available, otherwise from provider + let header = if let Some(block) = &maybe_block { + block.header().clone() + } else { + self.provider() + .header_by_hash_or_number(block_hash.into())? + .ok_or_else(|| ProviderError::HeaderNotFound(block_hash.into()))? + }; let block_num_hash = BlockNumHash::new(header.number(), block_hash); - // we also need to ensure that the receipts are available and return an error if - // not, in case the block hash been reorged - let (receipts, maybe_block) = self - .eth_cache() - .get_receipts_and_maybe_block(block_num_hash.hash) - .await? 
- .ok_or(EthApiError::HeaderNotFound(block_hash.into()))?; - let mut all_logs = Vec::new(); append_matching_block_logs( &mut all_logs, @@ -487,10 +499,43 @@ where Ok(all_logs) } FilterBlockOption::Range { from_block, to_block } => { - // compute the range - let info = self.provider().chain_info()?; + // Handle special case where from block is pending + if from_block.is_some_and(|b| b.is_pending()) { + let to_block = to_block.unwrap_or(BlockNumberOrTag::Pending); + if !(to_block.is_pending() || to_block.is_number()) { + // always empty range + return Ok(Vec::new()); + } + // Try to get pending block and receipts + if let Ok(Some(pending_block)) = self.eth_api.local_pending_block().await { + if let BlockNumberOrTag::Number(to_block) = to_block && + to_block < pending_block.block.number() + { + // this block range is empty based on the user input + return Ok(Vec::new()); + } - // we start at the most recent block if unset in filter + let info = self.provider().chain_info()?; + if pending_block.block.number() > info.best_number { + // only consider the pending block if it is ahead of the chain + let mut all_logs = Vec::new(); + let timestamp = pending_block.block.timestamp(); + let block_num_hash = pending_block.block.num_hash(); + append_matching_block_logs( + &mut all_logs, + ProviderOrBlock::::Block(pending_block.block), + &filter, + block_num_hash, + &pending_block.receipts, + false, // removed = false for pending blocks + timestamp, + )?; + return Ok(all_logs); + } + } + } + + let info = self.provider().chain_info()?; let start_block = info.best_number; let from = from_block .map(|num| self.provider().convert_block_number(num)) @@ -501,6 +546,13 @@ where .transpose()? .flatten(); + // Return error if toBlock exceeds current head + if let Some(t) = to && + t > info.best_number + { + return Err(EthFilterError::BlockRangeExceedsHead); + } + if let Some(f) = from && f > info.best_number { @@ -509,7 +561,7 @@ where } let (from_block_number, to_block_number) = - logs_utils::get_filter_block_range(from, to, start_block, info); + logs_utils::get_filter_block_range(from, to, start_block, info)?; self.get_logs_in_block_range(filter, from_block_number, to_block_number, limits) .await @@ -735,7 +787,7 @@ impl PendingTransactionsReceiver { #[derive(Debug, Clone)] struct FullTransactionsReceiver { txs_stream: Arc>>, - tx_resp_builder: TxCompat, + converter: TxCompat, } impl FullTransactionsReceiver @@ -744,8 +796,8 @@ where TxCompat: RpcConvert>, { /// Creates a new `FullTransactionsReceiver` encapsulating the provided transaction stream. - fn new(stream: NewSubpoolTransactionStream, tx_resp_builder: TxCompat) -> Self { - Self { txs_stream: Arc::new(Mutex::new(stream)), tx_resp_builder } + fn new(stream: NewSubpoolTransactionStream, converter: TxCompat) -> Self { + Self { txs_stream: Arc::new(Mutex::new(stream)), converter } } /// Returns all new pending transactions received since the last poll. @@ -754,7 +806,7 @@ where let mut prepared_stream = self.txs_stream.lock().await; while let Ok(tx) = prepared_stream.try_recv() { - match self.tx_resp_builder.fill_pending(tx.transaction.to_consensus()) { + match self.converter.fill_pending(tx.transaction.to_consensus()) { Ok(tx) => pending_txs.push(tx), Err(err) => { error!(target: "rpc", @@ -849,6 +901,9 @@ pub enum EthFilterError { /// Invalid block range. #[error("invalid block range params")] InvalidBlockRangeParams, + /// Block range extends beyond current head. 
+ #[error("block range extends beyond current head block")] + BlockRangeExceedsHead, /// Query scope is too broad. #[error("query exceeds max block range {0}")] QueryExceedsMaxBlocks(u64), @@ -883,7 +938,8 @@ impl From for jsonrpsee::types::error::ErrorObject<'static> { EthFilterError::EthAPIError(err) => err.into(), err @ (EthFilterError::InvalidBlockRangeParams | EthFilterError::QueryExceedsMaxBlocks(_) | - EthFilterError::QueryExceedsMaxResults { .. }) => { + EthFilterError::QueryExceedsMaxResults { .. } | + EthFilterError::BlockRangeExceedsHead) => { rpc_error_with_code(jsonrpsee::types::error::INVALID_PARAMS_CODE, err.to_string()) } } @@ -896,6 +952,15 @@ impl From for EthFilterError { } } +impl From for EthFilterError { + fn from(err: logs_utils::FilterBlockRangeError) -> Self { + match err { + logs_utils::FilterBlockRangeError::InvalidBlockRange => Self::InvalidBlockRangeParams, + logs_utils::FilterBlockRangeError::BlockRangeExceedsHead => Self::BlockRangeExceedsHead, + } + } +} + /// Helper type for the common pattern of returning receipts, block and the original header that is /// a match for the filter. struct ReceiptBlockResult

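// Illustrative sketch (not part of the patch): the filter changes above add a
// BlockRangeExceedsHead error and convert logs_utils::FilterBlockRangeError into
// EthFilterError, so a toBlock past the current head is rejected up front. The standalone
// function below applies the same kind of validation on plain block numbers, assuming the
// from/to tags were already resolved to numbers.
#[derive(Debug, PartialEq)]
enum RangeError {
    InvalidBlockRange,
    BlockRangeExceedsHead,
}

fn resolve_filter_range(
    from: Option<u64>,
    to: Option<u64>,
    best_number: u64,
) -> Result<(u64, u64), RangeError> {
    // unset bounds default to the current head
    let from = from.unwrap_or(best_number);
    let to = to.unwrap_or(best_number);

    if to > best_number {
        return Err(RangeError::BlockRangeExceedsHead);
    }
    if from > to {
        return Err(RangeError::InvalidBlockRange);
    }
    Ok((from, to))
}

fn main() {
    assert_eq!(resolve_filter_range(Some(10), Some(20), 100), Ok((10, 20)));
    assert_eq!(resolve_filter_range(None, None, 100), Ok((100, 100)));
    assert_eq!(
        resolve_filter_range(Some(10), Some(200), 100),
        Err(RangeError::BlockRangeExceedsHead)
    );
    assert_eq!(
        resolve_filter_range(Some(30), Some(20), 100),
        Err(RangeError::InvalidBlockRange)
    );
}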
@@ -912,7 +977,11 @@ where /// Represents different modes for processing block ranges when filtering logs enum RangeMode< - Eth: RpcNodeCoreExt + EthApiTypes + 'static, + Eth: RpcNodeCoreExt + + EthApiTypes + + LoadReceipt + + EthBlocks + + 'static, > { /// Use cache-based processing for recent blocks Cached(CachedMode), @@ -921,7 +990,11 @@ enum RangeMode< } impl< - Eth: RpcNodeCoreExt + EthApiTypes + 'static, + Eth: RpcNodeCoreExt + + EthApiTypes + + LoadReceipt + + EthBlocks + + 'static, > RangeMode { /// Creates a new `RangeMode`. @@ -993,14 +1066,22 @@ impl< /// Mode for processing blocks using cache optimization for recent blocks struct CachedMode< - Eth: RpcNodeCoreExt + EthApiTypes + 'static, + Eth: RpcNodeCoreExt + + EthApiTypes + + LoadReceipt + + EthBlocks + + 'static, > { filter_inner: Arc>, headers_iter: std::vec::IntoIter::Header>>, } impl< - Eth: RpcNodeCoreExt + EthApiTypes + 'static, + Eth: RpcNodeCoreExt + + EthApiTypes + + LoadReceipt + + EthBlocks + + 'static, > CachedMode { async fn next(&mut self) -> Result>, EthFilterError> { @@ -1027,7 +1108,11 @@ type ReceiptFetchFuture

= /// Mode for processing blocks using range queries for older blocks struct RangeBlockMode< - Eth: RpcNodeCoreExt + EthApiTypes + 'static, + Eth: RpcNodeCoreExt + + EthApiTypes + + LoadReceipt + + EthBlocks + + 'static, > { filter_inner: Arc>, iter: Peekable::Header>>>, @@ -1038,7 +1123,11 @@ struct RangeBlockMode< } impl< - Eth: RpcNodeCoreExt + EthApiTypes + 'static, + Eth: RpcNodeCoreExt + + EthApiTypes + + LoadReceipt + + EthBlocks + + 'static, > RangeBlockMode { async fn next(&mut self) -> Result>, EthFilterError> { @@ -1070,7 +1159,7 @@ impl< let expected_next = last_header.number() + 1; if peeked.number() != expected_next { - debug!( + trace!( target: "rpc::eth::filter", last_block = last_header.number(), next_block = peeked.number(), diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index abe06cb55e..ad9f020bd0 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -31,6 +31,11 @@ where fn max_simulate_blocks(&self) -> u64 { self.inner.max_simulate_blocks() } + + #[inline] + fn evm_memory_limit(&self) -> u64 { + self.inner.evm_memory_limit() + } } impl EstimateCall for EthApi diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index 0c08c12e0e..0e0ae51669 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -12,7 +12,7 @@ impl LoadPendingBlock for EthApi where N: RpcNodeCore, EthApiError: FromEvmError, - Rpc: RpcConvert, + Rpc: RpcConvert, { #[inline] fn pending_block(&self) -> &tokio::sync::Mutex>> { diff --git a/crates/rpc/rpc/src/eth/helpers/spec.rs b/crates/rpc/rpc/src/eth/helpers/spec.rs index fdae08f8f1..e1a2343caf 100644 --- a/crates/rpc/rpc/src/eth/helpers/spec.rs +++ b/crates/rpc/rpc/src/eth/helpers/spec.rs @@ -1,13 +1,14 @@ use alloy_primitives::U256; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{helpers::EthApiSpec, RpcNodeCore}; +use reth_rpc_eth_types::EthApiError; use crate::EthApi; impl EthApiSpec for EthApi where N: RpcNodeCore, - Rpc: RpcConvert, + Rpc: RpcConvert, { fn starting_block(&self) -> U256 { self.inner.starting_block() diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index 3d9cc76309..157ab7b311 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -6,11 +6,12 @@ use reth_rpc_eth_api::{ helpers::{EthState, LoadPendingBlock, LoadState}, RpcNodeCore, }; +use reth_rpc_eth_types::EthApiError; impl EthState for EthApi where N: RpcNodeCore, - Rpc: RpcConvert, + Rpc: RpcConvert, Self: LoadPendingBlock, { fn max_proof_window(&self) -> u64 { diff --git a/crates/rpc/rpc/src/eth/helpers/sync_listener.rs b/crates/rpc/rpc/src/eth/helpers/sync_listener.rs index e444f76d3a..d9069f4596 100644 --- a/crates/rpc/rpc/src/eth/helpers/sync_listener.rs +++ b/crates/rpc/rpc/src/eth/helpers/sync_listener.rs @@ -26,10 +26,10 @@ impl SyncListener { } } -impl Future for SyncListener +impl Future for SyncListener where N: NetworkInfo, - St: Stream + Unpin, + St: Stream + Unpin, { type Output = (); diff --git a/crates/rpc/rpc/src/eth/helpers/trace.rs b/crates/rpc/rpc/src/eth/helpers/trace.rs index 3e00f2df0c..55b1604eea 100644 --- a/crates/rpc/rpc/src/eth/helpers/trace.rs +++ b/crates/rpc/rpc/src/eth/helpers/trace.rs @@ -10,6 +10,6 @@ impl Trace for EthApi where N: RpcNodeCore, EthApiError: FromEvmError, - Rpc: RpcConvert, + Rpc: RpcConvert, { } diff --git 
a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 7889dd1f54..8f2c5bf93e 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -5,19 +5,19 @@ use std::time::Duration; use crate::EthApi; use alloy_consensus::BlobTransactionValidationError; use alloy_eips::{eip7594::BlobTransactionSidecarVariant, BlockId, Typed2718}; -use alloy_primitives::{hex, Bytes, B256}; +use alloy_primitives::{hex, B256}; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; -use reth_primitives_traits::AlloyBlockHeader; +use reth_primitives_traits::{AlloyBlockHeader, Recovered, WithEncoded}; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction}, FromEvmError, RpcNodeCore, }; -use reth_rpc_eth_types::{error::RpcPoolError, utils::recover_raw_transaction, EthApiError}; +use reth_rpc_eth_types::{error::RpcPoolError, EthApiError}; use reth_storage_api::BlockReaderIdExt; use reth_transaction_pool::{ error::Eip4844PoolTransactionError, AddedTransactionOutcome, EthBlobTransactionSidecar, - EthPoolTransaction, PoolTransaction, TransactionPool, + EthPoolTransaction, PoolPooledTx, PoolTransaction, TransactionPool, }; impl EthTransactions for EthApi @@ -36,12 +36,11 @@ where self.inner.send_raw_transaction_sync_timeout() } - /// Decodes and recovers the transaction and submits it to the pool. - /// - /// Returns the hash of the transaction. - async fn send_raw_transaction(&self, tx: Bytes) -> Result { - let recovered = recover_raw_transaction(&tx)?; - + async fn send_transaction( + &self, + tx: WithEncoded>>, + ) -> Result { + let (tx, recovered) = tx.split(); let mut pool_transaction = ::Transaction::from_pooled(recovered); @@ -147,6 +146,7 @@ mod tests { }; use reth_rpc_eth_api::node::RpcNodeCoreAdapter; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; + use revm_primitives::Bytes; use std::collections::HashMap; fn mock_eth_api( diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 985cdf3129..56a0c7a697 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -2,10 +2,10 @@ use std::sync::Arc; -use alloy_primitives::{TxHash, U256}; +use alloy_primitives::TxHash; use alloy_rpc_types_eth::{ pubsub::{Params, PubSubSyncStatus, SubscriptionKind, SyncStatusMetadata}, - Filter, Header, Log, + Filter, Log, }; use futures::StreamExt; use jsonrpsee::{ @@ -13,7 +13,7 @@ use jsonrpsee::{ }; use reth_chain_state::CanonStateSubscriptions; use reth_network_api::NetworkInfo; -use reth_primitives_traits::NodePrimitives; +use reth_rpc_convert::RpcHeader; use reth_rpc_eth_api::{ pubsub::EthPubSubApiServer, EthApiTypes, RpcConvert, RpcNodeCore, RpcTransaction, }; @@ -21,7 +21,7 @@ use reth_rpc_eth_types::logs_utils; use reth_rpc_server_types::result::{internal_rpc_err, invalid_params_rpc_err}; use reth_storage_api::BlockNumReader; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; -use reth_transaction_pool::{NewTransactionEvent, PoolConsensusTx, TransactionPool}; +use reth_transaction_pool::{NewTransactionEvent, TransactionPool}; use serde::Serialize; use tokio_stream::{ wrappers::{BroadcastStream, ReceiverStream}, @@ -55,17 +55,9 @@ impl EthPubSub { } } -impl EthPubSub +impl EthPubSub where - Eth: RpcNodeCore< - Provider: BlockNumReader + CanonStateSubscriptions, - Pool: TransactionPool, - Network: NetworkInfo, - > + EthApiTypes< - RpcConvert: RpcConvert< - Primitives: 
NodePrimitives>, - >, - >, + Eth: RpcNodeCore + EthApiTypes>, { /// Returns the current sync status for the `syncing` subscription pub fn sync_status(&self, is_syncing: bool) -> PubSubSyncStatus { @@ -85,7 +77,7 @@ where } /// Returns a stream that yields all new RPC blocks. - pub fn new_headers_stream(&self) -> impl Stream> { + pub fn new_headers_stream(&self) -> impl Stream> { self.inner.new_headers_stream() } @@ -126,7 +118,7 @@ where let tx_value = match self .inner .eth_api - .tx_resp_builder() + .converter() .fill_pending(tx.transaction.to_consensus()) { Ok(tx) => Some(tx), @@ -211,15 +203,7 @@ where #[async_trait::async_trait] impl EthPubSubApiServer> for EthPubSub where - Eth: RpcNodeCore< - Provider: BlockNumReader + CanonStateSubscriptions, - Pool: TransactionPool, - Network: NetworkInfo, - > + EthApiTypes< - RpcConvert: RpcConvert< - Primitives: NodePrimitives>, - >, - > + 'static, + Eth: RpcNodeCore + EthApiTypes>, { /// Handler for `eth_subscribe` async fn subscribe( @@ -351,22 +335,26 @@ where } } -impl EthPubSubInner +impl EthPubSubInner where - Eth: RpcNodeCore>, + Eth: EthApiTypes> + RpcNodeCore, { /// Returns a stream that yields all new RPC blocks. - fn new_headers_stream(&self) -> impl Stream> { + fn new_headers_stream(&self) -> impl Stream> { + let converter = self.eth_api.converter(); self.eth_api.provider().canonical_state_stream().flat_map(|new_chain| { let headers = new_chain .committed() .blocks_iter() - .map(|block| { - Header::from_consensus( - block.clone_sealed_header().into(), - None, - Some(U256::from(block.rlp_length())), - ) + .filter_map(|block| { + match converter.convert_header(block.clone_sealed_header(), block.rlp_length()) + { + Ok(header) => Some(header), + Err(err) => { + error!(target = "rpc", %err, "Failed to convert header"); + None + } + } }) .collect::>(); futures::stream::iter(headers) diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index fa3fd46e45..9701b70851 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -12,7 +12,6 @@ use alloy_rpc_types_mev::{ use jsonrpsee::core::RpcResult; use reth_evm::{ConfigureEvm, Evm}; use reth_primitives_traits::Recovered; -use reth_revm::{database::StateProviderDatabase, State}; use reth_rpc_api::MevSimApiServer; use reth_rpc_eth_api::{ helpers::{block::LoadBlock, Call, EthTransactions}, @@ -241,13 +240,11 @@ where let sim_response = self .inner .eth_api - .spawn_with_state_at_block(current_block_id, move |state| { + .spawn_with_state_at_block(current_block_id, move |_, mut db| { // Setup environment let current_block_number = current_block.number(); let coinbase = evm_env.block_env.beneficiary(); let basefee = evm_env.block_env.basefee(); - let mut db = - State::builder().with_database(StateProviderDatabase::new(state)).build(); // apply overrides apply_block_overrides(block_overrides, &mut db, evm_env.block_env.inner_mut()); @@ -316,11 +313,11 @@ where // logs. We should collect bundle logs when we are processing the bundle items. 
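// Illustrative sketch (not part of the patch): new_headers_stream above now runs each
// canonical header through the RPC converter and drops (while logging) any header that
// fails to convert, instead of building the RPC header inline. The example below shows
// that filter_map-over-fallible-conversion shape with simplified stand-in types; the real
// code logs via tracing and yields RpcHeader values.
#[derive(Debug)]
struct ConsensusHeader {
    number: u64,
}

#[derive(Debug, PartialEq)]
struct RpcHeader {
    number: u64,
    size: usize,
}

fn convert_header(header: &ConsensusHeader, size: usize) -> Result<RpcHeader, String> {
    if size == 0 {
        return Err(format!("empty block {}", header.number));
    }
    Ok(RpcHeader { number: header.number, size })
}

fn main() {
    let blocks = vec![(ConsensusHeader { number: 1 }, 512usize), (ConsensusHeader { number: 2 }, 0)];

    let headers: Vec<RpcHeader> = blocks
        .iter()
        .filter_map(|(header, size)| match convert_header(header, *size) {
            Ok(converted) => Some(converted),
            Err(err) => {
                // the real implementation logs and skips instead of failing the stream
                eprintln!("failed to convert header: {err}");
                None
            }
        })
        .collect();

    assert_eq!(headers, vec![RpcHeader { number: 1, size: 512 }]);
}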
if logs { let tx_logs = result - .logs() - .iter() - .map(|log| { + .into_logs() + .into_iter() + .map(|inner| { let full_log = alloy_rpc_types_eth::Log { - inner: log.clone(), + inner, block_hash: None, block_number: None, block_timestamp: None, @@ -343,6 +340,8 @@ } // After processing all transactions, process refunds + // Store the original refundable value to calculate all payouts correctly + let original_refundable_value = refundable_value; for item in &flattened_bundle { if let Some(refund_percent) = item.refund_percent { // Get refund configurations @@ -358,9 +357,11 @@ // Add gas used for payout transactions total_gas_used += SBUNDLE_PAYOUT_MAX_COST * refund_configs.len() as u64; - // Calculate allocated refundable value (payout value) - let payout_value = - refundable_value * U256::from(refund_percent) / U256::from(100); + // Calculate allocated refundable value (payout value) based on ORIGINAL + // refundable value. This ensures all refund_percent + // values are calculated from the same base + let payout_value = original_refundable_value * U256::from(refund_percent) / + U256::from(100); if payout_tx_fee > payout_value { return Err(EthApiError::InvalidParams( diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index b5a20c19cf..816b39f485 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -42,6 +42,7 @@ mod net; mod otterscan; mod reth; mod rpc; +mod testing; mod trace; mod txpool; mod validation; @@ -58,6 +59,7 @@ pub use otterscan::OtterscanApi; pub use reth::RethApi; pub use reth_rpc_convert::RpcTypes; pub use rpc::RPCApi; +pub use testing::TestingApi; pub use trace::TraceApi; pub use txpool::TxPoolApi; pub use validation::{ValidationApi, ValidationApiConfig}; diff --git a/crates/rpc/rpc/src/testing.rs b/crates/rpc/rpc/src/testing.rs new file mode 100644 index 0000000000..833f0749e2 --- /dev/null +++ b/crates/rpc/rpc/src/testing.rs @@ -0,0 +1,127 @@ +//! Implementation of the `testing` namespace. +//! +//! This exposes `testing_buildBlockV1`, intended for non-production/debug use. + +use alloy_consensus::{Header, Transaction}; +use alloy_evm::Evm; +use alloy_primitives::U256; +use alloy_rpc_types_engine::ExecutionPayloadEnvelopeV5; +use async_trait::async_trait; +use jsonrpsee::core::RpcResult; +use reth_errors::RethError; +use reth_ethereum_engine_primitives::EthBuiltPayload; +use reth_ethereum_primitives::EthPrimitives; +use reth_evm::{execute::BlockBuilder, ConfigureEvm, NextBlockEnvAttributes}; +use reth_primitives_traits::{AlloyBlockHeader as BlockTrait, Recovered, TxTy}; +use reth_revm::{database::StateProviderDatabase, db::State}; +use reth_rpc_api::{TestingApiServer, TestingBuildBlockRequestV1}; +use reth_rpc_eth_api::{helpers::Call, FromEthApiError}; +use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; +use reth_storage_api::{BlockReader, HeaderProvider}; +use revm::context::Block; +use std::sync::Arc; + +/// Testing API handler. +#[derive(Debug, Clone)] +pub struct TestingApi { + eth_api: Eth, + evm_config: Evm, +} + +impl TestingApi { + /// Create a new testing API handler.
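// Illustrative sketch (not part of the patch): the sim_bundle hunk above snapshots the
// refundable value before the payout loop so every refund_percent is applied to the same
// base. The toy arithmetic below (u128 in place of U256) contrasts that with one plausible
// way the base could drift if it were reduced between payouts; it is not a transcription
// of the previous behavior.
fn payouts_from_original(refundable: u128, percents: &[u128]) -> Vec<u128> {
    let original = refundable;
    percents.iter().map(|p| original * p / 100).collect()
}

fn payouts_from_shrinking(mut refundable: u128, percents: &[u128]) -> Vec<u128> {
    percents
        .iter()
        .map(|p| {
            let payout = refundable * p / 100;
            refundable -= payout; // later percents would see a smaller base
            payout
        })
        .collect()
}

fn main() {
    let percents = [50, 50];
    // both refunds are 50% of the full 1000
    assert_eq!(payouts_from_original(1000, &percents), vec![500, 500]);
    // the second refund only sees the 500 left after the first payout
    assert_eq!(payouts_from_shrinking(1000, &percents), vec![500, 250]);
}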
+ pub const fn new(eth_api: Eth, evm_config: Evm) -> Self { + Self { eth_api, evm_config } + } +} + +impl TestingApi +where + Eth: Call>, + Evm: ConfigureEvm + + 'static, +{ + async fn build_block_v1( + &self, + request: TestingBuildBlockRequestV1, + ) -> Result { + let evm_config = self.evm_config.clone(); + self.eth_api + .spawn_with_state_at_block(request.parent_block_hash, move |eth_api, state| { + let state = state.database.0; + let mut db = State::builder() + .with_bundle_update() + .with_database(StateProviderDatabase::new(&state)) + .build(); + let parent = eth_api + .provider() + .sealed_header_by_hash(request.parent_block_hash)? + .ok_or_else(|| { + EthApiError::HeaderNotFound(request.parent_block_hash.into()) + })?; + + let env_attrs = NextBlockEnvAttributes { + timestamp: request.payload_attributes.timestamp, + suggested_fee_recipient: request.payload_attributes.suggested_fee_recipient, + prev_randao: request.payload_attributes.prev_randao, + gas_limit: parent.gas_limit(), + parent_beacon_block_root: request.payload_attributes.parent_beacon_block_root, + withdrawals: request.payload_attributes.withdrawals.map(Into::into), + extra_data: request.extra_data.unwrap_or_default(), + }; + + let mut builder = evm_config + .builder_for_next_block(&mut db, &parent, env_attrs) + .map_err(RethError::other) + .map_err(Eth::Error::from_eth_err)?; + builder.apply_pre_execution_changes().map_err(Eth::Error::from_eth_err)?; + + let mut total_fees = U256::ZERO; + let base_fee = builder.evm_mut().block().basefee(); + + for tx in request.transactions { + let tx: Recovered> = recover_raw_transaction(&tx)?; + let tip = tx.effective_tip_per_gas(base_fee).unwrap_or_default(); + let gas_used = + builder.execute_transaction(tx).map_err(Eth::Error::from_eth_err)?; + + total_fees += U256::from(tip) * U256::from(gas_used); + } + let outcome = builder.finish(&state).map_err(Eth::Error::from_eth_err)?; + + let requests = outcome + .block + .requests_hash() + .is_some() + .then_some(outcome.execution_result.requests); + + EthBuiltPayload::new( + alloy_rpc_types_engine::PayloadId::default(), + Arc::new(outcome.block.into_sealed_block()), + total_fees, + requests, + ) + .try_into_v5() + .map_err(RethError::other) + .map_err(Eth::Error::from_eth_err) + }) + .await + } +} + +#[async_trait] +impl TestingApiServer for TestingApi +where + Eth: Call>, + Evm: ConfigureEvm + + 'static, +{ + /// Handles `testing_buildBlockV1` by gating concurrency via a semaphore and offloading heavy + /// work to the blocking pool to avoid stalling the async runtime. 
+ async fn build_block_v1( + &self, + request: TestingBuildBlockRequestV1, + ) -> RpcResult { + self.build_block_v1(request).await.map_err(Into::into) + } +} diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 6e4205eead..0ead02a537 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -20,7 +20,6 @@ use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardfork, MAINNET, SEPOLIA}; use reth_evm::ConfigureEvm; use reth_primitives_traits::{BlockBody, BlockHeader}; -use reth_revm::{database::StateProviderDatabase, State}; use reth_rpc_api::TraceApiServer; use reth_rpc_convert::RpcTxReq; use reth_rpc_eth_api::{ @@ -102,10 +101,6 @@ where let this = self.clone(); self.eth_api() .spawn_with_call_at(trace_request.call, at, overrides, move |db, evm_env, tx_env| { - // wrapper is hack to get around 'higher-ranked lifetime error', see - // - let db = db.0; - let res = this.eth_api().inspect(&mut *db, evm_env, tx_env, &mut inspector)?; let trace_res = inspector .into_parity_builder() @@ -153,18 +148,14 @@ where let at = block_id.unwrap_or(BlockId::pending()); let (evm_env, at) = self.eth_api().evm_env_at(at).await?; - let this = self.clone(); // execute all transactions on top of each other and record the traces self.eth_api() - .spawn_with_state_at_block(at, move |state| { + .spawn_with_state_at_block(at, move |eth_api, mut db| { let mut results = Vec::with_capacity(calls.len()); - let mut db = - State::builder().with_database(StateProviderDatabase::new(state)).build(); - let mut calls = calls.into_iter().peekable(); while let Some((call, trace_types)) = calls.next() { - let (evm_env, tx_env) = this.eth_api().prepare_call_env( + let (evm_env, tx_env) = eth_api.prepare_call_env( evm_env.clone(), call, &mut db, @@ -172,7 +163,7 @@ where )?; let config = TracingInspectorConfig::from_parity_config(&trace_types); let mut inspector = TracingInspector::new(config); - let res = this.eth_api().inspect(&mut db, evm_env, tx_env, &mut inspector)?; + let res = eth_api.inspect(&mut db, evm_env, tx_env, &mut inspector)?; let trace_res = inspector .into_parity_builder() @@ -184,8 +175,6 @@ where // need to apply the state changes of this call before executing the // next call if calls.peek().is_some() { - // need to apply the state changes of this call before executing - // the next call db.commit(res.state) } } @@ -363,7 +352,7 @@ where ) -> Result, Eth::Error> { // We'll reuse the matcher across multiple blocks that are traced in parallel let matcher = Arc::new(filter.matcher()); - let TraceFilter { from_block, to_block, after, count, .. } = filter; + let TraceFilter { from_block, to_block, mut after, count, .. } = filter; let start = from_block.unwrap_or(0); let latest_block = self.provider().best_block_number().map_err(Eth::Error::from_eth_err)?; @@ -389,81 +378,98 @@ where .into()) } - // fetch all blocks in that range - let blocks = self - .provider() - .recovered_block_range(start..=end) - .map_err(Eth::Error::from_eth_err)? 
- .into_iter() - .map(Arc::new) - .collect::>(); + let mut all_traces = Vec::new(); + let mut block_traces = Vec::with_capacity(self.inner.eth_config.max_tracing_requests); + for chunk_start in (start..=end).step_by(self.inner.eth_config.max_tracing_requests) { + let chunk_end = + std::cmp::min(chunk_start + self.inner.eth_config.max_tracing_requests as u64, end); - // trace all blocks - let mut block_traces = Vec::with_capacity(blocks.len()); - for block in &blocks { - let matcher = matcher.clone(); - let traces = self.eth_api().trace_block_until( - block.hash().into(), - Some(block.clone()), - None, - TracingInspectorConfig::default_parity(), - move |tx_info, mut ctx| { - let mut traces = ctx - .take_inspector() - .into_parity_builder() - .into_localized_transaction_traces(tx_info); - traces.retain(|trace| matcher.matches(&trace.trace)); - Ok(Some(traces)) - }, - ); - block_traces.push(traces); - } + // fetch all blocks in that chunk + let blocks = self + .eth_api() + .spawn_blocking_io(move |this| { + Ok(this + .provider() + .recovered_block_range(chunk_start..=chunk_end) + .map_err(Eth::Error::from_eth_err)? + .into_iter() + .map(Arc::new) + .collect::>()) + }) + .await?; - let block_traces = futures::future::try_join_all(block_traces).await?; - let mut all_traces = block_traces - .into_iter() - .flatten() - .flat_map(|traces| traces.into_iter().flatten().flat_map(|traces| traces.into_iter())) - .collect::>(); - - // add reward traces for all blocks - for block in &blocks { - if let Some(base_block_reward) = self.calculate_base_block_reward(block.header())? { - all_traces.extend( - self.extract_reward_traces( - block.header(), - block.body().ommers(), - base_block_reward, - ) - .into_iter() - .filter(|trace| matcher.matches(&trace.trace)), + // trace all blocks + for block in &blocks { + let matcher = matcher.clone(); + let traces = self.eth_api().trace_block_until( + block.hash().into(), + Some(block.clone()), + None, + TracingInspectorConfig::default_parity(), + move |tx_info, mut ctx| { + let mut traces = ctx + .take_inspector() + .into_parity_builder() + .into_localized_transaction_traces(tx_info); + traces.retain(|trace| matcher.matches(&trace.trace)); + Ok(Some(traces)) + }, ); - } else { - // no block reward, means we're past the Paris hardfork and don't expect any rewards - // because the blocks in ascending order - break + block_traces.push(traces); } + + #[allow(clippy::iter_with_drain)] + let block_traces = futures::future::try_join_all(block_traces.drain(..)).await?; + all_traces.extend(block_traces.into_iter().flatten().flat_map(|traces| { + traces.into_iter().flatten().flat_map(|traces| traces.into_iter()) + })); + + // add reward traces for all blocks + for block in &blocks { + if let Some(base_block_reward) = self.calculate_base_block_reward(block.header())? { + all_traces.extend( + self.extract_reward_traces( + block.header(), + block.body().ommers(), + base_block_reward, + ) + .into_iter() + .filter(|trace| matcher.matches(&trace.trace)), + ); + } else { + // no block reward, means we're past the Paris hardfork and don't expect any + // rewards because the blocks in ascending order + break + } + } + + // Skips the first `after` number of matching traces. 
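The rewritten `trace_filter` above no longer loads the whole block range at once; it walks it in chunks of `max_tracing_requests` and fetches each chunk off the async runtime. A minimal sketch of splitting an inclusive block range into fixed-size chunks (illustrative only; the pagination via `after`/`count` continues in the hunk below):

```rust
use std::ops::RangeInclusive;

/// Illustrative only: walk an inclusive block range in fixed-size chunks, similar in spirit
/// to the `max_tracing_requests`-sized chunks used by `trace_filter` above.
fn chunked_ranges(start: u64, end: u64, chunk_size: u64) -> Vec<RangeInclusive<u64>> {
    assert!(chunk_size > 0, "chunk size must be non-zero");
    let mut ranges = Vec::new();
    let mut chunk_start = start;
    while chunk_start <= end {
        let chunk_end = chunk_start.saturating_add(chunk_size - 1).min(end);
        ranges.push(chunk_start..=chunk_end);
        if chunk_end == end {
            break;
        }
        chunk_start = chunk_end + 1;
    }
    ranges
}
```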
+ if let Some(cutoff) = after.map(|a| a as usize) && + cutoff < all_traces.len() + { + all_traces.drain(..cutoff); + // we removed the first `after` traces + after = None; + } + + // Return at most `count` of traces + if let Some(count) = count { + let count = count as usize; + if count < all_traces.len() { + all_traces.truncate(count); + return Ok(all_traces) + } + }; } - // Skips the first `after` number of matching traces. - // If `after` is greater than or equal to the number of matched traces, it returns an empty - // array. - if let Some(after) = after.map(|a| a as usize) { - if after < all_traces.len() { - all_traces.drain(..after); - } else { - return Ok(vec![]) - } + // If `after` is greater than or equal to the number of matched traces, it returns an + // empty array. + if let Some(cutoff) = after.map(|a| a as usize) && + cutoff >= all_traces.len() + { + return Ok(vec![]) } - // Return at most `count` of traces - if let Some(count) = count { - let count = count as usize; - if count < all_traces.len() { - all_traces.truncate(count); - } - }; - Ok(all_traces) } @@ -692,6 +698,7 @@ where /// # Limitations /// This currently requires block filter fields, since reth does not have address indices yet. async fn trace_filter(&self, filter: TraceFilter) -> RpcResult> { + let _permit = self.inner.blocking_task_guard.clone().acquire_many_owned(2).await; Ok(Self::trace_filter(self, filter).await.map_err(Into::into)?) } diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index 5c7bcd45a8..51355dc1c1 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -24,13 +24,13 @@ use tracing::trace; pub struct TxPoolApi { /// An interface to interact with the pool pool: Pool, - tx_resp_builder: Eth, + converter: Eth, } impl TxPoolApi { /// Creates a new instance of `TxpoolApi`. 
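The RPC entry point above now holds two permits from the blocking task guard for the duration of the request. A hedged sketch of the same idea with a plain `tokio::sync::Semaphore` rather than reth's guard type:

```rust
use std::sync::Arc;
use tokio::sync::Semaphore;

/// Illustrative only: bound how many expensive `trace_filter` calls run at once by holding
/// permits for the lifetime of the request.
async fn gated_trace_filter(guard: Arc<Semaphore>) {
    // Two permits per call, matching the weight given to trace_filter above.
    let _permit = guard.acquire_many_owned(2).await.expect("semaphore closed");
    // ... run the expensive tracing work while the permits are held ...
}
```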
- pub const fn new(pool: Pool, tx_resp_builder: Eth) -> Self { - Self { pool, tx_resp_builder } + pub const fn new(pool: Pool, converter: Eth) -> Self { + Self { pool, converter } } } @@ -65,10 +65,10 @@ where let mut content = TxpoolContent::default(); for pending in pending { - insert::<_, Eth>(&pending.transaction, &mut content.pending, &self.tx_resp_builder)?; + insert::<_, Eth>(&pending.transaction, &mut content.pending, &self.converter)?; } for queued in queued { - insert::<_, Eth>(&queued.transaction, &mut content.queued, &self.tx_resp_builder)?; + insert::<_, Eth>(&queued.transaction, &mut content.queued, &self.converter)?; } Ok(content) diff --git a/crates/stages/api/Cargo.toml b/crates/stages/api/Cargo.toml index c8eb81289d..6cdf45f790 100644 --- a/crates/stages/api/Cargo.toml +++ b/crates/stages/api/Cargo.toml @@ -43,6 +43,9 @@ auto_impl.workspace = true [dev-dependencies] assert_matches.workspace = true +reth-chainspec.workspace = true +reth-db = { workspace = true, features = ["test-utils"] } +reth-db-api.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } tokio = { workspace = true, features = ["sync", "rt-multi-thread"] } tokio-stream.workspace = true @@ -50,9 +53,12 @@ reth-testing-utils.workspace = true [features] test-utils = [ + "reth-chainspec/test-utils", "reth-consensus/test-utils", + "reth-db-api/test-utils", + "reth-db/test-utils", "reth-network-p2p/test-utils", + "reth-primitives-traits/test-utils", "reth-provider/test-utils", "reth-stages-types/test-utils", - "reth-primitives-traits/test-utils", ] diff --git a/crates/stages/api/src/metrics/listener.rs b/crates/stages/api/src/metrics/listener.rs index 8c0707d1be..2ae367eb36 100644 --- a/crates/stages/api/src/metrics/listener.rs +++ b/crates/stages/api/src/metrics/listener.rs @@ -52,17 +52,7 @@ impl MetricsListener { trace!(target: "sync::metrics", ?event, "Metric event received"); match event { MetricEvent::SyncHeight { height } => { - for stage_id in StageId::ALL { - self.handle_event(MetricEvent::StageCheckpoint { - stage_id, - checkpoint: StageCheckpoint { - block_number: height, - stage_checkpoint: None, - }, - max_block_number: Some(height), - elapsed: Duration::default(), - }); - } + self.update_all_stages_height(height); } MetricEvent::StageCheckpoint { stage_id, checkpoint, max_block_number, elapsed } => { let stage_metrics = self.sync_metrics.get_stage_metrics(stage_id); @@ -83,6 +73,17 @@ impl MetricsListener { } } } + + /// Updates all stage checkpoints to the given height efficiently. + fn update_all_stages_height(&mut self, height: BlockNumber) { + for stage_id in StageId::ALL { + let stage_metrics = self.sync_metrics.get_stage_metrics(stage_id); + let height_f64 = height as f64; + stage_metrics.checkpoint.set(height_f64); + stage_metrics.entities_processed.set(height_f64); + stage_metrics.entities_total.set(height_f64); + } + } } impl Future for MetricsListener { diff --git a/crates/stages/api/src/pipeline/builder.rs b/crates/stages/api/src/pipeline/builder.rs index 56b895cac7..818b037da7 100644 --- a/crates/stages/api/src/pipeline/builder.rs +++ b/crates/stages/api/src/pipeline/builder.rs @@ -11,7 +11,7 @@ pub struct PipelineBuilder { stages: Vec>, /// The maximum block number to sync to. max_block: Option, - /// A receiver for the current chain tip to sync to. + /// A Sender for the current chain tip to sync to. 
tip_tx: Option>, metrics_tx: Option, fail_on_unwind: bool, @@ -35,11 +35,9 @@ impl PipelineBuilder { /// [`builder`][StageSet::builder] on the set which will convert it to a /// [`StageSetBuilder`][crate::StageSetBuilder]. pub fn add_stages>(mut self, set: Set) -> Self { - let states = set.builder().build(); - self.stages.reserve_exact(states.len()); - for stage in states { - self.stages.push(stage); - } + let stages = set.builder().build(); + self.stages.reserve(stages.len()); + self.stages.extend(stages); self } diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index e8542c36da..9b13badc76 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -315,7 +315,8 @@ impl Pipeline { // attempt to proceed with a finalized block which has been unwinded let _locked_sf_producer = self.static_file_producer.lock(); - let mut provider_rw = self.provider_factory.database_provider_rw()?; + let mut provider_rw = + self.provider_factory.database_provider_rw()?.disable_long_read_transaction_safety(); for stage in unwind_pipeline { let stage_id = stage.id(); diff --git a/crates/stages/api/src/stage.rs b/crates/stages/api/src/stage.rs index 9fc3038c69..b3095da718 100644 --- a/crates/stages/api/src/stage.rs +++ b/crates/stages/api/src/stage.rs @@ -1,12 +1,13 @@ use crate::{error::StageError, StageCheckpoint, StageId}; use alloy_primitives::{BlockNumber, TxNumber}; -use reth_provider::{BlockReader, ProviderError}; +use reth_provider::{BlockReader, ProviderError, StaticFileProviderFactory, StaticFileSegment}; use std::{ cmp::{max, min}, future::{poll_fn, Future}, ops::{Range, RangeInclusive}, task::{Context, Poll}, }; +use tracing::instrument; /// Stage execution input, see [`Stage::execute`]. #[derive(Debug, Default, PartialEq, Eq, Clone, Copy)] @@ -17,6 +18,26 @@ pub struct ExecInput { pub checkpoint: Option, } +/// Return type for [`ExecInput::next_block_range_with_threshold`]. +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct BlockRangeOutput { + /// The block range to execute. + pub block_range: RangeInclusive, + /// Whether this is the final range to execute. + pub is_final_range: bool, +} + +/// Return type for [`ExecInput::next_block_range_with_transaction_threshold`]. +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct TransactionRangeOutput { + /// The transaction range to execute. + pub tx_range: Range, + /// The block range to execute. + pub block_range: RangeInclusive, + /// Whether this is the final range to execute. + pub is_final_range: bool, +} + impl ExecInput { /// Return the checkpoint of the stage or default. pub fn checkpoint(&self) -> StageCheckpoint { @@ -42,8 +63,7 @@ impl ExecInput { /// Return next block range that needs to be executed. pub fn next_block_range(&self) -> RangeInclusive { - let (range, _) = self.next_block_range_with_threshold(u64::MAX); - range + self.next_block_range_with_threshold(u64::MAX).block_range } /// Return true if this is the first block range to execute. @@ -52,11 +72,7 @@ impl ExecInput { } /// Return the next block range to execute. - /// Return pair of the block range and if this is final block range. 
- pub fn next_block_range_with_threshold( - &self, - threshold: u64, - ) -> (RangeInclusive, bool) { + pub fn next_block_range_with_threshold(&self, threshold: u64) -> BlockRangeOutput { let current_block = self.checkpoint(); let start = current_block.block_number + 1; let target = self.target(); @@ -64,23 +80,46 @@ impl ExecInput { let end = min(target, current_block.block_number.saturating_add(threshold)); let is_final_range = end == target; - (start..=end, is_final_range) + BlockRangeOutput { block_range: start..=end, is_final_range } } /// Return the next block range determined the number of transactions within it. /// This function walks the block indices until either the end of the range is reached or /// the number of transactions exceeds the threshold. + /// + /// Returns [`None`] if no transactions are found for the current execution input. + #[instrument(level = "debug", target = "sync::stages", skip(provider), ret)] pub fn next_block_range_with_transaction_threshold( &self, provider: &Provider, tx_threshold: u64, - ) -> Result<(Range, RangeInclusive, bool), StageError> + ) -> Result, StageError> where - Provider: BlockReader, + Provider: StaticFileProviderFactory + BlockReader, { - let start_block = self.next_block(); + // Get lowest available block number for transactions + let Some(lowest_transactions_block) = + provider.static_file_provider().get_lowest_range_start(StaticFileSegment::Transactions) + else { + return Ok(None) + }; + + // We can only process transactions that have associated static files, so we cap the start + // block by lowest available block number. + // + // Certain transactions may not have associated static files when user deletes them + // manually. In that case, we can't process them, and need to adjust the start block + // accordingly. + let start_block = self.next_block().max(lowest_transactions_block); let target_block = self.target(); + // If the start block is greater than the target, then there's no transactions to process + // and we return early. It's possible to trigger this scenario when running `reth + // stage run` manually for a range of transactions that doesn't exist. + if start_block > target_block { + return Ok(None) + } + let start_block_body = provider .block_body_indices(start_block)? .ok_or(ProviderError::BlockBodyIndicesNotFound(start_block))?; @@ -95,7 +134,7 @@ impl ExecInput { if all_tx_cnt == 0 { // if there is no more transaction return back. - return Ok((first_tx_num..first_tx_num, start_block..=target_block, true)) + return Ok(None) } // get block of this tx @@ -105,7 +144,7 @@ impl ExecInput { // get tx block number. next_tx_num in this case will be less than all_tx_cnt. // So we are sure that transaction must exist. let end_block_number = provider - .transaction_block(first_tx_num + tx_threshold)? + .block_by_transaction_id(first_tx_num + tx_threshold)? .expect("block of tx must exist"); // we want to get range of all transactions of this block, so we are fetching block // body. @@ -116,7 +155,11 @@ impl ExecInput { }; let tx_range = first_tx_num..next_tx_num; - Ok((tx_range, start_block..=end_block, is_final_range)) + Ok(Some(TransactionRangeOutput { + tx_range, + block_range: start_block..=end_block, + is_final_range, + })) } } @@ -195,7 +238,7 @@ pub struct UnwindOutput { /// /// Stages receive [`DBProvider`](reth_provider::DBProvider). #[auto_impl::auto_impl(Box)] -pub trait Stage: Send + Sync { +pub trait Stage: Send { /// Get the ID of the stage. /// /// Stage IDs must be unique. 
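`next_block_range_with_threshold` above now returns a named `BlockRangeOutput` instead of a tuple; the range computation itself is unchanged. A standalone sketch of its shape, assuming plain `u64` block numbers (illustrative only):

```rust
use std::ops::RangeInclusive;

/// Illustrative only: start just past the checkpoint, cap the end by the threshold, and
/// flag whether the capped end already reaches the target.
fn next_block_range_with_threshold(
    checkpoint_block: u64,
    target: u64,
    threshold: u64,
) -> (RangeInclusive<u64>, bool) {
    let start = checkpoint_block + 1;
    let end = target.min(checkpoint_block.saturating_add(threshold));
    let is_final_range = end == target;
    (start..=end, is_final_range)
}
```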
@@ -277,3 +320,167 @@ pub trait StageExt: Stage { } impl + ?Sized> StageExt for S {} + +#[cfg(test)] +mod tests { + use reth_chainspec::MAINNET; + use reth_db::test_utils::{ + create_test_rocksdb_dir, create_test_rw_db, create_test_static_files_dir, + }; + use reth_db_api::{models::StoredBlockBodyIndices, tables, transaction::DbTxMut}; + use reth_provider::{ + providers::RocksDBProvider, test_utils::MockNodeTypesWithDB, ProviderFactory, + StaticFileProviderBuilder, StaticFileProviderFactory, StaticFileSegment, + }; + use reth_stages_types::StageCheckpoint; + use reth_testing_utils::generators::{self, random_signed_tx}; + + use crate::ExecInput; + + #[test] + fn test_exec_input_next_block_range_with_transaction_threshold() { + let mut rng = generators::rng(); + let provider_factory = ProviderFactory::::new( + create_test_rw_db(), + MAINNET.clone(), + StaticFileProviderBuilder::read_write(create_test_static_files_dir().0.keep()) + .unwrap() + .with_blocks_per_file(1) + .build() + .unwrap(), + RocksDBProvider::builder(create_test_rocksdb_dir().0.keep()).build().unwrap(), + ) + .unwrap(); + + // Without checkpoint, without transactions in static files + { + let exec_input = ExecInput { target: Some(100), checkpoint: None }; + + let range_output = exec_input + .next_block_range_with_transaction_threshold(&provider_factory, 10) + .unwrap(); + assert!(range_output.is_none()); + } + + // With checkpoint at block 10, without transactions in static files + { + let exec_input = + ExecInput { target: Some(1), checkpoint: Some(StageCheckpoint::new(10)) }; + + let range_output = exec_input + .next_block_range_with_transaction_threshold(&provider_factory, 10) + .unwrap(); + assert!(range_output.is_none()); + } + + // Without checkpoint, with transactions in static files starting from block 1 + { + let exec_input = ExecInput { target: Some(1), checkpoint: None }; + + let mut provider_rw = provider_factory.provider_rw().unwrap(); + provider_rw + .tx_mut() + .put::( + 1, + StoredBlockBodyIndices { first_tx_num: 0, tx_count: 2 }, + ) + .unwrap(); + let mut writer = + provider_rw.get_static_file_writer(0, StaticFileSegment::Transactions).unwrap(); + writer.increment_block(0).unwrap(); + writer.increment_block(1).unwrap(); + writer.append_transaction(0, &random_signed_tx(&mut rng)).unwrap(); + writer.append_transaction(1, &random_signed_tx(&mut rng)).unwrap(); + drop(writer); + provider_rw.commit().unwrap(); + + let range_output = exec_input + .next_block_range_with_transaction_threshold(&provider_factory, 10) + .unwrap() + .unwrap(); + assert_eq!(range_output.tx_range, 0..2); + assert_eq!(range_output.block_range, 1..=1); + assert!(range_output.is_final_range); + } + + // With checkpoint at block 1, with transactions in static files starting from block 1 + { + let exec_input = + ExecInput { target: Some(2), checkpoint: Some(StageCheckpoint::new(1)) }; + + let mut provider_rw = provider_factory.provider_rw().unwrap(); + provider_rw + .tx_mut() + .put::( + 2, + StoredBlockBodyIndices { first_tx_num: 2, tx_count: 1 }, + ) + .unwrap(); + let mut writer = + provider_rw.get_static_file_writer(1, StaticFileSegment::Transactions).unwrap(); + writer.increment_block(2).unwrap(); + writer.append_transaction(2, &random_signed_tx(&mut rng)).unwrap(); + drop(writer); + provider_rw.commit().unwrap(); + + let range_output = exec_input + .next_block_range_with_transaction_threshold(&provider_factory, 10) + .unwrap() + .unwrap(); + assert_eq!(range_output.tx_range, 2..3); + assert_eq!(range_output.block_range, 2..=2); + 
assert!(range_output.is_final_range); + } + + // Without checkpoint, with transactions in static files starting from block 2 + { + let exec_input = ExecInput { target: Some(2), checkpoint: None }; + + provider_factory + .static_file_provider() + .delete_jar(StaticFileSegment::Transactions, 0) + .unwrap(); + provider_factory + .static_file_provider() + .delete_jar(StaticFileSegment::Transactions, 1) + .unwrap(); + + let range_output = exec_input + .next_block_range_with_transaction_threshold(&provider_factory, 10) + .unwrap() + .unwrap(); + assert_eq!(range_output.tx_range, 2..3); + assert_eq!(range_output.block_range, 2..=2); + assert!(range_output.is_final_range); + } + + // Without checkpoint, with transactions in static files starting from block 2 + { + let exec_input = + ExecInput { target: Some(3), checkpoint: Some(StageCheckpoint::new(2)) }; + + let mut provider_rw = provider_factory.provider_rw().unwrap(); + provider_rw + .tx_mut() + .put::( + 3, + StoredBlockBodyIndices { first_tx_num: 3, tx_count: 1 }, + ) + .unwrap(); + let mut writer = + provider_rw.get_static_file_writer(1, StaticFileSegment::Transactions).unwrap(); + writer.increment_block(3).unwrap(); + writer.append_transaction(3, &random_signed_tx(&mut rng)).unwrap(); + drop(writer); + provider_rw.commit().unwrap(); + + let range_output = exec_input + .next_block_range_with_transaction_threshold(&provider_factory, 10) + .unwrap() + .unwrap(); + assert_eq!(range_output.tx_range, 3..4); + assert_eq!(range_output.block_range, 3..=3); + assert!(range_output.is_final_range); + } + } +} diff --git a/crates/stages/stages/Cargo.toml b/crates/stages/stages/Cargo.toml index 32114c58e1..462a6d74c7 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -75,6 +75,7 @@ reth-network-p2p = { workspace = true, features = ["test-utils"] } reth-downloaders.workspace = true reth-static-file.workspace = true reth-stages-api = { workspace = true, features = ["test-utils"] } +reth-storage-api.workspace = true reth-testing-utils.workspace = true reth-trie = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } @@ -116,6 +117,7 @@ test-utils = [ "reth-ethereum-primitives?/test-utils", "reth-evm-ethereum/test-utils", ] +rocksdb = ["reth-provider/rocksdb"] [[bench]] name = "criterion" diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index 015be50733..48a2a99580 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -270,8 +270,14 @@ where Stage, { fn builder(self) -> StageSetBuilder { - StageSetBuilder::default() - .add_stage(EraStage::new(self.era_import_source, self.stages_config.etl.clone())) + let mut builder = StageSetBuilder::default(); + + if self.era_import_source.is_some() { + builder = builder + .add_stage(EraStage::new(self.era_import_source, self.stages_config.etl.clone())); + } + + builder .add_stage(HeaderStage::new( self.provider, self.header_downloader, diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 7b6090ca86..0d6a4b4fd7 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -202,10 +202,7 @@ where // Write bodies to database. 
provider.append_block_bodies( - buffer - .into_iter() - .map(|response| (response.block_number(), response.into_body())) - .collect(), + buffer.iter().map(|response| (response.block_number(), response.body())).collect(), )?; // The stage is "done" if: @@ -772,8 +769,9 @@ mod tests { *range.start()..*range.end() + 1, |cursor, number| cursor.get_two::>(number.into()), )? { - let (header, hash) = header?; - self.headers.push_back(SealedHeader::new(header, hash)); + if let Some((header, hash)) = header? { + self.headers.push_back(SealedHeader::new(header, hash)); + } } Ok(()) diff --git a/crates/stages/stages/src/stages/era.rs b/crates/stages/stages/src/stages/era.rs index 10598f9011..6e81054ed6 100644 --- a/crates/stages/stages/src/stages/era.rs +++ b/crates/stages/stages/src/stages/era.rs @@ -4,7 +4,7 @@ use futures_util::{Stream, StreamExt}; use reqwest::{Client, Url}; use reth_config::config::EtlConfig; use reth_db_api::{table::Value, transaction::DbTxMut}; -use reth_era::{era1_file::Era1Reader, era_file_ops::StreamReader}; +use reth_era::{common::file_ops::StreamReader, era1::file::Era1Reader}; use reth_era_downloader::{read_dir, EraClient, EraMeta, EraStream, EraStreamConfig}; use reth_era_utils as era; use reth_etl::Collector; @@ -195,7 +195,7 @@ where } era::save_stage_checkpoints( - &provider, + provider, input.checkpoint().block_number, height, height, @@ -204,13 +204,24 @@ where height } else { - // It's possible for a pipeline sync to be executed with a None target, e.g. after a - // stage was manually dropped, and `reth node` is then called without a `--debug.tip`. + // No era files to process. Return the highest block we're aware of to avoid + // limiting subsequent stages with an outdated checkpoint. // - // In this case we don't want to simply default to zero, as that would overwrite the - // previously stored checkpoint block number. Instead we default to that previous - // checkpoint. - input.target.unwrap_or_else(|| input.checkpoint().block_number) + // This can happen when: + // 1. Era import is complete (all pre-merge blocks imported) + // 2. No era import source was configured + // + // We return max(checkpoint, highest_header, target) to ensure we don't return + // a stale checkpoint that could limit subsequent stages like Headers. 
+ let highest_header = provider + .static_file_provider() + .get_highest_static_file_block(StaticFileSegment::Headers) + .unwrap_or_default(); + + let checkpoint = input.checkpoint().block_number; + let from_target = input.target.unwrap_or(checkpoint); + + checkpoint.max(highest_header).max(from_target) }; Ok(ExecOutput { checkpoint: StageCheckpoint::new(height), done: height >= input.target() }) diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index adfc87c5cc..b4931faa9e 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -11,9 +11,9 @@ use reth_exex::{ExExManagerHandle, ExExNotification, ExExNotificationSource}; use reth_primitives_traits::{format_gas_throughput, BlockBody, NodePrimitives}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, - BlockHashReader, BlockReader, DBProvider, ExecutionOutcome, HeaderProvider, + BlockHashReader, BlockReader, DBProvider, EitherWriter, ExecutionOutcome, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, StateWriter, - StaticFileProviderFactory, StatsReader, TransactionVariant, + StaticFileProviderFactory, StatsReader, StorageSettingsCache, TransactionVariant, }; use reth_revm::database::StateProviderDatabase; use reth_stages_api::{ @@ -23,7 +23,7 @@ use reth_stages_api::{ }; use reth_static_file_types::StaticFileSegment; use std::{ - cmp::Ordering, + cmp::{max, Ordering}, ops::RangeInclusive, sync::Arc, task::{ready, Context, Poll}, @@ -185,11 +185,15 @@ where unwind_to: Option, ) -> Result<(), StageError> where - Provider: StaticFileProviderFactory + DBProvider + BlockReader + HeaderProvider, + Provider: StaticFileProviderFactory + + DBProvider + + BlockReader + + HeaderProvider + + StorageSettingsCache, { - // If there's any receipts pruning configured, receipts are written directly to database and - // inconsistencies are expected. - if provider.prune_modes_ref().has_receipts_pruning() { + // On old nodes, if there's any receipts pruning configured, receipts are written directly + // to database and inconsistencies are expected. + if EitherWriter::receipts_destination(provider).is_database() { return Ok(()) } @@ -205,16 +209,23 @@ where .map(|num| num + 1) .unwrap_or(0); + // Get highest block number in static files for receipts + let static_file_block_num = static_file_provider + .get_highest_static_file_block(StaticFileSegment::Receipts) + .unwrap_or(0); + // Check if we had any unexpected shutdown after committing to static files, but // NOT committing to database. - match next_static_file_receipt_num.cmp(&next_receipt_num) { + match static_file_block_num.cmp(&checkpoint) { // It can be equal when it's a chain of empty blocks, but we still need to update the // last block in the range. Ordering::Greater | Ordering::Equal => { let mut static_file_producer = static_file_provider.latest_writer(StaticFileSegment::Receipts)?; - static_file_producer - .prune_receipts(next_static_file_receipt_num - next_receipt_num, checkpoint)?; + static_file_producer.prune_receipts( + next_static_file_receipt_num.saturating_sub(next_receipt_num), + checkpoint, + )?; // Since this is a database <-> static file inconsistency, we commit the change // straight away. static_file_producer.commit()?; @@ -222,19 +233,14 @@ where Ordering::Less => { // If we are already in the process of unwind, this might be fine because we will // fix the inconsistency right away. 
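The consistency check in this hunk compares the highest receipts block in static files against the stage checkpoint: equal or greater means pruning the excess receipt entries, while a lower value is only acceptable when a pending unwind reaches at least as far back as the static files. A condensed sketch of that decision, with errors reduced to a string (illustrative only):

```rust
use std::cmp::Ordering;

/// Illustrative only: the outcome of the static-file vs. checkpoint comparison above.
fn reconcile_receipts(
    static_file_block: u64,
    checkpoint: u64,
    unwind_to: Option<u64>,
) -> Result<(), &'static str> {
    match static_file_block.cmp(&checkpoint) {
        // Static files caught up (or ahead): prune the extra receipt entries and continue.
        Ordering::Greater | Ordering::Equal => Ok(()),
        // Static files behind the database: fine only if a pending unwind goes at least as
        // far back as the static files, otherwise data is genuinely missing.
        Ordering::Less => match unwind_to {
            Some(unwind_to) if unwind_to <= static_file_block => Ok(()),
            _ => Err("missing static data for receipts"),
        },
    }
}
```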
- if let Some(unwind_to) = unwind_to { - let next_receipt_num_after_unwind = provider - .block_body_indices(unwind_to)? - .map(|b| b.next_tx_num()) - .ok_or(ProviderError::BlockBodyIndicesNotFound(unwind_to))?; - - if next_receipt_num_after_unwind > next_static_file_receipt_num { - // This means we need a deeper unwind. - } else { - return Ok(()) - } + if let Some(unwind_to) = unwind_to && + unwind_to <= static_file_block_num + { + return Ok(()) } + // Otherwise, this is a real inconsistency - database has more blocks than static + // files return Err(missing_static_data_error( next_static_file_receipt_num.saturating_sub(1), &static_file_provider, @@ -259,7 +265,8 @@ where Primitives: NodePrimitives, > + StatsReader + BlockHashReader - + StateWriter::Receipt>, + + StateWriter::Receipt> + + StorageSettingsCache, { /// Return the id of the stage fn id(&self) -> StageId { @@ -613,7 +620,11 @@ where // Otherwise, we recalculate the whole stage checkpoint including the amount of gas // already processed, if there's any. _ => { - let processed = calculate_gas_used_from_headers(provider, 0..=start_block - 1)?; + let genesis_block_number = provider.genesis_block_number(); + let processed = calculate_gas_used_from_headers( + provider, + genesis_block_number..=max(start_block - 1, genesis_block_number), + )?; ExecutionCheckpoint { block_range: CheckpointBlockRange { from: start_block, to: max_block }, @@ -646,8 +657,9 @@ where *range.start()..*range.end() + 1, |cursor, number| cursor.get_one::>(number.into()), )? { - let entry = entry?; - gas_total += entry.gas_used(); + if let Some(entry) = entry? { + gas_total += entry.gas_used(); + } } let duration = start.elapsed(); @@ -660,12 +672,12 @@ where mod tests { use super::*; use crate::{stages::MERKLE_STAGE_DEFAULT_REBUILD_THRESHOLD, test_utils::TestStageDB}; - use alloy_primitives::{address, hex_literal::hex, keccak256, B256, U256}; + use alloy_primitives::{address, hex_literal::hex, keccak256, Address, B256, U256}; use alloy_rlp::Decodable; use assert_matches::assert_matches; use reth_chainspec::ChainSpecBuilder; use reth_db_api::{ - models::AccountBeforeTx, + models::{metadata::StorageSettings, AccountBeforeTx}, transaction::{DbTx, DbTxMut}, }; use reth_ethereum_consensus::EthBeaconConsensus; @@ -677,7 +689,10 @@ mod tests { DatabaseProviderFactory, ReceiptProvider, StaticFileProviderFactory, }; use reth_prune::PruneModes; + use reth_prune_types::{PruneMode, ReceiptsLogPruneConfig}; use reth_stages_api::StageUnitCheckpoint; + use reth_testing_utils::generators; + use std::collections::BTreeMap; fn stage() -> ExecutionStage { let evm_config = @@ -733,8 +748,8 @@ mod tests { let genesis = SealedBlock::::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = 
hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::::decode(&mut block_rlp).unwrap(); - provider.insert_block(genesis.try_recover().unwrap()).unwrap(); - provider.insert_block(block.clone().try_recover().unwrap()).unwrap(); + provider.insert_block(&genesis.try_recover().unwrap()).unwrap(); + provider.insert_block(&block.clone().try_recover().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -774,8 +789,8 @@ mod tests { let genesis = SealedBlock::::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::::decode(&mut block_rlp).unwrap(); - provider.insert_block(genesis.try_recover().unwrap()).unwrap(); - provider.insert_block(block.clone().try_recover().unwrap()).unwrap(); + provider.insert_block(&genesis.try_recover().unwrap()).unwrap(); + provider.insert_block(&block.clone().try_recover().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -815,8 +830,8 @@ mod tests { let genesis = SealedBlock::::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = 
hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::::decode(&mut block_rlp).unwrap(); - provider.insert_block(genesis.try_recover().unwrap()).unwrap(); - provider.insert_block(block.clone().try_recover().unwrap()).unwrap(); + provider.insert_block(&genesis.try_recover().unwrap()).unwrap(); + provider.insert_block(&block.clone().try_recover().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -848,8 +863,8 @@ mod tests { let genesis = SealedBlock::::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::::decode(&mut block_rlp).unwrap(); - provider.insert_block(genesis.try_recover().unwrap()).unwrap(); - provider.insert_block(block.clone().try_recover().unwrap()).unwrap(); + provider.insert_block(&genesis.try_recover().unwrap()).unwrap(); + provider.insert_block(&block.clone().try_recover().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -894,11 +909,20 @@ mod tests { // If there is a pruning configuration, then it's forced to use the database. 
// This way we test both cases. let modes = [None, Some(PruneModes::default())]; + let random_filter = ReceiptsLogPruneConfig(BTreeMap::from([( + Address::random(), + PruneMode::Distance(100000), + )])); // Tests node with database and node with static files - for mode in modes { + for mut mode in modes { let mut provider = factory.database_provider_rw().unwrap(); + if let Some(mode) = &mut mode { + // Simulating a full node where we write receipts to database + mode.receipts_log_filter = random_filter.clone(); + } + let mut execution_stage = stage(); provider.set_prune_modes(mode.clone().unwrap_or_default()); @@ -981,8 +1005,8 @@ mod tests { let genesis = SealedBlock::::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::::decode(&mut block_rlp).unwrap(); - provider.insert_block(genesis.try_recover().unwrap()).unwrap(); - provider.insert_block(block.clone().try_recover().unwrap()).unwrap(); + provider.insert_block(&genesis.try_recover().unwrap()).unwrap(); + provider.insert_block(&block.clone().try_recover().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -1022,9 +1046,18 @@ mod tests { // If there is a pruning configuration, then it's forced to use the database. // This way we test both cases. 
let modes = [None, Some(PruneModes::default())]; + let random_filter = ReceiptsLogPruneConfig(BTreeMap::from([( + Address::random(), + PruneMode::Before(100000), + )])); // Tests node with database and node with static files - for mode in modes { + for mut mode in modes { + if let Some(mode) = &mut mode { + // Simulating a full node where we write receipts to database + mode.receipts_log_filter = random_filter.clone(); + } + // Test Execution let mut execution_stage = stage(); provider.set_prune_modes(mode.clone().unwrap_or_default()); @@ -1082,8 +1115,8 @@ mod tests { let genesis = SealedBlock::::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa050554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583da00967f09ef1dfed20c0eacfaa94d5cd4002eda3242ac47eae68972d07b106d192a0e3c8b47fbfc94667ef4cceb17e5cc21e3b1eebd442cebb27f07562b33836290db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001830f42408238108203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f862f860800a83061a8094095e7baea6a6c7c4c2dfeb977efac326af552d8780801ba072ed817487b84ba367d15d2f039b5fc5f087d0a8882fbdf73e8cb49357e1ce30a0403d800545b8fc544f92ce8124e2255f8c3c6af93f28243a120585d4c4c6a2a3c0").as_slice(); let block = SealedBlock::::decode(&mut block_rlp).unwrap(); - provider.insert_block(genesis.try_recover().unwrap()).unwrap(); - provider.insert_block(block.clone().try_recover().unwrap()).unwrap(); + provider.insert_block(&genesis.try_recover().unwrap()).unwrap(); + provider.insert_block(&block.clone().try_recover().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -1216,4 +1249,68 @@ mod tests { ] ); } + + #[test] + fn test_ensure_consistency_with_skipped_receipts() { + // Test that ensure_consistency allows the case where receipts are intentionally + // skipped. When receipts are skipped, blocks are still incremented in static files + // but no receipt data is written. 
+ + let factory = create_test_provider_factory(); + factory.set_storage_settings_cache( + StorageSettings::legacy().with_receipts_in_static_files(true), + ); + + // Setup with block 1 + let provider_rw = factory.database_provider_rw().unwrap(); + let mut rng = generators::rng(); + let genesis = generators::random_block(&mut rng, 0, Default::default()); + provider_rw + .insert_block(&genesis.try_recover().unwrap()) + .expect("failed to insert genesis"); + let block = generators::random_block( + &mut rng, + 1, + generators::BlockParams { tx_count: Some(2), ..Default::default() }, + ); + provider_rw.insert_block(&block.try_recover().unwrap()).expect("failed to insert block"); + + let static_file_provider = provider_rw.static_file_provider(); + static_file_provider.latest_writer(StaticFileSegment::Headers).unwrap().commit().unwrap(); + + // Simulate skipped receipts: increment block in receipts static file but don't write + // receipts + { + let mut receipts_writer = + static_file_provider.latest_writer(StaticFileSegment::Receipts).unwrap(); + receipts_writer.increment_block(0).unwrap(); + receipts_writer.increment_block(1).unwrap(); + receipts_writer.commit().unwrap(); + } // Explicitly drop receipts_writer here + + provider_rw.commit().expect("failed to commit"); + + // Verify blocks are incremented but no receipts written + assert_eq!( + factory + .static_file_provider() + .get_highest_static_file_block(StaticFileSegment::Receipts), + Some(1) + ); + assert_eq!( + factory.static_file_provider().get_highest_static_file_tx(StaticFileSegment::Receipts), + None + ); + + // Create execution stage + let stage = stage(); + + // Run ensure_consistency - should NOT error + // Block numbers match (both at 1), but tx numbers don't (database has txs, static files + // don't) This is fine - receipts are being skipped + let provider = factory.provider().unwrap(); + stage + .ensure_consistency(&provider, 1, None) + .expect("ensure_consistency should succeed when receipts are intentionally skipped"); + } } diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index 1e48f2d38e..d9e5c9e2c3 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -86,7 +86,7 @@ impl AccountHashingStage { ); for block in blocks { - provider.insert_block(block.try_recover().unwrap()).unwrap(); + provider.insert_block(&block.try_recover().unwrap()).unwrap(); } provider .static_file_provider() diff --git a/crates/stages/stages/src/stages/hashing_storage.rs b/crates/stages/stages/src/stages/hashing_storage.rs index c52f800a01..c879cc029b 100644 --- a/crates/stages/stages/src/stages/hashing_storage.rs +++ b/crates/stages/stages/src/stages/hashing_storage.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{bytes::BufMut, keccak256, B256}; +use alloy_primitives::{bytes::BufMut, keccak256, Address, B256}; use itertools::Itertools; use reth_config::config::{EtlConfig, HashingConfig}; use reth_db_api::{ @@ -101,9 +101,16 @@ where let chunk = chunk.collect::, _>>()?; // Spawn the hashing task onto the global rayon pool rayon::spawn(move || { + // Cache hashed address since PlainStorageState is sorted by address + let (mut last_addr, mut hashed_addr) = + (Address::ZERO, keccak256(Address::ZERO)); for (address, slot) in chunk { + if address != last_addr { + last_addr = address; + hashed_addr = keccak256(address); + } let mut addr_key = Vec::with_capacity(64); - addr_key.put_slice(keccak256(address).as_slice()); + 
addr_key.put_slice(hashed_addr.as_slice()); addr_key.put_slice(keccak256(slot.key).as_slice()); let _ = tx.send((addr_key, CompactU256::from(slot.value))); } diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 8ad39be5eb..360b34b5db 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -14,7 +14,9 @@ use reth_network_p2p::headers::{ downloader::{HeaderDownloader, HeaderSyncGap, SyncTarget}, error::HeadersDownloaderError, }; -use reth_primitives_traits::{serde_bincode_compat, FullBlockHeader, NodePrimitives, SealedHeader}; +use reth_primitives_traits::{ + serde_bincode_compat, FullBlockHeader, HeaderTy, NodePrimitives, SealedHeader, +}; use reth_provider::{ providers::StaticFileWriter, BlockHashReader, DBProvider, HeaderSyncGapProvider, StaticFileProviderFactory, @@ -333,8 +335,9 @@ where (input.unwind_to + 1).., )?; provider.tx_ref().unwind_table_by_num::(input.unwind_to)?; - let unfinalized_headers_unwound = - provider.tx_ref().unwind_table_by_num::(input.unwind_to)?; + let unfinalized_headers_unwound = provider.tx_ref().unwind_table_by_num::, + >>(input.unwind_to)?; // determine how many headers to unwind from the static files based on the highest block and // the unwind_to block diff --git a/crates/stages/stages/src/stages/merkle_changesets.rs b/crates/stages/stages/src/stages/merkle_changesets.rs index 7bf756c3dd..81983b7366 100644 --- a/crates/stages/stages/src/stages/merkle_changesets.rs +++ b/crates/stages/stages/src/stages/merkle_changesets.rs @@ -4,16 +4,21 @@ use alloy_primitives::BlockNumber; use reth_consensus::ConsensusError; use reth_primitives_traits::{GotExpected, SealedHeader}; use reth_provider::{ - ChainStateBlockReader, DBProvider, HeaderProvider, ProviderError, StageCheckpointReader, - TrieWriter, + ChainStateBlockReader, DBProvider, HeaderProvider, ProviderError, PruneCheckpointReader, + PruneCheckpointWriter, StageCheckpointReader, StageCheckpointWriter, TrieWriter, +}; +use reth_prune_types::{ + PruneCheckpoint, PruneMode, PruneSegment, MERKLE_CHANGESETS_RETENTION_BLOCKS, }; use reth_stages_api::{ - BlockErrorKind, CheckpointBlockRange, ExecInput, ExecOutput, MerkleChangeSetsCheckpoint, Stage, - StageCheckpoint, StageError, StageId, UnwindInput, UnwindOutput, + BlockErrorKind, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, + UnwindInput, UnwindOutput, +}; +use reth_trie::{ + updates::TrieUpdates, HashedPostStateSorted, KeccakKeyHasher, StateRoot, TrieInputSorted, }; -use reth_trie::{updates::TrieUpdates, HashedPostState, KeccakKeyHasher, StateRoot, TrieInput}; use reth_trie_db::{DatabaseHashedPostState, DatabaseStateRoot}; -use std::ops::Range; +use std::{ops::Range, sync::Arc}; use tracing::{debug, error}; /// The `MerkleChangeSets` stage. @@ -22,14 +27,15 @@ use tracing::{debug, error}; #[derive(Debug, Clone)] pub struct MerkleChangeSets { /// The number of blocks to retain changesets for, used as a fallback when the finalized block - /// is not found. Defaults to 64 (2 epochs in beacon chain). + /// is not found. Defaults to [`MERKLE_CHANGESETS_RETENTION_BLOCKS`] (2 epochs in beacon + /// chain). retention_blocks: u64, } impl MerkleChangeSets { - /// Creates a new `MerkleChangeSets` stage with default retention blocks of 64. + /// Creates a new `MerkleChangeSets` stage with the default retention blocks. 
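Because `PlainStorageState` yields entries sorted by address, the storage hashing hunk above only re-hashes the address when it changes. The same pattern in isolation (illustrative only):

```rust
use alloy_primitives::{keccak256, Address, B256};

/// Illustrative only: hash (address, slot) pairs that arrive sorted by address, recomputing
/// the address hash only when the address actually changes.
fn hash_sorted_entries(entries: &[(Address, B256)]) -> Vec<(B256, B256)> {
    let mut last_addr = Address::ZERO;
    let mut hashed_addr = keccak256(Address::ZERO);
    entries
        .iter()
        .map(|(address, slot_key)| {
            if *address != last_addr {
                last_addr = *address;
                hashed_addr = keccak256(address);
            }
            (hashed_addr, keccak256(slot_key))
        })
        .collect()
}
```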
pub const fn new() -> Self { - Self { retention_blocks: 64 } + Self { retention_blocks: MERKLE_CHANGESETS_RETENTION_BLOCKS } } /// Creates a new `MerkleChangeSets` stage with a custom finalized block height. @@ -39,14 +45,28 @@ impl MerkleChangeSets { /// Returns the range of blocks which are already computed. Will return an empty range if none /// have been computed. - fn computed_range(checkpoint: Option) -> Range { + fn computed_range( + provider: &Provider, + checkpoint: Option, + ) -> Result, StageError> + where + Provider: PruneCheckpointReader, + { let to = checkpoint.map(|chk| chk.block_number).unwrap_or_default(); - let from = checkpoint - .map(|chk| chk.merkle_changesets_stage_checkpoint().unwrap_or_default()) - .unwrap_or_default() - .block_range - .to; - from..to + 1 + + // Get the prune checkpoint for MerkleChangeSets to use as the lower bound. If there's no + // prune checkpoint or if the pruned block number is None, return empty range + let Some(from) = provider + .get_prune_checkpoint(PruneSegment::MerkleChangeSets)? + .and_then(|chk| chk.block_number) + // prune checkpoint indicates the last block pruned, so the block after is the start of + // the computed data + .map(|block_number| block_number + 1) + else { + return Ok(0..0) + }; + + Ok(from..to + 1) } /// Determines the target range for changeset computation based on the checkpoint and provider @@ -90,12 +110,12 @@ impl MerkleChangeSets { Ok(target_start..target_end) } - /// Calculates the trie updates given a [`TrieInput`], asserting that the resulting state root - /// matches the expected one for the block. + /// Calculates the trie updates given a [`TrieInputSorted`], asserting that the resulting state + /// root matches the expected one for the block. fn calculate_block_trie_updates( provider: &Provider, block_number: BlockNumber, - input: TrieInput, + input: TrieInputSorted, ) -> Result { let (root, trie_updates) = StateRoot::overlay_root_from_nodes_with_updates(provider.tx_ref(), input).map_err( @@ -177,21 +197,21 @@ impl MerkleChangeSets { ); let mut per_block_state_reverts = Vec::new(); for block_number in target_range.clone() { - per_block_state_reverts.push(HashedPostState::from_reverts::( + per_block_state_reverts.push(HashedPostStateSorted::from_reverts::( provider.tx_ref(), block_number..=block_number, )?); } // Helper to retrieve state revert data for a specific block from the pre-computed array - let get_block_state_revert = |block_number: BlockNumber| -> &HashedPostState { + let get_block_state_revert = |block_number: BlockNumber| -> &HashedPostStateSorted { let index = (block_number - target_start) as usize; &per_block_state_reverts[index] }; // Helper to accumulate state reverts from a given block to the target end - let compute_cumulative_state_revert = |block_number: BlockNumber| -> HashedPostState { - let mut cumulative_revert = HashedPostState::default(); + let compute_cumulative_state_revert = |block_number: BlockNumber| -> HashedPostStateSorted { + let mut cumulative_revert = HashedPostStateSorted::default(); for n in (block_number..target_end).rev() { cumulative_revert.extend_ref(get_block_state_revert(n)) } @@ -201,7 +221,7 @@ impl MerkleChangeSets { // To calculate the changeset for a block, we first need the TrieUpdates which are // generated as a result of processing the block. 
To get these we need: // 1) The TrieUpdates which revert the db's trie to _prior_ to the block - // 2) The HashedPostState to revert the db's state to _after_ the block + // 2) The HashedPostStateSorted to revert the db's state to _after_ the block // // To get (1) for `target_start` we need to do a big state root calculation which takes // into account all changes between that block and db tip. For each block after the @@ -212,12 +232,15 @@ impl MerkleChangeSets { ?target_start, "Computing trie state at starting block", ); - let mut input = TrieInput::default(); - input.state = compute_cumulative_state_revert(target_start); - input.prefix_sets = input.state.construct_prefix_sets(); + let initial_state = compute_cumulative_state_revert(target_start); + let initial_prefix_sets = initial_state.construct_prefix_sets(); + let initial_input = + TrieInputSorted::new(Arc::default(), Arc::new(initial_state), initial_prefix_sets); // target_start will be >= 1, see `determine_target_range`. - input.nodes = - Self::calculate_block_trie_updates(provider, target_start - 1, input.clone())?; + let mut nodes = Arc::new( + Self::calculate_block_trie_updates(provider, target_start - 1, initial_input)? + .into_sorted(), + ); for block_number in target_range { debug!( @@ -227,21 +250,24 @@ impl MerkleChangeSets { ); // Revert the state so that this block has been just processed, meaning we take the // cumulative revert of the subsequent block. - input.state = compute_cumulative_state_revert(block_number + 1); + let state = Arc::new(compute_cumulative_state_revert(block_number + 1)); - // Construct prefix sets from only this block's `HashedPostState`, because we only care - // about trie updates which occurred as a result of this block being processed. - input.prefix_sets = get_block_state_revert(block_number).construct_prefix_sets(); + // Construct prefix sets from only this block's `HashedPostStateSorted`, because we only + // care about trie updates which occurred as a result of this block being processed. + let prefix_sets = get_block_state_revert(block_number).construct_prefix_sets(); + + let input = TrieInputSorted::new(Arc::clone(&nodes), state, prefix_sets); // Calculate the trie updates for this block, then apply those updates to the reverts. // We calculate the overlay which will be passed into the next step using the trie // reverts prior to them being updated. let this_trie_updates = - Self::calculate_block_trie_updates(provider, block_number, input.clone())?; + Self::calculate_block_trie_updates(provider, block_number, input)?.into_sorted(); - let trie_overlay = input.nodes.clone().into_sorted(); - input.nodes.extend_ref(&this_trie_updates); - let this_trie_updates = this_trie_updates.into_sorted(); + let trie_overlay = Arc::clone(&nodes); + let mut nodes_mut = Arc::unwrap_or_clone(nodes); + nodes_mut.extend_ref(&this_trie_updates); + nodes = Arc::new(nodes_mut); // Write the changesets to the DB using the trie updates produced by the block, and the // trie reverts as the overlay. @@ -269,8 +295,14 @@ impl Default for MerkleChangeSets { impl Stage for MerkleChangeSets where - Provider: - StageCheckpointReader + TrieWriter + DBProvider + HeaderProvider + ChainStateBlockReader, + Provider: StageCheckpointReader + + TrieWriter + + DBProvider + + HeaderProvider + + ChainStateBlockReader + + StageCheckpointWriter + + PruneCheckpointReader + + PruneCheckpointWriter, { fn id(&self) -> StageId { StageId::MerkleChangeSets @@ -291,7 +323,13 @@ where // Get the previously computed range. 
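The loop above keeps the accumulated trie nodes behind an `Arc` and uses `Arc::unwrap_or_clone` to mutate them, cloning only when another handle (the per-block overlay) is still alive. The pattern in isolation, with a `Vec` standing in for the sorted trie updates (illustrative only):

```rust
use std::sync::Arc;

/// Illustrative only: extend a shared accumulator in place when it is uniquely owned,
/// falling back to a clone when an overlay still holds a reference to it.
fn extend_shared(shared: Arc<Vec<u64>>, extra: &[u64]) -> Arc<Vec<u64>> {
    let mut inner = Arc::unwrap_or_clone(shared);
    inner.extend_from_slice(extra);
    Arc::new(inner)
}
```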
This will be updated to reflect the populating of the // target range. - let mut computed_range = Self::computed_range(input.checkpoint); + let mut computed_range = Self::computed_range(provider, input.checkpoint)?; + debug!( + target: "sync::stages::merkle_changesets", + ?computed_range, + ?target_range, + "Got computed and target ranges", + ); // We want the target range to not include any data already computed previously, if // possible, so we start the target range from the end of the computed range if that is @@ -315,9 +353,9 @@ } // If target range is empty (target_start >= target_end), stage is already successfully - // executed + // executed. if target_range.start >= target_range.end { - return Ok(ExecOutput::done(input.checkpoint.unwrap_or_default())); + return Ok(ExecOutput::done(StageCheckpoint::new(target_range.end.saturating_sub(1)))); } // If our target range is a continuation of the already computed range then we can keep the @@ -336,16 +374,19 @@ // Populate the target range with changesets Self::populate_range(provider, target_range)?; - let checkpoint_block_range = CheckpointBlockRange { - from: computed_range.start, - // CheckpointBlockRange is inclusive - to: computed_range.end.saturating_sub(1), - }; + // Update the prune checkpoint to reflect that all data before `computed_range.start` + // is not available. + provider.save_prune_checkpoint( + PruneSegment::MerkleChangeSets, + PruneCheckpoint { + block_number: Some(computed_range.start.saturating_sub(1)), + tx_number: None, + prune_mode: PruneMode::Before(computed_range.start), + }, + )?; - let checkpoint = StageCheckpoint::new(checkpoint_block_range.to) - .with_merkle_changesets_stage_checkpoint(MerkleChangeSetsCheckpoint { - block_range: checkpoint_block_range, - }); + // `computed_range.end` is exclusive. + let checkpoint = StageCheckpoint::new(computed_range.end.saturating_sub(1)); Ok(ExecOutput::done(checkpoint)) } @@ -358,22 +399,42 @@ where // Unwinding is trivial; just clear everything after the target block. provider.clear_trie_changesets_from(input.unwind_to + 1)?; - let mut computed_range = Self::computed_range(Some(input.checkpoint)); + let mut computed_range = Self::computed_range(provider, Some(input.checkpoint))?; computed_range.end = input.unwind_to + 1; if computed_range.start > computed_range.end { computed_range.start = computed_range.end; } - let checkpoint_block_range = CheckpointBlockRange { - from: computed_range.start, - // computed_range.end is exclusive - to: computed_range.end.saturating_sub(1), - }; + // If we've unwound so far that there are no longer enough trie changesets available then + // simply clear them and the checkpoints, so that on next pipeline startup they will be + // regenerated. + // + // We don't do this check if the target block is not greater than the retention threshold + // (which happens near genesis), as in that case we could still have all possible + // changesets even if the total count doesn't meet the threshold.
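Since the computed range is half-open while the prune checkpoint and the stage checkpoint are both inclusive block numbers, the bookkeeping in these hunks is easy to misread. Below is a minimal sketch of the arithmetic using plain integers and a hypothetical helper, not the provider-backed `computed_range` from the stage itself:

```rust
use std::ops::Range;

/// Hypothetical stand-in for the stage's `computed_range`: `last_pruned` mirrors the
/// MerkleChangeSets prune checkpoint (last block whose changesets were pruned), and
/// `checkpoint_block` mirrors the stage checkpoint (last block whose changesets exist).
fn computed_range(last_pruned: Option<u64>, checkpoint_block: u64) -> Range<u64> {
    match last_pruned {
        // Data starts one block after the last pruned block; the end is exclusive.
        Some(pruned) => pruned + 1..checkpoint_block + 1,
        // No prune checkpoint yet means nothing has been computed.
        None => 0..0,
    }
}

fn main() {
    // Changesets exist for blocks 64..=100; everything up to block 63 was pruned.
    let range = computed_range(Some(63), 100);
    assert_eq!(range, 64..101);

    // The stage checkpoint stores the last computed block: the exclusive end minus one,
    // matching `StageCheckpoint::new(computed_range.end.saturating_sub(1))`.
    assert_eq!(range.end.saturating_sub(1), 100);

    // The prune checkpoint saved after populating records `start - 1` as pruned,
    // i.e. `PruneMode::Before(start)` in the hunk above.
    assert_eq!(range.start.saturating_sub(1), 63);
}
```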
+ debug!( + target: "sync::stages::merkle_changesets", + ?computed_range, + retention_blocks=?self.retention_blocks, + "Checking if computed range is over retention threshold", + ); + if input.unwind_to > self.retention_blocks && + computed_range.end - computed_range.start < self.retention_blocks + { + debug!( + target: "sync::stages::merkle_changesets", + ?computed_range, + retention_blocks=?self.retention_blocks, + "Clearing checkpoints completely", + ); + provider.clear_trie_changesets()?; + provider + .save_stage_checkpoint(StageId::MerkleChangeSets, StageCheckpoint::default())?; + return Ok(UnwindOutput { checkpoint: StageCheckpoint::default() }) + } - let checkpoint = StageCheckpoint::new(input.unwind_to) - .with_merkle_changesets_stage_checkpoint(MerkleChangeSetsCheckpoint { - block_range: checkpoint_block_range, - }); + // `computed_range.end` is exclusive + let checkpoint = StageCheckpoint::new(computed_range.end.saturating_sub(1)); Ok(UnwindOutput { checkpoint }) } diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index 58fa7cfb32..d6747a3960 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -96,11 +96,11 @@ mod tests { let genesis = SealedBlock::::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::::decode(&mut block_rlp).unwrap(); - provider_rw.insert_block(genesis.try_recover().unwrap()).unwrap(); - provider_rw.insert_block(block.clone().try_recover().unwrap()).unwrap(); + let mut head = block.hash(); + provider_rw.insert_block(&genesis.try_recover().unwrap()).unwrap(); + provider_rw.insert_block(&block.try_recover().unwrap()).unwrap(); // Fill with bogus blocks to respect PruneMode distance. 
- let mut head = block.hash(); let mut rng = generators::rng(); for block_number in 2..=tip { let nblock = random_block( @@ -109,7 +109,7 @@ mod tests { generators::BlockParams { parent: Some(head), ..Default::default() }, ); head = nblock.hash(); - provider_rw.insert_block(nblock.try_recover().unwrap()).unwrap(); + provider_rw.insert_block(&nblock.try_recover().unwrap()).unwrap(); } provider_rw .static_file_provider() @@ -303,7 +303,6 @@ mod tests { db: &TestStageDB, prune_count: usize, segment: StaticFileSegment, - is_full_node: bool, expected: Option, ) { // We recreate the static file provider, since consistency heals are done on fetching the @@ -330,7 +329,7 @@ mod tests { static_file_provider = StaticFileProvider::read_write(static_file_provider.path()).unwrap(); assert!(matches!( static_file_provider - .check_consistency(&db.factory.database_provider_ro().unwrap(), is_full_node,), + .check_consistency(&db.factory.database_provider_ro().unwrap()), Ok(e) if e == expected )); } @@ -352,7 +351,7 @@ mod tests { assert!(matches!( db.factory .static_file_provider() - .check_consistency(&db.factory.database_provider_ro().unwrap(), false,), + .check_consistency(&db.factory.database_provider_ro().unwrap(),), Ok(e) if e == expected )); } @@ -385,7 +384,7 @@ mod tests { assert!(matches!( db.factory .static_file_provider() - .check_consistency(&db.factory.database_provider_ro().unwrap(), false), + .check_consistency(&db.factory.database_provider_ro().unwrap()), Ok(e) if e == expected )); } @@ -396,36 +395,40 @@ mod tests { let db_provider = db.factory.database_provider_ro().unwrap(); assert!(matches!( - db.factory.static_file_provider().check_consistency(&db_provider, false), + db.factory.static_file_provider().check_consistency(&db_provider), Ok(None) )); } #[test] fn test_consistency_no_commit_prune() { - let db = seed_data(90).unwrap(); - let full_node = true; - let archive_node = !full_node; + // Test full node with receipt pruning + let mut db_full = seed_data(90).unwrap(); + db_full.factory = db_full.factory.with_prune_modes(PruneModes { + receipts: Some(PruneMode::Before(1)), + ..Default::default() + }); // Full node does not use receipts, therefore doesn't check for consistency on receipts // segment - simulate_behind_checkpoint_corruption(&db, 1, StaticFileSegment::Receipts, full_node, None); + simulate_behind_checkpoint_corruption(&db_full, 1, StaticFileSegment::Receipts, None); + + // Test archive node without receipt pruning + let db_archive = seed_data(90).unwrap(); // there are 2 to 3 transactions per block. however, if we lose one tx, we need to unwind to // the previous block. 
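The test changes above replace the `is_full_node` flag with prune configuration: whether receipts are consistency-checked now falls out of whether the node prunes them. A hypothetical, self-contained sketch of that idea with toy types, not the real `check_consistency`/`PruneModes` API:

```rust
/// Which static file segments get consistency-checked is derived from the prune
/// configuration rather than a separate `is_full_node` flag.
#[derive(Debug, PartialEq, Eq)]
enum Segment {
    Headers,
    Transactions,
    Receipts,
}

#[derive(Default)]
struct PruneConfig {
    /// Mirrors `receipts: Some(PruneMode::Before(1))` in the full-node test setup.
    prunes_receipts: bool,
}

fn segments_to_check(prune: &PruneConfig) -> Vec<Segment> {
    let mut segments = vec![Segment::Headers, Segment::Transactions];
    // A node that prunes receipts never reads them back, so corruption in the receipts
    // static files does not require an unwind.
    if !prune.prunes_receipts {
        segments.push(Segment::Receipts);
    }
    segments
}

fn main() {
    // Archive-style configuration: receipts are checked.
    assert!(segments_to_check(&PruneConfig::default()).contains(&Segment::Receipts));
    // Full-node-style configuration: receipts are skipped.
    assert!(!segments_to_check(&PruneConfig { prunes_receipts: true })
        .contains(&Segment::Receipts));
}
```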
simulate_behind_checkpoint_corruption( - &db, + &db_archive, 1, StaticFileSegment::Receipts, - archive_node, Some(PipelineTarget::Unwind(88)), ); simulate_behind_checkpoint_corruption( - &db, + &db_archive, 3, StaticFileSegment::Headers, - archive_node, Some(PipelineTarget::Unwind(86)), ); } diff --git a/crates/stages/stages/src/stages/prune.rs b/crates/stages/stages/src/stages/prune.rs index 3161d4b141..392f7f07a8 100644 --- a/crates/stages/stages/src/stages/prune.rs +++ b/crates/stages/stages/src/stages/prune.rs @@ -2,7 +2,7 @@ use reth_db_api::{table::Value, transaction::DbTxMut}; use reth_primitives_traits::NodePrimitives; use reth_provider::{ BlockReader, ChainStateBlockReader, DBProvider, PruneCheckpointReader, PruneCheckpointWriter, - StaticFileProviderFactory, + StageCheckpointReader, StaticFileProviderFactory, StorageSettingsCache, }; use reth_prune::{ PruneMode, PruneModes, PruneSegment, PrunerBuilder, SegmentOutput, SegmentOutputCheckpoint, @@ -43,9 +43,10 @@ where + PruneCheckpointWriter + BlockReader + ChainStateBlockReader + + StageCheckpointReader + StaticFileProviderFactory< Primitives: NodePrimitives, - >, + > + StorageSettingsCache, { fn id(&self) -> StageId { StageId::Prune @@ -103,9 +104,18 @@ where // We cannot recover the data that was pruned in `execute`, so we just update the // checkpoints. let prune_checkpoints = provider.get_prune_checkpoints()?; + let unwind_to_last_tx = + provider.block_body_indices(input.unwind_to)?.map(|i| i.last_tx_num()); + for (segment, mut checkpoint) in prune_checkpoints { - checkpoint.block_number = Some(input.unwind_to); - provider.save_prune_checkpoint(segment, checkpoint)?; + // Only update the checkpoint if unwind_to is lower than the existing checkpoint. + if let Some(block) = checkpoint.block_number && + input.unwind_to < block + { + checkpoint.block_number = Some(input.unwind_to); + checkpoint.tx_number = unwind_to_last_tx; + provider.save_prune_checkpoint(segment, checkpoint)?; + } } Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) }) } @@ -135,9 +145,10 @@ where + PruneCheckpointWriter + BlockReader + ChainStateBlockReader + + StageCheckpointReader + StaticFileProviderFactory< Primitives: NodePrimitives, - >, + > + StorageSettingsCache, { fn id(&self) -> StageId { StageId::PruneSenderRecovery diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index 947f062095..818ba5af08 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{Address, TxNumber}; +use alloy_primitives::{Address, BlockNumber, TxNumber}; use reth_config::config::SenderRecoveryConfig; use reth_consensus::ConsensusError; use reth_db::static_file::TransactionMask; @@ -11,8 +11,8 @@ use reth_db_api::{ }; use reth_primitives_traits::{GotExpected, NodePrimitives, SignedTransaction}; use reth_provider::{ - BlockReader, DBProvider, HeaderProvider, ProviderError, PruneCheckpointReader, - StaticFileProviderFactory, StatsReader, + BlockReader, DBProvider, EitherWriter, HeaderProvider, ProviderError, PruneCheckpointReader, + StaticFileProviderFactory, StatsReader, StorageSettingsCache, TransactionsProvider, }; use reth_prune_types::PruneSegment; use reth_stages_api::{ @@ -20,7 +20,7 @@ use reth_stages_api::{ StageId, UnwindInput, UnwindOutput, }; use reth_static_file_types::StaticFileSegment; -use std::{fmt::Debug, ops::Range, sync::mpsc}; +use std::{fmt::Debug, ops::Range, sync::mpsc, 
time::Instant}; use thiserror::Error; use tracing::*; @@ -64,7 +64,8 @@ where + BlockReader + StaticFileProviderFactory> + StatsReader - + PruneCheckpointReader, + + PruneCheckpointReader + + StorageSettingsCache, { /// Return the id of the stage fn id(&self) -> StageId { @@ -74,48 +75,75 @@ where /// Retrieve the range of transactions to iterate over by querying /// [`BlockBodyIndices`][reth_db_api::tables::BlockBodyIndices], /// collect transactions within that range, recover signer for each transaction and store - /// entries in the [`TransactionSenders`][reth_db_api::tables::TransactionSenders] table. + /// entries in the [`TransactionSenders`][reth_db_api::tables::TransactionSenders] table or + /// static files depending on configuration. fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result { if input.target_reached() { return Ok(ExecOutput::done(input.checkpoint())) } - let (tx_range, block_range, is_final_range) = - input.next_block_range_with_transaction_threshold(provider, self.commit_threshold)?; - let end_block = *block_range.end(); - - // No transactions to walk over - if tx_range.is_empty() { - info!(target: "sync::stages::sender_recovery", ?tx_range, "Target transaction already reached"); + let Some(range_output) = + input.next_block_range_with_transaction_threshold(provider, self.commit_threshold)? + else { + info!(target: "sync::stages::sender_recovery", "No transaction senders to recover"); + EitherWriter::new_senders( + provider, + provider + .static_file_provider() + .get_highest_static_file_block(StaticFileSegment::TransactionSenders) + .unwrap_or_default(), + )? + .ensure_at_block(input.target())?; return Ok(ExecOutput { - checkpoint: StageCheckpoint::new(end_block) + checkpoint: StageCheckpoint::new(input.target()) .with_entities_stage_checkpoint(stage_checkpoint(provider)?), - done: is_final_range, + done: true, }) - } + }; + let end_block = *range_output.block_range.end(); - // Acquire the cursor for inserting elements - let mut senders_cursor = provider.tx_ref().cursor_write::()?; + let mut writer = EitherWriter::new_senders(provider, *range_output.block_range.start())?; - info!(target: "sync::stages::sender_recovery", ?tx_range, "Recovering senders"); + info!(target: "sync::stages::sender_recovery", tx_range = ?range_output.tx_range, "Recovering senders"); // Iterate over transactions in batches, recover the senders and append them - let batch = tx_range + let batch = range_output + .tx_range .clone() .step_by(BATCH_SIZE) - .map(|start| start..std::cmp::min(start + BATCH_SIZE as u64, tx_range.end)) + .map(|start| start..std::cmp::min(start + BATCH_SIZE as u64, range_output.tx_range.end)) .collect::>>(); let tx_batch_sender = setup_range_recovery(provider); + let start = Instant::now(); + let block_body_indices = + provider.block_body_indices_range(range_output.block_range.clone())?; + let block_body_indices_elapsed = start.elapsed(); + let mut blocks_with_indices = range_output.block_range.zip(block_body_indices).peekable(); + for range in batch { - recover_range(range, provider, tx_batch_sender.clone(), &mut senders_cursor)?; + // Pair each transaction number with its block number + let start = Instant::now(); + let block_numbers = range.clone().fold(Vec::new(), |mut block_numbers, tx| { + while let Some((block, index)) = blocks_with_indices.peek() { + if index.contains_tx(tx) { + block_numbers.push(*block); + return block_numbers + } + blocks_with_indices.next(); + } + block_numbers + }); + let fold_elapsed = start.elapsed(); + debug!(target: 
"sync::stages::sender_recovery", ?block_body_indices_elapsed, ?fold_elapsed, len = block_numbers.len(), "Calculated block numbers"); + recover_range(range, block_numbers, provider, tx_batch_sender.clone(), &mut writer)?; } Ok(ExecOutput { checkpoint: StageCheckpoint::new(end_block) .with_entities_stage_checkpoint(stage_checkpoint(provider)?), - done: is_final_range, + done: range_output.is_final_range, }) } @@ -142,15 +170,22 @@ where } fn recover_range( - tx_range: Range, + tx_range: Range, + block_numbers: Vec, provider: &Provider, tx_batch_sender: mpsc::Sender, RecoveryResultSender)>>, - senders_cursor: &mut CURSOR, + writer: &mut EitherWriter<'_, CURSOR, Provider::Primitives>, ) -> Result<(), StageError> where - Provider: DBProvider + HeaderProvider + StaticFileProviderFactory, + Provider: DBProvider + HeaderProvider + TransactionsProvider + StaticFileProviderFactory, CURSOR: DbCursorRW, { + debug_assert_eq!( + tx_range.clone().count(), + block_numbers.len(), + "Transaction range and block numbers count mismatch" + ); + debug!(target: "sync::stages::sender_recovery", ?tx_range, "Sending batch for processing"); // Preallocate channels for each chunks in the batch @@ -172,6 +207,7 @@ where debug!(target: "sync::stages::sender_recovery", ?tx_range, "Appending recovered senders to the database"); let mut processed_transactions = 0; + let mut block_numbers = block_numbers.into_iter(); for channel in receivers { while let Ok(recovered) = channel.recv() { let (tx_id, sender) = match recovered { @@ -209,7 +245,12 @@ where } } }; - senders_cursor.append(tx_id, &sender)?; + + let new_block_number = block_numbers + .next() + .expect("block numbers iterator has the same length as the number of transactions"); + writer.ensure_at_block(new_block_number)?; + writer.append_sender(tx_id, &sender)?; processed_transactions += 1; } } diff --git a/crates/stages/stages/src/stages/tx_lookup.rs b/crates/stages/stages/src/stages/tx_lookup.rs index 8b1c531736..087a040f79 100644 --- a/crates/stages/stages/src/stages/tx_lookup.rs +++ b/crates/stages/stages/src/stages/tx_lookup.rs @@ -3,17 +3,16 @@ use alloy_primitives::{TxHash, TxNumber}; use num_traits::Zero; use reth_config::config::{EtlConfig, TransactionLookupConfig}; use reth_db_api::{ - cursor::{DbCursorRO, DbCursorRW}, - table::Value, + table::{Decode, Decompress, Value}, tables, transaction::DbTxMut, - RawKey, RawValue, }; use reth_etl::Collector; use reth_primitives_traits::{NodePrimitives, SignedTransaction}; use reth_provider::{ - BlockReader, DBProvider, PruneCheckpointReader, PruneCheckpointWriter, - StaticFileProviderFactory, StatsReader, TransactionsProvider, TransactionsProviderExt, + BlockReader, DBProvider, EitherWriter, PruneCheckpointReader, PruneCheckpointWriter, + RocksDBProviderFactory, StaticFileProviderFactory, StatsReader, StorageSettingsCache, + TransactionsProvider, TransactionsProviderExt, }; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment}; use reth_stages_api::{ @@ -65,7 +64,9 @@ where + PruneCheckpointReader + StatsReader + StaticFileProviderFactory> - + TransactionsProviderExt, + + TransactionsProviderExt + + StorageSettingsCache + + RocksDBProviderFactory, { /// Return the id of the stage fn id(&self) -> StageId { @@ -126,14 +127,21 @@ where ); loop { - let (tx_range, block_range, is_final_range) = - input.next_block_range_with_transaction_threshold(provider, self.chunk_size)?; + let Some(range_output) = + input.next_block_range_with_transaction_threshold(provider, self.chunk_size)? 
+ else { + input.checkpoint = Some( + StageCheckpoint::new(input.target()) + .with_entities_stage_checkpoint(stage_checkpoint(provider)?), + ); + break; + }; - let end_block = *block_range.end(); + let end_block = *range_output.block_range.end(); - info!(target: "sync::stages::transaction_lookup", ?tx_range, "Calculating transaction hashes"); + info!(target: "sync::stages::transaction_lookup", tx_range = ?range_output.tx_range, "Calculating transaction hashes"); - for (key, value) in provider.transaction_hashes_by_range(tx_range)? { + for (key, value) in provider.transaction_hashes_by_range(range_output.tx_range)? { hash_collector.insert(key, value)?; } @@ -142,17 +150,28 @@ where .with_entities_stage_checkpoint(stage_checkpoint(provider)?), ); - if is_final_range { - let append_only = - provider.count_entries::()?.is_zero(); - let mut txhash_cursor = provider - .tx_ref() - .cursor_write::>()?; - + if range_output.is_final_range { let total_hashes = hash_collector.len(); let interval = (total_hashes / 10).max(1); + + // Use append mode when table is empty (first sync) - significantly faster + let append_only = + provider.count_entries::()?.is_zero(); + + // Create RocksDB batch if feature is enabled + #[cfg(all(unix, feature = "rocksdb"))] + let rocksdb = provider.rocksdb_provider(); + #[cfg(all(unix, feature = "rocksdb"))] + let rocksdb_batch = rocksdb.batch(); + #[cfg(not(all(unix, feature = "rocksdb")))] + let rocksdb_batch = (); + + // Create writer that routes to either MDBX or RocksDB based on settings + let mut writer = + EitherWriter::new_transaction_hash_numbers(provider, rocksdb_batch)?; + for (index, hash_to_number) in hash_collector.iter()?.enumerate() { - let (hash, number) = hash_to_number?; + let (hash_bytes, number_bytes) = hash_to_number?; if index > 0 && index.is_multiple_of(interval) { info!( target: "sync::stages::transaction_lookup", @@ -162,12 +181,16 @@ where ); } - let key = RawKey::::from_vec(hash); - if append_only { - txhash_cursor.append(key, &RawValue::::from_vec(number))? - } else { - txhash_cursor.insert(key, &RawValue::::from_vec(number))? - } + // Decode from raw ETL bytes + let hash = TxHash::decode(&hash_bytes)?; + let tx_num = TxNumber::decompress(&number_bytes)?; + writer.put_transaction_hash_number(hash, tx_num, append_only)?; + } + + // Extract and register RocksDB batch for commit at provider level + #[cfg(all(unix, feature = "rocksdb"))] + if let Some(batch) = writer.into_raw_rocksdb_batch() { + provider.set_pending_rocksdb_batch(batch); } trace!(target: "sync::stages::transaction_lookup", @@ -192,11 +215,19 @@ where provider: &Provider, input: UnwindInput, ) -> Result { - let tx = provider.tx_ref(); let (range, unwind_to, _) = input.unwind_block_range_with_threshold(self.chunk_size); - // Cursor to unwind tx hash to number - let mut tx_hash_number_cursor = tx.cursor_write::()?; + // Create RocksDB batch if feature is enabled + #[cfg(all(unix, feature = "rocksdb"))] + let rocksdb = provider.rocksdb_provider(); + #[cfg(all(unix, feature = "rocksdb"))] + let rocksdb_batch = rocksdb.batch(); + #[cfg(not(all(unix, feature = "rocksdb")))] + let rocksdb_batch = (); + + // Create writer that routes to either MDBX or RocksDB based on settings + let mut writer = EitherWriter::new_transaction_hash_numbers(provider, rocksdb_batch)?; + let static_file_provider = provider.static_file_provider(); let rev_walker = provider .block_body_indices_range(range.clone())? 
@@ -211,15 +242,18 @@ where // Delete all transactions that belong to this block for tx_id in body.tx_num_range() { - // First delete the transaction and hash to id mapping - if let Some(transaction) = static_file_provider.transaction_by_id(tx_id)? && - tx_hash_number_cursor.seek_exact(transaction.trie_hash())?.is_some() - { - tx_hash_number_cursor.delete_current()?; + if let Some(transaction) = static_file_provider.transaction_by_id(tx_id)? { + writer.delete_transaction_hash_number(transaction.trie_hash())?; } } } + // Extract and register RocksDB batch for commit at provider level + #[cfg(all(unix, feature = "rocksdb"))] + if let Some(batch) = writer.into_raw_rocksdb_batch() { + provider.set_pending_rocksdb_batch(batch); + } + Ok(UnwindOutput { checkpoint: StageCheckpoint::new(unwind_to) .with_entities_stage_checkpoint(stage_checkpoint(provider)?), @@ -259,7 +293,7 @@ mod tests { }; use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; - use reth_db_api::transaction::DbTx; + use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_ethereum_primitives::Block; use reth_primitives_traits::SealedBlock; use reth_provider::{ @@ -574,4 +608,160 @@ mod tests { self.ensure_no_hash_by_block(input.unwind_to) } } + + #[cfg(all(unix, feature = "rocksdb"))] + mod rocksdb_tests { + use super::*; + use reth_provider::RocksDBProviderFactory; + use reth_storage_api::StorageSettings; + + /// Test that when `transaction_hash_numbers_in_rocksdb` is enabled, the stage + /// writes transaction hash mappings to `RocksDB` instead of MDBX. + #[tokio::test] + async fn execute_writes_to_rocksdb_when_enabled() { + let (previous_stage, stage_progress) = (110, 100); + let mut rng = generators::rng(); + + // Set up the runner + let runner = TransactionLookupTestRunner::default(); + + // Enable RocksDB for transaction hash numbers + runner.db.factory.set_storage_settings_cache( + StorageSettings::legacy().with_transaction_hash_numbers_in_rocksdb(true), + ); + + let input = ExecInput { + target: Some(previous_stage), + checkpoint: Some(StageCheckpoint::new(stage_progress)), + }; + + // Insert blocks with transactions + let blocks = random_block_range( + &mut rng, + stage_progress + 1..=previous_stage, + BlockRangeParams { + parent: Some(B256::ZERO), + tx_count: 1..3, // Ensure we have transactions + ..Default::default() + }, + ); + runner + .db + .insert_blocks(blocks.iter(), StorageKind::Static) + .expect("failed to insert blocks"); + + // Count expected transactions + let expected_tx_count: usize = blocks.iter().map(|b| b.body().transactions.len()).sum(); + assert!(expected_tx_count > 0, "test requires at least one transaction"); + + // Execute the stage + let rx = runner.execute(input); + let result = rx.await.unwrap(); + assert!(result.is_ok(), "stage execution failed: {:?}", result); + + // Verify MDBX table is empty (data should be in RocksDB) + let mdbx_count = runner.db.count_entries::().unwrap(); + assert_eq!( + mdbx_count, 0, + "MDBX TransactionHashNumbers should be empty when RocksDB is enabled" + ); + + // Verify RocksDB has the data + let rocksdb = runner.db.factory.rocksdb_provider(); + let mut rocksdb_count = 0; + for block in &blocks { + for tx in &block.body().transactions { + let hash = *tx.tx_hash(); + let result = rocksdb.get::(hash).unwrap(); + assert!(result.is_some(), "Transaction hash {:?} not found in RocksDB", hash); + rocksdb_count += 1; + } + } + assert_eq!( + rocksdb_count, expected_tx_count, + "RocksDB should contain all transaction hashes" + ); + } + + /// 
Test that when `transaction_hash_numbers_in_rocksdb` is enabled, the stage + /// unwind deletes transaction hash mappings from `RocksDB` instead of MDBX. + #[tokio::test] + async fn unwind_deletes_from_rocksdb_when_enabled() { + let (previous_stage, stage_progress) = (110, 100); + let mut rng = generators::rng(); + + // Set up the runner + let runner = TransactionLookupTestRunner::default(); + + // Enable RocksDB for transaction hash numbers + runner.db.factory.set_storage_settings_cache( + StorageSettings::legacy().with_transaction_hash_numbers_in_rocksdb(true), + ); + + // Insert blocks with transactions + let blocks = random_block_range( + &mut rng, + stage_progress + 1..=previous_stage, + BlockRangeParams { + parent: Some(B256::ZERO), + tx_count: 1..3, // Ensure we have transactions + ..Default::default() + }, + ); + runner + .db + .insert_blocks(blocks.iter(), StorageKind::Static) + .expect("failed to insert blocks"); + + // Count expected transactions + let expected_tx_count: usize = blocks.iter().map(|b| b.body().transactions.len()).sum(); + assert!(expected_tx_count > 0, "test requires at least one transaction"); + + // Execute the stage first to populate RocksDB + let exec_input = ExecInput { + target: Some(previous_stage), + checkpoint: Some(StageCheckpoint::new(stage_progress)), + }; + let rx = runner.execute(exec_input); + let result = rx.await.unwrap(); + assert!(result.is_ok(), "stage execution failed: {:?}", result); + + // Verify RocksDB has the data before unwind + let rocksdb = runner.db.factory.rocksdb_provider(); + for block in &blocks { + for tx in &block.body().transactions { + let hash = *tx.tx_hash(); + let result = rocksdb.get::(hash).unwrap(); + assert!( + result.is_some(), + "Transaction hash {:?} should exist before unwind", + hash + ); + } + } + + // Now unwind to stage_progress (removing all the blocks we added) + let unwind_input = UnwindInput { + checkpoint: StageCheckpoint::new(previous_stage), + unwind_to: stage_progress, + bad_block: None, + }; + let unwind_result = runner.unwind(unwind_input).await; + assert!(unwind_result.is_ok(), "stage unwind failed: {:?}", unwind_result); + + // Verify RocksDB data is deleted after unwind + let rocksdb = runner.db.factory.rocksdb_provider(); + for block in &blocks { + for tx in &block.body().transactions { + let hash = *tx.tx_hash(); + let result = rocksdb.get::(hash).unwrap(); + assert!( + result.is_none(), + "Transaction hash {:?} should be deleted from RocksDB after unwind", + hash + ); + } + } + } + } } diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 3fe1c7f1f9..c137c38826 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -1,7 +1,10 @@ use alloy_primitives::{keccak256, Address, BlockNumber, TxHash, TxNumber, B256}; use reth_chainspec::MAINNET; use reth_db::{ - test_utils::{create_test_rw_db, create_test_rw_db_with_path, create_test_static_files_dir}, + test_utils::{ + create_test_rocksdb_dir, create_test_rw_db, create_test_rw_db_with_path, + create_test_static_files_dir, + }, DatabaseEnv, }; use reth_db_api::{ @@ -17,7 +20,9 @@ use reth_db_api::{ use reth_ethereum_primitives::{Block, EthPrimitives, Receipt}; use reth_primitives_traits::{Account, SealedBlock, SealedHeader, StorageEntry}; use reth_provider::{ - providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, + providers::{ + RocksDBProvider, StaticFileProvider, StaticFileProviderRWRefMut, 
StaticFileWriter, + }, test_utils::MockNodeTypesWithDB, HistoryWriter, ProviderError, ProviderFactory, StaticFileProviderFactory, StatsReader, }; @@ -38,13 +43,16 @@ impl Default for TestStageDB { /// Create a new instance of [`TestStageDB`] fn default() -> Self { let (static_dir, static_dir_path) = create_test_static_files_dir(); + let (_, rocksdb_dir_path) = create_test_rocksdb_dir(); Self { temp_static_files_dir: static_dir, factory: ProviderFactory::new( create_test_rw_db(), MAINNET.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), - ), + RocksDBProvider::builder(rocksdb_dir_path).with_default_tables().build().unwrap(), + ) + .expect("failed to create test provider factory"), } } } @@ -52,6 +60,7 @@ impl Default for TestStageDB { impl TestStageDB { pub fn new(path: &Path) -> Self { let (static_dir, static_dir_path) = create_test_static_files_dir(); + let (_, rocksdb_dir_path) = create_test_rocksdb_dir(); Self { temp_static_files_dir: static_dir, @@ -59,7 +68,9 @@ impl TestStageDB { create_test_rw_db_with_path(path), MAINNET.clone(), StaticFileProvider::read_write(static_dir_path).unwrap(), - ), + RocksDBProvider::builder(rocksdb_dir_path).with_default_tables().build().unwrap(), + ) + .expect("failed to create test provider factory"), } } diff --git a/crates/stages/types/Cargo.toml b/crates/stages/types/Cargo.toml index 19e1530489..6e70fbe26a 100644 --- a/crates/stages/types/Cargo.toml +++ b/crates/stages/types/Cargo.toml @@ -24,12 +24,14 @@ modular-bitfield = { workspace = true, optional = true } [dev-dependencies] reth-codecs.workspace = true +reth-trie-common = { workspace = true, features = ["reth-codec"] } alloy-primitives = { workspace = true, features = ["arbitrary", "rand"] } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true rand.workspace = true bytes.workspace = true +modular-bitfield.workspace = true [features] default = ["std"] diff --git a/crates/stages/types/src/checkpoints.rs b/crates/stages/types/src/checkpoints.rs index 16bee1387f..c21412ea43 100644 --- a/crates/stages/types/src/checkpoints.rs +++ b/crates/stages/types/src/checkpoints.rs @@ -1,4 +1,6 @@ use super::StageId; +#[cfg(test)] +use alloc::vec; use alloc::{format, string::String, vec::Vec}; use alloy_primitives::{Address, BlockNumber, B256, U256}; use core::ops::RangeInclusive; @@ -326,7 +328,9 @@ impl EntitiesCheckpoint { // Truncate to 2 decimal places, rounding down so that 99.999% becomes 99.99% and not 100%. #[cfg(not(feature = "std"))] { - Some(format!("{:.2}%", (percentage * 100.0) / 100.0)) + // Manual floor implementation using integer arithmetic for no_std + let scaled = (percentage * 100.0) as u64; + Some(format!("{:.2}%", scaled as f64 / 100.0)) } #[cfg(feature = "std")] Some(format!("{:.2}%", (percentage * 100.0).floor() / 100.0)) diff --git a/crates/stages/types/src/execution.rs b/crates/stages/types/src/execution.rs index a334951abe..caf7c2448e 100644 --- a/crates/stages/types/src/execution.rs +++ b/crates/stages/types/src/execution.rs @@ -2,11 +2,8 @@ use core::time::Duration; /// The thresholds at which the execution stage writes state changes to the database. /// -/// If either of the thresholds (`max_blocks` and `max_changes`) are hit, then the execution stage -/// commits all pending changes to the database. -/// -/// A third threshold, `max_changesets`, can be set to periodically write changesets to the -/// current database transaction, which frees up memory. 
+/// If any of the thresholds (`max_blocks`, `max_changes`, `max_cumulative_gas`, or `max_duration`) +/// are hit, then the execution stage commits all pending changes to the database. #[derive(Debug, Clone)] pub struct ExecutionStageThresholds { /// The maximum number of blocks to execute before the execution stage commits. diff --git a/crates/stateless/Cargo.toml b/crates/stateless/Cargo.toml index 8adbae28ae..d79bedb76b 100644 --- a/crates/stateless/Cargo.toml +++ b/crates/stateless/Cargo.toml @@ -18,6 +18,7 @@ alloy-rlp.workspace = true alloy-trie.workspace = true alloy-consensus.workspace = true alloy-rpc-types-debug.workspace = true +alloy-genesis = { workspace = true, features = ["serde-bincode-compat"] } # reth reth-ethereum-consensus.workspace = true diff --git a/crates/stateless/src/lib.rs b/crates/stateless/src/lib.rs index 6813638485..06540d319f 100644 --- a/crates/stateless/src/lib.rs +++ b/crates/stateless/src/lib.rs @@ -39,11 +39,14 @@ mod recover_block; /// Sparse trie implementation for stateless validation pub mod trie; +use alloy_genesis::ChainConfig; #[doc(inline)] pub use recover_block::UncompressedPublicKey; #[doc(inline)] pub use trie::StatelessTrie; #[doc(inline)] +pub use validation::stateless_validation; +#[doc(inline)] pub use validation::stateless_validation_with_trie; /// Implementation of stateless validation @@ -53,6 +56,8 @@ pub(crate) mod witness_db; #[doc(inline)] pub use alloy_rpc_types_debug::ExecutionWitness; +pub use alloy_genesis::Genesis; + use reth_ethereum_primitives::Block; /// `StatelessInput` is a convenience structure for serializing the input needed @@ -67,4 +72,7 @@ pub struct StatelessInput { pub block: Block, /// `ExecutionWitness` for the stateless validation function pub witness: ExecutionWitness, + /// Chain configuration for the stateless validation function + #[serde_as(as = "alloy_genesis::serde_bincode_compat::ChainConfig<'_>")] + pub chain_config: ChainConfig, } diff --git a/crates/stateless/src/validation.rs b/crates/stateless/src/validation.rs index a0475b0993..08d84f8466 100644 --- a/crates/stateless/src/validation.rs +++ b/crates/stateless/src/validation.rs @@ -17,8 +17,11 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_consensus::{Consensus, HeaderValidator}; use reth_errors::ConsensusError; use reth_ethereum_consensus::{validate_block_post_execution, EthBeaconConsensus}; -use reth_ethereum_primitives::{Block, EthPrimitives}; -use reth_evm::{execute::Executor, ConfigureEvm}; +use reth_ethereum_primitives::{Block, EthPrimitives, EthereumReceipt}; +use reth_evm::{ + execute::{BlockExecutionOutput, Executor}, + ConfigureEvm, +}; use reth_primitives_traits::{RecoveredBlock, SealedHeader}; use reth_trie_common::{HashedPostState, KeccakKeyHasher}; @@ -49,7 +52,7 @@ pub enum StatelessValidationError { }, /// Error during stateless block execution. - #[error("stateless block execution failed")] + #[error("stateless block execution failed: {0}")] StatelessExecutionFailed(String), /// Error during consensus validation of the block. @@ -144,7 +147,7 @@ pub fn stateless_validation( witness: ExecutionWitness, chain_spec: Arc, evm_config: E, -) -> Result +) -> Result<(B256, BlockExecutionOutput), StatelessValidationError> where ChainSpec: Send + Sync + EthChainSpec

+ EthereumHardforks + Debug, E: ConfigureEvm + Clone + 'static, @@ -170,7 +173,7 @@ pub fn stateless_validation_with_trie( witness: ExecutionWitness, chain_spec: Arc, evm_config: E, -) -> Result +) -> Result<(B256, BlockExecutionOutput), StatelessValidationError> where T: StatelessTrie, ChainSpec: Send + Sync + EthChainSpec
+ EthereumHardforks + Debug, @@ -242,7 +245,7 @@ where } // Return block hash - Ok(current_block.hash_slow()) + Ok((current_block.hash_slow(), output)) } /// Performs consensus validation checks on a block without execution or state validation. diff --git a/crates/stateless/src/witness_db.rs b/crates/stateless/src/witness_db.rs index 4a99c286ad..466b4de30b 100644 --- a/crates/stateless/src/witness_db.rs +++ b/crates/stateless/src/witness_db.rs @@ -23,18 +23,12 @@ where { /// Map of block numbers to block hashes. /// This is used to service the `BLOCKHASH` opcode. - // TODO: use Vec instead -- ancestors should be contiguous - // TODO: so we can use the current_block_number and an offset to - // TODO: get the block number of a particular ancestor block_hashes_by_block_number: BTreeMap, /// Map of code hashes to bytecode. /// Used to fetch contract code needed during execution. bytecode: B256Map, /// The sparse Merkle Patricia Trie containing account and storage state. /// This is used to provide account/storage values during EVM execution. - /// TODO: Ideally we do not have this trie and instead a simple map. - /// TODO: Then as a corollary we can avoid unnecessary hashing in `Database::storage` - /// TODO: and `Database::basic` without needing to cache the hashed Addresses and Keys trie: &'a T, } diff --git a/crates/static-file/static-file/src/segments/receipts.rs b/crates/static-file/static-file/src/segments/receipts.rs index 0c556f781d..b09dadd1ea 100644 --- a/crates/static-file/static-file/src/segments/receipts.rs +++ b/crates/static-file/static-file/src/segments/receipts.rs @@ -3,9 +3,7 @@ use alloy_primitives::BlockNumber; use reth_codecs::Compact; use reth_db_api::{cursor::DbCursorRO, table::Value, tables, transaction::DbTx}; use reth_primitives_traits::NodePrimitives; -use reth_provider::{ - providers::StaticFileWriter, BlockReader, DBProvider, StaticFileProviderFactory, -}; +use reth_provider::{BlockReader, DBProvider, StaticFileProviderFactory}; use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::ops::RangeInclusive; @@ -29,9 +27,8 @@ where provider: Provider, block_range: RangeInclusive, ) -> ProviderResult<()> { - let static_file_provider = provider.static_file_provider(); let mut static_file_writer = - static_file_provider.get_writer(*block_range.start(), StaticFileSegment::Receipts)?; + provider.get_static_file_writer(*block_range.start(), StaticFileSegment::Receipts)?; for block in block_range { static_file_writer.increment_block(block)?; diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index 2e7aa4b9df..03337f1fd7 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -194,7 +194,9 @@ where let targets = StaticFileTargets { // StaticFile receipts only if they're not pruned according to the user configuration - receipts: if self.prune_modes.receipts.is_none() { + receipts: if self.prune_modes.receipts.is_none() && + self.prune_modes.receipts_log_filter.is_empty() + { finalized_block_numbers.receipts.and_then(|finalized_block_number| { self.get_static_file_target( highest_static_files.receipts, diff --git a/crates/static-file/types/Cargo.toml b/crates/static-file/types/Cargo.toml index e2cd90c268..959b4edd11 100644 --- a/crates/static-file/types/Cargo.toml +++ b/crates/static-file/types/Cargo.toml @@ -21,6 +21,8 @@ strum = { workspace = 
true, features = ["derive"] } [dev-dependencies] reth-nippy-jar.workspace = true +serde_json.workspace = true +insta.workspace = true [features] default = ["std"] @@ -29,5 +31,6 @@ std = [ "derive_more/std", "serde/std", "strum/std", + "serde_json/std", ] clap = ["dep:clap"] diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index 9606b0ec98..73d0ffe050 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -78,7 +78,7 @@ impl StaticFileTargets { } /// Each static file has a fixed number of blocks. This gives out the range where the requested -/// block is positioned. Used for segment filename. +/// block is positioned, according to the specified number of blocks per static file. pub const fn find_fixed_range( block: BlockNumber, blocks_per_static_file: u64, diff --git a/crates/static-file/types/src/segment.rs b/crates/static-file/types/src/segment.rs index 0458bea167..cd57f019ba 100644 --- a/crates/static-file/types/src/segment.rs +++ b/crates/static-file/types/src/segment.rs @@ -1,13 +1,9 @@ use crate::{BlockNumber, Compression}; -use alloc::{ - format, - string::{String, ToString}, -}; +use alloc::{format, string::String}; use alloy_primitives::TxNumber; use core::{ops::RangeInclusive, str::FromStr}; -use derive_more::Display; use serde::{Deserialize, Serialize}; -use strum::{AsRefStr, EnumString}; +use strum::{EnumIs, EnumString}; #[derive( Debug, @@ -21,38 +17,44 @@ use strum::{AsRefStr, EnumString}; Deserialize, Serialize, EnumString, - AsRefStr, - Display, + derive_more::Display, + EnumIs, )] +#[strum(serialize_all = "kebab-case")] #[cfg_attr(feature = "clap", derive(clap::ValueEnum))] /// Segment of the data that can be moved to static files. pub enum StaticFileSegment { - #[strum(serialize = "headers")] /// Static File segment responsible for the `CanonicalHeaders`, `Headers`, /// `HeaderTerminalDifficulties` tables. Headers, - #[strum(serialize = "transactions")] /// Static File segment responsible for the `Transactions` table. Transactions, - #[strum(serialize = "receipts")] /// Static File segment responsible for the `Receipts` table. Receipts, + /// Static File segment responsible for the `TransactionSenders` table. + TransactionSenders, } impl StaticFileSegment { - /// Returns the segment as a string. + /// Returns a string representation of the segment. pub const fn as_str(&self) -> &'static str { + // `strum` doesn't generate a doc comment for `into_str` when using `IntoStaticStr` derive + // macro, so we need to manually implement it. + // + // NOTE: this name cannot have underscores in it, as underscores are used as delimiters in + // static file paths, for fetching static files for a specific block range match self { Self::Headers => "headers", Self::Transactions => "transactions", Self::Receipts => "receipts", + Self::TransactionSenders => "transaction-senders", } } /// Returns an iterator over all segments. pub fn iter() -> impl Iterator { // The order of segments is significant and must be maintained to ensure correctness. - [Self::Headers, Self::Transactions, Self::Receipts].into_iter() + [Self::Headers, Self::Transactions, Self::Receipts, Self::TransactionSenders].into_iter() } /// Returns the default configuration of the segment. 
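The note in `as_str` above is the key constraint for the new `TransactionSenders` variant: static file names use underscores as field delimiters, so segment names must stay underscore-free (hence the kebab-case "transaction-senders"). A toy illustration of the filename round trip under that assumption; the parser below is a simplification, not `parse_filename`:

```rust
/// Format used by the segment files, per the diff: `static_file_<segment>_<start>_<end>`.
fn filename(segment: &str, start: u64, end: u64) -> String {
    format!("static_file_{segment}_{start}_{end}")
}

/// Toy parser: splitting on `_` only yields the expected five fields if the segment name
/// itself contains no underscores.
fn parse(name: &str) -> Option<(String, u64, u64)> {
    let parts: Vec<&str> = name.split('_').collect();
    let [a, b, segment, start, end] = parts.as_slice() else { return None };
    if (*a, *b) != ("static", "file") {
        return None;
    }
    Some((segment.to_string(), start.parse().ok()?, end.parse().ok()?))
}

fn main() {
    assert_eq!(
        parse(&filename("transaction-senders", 0, 499_999)),
        Some(("transaction-senders".to_string(), 0, 499_999))
    );
    // An underscore in the segment name would break the five-field layout.
    assert_eq!(parse(&filename("transaction_senders", 0, 499_999)), None);
}
```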
@@ -64,7 +66,7 @@ impl StaticFileSegment { pub const fn columns(&self) -> usize { match self { Self::Headers => 3, - Self::Transactions | Self::Receipts => 1, + Self::Transactions | Self::Receipts | Self::TransactionSenders => 1, } } @@ -72,7 +74,7 @@ impl StaticFileSegment { pub fn filename(&self, block_range: &SegmentRangeInclusive) -> String { // ATTENTION: if changing the name format, be sure to reflect those changes in // [`Self::parse_filename`]. - format!("static_file_{}_{}_{}", self.as_ref(), block_range.start(), block_range.end()) + format!("static_file_{}_{}_{}", self.as_str(), block_range.start(), block_range.end()) } /// Returns file name for the provided segment and range, alongside filters, compression. @@ -83,7 +85,7 @@ impl StaticFileSegment { ) -> String { let prefix = self.filename(block_range); - let filters_name = "none".to_string(); + let filters_name = "none"; // ATTENTION: if changing the name format, be sure to reflect those changes in // [`Self::parse_filename`.] @@ -122,29 +124,25 @@ impl StaticFileSegment { Some((segment, SegmentRangeInclusive::new(block_start, block_end))) } - /// Returns `true` if the segment is `StaticFileSegment::Headers`. - pub const fn is_headers(&self) -> bool { - matches!(self, Self::Headers) - } - - /// Returns `true` if the segment is `StaticFileSegment::Receipts`. - pub const fn is_receipts(&self) -> bool { - matches!(self, Self::Receipts) - } - /// Returns `true` if a segment row is linked to a transaction. pub const fn is_tx_based(&self) -> bool { - matches!(self, Self::Receipts | Self::Transactions) + match self { + Self::Receipts | Self::Transactions | Self::TransactionSenders => true, + Self::Headers => false, + } } /// Returns `true` if a segment row is linked to a block. pub const fn is_block_based(&self) -> bool { - matches!(self, Self::Headers) + match self { + Self::Headers => true, + Self::Receipts | Self::Transactions | Self::TransactionSenders => false, + } } } /// A segment header that contains information common to all segments. Used for storage. -#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Hash, Clone)] +#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Hash, Clone, Copy)] pub struct SegmentHeader { /// Defines the expected block range for a static file segment. This attribute is crucial for /// scenarios where the file contains no data, allowing for a representation beyond a @@ -175,14 +173,19 @@ impl SegmentHeader { self.segment } + /// Returns the expected block range. + pub const fn expected_block_range(&self) -> SegmentRangeInclusive { + self.expected_block_range + } + /// Returns the block range. - pub const fn block_range(&self) -> Option<&SegmentRangeInclusive> { - self.block_range.as_ref() + pub const fn block_range(&self) -> Option { + self.block_range } /// Returns the transaction range. - pub const fn tx_range(&self) -> Option<&SegmentRangeInclusive> { - self.tx_range.as_ref() + pub const fn tx_range(&self) -> Option { + self.tx_range } /// The expected block start of the segment. @@ -217,12 +220,12 @@ impl SegmentHeader { /// Number of transactions. pub fn tx_len(&self) -> Option { - self.tx_range.as_ref().map(|r| (r.end() + 1) - r.start()) + self.tx_range.as_ref().map(|r| r.len()) } /// Number of blocks. 
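For reference, `r.len()` replaces the hand-rolled `(r.end() + 1) - r.start()`: the length of an inclusive range is one more than the difference of its bounds. A toy restatement of the helper introduced for `SegmentRangeInclusive`:

```rust
/// Toy version of the new `SegmentRangeInclusive::len`: an inclusive range contains
/// `end - start + 1` entries (`0..=0` is one block, not zero), with saturating math so a
/// degenerate range cannot underflow.
const fn inclusive_len(start: u64, end: u64) -> u64 {
    end.saturating_sub(start).saturating_add(1)
}

fn main() {
    assert_eq!(inclusive_len(0, 0), 1); // a single block
    assert_eq!(inclusive_len(100, 199), 100); // blocks 100..=199
    assert_eq!(inclusive_len(5, 3), 1); // degenerate range saturates instead of panicking
}
```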
pub fn block_len(&self) -> Option { - self.block_range.as_ref().map(|r| (r.end() + 1) - r.start()) + self.block_range.as_ref().map(|r| r.len()) } /// Increments block end range depending on segment @@ -329,6 +332,16 @@ impl SegmentRangeInclusive { pub const fn end(&self) -> u64 { self.end } + + /// Returns the length of the inclusive range. + pub const fn len(&self) -> u64 { + self.end.saturating_sub(self.start).saturating_add(1) + } + + /// Returns true if the range is empty. + pub const fn is_empty(&self) -> bool { + self.start > self.end + } } impl core::fmt::Display for SegmentRangeInclusive { @@ -358,8 +371,9 @@ impl From for RangeInclusive { #[cfg(test)] mod tests { use super::*; - use alloy_primitives::hex; + use alloy_primitives::Bytes; use reth_nippy_jar::NippyJar; + use std::env::temp_dir; #[test] fn test_filename() { @@ -418,53 +432,83 @@ mod tests { } #[test] - fn test_segment_config_backwards() { - let headers = hex!( - "010000000000000000000000000000001fa10700000000000100000000000000001fa10700000000000000000000030000000000000020a107000000000001010000004a02000000000000" - ); - let transactions = hex!( - "010000000000000000000000000000001fa10700000000000100000000000000001fa107000000000001000000000000000034a107000000000001000000010000000000000035a1070000000000004010000000000000" - ); - let receipts = hex!( - "010000000000000000000000000000001fa10700000000000100000000000000000000000000000000000200000001000000000000000000000000000000000000000000000000" + fn test_segment_config_serialization() { + let segments = vec![ + SegmentHeader { + expected_block_range: SegmentRangeInclusive::new(0, 200), + block_range: Some(SegmentRangeInclusive::new(0, 100)), + tx_range: None, + segment: StaticFileSegment::Headers, + }, + SegmentHeader { + expected_block_range: SegmentRangeInclusive::new(0, 200), + block_range: None, + tx_range: Some(SegmentRangeInclusive::new(0, 300)), + segment: StaticFileSegment::Transactions, + }, + SegmentHeader { + expected_block_range: SegmentRangeInclusive::new(0, 200), + block_range: Some(SegmentRangeInclusive::new(0, 100)), + tx_range: Some(SegmentRangeInclusive::new(0, 300)), + segment: StaticFileSegment::Receipts, + }, + SegmentHeader { + expected_block_range: SegmentRangeInclusive::new(0, 200), + block_range: Some(SegmentRangeInclusive::new(0, 100)), + tx_range: Some(SegmentRangeInclusive::new(0, 300)), + segment: StaticFileSegment::TransactionSenders, + }, + ]; + // Check that we test all segments + assert_eq!( + segments.iter().map(|segment| segment.segment()).collect::>(), + StaticFileSegment::iter().collect::>() ); - { - let headers = NippyJar::::load_from_reader(&headers[..]).unwrap(); - assert_eq!( - &SegmentHeader { - expected_block_range: SegmentRangeInclusive::new(0, 499999), - block_range: Some(SegmentRangeInclusive::new(0, 499999)), - tx_range: None, - segment: StaticFileSegment::Headers, - }, - headers.user_header() - ); + for header in segments { + let segment_jar = NippyJar::new(1, &temp_dir(), header); + let mut serialized = Vec::new(); + segment_jar.save_to_writer(&mut serialized).unwrap(); + + let deserialized = + NippyJar::::load_from_reader(&serialized[..]).unwrap(); + assert_eq!(deserialized.user_header(), segment_jar.user_header()); + + insta::assert_snapshot!(header.segment().to_string(), Bytes::from(serialized)); } - { - let transactions = - NippyJar::::load_from_reader(&transactions[..]).unwrap(); - assert_eq!( - &SegmentHeader { - expected_block_range: SegmentRangeInclusive::new(0, 499999), - block_range: 
Some(SegmentRangeInclusive::new(0, 499999)), - tx_range: Some(SegmentRangeInclusive::new(0, 500020)), - segment: StaticFileSegment::Transactions, - }, - transactions.user_header() - ); + } + + /// Used in filename writing/parsing + #[test] + fn test_static_file_segment_str_roundtrip() { + for segment in StaticFileSegment::iter() { + let static_str = segment.as_str(); + assert_eq!(StaticFileSegment::from_str(static_str).unwrap(), segment); + + let expected_str = match segment { + StaticFileSegment::Headers => "headers", + StaticFileSegment::Transactions => "transactions", + StaticFileSegment::Receipts => "receipts", + StaticFileSegment::TransactionSenders => "transaction-senders", + }; + assert_eq!(static_str, expected_str); } - { - let receipts = NippyJar::::load_from_reader(&receipts[..]).unwrap(); - assert_eq!( - &SegmentHeader { - expected_block_range: SegmentRangeInclusive::new(0, 499999), - block_range: Some(SegmentRangeInclusive::new(0, 0)), - tx_range: None, - segment: StaticFileSegment::Receipts, - }, - receipts.user_header() - ); + } + + /// Used in segment headers serialize/deserialize + #[test] + fn test_static_file_segment_serde_roundtrip() { + for segment in StaticFileSegment::iter() { + let ser = serde_json::to_string(&segment).unwrap(); + assert_eq!(serde_json::from_str::(&ser).unwrap(), segment); + + let expected_str = match segment { + StaticFileSegment::Headers => "Headers", + StaticFileSegment::Transactions => "Transactions", + StaticFileSegment::Receipts => "Receipts", + StaticFileSegment::TransactionSenders => "TransactionSenders", + }; + assert_eq!(ser, format!("\"{expected_str}\"")); } } } diff --git a/crates/static-file/types/src/snapshots/reth_static_file_types__segment__tests__Headers.snap b/crates/static-file/types/src/snapshots/reth_static_file_types__segment__tests__Headers.snap new file mode 100644 index 0000000000..9e97f7b33b --- /dev/null +++ b/crates/static-file/types/src/snapshots/reth_static_file_types__segment__tests__Headers.snap @@ -0,0 +1,5 @@ +--- +source: crates/static-file/types/src/segment.rs +expression: "Bytes::from(serialized)" +--- +0x01000000000000000000000000000000c8000000000000000100000000000000006400000000000000000000000001000000000000000000000000000000000000000000000000 diff --git a/crates/static-file/types/src/snapshots/reth_static_file_types__segment__tests__Receipts.snap b/crates/static-file/types/src/snapshots/reth_static_file_types__segment__tests__Receipts.snap new file mode 100644 index 0000000000..fb25e2ec09 --- /dev/null +++ b/crates/static-file/types/src/snapshots/reth_static_file_types__segment__tests__Receipts.snap @@ -0,0 +1,5 @@ +--- +source: crates/static-file/types/src/segment.rs +expression: "Bytes::from(serialized)" +--- +0x01000000000000000000000000000000c80000000000000001000000000000000064000000000000000100000000000000002c010000000000000200000001000000000000000000000000000000000000000000000000 diff --git a/crates/static-file/types/src/snapshots/reth_static_file_types__segment__tests__TransactionSenders.snap b/crates/static-file/types/src/snapshots/reth_static_file_types__segment__tests__TransactionSenders.snap new file mode 100644 index 0000000000..c4c85485cc --- /dev/null +++ b/crates/static-file/types/src/snapshots/reth_static_file_types__segment__tests__TransactionSenders.snap @@ -0,0 +1,5 @@ +--- +source: crates/static-file/types/src/segment.rs +expression: "Bytes::from(serialized)" +--- 
+0x01000000000000000000000000000000c80000000000000001000000000000000064000000000000000100000000000000002c010000000000000300000001000000000000000000000000000000000000000000000000 diff --git a/crates/static-file/types/src/snapshots/reth_static_file_types__segment__tests__Transactions.snap b/crates/static-file/types/src/snapshots/reth_static_file_types__segment__tests__Transactions.snap new file mode 100644 index 0000000000..4f6251a6b9 --- /dev/null +++ b/crates/static-file/types/src/snapshots/reth_static_file_types__segment__tests__Transactions.snap @@ -0,0 +1,5 @@ +--- +source: crates/static-file/types/src/segment.rs +expression: "Bytes::from(serialized)" +--- +0x01000000000000000000000000000000c800000000000000000100000000000000002c010000000000000100000001000000000000000000000000000000000000000000000000 diff --git a/crates/storage/codecs/derive/src/compact/flags.rs b/crates/storage/codecs/derive/src/compact/flags.rs index b6bad46291..c3e0b988cf 100644 --- a/crates/storage/codecs/derive/src/compact/flags.rs +++ b/crates/storage/codecs/derive/src/compact/flags.rs @@ -51,7 +51,7 @@ pub(crate) fn generate_flag_struct( quote! { buf.get_u8(), }; - total_bytes.into() + total_bytes ]; let docs = format!( @@ -64,11 +64,11 @@ pub(crate) fn generate_flag_struct( impl<'a> #ident<'a> { #[doc = #bitflag_encoded_bytes] pub const fn bitflag_encoded_bytes() -> usize { - #total_bytes as usize + #total_bytes } #[doc = #bitflag_unused_bits] pub const fn bitflag_unused_bits() -> usize { - #unused_bits as usize + #unused_bits } } } @@ -77,11 +77,11 @@ pub(crate) fn generate_flag_struct( impl #ident { #[doc = #bitflag_encoded_bytes] pub const fn bitflag_encoded_bytes() -> usize { - #total_bytes as usize + #total_bytes } #[doc = #bitflag_unused_bits] pub const fn bitflag_unused_bits() -> usize { - #unused_bits as usize + #unused_bits } } } @@ -123,8 +123,8 @@ fn build_struct_field_flags( fields: Vec<&StructFieldDescriptor>, field_flags: &mut Vec, is_zstd: bool, -) -> u8 { - let mut total_bits = 0; +) -> usize { + let mut total_bits: usize = 0; // Find out the adequate bit size for the length of each field, if applicable. for field in fields { @@ -138,7 +138,7 @@ fn build_struct_field_flags( let name = format_ident!("{name}_len"); let bitsize = get_bit_size(ftype); let bsize = format_ident!("B{bitsize}"); - total_bits += bitsize; + total_bits += bitsize as usize; field_flags.push(quote! { pub #name: #bsize , @@ -170,7 +170,7 @@ fn build_struct_field_flags( /// skipped field. /// /// Returns the total number of bytes used by the flags struct and how many unused bits. 
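The widened `usize` counters above ultimately answer one question: how many whole bytes do the flag bits occupy, and how many padding bits remain for future fields. A toy restatement of that arithmetic (not the derive macro's literal code):

```rust
/// Round the flag bit count up to whole bytes and report the leftover padding bits.
const fn flag_bytes(total_bits: usize) -> (usize, usize) {
    let bytes = (total_bits + 7) / 8;
    let unused = bytes * 8 - total_bits;
    (bytes, unused)
}

fn main() {
    assert_eq!(flag_bytes(8), (1, 0)); // exactly one byte, fully used
    assert_eq!(flag_bytes(10), (2, 6)); // two bytes, six spare bits for future fields
    assert_eq!(flag_bytes(0), (0, 0)); // no flags, no bitfield bytes
}
```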
-fn pad_flag_struct(total_bits: u8, field_flags: &mut Vec) -> (u8, u8) { +fn pad_flag_struct(total_bits: usize, field_flags: &mut Vec) -> (usize, usize) { let remaining = 8 - total_bits % 8; if remaining == 8 { (total_bits / 8, 0) diff --git a/crates/storage/codecs/derive/src/compact/mod.rs b/crates/storage/codecs/derive/src/compact/mod.rs index 00f622be43..ed43286923 100644 --- a/crates/storage/codecs/derive/src/compact/mod.rs +++ b/crates/storage/codecs/derive/src/compact/mod.rs @@ -82,7 +82,7 @@ pub fn get_fields(data: &Data) -> FieldList { ); load_field(&data_fields.unnamed[0], &mut fields, false); } - syn::Fields::Unit => todo!(), + syn::Fields::Unit => unimplemented!("Compact does not support unit structs"), }, Data::Enum(data) => { for variant in &data.variants { @@ -106,7 +106,7 @@ pub fn get_fields(data: &Data) -> FieldList { } } } - Data::Union(_) => todo!(), + Data::Union(_) => unimplemented!("Compact does not support union types"), } fields @@ -176,7 +176,8 @@ fn should_use_alt_impl(ftype: &str, segment: &syn::PathSegment) -> bool { let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = args.args.last() && let (Some(path), 1) = (arg_path.path.segments.first(), arg_path.path.segments.len()) && ["B256", "Address", "Address", "Bloom", "TxHash", "BlockHash", "CompactPlaceholder"] - .contains(&path.ident.to_string().as_str()) + .iter() + .any(|&s| path.ident == s) { return true } @@ -237,11 +238,11 @@ mod tests { impl TestStruct { #[doc = "Used bytes by [`TestStructFlags`]"] pub const fn bitflag_encoded_bytes() -> usize { - 2u8 as usize + 2usize } #[doc = "Unused bits for new fields by [`TestStructFlags`]"] pub const fn bitflag_unused_bits() -> usize { - 1u8 as usize + 1usize } } diff --git a/crates/storage/codecs/src/alloy/mod.rs b/crates/storage/codecs/src/alloy/mod.rs index 34fcd6fdc2..9081fad098 100644 --- a/crates/storage/codecs/src/alloy/mod.rs +++ b/crates/storage/codecs/src/alloy/mod.rs @@ -24,6 +24,9 @@ cond_mod!( withdrawal ); +#[cfg(all(feature = "op", feature = "std"))] +pub mod optimism; + pub mod transaction; #[cfg(test)] diff --git a/crates/storage/codecs/src/alloy/optimism.rs b/crates/storage/codecs/src/alloy/optimism.rs new file mode 100644 index 0000000000..7a851a5041 --- /dev/null +++ b/crates/storage/codecs/src/alloy/optimism.rs @@ -0,0 +1,97 @@ +//! Compact implementations for Optimism types. 
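The new module below uses a borrowed intermediate struct so that encoding a receipt never clones its logs, while decoding produces an owned value. A hypothetical mirror of that pattern with simplified types (not the OP receipt types themselves):

```rust
use std::borrow::Cow;

struct Receipt {
    logs: Vec<String>,
}

/// Borrowed intermediate: encoding borrows the logs, decoding hands back an owned value.
struct CompactReceipt<'a> {
    logs: Cow<'a, Vec<String>>,
}

impl<'a> From<&'a Receipt> for CompactReceipt<'a> {
    fn from(receipt: &'a Receipt) -> Self {
        // Encoding path: no clone of the logs, just a borrow.
        Self { logs: Cow::Borrowed(&receipt.logs) }
    }
}

impl From<CompactReceipt<'_>> for Receipt {
    fn from(compact: CompactReceipt<'_>) -> Self {
        // Decoding path: `into_owned` is a move when the Cow already owns its data
        // (as it does after deserialization) and a clone only in the borrowed case.
        Self { logs: compact.logs.into_owned() }
    }
}

fn main() {
    let receipt = Receipt { logs: vec!["log0".to_string()] };
    let compact = CompactReceipt::from(&receipt);
    assert!(matches!(&compact.logs, Cow::Borrowed(_)));
    let roundtripped = Receipt::from(compact);
    assert_eq!(roundtripped.logs, receipt.logs);
}
```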
+ +use crate::Compact; +use alloc::{borrow::Cow, vec::Vec}; +use alloy_consensus::{Receipt, TxReceipt}; +use alloy_primitives::Log; +use op_alloy_consensus::{OpDepositReceipt, OpReceipt, OpTxType}; +use reth_codecs_derive::CompactZstd; + +#[derive(CompactZstd)] +#[reth_codecs(crate = "crate")] +#[reth_zstd( + compressor = reth_zstd_compressors::RECEIPT_COMPRESSOR, + decompressor = reth_zstd_compressors::RECEIPT_DECOMPRESSOR +)] +struct CompactOpReceipt<'a> { + tx_type: OpTxType, + success: bool, + cumulative_gas_used: u64, + #[expect(clippy::owned_cow)] + logs: Cow<'a, Vec>, + deposit_nonce: Option, + deposit_receipt_version: Option, +} + +impl<'a> From<&'a OpReceipt> for CompactOpReceipt<'a> { + fn from(receipt: &'a OpReceipt) -> Self { + Self { + tx_type: receipt.tx_type(), + success: receipt.status(), + cumulative_gas_used: receipt.cumulative_gas_used(), + logs: Cow::Borrowed(&receipt.as_receipt().logs), + deposit_nonce: if let OpReceipt::Deposit(receipt) = receipt { + receipt.deposit_nonce + } else { + None + }, + deposit_receipt_version: if let OpReceipt::Deposit(receipt) = receipt { + receipt.deposit_receipt_version + } else { + None + }, + } + } +} + +impl From> for OpReceipt { + fn from(receipt: CompactOpReceipt<'_>) -> Self { + let CompactOpReceipt { + tx_type, + success, + cumulative_gas_used, + logs, + deposit_nonce, + deposit_receipt_version, + } = receipt; + + let inner = + Receipt { status: success.into(), cumulative_gas_used, logs: logs.into_owned() }; + + match tx_type { + OpTxType::Legacy => Self::Legacy(inner), + OpTxType::Eip2930 => Self::Eip2930(inner), + OpTxType::Eip1559 => Self::Eip1559(inner), + OpTxType::Eip7702 => Self::Eip7702(inner), + OpTxType::Deposit => { + Self::Deposit(OpDepositReceipt { inner, deposit_nonce, deposit_receipt_version }) + } + } + } +} + +impl Compact for OpReceipt { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + CompactOpReceipt::from(self).to_compact(buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (receipt, buf) = CompactOpReceipt::from_compact(buf, len); + (receipt.into(), buf) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{test_utils::UnusedBits, validate_bitflag_backwards_compat}; + + #[test] + fn test_ensure_backwards_compatibility() { + assert_eq!(CompactOpReceipt::bitflag_encoded_bytes(), 2); + validate_bitflag_backwards_compat!(CompactOpReceipt<'_>, UnusedBits::NotZero); + } +} diff --git a/crates/storage/codecs/src/alloy/transaction/eip1559.rs b/crates/storage/codecs/src/alloy/transaction/eip1559.rs index 6d910a6900..f13422a2de 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip1559.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip1559.rs @@ -53,7 +53,8 @@ impl Compact for AlloyTxEip1559 { } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (tx, _) = TxEip1559::from_compact(buf, len); + // Return the remaining slice from the inner from_compact to advance the cursor correctly. 
+ let (tx, remaining) = TxEip1559::from_compact(buf, len); let alloy_tx = Self { chain_id: tx.chain_id, @@ -67,6 +68,6 @@ impl Compact for AlloyTxEip1559 { input: tx.input, }; - (alloy_tx, buf) + (alloy_tx, remaining) } } diff --git a/crates/storage/codecs/src/alloy/transaction/eip2930.rs b/crates/storage/codecs/src/alloy/transaction/eip2930.rs index aeb08f361b..a5c25a84d4 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip2930.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip2930.rs @@ -52,7 +52,8 @@ impl Compact for AlloyTxEip2930 { } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (tx, _) = TxEip2930::from_compact(buf, len); + // Return the remaining slice from the inner from_compact to advance the cursor correctly. + let (tx, remaining) = TxEip2930::from_compact(buf, len); let alloy_tx = Self { chain_id: tx.chain_id, nonce: tx.nonce, @@ -63,6 +64,6 @@ impl Compact for AlloyTxEip2930 { access_list: tx.access_list, input: tx.input, }; - (alloy_tx, buf) + (alloy_tx, remaining) } } diff --git a/crates/storage/codecs/src/alloy/transaction/eip4844.rs b/crates/storage/codecs/src/alloy/transaction/eip4844.rs index 6367f3e08e..6ea1927f7d 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip4844.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip4844.rs @@ -68,7 +68,8 @@ impl Compact for AlloyTxEip4844 { } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (tx, _) = TxEip4844::from_compact(buf, len); + // Return the remaining slice from the inner from_compact to advance the cursor correctly. + let (tx, remaining) = TxEip4844::from_compact(buf, len); let alloy_tx = Self { chain_id: tx.chain_id, nonce: tx.nonce, @@ -82,7 +83,7 @@ impl Compact for AlloyTxEip4844 { max_fee_per_blob_gas: tx.max_fee_per_blob_gas, input: tx.input, }; - (alloy_tx, buf) + (alloy_tx, remaining) } } diff --git a/crates/storage/codecs/src/alloy/transaction/eip7702.rs b/crates/storage/codecs/src/alloy/transaction/eip7702.rs index eab10af0b6..95de81c380 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip7702.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip7702.rs @@ -57,7 +57,8 @@ impl Compact for AlloyTxEip7702 { } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (tx, _) = TxEip7702::from_compact(buf, len); + // Return the remaining slice from the inner from_compact to advance the cursor correctly. + let (tx, remaining) = TxEip7702::from_compact(buf, len); let alloy_tx = Self { chain_id: tx.chain_id, nonce: tx.nonce, @@ -70,6 +71,6 @@ impl Compact for AlloyTxEip7702 { access_list: tx.access_list, authorization_list: tx.authorization_list, }; - (alloy_tx, buf) + (alloy_tx, remaining) } } diff --git a/crates/storage/codecs/src/alloy/transaction/legacy.rs b/crates/storage/codecs/src/alloy/transaction/legacy.rs index 1667893dc3..c4caf97ac3 100644 --- a/crates/storage/codecs/src/alloy/transaction/legacy.rs +++ b/crates/storage/codecs/src/alloy/transaction/legacy.rs @@ -67,7 +67,8 @@ impl Compact for AlloyTxLegacy { } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (tx, _) = TxLegacy::from_compact(buf, len); + // Return the remaining slice from the inner from_compact to advance the cursor correctly. 
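Every alloy transaction wrapper in this file and the ones below gets the same one-line fix. A minimal sketch of the contract being restored, using a hypothetical generic `Wrapper` instead of the real alloy types:

use reth_codecs::Compact;

/// Hypothetical wrapper type, for illustration only.
struct Wrapper<T>(T);

impl<T: Compact> Compact for Wrapper<T> {
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
    {
        self.0.to_compact(buf)
    }

    fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) {
        // Hand back the unconsumed tail, not the original `buf`: callers decoding several
        // values in sequence treat the returned slice as the start of the next value.
        let (inner, remaining) = T::from_compact(buf, len);
        (Self(inner), remaining)
    }
}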
+ let (tx, remaining) = TxLegacy::from_compact(buf, len); let alloy_tx = Self { chain_id: tx.chain_id, @@ -79,6 +80,6 @@ impl Compact for AlloyTxLegacy { input: tx.input, }; - (alloy_tx, buf) + (alloy_tx, remaining) } } diff --git a/crates/storage/codecs/src/alloy/transaction/optimism.rs b/crates/storage/codecs/src/alloy/transaction/optimism.rs index 40333ce988..7f9c318e6a 100644 --- a/crates/storage/codecs/src/alloy/transaction/optimism.rs +++ b/crates/storage/codecs/src/alloy/transaction/optimism.rs @@ -66,7 +66,8 @@ impl Compact for AlloyTxDeposit { } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (tx, _) = TxDeposit::from_compact(buf, len); + // Return the remaining slice from the inner from_compact to advance the cursor correctly. + let (tx, remaining) = TxDeposit::from_compact(buf, len); let alloy_tx = Self { source_hash: tx.source_hash, from: tx.from, @@ -77,7 +78,7 @@ impl Compact for AlloyTxDeposit { is_system_transaction: tx.is_system_transaction, input: tx.input, }; - (alloy_tx, buf) + (alloy_tx, remaining) } } diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index bd77b9d63d..e91c6c12e7 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -50,7 +50,9 @@ proptest = { workspace = true, optional = true } [dev-dependencies] # reth libs with arbitrary reth-codecs = { workspace = true, features = ["test-utils"] } +reth-db-models = { workspace = true, features = ["arbitrary"] } +alloy-primitives = { workspace = true, features = ["rand"] } rand.workspace = true test-fuzz.workspace = true diff --git a/crates/storage/db-api/src/cursor.rs b/crates/storage/db-api/src/cursor.rs index 068b64a3c9..133f6dceb8 100644 --- a/crates/storage/db-api/src/cursor.rs +++ b/crates/storage/db-api/src/cursor.rs @@ -318,6 +318,14 @@ impl + DbCursorRO> RangeWalker<'_, T, CURSOR> } } +impl + DbCursorRO> RangeWalker<'_, T, CURSOR> { + /// Delete all duplicate entries for current key that walker points to. + pub fn delete_current_duplicates(&mut self) -> Result<(), DatabaseError> { + self.start.take(); + self.cursor.delete_current_duplicates() + } +} + /// Provides an iterator to `Cursor` when handling a `DupSort` table. /// /// Reason why we have two lifetimes is to distinguish between `'cursor` lifetime diff --git a/crates/storage/db-api/src/database.rs b/crates/storage/db-api/src/database.rs index df7c3a5678..1f8e3e125a 100644 --- a/crates/storage/db-api/src/database.rs +++ b/crates/storage/db-api/src/database.rs @@ -26,11 +26,11 @@ pub trait Database: Send + Sync + Debug { /// end of the execution. fn view(&self, f: F) -> Result where - F: FnOnce(&Self::TX) -> T, + F: FnOnce(&mut Self::TX) -> T, { - let tx = self.tx()?; + let mut tx = self.tx()?; - let res = f(&tx); + let res = f(&mut tx); tx.commit()?; Ok(res) diff --git a/crates/storage/db-api/src/models/metadata.rs b/crates/storage/db-api/src/models/metadata.rs new file mode 100644 index 0000000000..60862e0df6 --- /dev/null +++ b/crates/storage/db-api/src/models/metadata.rs @@ -0,0 +1,78 @@ +//! Storage metadata models. + +use reth_codecs::{add_arbitrary_tests, Compact}; +use serde::{Deserialize, Serialize}; + +/// Storage configuration settings for this node. +/// +/// These should be set during `init_genesis` or `init_db` depending on whether we want dictate +/// behaviour of new or old nodes respectively. 
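A usage sketch of the adjusted `Database::view` signature from above (the closure now receives `&mut Self::TX`; the transaction is still committed on return). The helper name `genesis_canonical_hash` is made up:

use alloy_primitives::B256;
use reth_db_api::{database::Database, tables, transaction::DbTx, DatabaseError};

fn genesis_canonical_hash<DB: Database>(db: &DB) -> Result<Option<B256>, DatabaseError> {
    // `view` opens a read transaction, hands the closure a mutable handle, then commits.
    db.view(|tx| tx.get::<tables::CanonicalHeaders>(0))?
}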
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Compact)] +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] +#[add_arbitrary_tests(compact)] +pub struct StorageSettings { + /// Whether this node always writes receipts to static files. + /// + /// If this is set to FALSE AND receipt pruning IS ENABLED, all receipts should be written to DB. Otherwise, they should be written to static files. This ensures that older nodes do not need to migrate their current DB tables to static files. For more, read: + #[serde(default)] + pub receipts_in_static_files: bool, + /// Whether this node always writes transaction senders to static files. + #[serde(default)] + pub transaction_senders_in_static_files: bool, + /// Whether `StoragesHistory` is stored in `RocksDB`. + #[serde(default)] + pub storages_history_in_rocksdb: bool, + /// Whether `TransactionHashNumbers` is stored in `RocksDB`. + #[serde(default)] + pub transaction_hash_numbers_in_rocksdb: bool, + /// Whether `AccountsHistory` is stored in `RocksDB`. + #[serde(default)] + pub account_history_in_rocksdb: bool, +} + +impl StorageSettings { + /// Creates `StorageSettings` for legacy nodes. + /// + /// This explicitly sets `receipts_in_static_files` and `transaction_senders_in_static_files` to + /// `false`, ensuring older nodes continue writing receipts and transaction senders to the + /// database when receipt pruning is enabled. + pub const fn legacy() -> Self { + Self { + receipts_in_static_files: false, + transaction_senders_in_static_files: false, + storages_history_in_rocksdb: false, + transaction_hash_numbers_in_rocksdb: false, + account_history_in_rocksdb: false, + } + } + + /// Sets the `receipts_in_static_files` flag to the provided value. + pub const fn with_receipts_in_static_files(mut self, value: bool) -> Self { + self.receipts_in_static_files = value; + self + } + + /// Sets the `transaction_senders_in_static_files` flag to the provided value. + pub const fn with_transaction_senders_in_static_files(mut self, value: bool) -> Self { + self.transaction_senders_in_static_files = value; + self + } + + /// Sets the `storages_history_in_rocksdb` flag to the provided value. + pub const fn with_storages_history_in_rocksdb(mut self, value: bool) -> Self { + self.storages_history_in_rocksdb = value; + self + } + + /// Sets the `transaction_hash_numbers_in_rocksdb` flag to the provided value. + pub const fn with_transaction_hash_numbers_in_rocksdb(mut self, value: bool) -> Self { + self.transaction_hash_numbers_in_rocksdb = value; + self + } + + /// Sets the `account_history_in_rocksdb` flag to the provided value. 
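A usage sketch of the builder above (types and methods as introduced in this file): legacy nodes keep everything in the database, while a hypothetically reconfigured new node could opt receipts and senders into static files.

use reth_db_api::models::StorageSettings;

let legacy = StorageSettings::legacy();
assert!(!legacy.receipts_in_static_files);

// Hypothetical settings for a freshly initialized node.
let fresh = StorageSettings::legacy()
    .with_receipts_in_static_files(true)
    .with_transaction_senders_in_static_files(true);
assert!(fresh.receipts_in_static_files && fresh.transaction_senders_in_static_files);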
+ pub const fn with_account_history_in_rocksdb(mut self, value: bool) -> Self { + self.account_history_in_rocksdb = value; + self + } +} diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 31d9b301f8..ebc3625250 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -20,12 +20,14 @@ use serde::{Deserialize, Serialize}; pub mod accounts; pub mod blocks; pub mod integer_list; +pub mod metadata; pub mod sharded_key; pub mod storage_sharded_key; pub use accounts::*; pub use blocks::*; pub use integer_list::IntegerList; +pub use metadata::*; pub use reth_db_models::{ AccountBeforeTx, ClientVersion, StaticFileBlockWithdrawals, StoredBlockBodyIndices, StoredBlockWithdrawals, diff --git a/crates/storage/db-api/src/table.rs b/crates/storage/db-api/src/table.rs index 5715852a5d..54517908de 100644 --- a/crates/storage/db-api/src/table.rs +++ b/crates/storage/db-api/src/table.rs @@ -139,6 +139,9 @@ pub trait TableImporter: DbTxMut { } /// Imports table data from another transaction within a range. + /// + /// This method works correctly with both regular and `DupSort` tables. For `DupSort` tables, + /// all duplicate entries within the range are preserved during import. fn import_table_with_range( &self, source_tx: &R, diff --git a/crates/storage/db-api/src/tables/mod.rs b/crates/storage/db-api/src/tables/mod.rs index cf2a20fff0..903d4ca762 100644 --- a/crates/storage/db-api/src/tables/mod.rs +++ b/crates/storage/db-api/src/tables/mod.rs @@ -94,7 +94,10 @@ pub trait TableViewer { /// Operate on the dupsort table in a generic way. /// /// By default, the `view` function is invoked unless overridden. - fn view_dupsort(&self) -> Result { + fn view_dupsort(&self) -> Result + where + T::Value: reth_primitives_traits::ValueWithSubKey, + { self.view::() } } @@ -540,6 +543,13 @@ tables! { type Key = ChainStateKey; type Value = BlockNumber; } + + /// Stores generic node metadata as key-value pairs. + /// Can store feature flags, configuration markers, and other node-specific data. + table Metadata { + type Key = String; + type Value = Vec; + } } /// Keys for the `ChainState` table. diff --git a/crates/storage/db-api/src/transaction.rs b/crates/storage/db-api/src/transaction.rs index d6028b7c5e..fa9306be84 100644 --- a/crates/storage/db-api/src/transaction.rs +++ b/crates/storage/db-api/src/transaction.rs @@ -5,8 +5,14 @@ use crate::{ }; use std::fmt::Debug; +/// Helper adapter type for accessing [`DbTx`] cursor. +pub type CursorTy = ::Cursor; + +/// Helper adapter type for accessing [`DbTxMut`] mutable cursor. 
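A short sketch of using the new key-value `Metadata` table (assuming `tx` is a writable database transaction in scope, the value type is `Vec<u8>`, and the key "example-flag" is made up): values are raw bytes, so callers pick their own encoding.

use reth_db_api::{tables, transaction::{DbTx, DbTxMut}};

tx.put::<tables::Metadata>("example-flag".to_string(), vec![1u8])?;
let raw: Option<Vec<u8>> = tx.get::<tables::Metadata>("example-flag".to_string())?;
assert_eq!(raw, Some(vec![1u8]));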
+pub type CursorMutTy = ::CursorMut; + /// Read only transaction -pub trait DbTx: Debug + Send + Sync { +pub trait DbTx: Debug + Send { /// Cursor type for this read-only transaction type Cursor: DbCursorRO + Send + Sync; /// `DupCursor` type for this read-only transaction @@ -37,7 +43,7 @@ pub trait DbTx: Debug + Send + Sync { } /// Read write transaction that allows writing to database -pub trait DbTxMut: Send + Sync { +pub trait DbTxMut: Send { /// Read-Write Cursor type type CursorMut: DbCursorRW + DbCursorRO + Send + Sync; /// Read-Write `DupCursor` type diff --git a/crates/storage/db-api/src/unwind.rs b/crates/storage/db-api/src/unwind.rs index 79cf585a62..e737edfd51 100644 --- a/crates/storage/db-api/src/unwind.rs +++ b/crates/storage/db-api/src/unwind.rs @@ -30,7 +30,7 @@ pub trait DbTxUnwindExt: DbTxMut { let mut deleted = 0; while let Some(Ok((entry_key, _))) = reverse_walker.next() { - if selector(entry_key.clone()) <= key { + if selector(entry_key) <= key { break } reverse_walker.delete_current()?; diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index de55cea3c9..cffcdbe57f 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -15,9 +15,9 @@ use reth_primitives_traits::{ use reth_provider::{ errors::provider::ProviderResult, providers::StaticFileWriter, BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome, - HashingWriter, HeaderProvider, HistoryWriter, OriginalValuesKnown, ProviderError, RevertsInit, - StageCheckpointReader, StageCheckpointWriter, StateWriter, StaticFileProviderFactory, - TrieWriter, + HashingWriter, HeaderProvider, HistoryWriter, MetadataWriter, OriginalValuesKnown, + ProviderError, RevertsInit, StageCheckpointReader, StageCheckpointWriter, StateWriter, + StaticFileProviderFactory, StorageSettings, StorageSettingsCache, TrieWriter, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; @@ -90,7 +90,8 @@ where + StaticFileProviderFactory> + ChainSpecProvider + StageCheckpointReader - + BlockHashReader, + + BlockHashReader + + StorageSettingsCache, PF::ProviderRW: StaticFileProviderFactory + StageCheckpointWriter + HistoryWriter @@ -98,6 +99,35 @@ where + HashingWriter + StateWriter + TrieWriter + + MetadataWriter + + ChainSpecProvider + + AsRef, + PF::ChainSpec: EthChainSpec
::BlockHeader>, +{ + init_genesis_with_settings(factory, StorageSettings::legacy()) +} + +/// Write the genesis block if it has not already been written with [`StorageSettings`]. +pub fn init_genesis_with_settings( + factory: &PF, + storage_settings: StorageSettings, +) -> Result +where + PF: DatabaseProviderFactory + + StaticFileProviderFactory> + + ChainSpecProvider + + StageCheckpointReader + + BlockHashReader + + StorageSettingsCache, + PF::ProviderRW: StaticFileProviderFactory + + StageCheckpointWriter + + HistoryWriter + + HeaderProvider + + HashingWriter + + StateWriter + + TrieWriter + + MetadataWriter + + ChainSpecProvider + AsRef, PF::ChainSpec: EthChainSpec
::BlockHeader>, { @@ -106,9 +136,12 @@ where let genesis = chain.genesis(); let hash = chain.genesis_hash(); + // Get the genesis block number from the chain spec + let genesis_block_number = chain.genesis_header().number(); + // Check if we already have the genesis header or if we have the wrong one. - match factory.block_hash(0) { - Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => {} + match factory.block_hash(genesis_block_number) { + Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, _)) => {} Ok(Some(block_hash)) => { if block_hash == hash { // Some users will at times attempt to re-sync from scratch by just deleting the @@ -151,19 +184,34 @@ where // compute state root to populate trie tables compute_state_root(&provider_rw, None)?; - // insert sync stage + // set stage checkpoint to genesis block number for all stages + let checkpoint = StageCheckpoint::new(genesis_block_number); for stage in StageId::ALL { - provider_rw.save_stage_checkpoint(stage, Default::default())?; + provider_rw.save_stage_checkpoint(stage, checkpoint)?; } // Static file segments start empty, so we need to initialize the genesis block. let static_file_provider = provider_rw.static_file_provider(); - static_file_provider.latest_writer(StaticFileSegment::Receipts)?.increment_block(0)?; - static_file_provider.latest_writer(StaticFileSegment::Transactions)?.increment_block(0)?; + + // Static file segments start empty, so we need to initialize the genesis block. + // For genesis blocks with non-zero block numbers, we need to use get_writer() instead of + // latest_writer() to ensure the genesis block is stored in the correct static file range. + static_file_provider + .get_writer(genesis_block_number, StaticFileSegment::Receipts)? + .user_header_mut() + .set_block_range(genesis_block_number, genesis_block_number); + static_file_provider + .get_writer(genesis_block_number, StaticFileSegment::Transactions)? + .user_header_mut() + .set_block_range(genesis_block_number, genesis_block_number); + + // Behaviour reserved only for new nodes should be set here. + provider_rw.write_storage_settings(storage_settings)?; // `commit_unwind`` will first commit the DB and then the static file provider, which is // necessary on `init_genesis`. provider_rw.commit()?; + factory.set_storage_settings_cache(storage_settings); Ok(hash) } @@ -178,9 +226,11 @@ where + DBProvider + HeaderProvider + StateWriter + + ChainSpecProvider + AsRef, { - insert_state(provider, alloc, 0) + let genesis_block_number = provider.chain_spec().genesis_header().number(); + insert_state(provider, alloc, genesis_block_number) } /// Inserts state at given block into database. @@ -303,9 +353,10 @@ pub fn insert_genesis_history<'a, 'b, Provider>( alloc: impl Iterator + Clone, ) -> ProviderResult<()> where - Provider: DBProvider + HistoryWriter, + Provider: DBProvider + HistoryWriter + ChainSpecProvider, { - insert_history(provider, alloc, 0) + let genesis_block_number = provider.chain_spec().genesis_header().number(); + insert_history(provider, alloc, genesis_block_number) } /// Inserts history indices for genesis accounts and storage. 
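A sketch of how the two genesis entry points differ (assuming `factory` satisfies the bounds listed on `init_genesis_with_settings` above): existing nodes keep calling `init_genesis`, which forwards `StorageSettings::legacy()`, while a new node can pass explicit settings.

use reth_provider::StorageSettings;

// Existing nodes: unchanged behaviour, legacy settings are persisted and cached.
let genesis_hash = init_genesis(&factory)?;

// Hypothetical new node: receipts and transaction senders go to static files from genesis.
let settings = StorageSettings::legacy()
    .with_receipts_in_static_files(true)
    .with_transaction_senders_in_static_files(true);
let genesis_hash = init_genesis_with_settings(&factory, settings)?;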
@@ -345,17 +396,37 @@ where let (header, block_hash) = (chain.genesis_header(), chain.genesis_hash()); let static_file_provider = provider.static_file_provider(); - match static_file_provider.block_hash(0) { - Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => { - let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?; - writer.append_header(header, &block_hash)?; + // Get the actual genesis block number from the header + let genesis_block_number = header.number(); + + match static_file_provider.block_hash(genesis_block_number) { + Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, _)) => { + let difficulty = header.difficulty(); + + // For genesis blocks with non-zero block numbers, we need to ensure they are stored + // in the correct static file range. We use get_writer() with the genesis block number + // to ensure the genesis block is stored in the correct static file range. + let mut writer = static_file_provider + .get_writer(genesis_block_number, StaticFileSegment::Headers)?; + + // For non-zero genesis blocks, we need to set block range to genesis_block_number and + // append header without increment block + if genesis_block_number > 0 { + writer + .user_header_mut() + .set_block_range(genesis_block_number, genesis_block_number); + writer.append_header_direct(header, difficulty, &block_hash)?; + } else { + // For zero genesis blocks, use normal append_header + writer.append_header(header, &block_hash)?; + } } Ok(Some(_)) => {} Err(e) => return Err(e), } - provider.tx_ref().put::(block_hash, 0)?; - provider.tx_ref().put::(0, Default::default())?; + provider.tx_ref().put::(block_hash, genesis_block_number)?; + provider.tx_ref().put::(genesis_block_number, Default::default())?; Ok(()) } @@ -482,7 +553,8 @@ fn parse_accounts( let mut line = String::new(); let mut collector = Collector::new(etl_config.file_size, etl_config.dir); - while let Ok(n) = reader.read_line(&mut line) { + loop { + let n = reader.read_line(&mut line)?; if n == 0 { break } @@ -678,7 +750,7 @@ mod tests { }; use reth_provider::{ test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, - ProviderFactory, + ProviderFactory, RocksDBProviderFactory, }; use std::{collections::BTreeMap, sync::Arc}; @@ -723,14 +795,19 @@ mod tests { fn fail_init_inconsistent_db() { let factory = create_test_provider_factory_with_chain_spec(SEPOLIA.clone()); let static_file_provider = factory.static_file_provider(); + let rocksdb_provider = factory.rocksdb_provider(); init_genesis(&factory).unwrap(); // Try to init db with a different genesis block - let genesis_hash = init_genesis(&ProviderFactory::::new( - factory.into_db(), - MAINNET.clone(), - static_file_provider, - )); + let genesis_hash = init_genesis( + &ProviderFactory::::new( + factory.into_db(), + MAINNET.clone(), + static_file_provider, + rocksdb_provider, + ) + .unwrap(), + ); assert!(matches!( genesis_hash.unwrap_err(), diff --git a/crates/storage/db-models/src/accounts.rs b/crates/storage/db-models/src/accounts.rs index cbae5d84aa..409d80abaa 100644 --- a/crates/storage/db-models/src/accounts.rs +++ b/crates/storage/db-models/src/accounts.rs @@ -1,5 +1,5 @@ use alloy_primitives::Address; -use reth_primitives_traits::Account; +use reth_primitives_traits::{Account, ValueWithSubKey}; /// Account as it is saved in the database. 
/// @@ -15,6 +15,14 @@ pub struct AccountBeforeTx { pub info: Option, } +impl ValueWithSubKey for AccountBeforeTx { + type SubKey = Address; + + fn get_subkey(&self) -> Self::SubKey { + self.address + } +} + // NOTE: Removing reth_codec and manually encode subkey // and compress second part of the value. If we have compression // over whole value (Even SubKey) that would mess up fetching of values with seek_by_key_subkey diff --git a/crates/storage/db-models/src/blocks.rs b/crates/storage/db-models/src/blocks.rs index 2512db1cc9..71f4cfe9aa 100644 --- a/crates/storage/db-models/src/blocks.rs +++ b/crates/storage/db-models/src/blocks.rs @@ -64,6 +64,11 @@ impl StoredBlockBodyIndices { pub const fn tx_count(&self) -> NumTransactions { self.tx_count } + + /// Returns true if the block contains a transaction with the given number. + pub const fn contains_tx(&self, tx_num: TxNumber) -> bool { + tx_num >= self.first_tx_num && tx_num < self.next_tx_num() + } } /// The storage representation of block withdrawals. diff --git a/crates/storage/db/src/implementation/mdbx/cursor.rs b/crates/storage/db/src/implementation/mdbx/cursor.rs index 0bbb75ce4b..5ca6eacb6c 100644 --- a/crates/storage/db/src/implementation/mdbx/cursor.rs +++ b/crates/storage/db/src/implementation/mdbx/cursor.rs @@ -345,3 +345,110 @@ impl DbDupCursorRW for Cursor { ) } } + +#[cfg(test)] +mod tests { + use crate::{ + mdbx::{DatabaseArguments, DatabaseEnv, DatabaseEnvKind}, + tables::StorageChangeSets, + Database, + }; + use alloy_primitives::{address, Address, B256, U256}; + use reth_db_api::{ + cursor::{DbCursorRO, DbDupCursorRW}, + models::{BlockNumberAddress, ClientVersion}, + table::TableImporter, + transaction::{DbTx, DbTxMut}, + }; + use reth_primitives_traits::StorageEntry; + use std::sync::Arc; + use tempfile::TempDir; + + fn create_test_db() -> Arc { + let path = TempDir::new().unwrap(); + let mut db = DatabaseEnv::open( + path.path(), + DatabaseEnvKind::RW, + DatabaseArguments::new(ClientVersion::default()), + ) + .unwrap(); + db.create_tables().unwrap(); + Arc::new(db) + } + + #[test] + fn test_import_table_with_range_works_on_dupsort() { + let addr1 = address!("0000000000000000000000000000000000000001"); + let addr2 = address!("0000000000000000000000000000000000000002"); + let addr3 = address!("0000000000000000000000000000000000000003"); + let source_db = create_test_db(); + let target_db = create_test_db(); + let test_data = vec![ + ( + BlockNumberAddress((100, addr1)), + StorageEntry { key: B256::with_last_byte(1), value: U256::from(100) }, + ), + ( + BlockNumberAddress((100, addr1)), + StorageEntry { key: B256::with_last_byte(2), value: U256::from(200) }, + ), + ( + BlockNumberAddress((100, addr1)), + StorageEntry { key: B256::with_last_byte(3), value: U256::from(300) }, + ), + ( + BlockNumberAddress((101, addr1)), + StorageEntry { key: B256::with_last_byte(1), value: U256::from(400) }, + ), + ( + BlockNumberAddress((101, addr2)), + StorageEntry { key: B256::with_last_byte(1), value: U256::from(500) }, + ), + ( + BlockNumberAddress((101, addr2)), + StorageEntry { key: B256::with_last_byte(2), value: U256::from(600) }, + ), + ( + BlockNumberAddress((102, addr3)), + StorageEntry { key: B256::with_last_byte(1), value: U256::from(700) }, + ), + ]; + + // setup data + let tx = source_db.tx_mut().unwrap(); + { + let mut cursor = tx.cursor_dup_write::().unwrap(); + for (key, value) in &test_data { + cursor.append_dup(*key, *value).unwrap(); + } + } + tx.commit().unwrap(); + + // import data from source db to target + let 
source_tx = source_db.tx().unwrap(); + let target_tx = target_db.tx_mut().unwrap(); + + target_tx + .import_table_with_range::( + &source_tx, + Some(BlockNumberAddress((100, Address::ZERO))), + BlockNumberAddress((102, Address::repeat_byte(0xff))), + ) + .unwrap(); + target_tx.commit().unwrap(); + + // fetch all data from target db + let verify_tx = target_db.tx().unwrap(); + let mut cursor = verify_tx.cursor_dup_read::().unwrap(); + let copied: Vec<_> = cursor.walk(None).unwrap().collect::, _>>().unwrap(); + + // verify each entry matches the test data + assert_eq!(copied.len(), test_data.len(), "Should copy all entries including duplicates"); + for ((copied_key, copied_value), (expected_key, expected_value)) in + copied.iter().zip(test_data.iter()) + { + assert_eq!(copied_key, expected_key); + assert_eq!(copied_value, expected_value); + } + } +} diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index b00bfd3c9a..05da99d682 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -154,6 +154,15 @@ impl DatabaseArguments { self } + /// Sets the database page size value. + pub const fn with_geometry_page_size(mut self, page_size: Option) -> Self { + if let Some(size) = page_size { + self.geometry.page_size = Some(reth_libmdbx::PageSize::Set(size)); + } + + self + } + /// Sets the database sync mode. pub const fn with_sync_mode(mut self, sync_mode: Option) -> Self { if let Some(sync_mode) = sync_mode { diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index a630672384..cd37f50d61 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -159,6 +159,14 @@ pub mod test_utils { (temp_dir, path) } + /// Create `rocksdb` path for testing + #[track_caller] + pub fn create_test_rocksdb_dir() -> (TempDir, PathBuf) { + let temp_dir = TempDir::with_prefix("reth-test-rocksdb-").expect(ERROR_TEMPDIR); + let path = temp_dir.path().to_path_buf(); + (temp_dir, path) + } + /// Get a temporary directory path to use for the database pub fn tempdir_path() -> PathBuf { let builder = tempfile::Builder::new().prefix("reth-test-").rand_bytes(8).tempdir(); @@ -185,12 +193,13 @@ pub mod test_utils { #[track_caller] pub fn create_test_rw_db_with_path>(path: P) -> Arc> { let path = path.as_ref().to_path_buf(); + let emsg = format!("{ERROR_DB_CREATION}: {path:?}"); let db = init_db( path.as_path(), DatabaseArguments::new(ClientVersion::default()) .with_max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)), ) - .expect(ERROR_DB_CREATION); + .expect(&emsg); Arc::new(TempDatabase::new(db, path)) } @@ -201,8 +210,9 @@ pub mod test_utils { .with_max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)); let path = tempdir_path(); + let emsg = format!("{ERROR_DB_CREATION}: {path:?}"); { - init_db(path.as_path(), args.clone()).expect(ERROR_DB_CREATION); + init_db(path.as_path(), args.clone()).expect(&emsg); } let db = open_db_read_only(path.as_path(), args).expect(ERROR_DB_OPEN); Arc::new(TempDatabase::new(db, path)) diff --git a/crates/storage/db/src/static_file/masks.rs b/crates/storage/db/src/static_file/masks.rs index 17833e7ee2..c1d9860278 100644 --- a/crates/storage/db/src/static_file/masks.rs +++ b/crates/storage/db/src/static_file/masks.rs @@ -3,7 +3,7 @@ use crate::{ static_file::mask::{ColumnSelectorOne, ColumnSelectorTwo}, HeaderTerminalDifficulties, }; -use alloy_primitives::BlockHash; +use 
alloy_primitives::{Address, BlockHash}; use reth_db_api::table::Table; // HEADER MASKS @@ -33,12 +33,18 @@ add_static_file_mask! { // RECEIPT MASKS add_static_file_mask! { - #[doc = "Mask for selecting a single receipt from Receipts static file segment"] + #[doc = "Mask for selecting a single receipt from `Receipts` static file segment"] ReceiptMask, R, 0b1 } // TRANSACTION MASKS add_static_file_mask! { - #[doc = "Mask for selecting a single transaction from Transactions static file segment"] + #[doc = "Mask for selecting a single transaction from `Transactions` static file segment"] TransactionMask, T, 0b1 } + +// TRANSACTION SENDER MASKS +add_static_file_mask! { + #[doc = "Mask for selecting a single transaction sender from `TransactionSenders` static file segment"] + TransactionSenderMask, Address, 0b1 +} diff --git a/crates/storage/db/src/static_file/mod.rs b/crates/storage/db/src/static_file/mod.rs index f2c9ce45fb..6292020dd5 100644 --- a/crates/storage/db/src/static_file/mod.rs +++ b/crates/storage/db/src/static_file/mod.rs @@ -1,9 +1,6 @@ //! reth's static file database table import and access -use std::{ - collections::{hash_map::Entry, HashMap}, - path::Path, -}; +use std::{collections::HashMap, path::Path}; mod cursor; pub use cursor::StaticFileCursor; @@ -17,12 +14,11 @@ pub use masks::*; use reth_static_file_types::{SegmentHeader, SegmentRangeInclusive, StaticFileSegment}; /// Alias type for a map of [`StaticFileSegment`] and sorted lists of existing static file ranges. -type SortedStaticFiles = - HashMap)>>; +type SortedStaticFiles = HashMap>; /// Given the `static_files` directory path, it returns a list over the existing `static_files` /// organized by [`StaticFileSegment`]. Each segment has a sorted list of block ranges and -/// transaction ranges as presented in the file configuration. +/// segment headers as presented in the file configuration. pub fn iter_static_files(path: &Path) -> Result { if !path.exists() { reth_fs_util::create_dir_all(path).map_err(|err| NippyJarError::Custom(err.to_string()))?; @@ -39,25 +35,18 @@ pub fn iter_static_files(path: &Path) -> Result::load(&entry.path())?; - let (block_range, tx_range) = - (jar.user_header().block_range().copied(), jar.user_header().tx_range().copied()); - - if let Some(block_range) = block_range { - match static_files.entry(segment) { - Entry::Occupied(mut entry) => { - entry.get_mut().push((block_range, tx_range)); - } - Entry::Vacant(entry) => { - entry.insert(vec![(block_range, tx_range)]); - } - } + if let Some(block_range) = jar.user_header().block_range() { + static_files + .entry(segment) + .and_modify(|headers| headers.push((block_range, *jar.user_header()))) + .or_insert_with(|| vec![(block_range, *jar.user_header())]); } } } for range_list in static_files.values_mut() { // Sort by block end range. - range_list.sort_by_key(|(r, _)| r.end()); + range_list.sort_by_key(|(block_range, _)| block_range.end()); } Ok(static_files) diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index ed5230c18f..4aadd6a8b1 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -103,7 +103,11 @@ pub enum ProviderError { /// Static File is not found at specified path. #[cfg(feature = "std")] #[error("not able to find {_0} static file at {_1:?}")] - MissingStaticFilePath(StaticFileSegment, std::path::PathBuf), + MissingStaticFileSegmentPath(StaticFileSegment, std::path::PathBuf), + /// Static File is not found at specified path. 
+ #[cfg(feature = "std")] + #[error("not able to find static file at {_0:?}")] + MissingStaticFilePath(std::path::PathBuf), /// Static File is not found for requested block. #[error("not able to find {_0} static file for block number {_1}")] MissingStaticFileBlock(StaticFileSegment, BlockNumber), diff --git a/crates/storage/libmdbx-rs/README.md b/crates/storage/libmdbx-rs/README.md index df115ee69a..f6989efa41 100644 --- a/crates/storage/libmdbx-rs/README.md +++ b/crates/storage/libmdbx-rs/README.md @@ -1,7 +1,7 @@ # libmdbx-rs -Rust bindings for [libmdbx](https://libmdbx.dqdkfa.ru). +Rust bindings for [libmdbx](https://github.com/erthink/libmdbx). Forked from an earlier Apache licenced version of the `libmdbx-rs` crate, before it changed licence to GPL. NOTE: Most of the repo came from [lmdb-rs bindings](https://github.com/mozilla/lmdb-rs). @@ -9,7 +9,7 @@ NOTE: Most of the repo came from [lmdb-rs bindings](https://github.com/mozilla/l ## Updating the libmdbx Version To update the libmdbx version you must clone it and copy the `dist/` folder in `mdbx-sys/`. -Make sure to follow the [building steps](https://libmdbx.dqdkfa.ru/usage.html#getting). +Make sure to follow the [building steps](https://github.com/erthink/libmdbx#building). ```bash # clone libmdbx to a repository outside at specific tag diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index 4f6b4df000..1eaa33a313 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -17,7 +17,7 @@ use serde::{Deserialize, Serialize}; use std::{ error::Error as StdError, fs::File, - io::Read, + io::{Read, Write}, ops::Range, path::{Path, PathBuf}, }; @@ -200,6 +200,9 @@ impl NippyJar { // Read [`Self`] located at the data file. let config_path = path.with_extension(CONFIG_FILE_EXTENSION); let config_file = File::open(&config_path) + .inspect_err(|e| { + warn!( ?path, %e, "Failed to load static file jar"); + }) .map_err(|err| reth_fs_util::FsPathError::open(err, config_path))?; let mut obj = Self::load_from_reader(config_file)?; @@ -212,6 +215,11 @@ impl NippyJar { Ok(bincode::deserialize_from(reader)?) } + /// Serializes an instance of [`Self`] to a [`Write`] type. + pub fn save_to_writer(&self, writer: W) -> Result<(), NippyJarError> { + Ok(bincode::serialize_into(writer, self)?) + } + /// Returns the path for the data file pub fn data_path(&self) -> &Path { self.path.as_ref() @@ -255,9 +263,7 @@ impl NippyJar { /// Writes all necessary configuration to file. fn freeze_config(&self) -> Result<(), NippyJarError> { - Ok(reth_fs_util::atomic_write_file(&self.config_path(), |file| { - bincode::serialize_into(file, &self) - })?) + Ok(reth_fs_util::atomic_write_file(&self.config_path(), |file| self.save_to_writer(file))?) 
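A small usage sketch of the new `save_to_writer` helper (assuming `jar` is a `NippyJar` in scope and the surrounding function returns a `Result`): the configuration can be serialized to any `std::io::Write` sink, such as an in-memory buffer, while `freeze_config` keeps using it through the atomic file write above.

let mut buf: Vec<u8> = Vec::new();
jar.save_to_writer(&mut buf)?;
assert!(!buf.is_empty()); // bincode-encoded jar config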
} } @@ -1035,10 +1041,10 @@ mod tests { assert_eq!(writer.rows(), 0); assert_eq!(writer.max_row_size(), 0); assert_eq!(File::open(writer.data_path()).unwrap().metadata().unwrap().len() as usize, 0); - // Only the byte that indicates how many bytes per offset should be left + // Offset size byte (1) + final offset (8) = 9 bytes assert_eq!( File::open(writer.offsets_path()).unwrap().metadata().unwrap().len() as usize, - 1 + 9 ); writer.commit().unwrap(); assert!(!writer.is_dirty()); diff --git a/crates/storage/nippy-jar/src/writer.rs b/crates/storage/nippy-jar/src/writer.rs index cf899791ee..2225b9d6f4 100644 --- a/crates/storage/nippy-jar/src/writer.rs +++ b/crates/storage/nippy-jar/src/writer.rs @@ -292,7 +292,12 @@ impl NippyJarWriter { // If all rows are to be pruned if new_num_offsets <= 1 { // <= 1 because the one offset would actually be the expected file data size - self.offsets_file.get_mut().set_len(1)?; + // + // When no rows remain, keep the offset size byte and the final offset (data + // file size = 0). This maintains the same structure as when + // a file is initially created. + // See `NippyJarWriter::create_or_open_files` for the initial file format. + self.offsets_file.get_mut().set_len(1 + OFFSET_SIZE_BYTES as u64)?; self.data_file.get_mut().set_len(0)?; } else { // Calculate the new length for the on-disk offset list diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index e8599a8970..22520d82f8 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -62,6 +62,10 @@ tokio = { workspace = true, features = ["sync"], optional = true } # parallel utils rayon.workspace = true +[target.'cfg(unix)'.dependencies] +# rocksdb: jemalloc is recommended production workload +rocksdb = { workspace = true, features = ["jemalloc"], optional = true } + [dev-dependencies] reth-db = { workspace = true, features = ["test-utils"] } reth-primitives-traits = { workspace = true, features = ["arbitrary", "test-utils"] } @@ -70,6 +74,7 @@ reth-trie = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true reth-ethereum-engine-primitives.workspace = true reth-ethereum-primitives.workspace = true +reth-tracing.workspace = true revm-database-interface.workspace = true revm-state.workspace = true @@ -81,6 +86,7 @@ rand.workspace = true tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } [features] +rocksdb = ["dep:rocksdb"] test-utils = [ "reth-db/test-utils", "reth-nippy-jar/test-utils", diff --git a/crates/storage/provider/src/either_writer.rs b/crates/storage/provider/src/either_writer.rs new file mode 100644 index 0000000000..80ca829e6e --- /dev/null +++ b/crates/storage/provider/src/either_writer.rs @@ -0,0 +1,993 @@ +//! Generic reader and writer abstractions for interacting with either database tables or static +//! files. 
+ +use std::{marker::PhantomData, ops::Range}; + +#[cfg(all(unix, feature = "rocksdb"))] +use crate::providers::rocksdb::RocksDBBatch; +use crate::{ + providers::{StaticFileProvider, StaticFileProviderRWRefMut}, + StaticFileProviderFactory, +}; +use alloy_primitives::{map::HashMap, Address, BlockNumber, TxHash, TxNumber}; +use reth_db::{ + cursor::DbCursorRO, + static_file::TransactionSenderMask, + table::Value, + transaction::{CursorMutTy, CursorTy, DbTx, DbTxMut}, +}; +use reth_db_api::{ + cursor::DbCursorRW, + models::{storage_sharded_key::StorageShardedKey, ShardedKey}, + tables, + tables::BlockNumberList, +}; +use reth_errors::ProviderError; +use reth_node_types::NodePrimitives; +use reth_primitives_traits::ReceiptTy; +use reth_static_file_types::StaticFileSegment; +use reth_storage_api::{DBProvider, NodePrimitivesProvider, StorageSettingsCache}; +use reth_storage_errors::provider::ProviderResult; +use strum::{Display, EnumIs}; + +/// Type alias for [`EitherReader`] constructors. +type EitherReaderTy<'a, P, T> = + EitherReader<'a, CursorTy<
<P as DBProvider>
::Tx, T>,
<P as NodePrimitivesProvider>
::Primitives>; + +/// Type alias for [`EitherWriter`] constructors. +type EitherWriterTy<'a, P, T> = EitherWriter< + 'a, + CursorMutTy<
<P as DBProvider>
::Tx, T>, +
<P as NodePrimitivesProvider>
::Primitives, +>; + +// Helper types so constructors stay exported even when RocksDB feature is off. +// Historical data tables use a write-only RocksDB batch (no read-your-writes needed). +#[cfg(all(unix, feature = "rocksdb"))] +type RocksBatchArg<'a> = crate::providers::rocksdb::RocksDBBatch<'a>; +#[cfg(not(all(unix, feature = "rocksdb")))] +type RocksBatchArg<'a> = (); + +#[cfg(all(unix, feature = "rocksdb"))] +type RocksTxRefArg<'a> = &'a crate::providers::rocksdb::RocksTx<'a>; +#[cfg(not(all(unix, feature = "rocksdb")))] +type RocksTxRefArg<'a> = (); + +/// Represents a destination for writing data, either to database, static files, or `RocksDB`. +#[derive(Debug, Display)] +pub enum EitherWriter<'a, CURSOR, N> { + /// Write to database table via cursor + Database(CURSOR), + /// Write to static file + StaticFile(StaticFileProviderRWRefMut<'a, N>), + /// Write to `RocksDB` using a write-only batch (historical tables). + #[cfg(all(unix, feature = "rocksdb"))] + RocksDB(RocksDBBatch<'a>), +} + +impl<'a> EitherWriter<'a, (), ()> { + /// Creates a new [`EitherWriter`] for receipts based on storage settings and prune modes. + pub fn new_receipts
<P>
( + provider: &'a P, + block_number: BlockNumber, + ) -> ProviderResult>>> + where + P: DBProvider + NodePrimitivesProvider + StorageSettingsCache + StaticFileProviderFactory, + P::Tx: DbTxMut, + ReceiptTy: Value, + { + if Self::receipts_destination(provider).is_static_file() { + Ok(EitherWriter::StaticFile( + provider.get_static_file_writer(block_number, StaticFileSegment::Receipts)?, + )) + } else { + Ok(EitherWriter::Database( + provider.tx_ref().cursor_write::>>()?, + )) + } + } + + /// Returns the destination for writing receipts. + /// + /// The rules are as follows: + /// - If the node should not always write receipts to static files, and any receipt pruning is + /// enabled, write to the database. + /// - If the node should always write receipts to static files, but receipt log filter pruning + /// is enabled, write to the database. + /// - Otherwise, write to static files. + pub fn receipts_destination( + provider: &P, + ) -> EitherWriterDestination { + let receipts_in_static_files = provider.cached_storage_settings().receipts_in_static_files; + let prune_modes = provider.prune_modes_ref(); + + if !receipts_in_static_files && prune_modes.has_receipts_pruning() || + // TODO: support writing receipts to static files with log filter pruning enabled + receipts_in_static_files && !prune_modes.receipts_log_filter.is_empty() + { + EitherWriterDestination::Database + } else { + EitherWriterDestination::StaticFile + } + } + + /// Creates a new [`EitherWriter`] for senders based on storage settings. + pub fn new_senders
<P>
( + provider: &'a P, + block_number: BlockNumber, + ) -> ProviderResult> + where + P: DBProvider + NodePrimitivesProvider + StorageSettingsCache + StaticFileProviderFactory, + P::Tx: DbTxMut, + { + if EitherWriterDestination::senders(provider).is_static_file() { + Ok(EitherWriter::StaticFile( + provider + .get_static_file_writer(block_number, StaticFileSegment::TransactionSenders)?, + )) + } else { + Ok(EitherWriter::Database( + provider.tx_ref().cursor_write::()?, + )) + } + } + + /// Creates a new [`EitherWriter`] for storages history based on storage settings. + pub fn new_storages_history
<P>
( + provider: &P, + _rocksdb_batch: RocksBatchArg<'a>, + ) -> ProviderResult> + where + P: DBProvider + NodePrimitivesProvider + StorageSettingsCache, + P::Tx: DbTxMut, + { + #[cfg(all(unix, feature = "rocksdb"))] + if provider.cached_storage_settings().storages_history_in_rocksdb { + return Ok(EitherWriter::RocksDB(_rocksdb_batch)); + } + + Ok(EitherWriter::Database(provider.tx_ref().cursor_write::()?)) + } + + /// Creates a new [`EitherWriter`] for transaction hash numbers based on storage settings. + pub fn new_transaction_hash_numbers
<P>
( + provider: &P, + _rocksdb_batch: RocksBatchArg<'a>, + ) -> ProviderResult> + where + P: DBProvider + NodePrimitivesProvider + StorageSettingsCache, + P::Tx: DbTxMut, + { + #[cfg(all(unix, feature = "rocksdb"))] + if provider.cached_storage_settings().transaction_hash_numbers_in_rocksdb { + return Ok(EitherWriter::RocksDB(_rocksdb_batch)); + } + + Ok(EitherWriter::Database( + provider.tx_ref().cursor_write::()?, + )) + } + + /// Creates a new [`EitherWriter`] for account history based on storage settings. + pub fn new_accounts_history
<P>
( + provider: &P, + _rocksdb_batch: RocksBatchArg<'a>, + ) -> ProviderResult> + where + P: DBProvider + NodePrimitivesProvider + StorageSettingsCache, + P::Tx: DbTxMut, + { + #[cfg(all(unix, feature = "rocksdb"))] + if provider.cached_storage_settings().account_history_in_rocksdb { + return Ok(EitherWriter::RocksDB(_rocksdb_batch)); + } + + Ok(EitherWriter::Database(provider.tx_ref().cursor_write::()?)) + } +} + +impl<'a, CURSOR, N: NodePrimitives> EitherWriter<'a, CURSOR, N> { + /// Extracts the raw `RocksDB` write batch from this writer, if it contains one. + /// + /// Returns `Some(WriteBatchWithTransaction)` for [`Self::RocksDB`] variant, + /// `None` for other variants. + /// + /// This is used to defer `RocksDB` commits to the provider level, ensuring all + /// storage commits (MDBX, static files, `RocksDB`) happen atomically in a single place. + #[cfg(all(unix, feature = "rocksdb"))] + pub fn into_raw_rocksdb_batch(self) -> Option> { + match self { + Self::Database(_) | Self::StaticFile(_) => None, + Self::RocksDB(batch) => Some(batch.into_inner()), + } + } + + /// Increment the block number. + /// + /// Relevant only for [`Self::StaticFile`]. It is a no-op for [`Self::Database`]. + pub fn increment_block(&mut self, expected_block_number: BlockNumber) -> ProviderResult<()> { + match self { + Self::Database(_) => Ok(()), + Self::StaticFile(writer) => writer.increment_block(expected_block_number), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(_) => Err(ProviderError::UnsupportedProvider), + } + } + + /// Ensures that the writer is positioned at the specified block number. + /// + /// If the writer is positioned at a greater block number than the specified one, the writer + /// will NOT be unwound and the error will be returned. + /// + /// Relevant only for [`Self::StaticFile`]. It is a no-op for [`Self::Database`]. + pub fn ensure_at_block(&mut self, block_number: BlockNumber) -> ProviderResult<()> { + match self { + Self::Database(_) => Ok(()), + Self::StaticFile(writer) => writer.ensure_at_block(block_number), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(_) => Err(ProviderError::UnsupportedProvider), + } + } +} + +impl<'a, CURSOR, N: NodePrimitives> EitherWriter<'a, CURSOR, N> +where + N::Receipt: Value, + CURSOR: DbCursorRW>, +{ + /// Append a transaction receipt. 
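Putting the constructors above together, a sketch of a typical receipt write path; `provider`, `block_number`, `tx_num`, and `receipt` are assumed to be in scope and to satisfy the bounds on `new_receipts`:

// Where receipts land depends on storage settings and prune configuration.
if EitherWriter::receipts_destination(&provider).is_static_file() {
    // receipts go to the `Receipts` static-file segment
} else {
    // receipt pruning (or a receipts log filter) is active, so receipts stay in the database
}

let mut writer = EitherWriter::new_receipts(&provider, block_number)?;
writer.increment_block(block_number)?; // no-op for the database variant
writer.append_receipt(tx_num, &receipt)?;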
+ pub fn append_receipt(&mut self, tx_num: TxNumber, receipt: &N::Receipt) -> ProviderResult<()> { + match self { + Self::Database(cursor) => Ok(cursor.append(tx_num, receipt)?), + Self::StaticFile(writer) => writer.append_receipt(tx_num, receipt), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(_) => Err(ProviderError::UnsupportedProvider), + } + } +} + +impl<'a, CURSOR, N: NodePrimitives> EitherWriter<'a, CURSOR, N> +where + CURSOR: DbCursorRW, +{ + /// Append a transaction sender to the destination + pub fn append_sender(&mut self, tx_num: TxNumber, sender: &Address) -> ProviderResult<()> { + match self { + Self::Database(cursor) => Ok(cursor.append(tx_num, sender)?), + Self::StaticFile(writer) => writer.append_transaction_sender(tx_num, sender), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(_) => Err(ProviderError::UnsupportedProvider), + } + } + + /// Append transaction senders to the destination + pub fn append_senders(&mut self, senders: I) -> ProviderResult<()> + where + I: Iterator, + { + match self { + Self::Database(cursor) => { + for (tx_num, sender) in senders { + cursor.append(tx_num, &sender)?; + } + Ok(()) + } + Self::StaticFile(writer) => writer.append_transaction_senders(senders), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(_) => Err(ProviderError::UnsupportedProvider), + } + } + + /// Removes all transaction senders above the given transaction number, and stops at the given + /// block number. + pub fn prune_senders( + &mut self, + unwind_tx_from: TxNumber, + block: BlockNumber, + ) -> ProviderResult<()> + where + CURSOR: DbCursorRO, + { + match self { + Self::Database(cursor) => { + let mut walker = cursor.walk_range(unwind_tx_from..)?; + while walker.next().transpose()?.is_some() { + walker.delete_current()?; + } + } + Self::StaticFile(writer) => { + let static_file_transaction_sender_num = writer + .reader() + .get_highest_static_file_tx(StaticFileSegment::TransactionSenders); + + let to_delete = static_file_transaction_sender_num + .map(|static_num| (static_num + 1).saturating_sub(unwind_tx_from)) + .unwrap_or_default(); + + writer.prune_transaction_senders(to_delete, block)?; + } + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(_) => return Err(ProviderError::UnsupportedProvider), + } + + Ok(()) + } +} + +impl<'a, CURSOR, N: NodePrimitives> EitherWriter<'a, CURSOR, N> +where + CURSOR: DbCursorRW + DbCursorRO, +{ + /// Puts a transaction hash number mapping. + /// + /// When `append_only` is true, uses `cursor.append()` which is significantly faster + /// but requires entries to be inserted in order and the table to be empty. + /// When false, uses `cursor.insert()` which handles arbitrary insertion order. + pub fn put_transaction_hash_number( + &mut self, + hash: TxHash, + tx_num: TxNumber, + append_only: bool, + ) -> ProviderResult<()> { + match self { + Self::Database(cursor) => { + if append_only { + Ok(cursor.append(hash, &tx_num)?) + } else { + Ok(cursor.insert(hash, &tx_num)?) + } + } + Self::StaticFile(_) => Err(ProviderError::UnsupportedProvider), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(batch) => batch.put::(hash, &tx_num), + } + } + + /// Deletes a transaction hash number mapping. 
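A short sketch of the `append_only` fast path described above; `writer` is assumed to wrap the `TransactionHashNumbers` table and `hashes_in_key_order` is a made-up iterator of `(TxHash, TxNumber)` pairs sorted by hash going into an otherwise empty table:

for (hash, tx_num) in hashes_in_key_order {
    // `append` is only valid for in-order writes into an empty table; pass `false` to fall
    // back to `insert`, which accepts arbitrary insertion order.
    writer.put_transaction_hash_number(hash, tx_num, true)?;
}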
+ pub fn delete_transaction_hash_number(&mut self, hash: TxHash) -> ProviderResult<()> { + match self { + Self::Database(cursor) => { + if cursor.seek_exact(hash)?.is_some() { + cursor.delete_current()?; + } + Ok(()) + } + Self::StaticFile(_) => Err(ProviderError::UnsupportedProvider), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(batch) => batch.delete::(hash), + } + } +} + +impl<'a, CURSOR, N: NodePrimitives> EitherWriter<'a, CURSOR, N> +where + CURSOR: DbCursorRW + DbCursorRO, +{ + /// Puts a storage history entry. + pub fn put_storage_history( + &mut self, + key: StorageShardedKey, + value: &BlockNumberList, + ) -> ProviderResult<()> { + match self { + Self::Database(cursor) => Ok(cursor.upsert(key, value)?), + Self::StaticFile(_) => Err(ProviderError::UnsupportedProvider), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(batch) => batch.put::(key, value), + } + } + + /// Deletes a storage history entry. + pub fn delete_storage_history(&mut self, key: StorageShardedKey) -> ProviderResult<()> { + match self { + Self::Database(cursor) => { + if cursor.seek_exact(key)?.is_some() { + cursor.delete_current()?; + } + Ok(()) + } + Self::StaticFile(_) => Err(ProviderError::UnsupportedProvider), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(batch) => batch.delete::(key), + } + } +} + +impl<'a, CURSOR, N: NodePrimitives> EitherWriter<'a, CURSOR, N> +where + CURSOR: DbCursorRW + DbCursorRO, +{ + /// Puts an account history entry. + pub fn put_account_history( + &mut self, + key: ShardedKey
<Address>
, + value: &BlockNumberList, + ) -> ProviderResult<()> { + match self { + Self::Database(cursor) => Ok(cursor.upsert(key, value)?), + Self::StaticFile(_) => Err(ProviderError::UnsupportedProvider), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(batch) => batch.put::(key, value), + } + } + + /// Deletes an account history entry. + pub fn delete_account_history(&mut self, key: ShardedKey
) -> ProviderResult<()> { + match self { + Self::Database(cursor) => { + if cursor.seek_exact(key)?.is_some() { + cursor.delete_current()?; + } + Ok(()) + } + Self::StaticFile(_) => Err(ProviderError::UnsupportedProvider), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(batch) => batch.delete::(key), + } + } +} + +/// Represents a source for reading data, either from database, static files, or `RocksDB`. +#[derive(Debug, Display)] +pub enum EitherReader<'a, CURSOR, N> { + /// Read from database table via cursor + Database(CURSOR, PhantomData<&'a ()>), + /// Read from static file + StaticFile(StaticFileProvider, PhantomData<&'a ()>), + /// Read from `RocksDB` transaction + #[cfg(all(unix, feature = "rocksdb"))] + RocksDB(&'a crate::providers::rocksdb::RocksTx<'a>), +} + +impl<'a> EitherReader<'a, (), ()> { + /// Creates a new [`EitherReader`] for senders based on storage settings. + pub fn new_senders
<P>
( + provider: &P, + ) -> ProviderResult> + where + P: DBProvider + NodePrimitivesProvider + StorageSettingsCache + StaticFileProviderFactory, + P::Tx: DbTx, + { + if EitherWriterDestination::senders(provider).is_static_file() { + Ok(EitherReader::StaticFile(provider.static_file_provider(), PhantomData)) + } else { + Ok(EitherReader::Database( + provider.tx_ref().cursor_read::()?, + PhantomData, + )) + } + } + + /// Creates a new [`EitherReader`] for storages history based on storage settings. + pub fn new_storages_history
<P>
( + provider: &P, + _rocksdb_tx: RocksTxRefArg<'a>, + ) -> ProviderResult> + where + P: DBProvider + NodePrimitivesProvider + StorageSettingsCache, + P::Tx: DbTx, + { + #[cfg(all(unix, feature = "rocksdb"))] + if provider.cached_storage_settings().storages_history_in_rocksdb { + return Ok(EitherReader::RocksDB(_rocksdb_tx)); + } + + Ok(EitherReader::Database( + provider.tx_ref().cursor_read::()?, + PhantomData, + )) + } + + /// Creates a new [`EitherReader`] for transaction hash numbers based on storage settings. + pub fn new_transaction_hash_numbers
<P>
( + provider: &P, + _rocksdb_tx: RocksTxRefArg<'a>, + ) -> ProviderResult> + where + P: DBProvider + NodePrimitivesProvider + StorageSettingsCache, + P::Tx: DbTx, + { + #[cfg(all(unix, feature = "rocksdb"))] + if provider.cached_storage_settings().transaction_hash_numbers_in_rocksdb { + return Ok(EitherReader::RocksDB(_rocksdb_tx)); + } + + Ok(EitherReader::Database( + provider.tx_ref().cursor_read::()?, + PhantomData, + )) + } + + /// Creates a new [`EitherReader`] for account history based on storage settings. + pub fn new_accounts_history
<P>
( + provider: &P, + _rocksdb_tx: RocksTxRefArg<'a>, + ) -> ProviderResult> + where + P: DBProvider + NodePrimitivesProvider + StorageSettingsCache, + P::Tx: DbTx, + { + #[cfg(all(unix, feature = "rocksdb"))] + if provider.cached_storage_settings().account_history_in_rocksdb { + return Ok(EitherReader::RocksDB(_rocksdb_tx)); + } + + Ok(EitherReader::Database( + provider.tx_ref().cursor_read::()?, + PhantomData, + )) + } +} + +impl EitherReader<'_, CURSOR, N> +where + CURSOR: DbCursorRO, +{ + /// Fetches the senders for a range of transactions. + pub fn senders_by_tx_range( + &mut self, + range: Range, + ) -> ProviderResult> { + match self { + Self::Database(cursor, _) => cursor + .walk_range(range)? + .map(|result| result.map_err(ProviderError::from)) + .collect::>>(), + Self::StaticFile(provider, _) => range + .clone() + .zip(provider.fetch_range_iter( + StaticFileSegment::TransactionSenders, + range, + |cursor, number| cursor.get_one::(number.into()), + )?) + .filter_map(|(tx_num, sender)| { + let result = sender.transpose()?; + Some(result.map(|sender| (tx_num, sender))) + }) + .collect::>>(), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(_) => Err(ProviderError::UnsupportedProvider), + } + } +} + +impl EitherReader<'_, CURSOR, N> +where + CURSOR: DbCursorRO, +{ + /// Gets a transaction number by its hash. + pub fn get_transaction_hash_number( + &mut self, + hash: TxHash, + ) -> ProviderResult> { + match self { + Self::Database(cursor, _) => Ok(cursor.seek_exact(hash)?.map(|(_, v)| v)), + Self::StaticFile(_, _) => Err(ProviderError::UnsupportedProvider), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(tx) => tx.get::(hash), + } + } +} + +impl EitherReader<'_, CURSOR, N> +where + CURSOR: DbCursorRO, +{ + /// Gets a storage history entry. + pub fn get_storage_history( + &mut self, + key: StorageShardedKey, + ) -> ProviderResult> { + match self { + Self::Database(cursor, _) => Ok(cursor.seek_exact(key)?.map(|(_, v)| v)), + Self::StaticFile(_, _) => Err(ProviderError::UnsupportedProvider), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(tx) => tx.get::(key), + } + } +} + +impl EitherReader<'_, CURSOR, N> +where + CURSOR: DbCursorRO, +{ + /// Gets an account history entry. + pub fn get_account_history( + &mut self, + key: ShardedKey
, + ) -> ProviderResult> { + match self { + Self::Database(cursor, _) => Ok(cursor.seek_exact(key)?.map(|(_, v)| v)), + Self::StaticFile(_, _) => Err(ProviderError::UnsupportedProvider), + #[cfg(all(unix, feature = "rocksdb"))] + Self::RocksDB(tx) => tx.get::(key), + } + } +} + +/// Destination for writing data. +#[derive(Debug, EnumIs)] +pub enum EitherWriterDestination { + /// Write to database table + Database, + /// Write to static file + StaticFile, + /// Write to `RocksDB` + RocksDB, +} + +impl EitherWriterDestination { + /// Returns the destination for writing senders based on storage settings. + pub fn senders
(provider: &P) -> Self + where + P: StorageSettingsCache, + { + // Write senders to static files only if they're explicitly enabled + if provider.cached_storage_settings().transaction_senders_in_static_files { + Self::StaticFile + } else { + Self::Database + } + } +} + +#[cfg(test)] +mod tests { + use crate::test_utils::create_test_provider_factory; + + use super::*; + use alloy_primitives::Address; + use reth_storage_api::{DatabaseProviderFactory, StorageSettings}; + + #[test] + fn test_reader_senders_by_tx_range() { + let factory = create_test_provider_factory(); + + // Insert senders only from 1 to 4, but we will query from 0 to 5. + let senders = [ + (1, Address::random()), + (2, Address::random()), + (3, Address::random()), + (4, Address::random()), + ]; + + for transaction_senders_in_static_files in [false, true] { + factory.set_storage_settings_cache( + StorageSettings::legacy() + .with_transaction_senders_in_static_files(transaction_senders_in_static_files), + ); + + let provider = factory.database_provider_rw().unwrap(); + let mut writer = EitherWriter::new_senders(&provider, 0).unwrap(); + if transaction_senders_in_static_files { + assert!(matches!(writer, EitherWriter::StaticFile(_))); + } else { + assert!(matches!(writer, EitherWriter::Database(_))); + } + + writer.increment_block(0).unwrap(); + writer.append_senders(senders.iter().copied()).unwrap(); + drop(writer); + provider.commit().unwrap(); + + let provider = factory.database_provider_ro().unwrap(); + let mut reader = EitherReader::new_senders(&provider).unwrap(); + if transaction_senders_in_static_files { + assert!(matches!(reader, EitherReader::StaticFile(_, _))); + } else { + assert!(matches!(reader, EitherReader::Database(_, _))); + } + + assert_eq!( + reader.senders_by_tx_range(0..6).unwrap(), + senders.iter().copied().collect::>(), + "{reader}" + ); + } + } +} + +#[cfg(all(test, unix, feature = "rocksdb"))] +mod rocksdb_tests { + use super::*; + use crate::{ + providers::rocksdb::{RocksDBBuilder, RocksDBProvider}, + test_utils::create_test_provider_factory, + RocksDBProviderFactory, + }; + use alloy_primitives::{Address, B256}; + use reth_db_api::{ + models::{storage_sharded_key::StorageShardedKey, IntegerList, ShardedKey}, + tables, + }; + use reth_storage_api::{DatabaseProviderFactory, StorageSettings}; + use tempfile::TempDir; + + fn create_rocksdb_provider() -> (TempDir, RocksDBProvider) { + let temp_dir = TempDir::new().unwrap(); + let provider = RocksDBBuilder::new(temp_dir.path()) + .with_table::() + .with_table::() + .with_table::() + .build() + .unwrap(); + (temp_dir, provider) + } + + /// Test that `EitherWriter::new_transaction_hash_numbers` creates a `RocksDB` writer + /// when the storage setting is enabled, and that put operations followed by commit + /// persist the data to `RocksDB`. 
+ #[test] + fn test_either_writer_transaction_hash_numbers_with_rocksdb() { + let factory = create_test_provider_factory(); + + // Enable RocksDB for transaction hash numbers + factory.set_storage_settings_cache( + StorageSettings::legacy().with_transaction_hash_numbers_in_rocksdb(true), + ); + + let hash1 = B256::from([1u8; 32]); + let hash2 = B256::from([2u8; 32]); + let tx_num1 = 100u64; + let tx_num2 = 200u64; + + // Get the RocksDB batch from the provider + let rocksdb = factory.rocksdb_provider(); + let batch = rocksdb.batch(); + + // Create EitherWriter with RocksDB + let provider = factory.database_provider_rw().unwrap(); + let mut writer = EitherWriter::new_transaction_hash_numbers(&provider, batch).unwrap(); + + // Verify we got a RocksDB writer + assert!(matches!(writer, EitherWriter::RocksDB(_))); + + // Write transaction hash numbers (append_only=false since we're using RocksDB) + writer.put_transaction_hash_number(hash1, tx_num1, false).unwrap(); + writer.put_transaction_hash_number(hash2, tx_num2, false).unwrap(); + + // Extract the batch and register with provider for commit + if let Some(batch) = writer.into_raw_rocksdb_batch() { + provider.set_pending_rocksdb_batch(batch); + } + + // Commit via provider - this commits RocksDB batch too + provider.commit().unwrap(); + + // Verify data was written to RocksDB + let rocksdb = factory.rocksdb_provider(); + assert_eq!(rocksdb.get::(hash1).unwrap(), Some(tx_num1)); + assert_eq!(rocksdb.get::(hash2).unwrap(), Some(tx_num2)); + } + + /// Test that `EitherWriter::delete_transaction_hash_number` works with `RocksDB`. + #[test] + fn test_either_writer_delete_transaction_hash_number_with_rocksdb() { + let factory = create_test_provider_factory(); + + // Enable RocksDB for transaction hash numbers + factory.set_storage_settings_cache( + StorageSettings::legacy().with_transaction_hash_numbers_in_rocksdb(true), + ); + + let hash = B256::from([1u8; 32]); + let tx_num = 100u64; + + // First, write a value directly to RocksDB + let rocksdb = factory.rocksdb_provider(); + rocksdb.put::(hash, &tx_num).unwrap(); + assert_eq!(rocksdb.get::(hash).unwrap(), Some(tx_num)); + + // Now delete using EitherWriter + let batch = rocksdb.batch(); + let provider = factory.database_provider_rw().unwrap(); + let mut writer = EitherWriter::new_transaction_hash_numbers(&provider, batch).unwrap(); + writer.delete_transaction_hash_number(hash).unwrap(); + + // Extract the batch and commit via provider + if let Some(batch) = writer.into_raw_rocksdb_batch() { + provider.set_pending_rocksdb_batch(batch); + } + provider.commit().unwrap(); + + // Verify deletion + let rocksdb = factory.rocksdb_provider(); + assert_eq!(rocksdb.get::(hash).unwrap(), None); + } + + #[test] + fn test_rocksdb_batch_transaction_hash_numbers() { + let (_temp_dir, provider) = create_rocksdb_provider(); + + let hash1 = B256::from([1u8; 32]); + let hash2 = B256::from([2u8; 32]); + let tx_num1 = 100u64; + let tx_num2 = 200u64; + + // Write via RocksDBBatch (same as EitherWriter::RocksDB would use internally) + let mut batch = provider.batch(); + batch.put::(hash1, &tx_num1).unwrap(); + batch.put::(hash2, &tx_num2).unwrap(); + batch.commit().unwrap(); + + // Read via RocksTx (same as EitherReader::RocksDB would use internally) + let tx = provider.tx(); + assert_eq!(tx.get::(hash1).unwrap(), Some(tx_num1)); + assert_eq!(tx.get::(hash2).unwrap(), Some(tx_num2)); + + // Test missing key + let missing_hash = B256::from([99u8; 32]); + assert_eq!(tx.get::(missing_hash).unwrap(), None); + } + + 
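Review note: the two tests above exercise the intended production flow for RocksDB-backed tables end to end. As a reference, here is a minimal sketch of that flow outside of a test, using only the calls introduced in this diff (`rocksdb_provider()`, `batch()`, `EitherWriter::new_transaction_hash_numbers`, `into_raw_rocksdb_batch()`, `set_pending_rocksdb_batch()`, `commit()`). Generic bounds are simplified and imports elided, so treat it as illustrative rather than as a helper proposed by this PR.

```rust
// Sketch only: bounds simplified, imports elided (all items come from this crate/diff).
fn write_tx_hash_numbers<N: ProviderNodeTypes>(
    factory: &ProviderFactory<N>,
    entries: &[(TxHash, TxNumber)],
) -> ProviderResult<()> {
    // Stage writes in a RocksDB batch; nothing is visible until the provider commits.
    let batch = factory.rocksdb_provider().batch();
    let provider = factory.provider_rw()?;
    let mut writer = EitherWriter::new_transaction_hash_numbers(&provider, batch)?;
    for (hash, number) in entries {
        // append_only = false: RocksDB puts are plain key/value upserts.
        writer.put_transaction_hash_number(*hash, *number, false)?;
    }
    // Hand the staged batch back to the provider so MDBX, static files and RocksDB
    // are all committed together in `provider.commit()`.
    if let Some(batch) = writer.into_raw_rocksdb_batch() {
        provider.set_pending_rocksdb_batch(batch);
    }
    provider.commit()?;
    Ok(())
}
```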
#[test] + fn test_rocksdb_batch_storage_history() { + let (_temp_dir, provider) = create_rocksdb_provider(); + + let address = Address::random(); + let storage_key = B256::from([1u8; 32]); + let key = StorageShardedKey::new(address, storage_key, 1000); + let value = IntegerList::new([1, 5, 10, 50]).unwrap(); + + // Write via RocksDBBatch + let mut batch = provider.batch(); + batch.put::(key.clone(), &value).unwrap(); + batch.commit().unwrap(); + + // Read via RocksTx + let tx = provider.tx(); + let result = tx.get::(key).unwrap(); + assert_eq!(result, Some(value)); + + // Test missing key + let missing_key = StorageShardedKey::new(Address::random(), B256::ZERO, 0); + assert_eq!(tx.get::(missing_key).unwrap(), None); + } + + #[test] + fn test_rocksdb_batch_account_history() { + let (_temp_dir, provider) = create_rocksdb_provider(); + + let address = Address::random(); + let key = ShardedKey::new(address, 1000); + let value = IntegerList::new([1, 10, 100, 500]).unwrap(); + + // Write via RocksDBBatch + let mut batch = provider.batch(); + batch.put::(key.clone(), &value).unwrap(); + batch.commit().unwrap(); + + // Read via RocksTx + let tx = provider.tx(); + let result = tx.get::(key).unwrap(); + assert_eq!(result, Some(value)); + + // Test missing key + let missing_key = ShardedKey::new(Address::random(), 0); + assert_eq!(tx.get::(missing_key).unwrap(), None); + } + + #[test] + fn test_rocksdb_batch_delete_transaction_hash_number() { + let (_temp_dir, provider) = create_rocksdb_provider(); + + let hash = B256::from([1u8; 32]); + let tx_num = 100u64; + + // First write + provider.put::(hash, &tx_num).unwrap(); + assert_eq!(provider.get::(hash).unwrap(), Some(tx_num)); + + // Delete via RocksDBBatch + let mut batch = provider.batch(); + batch.delete::(hash).unwrap(); + batch.commit().unwrap(); + + // Verify deletion + assert_eq!(provider.get::(hash).unwrap(), None); + } + + #[test] + fn test_rocksdb_batch_delete_storage_history() { + let (_temp_dir, provider) = create_rocksdb_provider(); + + let address = Address::random(); + let storage_key = B256::from([1u8; 32]); + let key = StorageShardedKey::new(address, storage_key, 1000); + let value = IntegerList::new([1, 5, 10]).unwrap(); + + // First write + provider.put::(key.clone(), &value).unwrap(); + assert!(provider.get::(key.clone()).unwrap().is_some()); + + // Delete via RocksDBBatch + let mut batch = provider.batch(); + batch.delete::(key.clone()).unwrap(); + batch.commit().unwrap(); + + // Verify deletion + assert_eq!(provider.get::(key).unwrap(), None); + } + + #[test] + fn test_rocksdb_batch_delete_account_history() { + let (_temp_dir, provider) = create_rocksdb_provider(); + + let address = Address::random(); + let key = ShardedKey::new(address, 1000); + let value = IntegerList::new([1, 10, 100]).unwrap(); + + // First write + provider.put::(key.clone(), &value).unwrap(); + assert!(provider.get::(key.clone()).unwrap().is_some()); + + // Delete via RocksDBBatch + let mut batch = provider.batch(); + batch.delete::(key.clone()).unwrap(); + batch.commit().unwrap(); + + // Verify deletion + assert_eq!(provider.get::(key).unwrap(), None); + } + + /// Test that `RocksDB` commits happen at `provider.commit()` level, not at writer level. + /// + /// This ensures all storage commits (MDBX, static files, `RocksDB`) happen atomically + /// in a single place, making it easier to reason about commit ordering and consistency. 
+ #[test] + fn test_rocksdb_commits_at_provider_level() { + let factory = create_test_provider_factory(); + + // Enable RocksDB for transaction hash numbers + factory.set_storage_settings_cache( + StorageSettings::legacy().with_transaction_hash_numbers_in_rocksdb(true), + ); + + let hash1 = B256::from([1u8; 32]); + let hash2 = B256::from([2u8; 32]); + let tx_num1 = 100u64; + let tx_num2 = 200u64; + + // Get the RocksDB batch from the provider + let rocksdb = factory.rocksdb_provider(); + let batch = rocksdb.batch(); + + // Create provider and EitherWriter + let provider = factory.database_provider_rw().unwrap(); + let mut writer = EitherWriter::new_transaction_hash_numbers(&provider, batch).unwrap(); + + // Write transaction hash numbers (append_only=false since we're using RocksDB) + writer.put_transaction_hash_number(hash1, tx_num1, false).unwrap(); + writer.put_transaction_hash_number(hash2, tx_num2, false).unwrap(); + + // Extract the raw batch from the writer and register it with the provider + let raw_batch = writer.into_raw_rocksdb_batch(); + if let Some(batch) = raw_batch { + provider.set_pending_rocksdb_batch(batch); + } + + // Data should NOT be visible yet (batch not committed) + let rocksdb = factory.rocksdb_provider(); + assert_eq!( + rocksdb.get::(hash1).unwrap(), + None, + "Data should not be visible before provider.commit()" + ); + + // Commit the provider - this should commit both MDBX and RocksDB + provider.commit().unwrap(); + + // Now data should be visible in RocksDB + let rocksdb = factory.rocksdb_provider(); + assert_eq!( + rocksdb.get::(hash1).unwrap(), + Some(tx_num1), + "Data should be visible after provider.commit()" + ); + assert_eq!( + rocksdb.get::(hash2).unwrap(), + Some(tx_num2), + "Data should be visible after provider.commit()" + ); + } +} diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 70822c604b..84e1a4f8b4 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -21,35 +21,34 @@ pub mod providers; pub use providers::{ DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW, HistoricalStateProvider, HistoricalStateProviderRef, LatestStateProvider, LatestStateProviderRef, ProviderFactory, - StaticFileAccess, StaticFileWriter, + StaticFileAccess, StaticFileProviderBuilder, StaticFileWriter, }; +pub mod changesets_utils; + #[cfg(any(test, feature = "test-utils"))] /// Common test helpers for mocking the Provider. pub mod test_utils; -/// Re-export provider error. -pub use reth_storage_errors::provider::{ProviderError, ProviderResult}; -pub use reth_static_file_types as static_file; -pub use static_file::StaticFileSegment; - -pub use reth_execution_types::*; - -pub mod changesets_utils; - -/// Re-export `OriginalValuesKnown` -pub use revm_database::states::OriginalValuesKnown; - -/// Writer standalone type. -pub mod writer; +pub mod either_writer; +pub use either_writer::*; pub use reth_chain_state::{ CanonStateNotification, CanonStateNotificationSender, CanonStateNotificationStream, CanonStateNotifications, CanonStateSubscriptions, }; - +pub use reth_execution_types::*; +/// Re-export `OriginalValuesKnown` +pub use revm_database::states::OriginalValuesKnown; // reexport traits to avoid breaking changes -pub use reth_storage_api::{HistoryWriter, StatsReader}; +pub use reth_static_file_types as static_file; +pub use reth_storage_api::{ + HistoryWriter, MetadataProvider, MetadataWriter, StatsReader, StorageSettings, + StorageSettingsCache, +}; +/// Re-export provider error. 
+pub use reth_storage_errors::provider::{ProviderError, ProviderResult}; +pub use static_file::StaticFileSegment; pub(crate) fn to_range>(bounds: R) -> std::ops::Range { let start = match bounds.start_bound() { diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 9dbbed9e88..65946537b3 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -1,12 +1,15 @@ use crate::{ - providers::{ConsistentProvider, ProviderNodeTypes, StaticFileProvider}, + providers::{ + ConsistentProvider, ProviderNodeTypes, RocksDBProvider, StaticFileProvider, + StaticFileProviderRWRefMut, + }, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, CanonChainTracker, CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProviderFactory, HashedPostStateProvider, HeaderProvider, ProviderError, ProviderFactory, PruneCheckpointReader, - ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, - StateProviderFactory, StateReader, StaticFileProviderFactory, TransactionVariant, - TransactionsProvider, TrieReader, + ReceiptProvider, ReceiptProviderIdExt, RocksDBProviderFactory, StageCheckpointReader, + StateProviderBox, StateProviderFactory, StateReader, StaticFileProviderFactory, + TransactionVariant, TransactionsProvider, TrieReader, }; use alloy_consensus::transaction::TransactionMeta; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; @@ -23,6 +26,7 @@ use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; use reth_primitives_traits::{Account, RecoveredBlock, SealedHeader, StorageEntry}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; +use reth_static_file_types::StaticFileSegment; use reth_storage_api::{BlockBodyIndicesProvider, NodePrimitivesProvider, StorageChangeSetReader}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdatesSorted, HashedPostState, KeccakKeyHasher}; @@ -163,6 +167,25 @@ impl StaticFileProviderFactory for BlockchainProvider { fn static_file_provider(&self) -> StaticFileProvider { self.database.static_file_provider() } + + fn get_static_file_writer( + &self, + block: BlockNumber, + segment: StaticFileSegment, + ) -> ProviderResult> { + self.database.get_static_file_writer(block, segment) + } +} + +impl RocksDBProviderFactory for BlockchainProvider { + fn rocksdb_provider(&self) -> RocksDBProvider { + self.database.rocksdb_provider() + } + + #[cfg(all(unix, feature = "rocksdb"))] + fn set_pending_rocksdb_batch(&self, _batch: rocksdb::WriteBatchWithTransaction) { + unimplemented!("BlockchainProvider wraps ProviderFactory - use DatabaseProvider::set_pending_rocksdb_batch instead") + } } impl HeaderProvider for BlockchainProvider { @@ -355,10 +378,6 @@ impl TransactionsProvider for BlockchainProvider { self.consistent_provider()?.transaction_by_hash_with_meta(tx_hash) } - fn transaction_block(&self, id: TxNumber) -> ProviderResult> { - self.consistent_provider()?.transaction_block(id) - } - fn transactions_by_block( &self, id: BlockHashOrNumber, @@ -856,7 +875,7 @@ mod tests { // Insert blocks into the database for block in &database_blocks { provider_rw.insert_block( - block.clone().try_recover().expect("failed to seal block with senders"), + 
&block.clone().try_recover().expect("failed to seal block with senders"), )?; } @@ -986,9 +1005,10 @@ mod tests { let provider_rw = factory.provider_rw()?; for block in database_blocks { provider_rw.insert_block( - block.clone().try_recover().expect("failed to seal block with senders"), + &block.clone().try_recover().expect("failed to seal block with senders"), )?; } + provider_rw.commit()?; // Create a new provider @@ -1084,7 +1104,7 @@ mod tests { let provider_rw = factory.provider_rw()?; for block in database_blocks { provider_rw.insert_block( - block.clone().try_recover().expect("failed to seal block with senders"), + &block.clone().try_recover().expect("failed to seal block with senders"), )?; } provider_rw.commit()?; @@ -1296,12 +1316,12 @@ mod tests { // Generate a random block to initialize the blockchain provider. let mut test_block_builder = TestBlockBuilder::eth(); - let block_1 = test_block_builder.generate_random_block(0, B256::ZERO); + let block_1 = test_block_builder.generate_random_block(0, B256::ZERO).try_recover()?; let block_hash_1 = block_1.hash(); // Insert and commit the block. let provider_rw = factory.provider_rw()?; - provider_rw.insert_block(block_1)?; + provider_rw.insert_block(&block_1)?; provider_rw.commit()?; let provider = BlockchainProvider::new(factory)?; @@ -1312,7 +1332,7 @@ mod tests { let mut rx_2 = provider.subscribe_to_canonical_state(); // Send and receive commit notifications. - let block_2 = test_block_builder.generate_random_block(1, block_hash_1); + let block_2 = test_block_builder.generate_random_block(1, block_hash_1).try_recover()?; let chain = Chain::new(vec![block_2], ExecutionOutcome::default(), None); let commit = CanonStateNotification::Commit { new: Arc::new(chain.clone()) }; in_memory_state.notify_canon_state(commit.clone()); @@ -1321,8 +1341,8 @@ mod tests { assert_eq!(notification_2, Ok(commit.clone())); // Send and receive re-org notifications. 
- let block_3 = test_block_builder.generate_random_block(1, block_hash_1); - let block_4 = test_block_builder.generate_random_block(2, block_3.hash()); + let block_3 = test_block_builder.generate_random_block(1, block_hash_1).try_recover()?; + let block_4 = test_block_builder.generate_random_block(2, block_3.hash()).try_recover()?; let new_chain = Chain::new(vec![block_3, block_4], ExecutionOutcome::default(), None); let re_org = CanonStateNotification::Reorg { old: Arc::new(chain), new: Arc::new(new_chain) }; @@ -2397,7 +2417,7 @@ mod tests { ), ( ONE, - transaction_block, + block_by_transaction_id, |block: &SealedBlock, tx_num: TxNumber, _: B256, _: &Vec>| ( tx_num, Some(block.number) diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 67113fc5c0..fe859c01b0 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -1,10 +1,10 @@ use super::{DatabaseProviderRO, ProviderFactory, ProviderNodeTypes}; use crate::{ - providers::StaticFileProvider, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, - BlockReader, BlockReaderIdExt, BlockSource, ChainSpecProvider, ChangeSetReader, HeaderProvider, - ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, - StageCheckpointReader, StateReader, StaticFileProviderFactory, TransactionVariant, - TransactionsProvider, TrieReader, + providers::{StaticFileProvider, StaticFileProviderRWRefMut}, + AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, + BlockSource, ChainSpecProvider, ChangeSetReader, HeaderProvider, ProviderError, + PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, + StateReader, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, TrieReader, }; use alloy_consensus::{transaction::TransactionMeta, BlockHeader}; use alloy_eips::{ @@ -23,9 +23,10 @@ use reth_node_types::{BlockTy, HeaderTy, ReceiptTy, TxTy}; use reth_primitives_traits::{Account, BlockBody, RecoveredBlock, SealedHeader, StorageEntry}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; +use reth_static_file_types::StaticFileSegment; use reth_storage_api::{ BlockBodyIndicesProvider, DatabaseProviderFactory, NodePrimitivesProvider, StateProvider, - StorageChangeSetReader, TryIntoHistoricalStateProvider, + StateProviderBox, StorageChangeSetReader, TryIntoHistoricalStateProvider, }; use reth_storage_errors::provider::ProviderResult; use reth_trie::updates::TrieUpdatesSorted; @@ -595,9 +596,9 @@ impl ConsistentProvider { pub(crate) fn into_state_provider_at_block_hash( self, block_hash: BlockHash, - ) -> ProviderResult> { + ) -> ProviderResult { let Self { storage_provider, head_block, .. } = self; - let into_history_at_block_hash = |block_hash| -> ProviderResult> { + let into_history_at_block_hash = |block_hash| -> ProviderResult { let block_number = storage_provider .block_number(block_hash)? 
.ok_or(ProviderError::BlockHashNotFound(block_hash))?; @@ -642,6 +643,14 @@ impl StaticFileProviderFactory for ConsistentProvider { fn static_file_provider(&self) -> StaticFileProvider { self.storage_provider.static_file_provider() } + + fn get_static_file_writer( + &self, + block: BlockNumber, + segment: StaticFileSegment, + ) -> ProviderResult> { + self.storage_provider.get_static_file_writer(block, segment) + } } impl HeaderProvider for ConsistentProvider { @@ -964,14 +973,6 @@ impl TransactionsProvider for ConsistentProvider { self.storage_provider.transaction_by_hash_with_meta(tx_hash) } - fn transaction_block(&self, id: TxNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.transaction_block(id), - |_, _, block_state| Ok(Some(block_state.block_ref().recovered_block().number())), - ) - } - fn transactions_by_block( &self, id: BlockHashOrNumber, @@ -1045,7 +1046,7 @@ impl ReceiptProvider for ConsistentProvider { id.into(), |provider| provider.receipt(id), |tx_index, _, block_state| { - Ok(block_state.executed_block_receipts().get(tx_index).cloned()) + Ok(block_state.executed_block_receipts_ref().get(tx_index).cloned()) }, ) } @@ -1054,7 +1055,7 @@ impl ReceiptProvider for ConsistentProvider { for block_state in self.head_block.iter().flat_map(|b| b.chain()) { let executed_block = block_state.block_ref(); let block = executed_block.recovered_block(); - let receipts = block_state.executed_block_receipts(); + let receipts = block_state.executed_block_receipts_ref(); // assuming 1:1 correspondence between transactions and receipts debug_assert_eq!( @@ -1565,7 +1566,7 @@ mod tests { let provider_rw = factory.provider_rw()?; for block in database_blocks { provider_rw.insert_block( - block.clone().try_recover().expect("failed to seal block with senders"), + &block.clone().try_recover().expect("failed to seal block with senders"), )?; } provider_rw.commit()?; @@ -1676,7 +1677,7 @@ mod tests { let provider_rw = factory.provider_rw()?; for block in database_blocks { provider_rw.insert_block( - block.clone().try_recover().expect("failed to seal block with senders"), + &block.clone().try_recover().expect("failed to seal block with senders"), )?; } provider_rw.commit()?; diff --git a/crates/storage/provider/src/providers/consistent_view.rs b/crates/storage/provider/src/providers/consistent_view.rs index d8404af541..653b3659cb 100644 --- a/crates/storage/provider/src/providers/consistent_view.rs +++ b/crates/storage/provider/src/providers/consistent_view.rs @@ -103,7 +103,7 @@ mod tests { // insert the block let provider_rw = provider_factory.provider_rw().unwrap(); - provider_rw.insert_block(genesis_block).unwrap(); + provider_rw.insert_block(&genesis_block).unwrap(); provider_rw.commit().unwrap(); // create a consistent view provider and check that a ro provider can be made @@ -121,7 +121,7 @@ mod tests { // insert the block let provider_rw = provider_factory.provider_rw().unwrap(); - provider_rw.insert_block(recovered_block).unwrap(); + provider_rw.insert_block(&recovered_block).unwrap(); provider_rw.commit().unwrap(); // ensure successful creation of a read-only provider, based on this new db state. 
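Review note on the `get_static_file_writer` method added to `StaticFileProviderFactory` in this file (and in `blockchain_provider.rs` / `database/mod.rs`): any provider implementing the factory trait can now hand out a positioned static-file writer directly. A minimal, hedged sketch of a caller, with bounds simplified and imports elided; the `increment_block` usage mirrors the receipts write path later in this diff and is not code proposed by the PR.

```rust
// Sketch only: obtain a receipts writer positioned at `block` and open its block row.
fn bump_receipts_segment<P: StaticFileProviderFactory>(
    provider: &P,
    block: u64,
) -> ProviderResult<()> {
    let mut writer = provider.get_static_file_writer(block, StaticFileSegment::Receipts)?;
    // A block must be opened on the segment before its receipts can be appended.
    writer.increment_block(block)?;
    Ok(())
}
```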
@@ -136,7 +136,7 @@ mod tests { // insert the block let provider_rw = provider_factory.provider_rw().unwrap(); - provider_rw.insert_block(recovered_block).unwrap(); + provider_rw.insert_block(&recovered_block).unwrap(); provider_rw.commit().unwrap(); // check that creation of a read-only provider still works @@ -156,7 +156,7 @@ mod tests { // insert the block let provider_rw = provider_factory.provider_rw().unwrap(); - provider_rw.insert_block(genesis_block).unwrap(); + provider_rw.insert_block(&genesis_block).unwrap(); provider_rw.commit().unwrap(); // create a consistent view provider and check that a ro provider can be made @@ -174,7 +174,7 @@ mod tests { // insert the block let provider_rw = provider_factory.provider_rw().unwrap(); - provider_rw.insert_block(recovered_block).unwrap(); + provider_rw.insert_block(&recovered_block).unwrap(); provider_rw.commit().unwrap(); // create a second consistent view provider and check that a ro provider can be made @@ -208,7 +208,7 @@ mod tests { // reinsert the block at the same height, but with a different hash let provider_rw = provider_factory.provider_rw().unwrap(); - provider_rw.insert_block(recovered_block).unwrap(); + provider_rw.insert_block(&recovered_block).unwrap(); provider_rw.commit().unwrap(); // ensure unsuccessful creation of a read-only provider, based on this new db state. diff --git a/crates/storage/provider/src/providers/database/builder.rs b/crates/storage/provider/src/providers/database/builder.rs index 4bc8569432..8c4816bb6e 100644 --- a/crates/storage/provider/src/providers/database/builder.rs +++ b/crates/storage/provider/src/providers/database/builder.rs @@ -3,13 +3,17 @@ //! This also includes general purpose staging types that provide builder style functions that lead //! up to the intended build target. 
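Context for the builder changes below: `ProviderFactoryBuilder` gains a fourth staging step for the RocksDB provider, and `build_provider_factory` becomes fallible because storage settings are now loaded at init time. A rough sketch of the resulting read-only open path, mirroring the updated `open_read_only` body; node type, trait bounds, and imports are simplified, and this is not a helper proposed by the PR.

```rust
// Sketch only: mirrors the new `open_read_only` wiring; imports elided.
fn open_factory<N: NodeTypesForProvider>(
    chainspec: Arc<N::ChainSpec>,
    cfg: ReadOnlyConfig,
) -> eyre::Result<ProviderFactory<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>> {
    Ok(ProviderFactoryBuilder::<N>::default()
        .db(Arc::new(open_db_read_only(cfg.db_dir, cfg.db_args)?))
        .chainspec(chainspec)
        .static_file(StaticFileProvider::read_only(cfg.static_files_dir, cfg.watch_static_files)?)
        // New staging step: a RocksDB provider is required before building.
        .rocksdb_provider(
            RocksDBProvider::builder(&cfg.rocksdb_dir).with_default_tables().build()?,
        )
        // Now fallible: storage settings are read from the database at init time.
        .build_provider_factory()?)
}
```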
-use crate::{providers::StaticFileProvider, ProviderFactory}; +use crate::{ + providers::{NodeTypesForProvider, RocksDBProvider, StaticFileProvider}, + ProviderFactory, +}; use reth_db::{ mdbx::{DatabaseArguments, MaxReadTransactionDuration}, open_db_read_only, DatabaseEnv, }; use reth_db_api::{database_metrics::DatabaseMetrics, Database}; use reth_node_types::{NodeTypes, NodeTypesWithDBAdapter}; +use reth_storage_errors::provider::ProviderResult; use std::{ marker::PhantomData, path::{Path, PathBuf}, @@ -48,10 +52,9 @@ impl ProviderFactoryBuilder { /// /// ```no_run /// use reth_chainspec::MAINNET; - /// use reth_node_types::NodeTypes; - /// use reth_provider::providers::ProviderFactoryBuilder; + /// use reth_provider::providers::{NodeTypesForProvider, ProviderFactoryBuilder}; /// - /// fn demo>() { + /// fn demo>() { /// let provider_factory = ProviderFactoryBuilder::::default() /// .open_read_only(MAINNET.clone(), "datadir") /// .unwrap(); @@ -64,11 +67,9 @@ impl ProviderFactoryBuilder { /// /// ```no_run /// use reth_chainspec::MAINNET; - /// use reth_node_types::NodeTypes; + /// use reth_provider::providers::{NodeTypesForProvider, ProviderFactoryBuilder, ReadOnlyConfig}; /// - /// use reth_provider::providers::{ProviderFactoryBuilder, ReadOnlyConfig}; - /// - /// fn demo>() { + /// fn demo>() { /// let provider_factory = ProviderFactoryBuilder::::default() /// .open_read_only(MAINNET.clone(), ReadOnlyConfig::from_datadir("datadir").no_watch()) /// .unwrap(); @@ -84,11 +85,9 @@ impl ProviderFactoryBuilder { /// /// ```no_run /// use reth_chainspec::MAINNET; - /// use reth_node_types::NodeTypes; + /// use reth_provider::providers::{NodeTypesForProvider, ProviderFactoryBuilder, ReadOnlyConfig}; /// - /// use reth_provider::providers::{ProviderFactoryBuilder, ReadOnlyConfig}; - /// - /// fn demo>() { + /// fn demo>() { /// let provider_factory = ProviderFactoryBuilder::::default() /// .open_read_only( /// MAINNET.clone(), @@ -103,15 +102,16 @@ impl ProviderFactoryBuilder { config: impl Into, ) -> eyre::Result>>> where - N: NodeTypes, + N: NodeTypesForProvider, { - let ReadOnlyConfig { db_dir, db_args, static_files_dir, watch_static_files } = + let ReadOnlyConfig { db_dir, db_args, static_files_dir, rocksdb_dir, watch_static_files } = config.into(); - Ok(self - .db(Arc::new(open_db_read_only(db_dir, db_args)?)) + self.db(Arc::new(open_db_read_only(db_dir, db_args)?)) .chainspec(chainspec) .static_file(StaticFileProvider::read_only(static_files_dir, watch_static_files)?) - .build_provider_factory()) + .rocksdb_provider(RocksDBProvider::builder(&rocksdb_dir).with_default_tables().build()?) + .build_provider_factory() + .map_err(Into::into) } } @@ -121,7 +121,7 @@ impl Default for ProviderFactoryBuilder { } } -/// Settings for how to open the database and static files. +/// Settings for how to open the database, static files, and `RocksDB`. /// /// The default derivation from a path assumes the path is the datadir: /// [`ReadOnlyConfig::from_datadir`] @@ -133,6 +133,8 @@ pub struct ReadOnlyConfig { pub db_args: DatabaseArguments, /// The path to the static file dir pub static_files_dir: PathBuf, + /// The path to the `RocksDB` directory + pub rocksdb_dir: PathBuf, /// Whether the static files should be watched for changes. 
pub watch_static_files: bool, } @@ -145,6 +147,7 @@ impl ReadOnlyConfig { /// ```text /// -`datadir` /// |__db + /// |__rocksdb /// |__static_files /// ``` /// @@ -152,7 +155,13 @@ impl ReadOnlyConfig { /// [`StaticFileProvider::read_only`] pub fn from_datadir(datadir: impl AsRef) -> Self { let datadir = datadir.as_ref(); - Self::from_dirs(datadir.join("db"), datadir.join("static_files")) + Self { + db_dir: datadir.join("db"), + db_args: Default::default(), + static_files_dir: datadir.join("static_files"), + rocksdb_dir: datadir.join("rocksdb"), + watch_static_files: true, + } } /// Disables long-lived read transaction safety guarantees. @@ -170,7 +179,8 @@ impl ReadOnlyConfig { /// /// ```text /// - db - /// -static_files + /// - rocksdb + /// - static_files /// ``` /// /// By default this watches the static file directory for changes, see also @@ -181,13 +191,10 @@ impl ReadOnlyConfig { /// If the path does not exist pub fn from_db_dir(db_dir: impl AsRef) -> Self { let db_dir = db_dir.as_ref(); - let static_files_dir = std::fs::canonicalize(db_dir) - .unwrap() - .parent() - .unwrap() - .to_path_buf() - .join("static_files"); - Self::from_dirs(db_dir, static_files_dir) + let datadir = std::fs::canonicalize(db_dir).unwrap().parent().unwrap().to_path_buf(); + let static_files_dir = datadir.join("static_files"); + let rocksdb_dir = datadir.join("rocksdb"); + Self::from_dirs(db_dir, static_files_dir, rocksdb_dir) } /// Creates the config for the given paths. @@ -195,11 +202,16 @@ impl ReadOnlyConfig { /// /// By default this watches the static file directory for changes, see also /// [`StaticFileProvider::read_only`] - pub fn from_dirs(db_dir: impl AsRef, static_files_dir: impl AsRef) -> Self { + pub fn from_dirs( + db_dir: impl AsRef, + static_files_dir: impl AsRef, + rocksdb_dir: impl AsRef, + ) -> Self { Self { - static_files_dir: static_files_dir.as_ref().into(), db_dir: db_dir.as_ref().into(), db_args: Default::default(), + static_files_dir: static_files_dir.as_ref().into(), + rocksdb_dir: rocksdb_dir.as_ref().into(), watch_static_files: true, } } @@ -318,14 +330,46 @@ impl TypesAnd3 { } } -impl TypesAnd3, StaticFileProvider> +impl TypesAnd3, StaticFileProvider> where N: NodeTypes, +{ + /// Configures the `RocksDB` provider. + pub fn rocksdb_provider( + self, + rocksdb_provider: RocksDBProvider, + ) -> TypesAnd4, StaticFileProvider, RocksDBProvider> { + TypesAnd4::new(self.val_1, self.val_2, self.val_3, rocksdb_provider) + } +} + +/// This is staging type that contains the configured types and _four_ values. +#[derive(Debug)] +pub struct TypesAnd4 { + _types: PhantomData, + val_1: Val1, + val_2: Val2, + val_3: Val3, + val_4: Val4, +} + +impl TypesAnd4 { + /// Creates a new instance with the given types and four values. + pub fn new(val_1: Val1, val_2: Val2, val_3: Val3, val_4: Val4) -> Self { + Self { _types: Default::default(), val_1, val_2, val_3, val_4 } + } +} + +impl TypesAnd4, StaticFileProvider, RocksDBProvider> +where + N: NodeTypesForProvider, DB: Database + DatabaseMetrics + Clone + Unpin + 'static, { /// Creates the [`ProviderFactory`]. 
- pub fn build_provider_factory(self) -> ProviderFactory> { - let Self { _types, val_1, val_2, val_3 } = self; - ProviderFactory::new(val_1, val_2, val_3) + pub fn build_provider_factory( + self, + ) -> ProviderResult>> { + let Self { _types, val_1, val_2, val_3, val_4 } = self; + ProviderFactory::new(val_1, val_2, val_3, val_4) } } diff --git a/crates/storage/provider/src/providers/database/metrics.rs b/crates/storage/provider/src/providers/database/metrics.rs index 4daac3dfdd..22d861c674 100644 --- a/crates/storage/provider/src/providers/database/metrics.rs +++ b/crates/storage/provider/src/providers/database/metrics.rs @@ -45,6 +45,8 @@ pub(crate) enum Action { InsertBlockBodyIndices, InsertTransactionBlocks, GetNextTxNum, + InsertTransactionSenders, + InsertTransactionHashNumbers, } /// Database provider metrics @@ -70,6 +72,10 @@ struct DatabaseProviderMetrics { insert_tx_blocks: Histogram, /// Duration of get next tx num get_next_tx_num: Histogram, + /// Duration of insert transaction senders + insert_transaction_senders: Histogram, + /// Duration of insert transaction hash numbers + insert_transaction_hash_numbers: Histogram, } impl DatabaseProviderMetrics { @@ -85,6 +91,10 @@ impl DatabaseProviderMetrics { Action::InsertBlockBodyIndices => self.insert_block_body_indices.record(duration), Action::InsertTransactionBlocks => self.insert_tx_blocks.record(duration), Action::GetNextTxNum => self.get_next_tx_num.record(duration), + Action::InsertTransactionSenders => self.insert_transaction_senders.record(duration), + Action::InsertTransactionHashNumbers => { + self.insert_transaction_hash_numbers.record(duration) + } } } } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 5d3b5280cd..d76da72fd6 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -1,29 +1,35 @@ use crate::{ - providers::{state::latest::LatestStateProvider, StaticFileProvider}, + providers::{ + state::latest::LatestStateProvider, NodeTypesForProvider, RocksDBProvider, + StaticFileProvider, StaticFileProviderRWRefMut, + }, to_range, traits::{BlockSource, ReceiptProvider}, BlockHashReader, BlockNumReader, BlockReader, ChainSpecProvider, DatabaseProviderFactory, - HashedPostStateProvider, HeaderProvider, HeaderSyncGapProvider, ProviderError, - PruneCheckpointReader, StageCheckpointReader, StateProviderBox, StaticFileProviderFactory, + EitherWriterDestination, HashedPostStateProvider, HeaderProvider, HeaderSyncGapProvider, + MetadataProvider, ProviderError, PruneCheckpointReader, RocksDBProviderFactory, + StageCheckpointReader, StateProviderBox, StaticFileProviderFactory, StaticFileWriter, TransactionVariant, TransactionsProvider, }; use alloy_consensus::transaction::TransactionMeta; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256}; use core::fmt; +use parking_lot::RwLock; use reth_chainspec::ChainInfo; use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv}; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_errors::{RethError, RethResult}; use reth_node_types::{ - BlockTy, HeaderTy, NodeTypes, NodeTypesWithDB, NodeTypesWithDBAdapter, ReceiptTy, TxTy, + BlockTy, HeaderTy, NodeTypesWithDB, NodeTypesWithDBAdapter, ReceiptTy, TxTy, }; use reth_primitives_traits::{RecoveredBlock, SealedHeader}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use 
reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; use reth_storage_api::{ - BlockBodyIndicesProvider, NodePrimitivesProvider, TryIntoHistoricalStateProvider, + BlockBodyIndicesProvider, NodePrimitivesProvider, StorageSettings, StorageSettingsCache, + TryIntoHistoricalStateProvider, }; use reth_storage_errors::provider::ProviderResult; use reth_trie::HashedPostState; @@ -64,39 +70,59 @@ pub struct ProviderFactory { prune_modes: PruneModes, /// The node storage handler. storage: Arc, + /// Storage configuration settings for this node + storage_settings: Arc>, + /// `RocksDB` provider + rocksdb_provider: RocksDBProvider, } -impl ProviderFactory>> { +impl ProviderFactory>> { /// Instantiates the builder for this type pub fn builder() -> ProviderFactoryBuilder { ProviderFactoryBuilder::default() } } -impl ProviderFactory { +impl ProviderFactory { /// Create new database provider factory. pub fn new( db: N::DB, chain_spec: Arc, static_file_provider: StaticFileProvider, - ) -> Self { - Self { + rocksdb_provider: RocksDBProvider, + ) -> ProviderResult { + // Load storage settings from database at init time. Creates a temporary provider + // to read persisted settings, falling back to legacy defaults if none exist. + // + // Both factory and all providers it creates should share these cached settings. + let legacy_settings = StorageSettings::legacy(); + let storage_settings = DatabaseProvider::<_, N>::new( + db.tx()?, + chain_spec.clone(), + static_file_provider.clone(), + Default::default(), + Default::default(), + Arc::new(RwLock::new(legacy_settings)), + rocksdb_provider.clone(), + ) + .storage_settings()? + .unwrap_or(legacy_settings); + + Ok(Self { db, chain_spec, static_file_provider, prune_modes: PruneModes::default(), storage: Default::default(), - } - } - - /// Enables metrics on the static file provider. - pub fn with_static_files_metrics(mut self) -> Self { - self.static_file_provider = self.static_file_provider.with_metrics(); - self + storage_settings: Arc::new(RwLock::new(storage_settings)), + rocksdb_provider, + }) } +} +impl ProviderFactory { /// Sets the pruning configuration for an existing [`ProviderFactory`]. - pub const fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { + pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { self.prune_modes = prune_modes; self } @@ -113,7 +139,28 @@ impl ProviderFactory { } } -impl>> ProviderFactory { +impl StorageSettingsCache for ProviderFactory { + fn cached_storage_settings(&self) -> StorageSettings { + *self.storage_settings.read() + } + + fn set_storage_settings_cache(&self, settings: StorageSettings) { + *self.storage_settings.write() = settings; + } +} + +impl RocksDBProviderFactory for ProviderFactory { + fn rocksdb_provider(&self) -> RocksDBProvider { + self.rocksdb_provider.clone() + } + + #[cfg(all(unix, feature = "rocksdb"))] + fn set_pending_rocksdb_batch(&self, _batch: rocksdb::WriteBatchWithTransaction) { + unimplemented!("ProviderFactory is a factory, not a provider - use DatabaseProvider::set_pending_rocksdb_batch instead") + } +} + +impl>> ProviderFactory { /// Create new database provider by passing a path. [`ProviderFactory`] will own the database /// instance. 
pub fn new_with_database_path>( @@ -121,14 +168,15 @@ impl>> ProviderFactory { chain_spec: Arc, args: DatabaseArguments, static_file_provider: StaticFileProvider, + rocksdb_provider: RocksDBProvider, ) -> RethResult { - Ok(Self { - db: Arc::new(init_db(path, args).map_err(RethError::msg)?), + Self::new( + Arc::new(init_db(path, args).map_err(RethError::msg)?), chain_spec, static_file_provider, - prune_modes: PruneModes::default(), - storage: Default::default(), - }) + rocksdb_provider, + ) + .map_err(RethError::Provider) } } @@ -147,6 +195,8 @@ impl ProviderFactory { self.static_file_provider.clone(), self.prune_modes.clone(), self.storage.clone(), + self.storage_settings.clone(), + self.rocksdb_provider.clone(), )) } @@ -162,6 +212,8 @@ impl ProviderFactory { self.static_file_provider.clone(), self.prune_modes.clone(), self.storage.clone(), + self.storage_settings.clone(), + self.rocksdb_provider.clone(), ))) } @@ -219,6 +271,14 @@ impl StaticFileProviderFactory for ProviderFactory { fn static_file_provider(&self) -> StaticFileProvider { self.static_file_provider.clone() } + + fn get_static_file_writer( + &self, + block: BlockNumber, + segment: StaticFileSegment, + ) -> ProviderResult> { + self.static_file_provider.get_writer(block, segment) + } } impl HeaderSyncGapProvider for ProviderFactory { @@ -403,10 +463,6 @@ impl TransactionsProvider for ProviderFactory { self.provider()?.transaction_by_hash_with_meta(tx_hash) } - fn transaction_block(&self, id: TxNumber) -> ProviderResult> { - self.provider()?.transaction_block(id) - } - fn transactions_by_block( &self, id: BlockHashOrNumber, @@ -432,11 +488,19 @@ impl TransactionsProvider for ProviderFactory { &self, range: impl RangeBounds, ) -> ProviderResult> { - self.provider()?.senders_by_tx_range(range) + if EitherWriterDestination::senders(self).is_static_file() { + self.static_file_provider.senders_by_tx_range(range) + } else { + self.provider()?.senders_by_tx_range(range) + } } fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { - self.provider()?.transaction_sender(id) + if EitherWriterDestination::senders(self).is_static_file() { + self.static_file_provider.transaction_sender(id) + } else { + self.provider()?.transaction_sender(id) + } } } @@ -540,18 +604,34 @@ impl HashedPostStateProvider for ProviderFactory { } } +impl MetadataProvider for ProviderFactory { + fn get_metadata(&self, key: &str) -> ProviderResult>> { + self.provider()?.get_metadata(key) + } +} + impl fmt::Debug for ProviderFactory where N: NodeTypesWithDB, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let Self { db, chain_spec, static_file_provider, prune_modes, storage } = self; + let Self { + db, + chain_spec, + static_file_provider, + prune_modes, + storage, + storage_settings, + rocksdb_provider, + } = self; f.debug_struct("ProviderFactory") .field("db", &db) .field("chain_spec", &chain_spec) .field("static_file_provider", &static_file_provider) .field("prune_modes", &prune_modes) .field("storage", &storage) + .field("storage_settings", &*storage_settings.read()) + .field("rocksdb_provider", &rocksdb_provider) .finish() } } @@ -564,6 +644,8 @@ impl Clone for ProviderFactory { static_file_provider: self.static_file_provider.clone(), prune_modes: self.prune_modes.clone(), storage: self.storage.clone(), + storage_settings: self.storage_settings.clone(), + rocksdb_provider: self.rocksdb_provider.clone(), } } } @@ -582,7 +664,7 @@ mod tests { use reth_chainspec::ChainSpecBuilder; use reth_db::{ mdbx::DatabaseArguments, - 
test_utils::{create_test_static_files_dir, ERROR_TEMPDIR}, + test_utils::{create_test_rocksdb_dir, create_test_static_files_dir, ERROR_TEMPDIR}, }; use reth_db_api::tables; use reth_primitives_traits::SignerRecoverable; @@ -621,11 +703,13 @@ mod tests { fn provider_factory_with_database_path() { let chain_spec = ChainSpecBuilder::mainnet().build(); let (_static_dir, static_dir_path) = create_test_static_files_dir(); + let (_, rocksdb_path) = create_test_rocksdb_dir(); let factory = ProviderFactory::>::new_with_database_path( tempfile::TempDir::new().expect(ERROR_TEMPDIR).keep(), Arc::new(chain_spec), DatabaseArguments::new(Default::default()), StaticFileProvider::read_write(static_dir_path).unwrap(), + RocksDBProvider::builder(&rocksdb_path).build().unwrap(), ) .unwrap(); let provider = factory.provider().unwrap(); @@ -642,7 +726,7 @@ mod tests { { let factory = create_test_provider_factory(); let provider = factory.provider_rw().unwrap(); - assert_matches!(provider.insert_block(block.clone().try_recover().unwrap()), Ok(_)); + assert_matches!(provider.insert_block(&block.clone().try_recover().unwrap()), Ok(_)); assert_matches!( provider.transaction_sender(0), Ok(Some(sender)) if sender == block.body().transactions[0].recover_signer().unwrap() @@ -661,7 +745,7 @@ mod tests { }; let factory = create_test_provider_factory(); let provider = factory.with_prune_modes(prune_modes).provider_rw().unwrap(); - assert_matches!(provider.insert_block(block.clone().try_recover().unwrap()), Ok(_)); + assert_matches!(provider.insert_block(&block.clone().try_recover().unwrap()), Ok(_)); assert_matches!(provider.transaction_sender(0), Ok(None)); assert_matches!( provider.transaction_id(*block.body().transactions[0].tx_hash()), @@ -681,7 +765,7 @@ mod tests { let factory = create_test_provider_factory(); let provider = factory.provider_rw().unwrap(); - assert_matches!(provider.insert_block(block.clone().try_recover().unwrap()), Ok(_)); + assert_matches!(provider.insert_block(&block.clone().try_recover().unwrap()), Ok(_)); let senders = provider.take::(range.clone()); assert_eq!( diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 93baa4309d..9b4b68a606 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -4,6 +4,7 @@ use crate::{ }, providers::{ database::{chain::ChainStorage, metrics}, + rocksdb::RocksDBProvider, static_file::StaticFileWriter, NodeTypesForProvider, StaticFileProvider, }, @@ -13,16 +14,17 @@ use crate::{ }, AccountReader, BlockBodyWriter, BlockExecutionWriter, BlockHashReader, BlockNumReader, BlockReader, BlockWriter, BundleStateInit, ChainStateBlockReader, ChainStateBlockWriter, - DBProvider, HashingWriter, HeaderProvider, HeaderSyncGapProvider, HistoricalStateProvider, - HistoricalStateProviderRef, HistoryWriter, LatestStateProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, + DBProvider, EitherReader, EitherWriter, EitherWriterDestination, HashingWriter, HeaderProvider, + HeaderSyncGapProvider, HistoricalStateProvider, HistoricalStateProviderRef, HistoryWriter, + LatestStateProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, + PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, RocksDBProviderFactory, StageCheckpointReader, StateProviderBox, StateWriter, StaticFileProviderFactory, StatsReader, StorageReader, 
StorageTrieWriter, TransactionVariant, TransactionsProvider, TransactionsProviderExt, TrieReader, TrieWriter, }; use alloy_consensus::{ transaction::{SignerRecoverable, TransactionMeta, TxHashRef}, - BlockHeader, + BlockHeader, TxReceipt, }; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{ @@ -31,6 +33,7 @@ use alloy_primitives::{ Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, }; use itertools::Itertools; +use parking_lot::RwLock; use rayon::slice::ParallelSliceMut; use reth_chain_state::ExecutedBlock; use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec}; @@ -39,7 +42,7 @@ use reth_db_api::{ database::Database, models::{ sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress, - BlockNumberHashedAddress, ShardedKey, StoredBlockBodyIndices, + BlockNumberHashedAddress, ShardedKey, StorageSettings, StoredBlockBodyIndices, }, table::Table, tables, @@ -57,8 +60,9 @@ use reth_prune_types::{ use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; use reth_storage_api::{ - BlockBodyIndicesProvider, BlockBodyReader, NodePrimitivesProvider, StateProvider, - StorageChangeSetReader, TryIntoHistoricalStateProvider, + BlockBodyIndicesProvider, BlockBodyReader, MetadataProvider, MetadataWriter, + NodePrimitivesProvider, StateProvider, StorageChangeSetReader, StorageSettingsCache, + TryIntoHistoricalStateProvider, }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ @@ -67,8 +71,7 @@ use reth_trie::{ TrieCursorIter, }, updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted}, - BranchNodeCompact, HashedPostStateSorted, Nibbles, StoredNibbles, StoredNibblesSubKey, - TrieChangeSetsEntry, + HashedPostStateSorted, StoredNibbles, StoredNibblesSubKey, TrieChangeSetsEntry, }; use reth_trie_db::{ DatabaseAccountTrieCursor, DatabaseStorageTrieCursor, DatabaseTrieCursorFactory, @@ -80,7 +83,7 @@ use std::{ cmp::Ordering, collections::{BTreeMap, BTreeSet}, fmt::Debug, - ops::{Deref, DerefMut, Not, Range, RangeBounds, RangeFrom, RangeInclusive}, + ops::{Deref, DerefMut, Range, RangeBounds, RangeFrom, RangeInclusive}, sync::Arc, }; use tracing::{debug, trace}; @@ -129,6 +132,13 @@ impl DatabaseProviderRW { pub fn into_tx(self) -> ::TXMut { self.0.into_tx() } + + /// Override the minimum pruning distance for testing purposes. + #[cfg(any(test, feature = "test-utils"))] + pub const fn with_minimum_pruning_distance(mut self, distance: u64) -> Self { + self.0.minimum_pruning_distance = distance; + self + } } impl From> @@ -141,7 +151,6 @@ impl From> /// A provider struct that fetches data from the database. /// Wrapper around [`DbTx`] and [`DbTxMut`]. Example: [`HeaderProvider`] [`BlockHashReader`] -#[derive(Debug)] pub struct DatabaseProvider { /// Database transaction. tx: TX, @@ -153,6 +162,31 @@ pub struct DatabaseProvider { prune_modes: PruneModes, /// Node storage handler. storage: Arc, + /// Storage configuration settings for this node + storage_settings: Arc>, + /// `RocksDB` provider + rocksdb_provider: RocksDBProvider, + /// Pending `RocksDB` batches to be committed at provider commit time. 
+ #[cfg(all(unix, feature = "rocksdb"))] + pending_rocksdb_batches: parking_lot::Mutex>>, + /// Minimum distance from tip required for pruning + minimum_pruning_distance: u64, +} + +impl Debug for DatabaseProvider { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut s = f.debug_struct("DatabaseProvider"); + s.field("tx", &self.tx) + .field("chain_spec", &self.chain_spec) + .field("static_file_provider", &self.static_file_provider) + .field("prune_modes", &self.prune_modes) + .field("storage", &self.storage) + .field("storage_settings", &self.storage_settings) + .field("rocksdb_provider", &self.rocksdb_provider); + #[cfg(all(unix, feature = "rocksdb"))] + s.field("pending_rocksdb_batches", &""); + s.field("minimum_pruning_distance", &self.minimum_pruning_distance).finish() + } } impl DatabaseProvider { @@ -214,7 +248,7 @@ impl DatabaseProvider { #[cfg(feature = "test-utils")] /// Sets the prune modes for provider. - pub const fn set_prune_modes(&mut self, prune_modes: PruneModes) { + pub fn set_prune_modes(&mut self, prune_modes: PruneModes) { self.prune_modes = prune_modes; } } @@ -228,9 +262,29 @@ impl StaticFileProviderFactory for DatabaseProvider { fn static_file_provider(&self) -> StaticFileProvider { self.static_file_provider.clone() } + + fn get_static_file_writer( + &self, + block: BlockNumber, + segment: StaticFileSegment, + ) -> ProviderResult> { + self.static_file_provider.get_writer(block, segment) + } } -impl> ChainSpecProvider +impl RocksDBProviderFactory for DatabaseProvider { + /// Returns the `RocksDB` provider. + fn rocksdb_provider(&self) -> RocksDBProvider { + self.rocksdb_provider.clone() + } + + #[cfg(all(unix, feature = "rocksdb"))] + fn set_pending_rocksdb_batch(&self, batch: rocksdb::WriteBatchWithTransaction) { + self.pending_rocksdb_batches.lock().push(batch); + } +} + +impl> ChainSpecProvider for DatabaseProvider { type ChainSpec = N::ChainSpec; @@ -248,8 +302,21 @@ impl DatabaseProvider { static_file_provider: StaticFileProvider, prune_modes: PruneModes, storage: Arc, + storage_settings: Arc>, + rocksdb_provider: RocksDBProvider, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes, storage } + Self { + tx, + chain_spec, + static_file_provider, + prune_modes, + storage, + storage_settings, + rocksdb_provider, + #[cfg(all(unix, feature = "rocksdb"))] + pending_rocksdb_batches: parking_lot::Mutex::new(Vec::new()), + minimum_pruning_distance: MINIMUM_PRUNING_DISTANCE, + } } } @@ -285,23 +352,21 @@ impl DatabaseProvider DatabaseProvider>>(from_tx..)?; - if !self.prune_modes.has_receipts_pruning() { + if EitherWriter::receipts_destination(self).is_static_file() { let static_file_receipt_num = self.static_file_provider.get_highest_static_file_tx(StaticFileSegment::Receipts); @@ -494,8 +559,21 @@ impl DatabaseProvider { static_file_provider: StaticFileProvider, prune_modes: PruneModes, storage: Arc, + storage_settings: Arc>, + rocksdb_provider: RocksDBProvider, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes, storage } + Self { + tx, + chain_spec, + static_file_provider, + prune_modes, + storage, + storage_settings, + rocksdb_provider, + #[cfg(all(unix, feature = "rocksdb"))] + pending_rocksdb_batches: parking_lot::Mutex::new(Vec::new()), + minimum_pruning_distance: MINIMUM_PRUNING_DISTANCE, + } } /// Consume `DbTx` or `DbTxMut`. @@ -643,17 +721,12 @@ impl DatabaseProvider { HF: Fn(RangeInclusive) -> ProviderResult>, BF: Fn(H, BodyTy, Vec
) -> ProviderResult, { - let mut senders_cursor = self.tx.cursor_read::()?; - self.block_range(range, headers_range, |header, body, tx_range| { let senders = if tx_range.is_empty() { Vec::new() } else { - // fetch senders from the senders table - let known_senders = - senders_cursor - .walk_range(tx_range.clone())? - .collect::, _>>()?; + let known_senders: HashMap = + EitherReader::new_senders(self)?.senders_by_tx_range(tx_range.clone())?; let mut senders = Vec::with_capacity(body.transactions().len()); for (tx_num, tx) in tx_range.zip(body.transactions()) { @@ -1259,11 +1332,6 @@ impl TransactionsProvider for Datab Ok(None) } - fn transaction_block(&self, id: TxNumber) -> ProviderResult> { - let mut cursor = self.tx.cursor_read::()?; - Ok(cursor.seek(id)?.map(|(_, bn)| bn)) - } - fn transactions_by_block( &self, id: BlockHashOrNumber, @@ -1311,11 +1379,19 @@ impl TransactionsProvider for Datab &self, range: impl RangeBounds, ) -> ProviderResult> { - self.cursor_read_collect::(range) + if EitherWriterDestination::senders(self).is_static_file() { + self.static_file_provider.senders_by_tx_range(range) + } else { + self.cursor_read_collect::(range) + } } fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { - Ok(self.tx.get::(id)?) + if EitherWriterDestination::senders(self).is_static_file() { + self.static_file_provider.transaction_sender(id) + } else { + Ok(self.tx.get::(id)?) + } } } @@ -1605,26 +1681,29 @@ impl StateWriter )); } - let has_receipts_pruning = self.prune_modes.has_receipts_pruning(); + let mut receipts_writer = EitherWriter::new_receipts(self, first_block)?; - // Prepare receipts cursor if we are going to write receipts to the database - // - // We are writing to database if requested or if there's any kind of receipt pruning - // configured - let mut receipts_cursor = self.tx.cursor_write::>()?; - - // Prepare receipts static writer if we are going to write receipts to static files - // - // We are writing to static files if requested and if there's no receipt pruning configured - let mut receipts_static_writer = has_receipts_pruning - .not() - .then(|| self.static_file_provider.get_writer(first_block, StaticFileSegment::Receipts)) - .transpose()?; + let has_contract_log_filter = !self.prune_modes.receipts_log_filter.is_empty(); + let contract_log_pruner = self.prune_modes.receipts_log_filter.group_by_block(tip, None)?; // All receipts from the last 128 blocks are required for blockchain tree, even with // [`PruneSegment::ContractLogs`]. - let prunable_receipts = - PruneMode::Distance(MINIMUM_PRUNING_DISTANCE).should_prune(first_block, tip); + // + // Receipts can only be skipped if we're dealing with legacy nodes that write them to + // Database, OR if receipts_in_static_files is enabled but no receipts exist in static + // files yet. Once receipts exist in static files, we must continue writing to maintain + // continuity and have no gaps. + let prunable_receipts = (EitherWriter::receipts_destination(self).is_database() || + self.static_file_provider() + .get_highest_static_file_tx(StaticFileSegment::Receipts) + .is_none()) && + PruneMode::Distance(self.minimum_pruning_distance).should_prune(first_block, tip); + + // Prepare set of addresses which logs should not be pruned. 
+ let mut allowed_addresses: HashSet = HashSet::new(); + for (_, addresses) in contract_log_pruner.range(..first_block) { + allowed_addresses.extend(addresses.iter().copied()); + } for (idx, (receipts, first_tx_index)) in execution_outcome.receipts.iter().zip(block_indices).enumerate() @@ -1632,9 +1711,7 @@ impl StateWriter let block_number = first_block + idx as u64; // Increment block number for receipts static file writer - if let Some(writer) = receipts_static_writer.as_mut() { - writer.increment_block(block_number)?; - } + receipts_writer.increment_block(block_number)?; // Skip writing receipts if pruning configuration requires us to. if prunable_receipts && @@ -1645,14 +1722,23 @@ impl StateWriter continue } + // If there are new addresses to retain after this block number, track them + if let Some(new_addresses) = contract_log_pruner.get(&block_number) { + allowed_addresses.extend(new_addresses.iter().copied()); + } + for (idx, receipt) in receipts.iter().enumerate() { let receipt_idx = first_tx_index + idx as u64; - - if let Some(writer) = &mut receipts_static_writer { - writer.append_receipt(receipt_idx, receipt)?; + // Skip writing receipt if log filter is active and it does not have any logs to + // retain + if prunable_receipts && + has_contract_log_filter && + !receipt.logs().iter().any(|log| allowed_addresses.contains(&log.address)) + { + continue } - receipts_cursor.append(receipt_idx, receipt)?; + receipts_writer.append_receipt(receipt_idx, receipt)?; } } @@ -1795,10 +1881,10 @@ impl StateWriter fn write_hashed_state(&self, hashed_state: &HashedPostStateSorted) -> ProviderResult<()> { // Write hashed account updates. let mut hashed_accounts_cursor = self.tx_ref().cursor_write::()?; - for (hashed_address, account) in hashed_state.accounts().accounts_sorted() { + for (hashed_address, account) in hashed_state.accounts() { if let Some(account) = account { - hashed_accounts_cursor.upsert(hashed_address, &account)?; - } else if hashed_accounts_cursor.seek_exact(hashed_address)?.is_some() { + hashed_accounts_cursor.upsert(*hashed_address, account)?; + } else if hashed_accounts_cursor.seek_exact(*hashed_address)?.is_some() { hashed_accounts_cursor.delete_current()?; } } @@ -1812,8 +1898,9 @@ impl StateWriter hashed_storage_cursor.delete_current_duplicates()?; } - for (hashed_slot, value) in storage.storage_slots_sorted() { - let entry = StorageEntry { key: hashed_slot, value }; + for (hashed_slot, value) in storage.storage_slots_ref() { + let entry = StorageEntry { key: *hashed_slot, value: *value }; + if let Some(db_entry) = hashed_storage_cursor.seek_by_key_subkey(*hashed_address, entry.key)? 
&& db_entry.key == entry.key @@ -2123,17 +2210,13 @@ impl TrieWriter for DatabaseProvider // Wrap the cursor in DatabaseAccountTrieCursor let mut db_account_cursor = DatabaseAccountTrieCursor::new(curr_values_cursor); - // Static empty array for when updates_overlay is None - static EMPTY_ACCOUNT_UPDATES: Vec<(Nibbles, Option)> = Vec::new(); - - // Get the overlay updates for account trie, or use an empty array - let account_overlay_updates = updates_overlay - .map(|overlay| overlay.account_nodes_ref()) - .unwrap_or(&EMPTY_ACCOUNT_UPDATES); + // Create empty TrieUpdatesSorted for when updates_overlay is None + let empty_updates = TrieUpdatesSorted::default(); + let overlay = updates_overlay.unwrap_or(&empty_updates); // Wrap the cursor in InMemoryTrieCursor with the overlay let mut in_memory_account_cursor = - InMemoryTrieCursor::new(Some(&mut db_account_cursor), account_overlay_updates); + InMemoryTrieCursor::new_account(&mut db_account_cursor, overlay); for (path, _) in trie_updates.account_nodes_ref() { num_entries += 1; @@ -2371,8 +2454,8 @@ impl StorageTrieWriter for DatabaseP B256::default(), // Will be set per iteration ); - // Static empty array for when updates_overlay is None - static EMPTY_UPDATES: Vec<(Nibbles, Option)> = Vec::new(); + // Create empty TrieUpdatesSorted for when updates_overlay is None + let empty_updates = TrieUpdatesSorted::default(); for (hashed_address, storage_trie_updates) in storage_tries { let changeset_key = BlockNumberHashedAddress((block_number, *hashed_address)); @@ -2381,15 +2464,15 @@ impl StorageTrieWriter for DatabaseP changed_curr_values_cursor = DatabaseStorageTrieCursor::new(changed_curr_values_cursor.cursor, *hashed_address); - // Get the overlay updates for this storage trie, or use an empty array - let overlay_updates = updates_overlay - .and_then(|overlay| overlay.storage_tries_ref().get(hashed_address)) - .map(|updates| updates.storage_nodes_ref()) - .unwrap_or(&EMPTY_UPDATES); + // Get the overlay updates, or use empty updates + let overlay = updates_overlay.unwrap_or(&empty_updates); // Wrap the cursor in InMemoryTrieCursor with the overlay - let mut in_memory_changed_cursor = - InMemoryTrieCursor::new(Some(&mut changed_curr_values_cursor), overlay_updates); + let mut in_memory_changed_cursor = InMemoryTrieCursor::new_storage( + &mut changed_curr_values_cursor, + overlay, + *hashed_address, + ); // Create an iterator which produces the current values of all updated paths, or None if // they are currently unset. @@ -2405,8 +2488,11 @@ impl StorageTrieWriter for DatabaseP DatabaseStorageTrieCursor::new(wiped_nodes_cursor.cursor, *hashed_address); // Wrap the wiped nodes cursor in InMemoryTrieCursor with the overlay - let mut in_memory_wiped_cursor = - InMemoryTrieCursor::new(Some(&mut wiped_nodes_cursor), overlay_updates); + let mut in_memory_wiped_cursor = InMemoryTrieCursor::new_storage( + &mut wiped_nodes_cursor, + overlay, + *hashed_address, + ); let all_nodes = TrieCursorIter::new(&mut in_memory_wiped_cursor); @@ -2781,16 +2867,18 @@ impl BlockWrite /// If withdrawals are not empty, this will modify /// [`BlockWithdrawals`](tables::BlockWithdrawals). /// - /// If the provider has __not__ configured full sender pruning, this will modify - /// [`TransactionSenders`](tables::TransactionSenders). 
+ /// If the provider has __not__ configured full sender pruning, this will modify either: + /// * [`StaticFileSegment::TransactionSenders`] if senders are written to static files + /// * [`tables::TransactionSenders`] if senders are written to the database /// /// If the provider has __not__ configured full transaction lookup pruning, this will modify /// [`TransactionHashNumbers`](tables::TransactionHashNumbers). fn insert_block( &self, - block: RecoveredBlock, + block: &RecoveredBlock, ) -> ProviderResult { let block_number = block.number(); + let tx_count = block.body().transaction_count() as u64; let mut durations_recorder = metrics::DurationsRecorder::default(); @@ -2801,32 +2889,33 @@ impl BlockWrite self.tx.put::(block.hash(), block_number)?; durations_recorder.record_relative(metrics::Action::InsertHeaderNumbers); - let mut next_tx_num = self + let first_tx_num = self .tx .cursor_read::()? .last()? .map(|(n, _)| n + 1) .unwrap_or_default(); durations_recorder.record_relative(metrics::Action::GetNextTxNum); - let first_tx_num = next_tx_num; - let tx_count = block.body().transaction_count() as u64; + let tx_nums_iter = std::iter::successors(Some(first_tx_num), |n| Some(n + 1)); - // Ensures we have all the senders for the block's transactions. - for (transaction, sender) in block.body().transactions_iter().zip(block.senders_iter()) { - let hash = transaction.tx_hash(); - - if self.prune_modes.sender_recovery.as_ref().is_none_or(|m| !m.is_full()) { - self.tx.put::(next_tx_num, *sender)?; - } - - if self.prune_modes.transaction_lookup.is_none_or(|m| !m.is_full()) { - self.tx.put::(*hash, next_tx_num)?; - } - next_tx_num += 1; + if self.prune_modes.sender_recovery.as_ref().is_none_or(|m| !m.is_full()) { + let mut senders_writer = EitherWriter::new_senders(self, block.number())?; + senders_writer.increment_block(block.number())?; + senders_writer + .append_senders(tx_nums_iter.clone().zip(block.senders_iter().copied()))?; + durations_recorder.record_relative(metrics::Action::InsertTransactionSenders); } - self.append_block_bodies(vec![(block_number, Some(block.into_body()))])?; + if self.prune_modes.transaction_lookup.is_none_or(|m| !m.is_full()) { + for (tx_num, transaction) in tx_nums_iter.zip(block.body().transactions_iter()) { + let hash = transaction.tx_hash(); + self.tx.put::(*hash, tx_num)?; + } + durations_recorder.record_relative(metrics::Action::InsertTransactionHashNumbers); + } + + self.append_block_bodies(vec![(block_number, Some(block.body()))])?; debug!( target: "providers::db", @@ -2840,7 +2929,7 @@ impl BlockWrite fn append_block_bodies( &self, - bodies: Vec<(BlockNumber, Option>)>, + bodies: Vec<(BlockNumber, Option<&BodyTy>)>, ) -> ProviderResult<()> { let Some(from_block) = bodies.first().map(|(block, _)| *block) else { return Ok(()) }; @@ -2891,8 +2980,9 @@ impl BlockWrite } fn remove_blocks_above(&self, block: BlockNumber) -> ProviderResult<()> { + let last_block_number = self.last_block_number()?; // Clean up HeaderNumbers for blocks being removed, we must clear all indexes from MDBX. - for hash in self.canonical_hashes_range(block + 1, self.last_block_number()? + 1)? { + for hash in self.canonical_hashes_range(block + 1, last_block_number + 1)? 
{ self.tx.delete::(hash, None)?; } @@ -2934,7 +3024,7 @@ impl BlockWrite } } - self.remove::(unwind_tx_from..)?; + EitherWriter::new_senders(self, last_block_number)?.prune_senders(unwind_tx_from, block)?; self.remove_bodies_above(block)?; @@ -2991,7 +3081,7 @@ impl BlockWrite // Insert the blocks for block in blocks { - self.insert_block(block)?; + self.insert_block(&block)?; durations_recorder.record_relative(metrics::Action::InsertBlock); } @@ -3024,10 +3114,13 @@ impl PruneCheckpointReader for DatabaseProvide } fn get_prune_checkpoints(&self) -> ProviderResult> { - Ok(self - .tx - .cursor_read::()? - .walk(None)? + Ok(PruneSegment::variants() + .filter_map(|segment| { + self.tx + .get::(segment) + .transpose() + .map(|chk| chk.map(|chk| (segment, chk))) + }) .collect::>()?) } } @@ -3112,7 +3205,7 @@ impl DBProvider for DatabaseProvider self.prune_modes_ref() } - /// Commit database transaction and static files. + /// Commit database transaction, static files, and pending `RocksDB` batches. fn commit(self) -> ProviderResult { // For unwinding it makes more sense to commit the database first, since if // it is interrupted before the static files commit, we can just @@ -3120,9 +3213,27 @@ impl DBProvider for DatabaseProvider // checkpoints on the next start-up. if self.static_file_provider.has_unwind_queued() { self.tx.commit()?; + + #[cfg(all(unix, feature = "rocksdb"))] + { + let batches = std::mem::take(&mut *self.pending_rocksdb_batches.lock()); + for batch in batches { + self.rocksdb_provider.commit_batch(batch)?; + } + } + self.static_file_provider.commit()?; } else { self.static_file_provider.commit()?; + + #[cfg(all(unix, feature = "rocksdb"))] + { + let batches = std::mem::take(&mut *self.pending_rocksdb_batches.lock()); + for batch in batches { + self.rocksdb_provider.commit_batch(batch)?; + } + } + self.tx.commit()?; } @@ -3130,6 +3241,28 @@ impl DBProvider for DatabaseProvider } } +impl MetadataProvider for DatabaseProvider { + fn get_metadata(&self, key: &str) -> ProviderResult>> { + self.tx.get::(key.to_string()).map_err(Into::into) + } +} + +impl MetadataWriter for DatabaseProvider { + fn write_metadata(&self, key: &str, value: Vec) -> ProviderResult<()> { + self.tx.put::(key.to_string(), value).map_err(Into::into) + } +} + +impl StorageSettingsCache for DatabaseProvider { + fn cached_storage_settings(&self) -> StorageSettings { + *self.storage_settings.read() + } + + fn set_storage_settings_cache(&self, settings: StorageSettings) { + *self.storage_settings.write() = settings; + } +} + #[cfg(test)] mod tests { use super::*; @@ -3137,7 +3270,9 @@ mod tests { test_utils::{blocks::BlockchainTestData, create_test_provider_factory}, BlockWriter, }; + use reth_ethereum_primitives::Receipt; use reth_testing_utils::generators::{self, random_block, BlockParams}; + use reth_trie::Nibbles; #[test] fn test_receipts_by_block_range_empty_range() { @@ -3167,14 +3302,14 @@ mod tests { let data = BlockchainTestData::default(); let provider_rw = factory.provider_rw().unwrap(); - provider_rw.insert_block(data.genesis.clone().try_recover().unwrap()).unwrap(); + provider_rw.insert_block(&data.genesis.clone().try_recover().unwrap()).unwrap(); provider_rw .write_state( &ExecutionOutcome { first_block: 0, receipts: vec![vec![]], ..Default::default() }, crate::OriginalValuesKnown::No, ) .unwrap(); - provider_rw.insert_block(data.blocks[0].0.clone()).unwrap(); + provider_rw.insert_block(&data.blocks[0].0).unwrap(); provider_rw.write_state(&data.blocks[0].1, crate::OriginalValuesKnown::No).unwrap(); 
provider_rw.commit().unwrap(); @@ -3193,7 +3328,7 @@ mod tests { let data = BlockchainTestData::default(); let provider_rw = factory.provider_rw().unwrap(); - provider_rw.insert_block(data.genesis.clone().try_recover().unwrap()).unwrap(); + provider_rw.insert_block(&data.genesis.clone().try_recover().unwrap()).unwrap(); provider_rw .write_state( &ExecutionOutcome { first_block: 0, receipts: vec![vec![]], ..Default::default() }, @@ -3201,7 +3336,7 @@ mod tests { ) .unwrap(); for i in 0..3 { - provider_rw.insert_block(data.blocks[i].0.clone()).unwrap(); + provider_rw.insert_block(&data.blocks[i].0).unwrap(); provider_rw.write_state(&data.blocks[i].1, crate::OriginalValuesKnown::No).unwrap(); } provider_rw.commit().unwrap(); @@ -3223,7 +3358,7 @@ mod tests { let data = BlockchainTestData::default(); let provider_rw = factory.provider_rw().unwrap(); - provider_rw.insert_block(data.genesis.clone().try_recover().unwrap()).unwrap(); + provider_rw.insert_block(&data.genesis.clone().try_recover().unwrap()).unwrap(); provider_rw .write_state( &ExecutionOutcome { first_block: 0, receipts: vec![vec![]], ..Default::default() }, @@ -3233,7 +3368,7 @@ mod tests { // insert blocks 1-3 with receipts for i in 0..3 { - provider_rw.insert_block(data.blocks[i].0.clone()).unwrap(); + provider_rw.insert_block(&data.blocks[i].0).unwrap(); provider_rw.write_state(&data.blocks[i].1, crate::OriginalValuesKnown::No).unwrap(); } provider_rw.commit().unwrap(); @@ -3254,7 +3389,7 @@ mod tests { let data = BlockchainTestData::default(); let provider_rw = factory.provider_rw().unwrap(); - provider_rw.insert_block(data.genesis.clone().try_recover().unwrap()).unwrap(); + provider_rw.insert_block(&data.genesis.clone().try_recover().unwrap()).unwrap(); provider_rw .write_state( &ExecutionOutcome { first_block: 0, receipts: vec![vec![]], ..Default::default() }, @@ -3262,7 +3397,7 @@ mod tests { ) .unwrap(); for i in 0..3 { - provider_rw.insert_block(data.blocks[i].0.clone()).unwrap(); + provider_rw.insert_block(&data.blocks[i].0).unwrap(); provider_rw.write_state(&data.blocks[i].1, crate::OriginalValuesKnown::No).unwrap(); } provider_rw.commit().unwrap(); @@ -3298,7 +3433,7 @@ mod tests { let provider_rw = factory.provider_rw().unwrap(); for block in blocks { - provider_rw.insert_block(block.try_recover().unwrap()).unwrap(); + provider_rw.insert_block(&block.try_recover().unwrap()).unwrap(); } provider_rw.commit().unwrap(); @@ -3317,7 +3452,7 @@ mod tests { let data = BlockchainTestData::default(); let provider_rw = factory.provider_rw().unwrap(); - provider_rw.insert_block(data.genesis.clone().try_recover().unwrap()).unwrap(); + provider_rw.insert_block(&data.genesis.clone().try_recover().unwrap()).unwrap(); provider_rw .write_state( &ExecutionOutcome { first_block: 0, receipts: vec![vec![]], ..Default::default() }, @@ -3325,7 +3460,7 @@ mod tests { ) .unwrap(); for i in 0..3 { - provider_rw.insert_block(data.blocks[i].0.clone()).unwrap(); + provider_rw.insert_block(&data.blocks[i].0).unwrap(); provider_rw.write_state(&data.blocks[i].1, crate::OriginalValuesKnown::No).unwrap(); } provider_rw.commit().unwrap(); @@ -4613,4 +4748,142 @@ mod tests { "storage_nibbles2 should have the value that was created and will be deleted" ); } + + #[test] + fn test_prunable_receipts_logic() { + let insert_blocks = + |provider_rw: &DatabaseProviderRW<_, _>, tip_block: u64, tx_count: u8| { + let mut rng = generators::rng(); + for block_num in 0..=tip_block { + let block = random_block( + &mut rng, + block_num, + BlockParams { tx_count: 
Some(tx_count), ..Default::default() }, + ); + provider_rw.insert_block(&block.try_recover().unwrap()).unwrap(); + } + }; + + let write_receipts = |provider_rw: DatabaseProviderRW<_, _>, block: u64| { + let outcome = ExecutionOutcome { + first_block: block, + receipts: vec![vec![Receipt { + tx_type: Default::default(), + success: true, + cumulative_gas_used: block, // identifier to assert against + logs: vec![], + }]], + ..Default::default() + }; + provider_rw.write_state(&outcome, crate::OriginalValuesKnown::No).unwrap(); + provider_rw.commit().unwrap(); + }; + + // Legacy mode (receipts in DB) - should be prunable + { + let factory = create_test_provider_factory(); + let storage_settings = StorageSettings::legacy(); + factory.set_storage_settings_cache(storage_settings); + let factory = factory.with_prune_modes(PruneModes { + receipts: Some(PruneMode::Before(100)), + ..Default::default() + }); + + let tip_block = 200u64; + let first_block = 1u64; + + // create chain + let provider_rw = factory.provider_rw().unwrap(); + insert_blocks(&provider_rw, tip_block, 1); + provider_rw.commit().unwrap(); + + write_receipts( + factory.provider_rw().unwrap().with_minimum_pruning_distance(100), + first_block, + ); + write_receipts( + factory.provider_rw().unwrap().with_minimum_pruning_distance(100), + tip_block - 1, + ); + + let provider = factory.provider().unwrap(); + + for (block, num_receipts) in [(0, 0), (tip_block - 1, 1)] { + assert!(provider + .receipts_by_block(block.into()) + .unwrap() + .is_some_and(|r| r.len() == num_receipts)); + } + } + + // Static files mode + { + let factory = create_test_provider_factory(); + let storage_settings = StorageSettings::legacy().with_receipts_in_static_files(true); + factory.set_storage_settings_cache(storage_settings); + let factory = factory.with_prune_modes(PruneModes { + receipts: Some(PruneMode::Before(2)), + ..Default::default() + }); + + let tip_block = 200u64; + + // create chain + let provider_rw = factory.provider_rw().unwrap(); + insert_blocks(&provider_rw, tip_block, 1); + provider_rw.commit().unwrap(); + + // Attempt to write receipts for block 0 and 1 (should be skipped) + write_receipts(factory.provider_rw().unwrap().with_minimum_pruning_distance(100), 0); + write_receipts(factory.provider_rw().unwrap().with_minimum_pruning_distance(100), 1); + + assert!(factory + .static_file_provider() + .get_highest_static_file_tx(StaticFileSegment::Receipts) + .is_none(),); + assert!(factory + .static_file_provider() + .get_highest_static_file_block(StaticFileSegment::Receipts) + .is_some_and(|b| b == 1),); + + // Since we have prune mode Before(2), the next receipt (block 2) should be written to + // static files. + write_receipts(factory.provider_rw().unwrap().with_minimum_pruning_distance(100), 2); + assert!(factory + .static_file_provider() + .get_highest_static_file_tx(StaticFileSegment::Receipts) + .is_some_and(|num| num == 2),); + + // After having a receipt already in static files, attempt to skip the next receipt by + // changing the prune mode. It should NOT skip it and should still write the receipt, + // since static files do not support gaps. + let factory = factory.with_prune_modes(PruneModes { + receipts: Some(PruneMode::Before(100)), + ..Default::default() + }); + let provider_rw = factory.provider_rw().unwrap().with_minimum_pruning_distance(1); + assert!(PruneMode::Distance(1).should_prune(3, tip_block)); + write_receipts(provider_rw, 3); + + // Ensure we can only fetch the 2 last receipts. 
+ // + // Test setup only has 1 tx per block and each receipt has its cumulative_gas_used set + // to the block number it belongs to easily identify and assert. + let provider = factory.provider().unwrap(); + assert!(EitherWriter::receipts_destination(&provider).is_static_file()); + for (num, num_receipts) in [(0, 0), (1, 0), (2, 1), (3, 1)] { + assert!(provider + .receipts_by_block(num.into()) + .unwrap() + .is_some_and(|r| r.len() == num_receipts)); + + let receipt = provider.receipt(num).unwrap(); + if num_receipts > 0 { + assert!(receipt.is_some_and(|r| r.cumulative_gas_used == num)); + } else { + assert!(receipt.is_none()); + } + } + } + } } diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 41e8121991..e9d5a7c350 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -9,8 +9,8 @@ pub use database::*; mod static_file; pub use static_file::{ - StaticFileAccess, StaticFileJarProvider, StaticFileProvider, StaticFileProviderRW, - StaticFileProviderRWRefMut, StaticFileWriter, + StaticFileAccess, StaticFileJarProvider, StaticFileProvider, StaticFileProviderBuilder, + StaticFileProviderRW, StaticFileProviderRWRefMut, StaticFileWriter, }; mod state; @@ -29,6 +29,14 @@ pub use blockchain_provider::BlockchainProvider; mod consistent; pub use consistent::ConsistentProvider; +// RocksDB currently only supported on Unix platforms +// Windows support is planned for future releases +#[cfg_attr(all(unix, feature = "rocksdb"), path = "rocksdb/mod.rs")] +#[cfg_attr(not(all(unix, feature = "rocksdb")), path = "rocksdb_stub.rs")] +pub(crate) mod rocksdb; + +pub use rocksdb::{RocksDBBatch, RocksDBBuilder, RocksDBProvider, RocksTx}; + /// Helper trait to bound [`NodeTypes`] so that combined with database they satisfy /// [`ProviderNodeTypes`]. pub trait NodeTypesForProvider diff --git a/crates/storage/provider/src/providers/rocksdb/invariants.rs b/crates/storage/provider/src/providers/rocksdb/invariants.rs new file mode 100644 index 0000000000..5e420f3b19 --- /dev/null +++ b/crates/storage/provider/src/providers/rocksdb/invariants.rs @@ -0,0 +1,921 @@ +//! Invariant checking for `RocksDB` tables. +//! +//! This module provides consistency checks for tables stored in `RocksDB`, similar to the +//! consistency checks for static files. The goal is to detect and potentially heal +//! inconsistencies between `RocksDB` data and MDBX checkpoints. + +use super::RocksDBProvider; +use crate::StaticFileProviderFactory; +use alloy_eips::eip2718::Encodable2718; +use alloy_primitives::BlockNumber; +use rayon::prelude::*; +use reth_db::cursor::DbCursorRO; +use reth_db_api::{tables, transaction::DbTx}; +use reth_stages_types::StageId; +use reth_static_file_types::StaticFileSegment; +use reth_storage_api::{ + DBProvider, StageCheckpointReader, StorageSettingsCache, TransactionsProvider, +}; +use reth_storage_errors::provider::ProviderResult; + +impl RocksDBProvider { + /// Checks consistency of `RocksDB` tables against MDBX stage checkpoints. + /// + /// Returns an unwind target block number if the pipeline needs to unwind to rebuild + /// `RocksDB` data. Returns `None` if all invariants pass or if inconsistencies were healed. + /// + /// # Invariants checked + /// + /// For `TransactionHashNumbers`: + /// - The maximum `TxNumber` value should not exceed what the `TransactionLookup` stage + /// checkpoint indicates has been processed. + /// - If `RocksDB` is ahead, excess entries are pruned (healed). 
+ /// - If `RocksDB` is behind, an unwind is required. + /// + /// For `StoragesHistory`: + /// - The maximum block number in shards should not exceed the `IndexStorageHistory` stage + /// checkpoint. + /// - Similar healing/unwind logic applies. + /// + /// # Requirements + /// + /// For pruning `TransactionHashNumbers`, the provider must be able to supply transaction + /// data (typically from static files) so that transaction hashes can be computed. This + /// implies that static files should be ahead of or in sync with `RocksDB`. + pub fn check_consistency( + &self, + provider: &Provider, + ) -> ProviderResult> + where + Provider: DBProvider + + StageCheckpointReader + + StorageSettingsCache + + StaticFileProviderFactory + + TransactionsProvider, + { + let mut unwind_target: Option = None; + + // Check TransactionHashNumbers if stored in RocksDB + if provider.cached_storage_settings().transaction_hash_numbers_in_rocksdb && + let Some(target) = self.check_transaction_hash_numbers(provider)? + { + unwind_target = Some(unwind_target.map_or(target, |t| t.min(target))); + } + + // Check StoragesHistory if stored in RocksDB + if provider.cached_storage_settings().storages_history_in_rocksdb && + let Some(target) = self.check_storages_history(provider)? + { + unwind_target = Some(unwind_target.map_or(target, |t| t.min(target))); + } + + Ok(unwind_target) + } + + /// Checks invariants for the `TransactionHashNumbers` table. + /// + /// Returns a block number to unwind to if MDBX is behind the checkpoint. + /// If static files are ahead of MDBX, excess `RocksDB` entries are pruned (healed). + /// + /// # Approach + /// + /// Instead of iterating `RocksDB` entries (which is expensive and doesn't give us the + /// tx range we need), we use static files and MDBX to determine what needs pruning: + /// - Static files are committed before `RocksDB`, so they're at least at the same height + /// - MDBX `TransactionBlocks` tells us what's been fully committed + /// - If static files have more transactions than MDBX, prune the excess range + fn check_transaction_hash_numbers( + &self, + provider: &Provider, + ) -> ProviderResult> + where + Provider: DBProvider + + StageCheckpointReader + + StaticFileProviderFactory + + TransactionsProvider, + { + // Get the TransactionLookup stage checkpoint + let checkpoint = provider + .get_stage_checkpoint(StageId::TransactionLookup)? + .map(|cp| cp.block_number) + .unwrap_or(0); + + // Get last tx_num from MDBX - this tells us what MDBX has fully committed + let mut cursor = provider.tx_ref().cursor_read::()?; + let mdbx_last = cursor.last()?; + + // Get highest tx_num from static files - this tells us what tx data is available + let highest_static_tx = provider + .static_file_provider() + .get_highest_static_file_tx(StaticFileSegment::Transactions); + + match (mdbx_last, highest_static_tx) { + (Some((mdbx_tx, mdbx_block)), Some(highest_tx)) if highest_tx > mdbx_tx => { + // Static files are ahead of MDBX - prune RocksDB entries for the excess range. + // This is the common case during recovery from a crash during unwinding. 
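+ // The excess range is `(mdbx_tx + 1)..=highest_tx`: everything static files contain beyond what MDBX has committed.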
+ tracing::info!( + target: "reth::providers::rocksdb", + mdbx_last_tx = mdbx_tx, + mdbx_block, + highest_static_tx = highest_tx, + "Static files ahead of MDBX, pruning TransactionHashNumbers excess data" + ); + self.prune_transaction_hash_numbers_in_range(provider, (mdbx_tx + 1)..=highest_tx)?; + + // After pruning, check if MDBX is behind checkpoint + if checkpoint > mdbx_block { + tracing::warn!( + target: "reth::providers::rocksdb", + mdbx_block, + checkpoint, + "MDBX behind checkpoint after pruning, unwind needed" + ); + return Ok(Some(mdbx_block)); + } + } + (Some((_mdbx_tx, mdbx_block)), _) => { + // MDBX and static files are in sync (or static files don't have more data). + // Check if MDBX is behind checkpoint. + if checkpoint > mdbx_block { + tracing::warn!( + target: "reth::providers::rocksdb", + mdbx_block, + checkpoint, + "MDBX behind checkpoint, unwind needed" + ); + return Ok(Some(mdbx_block)); + } + } + (None, Some(highest_tx)) => { + // MDBX has no transactions but static files have data. + // This means RocksDB might have stale entries - prune them all. + tracing::info!( + target: "reth::providers::rocksdb", + highest_static_tx = highest_tx, + "MDBX empty but static files have data, pruning all TransactionHashNumbers" + ); + self.prune_transaction_hash_numbers_in_range(provider, 0..=highest_tx)?; + } + (None, None) => { + // Both MDBX and static files are empty. + // If checkpoint says we should have data, that's an inconsistency. + if checkpoint > 0 { + tracing::warn!( + target: "reth::providers::rocksdb", + checkpoint, + "Checkpoint set but no transaction data exists, unwind needed" + ); + return Ok(Some(0)); + } + } + } + + Ok(None) + } + + /// Prunes `TransactionHashNumbers` entries for transactions in the given range. + /// + /// This fetches transactions from the provider, computes their hashes in parallel, + /// and deletes the corresponding entries from `RocksDB` by key. This approach is more + /// scalable than iterating all rows because it only processes the transactions that + /// need to be pruned. + /// + /// # Requirements + /// + /// The provider must be able to supply transaction data (typically from static files) + /// so that transaction hashes can be computed. This implies that static files should + /// be ahead of or in sync with `RocksDB`. + fn prune_transaction_hash_numbers_in_range( + &self, + provider: &Provider, + tx_range: std::ops::RangeInclusive, + ) -> ProviderResult<()> + where + Provider: TransactionsProvider, + { + if tx_range.is_empty() { + return Ok(()); + } + + // Fetch transactions in the range and compute their hashes in parallel + let hashes: Vec<_> = provider + .transactions_by_tx_range(tx_range.clone())? + .into_par_iter() + .map(|tx| tx.trie_hash()) + .collect(); + + if !hashes.is_empty() { + tracing::info!( + target: "reth::providers::rocksdb", + deleted_count = hashes.len(), + tx_range_start = *tx_range.start(), + tx_range_end = *tx_range.end(), + "Pruning TransactionHashNumbers entries by tx range" + ); + + let mut batch = self.batch(); + for hash in hashes { + batch.delete::(hash)?; + } + batch.commit()?; + } + + Ok(()) + } + + /// Checks invariants for the `StoragesHistory` table. + /// + /// Returns a block number to unwind to if `RocksDB` is behind the checkpoint. + /// If `RocksDB` is ahead of the checkpoint, excess entries are pruned (healed). 
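+ ///
+ /// Excess shards are detected via the `highest_block_number` embedded in each key; the
+ /// `u64::MAX` sentinel shard is only removed when the whole table is cleared.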
+ fn check_storages_history( + &self, + provider: &Provider, + ) -> ProviderResult> + where + Provider: DBProvider + StageCheckpointReader, + { + // Get the IndexStorageHistory stage checkpoint + let checkpoint = provider + .get_stage_checkpoint(StageId::IndexStorageHistory)? + .map(|cp| cp.block_number) + .unwrap_or(0); + + // Check if RocksDB has any data + let rocks_first = self.first::()?; + + match rocks_first { + Some(_) => { + // If checkpoint is 0 but we have data, clear everything + if checkpoint == 0 { + tracing::info!( + target: "reth::providers::rocksdb", + "StoragesHistory has data but checkpoint is 0, clearing all" + ); + self.prune_storages_history_above(0)?; + return Ok(None); + } + + // Find the max highest_block_number (excluding u64::MAX sentinel) across all + // entries + let mut max_highest_block = 0u64; + for result in self.iter::()? { + let (key, _) = result?; + let highest = key.sharded_key.highest_block_number; + if highest != u64::MAX && highest > max_highest_block { + max_highest_block = highest; + } + } + + // If any entry has highest_block > checkpoint, prune excess + if max_highest_block > checkpoint { + tracing::info!( + target: "reth::providers::rocksdb", + rocks_highest = max_highest_block, + checkpoint, + "StoragesHistory ahead of checkpoint, pruning excess data" + ); + self.prune_storages_history_above(checkpoint)?; + } + + Ok(None) + } + None => { + // Empty RocksDB table + if checkpoint > 0 { + // Stage says we should have data but we don't + return Ok(Some(0)); + } + Ok(None) + } + } + } + + /// Prunes `StoragesHistory` entries where `highest_block_number` > `max_block`. + /// + /// For `StoragesHistory`, the key contains `highest_block_number`, so we can iterate + /// and delete entries where `key.sharded_key.highest_block_number > max_block`. + /// + /// TODO(): this iterates the whole table, + /// which is inefficient. Use changeset-based pruning instead. + fn prune_storages_history_above(&self, max_block: BlockNumber) -> ProviderResult<()> { + use reth_db_api::models::storage_sharded_key::StorageShardedKey; + + let mut to_delete: Vec = Vec::new(); + for result in self.iter::()? 
{ + let (key, _) = result?; + let highest_block = key.sharded_key.highest_block_number; + if max_block == 0 || (highest_block != u64::MAX && highest_block > max_block) { + to_delete.push(key); + } + } + + let deleted = to_delete.len(); + if deleted > 0 { + tracing::info!( + target: "reth::providers::rocksdb", + deleted_count = deleted, + max_block, + "Pruning StoragesHistory entries" + ); + + let mut batch = self.batch(); + for key in to_delete { + batch.delete::(key)?; + } + batch.commit()?; + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + providers::rocksdb::RocksDBBuilder, test_utils::create_test_provider_factory, BlockWriter, + DatabaseProviderFactory, StageCheckpointWriter, TransactionsProvider, + }; + use alloy_primitives::{Address, B256}; + use reth_db::cursor::DbCursorRW; + use reth_db_api::{ + models::{storage_sharded_key::StorageShardedKey, StorageSettings}, + tables::{self, BlockNumberList}, + transaction::DbTxMut, + }; + use reth_stages_types::StageCheckpoint; + use reth_testing_utils::generators::{self, BlockRangeParams}; + use tempfile::TempDir; + + #[test] + fn test_first_last_empty_rocksdb() { + let temp_dir = TempDir::new().unwrap(); + let provider = RocksDBBuilder::new(temp_dir.path()) + .with_table::() + .with_table::() + .build() + .unwrap(); + + // Empty RocksDB, no checkpoints - should be consistent + let first = provider.first::().unwrap(); + let last = provider.last::().unwrap(); + + assert!(first.is_none()); + assert!(last.is_none()); + } + + #[test] + fn test_first_last_with_data() { + let temp_dir = TempDir::new().unwrap(); + let provider = RocksDBBuilder::new(temp_dir.path()) + .with_table::() + .build() + .unwrap(); + + // Insert some data + let tx_hash = B256::from([1u8; 32]); + provider.put::(tx_hash, &100).unwrap(); + + // RocksDB has data + let last = provider.last::().unwrap(); + assert!(last.is_some()); + assert_eq!(last.unwrap().1, 100); + } + + #[test] + fn test_check_consistency_empty_rocksdb_no_checkpoint_is_ok() { + let temp_dir = TempDir::new().unwrap(); + let rocksdb = RocksDBBuilder::new(temp_dir.path()) + .with_table::() + .with_table::() + .build() + .unwrap(); + + // Create a test provider factory for MDBX + let factory = create_test_provider_factory(); + factory.set_storage_settings_cache( + StorageSettings::legacy() + .with_transaction_hash_numbers_in_rocksdb(true) + .with_storages_history_in_rocksdb(true), + ); + + let provider = factory.database_provider_ro().unwrap(); + + // Empty RocksDB and no checkpoints - should be consistent (None = no unwind needed) + let result = rocksdb.check_consistency(&provider).unwrap(); + assert_eq!(result, None); + } + + #[test] + fn test_check_consistency_empty_rocksdb_with_checkpoint_needs_unwind() { + let temp_dir = TempDir::new().unwrap(); + let rocksdb = RocksDBBuilder::new(temp_dir.path()) + .with_table::() + .build() + .unwrap(); + + // Create a test provider factory for MDBX + let factory = create_test_provider_factory(); + factory.set_storage_settings_cache( + StorageSettings::legacy().with_transaction_hash_numbers_in_rocksdb(true), + ); + + // Set a checkpoint indicating we should have processed up to block 100 + { + let provider = factory.database_provider_rw().unwrap(); + provider + .save_stage_checkpoint(StageId::TransactionLookup, StageCheckpoint::new(100)) + .unwrap(); + provider.commit().unwrap(); + } + + let provider = factory.database_provider_ro().unwrap(); + + // RocksDB is empty but checkpoint says block 100 was processed + // This means RocksDB is 
missing data and we need to unwind to rebuild + let result = rocksdb.check_consistency(&provider).unwrap(); + assert_eq!(result, Some(0), "Should require unwind to block 0 to rebuild RocksDB"); + } + + #[test] + fn test_check_consistency_mdbx_empty_static_files_have_data_prunes_rocksdb() { + let temp_dir = TempDir::new().unwrap(); + let rocksdb = RocksDBBuilder::new(temp_dir.path()) + .with_table::() + .build() + .unwrap(); + + let factory = create_test_provider_factory(); + factory.set_storage_settings_cache( + StorageSettings::legacy().with_transaction_hash_numbers_in_rocksdb(true), + ); + + // Generate blocks with real transactions and insert them + let mut rng = generators::rng(); + let blocks = generators::random_block_range( + &mut rng, + 0..=2, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 2..3, ..Default::default() }, + ); + + let mut tx_hashes = Vec::new(); + { + let provider = factory.database_provider_rw().unwrap(); + let mut tx_count = 0u64; + for block in &blocks { + provider + .insert_block(&block.clone().try_recover().expect("recover block")) + .unwrap(); + for tx in &block.body().transactions { + let hash = tx.trie_hash(); + tx_hashes.push(hash); + rocksdb.put::(hash, &tx_count).unwrap(); + tx_count += 1; + } + } + provider.commit().unwrap(); + } + + // Simulate crash recovery: MDBX was reset but static files and RocksDB still have data. + // Clear TransactionBlocks to simulate empty MDBX state. + { + let provider = factory.database_provider_rw().unwrap(); + let mut cursor = provider.tx_ref().cursor_write::().unwrap(); + let mut to_delete = Vec::new(); + let mut walker = cursor.walk(Some(0)).unwrap(); + while let Some((tx_num, _)) = walker.next().transpose().unwrap() { + to_delete.push(tx_num); + } + drop(walker); + for tx_num in to_delete { + cursor.seek_exact(tx_num).unwrap(); + cursor.delete_current().unwrap(); + } + // No checkpoint set (checkpoint = 0) + provider.commit().unwrap(); + } + + // Verify RocksDB data exists + assert!(rocksdb.last::().unwrap().is_some()); + + let provider = factory.database_provider_ro().unwrap(); + + // MDBX TransactionBlocks is empty, but static files have transaction data. + // This means RocksDB has stale data that should be pruned (healed). 
+ let result = rocksdb.check_consistency(&provider).unwrap(); + assert_eq!(result, None, "Should heal by pruning, no unwind needed"); + + // Verify data was pruned + for hash in &tx_hashes { + assert!( + rocksdb.get::(*hash).unwrap().is_none(), + "RocksDB should be empty after pruning" + ); + } + } + + #[test] + fn test_check_consistency_storages_history_empty_with_checkpoint_needs_unwind() { + let temp_dir = TempDir::new().unwrap(); + let rocksdb = RocksDBBuilder::new(temp_dir.path()) + .with_table::() + .build() + .unwrap(); + + // Create a test provider factory for MDBX + let factory = create_test_provider_factory(); + factory.set_storage_settings_cache( + StorageSettings::legacy().with_storages_history_in_rocksdb(true), + ); + + // Set a checkpoint indicating we should have processed up to block 100 + { + let provider = factory.database_provider_rw().unwrap(); + provider + .save_stage_checkpoint(StageId::IndexStorageHistory, StageCheckpoint::new(100)) + .unwrap(); + provider.commit().unwrap(); + } + + let provider = factory.database_provider_ro().unwrap(); + + // RocksDB is empty but checkpoint says block 100 was processed + let result = rocksdb.check_consistency(&provider).unwrap(); + assert_eq!(result, Some(0), "Should require unwind to block 0 to rebuild StoragesHistory"); + } + + #[test] + fn test_check_consistency_storages_history_has_data_no_checkpoint_prunes_data() { + let temp_dir = TempDir::new().unwrap(); + let rocksdb = RocksDBBuilder::new(temp_dir.path()) + .with_table::() + .build() + .unwrap(); + + // Insert data into RocksDB + let key = StorageShardedKey::new(Address::ZERO, B256::ZERO, 50); + let block_list = BlockNumberList::new_pre_sorted([10, 20, 30, 50]); + rocksdb.put::(key, &block_list).unwrap(); + + // Verify data exists + assert!(rocksdb.last::().unwrap().is_some()); + + // Create a test provider factory for MDBX with NO checkpoint + let factory = create_test_provider_factory(); + factory.set_storage_settings_cache( + StorageSettings::legacy().with_storages_history_in_rocksdb(true), + ); + + let provider = factory.database_provider_ro().unwrap(); + + // RocksDB has data but checkpoint is 0 + // This means RocksDB has stale data that should be pruned (healed) + let result = rocksdb.check_consistency(&provider).unwrap(); + assert_eq!(result, None, "Should heal by pruning, no unwind needed"); + + // Verify data was pruned + assert!( + rocksdb.last::().unwrap().is_none(), + "RocksDB should be empty after pruning" + ); + } + + #[test] + fn test_check_consistency_mdbx_behind_checkpoint_needs_unwind() { + let temp_dir = TempDir::new().unwrap(); + let rocksdb = RocksDBBuilder::new(temp_dir.path()) + .with_table::() + .build() + .unwrap(); + + let factory = create_test_provider_factory(); + factory.set_storage_settings_cache( + StorageSettings::legacy().with_transaction_hash_numbers_in_rocksdb(true), + ); + + // Generate blocks with real transactions (blocks 0-2, 6 transactions total) + let mut rng = generators::rng(); + let blocks = generators::random_block_range( + &mut rng, + 0..=2, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 2..3, ..Default::default() }, + ); + + { + let provider = factory.database_provider_rw().unwrap(); + let mut tx_count = 0u64; + for block in &blocks { + provider + .insert_block(&block.clone().try_recover().expect("recover block")) + .unwrap(); + for tx in &block.body().transactions { + let hash = tx.trie_hash(); + rocksdb.put::(hash, &tx_count).unwrap(); + tx_count += 1; + } + } + provider.commit().unwrap(); + } + + // Now simulate a 
scenario where checkpoint is ahead of MDBX. + // This happens when the checkpoint was saved but MDBX data was lost/corrupted. + // Set checkpoint to block 10 (beyond our actual data at block 2) + { + let provider = factory.database_provider_rw().unwrap(); + provider + .save_stage_checkpoint(StageId::TransactionLookup, StageCheckpoint::new(10)) + .unwrap(); + provider.commit().unwrap(); + } + + let provider = factory.database_provider_ro().unwrap(); + + // MDBX has data up to block 2, but checkpoint says block 10 was processed. + // The static files highest tx matches MDBX last tx (both at block 2). + // Checkpoint > mdbx_block means we need to unwind to rebuild. + let result = rocksdb.check_consistency(&provider).unwrap(); + assert_eq!( + result, + Some(2), + "Should require unwind to block 2 (MDBX's last block) to rebuild from checkpoint" + ); + } + + #[test] + fn test_check_consistency_rocksdb_ahead_of_checkpoint_prunes_excess() { + let temp_dir = TempDir::new().unwrap(); + let rocksdb = RocksDBBuilder::new(temp_dir.path()) + .with_table::() + .build() + .unwrap(); + + // Create a test provider factory for MDBX + let factory = create_test_provider_factory(); + factory.set_storage_settings_cache( + StorageSettings::legacy().with_transaction_hash_numbers_in_rocksdb(true), + ); + + // Generate blocks with real transactions: + // Blocks 0-5, each with 2 transactions = 12 total transactions (0-11) + let mut rng = generators::rng(); + let blocks = generators::random_block_range( + &mut rng, + 0..=5, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 2..3, ..Default::default() }, + ); + + // Track which hashes belong to which blocks + let mut tx_hashes = Vec::new(); + let mut tx_count = 0u64; + { + let provider = factory.database_provider_rw().unwrap(); + // Insert ALL blocks (0-5) to write transactions to static files + for block in &blocks { + provider + .insert_block(&block.clone().try_recover().expect("recover block")) + .unwrap(); + for tx in &block.body().transactions { + let hash = tx.trie_hash(); + tx_hashes.push(hash); + rocksdb.put::(hash, &tx_count).unwrap(); + tx_count += 1; + } + } + provider.commit().unwrap(); + } + + // Simulate crash recovery scenario: + // MDBX was unwound to block 2, but RocksDB and static files still have more data. + // Remove TransactionBlocks entries for blocks 3-5 to simulate MDBX unwind. 
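+ // Static files and `RocksDB` keep the full 0-5 range, reproducing the post-crash state
+ // that the consistency check is expected to heal by pruning.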
+ { + let provider = factory.database_provider_rw().unwrap(); + // Delete TransactionBlocks entries for tx > 5 (i.e., for blocks 3-5) + // TransactionBlocks maps last_tx_in_block -> block_number + // After unwind, only entries for blocks 0-2 should remain (tx 5 -> block 2) + let mut cursor = provider.tx_ref().cursor_write::().unwrap(); + // Walk and delete entries where block > 2 + let mut to_delete = Vec::new(); + let mut walker = cursor.walk(Some(0)).unwrap(); + while let Some((tx_num, block_num)) = walker.next().transpose().unwrap() { + if block_num > 2 { + to_delete.push(tx_num); + } + } + drop(walker); + for tx_num in to_delete { + cursor.seek_exact(tx_num).unwrap(); + cursor.delete_current().unwrap(); + } + + // Set checkpoint to block 2 + provider + .save_stage_checkpoint(StageId::TransactionLookup, StageCheckpoint::new(2)) + .unwrap(); + provider.commit().unwrap(); + } + + let provider = factory.database_provider_ro().unwrap(); + + // RocksDB has tx hashes for all blocks (0-5) + // MDBX TransactionBlocks only goes up to tx 5 (block 2) + // Static files have data for all txs (0-11) + // This means RocksDB is ahead and should prune entries for tx 6-11 + let result = rocksdb.check_consistency(&provider).unwrap(); + assert_eq!(result, None, "Should heal by pruning, no unwind needed"); + + // Verify: hashes for blocks 0-2 (tx 0-5) should remain, blocks 3-5 (tx 6-11) should be + // pruned First 6 hashes should remain + for (i, hash) in tx_hashes.iter().take(6).enumerate() { + assert!( + rocksdb.get::(*hash).unwrap().is_some(), + "tx {} should remain", + i + ); + } + // Last 6 hashes should be pruned + for (i, hash) in tx_hashes.iter().skip(6).enumerate() { + assert!( + rocksdb.get::(*hash).unwrap().is_none(), + "tx {} should be pruned", + i + 6 + ); + } + } + + #[test] + fn test_check_consistency_storages_history_ahead_of_checkpoint_prunes_excess() { + let temp_dir = TempDir::new().unwrap(); + let rocksdb = RocksDBBuilder::new(temp_dir.path()) + .with_table::() + .build() + .unwrap(); + + // Insert data into RocksDB with different highest_block_numbers + let key_block_50 = StorageShardedKey::new(Address::ZERO, B256::ZERO, 50); + let key_block_100 = StorageShardedKey::new(Address::ZERO, B256::from([1u8; 32]), 100); + let key_block_150 = StorageShardedKey::new(Address::ZERO, B256::from([2u8; 32]), 150); + let key_block_max = StorageShardedKey::new(Address::ZERO, B256::from([3u8; 32]), u64::MAX); + + let block_list = BlockNumberList::new_pre_sorted([10, 20, 30]); + rocksdb.put::(key_block_50.clone(), &block_list).unwrap(); + rocksdb.put::(key_block_100.clone(), &block_list).unwrap(); + rocksdb.put::(key_block_150.clone(), &block_list).unwrap(); + rocksdb.put::(key_block_max.clone(), &block_list).unwrap(); + + // Create a test provider factory for MDBX + let factory = create_test_provider_factory(); + factory.set_storage_settings_cache( + StorageSettings::legacy().with_storages_history_in_rocksdb(true), + ); + + // Set checkpoint to block 100 + { + let provider = factory.database_provider_rw().unwrap(); + provider + .save_stage_checkpoint(StageId::IndexStorageHistory, StageCheckpoint::new(100)) + .unwrap(); + provider.commit().unwrap(); + } + + let provider = factory.database_provider_ro().unwrap(); + + // RocksDB has entries with highest_block = 150 which exceeds checkpoint (100) + // Should prune entries where highest_block > 100 (but not u64::MAX sentinel) + let result = rocksdb.check_consistency(&provider).unwrap(); + assert_eq!(result, None, "Should heal by pruning, no unwind needed"); 
+ + // Verify key_block_150 was pruned, but others remain + assert!( + rocksdb.get::(key_block_50).unwrap().is_some(), + "Entry with highest_block=50 should remain" + ); + assert!( + rocksdb.get::(key_block_100).unwrap().is_some(), + "Entry with highest_block=100 should remain" + ); + assert!( + rocksdb.get::(key_block_150).unwrap().is_none(), + "Entry with highest_block=150 should be pruned" + ); + assert!( + rocksdb.get::(key_block_max).unwrap().is_some(), + "Entry with highest_block=u64::MAX (sentinel) should remain" + ); + } + + /// Test that pruning works by fetching transactions and computing their hashes, + /// rather than iterating all rows. This test uses random blocks with unique + /// transactions so we can verify the correct entries are pruned. + #[test] + fn test_prune_transaction_hash_numbers_by_range() { + let temp_dir = TempDir::new().unwrap(); + let rocksdb = RocksDBBuilder::new(temp_dir.path()) + .with_table::() + .build() + .unwrap(); + + // Create a test provider factory for MDBX + let factory = create_test_provider_factory(); + factory.set_storage_settings_cache( + StorageSettings::legacy().with_transaction_hash_numbers_in_rocksdb(true), + ); + + // Generate random blocks with unique transactions + // Block 0 (genesis) has no transactions + // Blocks 1-5 each have 2 transactions = 10 transactions total + let mut rng = generators::rng(); + let blocks = generators::random_block_range( + &mut rng, + 0..=5, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 2..3, ..Default::default() }, + ); + + // Insert blocks into the database + let mut tx_count = 0u64; + let mut tx_hashes = Vec::new(); + { + let provider = factory.database_provider_rw().unwrap(); + + for block in &blocks { + provider + .insert_block(&block.clone().try_recover().expect("recover block")) + .unwrap(); + + // Store transaction hash -> tx_number mappings in RocksDB + for tx in &block.body().transactions { + let hash = tx.trie_hash(); + tx_hashes.push(hash); + rocksdb.put::(hash, &tx_count).unwrap(); + tx_count += 1; + } + } + + // Set checkpoint to block 2 (meaning we should only have tx hashes for blocks 0-2) + // Blocks 0, 1, 2 have 6 transactions (2 each), so tx 0-5 should remain + provider + .save_stage_checkpoint(StageId::TransactionLookup, StageCheckpoint::new(2)) + .unwrap(); + provider.commit().unwrap(); + } + + // At this point: + // - RocksDB has tx hashes for blocks 0-5 (10 total: 2 per block) + // - Checkpoint says we only processed up to block 2 + // - We need to prune tx hashes for blocks 3, 4, 5 (tx 6-9) + + // Verify RocksDB has the expected number of entries before pruning + let rocksdb_count_before: usize = + rocksdb.iter::().unwrap().count(); + assert_eq!( + rocksdb_count_before, tx_count as usize, + "RocksDB should have all {} transaction hashes before pruning", + tx_count + ); + + let provider = factory.database_provider_ro().unwrap(); + + // Verify we can fetch transactions by tx range + let all_txs = provider.transactions_by_tx_range(0..tx_count).unwrap(); + assert_eq!(all_txs.len(), tx_count as usize, "Should be able to fetch all transactions"); + + // Verify the hashes match between what we stored and what we compute from fetched txs + for (i, tx) in all_txs.iter().enumerate() { + let computed_hash = tx.trie_hash(); + assert_eq!( + computed_hash, tx_hashes[i], + "Hash mismatch for tx {}: stored {:?} vs computed {:?}", + i, tx_hashes[i], computed_hash + ); + } + + // Blocks 0, 1, 2 have 2 tx each = 6 tx total (indices 0-5) + // We want to keep tx 0-5, prune tx 6-9 + let 
max_tx_to_keep = 5u64; + let tx_to_prune_start = max_tx_to_keep + 1; + + // Prune transactions 6-9 (blocks 3-5) + rocksdb + .prune_transaction_hash_numbers_in_range(&provider, tx_to_prune_start..=(tx_count - 1)) + .expect("prune should succeed"); + + // Verify: transactions 0-5 should remain, 6-9 should be pruned + let mut remaining_count = 0; + for result in rocksdb.iter::().unwrap() { + let (_hash, tx_num) = result.unwrap(); + assert!( + tx_num <= max_tx_to_keep, + "Transaction {} should have been pruned (> {})", + tx_num, + max_tx_to_keep + ); + remaining_count += 1; + } + assert_eq!( + remaining_count, + (max_tx_to_keep + 1) as usize, + "Should have {} transactions (0-{})", + max_tx_to_keep + 1, + max_tx_to_keep + ); + } +} diff --git a/crates/storage/provider/src/providers/rocksdb/metrics.rs b/crates/storage/provider/src/providers/rocksdb/metrics.rs new file mode 100644 index 0000000000..890d9faac2 --- /dev/null +++ b/crates/storage/provider/src/providers/rocksdb/metrics.rs @@ -0,0 +1,91 @@ +use std::{collections::HashMap, time::Duration}; + +use itertools::Itertools; +use metrics::{Counter, Histogram}; +use reth_db::Tables; +use reth_metrics::Metrics; +use strum::{EnumIter, IntoEnumIterator}; + +const ROCKSDB_TABLES: &[&str] = &[Tables::TransactionHashNumbers.name()]; + +/// Metrics for the `RocksDB` provider. +#[derive(Debug)] +pub(crate) struct RocksDBMetrics { + operations: HashMap<(&'static str, RocksDBOperation), RocksDBOperationMetrics>, +} + +impl Default for RocksDBMetrics { + fn default() -> Self { + let mut operations = ROCKSDB_TABLES + .iter() + .copied() + .cartesian_product(RocksDBOperation::iter()) + .map(|(table, operation)| { + ( + (table, operation), + RocksDBOperationMetrics::new_with_labels(&[ + ("table", table), + ("operation", operation.as_str()), + ]), + ) + }) + .collect::>(); + + // Add special "Batch" entry for batch write operations + operations.insert( + ("Batch", RocksDBOperation::BatchWrite), + RocksDBOperationMetrics::new_with_labels(&[ + ("table", "Batch"), + ("operation", RocksDBOperation::BatchWrite.as_str()), + ]), + ); + + Self { operations } + } +} + +impl RocksDBMetrics { + /// Records operation metrics with the given operation label and table name. + pub(crate) fn record_operation( + &self, + operation: RocksDBOperation, + table: &'static str, + duration: Duration, + ) { + let metrics = + self.operations.get(&(table, operation)).expect("operation metrics should exist"); + + metrics.calls_total.increment(1); + metrics.duration_seconds.record(duration.as_secs_f64()); + } +} + +/// `RocksDB` operations that are tracked +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EnumIter)] +pub(crate) enum RocksDBOperation { + Get, + Put, + Delete, + BatchWrite, +} + +impl RocksDBOperation { + const fn as_str(&self) -> &'static str { + match self { + Self::Get => "get", + Self::Put => "put", + Self::Delete => "delete", + Self::BatchWrite => "batch-write", + } + } +} + +/// Metrics for a specific `RocksDB` operation on a table +#[derive(Metrics, Clone)] +#[metrics(scope = "rocksdb.provider")] +pub(crate) struct RocksDBOperationMetrics { + /// Total number of calls + calls_total: Counter, + /// Duration of operations + duration_seconds: Histogram, +} diff --git a/crates/storage/provider/src/providers/rocksdb/mod.rs b/crates/storage/provider/src/providers/rocksdb/mod.rs new file mode 100644 index 0000000000..5c6cf11f32 --- /dev/null +++ b/crates/storage/provider/src/providers/rocksdb/mod.rs @@ -0,0 +1,7 @@ +//! 
[`RocksDBProvider`] implementation + +mod invariants; +mod metrics; +mod provider; + +pub use provider::{RocksDBBatch, RocksDBBuilder, RocksDBProvider, RocksTx}; diff --git a/crates/storage/provider/src/providers/rocksdb/provider.rs b/crates/storage/provider/src/providers/rocksdb/provider.rs new file mode 100644 index 0000000000..5039e86d3f --- /dev/null +++ b/crates/storage/provider/src/providers/rocksdb/provider.rs @@ -0,0 +1,1097 @@ +use super::metrics::{RocksDBMetrics, RocksDBOperation}; +use reth_db_api::{ + table::{Compress, Decompress, Encode, Table}, + tables, DatabaseError, +}; +use reth_storage_errors::{ + db::{DatabaseErrorInfo, DatabaseWriteError, DatabaseWriteOperation, LogLevel}, + provider::{ProviderError, ProviderResult}, +}; +use rocksdb::{ + BlockBasedOptions, Cache, ColumnFamilyDescriptor, CompactionPri, DBCompressionType, + IteratorMode, Options, Transaction, TransactionDB, TransactionDBOptions, TransactionOptions, + WriteBatchWithTransaction, WriteOptions, +}; +use std::{ + fmt, + path::{Path, PathBuf}, + sync::Arc, + time::Instant, +}; + +/// Default cache size for `RocksDB` block cache (128 MB). +const DEFAULT_CACHE_SIZE: usize = 128 << 20; + +/// Default block size for `RocksDB` tables (16 KB). +const DEFAULT_BLOCK_SIZE: usize = 16 * 1024; + +/// Default max background jobs for `RocksDB` compaction and flushing. +const DEFAULT_MAX_BACKGROUND_JOBS: i32 = 6; + +/// Default bytes per sync for `RocksDB` WAL writes (1 MB). +const DEFAULT_BYTES_PER_SYNC: u64 = 1_048_576; + +/// Default bloom filter bits per key (~1% false positive rate). +const DEFAULT_BLOOM_FILTER_BITS: f64 = 10.0; + +/// Default buffer capacity for compression in batches. +/// 4 KiB matches common block/page sizes and comfortably holds typical history values, +/// reducing the first few reallocations without over-allocating. +const DEFAULT_COMPRESS_BUF_CAPACITY: usize = 4096; + +/// Builder for [`RocksDBProvider`]. +pub struct RocksDBBuilder { + path: PathBuf, + column_families: Vec, + enable_metrics: bool, + enable_statistics: bool, + log_level: rocksdb::LogLevel, + block_cache: Cache, +} + +impl fmt::Debug for RocksDBBuilder { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RocksDBBuilder") + .field("path", &self.path) + .field("column_families", &self.column_families) + .field("enable_metrics", &self.enable_metrics) + .finish() + } +} + +impl RocksDBBuilder { + /// Creates a new builder with optimized default options. + pub fn new(path: impl AsRef) -> Self { + let cache = Cache::new_lru_cache(DEFAULT_CACHE_SIZE); + Self { + path: path.as_ref().to_path_buf(), + column_families: Vec::new(), + enable_metrics: false, + enable_statistics: false, + log_level: rocksdb::LogLevel::Info, + block_cache: cache, + } + } + + /// Creates default table options with shared block cache. + fn default_table_options(cache: &Cache) -> BlockBasedOptions { + let mut table_options = BlockBasedOptions::default(); + table_options.set_block_size(DEFAULT_BLOCK_SIZE); + table_options.set_cache_index_and_filter_blocks(true); + table_options.set_pin_l0_filter_and_index_blocks_in_cache(true); + // Shared block cache for all column families. + table_options.set_block_cache(cache); + // Bloom filter: 10 bits/key = ~1% false positive rate, full filter for better read + // performance. this setting is good trade off a little bit of memory for better + // point lookup performance. 
see https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter#configuration-basics
+        table_options.set_bloom_filter(DEFAULT_BLOOM_FILTER_BITS, false);
+        table_options.set_optimize_filters_for_memory(true);
+        table_options
+    }
+
+    /// Creates optimized `RocksDB` options per `RocksDB` wiki recommendations.
+    fn default_options(
+        log_level: rocksdb::LogLevel,
+        cache: &Cache,
+        enable_statistics: bool,
+    ) -> Options {
+        // Follow the recommended tuning guide from the RocksDB wiki, see https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning
+        let table_options = Self::default_table_options(cache);
+
+        let mut options = Options::default();
+        options.set_block_based_table_factory(&table_options);
+        options.create_if_missing(true);
+        options.create_missing_column_families(true);
+        options.set_max_background_jobs(DEFAULT_MAX_BACKGROUND_JOBS);
+        options.set_bytes_per_sync(DEFAULT_BYTES_PER_SYNC);
+
+        options.set_bottommost_compression_type(DBCompressionType::Zstd);
+        options.set_bottommost_zstd_max_train_bytes(0, true);
+        options.set_compression_type(DBCompressionType::Lz4);
+        options.set_compaction_pri(CompactionPri::MinOverlappingRatio);
+
+        options.set_log_level(log_level);
+
+        // Statistics can be viewed in the RocksDB log file
+        if enable_statistics {
+            options.enable_statistics();
+        }
+
+        options
+    }
+
+    /// Creates optimized column family options.
+    fn default_column_family_options(cache: &Cache) -> Options {
+        // Follow the recommended tuning guide from the RocksDB wiki, see https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning
+        let table_options = Self::default_table_options(cache);
+
+        let mut cf_options = Options::default();
+        cf_options.set_block_based_table_factory(&table_options);
+        cf_options.set_level_compaction_dynamic_level_bytes(true);
+        // Zstd is recommended for bottommost compression and Lz4 for the other levels, see https://github.com/facebook/rocksdb/wiki/Compression#configuration
+        cf_options.set_compression_type(DBCompressionType::Lz4);
+        cf_options.set_bottommost_compression_type(DBCompressionType::Zstd);
+        // Only use Zstd compression, disable dictionary training
+        cf_options.set_bottommost_zstd_max_train_bytes(0, true);
+
+        cf_options
+    }
+
+    /// Adds a column family for a specific table type.
+    pub fn with_table<T: Table>(mut self) -> Self {
+        self.column_families.push(T::NAME.to_string());
+        self
+    }
+
+    /// Registers the default tables used by reth for `RocksDB` storage.
+    ///
+    /// This registers:
+    /// - [`tables::TransactionHashNumbers`] - Transaction hash to number mapping
+    /// - [`tables::AccountsHistory`] - Account history index
+    /// - [`tables::StoragesHistory`] - Storage history index
+    pub fn with_default_tables(self) -> Self {
+        self.with_table::<tables::TransactionHashNumbers>()
+            .with_table::<tables::AccountsHistory>()
+            .with_table::<tables::StoragesHistory>()
+    }
+
+    /// Enables metrics.
+    pub const fn with_metrics(mut self) -> Self {
+        self.enable_metrics = true;
+        self
+    }
+
+    /// Enables `RocksDB` internal statistics collection.
+    pub const fn with_statistics(mut self) -> Self {
+        self.enable_statistics = true;
+        self
+    }
+
+    /// Sets the log level from `DatabaseArgs` configuration.
+    pub const fn with_database_log_level(mut self, log_level: Option<LogLevel>) -> Self {
+        if let Some(level) = log_level {
+            self.log_level = convert_log_level(level);
+        }
+        self
+    }
+
+    /// Sets a custom block cache size.
+    pub fn with_block_cache_size(mut self, capacity_bytes: usize) -> Self {
+        self.block_cache = Cache::new_lru_cache(capacity_bytes);
+        self
+    }
+
+    /// Builds the [`RocksDBProvider`].
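+    ///
+    /// A rough usage sketch; the path, table choice and written values below are
+    /// illustrative only:
+    ///
+    /// ```ignore
+    /// // "/path/to/rocksdb" and the written entry are placeholders, not fixed defaults.
+    /// let provider = RocksDBBuilder::new("/path/to/rocksdb")
+    ///     .with_default_tables()
+    ///     .with_metrics()
+    ///     .build()?;
+    /// provider.put::<tables::TransactionHashNumbers>(tx_hash, &tx_number)?;
+    /// ```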
+ pub fn build(self) -> ProviderResult { + let options = + Self::default_options(self.log_level, &self.block_cache, self.enable_statistics); + + let cf_descriptors: Vec = self + .column_families + .iter() + .map(|name| { + ColumnFamilyDescriptor::new( + name.clone(), + Self::default_column_family_options(&self.block_cache), + ) + }) + .collect(); + + // Use TransactionDB for MDBX-like transaction semantics (read-your-writes, rollback) + let txn_db_options = TransactionDBOptions::default(); + let db = TransactionDB::open_cf_descriptors( + &options, + &txn_db_options, + &self.path, + cf_descriptors, + ) + .map_err(|e| { + ProviderError::Database(DatabaseError::Open(DatabaseErrorInfo { + message: e.to_string().into(), + code: -1, + })) + })?; + + let metrics = self.enable_metrics.then(RocksDBMetrics::default); + + Ok(RocksDBProvider(Arc::new(RocksDBProviderInner { db, metrics }))) + } +} + +/// Some types don't support compression (eg. B256), and we don't want to be copying them to the +/// allocated buffer when we can just use their reference. +macro_rules! compress_to_buf_or_ref { + ($buf:expr, $value:expr) => { + if let Some(value) = $value.uncompressable_ref() { + Some(value) + } else { + $buf.clear(); + $value.compress_to_buf(&mut $buf); + None + } + }; +} + +/// `RocksDB` provider for auxiliary storage layer beside main database MDBX. +#[derive(Debug)] +pub struct RocksDBProvider(Arc); + +/// Inner state for `RocksDB` provider. +struct RocksDBProviderInner { + /// `RocksDB` database instance with transaction support. + db: TransactionDB, + /// Metrics latency & operations. + metrics: Option, +} + +impl fmt::Debug for RocksDBProviderInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RocksDBProviderInner") + .field("db", &"") + .field("metrics", &self.metrics) + .finish() + } +} + +impl Clone for RocksDBProvider { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +impl RocksDBProvider { + /// Creates a new `RocksDB` provider. + pub fn new(path: impl AsRef) -> ProviderResult { + RocksDBBuilder::new(path).build() + } + + /// Creates a new `RocksDB` provider builder. + pub fn builder(path: impl AsRef) -> RocksDBBuilder { + RocksDBBuilder::new(path) + } + + /// Creates a new transaction with MDBX-like semantics (read-your-writes, rollback). + pub fn tx(&self) -> RocksTx<'_> { + let write_options = WriteOptions::default(); + let txn_options = TransactionOptions::default(); + let inner = self.0.db.transaction_opt(&write_options, &txn_options); + RocksTx { inner, provider: self } + } + + /// Creates a new batch for atomic writes. + /// + /// Use [`Self::write_batch`] for closure-based atomic writes. + /// Use this method when the batch needs to be held by [`crate::EitherWriter`]. + pub fn batch(&self) -> RocksDBBatch<'_> { + RocksDBBatch { + provider: self, + inner: WriteBatchWithTransaction::::default(), + buf: Vec::with_capacity(DEFAULT_COMPRESS_BUF_CAPACITY), + } + } + + /// Gets the column family handle for a table. + fn get_cf_handle(&self) -> Result<&rocksdb::ColumnFamily, DatabaseError> { + self.0 + .db + .cf_handle(T::NAME) + .ok_or_else(|| DatabaseError::Other(format!("Column family '{}' not found", T::NAME))) + } + + /// Executes a function and records metrics with the given operation and table name. 
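+    ///
+    /// Timing is only measured when metrics are enabled; otherwise the closure runs
+    /// without taking timestamps.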
+ fn execute_with_operation_metric( + &self, + operation: RocksDBOperation, + table: &'static str, + f: impl FnOnce(&Self) -> T, + ) -> T { + let start = self.0.metrics.as_ref().map(|_| Instant::now()); + let res = f(self); + + if let (Some(start), Some(metrics)) = (start, &self.0.metrics) { + metrics.record_operation(operation, table, start.elapsed()); + } + + res + } + + /// Gets a value from the specified table. + pub fn get(&self, key: T::Key) -> ProviderResult> { + self.get_encoded::(&key.encode()) + } + + /// Gets a value from the specified table using pre-encoded key. + pub fn get_encoded( + &self, + key: &::Encoded, + ) -> ProviderResult> { + self.execute_with_operation_metric(RocksDBOperation::Get, T::NAME, |this| { + let result = + this.0.db.get_cf(this.get_cf_handle::()?, key.as_ref()).map_err(|e| { + ProviderError::Database(DatabaseError::Read(DatabaseErrorInfo { + message: e.to_string().into(), + code: -1, + })) + })?; + + Ok(result.and_then(|value| T::Value::decompress(&value).ok())) + }) + } + + /// Puts upsert a value into the specified table with the given key. + pub fn put(&self, key: T::Key, value: &T::Value) -> ProviderResult<()> { + let encoded_key = key.encode(); + self.put_encoded::(&encoded_key, value) + } + + /// Puts a value into the specified table using pre-encoded key. + pub fn put_encoded( + &self, + key: &::Encoded, + value: &T::Value, + ) -> ProviderResult<()> { + self.execute_with_operation_metric(RocksDBOperation::Put, T::NAME, |this| { + // for simplify the code, we need allocate buf here each time because `RocksDBProvider` + // is thread safe if user want to avoid allocate buf each time, they can use + // write_batch api + let mut buf = Vec::new(); + let value_bytes = compress_to_buf_or_ref!(buf, value).unwrap_or(&buf); + + this.0.db.put_cf(this.get_cf_handle::()?, key, value_bytes).map_err(|e| { + ProviderError::Database(DatabaseError::Write(Box::new(DatabaseWriteError { + info: DatabaseErrorInfo { message: e.to_string().into(), code: -1 }, + operation: DatabaseWriteOperation::PutUpsert, + table_name: T::NAME, + key: key.as_ref().to_vec(), + }))) + }) + }) + } + + /// Deletes a value from the specified table. + pub fn delete(&self, key: T::Key) -> ProviderResult<()> { + self.execute_with_operation_metric(RocksDBOperation::Delete, T::NAME, |this| { + this.0.db.delete_cf(this.get_cf_handle::()?, key.encode().as_ref()).map_err(|e| { + ProviderError::Database(DatabaseError::Delete(DatabaseErrorInfo { + message: e.to_string().into(), + code: -1, + })) + }) + }) + } + + /// Gets the first (smallest key) entry from the specified table. + pub fn first(&self) -> ProviderResult> { + self.execute_with_operation_metric(RocksDBOperation::Get, T::NAME, |this| { + let cf = this.get_cf_handle::()?; + let mut iter = this.0.db.iterator_cf(cf, IteratorMode::Start); + + match iter.next() { + Some(Ok((key_bytes, value_bytes))) => { + let key = ::decode(&key_bytes) + .map_err(|_| ProviderError::Database(DatabaseError::Decode))?; + let value = T::Value::decompress(&value_bytes) + .map_err(|_| ProviderError::Database(DatabaseError::Decode))?; + Ok(Some((key, value))) + } + Some(Err(e)) => { + Err(ProviderError::Database(DatabaseError::Read(DatabaseErrorInfo { + message: e.to_string().into(), + code: -1, + }))) + } + None => Ok(None), + } + }) + } + + /// Gets the last (largest key) entry from the specified table. 
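+    ///
+    /// "Largest" is determined by the encoded key bytes under RocksDB's default bytewise
+    /// comparator; big-endian integer keys therefore retain their numeric ordering.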
+ pub fn last(&self) -> ProviderResult> { + self.execute_with_operation_metric(RocksDBOperation::Get, T::NAME, |this| { + let cf = this.get_cf_handle::()?; + let mut iter = this.0.db.iterator_cf(cf, IteratorMode::End); + + match iter.next() { + Some(Ok((key_bytes, value_bytes))) => { + let key = ::decode(&key_bytes) + .map_err(|_| ProviderError::Database(DatabaseError::Decode))?; + let value = T::Value::decompress(&value_bytes) + .map_err(|_| ProviderError::Database(DatabaseError::Decode))?; + Ok(Some((key, value))) + } + Some(Err(e)) => { + Err(ProviderError::Database(DatabaseError::Read(DatabaseErrorInfo { + message: e.to_string().into(), + code: -1, + }))) + } + None => Ok(None), + } + }) + } + + /// Creates an iterator over all entries in the specified table. + /// + /// Returns decoded `(Key, Value)` pairs in key order. + pub fn iter(&self) -> ProviderResult> { + let cf = self.get_cf_handle::()?; + let iter = self.0.db.iterator_cf(cf, IteratorMode::Start); + Ok(RocksDBIter { inner: iter, _marker: std::marker::PhantomData }) + } + + /// Writes a batch of operations atomically. + pub fn write_batch(&self, f: F) -> ProviderResult<()> + where + F: FnOnce(&mut RocksDBBatch<'_>) -> ProviderResult<()>, + { + self.execute_with_operation_metric(RocksDBOperation::BatchWrite, "Batch", |this| { + let mut batch_handle = this.batch(); + f(&mut batch_handle)?; + batch_handle.commit() + }) + } + + /// Commits a raw `WriteBatchWithTransaction` to `RocksDB`. + /// + /// This is used when the batch was extracted via [`RocksDBBatch::into_inner`] + /// and needs to be committed at a later point (e.g., at provider commit time). + pub fn commit_batch(&self, batch: WriteBatchWithTransaction) -> ProviderResult<()> { + self.0.db.write_opt(batch, &WriteOptions::default()).map_err(|e| { + ProviderError::Database(DatabaseError::Commit(DatabaseErrorInfo { + message: e.to_string().into(), + code: -1, + })) + }) + } +} + +/// Handle for building a batch of operations atomically. +/// +/// Uses `WriteBatchWithTransaction` for atomic writes without full transaction overhead. +/// Unlike [`RocksTx`], this does NOT support read-your-writes. Use for write-only flows +/// where you don't need to read back uncommitted data within the same operation +/// (e.g., history index writes). +#[must_use = "batch must be committed"] +pub struct RocksDBBatch<'a> { + provider: &'a RocksDBProvider, + inner: WriteBatchWithTransaction, + buf: Vec, +} + +impl fmt::Debug for RocksDBBatch<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RocksDBBatch") + .field("provider", &self.provider) + .field("batch", &"") + // Number of operations in this batch + .field("length", &self.inner.len()) + // Total serialized size (encoded key + compressed value + metadata) of this batch + // in bytes + .field("size_in_bytes", &self.inner.size_in_bytes()) + .finish() + } +} + +impl<'a> RocksDBBatch<'a> { + /// Puts a value into the batch. + pub fn put(&mut self, key: T::Key, value: &T::Value) -> ProviderResult<()> { + let encoded_key = key.encode(); + self.put_encoded::(&encoded_key, value) + } + + /// Puts a value into the batch using pre-encoded key. + pub fn put_encoded( + &mut self, + key: &::Encoded, + value: &T::Value, + ) -> ProviderResult<()> { + let value_bytes = compress_to_buf_or_ref!(self.buf, value).unwrap_or(&self.buf); + self.inner.put_cf(self.provider.get_cf_handle::()?, key, value_bytes); + Ok(()) + } + + /// Deletes a value from the batch. 
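+    ///
+    /// Like puts, the delete is only buffered here; it is applied atomically with the rest
+    /// of the batch when the batch is committed.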
+ pub fn delete(&mut self, key: T::Key) -> ProviderResult<()> { + self.inner.delete_cf(self.provider.get_cf_handle::()?, key.encode().as_ref()); + Ok(()) + } + + /// Commits the batch to the database. + /// + /// This consumes the batch and writes all operations atomically to `RocksDB`. + pub fn commit(self) -> ProviderResult<()> { + self.provider.0.db.write_opt(self.inner, &WriteOptions::default()).map_err(|e| { + ProviderError::Database(DatabaseError::Commit(DatabaseErrorInfo { + message: e.to_string().into(), + code: -1, + })) + }) + } + + /// Returns the number of write operations (puts + deletes) queued in this batch. + pub fn len(&self) -> usize { + self.inner.len() + } + + /// Returns `true` if the batch contains no operations. + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + /// Returns a reference to the underlying `RocksDB` provider. + pub const fn provider(&self) -> &RocksDBProvider { + self.provider + } + + /// Consumes the batch and returns the underlying `WriteBatchWithTransaction`. + /// + /// This is used to defer commits to the provider level. + pub fn into_inner(self) -> WriteBatchWithTransaction { + self.inner + } +} + +/// `RocksDB` transaction wrapper providing MDBX-like semantics. +/// +/// Supports: +/// - Read-your-writes: reads see uncommitted writes within the same transaction +/// - Atomic commit/rollback +/// - Iteration over uncommitted data +/// +/// Note: `Transaction` is `Send` but NOT `Sync`. This wrapper does not implement +/// `DbTx`/`DbTxMut` traits directly; use RocksDB-specific methods instead. +pub struct RocksTx<'db> { + inner: Transaction<'db, TransactionDB>, + provider: &'db RocksDBProvider, +} + +impl fmt::Debug for RocksTx<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RocksTx").field("provider", &self.provider).finish_non_exhaustive() + } +} + +impl<'db> RocksTx<'db> { + /// Gets a value from the specified table. Sees uncommitted writes in this transaction. + pub fn get(&self, key: T::Key) -> ProviderResult> { + let encoded_key = key.encode(); + self.get_encoded::(&encoded_key) + } + + /// Gets a value using pre-encoded key. Sees uncommitted writes in this transaction. + pub fn get_encoded( + &self, + key: &::Encoded, + ) -> ProviderResult> { + let cf = self.provider.get_cf_handle::()?; + let result = self.inner.get_cf(cf, key.as_ref()).map_err(|e| { + ProviderError::Database(DatabaseError::Read(DatabaseErrorInfo { + message: e.to_string().into(), + code: -1, + })) + })?; + + Ok(result.and_then(|value| T::Value::decompress(&value).ok())) + } + + /// Puts a value into the specified table. + pub fn put(&self, key: T::Key, value: &T::Value) -> ProviderResult<()> { + let encoded_key = key.encode(); + self.put_encoded::(&encoded_key, value) + } + + /// Puts a value using pre-encoded key. + pub fn put_encoded( + &self, + key: &::Encoded, + value: &T::Value, + ) -> ProviderResult<()> { + let cf = self.provider.get_cf_handle::()?; + let mut buf = Vec::new(); + let value_bytes = compress_to_buf_or_ref!(buf, value).unwrap_or(&buf); + + self.inner.put_cf(cf, key.as_ref(), value_bytes).map_err(|e| { + ProviderError::Database(DatabaseError::Write(Box::new(DatabaseWriteError { + info: DatabaseErrorInfo { message: e.to_string().into(), code: -1 }, + operation: DatabaseWriteOperation::PutUpsert, + table_name: T::NAME, + key: key.as_ref().to_vec(), + }))) + }) + } + + /// Deletes a value from the specified table. 
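+    ///
+    /// The delete participates in the transaction: subsequent reads on the same transaction
+    /// will no longer see the key, and the delete is discarded on [`Self::rollback`].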
+ pub fn delete(&self, key: T::Key) -> ProviderResult<()> { + let cf = self.provider.get_cf_handle::()?; + self.inner.delete_cf(cf, key.encode().as_ref()).map_err(|e| { + ProviderError::Database(DatabaseError::Delete(DatabaseErrorInfo { + message: e.to_string().into(), + code: -1, + })) + }) + } + + /// Creates an iterator for the specified table. Sees uncommitted writes in this transaction. + /// + /// Returns an iterator that yields `(encoded_key, compressed_value)` pairs. + pub fn iter(&self) -> ProviderResult> { + let cf = self.provider.get_cf_handle::()?; + let iter = self.inner.iterator_cf(cf, IteratorMode::Start); + Ok(RocksTxIter { inner: iter, _marker: std::marker::PhantomData }) + } + + /// Creates an iterator starting from the given key (inclusive). + pub fn iter_from(&self, key: T::Key) -> ProviderResult> { + let cf = self.provider.get_cf_handle::()?; + let encoded_key = key.encode(); + let iter = self + .inner + .iterator_cf(cf, IteratorMode::From(encoded_key.as_ref(), rocksdb::Direction::Forward)); + Ok(RocksTxIter { inner: iter, _marker: std::marker::PhantomData }) + } + + /// Commits the transaction, persisting all changes. + pub fn commit(self) -> ProviderResult<()> { + self.inner.commit().map_err(|e| { + ProviderError::Database(DatabaseError::Commit(DatabaseErrorInfo { + message: e.to_string().into(), + code: -1, + })) + }) + } + + /// Rolls back the transaction, discarding all changes. + pub fn rollback(self) -> ProviderResult<()> { + self.inner.rollback().map_err(|e| { + ProviderError::Database(DatabaseError::Other(format!("rollback failed: {e}"))) + }) + } +} + +/// Iterator over a `RocksDB` table (non-transactional). +/// +/// Yields decoded `(Key, Value)` pairs in key order. +pub struct RocksDBIter<'db, T: Table> { + inner: rocksdb::DBIteratorWithThreadMode<'db, TransactionDB>, + _marker: std::marker::PhantomData, +} + +impl fmt::Debug for RocksDBIter<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RocksDBIter").field("table", &T::NAME).finish_non_exhaustive() + } +} + +impl Iterator for RocksDBIter<'_, T> { + type Item = ProviderResult<(T::Key, T::Value)>; + + fn next(&mut self) -> Option { + let (key_bytes, value_bytes) = match self.inner.next()? { + Ok(kv) => kv, + Err(e) => { + return Some(Err(ProviderError::Database(DatabaseError::Read(DatabaseErrorInfo { + message: e.to_string().into(), + code: -1, + })))) + } + }; + + // Decode key + let key = match ::decode(&key_bytes) { + Ok(k) => k, + Err(_) => return Some(Err(ProviderError::Database(DatabaseError::Decode))), + }; + + // Decompress value + let value = match T::Value::decompress(&value_bytes) { + Ok(v) => v, + Err(_) => return Some(Err(ProviderError::Database(DatabaseError::Decode))), + }; + + Some(Ok((key, value))) + } +} + +/// Iterator over a `RocksDB` table within a transaction. +/// +/// Yields decoded `(Key, Value)` pairs. Sees uncommitted writes. +pub struct RocksTxIter<'tx, T: Table> { + inner: rocksdb::DBIteratorWithThreadMode<'tx, Transaction<'tx, TransactionDB>>, + _marker: std::marker::PhantomData, +} + +impl fmt::Debug for RocksTxIter<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RocksTxIter").field("table", &T::NAME).finish_non_exhaustive() + } +} + +impl Iterator for RocksTxIter<'_, T> { + type Item = ProviderResult<(T::Key, T::Value)>; + + fn next(&mut self) -> Option { + let (key_bytes, value_bytes) = match self.inner.next()? 
{ + Ok(kv) => kv, + Err(e) => { + return Some(Err(ProviderError::Database(DatabaseError::Read(DatabaseErrorInfo { + message: e.to_string().into(), + code: -1, + })))) + } + }; + + // Decode key + let key = match ::decode(&key_bytes) { + Ok(k) => k, + Err(_) => return Some(Err(ProviderError::Database(DatabaseError::Decode))), + }; + + // Decompress value + let value = match T::Value::decompress(&value_bytes) { + Ok(v) => v, + Err(_) => return Some(Err(ProviderError::Database(DatabaseError::Decode))), + }; + + Some(Ok((key, value))) + } +} + +/// Converts Reth's [`LogLevel`] to `RocksDB`'s [`rocksdb::LogLevel`]. +const fn convert_log_level(level: LogLevel) -> rocksdb::LogLevel { + match level { + LogLevel::Fatal => rocksdb::LogLevel::Fatal, + LogLevel::Error => rocksdb::LogLevel::Error, + LogLevel::Warn => rocksdb::LogLevel::Warn, + LogLevel::Notice | LogLevel::Verbose => rocksdb::LogLevel::Info, + LogLevel::Debug | LogLevel::Trace | LogLevel::Extra => rocksdb::LogLevel::Debug, + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::{Address, TxHash, B256}; + use reth_db_api::{ + models::{sharded_key::ShardedKey, storage_sharded_key::StorageShardedKey, IntegerList}, + table::Table, + tables, + }; + use tempfile::TempDir; + + #[test] + fn test_with_default_tables_registers_required_column_families() { + let temp_dir = TempDir::new().unwrap(); + + // Build with default tables + let provider = RocksDBBuilder::new(temp_dir.path()).with_default_tables().build().unwrap(); + + // Should be able to write/read TransactionHashNumbers + let tx_hash = TxHash::from(B256::from([1u8; 32])); + provider.put::(tx_hash, &100).unwrap(); + assert_eq!(provider.get::(tx_hash).unwrap(), Some(100)); + + // Should be able to write/read AccountsHistory + let key = ShardedKey::new(Address::ZERO, 100); + let value = IntegerList::default(); + provider.put::(key.clone(), &value).unwrap(); + assert!(provider.get::(key).unwrap().is_some()); + + // Should be able to write/read StoragesHistory + let key = StorageShardedKey::new(Address::ZERO, B256::ZERO, 100); + provider.put::(key.clone(), &value).unwrap(); + assert!(provider.get::(key).unwrap().is_some()); + } + + #[derive(Debug)] + struct TestTable; + + impl Table for TestTable { + const NAME: &'static str = "TestTable"; + const DUPSORT: bool = false; + type Key = u64; + type Value = Vec; + } + + #[test] + fn test_basic_operations() { + let temp_dir = TempDir::new().unwrap(); + + let provider = RocksDBBuilder::new(temp_dir.path()) + .with_table::() // Type-safe! 
+ .build() + .unwrap(); + + let key = 42u64; + let value = b"test_value".to_vec(); + + // Test write + provider.put::(key, &value).unwrap(); + + // Test read + let result = provider.get::(key).unwrap(); + assert_eq!(result, Some(value)); + + // Test delete + provider.delete::(key).unwrap(); + + // Verify deletion + assert_eq!(provider.get::(key).unwrap(), None); + } + + #[test] + fn test_batch_operations() { + let temp_dir = TempDir::new().unwrap(); + let provider = + RocksDBBuilder::new(temp_dir.path()).with_table::().build().unwrap(); + + // Write multiple entries in a batch + provider + .write_batch(|batch| { + for i in 0..10u64 { + let value = format!("value_{i}").into_bytes(); + batch.put::(i, &value)?; + } + Ok(()) + }) + .unwrap(); + + // Read all entries + for i in 0..10u64 { + let value = format!("value_{i}").into_bytes(); + assert_eq!(provider.get::(i).unwrap(), Some(value)); + } + + // Delete all entries in a batch + provider + .write_batch(|batch| { + for i in 0..10u64 { + batch.delete::(i)?; + } + Ok(()) + }) + .unwrap(); + + // Verify all deleted + for i in 0..10u64 { + assert_eq!(provider.get::(i).unwrap(), None); + } + } + + #[test] + fn test_with_real_table() { + let temp_dir = TempDir::new().unwrap(); + let provider = RocksDBBuilder::new(temp_dir.path()) + .with_table::() + .with_metrics() + .build() + .unwrap(); + + let tx_hash = TxHash::from(B256::from([1u8; 32])); + + // Insert and retrieve + provider.put::(tx_hash, &100).unwrap(); + assert_eq!(provider.get::(tx_hash).unwrap(), Some(100)); + + // Batch insert multiple transactions + provider + .write_batch(|batch| { + for i in 0..10u64 { + let hash = TxHash::from(B256::from([i as u8; 32])); + let value = i * 100; + batch.put::(hash, &value)?; + } + Ok(()) + }) + .unwrap(); + + // Verify batch insertions + for i in 0..10u64 { + let hash = TxHash::from(B256::from([i as u8; 32])); + assert_eq!( + provider.get::(hash).unwrap(), + Some(i * 100) + ); + } + } + #[test] + fn test_statistics_enabled() { + let temp_dir = TempDir::new().unwrap(); + // Just verify that building with statistics doesn't panic + let provider = RocksDBBuilder::new(temp_dir.path()) + .with_table::() + .with_statistics() + .build() + .unwrap(); + + // Do operations - data should be immediately readable with TransactionDB + for i in 0..10 { + let value = vec![i as u8]; + provider.put::(i, &value).unwrap(); + // Verify write is visible + assert_eq!(provider.get::(i).unwrap(), Some(value)); + } + } + + #[test] + fn test_data_persistence() { + let temp_dir = TempDir::new().unwrap(); + let provider = + RocksDBBuilder::new(temp_dir.path()).with_table::().build().unwrap(); + + // Insert data - TransactionDB writes are immediately visible + let value = vec![42u8; 1000]; + for i in 0..100 { + provider.put::(i, &value).unwrap(); + } + + // Verify data is readable + for i in 0..100 { + assert!(provider.get::(i).unwrap().is_some(), "Data should be readable"); + } + } + + #[test] + fn test_transaction_read_your_writes() { + let temp_dir = TempDir::new().unwrap(); + let provider = + RocksDBBuilder::new(temp_dir.path()).with_table::().build().unwrap(); + + // Create a transaction + let tx = provider.tx(); + + // Write data within the transaction + let key = 42u64; + let value = b"test_value".to_vec(); + tx.put::(key, &value).unwrap(); + + // Read-your-writes: should see uncommitted data in same transaction + let result = tx.get::(key).unwrap(); + assert_eq!( + result, + Some(value.clone()), + "Transaction should see its own uncommitted writes" + ); + + // Data should 
NOT be visible via provider (outside transaction) + let provider_result = provider.get::(key).unwrap(); + assert_eq!(provider_result, None, "Uncommitted data should not be visible outside tx"); + + // Commit the transaction + tx.commit().unwrap(); + + // Now data should be visible via provider + let committed_result = provider.get::(key).unwrap(); + assert_eq!(committed_result, Some(value), "Committed data should be visible"); + } + + #[test] + fn test_transaction_rollback() { + let temp_dir = TempDir::new().unwrap(); + let provider = + RocksDBBuilder::new(temp_dir.path()).with_table::().build().unwrap(); + + // First, put some initial data + let key = 100u64; + let initial_value = b"initial".to_vec(); + provider.put::(key, &initial_value).unwrap(); + + // Create a transaction and modify data + let tx = provider.tx(); + let new_value = b"modified".to_vec(); + tx.put::(key, &new_value).unwrap(); + + // Verify modification is visible within transaction + assert_eq!(tx.get::(key).unwrap(), Some(new_value)); + + // Rollback instead of commit + tx.rollback().unwrap(); + + // Data should be unchanged (initial value) + let result = provider.get::(key).unwrap(); + assert_eq!(result, Some(initial_value), "Rollback should preserve original data"); + } + + #[test] + fn test_transaction_iterator() { + let temp_dir = TempDir::new().unwrap(); + let provider = + RocksDBBuilder::new(temp_dir.path()).with_table::().build().unwrap(); + + // Create a transaction + let tx = provider.tx(); + + // Write multiple entries + for i in 0..5u64 { + let value = format!("value_{i}").into_bytes(); + tx.put::(i, &value).unwrap(); + } + + // Iterate - should see uncommitted writes + let mut count = 0; + for result in tx.iter::().unwrap() { + let (key, value) = result.unwrap(); + assert_eq!(value, format!("value_{key}").into_bytes()); + count += 1; + } + assert_eq!(count, 5, "Iterator should see all uncommitted writes"); + + // Commit + tx.commit().unwrap(); + } + + #[test] + fn test_batch_manual_commit() { + let temp_dir = TempDir::new().unwrap(); + let provider = + RocksDBBuilder::new(temp_dir.path()).with_table::().build().unwrap(); + + // Create a batch via provider.batch() + let mut batch = provider.batch(); + + // Add entries + for i in 0..10u64 { + let value = format!("batch_value_{i}").into_bytes(); + batch.put::(i, &value).unwrap(); + } + + // Verify len/is_empty + assert_eq!(batch.len(), 10); + assert!(!batch.is_empty()); + + // Data should NOT be visible before commit + assert_eq!(provider.get::(0).unwrap(), None); + + // Commit the batch + batch.commit().unwrap(); + + // Now data should be visible + for i in 0..10u64 { + let value = format!("batch_value_{i}").into_bytes(); + assert_eq!(provider.get::(i).unwrap(), Some(value)); + } + } + + #[test] + fn test_first_and_last_entry() { + let temp_dir = TempDir::new().unwrap(); + let provider = + RocksDBBuilder::new(temp_dir.path()).with_table::().build().unwrap(); + + // Empty table should return None for both + assert_eq!(provider.first::().unwrap(), None); + assert_eq!(provider.last::().unwrap(), None); + + // Insert some entries + provider.put::(10, &b"value_10".to_vec()).unwrap(); + provider.put::(20, &b"value_20".to_vec()).unwrap(); + provider.put::(5, &b"value_5".to_vec()).unwrap(); + + // First should return the smallest key + let first = provider.first::().unwrap(); + assert_eq!(first, Some((5, b"value_5".to_vec()))); + + // Last should return the largest key + let last = provider.last::().unwrap(); + assert_eq!(last, Some((20, b"value_20".to_vec()))); + } 
+} diff --git a/crates/storage/provider/src/providers/rocksdb_stub.rs b/crates/storage/provider/src/providers/rocksdb_stub.rs new file mode 100644 index 0000000000..32b79c1880 --- /dev/null +++ b/crates/storage/provider/src/providers/rocksdb_stub.rs @@ -0,0 +1,215 @@ +//! Stub implementation of `RocksDB` provider. +//! +//! This module provides placeholder types that allow the code to compile when `RocksDB` is not +//! available (either on non-Unix platforms or when the `rocksdb` feature is not enabled). +//! Operations will produce errors if actually attempted. + +use reth_db_api::table::{Encode, Table}; +use reth_storage_errors::{ + db::LogLevel, + provider::{ProviderError::UnsupportedProvider, ProviderResult}, +}; +use std::path::Path; + +/// A stub `RocksDB` provider. +/// +/// This type exists to allow code to compile when `RocksDB` is not available (either on non-Unix +/// platforms or when the `rocksdb` feature is not enabled). When using this stub, the +/// `transaction_hash_numbers_in_rocksdb` flag should be set to `false` to ensure all operations +/// route to MDBX instead. +#[derive(Debug, Clone)] +pub struct RocksDBProvider; + +impl RocksDBProvider { + /// Creates a new stub `RocksDB` provider. + /// + /// On non-Unix platforms, this returns an error indicating `RocksDB` is not supported. + pub fn new(_path: impl AsRef) -> ProviderResult { + Ok(Self) + } + + /// Creates a new stub `RocksDB` provider builder. + pub fn builder(path: impl AsRef) -> RocksDBBuilder { + RocksDBBuilder::new(path) + } + + /// Get a value from `RocksDB` (stub implementation). + pub fn get(&self, _key: T::Key) -> ProviderResult> { + Err(UnsupportedProvider) + } + + /// Get a value from `RocksDB` using pre-encoded key (stub implementation). + pub const fn get_encoded( + &self, + _key: &::Encoded, + ) -> ProviderResult> { + Err(UnsupportedProvider) + } + + /// Put a value into `RocksDB` (stub implementation). + pub fn put(&self, _key: T::Key, _value: &T::Value) -> ProviderResult<()> { + Err(UnsupportedProvider) + } + + /// Put a value into `RocksDB` using pre-encoded key (stub implementation). + pub const fn put_encoded( + &self, + _key: &::Encoded, + _value: &T::Value, + ) -> ProviderResult<()> { + Err(UnsupportedProvider) + } + + /// Delete a value from `RocksDB` (stub implementation). + pub fn delete(&self, _key: T::Key) -> ProviderResult<()> { + Err(UnsupportedProvider) + } + + /// Write a batch of operations (stub implementation). + pub fn write_batch(&self, _f: F) -> ProviderResult<()> + where + F: FnOnce(&mut RocksDBBatch) -> ProviderResult<()>, + { + Err(UnsupportedProvider) + } + + /// Creates a new transaction (stub implementation). + pub const fn tx(&self) -> RocksTx { + RocksTx + } +} + +/// A stub batch writer for `RocksDB` on non-Unix platforms. +#[derive(Debug)] +pub struct RocksDBBatch; + +impl RocksDBBatch { + /// Puts a value into the batch (stub implementation). + pub fn put(&self, _key: T::Key, _value: &T::Value) -> ProviderResult<()> { + Err(UnsupportedProvider) + } + + /// Puts a value into the batch using pre-encoded key (stub implementation). + pub const fn put_encoded( + &self, + _key: &::Encoded, + _value: &T::Value, + ) -> ProviderResult<()> { + Err(UnsupportedProvider) + } + + /// Deletes a value from the batch (stub implementation). + pub fn delete(&self, _key: T::Key) -> ProviderResult<()> { + Err(UnsupportedProvider) + } +} + +/// A stub builder for `RocksDB` on non-Unix platforms. 
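+///
+/// All builder methods are no-ops and `build` always succeeds; the resulting stub provider's
+/// operations fail with `UnsupportedProvider` if they are ever called.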
+#[derive(Debug)] +pub struct RocksDBBuilder; + +impl RocksDBBuilder { + /// Creates a new stub builder. + pub fn new>(_path: P) -> Self { + Self + } + + /// Adds a column family for a specific table type (stub implementation). + pub const fn with_table(self) -> Self { + self + } + + /// Registers the default tables used by reth for `RocksDB` storage (stub implementation). + pub const fn with_default_tables(self) -> Self { + self + } + + /// Enables metrics (stub implementation). + pub const fn with_metrics(self) -> Self { + self + } + + /// Enables `RocksDB` internal statistics collection (stub implementation). + pub const fn with_statistics(self) -> Self { + self + } + + /// Sets the log level from `DatabaseArgs` configuration (stub implementation). + pub const fn with_database_log_level(self, _log_level: Option) -> Self { + self + } + + /// Sets a custom block cache size (stub implementation). + pub const fn with_block_cache_size(self, _capacity_bytes: usize) -> Self { + self + } + + /// Build the `RocksDB` provider (stub implementation). + pub const fn build(self) -> ProviderResult { + Ok(RocksDBProvider) + } +} + +/// A stub transaction for `RocksDB`. +#[derive(Debug)] +pub struct RocksTx; + +impl RocksTx { + /// Gets a value from the specified table (stub implementation). + pub fn get(&self, _key: T::Key) -> ProviderResult> { + Err(UnsupportedProvider) + } + + /// Gets a value using pre-encoded key (stub implementation). + pub const fn get_encoded( + &self, + _key: &::Encoded, + ) -> ProviderResult> { + Err(UnsupportedProvider) + } + + /// Puts a value into the specified table (stub implementation). + pub fn put(&self, _key: T::Key, _value: &T::Value) -> ProviderResult<()> { + Err(UnsupportedProvider) + } + + /// Puts a value using pre-encoded key (stub implementation). + pub const fn put_encoded( + &self, + _key: &::Encoded, + _value: &T::Value, + ) -> ProviderResult<()> { + Err(UnsupportedProvider) + } + + /// Deletes a value from the specified table (stub implementation). + pub fn delete(&self, _key: T::Key) -> ProviderResult<()> { + Err(UnsupportedProvider) + } + + /// Creates an iterator for the specified table (stub implementation). + pub const fn iter(&self) -> ProviderResult> { + Err(UnsupportedProvider) + } + + /// Creates an iterator starting from the given key (stub implementation). + pub fn iter_from(&self, _key: T::Key) -> ProviderResult> { + Err(UnsupportedProvider) + } + + /// Commits the transaction (stub implementation). + pub const fn commit(self) -> ProviderResult<()> { + Err(UnsupportedProvider) + } + + /// Rolls back the transaction (stub implementation). + pub const fn rollback(self) -> ProviderResult<()> { + Err(UnsupportedProvider) + } +} + +/// A stub iterator for `RocksDB` transactions. 
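+///
+/// Exists only to satisfy the `iter`/`iter_from` return types; those methods always error on
+/// the stub, so this iterator is never actually constructed.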
+#[derive(Debug)] +pub struct RocksTxIter<'a, T> { + _marker: std::marker::PhantomData<(&'a (), T)>, +} diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index f3e69bf7d9..969f35fb7a 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -1,6 +1,6 @@ use crate::{ - providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, - ChangeSetReader, HashedPostStateProvider, ProviderError, StateProvider, StateRootProvider, + AccountReader, BlockHashReader, ChangeSetReader, HashedPostStateProvider, ProviderError, + StateProvider, StateRootProvider, }; use alloy_eips::merge::EPOCH_SLOTS; use alloy_primitives::{Address, BlockNumber, Bytes, StorageKey, StorageValue, B256}; @@ -21,8 +21,9 @@ use reth_trie::{ proof::{Proof, StorageProof}, updates::TrieUpdates, witness::TrieWitness, - AccountProof, HashedPostState, HashedStorage, KeccakKeyHasher, MultiProof, MultiProofTargets, - StateRoot, StorageMultiProof, StorageRoot, TrieInput, + AccountProof, HashedPostState, HashedPostStateSorted, HashedStorage, KeccakKeyHasher, + MultiProof, MultiProofTargets, StateRoot, StorageMultiProof, StorageRoot, TrieInput, + TrieInputSorted, }; use reth_trie_db::{ DatabaseHashedPostState, DatabaseHashedStorage, DatabaseProof, DatabaseStateRoot, @@ -118,7 +119,7 @@ impl<'b, Provider: DBProvider + BlockNumReader> HistoricalStateProviderRef<'b, P } /// Retrieve revert hashed state for this history provider. - fn revert_state(&self) -> ProviderResult { + fn revert_state(&self) -> ProviderResult { if !self.lowest_available_blocks.is_account_history_available(self.block_number) || !self.lowest_available_blocks.is_storage_history_available(self.block_number) { @@ -133,7 +134,8 @@ impl<'b, Provider: DBProvider + BlockNumReader> HistoricalStateProviderRef<'b, P ); } - Ok(HashedPostState::from_reverts::(self.tx(), self.block_number..)?) + HashedPostStateSorted::from_reverts::(self.tx(), self.block_number..) + .map_err(ProviderError::from) } /// Retrieve revert hashed storage for this history provider and target address. 
@@ -287,14 +289,15 @@ impl StateRootProvider { fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { let mut revert_state = self.revert_state()?; - revert_state.extend(hashed_state); - StateRoot::overlay_root(self.tx(), revert_state) + let hashed_state_sorted = hashed_state.into_sorted(); + revert_state.extend_ref(&hashed_state_sorted); + StateRoot::overlay_root(self.tx(), &revert_state) .map_err(|err| ProviderError::Database(err.into())) } fn state_root_from_nodes(&self, mut input: TrieInput) -> ProviderResult { - input.prepend(self.revert_state()?); - StateRoot::overlay_root_from_nodes(self.tx(), input) + input.prepend(self.revert_state()?.into()); + StateRoot::overlay_root_from_nodes(self.tx(), TrieInputSorted::from_unsorted(input)) .map_err(|err| ProviderError::Database(err.into())) } @@ -303,8 +306,9 @@ impl StateRootProvider hashed_state: HashedPostState, ) -> ProviderResult<(B256, TrieUpdates)> { let mut revert_state = self.revert_state()?; - revert_state.extend(hashed_state); - StateRoot::overlay_root_with_updates(self.tx(), revert_state) + let hashed_state_sorted = hashed_state.into_sorted(); + revert_state.extend_ref(&hashed_state_sorted); + StateRoot::overlay_root_with_updates(self.tx(), &revert_state) .map_err(|err| ProviderError::Database(err.into())) } @@ -312,9 +316,12 @@ impl StateRootProvider &self, mut input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)> { - input.prepend(self.revert_state()?); - StateRoot::overlay_root_from_nodes_with_updates(self.tx(), input) - .map_err(|err| ProviderError::Database(err.into())) + input.prepend(self.revert_state()?.into()); + StateRoot::overlay_root_from_nodes_with_updates( + self.tx(), + TrieInputSorted::from_unsorted(input), + ) + .map_err(|err| ProviderError::Database(err.into())) } } @@ -367,8 +374,9 @@ impl StateProofProvider address: Address, slots: &[B256], ) -> ProviderResult { - input.prepend(self.revert_state()?); - Proof::overlay_account_proof(self.tx(), input, address, slots).map_err(ProviderError::from) + input.prepend(self.revert_state()?.into()); + let proof = as DatabaseProof>::from_tx(self.tx()); + proof.overlay_account_proof(input, address, slots).map_err(ProviderError::from) } fn multiproof( @@ -376,19 +384,20 @@ impl StateProofProvider mut input: TrieInput, targets: MultiProofTargets, ) -> ProviderResult { - input.prepend(self.revert_state()?); - Proof::overlay_multiproof(self.tx(), input, targets).map_err(ProviderError::from) + input.prepend(self.revert_state()?.into()); + let proof = as DatabaseProof>::from_tx(self.tx()); + proof.overlay_multiproof(input, targets).map_err(ProviderError::from) } fn witness(&self, mut input: TrieInput, target: HashedPostState) -> ProviderResult> { - input.prepend(self.revert_state()?); + input.prepend(self.revert_state()?.into()); TrieWitness::overlay_witness(self.tx(), input, target) .map_err(ProviderError::from) .map(|hm| hm.into_values().collect()) } } -impl HashedPostStateProvider for HistoricalStateProviderRef<'_, Provider> { +impl HashedPostStateProvider for HistoricalStateProviderRef<'_, Provider> { fn hashed_post_state(&self, bundle_state: &revm_database::BundleState) -> HashedPostState { HashedPostState::from_bundle_state::(bundle_state.state()) } @@ -485,7 +494,7 @@ impl HistoricalStateProvider { } // Delegates all provider impls to [HistoricalStateProviderRef] -delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader]); 
+reth_storage_api::macros::delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader + ChangeSetReader]); /// Lowest blocks at which different parts of the state are available. /// They may be [Some] if pruning is enabled. diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index de8eef2cc9..c76918d714 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -1,6 +1,5 @@ use crate::{ - providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, - HashedPostStateProvider, StateProvider, StateRootProvider, + AccountReader, BlockHashReader, HashedPostStateProvider, StateProvider, StateRootProvider, }; use alloy_primitives::{Address, BlockNumber, Bytes, StorageKey, StorageValue, B256}; use reth_db_api::{cursor::DbDupCursorRO, tables, transaction::DbTx}; @@ -12,7 +11,7 @@ use reth_trie::{ updates::TrieUpdates, witness::TrieWitness, AccountProof, HashedPostState, HashedStorage, KeccakKeyHasher, MultiProof, MultiProofTargets, - StateRoot, StorageMultiProof, StorageRoot, TrieInput, + StateRoot, StorageMultiProof, StorageRoot, TrieInput, TrieInputSorted, }; use reth_trie_db::{ DatabaseProof, DatabaseStateRoot, DatabaseStorageProof, DatabaseStorageRoot, @@ -58,14 +57,14 @@ impl BlockHashReader for LatestStateProviderRef<'_, P } } -impl StateRootProvider for LatestStateProviderRef<'_, Provider> { +impl StateRootProvider for LatestStateProviderRef<'_, Provider> { fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { - StateRoot::overlay_root(self.tx(), hashed_state) + StateRoot::overlay_root(self.tx(), &hashed_state.into_sorted()) .map_err(|err| ProviderError::Database(err.into())) } fn state_root_from_nodes(&self, input: TrieInput) -> ProviderResult { - StateRoot::overlay_root_from_nodes(self.tx(), input) + StateRoot::overlay_root_from_nodes(self.tx(), TrieInputSorted::from_unsorted(input)) .map_err(|err| ProviderError::Database(err.into())) } @@ -73,7 +72,7 @@ impl StateRootProvider for LatestStateProviderRef<' &self, hashed_state: HashedPostState, ) -> ProviderResult<(B256, TrieUpdates)> { - StateRoot::overlay_root_with_updates(self.tx(), hashed_state) + StateRoot::overlay_root_with_updates(self.tx(), &hashed_state.into_sorted()) .map_err(|err| ProviderError::Database(err.into())) } @@ -81,12 +80,15 @@ impl StateRootProvider for LatestStateProviderRef<' &self, input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)> { - StateRoot::overlay_root_from_nodes_with_updates(self.tx(), input) - .map_err(|err| ProviderError::Database(err.into())) + StateRoot::overlay_root_from_nodes_with_updates( + self.tx(), + TrieInputSorted::from_unsorted(input), + ) + .map_err(|err| ProviderError::Database(err.into())) } } -impl StorageRootProvider for LatestStateProviderRef<'_, Provider> { +impl StorageRootProvider for LatestStateProviderRef<'_, Provider> { fn storage_root( &self, address: Address, @@ -117,14 +119,15 @@ impl StorageRootProvider for LatestStateProviderRef } } -impl StateProofProvider for LatestStateProviderRef<'_, Provider> { +impl StateProofProvider for LatestStateProviderRef<'_, Provider> { fn proof( &self, input: TrieInput, address: Address, slots: &[B256], ) -> ProviderResult { - Proof::overlay_account_proof(self.tx(), input, address, slots).map_err(ProviderError::from) + let proof = as DatabaseProof>::from_tx(self.tx()); + proof.overlay_account_proof(input, 
address, slots).map_err(ProviderError::from) } fn multiproof( @@ -132,7 +135,8 @@ impl StateProofProvider for LatestStateProviderRef< input: TrieInput, targets: MultiProofTargets, ) -> ProviderResult { - Proof::overlay_multiproof(self.tx(), input, targets).map_err(ProviderError::from) + let proof = as DatabaseProof>::from_tx(self.tx()); + proof.overlay_multiproof(input, targets).map_err(ProviderError::from) } fn witness(&self, input: TrieInput, target: HashedPostState) -> ProviderResult> { @@ -142,7 +146,7 @@ impl StateProofProvider for LatestStateProviderRef< } } -impl HashedPostStateProvider for LatestStateProviderRef<'_, Provider> { +impl HashedPostStateProvider for LatestStateProviderRef<'_, Provider> { fn hashed_post_state(&self, bundle_state: &revm_database::BundleState) -> HashedPostState { HashedPostState::from_bundle_state::(bundle_state.state()) } @@ -194,7 +198,7 @@ impl LatestStateProvider { } // Delegates all provider impls to [LatestStateProviderRef] -delegate_provider_impls!(LatestStateProvider where [Provider: DBProvider + BlockHashReader ]); +reth_storage_api::macros::delegate_provider_impls!(LatestStateProvider where [Provider: DBProvider + BlockHashReader ]); #[cfg(test)] mod tests { diff --git a/crates/storage/provider/src/providers/state/mod.rs b/crates/storage/provider/src/providers/state/mod.rs index f26302531e..fb4109fdc3 100644 --- a/crates/storage/provider/src/providers/state/mod.rs +++ b/crates/storage/provider/src/providers/state/mod.rs @@ -1,5 +1,4 @@ //! [`StateProvider`](crate::StateProvider) implementations pub(crate) mod historical; pub(crate) mod latest; -pub(crate) mod macros; pub(crate) mod overlay; diff --git a/crates/storage/provider/src/providers/state/overlay.rs b/crates/storage/provider/src/providers/state/overlay.rs index 28f04f9f76..99232aec8f 100644 --- a/crates/storage/provider/src/providers/state/overlay.rs +++ b/crates/storage/provider/src/providers/state/overlay.rs @@ -1,23 +1,57 @@ use alloy_primitives::{BlockNumber, B256}; +use metrics::{Counter, Histogram}; +use parking_lot::RwLock; use reth_db_api::DatabaseError; -use reth_errors::ProviderError; +use reth_errors::{ProviderError, ProviderResult}; +use reth_metrics::Metrics; use reth_prune_types::PruneSegment; use reth_stages_types::StageId; use reth_storage_api::{ - DBProvider, DatabaseProviderFactory, DatabaseProviderROFactory, PruneCheckpointReader, - StageCheckpointReader, TrieReader, + BlockNumReader, DBProvider, DatabaseProviderFactory, DatabaseProviderROFactory, + PruneCheckpointReader, StageCheckpointReader, TrieReader, }; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, updates::TrieUpdatesSorted, - HashedPostState, HashedPostStateSorted, KeccakKeyHasher, + HashedPostStateSorted, KeccakKeyHasher, }; use reth_trie_db::{ DatabaseHashedCursorFactory, DatabaseHashedPostState, DatabaseTrieCursorFactory, }; -use std::sync::Arc; -use tracing::debug; +use std::{ + collections::{hash_map::Entry, HashMap}, + sync::Arc, + time::{Duration, Instant}, +}; +use tracing::{debug, debug_span, instrument}; + +/// Metrics for overlay state provider operations. 
+#[derive(Clone, Metrics)] +#[metrics(scope = "storage.providers.overlay")] +pub(crate) struct OverlayStateProviderMetrics { + /// Duration of creating the database provider transaction + create_provider_duration: Histogram, + /// Duration of retrieving trie updates from the database + retrieve_trie_reverts_duration: Histogram, + /// Duration of retrieving hashed state from the database + retrieve_hashed_state_reverts_duration: Histogram, + /// Size of trie updates (number of entries) + trie_updates_size: Histogram, + /// Size of hashed state (number of entries) + hashed_state_size: Histogram, + /// Overall duration of the [`OverlayStateProviderFactory::database_provider_ro`] call + database_provider_ro_duration: Histogram, + /// Number of cache misses when fetching [`Overlay`]s from the overlay cache. + overlay_cache_misses: Counter, +} + +/// Contains all fields required to initialize an [`OverlayStateProvider`]. +#[derive(Debug, Clone)] +struct Overlay { + trie_updates: Arc, + hashed_post_state: Arc, +} /// Factory for creating overlay state providers with optional reverts and overlays. /// @@ -27,30 +61,42 @@ use tracing::debug; pub struct OverlayStateProviderFactory { /// The underlying database provider factory factory: F, - /// Optional block number for collecting reverts - block_number: Option, + /// Optional block hash for collecting reverts + block_hash: Option, /// Optional trie overlay trie_overlay: Option>, /// Optional hashed state overlay hashed_state_overlay: Option>, + /// Metrics for tracking provider operations + metrics: OverlayStateProviderMetrics, + /// A cache which maps `db_tip -> Overlay`. If the db tip changes during usage of the factory + /// then a new entry will get added to this, but in most cases only one entry is present. + overlay_cache: Arc>>, } impl OverlayStateProviderFactory { /// Create a new overlay state provider factory - pub const fn new(factory: F) -> Self { - Self { factory, block_number: None, trie_overlay: None, hashed_state_overlay: None } + pub fn new(factory: F) -> Self { + Self { + factory, + block_hash: None, + trie_overlay: None, + hashed_state_overlay: None, + metrics: OverlayStateProviderMetrics::default(), + overlay_cache: Default::default(), + } } - /// Set the block number for collecting reverts. All state will be reverted to the point + /// Set the block hash for collecting reverts. All state will be reverted to the point /// _after_ this block has been processed. - pub const fn with_block_number(mut self, block_number: Option) -> Self { - self.block_number = block_number; + pub const fn with_block_hash(mut self, block_hash: Option) -> Self { + self.block_hash = block_hash; self } /// Set the trie overlay. /// - /// This overlay will be applied on top of any reverts applied via `with_block_number`. + /// This overlay will be applied on top of any reverts applied via `with_block_hash`. pub fn with_trie_overlay(mut self, trie_overlay: Option>) -> Self { self.trie_overlay = trie_overlay; self @@ -58,7 +104,7 @@ impl OverlayStateProviderFactory { /// Set the hashed state overlay /// - /// This overlay will be applied on top of any reverts applied via `with_block_number`. + /// This overlay will be applied on top of any reverts applied via `with_block_hash`. 
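+///
+/// Durations are recorded in seconds and the size histograms count entries, not bytes.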
pub fn with_hashed_state_overlay( mut self, hashed_state_overlay: Option>, @@ -71,46 +117,70 @@ impl OverlayStateProviderFactory { impl OverlayStateProviderFactory where F: DatabaseProviderFactory, - F::Provider: TrieReader + StageCheckpointReader + PruneCheckpointReader, + F::Provider: TrieReader + StageCheckpointReader + PruneCheckpointReader + BlockNumReader, { - /// Validates that there are sufficient changesets to revert to the requested block number. + /// Returns the block number for [`Self`]'s `block_hash` field, if any. + fn get_requested_block_number( + &self, + provider: &F::Provider, + ) -> ProviderResult> { + if let Some(block_hash) = self.block_hash { + Ok(Some( + provider + .convert_hash_or_number(block_hash.into())? + .ok_or_else(|| ProviderError::BlockHashNotFound(block_hash))?, + )) + } else { + Ok(None) + } + } + + /// Returns the block which is at the tip of the DB, i.e. the block which the state tables of + /// the DB are currently synced to. + fn get_db_tip_block_number(&self, provider: &F::Provider) -> ProviderResult { + provider + .get_stage_checkpoint(StageId::MerkleChangeSets)? + .as_ref() + .map(|chk| chk.block_number) + .ok_or_else(|| ProviderError::InsufficientChangesets { requested: 0, available: 0..=0 }) + } + + /// Returns whether or not it is required to collect reverts, and validates that there are + /// sufficient changesets to revert to the requested block number if so. /// /// Returns an error if the `MerkleChangeSets` checkpoint doesn't cover the requested block. /// Takes into account both the stage checkpoint and the prune checkpoint to determine the /// available data range. - fn validate_changesets_availability( + fn reverts_required( &self, provider: &F::Provider, + db_tip_block: BlockNumber, requested_block: BlockNumber, - ) -> Result<(), ProviderError> { - // Get the MerkleChangeSets stage and prune checkpoints. - let stage_checkpoint = provider.get_stage_checkpoint(StageId::MerkleChangeSets)?; + ) -> ProviderResult { + // If the requested block is the DB tip then there won't be any reverts necessary, and we + // can simply return Ok. + if db_tip_block == requested_block { + return Ok(false) + } + + // Get the MerkleChangeSets prune checkpoints, which will be used to determine the lower + // bound. let prune_checkpoint = provider.get_prune_checkpoint(PruneSegment::MerkleChangeSets)?; - // Get the upper bound from stage checkpoint - let upper_bound = - stage_checkpoint.as_ref().map(|chk| chk.block_number).ok_or_else(|| { - ProviderError::InsufficientChangesets { - requested: requested_block, - available: 0..=0, - } - })?; - - // Extract a possible lower bound from stage checkpoint if available - let stage_lower_bound = stage_checkpoint.as_ref().and_then(|chk| { - chk.merkle_changesets_stage_checkpoint().map(|stage_chk| stage_chk.block_range.from) - }); - - // Extract a possible lower bound from prune checkpoint if available + // Extract the lower bound from prune checkpoint if available. + // + // If not available we assume pruning has never ran and so there is no lower bound. This + // should not generally happen, since MerkleChangeSets always have pruning enabled, but when + // starting a new node from scratch (e.g. in a test case or benchmark) it can surface. 
+ // // The prune checkpoint's block_number is the highest pruned block, so data is available // starting from the next block - let prune_lower_bound = - prune_checkpoint.and_then(|chk| chk.block_number.map(|block| block + 1)); + let lower_bound = prune_checkpoint + .and_then(|chk| chk.block_number) + .map(|block_number| block_number + 1) + .unwrap_or_default(); - // Use the higher of the two lower bounds. If neither is available assume unbounded. - let lower_bound = stage_lower_bound.max(prune_lower_bound).unwrap_or(0); - - let available_range = lower_bound..=upper_bound; + let available_range = lower_bound..=db_tip_block; // Check if the requested block is within the available range if !available_range.contains(&requested_block) { @@ -120,57 +190,95 @@ where }); } - Ok(()) + Ok(true) } -} -impl DatabaseProviderROFactory for OverlayStateProviderFactory -where - F: DatabaseProviderFactory, - F::Provider: TrieReader + StageCheckpointReader + PruneCheckpointReader, -{ - type Provider = OverlayStateProvider; - - /// Create a read-only [`OverlayStateProvider`]. - fn database_provider_ro(&self) -> Result, ProviderError> { - // Get a read-only provider - let provider = self.factory.database_provider_ro()?; - - // If block_number is provided, collect reverts - let (trie_updates, hashed_state) = if let Some(from_block) = self.block_number { - // Validate that we have sufficient changesets for the requested block - self.validate_changesets_availability(&provider, from_block)?; + /// Calculates a new [`Overlay`] given a transaction and the current db tip. + #[instrument( + level = "debug", + target = "providers::state::overlay", + skip_all, + fields(db_tip_block) + )] + fn calculate_overlay( + &self, + provider: &F::Provider, + db_tip_block: BlockNumber, + ) -> ProviderResult { + // Set up variables we'll use for recording metrics. There's two different code-paths here, + // and we want to make sure both record metrics, so we do metrics recording after. + let retrieve_trie_reverts_duration; + let retrieve_hashed_state_reverts_duration; + let trie_updates_total_len; + let hashed_state_updates_total_len; + // If block_hash is provided, collect reverts + let (trie_updates, hashed_post_state) = if let Some(from_block) = + self.get_requested_block_number(provider)? && + self.reverts_required(provider, db_tip_block, from_block)? 
+ { // Collect trie reverts - let mut trie_updates_mut = provider.trie_reverts(from_block + 1)?; + let mut trie_reverts = { + let _guard = + debug_span!(target: "providers::state::overlay", "Retrieving trie reverts") + .entered(); - // Collect state reverts using HashedPostState::from_reverts - let reverted_state = HashedPostState::from_reverts::( - provider.tx_ref(), - from_block + 1.., - )?; - let mut hashed_state_mut = reverted_state.into_sorted(); + let start = Instant::now(); + let res = provider.trie_reverts(from_block + 1)?; + retrieve_trie_reverts_duration = start.elapsed(); + res + }; - // Extend with overlays if provided - if let Some(trie_overlay) = &self.trie_overlay { - trie_updates_mut.extend_ref(trie_overlay); - } + // Collect state reverts + let mut hashed_state_reverts = { + let _guard = debug_span!(target: "providers::state::overlay", "Retrieving hashed state reverts").entered(); - if let Some(hashed_state_overlay) = &self.hashed_state_overlay { - hashed_state_mut.extend_ref(hashed_state_overlay); - } + let start = Instant::now(); + let res = HashedPostStateSorted::from_reverts::( + provider.tx_ref(), + from_block + 1.., + )?; + retrieve_hashed_state_reverts_duration = start.elapsed(); + res + }; + + // Extend with overlays if provided. If the reverts are empty we should just use the + // overlays directly, because `extend_ref` will actually clone the overlay. + let trie_updates = match self.trie_overlay.as_ref() { + Some(trie_overlay) if trie_reverts.is_empty() => Arc::clone(trie_overlay), + Some(trie_overlay) => { + trie_reverts.extend_ref(trie_overlay); + Arc::new(trie_reverts) + } + None => Arc::new(trie_reverts), + }; + + let hashed_state_updates = match self.hashed_state_overlay.as_ref() { + Some(hashed_state_overlay) if hashed_state_reverts.is_empty() => { + Arc::clone(hashed_state_overlay) + } + Some(hashed_state_overlay) => { + hashed_state_reverts.extend_ref(hashed_state_overlay); + Arc::new(hashed_state_reverts) + } + None => Arc::new(hashed_state_reverts), + }; + + trie_updates_total_len = trie_updates.total_len(); + hashed_state_updates_total_len = hashed_state_updates.total_len(); debug!( target: "providers::state::overlay", + block_hash = ?self.block_hash, ?from_block, - num_trie_updates = ?trie_updates_mut.total_len(), - num_state_updates = ?hashed_state_mut.total_len(), + num_trie_updates = ?trie_updates_total_len, + num_state_updates = ?hashed_state_updates_total_len, "Reverted to target block", ); - (Arc::new(trie_updates_mut), Arc::new(hashed_state_mut)) + (trie_updates, hashed_state_updates) } else { - // If no block_number, use overlays directly or defaults + // If no block_hash, use overlays directly or defaults let trie_updates = self.trie_overlay.clone().unwrap_or_else(|| Arc::new(TrieUpdatesSorted::default())); let hashed_state = self @@ -178,10 +286,99 @@ where .clone() .unwrap_or_else(|| Arc::new(HashedPostStateSorted::default())); + retrieve_trie_reverts_duration = Duration::ZERO; + retrieve_hashed_state_reverts_duration = Duration::ZERO; + trie_updates_total_len = trie_updates.total_len(); + hashed_state_updates_total_len = hashed_state.total_len(); + (trie_updates, hashed_state) }; - Ok(OverlayStateProvider::new(provider, trie_updates, hashed_state)) + // Record metrics + self.metrics + .retrieve_trie_reverts_duration + .record(retrieve_trie_reverts_duration.as_secs_f64()); + self.metrics + .retrieve_hashed_state_reverts_duration + .record(retrieve_hashed_state_reverts_duration.as_secs_f64()); + 
self.metrics.trie_updates_size.record(trie_updates_total_len as f64); + self.metrics.hashed_state_size.record(hashed_state_updates_total_len as f64); + + Ok(Overlay { trie_updates, hashed_post_state }) + } + + /// Fetches an [`Overlay`] from the cache based on the current db tip block. If there is no + /// cached value then this calculates the [`Overlay`] and populates the cache. + #[instrument(level = "debug", target = "providers::state::overlay", skip_all)] + fn get_overlay(&self, provider: &F::Provider) -> ProviderResult { + // If we have no anchor block configured then we will never need to get trie reverts, just + // return the in-memory overlay. + if self.block_hash.is_none() { + let trie_updates = + self.trie_overlay.clone().unwrap_or_else(|| Arc::new(TrieUpdatesSorted::default())); + let hashed_post_state = self + .hashed_state_overlay + .clone() + .unwrap_or_else(|| Arc::new(HashedPostStateSorted::default())); + return Ok(Overlay { trie_updates, hashed_post_state }) + } + + let db_tip_block = self.get_db_tip_block_number(provider)?; + + // If the overlay is present in the cache then return it directly. + if let Some(overlay) = self.overlay_cache.as_ref().read().get(&db_tip_block) { + return Ok(overlay.clone()); + } + + // If the overlay is not present then we need to calculate a new one. We grab a write lock, + // and then check the cache again in case some other thread populated the cache since we + // checked with the read-lock. If still not present we calculate and populate. + let mut cache_miss = false; + let overlay = match self.overlay_cache.as_ref().write().entry(db_tip_block) { + Entry::Occupied(entry) => entry.get().clone(), + Entry::Vacant(entry) => { + cache_miss = true; + let overlay = self.calculate_overlay(provider, db_tip_block)?; + entry.insert(overlay.clone()); + overlay + } + }; + + if cache_miss { + self.metrics.overlay_cache_misses.increment(1); + } + + Ok(overlay) + } +} + +impl DatabaseProviderROFactory for OverlayStateProviderFactory +where + F: DatabaseProviderFactory, + F::Provider: TrieReader + StageCheckpointReader + PruneCheckpointReader + BlockNumReader, +{ + type Provider = OverlayStateProvider; + + /// Create a read-only [`OverlayStateProvider`]. 
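For reference, the cache lookup in `get_overlay` above follows a read-then-write double-check pattern, so concurrent readers never serialize on the write lock and the overlay is computed at most once per DB tip. A generic, self-contained sketch of that pattern, using `std::sync::RwLock` and a plain `HashMap` in place of the actual cache type:

use std::collections::HashMap;
use std::sync::RwLock;

// Hypothetical cache keyed by the DB tip block; the real code applies the
// same idea with its own cache and entry types.
fn get_or_compute<V: Clone>(
    cache: &RwLock<HashMap<u64, V>>,
    key: u64,
    compute: impl FnOnce() -> V,
) -> V {
    // Fast path: shared read lock, readers do not contend with each other.
    if let Some(v) = cache.read().unwrap().get(&key) {
        return v.clone();
    }
    // Slow path: take the write lock and re-check, since another thread may
    // have populated the entry between dropping the read lock and acquiring
    // the write lock.
    let mut guard = cache.write().unwrap();
    guard.entry(key).or_insert_with(compute).clone()
}

The second lookup under the write lock is what prevents two threads that both missed on the read path from computing the overlay twice.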
+ #[instrument(level = "debug", target = "providers::state::overlay", skip_all)] + fn database_provider_ro(&self) -> ProviderResult> { + let overall_start = Instant::now(); + + // Get a read-only provider + let provider = { + let _guard = + debug_span!(target: "providers::state::overlay", "Creating db provider").entered(); + + let start = Instant::now(); + let res = self.factory.database_provider_ro()?; + self.metrics.create_provider_duration.record(start.elapsed()); + res + }; + + let Overlay { trie_updates, hashed_post_state } = self.get_overlay(&provider)?; + + self.metrics.database_provider_ro_duration.record(overall_start.elapsed()); + Ok(OverlayStateProvider::new(provider, trie_updates, hashed_post_state)) } } diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 2cd7ec98ae..5548cb5c1f 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -6,16 +6,18 @@ use crate::{ to_range, BlockHashReader, BlockNumReader, HeaderProvider, ReceiptProvider, TransactionsProvider, }; -use alloy_consensus::transaction::{SignerRecoverable, TransactionMeta}; +use alloy_consensus::transaction::TransactionMeta; use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256}; use reth_chainspec::ChainInfo; use reth_db::static_file::{ BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, StaticFileCursor, TransactionMask, + TransactionSenderMask, }; use reth_db_api::table::{Decompress, Value}; use reth_node_types::NodePrimitives; use reth_primitives_traits::{SealedHeader, SignedTransaction}; +use reth_storage_api::range_size_hint; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ fmt::Debug, @@ -104,12 +106,10 @@ impl> HeaderProvider for StaticFileJarProv &self, range: impl RangeBounds, ) -> ProviderResult> { - let range = to_range(range); - let mut cursor = self.cursor()?; - let mut headers = Vec::with_capacity((range.end - range.start) as usize); + let mut headers = Vec::with_capacity(range_size_hint(&range).unwrap_or(1024)); - for num in range { + for num in to_range(range) { if let Some(header) = cursor.get_one::>(num.into())? { headers.push(header); } @@ -133,12 +133,10 @@ impl> HeaderProvider for StaticFileJarProv range: impl RangeBounds, mut predicate: impl FnMut(&SealedHeader) -> bool, ) -> ProviderResult>> { - let range = to_range(range); - let mut cursor = self.cursor()?; - let mut headers = Vec::with_capacity((range.end - range.start) as usize); + let mut headers = Vec::with_capacity(range_size_hint(&range).unwrap_or(1024)); - for number in range { + for number in to_range(range) { if let Some((header, hash)) = cursor.get_two::>(number.into())? 
{ @@ -236,11 +234,6 @@ impl> TransactionsPr Err(ProviderError::UnsupportedProvider) } - fn transaction_block(&self, _id: TxNumber) -> ProviderResult> { - // Information on indexing table [`tables::TransactionBlocks`] - Err(ProviderError::UnsupportedProvider) - } - fn transactions_by_block( &self, _block_id: BlockHashOrNumber, @@ -263,31 +256,34 @@ impl> TransactionsPr &self, range: impl RangeBounds, ) -> ProviderResult> { - let range = to_range(range); let mut cursor = self.cursor()?; - let mut txes = Vec::with_capacity((range.end - range.start) as usize); + let mut txs = Vec::with_capacity(range_size_hint(&range).unwrap_or(1024)); - for num in range { + for num in to_range(range) { if let Some(tx) = cursor.get_one::>(num.into())? { - txes.push(tx) + txs.push(tx) } } - Ok(txes) + Ok(txs) } fn senders_by_tx_range( &self, range: impl RangeBounds, ) -> ProviderResult> { - let txs = self.transactions_by_tx_range(range)?; - Ok(reth_primitives_traits::transaction::recover::recover_signers(&txs)?) + let mut cursor = self.cursor()?; + let mut senders = Vec::with_capacity(range_size_hint(&range).unwrap_or(1024)); + + for num in to_range(range) { + if let Some(tx) = cursor.get_one::(num.into())? { + senders.push(tx) + } + } + Ok(senders) } - fn transaction_sender(&self, num: TxNumber) -> ProviderResult> { - Ok(self - .cursor()? - .get_one::>(num.into())? - .and_then(|tx| tx.recover_signer().ok())) + fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { + self.cursor()?.get_one::(id.into()) } } @@ -322,11 +318,10 @@ impl, ) -> ProviderResult> { - let range = to_range(range); let mut cursor = self.cursor()?; - let mut receipts = Vec::with_capacity((range.end - range.start) as usize); + let mut receipts = Vec::with_capacity(range_size_hint(&range).unwrap_or(1024)); - for num in range { + for num in to_range(range) { if let Some(tx) = cursor.get_one::>(num.into())? 
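The capacity pre-allocation in the changes above relies on `range_size_hint`, which only yields a value for bounded ranges; unbounded ranges fall back to a fixed guess of 1024. The real helper lives in `reth_storage_api` and may be implemented differently, but a plausible self-contained sketch is:

use std::ops::{Bound, RangeBounds};

// Rough size hint for a `RangeBounds<u64>`; returns `None` when the range is
// unbounded on either side.
fn range_size_hint(range: &impl RangeBounds<u64>) -> Option<usize> {
    let start = match range.start_bound() {
        Bound::Included(&s) => s,
        Bound::Excluded(&s) => s.checked_add(1)?,
        Bound::Unbounded => return None,
    };
    let end = match range.end_bound() {
        Bound::Included(&e) => e.checked_add(1)?,
        Bound::Excluded(&e) => e,
        Bound::Unbounded => return None,
    };
    Some(end.saturating_sub(start) as usize)
}

With such a helper, `Vec::with_capacity(range_size_hint(&range).unwrap_or(1024))` sizes the buffer correctly for bounded queries while avoiding the earlier pattern of materializing the range twice.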
{ receipts.push(tx) } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index ea7eec9e9d..fa588c7bdc 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -3,14 +3,11 @@ use super::{ StaticFileJarProvider, StaticFileProviderRW, StaticFileProviderRWRefMut, }; use crate::{ - to_range, BlockHashReader, BlockNumReader, BlockReader, BlockSource, HeaderProvider, - ReceiptProvider, StageCheckpointReader, StatsReader, TransactionVariant, TransactionsProvider, - TransactionsProviderExt, -}; -use alloy_consensus::{ - transaction::{SignerRecoverable, TransactionMeta}, - Header, + to_range, BlockHashReader, BlockNumReader, BlockReader, BlockSource, EitherWriter, + EitherWriterDestination, HeaderProvider, ReceiptProvider, StageCheckpointReader, StatsReader, + TransactionVariant, TransactionsProvider, TransactionsProviderExt, }; +use alloy_consensus::{transaction::TransactionMeta, Header}; use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber}; use alloy_primitives::{b256, keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256}; use dashmap::DashMap; @@ -21,7 +18,7 @@ use reth_db::{ lockfile::StorageLock, static_file::{ iter_static_files, BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, - StaticFileCursor, TransactionMask, + StaticFileCursor, TransactionMask, TransactionSenderMask, }, }; use reth_db_api::{ @@ -40,22 +37,20 @@ use reth_static_file_types::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, StaticFileSegment, DEFAULT_BLOCKS_PER_STATIC_FILE, }; -use reth_storage_api::{BlockBodyIndicesProvider, DBProvider}; +use reth_storage_api::{BlockBodyIndicesProvider, DBProvider, StorageSettingsCache}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap}, + collections::{BTreeMap, HashMap}, fmt::Debug, - marker::PhantomData, ops::{Deref, Range, RangeBounds, RangeInclusive}, path::{Path, PathBuf}, sync::{atomic::AtomicU64, mpsc, Arc}, }; use tracing::{debug, info, trace, warn}; -/// Alias type for a map that can be queried for block ranges from a transaction -/// segment respectively. It uses `TxNumber` to represent the transaction end of a static file -/// range. -type SegmentRanges = HashMap>; +/// Alias type for a map that can be queried for block or transaction ranges. It uses `u64` to +/// represent either a block or a transaction number end of a static file range. +type SegmentRanges = BTreeMap; /// Access mode on a static file provider. RO/RW. #[derive(Debug, Default, PartialEq, Eq)] @@ -96,14 +91,92 @@ impl Clone for StaticFileProvider { } } -impl StaticFileProvider { - /// Creates a new [`StaticFileProvider`] with the given [`StaticFileAccess`]. - fn new(path: impl AsRef, access: StaticFileAccess) -> ProviderResult { - let provider = Self(Arc::new(StaticFileProviderInner::new(path, access)?)); +/// Builder for [`StaticFileProvider`] that allows configuration before initialization. +#[derive(Debug)] +pub struct StaticFileProviderBuilder { + inner: StaticFileProviderInner, +} + +impl StaticFileProviderBuilder { + /// Creates a new builder with read-write access. + pub fn read_write(path: impl AsRef) -> ProviderResult { + StaticFileProviderInner::new(path, StaticFileAccess::RW).map(|inner| Self { inner }) + } + + /// Creates a new builder with read-only access. 
+ pub fn read_only(path: impl AsRef) -> ProviderResult { + StaticFileProviderInner::new(path, StaticFileAccess::RO).map(|inner| Self { inner }) + } + + /// Set custom blocks per file for specific segments. + /// + /// Each static file segment is stored across multiple files, and each of these files contains + /// up to the specified number of blocks of data. When the file gets full, a new file is + /// created with the new block range. + /// + /// This setting affects the size of each static file, and can be set per segment. + /// + /// If it is changed for an existing node, existing static files will not be affected and will + /// be finished with the old blocks per file setting, but new static files will use the new + /// setting. + pub fn with_blocks_per_file_for_segments( + mut self, + segments: HashMap, + ) -> Self { + self.inner.blocks_per_file.extend(segments); + self + } + + /// Set a custom number of blocks per file for all segments. + pub fn with_blocks_per_file(mut self, blocks_per_file: u64) -> Self { + for segment in StaticFileSegment::iter() { + self.inner.blocks_per_file.insert(segment, blocks_per_file); + } + self + } + + /// Set a custom number of blocks per file for a specific segment. + pub fn with_blocks_per_file_for_segment( + mut self, + segment: StaticFileSegment, + blocks_per_file: u64, + ) -> Self { + self.inner.blocks_per_file.insert(segment, blocks_per_file); + self + } + + /// Enables metrics on the [`StaticFileProvider`]. + pub fn with_metrics(mut self) -> Self { + self.inner.metrics = Some(Arc::new(StaticFileProviderMetrics::default())); + self + } + + /// Sets the genesis block number for the [`StaticFileProvider`]. + /// + /// This configures the genesis block number, which is used to determine the starting point + /// for block indexing and querying operations. + /// + /// # Arguments + /// + /// * `genesis_block_number` - The block number of the genesis block. + /// + /// # Returns + /// + /// Returns `Self` to allow method chaining. + pub const fn with_genesis_block_number(mut self, genesis_block_number: u64) -> Self { + self.inner.genesis_block_number = genesis_block_number; + self + } + + /// Builds the final [`StaticFileProvider`] and initializes the index. + pub fn build(self) -> ProviderResult> { + let provider = StaticFileProvider(Arc::new(self.inner)); provider.initialize_index()?; Ok(provider) } +} +impl StaticFileProvider { /// Creates a new [`StaticFileProvider`] with read-only access. /// /// Set `watch_directory` to `true` to track the most recent changes in static files. Otherwise, @@ -114,7 +187,7 @@ impl StaticFileProvider { /// /// See also [`StaticFileProvider::watch_directory`]. pub fn read_only(path: impl AsRef, watch_directory: bool) -> ProviderResult { - let provider = Self::new(path, StaticFileAccess::RO)?; + let provider = StaticFileProviderBuilder::read_only(path)?.build()?; if watch_directory { provider.watch_directory(); @@ -125,7 +198,7 @@ impl StaticFileProvider { /// Creates a new [`StaticFileProvider`] with read-write access. pub fn read_write(path: impl AsRef) -> ProviderResult { - Self::new(path, StaticFileAccess::RW) + StaticFileProviderBuilder::read_write(path)?.build() } /// Watches the directory for changes and updates the in-memory index when modifications @@ -227,31 +300,19 @@ pub struct StaticFileProviderInner { /// Maintains a map which allows for concurrent access to different `NippyJars`, over different /// segments and ranges. 
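Putting the new builder together, a typical setup might look roughly as follows; the function name, the `N: NodePrimitives` bound, and the per-segment value are illustrative assumptions rather than code from this change:

// Usage sketch, assuming this runs with the provider crate in scope and `N`
// is the node's primitives type; the directory and the 250_000 value are
// placeholders.
fn open_static_files<N: NodePrimitives>(dir: &Path) -> ProviderResult<StaticFileProvider<N>> {
    StaticFileProviderBuilder::read_write(dir)?
        // Smaller files for the Headers segment only; all other segments keep
        // DEFAULT_BLOCKS_PER_STATIC_FILE.
        .with_blocks_per_file_for_segment(StaticFileSegment::Headers, 250_000)
        .with_metrics()
        .build()
}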
map: DashMap<(BlockNumber, StaticFileSegment), LoadedJar>, - /// Min static file range for each segment. - /// This index is initialized on launch to keep track of the lowest, non-expired static file - /// per segment. - /// - /// This tracks the lowest static file per segment together with the block range in that - /// file. E.g. static file is batched in 500k block intervals then the lowest static file - /// is [0..499K], and the block range is start = 0, end = 499K. - /// This index is mainly used to History expiry, which targets transactions, e.g. pre-merge - /// history expiry would lead to removing all static files below the merge height. - static_files_min_block: RwLock>, + /// Indexes per segment. + indexes: RwLock>, /// This is an additional index that tracks the expired height, this will track the highest /// block number that has been expired (missing). The first, non expired block is /// `expired_history_height + 1`. /// /// This is effectively the transaction range that has been expired: - /// [`StaticFileProvider::delete_transactions_below`] and mirrors + /// [`StaticFileProvider::delete_segment_below_block`] and mirrors /// `static_files_min_block[transactions] - blocks_per_file`. /// /// This additional tracker exists for more efficient lookups because the node must be aware of /// the expired height. earliest_history_height: AtomicU64, - /// Max static file block for each segment - static_files_max_block: RwLock>, - /// Available static file block ranges on disk indexed by max transactions. - static_files_tx_index: RwLock, /// Directory where `static_files` are located path: PathBuf, /// Maintains a writer set of [`StaticFileSegment`]. @@ -260,12 +321,12 @@ pub struct StaticFileProviderInner { metrics: Option>, /// Access rights of the provider. access: StaticFileAccess, - /// Number of blocks per file. - blocks_per_file: u64, + /// Number of blocks per file, per segment. + blocks_per_file: HashMap, /// Write lock for when access is [`StaticFileAccess::RW`]. _lock_file: Option, - /// Node primitives - _pd: PhantomData, + /// Genesis block number, default is 0; + genesis_block_number: u64, } impl StaticFileProviderInner { @@ -277,19 +338,22 @@ impl StaticFileProviderInner { None }; + let mut blocks_per_file = HashMap::new(); + for segment in StaticFileSegment::iter() { + blocks_per_file.insert(segment, DEFAULT_BLOCKS_PER_STATIC_FILE); + } + let provider = Self { map: Default::default(), + indexes: Default::default(), writers: Default::default(), - static_files_min_block: Default::default(), earliest_history_height: Default::default(), - static_files_max_block: Default::default(), - static_files_tx_index: Default::default(), path: path.as_ref().to_path_buf(), metrics: None, access, - blocks_per_file: DEFAULT_BLOCKS_PER_STATIC_FILE, + blocks_per_file, _lock_file, - _pd: Default::default(), + genesis_block_number: 0, }; Ok(provider) @@ -301,78 +365,174 @@ impl StaticFileProviderInner { /// Each static file has a fixed number of blocks. This gives out the range where the requested /// block is positioned. - pub const fn find_fixed_range(&self, block: BlockNumber) -> SegmentRangeInclusive { - find_fixed_range(block, self.blocks_per_file) + /// + /// If the specified block falls into one of the ranges of already initialized static files, + /// this function will return that range. + /// + /// If no matching file exists, this function will derive a new range from the end of the last + /// existing file, if any. 
+ pub fn find_fixed_range_with_block_index( + &self, + segment: StaticFileSegment, + block_index: Option<&SegmentRanges>, + block: BlockNumber, + ) -> SegmentRangeInclusive { + let blocks_per_file = + self.blocks_per_file.get(&segment).copied().unwrap_or(DEFAULT_BLOCKS_PER_STATIC_FILE); + + if let Some(block_index) = block_index { + // Find first block range that contains the requested block + if let Some((_, range)) = block_index.iter().find(|(max_block, _)| block <= **max_block) + { + // Found matching range for an existing file using block index + return *range + } else if let Some((_, range)) = block_index.last_key_value() { + // Didn't find matching range for an existing file, derive a new range from the end + // of the last existing file range. + // + // `block` is always higher than `range.end()` here, because we iterated over all + // `block_index` ranges above and didn't find one that contains our block + let blocks_after_last_range = block - range.end(); + let segments_to_skip = (blocks_after_last_range - 1) / blocks_per_file; + let start = range.end() + 1 + segments_to_skip * blocks_per_file; + return SegmentRangeInclusive::new(start, start + blocks_per_file - 1) + } + } + // No block index is available, derive a new range using the fixed number of blocks, + // starting from the beginning. + find_fixed_range(block, blocks_per_file) + } + + /// Each static file has a fixed number of blocks. This gives out the range where the requested + /// block is positioned. + /// + /// If the specified block falls into one of the ranges of already initialized static files, + /// this function will return that range. + /// + /// If no matching file exists, this function will derive a new range from the end of the last + /// existing file, if any. + /// + /// This function will block indefinitely if a write lock for + /// [`Self::indexes`] is already acquired. In that case, use + /// [`Self::find_fixed_range_with_block_index`]. + pub fn find_fixed_range( + &self, + segment: StaticFileSegment, + block: BlockNumber, + ) -> SegmentRangeInclusive { + self.find_fixed_range_with_block_index( + segment, + self.indexes + .read() + .get(&segment) + .map(|index| &index.expected_block_ranges_by_max_block), + block, + ) + } + + /// Get genesis block number + pub const fn genesis_block_number(&self) -> u64 { + self.genesis_block_number } } impl StaticFileProvider { - /// Set a custom number of blocks per file. - #[cfg(any(test, feature = "test-utils"))] - pub fn with_custom_blocks_per_file(self, blocks_per_file: u64) -> Self { - let mut provider = - Arc::try_unwrap(self.0).expect("should be called when initializing only"); - provider.blocks_per_file = blocks_per_file; - Self(Arc::new(provider)) - } - - /// Enables metrics on the [`StaticFileProvider`]. - pub fn with_metrics(self) -> Self { - let mut provider = - Arc::try_unwrap(self.0).expect("should be called when initializing only"); - provider.metrics = Some(Arc::new(StaticFileProviderMetrics::default())); - Self(Arc::new(provider)) - } - /// Reports metrics for the static files. 
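The derivation of a new range from the end of the last existing file is easiest to follow with concrete numbers. A self-contained sketch of just that arithmetic, with a hypothetical helper name and plain u64 tuples instead of `SegmentRangeInclusive`:

// Mirrors the "no matching file" branch of `find_fixed_range_with_block_index`:
// derive the fixed range that should contain `block`, given the end of the
// last existing file range and the blocks-per-file setting.
fn derive_range(last_range_end: u64, blocks_per_file: u64, block: u64) -> (u64, u64) {
    debug_assert!(block > last_range_end);
    let blocks_after_last_range = block - last_range_end;
    let segments_to_skip = (blocks_after_last_range - 1) / blocks_per_file;
    let start = last_range_end + 1 + segments_to_skip * blocks_per_file;
    (start, start + blocks_per_file - 1)
}

fn main() {
    // With the last file covering ..=499_999 and 500_000 blocks per file,
    // block 1_250_000 lands in 1_000_000..=1_499_999, skipping the would-be
    // 500_000..=999_999 file entirely.
    assert_eq!(derive_range(499_999, 500_000, 1_250_000), (1_000_000, 1_499_999));
}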
pub fn report_metrics(&self) -> ProviderResult<()> { let Some(metrics) = &self.metrics else { return Ok(()) }; let static_files = iter_static_files(&self.path).map_err(ProviderError::other)?; - for (segment, ranges) in static_files { + for (segment, headers) in static_files { let mut entries = 0; let mut size = 0; - for (block_range, _) in &ranges { - let fixed_block_range = self.find_fixed_range(block_range.start()); + for (block_range, _) in &headers { + let fixed_block_range = self.find_fixed_range(segment, block_range.start()); let jar_provider = self - .get_segment_provider(segment, || Some(fixed_block_range), None)? + .get_segment_provider_for_range(segment, || Some(fixed_block_range), None)? .ok_or_else(|| { ProviderError::MissingStaticFileBlock(segment, block_range.start()) })?; entries += jar_provider.rows(); - let data_size = reth_fs_util::metadata(jar_provider.data_path()) + let data_path = jar_provider.data_path().to_path_buf(); + let index_path = jar_provider.index_path(); + let offsets_path = jar_provider.offsets_path(); + let config_path = jar_provider.config_path(); + + // can release jar early + drop(jar_provider); + + let data_size = reth_fs_util::metadata(data_path) .map(|metadata| metadata.len()) .unwrap_or_default(); - let index_size = reth_fs_util::metadata(jar_provider.index_path()) + let index_size = reth_fs_util::metadata(index_path) .map(|metadata| metadata.len()) .unwrap_or_default(); - let offsets_size = reth_fs_util::metadata(jar_provider.offsets_path()) + let offsets_size = reth_fs_util::metadata(offsets_path) .map(|metadata| metadata.len()) .unwrap_or_default(); - let config_size = reth_fs_util::metadata(jar_provider.config_path()) + let config_size = reth_fs_util::metadata(config_path) .map(|metadata| metadata.len()) .unwrap_or_default(); size += data_size + index_size + offsets_size + config_size; } - metrics.record_segment(segment, size, ranges.len(), entries); + metrics.record_segment(segment, size, headers.len(), entries); } Ok(()) } + /// Gets the [`StaticFileJarProvider`] of the requested segment and start index that can be + /// either block or transaction. + pub fn get_segment_provider( + &self, + segment: StaticFileSegment, + number: u64, + ) -> ProviderResult> { + if segment.is_block_based() { + self.get_segment_provider_for_block(segment, number, None) + } else { + self.get_segment_provider_for_transaction(segment, number, None) + } + } + + /// Gets the [`StaticFileJarProvider`] of the requested segment and start index that can be + /// either block or transaction. + /// + /// If the segment is not found, returns [`None`]. + pub fn get_maybe_segment_provider( + &self, + segment: StaticFileSegment, + number: u64, + ) -> ProviderResult>> { + let provider = if segment.is_block_based() { + self.get_segment_provider_for_block(segment, number, None) + } else { + self.get_segment_provider_for_transaction(segment, number, None) + }; + + match provider { + Ok(provider) => Ok(Some(provider)), + Err( + ProviderError::MissingStaticFileBlock(_, _) | + ProviderError::MissingStaticFileTx(_, _), + ) => Ok(None), + Err(err) => Err(err), + } + } + /// Gets the [`StaticFileJarProvider`] of the requested segment and block. 
- pub fn get_segment_provider_from_block( + pub fn get_segment_provider_for_block( &self, segment: StaticFileSegment, block: BlockNumber, path: Option<&Path>, ) -> ProviderResult> { - self.get_segment_provider( + self.get_segment_provider_for_range( segment, || self.get_segment_ranges_from_block(segment, block), path, @@ -381,13 +541,13 @@ impl StaticFileProvider { } /// Gets the [`StaticFileJarProvider`] of the requested segment and transaction. - pub fn get_segment_provider_from_transaction( + pub fn get_segment_provider_for_transaction( &self, segment: StaticFileSegment, tx: TxNumber, path: Option<&Path>, ) -> ProviderResult> { - self.get_segment_provider( + self.get_segment_provider_for_range( segment, || self.get_segment_ranges_from_transaction(segment, tx), path, @@ -398,7 +558,7 @@ impl StaticFileProvider { /// Gets the [`StaticFileJarProvider`] of the requested segment and block or transaction. /// /// `fn_range` should make sure the range goes through `find_fixed_range`. - pub fn get_segment_provider( + pub fn get_segment_provider_for_range( &self, segment: StaticFileSegment, fn_range: impl Fn() -> Option, @@ -411,7 +571,7 @@ impl StaticFileProvider { &path .file_name() .ok_or_else(|| { - ProviderError::MissingStaticFilePath(segment, path.to_path_buf()) + ProviderError::MissingStaticFileSegmentPath(segment, path.to_path_buf()) })? .to_string_lossy(), ) @@ -432,6 +592,21 @@ impl StaticFileProvider { Ok(None) } + /// Gets the [`StaticFileJarProvider`] of the requested path. + pub fn get_segment_provider_for_path( + &self, + path: &Path, + ) -> ProviderResult>> { + StaticFileSegment::parse_filename( + &path + .file_name() + .ok_or_else(|| ProviderError::MissingStaticFilePath(path.to_path_buf()))? + .to_string_lossy(), + ) + .map(|(segment, block_range)| self.get_or_create_jar_provider(segment, &block_range)) + .transpose() + } + /// Given a segment and block range it removes the cached provider from the map. /// /// CAUTION: cached provider should be dropped before calling this or IT WILL deadlock. @@ -443,43 +618,59 @@ impl StaticFileProvider { self.map.remove(&(fixed_block_range_end, segment)); } - /// This handles history expiry by deleting all transaction static files below the given block. + /// This handles history expiry by deleting all static files for the given segment below the + /// given block. /// /// For example if block is 1M and the blocks per file are 500K this will delete all individual /// files below 1M, so 0-499K and 500K-999K. /// /// This will not delete the file that contains the block itself, because files can only be /// removed entirely. - pub fn delete_transactions_below(&self, block: BlockNumber) -> ProviderResult<()> { + /// + /// # Safety + /// + /// This method will never delete the highest static file for the segment, even if the + /// requested block is higher than the highest block in static files. This ensures we always + /// maintain at least one static file if any exist. + /// + /// Returns a list of `SegmentHeader`s from the deleted jars. + pub fn delete_segment_below_block( + &self, + segment: StaticFileSegment, + block: BlockNumber, + ) -> ProviderResult> { // Nothing to delete if block is 0. 
if block == 0 { - return Ok(()) + return Ok(Vec::new()) } + let highest_block = self.get_highest_static_file_block(segment); + let mut deleted_headers = Vec::new(); + loop { - let Some(block_height) = - self.get_lowest_static_file_block(StaticFileSegment::Transactions) - else { - return Ok(()) + let Some(block_height) = self.get_lowest_range_end(segment) else { + return Ok(deleted_headers) }; - if block_height >= block { - return Ok(()) + // Stop if we've reached the target block or the highest static file + if block_height >= block || Some(block_height) == highest_block { + return Ok(deleted_headers) } debug!( target: "provider::static_file", + ?segment, ?block_height, - "Deleting transaction static file below block" + "Deleting static file below block" ); // now we need to wipe the static file, this will take care of updating the index and - // advance the lowest tracked block height for the transactions segment. - self.delete_jar(StaticFileSegment::Transactions, block_height) - .inspect_err(|err| { - warn!( target: "provider::static_file", %block_height, ?err, "Failed to delete transaction static file below block") - }) - ?; + // advance the lowest tracked block height for the segment. + let header = self.delete_jar(segment, block_height).inspect_err(|err| { + warn!( target: "provider::static_file", ?segment, %block_height, ?err, "Failed to delete static file below block") + })?; + + deleted_headers.push(header); } } @@ -488,8 +679,14 @@ impl StaticFileProvider { /// CAUTION: destructive. Deletes files on disk. /// /// This will re-initialize the index after deletion, so all files are tracked. - pub fn delete_jar(&self, segment: StaticFileSegment, block: BlockNumber) -> ProviderResult<()> { - let fixed_block_range = self.find_fixed_range(block); + /// + /// Returns the `SegmentHeader` of the deleted jar. + pub fn delete_jar( + &self, + segment: StaticFileSegment, + block: BlockNumber, + ) -> ProviderResult { + let fixed_block_range = self.find_fixed_range(segment, block); let key = (fixed_block_range.end(), segment); let jar = if let Some((_, jar)) = self.map.remove(&key) { jar.jar @@ -505,11 +702,14 @@ impl StaticFileProvider { NippyJar::::load(&file).map_err(ProviderError::other)? }; + let header = *jar.user_header(); jar.delete().map_err(ProviderError::other)?; + // SAFETY: this is currently necessary to ensure that certain indexes like + // `static_files_min_block` have the correct values after pruning. 
self.initialize_index()?; - Ok(()) + Ok(header) } /// Given a segment and block range it returns a cached @@ -547,11 +747,16 @@ impl StaticFileProvider { segment: StaticFileSegment, block: u64, ) -> Option { - self.static_files_max_block - .read() - .get(&segment) - .filter(|max| **max >= block) - .map(|_| self.find_fixed_range(block)) + let indexes = self.indexes.read(); + let index = indexes.get(&segment)?; + + (index.max_block >= block).then(|| { + self.find_fixed_range_with_block_index( + segment, + Some(&index.expected_block_ranges_by_max_block), + block, + ) + }) } /// Gets a static file segment's fixed block range from the provider inner @@ -561,12 +766,13 @@ impl StaticFileProvider { segment: StaticFileSegment, tx: u64, ) -> Option { - let static_files = self.static_files_tx_index.read(); - let segment_static_files = static_files.get(&segment)?; + let indexes = self.indexes.read(); + let index = indexes.get(&segment)?; + let available_block_ranges_by_max_tx = index.available_block_ranges_by_max_tx.as_ref()?; // It's more probable that the request comes from a newer tx height, so we iterate // the static_files in reverse. - let mut static_files_rev_iter = segment_static_files.iter().rev().peekable(); + let mut static_files_rev_iter = available_block_ranges_by_max_tx.iter().rev().peekable(); while let Some((tx_end, block_range)) = static_files_rev_iter.next() { if tx > *tx_end { @@ -575,7 +781,11 @@ impl StaticFileProvider { } let tx_start = static_files_rev_iter.peek().map(|(tx_end, _)| *tx_end + 1).unwrap_or(0); if tx_start <= tx { - return Some(self.find_fixed_range(block_range.end())) + return Some(self.find_fixed_range_with_block_index( + segment, + Some(&index.expected_block_ranges_by_max_block), + block_range.end(), + )) } } None @@ -592,28 +802,89 @@ impl StaticFileProvider { segment: StaticFileSegment, segment_max_block: Option, ) -> ProviderResult<()> { - let mut max_block = self.static_files_max_block.write(); - let mut tx_index = self.static_files_tx_index.write(); + debug!( + target: "provider::static_file", + ?segment, + ?segment_max_block, + "Updating provider index" + ); + let mut indexes = self.indexes.write(); match segment_max_block { Some(segment_max_block) => { - // Update the max block for the segment - max_block.insert(segment, segment_max_block); - let fixed_range = self.find_fixed_range(segment_max_block); + let fixed_range = self.find_fixed_range_with_block_index( + segment, + indexes.get(&segment).map(|index| &index.expected_block_ranges_by_max_block), + segment_max_block, + ); let jar = NippyJar::::load( &self.path.join(segment.filename(&fixed_range)), ) .map_err(ProviderError::other)?; + let index = indexes + .entry(segment) + .and_modify(|index| { + // Update max block + index.max_block = segment_max_block; + + // Update expected block range index + + // Remove all expected block ranges that are less than the new max block + index + .expected_block_ranges_by_max_block + .retain(|_, block_range| block_range.start() < fixed_range.start()); + // Insert new expected block range + index + .expected_block_ranges_by_max_block + .insert(fixed_range.end(), fixed_range); + }) + .or_insert_with(|| StaticFileSegmentIndex { + min_block_range: None, + max_block: segment_max_block, + expected_block_ranges_by_max_block: BTreeMap::from([( + fixed_range.end(), + fixed_range, + )]), + available_block_ranges_by_max_tx: None, + }); + + // Update min_block to track the lowest block range of the segment. 
+ // This is initially set by initialize_index() on node startup, but must be updated + // as the file grows to prevent stale values. + // + // Without this update, min_block can remain at genesis (e.g. Some([0..=0]) or None) + // even after syncing to higher blocks (e.g. [0..=100]). A stale + // min_block causes get_lowest_static_file_block() to return the + // wrong end value, which breaks pruning logic that relies on it for + // safety checks. + // + // Example progression: + // 1. Node starts, initialize_index() sets min_block = [0..=0] + // 2. Sync to block 100, this update sets min_block = [0..=100] + // 3. Pruner calls get_lowest_static_file_block() -> returns 100 (correct). Without + // this update, it would incorrectly return 0 (stale) + if let Some(current_block_range) = jar.user_header().block_range() { + if let Some(min_block_range) = index.min_block_range.as_mut() { + // delete_jar WILL ALWAYS re-initialize all indexes, so we are always + // sure that current_min is always the lowest. + if current_block_range.start() == min_block_range.start() { + *min_block_range = current_block_range; + } + } else { + index.min_block_range = Some(current_block_range); + } + } + // Updates the tx index by first removing all entries which have a higher // block_start than our current static file. if let Some(tx_range) = jar.user_header().tx_range() { - let tx_end = tx_range.end(); - // Current block range has the same block start as `fixed_range``, but block end // might be different if we are still filling this static file. - if let Some(current_block_range) = jar.user_header().block_range().copied() { + if let Some(current_block_range) = jar.user_header().block_range() { + let tx_end = tx_range.end(); + // Considering that `update_index` is called when we either append/truncate, // we are sure that we are handling the latest data // points. @@ -622,86 +893,94 @@ impl StaticFileProvider { // equal than our current one. This is important in the case // that we prune a lot of rows resulting in a file (and thus // a higher block range) deletion. - tx_index - .entry(segment) - .and_modify(|index| { - index.retain(|_, block_range| { - block_range.start() < fixed_range.start() - }); - index.insert(tx_end, current_block_range); - }) - .or_insert_with(|| BTreeMap::from([(tx_end, current_block_range)])); + if let Some(index) = index.available_block_ranges_by_max_tx.as_mut() { + index + .retain(|_, block_range| block_range.start() < fixed_range.start()); + index.insert(tx_end, current_block_range); + } else { + index.available_block_ranges_by_max_tx = + Some(BTreeMap::from([(tx_end, current_block_range)])); + } } } else if segment.is_tx_based() { // The unwinded file has no more transactions/receipts. However, the highest // block is within this files' block range. We only retain // entries with block ranges before the current one. - tx_index.entry(segment).and_modify(|index| { + if let Some(index) = index.available_block_ranges_by_max_tx.as_mut() { index.retain(|_, block_range| block_range.start() < fixed_range.start()); - }); + } // If the index is empty, just remove it. - if tx_index.get(&segment).is_some_and(|index| index.is_empty()) { - tx_index.remove(&segment); - } + index.available_block_ranges_by_max_tx.take_if(|index| index.is_empty()); } // Update the cached provider. + debug!(target: "provider::static_file", ?segment, "Inserting updated jar into cache"); self.map.insert((fixed_range.end(), segment), LoadedJar::new(jar)?); // Delete any cached provider that no longer has an associated jar. 
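As an aside, the transaction-number lookup earlier in this file (`get_segment_ranges_from_transaction`) walks `available_block_ranges_by_max_tx` from newest to oldest, using the previous entry's max tx to infer where each file starts. A self-contained sketch of that lookup, with a plain `BTreeMap<u64, (u64, u64)>` standing in for the real index type:

use std::collections::BTreeMap;

// Maps the highest tx number in each static file to that file's block range
// (start, end). Finding the file for `tx` walks the map from newest to
// oldest, mirroring the reverse iteration used by the provider.
fn block_range_for_tx(
    ranges_by_max_tx: &BTreeMap<u64, (u64, u64)>,
    tx: u64,
) -> Option<(u64, u64)> {
    let mut iter = ranges_by_max_tx.iter().rev().peekable();
    while let Some((tx_end, block_range)) = iter.next() {
        if tx > *tx_end {
            // Requested tx is newer than anything stored on disk.
            return None;
        }
        // The next (older) file's max tx, plus one, is this file's first tx;
        // the oldest file starts at 0.
        let tx_start = iter.peek().map(|(tx_end, _)| *tx_end + 1).unwrap_or(0);
        if tx_start <= tx {
            return Some(*block_range);
        }
    }
    None
}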
+ debug!(target: "provider::static_file", ?segment, "Cleaning up jar map"); self.map.retain(|(end, seg), _| !(*seg == segment && *end > fixed_range.end())); } None => { - tx_index.remove(&segment); - max_block.remove(&segment); + debug!(target: "provider::static_file", ?segment, "Removing segment from index"); + indexes.remove(&segment); } }; + debug!(target: "provider::static_file", ?segment, "Updated provider index"); Ok(()) } /// Initializes the inner transaction and block index pub fn initialize_index(&self) -> ProviderResult<()> { - let mut min_block = self.static_files_min_block.write(); - let mut max_block = self.static_files_max_block.write(); - let mut tx_index = self.static_files_tx_index.write(); + let mut indexes = self.indexes.write(); + indexes.clear(); - min_block.clear(); - max_block.clear(); - tx_index.clear(); - - for (segment, ranges) in iter_static_files(&self.path).map_err(ProviderError::other)? { + for (segment, headers) in iter_static_files(&self.path).map_err(ProviderError::other)? { // Update first and last block for each segment - if let Some((first_block_range, _)) = ranges.first() { - min_block.insert(segment, *first_block_range); - } - if let Some((last_block_range, _)) = ranges.last() { - max_block.insert(segment, last_block_range.end()); - } + // + // It's safe to call `expect` here, because every segment has at least one header + // associated with it. + let min_block_range = Some(headers.first().expect("headers are not empty").0); + let max_block = headers.last().expect("headers are not empty").0.end(); - // Update tx -> block_range index - for (block_range, tx_range) in ranges { - if let Some(tx_range) = tx_range { + let mut expected_block_ranges_by_max_block = BTreeMap::default(); + let mut available_block_ranges_by_max_tx = None; + + for (block_range, header) in headers { + // Update max expected block -> expected_block_range index + expected_block_ranges_by_max_block + .insert(header.expected_block_end(), header.expected_block_range()); + + // Update max tx -> block_range index + if let Some(tx_range) = header.tx_range() { let tx_end = tx_range.end(); - match tx_index.entry(segment) { - Entry::Occupied(mut index) => { - index.get_mut().insert(tx_end, block_range); - } - Entry::Vacant(index) => { - index.insert(BTreeMap::from([(tx_end, block_range)])); - } - }; + available_block_ranges_by_max_tx + .get_or_insert_with(BTreeMap::default) + .insert(tx_end, block_range); } } + + indexes.insert( + segment, + StaticFileSegmentIndex { + min_block_range, + max_block, + expected_block_ranges_by_max_block, + available_block_ranges_by_max_tx, + }, + ); } // If this is a re-initialization, we need to clear this as well self.map.clear(); // initialize the expired history height to the lowest static file block - if let Some(lowest_range) = min_block.get(&StaticFileSegment::Transactions) { + if let Some(lowest_range) = + indexes.get(&StaticFileSegment::Transactions).and_then(|index| index.min_block_range) + { // the earliest height is the lowest available block number self.earliest_history_height .store(lowest_range.start(), std::sync::atomic::Ordering::Relaxed); @@ -736,10 +1015,13 @@ impl StaticFileProvider { pub fn check_consistency( &self, provider: &Provider, - has_receipt_pruning: bool, ) -> ProviderResult> where - Provider: DBProvider + BlockReader + StageCheckpointReader + ChainSpecProvider, + Provider: DBProvider + + BlockReader + + StageCheckpointReader + + ChainSpecProvider + + StorageSettingsCache, N: NodePrimitives, { // OVM historical import is broken and 
does not work with this check. It's importing @@ -774,43 +1056,41 @@ impl StaticFileProvider { }; for segment in StaticFileSegment::iter() { - if has_receipt_pruning && segment.is_receipts() { - // Pruned nodes (including full node) do not store receipts as static files. - continue + debug!(target: "reth::providers::static_file", ?segment, "Checking consistency for segment"); + match segment { + StaticFileSegment::Headers | StaticFileSegment::Transactions => {} + StaticFileSegment::Receipts => { + if EitherWriter::receipts_destination(provider).is_database() { + // Old pruned nodes (including full node) do not store receipts as static + // files. + debug!(target: "reth::providers::static_file", ?segment, "Skipping receipts consistency check: receipts stored in database"); + continue + } + + if NamedChain::Gnosis == provider.chain_spec().chain_id() || + NamedChain::Chiado == provider.chain_spec().chain_id() + { + // Gnosis and Chiado's historical import is broken and does not work with + // this check. They are importing receipts along + // with importing headers/bodies. + debug!(target: "reth::providers::static_file", ?segment, "Skipping receipts consistency check: broken historical import for gnosis/chiado"); + continue; + } + } + StaticFileSegment::TransactionSenders => { + if EitherWriterDestination::senders(provider).is_database() { + continue + } + } } - if segment.is_receipts() && - (NamedChain::Gnosis == provider.chain_spec().chain_id() || - NamedChain::Chiado == provider.chain_spec().chain_id()) - { - // Gnosis and Chiado's historical import is broken and does not work with this - // check. They are importing receipts along with importing - // headers/bodies. - continue; - } - - let initial_highest_block = self.get_highest_static_file_block(segment); - - // File consistency is broken if: - // - // * appending data was interrupted before a config commit, then data file will be - // truncated according to the config. - // - // * pruning data was interrupted before a config commit, then we have deleted data that - // we are expected to still have. We need to check the Database and unwind everything - // accordingly. - if self.access.is_read_only() { - self.check_segment_consistency(segment)?; - } else { - // Fetching the writer will attempt to heal any file level inconsistency. - self.latest_writer(segment)?; - } + // Heal file-level inconsistencies and get before/after highest block + let (initial_highest_block, mut highest_block) = self.maybe_heal_segment(segment)?; // Only applies to block-based static files. (Headers) // // The updated `highest_block` may have decreased if we healed from a pruning // interruption. - let mut highest_block = self.get_highest_static_file_block(segment); if initial_highest_block != highest_block { info!( target: "reth::providers::static_file", @@ -828,20 +1108,25 @@ impl StaticFileProvider { // from a pruning interruption might have decreased the number of transactions without // being able to update the last block of the static file segment. let highest_tx = self.get_highest_static_file_tx(segment); + debug!(target: "reth::providers::static_file", ?segment, ?highest_tx, ?highest_block, "Highest transaction for segment"); if let Some(highest_tx) = highest_tx { let mut last_block = highest_block.unwrap_or_default(); + debug!(target: "reth::providers::static_file", ?segment, last_block, highest_tx, "Verifying last transaction matches last block indices"); loop { if let Some(indices) = provider.block_body_indices(last_block)? 
{ + debug!(target: "reth::providers::static_file", ?segment, last_block, last_tx_num = indices.last_tx_num(), highest_tx, "Found block body indices"); if indices.last_tx_num() <= highest_tx { break } } else { + debug!(target: "reth::providers::static_file", ?segment, last_block, "Block body indices not found, static files ahead of database"); // If the block body indices can not be found, then it means that static // files is ahead of database, and the `ensure_invariants` check will fix // it by comparing with stage checkpoints. break } if last_block == 0 { + debug!(target: "reth::providers::static_file", ?segment, "Reached block 0 in verification loop"); break } last_block -= 1; @@ -858,6 +1143,7 @@ impl StaticFileProvider { } } + debug!(target: "reth::providers::static_file", ?segment, "Ensuring invariants for segment"); if let Some(unwind) = match segment { StaticFileSegment::Headers => self .ensure_invariants::<_, tables::Headers>( @@ -880,8 +1166,18 @@ impl StaticFileProvider { highest_tx, highest_block, )?, + StaticFileSegment::TransactionSenders => self + .ensure_invariants::<_, tables::TransactionSenders>( + provider, + segment, + highest_tx, + highest_block, + )?, } { + debug!(target: "reth::providers::static_file", ?segment, unwind_target=unwind, "Invariants check returned unwind target"); update_unwind_target(unwind); + } else { + debug!(target: "reth::providers::static_file", ?segment, "Invariants check completed, no unwind needed"); } } @@ -891,17 +1187,65 @@ impl StaticFileProvider { /// Checks consistency of the latest static file segment and throws an error if at fault. /// Read-only. pub fn check_segment_consistency(&self, segment: StaticFileSegment) -> ProviderResult<()> { + debug!(target: "reth::providers::static_file", ?segment, "Checking segment consistency"); if let Some(latest_block) = self.get_highest_static_file_block(segment) { - let file_path = - self.directory().join(segment.filename(&self.find_fixed_range(latest_block))); + let file_path = self + .directory() + .join(segment.filename(&self.find_fixed_range(segment, latest_block))); + debug!(target: "reth::providers::static_file", ?segment, ?file_path, latest_block, "Loading NippyJar for consistency check"); let jar = NippyJar::::load(&file_path).map_err(ProviderError::other)?; + debug!(target: "reth::providers::static_file", ?segment, "NippyJar loaded, checking consistency"); NippyJarChecker::new(jar).check_consistency().map_err(ProviderError::other)?; + debug!(target: "reth::providers::static_file", ?segment, "NippyJar consistency check passed"); + } else { + debug!(target: "reth::providers::static_file", ?segment, "No static file block found, skipping consistency check"); } Ok(()) } + /// Attempts to heal file-level (`NippyJar`) inconsistencies for a single static file segment. + /// + /// Returns the highest block before and after healing, which can be used to detect + /// if healing from a pruning interruption decreased the highest block. + /// + /// File consistency is broken if: + /// + /// * appending data was interrupted before a config commit, then data file will be truncated + /// according to the config. + /// + /// * pruning data was interrupted before a config commit, then we have deleted data that we are + /// expected to still have. We need to check the Database and unwind everything accordingly. + /// + /// **Note:** In read-only mode, this will return an error if a consistency issue is detected, + /// since healing requires write access. 
+ fn maybe_heal_segment( + &self, + segment: StaticFileSegment, + ) -> ProviderResult<(Option, Option)> { + let initial_highest_block = self.get_highest_static_file_block(segment); + debug!(target: "reth::providers::static_file", ?segment, ?initial_highest_block, "Initial highest block for segment"); + + if self.access.is_read_only() { + // Read-only mode: cannot modify files, so just validate consistency and error if + // broken. + debug!(target: "reth::providers::static_file", ?segment, "Checking segment consistency (read-only)"); + self.check_segment_consistency(segment)?; + } else { + // Writable mode: fetching the writer will automatically heal any file-level + // inconsistency by truncating data to match the last committed config. + debug!(target: "reth::providers::static_file", ?segment, "Fetching latest writer which might heal any potential inconsistency"); + self.latest_writer(segment)?; + } + + // The updated `highest_block` may have decreased if we healed from a pruning + // interruption. + let highest_block = self.get_highest_static_file_block(segment); + + Ok((initial_highest_block, highest_block)) + } + /// Check invariants for each corresponding table and static file segment: /// /// * the corresponding database table should overlap or have continuity in their keys @@ -926,9 +1270,11 @@ impl StaticFileProvider { where Provider: DBProvider + BlockReader + StageCheckpointReader, { + debug!(target: "reth::providers::static_file", ?segment, ?highest_static_file_entry, ?highest_static_file_block, "Ensuring invariants"); let mut db_cursor = provider.tx_ref().cursor_read::()?; if let Some((db_first_entry, _)) = db_cursor.first()? { + debug!(target: "reth::providers::static_file", ?segment, db_first_entry, "Found first database entry"); if let (Some(highest_entry), Some(highest_block)) = (highest_static_file_entry, highest_static_file_block) { @@ -952,8 +1298,11 @@ impl StaticFileProvider { highest_static_file_entry .is_none_or(|highest_entry| db_last_entry > highest_entry) { + debug!(target: "reth::providers::static_file", ?segment, db_last_entry, ?highest_static_file_entry, "Database has entries beyond static files, no unwind needed"); return Ok(None) } + } else { + debug!(target: "reth::providers::static_file", ?segment, "No database entries found"); } let highest_static_file_entry = highest_static_file_entry.unwrap_or_default(); @@ -961,14 +1310,15 @@ impl StaticFileProvider { // If static file entry is ahead of the database entries, then ensure the checkpoint block // number matches. - let checkpoint_block_number = provider - .get_stage_checkpoint(match segment { - StaticFileSegment::Headers => StageId::Headers, - StaticFileSegment::Transactions => StageId::Bodies, - StaticFileSegment::Receipts => StageId::Execution, - })? - .unwrap_or_default() - .block_number; + let stage_id = match segment { + StaticFileSegment::Headers => StageId::Headers, + StaticFileSegment::Transactions => StageId::Bodies, + StaticFileSegment::Receipts => StageId::Execution, + StaticFileSegment::TransactionSenders => StageId::SenderRecovery, + }; + let checkpoint_block_number = + provider.get_stage_checkpoint(stage_id)?.unwrap_or_default().block_number; + debug!(target: "reth::providers::static_file", ?segment, ?stage_id, checkpoint_block_number, highest_static_file_block, "Retrieved stage checkpoint"); // If the checkpoint is ahead, then we lost static file data. May be data corruption. 
if checkpoint_block_number > highest_static_file_block { @@ -994,22 +1344,43 @@ impl StaticFileProvider { "Unwinding static file segment." ); let mut writer = self.latest_writer(segment)?; - if segment.is_headers() { - // TODO(joshie): is_block_meta - writer.prune_headers(highest_static_file_block - checkpoint_block_number)?; - } else if let Some(block) = provider.block_body_indices(checkpoint_block_number)? { - // todo joshie: is querying block_body_indices a potential issue once bbi is moved - // to sf as well - let number = highest_static_file_entry - block.last_tx_num(); - if segment.is_receipts() { - writer.prune_receipts(number, checkpoint_block_number)?; - } else { - writer.prune_transactions(number, checkpoint_block_number)?; + match segment { + StaticFileSegment::Headers => { + let prune_count = highest_static_file_block - checkpoint_block_number; + debug!(target: "reth::providers::static_file", ?segment, prune_count, "Pruning headers"); + // TODO(joshie): is_block_meta + writer.prune_headers(prune_count)?; + } + StaticFileSegment::Transactions | + StaticFileSegment::Receipts | + StaticFileSegment::TransactionSenders => { + if let Some(block) = provider.block_body_indices(checkpoint_block_number)? { + let number = highest_static_file_entry - block.last_tx_num(); + debug!(target: "reth::providers::static_file", ?segment, prune_count = number, checkpoint_block_number, "Pruning transaction based segment"); + + match segment { + StaticFileSegment::Transactions => { + writer.prune_transactions(number, checkpoint_block_number)? + } + StaticFileSegment::Receipts => { + writer.prune_receipts(number, checkpoint_block_number)? + } + StaticFileSegment::TransactionSenders => { + writer.prune_transaction_senders(number, checkpoint_block_number)? + } + StaticFileSegment::Headers => unreachable!(), + } + } else { + debug!(target: "reth::providers::static_file", ?segment, checkpoint_block_number, "No block body indices found for checkpoint block"); + } } } + debug!(target: "reth::providers::static_file", ?segment, "Committing writer after pruning"); writer.commit()?; + debug!(target: "reth::providers::static_file", ?segment, "Writer committed successfully"); } + debug!(target: "reth::providers::static_file", ?segment, "Invariants ensured, returning None"); Ok(None) } @@ -1024,45 +1395,46 @@ impl StaticFileProvider { self.earliest_history_height.load(std::sync::atomic::Ordering::Relaxed) } - /// Gets the lowest transaction static file block if it exists. - /// - /// For example if the transactions static file has blocks 0-499, this will return 499.. - /// - /// If there is nothing on disk for the given segment, this will return [`None`]. - pub fn get_lowest_transaction_static_file_block(&self) -> Option { - self.get_lowest_static_file_block(StaticFileSegment::Transactions) - } - - /// Gets the lowest static file's block height if it exists for a static file segment. - /// - /// For example if the static file has blocks 0-499, this will return 499.. - /// - /// If there is nothing on disk for the given segment, this will return [`None`]. - pub fn get_lowest_static_file_block(&self, segment: StaticFileSegment) -> Option { - self.static_files_min_block.read().get(&segment).map(|range| range.end()) - } - /// Gets the lowest static file's block range if it exists for a static file segment. /// /// If there is nothing on disk for the given segment, this will return [`None`]. 
pub fn get_lowest_range(&self, segment: StaticFileSegment) -> Option { - self.static_files_min_block.read().get(&segment).copied() + self.indexes.read().get(&segment).and_then(|index| index.min_block_range) + } + + /// Gets the lowest static file's block range start if it exists for a static file segment. + /// + /// For example if the lowest static file has blocks 0-499, this will return 0. + /// + /// If there is nothing on disk for the given segment, this will return [`None`]. + pub fn get_lowest_range_start(&self, segment: StaticFileSegment) -> Option { + self.get_lowest_range(segment).map(|range| range.start()) + } + + /// Gets the lowest static file's block range end if it exists for a static file segment. + /// + /// For example if the static file has blocks 0-499, this will return 499. + /// + /// If there is nothing on disk for the given segment, this will return [`None`]. + pub fn get_lowest_range_end(&self, segment: StaticFileSegment) -> Option { + self.get_lowest_range(segment).map(|range| range.end()) } /// Gets the highest static file's block height if it exists for a static file segment. /// /// If there is nothing on disk for the given segment, this will return [`None`]. pub fn get_highest_static_file_block(&self, segment: StaticFileSegment) -> Option { - self.static_files_max_block.read().get(&segment).copied() + self.indexes.read().get(&segment).map(|index| index.max_block) } /// Gets the highest static file transaction. /// /// If there is nothing on disk for the given segment, this will return [`None`]. pub fn get_highest_static_file_tx(&self, segment: StaticFileSegment) -> Option { - self.static_files_tx_index + self.indexes .read() .get(&segment) + .and_then(|index| index.available_block_ranges_by_max_tx.as_ref()) .and_then(|index| index.last_key_value().map(|(last_tx, _)| *last_tx)) } @@ -1080,16 +1452,14 @@ impl StaticFileProvider { segment: StaticFileSegment, func: impl Fn(StaticFileJarProvider<'_, N>) -> ProviderResult>, ) -> ProviderResult> { - if let Some(highest_block) = self.get_highest_static_file_block(segment) { - let mut range = self.find_fixed_range(highest_block); - while range.end() > 0 { - if let Some(res) = func(self.get_or_create_jar_provider(segment, &range)?)? { + if let Some(ranges) = + self.indexes.read().get(&segment).map(|index| &index.expected_block_ranges_by_max_block) + { + // Iterate through all ranges in reverse order (highest to lowest) + for range in ranges.values().rev() { + if let Some(res) = func(self.get_or_create_jar_provider(segment, range)?)? { return Ok(Some(res)) } - range = SegmentRangeInclusive::new( - range.start().saturating_sub(self.blocks_per_file), - range.end().saturating_sub(self.blocks_per_file), - ); } } @@ -1119,13 +1489,7 @@ impl StaticFileProvider { /// If the static file is missing, the `result` is returned. macro_rules! get_provider { ($number:expr) => {{ - let provider = if segment.is_block_based() { - self.get_segment_provider_from_block(segment, $number, None) - } else { - self.get_segment_provider_from_transaction(segment, $number, None) - }; - - match provider { + match self.get_segment_provider(segment, $number) { Ok(provider) => provider, Err( ProviderError::MissingStaticFileBlock(_, _) | @@ -1179,37 +1543,37 @@ impl StaticFileProvider { /// Fetches data within a specified range across multiple static files. /// - /// Returns an iterator over the data + /// Returns an iterator over the data. Yields [`None`] if the data for the specified number is + /// not found. 
pub fn fetch_range_iter<'a, T, F>( &'a self, segment: StaticFileSegment, range: Range, get_fn: F, - ) -> ProviderResult> + 'a> + ) -> ProviderResult>> + 'a> where F: Fn(&mut StaticFileCursor<'_>, u64) -> ProviderResult> + 'a, T: std::fmt::Debug, { - let get_provider = move |start: u64| { - if segment.is_block_based() { - self.get_segment_provider_from_block(segment, start, None) - } else { - self.get_segment_provider_from_transaction(segment, start, None) - } - }; - - let mut provider = Some(get_provider(range.start)?); - Ok(range.filter_map(move |number| { - match get_fn(&mut provider.as_ref().expect("qed").cursor().ok()?, number).transpose() { - Some(result) => Some(result), + let mut provider = self.get_maybe_segment_provider(segment, range.start)?; + Ok(range.map(move |number| { + match provider + .as_ref() + .map(|provider| get_fn(&mut provider.cursor()?, number)) + .and_then(|result| result.transpose()) + { + Some(result) => result.map(Some), None => { - // There is a very small chance of hitting a deadlock if two consecutive static - // files share the same bucket in the internal dashmap and - // we don't drop the current provider before requesting the - // next one. + // There is a very small chance of hitting a deadlock if two consecutive + // static files share the same bucket in the internal dashmap and we don't drop + // the current provider before requesting the next one. provider.take(); - provider = Some(get_provider(number).ok()?); - get_fn(&mut provider.as_ref().expect("qed").cursor().ok()?, number).transpose() + provider = self.get_maybe_segment_provider(segment, number)?; + provider + .as_ref() + .map(|provider| get_fn(&mut provider.cursor()?, number)) + .and_then(|result| result.transpose()) + .transpose() } } })) @@ -1260,7 +1624,7 @@ impl StaticFileProvider { /// /// # Arguments /// * `segment` - The segment of the static file to query. - /// * `block_range` - The range of data to fetch. + /// * `block_or_tx_range` - The range of data to fetch. /// * `fetch_from_static_file` - A function to fetch data from the `static_file`. /// * `fetch_from_database` - A function to fetch data from the database. /// * `predicate` - A function used to evaluate each item in the fetched data. Fetching is @@ -1304,17 +1668,62 @@ impl StaticFileProvider { Ok(data) } - /// Returns `static_files` directory + /// Returns static files directory #[cfg(any(test, feature = "test-utils"))] pub fn path(&self) -> &Path { &self.path } - /// Returns `static_files` transaction index + /// Returns transaction index #[cfg(any(test, feature = "test-utils"))] - pub fn tx_index(&self) -> &RwLock { - &self.static_files_tx_index + pub fn tx_index(&self, segment: StaticFileSegment) -> Option { + self.indexes + .read() + .get(&segment) + .and_then(|index| index.available_block_ranges_by_max_tx.as_ref()) + .cloned() } + + /// Returns expected block index + #[cfg(any(test, feature = "test-utils"))] + pub fn expected_block_index(&self, segment: StaticFileSegment) -> Option { + self.indexes + .read() + .get(&segment) + .map(|index| &index.expected_block_ranges_by_max_block) + .cloned() + } +} + +#[derive(Debug)] +struct StaticFileSegmentIndex { + /// Min static file block range. + /// + /// This index is initialized on launch to keep track of the lowest, non-expired static file + /// per segment and gets updated on [`StaticFileProvider::update_index`]. + /// + /// This tracks the lowest static file per segment together with the block range in that + /// file. E.g. 
static file is batched in 500k block intervals then the lowest static file + /// is [0..499K], and the block range is start = 0, end = 499K. + /// + /// This index is mainly used for history expiry, which targets transactions, e.g. pre-merge + /// history expiry would lead to removing all static files below the merge height. + min_block_range: Option, + /// Max static file block. + max_block: u64, + /// Expected static file block ranges indexed by max expected blocks. + /// + /// For example, a static file for expected block range `0..=499_000` may have only block range + /// `0..=1000` contained in it, as it's not fully filled yet. This index maps the max expected + /// block to the expected range, i.e. block `499_000` to block range `0..=499_000`. + expected_block_ranges_by_max_block: SegmentRanges, + /// Available on disk static file block ranges indexed by max transactions. + /// + /// For example, a static file for block range `0..=499_000` may only have block range + /// `0..=1000` and transaction range `0..=2000` contained in it. This index maps the max + /// available transaction to the available block range, i.e. transaction `2000` to block range + /// `0..=1000`. + available_block_ranges_by_max_tx: Option, } /// Helper trait to manage different [`StaticFileProviderRW`] of an `Arc StaticFileWriter for StaticFileProvider { &self, segment: StaticFileSegment, ) -> ProviderResult> { - self.get_writer(self.get_highest_static_file_block(segment).unwrap_or_default(), segment) + let genesis_number = self.0.as_ref().genesis_block_number(); + self.get_writer( + self.get_highest_static_file_block(segment).unwrap_or(genesis_number), + segment, + ) } fn commit(&self) -> ProviderResult<()> { @@ -1395,7 +1808,7 @@ impl> HeaderProvider for StaticFileProvide } fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { - self.get_segment_provider_from_block(StaticFileSegment::Headers, num, None) + self.get_segment_provider_for_block(StaticFileSegment::Headers, num, None) .and_then(|provider| provider.header_by_number(num)) .or_else(|err| { if let ProviderError::MissingStaticFileBlock(_, _) = err { @@ -1422,7 +1835,7 @@ impl> HeaderProvider for StaticFileProvide &self, num: BlockNumber, ) -> ProviderResult>> { - self.get_segment_provider_from_block(StaticFileSegment::Headers, num, None) + self.get_segment_provider_for_block(StaticFileSegment::Headers, num, None) .and_then(|provider| provider.sealed_header(num)) .or_else(|err| { if let ProviderError::MissingStaticFileBlock(_, _) = err { @@ -1453,7 +1866,7 @@ impl> HeaderProvider for StaticFileProvide impl BlockHashReader for StaticFileProvider { fn block_hash(&self, num: u64) -> ProviderResult> { - self.get_segment_provider_from_block(StaticFileSegment::Headers, num, None) + self.get_segment_provider_for_block(StaticFileSegment::Headers, num, None) .and_then(|provider| provider.block_hash(num)) .or_else(|err| { if let ProviderError::MissingStaticFileBlock(_, _) = err { @@ -1484,7 +1897,7 @@ impl> Rec type Receipt = N::Receipt; fn receipt(&self, num: TxNumber) -> ProviderResult> { - self.get_segment_provider_from_transaction(StaticFileSegment::Receipts, num, None) + self.get_segment_provider_for_transaction(StaticFileSegment::Receipts, num, None) .and_then(|provider| provider.receipt(num)) .or_else(|err| { if let ProviderError::MissingStaticFileTx(_, _) = err { @@ -1613,7 +2026,7 @@ impl> TransactionsPr } fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { - self.get_segment_provider_from_transaction(StaticFileSegment::Transactions, 
num, None) + self.get_segment_provider_for_transaction(StaticFileSegment::Transactions, num, None) .and_then(|provider| provider.transaction_by_id(num)) .or_else(|err| { if let ProviderError::MissingStaticFileTx(_, _) = err { @@ -1628,7 +2041,7 @@ impl> TransactionsPr &self, num: TxNumber, ) -> ProviderResult> { - self.get_segment_provider_from_transaction(StaticFileSegment::Transactions, num, None) + self.get_segment_provider_for_transaction(StaticFileSegment::Transactions, num, None) .and_then(|provider| provider.transaction_by_id_unhashed(num)) .or_else(|err| { if let ProviderError::MissingStaticFileTx(_, _) = err { @@ -1656,11 +2069,6 @@ impl> TransactionsPr Err(ProviderError::UnsupportedProvider) } - fn transaction_block(&self, _id: TxNumber) -> ProviderResult> { - // Required data not present in static_files - Err(ProviderError::UnsupportedProvider) - } - fn transactions_by_block( &self, _block_id: BlockHashOrNumber, @@ -1693,15 +2101,24 @@ impl> TransactionsPr &self, range: impl RangeBounds, ) -> ProviderResult> { - let txes = self.transactions_by_tx_range(range)?; - Ok(reth_primitives_traits::transaction::recover::recover_signers(&txes)?) + self.fetch_range_with_predicate( + StaticFileSegment::TransactionSenders, + to_range(range), + |cursor, number| cursor.get_one::(number.into()), + |_| true, + ) } fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { - match self.transaction_by_id_unhashed(id)? { - Some(tx) => Ok(tx.recover_signer().ok()), - None => Ok(None), - } + self.get_segment_provider_for_transaction(StaticFileSegment::TransactionSenders, id, None) + .and_then(|provider| provider.transaction_sender(id)) + .or_else(|err| { + if let ProviderError::MissingStaticFileTx(_, _) = err { + Ok(None) + } else { + Err(err) + } + }) } } @@ -1833,6 +2250,10 @@ impl StatsReader for StaticFileProvider { .map(|txs| txs + 1) .unwrap_or_default() as usize), + tables::TransactionSenders::NAME => Ok(self + .get_highest_static_file_tx(StaticFileSegment::TransactionSenders) + .map(|txs| txs + 1) + .unwrap_or_default() as usize), _ => Err(ProviderError::UnsupportedProvider), } } @@ -1851,3 +2272,135 @@ where tx.encode_2718(rlp_buf); Ok((keccak256(rlp_buf), tx_id)) } + +#[cfg(test)] +mod tests { + use std::collections::BTreeMap; + + use reth_chain_state::EthPrimitives; + use reth_db::test_utils::create_test_static_files_dir; + use reth_static_file_types::{SegmentRangeInclusive, StaticFileSegment}; + + use crate::StaticFileProviderBuilder; + + #[test] + fn test_find_fixed_range_with_block_index() -> eyre::Result<()> { + let (static_dir, _) = create_test_static_files_dir(); + let sf_rw = StaticFileProviderBuilder::::read_write(&static_dir)? 
+ .with_blocks_per_file(100) + .build()?; + + let segment = StaticFileSegment::Headers; + + // Test with None - should use default behavior + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, None, 0), + SegmentRangeInclusive::new(0, 99) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, None, 250), + SegmentRangeInclusive::new(200, 299) + ); + + // Test with empty index - should fall back to default behavior + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&BTreeMap::new()), 150), + SegmentRangeInclusive::new(100, 199) + ); + + // Create block index with existing ranges + let block_index = BTreeMap::from_iter([ + (99, SegmentRangeInclusive::new(0, 99)), + (199, SegmentRangeInclusive::new(100, 199)), + (299, SegmentRangeInclusive::new(200, 299)), + ]); + + // Test blocks within existing ranges - should return the matching range + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 0), + SegmentRangeInclusive::new(0, 99) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 50), + SegmentRangeInclusive::new(0, 99) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 99), + SegmentRangeInclusive::new(0, 99) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 100), + SegmentRangeInclusive::new(100, 199) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 150), + SegmentRangeInclusive::new(100, 199) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 199), + SegmentRangeInclusive::new(100, 199) + ); + + // Test blocks beyond existing ranges - should derive new ranges from the last range + // Block 300 is exactly one segment after the last range + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 300), + SegmentRangeInclusive::new(300, 399) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 350), + SegmentRangeInclusive::new(300, 399) + ); + + // Block 500 skips one segment (300-399) + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 500), + SegmentRangeInclusive::new(500, 599) + ); + + // Block 1000 skips many segments + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&block_index), 1000), + SegmentRangeInclusive::new(1000, 1099) + ); + + // Test with block index having different sizes than blocks_per_file setting + // This simulates the scenario where blocks_per_file was changed between runs + let mixed_size_index = BTreeMap::from_iter([ + (49, SegmentRangeInclusive::new(0, 49)), // 50 blocks + (149, SegmentRangeInclusive::new(50, 149)), // 100 blocks + (349, SegmentRangeInclusive::new(150, 349)), // 200 blocks + ]); + + // Blocks within existing ranges should return those ranges regardless of size + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&mixed_size_index), 25), + SegmentRangeInclusive::new(0, 49) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&mixed_size_index), 100), + SegmentRangeInclusive::new(50, 149) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&mixed_size_index), 200), + SegmentRangeInclusive::new(150, 349) + ); + + // Block after the last range should derive using current blocks_per_file (100) + // from the end of the last range (349) + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, 
Some(&mixed_size_index), 350), + SegmentRangeInclusive::new(350, 449) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&mixed_size_index), 450), + SegmentRangeInclusive::new(450, 549) + ); + assert_eq!( + sf_rw.find_fixed_range_with_block_index(segment, Some(&mixed_size_index), 550), + SegmentRangeInclusive::new(550, 649) + ); + + Ok(()) + } +} diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 3c25f157bb..9935c8168f 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -1,5 +1,7 @@ mod manager; -pub use manager::{StaticFileAccess, StaticFileProvider, StaticFileWriter}; +pub use manager::{ + StaticFileAccess, StaticFileProvider, StaticFileProviderBuilder, StaticFileWriter, +}; mod jar; pub use jar::StaticFileJarProvider; @@ -55,10 +57,11 @@ impl Deref for LoadedJar { mod tests { use super::*; use crate::{ + providers::static_file::manager::StaticFileProviderBuilder, test_utils::create_test_provider_factory, HeaderProvider, StaticFileProviderFactory, }; use alloy_consensus::{Header, SignableTransaction, Transaction, TxLegacy}; - use alloy_primitives::{BlockHash, Signature, TxNumber, B256}; + use alloy_primitives::{Address, BlockHash, Signature, TxNumber, B256, U160}; use rand::seq::SliceRandom; use reth_db::test_utils::create_test_static_files_dir; use reth_db_api::{transaction::DbTxMut, CanonicalHeaders, HeaderNumbers, Headers}; @@ -68,7 +71,7 @@ mod tests { }; use reth_storage_api::{ReceiptProvider, TransactionsProvider}; use reth_testing_utils::generators::{self, random_header_range}; - use std::{fmt::Debug, fs, ops::Range, path::Path}; + use std::{collections::BTreeMap, fmt::Debug, fs, ops::Range, path::Path}; fn assert_eyre(got: T, expected: T, msg: &str) -> eyre::Result<()> { if got != expected { @@ -78,7 +81,7 @@ mod tests { } #[test] - fn test_snap() { + fn test_static_files() { // Ranges let row_count = 100u64; let range = 0..=(row_count - 1); @@ -126,7 +129,7 @@ mod tests { let db_provider = factory.provider().unwrap(); let manager = db_provider.static_file_provider(); let jar_provider = manager - .get_segment_provider_from_block(StaticFileSegment::Headers, 0, Some(&static_file)) + .get_segment_provider_for_block(StaticFileSegment::Headers, 0, Some(&static_file)) .unwrap(); assert!(!headers.is_empty()); @@ -157,9 +160,11 @@ mod tests { // [ Headers Creation and Commit ] { - let sf_rw = StaticFileProvider::::read_write(&static_dir) - .expect("Failed to create static file provider") - .with_custom_blocks_per_file(blocks_per_file); + let sf_rw = StaticFileProviderBuilder::::read_write(&static_dir) + .expect("Failed to create static file provider builder") + .with_blocks_per_file(blocks_per_file) + .build() + .expect("Failed to build static file provider"); let mut header_writer = sf_rw.latest_writer(StaticFileSegment::Headers).unwrap(); @@ -251,9 +256,11 @@ mod tests { // Test cases execution { - let sf_rw = StaticFileProvider::read_write(&static_dir) - .expect("Failed to create static file provider") - .with_custom_blocks_per_file(blocks_per_file); + let sf_rw = StaticFileProviderBuilder::read_write(&static_dir) + .expect("Failed to create static file provider builder") + .with_blocks_per_file(blocks_per_file) + .build() + .expect("Failed to build static file provider"); assert_eq!(sf_rw.get_highest_static_file_block(StaticFileSegment::Headers), Some(tip)); assert_eq!( @@ -307,16 +314,25 @@ mod 
tests { // Append transaction/receipt if there's still a transaction count to append if tx_count > 0 { - if segment.is_receipts() { - // Used as ID for validation - receipt.cumulative_gas_used = *next_tx_num; - writer.append_receipt(*next_tx_num, &receipt).unwrap(); - } else { - // Used as ID for validation - tx.nonce = *next_tx_num; - let tx: TransactionSigned = - tx.clone().into_signed(Signature::test_signature()).into(); - writer.append_transaction(*next_tx_num, &tx).unwrap(); + match segment { + StaticFileSegment::Headers => panic!("non tx based segment"), + StaticFileSegment::Transactions => { + // Used as ID for validation + tx.nonce = *next_tx_num; + let tx: TransactionSigned = + tx.clone().into_signed(Signature::test_signature()).into(); + writer.append_transaction(*next_tx_num, &tx).unwrap(); + } + StaticFileSegment::Receipts => { + // Used as ID for validation + receipt.cumulative_gas_used = *next_tx_num; + writer.append_receipt(*next_tx_num, &receipt).unwrap(); + } + StaticFileSegment::TransactionSenders => { + // Used as ID for validation + let sender = Address::from(U160::from(*next_tx_num)); + writer.append_transaction_sender(*next_tx_num, &sender).unwrap(); + } } *next_tx_num += 1; tx_count -= 1; @@ -371,20 +387,21 @@ mod tests { block_ranges.iter().zip(expected_tx_ranges).for_each(|(block_range, expected_tx_range)| { assert_eq!( sf_rw - .get_segment_provider_from_block(segment, block_range.start, None) + .get_segment_provider_for_block(segment, block_range.start, None) .unwrap() .user_header() .tx_range(), - expected_tx_range.as_ref() + expected_tx_range ); }); // Ensure transaction index - let tx_index = sf_rw.tx_index().read(); - let expected_tx_index = - vec![(8, SegmentRangeInclusive::new(0, 9)), (9, SegmentRangeInclusive::new(20, 29))]; + let expected_tx_index = BTreeMap::from([ + (8, SegmentRangeInclusive::new(0, 9)), + (9, SegmentRangeInclusive::new(20, 29)), + ]); assert_eq!( - tx_index.get(&segment).map(|index| index.iter().map(|(k, v)| (*k, *v)).collect()), + sf_rw.tx_index(segment), (!expected_tx_index.is_empty()).then_some(expected_tx_index), "tx index mismatch", ); @@ -407,15 +424,20 @@ mod tests { last_block: u64, expected_tx_tip: Option, expected_file_count: i32, - expected_tx_index: Vec<(TxNumber, SegmentRangeInclusive)>, + expected_tx_index: BTreeMap, ) -> eyre::Result<()> { let mut writer = sf_rw.latest_writer(segment)?; // Prune transactions or receipts based on the segment type - if segment.is_receipts() { - writer.prune_receipts(prune_count, last_block)?; - } else { - writer.prune_transactions(prune_count, last_block)?; + match segment { + StaticFileSegment::Headers => panic!("non tx based segment"), + StaticFileSegment::Transactions => { + writer.prune_transactions(prune_count, last_block)? + } + StaticFileSegment::Receipts => writer.prune_receipts(prune_count, last_block)?, + StaticFileSegment::TransactionSenders => { + writer.prune_transaction_senders(prune_count, last_block)? + } } writer.commit()?; @@ -430,18 +452,25 @@ mod tests { // Verify that transactions and receipts are returned correctly. Uses // cumulative_gas_used & nonce as ids. 
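As a side note, a small self-contained sketch (illustrative, mirroring the calls used in this test) of how a transaction number round-trips through the sender address that the `TransactionSenders` segment stores:

use alloy_primitives::{Address, U160};

fn main() {
    let tx_num: u64 = 42;
    // Encode: the tx number doubles as the sender address when appending.
    let sender = Address::from(U160::from(tx_num));
    // Decode: recover the tx number from the address bytes when validating.
    let recovered = u64::try_from(U160::from_be_bytes(sender.0.into())).unwrap();
    assert_eq!(recovered, tx_num);
}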
if let Some(id) = expected_tx_tip { - if segment.is_receipts() { - assert_eyre( - expected_tx_tip, - sf_rw.receipt(id)?.map(|r| r.cumulative_gas_used), - "tx mismatch", - )?; - } else { - assert_eyre( + match segment { + StaticFileSegment::Headers => panic!("non tx based segment"), + StaticFileSegment::Transactions => assert_eyre( expected_tx_tip, sf_rw.transaction_by_id(id)?.map(|t| t.nonce()), "tx mismatch", - )?; + )?, + StaticFileSegment::Receipts => assert_eyre( + expected_tx_tip, + sf_rw.receipt(id)?.map(|r| r.cumulative_gas_used), + "receipt mismatch", + )?, + StaticFileSegment::TransactionSenders => assert_eyre( + expected_tx_tip, + sf_rw + .transaction_sender(id)? + .map(|s| u64::try_from(U160::from_be_bytes(s.0.into())).unwrap()), + "sender mismatch", + )?, } } @@ -453,9 +482,8 @@ mod tests { )?; // Ensure that the inner tx index (max_tx -> block range) is as expected - let tx_index = sf_rw.tx_index().read(); assert_eyre( - tx_index.get(&segment).map(|index| index.iter().map(|(k, v)| (*k, *v)).collect()), + sf_rw.tx_index(segment).map(|index| index.iter().map(|(k, v)| (*k, *v)).collect()), (!expected_tx_index.is_empty()).then_some(expected_tx_index), "tx index mismatch", )?; @@ -466,15 +494,19 @@ mod tests { for segment in segments { let (static_dir, _) = create_test_static_files_dir(); - let sf_rw = StaticFileProvider::read_write(&static_dir) - .expect("Failed to create static file provider") - .with_custom_blocks_per_file(blocks_per_file); + let sf_rw = StaticFileProviderBuilder::read_write(&static_dir) + .expect("Failed to create static file provider builder") + .with_blocks_per_file(blocks_per_file) + .build() + .expect("Failed to build static file provider"); setup_tx_based_scenario(&sf_rw, segment, blocks_per_file); - let sf_rw = StaticFileProvider::read_write(&static_dir) - .expect("Failed to create static file provider") - .with_custom_blocks_per_file(blocks_per_file); + let sf_rw = StaticFileProviderBuilder::read_write(&static_dir) + .expect("Failed to create static file provider builder") + .with_blocks_per_file(blocks_per_file) + .build() + .expect("Failed to build static file provider"); let highest_tx = sf_rw.get_highest_static_file_tx(segment).unwrap(); // Test cases @@ -489,7 +521,7 @@ mod tests { blocks_per_file * 2, Some(highest_tx - 1), initial_file_count, - vec![(highest_tx - 1, SegmentRangeInclusive::new(0, 9))], + BTreeMap::from([(highest_tx - 1, SegmentRangeInclusive::new(0, 9))]), ), // Case 1: 10..=19 has no txs. There are no txes in the whole block range, but want // to unwind to block 9. Ensures that the 20..=29 and 10..=19 files @@ -499,7 +531,7 @@ mod tests { blocks_per_file - 1, Some(highest_tx - 1), files_per_range, - vec![(highest_tx - 1, SegmentRangeInclusive::new(0, 9))], + BTreeMap::from([(highest_tx - 1, SegmentRangeInclusive::new(0, 9))]), ), // Case 2: Prune most txs up to block 1. ( @@ -507,10 +539,10 @@ mod tests { 1, Some(0), files_per_range, - vec![(0, SegmentRangeInclusive::new(0, 1))], + BTreeMap::from([(0, SegmentRangeInclusive::new(0, 1))]), ), // Case 3: Prune remaining tx and ensure that file is not deleted. - (1, 0, None, files_per_range, vec![]), + (1, 0, None, files_per_range, BTreeMap::from([])), ]; // Loop through test cases @@ -547,4 +579,84 @@ mod tests { Ok(count) } + + #[test] + fn test_dynamic_size() -> eyre::Result<()> { + let (static_dir, _) = create_test_static_files_dir(); + + { + let sf_rw = StaticFileProviderBuilder::::read_write(&static_dir)? 
+ .with_blocks_per_file(10) + .build()?; + let mut header_writer = sf_rw.latest_writer(StaticFileSegment::Headers)?; + + let mut header = Header::default(); + for num in 0..=15 { + header.number = num; + header_writer.append_header(&header, &BlockHash::default()).unwrap(); + } + header_writer.commit().unwrap(); + + assert_eq!(sf_rw.headers_range(0..=15)?.len(), 16); + assert_eq!( + sf_rw.expected_block_index(StaticFileSegment::Headers), + Some(BTreeMap::from([ + (9, SegmentRangeInclusive::new(0, 9)), + (19, SegmentRangeInclusive::new(10, 19)) + ])), + ) + } + + { + let sf_rw = StaticFileProviderBuilder::::read_write(&static_dir)? + .with_blocks_per_file(5) + .build()?; + let mut header_writer = sf_rw.latest_writer(StaticFileSegment::Headers)?; + + let mut header = Header::default(); + for num in 16..=22 { + header.number = num; + header_writer.append_header(&header, &BlockHash::default()).unwrap(); + } + header_writer.commit().unwrap(); + + assert_eq!(sf_rw.headers_range(0..=22)?.len(), 23); + assert_eq!( + sf_rw.expected_block_index(StaticFileSegment::Headers), + Some(BTreeMap::from([ + (9, SegmentRangeInclusive::new(0, 9)), + (19, SegmentRangeInclusive::new(10, 19)), + (24, SegmentRangeInclusive::new(20, 24)) + ])) + ) + } + + { + let sf_rw = StaticFileProviderBuilder::::read_write(&static_dir)? + .with_blocks_per_file(15) + .build()?; + let mut header_writer = sf_rw.latest_writer(StaticFileSegment::Headers)?; + + let mut header = Header::default(); + for num in 23..=40 { + header.number = num; + header_writer.append_header(&header, &BlockHash::default()).unwrap(); + } + header_writer.commit().unwrap(); + + assert_eq!(sf_rw.headers_range(0..=40)?.len(), 41); + assert_eq!( + sf_rw.expected_block_index(StaticFileSegment::Headers), + Some(BTreeMap::from([ + (9, SegmentRangeInclusive::new(0, 9)), + (19, SegmentRangeInclusive::new(10, 19)), + (24, SegmentRangeInclusive::new(20, 24)), + (39, SegmentRangeInclusive::new(25, 39)), + (54, SegmentRangeInclusive::new(40, 54)) + ])) + ) + } + + Ok(()) + } } diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 7b0ae9ce11..92bc8b0481 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -13,6 +13,7 @@ use reth_static_file_types::{SegmentHeader, SegmentRangeInclusive, StaticFileSeg use reth_storage_errors::provider::{ProviderError, ProviderResult, StaticFileWriterError}; use std::{ borrow::Borrow, + cmp::Ordering, fmt::Debug, path::{Path, PathBuf}, sync::{Arc, Weak}, @@ -29,6 +30,7 @@ pub(crate) struct StaticFileWriters { headers: RwLock>>, transactions: RwLock>>, receipts: RwLock>>, + transaction_senders: RwLock>>, } impl Default for StaticFileWriters { @@ -37,6 +39,7 @@ impl Default for StaticFileWriters { headers: Default::default(), transactions: Default::default(), receipts: Default::default(), + transaction_senders: Default::default(), } } } @@ -51,6 +54,7 @@ impl StaticFileWriters { StaticFileSegment::Headers => self.headers.write(), StaticFileSegment::Transactions => self.transactions.write(), StaticFileSegment::Receipts => self.receipts.write(), + StaticFileSegment::TransactionSenders => self.transaction_senders.write(), }; if write_guard.is_none() { @@ -61,17 +65,25 @@ impl StaticFileWriters { } pub(crate) fn commit(&self) -> ProviderResult<()> { - for writer_lock in [&self.headers, &self.transactions, &self.receipts] { + debug!(target: "provider::static_file", 
"Committing all static file segments"); + + for writer_lock in + [&self.headers, &self.transactions, &self.receipts, &self.transaction_senders] + { let mut writer = writer_lock.write(); if let Some(writer) = writer.as_mut() { writer.commit()?; } } + + debug!(target: "provider::static_file", "Committed all static file segments"); Ok(()) } pub(crate) fn has_unwind_queued(&self) -> bool { - for writer_lock in [&self.headers, &self.transactions, &self.receipts] { + for writer_lock in + [&self.headers, &self.transactions, &self.receipts, &self.transaction_senders] + { let writer = writer_lock.read(); if let Some(writer) = writer.as_ref() && writer.will_prune_on_commit() @@ -161,8 +173,8 @@ impl StaticFileProviderRW { let static_file_provider = Self::upgrade_provider_to_strong_reference(&reader); - let block_range = static_file_provider.find_fixed_range(block); - let (jar, path) = match static_file_provider.get_segment_provider_from_block( + let block_range = static_file_provider.find_fixed_range(segment, block); + let (jar, path) = match static_file_provider.get_segment_provider_for_block( segment, block_range.start(), None, @@ -219,6 +231,14 @@ impl StaticFileProviderRW { self.user_header_mut().prune(pruned_rows); } + debug!( + target: "provider::static_file", + segment = ?self.writer.user_header().segment(), + path = ?self.data_path, + pruned_rows, + "Ensuring end range consistency" + ); + self.writer.commit().map_err(ProviderError::other)?; // Updates the [SnapshotProvider] manager @@ -237,6 +257,12 @@ impl StaticFileProviderRW { // Truncates the data file if instructed to. if let Some((to_delete, last_block_number)) = self.prune_on_commit.take() { + debug!( + target: "provider::static_file", + segment = ?self.writer.user_header().segment(), + to_delete, + "Pruning data on commit" + ); match self.writer.user_header().segment() { StaticFileSegment::Headers => self.prune_header_data(to_delete)?, StaticFileSegment::Transactions => self @@ -244,10 +270,20 @@ impl StaticFileProviderRW { StaticFileSegment::Receipts => { self.prune_receipt_data(to_delete, last_block_number.expect("should exist"))? 
} + StaticFileSegment::TransactionSenders => self.prune_transaction_sender_data( + to_delete, + last_block_number.expect("should exist"), + )?, } } if self.writer.is_dirty() { + debug!( + target: "provider::static_file", + segment = ?self.writer.user_header().segment(), + "Committing writer to disk" + ); + // Commits offsets and new user_header to disk self.writer.commit().map_err(ProviderError::other)?; @@ -264,7 +300,7 @@ impl StaticFileProviderRW { segment = ?self.writer.user_header().segment(), path = ?self.data_path, duration = ?start.elapsed(), - "Commit" + "Committed writer to disk" ); self.update_index()?; @@ -280,6 +316,12 @@ impl StaticFileProviderRW { pub fn commit_without_sync_all(&mut self) -> ProviderResult<()> { let start = Instant::now(); + debug!( + target: "provider::static_file", + segment = ?self.writer.user_header().segment(), + "Committing writer to disk (without sync)" + ); + // Commits offsets and new user_header to disk self.writer.commit_without_sync_all().map_err(ProviderError::other)?; @@ -296,7 +338,7 @@ impl StaticFileProviderRW { segment = ?self.writer.user_header().segment(), path = ?self.data_path, duration = ?start.elapsed(), - "Commit" + "Committed writer to disk (without sync)" ); self.update_index()?; @@ -321,17 +363,47 @@ impl StaticFileProviderRW { .as_ref() .map(|block_range| block_range.end()) .or_else(|| { - (self.writer.user_header().expected_block_start() > 0) - .then(|| self.writer.user_header().expected_block_start() - 1) + (self.writer.user_header().expected_block_start() > + self.reader().genesis_block_number()) + .then(|| self.writer.user_header().expected_block_start() - 1) }); self.reader().update_index(self.writer.user_header().segment(), segment_max_block) } + /// Ensures that the writer is positioned at the specified block number. + /// + /// If the writer is positioned at a greater block number than the specified one, the writer + /// will NOT be unwound and the error will be returned. + pub fn ensure_at_block(&mut self, advance_to: BlockNumber) -> ProviderResult<()> { + let current_block = if let Some(current_block_number) = self.current_block_number() { + current_block_number + } else { + self.increment_block(0)?; + 0 + }; + + match current_block.cmp(&advance_to) { + Ordering::Less => { + for block in current_block + 1..=advance_to { + self.increment_block(block)?; + } + } + Ordering::Equal => {} + Ordering::Greater => { + return Err(ProviderError::UnexpectedStaticFileBlockNumber( + self.writer.user_header().segment(), + current_block, + advance_to, + )); + } + } + + Ok(()) + } + /// Allows to increment the [`SegmentHeader`] end block. It will commit the current static file, /// and create the next one if we are past the end range. - /// - /// Returns the current [`BlockNumber`] as seen in the static file. pub fn increment_block(&mut self, expected_block_number: BlockNumber) -> ProviderResult<()> { let segment = self.writer.user_header().segment(); @@ -351,7 +423,7 @@ impl StaticFileProviderRW { self.data_path = data_path; *self.writer.user_header_mut() = SegmentHeader::new( - self.reader().find_fixed_range(last_block + 1), + self.reader().find_fixed_range(segment, last_block + 1), None, None, segment, @@ -371,6 +443,11 @@ impl StaticFileProviderRW { Ok(()) } + /// Returns the current block number of the static file writer. + pub fn current_block_number(&self) -> Option { + self.writer.user_header().block_end() + } + /// Returns a block number that is one next to the current tip of static files. 
pub fn next_block_number(&self) -> u64 { // The next static file block number can be found by checking the one after block_end. @@ -499,8 +576,6 @@ impl StaticFileProviderRW { } /// Appends to tx number-based static file. - /// - /// Returns the current [`TxNumber`] as seen in the static file. fn append_with_tx_number( &mut self, tx_num: TxNumber, @@ -529,8 +604,6 @@ impl StaticFileProviderRW { /// /// It **CALLS** `increment_block()` since the number of headers is equal to the number of /// blocks. - /// - /// Returns the current [`BlockNumber`] as seen in the static file. pub fn append_header(&mut self, header: &N::BlockHeader, hash: &BlockHash) -> ProviderResult<()> where N::BlockHeader: Compact, @@ -542,8 +615,6 @@ impl StaticFileProviderRW { /// /// It **CALLS** `increment_block()` since the number of headers is equal to the number of /// blocks. - /// - /// Returns the current [`BlockNumber`] as seen in the static file. pub fn append_header_with_td( &mut self, header: &N::BlockHeader, @@ -575,12 +646,41 @@ impl StaticFileProviderRW { Ok(()) } + /// Appends header to static file without calling `increment_block`. + /// This is useful for genesis blocks with non-zero block numbers. + pub fn append_header_direct( + &mut self, + header: &N::BlockHeader, + total_difficulty: U256, + hash: &BlockHash, + ) -> ProviderResult<()> + where + N::BlockHeader: Compact, + { + let start = Instant::now(); + self.ensure_no_queued_prune()?; + + debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Headers); + + self.append_column(header)?; + self.append_column(CompactU256::from(total_difficulty))?; + self.append_column(hash)?; + + if let Some(metrics) = &self.metrics { + metrics.record_segment_operation( + StaticFileSegment::Headers, + StaticFileProviderOperation::Append, + Some(start.elapsed()), + ); + } + + Ok(()) + } + /// Appends transaction to static file. /// /// It **DOES NOT CALL** `increment_block()`, it should be handled elsewhere. There might be /// empty blocks and this function wouldn't be called. - /// - /// Returns the current [`TxNumber`] as seen in the static file. pub fn append_transaction(&mut self, tx_num: TxNumber, tx: &N::SignedTx) -> ProviderResult<()> where N::SignedTx: Compact, @@ -606,8 +706,6 @@ impl StaticFileProviderRW { /// /// It **DOES NOT** call `increment_block()`, it should be handled elsewhere. There might be /// empty blocks and this function wouldn't be called. - /// - /// Returns the current [`TxNumber`] as seen in the static file. pub fn append_receipt(&mut self, tx_num: TxNumber, receipt: &N::Receipt) -> ProviderResult<()> where N::Receipt: Compact, @@ -630,9 +728,7 @@ impl StaticFileProviderRW { } /// Appends multiple receipts to the static file. - /// - /// Returns the current [`TxNumber`] as seen in the static file, if any. - pub fn append_receipts(&mut self, receipts: I) -> ProviderResult> + pub fn append_receipts(&mut self, receipts: I) -> ProviderResult<()> where I: Iterator>, R: Borrow, @@ -643,20 +739,18 @@ impl StaticFileProviderRW { let mut receipts_iter = receipts.into_iter().peekable(); // If receipts are empty, we can simply return None if receipts_iter.peek().is_none() { - return Ok(None); + return Ok(()); } let start = Instant::now(); self.ensure_no_queued_prune()?; // At this point receipts contains at least one receipt, so this would be overwritten. 
- let mut tx_number = 0; let mut count: u64 = 0; for receipt_result in receipts_iter { let (tx_num, receipt) = receipt_result?; self.append_with_tx_number(tx_num, receipt.borrow())?; - tx_number = tx_num; count += 1; } @@ -669,7 +763,68 @@ impl StaticFileProviderRW { ); } - Ok(Some(tx_number)) + Ok(()) + } + + /// Appends transaction sender to static file. + /// + /// It **DOES NOT** call `increment_block()`, it should be handled elsewhere. There might be + /// empty blocks and this function wouldn't be called. + pub fn append_transaction_sender( + &mut self, + tx_num: TxNumber, + sender: &alloy_primitives::Address, + ) -> ProviderResult<()> { + let start = Instant::now(); + self.ensure_no_queued_prune()?; + + debug_assert!(self.writer.user_header().segment() == StaticFileSegment::TransactionSenders); + self.append_with_tx_number(tx_num, sender)?; + + if let Some(metrics) = &self.metrics { + metrics.record_segment_operation( + StaticFileSegment::TransactionSenders, + StaticFileProviderOperation::Append, + Some(start.elapsed()), + ); + } + + Ok(()) + } + + /// Appends multiple transaction senders to the static file. + pub fn append_transaction_senders(&mut self, senders: I) -> ProviderResult<()> + where + I: Iterator, + { + debug_assert!(self.writer.user_header().segment() == StaticFileSegment::TransactionSenders); + + let mut senders_iter = senders.into_iter().peekable(); + // If senders are empty, we can simply return + if senders_iter.peek().is_none() { + return Ok(()); + } + + let start = Instant::now(); + self.ensure_no_queued_prune()?; + + // At this point senders contains at least one sender, so this would be overwritten. + let mut count: u64 = 0; + for (tx_num, sender) in senders_iter { + self.append_with_tx_number(tx_num, sender)?; + count += 1; + } + + if let Some(metrics) = &self.metrics { + metrics.record_segment_operations( + StaticFileSegment::TransactionSenders, + StaticFileProviderOperation::Append, + count, + Some(start.elapsed()), + ); + } + + Ok(()) } /// Adds an instruction to prune `to_delete` transactions during commit. @@ -696,6 +851,21 @@ impl StaticFileProviderRW { self.queue_prune(to_delete, Some(last_block)) } + /// Adds an instruction to prune `to_delete` transaction senders during commit. + /// + /// Note: `last_block` refers to the block the unwinds ends at. + pub fn prune_transaction_senders( + &mut self, + to_delete: u64, + last_block: BlockNumber, + ) -> ProviderResult<()> { + debug_assert_eq!( + self.writer.user_header().segment(), + StaticFileSegment::TransactionSenders + ); + self.queue_prune(to_delete, Some(last_block)) + } + /// Adds an instruction to prune `to_delete` headers during commit. pub fn prune_headers(&mut self, to_delete: u64) -> ProviderResult<()> { debug_assert_eq!(self.writer.user_header().segment(), StaticFileSegment::Headers); @@ -772,6 +942,29 @@ impl StaticFileProviderRW { Ok(()) } + /// Prunes the last `to_delete` transaction senders from the data file. + fn prune_transaction_sender_data( + &mut self, + to_delete: u64, + last_block: BlockNumber, + ) -> ProviderResult<()> { + let start = Instant::now(); + + debug_assert!(self.writer.user_header().segment() == StaticFileSegment::TransactionSenders); + + self.truncate(to_delete, Some(last_block))?; + + if let Some(metrics) = &self.metrics { + metrics.record_segment_operation( + StaticFileSegment::TransactionSenders, + StaticFileProviderOperation::Prune, + Some(start.elapsed()), + ); + } + + Ok(()) + } + /// Prunes the last `to_delete` headers from the data file. 
fn prune_header_data(&mut self, to_delete: u64) -> ProviderResult<()> { let start = Instant::now(); @@ -791,7 +984,8 @@ impl StaticFileProviderRW { Ok(()) } - fn reader(&self) -> StaticFileProvider { + /// Returns a [`StaticFileProvider`] associated with this writer. + pub fn reader(&self) -> StaticFileProvider { Self::upgrade_provider_to_strong_reference(&self.reader) } diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 0b27c5dc99..4a3e293992 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -45,8 +45,8 @@ pub fn assert_genesis_block( assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); - // TODO check after this gets done: https://github.com/paradigmxyz/reth/issues/1588 - // Bytecodes are not reverted assert_eq!(tx.table::().unwrap(), vec![]); + // Reorged bytecodes are not reverted per https://github.com/paradigmxyz/reth/issues/1588 + // assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 16388de91a..1ddd03652f 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -118,7 +118,6 @@ impl MockEthProvider { /// Add multiple blocks to local block store pub fn extend_blocks(&self, iter: impl IntoIterator) { for (hash, block) in iter { - self.add_header(hash, block.header().clone()); self.add_block(hash, block) } } @@ -403,18 +402,6 @@ impl TransactionsProvider Ok(None) } - fn transaction_block(&self, id: TxNumber) -> ProviderResult> { - let lock = self.blocks.lock(); - let mut current_tx_number: TxNumber = 0; - for block in lock.values() { - if current_tx_number + (block.body().transaction_count() as TxNumber) > id { - return Ok(Some(block.header().number())) - } - current_tx_number += block.body().transaction_count() as TxNumber; - } - Ok(None) - } - fn transactions_by_block( &self, id: BlockHashOrNumber, diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs index ccda2d60e8..3002f31abd 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -1,16 +1,18 @@ use crate::{ - providers::{ProviderNodeTypes, StaticFileProvider}, + providers::{NodeTypesForProvider, ProviderNodeTypes, RocksDBBuilder, StaticFileProvider}, HashingWriter, ProviderFactory, TrieWriter, }; use alloy_primitives::B256; use reth_chainspec::{ChainSpec, MAINNET}; use reth_db::{ - test_utils::{create_test_rw_db, create_test_static_files_dir, TempDatabase}, + test_utils::{ + create_test_rocksdb_dir, create_test_rw_db, create_test_static_files_dir, TempDatabase, + }, DatabaseEnv, }; use reth_errors::ProviderResult; use reth_ethereum_engine_primitives::EthEngineTypes; -use reth_node_types::{NodeTypes, NodeTypesWithDBAdapter}; +use reth_node_types::NodeTypesWithDBAdapter; use reth_primitives_traits::{Account, StorageEntry}; use reth_trie::StateRoot; use reth_trie_db::DatabaseStateRoot; @@ -50,16 +52,22 @@ pub fn create_test_provider_factory_with_chain_spec( } /// Creates test provider factory with provided chain spec. 
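+/// The returned factory is backed by a fresh test database, static-files directory, and
+/// RocksDB instance created for each call.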
-pub fn create_test_provider_factory_with_node_types( +pub fn create_test_provider_factory_with_node_types( chain_spec: Arc, ) -> ProviderFactory>>> { let (static_dir, _) = create_test_static_files_dir(); + let (rocksdb_dir, _) = create_test_rocksdb_dir(); let db = create_test_rw_db(); ProviderFactory::new( db, chain_spec, StaticFileProvider::read_write(static_dir.keep()).expect("static file provider"), + RocksDBBuilder::new(&rocksdb_dir) + .with_default_tables() + .build() + .expect("failed to create test RocksDB provider"), ) + .expect("failed to create test provider factory") } /// Inserts the genesis alloc from the provided chain spec into the trie. diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index b1ec5d4739..64eff68b03 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -1,6 +1,10 @@ //! Additional testing support for `NoopProvider`. -use crate::{providers::StaticFileProvider, StaticFileProviderFactory}; +use crate::{ + providers::{RocksDBProvider, StaticFileProvider, StaticFileProviderRWRefMut}, + RocksDBProviderFactory, StaticFileProviderFactory, +}; +use reth_errors::{ProviderError, ProviderResult}; use reth_primitives_traits::NodePrimitives; use std::path::PathBuf; @@ -11,4 +15,23 @@ impl StaticFileProviderFactory for NoopProvid fn static_file_provider(&self) -> StaticFileProvider { StaticFileProvider::read_only(PathBuf::default(), false).unwrap() } + + fn get_static_file_writer( + &self, + _block: alloy_primitives::BlockNumber, + _segment: reth_static_file_types::StaticFileSegment, + ) -> ProviderResult> { + Err(ProviderError::ReadOnlyStaticFileAccess) + } +} + +impl RocksDBProviderFactory for NoopProvider { + fn rocksdb_provider(&self) -> RocksDBProvider { + RocksDBProvider::builder(PathBuf::default()).build().unwrap() + } + + #[cfg(all(unix, feature = "rocksdb"))] + fn set_pending_rocksdb_batch(&self, _batch: rocksdb::WriteBatchWithTransaction) { + // No-op for NoopProvider + } } diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 6fe88a6640..c6a69e2fbf 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -2,8 +2,9 @@ use crate::{ AccountReader, BlockReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, - DatabaseProviderFactory, HashedPostStateProvider, PruneCheckpointReader, StageCheckpointReader, - StateProviderFactory, StateReader, StaticFileProviderFactory, TrieReader, + DatabaseProviderFactory, HashedPostStateProvider, PruneCheckpointReader, + RocksDBProviderFactory, StageCheckpointReader, StateProviderFactory, StateReader, + StaticFileProviderFactory, TrieReader, }; use reth_chain_state::{CanonStateSubscriptions, ForkChoiceSubscriptions}; use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; @@ -17,6 +18,7 @@ pub trait FullProvider: Provider: BlockReader + TrieReader + StageCheckpointReader + PruneCheckpointReader, > + NodePrimitivesProvider + StaticFileProviderFactory + + RocksDBProviderFactory + BlockReaderIdExt< Transaction = TxTy, Block = BlockTy, @@ -44,6 +46,7 @@ impl FullProvider for T where Provider: BlockReader + TrieReader + StageCheckpointReader + PruneCheckpointReader, > + NodePrimitivesProvider + StaticFileProviderFactory + + RocksDBProviderFactory + BlockReaderIdExt< Transaction = TxTy, Block = BlockTy, diff --git a/crates/storage/provider/src/traits/mod.rs 
b/crates/storage/provider/src/traits/mod.rs index 2837a4505f..1e43cdbbd7 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -8,5 +8,8 @@ pub use reth_chainspec::ChainSpecProvider; mod static_file_provider; pub use static_file_provider::StaticFileProviderFactory; +mod rocksdb_provider; +pub use rocksdb_provider::RocksDBProviderFactory; + mod full; pub use full::FullProvider; diff --git a/crates/storage/provider/src/traits/rocksdb_provider.rs b/crates/storage/provider/src/traits/rocksdb_provider.rs new file mode 100644 index 0000000000..9d2186677d --- /dev/null +++ b/crates/storage/provider/src/traits/rocksdb_provider.rs @@ -0,0 +1,16 @@ +use crate::providers::RocksDBProvider; + +/// `RocksDB` provider factory. +/// +/// This trait provides access to the `RocksDB` provider +pub trait RocksDBProviderFactory { + /// Returns the `RocksDB` provider. + fn rocksdb_provider(&self) -> RocksDBProvider; + + /// Adds a pending `RocksDB` batch to be committed when this provider is committed. + /// + /// This allows deferring `RocksDB` commits to happen at the same time as MDBX and static file + /// commits, ensuring atomicity across all storage backends. + #[cfg(all(unix, feature = "rocksdb"))] + fn set_pending_rocksdb_batch(&self, batch: rocksdb::WriteBatchWithTransaction); +} diff --git a/crates/storage/provider/src/traits/static_file_provider.rs b/crates/storage/provider/src/traits/static_file_provider.rs index 9daab7e5a8..bb4e49e8f4 100644 --- a/crates/storage/provider/src/traits/static_file_provider.rs +++ b/crates/storage/provider/src/traits/static_file_provider.rs @@ -1,9 +1,21 @@ +use alloy_primitives::BlockNumber; +use reth_errors::ProviderResult; +use reth_static_file_types::StaticFileSegment; use reth_storage_api::NodePrimitivesProvider; -use crate::providers::StaticFileProvider; +use crate::providers::{StaticFileProvider, StaticFileProviderRWRefMut}; /// Static file provider factory. pub trait StaticFileProviderFactory: NodePrimitivesProvider { /// Create new instance of static file provider. fn static_file_provider(&self) -> StaticFileProvider; + + /// Returns a mutable reference to a + /// [`StaticFileProviderRW`](`crate::providers::StaticFileProviderRW`) of a + /// [`StaticFileSegment`]. 
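+    ///
+    /// The returned writer targets the static file that contains (or will be extended to
+    /// contain) the given `block` for `segment`.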
+ fn get_static_file_writer( + &self, + block: BlockNumber, + segment: StaticFileSegment, + ) -> ProviderResult>; } diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 6d990e17a4..0c67634dbf 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -917,7 +917,7 @@ mod tests { assert_eq!( StateRoot::overlay_root( tx, - provider_factory.hashed_post_state(&state.bundle_state) + &provider_factory.hashed_post_state(&state.bundle_state).into_sorted() ) .unwrap(), state_root(expected.clone().into_iter().map(|(address, (account, storage))| ( diff --git a/crates/storage/rpc-provider/README.md b/crates/storage/rpc-provider/README.md index 7180d41840..f1b51a9574 100644 --- a/crates/storage/rpc-provider/README.md +++ b/crates/storage/rpc-provider/README.md @@ -65,7 +65,7 @@ This provider implements the same traits as the local `BlockchainProvider`, maki Licensed under either of: -- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) -- MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) +- Apache License, Version 2.0, ([LICENSE-APACHE](../../../LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) +- MIT license ([LICENSE-MIT](../../../LICENSE-MIT) or http://opensource.org/licenses/MIT) at your option. diff --git a/crates/storage/rpc-provider/src/lib.rs b/crates/storage/rpc-provider/src/lib.rs index 6e5bd17218..3ce5f6f914 100644 --- a/crates/storage/rpc-provider/src/lib.rs +++ b/crates/storage/rpc-provider/src/lib.rs @@ -675,10 +675,6 @@ where Err(ProviderError::UnsupportedProvider) } - fn transaction_block(&self, _id: TxNumber) -> ProviderResult> { - Err(ProviderError::UnsupportedProvider) - } - fn transactions_by_block( &self, block: BlockHashOrNumber, @@ -1571,10 +1567,6 @@ where Err(ProviderError::UnsupportedProvider) } - fn transaction_block(&self, _id: TxNumber) -> Result, ProviderError> { - Err(ProviderError::UnsupportedProvider) - } - fn transactions_by_block( &self, _block: alloy_rpc_types::BlockHashOrNumber, diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index a62193a5dd..83cbbbd714 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -32,6 +32,7 @@ alloy-consensus.workspace = true alloy-rpc-types-engine.workspace = true auto_impl.workspace = true +serde_json = { workspace = true, optional = true } [features] default = ["std"] @@ -50,10 +51,12 @@ std = [ "reth-storage-errors/std", "reth-db-models/std", "reth-trie-common/std", + "serde_json?/std", ] db-api = [ "dep:reth-db-api", + "dep:serde_json", ] serde = [ diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index b9ab206a6b..3c3a3bfbc9 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -54,7 +54,6 @@ pub trait BlockReader: + TransactionsProvider + ReceiptProvider + Send - + Sync { /// The block type this provider reads. 
type Block: reth_primitives_traits::Block< @@ -149,7 +148,7 @@ pub trait BlockReader: fn block_by_transaction_id(&self, id: TxNumber) -> ProviderResult>; } -impl BlockReader for Arc { +impl BlockReader for Arc { type Block = T::Block; fn find_block_by_hash( @@ -210,7 +209,7 @@ impl BlockReader for Arc { } } -impl BlockReader for &T { +impl BlockReader for &T { type Block = T::Block; fn find_block_by_hash( @@ -382,7 +381,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { } /// Functionality to read the last known chain blocks from the database. -pub trait ChainStateBlockReader: Send + Sync { +pub trait ChainStateBlockReader: Send { /// Returns the last finalized block number. /// /// If no finalized block has been written yet, this returns `None`. @@ -394,7 +393,7 @@ pub trait ChainStateBlockReader: Send + Sync { } /// Functionality to write the last known chain blocks to the database. -pub trait ChainStateBlockWriter: Send + Sync { +pub trait ChainStateBlockWriter: Send { /// Saves the given finalized block number in the DB. fn save_finalized_block_number(&self, block_number: BlockNumber) -> ProviderResult<()>; diff --git a/crates/storage/storage-api/src/block_hash.rs b/crates/storage/storage-api/src/block_hash.rs index a617d31ebd..8d60f72358 100644 --- a/crates/storage/storage-api/src/block_hash.rs +++ b/crates/storage/storage-api/src/block_hash.rs @@ -4,8 +4,8 @@ use alloy_primitives::{BlockNumber, B256}; use reth_storage_errors::provider::ProviderResult; /// Client trait for fetching block hashes by number. -#[auto_impl::auto_impl(&, Arc, Box)] -pub trait BlockHashReader: Send + Sync { +#[auto_impl::auto_impl(&, Box, Arc)] +pub trait BlockHashReader { /// Get the hash of the block with the given number. Returns `None` if no block with this number /// exists. fn block_hash(&self, number: BlockNumber) -> ProviderResult>; diff --git a/crates/storage/storage-api/src/block_id.rs b/crates/storage/storage-api/src/block_id.rs index e00ad950e2..ae0a35701e 100644 --- a/crates/storage/storage-api/src/block_id.rs +++ b/crates/storage/storage-api/src/block_id.rs @@ -9,7 +9,7 @@ use reth_storage_errors::provider::{ProviderError, ProviderResult}; /// /// This trait also supports fetching block hashes and block numbers from a [`BlockHashOrNumber`]. #[auto_impl::auto_impl(&, Arc)] -pub trait BlockNumReader: BlockHashReader + Send + Sync { +pub trait BlockNumReader: BlockHashReader + Send { /// Returns the current info for the chain. fn chain_info(&self) -> ProviderResult; diff --git a/crates/storage/storage-api/src/block_indices.rs b/crates/storage/storage-api/src/block_indices.rs index 5a4f1e22bb..6122872cf3 100644 --- a/crates/storage/storage-api/src/block_indices.rs +++ b/crates/storage/storage-api/src/block_indices.rs @@ -6,7 +6,7 @@ use reth_storage_errors::provider::ProviderResult; /// Client trait for fetching block body indices related data. #[auto_impl::auto_impl(&, Arc)] -pub trait BlockBodyIndicesProvider: Send + Sync { +pub trait BlockBodyIndicesProvider: Send { /// Returns the block body indices with matching number from database. /// /// Returns `None` if block is not found. 
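As a brief, generic illustration (plain Rust, not reth APIs) of the distinction the relaxed bounds above rely on: a type can be `Send` without being `Sync`, so such a type can now satisfy these reader traits' `Send`-only requirement.

use std::cell::Cell;

// `Cell<u64>` is `Send` but not `Sync`, so `NotSync` satisfies a `Send`-only bound
// while it would have been rejected by the previous `Send + Sync` bound.
struct NotSync(Cell<u64>);

fn requires_send<T: Send>(_value: T) {}

fn main() {
    requires_send(NotSync(Cell::new(0)));
}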
diff --git a/crates/storage/storage-api/src/block_writer.rs b/crates/storage/storage-api/src/block_writer.rs index 3bbde88d3e..233e9898d1 100644 --- a/crates/storage/storage-api/src/block_writer.rs +++ b/crates/storage/storage-api/src/block_writer.rs @@ -9,7 +9,7 @@ use reth_trie_common::HashedPostStateSorted; /// `BlockExecution` Writer pub trait BlockExecutionWriter: - NodePrimitivesProvider> + BlockWriter + Send + Sync + NodePrimitivesProvider> + BlockWriter { /// Take all of the blocks above the provided number and their execution result /// @@ -39,8 +39,8 @@ impl BlockExecutionWriter for &T { } /// Block Writer -#[auto_impl::auto_impl(&, Arc, Box)] -pub trait BlockWriter: Send + Sync { +#[auto_impl::auto_impl(&, Box)] +pub trait BlockWriter { /// The body this writer can write. type Block: Block; /// The receipt type for [`ExecutionOutcome`]. @@ -53,7 +53,7 @@ pub trait BlockWriter: Send + Sync { /// and transition in the block. fn insert_block( &self, - block: RecoveredBlock, + block: &RecoveredBlock, ) -> ProviderResult; /// Appends a batch of block bodies extending the canonical chain. This is invoked during @@ -63,7 +63,7 @@ pub trait BlockWriter: Send + Sync { /// Bodies are passed as [`Option`]s, if body is `None` the corresponding block is empty. fn append_block_bodies( &self, - bodies: Vec<(BlockNumber, Option<::Body>)>, + bodies: Vec<(BlockNumber, Option<&::Body>)>, ) -> ProviderResult<()>; /// Removes all blocks above the given block number from the database. diff --git a/crates/storage/storage-api/src/chain.rs b/crates/storage/storage-api/src/chain.rs index 5b159715ad..40a8c975f9 100644 --- a/crates/storage/storage-api/src/chain.rs +++ b/crates/storage/storage-api/src/chain.rs @@ -28,7 +28,7 @@ pub trait BlockBodyWriter { fn write_block_bodies( &self, provider: &Provider, - bodies: Vec<(BlockNumber, Option)>, + bodies: Vec<(BlockNumber, Option<&Body>)>, ) -> ProviderResult<()>; /// Removes all block bodies above the given block number from the database. @@ -102,7 +102,7 @@ where fn write_block_bodies( &self, provider: &Provider, - bodies: Vec<(u64, Option>)>, + bodies: Vec<(u64, Option<&alloy_consensus::BlockBody>)>, ) -> ProviderResult<()> { let mut ommers_cursor = provider.tx_ref().cursor_write::>()?; let mut withdrawals_cursor = @@ -113,11 +113,12 @@ where // Write ommers if any if !body.ommers.is_empty() { - ommers_cursor.append(block_number, &StoredBlockOmmers { ommers: body.ommers })?; + ommers_cursor + .append(block_number, &StoredBlockOmmers { ommers: body.ommers.clone() })?; } // Write withdrawals if any - if let Some(withdrawals) = body.withdrawals && + if let Some(withdrawals) = body.withdrawals.clone() && !withdrawals.is_empty() { withdrawals_cursor.append(block_number, &StoredBlockWithdrawals { withdrawals })?; @@ -212,7 +213,7 @@ where fn write_block_bodies( &self, _provider: &Provider, - _bodies: Vec<(u64, Option>)>, + _bodies: Vec<(u64, Option<&alloy_consensus::BlockBody>)>, ) -> ProviderResult<()> { // noop Ok(()) diff --git a/crates/storage/storage-api/src/database_provider.rs b/crates/storage/storage-api/src/database_provider.rs index 8b5d8281f4..b206ca0922 100644 --- a/crates/storage/storage-api/src/database_provider.rs +++ b/crates/storage/storage-api/src/database_provider.rs @@ -183,7 +183,8 @@ where } } -fn range_size_hint(range: &impl RangeBounds) -> Option { +/// Returns the length of the range if the range has a bounded end. 
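For context on the newly public `range_size_hint` helper documented just below: it returns the number of items a bounded range covers and `None` when the range has no upper bound. A minimal sketch of that expected behavior, assuming the function is re-exported from the `reth-storage-api` crate root and keeps the `Option<usize>` return type (adjust the path if it stays under the `database_provider` module):

```rust
use reth_storage_api::range_size_hint;

fn main() {
    // Bounded ranges report their exact length.
    assert_eq!(range_size_hint(&(0u64..10)), Some(10));
    assert_eq!(range_size_hint(&(5u64..=5)), Some(1));

    // Ranges without a bounded end cannot provide a hint.
    assert_eq!(range_size_hint(&(100u64..)), None);
}
```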
+pub fn range_size_hint(range: &impl RangeBounds) -> Option { let start = match range.start_bound().cloned() { Bound::Included(start) => start, Bound::Excluded(start) => start.checked_add(1)?, diff --git a/crates/storage/storage-api/src/hashing.rs b/crates/storage/storage-api/src/hashing.rs index dfbb00ab8f..7c1ced53c1 100644 --- a/crates/storage/storage-api/src/hashing.rs +++ b/crates/storage/storage-api/src/hashing.rs @@ -8,8 +8,8 @@ use reth_primitives_traits::{Account, StorageEntry}; use reth_storage_errors::provider::ProviderResult; /// Hashing Writer -#[auto_impl(&, Arc, Box)] -pub trait HashingWriter: Send + Sync { +#[auto_impl(&, Box)] +pub trait HashingWriter: Send { /// Unwind and clear account hashing. /// /// # Returns diff --git a/crates/storage/storage-api/src/header.rs b/crates/storage/storage-api/src/header.rs index 39b2eef903..0f18c55b41 100644 --- a/crates/storage/storage-api/src/header.rs +++ b/crates/storage/storage-api/src/header.rs @@ -10,7 +10,7 @@ pub type ProviderHeader

<P> = <P as HeaderProvider>
::Header; /// Client trait for fetching `Header` related data. #[auto_impl::auto_impl(&, Arc)] -pub trait HeaderProvider: Send + Sync { +pub trait HeaderProvider: Send { /// The header type this provider supports. type Header: BlockHeader; diff --git a/crates/storage/storage-api/src/header_sync_gap.rs b/crates/storage/storage-api/src/header_sync_gap.rs index 86b8e93dcb..29f86bc835 100644 --- a/crates/storage/storage-api/src/header_sync_gap.rs +++ b/crates/storage/storage-api/src/header_sync_gap.rs @@ -3,7 +3,7 @@ use reth_primitives_traits::{BlockHeader, SealedHeader}; use reth_storage_errors::provider::ProviderResult; /// Provider for getting the local tip header for sync gap calculation. -pub trait HeaderSyncGapProvider: Send + Sync { +pub trait HeaderSyncGapProvider: Send { /// The header type. type Header: BlockHeader; diff --git a/crates/storage/storage-api/src/history.rs b/crates/storage/storage-api/src/history.rs index e15b791f0f..d47f354ab6 100644 --- a/crates/storage/storage-api/src/history.rs +++ b/crates/storage/storage-api/src/history.rs @@ -7,8 +7,8 @@ use reth_primitives_traits::StorageEntry; use reth_storage_errors::provider::ProviderResult; /// History Writer -#[auto_impl(&, Arc, Box)] -pub trait HistoryWriter: Send + Sync { +#[auto_impl(&, Box)] +pub trait HistoryWriter: Send { /// Unwind and clear account history indices. /// /// Returns number of changesets walked. diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index 897802da98..0daf280519 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -94,5 +94,14 @@ pub use state_writer::*; mod header_sync_gap; pub use header_sync_gap::HeaderSyncGapProvider; +#[cfg(feature = "db-api")] +pub mod metadata; +#[cfg(feature = "db-api")] +pub use metadata::{MetadataProvider, MetadataWriter, StorageSettingsCache}; +#[cfg(feature = "db-api")] +pub use reth_db_api::models::StorageSettings; + mod full; pub use full::*; + +pub mod macros; diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/storage-api/src/macros.rs similarity index 65% rename from crates/storage/provider/src/providers/state/macros.rs rename to crates/storage/storage-api/src/macros.rs index 74bb371819..a299c529b8 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/storage-api/src/macros.rs @@ -1,9 +1,10 @@ -//! Helper macros for implementing traits for various [`StateProvider`](crate::StateProvider) +//! Helper macros for implementing traits for various `StateProvider` //! implementations /// A macro that delegates trait implementations to the `as_ref` function of the type. /// /// Used to implement provider traits. +#[macro_export] macro_rules! delegate_impls_to_as_ref { (for $target:ty => $($trait:ident $(where [$($generics:tt)*])? { $(fn $func:ident$(<$($generic_arg:ident: $generic_arg_ty:path),*>)?(&self, $($arg:ident: $argty:ty),*) -> $ret:path;)* })* ) => { @@ -19,45 +20,46 @@ macro_rules! delegate_impls_to_as_ref { }; } -pub(crate) use delegate_impls_to_as_ref; +pub use delegate_impls_to_as_ref; /// Delegates the provider trait implementations to the `as_ref` function of the type: /// /// [`AccountReader`](crate::AccountReader) /// [`BlockHashReader`](crate::BlockHashReader) /// [`StateProvider`](crate::StateProvider) +#[macro_export] macro_rules! delegate_provider_impls { ($target:ty $(where [$($generics:tt)*])?) 
=> { - $crate::providers::state::macros::delegate_impls_to_as_ref!( + $crate::macros::delegate_impls_to_as_ref!( for $target => AccountReader $(where [$($generics)*])? { - fn basic_account(&self, address: &alloy_primitives::Address) -> reth_storage_errors::provider::ProviderResult>; + fn basic_account(&self, address: &alloy_primitives::Address) -> reth_storage_api::errors::provider::ProviderResult>; } BlockHashReader $(where [$($generics)*])? { - fn block_hash(&self, number: u64) -> reth_storage_errors::provider::ProviderResult>; - fn canonical_hashes_range(&self, start: alloy_primitives::BlockNumber, end: alloy_primitives::BlockNumber) -> reth_storage_errors::provider::ProviderResult>; + fn block_hash(&self, number: u64) -> reth_storage_api::errors::provider::ProviderResult>; + fn canonical_hashes_range(&self, start: alloy_primitives::BlockNumber, end: alloy_primitives::BlockNumber) -> reth_storage_api::errors::provider::ProviderResult>; } StateProvider $(where [$($generics)*])? { - fn storage(&self, account: alloy_primitives::Address, storage_key: alloy_primitives::StorageKey) -> reth_storage_errors::provider::ProviderResult>; + fn storage(&self, account: alloy_primitives::Address, storage_key: alloy_primitives::StorageKey) -> reth_storage_api::errors::provider::ProviderResult>; } BytecodeReader $(where [$($generics)*])? { - fn bytecode_by_hash(&self, code_hash: &alloy_primitives::B256) -> reth_storage_errors::provider::ProviderResult>; + fn bytecode_by_hash(&self, code_hash: &alloy_primitives::B256) -> reth_storage_api::errors::provider::ProviderResult>; } StateRootProvider $(where [$($generics)*])? { - fn state_root(&self, state: reth_trie::HashedPostState) -> reth_storage_errors::provider::ProviderResult; - fn state_root_from_nodes(&self, input: reth_trie::TrieInput) -> reth_storage_errors::provider::ProviderResult; - fn state_root_with_updates(&self, state: reth_trie::HashedPostState) -> reth_storage_errors::provider::ProviderResult<(alloy_primitives::B256, reth_trie::updates::TrieUpdates)>; - fn state_root_from_nodes_with_updates(&self, input: reth_trie::TrieInput) -> reth_storage_errors::provider::ProviderResult<(alloy_primitives::B256, reth_trie::updates::TrieUpdates)>; + fn state_root(&self, state: reth_trie::HashedPostState) -> reth_storage_api::errors::provider::ProviderResult; + fn state_root_from_nodes(&self, input: reth_trie::TrieInput) -> reth_storage_api::errors::provider::ProviderResult; + fn state_root_with_updates(&self, state: reth_trie::HashedPostState) -> reth_storage_api::errors::provider::ProviderResult<(alloy_primitives::B256, reth_trie::updates::TrieUpdates)>; + fn state_root_from_nodes_with_updates(&self, input: reth_trie::TrieInput) -> reth_storage_api::errors::provider::ProviderResult<(alloy_primitives::B256, reth_trie::updates::TrieUpdates)>; } StorageRootProvider $(where [$($generics)*])? 
{ - fn storage_root(&self, address: alloy_primitives::Address, storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; - fn storage_proof(&self, address: alloy_primitives::Address, slot: alloy_primitives::B256, storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; - fn storage_multiproof(&self, address: alloy_primitives::Address, slots: &[alloy_primitives::B256], storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; + fn storage_root(&self, address: alloy_primitives::Address, storage: reth_trie::HashedStorage) -> reth_storage_api::errors::provider::ProviderResult; + fn storage_proof(&self, address: alloy_primitives::Address, slot: alloy_primitives::B256, storage: reth_trie::HashedStorage) -> reth_storage_api::errors::provider::ProviderResult; + fn storage_multiproof(&self, address: alloy_primitives::Address, slots: &[alloy_primitives::B256], storage: reth_trie::HashedStorage) -> reth_storage_api::errors::provider::ProviderResult; } StateProofProvider $(where [$($generics)*])? { - fn proof(&self, input: reth_trie::TrieInput, address: alloy_primitives::Address, slots: &[alloy_primitives::B256]) -> reth_storage_errors::provider::ProviderResult; - fn multiproof(&self, input: reth_trie::TrieInput, targets: reth_trie::MultiProofTargets) -> reth_storage_errors::provider::ProviderResult; - fn witness(&self, input: reth_trie::TrieInput, target: reth_trie::HashedPostState) -> reth_storage_errors::provider::ProviderResult>; + fn proof(&self, input: reth_trie::TrieInput, address: alloy_primitives::Address, slots: &[alloy_primitives::B256]) -> reth_storage_api::errors::provider::ProviderResult; + fn multiproof(&self, input: reth_trie::TrieInput, targets: reth_trie::MultiProofTargets) -> reth_storage_api::errors::provider::ProviderResult; + fn witness(&self, input: reth_trie::TrieInput, target: reth_trie::HashedPostState) -> reth_storage_api::errors::provider::ProviderResult>; } HashedPostStateProvider $(where [$($generics)*])? { fn hashed_post_state(&self, bundle_state: &revm_database::BundleState) -> reth_trie::HashedPostState; @@ -66,4 +68,4 @@ macro_rules! delegate_provider_impls { } } -pub(crate) use delegate_provider_impls; +pub use delegate_provider_impls; diff --git a/crates/storage/storage-api/src/metadata.rs b/crates/storage/storage-api/src/metadata.rs new file mode 100644 index 0000000000..8bb263fcd6 --- /dev/null +++ b/crates/storage/storage-api/src/metadata.rs @@ -0,0 +1,53 @@ +//! Metadata provider trait for reading and writing node metadata. + +use reth_db_api::models::StorageSettings; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; + +/// Metadata keys. +pub mod keys { + /// Storage configuration settings for this node. + pub const STORAGE_SETTINGS: &str = "storage_settings"; +} + +/// Client trait for reading node metadata from the database. +#[auto_impl::auto_impl(&)] +pub trait MetadataProvider: Send { + /// Get a metadata value by key + fn get_metadata(&self, key: &str) -> ProviderResult>>; + + /// Get storage settings for this node + fn storage_settings(&self) -> ProviderResult> { + self.get_metadata(keys::STORAGE_SETTINGS)? + .map(|bytes| serde_json::from_slice(&bytes).map_err(ProviderError::other)) + .transpose() + } +} + +/// Client trait for writing node metadata to the database. 
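The new `MetadataProvider` / `MetadataWriter` pair above stores metadata as raw bytes and round-trips `StorageSettings` as JSON under the fixed `storage_settings` key. A self-contained sketch of that pattern; the `StorageSettings` struct and its field below are only stand-ins, since the real model lives in `reth_db_api::models` and is not part of this diff:

```rust
use std::collections::HashMap;

use serde::{Deserialize, Serialize};

// Stand-in for the real `reth_db_api::models::StorageSettings`; the actual
// fields are not shown in this diff.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]
struct StorageSettings {
    transaction_lookup_in_static_files: bool,
}

const STORAGE_SETTINGS: &str = "storage_settings";

/// Toy in-memory metadata store mirroring the JSON round-trip of
/// `MetadataProvider::storage_settings` / `MetadataWriter::write_storage_settings`.
#[derive(Default)]
struct MemMetadata {
    entries: HashMap<String, Vec<u8>>,
}

impl MemMetadata {
    fn write_storage_settings(&mut self, settings: &StorageSettings) -> serde_json::Result<()> {
        // Serialize to JSON bytes and store them under the fixed key.
        let bytes = serde_json::to_vec(settings)?;
        self.entries.insert(STORAGE_SETTINGS.to_string(), bytes);
        Ok(())
    }

    fn storage_settings(&self) -> serde_json::Result<Option<StorageSettings>> {
        // Decode the stored bytes back into settings, if present.
        self.entries
            .get(STORAGE_SETTINGS)
            .map(|bytes| serde_json::from_slice(bytes))
            .transpose()
    }
}

fn main() -> serde_json::Result<()> {
    let mut store = MemMetadata::default();
    assert!(store.storage_settings()?.is_none());

    let settings = StorageSettings { transaction_lookup_in_static_files: true };
    store.write_storage_settings(&settings)?;
    assert_eq!(store.storage_settings()?, Some(settings));
    Ok(())
}
```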
+pub trait MetadataWriter: Send { + /// Write a metadata value + fn write_metadata(&self, key: &str, value: Vec) -> ProviderResult<()>; + + /// Write storage settings for this node + /// + /// Be sure to update provider factory cache with + /// [`StorageSettingsCache::set_storage_settings_cache`]. + fn write_storage_settings(&self, settings: StorageSettings) -> ProviderResult<()> { + self.write_metadata( + keys::STORAGE_SETTINGS, + serde_json::to_vec(&settings).map_err(ProviderError::other)?, + ) + } +} + +/// Trait for caching storage settings on a provider factory. +pub trait StorageSettingsCache: Send { + /// Gets the cached storage settings. + fn cached_storage_settings(&self) -> StorageSettings; + + /// Sets the storage settings of this `ProviderFactory`. + /// + /// IMPORTANT: It does not save settings in storage, that should be done by + /// [`MetadataWriter::write_storage_settings`] + fn set_storage_settings_cache(&self, settings: StorageSettings); +} diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index e538e1216e..2eca6c5cd8 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -273,10 +273,6 @@ impl TransactionsProvider for NoopProvider ProviderResult> { - Ok(None) - } - fn transactions_by_block( &self, _block_id: BlockHashOrNumber, diff --git a/crates/storage/storage-api/src/prune_checkpoint.rs b/crates/storage/storage-api/src/prune_checkpoint.rs index 6b3abebd6c..10009c7b90 100644 --- a/crates/storage/storage-api/src/prune_checkpoint.rs +++ b/crates/storage/storage-api/src/prune_checkpoint.rs @@ -3,8 +3,8 @@ use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_storage_errors::provider::ProviderResult; /// The trait for fetching prune checkpoint related data. -#[auto_impl::auto_impl(&, Arc)] -pub trait PruneCheckpointReader: Send + Sync { +#[auto_impl::auto_impl(&)] +pub trait PruneCheckpointReader: Send { /// Fetch the prune checkpoint for the given segment. fn get_prune_checkpoint( &self, @@ -16,8 +16,8 @@ pub trait PruneCheckpointReader: Send + Sync { } /// The trait for updating prune checkpoint related data. -#[auto_impl::auto_impl(&, Arc)] -pub trait PruneCheckpointWriter: Send + Sync { +#[auto_impl::auto_impl(&)] +pub trait PruneCheckpointWriter { /// Save prune checkpoint. fn save_prune_checkpoint( &self, diff --git a/crates/storage/storage-api/src/receipts.rs b/crates/storage/storage-api/src/receipts.rs index f8390ee538..06c5b65a30 100644 --- a/crates/storage/storage-api/src/receipts.rs +++ b/crates/storage/storage-api/src/receipts.rs @@ -11,7 +11,7 @@ pub type ProviderReceipt

<P> = <P as ReceiptProvider>
::Receipt; /// Client trait for fetching receipt data. #[auto_impl::auto_impl(&, Arc)] -pub trait ReceiptProvider: Send + Sync { +pub trait ReceiptProvider { /// The receipt type. type Receipt: Receipt; diff --git a/crates/storage/storage-api/src/stage_checkpoint.rs b/crates/storage/storage-api/src/stage_checkpoint.rs index 37324e6082..d643dfbde9 100644 --- a/crates/storage/storage-api/src/stage_checkpoint.rs +++ b/crates/storage/storage-api/src/stage_checkpoint.rs @@ -4,8 +4,8 @@ use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_errors::provider::ProviderResult; /// The trait for fetching stage checkpoint related data. -#[auto_impl::auto_impl(&, Arc)] -pub trait StageCheckpointReader: Send + Sync { +#[auto_impl::auto_impl(&)] +pub trait StageCheckpointReader: Send { /// Fetch the checkpoint for the given stage. fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult>; @@ -18,8 +18,8 @@ pub trait StageCheckpointReader: Send + Sync { } /// The trait for updating stage checkpoint related data. -#[auto_impl::auto_impl(&, Arc)] -pub trait StageCheckpointWriter: Send + Sync { +#[auto_impl::auto_impl(&)] +pub trait StageCheckpointWriter { /// Save stage checkpoint. fn save_stage_checkpoint(&self, id: StageId, checkpoint: StageCheckpoint) -> ProviderResult<()>; diff --git a/crates/storage/storage-api/src/state.rs b/crates/storage/storage-api/src/state.rs index dc8241fb95..a4665cb1db 100644 --- a/crates/storage/storage-api/src/state.rs +++ b/crates/storage/storage-api/src/state.rs @@ -14,8 +14,8 @@ use reth_trie_common::HashedPostState; use revm_database::BundleState; /// This just receives state, or [`ExecutionOutcome`], from the provider -#[auto_impl::auto_impl(&, Arc, Box)] -pub trait StateReader: Send + Sync { +#[auto_impl::auto_impl(&, Box)] +pub trait StateReader: Send { /// Receipt type in [`ExecutionOutcome`]. type Receipt: Send + Sync; @@ -27,10 +27,10 @@ pub trait StateReader: Send + Sync { } /// Type alias of boxed [`StateProvider`]. -pub type StateProviderBox = Box; +pub type StateProviderBox = Box; /// An abstraction for a type that provides state data. -#[auto_impl(&, Arc, Box)] +#[auto_impl(&, Box)] pub trait StateProvider: BlockHashReader + AccountReader @@ -39,8 +39,6 @@ pub trait StateProvider: + StorageRootProvider + StateProofProvider + HashedPostStateProvider - + Send - + Sync { /// Get storage of given account. fn storage( @@ -97,15 +95,15 @@ pub trait AccountInfoReader: AccountReader + BytecodeReader {} impl AccountInfoReader for T {} /// Trait that provides the hashed state from various sources. -#[auto_impl(&, Arc, Box)] -pub trait HashedPostStateProvider: Send + Sync { +#[auto_impl(&, Box)] +pub trait HashedPostStateProvider { /// Returns the `HashedPostState` of the provided [`BundleState`]. fn hashed_post_state(&self, bundle_state: &BundleState) -> HashedPostState; } /// Trait for reading bytecode associated with a given code hash. -#[auto_impl(&, Arc, Box)] -pub trait BytecodeReader: Send + Sync { +#[auto_impl(&, Box)] +pub trait BytecodeReader { /// Get account code by its hash fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult>; } @@ -142,8 +140,8 @@ pub trait TryIntoHistoricalStateProvider { /// This affects tracing, or replaying blocks, which will need to be executed on top of the state of /// the parent block. For example, in order to trace block `n`, the state after block `n - 1` needs /// to be used, since block `n` was executed on its parent block's state. 
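The `StateProviderFactory` doc comment above boils down to: to trace or replay block `n`, open the historical state at block `n - 1`. A hedged caller-side sketch, assuming the `latest` / `history_by_block_number` signatures used elsewhere in reth; the helper itself is hypothetical and skips the genesis edge case:

```rust
use alloy_primitives::BlockNumber;
use reth_storage_api::{StateProviderBox, StateProviderFactory};
use reth_storage_errors::provider::ProviderResult;

/// Hypothetical helper: open the state that block `n` executed on top of,
/// i.e. the post-state of its parent block `n - 1`.
fn state_for_replay<F: StateProviderFactory>(
    factory: &F,
    block: BlockNumber,
) -> ProviderResult<StateProviderBox> {
    // Genesis is not special-cased here; callers pass `block >= 1` in this sketch.
    factory.history_by_block_number(block.saturating_sub(1))
}
```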
-#[auto_impl(&, Arc, Box)] -pub trait StateProviderFactory: BlockIdReader + Send + Sync { +#[auto_impl(&, Box, Arc)] +pub trait StateProviderFactory: BlockIdReader + Send { /// Storage provider for latest block. fn latest(&self) -> ProviderResult; diff --git a/crates/storage/storage-api/src/stats.rs b/crates/storage/storage-api/src/stats.rs index d8a14e0ee7..34a39a9274 100644 --- a/crates/storage/storage-api/src/stats.rs +++ b/crates/storage/storage-api/src/stats.rs @@ -1,8 +1,8 @@ use reth_db_api::table::Table; /// The trait for fetching provider statistics. -#[auto_impl::auto_impl(&, Arc)] -pub trait StatsReader: Send + Sync { +#[auto_impl::auto_impl(&)] +pub trait StatsReader { /// Fetch the number of entries in the corresponding [Table]. Depending on the provider, it may /// route to different data sources other than [Table]. fn count_entries(&self) -> reth_storage_errors::provider::ProviderResult; diff --git a/crates/storage/storage-api/src/storage.rs b/crates/storage/storage-api/src/storage.rs index 8f560d8cfb..51a9c5e5e5 100644 --- a/crates/storage/storage-api/src/storage.rs +++ b/crates/storage/storage-api/src/storage.rs @@ -8,8 +8,8 @@ use reth_primitives_traits::StorageEntry; use reth_storage_errors::provider::ProviderResult; /// Storage reader -#[auto_impl::auto_impl(&, Arc, Box)] -pub trait StorageReader: Send + Sync { +#[auto_impl::auto_impl(&, Box)] +pub trait StorageReader: Send { /// Get plainstate storages for addresses and storage keys. fn plain_state_storages( &self, @@ -34,8 +34,8 @@ pub trait StorageReader: Send + Sync { /// Storage `ChangeSet` reader #[cfg(feature = "db-api")] -#[auto_impl::auto_impl(&, Arc, Box)] -pub trait StorageChangeSetReader: Send + Sync { +#[auto_impl::auto_impl(&, Box)] +pub trait StorageChangeSetReader: Send { /// Iterate over storage changesets and return the storage state from before this block. fn storage_changeset( &self, diff --git a/crates/storage/storage-api/src/transactions.rs b/crates/storage/storage-api/src/transactions.rs index 732d043759..354504d966 100644 --- a/crates/storage/storage-api/src/transactions.rs +++ b/crates/storage/storage-api/src/transactions.rs @@ -22,7 +22,7 @@ pub enum TransactionVariant { /// Client trait for fetching transactions related data. #[auto_impl::auto_impl(&, Arc)] -pub trait TransactionsProvider: BlockNumReader + Send + Sync { +pub trait TransactionsProvider: BlockNumReader + Send { /// The transaction type this provider reads. type Transaction: Send + Sync + SignedTransaction; @@ -49,9 +49,6 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { hash: TxHash, ) -> ProviderResult>; - /// Get transaction block number - fn transaction_block(&self, id: TxNumber) -> ProviderResult>; - /// Get transactions by block id. fn transactions_by_block( &self, diff --git a/crates/storage/storage-api/src/trie.rs b/crates/storage/storage-api/src/trie.rs index 9ff02c106e..45ee5ce803 100644 --- a/crates/storage/storage-api/src/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -9,7 +9,7 @@ use reth_trie_common::{ /// A type that can compute the state root of a given post state. #[auto_impl::auto_impl(&, Box, Arc)] -pub trait StateRootProvider: Send + Sync { +pub trait StateRootProvider { /// Returns the state root of the `BundleState` on top of the current state. /// /// # Note @@ -40,8 +40,8 @@ pub trait StateRootProvider: Send + Sync { } /// A type that can compute the storage root for a given account. 
-#[auto_impl::auto_impl(&, Box, Arc)] -pub trait StorageRootProvider: Send + Sync { +#[auto_impl::auto_impl(&, Box)] +pub trait StorageRootProvider { /// Returns the storage root of the `HashedStorage` for target address on top of the current /// state. fn storage_root(&self, address: Address, hashed_storage: HashedStorage) @@ -66,8 +66,8 @@ pub trait StorageRootProvider: Send + Sync { } /// A type that can generate state proof on top of a given post state. -#[auto_impl::auto_impl(&, Box, Arc)] -pub trait StateProofProvider: Send + Sync { +#[auto_impl::auto_impl(&, Box)] +pub trait StateProofProvider { /// Get account and storage proofs of target keys in the `HashedPostState` /// on top of the current state. fn proof( @@ -90,8 +90,8 @@ pub trait StateProofProvider: Send + Sync { } /// Trie Reader -#[auto_impl::auto_impl(&, Arc, Box)] -pub trait TrieReader: Send + Sync { +#[auto_impl::auto_impl(&, Box)] +pub trait TrieReader: Send { /// Returns the [`TrieUpdatesSorted`] for reverting the trie database to its state prior to the /// given block and onwards having been processed. fn trie_reverts(&self, from: BlockNumber) -> ProviderResult; @@ -104,8 +104,8 @@ pub trait TrieReader: Send + Sync { } /// Trie Writer -#[auto_impl::auto_impl(&, Arc, Box)] -pub trait TrieWriter: Send + Sync { +#[auto_impl::auto_impl(&, Box)] +pub trait TrieWriter: Send { /// Writes trie updates to the database. /// /// Returns the number of entries modified. @@ -146,8 +146,8 @@ pub trait TrieWriter: Send + Sync { } /// Storage Trie Writer -#[auto_impl::auto_impl(&, Arc, Box)] -pub trait StorageTrieWriter: Send + Sync { +#[auto_impl::auto_impl(&, Box)] +pub trait StorageTrieWriter: Send { /// Writes storage trie updates from the given storage trie map with already sorted updates. /// /// Expects the storage trie updates to already be sorted by the hashed address key. diff --git a/crates/tasks/src/lib.rs b/crates/tasks/src/lib.rs index 473a727e10..905c91e11b 100644 --- a/crates/tasks/src/lib.rs +++ b/crates/tasks/src/lib.rs @@ -383,15 +383,17 @@ impl TaskExecutor { { let on_shutdown = self.on_shutdown.clone(); - // Clone only the specific counter that we need. 
- let finished_regular_tasks_total_metrics = - self.metrics.finished_regular_tasks_total.clone(); + // Choose the appropriate finished counter based on task kind + let finished_counter = match task_kind { + TaskKind::Default => self.metrics.finished_regular_tasks_total.clone(), + TaskKind::Blocking => self.metrics.finished_regular_blocking_tasks_total.clone(), + }; + // Wrap the original future to increment the finished tasks counter upon completion let task = { async move { // Create an instance of IncCounterOnDrop with the counter to increment - let _inc_counter_on_drop = - IncCounterOnDrop::new(finished_regular_tasks_total_metrics); + let _inc_counter_on_drop = IncCounterOnDrop::new(finished_counter); let fut = pin!(fut); let _ = select(on_shutdown, fut).await; } @@ -633,7 +635,7 @@ impl TaskExecutor { impl TaskSpawner for TaskExecutor { fn spawn(&self, fut: BoxFuture<'static, ()>) -> JoinHandle<()> { self.metrics.inc_regular_tasks(); - self.spawn(fut) + Self::spawn(self, fut) } fn spawn_critical(&self, name: &'static str, fut: BoxFuture<'static, ()>) -> JoinHandle<()> { @@ -642,8 +644,8 @@ impl TaskSpawner for TaskExecutor { } fn spawn_blocking(&self, fut: BoxFuture<'static, ()>) -> JoinHandle<()> { - self.metrics.inc_regular_tasks(); - self.spawn_blocking(fut) + self.metrics.inc_regular_blocking_tasks(); + Self::spawn_blocking(self, fut) } fn spawn_critical_blocking( diff --git a/crates/tasks/src/metrics.rs b/crates/tasks/src/metrics.rs index c486fa681c..24d3065a52 100644 --- a/crates/tasks/src/metrics.rs +++ b/crates/tasks/src/metrics.rs @@ -16,6 +16,10 @@ pub struct TaskExecutorMetrics { pub(crate) regular_tasks_total: Counter, /// Number of finished spawned regular tasks pub(crate) finished_regular_tasks_total: Counter, + /// Number of spawned regular blocking tasks + pub(crate) regular_blocking_tasks_total: Counter, + /// Number of finished spawned regular blocking tasks + pub(crate) finished_regular_blocking_tasks_total: Counter, } impl TaskExecutorMetrics { @@ -28,6 +32,11 @@ impl TaskExecutorMetrics { pub(crate) fn inc_regular_tasks(&self) { self.regular_tasks_total.increment(1); } + + /// Increments the counter for spawned regular blocking tasks. + pub(crate) fn inc_regular_blocking_tasks(&self) { + self.regular_blocking_tasks_total.increment(1); + } } /// Helper type for increasing counters even if a task fails diff --git a/crates/tracing-otlp/src/lib.rs b/crates/tracing-otlp/src/lib.rs index 2cfd332a40..c7af074ad1 100644 --- a/crates/tracing-otlp/src/lib.rs +++ b/crates/tracing-otlp/src/lib.rs @@ -12,7 +12,7 @@ use opentelemetry::{global, trace::TracerProvider, KeyValue, Value}; use opentelemetry_otlp::{SpanExporter, WithExportConfig}; use opentelemetry_sdk::{ propagation::TraceContextPropagator, - trace::{SdkTracer, SdkTracerProvider}, + trace::{Sampler, SdkTracer, SdkTracerProvider}, Resource, }; use opentelemetry_semantic_conventions::{attribute::SERVICE_VERSION, SCHEMA_URL}; @@ -29,36 +29,92 @@ const HTTP_TRACE_ENDPOINT: &str = "/v1/traces"; /// /// This layer can be added to a [`tracing_subscriber::Registry`] to enable `OpenTelemetry` tracing /// with OTLP export to an url. 
-pub fn span_layer( - service_name: impl Into, - endpoint: &Url, - protocol: OtlpProtocol, -) -> eyre::Result> +pub fn span_layer(otlp_config: OtlpConfig) -> eyre::Result> where for<'span> S: Subscriber + LookupSpan<'span>, { global::set_text_map_propagator(TraceContextPropagator::new()); - let resource = build_resource(service_name); + let resource = build_resource(otlp_config.service_name.clone()); let span_builder = SpanExporter::builder(); - let span_exporter = match protocol { - OtlpProtocol::Http => span_builder.with_http().with_endpoint(endpoint.as_str()).build()?, - OtlpProtocol::Grpc => span_builder.with_tonic().with_endpoint(endpoint.as_str()).build()?, + let span_exporter = match otlp_config.protocol { + OtlpProtocol::Http => { + span_builder.with_http().with_endpoint(otlp_config.endpoint.as_str()).build()? + } + OtlpProtocol::Grpc => { + span_builder.with_tonic().with_endpoint(otlp_config.endpoint.as_str()).build()? + } }; + let sampler = build_sampler(otlp_config.sample_ratio)?; + let tracer_provider = SdkTracerProvider::builder() .with_resource(resource) + .with_sampler(sampler) .with_batch_exporter(span_exporter) .build(); global::set_tracer_provider(tracer_provider.clone()); - let tracer = tracer_provider.tracer("reth"); + let tracer = tracer_provider.tracer(otlp_config.service_name); Ok(tracing_opentelemetry::layer().with_tracer(tracer)) } +/// Configuration for OTLP trace export. +#[derive(Debug, Clone)] +pub struct OtlpConfig { + /// Service name for trace identification + service_name: String, + /// Otlp endpoint URL + endpoint: Url, + /// Transport protocol, HTTP or gRPC + protocol: OtlpProtocol, + /// Optional sampling ratio, from 0.0 to 1.0 + sample_ratio: Option, +} + +impl OtlpConfig { + /// Creates a new OTLP configuration. + pub fn new( + service_name: impl Into, + endpoint: Url, + protocol: OtlpProtocol, + sample_ratio: Option, + ) -> eyre::Result { + if let Some(ratio) = sample_ratio { + ensure!( + (0.0..=1.0).contains(&ratio), + "Sample ratio must be between 0.0 and 1.0, got: {}", + ratio + ); + } + + Ok(Self { service_name: service_name.into(), endpoint, protocol, sample_ratio }) + } + + /// Returns the service name. + pub fn service_name(&self) -> &str { + &self.service_name + } + + /// Returns the OTLP endpoint URL. + pub const fn endpoint(&self) -> &Url { + &self.endpoint + } + + /// Returns the transport protocol. + pub const fn protocol(&self) -> OtlpProtocol { + self.protocol + } + + /// Returns the sampling ratio. + pub const fn sample_ratio(&self) -> Option { + self.sample_ratio + } +} + // Builds OTLP resource with service information. fn build_resource(service_name: impl Into) -> Resource { Resource::builder() @@ -67,6 +123,18 @@ fn build_resource(service_name: impl Into) -> Resource { .build() } +/// Builds the appropriate sampler based on the sample ratio. 
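A usage sketch for the new `OtlpConfig`, assuming the elided generics are `impl Into<String>` for the service name and `Option<f64>` for the sampling ratio; the endpoint URL is a placeholder for whatever collector the deployment uses:

```rust
use reth_tracing_otlp::{OtlpConfig, OtlpProtocol};
use url::Url;

fn main() -> eyre::Result<()> {
    // Placeholder collector endpoint; the real value is deployment specific.
    let endpoint = Url::parse("http://localhost:4318/v1/traces")?;

    // Keep roughly 10% of root traces. `None` keeps the always-on default,
    // and ratios outside 0.0..=1.0 are rejected by `OtlpConfig::new`.
    let otlp = OtlpConfig::new("reth", endpoint, OtlpProtocol::Http, Some(0.1))?;
    assert_eq!(otlp.sample_ratio(), Some(0.1));

    // The config is then handed to `span_layer` / `Layers::with_span_layer`.
    Ok(())
}
```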
+fn build_sampler(sample_ratio: Option) -> eyre::Result { + match sample_ratio { + // Default behavior: sample all traces + None | Some(1.0) => Ok(Sampler::ParentBased(Box::new(Sampler::AlwaysOn))), + // Don't sample anything + Some(0.0) => Ok(Sampler::ParentBased(Box::new(Sampler::AlwaysOff))), + // Sample based on trace ID ratio + Some(ratio) => Ok(Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(ratio)))), + } +} + /// OTLP transport protocol type #[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)] pub enum OtlpProtocol { diff --git a/crates/tracing/Cargo.toml b/crates/tracing/Cargo.toml index 8cf83e138c..fd5f5f55de 100644 --- a/crates/tracing/Cargo.toml +++ b/crates/tracing/Cargo.toml @@ -26,8 +26,9 @@ tracing-logfmt.workspace = true clap = { workspace = true, features = ["derive"] } eyre.workspace = true rolling-file.workspace = true -url = { workspace = true, optional = true } +tracing-samply = { workspace = true, optional = true } [features] default = ["otlp"] -otlp = ["reth-tracing-otlp", "dep:url"] +otlp = ["reth-tracing-otlp"] +samply = ["tracing-samply"] diff --git a/crates/tracing/src/layers.rs b/crates/tracing/src/layers.rs index 33f8c90ada..44462ca7c6 100644 --- a/crates/tracing/src/layers.rs +++ b/crates/tracing/src/layers.rs @@ -1,4 +1,6 @@ use crate::formatter::LogFormat; +#[cfg(feature = "otlp")] +use reth_tracing_otlp::{span_layer, OtlpConfig}; use rolling_file::{RollingConditionBasic, RollingFileAppender}; use std::{ fmt, @@ -6,11 +8,6 @@ use std::{ }; use tracing_appender::non_blocking::WorkerGuard; use tracing_subscriber::{filter::Directive, EnvFilter, Layer, Registry}; -#[cfg(feature = "otlp")] -use { - reth_tracing_otlp::{span_layer, OtlpProtocol}, - url::Url, -}; /// A worker guard returned by the file layer. /// @@ -137,14 +134,12 @@ impl Layers { #[cfg(feature = "otlp")] pub fn with_span_layer( &mut self, - service_name: String, - endpoint_exporter: Url, + otlp_config: OtlpConfig, filter: EnvFilter, - otlp_protocol: OtlpProtocol, ) -> eyre::Result<()> { // Create the span provider - let span_layer = span_layer(service_name, &endpoint_exporter, otlp_protocol) + let span_layer = span_layer(otlp_config) .map_err(|e| eyre::eyre!("Failed to build OTLP span exporter {}", e))? .with_filter(filter); diff --git a/crates/tracing/src/lib.rs b/crates/tracing/src/lib.rs index 7b06398e8c..346cc5faf1 100644 --- a/crates/tracing/src/lib.rs +++ b/crates/tracing/src/lib.rs @@ -224,6 +224,12 @@ impl Tracer for RethTracer { None }; + #[cfg(feature = "samply")] + layers.add_layer( + tracing_samply::SamplyLayer::new() + .map_err(|e| eyre::eyre!("Failed to create samply layer: {}", e))?, + ); + // The error is returned if the global default subscriber is already set, // so it's safe to ignore it let _ = tracing_subscriber::registry().with(layers.into_inner()).try_init(); diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index b883345aac..cec7e50900 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -59,6 +59,73 @@ impl DiskFileBlobStore { fn clear_cache(&self) { self.inner.blob_cache.lock().clear() } + + /// Look up EIP-7594 blobs by their versioned hashes. + /// + /// This returns a result vector with the **same length and order** as the input + /// `versioned_hashes`. Each element is `Some(BlobAndProofV2)` if the blob is available, or + /// `None` if it is missing or an older sidecar version. 
+ /// + /// The lookup first scans the in-memory cache and, if not all blobs are found, falls back to + /// reading candidate sidecars from disk using the `versioned_hash -> tx_hash` index. + fn get_by_versioned_hashes_eip7594( + &self, + versioned_hashes: &[B256], + ) -> Result>, BlobStoreError> { + // we must return the blobs in order but we don't necessarily find them in the requested + // order + let mut result = vec![None; versioned_hashes.len()]; + + // first scan all cached full sidecars + for (_tx_hash, blob_sidecar) in self.inner.blob_cache.lock().iter() { + if let Some(blob_sidecar) = blob_sidecar.as_eip7594() { + for (hash_idx, match_result) in + blob_sidecar.match_versioned_hashes(versioned_hashes) + { + result[hash_idx] = Some(match_result); + } + } + + // return early if all blobs are found. + if result.iter().all(|blob| blob.is_some()) { + return Ok(result); + } + } + + // not all versioned hashes were found, try to look up a matching tx + let mut missing_tx_hashes = Vec::new(); + + { + let mut versioned_to_txhashes = self.inner.versioned_hashes_to_txhash.lock(); + for (idx, _) in + result.iter().enumerate().filter(|(_, blob_and_proof)| blob_and_proof.is_none()) + { + // this is safe because the result vec has the same len + let versioned_hash = versioned_hashes[idx]; + if let Some(tx_hash) = versioned_to_txhashes.get(&versioned_hash).copied() { + missing_tx_hashes.push(tx_hash); + } + } + } + + // if we have missing blobs, try to read them from disk and try again + if !missing_tx_hashes.is_empty() { + let blobs_from_disk = self.inner.read_many_decoded(missing_tx_hashes); + for (_, blob_sidecar) in blobs_from_disk { + if let Some(blob_sidecar) = blob_sidecar.as_eip7594() { + for (hash_idx, match_result) in + blob_sidecar.match_versioned_hashes(versioned_hashes) + { + if result[hash_idx].is_none() { + result[hash_idx] = Some(match_result); + } + } + } + } + } + + Ok(result) + } } impl BlobStore for DiskFileBlobStore { @@ -84,6 +151,9 @@ impl BlobStore for DiskFileBlobStore { } fn delete_all(&self, txs: Vec) -> Result<(), BlobStoreError> { + if txs.is_empty() { + return Ok(()) + } let txs = self.inner.retain_existing(txs)?; self.inner.txs_to_delete.write().extend(txs); Ok(()) @@ -205,58 +275,7 @@ impl BlobStore for DiskFileBlobStore { &self, versioned_hashes: &[B256], ) -> Result>, BlobStoreError> { - // we must return the blobs in order but we don't necessarily find them in the requested - // order - let mut result = vec![None; versioned_hashes.len()]; - - // first scan all cached full sidecars - for (_tx_hash, blob_sidecar) in self.inner.blob_cache.lock().iter() { - if let Some(blob_sidecar) = blob_sidecar.as_eip7594() { - for (hash_idx, match_result) in - blob_sidecar.match_versioned_hashes(versioned_hashes) - { - result[hash_idx] = Some(match_result); - } - } - - // return early if all blobs are found. 
- if result.iter().all(|blob| blob.is_some()) { - // got all blobs, can return early - return Ok(Some(result.into_iter().map(Option::unwrap).collect())) - } - } - - // not all versioned hashes were found, try to look up a matching tx - let mut missing_tx_hashes = Vec::new(); - - { - let mut versioned_to_txhashes = self.inner.versioned_hashes_to_txhash.lock(); - for (idx, _) in - result.iter().enumerate().filter(|(_, blob_and_proof)| blob_and_proof.is_none()) - { - // this is safe because the result vec has the same len - let versioned_hash = versioned_hashes[idx]; - if let Some(tx_hash) = versioned_to_txhashes.get(&versioned_hash).copied() { - missing_tx_hashes.push(tx_hash); - } - } - } - - // if we have missing blobs, try to read them from disk and try again - if !missing_tx_hashes.is_empty() { - let blobs_from_disk = self.inner.read_many_decoded(missing_tx_hashes); - for (_, blob_sidecar) in blobs_from_disk { - if let Some(blob_sidecar) = blob_sidecar.as_eip7594() { - for (hash_idx, match_result) in - blob_sidecar.match_versioned_hashes(versioned_hashes) - { - if result[hash_idx].is_none() { - result[hash_idx] = Some(match_result); - } - } - } - } - } + let result = self.get_by_versioned_hashes_eip7594(versioned_hashes)?; // only return the blobs if we found all requested versioned hashes if result.iter().all(|blob| blob.is_some()) { @@ -266,6 +285,13 @@ impl BlobStore for DiskFileBlobStore { } } + fn get_by_versioned_hashes_v3( + &self, + versioned_hashes: &[B256], + ) -> Result>, BlobStoreError> { + self.get_by_versioned_hashes_eip7594(versioned_hashes) + } + fn data_size_hint(&self) -> Option { Some(self.inner.size_tracker.data_size()) } @@ -656,7 +682,12 @@ pub enum OpenDiskFileBlobStore { #[cfg(test)] mod tests { use alloy_consensus::BlobTransactionSidecar; - use alloy_eips::eip7594::BlobTransactionSidecarVariant; + use alloy_eips::{ + eip4844::{kzg_to_versioned_hash, Blob, BlobAndProofV2, Bytes48}, + eip7594::{ + BlobTransactionSidecarEip7594, BlobTransactionSidecarVariant, CELLS_PER_EXT_BLOB, + }, + }; use super::*; use std::sync::atomic::Ordering; @@ -682,6 +713,20 @@ mod tests { .collect() } + fn eip7594_single_blob_sidecar() -> (BlobTransactionSidecarVariant, B256, BlobAndProofV2) { + let blob = Blob::default(); + let commitment = Bytes48::default(); + let cell_proofs = vec![Bytes48::default(); CELLS_PER_EXT_BLOB]; + + let versioned_hash = kzg_to_versioned_hash(commitment.as_slice()); + + let expected = + BlobAndProofV2 { blob: Box::new(Blob::default()), proofs: cell_proofs.clone() }; + let sidecar = BlobTransactionSidecarEip7594::new(vec![blob], vec![commitment], cell_proofs); + + (BlobTransactionSidecarVariant::Eip7594(sidecar), versioned_hash, expected) + } + #[test] fn disk_insert_all_get_all() { let (store, _dir) = tmp_store(); @@ -851,4 +896,33 @@ mod tests { assert_eq!(stat.delete_succeed, 3); assert_eq!(stat.delete_failed, 0); } + + #[test] + fn disk_get_blobs_v3_returns_partial_results() { + let (store, _dir) = tmp_store(); + + let (sidecar, versioned_hash, expected) = eip7594_single_blob_sidecar(); + store.insert(TxHash::random(), sidecar).unwrap(); + + assert_ne!(versioned_hash, B256::ZERO); + + let request = vec![versioned_hash, B256::ZERO]; + let v2 = store.get_by_versioned_hashes_v2(&request).unwrap(); + assert!(v2.is_none(), "v2 must return null if any requested blob is missing"); + + let v3 = store.get_by_versioned_hashes_v3(&request).unwrap(); + assert_eq!(v3, vec![Some(expected), None]); + } + + #[test] + fn disk_get_blobs_v3_can_fallback_to_disk() { + let 
(store, _dir) = tmp_store(); + + let (sidecar, versioned_hash, expected) = eip7594_single_blob_sidecar(); + store.insert(TxHash::random(), sidecar).unwrap(); + store.clear_cache(); + + let v3 = store.get_by_versioned_hashes_v3(&[versioned_hash]).unwrap(); + assert_eq!(v3, vec![Some(expected)]); + } } diff --git a/crates/transaction-pool/src/blobstore/mem.rs b/crates/transaction-pool/src/blobstore/mem.rs index 44dff1cceb..41afa244d4 100644 --- a/crates/transaction-pool/src/blobstore/mem.rs +++ b/crates/transaction-pool/src/blobstore/mem.rs @@ -13,6 +13,35 @@ pub struct InMemoryBlobStore { inner: Arc, } +impl InMemoryBlobStore { + /// Look up EIP-7594 blobs by their versioned hashes. + /// + /// This returns a result vector with the **same length and order** as the input + /// `versioned_hashes`. Each element is `Some(BlobAndProofV2)` if the blob is available, or + /// `None` if it is missing or an older sidecar version. + fn get_by_versioned_hashes_eip7594( + &self, + versioned_hashes: &[B256], + ) -> Vec> { + let mut result = vec![None; versioned_hashes.len()]; + for (_tx_hash, blob_sidecar) in self.inner.store.read().iter() { + if let Some(blob_sidecar) = blob_sidecar.as_eip7594() { + for (hash_idx, match_result) in + blob_sidecar.match_versioned_hashes(versioned_hashes) + { + result[hash_idx] = Some(match_result); + } + } + + // Return early if all blobs are found. + if result.iter().all(|blob| blob.is_some()) { + break; + } + } + result + } +} + #[derive(Debug, Default)] struct InMemoryBlobStoreInner { /// Storage for all blob data. @@ -99,8 +128,13 @@ impl BlobStore for InMemoryBlobStore { &self, txs: Vec, ) -> Result>, BlobStoreError> { + if txs.is_empty() { + return Ok(Vec::new()); + } let store = self.inner.store.read(); - Ok(txs.into_iter().filter_map(|tx| store.get(&tx).cloned()).collect()) + txs.into_iter() + .map(|tx| store.get(&tx).cloned().ok_or(BlobStoreError::MissingSidecar(tx))) + .collect() } fn get_by_versioned_hashes_v1( @@ -129,20 +163,7 @@ impl BlobStore for InMemoryBlobStore { &self, versioned_hashes: &[B256], ) -> Result>, BlobStoreError> { - let mut result = vec![None; versioned_hashes.len()]; - for (_tx_hash, blob_sidecar) in self.inner.store.read().iter() { - if let Some(blob_sidecar) = blob_sidecar.as_eip7594() { - for (hash_idx, match_result) in - blob_sidecar.match_versioned_hashes(versioned_hashes) - { - result[hash_idx] = Some(match_result); - } - } - - if result.iter().all(|blob| blob.is_some()) { - break; - } - } + let result = self.get_by_versioned_hashes_eip7594(versioned_hashes); if result.iter().all(|blob| blob.is_some()) { Ok(Some(result.into_iter().map(Option::unwrap).collect())) } else { @@ -150,6 +171,13 @@ impl BlobStore for InMemoryBlobStore { } } + fn get_by_versioned_hashes_v3( + &self, + versioned_hashes: &[B256], + ) -> Result>, BlobStoreError> { + Ok(self.get_by_versioned_hashes_eip7594(versioned_hashes)) + } + fn data_size_hint(&self) -> Option { Some(self.inner.size_tracker.data_size()) } @@ -178,3 +206,45 @@ fn insert_size( store.insert(tx, Arc::new(blob)); add } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_eips::{ + eip4844::{kzg_to_versioned_hash, Blob, BlobAndProofV2, Bytes48}, + eip7594::{ + BlobTransactionSidecarEip7594, BlobTransactionSidecarVariant, CELLS_PER_EXT_BLOB, + }, + }; + + fn eip7594_single_blob_sidecar() -> (BlobTransactionSidecarVariant, B256, BlobAndProofV2) { + let blob = Blob::default(); + let commitment = Bytes48::default(); + let cell_proofs = vec![Bytes48::default(); CELLS_PER_EXT_BLOB]; + + let 
versioned_hash = kzg_to_versioned_hash(commitment.as_slice()); + + let expected = + BlobAndProofV2 { blob: Box::new(Blob::default()), proofs: cell_proofs.clone() }; + let sidecar = BlobTransactionSidecarEip7594::new(vec![blob], vec![commitment], cell_proofs); + + (BlobTransactionSidecarVariant::Eip7594(sidecar), versioned_hash, expected) + } + + #[test] + fn mem_get_blobs_v3_returns_partial_results() { + let store = InMemoryBlobStore::default(); + + let (sidecar, versioned_hash, expected) = eip7594_single_blob_sidecar(); + store.insert(B256::random(), sidecar).unwrap(); + + assert_ne!(versioned_hash, B256::ZERO); + + let request = vec![versioned_hash, B256::ZERO]; + let v2 = store.get_by_versioned_hashes_v2(&request).unwrap(); + assert!(v2.is_none(), "v2 must return null if any requested blob is missing"); + + let v3 = store.get_by_versioned_hashes_v3(&request).unwrap(); + assert_eq!(v3, vec![Some(expected), None]); + } +} diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs index ee7eb45af0..7806bf1e5e 100644 --- a/crates/transaction-pool/src/blobstore/mod.rs +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -100,6 +100,15 @@ pub trait BlobStore: fmt::Debug + Send + Sync + 'static { versioned_hashes: &[B256], ) -> Result>, BlobStoreError>; + /// Return the [`BlobAndProofV2`]s for a list of blob versioned hashes. + /// + /// The response is always the same length as the request. Missing or older-version blobs are + /// returned as `None` elements. + fn get_by_versioned_hashes_v3( + &self, + versioned_hashes: &[B256], + ) -> Result>, BlobStoreError>; + /// Data size of all transactions in the blob store. fn data_size_hint(&self) -> Option; diff --git a/crates/transaction-pool/src/blobstore/noop.rs b/crates/transaction-pool/src/blobstore/noop.rs index bb03253ee6..55f7fc8a10 100644 --- a/crates/transaction-pool/src/blobstore/noop.rs +++ b/crates/transaction-pool/src/blobstore/noop.rs @@ -78,6 +78,13 @@ impl BlobStore for NoopBlobStore { Ok(None) } + fn get_by_versioned_hashes_v3( + &self, + versioned_hashes: &[B256], + ) -> Result>, BlobStoreError> { + Ok(vec![None; versioned_hashes.len()]) + } + fn data_size_hint(&self) -> Option { Some(0) } diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index 3bcbb4cd0a..ea64833248 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -396,6 +396,23 @@ impl InvalidPoolTransactionError { } } + /// Returns true if this is a [`Self::Consensus`] variant. + pub const fn as_consensus(&self) -> Option<&InvalidTransactionError> { + match self { + Self::Consensus(err) => Some(err), + _ => None, + } + } + + /// Returns true if this is [`InvalidTransactionError::NonceNotConsistent`] and the + /// transaction's nonce is lower than the state's. + pub fn is_nonce_too_low(&self) -> bool { + match self { + Self::Consensus(err) => err.is_nonce_too_low(), + _ => false, + } + } + /// Returns `true` if an import failed due to an oversized transaction pub const fn is_oversized(&self) -> bool { matches!(self, Self::OversizedData { .. 
}) diff --git a/crates/transaction-pool/src/identifier.rs b/crates/transaction-pool/src/identifier.rs index d2610ee9ba..8cb80869ab 100644 --- a/crates/transaction-pool/src/identifier.rs +++ b/crates/transaction-pool/src/identifier.rs @@ -66,6 +66,12 @@ impl SenderId { std::ops::Bound::Included(TransactionId::new(self, 0)) } + /// Returns a `Range` for [`TransactionId`] starting with nonce `0` and ending with nonce + /// `u64::MAX` + pub const fn range(self) -> std::ops::RangeInclusive { + TransactionId::new(self, 0)..=TransactionId::new(self, u64::MAX) + } + /// Converts the sender to a [`TransactionId`] with the given nonce. pub const fn into_transaction_id(self, nonce: u64) -> TransactionId { TransactionId::new(self, nonce) diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 7f3fa4a117..caf3fc95fb 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -354,8 +354,8 @@ where Self { pool: Arc::new(PoolInner::new(validator, ordering, blob_store, config)) } } - /// Returns the wrapped pool. - pub(crate) fn inner(&self) -> &PoolInner { + /// Returns the wrapped pool internals. + pub fn inner(&self) -> &PoolInner { &self.pool } @@ -364,6 +364,11 @@ where self.inner().config() } + /// Get the validator reference. + pub fn validator(&self) -> &V { + self.inner().validator() + } + /// Validates the given transaction async fn validate( &self, @@ -384,23 +389,6 @@ where self.pool.validator().validate_transactions_with_origin(origin, transactions).await } - /// Validates all transactions with their individual origins. - /// - /// This returns the validated transactions in the same order as input. - async fn validate_all_with_origins( - &self, - transactions: Vec<(TransactionOrigin, V::Transaction)>, - ) -> Vec<(TransactionOrigin, TransactionValidationOutcome)> { - if transactions.len() == 1 { - let (origin, tx) = transactions.into_iter().next().unwrap(); - let res = self.pool.validator().validate_transaction(origin, tx).await; - return vec![(origin, res)] - } - let origins: Vec<_> = transactions.iter().map(|(origin, _)| *origin).collect(); - let tx_outcomes = self.pool.validator().validate_transactions(transactions).await; - origins.into_iter().zip(tx_outcomes).collect() - } - /// Number of transactions in the entire pool pub fn len(&self) -> usize { self.pool.len() @@ -516,18 +504,6 @@ where self.pool.add_transactions(origin, validated.into_iter()) } - async fn add_transactions_with_origins( - &self, - transactions: Vec<(TransactionOrigin, Self::Transaction)>, - ) -> Vec> { - if transactions.is_empty() { - return Vec::new() - } - let validated = self.validate_all_with_origins(transactions).await; - - self.pool.add_transactions_with_origins(validated) - } - fn transaction_event_listener(&self, tx_hash: TxHash) -> Option { self.pool.add_transaction_event_listener(tx_hash) } @@ -556,7 +532,7 @@ where } fn pooled_transaction_hashes_max(&self, max: usize) -> Vec { - self.pooled_transaction_hashes().into_iter().take(max).collect() + self.pool.pooled_transactions_hashes_max(max) } fn pooled_transactions(&self) -> Vec>> { @@ -775,6 +751,13 @@ where ) -> Result>, BlobStoreError> { self.pool.blob_store().get_by_versioned_hashes_v2(versioned_hashes) } + + fn get_blobs_for_versioned_hashes_v3( + &self, + versioned_hashes: &[B256], + ) -> Result>, BlobStoreError> { + self.pool.blob_store().get_by_versioned_hashes_v3(versioned_hashes) + } } impl TransactionPoolExt for Pool diff --git a/crates/transaction-pool/src/maintain.rs 
b/crates/transaction-pool/src/maintain.rs index 0e30a2473b..89d81e4037 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -269,17 +269,26 @@ pub async fn maintain_transaction_pool( } } _ = stale_eviction_interval.tick() => { - let stale_txs: Vec<_> = pool - .queued_transactions() + let queued = pool + .queued_transactions(); + let mut stale_blobs = Vec::new(); + let now = std::time::Instant::now(); + let stale_txs: Vec<_> = queued .into_iter() .filter(|tx| { // filter stale transactions based on config - (tx.origin.is_external() || config.no_local_exemptions) && tx.timestamp.elapsed() > config.max_tx_lifetime + (tx.origin.is_external() || config.no_local_exemptions) && now - tx.timestamp > config.max_tx_lifetime + }) + .map(|tx| { + if tx.is_eip4844() { + stale_blobs.push(*tx.hash()); + } + *tx.hash() }) - .map(|tx| *tx.hash()) .collect(); debug!(target: "txpool", count=%stale_txs.len(), "removing stale transactions"); pool.remove_transactions(stale_txs); + pool.delete_blobs(stale_blobs); } } // handle the result of the account reload diff --git a/crates/transaction-pool/src/metrics.rs b/crates/transaction-pool/src/metrics.rs index d9926dafa0..b683ccf672 100644 --- a/crates/transaction-pool/src/metrics.rs +++ b/crates/transaction-pool/src/metrics.rs @@ -10,56 +10,58 @@ use reth_metrics::{ #[metrics(scope = "transaction_pool")] pub struct TxPoolMetrics { /// Number of transactions inserted in the pool - pub(crate) inserted_transactions: Counter, + pub inserted_transactions: Counter, /// Number of invalid transactions - pub(crate) invalid_transactions: Counter, + pub invalid_transactions: Counter, /// Number of removed transactions from the pool - pub(crate) removed_transactions: Counter, + pub removed_transactions: Counter, /// Number of transactions in the pending sub-pool - pub(crate) pending_pool_transactions: Gauge, + pub pending_pool_transactions: Gauge, /// Total amount of memory used by the transactions in the pending sub-pool in bytes - pub(crate) pending_pool_size_bytes: Gauge, + pub pending_pool_size_bytes: Gauge, /// Number of transactions in the basefee sub-pool - pub(crate) basefee_pool_transactions: Gauge, + pub basefee_pool_transactions: Gauge, /// Total amount of memory used by the transactions in the basefee sub-pool in bytes - pub(crate) basefee_pool_size_bytes: Gauge, + pub basefee_pool_size_bytes: Gauge, /// Number of transactions in the queued sub-pool - pub(crate) queued_pool_transactions: Gauge, + pub queued_pool_transactions: Gauge, /// Total amount of memory used by the transactions in the queued sub-pool in bytes - pub(crate) queued_pool_size_bytes: Gauge, + pub queued_pool_size_bytes: Gauge, /// Number of transactions in the blob sub-pool - pub(crate) blob_pool_transactions: Gauge, + pub blob_pool_transactions: Gauge, /// Total amount of memory used by the transactions in the blob sub-pool in bytes - pub(crate) blob_pool_size_bytes: Gauge, + pub blob_pool_size_bytes: Gauge, /// Number of all transactions of all sub-pools: pending + basefee + queued + blob - pub(crate) total_transactions: Gauge, + pub total_transactions: Gauge, /// Number of all legacy transactions in the pool - pub(crate) total_legacy_transactions: Gauge, + pub total_legacy_transactions: Gauge, /// Number of all EIP-2930 transactions in the pool - pub(crate) total_eip2930_transactions: Gauge, + pub total_eip2930_transactions: Gauge, /// Number of all EIP-1559 transactions in the pool - pub(crate) total_eip1559_transactions: Gauge, + pub 
total_eip1559_transactions: Gauge, /// Number of all EIP-4844 transactions in the pool - pub(crate) total_eip4844_transactions: Gauge, + pub total_eip4844_transactions: Gauge, /// Number of all EIP-7702 transactions in the pool - pub(crate) total_eip7702_transactions: Gauge, + pub total_eip7702_transactions: Gauge, + /// Number of all other transactions in the pool + pub total_other_transactions: Gauge, /// How often the pool was updated after the canonical state changed - pub(crate) performed_state_updates: Counter, + pub performed_state_updates: Counter, /// Counter for the number of pending transactions evicted - pub(crate) pending_transactions_evicted: Counter, + pub pending_transactions_evicted: Counter, /// Counter for the number of basefee transactions evicted - pub(crate) basefee_transactions_evicted: Counter, + pub basefee_transactions_evicted: Counter, /// Counter for the number of blob transactions evicted - pub(crate) blob_transactions_evicted: Counter, + pub blob_transactions_evicted: Counter, /// Counter for the number of queued transactions evicted - pub(crate) queued_transactions_evicted: Counter, + pub queued_transactions_evicted: Counter, } /// Transaction pool blobstore metrics @@ -67,13 +69,13 @@ pub struct TxPoolMetrics { #[metrics(scope = "transaction_pool")] pub struct BlobStoreMetrics { /// Number of failed inserts into the blobstore - pub(crate) blobstore_failed_inserts: Counter, + pub blobstore_failed_inserts: Counter, /// Number of failed deletes into the blobstore - pub(crate) blobstore_failed_deletes: Counter, + pub blobstore_failed_deletes: Counter, /// The number of bytes the blobs in the blobstore take up - pub(crate) blobstore_byte_size: Gauge, + pub blobstore_byte_size: Gauge, /// How many blobs are currently in the blobstore - pub(crate) blobstore_entries: Gauge, + pub blobstore_entries: Gauge, } /// Transaction pool maintenance metrics @@ -82,35 +84,39 @@ pub struct BlobStoreMetrics { pub struct MaintainPoolMetrics { /// Gauge indicating the number of addresses with pending updates in the pool, /// requiring their account information to be fetched. - pub(crate) dirty_accounts: Gauge, + pub dirty_accounts: Gauge, /// Counter for the number of times the pool state diverged from the canonical blockchain /// state. - pub(crate) drift_count: Counter, + pub drift_count: Counter, /// Counter for the number of transactions reinserted into the pool following a blockchain /// reorganization (reorg). - pub(crate) reinserted_transactions: Counter, + pub reinserted_transactions: Counter, /// Counter for the number of finalized blob transactions that have been removed from tracking. - pub(crate) deleted_tracked_finalized_blobs: Counter, + pub deleted_tracked_finalized_blobs: Counter, } impl MaintainPoolMetrics { + /// Sets the number of dirty accounts in the pool. #[inline] - pub(crate) fn set_dirty_accounts_len(&self, count: usize) { + pub fn set_dirty_accounts_len(&self, count: usize) { self.dirty_accounts.set(count as f64); } + /// Increments the count of reinserted transactions. #[inline] - pub(crate) fn inc_reinserted_transactions(&self, count: usize) { + pub fn inc_reinserted_transactions(&self, count: usize) { self.reinserted_transactions.increment(count as u64); } + /// Increments the count of deleted tracked finalized blobs. #[inline] - pub(crate) fn inc_deleted_tracked_blobs(&self, count: usize) { + pub fn inc_deleted_tracked_blobs(&self, count: usize) { self.deleted_tracked_finalized_blobs.increment(count as u64); } + /// Increments the drift count by one. 
#[inline] - pub(crate) fn inc_drift(&self) { + pub fn inc_drift(&self) { self.drift_count.increment(1); } } @@ -120,17 +126,17 @@ impl MaintainPoolMetrics { #[metrics(scope = "transaction_pool")] pub struct AllTransactionsMetrics { /// Number of all transactions by hash in the pool - pub(crate) all_transactions_by_hash: Gauge, + pub all_transactions_by_hash: Gauge, /// Number of all transactions by id in the pool - pub(crate) all_transactions_by_id: Gauge, + pub all_transactions_by_id: Gauge, /// Number of all transactions by all senders in the pool - pub(crate) all_transactions_by_all_senders: Gauge, + pub all_transactions_by_all_senders: Gauge, /// Number of blob transactions nonce gaps. - pub(crate) blob_transactions_nonce_gaps: Counter, + pub blob_transactions_nonce_gaps: Counter, /// The current blob base fee - pub(crate) blob_base_fee: Gauge, + pub blob_base_fee: Gauge, /// The current base fee - pub(crate) base_fee: Gauge, + pub base_fee: Gauge, } /// Transaction pool validation metrics @@ -138,7 +144,7 @@ pub struct AllTransactionsMetrics { #[metrics(scope = "transaction_pool")] pub struct TxPoolValidationMetrics { /// How long to successfully validate a blob - pub(crate) blob_validation_duration: Histogram, + pub blob_validation_duration: Histogram, } /// Transaction pool validator task metrics @@ -146,5 +152,5 @@ pub struct TxPoolValidationMetrics { #[metrics(scope = "transaction_pool")] pub struct TxPoolValidatorMetrics { /// Number of in-flight validation job sends waiting for channel capacity - pub(crate) inflight_validation_jobs: Gauge, + pub inflight_validation_jobs: Gauge, } diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index dc5bb9c307..8ddcf82392 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -98,19 +98,6 @@ impl TransactionPool for NoopTransactionPool { .collect() } - async fn add_transactions_with_origins( - &self, - transactions: Vec<(TransactionOrigin, Self::Transaction)>, - ) -> Vec> { - transactions - .into_iter() - .map(|(_, transaction)| { - let hash = *transaction.hash(); - Err(PoolError::other(hash, Box::new(NoopInsertError::new(transaction)))) - }) - .collect() - } - fn transaction_event_listener(&self, _tx_hash: TxHash) -> Option { None } @@ -358,6 +345,13 @@ impl TransactionPool for NoopTransactionPool { ) -> Result>, BlobStoreError> { Ok(None) } + + fn get_blobs_for_versioned_hashes_v3( + &self, + versioned_hashes: &[B256], + ) -> Result>, BlobStoreError> { + Ok(vec![None; versioned_hashes.len()]) + } } /// A [`TransactionValidator`] that does nothing. 
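The new `get_blobs_for_versioned_hashes_v3` override above answers with one entry per requested hash rather than skipping misses. A minimal std-only sketch of that index-aligned contract; the `VersionedHash`/`Blob` aliases and the `HashMap` store are stand-ins, not reth's actual types:

use std::collections::HashMap;

// Stand-in types; the real pool keys blobs by B256 versioned hashes and
// returns blob-and-proof sidecars.
type VersionedHash = [u8; 32];
type Blob = Vec<u8>;

/// Look up blobs for the requested versioned hashes, keeping the result
/// aligned one-to-one with the input: a miss yields `None` at the same index
/// instead of being skipped.
fn blobs_for_versioned_hashes(
    store: &HashMap<VersionedHash, Blob>,
    versioned_hashes: &[VersionedHash],
) -> Vec<Option<Blob>> {
    versioned_hashes.iter().map(|h| store.get(h).cloned()).collect()
}

/// A no-op store never holds blobs, so it answers with one `None` per hash,
/// mirroring `vec![None; versioned_hashes.len()]` above.
fn noop_blobs_for_versioned_hashes(versioned_hashes: &[VersionedHash]) -> Vec<Option<Blob>> {
    vec![None; versioned_hashes.len()]
}

fn main() {
    let mut store = HashMap::new();
    store.insert([1u8; 32], vec![0xaa]);
    let hashes = [[0u8; 32], [1u8; 32]];
    assert_eq!(blobs_for_versioned_hashes(&store, &hashes), vec![None, Some(vec![0xaa])]);
    assert_eq!(noop_blobs_for_versioned_hashes(&hashes), vec![None, None]);
}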
diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 90cd042df6..d97f28ee75 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -32,7 +32,7 @@ pub(crate) struct BestTransactionsWithFees { } impl crate::traits::BestTransactions for BestTransactionsWithFees { - fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) { + fn mark_invalid(&mut self, tx: &Self::Item, kind: &InvalidPoolTransactionError) { BestTransactions::mark_invalid(&mut self.best, tx, kind) } @@ -68,7 +68,7 @@ impl Iterator for BestTransactionsWithFees { crate::traits::BestTransactions::mark_invalid( self, &best, - InvalidPoolTransactionError::Underpriced, + &InvalidPoolTransactionError::Underpriced, ); } } @@ -112,7 +112,7 @@ impl BestTransactions { pub(crate) fn mark_invalid( &mut self, tx: &Arc>, - _kind: InvalidPoolTransactionError, + _kind: &InvalidPoolTransactionError, ) { self.invalid.insert(tx.sender_id()); } @@ -126,7 +126,7 @@ impl BestTransactions { } /// Non-blocking read on the new pending transactions subscription channel - fn try_recv(&mut self) -> Option> { + fn try_recv(&mut self) -> Option> { loop { match self.new_transaction_receiver.as_mut()?.try_recv() { Ok(tx) => { @@ -135,9 +135,9 @@ impl BestTransactions { { // we skip transactions if we already yielded a transaction with lower // priority - return None + return Some(IncomingTransaction::Stash(tx)) } - return Some(tx) + return Some(IncomingTransaction::Process(tx)) } // note TryRecvError::Lagged can be returned here, which is an error that attempts // to correct itself on consecutive try_recv() attempts @@ -170,41 +170,31 @@ impl BestTransactions { for _ in 0..MAX_NEW_TRANSACTIONS_PER_BATCH { if let Some(pending_tx) = self.try_recv() { // same logic as PendingPool::add_transaction/PendingPool::best_with_unlocked - let tx_id = *pending_tx.transaction.id(); - if self.ancestor(&tx_id).is_none() { - self.independent.insert(pending_tx.clone()); + + match pending_tx { + IncomingTransaction::Process(tx) => { + let tx_id = *tx.transaction.id(); + if self.ancestor(&tx_id).is_none() { + self.independent.insert(tx.clone()); + } + self.all.insert(tx_id, tx); + } + IncomingTransaction::Stash(tx) => { + let tx_id = *tx.transaction.id(); + self.all.insert(tx_id, tx); + } } - self.all.insert(tx_id, pending_tx); } else { break; } } } -} -impl crate::traits::BestTransactions for BestTransactions { - fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) { - Self::mark_invalid(self, tx, kind) - } - - fn no_updates(&mut self) { - self.new_transaction_receiver.take(); - self.last_priority.take(); - } - - fn skip_blobs(&mut self) { - self.set_skip_blobs(true); - } - - fn set_skip_blobs(&mut self, skip_blobs: bool) { - self.skip_blobs = skip_blobs; - } -} - -impl Iterator for BestTransactions { - type Item = Arc>; - - fn next(&mut self) -> Option { + /// Returns the next best transaction and its priority value. 
+ #[allow(clippy::type_complexity)] + pub fn next_tx_and_priority( + &mut self, + ) -> Option<(Arc>, Priority)> { loop { self.add_new_transactions(); // Remove the next independent tx with the highest priority @@ -231,7 +221,7 @@ impl Iterator for BestTransactions { // transactions are returned self.mark_invalid( &best.transaction, - InvalidPoolTransactionError::Eip4844( + &InvalidPoolTransactionError::Eip4844( Eip4844PoolTransactionError::NoEip4844Blobs, ), ) @@ -239,12 +229,66 @@ impl Iterator for BestTransactions { if self.new_transaction_receiver.is_some() { self.last_priority = Some(best.priority.clone()) } - return Some(best.transaction) + return Some((best.transaction, best.priority)) } } } } +/// Result of attempting to receive a new transaction from the channel during iteration. +/// +/// This enum determines how a newly received transaction should be handled based on its priority +/// relative to transactions already yielded by the iterator. +enum IncomingTransaction { + /// Process the transaction normally: add to both `all` map and potentially to `independent` + /// set (if it has no ancestor). + /// + /// This variant is used when the transaction's priority is lower than or equal to the last + /// yielded transaction, meaning it can be safely processed without breaking the descending + /// priority order. + Process(PendingTransaction), + + /// Stash the transaction: add only to the `all` map, but NOT to the `independent` set. + /// + /// This variant is used when the transaction has a higher priority than the last yielded + /// transaction. We cannot yield it immediately (to maintain strict priority ordering), but we + /// must still track it so that: + /// - Its descendants can find it via `ancestor()` lookups + /// - We prevent those descendants from being incorrectly promoted to `independent` + /// + /// Without stashing, if a child of this transaction arrives later, it would fail to find its + /// parent in `all`, be marked as `independent`, and be yielded out of order (before its + /// parent), causing nonce gaps. + Stash(PendingTransaction), +} + +impl crate::traits::BestTransactions for BestTransactions { + fn mark_invalid(&mut self, tx: &Self::Item, kind: &InvalidPoolTransactionError) { + Self::mark_invalid(self, tx, kind) + } + + fn no_updates(&mut self) { + self.new_transaction_receiver.take(); + self.last_priority.take(); + } + + fn skip_blobs(&mut self) { + self.set_skip_blobs(true); + } + + fn set_skip_blobs(&mut self, skip_blobs: bool) { + self.skip_blobs = skip_blobs; + } +} + +impl Iterator for BestTransactions { + type Item = Arc>; + + fn next(&mut self) -> Option { + self.next_tx_and_priority().map(|(tx, _)| tx) + } +} + /// A [`BestTransactions`](crate::traits::BestTransactions) implementation that filters the /// transactions of iter with predicate. 
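The `Process`/`Stash` split documented above comes down to: a late-arriving transaction with a higher priority than something already yielded may only be recorded, never added to the independent set, so its descendants still find their ancestor and the iterator keeps yielding in descending priority order. A minimal sketch under simplified assumptions (plain `u64` priorities and `(sender, nonce)` ids instead of `TransactionId` and the broadcast channel):

use std::collections::{BTreeMap, HashSet};

// Simplified stand-ins for the pool's transaction id and priority types.
type SenderNonce = (u64, u64);

enum Incoming {
    /// Safe to consider for yielding: priority does not exceed the last yielded one.
    Process(SenderNonce, u64),
    /// Higher priority than something already yielded: only track it so that
    /// descendants still find their ancestor and are not promoted as independent.
    Stash(SenderNonce, u64),
}

struct BestIter {
    all: BTreeMap<SenderNonce, u64>,   // every tracked transaction -> priority
    independent: HashSet<SenderNonce>, // candidates with no in-pool ancestor
    last_priority: Option<u64>,        // priority of the last yielded transaction
}

impl BestIter {
    fn classify(&self, id: SenderNonce, priority: u64) -> Incoming {
        match self.last_priority {
            Some(last) if priority > last => Incoming::Stash(id, priority),
            _ => Incoming::Process(id, priority),
        }
    }

    fn add_new(&mut self, id: SenderNonce, priority: u64) {
        match self.classify(id, priority) {
            Incoming::Process(id, prio) => {
                // Same rule as the pending pool: only ancestor-free txs are independent.
                if id.1 == 0 || !self.all.contains_key(&(id.0, id.1 - 1)) {
                    self.independent.insert(id);
                }
                self.all.insert(id, prio);
            }
            Incoming::Stash(id, prio) => {
                // Track it, but never as independent, so ordering is preserved.
                self.all.insert(id, prio);
            }
        }
    }
}

fn main() {
    let mut it = BestIter {
        all: BTreeMap::new(),
        independent: HashSet::new(),
        last_priority: Some(10),
    };
    it.add_new((1, 0), 20); // arrives late with higher priority -> stashed
    it.add_new((1, 1), 5);  // child finds its parent in `all`, stays dependent
    assert!(!it.independent.contains(&(1, 0)));
    assert!(!it.independent.contains(&(1, 1)));
}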
/// @@ -277,7 +321,9 @@ where } self.best.mark_invalid( &best, - InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported), + &InvalidPoolTransactionError::Consensus( + InvalidTransactionError::TxTypeNotSupported, + ), ); } } @@ -288,7 +334,7 @@ where I: crate::traits::BestTransactions, P: FnMut(&::Item) -> bool + Send, { - fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) { + fn mark_invalid(&mut self, tx: &Self::Item, kind: &InvalidPoolTransactionError) { crate::traits::BestTransactions::mark_invalid(&mut self.best, tx, kind) } @@ -377,7 +423,7 @@ where I: crate::traits::BestTransactions>>, T: PoolTransaction, { - fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) { + fn mark_invalid(&mut self, tx: &Self::Item, kind: &InvalidPoolTransactionError) { self.inner.mark_invalid(tx, kind) } @@ -448,7 +494,7 @@ mod tests { let invalid = best.independent.iter().next().unwrap(); best.mark_invalid( &invalid.transaction.clone(), - InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported), + &InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported), ); // iterator is empty @@ -477,7 +523,7 @@ mod tests { crate::traits::BestTransactions::mark_invalid( &mut *best, &tx, - InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported), + &InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported), ); assert!(Iterator::next(&mut best).is_none()); } diff --git a/crates/transaction-pool/src/pool/events.rs b/crates/transaction-pool/src/pool/events.rs index 89cfc95bdf..034c761612 100644 --- a/crates/transaction-pool/src/pool/events.rs +++ b/crates/transaction-pool/src/pool/events.rs @@ -2,6 +2,7 @@ use crate::{traits::PropagateKind, PoolTransaction, SubPool, ValidPoolTransactio use alloy_primitives::{TxHash, B256}; use std::sync::Arc; +use crate::pool::QueuedReason; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -11,7 +12,9 @@ pub enum FullTransactionEvent { /// Transaction has been added to the pending pool. Pending(TxHash), /// Transaction has been added to the queued pool. - Queued(TxHash), + /// + /// If applicable, attached the specific reason why this was queued. + Queued(TxHash, Option), /// Transaction has been included in the block belonging to this hash. Mined { /// The hash of the mined transaction. @@ -40,7 +43,7 @@ impl Clone for FullTransactionEvent { fn clone(&self) -> Self { match self { Self::Pending(hash) => Self::Pending(*hash), - Self::Queued(hash) => Self::Queued(*hash), + Self::Queued(hash, reason) => Self::Queued(*hash, reason.clone()), Self::Mined { tx_hash, block_hash } => { Self::Mined { tx_hash: *tx_hash, block_hash: *block_hash } } @@ -80,7 +83,7 @@ impl TransactionEvent { /// Returns `true` if the event is final and no more events are expected for this transaction /// hash. pub const fn is_final(&self) -> bool { - matches!(self, Self::Replaced(_) | Self::Mined(_) | Self::Discarded) + matches!(self, Self::Replaced(_) | Self::Mined(_) | Self::Discarded | Self::Invalid) } } diff --git a/crates/transaction-pool/src/pool/listener.rs b/crates/transaction-pool/src/pool/listener.rs index 280fb4ad10..d711bb27e1 100644 --- a/crates/transaction-pool/src/pool/listener.rs +++ b/crates/transaction-pool/src/pool/listener.rs @@ -1,7 +1,10 @@ //! 
Listeners for the transaction-pool use crate::{ - pool::events::{FullTransactionEvent, NewTransactionEvent, TransactionEvent}, + pool::{ + events::{FullTransactionEvent, NewTransactionEvent, TransactionEvent}, + QueuedReason, + }, traits::{NewBlobSidecar, PropagateKind}, PoolTransaction, ValidPoolTransaction, }; @@ -17,6 +20,7 @@ use tokio::sync::mpsc::{ self as mpsc, error::TrySendError, Receiver, Sender, UnboundedReceiver, UnboundedSender, }; use tracing::debug; + /// The size of the event channel used to propagate transaction events. const TX_POOL_EVENT_CHANNEL_SIZE: usize = 1024; @@ -29,6 +33,11 @@ pub struct TransactionEvents { } impl TransactionEvents { + /// Create a new instance of this stream. + pub const fn new(hash: TxHash, events: UnboundedReceiver) -> Self { + Self { hash, events } + } + /// The hash for this transaction pub const fn hash(&self) -> TxHash { self.hash @@ -73,7 +82,7 @@ impl Stream for AllTransactionsEvents { /// This is essentially a multi-producer, multi-consumer channel where each event is broadcast to /// all active receivers. #[derive(Debug)] -pub(crate) struct PoolEventBroadcast { +pub struct PoolEventBroadcast { /// All listeners for all transaction events. all_events_broadcaster: AllPoolEventsBroadcaster, /// All listeners for events for a certain transaction hash. @@ -112,12 +121,12 @@ impl PoolEventBroadcast { /// Returns true if no listeners are installed #[inline] - pub(crate) fn is_empty(&self) -> bool { + pub fn is_empty(&self) -> bool { self.all_events_broadcaster.is_empty() && self.broadcasters_by_hash.is_empty() } /// Create a new subscription for the given transaction hash. - pub(crate) fn subscribe(&mut self, tx_hash: TxHash) -> TransactionEvents { + pub fn subscribe(&mut self, tx_hash: TxHash) -> TransactionEvents { let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); match self.broadcasters_by_hash.entry(tx_hash) { @@ -132,14 +141,14 @@ impl PoolEventBroadcast { } /// Create a new subscription for all transactions. - pub(crate) fn subscribe_all(&mut self) -> AllTransactionsEvents { + pub fn subscribe_all(&mut self) -> AllTransactionsEvents { let (tx, rx) = tokio::sync::mpsc::channel(TX_POOL_EVENT_CHANNEL_SIZE); self.all_events_broadcaster.senders.push(tx); AllTransactionsEvents::new(rx) } /// Notify listeners about a transaction that was added to the pending queue. - pub(crate) fn pending(&mut self, tx: &TxHash, replaced: Option>>) { + pub fn pending(&mut self, tx: &TxHash, replaced: Option>>) { self.broadcast_event(tx, TransactionEvent::Pending, FullTransactionEvent::Pending(*tx)); if let Some(replaced) = replaced { @@ -149,7 +158,7 @@ impl PoolEventBroadcast { } /// Notify listeners about a transaction that was replaced. - pub(crate) fn replaced(&mut self, tx: Arc>, replaced_by: TxHash) { + pub fn replaced(&mut self, tx: Arc>, replaced_by: TxHash) { let transaction = Arc::clone(&tx); self.broadcast_event( tx.hash(), @@ -159,12 +168,16 @@ impl PoolEventBroadcast { } /// Notify listeners about a transaction that was added to the queued pool. - pub(crate) fn queued(&mut self, tx: &TxHash) { - self.broadcast_event(tx, TransactionEvent::Queued, FullTransactionEvent::Queued(*tx)); + pub fn queued(&mut self, tx: &TxHash, reason: Option) { + self.broadcast_event( + tx, + TransactionEvent::Queued, + FullTransactionEvent::Queued(*tx, reason), + ); } /// Notify listeners about a transaction that was propagated. 
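With the change above, the queued notification carries an optional reason alongside the transaction hash. A small std-channel sketch of that broadcast shape; the `QueuedReason` variants and the `TxEvent` enum here are simplified stand-ins, not the pool's actual event types:

use std::collections::HashMap;
use std::sync::mpsc::{channel, Receiver, Sender};

type TxHash = [u8; 32];

// Hypothetical, reduced reason set; reth's `QueuedReason` is richer.
#[derive(Clone, Debug, PartialEq)]
enum QueuedReason {
    NonceGap,
    InsufficientBaseFee,
}

#[derive(Clone, Debug, PartialEq)]
enum TxEvent {
    Pending,
    /// A queued event optionally carries why the transaction was parked.
    Queued(Option<QueuedReason>),
}

#[derive(Default)]
struct EventBroadcast {
    by_hash: HashMap<TxHash, Vec<Sender<TxEvent>>>,
}

impl EventBroadcast {
    fn subscribe(&mut self, hash: TxHash) -> Receiver<TxEvent> {
        let (tx, rx) = channel();
        self.by_hash.entry(hash).or_default().push(tx);
        rx
    }

    /// Notify listeners that a transaction was parked, with the reason attached.
    fn queued(&mut self, hash: &TxHash, reason: Option<QueuedReason>) {
        if let Some(senders) = self.by_hash.get_mut(hash) {
            // Drop listeners whose receivers were dropped.
            senders.retain(|s| s.send(TxEvent::Queued(reason.clone())).is_ok());
        }
    }
}

fn main() {
    let mut events = EventBroadcast::default();
    let hash = [0u8; 32];
    let rx = events.subscribe(hash);
    events.queued(&hash, Some(QueuedReason::NonceGap));
    assert_eq!(rx.recv().unwrap(), TxEvent::Queued(Some(QueuedReason::NonceGap)));
}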
- pub(crate) fn propagated(&mut self, tx: &TxHash, peers: Vec) { + pub fn propagated(&mut self, tx: &TxHash, peers: Vec) { let peers = Arc::new(peers); self.broadcast_event( tx, @@ -175,7 +188,7 @@ impl PoolEventBroadcast { /// Notify listeners about all discarded transactions. #[inline] - pub(crate) fn discarded_many(&mut self, discarded: &[Arc>]) { + pub fn discarded_many(&mut self, discarded: &[Arc>]) { if self.is_empty() { return } @@ -185,17 +198,17 @@ impl PoolEventBroadcast { } /// Notify listeners about a transaction that was discarded. - pub(crate) fn discarded(&mut self, tx: &TxHash) { + pub fn discarded(&mut self, tx: &TxHash) { self.broadcast_event(tx, TransactionEvent::Discarded, FullTransactionEvent::Discarded(*tx)); } /// Notify listeners about a transaction that was invalid. - pub(crate) fn invalid(&mut self, tx: &TxHash) { + pub fn invalid(&mut self, tx: &TxHash) { self.broadcast_event(tx, TransactionEvent::Invalid, FullTransactionEvent::Invalid(*tx)); } /// Notify listeners that the transaction was mined - pub(crate) fn mined(&mut self, tx: &TxHash, block_hash: B256) { + pub fn mined(&mut self, tx: &TxHash, block_hash: B256) { self.broadcast_event( tx, TransactionEvent::Mined(block_hash), @@ -258,17 +271,18 @@ impl PoolEventBroadcaster { /// An active listener for new pending transactions. #[derive(Debug)] -pub(crate) struct PendingTransactionHashListener { - pub(crate) sender: mpsc::Sender, +pub struct PendingTransactionHashListener { + /// The sender of the channel to send transaction hashes to. + pub sender: mpsc::Sender, /// Whether to include transactions that should not be propagated over the network. - pub(crate) kind: TransactionListenerKind, + pub kind: TransactionListenerKind, } impl PendingTransactionHashListener { /// Attempts to send all hashes to the listener. /// /// Returns false if the channel is closed (receiver dropped) - pub(crate) fn send_all(&self, hashes: impl IntoIterator) -> bool { + pub fn send_all(&self, hashes: impl IntoIterator) -> bool { for tx_hash in hashes { match self.sender.try_send(tx_hash) { Ok(()) => {} @@ -292,27 +306,25 @@ impl PendingTransactionHashListener { /// An active listener for new pending transactions. #[derive(Debug)] -pub(crate) struct TransactionListener { - pub(crate) sender: mpsc::Sender>, +pub struct TransactionListener { + /// The sender of the channel to send new transaction events to. + pub sender: mpsc::Sender>, /// Whether to include transactions that should not be propagated over the network. - pub(crate) kind: TransactionListenerKind, + pub kind: TransactionListenerKind, } impl TransactionListener { /// Attempts to send the event to the listener. /// /// Returns false if the channel is closed (receiver dropped) - pub(crate) fn send(&self, event: NewTransactionEvent) -> bool { + pub fn send(&self, event: NewTransactionEvent) -> bool { self.send_all(std::iter::once(event)) } /// Attempts to send all events to the listener. 
/// /// Returns false if the channel is closed (receiver dropped) - pub(crate) fn send_all( - &self, - events: impl IntoIterator>, - ) -> bool { + pub fn send_all(&self, events: impl IntoIterator>) -> bool { for event in events { match self.sender.try_send(event) { Ok(()) => {} diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 04f0e6e0b3..4d7e450424 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -89,7 +89,6 @@ use crate::{ }; use alloy_primitives::{Address, TxHash, B256}; -use best::BestTransactions; use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; @@ -110,11 +109,13 @@ pub use pending::PendingPool; use reth_primitives_traits::Block; mod best; +pub use best::BestTransactions; + mod blob; -mod listener; +pub mod listener; mod parked; pub mod pending; -pub(crate) mod size; +pub mod size; pub(crate) mod state; pub mod txpool; mod update; @@ -144,9 +145,9 @@ where /// Manages listeners for transaction state change events. event_listener: RwLock>, /// Listeners for new _full_ pending transactions. - pending_transaction_listener: Mutex>, + pending_transaction_listener: RwLock>, /// Listeners for new transactions added to the pool. - transaction_listener: Mutex>>, + transaction_listener: RwLock>>, /// Listener for new blob transaction sidecars added to the pool. blob_transaction_sidecar_listener: Mutex>, /// Metrics for the blob store @@ -212,17 +213,17 @@ where } /// Converts the changed accounts to a map of sender ids to sender info (internal identifier - /// used for accounts) + /// used for __tracked__ accounts) fn changed_senders( &self, accs: impl Iterator, ) -> FxHashMap { - let mut identifiers = self.identifiers.write(); + let identifiers = self.identifiers.read(); accs.into_iter() - .map(|acc| { + .filter_map(|acc| { let ChangedAccount { address, nonce, balance } = acc; - let sender_id = identifiers.sender_id_or_create(address); - (sender_id, SenderInfo { state_nonce: nonce, balance }) + let sender_id = identifiers.sender_id(&address)?; + Some((sender_id, SenderInfo { state_nonce: nonce, balance })) }) .collect() } @@ -242,7 +243,12 @@ where pub fn add_pending_listener(&self, kind: TransactionListenerKind) -> mpsc::Receiver { let (sender, rx) = mpsc::channel(self.config.pending_tx_listener_buffer_size); let listener = PendingTransactionHashListener { sender, kind }; - self.pending_transaction_listener.lock().push(listener); + + let mut listeners = self.pending_transaction_listener.write(); + // Clean up dead listeners before adding new one + listeners.retain(|l| !l.sender.is_closed()); + listeners.push(listener); + rx } @@ -253,7 +259,12 @@ where ) -> mpsc::Receiver> { let (sender, rx) = mpsc::channel(self.config.new_tx_listener_buffer_size); let listener = TransactionListener { sender, kind }; - self.transaction_listener.lock().push(listener); + + let mut listeners = self.transaction_listener.write(); + // Clean up dead listeners before adding new one + listeners.retain(|l| !l.sender.is_closed()); + listeners.push(listener); + rx } /// Adds a new blob sidecar listener to the pool that gets notified about every new @@ -283,19 +294,18 @@ where self.pool.read() } - /// Returns hashes of transactions in the pool that can be propagated. 
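`changed_senders` above now only maps accounts the pool already tracks: it takes a read lock on the identifier table and drops unknown addresses via `filter_map` instead of allocating fresh sender ids on every canonical-state update. A self-contained sketch of that filtering, with plain `HashMap`s standing in for the identifier table and `SenderInfo`:

use std::collections::HashMap;

// Hypothetical stand-ins for reth's `ChangedAccount` / `SenderInfo`.
struct ChangedAccount {
    address: [u8; 20],
    nonce: u64,
    balance: u128,
}

#[derive(Debug, PartialEq)]
struct SenderInfo {
    state_nonce: u64,
    balance: u128,
}

/// Map changed accounts to internal sender ids, keeping only accounts the pool
/// already tracks. Untracked addresses are skipped instead of being assigned a
/// new id, so a state update cannot grow the identifier table.
fn changed_senders(
    known: &HashMap<[u8; 20], u64>, // address -> sender id, read-only
    accs: impl Iterator<Item = ChangedAccount>,
) -> HashMap<u64, SenderInfo> {
    accs.filter_map(|acc| {
        let sender_id = *known.get(&acc.address)?;
        Some((sender_id, SenderInfo { state_nonce: acc.nonce, balance: acc.balance }))
    })
    .collect()
}

fn main() {
    let mut known = HashMap::new();
    known.insert([1u8; 20], 7);
    let out = changed_senders(
        &known,
        vec![
            ChangedAccount { address: [1u8; 20], nonce: 3, balance: 100 },
            ChangedAccount { address: [2u8; 20], nonce: 1, balance: 50 }, // untracked -> dropped
        ]
        .into_iter(),
    );
    assert_eq!(out.len(), 1);
    assert_eq!(out[&7], SenderInfo { state_nonce: 3, balance: 100 });
}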
- pub fn pooled_transactions_hashes(&self) -> Vec { - self.get_pool_data() - .all() - .transactions_iter() - .filter(|tx| tx.propagate) - .map(|tx| *tx.hash()) - .collect() - } - /// Returns transactions in the pool that can be propagated pub fn pooled_transactions(&self) -> Vec>> { - self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).cloned().collect() + let mut out = Vec::new(); + self.append_pooled_transactions(&mut out); + out + } + + /// Returns hashes of transactions in the pool that can be propagated. + pub fn pooled_transactions_hashes(&self) -> Vec { + let mut out = Vec::new(); + self.append_pooled_transactions_hashes(&mut out); + out } /// Returns only the first `max` transactions in the pool that can be propagated. @@ -303,12 +313,61 @@ where &self, max: usize, ) -> Vec>> { + let mut out = Vec::new(); + self.append_pooled_transactions_max(max, &mut out); + out + } + + /// Extends the given vector with all transactions in the pool that can be propagated. + pub fn append_pooled_transactions( + &self, + out: &mut Vec>>, + ) { + out.extend( + self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).cloned(), + ); + } + + /// Extends the given vector with the hashes of all transactions in the pool that can be + /// propagated. + pub fn append_pooled_transactions_hashes(&self, out: &mut Vec) { + out.extend( + self.get_pool_data() + .all() + .transactions_iter() + .filter(|tx| tx.propagate) + .map(|tx| *tx.hash()), + ); + } + + /// Extends the given vector with only the first `max` transactions in the pool that can be + /// propagated. + pub fn append_pooled_transactions_max( + &self, + max: usize, + out: &mut Vec>>, + ) { + out.extend( + self.get_pool_data() + .all() + .transactions_iter() + .filter(|tx| tx.propagate) + .take(max) + .cloned(), + ); + } + + /// Returns only the first `max` hashes of transactions in the pool that can be propagated. + pub fn pooled_transactions_hashes_max(&self, max: usize) -> Vec { + if max == 0 { + return Vec::new(); + } self.get_pool_data() .all() .transactions_iter() .filter(|tx| tx.propagate) .take(max) - .cloned() + .map(|tx| *tx.hash()) .collect() } @@ -329,8 +388,7 @@ where } else { transaction .transaction - .clone() - .try_into_pooled() + .clone_into_pooled() .inspect_err(|err| { debug!( target: "txpool", %err, @@ -422,51 +480,14 @@ where let UpdateOutcome { promoted, discarded } = self.pool.write().update_accounts(changed_senders); - // Notify about promoted pending transactions (similar to notify_on_new_state) - if !promoted.is_empty() { - self.pending_transaction_listener.lock().retain_mut(|listener| { - let promoted_hashes = promoted.iter().filter_map(|tx| { - if listener.kind.is_propagate_only() && !tx.propagate { - None - } else { - Some(*tx.hash()) - } - }); - listener.send_all(promoted_hashes) - }); - - // in this case we should also emit promoted transactions in full - self.transaction_listener.lock().retain_mut(|listener| { - let promoted_txs = promoted.iter().filter_map(|tx| { - if listener.kind.is_propagate_only() && !tx.propagate { - None - } else { - Some(NewTransactionEvent::pending(tx.clone())) - } - }); - listener.send_all(promoted_txs) - }); - } - - { - let mut listener = self.event_listener.write(); - if !listener.is_empty() { - for tx in &promoted { - listener.pending(tx.hash(), None); - } - for tx in &discarded { - listener.discarded(tx.hash()); - } - } - } - - // This deletes outdated blob txs from the blob store, based on the account's nonce. 
This is - // called during txpool maintenance when the pool drifted. - self.delete_discarded_blobs(discarded.iter()); + self.notify_on_transaction_updates(promoted, discarded); } /// Add a single validated transaction into the pool. /// + /// Returns the outcome and optionally metadata to be processed after the pool lock is + /// released. + /// /// Note: this is only used internally by [`Self::add_transactions()`], all new transaction(s) /// come in through that function, either as a batch or `std::iter::once`. fn add_transaction( @@ -474,7 +495,7 @@ where pool: &mut RwLockWriteGuard<'_, TxPool>, origin: TransactionOrigin, tx: TransactionValidationOutcome, - ) -> PoolResult { + ) -> (PoolResult, Option>) { match tx { TransactionValidationOutcome::Valid { balance, @@ -488,7 +509,7 @@ where let transaction_id = TransactionId::new(sender_id, transaction.nonce()); // split the valid transaction and the blob sidecar if it has any - let (transaction, maybe_sidecar) = match transaction { + let (transaction, blob_sidecar) = match transaction { ValidTransaction::Valid(tx) => (tx, None), ValidTransaction::ValidWithSidecar { transaction, sidecar } => { debug_assert!( @@ -508,50 +529,26 @@ where authority_ids: authorities.map(|auths| self.get_sender_ids(auths)), }; - let added = pool.add_transaction(tx, balance, state_nonce, bytecode_hash)?; + let added = match pool.add_transaction(tx, balance, state_nonce, bytecode_hash) { + Ok(added) => added, + Err(err) => return (Err(err), None), + }; let hash = *added.hash(); let state = added.transaction_state(); - // transaction was successfully inserted into the pool - if let Some(sidecar) = maybe_sidecar { - // notify blob sidecar listeners - self.on_new_blob_sidecar(&hash, &sidecar); - // store the sidecar in the blob store - self.insert_blob(hash, sidecar); - } + let meta = AddedTransactionMeta { added, blob_sidecar }; - if let Some(replaced) = added.replaced_blob_transaction() { - debug!(target: "txpool", "[{:?}] delete replaced blob sidecar", replaced); - // delete the replaced transaction from the blob store - self.delete_blob(replaced); - } - - // Notify about new pending transactions - if let Some(pending) = added.as_pending() { - self.on_new_pending_transaction(pending); - } - - // Notify tx event listeners - self.notify_event_listeners(&added); - - if let Some(discarded) = added.discarded_transactions() { - self.delete_discarded_blobs(discarded.iter()); - } - - // Notify listeners for _all_ transactions - self.on_new_transaction(added.into_new_transaction_event()); - - Ok(AddedTransactionOutcome { hash, state }) + (Ok(AddedTransactionOutcome { hash, state }), Some(meta)) } TransactionValidationOutcome::Invalid(tx, err) => { let mut listener = self.event_listener.write(); listener.invalid(tx.hash()); - Err(PoolError::new(*tx.hash(), err)) + (Err(PoolError::new(*tx.hash(), err)), None) } TransactionValidationOutcome::Error(tx_hash, err) => { let mut listener = self.event_listener.write(); listener.discarded(&tx_hash); - Err(PoolError::other(tx_hash, err)) + (Err(PoolError::other(tx_hash, err)), None) } } } @@ -571,36 +568,47 @@ where Ok(listener) } - /// Adds all transactions in the iterator to the pool, each with its individual origin, - /// returning a list of results. - /// - /// Note: A large batch may lock the pool for a long time that blocks important operations - /// like updating the pool on canonical state changes. The caller should consider having - /// a max batch size to balance transaction insertions with other updates. 
- pub fn add_transactions_with_origins( + /// Adds all transactions in the iterator to the pool, returning a list of results. + pub fn add_transactions( &self, - transactions: impl IntoIterator< - Item = (TransactionOrigin, TransactionValidationOutcome), - >, + origin: TransactionOrigin, + transactions: impl IntoIterator>, ) -> Vec> { - // Process all transactions in one write lock, maintaining individual origins - let (mut added, discarded) = { + // Collect results and metadata while holding the pool write lock + let (mut results, added_metas, discarded) = { let mut pool = self.pool.write(); - let added = transactions + let mut added_metas = Vec::new(); + + let results = transactions .into_iter() - .map(|(origin, tx)| self.add_transaction(&mut pool, origin, tx)) + .map(|tx| { + let (result, meta) = self.add_transaction(&mut pool, origin, tx); + + // Only collect metadata for successful insertions + if result.is_ok() && + let Some(meta) = meta + { + added_metas.push(meta); + } + + result + }) .collect::>(); // Enforce the pool size limits if at least one transaction was added successfully - let discarded = if added.iter().any(Result::is_ok) { + let discarded = if results.iter().any(Result::is_ok) { pool.discard_worst() } else { Default::default() }; - (added, discarded) + (results, added_metas, discarded) }; + for meta in added_metas { + self.on_added_transaction(meta); + } + if !discarded.is_empty() { // Delete any blobs associated with discarded blob transactions self.delete_discarded_blobs(discarded.iter()); @@ -611,60 +619,113 @@ where // A newly added transaction may be immediately discarded, so we need to // adjust the result here - for res in &mut added { + for res in &mut results { if let Ok(AddedTransactionOutcome { hash, .. }) = res && discarded_hashes.contains(hash) { *res = Err(PoolError::new(*hash, PoolErrorKind::DiscardedOnInsert)) } } - } + }; - added + results } - /// Adds all transactions in the iterator to the pool, returning a list of results. + /// Process a transaction that was added to the pool. /// - /// Note: A large batch may lock the pool for a long time that blocks important operations - /// like updating the pool on canonical state changes. The caller should consider having - /// a max batch size to balance transaction insertions with other updates. - pub fn add_transactions( - &self, - origin: TransactionOrigin, - transactions: impl IntoIterator>, - ) -> Vec> { - self.add_transactions_with_origins(transactions.into_iter().map(|tx| (origin, tx))) + /// Performs blob storage operations and sends all notifications. This should be called + /// after the pool write lock has been released to avoid blocking pool operations. + fn on_added_transaction(&self, meta: AddedTransactionMeta) { + // Handle blob sidecar storage and notifications for EIP-4844 transactions + if let Some(sidecar) = meta.blob_sidecar { + let hash = *meta.added.hash(); + self.on_new_blob_sidecar(&hash, &sidecar); + self.insert_blob(hash, sidecar); + } + + // Delete replaced blob sidecar if any + if let Some(replaced) = meta.added.replaced_blob_transaction() { + debug!(target: "txpool", "[{:?}] delete replaced blob sidecar", replaced); + self.delete_blob(replaced); + } + + // Delete discarded blob sidecars if any, this doesnt do any IO. 
+ if let Some(discarded) = meta.added.discarded_transactions() { + self.delete_discarded_blobs(discarded.iter()); + } + + // Notify pending transaction listeners + if let Some(pending) = meta.added.as_pending() { + self.on_new_pending_transaction(pending); + } + + // Notify event listeners + self.notify_event_listeners(&meta.added); + + // Notify new transaction listeners + self.on_new_transaction(meta.added.into_new_transaction_event()); } /// Notify all listeners about a new pending transaction. - fn on_new_pending_transaction(&self, pending: &AddedPendingTransaction) { - let propagate_allowed = pending.is_propagate_allowed(); + /// + /// See also [`Self::add_pending_listener`] + /// + /// CAUTION: This function is only intended to be used manually in order to use this type's + /// pending transaction receivers when manually implementing the + /// [`TransactionPool`](crate::TransactionPool) trait for a custom pool implementation + /// [`TransactionPool::pending_transactions_listener_for`](crate::TransactionPool). + pub fn on_new_pending_transaction(&self, pending: &AddedPendingTransaction) { + let mut needs_cleanup = false; - let mut transaction_listeners = self.pending_transaction_listener.lock(); - transaction_listeners.retain_mut(|listener| { - if listener.kind.is_propagate_only() && !propagate_allowed { - // only emit this hash to listeners that are only allowed to receive propagate only - // transactions, such as network - return !listener.sender.is_closed() + { + let listeners = self.pending_transaction_listener.read(); + for listener in listeners.iter() { + if !listener.send_all(pending.pending_transactions(listener.kind)) { + needs_cleanup = true; + } } + } - // broadcast all pending transactions to the listener - listener.send_all(pending.pending_transactions(listener.kind)) - }); + // Clean up dead listeners if we detected any closed channels + if needs_cleanup { + self.pending_transaction_listener + .write() + .retain(|listener| !listener.sender.is_closed()); + } } /// Notify all listeners about a newly inserted pending transaction. - fn on_new_transaction(&self, event: NewTransactionEvent) { - let mut transaction_listeners = self.transaction_listener.lock(); - transaction_listeners.retain_mut(|listener| { - if listener.kind.is_propagate_only() && !event.transaction.propagate { - // only emit this hash to listeners that are only allowed to receive propagate only - // transactions, such as network - return !listener.sender.is_closed() - } + /// + /// See also [`Self::add_new_transaction_listener`] + /// + /// CAUTION: This function is only intended to be used manually in order to use this type's + /// transaction receivers when manually implementing the + /// [`TransactionPool`](crate::TransactionPool) trait for a custom pool implementation + /// [`TransactionPool::new_transactions_listener_for`](crate::TransactionPool). 
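The split above, collecting per-transaction metadata while the pool write lock is held and running blob-store IO plus listener notifications only after it is released, reduces to a two-phase shape. A minimal sketch with hypothetical `Pool`/`AddedMeta` types, a std `Mutex`, and placeholder side effects instead of the real notifications:

use std::sync::Mutex;

// Hypothetical, simplified stand-ins for the pool, its insert outcome, and the
// per-transaction metadata processed once the lock is released.
struct Pool {
    txs: Vec<u64>,
}

struct AddedMeta {
    hash: u64,
    has_sidecar: bool,
}

struct PoolInner {
    pool: Mutex<Pool>,
}

impl PoolInner {
    fn add_transactions(&self, hashes: impl IntoIterator<Item = u64>) -> Vec<u64> {
        // Phase 1: mutate the pool under the lock, but only *collect* follow-up work.
        let (results, metas) = {
            let mut pool = self.pool.lock().unwrap();
            let mut metas = Vec::new();
            let results: Vec<u64> = hashes
                .into_iter()
                .map(|hash| {
                    pool.txs.push(hash);
                    metas.push(AddedMeta { hash, has_sidecar: hash % 2 == 0 });
                    hash
                })
                .collect();
            (results, metas)
        }; // lock released here

        // Phase 2: notifications and blob-store IO happen without blocking the pool.
        for meta in metas {
            self.on_added(meta);
        }
        results
    }

    fn on_added(&self, meta: AddedMeta) {
        if meta.has_sidecar {
            // e.g. persist the sidecar and notify blob listeners
        }
        let _ = meta.hash; // e.g. notify pending/transaction listeners
    }
}

fn main() {
    let inner = PoolInner { pool: Mutex::new(Pool { txs: Vec::new() }) };
    let results = inner.add_transactions([1, 2, 3]);
    assert_eq!(results, vec![1, 2, 3]);
    assert_eq!(inner.pool.lock().unwrap().txs, vec![1, 2, 3]);
}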
+ pub fn on_new_transaction(&self, event: NewTransactionEvent) { + let mut needs_cleanup = false; - listener.send(event.clone()) - }); + { + let listeners = self.transaction_listener.read(); + for listener in listeners.iter() { + if listener.kind.is_propagate_only() && !event.transaction.propagate { + if listener.sender.is_closed() { + needs_cleanup = true; + } + // Skip non-propagate transactions for propagate-only listeners + continue + } + + if !listener.send(event.clone()) { + needs_cleanup = true; + } + } + } + + // Clean up dead listeners if we detected any closed channels + if needs_cleanup { + self.transaction_listener.write().retain(|listener| !listener.sender.is_closed()); + } } /// Notify all listeners about a blob sidecar for a newly inserted blob (eip4844) transaction. @@ -698,16 +759,33 @@ where fn notify_on_new_state(&self, outcome: OnNewCanonicalStateOutcome) { trace!(target: "txpool", promoted=outcome.promoted.len(), discarded= outcome.discarded.len() ,"notifying listeners on state change"); - // notify about promoted pending transactions - // emit hashes - self.pending_transaction_listener - .lock() - .retain_mut(|listener| listener.send_all(outcome.pending_transactions(listener.kind))); + // notify about promoted pending transactions - emit hashes + let mut needs_pending_cleanup = false; + { + let listeners = self.pending_transaction_listener.read(); + for listener in listeners.iter() { + if !listener.send_all(outcome.pending_transactions(listener.kind)) { + needs_pending_cleanup = true; + } + } + } + if needs_pending_cleanup { + self.pending_transaction_listener.write().retain(|l| !l.sender.is_closed()); + } // emit full transactions - self.transaction_listener.lock().retain_mut(|listener| { - listener.send_all(outcome.full_pending_transactions(listener.kind)) - }); + let mut needs_tx_cleanup = false; + { + let listeners = self.transaction_listener.read(); + for listener in listeners.iter() { + if !listener.send_all(outcome.full_pending_transactions(listener.kind)) { + needs_tx_cleanup = true; + } + } + } + if needs_tx_cleanup { + self.transaction_listener.write().retain(|l| !l.sender.is_closed()); + } let OnNewCanonicalStateOutcome { mined, promoted, discarded, block_hash } = outcome; @@ -727,8 +805,92 @@ where } } + /// Notifies all listeners about the transaction movements. + /// + /// This will emit events according to the provided changes. + /// + /// CAUTION: This function is only intended to be used manually in order to use this type's + /// [`TransactionEvents`] receivers when manually implementing the + /// [`TransactionPool`](crate::TransactionPool) trait for a custom pool implementation + /// [`TransactionPool::transaction_event_listener`](crate::TransactionPool). 
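The `needs_cleanup` pattern above sends to every listener while holding only the shared read lock and takes the exclusive write lock once afterwards, and only if a channel turned out to be closed. A std-only sketch of the same two-phase fan-out; note that reth's tokio senders expose `is_closed()`, which std channels lack, so this version remembers failed indices instead and assumes a single notifier:

use std::sync::{
    mpsc::{sync_channel, Receiver, SyncSender, TrySendError},
    RwLock,
};

#[derive(Default)]
struct Listeners {
    senders: RwLock<Vec<SyncSender<u64>>>,
}

impl Listeners {
    fn subscribe(&self, capacity: usize) -> Receiver<u64> {
        let (tx, rx) = sync_channel(capacity);
        self.senders.write().unwrap().push(tx);
        rx
    }

    /// Fan out under the read lock, then prune dead listeners in one short
    /// write-lock pass, but only if a send actually failed.
    fn notify(&self, event: u64) {
        let mut dead = Vec::new();
        {
            // Read phase: concurrent notifiers do not block each other.
            let senders = self.senders.read().unwrap();
            for (idx, sender) in senders.iter().enumerate() {
                if let Err(TrySendError::Disconnected(_)) = sender.try_send(event) {
                    dead.push(idx);
                }
            }
        }
        if !dead.is_empty() {
            // Write phase: remove from the back so earlier indices stay valid.
            let mut senders = self.senders.write().unwrap();
            for idx in dead.into_iter().rev() {
                if idx < senders.len() {
                    senders.remove(idx);
                }
            }
        }
    }
}

fn main() {
    let listeners = Listeners::default();
    let live = listeners.subscribe(8);
    drop(listeners.subscribe(8)); // receiver dropped => listener is dead
    listeners.notify(42);
    assert_eq!(live.recv().unwrap(), 42);
    assert_eq!(listeners.senders.read().unwrap().len(), 1);
}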
+ #[allow(clippy::type_complexity)] + pub fn notify_on_transaction_updates( + &self, + promoted: Vec>>, + discarded: Vec>>, + ) { + // Notify about promoted pending transactions (similar to notify_on_new_state) + if !promoted.is_empty() { + let mut needs_pending_cleanup = false; + { + let listeners = self.pending_transaction_listener.read(); + for listener in listeners.iter() { + let promoted_hashes = promoted.iter().filter_map(|tx| { + if listener.kind.is_propagate_only() && !tx.propagate { + None + } else { + Some(*tx.hash()) + } + }); + if !listener.send_all(promoted_hashes) { + needs_pending_cleanup = true; + } + } + } + if needs_pending_cleanup { + self.pending_transaction_listener.write().retain(|l| !l.sender.is_closed()); + } + + // in this case we should also emit promoted transactions in full + let mut needs_tx_cleanup = false; + { + let listeners = self.transaction_listener.read(); + for listener in listeners.iter() { + let promoted_txs = promoted.iter().filter_map(|tx| { + if listener.kind.is_propagate_only() && !tx.propagate { + None + } else { + Some(NewTransactionEvent::pending(tx.clone())) + } + }); + if !listener.send_all(promoted_txs) { + needs_tx_cleanup = true; + } + } + } + if needs_tx_cleanup { + self.transaction_listener.write().retain(|l| !l.sender.is_closed()); + } + } + + { + let mut listener = self.event_listener.write(); + if !listener.is_empty() { + for tx in &promoted { + listener.pending(tx.hash(), None); + } + for tx in &discarded { + listener.discarded(tx.hash()); + } + } + } + + if !discarded.is_empty() { + // This deletes outdated blob txs from the blob store, based on the account's nonce. + // This is called during txpool maintenance when the pool drifted. + self.delete_discarded_blobs(discarded.iter()); + } + } + /// Fire events for the newly added transaction if there are any. - fn notify_event_listeners(&self, tx: &AddedTransaction) { + /// + /// See also [`Self::add_transaction_event_listener`]. + /// + /// CAUTION: This function is only intended to be used manually in order to use this type's + /// [`TransactionEvents`] receivers when manually implementing the + /// [`TransactionPool`](crate::TransactionPool) trait for a custom pool implementation + /// [`TransactionPool::transaction_event_listener`](crate::TransactionPool). + pub fn notify_event_listeners(&self, tx: &AddedTransaction) { let mut listener = self.event_listener.write(); if listener.is_empty() { // nothing to notify @@ -747,8 +909,8 @@ where listener.discarded(tx.hash()); } } - AddedTransaction::Parked { transaction, replaced, .. } => { - listener.queued(transaction.hash()); + AddedTransaction::Parked { transaction, replaced, queued_reason, .. } => { + listener.queued(transaction.hash(), queued_reason.clone()); if let Some(replaced) = replaced { listener.replaced(replaced.clone(), *transaction.hash()); } @@ -1060,17 +1222,29 @@ impl fmt::Debug for PoolInner { } } +/// Metadata for a transaction that was added to the pool. +/// +/// This holds all the data needed to complete post-insertion operations (notifications, +/// blob storage). +#[derive(Debug)] +struct AddedTransactionMeta { + /// The transaction that was added to the pool + added: AddedTransaction, + /// Optional blob sidecar for EIP-4844 transactions + blob_sidecar: Option, +} + /// Tracks an added transaction and all graph changes caused by adding it. #[derive(Debug, Clone)] pub struct AddedPendingTransaction { /// Inserted transaction. - transaction: Arc>, + pub transaction: Arc>, /// Replaced transaction. 
- replaced: Option>>, + pub replaced: Option>>, /// transactions promoted to the pending queue - promoted: Vec>>, + pub promoted: Vec>>, /// transactions that failed and became discarded - discarded: Vec>>, + pub discarded: Vec>>, } impl AddedPendingTransaction { @@ -1086,11 +1260,6 @@ impl AddedPendingTransaction { let iter = std::iter::once(&self.transaction).chain(self.promoted.iter()); PendingTransactionIter { kind, iter } } - - /// Returns if the transaction should be propagated. - pub(crate) fn is_propagate_allowed(&self) -> bool { - self.transaction.propagate - } } pub(crate) struct PendingTransactionIter { @@ -1164,7 +1333,7 @@ pub enum AddedTransaction { impl AddedTransaction { /// Returns whether the transaction has been added to the pending pool. - pub(crate) const fn as_pending(&self) -> Option<&AddedPendingTransaction> { + pub const fn as_pending(&self) -> Option<&AddedPendingTransaction> { match self { Self::Pending(tx) => Some(tx), _ => None, @@ -1172,7 +1341,7 @@ impl AddedTransaction { } /// Returns the replaced transaction if there was one - pub(crate) const fn replaced(&self) -> Option<&Arc>> { + pub const fn replaced(&self) -> Option<&Arc>> { match self { Self::Pending(tx) => tx.replaced.as_ref(), Self::Parked { replaced, .. } => replaced.as_ref(), @@ -1193,7 +1362,7 @@ impl AddedTransaction { } /// Returns the hash of the transaction - pub(crate) fn hash(&self) -> &TxHash { + pub fn hash(&self) -> &TxHash { match self { Self::Pending(tx) => tx.transaction.hash(), Self::Parked { transaction, .. } => transaction.hash(), @@ -1201,7 +1370,7 @@ impl AddedTransaction { } /// Converts this type into the event type for listeners - pub(crate) fn into_new_transaction_event(self) -> NewTransactionEvent { + pub fn into_new_transaction_event(self) -> NewTransactionEvent { match self { Self::Pending(tx) => { NewTransactionEvent { subpool: SubPool::Pending, transaction: tx.transaction } @@ -1230,7 +1399,7 @@ impl AddedTransaction { } /// Returns the queued reason if the transaction is parked with a queued reason. - pub(crate) const fn queued_reason(&self) -> Option<&QueuedReason> { + pub const fn queued_reason(&self) -> Option<&QueuedReason> { match self { Self::Pending(_) => None, Self::Parked { queued_reason, .. } => queued_reason.as_ref(), @@ -1238,7 +1407,7 @@ impl AddedTransaction { } /// Returns the transaction state based on the subpool and queued reason. 
- pub(crate) fn transaction_state(&self) -> AddedTransactionState { + pub fn transaction_state(&self) -> AddedTransactionState { match self.subpool() { SubPool::Pending => AddedTransactionState::Pending, _ => { diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs index 193442174c..3ca86d8106 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -186,11 +186,11 @@ impl ParkedPool { { // NOTE: This will not panic due to `!last_sender_transaction.is_empty()` let sender_id = self.last_sender_submission.last().unwrap().sender_id; - let list = self.get_txs_by_sender(sender_id); // Drop transactions from this sender until the pool is under limits - for txid in list.into_iter().rev() { - if let Some(tx) = self.remove_transaction(&txid) { + while let Some((tx_id, _)) = self.by_id.range(sender_id.range()).next_back() { + let tx_id = *tx_id; + if let Some(tx) = self.remove_transaction(&tx_id) { removed.push(tx); } diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 317066137d..8f43493410 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -278,14 +278,6 @@ impl PendingPool { } } - /// Returns the ancestor the given transaction, the transaction with `nonce - 1`. - /// - /// Note: for a transaction with nonce higher than the current on chain nonce this will always - /// return an ancestor since all transaction in this pool are gapless. - fn ancestor(&self, id: &TransactionId) -> Option<&PendingTransaction> { - self.get(&id.unchecked_ancestor()?) - } - /// Adds a new transactions to the pending queue. /// /// # Panics @@ -342,14 +334,35 @@ impl PendingPool { let tx = self.by_id.remove(id)?; self.size_of -= tx.transaction.size(); - if let Some(highest) = self.highest_nonces.get(&id.sender) { - if highest.transaction.nonce() == id.nonce { - self.highest_nonces.remove(&id.sender); + match self.highest_nonces.entry(id.sender) { + Entry::Occupied(mut entry) => { + if entry.get().transaction.nonce() == id.nonce { + // we just removed the tx with the highest nonce for this sender, find the + // highest remaining tx from that sender + if let Some((_, new_highest)) = self + .by_id + .range(( + id.sender.start_bound(), + std::ops::Bound::Included(TransactionId::new(id.sender, u64::MAX)), + )) + .last() + { + // insert the new highest nonce for this sender + entry.insert(new_highest.clone()); + } else { + entry.remove(); + } + } } - if let Some(ancestor) = self.ancestor(id) { - self.highest_nonces.insert(id.sender, ancestor.clone()); + Entry::Vacant(_) => { + debug_assert!( + false, + "removed transaction without a tracked highest nonce {:?}", + id + ); } } + Some(tx.transaction) } @@ -970,6 +983,7 @@ mod tests { } #[test] + #[cfg(debug_assertions)] #[should_panic(expected = "transaction already included")] fn test_handle_duplicates() { let mut f = MockTransactionFactory::default(); @@ -1054,4 +1068,61 @@ mod tests { assert!(pool.get_txs_by_sender(sender_b).is_empty()); assert!(pool.get_txs_by_sender(sender_c).is_empty()); } + + #[test] + fn test_remove_non_highest_keeps_highest() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + let sender = address!("0x00000000000000000000000000000000000000aa"); + let txs = MockTransactionSet::dependent(sender, 0, 3, TxType::Eip1559).into_vec(); + for tx in txs { + 
pool.add_transaction(f.validated_arc(tx), 0); + } + pool.assert_invariants(); + let sender_id = f.ids.sender_id(&sender).unwrap(); + let mid_id = TransactionId::new(sender_id, 1); + let _ = pool.remove_transaction(&mid_id); + let highest = pool.highest_nonces.get(&sender_id).unwrap(); + assert_eq!(highest.transaction.nonce(), 2); + pool.assert_invariants(); + } + + #[test] + fn test_cascade_removal_recomputes_highest() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + let sender = address!("0x00000000000000000000000000000000000000bb"); + let txs = MockTransactionSet::dependent(sender, 0, 4, TxType::Eip1559).into_vec(); + for tx in txs { + pool.add_transaction(f.validated_arc(tx), 0); + } + pool.assert_invariants(); + let sender_id = f.ids.sender_id(&sender).unwrap(); + let id3 = TransactionId::new(sender_id, 3); + let _ = pool.remove_transaction(&id3); + let highest = pool.highest_nonces.get(&sender_id).unwrap(); + assert_eq!(highest.transaction.nonce(), 2); + let id2 = TransactionId::new(sender_id, 2); + let _ = pool.remove_transaction(&id2); + let highest = pool.highest_nonces.get(&sender_id).unwrap(); + assert_eq!(highest.transaction.nonce(), 1); + pool.assert_invariants(); + } + + #[test] + fn test_remove_only_tx_clears_highest() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + let sender = address!("0x00000000000000000000000000000000000000cc"); + let txs = MockTransactionSet::dependent(sender, 0, 1, TxType::Eip1559).into_vec(); + for tx in txs { + pool.add_transaction(f.validated_arc(tx), 0); + } + pool.assert_invariants(); + let sender_id = f.ids.sender_id(&sender).unwrap(); + let id0 = TransactionId::new(sender_id, 0); + let _ = pool.remove_transaction(&id0); + assert!(!pool.highest_nonces.contains_key(&sender_id)); + pool.assert_invariants(); + } } diff --git a/crates/transaction-pool/src/pool/state.rs b/crates/transaction-pool/src/pool/state.rs index 187d472f5a..20351a7a0e 100644 --- a/crates/transaction-pool/src/pool/state.rs +++ b/crates/transaction-pool/src/pool/state.rs @@ -117,7 +117,25 @@ impl TxState { } } SubPool::BaseFee => Some(QueuedReason::InsufficientBaseFee), - SubPool::Blob => Some(QueuedReason::InsufficientBlobFee), + SubPool::Blob => { + // For blob transactions, derive the queued reason from flags similarly to Queued. 
+ if !self.contains(Self::NO_NONCE_GAPS) { + Some(QueuedReason::NonceGap) + } else if !self.contains(Self::ENOUGH_BALANCE) { + Some(QueuedReason::InsufficientBalance) + } else if !self.contains(Self::NO_PARKED_ANCESTORS) { + Some(QueuedReason::ParkedAncestors) + } else if !self.contains(Self::NOT_TOO_MUCH_GAS) { + Some(QueuedReason::TooMuchGas) + } else if !self.contains(Self::ENOUGH_FEE_CAP_BLOCK) { + Some(QueuedReason::InsufficientBaseFee) + } else if !self.contains(Self::ENOUGH_BLOB_FEE_CAP_BLOCK) { + Some(QueuedReason::InsufficientBlobFee) + } else { + // Fallback for unexpected non-pending blob state + Some(QueuedReason::InsufficientBlobFee) + } + } } } } @@ -308,4 +326,84 @@ mod tests { assert!(state.is_blob()); assert!(!state.is_pending()); } + + #[test] + fn test_blob_reason_insufficient_base_fee() { + // Blob tx with all structural bits set and blob fee sufficient, but base fee insufficient + let state = TxState::NO_PARKED_ANCESTORS | + TxState::NO_NONCE_GAPS | + TxState::ENOUGH_BALANCE | + TxState::NOT_TOO_MUCH_GAS | + TxState::ENOUGH_BLOB_FEE_CAP_BLOCK | + TxState::BLOB_TRANSACTION; + // ENOUGH_FEE_CAP_BLOCK intentionally not set + let subpool: SubPool = state.into(); + assert_eq!(subpool, SubPool::Blob); + let reason = state.determine_queued_reason(subpool); + assert_eq!(reason, Some(QueuedReason::InsufficientBaseFee)); + } + + #[test] + fn test_blob_reason_insufficient_blob_fee() { + // Blob tx with all structural bits set and base fee sufficient, but blob fee insufficient + let state = TxState::NO_PARKED_ANCESTORS | + TxState::NO_NONCE_GAPS | + TxState::ENOUGH_BALANCE | + TxState::NOT_TOO_MUCH_GAS | + TxState::ENOUGH_FEE_CAP_BLOCK | + TxState::BLOB_TRANSACTION; + // ENOUGH_BLOB_FEE_CAP_BLOCK intentionally not set + let subpool: SubPool = state.into(); + assert_eq!(subpool, SubPool::Blob); + let reason = state.determine_queued_reason(subpool); + assert_eq!(reason, Some(QueuedReason::InsufficientBlobFee)); + } + + #[test] + fn test_blob_reason_nonce_gap() { + // Blob tx with nonce gap should report NonceGap regardless of fee bits + let mut state = TxState::NO_PARKED_ANCESTORS | + TxState::ENOUGH_BALANCE | + TxState::NOT_TOO_MUCH_GAS | + TxState::ENOUGH_FEE_CAP_BLOCK | + TxState::ENOUGH_BLOB_FEE_CAP_BLOCK | + TxState::BLOB_TRANSACTION; + state.remove(TxState::NO_NONCE_GAPS); + let subpool: SubPool = state.into(); + assert_eq!(subpool, SubPool::Blob); + let reason = state.determine_queued_reason(subpool); + assert_eq!(reason, Some(QueuedReason::NonceGap)); + } + + #[test] + fn test_blob_reason_insufficient_balance() { + // Blob tx with insufficient balance + let state = TxState::NO_PARKED_ANCESTORS | + TxState::NO_NONCE_GAPS | + TxState::NOT_TOO_MUCH_GAS | + TxState::ENOUGH_FEE_CAP_BLOCK | + TxState::ENOUGH_BLOB_FEE_CAP_BLOCK | + TxState::BLOB_TRANSACTION; + // ENOUGH_BALANCE intentionally not set + let subpool: SubPool = state.into(); + assert_eq!(subpool, SubPool::Blob); + let reason = state.determine_queued_reason(subpool); + assert_eq!(reason, Some(QueuedReason::InsufficientBalance)); + } + + #[test] + fn test_blob_reason_too_much_gas() { + // Blob tx exceeding gas limit + let mut state = TxState::NO_PARKED_ANCESTORS | + TxState::NO_NONCE_GAPS | + TxState::ENOUGH_BALANCE | + TxState::ENOUGH_FEE_CAP_BLOCK | + TxState::ENOUGH_BLOB_FEE_CAP_BLOCK | + TxState::BLOB_TRANSACTION; + state.remove(TxState::NOT_TOO_MUCH_GAS); + let subpool: SubPool = state.into(); + assert_eq!(subpool, SubPool::Blob); + let reason = state.determine_queued_reason(subpool); + assert_eq!(reason, 
Some(QueuedReason::TooMuchGas)); + } } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 49247dc8b8..b9e305f7e4 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -85,8 +85,6 @@ use tracing::{trace, warn}; /// new --> |apply state changes| pool /// ``` pub struct TxPool { - /// Contains the currently known information about the senders. - sender_info: FxHashMap, /// pending subpool /// /// Holds transactions that are ready to be executed on the current state. @@ -116,8 +114,6 @@ pub struct TxPool { all_transactions: AllTransactions, /// Transaction pool metrics metrics: TxPoolMetrics, - /// The last update kind that was applied to the pool. - latest_update_kind: Option, } // === impl TxPool === @@ -126,7 +122,6 @@ impl TxPool { /// Create a new graph pool instance. pub fn new(ordering: T, config: PoolConfig) -> Self { Self { - sender_info: Default::default(), pending_pool: PendingPool::with_buffer( ordering, config.max_new_pending_txs_notifications, @@ -137,7 +132,6 @@ impl TxPool { all_transactions: AllTransactions::new(&config), config, metrics: Default::default(), - latest_update_kind: None, } } @@ -168,7 +162,7 @@ impl TxPool { let mut last_consecutive_tx = None; // ensure this operates on the most recent - if let Some(current) = self.sender_info.get(&on_chain.sender) { + if let Some(current) = self.all_transactions.sender_info.get(&on_chain.sender) { on_chain.nonce = on_chain.nonce.max(current.state_nonce); } @@ -628,7 +622,7 @@ impl TxPool { let updates = self.all_transactions.update(&changed_senders); // track changed accounts - self.sender_info.extend(changed_senders); + self.all_transactions.sender_info.extend(changed_senders); // Process the sub-pool updates let update = self.process_updates(updates); @@ -646,7 +640,7 @@ impl TxPool { block_info: BlockInfo, mined_transactions: Vec, changed_senders: FxHashMap, - update_kind: PoolUpdateKind, + _update_kind: PoolUpdateKind, ) -> OnNewCanonicalStateOutcome { // update block info let block_hash = block_info.last_seen_block_hash; @@ -682,9 +676,6 @@ impl TxPool { self.update_transaction_type_metrics(); self.metrics.performed_state_updates.increment(1); - // Update the latest update kind - self.latest_update_kind = Some(update_kind); - OnNewCanonicalStateOutcome { block_hash, mined: mined_transactions, @@ -714,6 +705,7 @@ impl TxPool { let mut eip1559_count = 0; let mut eip4844_count = 0; let mut eip7702_count = 0; + let mut other_count = 0; for tx in self.all_transactions.transactions_iter() { match tx.transaction.ty() { @@ -722,7 +714,7 @@ impl TxPool { EIP1559_TX_TYPE_ID => eip1559_count += 1, EIP4844_TX_TYPE_ID => eip4844_count += 1, EIP7702_TX_TYPE_ID => eip7702_count += 1, - _ => {} // Ignore other types + _ => other_count += 1, } } @@ -731,6 +723,7 @@ impl TxPool { self.metrics.total_eip1559_transactions.set(eip1559_count as f64); self.metrics.total_eip4844_transactions.set(eip4844_count as f64); self.metrics.total_eip7702_transactions.set(eip7702_count as f64); + self.metrics.total_other_transactions.set(other_count as f64); } pub(crate) fn add_transaction( @@ -747,7 +740,8 @@ impl TxPool { self.validate_auth(&tx, on_chain_nonce, on_chain_code_hash)?; // Update sender info with balance and nonce - self.sender_info + self.all_transactions + .sender_info .entry(tx.sender_id()) .or_default() .update(on_chain_nonce, on_chain_balance); @@ -942,6 +936,7 @@ impl TxPool { /// This will move/discard the given transaction 
according to the `PoolUpdate` fn process_updates(&mut self, updates: Vec) -> UpdateOutcome { let mut outcome = UpdateOutcome::default(); + let mut removed = 0; for PoolUpdate { id, current, destination } in updates { match destination { Destination::Discard => { @@ -949,7 +944,7 @@ impl TxPool { if let Some(tx) = self.prune_transaction_by_id(&id) { outcome.discarded.push(tx); } - self.metrics.removed_transactions.increment(1); + removed += 1; } Destination::Pool(move_to) => { debug_assert_ne!(&move_to, ¤t, "destination must be different"); @@ -964,6 +959,10 @@ impl TxPool { } } + if removed > 0 { + self.metrics.removed_transactions.increment(removed); + } + outcome } @@ -1328,6 +1327,8 @@ pub(crate) struct AllTransactions { by_hash: HashMap>>, /// _All_ transaction in the pool sorted by their sender and nonce pair. txs: BTreeMap>, + /// Contains the currently known information about the senders. + sender_info: FxHashMap, /// Tracks the number of transactions by sender that are currently in the pool. tx_counter: FxHashMap, /// The current block number the pool keeps track of. @@ -1395,6 +1396,7 @@ impl AllTransactions { let count = entry.get_mut(); if *count == 1 { entry.remove(); + self.sender_info.remove(&sender); self.metrics.all_transactions_by_all_senders.decrement(1.0); return } @@ -2132,6 +2134,7 @@ impl Default for AllTransactions { block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M, by_hash: Default::default(), txs: Default::default(), + sender_info: Default::default(), tx_counter: Default::default(), last_seen_block_number: Default::default(), last_seen_block_hash: Default::default(), @@ -3618,7 +3621,7 @@ mod tests { // update the tracked nonce let mut info = SenderInfo::default(); info.update(8, U256::ZERO); - pool.sender_info.insert(sender_id, info); + pool.all_transactions.sender_info.insert(sender_id, info); let next_tx = pool.get_highest_consecutive_transaction_by_sender(sender_id.into_transaction_id(5)); assert_eq!(next_tx.map(|tx| tx.nonce()), Some(9), "Expected nonce 9 for on-chain nonce 8"); diff --git a/crates/transaction-pool/src/test_utils/pool.rs b/crates/transaction-pool/src/test_utils/pool.rs index ab7bebae2f..90d66b47fa 100644 --- a/crates/transaction-pool/src/test_utils/pool.rs +++ b/crates/transaction-pool/src/test_utils/pool.rs @@ -3,7 +3,8 @@ #![allow(dead_code)] use crate::{ - pool::{txpool::TxPool, AddedTransaction}, + error::PoolErrorKind, + pool::{state::SubPool, txpool::TxPool, AddedTransaction}, test_utils::{MockOrdering, MockTransactionDistribution, MockTransactionFactory}, TransactionOrdering, }; @@ -74,6 +75,8 @@ pub(crate) struct MockTransactionSimulator { executed: HashMap, /// "Validates" generated transactions. validator: MockTransactionFactory, + /// Represents the gaps in nonces for each sender. + nonce_gaps: HashMap, /// The rng instance used to select senders and scenarios. 
rng: R, } @@ -91,10 +94,23 @@ impl MockTransactionSimulator { tx_generator: config.tx_generator, executed: Default::default(), validator: Default::default(), + nonce_gaps: Default::default(), rng, } } + /// Creates a pool configured for this simulator + /// + /// This is needed because `MockPool::default()` sets `pending_basefee` to 7, but we might want + /// to use different values + pub(crate) fn create_pool(&self) -> MockPool { + let mut pool = MockPool::default(); + let mut info = pool.block_info(); + info.pending_basefee = self.base_fee as u64; + pool.set_block_info(info); + pool + } + /// Returns a random address from the senders set fn rng_address(&mut self) -> Address { let idx = self.rng.random_range(0..self.senders.len()); @@ -116,17 +132,20 @@ impl MockTransactionSimulator { match scenario { ScenarioType::OnchainNonce => { - let tx = self - .tx_generator - .tx(on_chain_nonce, &mut self.rng) - .with_gas_price(self.base_fee); + // uses fee from fee_ranges + let tx = self.tx_generator.tx(on_chain_nonce, &mut self.rng).with_sender(sender); let valid_tx = self.validator.validated(tx); let res = - pool.add_transaction(valid_tx, on_chain_balance, on_chain_nonce, None).unwrap(); - - // TODO(mattsse): need a way expect based on the current state of the pool and tx - // settings + match pool.add_transaction(valid_tx, on_chain_balance, on_chain_nonce, None) { + Ok(res) => res, + Err(e) => match e.kind { + // skip pool capacity/replacement errors (not relevant) + PoolErrorKind::SpammerExceededCapacity(_) | + PoolErrorKind::ReplacementUnderpriced => return, + _ => panic!("unexpected error: {e:?}"), + }, + }; match res { AddedTransaction::Pending(_) => {} @@ -135,15 +154,173 @@ impl MockTransactionSimulator { } } - // TODO(mattsse): check subpools + self.executed + .entry(sender) + .or_insert_with(|| ExecutedScenarios { sender, scenarios: vec![] }) // in the case of a new sender + .scenarios + .push(ExecutedScenario { + balance: on_chain_balance, + nonce: on_chain_nonce, + scenario: Scenario::OnchainNonce { nonce: on_chain_nonce }, + }); + + self.nonces.insert(sender, on_chain_nonce + 1); } - ScenarioType::HigherNonce { .. } => { - unimplemented!() + + ScenarioType::HigherNonce { skip } => { + // if this sender already has a nonce gap, skip + if self.nonce_gaps.contains_key(&sender) { + return; + } + + let higher_nonce = on_chain_nonce + skip; + + // uses fee from fee_ranges + let tx = self.tx_generator.tx(higher_nonce, &mut self.rng).with_sender(sender); + let valid_tx = self.validator.validated(tx); + + let res = + match pool.add_transaction(valid_tx, on_chain_balance, on_chain_nonce, None) { + Ok(res) => res, + Err(e) => match e.kind { + // skip pool capacity/replacement errors (not relevant) + PoolErrorKind::SpammerExceededCapacity(_) | + PoolErrorKind::ReplacementUnderpriced => return, + _ => panic!("unexpected error: {e:?}"), + }, + }; + + match res { + AddedTransaction::Pending(_) => { + panic!("expected parked") + } + AddedTransaction::Parked { subpool, .. 
} => { + assert_eq!( + subpool, + SubPool::Queued, + "expected to be moved to queued subpool" + ); + } + } + + self.executed + .entry(sender) + .or_insert_with(|| ExecutedScenarios { sender, scenarios: vec![] }) // in the case of a new sender + .scenarios + .push(ExecutedScenario { + balance: on_chain_balance, + nonce: on_chain_nonce, + scenario: Scenario::HigherNonce { + onchain: on_chain_nonce, + nonce: higher_nonce, + }, + }); + self.nonce_gaps.insert(sender, higher_nonce); + } + + ScenarioType::BelowBaseFee { fee } => { + // fee should be in [MIN_PROTOCOL_BASE_FEE, base_fee) + let tx = self + .tx_generator + .tx(on_chain_nonce, &mut self.rng) + .with_sender(sender) + .with_gas_price(fee); + let valid_tx = self.validator.validated(tx); + + let res = + match pool.add_transaction(valid_tx, on_chain_balance, on_chain_nonce, None) { + Ok(res) => res, + Err(e) => match e.kind { + // skip pool capacity/replacement errors (not relevant) + PoolErrorKind::SpammerExceededCapacity(_) | + PoolErrorKind::ReplacementUnderpriced => return, + _ => panic!("unexpected error: {e:?}"), + }, + }; + + match res { + AddedTransaction::Pending(_) => panic!("expected parked"), + AddedTransaction::Parked { subpool, .. } => { + assert_eq!( + subpool, + SubPool::BaseFee, + "expected to be moved to base fee subpool" + ); + } + } + self.executed + .entry(sender) + .or_insert_with(|| ExecutedScenarios { sender, scenarios: vec![] }) // in the case of a new sender + .scenarios + .push(ExecutedScenario { + balance: on_chain_balance, + nonce: on_chain_nonce, + scenario: Scenario::BelowBaseFee { fee }, + }); + } + + ScenarioType::FillNonceGap => { + if self.nonce_gaps.is_empty() { + return; + } + + let gap_senders: Vec
<Address>
= self.nonce_gaps.keys().copied().collect(); + let idx = self.rng.random_range(0..gap_senders.len()); + let gap_sender = gap_senders[idx]; + let queued_nonce = self.nonce_gaps[&gap_sender]; + + let sender_onchain_nonce = self.nonces[&gap_sender]; + let sender_balance = self.balances[&gap_sender]; + + for fill_nonce in sender_onchain_nonce..queued_nonce { + let tx = + self.tx_generator.tx(fill_nonce, &mut self.rng).with_sender(gap_sender); + let valid_tx = self.validator.validated(tx); + + let res = match pool.add_transaction( + valid_tx, + sender_balance, + sender_onchain_nonce, + None, + ) { + Ok(res) => res, + Err(e) => match e.kind { + // skip pool capacity/replacement errors (not relevant) + PoolErrorKind::SpammerExceededCapacity(_) | + PoolErrorKind::ReplacementUnderpriced => return, + _ => panic!("unexpected error: {e:?}"), + }, + }; + + match res { + AddedTransaction::Pending(_) => {} + AddedTransaction::Parked { .. } => { + panic!("expected pending when filling gap") + } + } + + self.executed + .entry(gap_sender) + .or_insert_with(|| ExecutedScenarios { + sender: gap_sender, + scenarios: vec![], + }) + .scenarios + .push(ExecutedScenario { + balance: sender_balance, + nonce: fill_nonce, + scenario: Scenario::FillNonceGap { + filled_nonce: fill_nonce, + promoted_nonce: queued_nonce, + }, + }); + } + self.nonces.insert(gap_sender, queued_nonce + 1); + self.nonce_gaps.remove(&gap_sender); } } - // make sure everything is set - pool.enforce_invariants() + pool.enforce_invariants(); } } @@ -172,6 +349,8 @@ impl MockSimulatorConfig { pub(crate) enum ScenarioType { OnchainNonce, HigherNonce { skip: u64 }, + BelowBaseFee { fee: u128 }, + FillNonceGap, } /// The actual scenario, ready to be executed @@ -186,10 +365,12 @@ pub(crate) enum Scenario { OnchainNonce { nonce: u64 }, /// Send a tx with a higher nonce that what the sender has on chain HigherNonce { onchain: u64, nonce: u64 }, - Multi { - // Execute multiple test scenarios - scenario: Vec, - }, + /// Send a tx with a base fee below the base fee of the pool + BelowBaseFee { fee: u128 }, + /// Fill a nonce gap to promote queued transactions + FillNonceGap { filled_nonce: u64, promoted_nonce: u64 }, + /// Execute multiple test scenarios + Multi { scenario: Vec }, } /// Represents an executed scenario @@ -226,17 +407,18 @@ mod tests { blob_pct: 0, }; + let base_fee = 10u128; let fee_ranges = MockFeeRange { - gas_price: (10u128..100).try_into().unwrap(), - priority_fee: (10u128..100).try_into().unwrap(), - max_fee: (100u128..110).try_into().unwrap(), + gas_price: (base_fee..100).try_into().unwrap(), + priority_fee: (1u128..10).try_into().unwrap(), + max_fee: (base_fee..110).try_into().unwrap(), max_fee_blob: (1u128..100).try_into().unwrap(), }; let config = MockSimulatorConfig { num_senders: 10, scenarios: vec![ScenarioType::OnchainNonce], - base_fee: 10, + base_fee, tx_generator: MockTransactionDistribution::new( transaction_ratio, fee_ranges, @@ -245,8 +427,181 @@ mod tests { ), }; let mut simulator = MockTransactionSimulator::new(rand::rng(), config); - let mut pool = MockPool::default(); + let mut pool = simulator.create_pool(); simulator.next(&mut pool); + assert_eq!(pool.pending().len(), 1); + assert_eq!(pool.queued().len(), 0); + assert_eq!(pool.base_fee().len(), 0); + } + + #[test] + fn test_higher_nonce_scenario() { + let transaction_ratio = MockTransactionRatio { + legacy_pct: 30, + dynamic_fee_pct: 70, + access_list_pct: 0, + blob_pct: 0, + }; + + let base_fee = 10u128; + let fee_ranges = MockFeeRange { + gas_price: 
(base_fee..100).try_into().unwrap(), + priority_fee: (1u128..10).try_into().unwrap(), + max_fee: (base_fee..110).try_into().unwrap(), + max_fee_blob: (1u128..100).try_into().unwrap(), + }; + + let config = MockSimulatorConfig { + num_senders: 10, + scenarios: vec![ScenarioType::HigherNonce { skip: 1 }], + base_fee, + tx_generator: MockTransactionDistribution::new( + transaction_ratio, + fee_ranges, + 10..100, + 10..100, + ), + }; + let mut simulator = MockTransactionSimulator::new(rand::rng(), config); + let mut pool = simulator.create_pool(); + + simulator.next(&mut pool); + assert_eq!(pool.pending().len(), 0); + assert_eq!(pool.queued().len(), 1); + assert_eq!(pool.base_fee().len(), 0); + } + + #[test] + fn test_below_base_fee_scenario() { + let transaction_ratio = MockTransactionRatio { + legacy_pct: 30, + dynamic_fee_pct: 70, + access_list_pct: 0, + blob_pct: 0, + }; + + let base_fee = 10u128; + let fee_ranges = MockFeeRange { + gas_price: (base_fee..100).try_into().unwrap(), + priority_fee: (1u128..10).try_into().unwrap(), + max_fee: (base_fee..110).try_into().unwrap(), + max_fee_blob: (1u128..100).try_into().unwrap(), + }; + + let config = MockSimulatorConfig { + num_senders: 10, + scenarios: vec![ScenarioType::BelowBaseFee { fee: 8 }], /* fee should be in + * [MIN_PROTOCOL_BASE_FEE, + * base_fee) */ + base_fee, + tx_generator: MockTransactionDistribution::new( + transaction_ratio, + fee_ranges, + 10..100, + 10..100, + ), + }; + let mut simulator = MockTransactionSimulator::new(rand::rng(), config); + let mut pool = simulator.create_pool(); + + simulator.next(&mut pool); + assert_eq!(pool.pending().len(), 0); + assert_eq!(pool.queued().len(), 0); + assert_eq!(pool.base_fee().len(), 1); + } + + #[test] + fn test_fill_nonce_gap_scenario() { + let transaction_ratio = MockTransactionRatio { + legacy_pct: 30, + dynamic_fee_pct: 70, + access_list_pct: 0, + blob_pct: 0, + }; + + let base_fee = 10u128; + let fee_ranges = MockFeeRange { + gas_price: (base_fee..100).try_into().unwrap(), + priority_fee: (1u128..10).try_into().unwrap(), + max_fee: (base_fee..110).try_into().unwrap(), + max_fee_blob: (1u128..100).try_into().unwrap(), + }; + + let config = MockSimulatorConfig { + num_senders: 5, + scenarios: vec![ScenarioType::HigherNonce { skip: 5 }], + base_fee, + tx_generator: MockTransactionDistribution::new( + transaction_ratio, + fee_ranges, + 10..100, + 10..100, + ), + }; + let mut simulator = MockTransactionSimulator::new(rand::rng(), config); + let mut pool = simulator.create_pool(); + + // create some nonce gaps + for _ in 0..10 { + simulator.next(&mut pool); + } + + let num_gaps = simulator.nonce_gaps.len(); + + assert_eq!(pool.pending().len(), 0); + assert_eq!(pool.queued().len(), num_gaps); + assert_eq!(pool.base_fee().len(), 0); + + simulator.scenarios = vec![ScenarioType::FillNonceGap]; + for _ in 0..num_gaps { + simulator.next(&mut pool); + } + + let expected_pending = num_gaps * 6; + assert_eq!(pool.pending().len(), expected_pending); + assert_eq!(pool.queued().len(), 0); + assert_eq!(pool.base_fee().len(), 0); + } + + #[test] + fn test_random_scenarios() { + let transaction_ratio = MockTransactionRatio { + legacy_pct: 30, + dynamic_fee_pct: 70, + access_list_pct: 0, + blob_pct: 0, + }; + + let base_fee = 10u128; + let fee_ranges = MockFeeRange { + gas_price: (base_fee..100).try_into().unwrap(), + priority_fee: (1u128..10).try_into().unwrap(), + max_fee: (base_fee..110).try_into().unwrap(), + max_fee_blob: (1u128..100).try_into().unwrap(), + }; + + let config = MockSimulatorConfig 
{ + num_senders: 10, + scenarios: vec![ + ScenarioType::OnchainNonce, + ScenarioType::HigherNonce { skip: 2 }, + ScenarioType::BelowBaseFee { fee: 8 }, + ScenarioType::FillNonceGap, + ], + base_fee, + tx_generator: MockTransactionDistribution::new( + transaction_ratio, + fee_ranges, + 10..100, + 10..100, + ), + }; + let mut simulator = MockTransactionSimulator::new(rand::rng(), config); + let mut pool = simulator.create_pool(); + + for _ in 0..1000 { + simulator.next(&mut pool); + } } } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 2b9d8bae8a..f6e7403846 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -177,16 +177,6 @@ pub trait TransactionPool: Clone + Debug + Send + Sync { transactions: Vec, ) -> impl Future>> + Send; - /// Adds multiple _unvalidated_ transactions with individual origins. - /// - /// Each transaction can have its own [`TransactionOrigin`]. - /// - /// Consumer: RPC - fn add_transactions_with_origins( - &self, - transactions: Vec<(TransactionOrigin, Self::Transaction)>, - ) -> impl Future>> + Send; - /// Submit a consensus transaction directly to the pool fn add_consensus_transaction( &self, @@ -293,6 +283,16 @@ pub trait TransactionPool: Clone + Debug + Send + Sync { NewSubpoolTransactionStream::new(self.new_transactions_listener(), SubPool::Queued) } + /// Returns a new Stream that yields new transactions added to the blob sub-pool. + /// + /// This is a convenience wrapper around [`Self::new_transactions_listener`] that filters for + /// [`SubPool::Blob`](crate::SubPool). + fn new_blob_pool_transactions_listener( + &self, + ) -> NewSubpoolTransactionStream { + NewSubpoolTransactionStream::new(self.new_transactions_listener(), SubPool::Blob) + } + /// Returns the _hashes_ of all transactions in the pool that are allowed to be propagated. /// /// This excludes hashes that aren't allowed to be propagated. @@ -638,6 +638,15 @@ pub trait TransactionPool: Clone + Debug + Send + Sync { &self, versioned_hashes: &[B256], ) -> Result>, BlobStoreError>; + + /// Return the [`BlobAndProofV2`]s for a list of blob versioned hashes. + /// + /// The response is always the same length as the request. Missing or older-version blobs are + /// returned as `None` elements. + fn get_blobs_for_versioned_hashes_v3( + &self, + versioned_hashes: &[B256], + ) -> Result>, BlobStoreError>; } /// Extension for [`TransactionPool`] trait that allows to set the current block info. @@ -699,12 +708,12 @@ impl AllPoolTransactions { /// Returns an iterator over all pending [`Recovered`] transactions. pub fn pending_recovered(&self) -> impl Iterator> + '_ { - self.pending.iter().map(|tx| tx.transaction.clone().into_consensus()) + self.pending.iter().map(|tx| tx.transaction.clone_into_consensus()) } /// Returns an iterator over all queued [`Recovered`] transactions. pub fn queued_recovered(&self) -> impl Iterator> + '_ { - self.queued.iter().map(|tx| tx.transaction.clone().into_consensus()) + self.queued.iter().map(|tx| tx.transaction.clone_into_consensus()) } /// Returns an iterator over all transactions, both pending and queued. @@ -712,7 +721,7 @@ impl AllPoolTransactions { self.pending .iter() .chain(self.queued.iter()) - .map(|tx| tx.transaction.clone().into_consensus()) + .map(|tx| tx.transaction.clone_into_consensus()) } } @@ -920,7 +929,7 @@ pub trait BestTransactions: Iterator + Send { /// Implementers must ensure all subsequent transaction _don't_ depend on this transaction. 
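
A hedged sketch of the positional contract documented for `get_blobs_for_versioned_hashes_v3` above: the response can be zipped with the request, treating `None` entries as blobs the pool cannot serve in this format. The helper name, `pool`, and `hashes` below are illustrative and not part of this change.

fn lookup_blobs_v3<P: TransactionPool>(pool: &P, hashes: &[B256]) {
    // The response is always the same length as the request.
    let blobs = pool.get_blobs_for_versioned_hashes_v3(hashes).expect("blob store error");
    debug_assert_eq!(blobs.len(), hashes.len());
    for (hash, blob) in hashes.iter().zip(blobs) {
        if blob.is_none() {
            // Missing or older-version blob for `hash`: respond with null for this index.
            let _ = hash;
        }
    }
}
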
/// In other words, this must remove the given transaction _and_ drain all transaction that /// depend on it. - fn mark_invalid(&mut self, transaction: &Self::Item, kind: InvalidPoolTransactionError); + fn mark_invalid(&mut self, transaction: &Self::Item, kind: &InvalidPoolTransactionError); /// An iterator may be able to receive additional pending transactions that weren't present it /// the pool when it was created. @@ -982,7 +991,7 @@ impl BestTransactions for Box where T: BestTransactions + ?Sized, { - fn mark_invalid(&mut self, transaction: &Self::Item, kind: InvalidPoolTransactionError) { + fn mark_invalid(&mut self, transaction: &Self::Item, kind: &InvalidPoolTransactionError) { (**self).mark_invalid(transaction, kind) } @@ -1001,7 +1010,7 @@ where /// A no-op implementation that yields no transactions. impl BestTransactions for std::iter::Empty { - fn mark_invalid(&mut self, _tx: &T, _kind: InvalidPoolTransactionError) {} + fn mark_invalid(&mut self, _tx: &T, _kind: &InvalidPoolTransactionError) {} fn no_updates(&mut self) {} @@ -1181,6 +1190,14 @@ pub trait PoolTransaction: Ok(Recovered::new_unchecked(tx.try_into()?, signer)) } + /// Clones the consensus transactions and tries to convert the `Consensus` type into the + /// `Pooled` type. + fn clone_into_pooled(&self) -> Result, Self::TryFromConsensusError> { + let consensus = self.clone_into_consensus(); + let (tx, signer) = consensus.into_parts(); + Ok(Recovered::new_unchecked(tx.try_into()?, signer)) + } + /// Converts the `Pooled` type into the `Consensus` type. fn pooled_into_consensus(tx: Self::Pooled) -> Self::Consensus { tx.into() @@ -1225,6 +1242,11 @@ pub trait PoolTransaction: Ok(()) } } + + /// Allows to communicate to the pool that the transaction doesn't require a nonce check. + fn requires_nonce_check(&self) -> bool { + true + } } /// Super trait for transactions that can be converted to and from Eth transactions intended for the @@ -1580,7 +1602,7 @@ pub struct PoolSize { pub queued_size: usize, /// Number of all transactions of all sub-pools /// - /// Note: this is the sum of ```pending + basefee + queued``` + /// Note: this is the sum of ```pending + basefee + queued + blob``` pub total: usize, } diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 1436093d5b..da3e8680e5 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -207,7 +207,7 @@ where &self, origin: TransactionOrigin, transaction: Tx, - state: &mut Option>, + state: &mut Option>, ) -> TransactionValidationOutcome { self.validate_one_with_provider(origin, transaction, state) } @@ -219,7 +219,7 @@ where &self, origin: TransactionOrigin, transaction: Tx, - maybe_state: &mut Option>, + maybe_state: &mut Option>, ) -> TransactionValidationOutcome { match self.validate_one_no_state(origin, transaction) { Ok(transaction) => { @@ -247,6 +247,20 @@ where } } + /// Validates a single transaction with the provided state provider. + pub fn validate_one_with_state_provider( + &self, + origin: TransactionOrigin, + transaction: Tx, + state: impl AccountInfoReader, + ) -> TransactionValidationOutcome { + let tx = match self.validate_one_no_state(origin, transaction) { + Ok(tx) => tx, + Err(invalid_outcome) => return invalid_outcome, + }; + self.validate_one_against_state(origin, tx, state) + } + /// Performs stateless validation on single transaction. 
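
A hedged sketch of the call shape enabled by the new `validate_one_with_state_provider` helper above: stateless checks run first via `validate_one_no_state`, then the supplied `AccountInfoReader` is consulted for balance, nonce, and code. The `validator`, `tx`, and `state` bindings are illustrative, not part of the diff.

let outcome = validator.validate_one_with_state_provider(TransactionOrigin::Local, tx, state);
match outcome {
    TransactionValidationOutcome::Valid { .. } => { /* forward to the pool */ }
    TransactionValidationOutcome::Invalid(_tx, _err) => { /* stateless or stateful rejection */ }
    _ => { /* provider or internal error */ }
}
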
Returns unaltered input transaction /// if all checks pass, so transaction can continue through to stateful validation as argument /// to [`validate_one_against_state`](Self::validate_one_against_state). @@ -396,15 +410,12 @@ where match self.tx_fee_cap { Some(0) | None => {} // Skip if cap is 0 or None Some(tx_fee_cap_wei) => { - // max possible tx fee is (gas_price * gas_limit) - // (if EIP1559) max possible tx fee is (max_fee_per_gas * gas_limit) - let gas_price = transaction.max_fee_per_gas(); - let max_tx_fee_wei = gas_price.saturating_mul(transaction_gas_limit as u128); + let max_tx_fee_wei = transaction.cost().saturating_sub(transaction.value()); if max_tx_fee_wei > tx_fee_cap_wei { return Err(TransactionValidationOutcome::Invalid( transaction, InvalidPoolTransactionError::ExceedsFeeCap { - max_tx_fee_wei, + max_tx_fee_wei: max_tx_fee_wei.saturating_to(), tx_fee_cap_wei, }, )) @@ -534,7 +545,9 @@ where }; // Checks for nonce - if let Err(err) = self.validate_sender_nonce(&transaction, &account) { + if transaction.requires_nonce_check() && + let Err(err) = self.validate_sender_nonce(&transaction, &account) + { return TransactionValidationOutcome::Invalid(transaction, err) } @@ -1713,7 +1726,7 @@ mod tests { ExtendedAccount::new(transaction.nonce(), alloy_primitives::U256::ZERO), ); - // Valdiate with balance check enabled + // Validate with balance check enabled let validator = EthTransactionValidatorBuilder::new(provider.clone()) .build(InMemoryBlobStore::default()); @@ -1729,7 +1742,7 @@ mod tests { panic!("Expected Invalid outcome with InsufficientFunds error"); } - // Valdiate with balance check disabled + // Validate with balance check disabled let validator = EthTransactionValidatorBuilder::new(provider) .disable_balance_check() // This should allow the transaction through despite zero balance .build(InMemoryBlobStore::default()); diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 725f83c392..59e187dd1c 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -74,6 +74,14 @@ impl TransactionValidationOutcome { } } + /// Returns the [`ValidTransaction`] if this is a [`TransactionValidationOutcome::Valid`]. + pub const fn as_valid_transaction(&self) -> Option<&ValidTransaction> { + match self { + Self::Valid { transaction, .. } => Some(transaction), + _ => None, + } + } + /// Returns true if the transaction is valid. pub const fn is_valid(&self) -> bool { matches!(self, Self::Valid { .. }) @@ -452,11 +460,7 @@ impl ValidPoolTransaction { /// This applies to both standard gas fees and, for blob-carrying transactions (EIP-4844), /// the blob-specific fees. #[inline] - pub(crate) fn is_underpriced( - &self, - maybe_replacement: &Self, - price_bumps: &PriceBumpConfig, - ) -> bool { + pub fn is_underpriced(&self, maybe_replacement: &Self, price_bumps: &PriceBumpConfig) -> bool { // Retrieve the required price bump percentage for this type of transaction. // // The bump is different for EIP-4844 and other transactions. See `PriceBumpConfig`. 
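
To make the fee-cap change above concrete: assuming `cost()` is the transaction's maximum total cost (value plus the worst-case gas fee, and the blob fee for EIP-4844 transactions), `cost() - value()` is exactly the largest fee the sender can pay, which is what should be compared against the configured cap. A small self-contained arithmetic sketch with illustrative numbers:

fn max_fee_example() {
    let gas_limit = 21_000u128;
    let max_fee_per_gas = 50_000_000_000u128; // 50 gwei
    let value = 1_000_000_000_000_000_000u128; // 1 ETH transferred

    // cost() for a non-blob transaction: value + max_fee_per_gas * gas_limit
    let cost = value + max_fee_per_gas * gas_limit;

    // The fee-cap check only cares about the fee portion, independent of `value`.
    let max_tx_fee_wei = cost - value;
    assert_eq!(max_tx_fee_wei, 1_050_000_000_000_000); // 0.00105 ETH
}
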
@@ -515,7 +519,7 @@ impl fmt::Debug for ValidPoolTransaction { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ValidPoolTransaction") .field("id", &self.transaction_id) - .field("pragate", &self.propagate) + .field("propagate", &self.propagate) .field("origin", &self.origin) .field("hash", self.transaction.hash()) .field("tx", &self.transaction) diff --git a/crates/transaction-pool/src/validate/task.rs b/crates/transaction-pool/src/validate/task.rs index fc22ce4ceb..0959d5b3fd 100644 --- a/crates/transaction-pool/src/validate/task.rs +++ b/crates/transaction-pool/src/validate/task.rs @@ -180,9 +180,15 @@ impl TransactionValidationTaskExecutor { /// /// Initializes the executor with the provided validator and sets up communication for /// validation tasks. - pub fn new(validator: V) -> Self { - let (tx, _) = ValidationTask::new(); - Self { validator: Arc::new(validator), to_validation_task: Arc::new(sync::Mutex::new(tx)) } + pub fn new(validator: V) -> (Self, ValidationTask) { + let (tx, task) = ValidationTask::new(); + ( + Self { + validator: Arc::new(validator), + to_validation_task: Arc::new(sync::Mutex::new(tx)), + }, + task, + ) } } @@ -285,3 +291,60 @@ where self.validator.on_new_head_block(new_tip_block) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + test_utils::MockTransaction, + validate::{TransactionValidationOutcome, ValidTransaction}, + TransactionOrigin, + }; + use alloy_primitives::{Address, U256}; + + #[derive(Debug)] + struct NoopValidator; + + impl TransactionValidator for NoopValidator { + type Transaction = MockTransaction; + + async fn validate_transaction( + &self, + _origin: TransactionOrigin, + transaction: Self::Transaction, + ) -> TransactionValidationOutcome { + TransactionValidationOutcome::Valid { + balance: U256::ZERO, + state_nonce: 0, + bytecode_hash: None, + transaction: ValidTransaction::Valid(transaction), + propagate: false, + authorities: Some(Vec::
::new()), + } + } + } + + #[tokio::test] + async fn executor_new_spawns_and_validates_single() { + let validator = NoopValidator; + let (executor, task) = TransactionValidationTaskExecutor::new(validator); + tokio::spawn(task.run()); + let tx = MockTransaction::legacy(); + let out = executor.validate_transaction(TransactionOrigin::External, tx).await; + assert!(matches!(out, TransactionValidationOutcome::Valid { .. })); + } + + #[tokio::test] + async fn executor_new_spawns_and_validates_batch() { + let validator = NoopValidator; + let (executor, task) = TransactionValidationTaskExecutor::new(validator); + tokio::spawn(task.run()); + let txs = vec![ + (TransactionOrigin::External, MockTransaction::legacy()), + (TransactionOrigin::Local, MockTransaction::legacy()), + ]; + let out = executor.validate_transactions(txs).await; + assert_eq!(out.len(), 2); + assert!(out.iter().all(|o| matches!(o, TransactionValidationOutcome::Valid { .. }))); + } +} diff --git a/crates/transaction-pool/tests/it/listeners.rs b/crates/transaction-pool/tests/it/listeners.rs index d0a9c9c5aa..25d95dd86f 100644 --- a/crates/transaction-pool/tests/it/listeners.rs +++ b/crates/transaction-pool/tests/it/listeners.rs @@ -28,6 +28,52 @@ async fn txpool_listener_by_hash() { assert_matches!(events.next().await, Some(TransactionEvent::Discarded)); } +#[tokio::test(flavor = "multi_thread")] +async fn txpool_listener_pending_promotions_propagate_only() { + let txpool = + TestPoolBuilder::default().with_validator(MockTransactionValidator::no_propagate_local()); + let mut mock_tx_factory = MockTransactionFactory::default(); + + // Create two transactions from the same sender with nonces 0 (local, non-propagatable) and 1 + let mut tx_local = mock_tx_factory.create_eip1559(); + let mut tx_external = mock_tx_factory.create_eip1559(); + + // Ensure same sender + let sender = *tx_local.transaction.get_sender(); + tx_external.transaction.set_sender(sender); + + // Set explicit nonces to create a nonce gap scenario + tx_local.transaction.set_nonce(0); + tx_external.transaction.set_nonce(1); + + let hash_local = *tx_local.transaction.hash(); + let hash_external = *tx_external.transaction.hash(); + + // Listeners: propagate-only and all + let mut listener_network = txpool.pending_transactions_listener(); + let mut listener_all = txpool.pending_transactions_listener_for(TransactionListenerKind::All); + + // Insert the higher-nonce external tx first; it should be queued due to nonce gap + let res = + txpool.add_transaction(TransactionOrigin::External, tx_external.transaction.clone()).await; + assert!(res.is_ok()); + + // Now insert the local tx with nonce 0; it becomes pending and should promote the external tx + let res = txpool.add_transaction(TransactionOrigin::Local, tx_local.transaction.clone()).await; + assert!(res.is_ok()); + + // All-listener should receive both pending hashes in order: inserted local first, then promoted + // external + let inserted_all_first = listener_all.recv().await.unwrap(); + let inserted_all_second = listener_all.recv().await.unwrap(); + assert_eq!(inserted_all_first, hash_local); + assert_eq!(inserted_all_second, hash_external); + + // Propagate-only listener should receive only the external tx (local is non-propagatable) + let inserted_network = listener_network.recv().await.unwrap(); + assert_eq!(inserted_network, hash_external); +} + #[tokio::test(flavor = "multi_thread")] async fn txpool_listener_replace_event() { let txpool = TestPoolBuilder::default(); @@ -82,7 +128,7 @@ async fn 
txpool_listener_queued_event() { assert_matches!(events.next().await, Some(TransactionEvent::Queued)); // The listener of all should receive queued event as well. - assert_matches!(all_tx_events.next().await, Some(FullTransactionEvent::Queued(hash)) if hash == *transaction.get_hash()); + assert_matches!(all_tx_events.next().await, Some(FullTransactionEvent::Queued(hash,_ )) if hash == *transaction.get_hash()); } #[tokio::test(flavor = "multi_thread")] diff --git a/crates/trie/common/benches/prefix_set.rs b/crates/trie/common/benches/prefix_set.rs index bc2a8dc259..b5703e1941 100644 --- a/crates/trie/common/benches/prefix_set.rs +++ b/crates/trie/common/benches/prefix_set.rs @@ -76,12 +76,6 @@ pub fn prefix_set_lookups(c: &mut Criterion) { test_data.clone(), size, ); - prefix_set_bench::( - &mut group, - "`Vec` with binary search lookup", - test_data.clone(), - size, - ); } } @@ -207,43 +201,6 @@ mod implementations { false } } - - #[derive(Default)] - pub struct VecBinarySearchPrefixSet { - keys: Vec, - sorted: bool, - } - - impl PrefixSetMutAbstraction for VecBinarySearchPrefixSet { - type Frozen = Self; - - fn insert(&mut self, key: Nibbles) { - self.sorted = false; - self.keys.push(key); - } - - fn freeze(self) -> Self::Frozen { - self - } - } - - impl PrefixSetAbstraction for VecBinarySearchPrefixSet { - fn contains(&mut self, prefix: Nibbles) -> bool { - if !self.sorted { - self.keys.sort(); - self.sorted = true; - } - - match self.keys.binary_search(&prefix) { - Ok(_) => true, - Err(idx) => match self.keys.get(idx) { - Some(key) => key.starts_with(&prefix), - None => false, // prefix > last key - }, - } - } - } - #[derive(Default)] pub struct VecCursorPrefixSet { keys: Vec, diff --git a/crates/trie/common/src/hashed_state.rs b/crates/trie/common/src/hashed_state.rs index 8fb994dadd..edfb821bc6 100644 --- a/crates/trie/common/src/hashed_state.rs +++ b/crates/trie/common/src/hashed_state.rs @@ -9,7 +9,7 @@ use crate::{ use alloc::{borrow::Cow, vec::Vec}; use alloy_primitives::{ keccak256, - map::{hash_map, B256Map, B256Set, HashMap, HashSet}, + map::{hash_map, B256Map, HashMap, HashSet}, Address, B256, U256, }; use itertools::Itertools; @@ -22,7 +22,8 @@ use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use revm_database::{AccountStatus, BundleAccount}; -/// Representation of in-memory hashed state. +/// In-memory hashed state that stores account and storage changes with keccak256-hashed keys in +/// hash maps. #[derive(PartialEq, Eq, Clone, Default, Debug)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct HashedPostState { @@ -278,6 +279,15 @@ impl HashedPostState { ChunkedHashedPostState::new(self, size) } + /// Returns the number of items that will be considered during chunking in `[Self::chunks]`. + pub fn chunking_length(&self) -> usize { + self.accounts.len() + + self.storages + .values() + .map(|storage| if storage.wiped { 1 } else { 0 } + storage.storage.len()) + .sum::() + } + /// Extend this hashed post state with contents of another. /// Entries in the second hashed post state take precedence. pub fn extend(&mut self, other: Self) { @@ -322,19 +332,40 @@ impl HashedPostState { } } - /// Converts hashed post state into [`HashedPostStateSorted`]. 
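
A worked example (illustrative numbers only) for the `chunking_length` helper above: each account entry counts once, each storage slot counts once, and a wiped storage contributes one extra item for the wipe itself, so the number of chunks is always `chunking_length.div_ceil(size)`.

fn chunking_length_example() {
    let accounts = 3usize;        // three changed accounts
    let wiped_storage = 1 + 2;    // wipe marker + 2 slots
    let regular_storage = 4;      // 4 slots, not wiped
    let chunking_length = accounts + wiped_storage + regular_storage;
    assert_eq!(chunking_length, 10);
    // chunks(4) over such a state would yield ceil(10 / 4) = 3 chunks.
    assert_eq!(chunking_length.div_ceil(4), 3);
}
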
- pub fn into_sorted(self) -> HashedPostStateSorted { - let mut updated_accounts = Vec::new(); - let mut destroyed_accounts = HashSet::default(); - for (hashed_address, info) in self.accounts { - if let Some(info) = info { - updated_accounts.push((hashed_address, info)); - } else { - destroyed_accounts.insert(hashed_address); + /// Extend this hashed post state with sorted data, converting directly into the unsorted + /// `HashMap` representation. This is more efficient than first converting to `HashedPostState` + /// and then extending, as it avoids creating intermediate `HashMap` allocations. + pub fn extend_from_sorted(&mut self, sorted: &HashedPostStateSorted) { + // Reserve capacity for accounts + self.accounts.reserve(sorted.accounts.len()); + + // Insert accounts (Some = updated, None = destroyed) + for (address, account) in &sorted.accounts { + self.accounts.insert(*address, *account); + } + + // Reserve capacity for storages + self.storages.reserve(sorted.storages.len()); + + // Extend storages + for (hashed_address, sorted_storage) in &sorted.storages { + match self.storages.entry(*hashed_address) { + hash_map::Entry::Vacant(entry) => { + let mut new_storage = HashedStorage::new(false); + new_storage.extend_from_sorted(sorted_storage); + entry.insert(new_storage); + } + hash_map::Entry::Occupied(mut entry) => { + entry.get_mut().extend_from_sorted(sorted_storage); + } } } - updated_accounts.sort_unstable_by_key(|(address, _)| *address); - let accounts = HashedAccountsSorted { accounts: updated_accounts, destroyed_accounts }; + } + + /// Converts hashed post state into [`HashedPostStateSorted`]. + pub fn into_sorted(self) -> HashedPostStateSorted { + let mut accounts: Vec<_> = self.accounts.into_iter().collect(); + accounts.sort_unstable_by_key(|(address, _)| *address); let storages = self .storages @@ -345,30 +376,16 @@ impl HashedPostState { HashedPostStateSorted { accounts, storages } } - /// Converts hashed post state into [`HashedPostStateSorted`], but keeping the maps allocated by - /// draining. - /// - /// This effectively clears all the fields in the [`HashedPostStateSorted`]. - /// - /// This allows us to reuse the allocated space. This allocates new space for the sorted hashed - /// post state, like `into_sorted`. - pub fn drain_into_sorted(&mut self) -> HashedPostStateSorted { - let mut updated_accounts = Vec::new(); - let mut destroyed_accounts = HashSet::default(); - for (hashed_address, info) in self.accounts.drain() { - if let Some(info) = info { - updated_accounts.push((hashed_address, info)); - } else { - destroyed_accounts.insert(hashed_address); - } - } - updated_accounts.sort_unstable_by_key(|(address, _)| *address); - let accounts = HashedAccountsSorted { accounts: updated_accounts, destroyed_accounts }; + /// Creates a sorted copy without consuming self. + /// More efficient than `.clone().into_sorted()` as it avoids cloning `HashMap` metadata. 
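
A hedged usage sketch for `extend_from_sorted` above: when aggregating several in-memory blocks whose state is already kept in sorted form, the overlay can be extended directly from each block without materialising intermediate `HashedPostState` values. `blocks_sorted` is an illustrative, oldest-first collection of `HashedPostStateSorted` values.

let mut overlay = HashedPostState::default();
for sorted in &blocks_sorted {
    // Newer blocks overwrite older entries; destroyed accounts stay as `None`.
    overlay.extend_from_sorted(sorted);
}
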
+ pub fn clone_into_sorted(&self) -> HashedPostStateSorted { + let mut accounts: Vec<_> = self.accounts.iter().map(|(&k, &v)| (k, v)).collect(); + accounts.sort_unstable_by_key(|(address, _)| *address); let storages = self .storages - .drain() - .map(|(hashed_address, storage)| (hashed_address, storage.into_sorted())) + .iter() + .map(|(&hashed_address, storage)| (hashed_address, storage.clone_into_sorted())) .collect(); HashedPostStateSorted { accounts, storages } @@ -441,43 +458,62 @@ impl HashedStorage { self.storage.extend(other.storage.iter().map(|(&k, &v)| (k, v))); } + /// Extend hashed storage with sorted data, converting directly into the unsorted `HashMap` + /// representation. This is more efficient than first converting to `HashedStorage` and + /// then extending, as it avoids creating intermediate `HashMap` allocations. + pub fn extend_from_sorted(&mut self, sorted: &HashedStorageSorted) { + if sorted.wiped { + self.wiped = true; + self.storage.clear(); + } + + // Reserve capacity for all slots + self.storage.reserve(sorted.storage_slots.len()); + + // Insert all storage slots + for (slot, value) in &sorted.storage_slots { + self.storage.insert(*slot, *value); + } + } + /// Converts hashed storage into [`HashedStorageSorted`]. pub fn into_sorted(self) -> HashedStorageSorted { - let mut non_zero_valued_slots = Vec::new(); - let mut zero_valued_slots = HashSet::default(); - for (hashed_slot, value) in self.storage { - if value.is_zero() { - zero_valued_slots.insert(hashed_slot); - } else { - non_zero_valued_slots.push((hashed_slot, value)); - } - } - non_zero_valued_slots.sort_unstable_by_key(|(key, _)| *key); + let mut storage_slots: Vec<_> = self.storage.into_iter().collect(); + storage_slots.sort_unstable_by_key(|(key, _)| *key); - HashedStorageSorted { non_zero_valued_slots, zero_valued_slots, wiped: self.wiped } + HashedStorageSorted { storage_slots, wiped: self.wiped } + } + + /// Creates a sorted copy without consuming self. + /// More efficient than `.clone().into_sorted()` as it avoids cloning `HashMap` metadata. + pub fn clone_into_sorted(&self) -> HashedStorageSorted { + let mut storage_slots: Vec<_> = self.storage.iter().map(|(&k, &v)| (k, v)).collect(); + storage_slots.sort_unstable_by_key(|(key, _)| *key); + + HashedStorageSorted { storage_slots, wiped: self.wiped } } } /// Sorted hashed post state optimized for iterating during state trie calculation. #[derive(PartialEq, Eq, Clone, Default, Debug)] pub struct HashedPostStateSorted { - /// Updated state of accounts. - pub accounts: HashedAccountsSorted, - /// Map of hashed addresses to hashed storage. + /// Sorted collection of account updates. `None` indicates a destroyed account. + pub accounts: Vec<(B256, Option)>, + /// Map of hashed addresses to their sorted storage updates. pub storages: B256Map, } impl HashedPostStateSorted { /// Create new instance of [`HashedPostStateSorted`] pub const fn new( - accounts: HashedAccountsSorted, + accounts: Vec<(B256, Option)>, storages: B256Map, ) -> Self { Self { accounts, storages } } /// Returns reference to hashed accounts. - pub const fn accounts(&self) -> &HashedAccountsSorted { + pub const fn accounts(&self) -> &Vec<(B256, Option)> { &self.accounts } @@ -486,18 +522,61 @@ impl HashedPostStateSorted { &self.storages } + /// Returns `true` if there are no account or storage updates. + pub fn is_empty(&self) -> bool { + self.accounts.is_empty() && self.storages.is_empty() + } + /// Returns the total number of updates including all accounts and storage updates. 
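
A minimal sketch of the flattened sorted representation introduced above, assuming the crate's `Account`, `B256Map`, and `U256` types: a destroyed account is encoded as `(address, None)` and a cleared slot as `(slot, U256::ZERO)`, which is what allows the separate destroyed-account and zero-valued-slot sets to be dropped.

let sorted = HashedPostStateSorted {
    accounts: vec![
        (B256::with_last_byte(1), Some(Account::default())), // updated account
        (B256::with_last_byte(2), None),                     // destroyed account
    ],
    storages: B256Map::from_iter([(
        B256::with_last_byte(1),
        HashedStorageSorted {
            storage_slots: vec![
                (B256::with_last_byte(1), U256::from(7)), // written slot
                (B256::with_last_byte(2), U256::ZERO),    // deleted slot
            ],
            wiped: false,
        },
    )]),
};
assert_eq!(sorted.total_len(), 4);
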
pub fn total_len(&self) -> usize { - self.accounts.accounts.len() + - self.accounts.destroyed_accounts.len() + - self.storages.values().map(|storage| storage.len()).sum::() + self.accounts.len() + self.storages.values().map(|s| s.len()).sum::() + } + + /// Construct [`TriePrefixSetsMut`] from hashed post state. + /// + /// The prefix sets contain the hashed account and storage keys that have been changed in the + /// post state. + pub fn construct_prefix_sets(&self) -> TriePrefixSetsMut { + let mut account_prefix_set = PrefixSetMut::with_capacity(self.accounts.len()); + let mut destroyed_accounts = HashSet::default(); + for (hashed_address, account) in &self.accounts { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + if account.is_none() { + destroyed_accounts.insert(*hashed_address); + } + } + + let mut storage_prefix_sets = + B256Map::with_capacity_and_hasher(self.storages.len(), Default::default()); + for (hashed_address, hashed_storage) in &self.storages { + // Ensure account trie covers storage overlays even if account map is empty. + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + + let prefix_set = if hashed_storage.wiped { + PrefixSetMut::all() + } else { + let mut prefix_set = + PrefixSetMut::with_capacity(hashed_storage.storage_slots.len()); + prefix_set.extend_keys( + hashed_storage + .storage_slots + .iter() + .map(|(hashed_slot, _)| Nibbles::unpack(hashed_slot)), + ); + prefix_set + }; + + storage_prefix_sets.insert(*hashed_address, prefix_set); + } + + TriePrefixSetsMut { account_prefix_set, storage_prefix_sets, destroyed_accounts } } /// Extends this state with contents of another sorted state. /// Entries in `other` take precedence for duplicate keys. pub fn extend_ref(&mut self, other: &Self) { // Extend accounts - self.accounts.extend_ref(&other.accounts); + extend_sorted_vec(&mut self.accounts, &other.accounts); // Extend storages for (hashed_address, other_storage) in &other.storages { @@ -507,6 +586,12 @@ impl HashedPostStateSorted { .or_insert_with(|| other_storage.clone()); } } + + /// Clears all accounts and storage data. + pub fn clear(&mut self) { + self.accounts.clear(); + self.storages.clear(); + } } impl AsRef for HashedPostStateSorted { @@ -515,47 +600,11 @@ impl AsRef for HashedPostStateSorted { } } -/// Sorted account state optimized for iterating during state trie calculation. -#[derive(Clone, Eq, PartialEq, Default, Debug)] -pub struct HashedAccountsSorted { - /// Sorted collection of hashed addresses and their account info. - pub accounts: Vec<(B256, Account)>, - /// Set of destroyed account keys. - pub destroyed_accounts: B256Set, -} - -impl HashedAccountsSorted { - /// Returns a sorted iterator over updated accounts. - pub fn accounts_sorted(&self) -> impl Iterator)> { - self.accounts - .iter() - .map(|(address, account)| (*address, Some(*account))) - .chain(self.destroyed_accounts.iter().map(|address| (*address, None))) - .sorted_by_key(|entry| *entry.0) - } - - /// Extends this collection with contents of another sorted collection. - /// Entries in `other` take precedence for duplicate keys. - pub fn extend_ref(&mut self, other: &Self) { - // Updates take precedence over removals, so we want removals from `other` to only apply to - // the previous accounts. 
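
A hedged sketch of how the new `construct_prefix_sets` above is meant to be consumed, reusing the `sorted` value from the earlier sketch and the `TriePrefixSetsMut` field names shown in this diff: every touched account or storage overlay contributes an account-trie prefix, wiped storages become `PrefixSetMut::all()`, and `None` accounts are reported as destroyed.

let prefix_sets = sorted.construct_prefix_sets();
// Each overlayed storage gets its own storage prefix set keyed by the hashed address.
assert!(prefix_sets.storage_prefix_sets.contains_key(&B256::with_last_byte(1)));
// Destroyed accounts (encoded as `None` in the sorted account list) are reported separately.
assert!(prefix_sets.destroyed_accounts.contains(&B256::with_last_byte(2)));
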
- self.accounts.retain(|(addr, _)| !other.destroyed_accounts.contains(addr)); - - // Extend the sorted accounts vector - extend_sorted_vec(&mut self.accounts, &other.accounts); - - // Merge destroyed accounts sets - self.destroyed_accounts.extend(&other.destroyed_accounts); - } -} - /// Sorted hashed storage optimized for iterating during state trie calculation. #[derive(Clone, Eq, PartialEq, Debug)] pub struct HashedStorageSorted { - /// Sorted hashed storage slots with non-zero value. - pub non_zero_valued_slots: Vec<(B256, U256)>, - /// Slots that have been zero valued. - pub zero_valued_slots: B256Set, + /// Sorted collection of updated storage slots. [`U256::ZERO`] indicates a deleted value. + pub storage_slots: Vec<(B256, U256)>, /// Flag indicating whether the storage was wiped or not. pub wiped: bool, } @@ -566,45 +615,69 @@ impl HashedStorageSorted { self.wiped } - /// Returns a sorted iterator over updated storage slots. - pub fn storage_slots_sorted(&self) -> impl Iterator { - self.non_zero_valued_slots - .iter() - .map(|(hashed_slot, value)| (*hashed_slot, *value)) - .chain(self.zero_valued_slots.iter().map(|hashed_slot| (*hashed_slot, U256::ZERO))) - .sorted_by_key(|entry| *entry.0) + /// Returns reference to updated storage slots. + pub fn storage_slots_ref(&self) -> &[(B256, U256)] { + &self.storage_slots } /// Returns the total number of storage slot updates. - pub fn len(&self) -> usize { - self.non_zero_valued_slots.len() + self.zero_valued_slots.len() + pub const fn len(&self) -> usize { + self.storage_slots.len() } /// Returns `true` if there are no storage slot updates. - pub fn is_empty(&self) -> bool { - self.non_zero_valued_slots.is_empty() && self.zero_valued_slots.is_empty() + pub const fn is_empty(&self) -> bool { + self.storage_slots.is_empty() } - /// Extends this storage with contents of another sorted storage. - /// Entries in `other` take precedence for duplicate keys. + /// Extends the storage slots updates with another set of sorted updates. + /// + /// If `other` is marked as deleted, this will be marked as deleted and all slots cleared. + /// Otherwise, nodes are merged with `other`'s values taking precedence for duplicates. 
pub fn extend_ref(&mut self, other: &Self) { if other.wiped { // If other is wiped, clear everything and copy from other self.wiped = true; - self.non_zero_valued_slots.clear(); - self.zero_valued_slots.clear(); - self.non_zero_valued_slots.extend_from_slice(&other.non_zero_valued_slots); - self.zero_valued_slots.extend(&other.zero_valued_slots); + self.storage_slots.clear(); + self.storage_slots.extend(other.storage_slots.iter().copied()); return; } - self.non_zero_valued_slots.retain(|(slot, _)| !other.zero_valued_slots.contains(slot)); - // Extend the sorted non-zero valued slots - extend_sorted_vec(&mut self.non_zero_valued_slots, &other.non_zero_valued_slots); + extend_sorted_vec(&mut self.storage_slots, &other.storage_slots); + } +} - // Merge zero valued slots sets - self.zero_valued_slots.extend(&other.zero_valued_slots); +impl From for HashedStorage { + fn from(sorted: HashedStorageSorted) -> Self { + let mut storage = B256Map::default(); + + // Add all storage slots (including zero-valued ones which indicate deletion) + for (slot, value) in sorted.storage_slots { + storage.insert(slot, value); + } + + Self { wiped: sorted.wiped, storage } + } +} + +impl From for HashedPostState { + fn from(sorted: HashedPostStateSorted) -> Self { + let mut accounts = B256Map::default(); + + // Add all accounts (Some for updated, None for destroyed) + for (address, account) in sorted.accounts { + accounts.insert(address, account); + } + + // Convert storages + let storages = sorted + .storages + .into_iter() + .map(|(address, storage)| (address, storage.into())) + .collect(); + + Self { accounts, storages } } } @@ -1146,87 +1219,92 @@ mod tests { fn test_hashed_post_state_sorted_extend_ref() { // Test extending accounts let mut state1 = HashedPostStateSorted { - accounts: HashedAccountsSorted { - accounts: vec![ - (B256::from([1; 32]), Account::default()), - (B256::from([3; 32]), Account::default()), - ], - destroyed_accounts: B256Set::from_iter([B256::from([5; 32])]), - }, + accounts: vec![ + (B256::from([1; 32]), Some(Account::default())), + (B256::from([3; 32]), Some(Account::default())), + (B256::from([5; 32]), None), + ], storages: B256Map::default(), }; let state2 = HashedPostStateSorted { - accounts: HashedAccountsSorted { - accounts: vec![ - (B256::from([2; 32]), Account::default()), - (B256::from([3; 32]), Account { nonce: 1, ..Default::default() }), // Override - (B256::from([4; 32]), Account::default()), - ], - destroyed_accounts: B256Set::from_iter([B256::from([6; 32])]), - }, + accounts: vec![ + (B256::from([2; 32]), Some(Account::default())), + (B256::from([3; 32]), Some(Account { nonce: 1, ..Default::default() })), /* Override */ + (B256::from([4; 32]), Some(Account::default())), + (B256::from([6; 32]), None), + ], storages: B256Map::default(), }; state1.extend_ref(&state2); // Check accounts are merged and sorted - assert_eq!(state1.accounts.accounts.len(), 4); - assert_eq!(state1.accounts.accounts[0].0, B256::from([1; 32])); - assert_eq!(state1.accounts.accounts[1].0, B256::from([2; 32])); - assert_eq!(state1.accounts.accounts[2].0, B256::from([3; 32])); - assert_eq!(state1.accounts.accounts[2].1.nonce, 1); // Should have state2's value - assert_eq!(state1.accounts.accounts[3].0, B256::from([4; 32])); - - // Check destroyed accounts are merged - assert!(state1.accounts.destroyed_accounts.contains(&B256::from([5; 32]))); - assert!(state1.accounts.destroyed_accounts.contains(&B256::from([6; 32]))); + assert_eq!(state1.accounts.len(), 6); + assert_eq!(state1.accounts[0].0, 
B256::from([1; 32])); + assert_eq!(state1.accounts[1].0, B256::from([2; 32])); + assert_eq!(state1.accounts[2].0, B256::from([3; 32])); + assert_eq!(state1.accounts[2].1.unwrap().nonce, 1); // Should have state2's value + assert_eq!(state1.accounts[3].0, B256::from([4; 32])); + assert_eq!(state1.accounts[4].0, B256::from([5; 32])); + assert_eq!(state1.accounts[4].1, None); + assert_eq!(state1.accounts[5].0, B256::from([6; 32])); + assert_eq!(state1.accounts[5].1, None); } #[test] fn test_hashed_storage_sorted_extend_ref() { // Test normal extension let mut storage1 = HashedStorageSorted { - non_zero_valued_slots: vec![ + storage_slots: vec![ (B256::from([1; 32]), U256::from(10)), (B256::from([3; 32]), U256::from(30)), + (B256::from([5; 32]), U256::ZERO), ], - zero_valued_slots: B256Set::from_iter([B256::from([5; 32])]), wiped: false, }; let storage2 = HashedStorageSorted { - non_zero_valued_slots: vec![ + storage_slots: vec![ (B256::from([2; 32]), U256::from(20)), (B256::from([3; 32]), U256::from(300)), // Override (B256::from([4; 32]), U256::from(40)), + (B256::from([6; 32]), U256::ZERO), ], - zero_valued_slots: B256Set::from_iter([B256::from([6; 32])]), wiped: false, }; storage1.extend_ref(&storage2); - assert_eq!(storage1.non_zero_valued_slots.len(), 4); - assert_eq!(storage1.non_zero_valued_slots[0].0, B256::from([1; 32])); - assert_eq!(storage1.non_zero_valued_slots[1].0, B256::from([2; 32])); - assert_eq!(storage1.non_zero_valued_slots[2].0, B256::from([3; 32])); - assert_eq!(storage1.non_zero_valued_slots[2].1, U256::from(300)); // Should have storage2's value - assert_eq!(storage1.non_zero_valued_slots[3].0, B256::from([4; 32])); - assert!(storage1.zero_valued_slots.contains(&B256::from([5; 32]))); - assert!(storage1.zero_valued_slots.contains(&B256::from([6; 32]))); + assert_eq!(storage1.storage_slots.len(), 6); + assert_eq!(storage1.storage_slots[0].0, B256::from([1; 32])); + assert_eq!(storage1.storage_slots[0].1, U256::from(10)); + assert_eq!(storage1.storage_slots[1].0, B256::from([2; 32])); + assert_eq!(storage1.storage_slots[1].1, U256::from(20)); + assert_eq!(storage1.storage_slots[2].0, B256::from([3; 32])); + assert_eq!(storage1.storage_slots[2].1, U256::from(300)); // Should have storage2's value + assert_eq!(storage1.storage_slots[3].0, B256::from([4; 32])); + assert_eq!(storage1.storage_slots[3].1, U256::from(40)); + assert_eq!(storage1.storage_slots[4].0, B256::from([5; 32])); + assert_eq!(storage1.storage_slots[4].1, U256::ZERO); + assert_eq!(storage1.storage_slots[5].0, B256::from([6; 32])); + assert_eq!(storage1.storage_slots[5].1, U256::ZERO); assert!(!storage1.wiped); // Test wiped storage let mut storage3 = HashedStorageSorted { - non_zero_valued_slots: vec![(B256::from([1; 32]), U256::from(10))], - zero_valued_slots: B256Set::from_iter([B256::from([2; 32])]), + storage_slots: vec![ + (B256::from([1; 32]), U256::from(10)), + (B256::from([2; 32]), U256::ZERO), + ], wiped: false, }; let storage4 = HashedStorageSorted { - non_zero_valued_slots: vec![(B256::from([3; 32]), U256::from(30))], - zero_valued_slots: B256Set::from_iter([B256::from([4; 32])]), + storage_slots: vec![ + (B256::from([3; 32]), U256::from(30)), + (B256::from([4; 32]), U256::ZERO), + ], wiped: true, }; @@ -1234,9 +1312,221 @@ mod tests { assert!(storage3.wiped); // When wiped, should only have storage4's values - assert_eq!(storage3.non_zero_valued_slots.len(), 1); - assert_eq!(storage3.non_zero_valued_slots[0].0, B256::from([3; 32])); - assert_eq!(storage3.zero_valued_slots.len(), 1); - 
assert!(storage3.zero_valued_slots.contains(&B256::from([4; 32]))); + assert_eq!(storage3.storage_slots.len(), 2); + assert_eq!(storage3.storage_slots[0].0, B256::from([3; 32])); + assert_eq!(storage3.storage_slots[0].1, U256::from(30)); + assert_eq!(storage3.storage_slots[1].0, B256::from([4; 32])); + assert_eq!(storage3.storage_slots[1].1, U256::ZERO); + } + + /// Test extending with sorted accounts merges correctly into `HashMap` + #[test] + fn test_hashed_post_state_extend_from_sorted_with_accounts() { + let addr1 = B256::random(); + let addr2 = B256::random(); + + let mut state = HashedPostState::default(); + state.accounts.insert(addr1, Some(Default::default())); + + let mut sorted_state = HashedPostStateSorted::default(); + sorted_state.accounts.push((addr2, Some(Default::default()))); + + state.extend_from_sorted(&sorted_state); + + assert_eq!(state.accounts.len(), 2); + assert!(state.accounts.contains_key(&addr1)); + assert!(state.accounts.contains_key(&addr2)); + } + + /// Test destroyed accounts (None values) are inserted correctly + #[test] + fn test_hashed_post_state_extend_from_sorted_with_destroyed_accounts() { + let addr1 = B256::random(); + + let mut state = HashedPostState::default(); + + let mut sorted_state = HashedPostStateSorted::default(); + sorted_state.accounts.push((addr1, None)); + + state.extend_from_sorted(&sorted_state); + + assert!(state.accounts.contains_key(&addr1)); + assert_eq!(state.accounts.get(&addr1), Some(&None)); + } + + /// Test non-wiped storage merges both zero and non-zero valued slots + #[test] + fn test_hashed_storage_extend_from_sorted_non_wiped() { + let slot1 = B256::random(); + let slot2 = B256::random(); + let slot3 = B256::random(); + + let mut storage = HashedStorage::from_iter(false, [(slot1, U256::from(100))]); + + let sorted = HashedStorageSorted { + storage_slots: vec![(slot2, U256::from(200)), (slot3, U256::ZERO)], + wiped: false, + }; + + storage.extend_from_sorted(&sorted); + + assert!(!storage.wiped); + assert_eq!(storage.storage.len(), 3); + assert_eq!(storage.storage.get(&slot1), Some(&U256::from(100))); + assert_eq!(storage.storage.get(&slot2), Some(&U256::from(200))); + assert_eq!(storage.storage.get(&slot3), Some(&U256::ZERO)); + } + + /// Test wiped=true clears existing storage and only keeps new slots (critical edge case) + #[test] + fn test_hashed_storage_extend_from_sorted_wiped() { + let slot1 = B256::random(); + let slot2 = B256::random(); + + let mut storage = HashedStorage::from_iter(false, [(slot1, U256::from(100))]); + + let sorted = + HashedStorageSorted { storage_slots: vec![(slot2, U256::from(200))], wiped: true }; + + storage.extend_from_sorted(&sorted); + + assert!(storage.wiped); + // After wipe, old storage should be cleared and only new storage remains + assert_eq!(storage.storage.len(), 1); + assert_eq!(storage.storage.get(&slot2), Some(&U256::from(200))); + } + + #[test] + fn test_hashed_post_state_chunking_length() { + let addr1 = B256::from([1; 32]); + let addr2 = B256::from([2; 32]); + let addr3 = B256::from([3; 32]); + let addr4 = B256::from([4; 32]); + let slot1 = B256::from([1; 32]); + let slot2 = B256::from([2; 32]); + let slot3 = B256::from([3; 32]); + + let state = HashedPostState { + accounts: B256Map::from_iter([(addr1, None), (addr2, None), (addr4, None)]), + storages: B256Map::from_iter([ + ( + addr1, + HashedStorage { + wiped: false, + storage: B256Map::from_iter([ + (slot1, U256::ZERO), + (slot2, U256::ZERO), + (slot3, U256::ZERO), + ]), + }, + ), + ( + addr2, + HashedStorage { + wiped: 
true, + storage: B256Map::from_iter([ + (slot1, U256::ZERO), + (slot2, U256::ZERO), + (slot3, U256::ZERO), + ]), + }, + ), + ( + addr3, + HashedStorage { + wiped: false, + storage: B256Map::from_iter([ + (slot1, U256::ZERO), + (slot2, U256::ZERO), + (slot3, U256::ZERO), + ]), + }, + ), + ]), + }; + + let chunking_length = state.chunking_length(); + for size in 1..=state.clone().chunks(1).count() { + let chunk_count = state.clone().chunks(size).count(); + let expected_count = chunking_length.div_ceil(size); + assert_eq!( + chunk_count, expected_count, + "chunking_length: {}, size: {}", + chunking_length, size + ); + } + } + + #[test] + fn test_clone_into_sorted_equivalence() { + let addr1 = B256::from([1; 32]); + let addr2 = B256::from([2; 32]); + let addr3 = B256::from([3; 32]); + let slot1 = B256::from([1; 32]); + let slot2 = B256::from([2; 32]); + let slot3 = B256::from([3; 32]); + + let state = HashedPostState { + accounts: B256Map::from_iter([ + (addr1, Some(Account { nonce: 1, balance: U256::from(100), bytecode_hash: None })), + (addr2, None), + (addr3, Some(Account::default())), + ]), + storages: B256Map::from_iter([ + ( + addr1, + HashedStorage { + wiped: false, + storage: B256Map::from_iter([ + (slot1, U256::from(10)), + (slot2, U256::from(20)), + ]), + }, + ), + ( + addr2, + HashedStorage { + wiped: true, + storage: B256Map::from_iter([(slot3, U256::ZERO)]), + }, + ), + ]), + }; + + // clone_into_sorted should produce the same result as clone().into_sorted() + let sorted_via_clone = state.clone().into_sorted(); + let sorted_via_clone_into = state.clone_into_sorted(); + + assert_eq!(sorted_via_clone, sorted_via_clone_into); + + // Verify the original state is not consumed + assert_eq!(state.accounts.len(), 3); + assert_eq!(state.storages.len(), 2); + } + + #[test] + fn test_hashed_storage_clone_into_sorted_equivalence() { + let slot1 = B256::from([1; 32]); + let slot2 = B256::from([2; 32]); + let slot3 = B256::from([3; 32]); + + let storage = HashedStorage { + wiped: true, + storage: B256Map::from_iter([ + (slot1, U256::from(100)), + (slot2, U256::ZERO), + (slot3, U256::from(300)), + ]), + }; + + // clone_into_sorted should produce the same result as clone().into_sorted() + let sorted_via_clone = storage.clone().into_sorted(); + let sorted_via_clone_into = storage.clone_into_sorted(); + + assert_eq!(sorted_via_clone, sorted_via_clone_into); + + // Verify the original storage is not consumed + assert_eq!(storage.storage.len(), 3); + assert!(storage.wiped); } } diff --git a/crates/trie/common/src/input.rs b/crates/trie/common/src/input.rs index 522cfa9ed4..3d3bc2349d 100644 --- a/crates/trie/common/src/input.rs +++ b/crates/trie/common/src/input.rs @@ -1,4 +1,9 @@ -use crate::{prefix_set::TriePrefixSetsMut, updates::TrieUpdates, HashedPostState}; +use crate::{ + prefix_set::TriePrefixSetsMut, + updates::{TrieUpdates, TrieUpdatesSorted}, + HashedPostState, HashedPostStateSorted, +}; +use alloc::sync::Arc; /// Inputs for trie-related computations. #[derive(Default, Debug, Clone)] @@ -41,10 +46,21 @@ impl TrieInput { input } + /// Create new trie input from the provided sorted blocks, from oldest to newest. + /// Converts sorted types to unsorted for aggregation. 
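
A hedged usage sketch for the `from_blocks_sorted` constructor documented above (its signature follows): blocks are passed oldest-first so that newer entries win on conflict. The per-block accessor names below are illustrative and not APIs from this diff.

let input = TrieInput::from_blocks_sorted(
    blocks
        .iter()
        .map(|block| (block.hashed_state_sorted(), block.trie_updates_sorted())),
);
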
+ pub fn from_blocks_sorted<'a>( + blocks: impl IntoIterator, + ) -> Self { + let mut input = Self::default(); + for (hashed_state, trie_updates) in blocks { + // Extend directly from sorted types, avoiding intermediate HashMap allocations + input.nodes.extend_from_sorted(trie_updates); + input.state.extend_from_sorted(hashed_state); + } + input + } + /// Extend the trie input with the provided blocks, from oldest to newest. - /// - /// For blocks with missing trie updates, the trie input will be extended with prefix sets - /// constructed from the state of this block and the state itself, **without** trie updates. pub fn extend_with_blocks<'a>( &mut self, blocks: impl IntoIterator, @@ -119,3 +135,40 @@ impl TrieInput { self } } + +/// Sorted variant of [`TrieInput`] for efficient proof generation. +/// +/// This type holds sorted versions of trie data structures, which eliminates the need +/// for expensive sorting operations during multiproof generation. +#[derive(Default, Debug, Clone)] +pub struct TrieInputSorted { + /// Sorted cached in-memory intermediate trie nodes. + pub nodes: Arc, + /// Sorted in-memory overlay hashed state. + pub state: Arc, + /// Prefix sets for computation. + pub prefix_sets: TriePrefixSetsMut, +} + +impl TrieInputSorted { + /// Create new sorted trie input. + pub const fn new( + nodes: Arc, + state: Arc, + prefix_sets: TriePrefixSetsMut, + ) -> Self { + Self { nodes, state, prefix_sets } + } + + /// Create from unsorted [`TrieInput`] by sorting. + pub fn from_unsorted(input: TrieInput) -> Self { + Self { + nodes: Arc::new(input.nodes.into_sorted()), + state: Arc::new(input.state.into_sorted()), + prefix_sets: input.prefix_sets, + } + } +} + +#[cfg(test)] +mod tests {} diff --git a/crates/trie/common/src/lib.rs b/crates/trie/common/src/lib.rs index e4292a5201..f212dd2910 100644 --- a/crates/trie/common/src/lib.rs +++ b/crates/trie/common/src/lib.rs @@ -17,7 +17,7 @@ pub use hashed_state::*; /// Input for trie computation. mod input; -pub use input::TrieInput; +pub use input::{TrieInput, TrieInputSorted}; /// The implementation of hash builder. pub mod hash_builder; @@ -41,6 +41,9 @@ pub use storage::{StorageTrieEntry, TrieChangeSetsEntry}; mod subnode; pub use subnode::StoredSubNode; +mod trie; +pub use trie::{ProofTrieNode, TrieMasks}; + /// The implementation of a container for storing intermediate changes to a trie. /// The container indicates when the trie has been modified. pub mod prefix_set; diff --git a/crates/trie/common/src/prefix_set.rs b/crates/trie/common/src/prefix_set.rs index 35c4bc6783..74fdb78911 100644 --- a/crates/trie/common/src/prefix_set.rs +++ b/crates/trie/common/src/prefix_set.rs @@ -71,16 +71,18 @@ pub struct TriePrefixSets { /// This data structure stores a set of `Nibbles` and provides methods to insert /// new elements and check whether any existing element has a given prefix. /// -/// Internally, this implementation uses a `Vec` and aims to act like a `BTreeSet` in being both -/// sorted and deduplicated. It does this by keeping a `sorted` flag. The `sorted` flag represents -/// whether or not the `Vec` is definitely sorted. When a new element is added, it is set to -/// `false.`. The `Vec` is sorted and deduplicated when `sorted` is `true` and: -/// * An element is being checked for inclusion (`contains`), or -/// * The set is being converted into an immutable `PrefixSet` (`freeze`) +/// Internally, this implementation stores keys in an unsorted `Vec` together with an +/// `all` flag. 
The `all` flag indicates that every entry should be considered changed and that +/// individual keys can be ignored. /// -/// This means that a `PrefixSet` will always be sorted and deduplicated when constructed from a -/// `PrefixSetMut`. +/// Sorting and deduplication do not happen during insertion or membership checks on this mutable +/// structure. Instead, keys are sorted and deduplicated when converting into the immutable +/// `PrefixSet` via `freeze()`. The immutable `PrefixSet` provides `contains` and relies on the +/// sorted and unique keys produced by `freeze()`; it does not perform additional sorting or +/// deduplication. /// +/// This guarantees that a `PrefixSet` constructed from a `PrefixSetMut` is always sorted and +/// deduplicated. /// # Examples /// /// ``` @@ -165,8 +167,7 @@ impl PrefixSetMut { } else { self.keys.sort_unstable(); self.keys.dedup(); - // We need to shrink in both the sorted and non-sorted cases because deduping may have - // occurred either on `freeze`, or during `contains`. + // Shrink after deduplication to release unused capacity. self.keys.shrink_to_fit(); PrefixSet { index: 0, all: false, keys: Arc::new(self.keys) } } diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index b7961f047a..a8e0bb59b9 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -89,6 +89,11 @@ impl MultiProofTargets { pub fn chunks(self, size: usize) -> ChunkedMultiProofTargets { ChunkedMultiProofTargets::new(self, size) } + + /// Returns the number of items that will be considered during chunking in `[Self::chunks]`. + pub fn chunking_length(&self) -> usize { + self.values().map(|slots| 1 + slots.len().saturating_sub(1)).sum::() + } } /// An iterator that yields chunks of the proof targets of at most `size` account and storage @@ -1067,4 +1072,33 @@ mod tests { acc.storage_root = EMPTY_ROOT_HASH; assert_eq!(acc, inverse); } + + #[test] + fn test_multiproof_targets_chunking_length() { + let mut targets = MultiProofTargets::default(); + targets.insert(B256::with_last_byte(1), B256Set::default()); + targets.insert( + B256::with_last_byte(2), + B256Set::from_iter([B256::with_last_byte(10), B256::with_last_byte(20)]), + ); + targets.insert( + B256::with_last_byte(3), + B256Set::from_iter([ + B256::with_last_byte(30), + B256::with_last_byte(31), + B256::with_last_byte(32), + ]), + ); + + let chunking_length = targets.chunking_length(); + for size in 1..=targets.clone().chunks(1).count() { + let chunk_count = targets.clone().chunks(size).count(); + let expected_count = chunking_length.div_ceil(size); + assert_eq!( + chunk_count, expected_count, + "chunking_length: {}, size: {}", + chunking_length, size + ); + } + } } diff --git a/crates/trie/common/src/storage.rs b/crates/trie/common/src/storage.rs index 1e56739386..77d037ff2e 100644 --- a/crates/trie/common/src/storage.rs +++ b/crates/trie/common/src/storage.rs @@ -1,4 +1,5 @@ use super::{BranchNodeCompact, StoredNibblesSubKey}; +use reth_primitives_traits::ValueWithSubKey; /// Account storage trie node. /// @@ -12,6 +13,14 @@ pub struct StorageTrieEntry { pub node: BranchNodeCompact, } +impl ValueWithSubKey for StorageTrieEntry { + type SubKey = StoredNibblesSubKey; + + fn get_subkey(&self) -> Self::SubKey { + self.nibbles.clone() + } +} + // NOTE: Removing reth_codec and manually encode subkey // and compress second part of the value. 
If we have compression // over whole value (Even SubKey) that would mess up fetching of values with seek_by_key_subkey @@ -46,6 +55,14 @@ pub struct TrieChangeSetsEntry { pub node: Option, } +impl ValueWithSubKey for TrieChangeSetsEntry { + type SubKey = StoredNibblesSubKey; + + fn get_subkey(&self) -> Self::SubKey { + self.nibbles.clone() + } +} + #[cfg(any(test, feature = "reth-codec"))] impl reth_codecs::Compact for TrieChangeSetsEntry { fn to_compact(&self, buf: &mut B) -> usize diff --git a/crates/trie/common/src/trie.rs b/crates/trie/common/src/trie.rs new file mode 100644 index 0000000000..8794839301 --- /dev/null +++ b/crates/trie/common/src/trie.rs @@ -0,0 +1,45 @@ +//! Types related to sparse trie nodes and masks. + +use crate::Nibbles; +use alloy_trie::{nodes::TrieNode, TrieMask}; + +/// Struct for passing around branch node mask information. +/// +/// Branch nodes can have up to 16 children (one for each nibble). +/// The masks represent which children are stored in different ways: +/// - `hash_mask`: Indicates which children are stored as hashes in the database +/// - `tree_mask`: Indicates which children are complete subtrees stored in the database +/// +/// These masks are essential for efficient trie traversal and serialization, as they +/// determine how nodes should be encoded and stored on disk. +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub struct TrieMasks { + /// Branch node hash mask, if any. + /// + /// When a bit is set, the corresponding child node's hash is stored in the trie. + /// + /// This mask enables selective hashing of child nodes. + pub hash_mask: Option, + /// Branch node tree mask, if any. + /// + /// When a bit is set, the corresponding child subtree is stored in the database. + pub tree_mask: Option, +} + +impl TrieMasks { + /// Helper function, returns both fields `hash_mask` and `tree_mask` as [`None`] + pub const fn none() -> Self { + Self { hash_mask: None, tree_mask: None } + } +} + +/// Carries all information needed by a sparse trie to reveal a particular node. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ProofTrieNode { + /// Path of the node. + pub path: Nibbles, + /// The node itself. + pub node: TrieNode, + /// Tree and hash masks for the node, if known. + pub masks: TrieMasks, +} diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index b0d178cd1d..f1db882781 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -73,6 +73,44 @@ impl TrieUpdates { self.account_nodes.retain(|nibbles, _| !other.removed_nodes.contains(nibbles)); } + /// Extend trie updates with sorted data, converting directly into the unsorted `HashMap` + /// representation. This is more efficient than first converting to `TrieUpdates` and + /// then extending, as it avoids creating intermediate `HashMap` allocations. + /// + /// This top-level helper merges account nodes and delegates each account's storage trie to + /// [`StorageTrieUpdates::extend_from_sorted`]. 
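A short sketch of how the new `TrieMasks` and `ProofTrieNode` types from `trie.rs` above might be constructed when revealing a node. The mask value and the import paths (assumed to be the `reth_trie_common` crate-root re-exports added in `lib.rs`) are illustrative assumptions.

use alloy_trie::{nodes::TrieNode, TrieMask};
use reth_trie_common::{Nibbles, ProofTrieNode, TrieMasks};

fn reveal_root_placeholder() -> ProofTrieNode {
    // Illustrative mask: children at nibbles 1 and 3 are stored as hashes.
    let masks = TrieMasks { hash_mask: Some(TrieMask::new(0b1010)), tree_mask: None };
    ProofTrieNode { path: Nibbles::default(), node: TrieNode::EmptyRoot, masks }
}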
+ pub fn extend_from_sorted(&mut self, sorted: &TrieUpdatesSorted) { + // Reserve capacity for account nodes + let new_nodes_count = sorted.account_nodes.len(); + self.account_nodes.reserve(new_nodes_count); + + // Insert account nodes from sorted (only non-None entries) + for (nibbles, maybe_node) in &sorted.account_nodes { + if nibbles.is_empty() { + continue; + } + match maybe_node { + Some(node) => { + self.removed_nodes.remove(nibbles); + self.account_nodes.insert(*nibbles, node.clone()); + } + None => { + self.account_nodes.remove(nibbles); + self.removed_nodes.insert(*nibbles); + } + } + } + + // Extend storage tries + self.storage_tries.reserve(sorted.storage_tries.len()); + for (hashed_address, sorted_storage) in &sorted.storage_tries { + self.storage_tries + .entry(*hashed_address) + .or_default() + .extend_from_sorted(sorted_storage); + } + } + /// Insert storage updates for a given hashed address. pub fn insert_storage_updates( &mut self, @@ -108,17 +146,6 @@ impl TrieUpdates { /// Converts trie updates into [`TrieUpdatesSorted`]. pub fn into_sorted(mut self) -> TrieUpdatesSorted { - self.drain_into_sorted() - } - - /// Converts trie updates into [`TrieUpdatesSorted`], but keeping the maps allocated by - /// draining. - /// - /// This effectively clears all the fields in the [`TrieUpdatesSorted`]. - /// - /// This allows us to reuse the allocated space. This allocates new space for the sorted - /// updates, like `into_sorted`. - pub fn drain_into_sorted(&mut self) -> TrieUpdatesSorted { let mut account_nodes = self .account_nodes .drain() @@ -253,6 +280,38 @@ impl StorageTrieUpdates { self.storage_nodes.retain(|nibbles, _| !other.removed_nodes.contains(nibbles)); } + /// Extend storage trie updates with sorted data, converting directly into the unsorted + /// `HashMap` representation. This is more efficient than first converting to + /// `StorageTrieUpdates` and then extending, as it avoids creating intermediate `HashMap` + /// allocations. + /// + /// This is invoked from [`TrieUpdates::extend_from_sorted`] for each account. + pub fn extend_from_sorted(&mut self, sorted: &StorageTrieUpdatesSorted) { + if sorted.is_deleted { + self.storage_nodes.clear(); + self.removed_nodes.clear(); + } + self.is_deleted |= sorted.is_deleted; + + // Reserve capacity for storage nodes + let new_nodes_count = sorted.storage_nodes.len(); + self.storage_nodes.reserve(new_nodes_count); + + // Remove nodes marked as removed and insert new nodes + for (nibbles, maybe_node) in &sorted.storage_nodes { + if nibbles.is_empty() { + continue; + } + if let Some(node) = maybe_node { + self.removed_nodes.remove(nibbles); + self.storage_nodes.insert(*nibbles, node.clone()); + } else { + self.storage_nodes.remove(nibbles); + self.removed_nodes.insert(*nibbles); + } + } + } + /// Finalize storage trie updates for by taking updates from walker and hash builder. pub fn finalize(&mut self, hash_builder: HashBuilder, removed_keys: HashSet) { // Retrieve updated nodes from hash builder. @@ -499,6 +558,12 @@ impl TrieUpdatesSorted { .or_insert_with(|| storage_trie.clone()); } } + + /// Clears all account nodes and storage tries. 
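The per-entry merge rule that `extend_from_sorted` above applies can be summarised with plain collections: a `Some(node)` entry overrides any pending removal and inserts the node, while a `None` entry removes the node and records the path as removed. A simplified model with string paths standing in for `Nibbles`:

use std::collections::{HashMap, HashSet};

fn apply(
    nodes: &mut HashMap<&'static str, u32>,
    removed: &mut HashSet<&'static str>,
    (path, maybe_node): (&'static str, Option<u32>),
) {
    match maybe_node {
        Some(node) => {
            // An updated node cancels a pending removal for the same path.
            removed.remove(path);
            nodes.insert(path, node);
        }
        None => {
            // A removal drops the node and remembers the path as removed.
            nodes.remove(path);
            removed.insert(path);
        }
    }
}

fn main() {
    let mut nodes = HashMap::new();
    let mut removed = HashSet::new();
    apply(&mut nodes, &mut removed, ("0x1", Some(1)));
    apply(&mut nodes, &mut removed, ("0x1", None));
    assert!(nodes.is_empty());
    assert!(removed.contains("0x1"));
}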
+ pub fn clear(&mut self) { + self.account_nodes.clear(); + self.storage_tries.clear(); + } } impl AsRef for TrieUpdatesSorted { @@ -749,6 +814,151 @@ mod tests { assert_eq!(storage3.storage_nodes[0].0, Nibbles::from_nibbles_unchecked([0x06])); assert_eq!(storage3.storage_nodes[1].0, Nibbles::from_nibbles_unchecked([0x07])); } + + /// Test extending with storage tries adds both nodes and removed nodes correctly + #[test] + fn test_trie_updates_extend_from_sorted_with_storage_tries() { + let hashed_address = B256::from([1; 32]); + + let mut updates = TrieUpdates::default(); + + let storage_trie = StorageTrieUpdatesSorted { + is_deleted: false, + storage_nodes: vec![ + (Nibbles::from_nibbles_unchecked([0x0a]), Some(BranchNodeCompact::default())), + (Nibbles::from_nibbles_unchecked([0x0b]), None), + ], + }; + + let sorted = TrieUpdatesSorted { + account_nodes: vec![], + storage_tries: B256Map::from_iter([(hashed_address, storage_trie)]), + }; + + updates.extend_from_sorted(&sorted); + + assert_eq!(updates.storage_tries.len(), 1); + let storage = updates.storage_tries.get(&hashed_address).unwrap(); + assert!(!storage.is_deleted); + assert_eq!(storage.storage_nodes.len(), 1); + assert!(storage.removed_nodes.contains(&Nibbles::from_nibbles_unchecked([0x0b]))); + } + + /// Test deleted=true clears old storage nodes before adding new ones (critical edge case) + #[test] + fn test_trie_updates_extend_from_sorted_with_deleted_storage() { + let hashed_address = B256::from([1; 32]); + + let mut updates = TrieUpdates::default(); + updates.storage_tries.insert( + hashed_address, + StorageTrieUpdates { + is_deleted: false, + storage_nodes: HashMap::from_iter([( + Nibbles::from_nibbles_unchecked([0x01]), + BranchNodeCompact::default(), + )]), + removed_nodes: Default::default(), + }, + ); + + let storage_trie = StorageTrieUpdatesSorted { + is_deleted: true, + storage_nodes: vec![( + Nibbles::from_nibbles_unchecked([0x0a]), + Some(BranchNodeCompact::default()), + )], + }; + + let sorted = TrieUpdatesSorted { + account_nodes: vec![], + storage_tries: B256Map::from_iter([(hashed_address, storage_trie)]), + }; + + updates.extend_from_sorted(&sorted); + + let storage = updates.storage_tries.get(&hashed_address).unwrap(); + assert!(storage.is_deleted); + // After deletion, old nodes should be cleared + assert_eq!(storage.storage_nodes.len(), 1); + assert!(storage.storage_nodes.contains_key(&Nibbles::from_nibbles_unchecked([0x0a]))); + } + + /// Test non-deleted storage merges nodes and tracks removed nodes + #[test] + fn test_storage_trie_updates_extend_from_sorted_non_deleted() { + let mut storage = StorageTrieUpdates { + is_deleted: false, + storage_nodes: HashMap::from_iter([( + Nibbles::from_nibbles_unchecked([0x01]), + BranchNodeCompact::default(), + )]), + removed_nodes: Default::default(), + }; + + let sorted = StorageTrieUpdatesSorted { + is_deleted: false, + storage_nodes: vec![ + (Nibbles::from_nibbles_unchecked([0x02]), Some(BranchNodeCompact::default())), + (Nibbles::from_nibbles_unchecked([0x03]), None), + ], + }; + + storage.extend_from_sorted(&sorted); + + assert!(!storage.is_deleted); + assert_eq!(storage.storage_nodes.len(), 2); + assert!(storage.removed_nodes.contains(&Nibbles::from_nibbles_unchecked([0x03]))); + } + + /// Test deleted=true clears old nodes before extending (edge case) + #[test] + fn test_storage_trie_updates_extend_from_sorted_deleted() { + let mut storage = StorageTrieUpdates { + is_deleted: false, + storage_nodes: HashMap::from_iter([( + 
Nibbles::from_nibbles_unchecked([0x01]), + BranchNodeCompact::default(), + )]), + removed_nodes: Default::default(), + }; + + let sorted = StorageTrieUpdatesSorted { + is_deleted: true, + storage_nodes: vec![( + Nibbles::from_nibbles_unchecked([0x0a]), + Some(BranchNodeCompact::default()), + )], + }; + + storage.extend_from_sorted(&sorted); + + assert!(storage.is_deleted); + // Old nodes should be cleared when deleted + assert_eq!(storage.storage_nodes.len(), 1); + assert!(storage.storage_nodes.contains_key(&Nibbles::from_nibbles_unchecked([0x0a]))); + } + + /// Test empty nibbles are filtered out during conversion (edge case bug) + #[test] + fn test_trie_updates_extend_from_sorted_filters_empty_nibbles() { + let mut updates = TrieUpdates::default(); + + let sorted = TrieUpdatesSorted { + account_nodes: vec![ + (Nibbles::default(), Some(BranchNodeCompact::default())), // Empty nibbles + (Nibbles::from_nibbles_unchecked([0x01]), Some(BranchNodeCompact::default())), + ], + storage_tries: B256Map::default(), + }; + + updates.extend_from_sorted(&sorted); + + // Empty nibbles should be filtered out + assert_eq!(updates.account_nodes.len(), 1); + assert!(updates.account_nodes.contains_key(&Nibbles::from_nibbles_unchecked([0x01]))); + assert!(!updates.account_nodes.contains_key(&Nibbles::default())); + } } /// Bincode-compatible trie updates type serde implementations. diff --git a/crates/trie/common/src/utils.rs b/crates/trie/common/src/utils.rs index 5a2234fe26..2c30b474bc 100644 --- a/crates/trie/common/src/utils.rs +++ b/crates/trie/common/src/utils.rs @@ -1,4 +1,5 @@ use alloc::vec::Vec; +use core::cmp::Ordering; /// Helper function to extend a sorted vector with another sorted vector. /// Values from `other` take precedence for duplicate keys. @@ -24,7 +25,6 @@ where // Iterate through target and update/collect items from other for target_item in target.iter_mut() { while let Some(other_item) = other_iter.peek() { - use core::cmp::Ordering; match other_item.0.cmp(&target_item.0) { Ordering::Less => { // Other item comes before current target item, collect it @@ -51,3 +51,16 @@ where target.sort_unstable_by(|a, b| a.0.cmp(&b.0)); } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_extend_sorted_vec() { + let mut target = vec![(1, "a"), (3, "c")]; + let other = vec![(2, "b"), (3, "c_new")]; + extend_sorted_vec(&mut target, &other); + assert_eq!(target, vec![(1, "a"), (2, "b"), (3, "c_new")]); + } +} diff --git a/crates/trie/db/src/hashed_cursor.rs b/crates/trie/db/src/hashed_cursor.rs index 4fe3d57429..10a1fd8363 100644 --- a/crates/trie/db/src/hashed_cursor.rs +++ b/crates/trie/db/src/hashed_cursor.rs @@ -69,6 +69,10 @@ where fn next(&mut self) -> Result, DatabaseError> { self.0.next() } + + fn reset(&mut self) { + // Database cursors are stateless, no reset needed + } } /// The structure wrapping a database cursor for hashed storage and @@ -102,6 +106,10 @@ where fn next(&mut self) -> Result, DatabaseError> { Ok(self.cursor.next_dup_val()?.map(|e| (e.key, e.value))) } + + fn reset(&mut self) { + // Database cursors are stateless, no reset needed + } } impl HashedStorageCursor for DatabaseHashedStorageCursor @@ -111,4 +119,8 @@ where fn is_storage_empty(&mut self) -> Result { Ok(self.cursor.seek_exact(self.hashed_address)?.is_none()) } + + fn set_hashed_address(&mut self, hashed_address: B256) { + self.hashed_address = hashed_address; + } } diff --git a/crates/trie/db/src/proof.rs b/crates/trie/db/src/proof.rs index 8b338001fa..597fe7a039 100644 --- 
a/crates/trie/db/src/proof.rs +++ b/crates/trie/db/src/proof.rs @@ -11,13 +11,16 @@ use reth_trie::{ }; /// Extends [`Proof`] with operations specific for working with a database transaction. -pub trait DatabaseProof<'a, TX> { - /// Create a new [Proof] from database transaction. - fn from_tx(tx: &'a TX) -> Self; +pub trait DatabaseProof<'a> { + /// Associated type for the database transaction. + type Tx; + + /// Create a new [`Proof`] instance from database transaction. + fn from_tx(tx: &'a Self::Tx) -> Self; /// Generates the state proof for target account based on [`TrieInput`]. fn overlay_account_proof( - tx: &'a TX, + &self, input: TrieInput, address: Address, slots: &[B256], @@ -25,59 +28,49 @@ pub trait DatabaseProof<'a, TX> { /// Generates the state [`MultiProof`] for target hashed account and storage keys. fn overlay_multiproof( - tx: &'a TX, + &self, input: TrieInput, targets: MultiProofTargets, ) -> Result; } -impl<'a, TX: DbTx> DatabaseProof<'a, TX> +impl<'a, TX: DbTx> DatabaseProof<'a> for Proof, DatabaseHashedCursorFactory<&'a TX>> { - /// Create a new [Proof] instance from database transaction. - fn from_tx(tx: &'a TX) -> Self { + type Tx = TX; + + fn from_tx(tx: &'a Self::Tx) -> Self { Self::new(DatabaseTrieCursorFactory::new(tx), DatabaseHashedCursorFactory::new(tx)) } - fn overlay_account_proof( - tx: &'a TX, + &self, input: TrieInput, address: Address, slots: &[B256], ) -> Result { let nodes_sorted = input.nodes.into_sorted(); let state_sorted = input.state.into_sorted(); - Self::from_tx(tx) - .with_trie_cursor_factory(InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(tx), - &nodes_sorted, - )) - .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(tx), - &state_sorted, - )) - .with_prefix_sets_mut(input.prefix_sets) - .account_proof(address, slots) + Proof::new( + InMemoryTrieCursorFactory::new(self.trie_cursor_factory().clone(), &nodes_sorted), + HashedPostStateCursorFactory::new(self.hashed_cursor_factory().clone(), &state_sorted), + ) + .with_prefix_sets_mut(input.prefix_sets) + .account_proof(address, slots) } fn overlay_multiproof( - tx: &'a TX, + &self, input: TrieInput, targets: MultiProofTargets, ) -> Result { let nodes_sorted = input.nodes.into_sorted(); let state_sorted = input.state.into_sorted(); - Self::from_tx(tx) - .with_trie_cursor_factory(InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(tx), - &nodes_sorted, - )) - .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(tx), - &state_sorted, - )) - .with_prefix_sets_mut(input.prefix_sets) - .multiproof(targets) + Proof::new( + InMemoryTrieCursorFactory::new(self.trie_cursor_factory().clone(), &nodes_sorted), + HashedPostStateCursorFactory::new(self.hashed_cursor_factory().clone(), &state_sorted), + ) + .with_prefix_sets_mut(input.prefix_sets) + .multiproof(targets) } } @@ -104,7 +97,11 @@ pub trait DatabaseStorageProof<'a, TX> { } impl<'a, TX: DbTx> DatabaseStorageProof<'a, TX> - for StorageProof, DatabaseHashedCursorFactory<&'a TX>> + for StorageProof< + 'static, + DatabaseTrieCursorFactory<&'a TX>, + DatabaseHashedCursorFactory<&'a TX>, + > { fn from_tx(tx: &'a TX, address: Address) -> Self { Self::new(DatabaseTrieCursorFactory::new(tx), DatabaseHashedCursorFactory::new(tx), address) @@ -122,13 +119,13 @@ impl<'a, TX: DbTx> DatabaseStorageProof<'a, TX> Default::default(), HashMap::from_iter([(hashed_address, storage.into_sorted())]), ); - Self::from_tx(tx, address) - 
.with_hashed_cursor_factory(HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(tx), - &state_sorted, - )) - .with_prefix_set_mut(prefix_set) - .storage_proof(slot) + StorageProof::new( + DatabaseTrieCursorFactory::new(tx), + HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), &state_sorted), + address, + ) + .with_prefix_set_mut(prefix_set) + .storage_proof(slot) } fn overlay_storage_multiproof( @@ -144,12 +141,12 @@ impl<'a, TX: DbTx> DatabaseStorageProof<'a, TX> Default::default(), HashMap::from_iter([(hashed_address, storage.into_sorted())]), ); - Self::from_tx(tx, address) - .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(tx), - &state_sorted, - )) - .with_prefix_set_mut(prefix_set) - .storage_multiproof(targets) + StorageProof::new( + DatabaseTrieCursorFactory::new(tx), + HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), &state_sorted), + address, + ) + .with_prefix_set_mut(prefix_set) + .storage_multiproof(targets) } } diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs index 6d37c5f341..ecd50a18f7 100644 --- a/crates/trie/db/src/state.rs +++ b/crates/trie/db/src/state.rs @@ -1,8 +1,5 @@ use crate::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory, PrefixSetLoader}; -use alloy_primitives::{ - map::{AddressMap, B256Map}, - BlockNumber, B256, U256, -}; +use alloy_primitives::{map::B256Map, BlockNumber, B256}; use reth_db_api::{ cursor::DbCursorRO, models::{AccountBeforeTx, BlockNumberAddress, BlockNumberAddressRange}, @@ -13,11 +10,11 @@ use reth_db_api::{ use reth_execution_errors::StateRootError; use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, trie_cursor::InMemoryTrieCursorFactory, - updates::TrieUpdates, HashedPostState, HashedStorage, KeccakKeyHasher, KeyHasher, StateRoot, - StateRootProgress, TrieInput, + updates::TrieUpdates, HashedPostStateSorted, HashedStorageSorted, KeccakKeyHasher, KeyHasher, + StateRoot, StateRootProgress, TrieInputSorted, }; use std::{ - collections::HashMap, + collections::HashSet, ops::{RangeBounds, RangeInclusive}, }; use tracing::{debug, instrument}; @@ -73,7 +70,7 @@ pub trait DatabaseStateRoot<'a, TX>: Sized { range: RangeInclusive, ) -> Result; - /// Calculate the state root for this [`HashedPostState`]. + /// Calculate the state root for this [`HashedPostStateSorted`]. /// Internally, this method retrieves prefixsets and uses them /// to calculate incremental state root. /// @@ -99,40 +96,43 @@ pub trait DatabaseStateRoot<'a, TX>: Sized { /// /// // Calculate the state root /// let tx = db.tx().expect("failed to create transaction"); - /// let state_root = StateRoot::overlay_root(&tx, hashed_state); + /// let state_root = StateRoot::overlay_root(&tx, &hashed_state.into_sorted()); /// ``` /// /// # Returns /// - /// The state root for this [`HashedPostState`]. - fn overlay_root(tx: &'a TX, post_state: HashedPostState) -> Result; + /// The state root for this [`HashedPostStateSorted`]. + fn overlay_root(tx: &'a TX, post_state: &HashedPostStateSorted) + -> Result; - /// Calculates the state root for this [`HashedPostState`] and returns it alongside trie + /// Calculates the state root for this [`HashedPostStateSorted`] and returns it alongside trie /// updates. See [`Self::overlay_root`] for more info. 
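With the sorted overlay types above, callers can sort the overlay once and reuse it; a hedged usage sketch of the reworked `overlay_root_from_nodes` entry point (the crate re-exports and the `DbTx` bound are assumptions based on the imports shown in this diff):

use alloy_primitives::B256;
use reth_db_api::transaction::DbTx;
use reth_execution_errors::StateRootError;
use reth_trie::{StateRoot, TrieInput, TrieInputSorted};
use reth_trie_db::DatabaseStateRoot;

fn overlay_root_example<TX: DbTx>(tx: &TX, input: TrieInput) -> Result<B256, StateRootError> {
    // Sort the in-memory overlay once, then hand the sorted input to the root computation.
    let sorted = TrieInputSorted::from_unsorted(input);
    StateRoot::overlay_root_from_nodes(tx, sorted)
}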
fn overlay_root_with_updates( tx: &'a TX, - post_state: HashedPostState, + post_state: &HashedPostStateSorted, ) -> Result<(B256, TrieUpdates), StateRootError>; - /// Calculates the state root for provided [`HashedPostState`] using cached intermediate nodes. - fn overlay_root_from_nodes(tx: &'a TX, input: TrieInput) -> Result; + /// Calculates the state root for provided [`HashedPostStateSorted`] using cached intermediate + /// nodes. + fn overlay_root_from_nodes(tx: &'a TX, input: TrieInputSorted) -> Result; - /// Calculates the state root and trie updates for provided [`HashedPostState`] using + /// Calculates the state root and trie updates for provided [`HashedPostStateSorted`] using /// cached intermediate nodes. fn overlay_root_from_nodes_with_updates( tx: &'a TX, - input: TrieInput, + input: TrieInputSorted, ) -> Result<(B256, TrieUpdates), StateRootError>; } -/// Extends [`HashedPostState`] with operations specific for working with a database transaction. +/// Extends [`HashedPostStateSorted`] with operations specific for working with a database +/// transaction. pub trait DatabaseHashedPostState: Sized { - /// Initializes [`HashedPostState`] from reverts. Iterates over state reverts in the specified - /// range and aggregates them into hashed state in reverse. + /// Initializes [`HashedPostStateSorted`] from reverts. Iterates over state reverts in the + /// specified range and aggregates them into sorted hashed state. fn from_reverts( tx: &TX, range: impl RangeBounds, - ) -> Result; + ) -> Result; } impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> @@ -174,12 +174,14 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> Self::incremental_root_calculator(tx, range)?.root_with_progress() } - fn overlay_root(tx: &'a TX, post_state: HashedPostState) -> Result { + fn overlay_root( + tx: &'a TX, + post_state: &HashedPostStateSorted, + ) -> Result { let prefix_sets = post_state.construct_prefix_sets().freeze(); - let state_sorted = post_state.into_sorted(); StateRoot::new( DatabaseTrieCursorFactory::new(tx), - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), &state_sorted), + HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), post_state), ) .with_prefix_sets(prefix_sets) .root() @@ -187,24 +189,27 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> fn overlay_root_with_updates( tx: &'a TX, - post_state: HashedPostState, + post_state: &HashedPostStateSorted, ) -> Result<(B256, TrieUpdates), StateRootError> { let prefix_sets = post_state.construct_prefix_sets().freeze(); - let state_sorted = post_state.into_sorted(); StateRoot::new( DatabaseTrieCursorFactory::new(tx), - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), &state_sorted), + HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), post_state), ) .with_prefix_sets(prefix_sets) .root_with_updates() } - fn overlay_root_from_nodes(tx: &'a TX, input: TrieInput) -> Result { - let state_sorted = input.state.into_sorted(); - let nodes_sorted = input.nodes.into_sorted(); + fn overlay_root_from_nodes(tx: &'a TX, input: TrieInputSorted) -> Result { StateRoot::new( - InMemoryTrieCursorFactory::new(DatabaseTrieCursorFactory::new(tx), &nodes_sorted), - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), &state_sorted), + InMemoryTrieCursorFactory::new( + DatabaseTrieCursorFactory::new(tx), + input.nodes.as_ref(), + ), + HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(tx), + input.state.as_ref(), + ), ) 
.with_prefix_sets(input.prefix_sets.freeze()) .root() @@ -212,77 +217,123 @@ impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX> fn overlay_root_from_nodes_with_updates( tx: &'a TX, - input: TrieInput, + input: TrieInputSorted, ) -> Result<(B256, TrieUpdates), StateRootError> { - let state_sorted = input.state.into_sorted(); - let nodes_sorted = input.nodes.into_sorted(); StateRoot::new( - InMemoryTrieCursorFactory::new(DatabaseTrieCursorFactory::new(tx), &nodes_sorted), - HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), &state_sorted), + InMemoryTrieCursorFactory::new( + DatabaseTrieCursorFactory::new(tx), + input.nodes.as_ref(), + ), + HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(tx), + input.state.as_ref(), + ), ) .with_prefix_sets(input.prefix_sets.freeze()) .root_with_updates() } } -impl DatabaseHashedPostState for HashedPostState { +impl DatabaseHashedPostState for HashedPostStateSorted { + /// Builds a sorted hashed post-state from reverts. + /// + /// Reads MDBX data directly into Vecs, using `HashSet`s only to track seen keys. + /// This avoids intermediate `HashMap` allocations since MDBX data is already sorted. + /// + /// - Reads the first occurrence of each changed account/storage slot in the range. + /// - Hashes keys and returns them already ordered for trie iteration. #[instrument(target = "trie::db", skip(tx), fields(range))] fn from_reverts( tx: &TX, range: impl RangeBounds, ) -> Result { - // Iterate over account changesets and record value before first occurring account change. - let account_range = (range.start_bound(), range.end_bound()); // to avoid cloning - let mut accounts = HashMap::new(); + // Read accounts directly into Vec with HashSet to track seen keys. + // Only keep the first (oldest) occurrence of each account. + let mut accounts = Vec::new(); + let mut seen_accounts = HashSet::new(); + let account_range = (range.start_bound(), range.end_bound()); let mut account_changesets_cursor = tx.cursor_read::()?; + for entry in account_changesets_cursor.walk_range(account_range)? { let (_, AccountBeforeTx { address, info }) = entry?; - accounts.entry(address).or_insert(info); + if seen_accounts.insert(address) { + accounts.push((KH::hash_key(address), info)); + } } + accounts.sort_unstable_by_key(|(hash, _)| *hash); - // Iterate over storage changesets and record value before first occurring storage change. + // Read storages directly into B256Map> with HashSet to track seen keys. + // Only keep the first (oldest) occurrence of each (address, slot) pair. let storage_range: BlockNumberAddressRange = range.into(); - let mut storages = AddressMap::>::default(); + let mut storages = B256Map::>::default(); + let mut seen_storage_keys = HashSet::new(); let mut storage_changesets_cursor = tx.cursor_read::()?; + for entry in storage_changesets_cursor.walk_range(storage_range)? 
{ let (BlockNumberAddress((_, address)), storage) = entry?; - let account_storage = storages.entry(address).or_default(); - account_storage.entry(storage.key).or_insert(storage.value); + if seen_storage_keys.insert((address, storage.key)) { + let hashed_address = KH::hash_key(address); + storages + .entry(hashed_address) + .or_default() + .push((KH::hash_key(storage.key), storage.value)); + } } - let hashed_accounts = - accounts.into_iter().map(|(address, info)| (KH::hash_key(address), info)).collect(); - + // Sort storage slots and convert to HashedStorageSorted let hashed_storages = storages .into_iter() - .map(|(address, storage)| { - ( - KH::hash_key(address), - HashedStorage::from_iter( - // The `wiped` flag indicates only whether previous storage entries - // should be looked up in db or not. For reverts it's a noop since all - // wiped changes had been written as storage reverts. - false, - storage.into_iter().map(|(slot, value)| (KH::hash_key(slot), value)), - ), - ) + .map(|(address, mut slots)| { + slots.sort_unstable_by_key(|(slot, _)| *slot); + (address, HashedStorageSorted { storage_slots: slots, wiped: false }) }) .collect(); - Ok(Self { accounts: hashed_accounts, storages: hashed_storages }) + Ok(Self::new(accounts, hashed_storages)) } } #[cfg(test)] mod tests { use super::*; - use alloy_primitives::{hex, map::HashMap, Address, U256}; + use alloy_primitives::{hex, map::HashMap, Address, B256, U256}; use reth_db::test_utils::create_test_rw_db; - use reth_db_api::database::Database; - use reth_trie::KeccakKeyHasher; + use reth_db_api::{ + database::Database, + models::{AccountBeforeTx, BlockNumberAddress}, + tables, + transaction::DbTxMut, + }; + use reth_primitives_traits::{Account, StorageEntry}; + use reth_trie::{HashedPostState, HashedStorage, KeccakKeyHasher}; use revm::state::AccountInfo; use revm_database::BundleState; + /// Overlay root calculation works with sorted state. + #[test] + fn overlay_root_with_sorted_state() { + let db = create_test_rw_db(); + let tx = db.tx().expect("failed to create transaction"); + + let mut hashed_state = HashedPostState::default(); + hashed_state.accounts.insert( + B256::from(U256::from(1)), + Some(Account { nonce: 1, balance: U256::from(10), bytecode_hash: None }), + ); + hashed_state.accounts.insert(B256::from(U256::from(2)), None); + hashed_state.storages.insert( + B256::from(U256::from(1)), + HashedStorage::from_iter(false, [(B256::from(U256::from(3)), U256::from(30))]), + ); + + let sorted = hashed_state.into_sorted(); + let overlay_root = StateRoot::overlay_root(&tx, &sorted).unwrap(); + + // Just verify it produces a valid root + assert!(!overlay_root.is_zero()); + } + + /// Builds hashed state from a bundle and checks the known state root. #[test] fn from_bundle_state_with_rayon() { let address1 = Address::with_last_byte(1); @@ -308,8 +359,102 @@ mod tests { let db = create_test_rw_db(); let tx = db.tx().expect("failed to create transaction"); assert_eq!( - StateRoot::overlay_root(&tx, post_state).unwrap(), + StateRoot::overlay_root(&tx, &post_state.into_sorted()).unwrap(), hex!("b464525710cafcf5d4044ac85b72c08b1e76231b8d91f288fe438cc41d8eaafd") ); } + + /// Verifies `from_reverts` keeps first occurrence per key and preserves ordering guarantees. 
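`from_reverts` above keeps only the oldest value per key by relying on `HashSet::insert`, which returns `true` only for the first occurrence. A minimal standalone sketch of that pattern:

use std::collections::HashSet;

fn main() {
    // Changesets are walked oldest-first; later duplicates must be ignored.
    let changes = [("alice", 1u64), ("alice", 2), ("bob", 7)];

    let mut seen = HashSet::new();
    let mut firsts = Vec::new();
    for (key, value) in changes {
        // `insert` returns false once the key has already been recorded.
        if seen.insert(key) {
            firsts.push((key, value));
        }
    }
    assert_eq!(firsts, vec![("alice", 1), ("bob", 7)]);
}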
+ #[test] + fn from_reverts_keeps_first_occurrence_and_ordering() { + let db = create_test_rw_db(); + let tx = db.tx_mut().expect("failed to create rw tx"); + + let address1 = Address::with_last_byte(1); + let address2 = Address::with_last_byte(2); + let slot1 = B256::from(U256::from(11)); + let slot2 = B256::from(U256::from(22)); + + // Account changesets: only first occurrence per address should be kept. + tx.put::( + 1, + AccountBeforeTx { + address: address1, + info: Some(Account { nonce: 1, ..Default::default() }), + }, + ) + .unwrap(); + tx.put::( + 2, + AccountBeforeTx { + address: address1, + info: Some(Account { nonce: 2, ..Default::default() }), + }, + ) + .unwrap(); + tx.put::(3, AccountBeforeTx { address: address2, info: None }) + .unwrap(); + + // Storage changesets: only first occurrence per slot should be kept, and slots sorted. + tx.put::( + BlockNumberAddress((1, address1)), + StorageEntry { key: slot2, value: U256::from(200) }, + ) + .unwrap(); + tx.put::( + BlockNumberAddress((2, address1)), + StorageEntry { key: slot1, value: U256::from(100) }, + ) + .unwrap(); + tx.put::( + BlockNumberAddress((3, address1)), + StorageEntry { key: slot1, value: U256::from(999) }, // should be ignored + ) + .unwrap(); + + tx.commit().unwrap(); + let tx = db.tx().expect("failed to create ro tx"); + + let sorted = HashedPostStateSorted::from_reverts::(&tx, 1..=3).unwrap(); + + // Verify first occurrences were kept (nonce 1, not 2) + assert_eq!(sorted.accounts.len(), 2); + let hashed_addr1 = KeccakKeyHasher::hash_key(address1); + let account1 = sorted.accounts.iter().find(|(addr, _)| *addr == hashed_addr1).unwrap(); + assert_eq!(account1.1.unwrap().nonce, 1); + + // Ordering guarantees - accounts sorted by hashed address + assert!(sorted.accounts.windows(2).all(|w| w[0].0 <= w[1].0)); + + // Ordering guarantees - storage slots sorted by hashed slot + for storage in sorted.storages.values() { + assert!(storage.storage_slots.windows(2).all(|w| w[0].0 <= w[1].0)); + } + } + + /// Empty block range returns empty state. + #[test] + fn from_reverts_empty_range() { + let db = create_test_rw_db(); + + // Insert data outside the query range + db.update(|tx| { + tx.put::( + 100, + AccountBeforeTx { + address: Address::with_last_byte(1), + info: Some(Account { nonce: 1, ..Default::default() }), + }, + ) + .unwrap(); + }) + .unwrap(); + + let tx = db.tx().unwrap(); + + // Query a range with no data + let sorted = HashedPostStateSorted::from_reverts::(&tx, 1..=10).unwrap(); + assert!(sorted.accounts.is_empty()); + assert!(sorted.storages.is_empty()); + } } diff --git a/crates/trie/db/src/trie_cursor.rs b/crates/trie/db/src/trie_cursor.rs index d05c3fd92d..7b9c402545 100644 --- a/crates/trie/db/src/trie_cursor.rs +++ b/crates/trie/db/src/trie_cursor.rs @@ -6,7 +6,7 @@ use reth_db_api::{ DatabaseError, }; use reth_trie::{ - trie_cursor::{TrieCursor, TrieCursorFactory}, + trie_cursor::{TrieCursor, TrieCursorFactory, TrieStorageCursor}, updates::StorageTrieUpdatesSorted, BranchNodeCompact, Nibbles, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey, }; @@ -91,6 +91,10 @@ where fn current(&mut self) -> Result, DatabaseError> { Ok(self.0.current()?.map(|(k, _)| k.0)) } + + fn reset(&mut self) { + // No-op for database cursors + } } /// A cursor over the storage tries stored in the database. 
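The `reset` and `set_hashed_address` hooks added to the database cursors in this diff let callers retarget a long-lived cursor instead of constructing a new one per account. A simplified model of that idea; the trait and struct here are hypothetical stand-ins, not the real cursor API:

use alloy_primitives::B256;

trait RetargetableCursor {
    /// Clear any in-memory iteration state; a no-op for database-backed cursors.
    fn reset(&mut self);
    /// Point the cursor at a different account's storage trie.
    fn set_hashed_address(&mut self, hashed_address: B256);
}

struct DbBackedCursor {
    hashed_address: B256,
}

impl RetargetableCursor for DbBackedCursor {
    fn reset(&mut self) {
        // Database cursors seek on demand, so there is nothing to clear.
    }

    fn set_hashed_address(&mut self, hashed_address: B256) {
        self.hashed_address = hashed_address;
    }
}

fn main() {
    let mut cursor = DbBackedCursor { hashed_address: B256::ZERO };
    // Reuse the same cursor for a second account instead of rebuilding it.
    cursor.reset();
    cursor.set_hashed_address(B256::with_last_byte(1));
    assert_eq!(cursor.hashed_address, B256::with_last_byte(1));
}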
@@ -190,6 +194,19 @@ where fn current(&mut self) -> Result, DatabaseError> { Ok(self.cursor.current()?.map(|(_, v)| v.nibbles.0)) } + + fn reset(&mut self) { + // No-op for database cursors + } +} + +impl TrieStorageCursor for DatabaseStorageTrieCursor +where + C: DbCursorRO + DbDupCursorRO + Send + Sync, +{ + fn set_hashed_address(&mut self, hashed_address: B256) { + self.hashed_address = hashed_address; + } } #[cfg(test)] diff --git a/crates/trie/db/src/witness.rs b/crates/trie/db/src/witness.rs index c5995e4d98..afcdb67670 100644 --- a/crates/trie/db/src/witness.rs +++ b/crates/trie/db/src/witness.rs @@ -34,17 +34,12 @@ impl<'a, TX: DbTx> DatabaseTrieWitness<'a, TX> ) -> Result, TrieWitnessError> { let nodes_sorted = input.nodes.into_sorted(); let state_sorted = input.state.into_sorted(); - Self::from_tx(tx) - .with_trie_cursor_factory(InMemoryTrieCursorFactory::new( - DatabaseTrieCursorFactory::new(tx), - &nodes_sorted, - )) - .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(tx), - &state_sorted, - )) - .with_prefix_sets_mut(input.prefix_sets) - .always_include_root_node() - .compute(target) + TrieWitness::new( + InMemoryTrieCursorFactory::new(DatabaseTrieCursorFactory::new(tx), &nodes_sorted), + HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), &state_sorted), + ) + .with_prefix_sets_mut(input.prefix_sets) + .always_include_root_node() + .compute(target) } } diff --git a/crates/trie/db/tests/post_state.rs b/crates/trie/db/tests/post_state.rs index ae59bc871e..d3f8fe3648 100644 --- a/crates/trie/db/tests/post_state.rs +++ b/crates/trie/db/tests/post_state.rs @@ -208,6 +208,18 @@ fn fuzz_hashed_account_cursor() { ); } +/// Tests `is_storage_empty()` correctly distinguishes wiped storage from storage with zero values. +/// +/// Key distinction: +/// - `wiped = true`: Storage cleared/deleted → empty +/// - `wiped = false` with zeros: Explicit zero values → not empty +/// +/// Test cases: +/// 1. No entries → empty +/// 2. Non-zero values → not empty +/// 3. Some zero values, not wiped → not empty +/// 4. Wiped + zero post-state → empty +/// 5. 
Wiped + non-zero post-state → not empty #[test] fn storage_is_empty() { let address = B256::random(); @@ -244,6 +256,23 @@ fn storage_is_empty() { assert!(!cursor.is_storage_empty().unwrap()); } + // Some zero values, but not wiped + { + let wiped = false; + let mut hashed_storage = HashedStorage::new(wiped); + hashed_storage.storage.insert(B256::with_last_byte(0), U256::ZERO); + + let mut hashed_post_state = HashedPostState::default(); + hashed_post_state.storages.insert(address, hashed_storage); + + let sorted = hashed_post_state.into_sorted(); + let tx = db.tx().unwrap(); + let factory = + HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); + let mut cursor = factory.hashed_storage_cursor(address).unwrap(); + assert!(!cursor.is_storage_empty().unwrap()); + } + // wiped storage, must be empty { let wiped = true; @@ -488,3 +517,80 @@ fn fuzz_hashed_storage_cursor() { assert_storage_cursor_order(&factory, expected.into_iter()); }); } + +#[test] +fn all_storage_slots_deleted_not_wiped_exact_keys() { + // This test reproduces an edge case where: + // - wiped = false + // - All post state entries are deletions (None values) + // - Database has corresponding entries + // - Expected: NO leaves should be returned (all deleted) + let address = B256::random(); + + // Generate 42 storage entries with keys distributed across the keyspace + let db_entries: Vec<(B256, u64)> = (0..42) + .map(|i| { + let mut key_bytes = [0u8; 32]; + key_bytes[0] = (i * 6) as u8; // Spread keys across keyspace + key_bytes[31] = i as u8; // Ensure uniqueness + (B256::from(key_bytes), i as u64 + 1) + }) + .collect(); + + let db = create_test_rw_db(); + db.update(|tx| { + for (key, value) in &db_entries { + tx.put::( + address, + StorageEntry { key: *key, value: U256::from(*value) }, + ) + .unwrap(); + } + }) + .unwrap(); + + // Create post state with same keys but all Zero values (deletions) + let mut hashed_storage = HashedStorage::new(false); + for (key, _) in &db_entries { + hashed_storage.storage.insert(*key, U256::ZERO); // Zero value = deletion + } + + let mut hashed_post_state = HashedPostState::default(); + hashed_post_state.storages.insert(address, hashed_storage); + + let sorted = hashed_post_state.into_sorted(); + let tx = db.tx().unwrap(); + let factory = HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(&tx), &sorted); + + let mut cursor = factory.hashed_storage_cursor(address).unwrap(); + + // Seek to beginning should return None (all slots are deleted) + let result = cursor.seek(B256::ZERO).unwrap(); + assert_eq!( + result, None, + "Expected no entries when all slots are deleted, but got {:?}", + result + ); + + // Test seek operations at various positions - all should return None + // Pattern: before all, early range, mid-early range, mid-late range, late range, near end + let seek_keys = vec![ + B256::ZERO, // Before all entries + B256::right_padding_from(&[0x5d]), + B256::right_padding_from(&[0x5e]), + B256::right_padding_from(&[0x5f]), + B256::right_padding_from(&[0xc2]), + B256::right_padding_from(&[0xc5]), + B256::right_padding_from(&[0xc9]), + B256::right_padding_from(&[0xf0]), + ]; + + for seek_key in seek_keys { + let result = cursor.seek(seek_key).unwrap(); + assert_eq!(result, None, "Expected None when seeking to {} but got {:?}", seek_key, result); + } + + // next() should also always return None + let result = cursor.next().unwrap(); + assert_eq!(result, None, "Expected None from next() but got {:?}", result); +} diff --git 
a/crates/trie/db/tests/proof.rs b/crates/trie/db/tests/proof.rs index 401ba07b22..402f0cabff 100644 --- a/crates/trie/db/tests/proof.rs +++ b/crates/trie/db/tests/proof.rs @@ -86,7 +86,8 @@ fn testspec_proofs() { let provider = factory.provider().unwrap(); for (target, expected_proof) in data { let target = Address::from_str(target).unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let account_proof = proof.account_proof(target, &[]).unwrap(); similar_asserts::assert_eq!( account_proof.proof, expected_proof, @@ -106,7 +107,8 @@ fn testspec_empty_storage_proof() { let slots = Vec::from([B256::with_last_byte(1), B256::with_last_byte(3)]); let provider = factory.provider().unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &slots).unwrap(); + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let account_proof = proof.account_proof(target, &slots).unwrap(); assert_eq!(account_proof.storage_root, EMPTY_ROOT_HASH, "expected empty storage root"); assert_eq!(slots.len(), account_proof.storage_proofs.len()); @@ -141,7 +143,8 @@ fn mainnet_genesis_account_proof() { ]); let provider = factory.provider().unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let account_proof = proof.account_proof(target, &[]).unwrap(); similar_asserts::assert_eq!(account_proof.proof, expected_account_proof); assert_eq!(account_proof.verify(root), Ok(())); } @@ -164,7 +167,8 @@ fn mainnet_genesis_account_proof_nonexistent() { ]); let provider = factory.provider().unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let account_proof = proof.account_proof(target, &[]).unwrap(); similar_asserts::assert_eq!(account_proof.proof, expected_account_proof); assert_eq!(account_proof.verify(root), Ok(())); } @@ -259,7 +263,8 @@ fn holesky_deposit_contract_proof() { }; let provider = factory.provider().unwrap(); - let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &slots).unwrap(); + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let account_proof = proof.account_proof(target, &slots).unwrap(); similar_asserts::assert_eq!(account_proof, expected); assert_eq!(account_proof.verify(root), Ok(())); } diff --git a/crates/trie/db/tests/witness.rs b/crates/trie/db/tests/witness.rs index 5dfa1c3e4a..14457fccc6 100644 --- a/crates/trie/db/tests/witness.rs +++ b/crates/trie/db/tests/witness.rs @@ -41,7 +41,8 @@ fn includes_empty_node_preimage() { provider.insert_account_for_hashing([(address, Some(Account::default()))]).unwrap(); let state_root = StateRoot::from_tx(provider.tx_ref()).root().unwrap(); - let multiproof = Proof::from_tx(provider.tx_ref()) + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let multiproof = proof .multiproof(MultiProofTargets::from_iter([( hashed_address, HashSet::from_iter([hashed_slot]), @@ -82,7 +83,8 @@ fn includes_nodes_for_destroyed_storage_nodes() { .unwrap(); let state_root = StateRoot::from_tx(provider.tx_ref()).root().unwrap(); - let multiproof = Proof::from_tx(provider.tx_ref()) + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let multiproof = proof .multiproof(MultiProofTargets::from_iter([( hashed_address, HashSet::from_iter([hashed_slot]), @@ -130,7 
+132,8 @@ fn correctly_decodes_branch_node_values() { .unwrap(); let state_root = StateRoot::from_tx(provider.tx_ref()).root().unwrap(); - let multiproof = Proof::from_tx(provider.tx_ref()) + let proof = as DatabaseProof>::from_tx(provider.tx_ref()); + let multiproof = proof .multiproof(MultiProofTargets::from_iter([( hashed_address, HashSet::from_iter([hashed_slot1, hashed_slot2]), diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index 09f5e56e77..433c13fb08 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -329,7 +329,7 @@ mod tests { let rt = Runtime::new().unwrap(); let factory = reth_provider::providers::OverlayStateProviderFactory::new(factory); - let task_ctx = ProofTaskCtx::new(factory, Default::default()); + let task_ctx = ProofTaskCtx::new(factory); let proof_worker_handle = ProofWorkerHandle::new(rt.handle().clone(), task_ctx, 1, 1); let parallel_result = diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 7e453cbc7c..58dc99fc37 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -42,14 +42,14 @@ use alloy_rlp::{BufMut, Encodable}; use crossbeam_channel::{unbounded, Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; use dashmap::DashMap; use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind}; -use reth_provider::{DatabaseProviderROFactory, ProviderError}; +use reth_provider::{DatabaseProviderROFactory, ProviderError, ProviderResult}; use reth_storage_errors::db::DatabaseError; use reth_trie::{ - hashed_cursor::HashedCursorFactory, + hashed_cursor::{HashedCursorFactory, HashedCursorMetricsCache, InstrumentedHashedCursor}, node_iter::{TrieElement, TrieNodeIter}, - prefix_set::{TriePrefixSets, TriePrefixSetsMut}, + prefix_set::TriePrefixSets, proof::{ProofBlindedAccountProvider, ProofBlindedStorageProvider, StorageProof}, - trie_cursor::TrieCursorFactory, + trie_cursor::{InstrumentedTrieCursor, TrieCursorFactory, TrieCursorMetricsCache}, walker::TrieWalker, DecodedMultiProof, DecodedStorageMultiProof, HashBuilder, HashedPostState, MultiProofTargets, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE, @@ -72,934 +72,13 @@ use tokio::runtime::Handle; use tracing::{debug, debug_span, error, trace}; #[cfg(feature = "metrics")] -use crate::proof_task_metrics::ProofTaskTrieMetrics; +use crate::proof_task_metrics::{ + ProofTaskCursorMetrics, ProofTaskCursorMetricsCache, ProofTaskTrieMetrics, +}; type StorageProofResult = Result; type TrieNodeProviderResult = Result, SparseTrieError>; -/// Result of a proof calculation, which can be either an account multiproof or a storage proof. -#[derive(Debug)] -pub enum ProofResult { - /// Account multiproof with statistics - AccountMultiproof { - /// The account multiproof - proof: DecodedMultiProof, - /// Statistics collected during proof computation - stats: ParallelTrieStats, - }, - /// Storage proof for a specific account - StorageProof { - /// The hashed address this storage proof belongs to - hashed_address: B256, - /// The storage multiproof - proof: DecodedStorageMultiProof, - }, -} - -impl ProofResult { - /// Convert this proof result into a `DecodedMultiProof`. - /// - /// For account multiproofs, returns the multiproof directly (discarding stats). - /// For storage proofs, wraps the storage proof into a minimal multiproof. 
- pub fn into_multiproof(self) -> DecodedMultiProof { - match self { - Self::AccountMultiproof { proof, stats: _ } => proof, - Self::StorageProof { hashed_address, proof } => { - DecodedMultiProof::from_storage_proof(hashed_address, proof) - } - } - } -} - -/// Channel used by worker threads to deliver `ProofResultMessage` items back to -/// `MultiProofTask`. -/// -/// Workers use this sender to deliver proof results directly to `MultiProofTask`. -pub type ProofResultSender = CrossbeamSender; - -/// Message containing a completed proof result with metadata for direct delivery to -/// `MultiProofTask`. -/// -/// This type enables workers to send proof results directly to the `MultiProofTask` event loop. -#[derive(Debug)] -pub struct ProofResultMessage { - /// Sequence number for ordering proofs - pub sequence_number: u64, - /// The proof calculation result (either account multiproof or storage proof) - pub result: Result, - /// Time taken for the entire proof calculation (from dispatch to completion) - pub elapsed: Duration, - /// Original state update that triggered this proof - pub state: HashedPostState, -} - -/// Context for sending proof calculation results back to `MultiProofTask`. -/// -/// This struct contains all context needed to send and track proof calculation results. -/// Workers use this to deliver completed proofs back to the main event loop. -#[derive(Debug, Clone)] -pub struct ProofResultContext { - /// Channel sender for result delivery - pub sender: ProofResultSender, - /// Sequence number for proof ordering - pub sequence_number: u64, - /// Original state update that triggered this proof - pub state: HashedPostState, - /// Calculation start time for measuring elapsed duration - pub start_time: Instant, -} - -impl ProofResultContext { - /// Creates a new proof result context. - pub const fn new( - sender: ProofResultSender, - sequence_number: u64, - state: HashedPostState, - start_time: Instant, - ) -> Self { - Self { sender, sequence_number, state, start_time } - } -} - -/// Internal message for storage workers. -#[derive(Debug)] -enum StorageWorkerJob { - /// Storage proof computation request - StorageProof { - /// Storage proof input parameters - input: StorageProofInput, - /// Context for sending the proof result. - proof_result_sender: ProofResultContext, - }, - /// Blinded storage node retrieval request - BlindedStorageNode { - /// Target account - account: B256, - /// Path to the storage node - path: Nibbles, - /// Channel to send result back to original caller - result_sender: Sender, - }, -} - -/// Worker loop for storage trie operations. -/// -/// # Lifecycle -/// -/// Each worker: -/// 1. Receives `StorageWorkerJob` from crossbeam unbounded channel -/// 2. Computes result using its dedicated long-lived transaction -/// 3. Sends result directly to original caller via `std::mpsc` -/// 4. Repeats until channel closes (graceful shutdown) -/// -/// # Transaction Reuse -/// -/// Reuses the same transaction and cursor factories across multiple operations -/// to avoid transaction creation and cursor factory setup overhead. -/// -/// # Panic Safety -/// -/// If this function panics, the worker thread terminates but other workers -/// continue operating and the system degrades gracefully. -/// -/// # Shutdown -/// -/// Worker shuts down when the crossbeam channel closes (all senders dropped). 
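The worker loops removed below advertised availability with an atomic counter: mark available after initialization, busy around each job, and exit once the job channel closes. A simplified standalone model of that accounting, using std channels in place of crossbeam:

use std::sync::{
    atomic::{AtomicUsize, Ordering},
    mpsc, Arc,
};
use std::thread;

fn main() {
    let (job_tx, job_rx) = mpsc::channel::<u64>();
    let available = Arc::new(AtomicUsize::new(0));

    let worker_available = Arc::clone(&available);
    let worker = thread::spawn(move || {
        worker_available.fetch_add(1, Ordering::Relaxed); // ready after initialization
        while let Ok(job) = job_rx.recv() {
            worker_available.fetch_sub(1, Ordering::Relaxed); // busy
            let _result = job * 2; // stand-in for the proof computation
            worker_available.fetch_add(1, Ordering::Relaxed); // idle again
        }
        // Channel closed: graceful shutdown.
    });

    job_tx.send(21).unwrap();
    drop(job_tx); // closing the channel ends the worker loop
    worker.join().unwrap();
    assert_eq!(available.load(Ordering::Relaxed), 1);
}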
-fn storage_worker_loop( - task_ctx: ProofTaskCtx, - work_rx: CrossbeamReceiver, - worker_id: usize, - available_workers: Arc, - #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, -) where - Factory: DatabaseProviderROFactory, -{ - // Create provider from factory - let provider = task_ctx - .factory - .database_provider_ro() - .expect("Storage worker failed to initialize: unable to create provider"); - let proof_tx = ProofTaskTx::new(provider, task_ctx.prefix_sets, worker_id); - - trace!( - target: "trie::proof_task", - worker_id, - "Storage worker started" - ); - - let mut storage_proofs_processed = 0u64; - let mut storage_nodes_processed = 0u64; - - // Initially mark this worker as available. - available_workers.fetch_add(1, Ordering::Relaxed); - - while let Ok(job) = work_rx.recv() { - // Mark worker as busy. - available_workers.fetch_sub(1, Ordering::Relaxed); - - match job { - StorageWorkerJob::StorageProof { input, proof_result_sender } => { - let hashed_address = input.hashed_address; - let ProofResultContext { sender, sequence_number: seq, state, start_time } = - proof_result_sender; - - trace!( - target: "trie::proof_task", - worker_id, - hashed_address = ?hashed_address, - prefix_set_len = input.prefix_set.len(), - target_slots_len = input.target_slots.len(), - "Processing storage proof" - ); - - let proof_start = Instant::now(); - let result = proof_tx.compute_storage_proof(input); - - let proof_elapsed = proof_start.elapsed(); - storage_proofs_processed += 1; - - let result_msg = result.map(|storage_proof| ProofResult::StorageProof { - hashed_address, - proof: storage_proof, - }); - - if sender - .send(ProofResultMessage { - sequence_number: seq, - result: result_msg, - elapsed: start_time.elapsed(), - state, - }) - .is_err() - { - trace!( - target: "trie::proof_task", - worker_id, - hashed_address = ?hashed_address, - storage_proofs_processed, - "Proof result receiver dropped, discarding result" - ); - } - - trace!( - target: "trie::proof_task", - worker_id, - hashed_address = ?hashed_address, - proof_time_us = proof_elapsed.as_micros(), - total_processed = storage_proofs_processed, - "Storage proof completed" - ); - - // Mark worker as available again. - available_workers.fetch_add(1, Ordering::Relaxed); - } - - StorageWorkerJob::BlindedStorageNode { account, path, result_sender } => { - trace!( - target: "trie::proof_task", - worker_id, - ?account, - ?path, - "Processing blinded storage node" - ); - - let storage_node_provider = ProofBlindedStorageProvider::new( - &proof_tx.provider, - &proof_tx.provider, - proof_tx.prefix_sets.clone(), - account, - ); - - let start = Instant::now(); - let result = storage_node_provider.trie_node(&path); - let elapsed = start.elapsed(); - - storage_nodes_processed += 1; - - if result_sender.send(result).is_err() { - trace!( - target: "trie::proof_task", - worker_id, - ?account, - ?path, - storage_nodes_processed, - "Blinded storage node receiver dropped, discarding result" - ); - } - - trace!( - target: "trie::proof_task", - worker_id, - ?account, - ?path, - elapsed_us = elapsed.as_micros(), - total_processed = storage_nodes_processed, - "Blinded storage node completed" - ); - - // Mark worker as available again. 
- available_workers.fetch_add(1, Ordering::Relaxed); - } - } - } - - trace!( - target: "trie::proof_task", - worker_id, - storage_proofs_processed, - storage_nodes_processed, - "Storage worker shutting down" - ); - - #[cfg(feature = "metrics")] - metrics.record_storage_nodes(storage_nodes_processed as usize); -} - -/// Worker loop for account trie operations. -/// -/// # Lifecycle -/// -/// Each worker initializes its providers, advertises availability, then loops: -/// take a job, mark busy, compute the proof, send the result, and mark available again. -/// The loop ends gracefully once the channel closes. -/// -/// # Transaction Reuse -/// -/// Reuses the same transaction and cursor factories across multiple operations -/// to avoid transaction creation and cursor factory setup overhead. -/// -/// # Panic Safety -/// -/// If this function panics, the worker thread terminates but other workers -/// continue operating and the system degrades gracefully. -/// -/// # Shutdown -/// -/// Worker shuts down when the crossbeam channel closes (all senders dropped). -fn account_worker_loop( - task_ctx: ProofTaskCtx, - work_rx: CrossbeamReceiver, - storage_work_tx: CrossbeamSender, - worker_id: usize, - available_workers: Arc, - #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, -) where - Factory: DatabaseProviderROFactory, -{ - // Create provider from factory - let provider = task_ctx - .factory - .database_provider_ro() - .expect("Account worker failed to initialize: unable to create provider"); - let proof_tx = ProofTaskTx::new(provider, task_ctx.prefix_sets, worker_id); - - trace!( - target: "trie::proof_task", - worker_id, - "Account worker started" - ); - - let mut account_proofs_processed = 0u64; - let mut account_nodes_processed = 0u64; - - // Count this worker as available only after successful initialization. - available_workers.fetch_add(1, Ordering::Relaxed); - - while let Ok(job) = work_rx.recv() { - // Mark worker as busy. 
- available_workers.fetch_sub(1, Ordering::Relaxed); - - match job { - AccountWorkerJob::AccountMultiproof { input } => { - let AccountMultiproofInput { - targets, - mut prefix_sets, - collect_branch_node_masks, - multi_added_removed_keys, - missed_leaves_storage_roots, - proof_result_sender: - ProofResultContext { - sender: result_tx, - sequence_number: seq, - state, - start_time: start, - }, - } = *input; - - let span = debug_span!( - target: "trie::proof_task", - "Account multiproof calculation", - targets = targets.len(), - worker_id, - ); - let _span_guard = span.enter(); - - trace!( - target: "trie::proof_task", - "Processing account multiproof" - ); - - let proof_start = Instant::now(); - - let mut tracker = ParallelTrieTracker::default(); - - let mut storage_prefix_sets = std::mem::take(&mut prefix_sets.storage_prefix_sets); - - let storage_root_targets_len = StorageRootTargets::count( - &prefix_sets.account_prefix_set, - &storage_prefix_sets, - ); - - tracker.set_precomputed_storage_roots(storage_root_targets_len as u64); - - let storage_proof_receivers = match dispatch_storage_proofs( - &storage_work_tx, - &targets, - &mut storage_prefix_sets, - collect_branch_node_masks, - multi_added_removed_keys.as_ref(), - ) { - Ok(receivers) => receivers, - Err(error) => { - // Send error through result channel - error!(target: "trie::proof_task", "Failed to dispatch storage proofs: {error}"); - let _ = result_tx.send(ProofResultMessage { - sequence_number: seq, - result: Err(error), - elapsed: start.elapsed(), - state, - }); - continue; - } - }; - - // Use the missed leaves cache passed from the multiproof manager - let account_prefix_set = std::mem::take(&mut prefix_sets.account_prefix_set); - - let ctx = AccountMultiproofParams { - targets: &targets, - prefix_set: account_prefix_set, - collect_branch_node_masks, - multi_added_removed_keys: multi_added_removed_keys.as_ref(), - storage_proof_receivers, - missed_leaves_storage_roots: missed_leaves_storage_roots.as_ref(), - }; - - let result = build_account_multiproof_with_storage_roots( - &proof_tx.provider, - ctx, - &mut tracker, - ); - - let proof_elapsed = proof_start.elapsed(); - let total_elapsed = start.elapsed(); - let stats = tracker.finish(); - let result = result.map(|proof| ProofResult::AccountMultiproof { proof, stats }); - account_proofs_processed += 1; - - // Send result to MultiProofTask - if result_tx - .send(ProofResultMessage { - sequence_number: seq, - result, - elapsed: total_elapsed, - state, - }) - .is_err() - { - trace!( - target: "trie::proof_task", - worker_id, - account_proofs_processed, - "Account multiproof receiver dropped, discarding result" - ); - } - - trace!( - target: "trie::proof_task", - proof_time_us = proof_elapsed.as_micros(), - total_elapsed_us = total_elapsed.as_micros(), - total_processed = account_proofs_processed, - "Account multiproof completed" - ); - drop(_span_guard); - - // Mark worker as available again. 
- available_workers.fetch_add(1, Ordering::Relaxed); - } - - AccountWorkerJob::BlindedAccountNode { path, result_sender } => { - let span = debug_span!( - target: "trie::proof_task", - "Blinded account node calculation", - ?path, - worker_id, - ); - let _span_guard = span.enter(); - - trace!( - target: "trie::proof_task", - "Processing blinded account node" - ); - - let account_node_provider = ProofBlindedAccountProvider::new( - &proof_tx.provider, - &proof_tx.provider, - proof_tx.prefix_sets.clone(), - ); - - let start = Instant::now(); - let result = account_node_provider.trie_node(&path); - let elapsed = start.elapsed(); - - account_nodes_processed += 1; - - if result_sender.send(result).is_err() { - trace!( - target: "trie::proof_task", - worker_id, - ?path, - account_nodes_processed, - "Blinded account node receiver dropped, discarding result" - ); - } - - trace!( - target: "trie::proof_task", - node_time_us = elapsed.as_micros(), - total_processed = account_nodes_processed, - "Blinded account node completed" - ); - drop(_span_guard); - - // Mark worker as available again. - available_workers.fetch_add(1, Ordering::Relaxed); - } - } - } - - trace!( - target: "trie::proof_task", - worker_id, - account_proofs_processed, - account_nodes_processed, - "Account worker shutting down" - ); - - #[cfg(feature = "metrics")] - metrics.record_account_nodes(account_nodes_processed as usize); -} - -/// Builds an account multiproof by consuming storage proof receivers lazily during trie walk. -/// -/// This is a helper function used by account workers to build the account subtree proof -/// while storage proofs are still being computed. Receivers are consumed only when needed, -/// enabling interleaved parallelism between account trie traversal and storage proof computation. -/// -/// Returns a `DecodedMultiProof` containing the account subtree and storage proofs. -fn build_account_multiproof_with_storage_roots
<P>
( - provider: &P, - ctx: AccountMultiproofParams<'_>, - tracker: &mut ParallelTrieTracker, -) -> Result -where - P: TrieCursorFactory + HashedCursorFactory, -{ - let accounts_added_removed_keys = - ctx.multi_added_removed_keys.as_ref().map(|keys| keys.get_accounts()); - - // Create the walker. - let walker = TrieWalker::<_>::state_trie( - provider.account_trie_cursor().map_err(ProviderError::Database)?, - ctx.prefix_set, - ) - .with_added_removed_keys(accounts_added_removed_keys) - .with_deletions_retained(true); - - // Create a hash builder to rebuild the root node since it is not available in the database. - let retainer = ctx - .targets - .keys() - .map(Nibbles::unpack) - .collect::() - .with_added_removed_keys(accounts_added_removed_keys); - let mut hash_builder = HashBuilder::default() - .with_proof_retainer(retainer) - .with_updates(ctx.collect_branch_node_masks); - - // Initialize storage multiproofs map with pre-allocated capacity. - // Proofs will be inserted as they're consumed from receivers during trie walk. - let mut collected_decoded_storages: B256Map = - B256Map::with_capacity_and_hasher(ctx.targets.len(), Default::default()); - let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); - let mut account_node_iter = TrieNodeIter::state_trie( - walker, - provider.hashed_account_cursor().map_err(ProviderError::Database)?, - ); - - let mut storage_proof_receivers = ctx.storage_proof_receivers; - - while let Some(account_node) = account_node_iter.try_next().map_err(ProviderError::Database)? { - match account_node { - TrieElement::Branch(node) => { - hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); - } - TrieElement::Leaf(hashed_address, account) => { - let root = match storage_proof_receivers.remove(&hashed_address) { - Some(receiver) => { - // Block on this specific storage proof receiver - enables interleaved - // parallelism - let proof_msg = receiver.recv().map_err(|_| { - ParallelStateRootError::StorageRoot( - reth_execution_errors::StorageRootError::Database( - DatabaseError::Other(format!( - "Storage proof channel closed for {hashed_address}" - )), - ), - ) - })?; - - // Extract storage proof from the result - let proof = match proof_msg.result? { - ProofResult::StorageProof { hashed_address: addr, proof } => { - debug_assert_eq!( - addr, - hashed_address, - "storage worker must return same address: expected {hashed_address}, got {addr}" - ); - proof - } - ProofResult::AccountMultiproof { .. } => { - unreachable!("storage worker only sends StorageProof variant") - } - }; - - let root = proof.root; - collected_decoded_storages.insert(hashed_address, proof); - root - } - // Since we do not store all intermediate nodes in the database, there might - // be a possibility of re-adding a non-modified leaf to the hash builder. - None => { - tracker.inc_missed_leaves(); - - match ctx.missed_leaves_storage_roots.entry(hashed_address) { - dashmap::Entry::Occupied(occ) => *occ.get(), - dashmap::Entry::Vacant(vac) => { - let root = - StorageProof::new_hashed(provider, provider, hashed_address) - .with_prefix_set_mut(Default::default()) - .storage_multiproof( - ctx.targets - .get(&hashed_address) - .cloned() - .unwrap_or_default(), - ) - .map_err(|e| { - ParallelStateRootError::StorageRoot( - reth_execution_errors::StorageRootError::Database( - DatabaseError::Other(e.to_string()), - ), - ) - })? 
- .root; - - vac.insert(root); - root - } - } - } - }; - - // Encode account - account_rlp.clear(); - let account = account.into_trie_account(root); - account.encode(&mut account_rlp as &mut dyn BufMut); - - hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); - } - } - } - - // Consume remaining storage proof receivers for accounts not encountered during trie walk. - for (hashed_address, receiver) in storage_proof_receivers { - if let Ok(proof_msg) = receiver.recv() { - // Extract storage proof from the result - if let Ok(ProofResult::StorageProof { proof, .. }) = proof_msg.result { - collected_decoded_storages.insert(hashed_address, proof); - } - } - } - - let _ = hash_builder.root(); - - let account_subtree_raw_nodes = hash_builder.take_proof_nodes(); - let decoded_account_subtree = DecodedProofNodes::try_from(account_subtree_raw_nodes)?; - - let (branch_node_hash_masks, branch_node_tree_masks) = if ctx.collect_branch_node_masks { - let updated_branch_nodes = hash_builder.updated_branch_nodes.unwrap_or_default(); - ( - updated_branch_nodes.iter().map(|(path, node)| (*path, node.hash_mask)).collect(), - updated_branch_nodes.into_iter().map(|(path, node)| (path, node.tree_mask)).collect(), - ) - } else { - (Default::default(), Default::default()) - }; - - Ok(DecodedMultiProof { - account_subtree: decoded_account_subtree, - branch_node_hash_masks, - branch_node_tree_masks, - storages: collected_decoded_storages, - }) -} - -/// Queues storage proofs for all accounts in the targets and returns receivers. -/// -/// This function queues all storage proof tasks to the worker pool but returns immediately -/// with receivers, allowing the account trie walk to proceed in parallel with storage proof -/// computation. This enables interleaved parallelism for better performance. -/// -/// Propagates errors up if queuing fails. Receivers must be consumed by the caller. -fn dispatch_storage_proofs( - storage_work_tx: &CrossbeamSender, - targets: &MultiProofTargets, - storage_prefix_sets: &mut B256Map, - with_branch_node_masks: bool, - multi_added_removed_keys: Option<&Arc>, -) -> Result>, ParallelStateRootError> { - let mut storage_proof_receivers = - B256Map::with_capacity_and_hasher(targets.len(), Default::default()); - - // Dispatch all storage proofs to worker pool - for (hashed_address, target_slots) in targets.iter() { - let prefix_set = storage_prefix_sets.remove(hashed_address).unwrap_or_default(); - - // Create channel for receiving ProofResultMessage - let (result_tx, result_rx) = crossbeam_channel::unbounded(); - let start = Instant::now(); - - // Create computation input (data only, no communication channel) - let input = StorageProofInput::new( - *hashed_address, - prefix_set, - target_slots.clone(), - with_branch_node_masks, - multi_added_removed_keys.cloned(), - ); - - // Always dispatch a storage proof so we obtain the storage root even when no slots are - // requested. - storage_work_tx - .send(StorageWorkerJob::StorageProof { - input, - proof_result_sender: ProofResultContext::new( - result_tx, - 0, - HashedPostState::default(), - start, - ), - }) - .map_err(|_| { - ParallelStateRootError::Other(format!( - "Failed to queue storage proof for {}: storage worker pool unavailable", - hashed_address - )) - })?; - - storage_proof_receivers.insert(*hashed_address, result_rx); - } - - Ok(storage_proof_receivers) -} - -/// This contains all information shared between all storage proof instances. 
-#[derive(Debug)] -pub struct ProofTaskTx { - /// The provider that implements `TrieCursorFactory` and `HashedCursorFactory`. - provider: Provider, - - /// The prefix sets for the computation. - prefix_sets: Arc, - - /// Identifier for the worker within the worker pool, used only for tracing. - id: usize, -} - -impl ProofTaskTx { - /// Initializes a [`ProofTaskTx`] with the given provider, prefix sets, and ID. - const fn new(provider: Provider, prefix_sets: Arc, id: usize) -> Self { - Self { provider, prefix_sets, id } - } -} - -impl ProofTaskTx -where - Provider: TrieCursorFactory + HashedCursorFactory, -{ - /// Compute storage proof. - /// - /// Used by storage workers in the worker pool to compute storage proofs. - #[inline] - fn compute_storage_proof(&self, input: StorageProofInput) -> StorageProofResult { - // Consume the input so we can move large collections (e.g. target slots) without cloning. - let StorageProofInput { - hashed_address, - prefix_set, - target_slots, - with_branch_node_masks, - multi_added_removed_keys, - } = input; - - // Get or create added/removed keys context - let multi_added_removed_keys = - multi_added_removed_keys.unwrap_or_else(|| Arc::new(MultiAddedRemovedKeys::new())); - let added_removed_keys = multi_added_removed_keys.get_storage(&hashed_address); - - let span = debug_span!( - target: "trie::proof_task", - "Storage proof calculation", - hashed_address = ?hashed_address, - worker_id = self.id, - ); - let _span_guard = span.enter(); - - let proof_start = Instant::now(); - - // Compute raw storage multiproof - let raw_proof_result = - StorageProof::new_hashed(&self.provider, &self.provider, hashed_address) - .with_prefix_set_mut(PrefixSetMut::from(prefix_set.iter().copied())) - .with_branch_node_masks(with_branch_node_masks) - .with_added_removed_keys(added_removed_keys) - .storage_multiproof(target_slots) - .map_err(|e| ParallelStateRootError::Other(e.to_string())); - - // Decode proof into DecodedStorageMultiProof - let decoded_result = raw_proof_result.and_then(|raw_proof| { - raw_proof.try_into().map_err(|e: alloy_rlp::Error| { - ParallelStateRootError::Other(format!( - "Failed to decode storage proof for {}: {}", - hashed_address, e - )) - }) - }); - - trace!( - target: "trie::proof_task", - hashed_address = ?hashed_address, - proof_time_us = proof_start.elapsed().as_micros(), - worker_id = self.id, - "Completed storage proof calculation" - ); - - decoded_result - } -} - -/// Input parameters for storage proof computation. -#[derive(Debug)] -pub struct StorageProofInput { - /// The hashed address for which the proof is calculated. - hashed_address: B256, - /// The prefix set for the proof calculation. - prefix_set: PrefixSet, - /// The target slots for the proof calculation. - target_slots: B256Set, - /// Whether or not to collect branch node masks - with_branch_node_masks: bool, - /// Provided by the user to give the necessary context to retain extra proofs. - multi_added_removed_keys: Option>, -} - -impl StorageProofInput { - /// Creates a new [`StorageProofInput`] with the given hashed address, prefix set, and target - /// slots. - pub const fn new( - hashed_address: B256, - prefix_set: PrefixSet, - target_slots: B256Set, - with_branch_node_masks: bool, - multi_added_removed_keys: Option>, - ) -> Self { - Self { - hashed_address, - prefix_set, - target_slots, - with_branch_node_masks, - multi_added_removed_keys, - } - } -} - -/// Input parameters for account multiproof computation. 
-#[derive(Debug, Clone)] -pub struct AccountMultiproofInput { - /// The targets for which to compute the multiproof. - pub targets: MultiProofTargets, - /// The prefix sets for the proof calculation. - pub prefix_sets: TriePrefixSets, - /// Whether or not to collect branch node masks. - pub collect_branch_node_masks: bool, - /// Provided by the user to give the necessary context to retain extra proofs. - pub multi_added_removed_keys: Option>, - /// Cached storage proof roots for missed leaves encountered during account trie walk. - pub missed_leaves_storage_roots: Arc>, - /// Context for sending the proof result. - pub proof_result_sender: ProofResultContext, -} - -/// Parameters for building an account multiproof with pre-computed storage roots. -struct AccountMultiproofParams<'a> { - /// The targets for which to compute the multiproof. - targets: &'a MultiProofTargets, - /// The prefix set for the account trie walk. - prefix_set: PrefixSet, - /// Whether or not to collect branch node masks. - collect_branch_node_masks: bool, - /// Provided by the user to give the necessary context to retain extra proofs. - multi_added_removed_keys: Option<&'a Arc>, - /// Receivers for storage proofs being computed in parallel. - storage_proof_receivers: B256Map>, - /// Cached storage proof roots for missed leaves encountered during account trie walk. - missed_leaves_storage_roots: &'a DashMap, -} - -/// Internal message for account workers. -#[derive(Debug)] -enum AccountWorkerJob { - /// Account multiproof computation request - AccountMultiproof { - /// Account multiproof input parameters - input: Box, - }, - /// Blinded account node retrieval request - BlindedAccountNode { - /// Path to the account node - path: Nibbles, - /// Channel to send result back to original caller - result_sender: Sender, - }, -} - -/// Data used for initializing cursor factories that is shared across all storage proof instances. -#[derive(Clone, Debug)] -pub struct ProofTaskCtx { - /// The factory for creating state providers. - factory: Factory, - /// The collection of prefix sets for the computation. Since the prefix sets _always_ - /// invalidate the in-memory nodes, not all keys from `state_sorted` might be present here, - /// if we have cached nodes for them. - prefix_sets: Arc, -} - -impl ProofTaskCtx { - /// Creates a new [`ProofTaskCtx`] with the given factory and prefix sets. - pub const fn new(factory: Factory, prefix_sets: Arc) -> Self { - Self { factory, prefix_sets } - } -} - /// A handle that provides type-safe access to proof worker pools. /// /// The handle stores direct senders to both storage and account worker pools, @@ -1017,6 +96,10 @@ pub struct ProofWorkerHandle { /// Counter tracking available account workers. Workers decrement when starting work, /// increment when finishing. Used to determine whether to chunk multiproofs. 
account_available_workers: Arc, + /// Total number of storage workers spawned + storage_worker_count: usize, + /// Total number of account workers spawned + account_worker_count: usize, } impl ProofWorkerHandle { @@ -1070,22 +153,34 @@ impl ProofWorkerHandle { executor.spawn_blocking(move || { #[cfg(feature = "metrics")] let metrics = ProofTaskTrieMetrics::default(); + #[cfg(feature = "metrics")] + let cursor_metrics = ProofTaskCursorMetrics::new(); let _guard = span.enter(); - storage_worker_loop( + let worker = StorageProofWorker::new( task_ctx_clone, work_rx_clone, worker_id, storage_available_workers_clone, #[cfg(feature = "metrics")] metrics, - ) + #[cfg(feature = "metrics")] + cursor_metrics, + ); + if let Err(error) = worker.run() { + error!( + target: "trie::proof_task", + worker_id, + ?error, + "Storage worker failed" + ); + } }); } drop(parent_span); let parent_span = - debug_span!(target: "trie::proof_task", "account proof workers", ?storage_worker_count) + debug_span!(target: "trie::proof_task", "account proof workers", ?account_worker_count) .entered(); // Spawn account workers for worker_id in 0..account_worker_count { @@ -1098,17 +193,29 @@ impl ProofWorkerHandle { executor.spawn_blocking(move || { #[cfg(feature = "metrics")] let metrics = ProofTaskTrieMetrics::default(); + #[cfg(feature = "metrics")] + let cursor_metrics = ProofTaskCursorMetrics::new(); let _guard = span.enter(); - account_worker_loop( + let worker = AccountProofWorker::new( task_ctx_clone, work_rx_clone, - storage_work_tx_clone, worker_id, + storage_work_tx_clone, account_available_workers_clone, #[cfg(feature = "metrics")] metrics, - ) + #[cfg(feature = "metrics")] + cursor_metrics, + ); + if let Err(error) = worker.run() { + error!( + target: "trie::proof_task", + worker_id, + ?error, + "Account worker failed" + ); + } }); } drop(parent_span); @@ -1118,17 +225,19 @@ impl ProofWorkerHandle { account_work_tx, storage_available_workers, account_available_workers, + storage_worker_count, + account_worker_count, } } - /// Returns true if there are available storage workers to process tasks. - pub fn has_available_storage_workers(&self) -> bool { - self.storage_available_workers.load(Ordering::Relaxed) > 0 + /// Returns how many storage workers are currently available/idle. + pub fn available_storage_workers(&self) -> usize { + self.storage_available_workers.load(Ordering::Relaxed) } - /// Returns true if there are available account workers to process tasks. - pub fn has_available_account_workers(&self) -> bool { - self.account_available_workers.load(Ordering::Relaxed) > 0 + /// Returns how many account workers are currently available/idle. + pub fn available_account_workers(&self) -> usize { + self.account_available_workers.load(Ordering::Relaxed) } /// Returns the number of pending storage tasks in the queue. @@ -1141,6 +250,30 @@ impl ProofWorkerHandle { self.account_work_tx.len() } + /// Returns the total number of storage workers in the pool. + pub const fn total_storage_workers(&self) -> usize { + self.storage_worker_count + } + + /// Returns the total number of account workers in the pool. + pub const fn total_account_workers(&self) -> usize { + self.account_worker_count + } + + /// Returns the number of storage workers currently processing tasks. + /// + /// This is calculated as total workers minus available workers. 
+ pub fn active_storage_workers(&self) -> usize { + self.storage_worker_count.saturating_sub(self.available_storage_workers()) + } + + /// Returns the number of account workers currently processing tasks. + /// + /// This is calculated as total workers minus available workers. + pub fn active_account_workers(&self) -> usize { + self.account_worker_count.saturating_sub(self.available_account_workers()) + } + /// Dispatch a storage proof computation to storage worker pool /// /// The result will be sent via the `proof_result_sender` channel. @@ -1244,6 +377,132 @@ impl ProofWorkerHandle { } } +/// Data used for initializing cursor factories that is shared across all storage proof instances. +#[derive(Clone, Debug)] +pub struct ProofTaskCtx { + /// The factory for creating state providers. + factory: Factory, +} + +impl ProofTaskCtx { + /// Creates a new [`ProofTaskCtx`] with the given factory. + pub const fn new(factory: Factory) -> Self { + Self { factory } + } +} + +/// This contains all information shared between all storage proof instances. +#[derive(Debug)] +pub struct ProofTaskTx { + /// The provider that implements `TrieCursorFactory` and `HashedCursorFactory`. + provider: Provider, + + /// Identifier for the worker within the worker pool, used only for tracing. + id: usize, +} + +impl ProofTaskTx { + /// Initializes a [`ProofTaskTx`] with the given provider and ID. + const fn new(provider: Provider, id: usize) -> Self { + Self { provider, id } + } +} + +impl ProofTaskTx +where + Provider: TrieCursorFactory + HashedCursorFactory, +{ + /// Compute storage proof. + /// + /// Used by storage workers in the worker pool to compute storage proofs. + #[inline] + fn compute_storage_proof( + &self, + input: StorageProofInput, + trie_cursor_metrics: &mut TrieCursorMetricsCache, + hashed_cursor_metrics: &mut HashedCursorMetricsCache, + ) -> StorageProofResult { + // Consume the input so we can move large collections (e.g. target slots) without cloning. 
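The active/available accounting above is a plain shared-counter scheme: each worker bumps an `AtomicUsize` when it becomes idle, decrements it when it takes a job, and the handle derives `active = total - available`, with `saturating_sub` covering the brief window between the two updates. A minimal standalone sketch of that pattern, using only `std` and a hypothetical `WorkerPool` type rather than the actual `ProofWorkerHandle`:

```rust
use std::sync::{
    atomic::{AtomicUsize, Ordering},
    Arc,
};

/// Hypothetical handle mirroring the total/available/active accounting.
struct WorkerPool {
    total: usize,
    available: Arc<AtomicUsize>,
}

impl WorkerPool {
    fn available_workers(&self) -> usize {
        self.available.load(Ordering::Relaxed)
    }

    fn active_workers(&self) -> usize {
        // Tolerates the brief moment when a worker has decremented the counter
        // but the caller still assumes the old total.
        self.total.saturating_sub(self.available_workers())
    }
}

fn main() {
    let available = Arc::new(AtomicUsize::new(0));
    let pool = WorkerPool { total: 2, available: Arc::clone(&available) };

    // Two workers advertise themselves after successful initialization...
    available.fetch_add(1, Ordering::Relaxed);
    available.fetch_add(1, Ordering::Relaxed);
    // ...then one of them takes a job and marks itself busy.
    available.fetch_sub(1, Ordering::Relaxed);

    assert_eq!(pool.available_workers(), 1);
    assert_eq!(pool.active_workers(), 1);
}
```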
+ let StorageProofInput { + hashed_address, + prefix_set, + target_slots, + with_branch_node_masks, + multi_added_removed_keys, + } = input; + + // Get or create added/removed keys context + let multi_added_removed_keys = + multi_added_removed_keys.unwrap_or_else(|| Arc::new(MultiAddedRemovedKeys::new())); + let added_removed_keys = multi_added_removed_keys.get_storage(&hashed_address); + + let span = debug_span!( + target: "trie::proof_task", + "Storage proof calculation", + ?hashed_address, + target_slots = ?target_slots.len(), + worker_id = self.id, + ); + let _span_guard = span.enter(); + + let proof_start = Instant::now(); + + // Compute raw storage multiproof + let raw_proof_result = + StorageProof::new_hashed(&self.provider, &self.provider, hashed_address) + .with_prefix_set_mut(PrefixSetMut::from(prefix_set.iter().copied())) + .with_branch_node_masks(with_branch_node_masks) + .with_added_removed_keys(added_removed_keys) + .with_trie_cursor_metrics(trie_cursor_metrics) + .with_hashed_cursor_metrics(hashed_cursor_metrics) + .storage_multiproof(target_slots) + .map_err(|e| ParallelStateRootError::Other(e.to_string())); + trie_cursor_metrics.record_span("trie_cursor"); + hashed_cursor_metrics.record_span("hashed_cursor"); + + // Decode proof into DecodedStorageMultiProof + let decoded_result = raw_proof_result.and_then(|raw_proof| { + raw_proof.try_into().map_err(|e: alloy_rlp::Error| { + ParallelStateRootError::Other(format!( + "Failed to decode storage proof for {}: {}", + hashed_address, e + )) + }) + }); + + trace!( + target: "trie::proof_task", + hashed_address = ?hashed_address, + proof_time_us = proof_start.elapsed().as_micros(), + worker_id = self.id, + "Completed storage proof calculation" + ); + + decoded_result + } + + /// Process a blinded storage node request. + /// + /// Used by storage workers to retrieve blinded storage trie nodes for proof construction. + fn process_blinded_storage_node( + &self, + account: B256, + path: &Nibbles, + ) -> TrieNodeProviderResult { + let storage_node_provider = + ProofBlindedStorageProvider::new(&self.provider, &self.provider, account); + storage_node_provider.trie_node(path) + } + + /// Process a blinded account node request. + /// + /// Used by account workers to retrieve blinded account trie nodes for proof construction. + fn process_blinded_account_node(&self, path: &Nibbles) -> TrieNodeProviderResult { + let account_node_provider = + ProofBlindedAccountProvider::new(&self.provider, &self.provider); + account_node_provider.trie_node(path) + } +} impl TrieNodeProviderFactory for ProofWorkerHandle { type AccountNodeProvider = ProofTaskTrieNodeProvider; type StorageNodeProvider = ProofTaskTrieNodeProvider; @@ -1292,17 +551,1047 @@ impl TrieNodeProvider for ProofTaskTrieNodeProvider { } } } +/// Result of a proof calculation, which can be either an account multiproof or a storage proof. +#[derive(Debug)] +pub enum ProofResult { + /// Account multiproof with statistics + AccountMultiproof { + /// The account multiproof + proof: DecodedMultiProof, + /// Statistics collected during proof computation + stats: ParallelTrieStats, + }, + /// Storage proof for a specific account + StorageProof { + /// The hashed address this storage proof belongs to + hashed_address: B256, + /// The storage multiproof + proof: DecodedStorageMultiProof, + }, +} + +impl ProofResult { + /// Convert this proof result into a `DecodedMultiProof`. + /// + /// For account multiproofs, returns the multiproof directly (discarding stats). 
+ /// For storage proofs, wraps the storage proof into a minimal multiproof. + pub fn into_multiproof(self) -> DecodedMultiProof { + match self { + Self::AccountMultiproof { proof, stats: _ } => proof, + Self::StorageProof { hashed_address, proof } => { + DecodedMultiProof::from_storage_proof(hashed_address, proof) + } + } + } +} +/// Channel used by worker threads to deliver `ProofResultMessage` items back to +/// `MultiProofTask`. +/// +/// Workers use this sender to deliver proof results directly to `MultiProofTask`. +pub type ProofResultSender = CrossbeamSender; + +/// Message containing a completed proof result with metadata for direct delivery to +/// `MultiProofTask`. +/// +/// This type enables workers to send proof results directly to the `MultiProofTask` event loop. +#[derive(Debug)] +pub struct ProofResultMessage { + /// Sequence number for ordering proofs + pub sequence_number: u64, + /// The proof calculation result (either account multiproof or storage proof) + pub result: Result, + /// Time taken for the entire proof calculation (from dispatch to completion) + pub elapsed: Duration, + /// Original state update that triggered this proof + pub state: HashedPostState, +} + +/// Context for sending proof calculation results back to `MultiProofTask`. +/// +/// This struct contains all context needed to send and track proof calculation results. +/// Workers use this to deliver completed proofs back to the main event loop. +#[derive(Debug, Clone)] +pub struct ProofResultContext { + /// Channel sender for result delivery + pub sender: ProofResultSender, + /// Sequence number for proof ordering + pub sequence_number: u64, + /// Original state update that triggered this proof + pub state: HashedPostState, + /// Calculation start time for measuring elapsed duration + pub start_time: Instant, +} + +impl ProofResultContext { + /// Creates a new proof result context. + pub const fn new( + sender: ProofResultSender, + sequence_number: u64, + state: HashedPostState, + start_time: Instant, + ) -> Self { + Self { sender, sequence_number, state, start_time } + } +} +/// Internal message for storage workers. +#[derive(Debug)] +enum StorageWorkerJob { + /// Storage proof computation request + StorageProof { + /// Storage proof input parameters + input: StorageProofInput, + /// Context for sending the proof result. + proof_result_sender: ProofResultContext, + }, + /// Blinded storage node retrieval request + BlindedStorageNode { + /// Target account + account: B256, + /// Path to the storage node + path: Nibbles, + /// Channel to send result back to original caller + result_sender: Sender, + }, +} + +/// Worker for storage trie operations. +/// +/// Each worker maintains a dedicated database transaction and processes +/// storage proof requests and blinded node lookups. +struct StorageProofWorker { + /// Shared task context with database factory and prefix sets + task_ctx: ProofTaskCtx, + /// Channel for receiving work + work_rx: CrossbeamReceiver, + /// Unique identifier for this worker (used for tracing) + worker_id: usize, + /// Counter tracking worker availability + available_workers: Arc, + /// Metrics collector for this worker + #[cfg(feature = "metrics")] + metrics: ProofTaskTrieMetrics, + /// Cursor metrics for this worker + #[cfg(feature = "metrics")] + cursor_metrics: ProofTaskCursorMetrics, +} + +impl StorageProofWorker +where + Factory: DatabaseProviderROFactory, +{ + /// Creates a new storage proof worker. 
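The `ProofResultMessage`/`ProofResultContext` pair above follows a common tagged-result pattern: the dispatcher captures a sender, a sequence number, and a start time when it queues the work, and the worker later sends the finished result back together with that metadata, ignoring the send error if the receiver is gone. A small sketch of the idea, assuming the `crossbeam-channel` crate and simplified stand-in types (`ResultMessage`, `ResultContext`) rather than the reth ones:

```rust
use std::time::{Duration, Instant};

use crossbeam_channel::{unbounded, Sender};

/// Stand-in for `ProofResultMessage`: the payload plus ordering/timing metadata.
struct ResultMessage {
    sequence_number: u64,
    result: Result<String, String>,
    elapsed: Duration,
}

/// Stand-in for `ProofResultContext`: everything captured at dispatch time.
struct ResultContext {
    sender: Sender<ResultMessage>,
    sequence_number: u64,
    start_time: Instant,
}

fn main() {
    let (tx, rx) = unbounded();
    let ctx = ResultContext { sender: tx, sequence_number: 7, start_time: Instant::now() };

    // Worker side: compute, then report the result with its metadata. Ignoring a
    // send error mirrors the "receiver dropped, discarding result" behavior.
    let _ = ctx.sender.send(ResultMessage {
        sequence_number: ctx.sequence_number,
        result: Ok("proof".to_string()),
        elapsed: ctx.start_time.elapsed(),
    });

    // Consumer side: the sequence number lets the event loop order results.
    let msg = rx.recv().expect("worker dropped the sender");
    assert_eq!(msg.sequence_number, 7);
    assert!(msg.result.is_ok());
    println!("finished in {:?}", msg.elapsed);
}
```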
+ const fn new( + task_ctx: ProofTaskCtx, + work_rx: CrossbeamReceiver, + worker_id: usize, + available_workers: Arc, + #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, + #[cfg(feature = "metrics")] cursor_metrics: ProofTaskCursorMetrics, + ) -> Self { + Self { + task_ctx, + work_rx, + worker_id, + available_workers, + #[cfg(feature = "metrics")] + metrics, + #[cfg(feature = "metrics")] + cursor_metrics, + } + } + + /// Runs the worker loop, processing jobs until the channel closes. + /// + /// # Lifecycle + /// + /// 1. Initializes database provider and transaction + /// 2. Advertises availability + /// 3. Processes jobs in a loop: + /// - Receives job from channel + /// - Marks worker as busy + /// - Processes the job + /// - Marks worker as available + /// 4. Shuts down when channel closes + /// + /// # Panic Safety + /// + /// If this function panics, the worker thread terminates but other workers + /// continue operating and the system degrades gracefully. + fn run(mut self) -> ProviderResult<()> { + let Self { + task_ctx, + work_rx, + worker_id, + available_workers, + #[cfg(feature = "metrics")] + metrics, + #[cfg(feature = "metrics")] + ref mut cursor_metrics, + } = self; + + // Create provider from factory + let provider = task_ctx.factory.database_provider_ro()?; + let proof_tx = ProofTaskTx::new(provider, worker_id); + + trace!( + target: "trie::proof_task", + worker_id, + "Storage worker started" + ); + + let mut storage_proofs_processed = 0u64; + let mut storage_nodes_processed = 0u64; + let mut cursor_metrics_cache = ProofTaskCursorMetricsCache::default(); + + // Initially mark this worker as available. + available_workers.fetch_add(1, Ordering::Relaxed); + + while let Ok(job) = work_rx.recv() { + // Mark worker as busy. + available_workers.fetch_sub(1, Ordering::Relaxed); + + match job { + StorageWorkerJob::StorageProof { input, proof_result_sender } => { + Self::process_storage_proof( + worker_id, + &proof_tx, + input, + proof_result_sender, + &mut storage_proofs_processed, + &mut cursor_metrics_cache, + ); + } + + StorageWorkerJob::BlindedStorageNode { account, path, result_sender } => { + Self::process_blinded_node( + worker_id, + &proof_tx, + account, + path, + result_sender, + &mut storage_nodes_processed, + ); + } + } + + // Mark worker as available again. + available_workers.fetch_add(1, Ordering::Relaxed); + } + + trace!( + target: "trie::proof_task", + worker_id, + storage_proofs_processed, + storage_nodes_processed, + "Storage worker shutting down" + ); + + #[cfg(feature = "metrics")] + { + metrics.record_storage_nodes(storage_nodes_processed as usize); + cursor_metrics.record(&mut cursor_metrics_cache); + } + + Ok(()) + } + + /// Processes a storage proof request. 
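The `run` loop above uses channel closure as its only shutdown signal: `recv()` keeps yielding jobs until every sender is dropped, at which point the loop ends and the worker records its totals. A minimal sketch of that loop shape, assuming the `crossbeam-channel` crate and a plain `std::thread` in place of the executor used here:

```rust
use std::thread;

use crossbeam_channel::unbounded;

fn main() {
    let (work_tx, work_rx) = unbounded::<u32>();

    // Worker loop: `recv()` returns `Err` once every sender is dropped, which is
    // what ends the loop and lets the worker shut down gracefully.
    let worker = thread::spawn(move || {
        let mut processed = 0u64;
        while let Ok(job) = work_rx.recv() {
            // ...mark busy, process `job`, mark available again...
            let _ = job;
            processed += 1;
        }
        processed
    });

    work_tx.send(1).unwrap();
    work_tx.send(2).unwrap();
    drop(work_tx); // closing the channel is the shutdown signal

    assert_eq!(worker.join().unwrap(), 2);
}
```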
+ fn process_storage_proof( + worker_id: usize, + proof_tx: &ProofTaskTx, + input: StorageProofInput, + proof_result_sender: ProofResultContext, + storage_proofs_processed: &mut u64, + cursor_metrics_cache: &mut ProofTaskCursorMetricsCache, + ) where + Provider: TrieCursorFactory + HashedCursorFactory, + { + let hashed_address = input.hashed_address; + let ProofResultContext { sender, sequence_number: seq, state, start_time } = + proof_result_sender; + + let mut trie_cursor_metrics = TrieCursorMetricsCache::default(); + let mut hashed_cursor_metrics = HashedCursorMetricsCache::default(); + + trace!( + target: "trie::proof_task", + worker_id, + hashed_address = ?hashed_address, + prefix_set_len = input.prefix_set.len(), + target_slots_len = input.target_slots.len(), + "Processing storage proof" + ); + + let proof_start = Instant::now(); + let result = proof_tx.compute_storage_proof( + input, + &mut trie_cursor_metrics, + &mut hashed_cursor_metrics, + ); + + let proof_elapsed = proof_start.elapsed(); + *storage_proofs_processed += 1; + + let result_msg = result.map(|storage_proof| ProofResult::StorageProof { + hashed_address, + proof: storage_proof, + }); + + if sender + .send(ProofResultMessage { + sequence_number: seq, + result: result_msg, + elapsed: start_time.elapsed(), + state, + }) + .is_err() + { + trace!( + target: "trie::proof_task", + worker_id, + hashed_address = ?hashed_address, + storage_proofs_processed, + "Proof result receiver dropped, discarding result" + ); + } + + trace!( + target: "trie::proof_task", + worker_id, + hashed_address = ?hashed_address, + proof_time_us = proof_elapsed.as_micros(), + total_processed = storage_proofs_processed, + trie_cursor_duration_us = trie_cursor_metrics.total_duration.as_micros(), + hashed_cursor_duration_us = hashed_cursor_metrics.total_duration.as_micros(), + ?trie_cursor_metrics, + ?hashed_cursor_metrics, + "Storage proof completed" + ); + + #[cfg(feature = "metrics")] + { + // Accumulate per-proof metrics into the worker's cache + let per_proof_cache = ProofTaskCursorMetricsCache { + account_trie_cursor: TrieCursorMetricsCache::default(), + account_hashed_cursor: HashedCursorMetricsCache::default(), + storage_trie_cursor: trie_cursor_metrics, + storage_hashed_cursor: hashed_cursor_metrics, + }; + cursor_metrics_cache.extend(&per_proof_cache); + } + } + + /// Processes a blinded storage node lookup request. + fn process_blinded_node( + worker_id: usize, + proof_tx: &ProofTaskTx, + account: B256, + path: Nibbles, + result_sender: Sender, + storage_nodes_processed: &mut u64, + ) where + Provider: TrieCursorFactory + HashedCursorFactory, + { + trace!( + target: "trie::proof_task", + worker_id, + ?account, + ?path, + "Processing blinded storage node" + ); + + let start = Instant::now(); + let result = proof_tx.process_blinded_storage_node(account, &path); + let elapsed = start.elapsed(); + + *storage_nodes_processed += 1; + + if result_sender.send(result).is_err() { + trace!( + target: "trie::proof_task", + worker_id, + ?account, + ?path, + storage_nodes_processed, + "Blinded storage node receiver dropped, discarding result" + ); + } + + trace!( + target: "trie::proof_task", + worker_id, + ?account, + ?path, + elapsed_us = elapsed.as_micros(), + total_processed = storage_nodes_processed, + "Blinded storage node completed" + ); + } +} + +/// Worker for account trie operations. +/// +/// Each worker maintains a dedicated database transaction and processes +/// account multiproof requests and blinded node lookups. 
+struct AccountProofWorker { + /// Shared task context with database factory and prefix sets + task_ctx: ProofTaskCtx, + /// Channel for receiving work + work_rx: CrossbeamReceiver, + /// Unique identifier for this worker (used for tracing) + worker_id: usize, + /// Channel for dispatching storage proof work + storage_work_tx: CrossbeamSender, + /// Counter tracking worker availability + available_workers: Arc, + /// Metrics collector for this worker + #[cfg(feature = "metrics")] + metrics: ProofTaskTrieMetrics, + /// Cursor metrics for this worker + #[cfg(feature = "metrics")] + cursor_metrics: ProofTaskCursorMetrics, +} + +impl AccountProofWorker +where + Factory: DatabaseProviderROFactory, +{ + /// Creates a new account proof worker. + const fn new( + task_ctx: ProofTaskCtx, + work_rx: CrossbeamReceiver, + worker_id: usize, + storage_work_tx: CrossbeamSender, + available_workers: Arc, + #[cfg(feature = "metrics")] metrics: ProofTaskTrieMetrics, + #[cfg(feature = "metrics")] cursor_metrics: ProofTaskCursorMetrics, + ) -> Self { + Self { + task_ctx, + work_rx, + worker_id, + storage_work_tx, + available_workers, + #[cfg(feature = "metrics")] + metrics, + #[cfg(feature = "metrics")] + cursor_metrics, + } + } + + /// Runs the worker loop, processing jobs until the channel closes. + /// + /// # Lifecycle + /// + /// 1. Initializes database provider and transaction + /// 2. Advertises availability + /// 3. Processes jobs in a loop: + /// - Receives job from channel + /// - Marks worker as busy + /// - Processes the job + /// - Marks worker as available + /// 4. Shuts down when channel closes + /// + /// # Panic Safety + /// + /// If this function panics, the worker thread terminates but other workers + /// continue operating and the system degrades gracefully. + fn run(mut self) -> ProviderResult<()> { + let Self { + task_ctx, + work_rx, + worker_id, + storage_work_tx, + available_workers, + #[cfg(feature = "metrics")] + metrics, + #[cfg(feature = "metrics")] + ref mut cursor_metrics, + } = self; + + // Create provider from factory + let provider = task_ctx.factory.database_provider_ro()?; + let proof_tx = ProofTaskTx::new(provider, worker_id); + + trace!( + target: "trie::proof_task", + worker_id, + "Account worker started" + ); + + let mut account_proofs_processed = 0u64; + let mut account_nodes_processed = 0u64; + let mut cursor_metrics_cache = ProofTaskCursorMetricsCache::default(); + + // Count this worker as available only after successful initialization. + available_workers.fetch_add(1, Ordering::Relaxed); + + while let Ok(job) = work_rx.recv() { + // Mark worker as busy. + available_workers.fetch_sub(1, Ordering::Relaxed); + + match job { + AccountWorkerJob::AccountMultiproof { input } => { + Self::process_account_multiproof( + worker_id, + &proof_tx, + storage_work_tx.clone(), + *input, + &mut account_proofs_processed, + &mut cursor_metrics_cache, + ); + } + + AccountWorkerJob::BlindedAccountNode { path, result_sender } => { + Self::process_blinded_node( + worker_id, + &proof_tx, + path, + result_sender, + &mut account_nodes_processed, + ); + } + } + + // Mark worker as available again. 
+ available_workers.fetch_add(1, Ordering::Relaxed); + } + + trace!( + target: "trie::proof_task", + worker_id, + account_proofs_processed, + account_nodes_processed, + "Account worker shutting down" + ); + + #[cfg(feature = "metrics")] + { + metrics.record_account_nodes(account_nodes_processed as usize); + cursor_metrics.record(&mut cursor_metrics_cache); + } + + Ok(()) + } + + /// Processes an account multiproof request. + fn process_account_multiproof( + worker_id: usize, + proof_tx: &ProofTaskTx, + storage_work_tx: CrossbeamSender, + input: AccountMultiproofInput, + account_proofs_processed: &mut u64, + cursor_metrics_cache: &mut ProofTaskCursorMetricsCache, + ) where + Provider: TrieCursorFactory + HashedCursorFactory, + { + let AccountMultiproofInput { + targets, + mut prefix_sets, + collect_branch_node_masks, + multi_added_removed_keys, + missed_leaves_storage_roots, + proof_result_sender: + ProofResultContext { sender: result_tx, sequence_number: seq, state, start_time: start }, + } = input; + + let span = debug_span!( + target: "trie::proof_task", + "Account multiproof calculation", + targets = targets.len(), + worker_id, + ); + let _span_guard = span.enter(); + + trace!( + target: "trie::proof_task", + "Processing account multiproof" + ); + + let proof_start = Instant::now(); + + let mut tracker = ParallelTrieTracker::default(); + + let mut storage_prefix_sets = std::mem::take(&mut prefix_sets.storage_prefix_sets); + + let storage_root_targets_len = + StorageRootTargets::count(&prefix_sets.account_prefix_set, &storage_prefix_sets); + + tracker.set_precomputed_storage_roots(storage_root_targets_len as u64); + + let storage_proof_receivers = match dispatch_storage_proofs( + &storage_work_tx, + &targets, + &mut storage_prefix_sets, + collect_branch_node_masks, + multi_added_removed_keys.as_ref(), + ) { + Ok(receivers) => receivers, + Err(error) => { + // Send error through result channel + error!(target: "trie::proof_task", "Failed to dispatch storage proofs: {error}"); + let _ = result_tx.send(ProofResultMessage { + sequence_number: seq, + result: Err(error), + elapsed: start.elapsed(), + state, + }); + return; + } + }; + + // Use the missed leaves cache passed from the multiproof manager + let account_prefix_set = std::mem::take(&mut prefix_sets.account_prefix_set); + + let ctx = AccountMultiproofParams { + targets: &targets, + prefix_set: account_prefix_set, + collect_branch_node_masks, + multi_added_removed_keys: multi_added_removed_keys.as_ref(), + storage_proof_receivers, + missed_leaves_storage_roots: missed_leaves_storage_roots.as_ref(), + }; + + let result = + build_account_multiproof_with_storage_roots(&proof_tx.provider, ctx, &mut tracker); + + let proof_elapsed = proof_start.elapsed(); + let total_elapsed = start.elapsed(); + let proof_cursor_metrics = tracker.cursor_metrics; + proof_cursor_metrics.record_spans(); + + let stats = tracker.finish(); + let result = result.map(|proof| ProofResult::AccountMultiproof { proof, stats }); + *account_proofs_processed += 1; + + // Send result to MultiProofTask + if result_tx + .send(ProofResultMessage { + sequence_number: seq, + result, + elapsed: total_elapsed, + state, + }) + .is_err() + { + trace!( + target: "trie::proof_task", + worker_id, + account_proofs_processed, + "Account multiproof receiver dropped, discarding result" + ); + } + + trace!( + target: "trie::proof_task", + proof_time_us = proof_elapsed.as_micros(), + total_elapsed_us = total_elapsed.as_micros(), + total_processed = account_proofs_processed, + 
account_trie_cursor_duration_us = proof_cursor_metrics.account_trie_cursor.total_duration.as_micros(), + account_hashed_cursor_duration_us = proof_cursor_metrics.account_hashed_cursor.total_duration.as_micros(), + storage_trie_cursor_duration_us = proof_cursor_metrics.storage_trie_cursor.total_duration.as_micros(), + storage_hashed_cursor_duration_us = proof_cursor_metrics.storage_hashed_cursor.total_duration.as_micros(), + account_trie_cursor_metrics = ?proof_cursor_metrics.account_trie_cursor, + account_hashed_cursor_metrics = ?proof_cursor_metrics.account_hashed_cursor, + storage_trie_cursor_metrics = ?proof_cursor_metrics.storage_trie_cursor, + storage_hashed_cursor_metrics = ?proof_cursor_metrics.storage_hashed_cursor, + "Account multiproof completed" + ); + + #[cfg(feature = "metrics")] + // Accumulate per-proof metrics into the worker's cache + cursor_metrics_cache.extend(&proof_cursor_metrics); + } + + /// Processes a blinded account node lookup request. + fn process_blinded_node( + worker_id: usize, + proof_tx: &ProofTaskTx, + path: Nibbles, + result_sender: Sender, + account_nodes_processed: &mut u64, + ) where + Provider: TrieCursorFactory + HashedCursorFactory, + { + let span = debug_span!( + target: "trie::proof_task", + "Blinded account node calculation", + ?path, + worker_id, + ); + let _span_guard = span.enter(); + + trace!( + target: "trie::proof_task", + "Processing blinded account node" + ); + + let start = Instant::now(); + let result = proof_tx.process_blinded_account_node(&path); + let elapsed = start.elapsed(); + + *account_nodes_processed += 1; + + if result_sender.send(result).is_err() { + trace!( + target: "trie::proof_task", + worker_id, + ?path, + account_nodes_processed, + "Blinded account node receiver dropped, discarding result" + ); + } + + trace!( + target: "trie::proof_task", + node_time_us = elapsed.as_micros(), + total_processed = account_nodes_processed, + "Blinded account node completed" + ); + } +} + +/// Builds an account multiproof by consuming storage proof receivers lazily during trie walk. +/// +/// This is a helper function used by account workers to build the account subtree proof +/// while storage proofs are still being computed. Receivers are consumed only when needed, +/// enabling interleaved parallelism between account trie traversal and storage proof computation. +/// +/// Returns a `DecodedMultiProof` containing the account subtree and storage proofs. +fn build_account_multiproof_with_storage_roots
<P>
( + provider: &P, + ctx: AccountMultiproofParams<'_>, + tracker: &mut ParallelTrieTracker, +) -> Result +where + P: TrieCursorFactory + HashedCursorFactory, +{ + let accounts_added_removed_keys = + ctx.multi_added_removed_keys.as_ref().map(|keys| keys.get_accounts()); + + // Create local metrics caches for account cursors. We can't directly use the metrics caches in + // the tracker due to the call to `inc_missed_leaves` which occurs on it. + let mut account_trie_cursor_metrics = TrieCursorMetricsCache::default(); + let mut account_hashed_cursor_metrics = HashedCursorMetricsCache::default(); + + // Wrap account trie cursor with instrumented cursor + let account_trie_cursor = provider.account_trie_cursor().map_err(ProviderError::Database)?; + let account_trie_cursor = + InstrumentedTrieCursor::new(account_trie_cursor, &mut account_trie_cursor_metrics); + + // Create the walker. + let walker = TrieWalker::<_>::state_trie(account_trie_cursor, ctx.prefix_set) + .with_added_removed_keys(accounts_added_removed_keys) + .with_deletions_retained(true); + + // Create a hash builder to rebuild the root node since it is not available in the database. + let retainer = ctx + .targets + .keys() + .map(Nibbles::unpack) + .collect::() + .with_added_removed_keys(accounts_added_removed_keys); + let mut hash_builder = HashBuilder::default() + .with_proof_retainer(retainer) + .with_updates(ctx.collect_branch_node_masks); + + // Initialize storage multiproofs map with pre-allocated capacity. + // Proofs will be inserted as they're consumed from receivers during trie walk. + let mut collected_decoded_storages: B256Map = + B256Map::with_capacity_and_hasher(ctx.targets.len(), Default::default()); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); + + // Wrap account hashed cursor with instrumented cursor + let account_hashed_cursor = + provider.hashed_account_cursor().map_err(ProviderError::Database)?; + let account_hashed_cursor = + InstrumentedHashedCursor::new(account_hashed_cursor, &mut account_hashed_cursor_metrics); + + let mut account_node_iter = TrieNodeIter::state_trie(walker, account_hashed_cursor); + + let mut storage_proof_receivers = ctx.storage_proof_receivers; + + while let Some(account_node) = account_node_iter.try_next().map_err(ProviderError::Database)? { + match account_node { + TrieElement::Branch(node) => { + hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); + } + TrieElement::Leaf(hashed_address, account) => { + let root = match storage_proof_receivers.remove(&hashed_address) { + Some(receiver) => { + let _guard = debug_span!( + target: "trie::proof_task", + "Waiting for storage proof", + ?hashed_address, + ); + // Block on this specific storage proof receiver - enables interleaved + // parallelism + let proof_msg = receiver.recv().map_err(|_| { + ParallelStateRootError::StorageRoot( + reth_execution_errors::StorageRootError::Database( + DatabaseError::Other(format!( + "Storage proof channel closed for {hashed_address}" + )), + ), + ) + })?; + + drop(_guard); + + // Extract storage proof from the result + let proof = match proof_msg.result? { + ProofResult::StorageProof { hashed_address: addr, proof } => { + debug_assert_eq!( + addr, + hashed_address, + "storage worker must return same address: expected {hashed_address}, got {addr}" + ); + proof + } + ProofResult::AccountMultiproof { .. 
} => { + unreachable!("storage worker only sends StorageProof variant") + } + }; + + let root = proof.root; + collected_decoded_storages.insert(hashed_address, proof); + root + } + // Since we do not store all intermediate nodes in the database, there might + // be a possibility of re-adding a non-modified leaf to the hash builder. + None => { + tracker.inc_missed_leaves(); + + match ctx.missed_leaves_storage_roots.entry(hashed_address) { + dashmap::Entry::Occupied(occ) => *occ.get(), + dashmap::Entry::Vacant(vac) => { + let root = + StorageProof::new_hashed(provider, provider, hashed_address) + .with_prefix_set_mut(Default::default()) + .with_trie_cursor_metrics( + &mut tracker.cursor_metrics.storage_trie_cursor, + ) + .with_hashed_cursor_metrics( + &mut tracker.cursor_metrics.storage_hashed_cursor, + ) + .storage_multiproof( + ctx.targets + .get(&hashed_address) + .cloned() + .unwrap_or_default(), + ) + .map_err(|e| { + ParallelStateRootError::StorageRoot( + reth_execution_errors::StorageRootError::Database( + DatabaseError::Other(e.to_string()), + ), + ) + })? + .root; + + vac.insert(root); + root + } + } + } + }; + + // Encode account + account_rlp.clear(); + let account = account.into_trie_account(root); + account.encode(&mut account_rlp as &mut dyn BufMut); + + hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); + } + } + } + + // Consume remaining storage proof receivers for accounts not encountered during trie walk. + for (hashed_address, receiver) in storage_proof_receivers { + if let Ok(proof_msg) = receiver.recv() { + // Extract storage proof from the result + if let Ok(ProofResult::StorageProof { proof, .. }) = proof_msg.result { + collected_decoded_storages.insert(hashed_address, proof); + } + } + } + + let _ = hash_builder.root(); + + let account_subtree_raw_nodes = hash_builder.take_proof_nodes(); + let decoded_account_subtree = DecodedProofNodes::try_from(account_subtree_raw_nodes)?; + + let (branch_node_hash_masks, branch_node_tree_masks) = if ctx.collect_branch_node_masks { + let updated_branch_nodes = hash_builder.updated_branch_nodes.unwrap_or_default(); + ( + updated_branch_nodes.iter().map(|(path, node)| (*path, node.hash_mask)).collect(), + updated_branch_nodes.into_iter().map(|(path, node)| (path, node.tree_mask)).collect(), + ) + } else { + (Default::default(), Default::default()) + }; + + // Extend tracker with accumulated metrics from account cursors + tracker.cursor_metrics.account_trie_cursor.extend(&account_trie_cursor_metrics); + tracker.cursor_metrics.account_hashed_cursor.extend(&account_hashed_cursor_metrics); + + Ok(DecodedMultiProof { + account_subtree: decoded_account_subtree, + branch_node_hash_masks, + branch_node_tree_masks, + storages: collected_decoded_storages, + }) +} +/// Queues storage proofs for all accounts in the targets and returns receivers. +/// +/// This function queues all storage proof tasks to the worker pool but returns immediately +/// with receivers, allowing the account trie walk to proceed in parallel with storage proof +/// computation. This enables interleaved parallelism for better performance. +/// +/// Propagates errors up if queuing fails. Receivers must be consumed by the caller. 
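`dispatch_storage_proofs` below implements a queue-first, consume-lazily scheme: every storage job is dispatched up front, the caller keeps one receiver per account, and each receiver is only blocked on when the account walk actually reaches that leaf. A standalone sketch of the pattern, assuming the `crossbeam-channel` crate; squaring stands in for proof computation and a `HashMap` stands in for the `B256Map` of receivers:

```rust
use std::{collections::HashMap, thread};

use crossbeam_channel::{unbounded, Receiver, Sender};

fn main() {
    let (work_tx, work_rx) = unbounded::<(u64, Sender<u64>)>();

    // One background worker computing "storage proofs" (here: squaring).
    let worker = thread::spawn(move || {
        while let Ok((account, result_tx)) = work_rx.recv() {
            let _ = result_tx.send(account * account);
        }
    });

    // Dispatch everything up front and keep one receiver per account.
    let mut receivers: HashMap<u64, Receiver<u64>> = HashMap::new();
    for account in [1u64, 2, 3] {
        let (result_tx, result_rx) = unbounded();
        work_tx.send((account, result_tx)).unwrap();
        receivers.insert(account, result_rx);
    }
    drop(work_tx);

    // "Account trie walk": block on a receiver only when that leaf is reached,
    // so account-side and storage-side work interleave.
    for account in [1u64, 2, 3] {
        let proof = receivers.remove(&account).unwrap().recv().unwrap();
        assert_eq!(proof, account * account);
    }

    worker.join().unwrap();
}
```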
+fn dispatch_storage_proofs( + storage_work_tx: &CrossbeamSender, + targets: &MultiProofTargets, + storage_prefix_sets: &mut B256Map, + with_branch_node_masks: bool, + multi_added_removed_keys: Option<&Arc>, +) -> Result>, ParallelStateRootError> { + let mut storage_proof_receivers = + B256Map::with_capacity_and_hasher(targets.len(), Default::default()); + + // Dispatch all storage proofs to worker pool + for (hashed_address, target_slots) in targets.iter() { + let prefix_set = storage_prefix_sets.remove(hashed_address).unwrap_or_default(); + + // Create channel for receiving ProofResultMessage + let (result_tx, result_rx) = crossbeam_channel::unbounded(); + let start = Instant::now(); + + // Create computation input (data only, no communication channel) + let input = StorageProofInput::new( + *hashed_address, + prefix_set, + target_slots.clone(), + with_branch_node_masks, + multi_added_removed_keys.cloned(), + ); + + // Always dispatch a storage proof so we obtain the storage root even when no slots are + // requested. + storage_work_tx + .send(StorageWorkerJob::StorageProof { + input, + proof_result_sender: ProofResultContext::new( + result_tx, + 0, + HashedPostState::default(), + start, + ), + }) + .map_err(|_| { + ParallelStateRootError::Other(format!( + "Failed to queue storage proof for {}: storage worker pool unavailable", + hashed_address + )) + })?; + + storage_proof_receivers.insert(*hashed_address, result_rx); + } + + Ok(storage_proof_receivers) +} +/// Input parameters for storage proof computation. +#[derive(Debug)] +pub struct StorageProofInput { + /// The hashed address for which the proof is calculated. + hashed_address: B256, + /// The prefix set for the proof calculation. + prefix_set: PrefixSet, + /// The target slots for the proof calculation. + target_slots: B256Set, + /// Whether or not to collect branch node masks + with_branch_node_masks: bool, + /// Provided by the user to give the necessary context to retain extra proofs. + multi_added_removed_keys: Option>, +} + +impl StorageProofInput { + /// Creates a new [`StorageProofInput`] with the given hashed address, prefix set, and target + /// slots. + pub const fn new( + hashed_address: B256, + prefix_set: PrefixSet, + target_slots: B256Set, + with_branch_node_masks: bool, + multi_added_removed_keys: Option>, + ) -> Self { + Self { + hashed_address, + prefix_set, + target_slots, + with_branch_node_masks, + multi_added_removed_keys, + } + } +} +/// Input parameters for account multiproof computation. +#[derive(Debug, Clone)] +pub struct AccountMultiproofInput { + /// The targets for which to compute the multiproof. + pub targets: MultiProofTargets, + /// The prefix sets for the proof calculation. + pub prefix_sets: TriePrefixSets, + /// Whether or not to collect branch node masks. + pub collect_branch_node_masks: bool, + /// Provided by the user to give the necessary context to retain extra proofs. + pub multi_added_removed_keys: Option>, + /// Cached storage proof roots for missed leaves encountered during account trie walk. + pub missed_leaves_storage_roots: Arc>, + /// Context for sending the proof result. + pub proof_result_sender: ProofResultContext, +} + +/// Parameters for building an account multiproof with pre-computed storage roots. +struct AccountMultiproofParams<'a> { + /// The targets for which to compute the multiproof. + targets: &'a MultiProofTargets, + /// The prefix set for the account trie walk. + prefix_set: PrefixSet, + /// Whether or not to collect branch node masks. 
+ collect_branch_node_masks: bool, + /// Provided by the user to give the necessary context to retain extra proofs. + multi_added_removed_keys: Option<&'a Arc>, + /// Receivers for storage proofs being computed in parallel. + storage_proof_receivers: B256Map>, + /// Cached storage proof roots for missed leaves encountered during account trie walk. + missed_leaves_storage_roots: &'a DashMap, +} + +/// Internal message for account workers. +#[derive(Debug)] +enum AccountWorkerJob { + /// Account multiproof computation request + AccountMultiproof { + /// Account multiproof input parameters + input: Box, + }, + /// Blinded account node retrieval request + BlindedAccountNode { + /// Path to the account node + path: Nibbles, + /// Channel to send result back to original caller + result_sender: Sender, + }, +} #[cfg(test)] mod tests { use super::*; use reth_provider::test_utils::create_test_provider_factory; - use reth_trie_common::prefix_set::TriePrefixSetsMut; - use std::sync::Arc; use tokio::{runtime::Builder, task}; fn test_ctx(factory: Factory) -> ProofTaskCtx { - ProofTaskCtx::new(factory, Arc::new(TriePrefixSetsMut::default())) + ProofTaskCtx::new(factory) } /// Ensures `ProofWorkerHandle::new` spawns workers correctly. diff --git a/crates/trie/parallel/src/proof_task_metrics.rs b/crates/trie/parallel/src/proof_task_metrics.rs index 6492e28d12..f9b8d70c16 100644 --- a/crates/trie/parallel/src/proof_task_metrics.rs +++ b/crates/trie/parallel/src/proof_task_metrics.rs @@ -1,4 +1,9 @@ use reth_metrics::{metrics::Histogram, Metrics}; +use reth_trie::{ + hashed_cursor::{HashedCursorMetrics, HashedCursorMetricsCache}, + trie_cursor::{TrieCursorMetrics, TrieCursorMetricsCache}, + TrieType, +}; /// Metrics for the proof task. #[derive(Clone, Metrics)] @@ -21,3 +26,87 @@ impl ProofTaskTrieMetrics { self.blinded_storage_nodes.record(count as f64); } } + +/// Cursor metrics for proof task operations. +#[derive(Clone, Debug)] +pub struct ProofTaskCursorMetrics { + /// Metrics for account trie cursor operations. + pub account_trie_cursor: TrieCursorMetrics, + /// Metrics for account hashed cursor operations. + pub account_hashed_cursor: HashedCursorMetrics, + /// Metrics for storage trie cursor operations. + pub storage_trie_cursor: TrieCursorMetrics, + /// Metrics for storage hashed cursor operations. + pub storage_hashed_cursor: HashedCursorMetrics, +} + +impl ProofTaskCursorMetrics { + /// Create a new instance with properly initialized cursor metrics. + pub fn new() -> Self { + Self { + account_trie_cursor: TrieCursorMetrics::new(TrieType::State), + account_hashed_cursor: HashedCursorMetrics::new(TrieType::State), + storage_trie_cursor: TrieCursorMetrics::new(TrieType::Storage), + storage_hashed_cursor: HashedCursorMetrics::new(TrieType::Storage), + } + } + + /// Record the cached metrics from the provided cache and reset the cache counters. + /// + /// This method adds the current counter values from the cache to the Prometheus metrics + /// and then resets all cache counters to zero. + pub fn record(&mut self, cache: &mut ProofTaskCursorMetricsCache) { + self.account_trie_cursor.record(&mut cache.account_trie_cursor); + self.account_hashed_cursor.record(&mut cache.account_hashed_cursor); + self.storage_trie_cursor.record(&mut cache.storage_trie_cursor); + self.storage_hashed_cursor.record(&mut cache.storage_hashed_cursor); + cache.reset(); + } +} + +impl Default for ProofTaskCursorMetrics { + fn default() -> Self { + Self::new() + } +} + +/// Cached cursor metrics for proof task operations. 
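The cursor-metrics additions in `proof_task_metrics.rs` follow an accumulate-then-flush scheme: cheap per-proof counter caches are merged (`extend`) into a per-worker cache, which is recorded and reset once at worker shutdown, instead of touching the metrics backend on every cursor operation. A small sketch of that pattern with a hypothetical `CursorCache` in place of the real cache types:

```rust
/// Hypothetical stand-in for a cursor metrics cache: plain counters that are
/// cheap to bump on the hot path.
#[derive(Clone, Copy, Debug, Default)]
struct CursorCache {
    seeks: u64,
    nexts: u64,
}

impl CursorCache {
    /// Accumulate another cache into this one (the `extend` pattern).
    fn extend(&mut self, other: &Self) {
        self.seeks += other.seeks;
        self.nexts += other.nexts;
    }

    /// Clear the counters after they have been flushed.
    fn reset(&mut self) {
        *self = Self::default();
    }
}

/// Stand-in for the metrics backend: flush the cache once, then reset it.
fn record(total: &mut u64, cache: &mut CursorCache) {
    *total += cache.seeks + cache.nexts;
    cache.reset();
}

fn main() {
    let mut worker_cache = CursorCache::default();

    // Each proof uses a cheap local cache that is merged into the worker cache.
    for _ in 0..3 {
        let per_proof = CursorCache { seeks: 2, nexts: 5 };
        worker_cache.extend(&per_proof);
    }

    // At worker shutdown the accumulated counts are recorded exactly once.
    let mut recorded_total = 0u64;
    record(&mut recorded_total, &mut worker_cache);
    assert_eq!(recorded_total, 3 * (2 + 5));
    assert_eq!(worker_cache.seeks, 0);
}
```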
+#[derive(Clone, Debug, Default, Copy)] +pub struct ProofTaskCursorMetricsCache { + /// Cached metrics for account trie cursor operations. + pub account_trie_cursor: TrieCursorMetricsCache, + /// Cached metrics for account hashed cursor operations. + pub account_hashed_cursor: HashedCursorMetricsCache, + /// Cached metrics for storage trie cursor operations. + pub storage_trie_cursor: TrieCursorMetricsCache, + /// Cached metrics for storage hashed cursor operations. + pub storage_hashed_cursor: HashedCursorMetricsCache, +} + +impl ProofTaskCursorMetricsCache { + /// Extend this cache by adding the counts from another cache. + /// + /// This accumulates the counter values from `other` into this cache. + pub fn extend(&mut self, other: &Self) { + self.account_trie_cursor.extend(&other.account_trie_cursor); + self.account_hashed_cursor.extend(&other.account_hashed_cursor); + self.storage_trie_cursor.extend(&other.storage_trie_cursor); + self.storage_hashed_cursor.extend(&other.storage_hashed_cursor); + } + + /// Reset all counters to zero. + pub const fn reset(&mut self) { + self.account_trie_cursor.reset(); + self.account_hashed_cursor.reset(); + self.storage_trie_cursor.reset(); + self.storage_hashed_cursor.reset(); + } + + /// Record the spans for metrics. + pub fn record_spans(&self) { + self.account_trie_cursor.record_span("account_trie_cursor"); + self.account_hashed_cursor.record_span("account_hashed_cursor"); + self.storage_trie_cursor.record_span("storage_trie_cursor"); + self.storage_hashed_cursor.record_span("storage_hashed_cursor"); + } +} diff --git a/crates/trie/parallel/src/stats.rs b/crates/trie/parallel/src/stats.rs index de5b0a628e..088b95c970 100644 --- a/crates/trie/parallel/src/stats.rs +++ b/crates/trie/parallel/src/stats.rs @@ -1,3 +1,5 @@ +#[cfg(feature = "metrics")] +use crate::proof_task_metrics::ProofTaskCursorMetricsCache; use derive_more::Deref; use reth_trie::stats::{TrieStats, TrieTracker}; @@ -34,6 +36,9 @@ pub struct ParallelTrieTracker { trie: TrieTracker, precomputed_storage_roots: u64, missed_leaves: u64, + #[cfg(feature = "metrics")] + /// Local tracking of cursor-related metrics + pub cursor_metrics: ProofTaskCursorMetricsCache, } impl ParallelTrieTracker { diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 133cdfece4..3ccc5aad1a 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -9,18 +9,16 @@ use alloy_trie::{BranchNodeCompact, TrieMask, EMPTY_ROOT_HASH}; use reth_execution_errors::{SparseTrieErrorKind, SparseTrieResult}; use reth_trie_common::{ prefix_set::{PrefixSet, PrefixSetMut}, - BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, RlpNode, TrieNode, CHILD_INDEX_RANGE, + BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, ProofTrieNode, RlpNode, TrieMasks, + TrieNode, CHILD_INDEX_RANGE, }; use reth_trie_sparse::{ provider::{RevealedNode, TrieNodeProvider}, - LeafLookup, LeafLookupError, RevealedSparseNode, RlpNodeStackItem, SparseNode, SparseNodeType, - SparseTrieInterface, SparseTrieUpdates, TrieMasks, + LeafLookup, LeafLookupError, RlpNodeStackItem, SparseNode, SparseNodeType, SparseTrieInterface, + SparseTrieUpdates, }; use smallvec::SmallVec; -use std::{ - cmp::{Ord, Ordering, PartialOrd}, - sync::mpsc, -}; +use std::cmp::{Ord, Ordering, PartialOrd}; use tracing::{debug, instrument, trace}; /// The maximum length of a path, in nibbles, which belongs to the upper subtrie of a @@ -172,7 +170,7 @@ impl SparseTrieInterface for 
ParallelSparseTrie { self } - fn reveal_nodes(&mut self, mut nodes: Vec) -> SparseTrieResult<()> { + fn reveal_nodes(&mut self, mut nodes: Vec) -> SparseTrieResult<()> { if nodes.is_empty() { return Ok(()) } @@ -180,7 +178,7 @@ impl SparseTrieInterface for ParallelSparseTrie { // Sort nodes first by their subtrie, and secondarily by their path. This allows for // grouping nodes by their subtrie using `chunk_by`. nodes.sort_unstable_by( - |RevealedSparseNode { path: path_a, .. }, RevealedSparseNode { path: path_b, .. }| { + |ProofTrieNode { path: path_a, .. }, ProofTrieNode { path: path_b, .. }| { let subtrie_type_a = SparseSubtrieType::from_path(path_a); let subtrie_type_b = SparseSubtrieType::from_path(path_b); subtrie_type_a.cmp(&subtrie_type_b).then(path_a.cmp(path_b)) @@ -188,7 +186,7 @@ impl SparseTrieInterface for ParallelSparseTrie { ); // Update the top-level branch node masks. This is simple and can't be done in parallel. - for RevealedSparseNode { path, masks, .. } in &nodes { + for ProofTrieNode { path, masks, .. } in &nodes { if let Some(tree_mask) = masks.tree_mask { self.branch_node_tree_masks.insert(*path, tree_mask); } @@ -264,11 +262,9 @@ impl SparseTrieInterface for ParallelSparseTrie { }) .collect(); - let (tx, rx) = mpsc::channel(); - // Zip the lower subtries and their corresponding node groups, and reveal lower subtrie // nodes in parallel - lower_subtries + let results: Vec<_> = lower_subtries .into_par_iter() .zip(node_groups.into_par_iter()) .map(|((subtrie_idx, mut subtrie), nodes)| { @@ -285,16 +281,12 @@ impl SparseTrieInterface for ParallelSparseTrie { } (subtrie_idx, subtrie, Ok(())) }) - .for_each_init(|| tx.clone(), |tx, result| tx.send(result).unwrap()); + .collect(); - drop(tx); - - // Take back all lower subtries which were sent to the rayon pool, collecting the last - // seen error in the process and returning that. If we don't fully drain the channel - // then we lose lower sparse tries, putting the whole ParallelSparseTrie in an - // inconsistent state. + // Put subtries back which were processed in the rayon pool, collecting the last + // seen error in the process and returning that. 
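// A minimal, self-contained sketch (not reth code) of the pattern used below: gathering
// rayon results with `collect()` instead of funneling them through an mpsc channel.
// `collect()` yields results in input order and cannot drop items, whereas the channel
// variant returns them in completion order and loses anything left undrained. Assumes only
// a `rayon` dependency; all names are illustrative.
use rayon::prelude::*;

fn main() {
    let inputs: Vec<u64> = (0..8).collect();

    // Old shape: a channel drained after the parallel loop. `mpsc::Sender` must be `Sync`
    // for `for_each_init` to accept the cloning closure (Rust 1.72+).
    let (tx, rx) = std::sync::mpsc::channel();
    inputs
        .par_iter()
        .map(|i| (*i, i * i))
        .for_each_init(|| tx.clone(), |tx, result| tx.send(result).unwrap());
    drop(tx);
    let mut via_channel: Vec<_> = rx.into_iter().collect();
    via_channel.sort_unstable(); // completion order is nondeterministic

    // New shape: collect directly; order already matches `inputs`.
    let via_collect: Vec<_> = inputs.par_iter().map(|i| (*i, i * i)).collect();

    assert_eq!(via_channel, via_collect);
}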
let mut any_err = Ok(()); - for (subtrie_idx, subtrie, res) in rx { + for (subtrie_idx, subtrie, res) in results { self.lower_subtries[subtrie_idx] = LowerSparseSubtrie::Revealed(subtrie); if res.is_err() { any_err = res; @@ -743,24 +735,12 @@ impl SparseTrieInterface for ParallelSparseTrie { // Update subtrie hashes in parallel { use rayon::iter::{IntoParallelIterator, ParallelIterator}; - use tracing::debug_span; - - let (tx, rx) = mpsc::channel(); let branch_node_tree_masks = &self.branch_node_tree_masks; let branch_node_hash_masks = &self.branch_node_hash_masks; - let span = tracing::Span::current(); - changed_subtries + let updated_subtries: Vec<_> = changed_subtries .into_par_iter() .map(|mut changed_subtrie| { - let _enter = debug_span!( - target: "trie::parallel_sparse", - parent: span.clone(), - "subtrie", - index = changed_subtrie.index - ) - .entered(); - #[cfg(feature = "metrics")] let start = std::time::Instant::now(); changed_subtrie.subtrie.update_hashes( @@ -773,10 +753,9 @@ impl SparseTrieInterface for ParallelSparseTrie { self.metrics.subtrie_hash_update_latency.record(start.elapsed()); changed_subtrie }) - .for_each_init(|| tx.clone(), |tx, result| tx.send(result).unwrap()); + .collect(); - drop(tx); - self.insert_changed_subtries(rx); + self.insert_changed_subtries(updated_subtries); } } @@ -2678,7 +2657,7 @@ mod tests { use reth_primitives_traits::Account; use reth_provider::{test_utils::create_test_provider_factory, TrieWriter}; use reth_trie::{ - hashed_cursor::{noop::NoopHashedAccountCursor, HashedPostStateAccountCursor}, + hashed_cursor::{noop::NoopHashedCursor, HashedPostStateCursor}, node_iter::{TrieElement, TrieNodeIter}, trie_cursor::{noop::NoopAccountTrieCursor, TrieCursor, TrieCursorFactory}, walker::TrieWalker, @@ -2688,14 +2667,14 @@ mod tests { prefix_set::PrefixSetMut, proof::{ProofNodes, ProofRetainer}, updates::TrieUpdates, - BranchNode, ExtensionNode, HashBuilder, LeafNode, RlpNode, TrieMask, TrieNode, - EMPTY_ROOT_HASH, + BranchNode, ExtensionNode, HashBuilder, LeafNode, ProofTrieNode, RlpNode, TrieMask, + TrieMasks, TrieNode, EMPTY_ROOT_HASH, }; use reth_trie_db::DatabaseTrieCursorFactory; use reth_trie_sparse::{ provider::{DefaultTrieNodeProvider, RevealedNode, TrieNodeProvider}, - LeafLookup, LeafLookupError, RevealedSparseNode, SerialSparseTrie, SparseNode, - SparseTrieInterface, SparseTrieUpdates, TrieMasks, + LeafLookup, LeafLookupError, SerialSparseTrie, SparseNode, SparseTrieInterface, + SparseTrieUpdates, }; use std::collections::{BTreeMap, BTreeSet}; @@ -2990,9 +2969,9 @@ mod tests { .into_sorted(); let mut node_iter = TrieNodeIter::state_trie( walker, - HashedPostStateAccountCursor::new( - NoopHashedAccountCursor::default(), - hashed_post_state.accounts(), + HashedPostStateCursor::new_account( + NoopHashedCursor::::default(), + &hashed_post_state, ), ); @@ -3277,7 +3256,7 @@ mod tests { let node = create_leaf_node([0x2, 0x3], 42); let masks = TrieMasks::none(); - trie.reveal_nodes(vec![RevealedSparseNode { path, node, masks }]).unwrap(); + trie.reveal_nodes(vec![ProofTrieNode { path, node, masks }]).unwrap(); assert_matches!( trie.upper_subtrie.nodes.get(&path), @@ -3298,7 +3277,7 @@ mod tests { let node = create_leaf_node([0x3, 0x4], 42); let masks = TrieMasks::none(); - trie.reveal_nodes(vec![RevealedSparseNode { path, node, masks }]).unwrap(); + trie.reveal_nodes(vec![ProofTrieNode { path, node, masks }]).unwrap(); // Check that the lower subtrie was created let idx = path_subtrie_index_unchecked(&path); @@ -3322,7 +3301,7 @@ mod tests { 
let node = create_leaf_node([0x4, 0x5], 42); let masks = TrieMasks::none(); - trie.reveal_nodes(vec![RevealedSparseNode { path, node, masks }]).unwrap(); + trie.reveal_nodes(vec![ProofTrieNode { path, node, masks }]).unwrap(); // Check that the lower subtrie's path hasn't changed let idx = path_subtrie_index_unchecked(&path); @@ -3383,7 +3362,7 @@ mod tests { let node = create_extension_node([0x2], child_hash); let masks = TrieMasks::none(); - trie.reveal_nodes(vec![RevealedSparseNode { path, node, masks }]).unwrap(); + trie.reveal_nodes(vec![ProofTrieNode { path, node, masks }]).unwrap(); // Extension node should be in upper trie assert_matches!( @@ -3445,7 +3424,7 @@ mod tests { let node = create_branch_node_with_children(&[0x0, 0x7, 0xf], child_hashes.clone()); let masks = TrieMasks::none(); - trie.reveal_nodes(vec![RevealedSparseNode { path, node, masks }]).unwrap(); + trie.reveal_nodes(vec![ProofTrieNode { path, node, masks }]).unwrap(); // Branch node should be in upper trie assert_matches!( @@ -3502,10 +3481,10 @@ mod tests { // Reveal nodes using reveal_nodes trie.reveal_nodes(vec![ - RevealedSparseNode { path: branch_path, node: branch_node, masks: TrieMasks::none() }, - RevealedSparseNode { path: leaf_1_path, node: leaf_1, masks: TrieMasks::none() }, - RevealedSparseNode { path: leaf_2_path, node: leaf_2, masks: TrieMasks::none() }, - RevealedSparseNode { path: leaf_3_path, node: leaf_3, masks: TrieMasks::none() }, + ProofTrieNode { path: branch_path, node: branch_node, masks: TrieMasks::none() }, + ProofTrieNode { path: leaf_1_path, node: leaf_1, masks: TrieMasks::none() }, + ProofTrieNode { path: leaf_2_path, node: leaf_2, masks: TrieMasks::none() }, + ProofTrieNode { path: leaf_3_path, node: leaf_3, masks: TrieMasks::none() }, ]) .unwrap(); @@ -4207,7 +4186,7 @@ mod tests { // Convert the logs into reveal_nodes call on a fresh ParallelSparseTrie let nodes = vec![ // Branch at 0x4f8807 - RevealedSparseNode { + ProofTrieNode { path: branch_path, node: { TrieNode::Branch(BranchNode::new( @@ -4270,7 +4249,7 @@ mod tests { }, }, // Branch at 0x4f88072 - RevealedSparseNode { + ProofTrieNode { path: removed_branch_path, node: { let stack = vec![ @@ -4290,7 +4269,7 @@ mod tests { }, }, // Extension at 0x4f880722 - RevealedSparseNode { + ProofTrieNode { path: Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2, 0x2]), node: { let extension_node = ExtensionNode::new( @@ -4304,7 +4283,7 @@ mod tests { masks: TrieMasks { hash_mask: None, tree_mask: None }, }, // Leaf at 0x4f88072c - RevealedSparseNode { + ProofTrieNode { path: Nibbles::from_nibbles([0x4, 0xf, 0x8, 0x8, 0x0, 0x7, 0x2, 0xc]), node: { let leaf_node = LeafNode::new( @@ -4423,9 +4402,9 @@ mod tests { // Step 2: Reveal nodes in the trie let mut trie = ParallelSparseTrie::from_root(extension, TrieMasks::none(), true).unwrap(); trie.reveal_nodes(vec![ - RevealedSparseNode { path: branch_path, node: branch, masks: TrieMasks::none() }, - RevealedSparseNode { path: leaf_1_path, node: leaf_1, masks: TrieMasks::none() }, - RevealedSparseNode { path: leaf_2_path, node: leaf_2, masks: TrieMasks::none() }, + ProofTrieNode { path: branch_path, node: branch, masks: TrieMasks::none() }, + ProofTrieNode { path: leaf_1_path, node: leaf_1, masks: TrieMasks::none() }, + ProofTrieNode { path: leaf_2_path, node: leaf_2, masks: TrieMasks::none() }, ]) .unwrap(); @@ -4960,12 +4939,12 @@ mod tests { // └── 1 -> Leaf (Path = 1) sparse .reveal_nodes(vec![ - RevealedSparseNode { + ProofTrieNode { path: Nibbles::default(), node: branch, masks: 
TrieMasks { hash_mask: None, tree_mask: Some(TrieMask::new(0b01)) }, }, - RevealedSparseNode { + ProofTrieNode { path: Nibbles::from_nibbles([0x1]), node: TrieNode::Leaf(leaf), masks: TrieMasks::none(), @@ -5009,12 +4988,12 @@ mod tests { // └── 1 -> Leaf (Path = 1) sparse .reveal_nodes(vec![ - RevealedSparseNode { + ProofTrieNode { path: Nibbles::default(), node: branch, masks: TrieMasks { hash_mask: None, tree_mask: Some(TrieMask::new(0b01)) }, }, - RevealedSparseNode { + ProofTrieNode { path: Nibbles::from_nibbles([0x1]), node: TrieNode::Leaf(leaf), masks: TrieMasks::none(), @@ -5361,13 +5340,13 @@ mod tests { Default::default(), [key1()], ); - let revealed_nodes: Vec = hash_builder_proof_nodes + let revealed_nodes: Vec = hash_builder_proof_nodes .nodes_sorted() .into_iter() .map(|(path, node)| { let hash_mask = branch_node_hash_masks.get(&path).copied(); let tree_mask = branch_node_tree_masks.get(&path).copied(); - RevealedSparseNode { + ProofTrieNode { path, node: TrieNode::decode(&mut &node[..]).unwrap(), masks: TrieMasks { hash_mask, tree_mask }, @@ -5399,13 +5378,13 @@ mod tests { Default::default(), [key3()], ); - let revealed_nodes: Vec = hash_builder_proof_nodes + let revealed_nodes: Vec = hash_builder_proof_nodes .nodes_sorted() .into_iter() .map(|(path, node)| { let hash_mask = branch_node_hash_masks.get(&path).copied(); let tree_mask = branch_node_tree_masks.get(&path).copied(); - RevealedSparseNode { + ProofTrieNode { path, node: TrieNode::decode(&mut &node[..]).unwrap(), masks: TrieMasks { hash_mask, tree_mask }, @@ -5478,13 +5457,13 @@ mod tests { Default::default(), [key1(), Nibbles::from_nibbles_unchecked([0x01])], ); - let revealed_nodes: Vec = hash_builder_proof_nodes + let revealed_nodes: Vec = hash_builder_proof_nodes .nodes_sorted() .into_iter() .map(|(path, node)| { let hash_mask = branch_node_hash_masks.get(&path).copied(); let tree_mask = branch_node_tree_masks.get(&path).copied(); - RevealedSparseNode { + ProofTrieNode { path, node: TrieNode::decode(&mut &node[..]).unwrap(), masks: TrieMasks { hash_mask, tree_mask }, @@ -5516,13 +5495,13 @@ mod tests { Default::default(), [key2()], ); - let revealed_nodes: Vec = hash_builder_proof_nodes + let revealed_nodes: Vec = hash_builder_proof_nodes .nodes_sorted() .into_iter() .map(|(path, node)| { let hash_mask = branch_node_hash_masks.get(&path).copied(); let tree_mask = branch_node_tree_masks.get(&path).copied(); - RevealedSparseNode { + ProofTrieNode { path, node: TrieNode::decode(&mut &node[..]).unwrap(), masks: TrieMasks { hash_mask, tree_mask }, @@ -5601,13 +5580,13 @@ mod tests { Default::default(), [key1()], ); - let revealed_nodes: Vec = hash_builder_proof_nodes + let revealed_nodes: Vec = hash_builder_proof_nodes .nodes_sorted() .into_iter() .map(|(path, node)| { let hash_mask = branch_node_hash_masks.get(&path).copied(); let tree_mask = branch_node_tree_masks.get(&path).copied(); - RevealedSparseNode { + ProofTrieNode { path, node: TrieNode::decode(&mut &node[..]).unwrap(), masks: TrieMasks { hash_mask, tree_mask }, @@ -6595,16 +6574,12 @@ mod tests { let leaf_masks = TrieMasks::none(); trie.reveal_nodes(vec![ - RevealedSparseNode { + ProofTrieNode { path: Nibbles::from_nibbles([0x3]), node: TrieNode::Branch(branch_0x3_node), masks: branch_0x3_masks, }, - RevealedSparseNode { - path: leaf_path, - node: TrieNode::Leaf(leaf_node), - masks: leaf_masks, - }, + ProofTrieNode { path: leaf_path, node: TrieNode::Leaf(leaf_node), masks: leaf_masks }, ]) .unwrap(); diff --git a/crates/trie/sparse/benches/root.rs 
b/crates/trie/sparse/benches/root.rs index 9eaf54c2d0..ece0aa5313 100644 --- a/crates/trie/sparse/benches/root.rs +++ b/crates/trie/sparse/benches/root.rs @@ -5,14 +5,14 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use itertools::Itertools; use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; use reth_trie::{ - hashed_cursor::{noop::NoopHashedStorageCursor, HashedPostStateStorageCursor}, + hashed_cursor::{noop::NoopHashedCursor, HashedPostStateCursor}, node_iter::{TrieElement, TrieNodeIter}, trie_cursor::{noop::NoopStorageTrieCursor, InMemoryTrieCursor}, updates::StorageTrieUpdates, walker::TrieWalker, HashedStorage, }; -use reth_trie_common::{HashBuilder, Nibbles}; +use reth_trie_common::{updates::TrieUpdatesSorted, HashBuilder, Nibbles}; use reth_trie_sparse::{provider::DefaultTrieNodeProvider, SerialSparseTrie, SparseTrie}; fn calculate_root_from_leaves(c: &mut Criterion) { @@ -133,18 +133,36 @@ fn calculate_root_from_leaves_repeated(c: &mut Criterion) { ) }; + // Create a TrieUpdatesSorted with just this storage trie + let mut storage_tries = Default::default(); + alloy_primitives::map::B256Map::insert( + &mut storage_tries, + B256::ZERO, + trie_updates_sorted.clone(), + ); + let full_trie_updates = + TrieUpdatesSorted::new(Vec::new(), storage_tries); + let walker = TrieWalker::<_>::storage_trie( - InMemoryTrieCursor::new( - Some(NoopStorageTrieCursor::default()), - &trie_updates_sorted.storage_nodes, + InMemoryTrieCursor::new_storage( + NoopStorageTrieCursor::default(), + &full_trie_updates, + B256::ZERO, ), prefix_set, ); + let hashed_address = B256::ZERO; + let mut storages = alloy_primitives::map::B256Map::default(); + storages.insert(hashed_address, storage_sorted.clone()); + let hashed_post_state = + reth_trie::HashedPostStateSorted::new(Vec::new(), storages); + let mut node_iter = TrieNodeIter::storage_trie( walker, - HashedPostStateStorageCursor::new( - NoopHashedStorageCursor::default(), - Some(&storage_sorted), + HashedPostStateCursor::new_storage( + NoopHashedCursor::::default(), + &hashed_post_state, + hashed_address, ), ); diff --git a/crates/trie/sparse/benches/update.rs b/crates/trie/sparse/benches/update.rs deleted file mode 100644 index dff0260a9a..0000000000 --- a/crates/trie/sparse/benches/update.rs +++ /dev/null @@ -1,104 +0,0 @@ -#![allow(missing_docs)] - -use alloy_primitives::{B256, U256}; -use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; -use proptest::{prelude::*, strategy::ValueTree}; -use rand::seq::IteratorRandom; -use reth_trie_common::Nibbles; -use reth_trie_sparse::{provider::DefaultTrieNodeProvider, SerialSparseTrie, SparseTrie}; - -const LEAF_COUNTS: [usize; 2] = [1_000, 5_000]; - -fn update_leaf(c: &mut Criterion) { - let mut group = c.benchmark_group("update_leaf"); - - for leaf_count in LEAF_COUNTS { - group.bench_function(BenchmarkId::from_parameter(leaf_count), |b| { - let leaves = generate_leaves(leaf_count); - // Start with an empty trie - let provider = DefaultTrieNodeProvider; - - b.iter_batched( - || { - let mut trie = SparseTrie::::revealed_empty(); - // Pre-populate with data - for (path, value) in leaves.iter().cloned() { - trie.update_leaf(path, value, &provider).unwrap(); - } - - let new_leaves = leaves - .iter() - // Update 10% of existing leaves with new values - .choose_multiple(&mut rand::rng(), leaf_count / 10) - .into_iter() - .map(|(path, _)| { - ( - path, - alloy_rlp::encode_fixed_size(&U256::from(path.len() * 2)).to_vec(), - ) - }) - 
.collect::>(); - - (trie, new_leaves) - }, - |(mut trie, new_leaves)| { - for (path, new_value) in new_leaves { - trie.update_leaf(*path, new_value, &provider).unwrap(); - } - trie - }, - BatchSize::LargeInput, - ); - }); - } -} - -fn remove_leaf(c: &mut Criterion) { - let mut group = c.benchmark_group("remove_leaf"); - - for leaf_count in LEAF_COUNTS { - group.bench_function(BenchmarkId::from_parameter(leaf_count), |b| { - let leaves = generate_leaves(leaf_count); - // Start with an empty trie - let provider = DefaultTrieNodeProvider; - - b.iter_batched( - || { - let mut trie = SparseTrie::::revealed_empty(); - // Pre-populate with data - for (path, value) in leaves.iter().cloned() { - trie.update_leaf(path, value, &provider).unwrap(); - } - - let delete_leaves = leaves - .iter() - .map(|(path, _)| path) - // Remove 10% leaves - .choose_multiple(&mut rand::rng(), leaf_count / 10); - - (trie, delete_leaves) - }, - |(mut trie, delete_leaves)| { - for path in delete_leaves { - trie.remove_leaf(path, &provider).unwrap(); - } - trie - }, - BatchSize::LargeInput, - ); - }); - } -} - -fn generate_leaves(size: usize) -> Vec<(Nibbles, Vec)> { - proptest::collection::hash_map(any::(), any::(), size) - .new_tree(&mut Default::default()) - .unwrap() - .current() - .iter() - .map(|(key, value)| (Nibbles::unpack(key), alloy_rlp::encode_fixed_size(value).to_vec())) - .collect() -} - -criterion_group!(benches, update_leaf, remove_leaf); -criterion_main!(benches); diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index f142385c3c..f02c748430 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -1,7 +1,7 @@ use crate::{ provider::{TrieNodeProvider, TrieNodeProviderFactory}, traits::SparseTrieInterface, - RevealedSparseNode, SerialSparseTrie, SparseTrie, TrieMasks, + SerialSparseTrie, SparseTrie, }; use alloc::{collections::VecDeque, vec::Vec}; use alloy_primitives::{ @@ -15,8 +15,9 @@ use reth_primitives_traits::Account; use reth_trie_common::{ proof::ProofNodes, updates::{StorageTrieUpdates, TrieUpdates}, - DecodedMultiProof, DecodedStorageMultiProof, MultiProof, Nibbles, RlpNode, StorageMultiProof, - TrieAccount, TrieMask, TrieNode, EMPTY_ROOT_HASH, TRIE_ACCOUNT_RLP_MAX_SIZE, + DecodedMultiProof, DecodedStorageMultiProof, MultiProof, Nibbles, ProofTrieNode, RlpNode, + StorageMultiProof, TrieAccount, TrieMask, TrieMasks, TrieNode, EMPTY_ROOT_HASH, + TRIE_ACCOUNT_RLP_MAX_SIZE, }; use tracing::{instrument, trace}; @@ -275,13 +276,12 @@ where { use rayon::iter::{ParallelBridge, ParallelIterator}; - let (tx, rx) = std::sync::mpsc::channel(); let retain_updates = self.retain_updates; // Process all storage trie revealings in parallel, having first removed the // `reveal_nodes` tracking and `SparseTrie`s for each account from their HashMaps. // These will be returned after processing. - storages + let results: Vec<_> = storages .into_iter() .map(|(account, storage_subtree)| { let revealed_nodes = self.storage.take_or_create_revealed_paths(&account); @@ -300,14 +300,12 @@ where (account, revealed_nodes, trie, result) }) - .for_each_init(|| tx.clone(), |tx, result| tx.send(result).unwrap()); - - drop(tx); + .collect(); // Return `revealed_nodes` and `SparseTrie` for each account, incrementing metrics and // returning the last error seen if any. 
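// A minimal, self-contained sketch (not reth code) of the take / process-in-parallel /
// put-back pattern above: entries are moved out of the owning map, transformed on the rayon
// pool, and every entry is reinserted afterwards (even failed ones), so the owner is never
// left missing state; only the last error is surfaced. Assumes a `rayon` dependency; all
// names below are illustrative.
use rayon::prelude::*;
use std::collections::HashMap;

fn process(value: u32) -> Result<u32, String> {
    if value % 2 == 0 { Ok(value * 10) } else { Err(format!("odd value {value}")) }
}

fn main() {
    let mut owned: HashMap<&'static str, u32> = HashMap::from([("a", 2), ("b", 3), ("c", 4)]);

    // Move everything out, keeping the keys so each entry can be put back.
    let taken: Vec<(&'static str, u32)> = owned.drain().collect();

    let results: Vec<_> = taken
        .into_par_iter()
        .map(|(key, value)| {
            let result = process(value);
            (key, value, result)
        })
        .collect();

    // Reinsert every entry (updated or original) and remember the last error.
    let mut last_err = Ok(());
    for (key, original, result) in results {
        match result {
            Ok(updated) => {
                owned.insert(key, updated);
            }
            Err(err) => {
                owned.insert(key, original);
                last_err = Err(err);
            }
        }
    }

    assert_eq!(owned.len(), 3); // nothing was lost
    assert!(last_err.is_err()); // "b" failed, and the error was kept
}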
let mut any_err = Ok(()); - for (account, revealed_nodes, trie, result) in rx { + for (account, revealed_nodes, trie, result) in results { self.storage.revealed_paths.insert(account, revealed_nodes); self.storage.tries.insert(account, trie); if let Ok(_metric_values) = result { @@ -945,9 +943,9 @@ struct ProofNodesMetricValues { #[derive(Debug, PartialEq, Eq)] struct FilterMappedProofNodes { /// Root node which was pulled out of the original node set to be handled specially. - root_node: Option, + root_node: Option, /// Filtered, decoded and unsorted proof nodes. Root node is removed. - nodes: Vec, + nodes: Vec, /// Number of new nodes that will be revealed. This includes all children of branch nodes, even /// if they are not in the proof. new_nodes: usize, @@ -955,7 +953,7 @@ struct FilterMappedProofNodes { metric_values: ProofNodesMetricValues, } -/// Filters the decoded nodes that are already revealed, maps them to `RevealedSparseNodes`, +/// Filters the decoded nodes that are already revealed, maps them to `SparseTrieNode`s, /// separates the root node if present, and returns additional information about the number of /// total, skipped, and new nodes. fn filter_map_revealed_nodes( @@ -1006,7 +1004,7 @@ fn filter_map_revealed_nodes( _ => TrieMasks::none(), }; - let node = RevealedSparseNode { path, node: proof_node, masks }; + let node = ProofTrieNode { path, node: proof_node, masks }; if is_root { // Perform sanity check. @@ -1382,12 +1380,12 @@ mod tests { assert_eq!( decoded, FilterMappedProofNodes { - root_node: Some(RevealedSparseNode { + root_node: Some(ProofTrieNode { path: Nibbles::default(), node: branch, masks: TrieMasks::none(), }), - nodes: vec![RevealedSparseNode { + nodes: vec![ProofTrieNode { path: Nibbles::from_nibbles([0x1]), node: leaf, masks: TrieMasks::none(), diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs index 308695ec0f..55a17a9a7f 100644 --- a/crates/trie/sparse/src/traits.rs +++ b/crates/trie/sparse/src/traits.rs @@ -7,9 +7,9 @@ use alloy_primitives::{ map::{HashMap, HashSet}, B256, }; -use alloy_trie::{BranchNodeCompact, TrieMask}; +use alloy_trie::BranchNodeCompact; use reth_execution_errors::SparseTrieResult; -use reth_trie_common::{Nibbles, TrieNode}; +use reth_trie_common::{Nibbles, ProofTrieNode, TrieMasks, TrieNode}; use crate::provider::TrieNodeProvider; @@ -74,7 +74,7 @@ pub trait SparseTrieInterface: Sized + Debug + Send + Sync { node: TrieNode, masks: TrieMasks, ) -> SparseTrieResult<()> { - self.reveal_nodes(vec![RevealedSparseNode { path, node, masks }]) + self.reveal_nodes(vec![ProofTrieNode { path, node, masks }]) } /// Reveals one or more trie nodes if they have not been revealed before. @@ -91,7 +91,7 @@ pub trait SparseTrieInterface: Sized + Debug + Send + Sync { /// # Returns /// /// `Ok(())` if successful, or an error if any of the nodes was not revealed. - fn reveal_nodes(&mut self, nodes: Vec) -> SparseTrieResult<()>; + fn reveal_nodes(&mut self, nodes: Vec) -> SparseTrieResult<()>; /// Updates the value of a leaf node at the specified path. /// @@ -232,36 +232,6 @@ pub trait SparseTrieInterface: Sized + Debug + Send + Sync { fn shrink_values_to(&mut self, size: usize); } -/// Struct for passing around branch node mask information. -/// -/// Branch nodes can have up to 16 children (one for each nibble). 
-/// The masks represent which children are stored in different ways: -/// - `hash_mask`: Indicates which children are stored as hashes in the database -/// - `tree_mask`: Indicates which children are complete subtrees stored in the database -/// -/// These masks are essential for efficient trie traversal and serialization, as they -/// determine how nodes should be encoded and stored on disk. -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub struct TrieMasks { - /// Branch node hash mask, if any. - /// - /// When a bit is set, the corresponding child node's hash is stored in the trie. - /// - /// This mask enables selective hashing of child nodes. - pub hash_mask: Option, - /// Branch node tree mask, if any. - /// - /// When a bit is set, the corresponding child subtree is stored in the database. - pub tree_mask: Option, -} - -impl TrieMasks { - /// Helper function, returns both fields `hash_mask` and `tree_mask` as [`None`] - pub const fn none() -> Self { - Self { hash_mask: None, tree_mask: None } - } -} - /// Tracks modifications to the sparse trie structure. /// /// Maintains references to both modified and pruned/removed branches, enabling @@ -307,14 +277,3 @@ pub enum LeafLookup { /// Leaf does not exist (exclusion proof found). NonExistent, } - -/// Carries all information needed by a sparse trie to reveal a particular node. -#[derive(Debug, PartialEq, Eq)] -pub struct RevealedSparseNode { - /// Path of the node. - pub path: Nibbles, - /// The node itself. - pub node: TrieNode, - /// Tree and hash masks for the node, if known. - pub masks: TrieMasks, -} diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 891b718693..acad15bc15 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1,7 +1,6 @@ use crate::{ provider::{RevealedNode, TrieNodeProvider}, - LeafLookup, LeafLookupError, RevealedSparseNode, SparseTrieInterface, SparseTrieUpdates, - TrieMasks, + LeafLookup, LeafLookupError, SparseTrieInterface, SparseTrieUpdates, }; use alloc::{ borrow::Cow, @@ -20,8 +19,8 @@ use alloy_rlp::Decodable; use reth_execution_errors::{SparseTrieErrorKind, SparseTrieResult}; use reth_trie_common::{ prefix_set::{PrefixSet, PrefixSetMut}, - BranchNodeCompact, BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, RlpNode, TrieMask, - TrieNode, CHILD_INDEX_RANGE, EMPTY_ROOT_HASH, + BranchNodeCompact, BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, ProofTrieNode, + RlpNode, TrieMask, TrieMasks, TrieNode, CHILD_INDEX_RANGE, EMPTY_ROOT_HASH, }; use smallvec::SmallVec; use tracing::{debug, instrument, trace}; @@ -589,7 +588,7 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } - fn reveal_nodes(&mut self, mut nodes: Vec) -> SparseTrieResult<()> { + fn reveal_nodes(&mut self, mut nodes: Vec) -> SparseTrieResult<()> { nodes.sort_unstable_by_key(|node| node.path); for node in nodes { self.reveal_node(node.path, node.node, node.masks)?; @@ -974,6 +973,7 @@ impl SparseTrieInterface for SerialSparseTrie { expected_value: Option<&Vec>, ) -> Result { // Helper function to check if a value matches the expected value + #[inline] fn check_value_match( actual_value: &Vec, expected_value: Option<&Vec>, @@ -2355,7 +2355,7 @@ mod tests { use reth_primitives_traits::Account; use reth_provider::{test_utils::create_test_provider_factory, TrieWriter}; use reth_trie::{ - hashed_cursor::{noop::NoopHashedAccountCursor, HashedPostStateAccountCursor}, + hashed_cursor::{noop::NoopHashedCursor, HashedPostStateCursor}, node_iter::{TrieElement, 
TrieNodeIter}, trie_cursor::{noop::NoopAccountTrieCursor, TrieCursor, TrieCursorFactory}, walker::TrieWalker, @@ -2415,9 +2415,9 @@ mod tests { .into_sorted(); let mut node_iter = TrieNodeIter::state_trie( walker, - HashedPostStateAccountCursor::new( - NoopHashedAccountCursor::default(), - hashed_post_state.accounts(), + HashedPostStateCursor::new_account( + NoopHashedCursor::::default(), + &hashed_post_state, ), ); diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index 403d187e46..d3540adda8 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -42,6 +42,7 @@ metrics = { workspace = true, optional = true } # `test-utils` feature triehash = { workspace = true, optional = true } +parking_lot = { workspace = true, optional = true } [dev-dependencies] # reth @@ -63,6 +64,7 @@ parking_lot.workspace = true pretty_assertions.workspace = true proptest-arbitrary-interop.workspace = true proptest.workspace = true +rand.workspace = true [features] metrics = ["reth-metrics", "dep:metrics"] @@ -83,9 +85,11 @@ serde = [ "revm-state/serde", "parking_lot/serde", "reth-ethereum-primitives/serde", + "rand/serde", ] test-utils = [ "triehash", + "parking_lot", "reth-primitives-traits/test-utils", "reth-trie-common/test-utils", "reth-ethereum-primitives/test-utils", @@ -101,3 +105,8 @@ harness = false name = "trie_root" required-features = ["test-utils"] harness = false + +[[bench]] +name = "proof_v2" +required-features = ["test-utils"] +harness = false diff --git a/crates/trie/trie/benches/proof_v2.rs b/crates/trie/trie/benches/proof_v2.rs new file mode 100644 index 0000000000..e5123ddc9a --- /dev/null +++ b/crates/trie/trie/benches/proof_v2.rs @@ -0,0 +1,178 @@ +#![allow(missing_docs, unreachable_pub)] +use alloy_primitives::{ + map::{B256Map, B256Set}, + B256, U256, +}; +use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; +use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; +use reth_trie::{ + hashed_cursor::{mock::MockHashedCursorFactory, HashedCursorFactory}, + proof::StorageProof, + proof_v2::StorageProofCalculator, + trie_cursor::{mock::MockTrieCursorFactory, TrieCursorFactory}, +}; +use reth_trie_common::{HashedPostState, HashedStorage}; + +/// Generate test data for benchmarking. 
+/// +/// Returns a tuple of: +/// - Hashed address for the storage trie +/// - `HashedPostState` with random storage slots +/// - Proof targets as B256 (sorted) for V2 implementation +/// - Equivalent [`B256Set`] for legacy implementation +fn generate_test_data( + dataset_size: usize, + num_targets: usize, +) -> (B256, HashedPostState, Vec, B256Set) { + let mut runner = TestRunner::deterministic(); + + // Use a fixed hashed address for the storage trie + let hashed_address = B256::from([0x42; 32]); + + // Generate random storage slots (key -> value) + let storage_strategy = + proptest::collection::vec((any::<[u8; 32]>(), any::()), dataset_size); + + let storage_entries = storage_strategy.new_tree(&mut runner).unwrap().current(); + + // Convert to storage map + let storage_map: B256Map = storage_entries + .iter() + .map(|(slot_bytes, value)| (B256::from(*slot_bytes), U256::from(*value))) + .collect(); + + // Create HashedPostState with single account's storage + let mut storages = B256Map::default(); + let hashed_storage = HashedStorage { + wiped: false, + storage: storage_map.iter().map(|(k, v)| (*k, *v)).collect(), + }; + storages.insert(hashed_address, hashed_storage); + + let hashed_post_state = HashedPostState { accounts: B256Map::default(), storages }; + + // Generate proof targets: 80% from existing slots, 20% random + let slot_keys: Vec = storage_map.keys().copied().collect(); + + let targets_strategy = proptest::collection::vec( + prop::bool::weighted(0.8).prop_flat_map(move |from_slots| { + if from_slots && !slot_keys.is_empty() { + prop::sample::select(slot_keys.clone()).boxed() + } else { + any::<[u8; 32]>().prop_map(B256::from).boxed() + } + }), + num_targets, + ); + + let target_b256s = targets_strategy.new_tree(&mut runner).unwrap().current(); + + // Sort B256 targets for V2 (storage_proof expects sorted targets) + let mut targets: Vec = target_b256s.clone(); + targets.sort(); + + // Create B256Set for legacy + let legacy_targets: B256Set = target_b256s.into_iter().collect(); + + (hashed_address, hashed_post_state, targets, legacy_targets) +} + +/// Create cursor factories from a `HashedPostState` for storage trie testing. +/// +/// This mimics the test harness pattern from the `proof_v2` tests by using `StateRoot` +/// to generate `TrieUpdates` from the `HashedPostState`. 
+fn create_cursor_factories( + post_state: &HashedPostState, +) -> (MockTrieCursorFactory, MockHashedCursorFactory) { + use reth_trie::{updates::StorageTrieUpdates, StateRoot}; + + // Create empty trie cursor factory to serve as the initial state for StateRoot + // Ensure that there's a storage trie dataset for every storage account + let storage_tries: B256Map<_> = post_state + .storages + .keys() + .copied() + .map(|addr| (addr, StorageTrieUpdates::default())) + .collect(); + + let empty_trie_cursor_factory = + MockTrieCursorFactory::from_trie_updates(reth_trie_common::updates::TrieUpdates { + storage_tries: storage_tries.clone(), + ..Default::default() + }); + + // Create mock hashed cursor factory from the post state + let hashed_cursor_factory = MockHashedCursorFactory::from_hashed_post_state(post_state.clone()); + + // Generate TrieUpdates using StateRoot + let (_root, mut trie_updates) = + StateRoot::new(empty_trie_cursor_factory, hashed_cursor_factory.clone()) + .root_with_updates() + .expect("StateRoot should succeed"); + + // Continue using empty storage tries for each account + trie_updates.storage_tries = storage_tries; + + // Initialize trie cursor factory from the generated TrieUpdates + let trie_cursor_factory = MockTrieCursorFactory::from_trie_updates(trie_updates); + + (trie_cursor_factory, hashed_cursor_factory) +} + +// Benchmark comparing legacy and V2 implementations +fn bench_proof_algos(c: &mut Criterion) { + let mut group = c.benchmark_group("Proof"); + for dataset_size in [128, 1024, 10240] { + for num_targets in [1, 16, 64, 128, 512, 2048] { + let (hashed_address, hashed_post_state, targets, legacy_targets) = + generate_test_data(dataset_size, num_targets); + + // Create mock cursor factories from the hashed post state + let (trie_cursor_factory, hashed_cursor_factory) = + create_cursor_factories(&hashed_post_state); + + let bench_name = format!("dataset_{dataset_size}/targets_{num_targets}"); + + group.bench_function(BenchmarkId::new("Legacy", &bench_name), |b| { + b.iter_batched( + || legacy_targets.clone(), + |targets| { + StorageProof::new_hashed( + trie_cursor_factory.clone(), + hashed_cursor_factory.clone(), + hashed_address, + ) + .storage_multiproof(targets) + .expect("Legacy proof generation failed"); + }, + BatchSize::SmallInput, + ); + }); + + group.bench_function(BenchmarkId::new("V2", &bench_name), |b| { + let trie_cursor = trie_cursor_factory + .storage_trie_cursor(hashed_address) + .expect("Failed to create trie cursor"); + let hashed_cursor = hashed_cursor_factory + .hashed_storage_cursor(hashed_address) + .expect("Failed to create hashed cursor"); + + let mut proof_calculator = + StorageProofCalculator::new_storage(trie_cursor, hashed_cursor); + + b.iter_batched( + || targets.clone(), + |targets| { + proof_calculator + .storage_proof(hashed_address, targets) + .expect("Proof generation failed"); + }, + BatchSize::SmallInput, + ); + }); + } + } +} + +criterion_group!(proof_comparison, bench_proof_algos); +criterion_main!(proof_comparison); diff --git a/crates/trie/trie/src/forward_cursor.rs b/crates/trie/trie/src/forward_cursor.rs index c99b0d049e..5abb5e2431 100644 --- a/crates/trie/trie/src/forward_cursor.rs +++ b/crates/trie/trie/src/forward_cursor.rs @@ -4,8 +4,9 @@ #[derive(Debug)] pub struct ForwardInMemoryCursor<'a, K, V> { /// The reference to the pre-sorted collection of entries. - entries: std::slice::Iter<'a, (K, V)>, - is_empty: bool, + entries: &'a [(K, V)], + /// Current index in the collection. 
+ idx: usize, } impl<'a, K, V> ForwardInMemoryCursor<'a, K, V> { @@ -13,25 +14,42 @@ impl<'a, K, V> ForwardInMemoryCursor<'a, K, V> { /// /// The cursor expects all of the entries to have been sorted in advance. #[inline] - pub fn new(entries: &'a [(K, V)]) -> Self { - Self { entries: entries.iter(), is_empty: entries.is_empty() } + pub const fn new(entries: &'a [(K, V)]) -> Self { + Self { entries, idx: 0 } } /// Returns `true` if the cursor is empty, regardless of its position. #[inline] pub const fn is_empty(&self) -> bool { - self.is_empty + self.entries.is_empty() + } + + /// Returns `true` if any entry satisfies the predicate. + #[inline] + pub fn has_any(&self, predicate: F) -> bool + where + F: Fn(&(K, V)) -> bool, + { + self.entries.iter().any(predicate) } /// Returns the current entry pointed to be the cursor, or `None` if no entries are left. #[inline] pub fn current(&self) -> Option<&(K, V)> { - self.entries.clone().next() + self.entries.get(self.idx) + } + + /// Resets the cursor to the beginning of the collection. + #[inline] + pub const fn reset(&mut self) { + self.idx = 0; } #[inline] fn next(&mut self) -> Option<&(K, V)> { - self.entries.next() + let entry = self.entries.get(self.idx)?; + self.idx += 1; + Some(entry) } } diff --git a/crates/trie/trie/src/hashed_cursor/metrics.rs b/crates/trie/trie/src/hashed_cursor/metrics.rs new file mode 100644 index 0000000000..71de95eab1 --- /dev/null +++ b/crates/trie/trie/src/hashed_cursor/metrics.rs @@ -0,0 +1,190 @@ +use super::{HashedCursor, HashedStorageCursor}; +use alloy_primitives::B256; +use reth_storage_errors::db::DatabaseError; +use std::time::{Duration, Instant}; +use tracing::debug_span; + +#[cfg(feature = "metrics")] +use crate::TrieType; +#[cfg(feature = "metrics")] +use reth_metrics::metrics::{self, Histogram}; + +/// Prometheus metrics for hashed cursor operations. +/// +/// Tracks the number of cursor operations for monitoring and performance analysis. +#[cfg(feature = "metrics")] +#[derive(Clone, Debug)] +pub struct HashedCursorMetrics { + /// Histogram tracking overall time spent in database operations + overall_duration: Histogram, + /// Histogram for `next()` operations + next_histogram: Histogram, + /// Histogram for `seek()` operations + seek_histogram: Histogram, + /// Histogram for `is_storage_empty()` operations + is_storage_empty_histogram: Histogram, +} + +#[cfg(feature = "metrics")] +impl HashedCursorMetrics { + /// Create a new metrics instance with the specified trie type label. + pub fn new(trie_type: TrieType) -> Self { + let trie_type_str = trie_type.as_str(); + + Self { + overall_duration: metrics::histogram!( + "trie.hashed_cursor.overall_duration", + "type" => trie_type_str + ), + next_histogram: metrics::histogram!( + "trie.hashed_cursor.operations", + "type" => trie_type_str, + "operation" => "next" + ), + seek_histogram: metrics::histogram!( + "trie.hashed_cursor.operations", + "type" => trie_type_str, + "operation" => "seek" + ), + is_storage_empty_histogram: metrics::histogram!( + "trie.hashed_cursor.operations", + "type" => trie_type_str, + "operation" => "is_storage_empty" + ), + } + } + + /// Record the cached metrics from the provided cache and reset the cache counters. + /// + /// This method adds the current counter values from the cache to the Prometheus metrics + /// and then resets all cache counters to zero. 
+ pub fn record(&mut self, cache: &mut HashedCursorMetricsCache) { + self.next_histogram.record(cache.next_count as f64); + self.seek_histogram.record(cache.seek_count as f64); + self.is_storage_empty_histogram.record(cache.is_storage_empty_count as f64); + self.overall_duration.record(cache.total_duration.as_secs_f64()); + cache.reset(); + } +} + +/// Cached metrics counters for hashed cursor operations. +#[derive(Debug, Copy, Clone)] +pub struct HashedCursorMetricsCache { + /// Counter for `next()` calls + pub next_count: usize, + /// Counter for `seek()` calls + pub seek_count: usize, + /// Counter for `is_storage_empty()` calls (if applicable) + pub is_storage_empty_count: usize, + /// Total duration spent in database operations + pub total_duration: Duration, +} + +impl Default for HashedCursorMetricsCache { + fn default() -> Self { + Self { + next_count: 0, + seek_count: 0, + is_storage_empty_count: 0, + total_duration: Duration::ZERO, + } + } +} + +impl HashedCursorMetricsCache { + /// Reset all counters to zero. + pub const fn reset(&mut self) { + self.next_count = 0; + self.seek_count = 0; + self.is_storage_empty_count = 0; + self.total_duration = Duration::ZERO; + } + + /// Extend this cache by adding the counts from another cache. + /// + /// This accumulates the counter values from `other` into this cache. + pub fn extend(&mut self, other: &Self) { + self.next_count += other.next_count; + self.seek_count += other.seek_count; + self.is_storage_empty_count += other.is_storage_empty_count; + self.total_duration += other.total_duration; + } + + /// Record the span for metrics. + pub fn record_span(&self, name: &'static str) { + let _span = debug_span!( + target: "trie::trie_cursor", + "Hashed cursor metrics", + name, + next_count = self.next_count, + seek_count = self.seek_count, + is_storage_empty_count = self.is_storage_empty_count, + total_duration = self.total_duration.as_secs_f64(), + ) + .entered(); + } +} + +/// A wrapper around a [`HashedCursor`] that tracks metrics for cursor operations. +/// +/// This implementation counts the number of times each cursor operation is called: +/// - `next()` - Move to the next entry +/// - `seek()` - Seek to a key or the next greater key +#[derive(Debug)] +pub struct InstrumentedHashedCursor<'metrics, C> { + /// The underlying cursor being wrapped + cursor: C, + /// Cached metrics counters + metrics: &'metrics mut HashedCursorMetricsCache, +} + +impl<'metrics, C> InstrumentedHashedCursor<'metrics, C> { + /// Create a new metrics cursor wrapping the given cursor. 
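    ///
    /// Illustrative use (a sketch, not code from this diff; `inner_cursor` and
    /// `aggregate_cache` are hypothetical bindings):
    ///
    /// ```ignore
    /// let mut cache = HashedCursorMetricsCache::default();
    /// let mut cursor = InstrumentedHashedCursor::new(inner_cursor, &mut cache);
    /// let _ = cursor.seek(B256::ZERO)?;   // counted and timed
    /// while cursor.next()?.is_some() {}   // each call counted and timed
    /// drop(cursor);                       // release the &mut borrow of `cache`
    /// aggregate_cache.extend(&cache);     // fold into a parent cache
    /// ```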
+ pub const fn new(cursor: C, metrics: &'metrics mut HashedCursorMetricsCache) -> Self { + Self { cursor, metrics } + } +} + +impl<'metrics, C> HashedCursor for InstrumentedHashedCursor<'metrics, C> +where + C: HashedCursor, +{ + type Value = C::Value; + + fn seek(&mut self, key: B256) -> Result, DatabaseError> { + let start = Instant::now(); + self.metrics.seek_count += 1; + let result = self.cursor.seek(key); + self.metrics.total_duration += start.elapsed(); + result + } + + fn next(&mut self) -> Result, DatabaseError> { + let start = Instant::now(); + self.metrics.next_count += 1; + let result = self.cursor.next(); + self.metrics.total_duration += start.elapsed(); + result + } + + fn reset(&mut self) { + self.cursor.reset() + } +} + +impl<'metrics, C> HashedStorageCursor for InstrumentedHashedCursor<'metrics, C> +where + C: HashedStorageCursor, +{ + fn is_storage_empty(&mut self) -> Result { + let start = Instant::now(); + self.metrics.is_storage_empty_count += 1; + let result = self.cursor.is_storage_empty(); + self.metrics.total_duration += start.elapsed(); + result + } + + fn set_hashed_address(&mut self, hashed_address: B256) { + self.cursor.set_hashed_address(hashed_address) + } +} diff --git a/crates/trie/trie/src/hashed_cursor/mock.rs b/crates/trie/trie/src/hashed_cursor/mock.rs index f091ae6ffe..15edd97ade 100644 --- a/crates/trie/trie/src/hashed_cursor/mock.rs +++ b/crates/trie/trie/src/hashed_cursor/mock.rs @@ -7,18 +7,19 @@ use alloy_primitives::{map::B256Map, B256, U256}; use parking_lot::{Mutex, MutexGuard}; use reth_primitives_traits::Account; use reth_storage_errors::db::DatabaseError; +use reth_trie_common::HashedPostState; use tracing::instrument; /// Mock hashed cursor factory. #[derive(Clone, Default, Debug)] pub struct MockHashedCursorFactory { hashed_accounts: Arc>, - hashed_storage_tries: B256Map>>, + hashed_storage_tries: Arc>>, /// List of keys that the hashed accounts cursor has visited. visited_account_keys: Arc>>>, /// List of keys that the hashed storages cursor has visited, per storage trie. - visited_storage_keys: B256Map>>>>, + visited_storage_keys: Arc>>>>, } impl MockHashedCursorFactory { @@ -31,15 +32,44 @@ impl MockHashedCursorFactory { hashed_storage_tries.keys().map(|k| (*k, Default::default())).collect(); Self { hashed_accounts: Arc::new(hashed_accounts), - hashed_storage_tries: hashed_storage_tries - .into_iter() - .map(|(k, v)| (k, Arc::new(v))) - .collect(), + hashed_storage_tries: Arc::new(hashed_storage_tries), visited_account_keys: Default::default(), - visited_storage_keys, + visited_storage_keys: Arc::new(visited_storage_keys), } } + /// Creates a new mock hashed cursor factory from a `HashedPostState`. 
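    ///
    /// Deleted entries are dropped while building the mock: accounts mapped to `None` and
    /// storage slots equal to `U256::ZERO` are filtered out, and every remaining account is
    /// given at least an empty storage map. Illustrative use (`post_state` and
    /// `hashed_address` are hypothetical bindings):
    ///
    /// ```ignore
    /// let factory = MockHashedCursorFactory::from_hashed_post_state(post_state);
    /// let mut storage_cursor = factory.hashed_storage_cursor(hashed_address)?;
    /// ```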
+ pub fn from_hashed_post_state(post_state: HashedPostState) -> Self { + // Extract accounts from post state, filtering out None (deleted accounts) + let hashed_accounts: BTreeMap = post_state + .accounts + .into_iter() + .filter_map(|(addr, account)| account.map(|acc| (addr, acc))) + .collect(); + + // Extract storages from post state + let mut hashed_storages: B256Map> = post_state + .storages + .into_iter() + .map(|(addr, hashed_storage)| { + // Convert HashedStorage to BTreeMap, filtering out zero values (deletions) + let storage_map: BTreeMap = hashed_storage + .storage + .into_iter() + .filter_map(|(slot, value)| (value != U256::ZERO).then_some((slot, value))) + .collect(); + (addr, storage_map) + }) + .collect(); + + // Ensure all accounts have at least an empty storage + for account in hashed_accounts.keys() { + hashed_storages.entry(*account).or_default(); + } + + Self::new(hashed_accounts, hashed_storages) + } + /// Returns a reference to the list of visited hashed account keys. pub fn visited_account_keys(&self) -> MutexGuard<'_, Vec>> { self.visited_account_keys.lock() @@ -72,35 +102,93 @@ impl HashedCursorFactory for MockHashedCursorFactory { &self, hashed_address: B256, ) -> Result, DatabaseError> { - Ok(MockHashedCursor::new( - self.hashed_storage_tries - .get(&hashed_address) - .ok_or_else(|| { - DatabaseError::Other(format!("storage trie for {hashed_address:?} not found")) - })? - .clone(), - self.visited_storage_keys - .get(&hashed_address) - .ok_or_else(|| { - DatabaseError::Other(format!("storage trie for {hashed_address:?} not found")) - })? - .clone(), - )) + MockHashedCursor::new_storage( + self.hashed_storage_tries.clone(), + self.visited_storage_keys.clone(), + hashed_address, + ) } } +/// Mock hashed cursor type - determines whether this is an account or storage cursor. +#[derive(Debug)] +enum MockHashedCursorType { + Account { + values: Arc>, + visited_keys: Arc>>>, + }, + Storage { + all_storage_values: Arc>>, + all_visited_storage_keys: Arc>>>>, + current_hashed_address: B256, + }, +} + /// Mock hashed cursor. -#[derive(Default, Debug)] +#[derive(Debug)] pub struct MockHashedCursor { /// The current key. If set, it is guaranteed to exist in `values`. current_key: Option, - values: Arc>, - visited_keys: Arc>>>, + cursor_type: MockHashedCursorType, } impl MockHashedCursor { - fn new(values: Arc>, visited_keys: Arc>>>) -> Self { - Self { current_key: None, values, visited_keys } + /// Creates a new mock hashed cursor for accounts with the given values and key tracking. + pub const fn new( + values: Arc>, + visited_keys: Arc>>>, + ) -> Self { + Self { + current_key: None, + cursor_type: MockHashedCursorType::Account { values, visited_keys }, + } + } + + /// Creates a new mock hashed cursor for storage with access to all storage tries. + pub fn new_storage( + all_storage_values: Arc>>, + all_visited_storage_keys: Arc>>>>, + hashed_address: B256, + ) -> Result { + if !all_storage_values.contains_key(&hashed_address) { + return Err(DatabaseError::Other(format!( + "storage trie for {hashed_address:?} not found" + ))); + } + Ok(Self { + current_key: None, + cursor_type: MockHashedCursorType::Storage { + all_storage_values, + all_visited_storage_keys, + current_hashed_address: hashed_address, + }, + }) + } + + /// Returns the values map for the current cursor type. + fn values(&self) -> &BTreeMap { + match &self.cursor_type { + MockHashedCursorType::Account { values, .. 
} => values.as_ref(), + MockHashedCursorType::Storage { + all_storage_values, current_hashed_address, .. + } => all_storage_values + .get(current_hashed_address) + .expect("current_hashed_address should exist in all_storage_values"), + } + } + + /// Returns the visited keys mutex for the current cursor type. + fn visited_keys(&self) -> &Mutex>> { + match &self.cursor_type { + MockHashedCursorType::Account { visited_keys, .. } => visited_keys.as_ref(), + MockHashedCursorType::Storage { + all_visited_storage_keys, + current_hashed_address, + .. + } => all_visited_storage_keys + .get(current_hashed_address) + .expect("current_hashed_address should exist in all_visited_storage_keys"), + } } } @@ -110,11 +198,11 @@ impl HashedCursor for MockHashedCursor { #[instrument(skip(self), ret(level = "trace"))] fn seek(&mut self, key: B256) -> Result, DatabaseError> { // Find the first key that is greater than or equal to the given key. - let entry = self.values.iter().find_map(|(k, v)| (k >= &key).then(|| (*k, v.clone()))); + let entry = self.values().iter().find_map(|(k, v)| (k >= &key).then(|| (*k, v.clone()))); if let Some((key, _)) = &entry { self.current_key = Some(*key); } - self.visited_keys.lock().push(KeyVisit { + self.visited_keys().lock().push(KeyVisit { visit_type: KeyVisitType::SeekNonExact(key), visited_key: entry.as_ref().map(|(k, _)| *k), }); @@ -123,7 +211,7 @@ impl HashedCursor for MockHashedCursor { #[instrument(skip(self), ret(level = "trace"))] fn next(&mut self) -> Result, DatabaseError> { - let mut iter = self.values.iter(); + let mut iter = self.values().iter(); // Jump to the first key that has a prefix of the current key if it's set, or to the first // key otherwise. iter.find(|(k, _)| { @@ -135,17 +223,33 @@ impl HashedCursor for MockHashedCursor { if let Some((key, _)) = &entry { self.current_key = Some(*key); } - self.visited_keys.lock().push(KeyVisit { + self.visited_keys().lock().push(KeyVisit { visit_type: KeyVisitType::Next, visited_key: entry.as_ref().map(|(k, _)| *k), }); Ok(entry) } + + fn reset(&mut self) { + self.current_key = None; + } } impl HashedStorageCursor for MockHashedCursor { #[instrument(level = "trace", skip(self), ret)] fn is_storage_empty(&mut self) -> Result { - Ok(self.values.is_empty()) + Ok(self.values().is_empty()) + } + + fn set_hashed_address(&mut self, hashed_address: B256) { + self.reset(); + match &mut self.cursor_type { + MockHashedCursorType::Storage { current_hashed_address, .. } => { + *current_hashed_address = hashed_address; + } + MockHashedCursorType::Account { .. } => { + panic!("set_hashed_address called on account cursor") + } + } } } diff --git a/crates/trie/trie/src/hashed_cursor/mod.rs b/crates/trie/trie/src/hashed_cursor/mod.rs index 6c4788a336..3b9d29964f 100644 --- a/crates/trie/trie/src/hashed_cursor/mod.rs +++ b/crates/trie/trie/src/hashed_cursor/mod.rs @@ -10,9 +10,15 @@ pub use post_state::*; pub mod noop; /// Mock trie cursor implementations. -#[cfg(test)] +#[cfg(any(test, feature = "test-utils"))] pub mod mock; +/// Metrics tracking hashed cursor implementations. +pub mod metrics; +#[cfg(feature = "metrics")] +pub use metrics::HashedCursorMetrics; +pub use metrics::{HashedCursorMetricsCache, InstrumentedHashedCursor}; + /// The factory trait for creating cursors over the hashed state. #[auto_impl::auto_impl(&)] pub trait HashedCursorFactory { @@ -47,6 +53,13 @@ pub trait HashedCursor { /// Move the cursor to the next entry and return it. 
fn next(&mut self) -> Result, DatabaseError>; + + /// Reset the cursor to its initial state. + /// + /// # Important + /// + /// After calling this method, the subsequent operation MUST be a [`HashedCursor::seek`] call. + fn reset(&mut self); } /// The cursor for iterating over hashed storage entries. @@ -54,4 +67,11 @@ pub trait HashedCursor { pub trait HashedStorageCursor: HashedCursor { /// Returns `true` if there are no entries for a given key. fn is_storage_empty(&mut self) -> Result; + + /// Set the hashed address for the storage cursor. + /// + /// # Important + /// + /// After calling this method, the subsequent operation MUST be a [`HashedCursor::seek`] call. + fn set_hashed_address(&mut self, hashed_address: B256); } diff --git a/crates/trie/trie/src/hashed_cursor/noop.rs b/crates/trie/trie/src/hashed_cursor/noop.rs index e5bc44f0f5..88726d3b67 100644 --- a/crates/trie/trie/src/hashed_cursor/noop.rs +++ b/crates/trie/trie/src/hashed_cursor/noop.rs @@ -1,5 +1,6 @@ use super::{HashedCursor, HashedCursorFactory, HashedStorageCursor}; use alloy_primitives::{B256, U256}; +use core::marker::PhantomData; use reth_primitives_traits::Account; use reth_storage_errors::db::DatabaseError; @@ -10,33 +11,43 @@ pub struct NoopHashedCursorFactory; impl HashedCursorFactory for NoopHashedCursorFactory { type AccountCursor<'a> - = NoopHashedAccountCursor + = NoopHashedCursor where Self: 'a; type StorageCursor<'a> - = NoopHashedStorageCursor + = NoopHashedCursor where Self: 'a; fn hashed_account_cursor(&self) -> Result, DatabaseError> { - Ok(NoopHashedAccountCursor::default()) + Ok(NoopHashedCursor::default()) } fn hashed_storage_cursor( &self, _hashed_address: B256, ) -> Result, DatabaseError> { - Ok(NoopHashedStorageCursor::default()) + Ok(NoopHashedCursor::default()) } } -/// Noop account hashed cursor. -#[derive(Default, Debug)] -#[non_exhaustive] -pub struct NoopHashedAccountCursor; +/// Generic noop hashed cursor. +#[derive(Debug)] +pub struct NoopHashedCursor { + _marker: PhantomData, +} -impl HashedCursor for NoopHashedAccountCursor { - type Value = Account; +impl Default for NoopHashedCursor { + fn default() -> Self { + Self { _marker: PhantomData } + } +} + +impl HashedCursor for NoopHashedCursor +where + V: std::fmt::Debug, +{ + type Value = V; fn seek(&mut self, _key: B256) -> Result, DatabaseError> { Ok(None) @@ -45,27 +56,18 @@ impl HashedCursor for NoopHashedAccountCursor { fn next(&mut self) -> Result, DatabaseError> { Ok(None) } -} -/// Noop account hashed cursor. 
-#[derive(Default, Debug)] -#[non_exhaustive] -pub struct NoopHashedStorageCursor; - -impl HashedCursor for NoopHashedStorageCursor { - type Value = U256; - - fn seek(&mut self, _key: B256) -> Result, DatabaseError> { - Ok(None) - } - - fn next(&mut self) -> Result, DatabaseError> { - Ok(None) + fn reset(&mut self) { + // Noop } } -impl HashedStorageCursor for NoopHashedStorageCursor { +impl HashedStorageCursor for NoopHashedCursor { fn is_storage_empty(&mut self) -> Result { Ok(true) } + + fn set_hashed_address(&mut self, _hashed_address: B256) { + // Noop + } } diff --git a/crates/trie/trie/src/hashed_cursor/post_state.rs b/crates/trie/trie/src/hashed_cursor/post_state.rs index 896251f363..fe2c6b9e0c 100644 --- a/crates/trie/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/trie/src/hashed_cursor/post_state.rs @@ -1,9 +1,9 @@ use super::{HashedCursor, HashedCursorFactory, HashedStorageCursor}; use crate::forward_cursor::ForwardInMemoryCursor; -use alloy_primitives::{map::B256Set, B256, U256}; +use alloy_primitives::{B256, U256}; use reth_primitives_traits::Account; use reth_storage_errors::db::DatabaseError; -use reth_trie_common::{HashedAccountsSorted, HashedPostStateSorted, HashedStorageSorted}; +use reth_trie_common::HashedPostStateSorted; /// The hashed cursor factory for the post state. #[derive(Clone, Debug)] @@ -25,130 +25,248 @@ where T: AsRef, { type AccountCursor<'cursor> - = HashedPostStateAccountCursor<'overlay, CF::AccountCursor<'cursor>> + = HashedPostStateCursor<'overlay, CF::AccountCursor<'cursor>, Option> where Self: 'cursor; type StorageCursor<'cursor> - = HashedPostStateStorageCursor<'overlay, CF::StorageCursor<'cursor>> + = HashedPostStateCursor<'overlay, CF::StorageCursor<'cursor>, U256> where Self: 'cursor; fn hashed_account_cursor(&self) -> Result, DatabaseError> { let cursor = self.cursor_factory.hashed_account_cursor()?; - Ok(HashedPostStateAccountCursor::new(cursor, &self.post_state.as_ref().accounts)) + Ok(HashedPostStateCursor::new_account(cursor, self.post_state.as_ref())) } fn hashed_storage_cursor( &self, hashed_address: B256, ) -> Result, DatabaseError> { + let post_state = self.post_state.as_ref(); let cursor = self.cursor_factory.hashed_storage_cursor(hashed_address)?; - Ok(HashedPostStateStorageCursor::new( - cursor, - self.post_state.as_ref().storages.get(&hashed_address), - )) + Ok(HashedPostStateCursor::new_storage(cursor, post_state, hashed_address)) } } -/// The cursor to iterate over post state hashed accounts and corresponding database entries. -/// It will always give precedence to the data from the hashed post state. +/// Trait for types that can be used with [`HashedPostStateCursor`] as a value. +/// +/// This enables uniform handling of deletions across different wrapper types: +/// - `Option`: `None` indicates deletion +/// - `U256`: `U256::ZERO` indicates deletion (maps to `None`) +/// +/// This design allows us to use `U256::ZERO`, rather than an Option, to indicate deletion for +/// storage (which maps cleanly to how changesets are stored in the DB) while not requiring two +/// different cursor implementations. +pub trait HashedPostStateCursorValue: Copy { + /// The non-zero type returned by `into_option`. + /// For `Option`, this is `Account`. + /// For `U256`, this is `U256`. + type NonZero: Copy + std::fmt::Debug; + + /// Returns `Some(&NonZero)` if the value is present, `None` if deleted. 
+ fn into_option(self) -> Option; +} + +impl HashedPostStateCursorValue for Option { + type NonZero = Account; + + fn into_option(self) -> Option { + self + } +} + +impl HashedPostStateCursorValue for U256 { + type NonZero = Self; + + fn into_option(self) -> Option { + (self != Self::ZERO).then_some(self) + } +} + +/// A cursor to iterate over state updates and corresponding database entries. +/// It will always give precedence to the data from the post state updates. #[derive(Debug)] -pub struct HashedPostStateAccountCursor<'a, C> { - /// The database cursor. +pub struct HashedPostStateCursor<'a, C, V> +where + V: HashedPostStateCursorValue, +{ + /// The underlying cursor. cursor: C, - /// Forward-only in-memory cursor over accounts. - post_state_cursor: ForwardInMemoryCursor<'a, B256, Account>, - /// Reference to the collection of account keys that were destroyed. - destroyed_accounts: &'a B256Set, - /// The last hashed account that was returned by the cursor. + /// Whether the underlying cursor should be ignored (when storage was wiped). + cursor_wiped: bool, + /// Entry that `database_cursor` is currently pointing to. + cursor_entry: Option<(B256, V::NonZero)>, + /// Forward-only in-memory cursor over underlying V. + post_state_cursor: ForwardInMemoryCursor<'a, B256, V>, + /// The last hashed key that was returned by the cursor. /// De facto, this is a current cursor position. - last_account: Option, + last_key: Option, + /// Tracks whether `seek` has been called. Used to prevent re-seeking the DB cursor + /// when it has been exhausted by iteration. + seeked: bool, + /// Reference to the full post state. + post_state: &'a HashedPostStateSorted, } -impl<'a, C> HashedPostStateAccountCursor<'a, C> +impl<'a, C> HashedPostStateCursor<'a, C, Option> where C: HashedCursor, { - /// Create new instance of [`HashedPostStateAccountCursor`]. - pub fn new(cursor: C, post_state_accounts: &'a HashedAccountsSorted) -> Self { - let post_state_cursor = ForwardInMemoryCursor::new(&post_state_accounts.accounts); - let destroyed_accounts = &post_state_accounts.destroyed_accounts; - Self { cursor, post_state_cursor, destroyed_accounts, last_account: None } - } - - /// Returns `true` if the account has been destroyed. - /// This check is used for evicting account keys from the state trie. - /// - /// This function only checks the post state, not the database, because the latter does not - /// store destroyed accounts. - fn is_account_cleared(&self, account: &B256) -> bool { - self.destroyed_accounts.contains(account) - } - - fn seek_inner(&mut self, key: B256) -> Result, DatabaseError> { - // Take the next account from the post state with the key greater than or equal to the - // sought key. - let post_state_entry = self.post_state_cursor.seek(&key); - - // It's an exact match, return the account from post state without looking up in the - // database. - if post_state_entry.is_some_and(|entry| entry.0 == key) { - return Ok(post_state_entry) - } - - // It's not an exact match, reposition to the first greater or equal account that wasn't - // cleared. - let mut db_entry = self.cursor.seek(key)?; - while db_entry.as_ref().is_some_and(|(address, _)| self.is_account_cleared(address)) { - db_entry = self.cursor.next()?; - } - - // Compare two entries and return the lowest. - Ok(Self::compare_entries(post_state_entry, db_entry)) - } - - fn next_inner(&mut self, last_account: B256) -> Result, DatabaseError> { - // Take the next account from the post state with the key greater than the last sought key. 
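A minimal, self-contained sketch of the deletion mapping that the `HashedPostStateCursorValue` trait above describes: `Option<Account>` treats `None` as a deletion, while a storage value treats zero as a deletion. The names below (`OverlayValue`, the unit `Account` struct, `u128` standing in for `U256`) are assumptions made purely for illustration; the real trait is the one defined above in this module.

```rust
// Local stand-ins, assumed for illustration: u128 plays the role of U256 and a
// unit struct plays the role of Account.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Account;

trait OverlayValue: Copy {
    type NonZero: Copy;
    fn into_option(self) -> Option<Self::NonZero>;
}

impl OverlayValue for Option<Account> {
    type NonZero = Account;
    fn into_option(self) -> Option<Account> {
        self
    }
}

impl OverlayValue for u128 {
    type NonZero = u128;
    // A zero value marks a deleted slot, mirroring how wiped storage is stored.
    fn into_option(self) -> Option<u128> {
        (self != 0).then_some(self)
    }
}

fn main() {
    assert_eq!(Some(Account).into_option(), Some(Account));
    assert_eq!(None::<Account>.into_option(), None);
    assert_eq!(7u128.into_option(), Some(7));
    assert_eq!(0u128.into_option(), None); // zero value == deletion
}
```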
- let post_state_entry = self.post_state_cursor.first_after(&last_account); - - // If post state was given precedence or account was cleared, move the cursor forward. - let mut db_entry = self.cursor.seek(last_account)?; - while db_entry.as_ref().is_some_and(|(address, _)| { - address <= &last_account || self.is_account_cleared(address) - }) { - db_entry = self.cursor.next()?; - } - - // Compare two entries and return the lowest. - Ok(Self::compare_entries(post_state_entry, db_entry)) - } - - /// Return the account with the lowest hashed account key. - /// - /// Given the next post state and database entries, return the smallest of the two. - /// If the account keys are the same, the post state entry is given precedence. - fn compare_entries( - post_state_item: Option<(B256, Account)>, - db_item: Option<(B256, Account)>, - ) -> Option<(B256, Account)> { - if let Some((post_state_entry, db_entry)) = post_state_item.zip(db_item) { - // If both are not empty, return the smallest of the two - // Post state is given precedence if keys are equal - Some(if post_state_entry.0 <= db_entry.0 { post_state_entry } else { db_entry }) - } else { - // Return either non-empty entry - db_item.or(post_state_item) + /// Create new account cursor which combines a DB cursor and the post state. + pub fn new_account(cursor: C, post_state: &'a HashedPostStateSorted) -> Self { + let post_state_cursor = ForwardInMemoryCursor::new(&post_state.accounts); + Self { + cursor, + cursor_wiped: false, + cursor_entry: None, + post_state_cursor, + last_key: None, + seeked: false, + post_state, } } } -impl HashedCursor for HashedPostStateAccountCursor<'_, C> +impl<'a, C> HashedPostStateCursor<'a, C, U256> where - C: HashedCursor, + C: HashedStorageCursor, { - type Value = Account; + /// Create new storage cursor with full post state reference. + /// This allows the cursor to switch between storage tries when `set_hashed_address` is called. + pub fn new_storage( + cursor: C, + post_state: &'a HashedPostStateSorted, + hashed_address: B256, + ) -> Self { + let (post_state_cursor, cursor_wiped) = + Self::get_storage_overlay(post_state, hashed_address); + Self { + cursor, + cursor_wiped, + cursor_entry: None, + post_state_cursor, + last_key: None, + seeked: false, + post_state, + } + } - /// Seek the next entry for a given hashed account key. + /// Returns the storage overlay for `hashed_address` and whether it was wiped. + fn get_storage_overlay( + post_state: &'a HashedPostStateSorted, + hashed_address: B256, + ) -> (ForwardInMemoryCursor<'a, B256, U256>, bool) { + let post_state_storage = post_state.storages.get(&hashed_address); + let cursor_wiped = post_state_storage.is_some_and(|u| u.is_wiped()); + let storage_slots = post_state_storage.map(|u| u.storage_slots_ref()).unwrap_or(&[]); + + (ForwardInMemoryCursor::new(storage_slots), cursor_wiped) + } +} + +impl<'a, C, V> HashedPostStateCursor<'a, C, V> +where + C: HashedCursor, + V: HashedPostStateCursorValue, +{ + /// Returns a mutable reference to the underlying cursor if it's not wiped, None otherwise. + fn get_cursor_mut(&mut self) -> Option<&mut C> { + (!self.cursor_wiped).then_some(&mut self.cursor) + } + + /// Asserts that the next entry to be returned from the cursor is not previous to the last entry + /// returned. 
+ fn set_last_key(&mut self, next_entry: &Option<(B256, V::NonZero)>) { + let next_key = next_entry.as_ref().map(|e| e.0); + debug_assert!( + self.last_key.is_none_or(|last| next_key.is_none_or(|next| next >= last)), + "Cannot return entry {:?} previous to the last returned entry at {:?}", + next_key, + self.last_key, + ); + self.last_key = next_key; + } + + /// Seeks the `cursor_entry` field of the struct using the cursor. + fn cursor_seek(&mut self, key: B256) -> Result<(), DatabaseError> { + // Only seek if: + // 1. We have a cursor entry and need to seek forward (entry.0 < key), OR + // 2. We have no cursor entry and haven't seeked yet (!self.seeked) + let should_seek = match self.cursor_entry.as_ref() { + Some(entry) => entry.0 < key, + None => !self.seeked, + }; + + if should_seek { + self.cursor_entry = self.get_cursor_mut().map(|c| c.seek(key)).transpose()?.flatten(); + } + + Ok(()) + } + + /// Seeks the `cursor_entry` field of the struct to the subsequent entry using the cursor. + fn cursor_next(&mut self) -> Result<(), DatabaseError> { + debug_assert!(self.seeked); + + // If the previous entry is `None`, and we've done a seek previously, then the cursor is + // exhausted, and we shouldn't call `next` again. + if self.cursor_entry.is_some() { + self.cursor_entry = self.get_cursor_mut().map(|c| c.next()).transpose()?.flatten(); + } + + Ok(()) + } + + /// Compares the current in-memory entry with the current entry of the cursor, and applies the + /// in-memory entry to the cursor entry as an overlay. + /// + /// This may consume and move forward the current entries when the overlay indicates a removed + /// node. + fn choose_next_entry(&mut self) -> Result, DatabaseError> { + loop { + let post_state_current = + self.post_state_cursor.current().copied().map(|(k, v)| (k, v.into_option())); + + match (post_state_current, &self.cursor_entry) { + (Some((mem_key, None)), _) + if self.cursor_entry.as_ref().is_none_or(|(db_key, _)| &mem_key < db_key) => + { + // If overlay has a removed value but DB cursor is exhausted or ahead of the + // in-memory cursor then move ahead in-memory, as there might be further + // non-removed overlay values. + self.post_state_cursor.first_after(&mem_key); + } + (Some((mem_key, None)), Some((db_key, _))) if &mem_key == db_key => { + // If overlay has a removed value which is returned from DB then move both + // cursors ahead to the next key. + self.post_state_cursor.first_after(&mem_key); + self.cursor_next()?; + } + (Some((mem_key, Some(value))), _) + if self.cursor_entry.as_ref().is_none_or(|(db_key, _)| &mem_key <= db_key) => + { + // If overlay returns a value prior to the DB's value, or the DB is exhausted, + // then we return the overlay's value. + return Ok(Some((mem_key, value))) + } + // All other cases: + // - mem_key > db_key + // - overlay is exhausted + // Return the db_entry. If DB is also exhausted then this returns None. + _ => return Ok(self.cursor_entry), + } + } + } +} + +impl HashedCursor for HashedPostStateCursor<'_, C, V> +where + C: HashedCursor, + V: HashedPostStateCursorValue, +{ + type Value = V::NonZero; + + /// Seek the next entry for a given hashed key. /// /// If the post state contains the exact match for the key, return it. /// Otherwise, retrieve the next entries that are greater than or equal to the key from the @@ -157,9 +275,13 @@ where /// The returned account key is memoized and the cursor remains positioned at that key until /// [`HashedCursor::seek`] or [`HashedCursor::next`] are called. 
fn seek(&mut self, key: B256) -> Result, DatabaseError> { - // Find the closes account. - let entry = self.seek_inner(key)?; - self.last_account = entry.as_ref().map(|entry| entry.0); + self.cursor_seek(key)?; + self.post_state_cursor.seek(&key); + + self.seeked = true; + + let entry = self.choose_next_entry()?; + self.set_last_key(&entry); Ok(entry) } @@ -168,153 +290,58 @@ where /// If the cursor is positioned at the entry, return the entry with next greater key. /// Returns [None] if the previous memoized or the next greater entries are missing. /// - /// NOTE: This function will not return any entry unless [`HashedCursor::seek`] has been - /// called. + /// NOTE: This function will not return any entry unless [`HashedCursor::seek`] has been called. fn next(&mut self) -> Result, DatabaseError> { - let next = match self.last_account { - Some(account) => { - let entry = self.next_inner(account)?; - self.last_account = entry.as_ref().map(|entry| entry.0); - entry - } - // no previous entry was found - None => None, + debug_assert!(self.seeked, "Cursor must be seek'd before next is called"); + + // A `last_key` of `None` indicates that the cursor is exhausted. + let Some(last_key) = self.last_key else { + return Ok(None); }; - Ok(next) - } -} -/// The cursor to iterate over post state hashed storages and corresponding database entries. -/// It will always give precedence to the data from the post state. -#[derive(Debug)] -pub struct HashedPostStateStorageCursor<'a, C> { - /// The database cursor. - cursor: C, - /// Forward-only in-memory cursor over non zero-valued account storage slots. - post_state_cursor: Option>, - /// Reference to the collection of storage slot keys that were cleared. - cleared_slots: Option<&'a B256Set>, - /// Flag indicating whether database storage was wiped. - storage_wiped: bool, - /// The last slot that has been returned by the cursor. - /// De facto, this is the cursor's position for the given account key. - last_slot: Option, -} - -impl<'a, C> HashedPostStateStorageCursor<'a, C> -where - C: HashedStorageCursor, -{ - /// Create new instance of [`HashedPostStateStorageCursor`] for the given hashed address. - pub fn new(cursor: C, post_state_storage: Option<&'a HashedStorageSorted>) -> Self { - let post_state_cursor = - post_state_storage.map(|s| ForwardInMemoryCursor::new(&s.non_zero_valued_slots)); - let cleared_slots = post_state_storage.map(|s| &s.zero_valued_slots); - let storage_wiped = post_state_storage.is_some_and(|s| s.wiped); - Self { cursor, post_state_cursor, cleared_slots, storage_wiped, last_slot: None } - } - - /// Check if the slot was zeroed out in the post state. - /// The database is not checked since it already has no zero-valued slots. - fn is_slot_zero_valued(&self, slot: &B256) -> bool { - self.cleared_slots.is_some_and(|s| s.contains(slot)) - } - - /// Find the storage entry in post state or database that's greater or equal to provided subkey. - fn seek_inner(&mut self, subkey: B256) -> Result, DatabaseError> { - // Attempt to find the account's storage in post state. - let post_state_entry = self.post_state_cursor.as_mut().and_then(|c| c.seek(&subkey)); - - // If database storage was wiped or it's an exact match, - // return the storage slot from post state without looking up in the database. - if self.storage_wiped || post_state_entry.is_some_and(|entry| entry.0 == subkey) { - return Ok(post_state_entry) - } - - // It's not an exact match and storage was not wiped, - // reposition to the first greater or equal account. 
- let mut db_entry = self.cursor.seek(subkey)?; - while db_entry.as_ref().is_some_and(|entry| self.is_slot_zero_valued(&entry.0)) { - db_entry = self.cursor.next()?; - } - - // Compare two entries and return the lowest. - Ok(Self::compare_entries(post_state_entry, db_entry)) - } - - /// Find the storage entry that is right after current cursor position. - fn next_inner(&mut self, last_slot: B256) -> Result, DatabaseError> { - // Attempt to find the account's storage in post state. - let post_state_entry = - self.post_state_cursor.as_mut().and_then(|c| c.first_after(&last_slot)); - - // Return post state entry immediately if database was wiped. - if self.storage_wiped { - return Ok(post_state_entry) - } - - // If post state was given precedence, move the cursor forward. - // If the entry was already returned or is zero-valued, move to the next. - let mut db_entry = self.cursor.seek(last_slot)?; - while db_entry - .as_ref() - .is_some_and(|entry| entry.0 == last_slot || self.is_slot_zero_valued(&entry.0)) + // If either cursor is currently pointing to the last entry which was returned then consume + // that entry so that `choose_next_entry` is looking at the subsequent one. + if let Some((key, _)) = self.post_state_cursor.current() && + key == &last_key { - db_entry = self.cursor.next()?; + self.post_state_cursor.first_after(&last_key); } - // Compare two entries and return the lowest. - Ok(Self::compare_entries(post_state_entry, db_entry)) - } - - /// Return the storage entry with the lowest hashed storage key (hashed slot). - /// - /// Given the next post state and database entries, return the smallest of the two. - /// If the storage keys are the same, the post state entry is given precedence. - fn compare_entries( - post_state_item: Option<(B256, U256)>, - db_item: Option<(B256, U256)>, - ) -> Option<(B256, U256)> { - if let Some((post_state_entry, db_entry)) = post_state_item.zip(db_item) { - // If both are not empty, return the smallest of the two - // Post state is given precedence if keys are equal - Some(if post_state_entry.0 <= db_entry.0 { post_state_entry } else { db_entry }) - } else { - // Return either non-empty entry - db_item.or(post_state_item) + if let Some((key, _)) = &self.cursor_entry && + key == &last_key + { + self.cursor_next()?; } - } -} -impl HashedCursor for HashedPostStateStorageCursor<'_, C> -where - C: HashedStorageCursor, -{ - type Value = U256; - - /// Seek the next account storage entry for a given hashed key pair. - fn seek(&mut self, subkey: B256) -> Result, DatabaseError> { - let entry = self.seek_inner(subkey)?; - self.last_slot = entry.as_ref().map(|entry| entry.0); + let entry = self.choose_next_entry()?; + self.set_last_key(&entry); Ok(entry) } - /// Return the next account storage entry for the current account key. - fn next(&mut self) -> Result, DatabaseError> { - let next = match self.last_slot { - Some(last_slot) => { - let entry = self.next_inner(last_slot)?; - self.last_slot = entry.as_ref().map(|entry| entry.0); - entry - } - // no previous entry was found - None => None, - }; - Ok(next) + fn reset(&mut self) { + let Self { + cursor, + cursor_wiped, + cursor_entry, + post_state_cursor, + last_key, + seeked, + post_state: _, + } = self; + + cursor.reset(); + post_state_cursor.reset(); + + *cursor_wiped = false; + *cursor_entry = None; + *last_key = None; + *seeked = false; } } -impl HashedStorageCursor for HashedPostStateStorageCursor<'_, C> +/// The cursor to iterate over post state hashed values and corresponding database entries. 
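For intuition about what `choose_next_entry` and the rewritten `next` produce, here is a hypothetical miniature of the overlay merge using plain integer keys: the post-state entry wins on equal keys, and a `None` (deleted) overlay entry suppresses the database entry. This sketches the semantics only; it is not the cursor's implementation.

```rust
// `db` holds sorted database entries, `overlay` holds sorted post-state
// entries where `None` marks a deletion. Post-state wins on equal keys.
fn merged(db: &[(u8, u32)], overlay: &[(u8, Option<u32>)]) -> Vec<(u8, u32)> {
    let mut out = Vec::new();
    let (mut i, mut j) = (0, 0);
    while i < db.len() || j < overlay.len() {
        match (db.get(i), overlay.get(j)) {
            // Database entry comes first: return it untouched.
            (Some(&(dk, dv)), Some(&(ok, _))) if dk < ok => {
                out.push((dk, dv));
                i += 1;
            }
            // Overlay entry is at or before the database entry: overlay wins.
            (Some(&(dk, _)), Some(&(ok, ov))) => {
                if let Some(v) = ov {
                    out.push((ok, v));
                }
                if dk == ok {
                    i += 1; // an overwrite or deletion also consumes the db entry
                }
                j += 1;
            }
            (Some(&(dk, dv)), None) => {
                out.push((dk, dv));
                i += 1;
            }
            (None, Some(&(ok, ov))) => {
                if let Some(v) = ov {
                    out.push((ok, v));
                }
                j += 1;
            }
            (None, None) => unreachable!(),
        }
    }
    out
}

fn main() {
    let db: [(u8, u32); 3] = [(1, 10), (3, 30), (5, 50)];
    let overlay: [(u8, Option<u32>); 3] = [(2, Some(20)), (3, None), (5, Some(55))];
    // 2 is new, 3 is deleted by the overlay, 5 is overwritten.
    assert_eq!(merged(&db, &overlay), vec![(1, 10), (2, 20), (5, 55)]);
}
```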
+/// It will always give precedence to the data from the post state. +impl HashedStorageCursor for HashedPostStateCursor<'_, C, U256> where C: HashedStorageCursor, { @@ -323,15 +350,232 @@ where /// This function should be called before attempting to call [`HashedCursor::seek`] or /// [`HashedCursor::next`]. fn is_storage_empty(&mut self) -> Result { - let is_empty = match &self.post_state_cursor { - Some(cursor) => { - // If the storage has been wiped at any point - self.storage_wiped && - // and the current storage does not contain any non-zero values - cursor.is_empty() - } - None => self.cursor.is_storage_empty()?, - }; - Ok(is_empty) + // Storage is not empty if it has non-zero slots. + if self.post_state_cursor.has_any(|(_, value)| value.into_option().is_some()) { + return Ok(false); + } + + // If no non-zero slots in post state, check the database. + // Returns true if cursor is wiped. + self.get_cursor_mut().map_or(Ok(true), |c| c.is_storage_empty()) + } + + fn set_hashed_address(&mut self, hashed_address: B256) { + self.reset(); + self.cursor.set_hashed_address(hashed_address); + (self.post_state_cursor, self.cursor_wiped) = + HashedPostStateCursor::::get_storage_overlay(self.post_state, hashed_address); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::hashed_cursor::mock::MockHashedCursor; + use parking_lot::Mutex; + use std::{collections::BTreeMap, sync::Arc}; + + mod proptest_tests { + use super::*; + use itertools::Itertools; + use proptest::prelude::*; + + /// Merge `db_nodes` with `post_state_nodes`, applying the post state overlay. + /// This properly handles deletions (ZERO values for U256, None for Account). + fn merge_with_overlay( + db_nodes: Vec<(B256, V::NonZero)>, + post_state_nodes: Vec<(B256, V)>, + ) -> Vec<(B256, V::NonZero)> + where + V: HashedPostStateCursorValue, + V::NonZero: Copy, + { + db_nodes + .into_iter() + .merge_join_by(post_state_nodes, |db_entry, mem_entry| db_entry.0.cmp(&mem_entry.0)) + .filter_map(|entry| match entry { + // Only in db: keep it + itertools::EitherOrBoth::Left((key, node)) => Some((key, node)), + // Only in post state: keep if not a deletion + itertools::EitherOrBoth::Right((key, wrapped)) => { + wrapped.into_option().map(|val| (key, val)) + } + // In both: post state takes precedence (keep if not a deletion) + itertools::EitherOrBoth::Both(_, (key, wrapped)) => { + wrapped.into_option().map(|val| (key, val)) + } + }) + .collect() + } + + /// Generate a strategy for U256 values + fn u256_strategy() -> impl Strategy { + any::().prop_map(U256::from) + } + + /// Generate a sorted vector of (B256, U256) entries + fn sorted_db_nodes_strategy() -> impl Strategy> { + prop::collection::vec((any::(), u256_strategy()), 0..20).prop_map(|entries| { + let mut result: Vec<(B256, U256)> = entries + .into_iter() + .map(|(byte, value)| (B256::repeat_byte(byte), value)) + .collect(); + result.sort_by(|a, b| a.0.cmp(&b.0)); + result.dedup_by(|a, b| a.0 == b.0); + result + }) + } + + /// Generate a sorted vector of (B256, U256) entries (including deletions as ZERO) + fn sorted_post_state_nodes_strategy() -> impl Strategy> { + // Explicitly inject ZERO values to model post-state deletions. 
+ prop::collection::vec((any::(), u256_strategy(), any::()), 0..20).prop_map( + |entries| { + let mut result: Vec<(B256, U256)> = entries + .into_iter() + .map(|(byte, value, is_deletion)| { + let effective_value = if is_deletion { U256::ZERO } else { value }; + (B256::repeat_byte(byte), effective_value) + }) + .collect(); + result.sort_by(|a, b| a.0.cmp(&b.0)); + result.dedup_by(|a, b| a.0 == b.0); + result + }, + ) + } + + proptest! { + #![proptest_config(ProptestConfig::with_cases(1000))] + /// Tests `HashedPostStateCursor` produces identical results to a pre-merged cursor + /// across 1000 random scenarios. + /// + /// For random DB entries and post-state changes, creates two cursors: + /// - Control: pre-merged data (expected behavior) + /// - Test: `HashedPostStateCursor` (lazy overlay) + /// + /// Executes random sequences of `next()` and `seek()` operations, asserting + /// both cursors return identical results. + #[test] + fn proptest_hashed_post_state_cursor( + db_nodes in sorted_db_nodes_strategy(), + post_state_nodes in sorted_post_state_nodes_strategy(), + op_choices in prop::collection::vec(any::(), 10..500), + ) { + reth_tracing::init_test_tracing(); + use tracing::debug; + + debug!("Starting proptest!"); + + // Create the expected results by merging the two sorted vectors, + // properly handling deletions (ZERO values in post_state_nodes) + let expected_combined = merge_with_overlay(db_nodes.clone(), post_state_nodes.clone()); + + // Collect all keys for operation generation + let all_keys: Vec = expected_combined.iter().map(|(k, _)| *k).collect(); + + // Create a control cursor using the combined result with a mock cursor + let control_db_map: BTreeMap = expected_combined.into_iter().collect(); + let control_db_arc = Arc::new(control_db_map); + let control_visited_keys = Arc::new(Mutex::new(Vec::new())); + let mut control_cursor = MockHashedCursor::new(control_db_arc, control_visited_keys); + + // Create the HashedPostStateCursor being tested + let db_nodes_map: BTreeMap = db_nodes.into_iter().collect(); + let db_nodes_arc = Arc::new(db_nodes_map); + let visited_keys = Arc::new(Mutex::new(Vec::new())); + let mock_cursor = MockHashedCursor::new(db_nodes_arc, visited_keys); + + // Create a HashedPostStateSorted with the storage data + let hashed_address = B256::ZERO; + let storage_sorted = reth_trie_common::HashedStorageSorted { + storage_slots: post_state_nodes, + wiped: false, + }; + let mut storages = alloy_primitives::map::B256Map::default(); + storages.insert(hashed_address, storage_sorted); + let post_state = HashedPostStateSorted::new(Vec::new(), storages); + + let mut test_cursor = HashedPostStateCursor::new_storage(mock_cursor, &post_state, hashed_address); + + // Test: seek to the beginning first + let control_first = control_cursor.seek(B256::ZERO).unwrap(); + let test_first = test_cursor.seek(B256::ZERO).unwrap(); + debug!( + control=?control_first.as_ref().map(|(k, _)| k), + test=?test_first.as_ref().map(|(k, _)| k), + "Initial seek returned", + ); + assert_eq!(control_first, test_first, "Initial seek mismatch"); + + // If both cursors returned None, nothing to test + if control_first.is_none() && test_first.is_none() { + return Ok(()); + } + + // Track the last key returned from the cursor + let mut last_returned_key = control_first.as_ref().map(|(k, _)| *k); + + // Execute a sequence of random operations + for choice in op_choices { + let op_type = choice % 2; // Only 2 operation types: next and seek + + match op_type { + 0 => { + // Next operation + let 
control_result = control_cursor.next().unwrap(); + let test_result = test_cursor.next().unwrap(); + debug!( + control=?control_result.as_ref().map(|(k, _)| k), + test=?test_result.as_ref().map(|(k, _)| k), + "Next returned", + ); + assert_eq!(control_result, test_result, "Next operation mismatch"); + + last_returned_key = control_result.as_ref().map(|(k, _)| *k); + + // Stop if both cursors are exhausted + if control_result.is_none() && test_result.is_none() { + break; + } + } + _ => { + // Seek operation - choose a key >= last_returned_key + if all_keys.is_empty() { + continue; + } + + let valid_keys: Vec<_> = all_keys + .iter() + .filter(|k| last_returned_key.is_none_or(|last| **k >= last)) + .collect(); + + if valid_keys.is_empty() { + continue; + } + + let key = *valid_keys[(choice as usize / 2) % valid_keys.len()]; + + let control_result = control_cursor.seek(key).unwrap(); + let test_result = test_cursor.seek(key).unwrap(); + debug!( + control=?control_result.as_ref().map(|(k, _)| k), + test=?test_result.as_ref().map(|(k, _)| k), + ?key, + "Seek returned", + ); + assert_eq!(control_result, test_result, "Seek operation mismatch for key {:?}", key); + + last_returned_key = control_result.as_ref().map(|(k, _)| *k); + + // Stop if both cursors are exhausted + if control_result.is_none() && test_result.is_none() { + break; + } + } + } + } + } + } } } diff --git a/crates/trie/trie/src/lib.rs b/crates/trie/trie/src/lib.rs index e53049b587..aef322fb7c 100644 --- a/crates/trie/trie/src/lib.rs +++ b/crates/trie/trie/src/lib.rs @@ -32,6 +32,9 @@ pub mod node_iter; /// Merkle proof generation. pub mod proof; +/// Merkle proof generation v2 (leaf-only implementation). +pub mod proof_v2; + /// Trie witness generation. pub mod witness; @@ -61,7 +64,7 @@ pub mod metrics; pub mod test_utils; /// Collection of mock types for testing. -#[cfg(test)] +#[cfg(any(test, feature = "test-utils"))] pub mod mock; /// Verification of existing stored trie nodes against state data. diff --git a/crates/trie/trie/src/node_iter.rs b/crates/trie/trie/src/node_iter.rs index 862176c803..7d53bd4b6d 100644 --- a/crates/trie/trie/src/node_iter.rs +++ b/crates/trie/trie/src/node_iter.rs @@ -160,15 +160,15 @@ where /// /// If `metrics` feature is enabled, it also updates the metrics. fn next_hashed_entry(&mut self) -> Result, DatabaseError> { - let result = self.hashed_cursor.next(); + let next = self.hashed_cursor.next()?; - self.last_next_result = result.clone()?; + self.last_next_result = next; #[cfg(feature = "metrics")] { self.metrics.inc_leaf_nodes_advanced(); } - result + Ok(next) } } @@ -306,10 +306,11 @@ where #[cfg(test)] mod tests { + use super::{TrieElement, TrieNodeIter}; use crate::{ hashed_cursor::{ - mock::MockHashedCursorFactory, noop::NoopHashedAccountCursor, HashedCursorFactory, - HashedPostStateAccountCursor, + mock::MockHashedCursorFactory, noop::NoopHashedCursor, HashedCursorFactory, + HashedPostStateCursor, }, mock::{KeyVisit, KeyVisitType}, trie_cursor::{ @@ -332,8 +333,6 @@ mod tests { }; use std::collections::BTreeMap; - use super::{TrieElement, TrieNodeIter}; - /// Calculate the branch node stored in the database by feeding the provided state to the hash /// builder and taking the trie updates. 
fn get_hash_builder_branch_nodes( @@ -353,9 +352,9 @@ mod tests { let mut node_iter = TrieNodeIter::state_trie( walker, - HashedPostStateAccountCursor::new( - NoopHashedAccountCursor::default(), - hashed_post_state.accounts(), + HashedPostStateCursor::new_account( + NoopHashedCursor::::default(), + &hashed_post_state, ), ); diff --git a/crates/trie/trie/src/proof/mod.rs b/crates/trie/trie/src/proof/mod.rs index 348cdb430a..c0b9012355 100644 --- a/crates/trie/trie/src/proof/mod.rs +++ b/crates/trie/trie/src/proof/mod.rs @@ -1,8 +1,11 @@ use crate::{ - hashed_cursor::{HashedCursorFactory, HashedStorageCursor}, + hashed_cursor::{ + HashedCursorFactory, HashedCursorMetricsCache, HashedStorageCursor, + InstrumentedHashedCursor, + }, node_iter::{TrieElement, TrieNodeIter}, prefix_set::{PrefixSetMut, TriePrefixSetsMut}, - trie_cursor::TrieCursorFactory, + trie_cursor::{InstrumentedTrieCursor, TrieCursorFactory, TrieCursorMetricsCache}, walker::TrieWalker, HashBuilder, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE, }; @@ -80,6 +83,16 @@ impl Proof { self.collect_branch_node_masks = branch_node_masks; self } + + /// Get a reference to the trie cursor factory. + pub const fn trie_cursor_factory(&self) -> &T { + &self.trie_cursor_factory + } + + /// Get a reference to the hashed cursor factory. + pub const fn hashed_cursor_factory(&self) -> &H { + &self.hashed_cursor_factory + } } impl Proof @@ -134,6 +147,8 @@ where TrieElement::Leaf(hashed_address, account) => { let proof_targets = targets.remove(&hashed_address); let leaf_is_proof_target = proof_targets.is_some(); + let collect_storage_masks = + self.collect_branch_node_masks && leaf_is_proof_target; let storage_prefix_set = self .prefix_sets .storage_prefix_sets @@ -145,7 +160,7 @@ where hashed_address, ) .with_prefix_set_mut(storage_prefix_set) - .with_branch_node_masks(self.collect_branch_node_masks) + .with_branch_node_masks(collect_storage_masks) .storage_multiproof(proof_targets.unwrap_or_default())?; // Encode account @@ -184,7 +199,7 @@ where /// Generates storage merkle proofs. #[derive(Debug)] -pub struct StorageProof { +pub struct StorageProof<'a, T, H, K = AddedRemovedKeys> { /// The factory for traversing trie nodes. trie_cursor_factory: T, /// The factory for hashed cursors. @@ -197,9 +212,13 @@ pub struct StorageProof { collect_branch_node_masks: bool, /// Provided by the user to give the necessary context to retain extra proofs. added_removed_keys: Option, + /// Optional reference to accumulate trie cursor metrics. + trie_cursor_metrics: Option<&'a mut TrieCursorMetricsCache>, + /// Optional reference to accumulate hashed cursor metrics. + hashed_cursor_metrics: Option<&'a mut HashedCursorMetricsCache>, } -impl StorageProof { +impl StorageProof<'static, T, H> { /// Create a new [`StorageProof`] instance. pub fn new(t: T, h: H, address: Address) -> Self { Self::new_hashed(t, h, keccak256(address)) @@ -214,13 +233,18 @@ impl StorageProof { prefix_set: PrefixSetMut::default(), collect_branch_node_masks: false, added_removed_keys: None, + trie_cursor_metrics: None, + hashed_cursor_metrics: None, } } } -impl StorageProof { +impl<'a, T, H, K> StorageProof<'a, T, H, K> { /// Set the trie cursor factory. 
- pub fn with_trie_cursor_factory(self, trie_cursor_factory: TF) -> StorageProof { + pub fn with_trie_cursor_factory( + self, + trie_cursor_factory: TF, + ) -> StorageProof<'a, TF, H, K> { StorageProof { trie_cursor_factory, hashed_cursor_factory: self.hashed_cursor_factory, @@ -228,6 +252,8 @@ impl StorageProof { prefix_set: self.prefix_set, collect_branch_node_masks: self.collect_branch_node_masks, added_removed_keys: self.added_removed_keys, + trie_cursor_metrics: self.trie_cursor_metrics, + hashed_cursor_metrics: self.hashed_cursor_metrics, } } @@ -235,7 +261,7 @@ impl StorageProof { pub fn with_hashed_cursor_factory( self, hashed_cursor_factory: HF, - ) -> StorageProof { + ) -> StorageProof<'a, T, HF, K> { StorageProof { trie_cursor_factory: self.trie_cursor_factory, hashed_cursor_factory, @@ -243,6 +269,8 @@ impl StorageProof { prefix_set: self.prefix_set, collect_branch_node_masks: self.collect_branch_node_masks, added_removed_keys: self.added_removed_keys, + trie_cursor_metrics: self.trie_cursor_metrics, + hashed_cursor_metrics: self.hashed_cursor_metrics, } } @@ -258,6 +286,24 @@ impl StorageProof { self } + /// Set the trie cursor metrics cache to accumulate metrics into. + pub const fn with_trie_cursor_metrics( + mut self, + metrics: &'a mut TrieCursorMetricsCache, + ) -> Self { + self.trie_cursor_metrics = Some(metrics); + self + } + + /// Set the hashed cursor metrics cache to accumulate metrics into. + pub const fn with_hashed_cursor_metrics( + mut self, + metrics: &'a mut HashedCursorMetricsCache, + ) -> Self { + self.hashed_cursor_metrics = Some(metrics); + self + } + /// Configures the retainer to retain proofs for certain nodes which would otherwise fall /// outside the target set, when those nodes might be required to calculate the state root when /// keys have been added or removed to the trie. @@ -266,7 +312,7 @@ impl StorageProof { pub fn with_added_removed_keys( self, added_removed_keys: Option, - ) -> StorageProof { + ) -> StorageProof<'a, T, H, K2> { StorageProof { trie_cursor_factory: self.trie_cursor_factory, hashed_cursor_factory: self.hashed_cursor_factory, @@ -274,11 +320,13 @@ impl StorageProof { prefix_set: self.prefix_set, collect_branch_node_masks: self.collect_branch_node_masks, added_removed_keys, + trie_cursor_metrics: self.trie_cursor_metrics, + hashed_cursor_metrics: self.hashed_cursor_metrics, } } } -impl StorageProof +impl<'a, T, H, K> StorageProof<'a, T, H, K> where T: TrieCursorFactory, H: HashedCursorFactory, @@ -295,22 +343,37 @@ where /// Generate storage proof. pub fn storage_multiproof( - mut self, + self, targets: B256Set, ) -> Result { - let mut hashed_storage_cursor = + let mut discard_hashed_cursor_metrics = HashedCursorMetricsCache::default(); + let hashed_cursor_metrics = + self.hashed_cursor_metrics.unwrap_or(&mut discard_hashed_cursor_metrics); + + let hashed_storage_cursor = self.hashed_cursor_factory.hashed_storage_cursor(self.hashed_address)?; + let mut hashed_storage_cursor = + InstrumentedHashedCursor::new(hashed_storage_cursor, hashed_cursor_metrics); + // short circuit on empty storage if hashed_storage_cursor.is_storage_empty()? 
{ return Ok(StorageMultiProof::empty()) } + let mut discard_trie_cursor_metrics = TrieCursorMetricsCache::default(); + let trie_cursor_metrics = + self.trie_cursor_metrics.unwrap_or(&mut discard_trie_cursor_metrics); + let target_nibbles = targets.into_iter().map(Nibbles::unpack).collect::>(); - self.prefix_set.extend_keys(target_nibbles.clone()); + let mut prefix_set = self.prefix_set; + prefix_set.extend_keys(target_nibbles.clone()); let trie_cursor = self.trie_cursor_factory.storage_trie_cursor(self.hashed_address)?; - let walker = TrieWalker::<_>::storage_trie(trie_cursor, self.prefix_set.freeze()) + + let trie_cursor = InstrumentedTrieCursor::new(trie_cursor, trie_cursor_metrics); + + let walker = TrieWalker::<_>::storage_trie(trie_cursor, prefix_set.freeze()) .with_added_removed_keys(self.added_removed_keys.as_ref()); let retainer = ProofRetainer::from_iter(target_nibbles) diff --git a/crates/trie/trie/src/proof/trie_node.rs b/crates/trie/trie/src/proof/trie_node.rs index 3e197072d4..f6c62b866e 100644 --- a/crates/trie/trie/src/proof/trie_node.rs +++ b/crates/trie/trie/src/proof/trie_node.rs @@ -2,11 +2,11 @@ use super::{Proof, StorageProof}; use crate::{hashed_cursor::HashedCursorFactory, trie_cursor::TrieCursorFactory}; use alloy_primitives::{map::HashSet, B256}; use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind}; -use reth_trie_common::{prefix_set::TriePrefixSetsMut, MultiProofTargets, Nibbles}; +use reth_trie_common::{MultiProofTargets, Nibbles}; use reth_trie_sparse::provider::{ pad_path_to_key, RevealedNode, TrieNodeProvider, TrieNodeProviderFactory, }; -use std::{sync::Arc, time::Instant}; +use std::time::Instant; use tracing::{enabled, trace, Level}; /// Factory for instantiating providers capable of retrieving blinded trie nodes via proofs. @@ -16,25 +16,19 @@ pub struct ProofTrieNodeProviderFactory { trie_cursor_factory: T, /// The factory for hashed cursors. hashed_cursor_factory: H, - /// A set of prefix sets that have changes. - prefix_sets: Arc, } impl ProofTrieNodeProviderFactory { /// Create new proof-based blinded provider factory. - pub const fn new( - trie_cursor_factory: T, - hashed_cursor_factory: H, - prefix_sets: Arc, - ) -> Self { - Self { trie_cursor_factory, hashed_cursor_factory, prefix_sets } + pub const fn new(trie_cursor_factory: T, hashed_cursor_factory: H) -> Self { + Self { trie_cursor_factory, hashed_cursor_factory } } } impl TrieNodeProviderFactory for ProofTrieNodeProviderFactory where - T: TrieCursorFactory + Clone + Send + Sync, - H: HashedCursorFactory + Clone + Send + Sync, + T: TrieCursorFactory + Clone, + H: HashedCursorFactory + Clone, { type AccountNodeProvider = ProofBlindedAccountProvider; type StorageNodeProvider = ProofBlindedStorageProvider; @@ -43,7 +37,6 @@ where ProofBlindedAccountProvider { trie_cursor_factory: self.trie_cursor_factory.clone(), hashed_cursor_factory: self.hashed_cursor_factory.clone(), - prefix_sets: self.prefix_sets.clone(), } } @@ -51,7 +44,6 @@ where ProofBlindedStorageProvider { trie_cursor_factory: self.trie_cursor_factory.clone(), hashed_cursor_factory: self.hashed_cursor_factory.clone(), - prefix_sets: self.prefix_sets.clone(), account, } } @@ -64,18 +56,12 @@ pub struct ProofBlindedAccountProvider { trie_cursor_factory: T, /// The factory for hashed cursors. hashed_cursor_factory: H, - /// A set of prefix sets that have changes. - prefix_sets: Arc, } impl ProofBlindedAccountProvider { /// Create new proof-based blinded account node provider. 
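The optional `&'a mut ...MetricsCache` references and the `unwrap_or(&mut discard...)` fallback in `storage_multiproof` follow a small builder pattern that can be sketched in isolation. The names below (`ProofBuilder`, `CursorMetrics`) are invented for illustration and are not the crate's types; only the shape of the pattern is taken from the hunk above.

```rust
#[derive(Default, Debug)]
struct CursorMetrics {
    seeks: u64,
}

// Builder that can optionally borrow a caller-owned metrics cache.
struct ProofBuilder<'a> {
    metrics: Option<&'a mut CursorMetrics>,
}

impl<'a> ProofBuilder<'a> {
    fn new() -> Self {
        Self { metrics: None }
    }

    fn with_metrics(mut self, metrics: &'a mut CursorMetrics) -> Self {
        self.metrics = Some(metrics);
        self
    }

    fn run(self) {
        // When the caller did not supply a cache, fall back to a throwaway one
        // that is simply dropped afterwards.
        let mut discard = CursorMetrics::default();
        let metrics = self.metrics.unwrap_or(&mut discard);
        metrics.seeks += 1; // stand-in for counting real cursor operations
    }
}

fn main() {
    let mut cache = CursorMetrics::default();
    ProofBuilder::new().with_metrics(&mut cache).run();
    assert_eq!(cache.seeks, 1);

    // Callers that do not care about metrics simply skip `with_metrics`.
    ProofBuilder::new().run();
}
```

The advantage of the discard-cache fallback is that the hot path can always write through a plain `&mut` reference instead of branching on an `Option` for every cursor operation.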
- pub const fn new( - trie_cursor_factory: T, - hashed_cursor_factory: H, - prefix_sets: Arc, - ) -> Self { - Self { trie_cursor_factory, hashed_cursor_factory, prefix_sets } + pub const fn new(trie_cursor_factory: T, hashed_cursor_factory: H) -> Self { + Self { trie_cursor_factory, hashed_cursor_factory } } } @@ -89,7 +75,6 @@ where let targets = MultiProofTargets::from_iter([(pad_path_to_key(path), HashSet::default())]); let mut proof = Proof::new(&self.trie_cursor_factory, &self.hashed_cursor_factory) - .with_prefix_sets_mut(self.prefix_sets.as_ref().clone()) .with_branch_node_masks(true) .multiproof(targets) .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; @@ -117,21 +102,14 @@ pub struct ProofBlindedStorageProvider { trie_cursor_factory: T, /// The factory for hashed cursors. hashed_cursor_factory: H, - /// A set of prefix sets that have changes. - prefix_sets: Arc, /// Target account. account: B256, } impl ProofBlindedStorageProvider { /// Create new proof-based blinded storage node provider. - pub const fn new( - trie_cursor_factory: T, - hashed_cursor_factory: H, - prefix_sets: Arc, - account: B256, - ) -> Self { - Self { trie_cursor_factory, hashed_cursor_factory, prefix_sets, account } + pub const fn new(trie_cursor_factory: T, hashed_cursor_factory: H, account: B256) -> Self { + Self { trie_cursor_factory, hashed_cursor_factory, account } } } @@ -144,14 +122,11 @@ where let start = enabled!(target: "trie::proof::blinded", Level::TRACE).then(Instant::now); let targets = HashSet::from_iter([pad_path_to_key(path)]); - let storage_prefix_set = - self.prefix_sets.storage_prefix_sets.get(&self.account).cloned().unwrap_or_default(); let mut proof = StorageProof::new_hashed( &self.trie_cursor_factory, &self.hashed_cursor_factory, self.account, ) - .with_prefix_set_mut(storage_prefix_set) .with_branch_node_masks(true) .storage_multiproof(targets) .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; diff --git a/crates/trie/trie/src/proof_v2/mod.rs b/crates/trie/trie/src/proof_v2/mod.rs new file mode 100644 index 0000000000..7606164dda --- /dev/null +++ b/crates/trie/trie/src/proof_v2/mod.rs @@ -0,0 +1,1697 @@ +//! Proof calculation version 2: Leaf-only implementation. +//! +//! This module provides a rewritten proof calculator that: +//! - Uses only leaf data (HashedAccounts/Storages) to generate proofs +//! - Returns proof nodes sorted lexicographically by path +//! - Automatically resets after each calculation +//! - Re-uses cursors across calculations +//! - Supports generic value types with lazy evaluation + +use crate::{ + hashed_cursor::{HashedCursor, HashedStorageCursor}, + trie_cursor::{depth_first, TrieCursor, TrieStorageCursor}, +}; +use alloy_primitives::{B256, U256}; +use alloy_rlp::Encodable; +use alloy_trie::{BranchNodeCompact, TrieMask}; +use reth_execution_errors::trie::StateProofError; +use reth_trie_common::{BranchNode, Nibbles, ProofTrieNode, RlpNode, TrieMasks, TrieNode}; +use std::{cmp::Ordering, iter::Peekable}; +use tracing::{instrument, trace}; + +mod value; +pub use value::*; + +mod node; +use node::*; + +/// Target to use with the `tracing` crate. +static TRACE_TARGET: &str = "trie::proof_v2"; + +/// Number of bytes to pre-allocate for [`ProofCalculator`]'s `rlp_encode_buf` field. +const RLP_ENCODE_BUF_SIZE: usize = 1024; + +/// A [`Nibbles`] which contains 64 zero nibbles. 
+static PATH_ALL_ZEROS: Nibbles = { + let mut path = Nibbles::new(); + let mut i = 0; + while i < 64 { + path.push_unchecked(0); + i += 1; + } + path +}; + +/// A proof calculator that generates merkle proofs using only leaf data. +/// +/// The calculator: +/// - Accepts one or more B256 proof targets sorted lexicographically +/// - Returns proof nodes sorted lexicographically by path +/// - Automatically resets after each calculation +/// - Re-uses cursors from one calculation to the next +#[derive(Debug)] +pub struct ProofCalculator { + /// Trie cursor for traversing stored branch nodes. + trie_cursor: TC, + /// Hashed cursor for iterating over leaf data. + hashed_cursor: HC, + /// Branches which are currently in the process of being constructed, each being a child of + /// the previous one. + branch_stack: Vec, + /// The path of the last branch in `branch_stack`. + branch_path: Nibbles, + /// Children of branches in the `branch_stack`. + /// + /// Each branch in `branch_stack` tracks which children are in this stack using its + /// `state_mask`; the number of children the branch has in this stack is equal to the number of + /// bits set in its `state_mask`. + /// + /// The children for the bottom branch in `branch_stack` are found at the bottom of this stack, + /// and so on. When a branch is removed from `branch_stack` its children are removed from this + /// one, and the branch is pushed onto this stack in their place (see [`Self::pop_branch`]). + /// + /// Children on the `child_stack` are converted to [`ProofTrieBranchChild::RlpNode`]s via the + /// [`Self::commit_child`] method. Committing a child indicates that no further changes are + /// expected to happen to it (e.g. splitting its short key when inserting a new branch). Given + /// that keys are consumed in lexicographical order, only the last child on the stack can + /// ever be modified, and therefore all children besides the last are expected to be + /// [`ProofTrieBranchChild::RlpNode`]s. + child_stack: Vec>, + /// Cached branch data pulled from the `trie_cursor`. The calculator will use the cached + /// [`BranchNodeCompact::hashes`] to skip over the calculation of sub-tries in the overall + /// trie. The cached hashes cannot be used for any paths which are prefixes of a proof target. + cached_branch_stack: Vec<(Nibbles, BranchNodeCompact)>, + /// The proofs which will be returned from the calculation. This gets taken at the end of every + /// proof call. + retained_proofs: Vec, + /// Free-list of re-usable buffers of [`RlpNode`]s, used for encoding branch nodes to RLP. + /// + /// We are generally able to re-use these buffers across different branch nodes for the + /// duration of a proof calculation, but occasionally we will lose one when a branch + /// node is returned as a `ProofTrieNode`. + rlp_nodes_bufs: Vec>, + /// Re-usable byte buffer, used for RLP encoding. + rlp_encode_buf: Vec, +} + +impl ProofCalculator { + /// Create a new [`ProofCalculator`] instance for calculating account proofs. + pub fn new(trie_cursor: TC, hashed_cursor: HC) -> Self { + Self { + trie_cursor, + hashed_cursor, + branch_stack: Vec::<_>::with_capacity(64), + branch_path: Nibbles::new(), + child_stack: Vec::<_>::new(), + cached_branch_stack: Vec::<_>::with_capacity(64), + retained_proofs: Vec::<_>::new(), + rlp_nodes_bufs: Vec::<_>::new(), + rlp_encode_buf: Vec::<_>::with_capacity(RLP_ENCODE_BUF_SIZE), + } + } +} + +/// Helper type for the [`Iterator`] used to pass targets in from the caller.
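The `state_mask` bookkeeping described in the `ProofCalculator` docs above (one bit per child nibble, with the number of set bits equal to the number of children currently sitting on the shared `child_stack`) can be illustrated with a bare `u16`. This is a toy sketch, not the `TrieMask` type itself.

```rust
fn main() {
    let mut state_mask: u16 = 0;

    // Children are recorded at nibbles 0x1, 0x4 and 0xa.
    for nibble in [0x1u8, 0x4, 0xa] {
        assert!(state_mask & (1 << nibble) == 0, "nibble already occupied");
        state_mask |= 1 << nibble;
    }

    // The branch owns exactly `count_ones` entries at the top of the child stack.
    assert_eq!(state_mask.count_ones(), 3);

    // The highest set nibble identifies the most recently pushed child
    // (compare `highest_set_nibble` further down in this module).
    let highest = (u16::BITS - state_mask.leading_zeros() - 1) as u8;
    assert_eq!(highest, 0xa);
}
```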
+type TargetsIter = Peekable>; + +impl ProofCalculator +where + TC: TrieCursor, + HC: HashedCursor, + VE: LeafValueEncoder, +{ + /// Takes a re-usable `RlpNode` buffer from the internal free-list, or allocates a new one if + /// the free-list is empty. + /// + /// The returned Vec will have a length of zero. + fn take_rlp_nodes_buf(&mut self) -> Vec { + self.rlp_nodes_bufs + .pop() + .map(|mut buf| { + buf.clear(); + buf + }) + .unwrap_or_else(|| Vec::with_capacity(16)) + } + + // Returns zero if `branch_stack` is empty, one otherwise. + // + // This is used when working with the `ext_len` field of [`ProofTrieBranch`]. The `ext_len` is + // calculated by taking the difference of the current `branch_path` and the new branch's path; + // if the new branch has a parent branch (ie `branch_stack` is not empty) then 1 is subtracted + // from the `ext_len` to account for the child's nibble on the parent. + #[inline] + const fn maybe_parent_nibble(&self) -> usize { + !self.branch_stack.is_empty() as usize + } + + /// Returns true if the proof of a node at the given path should be retained. + /// A node is retained if its path is a prefix of any target. + /// This may move the + /// `targets` iterator forward if the given path comes after the current target. + /// + /// This method takes advantage of the [`WindowIter`] component of [`TargetsIter`] to only check + /// a single target at a time. The [`WindowIter`] allows us to look at a current target and the + /// next target simultaneously, forming an end-exclusive range. + /// + /// ```text + /// * Given targets: [ 0x012, 0x045, 0x678 ] + /// * targets.next() returns: + /// - (0x012, Some(0x045)): covers (0x012..0x045) + /// - (0x045, Some(0x678)): covers (0x045..0x678) + /// - (0x678, None): covers (0x678..) + /// ``` + /// + /// As long as the path which is passed in lies within that range we can continue to use the + /// current target. Once the path goes beyond that range (ie path >= next target) then we can be + /// sure that no further paths will be in the range, and we can iterate forward. + /// + /// ```text + /// * Given: + /// - path: 0x04 + /// - targets.peek() returns (0x012, Some(0x045)) + /// + /// * 0x04 comes _after_ 0x045 in depth-first order, so (0x012..0x045) does not contain 0x04. + /// + /// * targets.next() is called. + /// + /// * targets.peek() now returns (0x045, Some(0x678)). This does contain 0x04. + /// + /// * 0x04 is a prefix of 0x045, and so is retained. + /// ``` + /// + /// Because paths in the trie are visited in depth-first order, it's imperative that targets are + /// given in depth-first order as well. If the targets were generated off of B256s, which is + /// the common-case, then this is equivalent to lexicographical order. + fn should_retain( + &self, + targets: &mut TargetsIter>, + path: &Nibbles, + ) -> bool { + trace!(target: TRACE_TARGET, ?path, target = ?targets.peek(), "should_retain: called"); + debug_assert!(self.retained_proofs.last().is_none_or( + |ProofTrieNode { path: last_retained_path, .. 
}| { + depth_first::cmp(path, last_retained_path) == Ordering::Greater + } + ), + "should_retain called with path {path:?} which is not after previously retained node {:?} in depth-first order", + self.retained_proofs.last().map(|n| n.path), + ); + + let &(mut lower, mut upper) = targets.peek().expect("targets is never exhausted"); + + loop { + // If the node in question is a prefix of the target then we retain + if lower.starts_with(path) { + return true + } + + // If the path isn't in the current range then iterate forward until it is (or until + // there is no upper bound, indicating unbounded). + if upper.is_some_and(|upper| depth_first::cmp(path, &upper) != Ordering::Less) { + targets.next(); + trace!(target: TRACE_TARGET, target = ?targets.peek(), "upper target <= path, next target"); + let &(l, u) = targets.peek().expect("targets is never exhausted"); + (lower, upper) = (l, u); + } else { + return false + } + } + } + + /// Takes a child which has been removed from the `child_stack` and converts it to an + /// [`RlpNode`]. + /// + /// Calling this method indicates that the child will not undergo any further modifications, and + /// therefore can be retained as a proof node if applicable. + fn commit_child( + &mut self, + targets: &mut TargetsIter>, + child_path: Nibbles, + child: ProofTrieBranchChild, + ) -> Result { + // If the child is already an `RlpNode` then there is nothing to do. + if let ProofTrieBranchChild::RlpNode(rlp_node) = child { + return Ok(rlp_node) + } + + // If we should retain the child then do so. + if self.should_retain(targets, &child_path) { + trace!(target: TRACE_TARGET, ?child_path, "Retaining child"); + + // Convert to `ProofTrieNode`, which will be what is retained. + // + // If this node is a branch then its `rlp_nodes_buf` will be taken and not returned to + // the `rlp_nodes_bufs` free-list. + self.rlp_encode_buf.clear(); + let proof_node = child.into_proof_trie_node(child_path, &mut self.rlp_encode_buf)?; + + // Use the `ProofTrieNode` to encode the `RlpNode`, and then push it onto retained + // nodes before returning. + self.rlp_encode_buf.clear(); + proof_node.node.encode(&mut self.rlp_encode_buf); + + self.retained_proofs.push(proof_node); + return Ok(RlpNode::from_rlp(&self.rlp_encode_buf)); + } + + // If the child path is not being retained then we convert directly to an `RlpNode` + // using `into_rlp`. Since we are not retaining the node we can recover any `RlpNode` + // buffers for the free-list here, hence why we do this as a separate logical branch. + self.rlp_encode_buf.clear(); + let (child_rlp_node, freed_rlp_nodes_buf) = child.into_rlp(&mut self.rlp_encode_buf)?; + + // If there is an `RlpNode` buffer which can be re-used then push it onto the free-list. + if let Some(buf) = freed_rlp_nodes_buf { + self.rlp_nodes_bufs.push(buf); + } + + Ok(child_rlp_node) + } + + /// Returns the path of the child of the currently under-construction branch at the given + /// nibble. + #[inline] + fn child_path_at(&self, nibble: u8) -> Nibbles { + let mut child_path = self.branch_path; + debug_assert!(child_path.len() < 64); + child_path.push_unchecked(nibble); + child_path + } + + /// Returns index of the highest nibble which is set in the mask. + /// + /// # Panics + /// + /// Will panic in debug mode if the mask is empty. 
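A small sketch of the `(current, next)` windowing over sorted targets that the `should_retain` walkthrough above relies on. The actual `WindowIter` is not shown in this hunk, so the `window_pairs` helper below is only an assumed illustration of the shape of the pairs, built from plain standard-library iterators.

```rust
// Pair each target with the following one, so that each pair describes an
// end-exclusive range; the last target gets `None`, meaning unbounded.
fn window_pairs<T: Copy>(targets: &[T]) -> impl Iterator<Item = (T, Option<T>)> + '_ {
    targets
        .iter()
        .copied()
        .zip(targets.iter().copied().skip(1).map(Some).chain(std::iter::once(None)))
}

fn main() {
    let targets = ["0x012", "0x045", "0x678"];
    let pairs: Vec<_> = window_pairs(&targets).collect();
    assert_eq!(
        pairs,
        vec![
            ("0x012", Some("0x045")), // covers 0x012..0x045
            ("0x045", Some("0x678")), // covers 0x045..0x678
            ("0x678", None),          // covers 0x678.. (unbounded)
        ]
    );
}
```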
+ #[inline] + fn highest_set_nibble(mask: TrieMask) -> u8 { + debug_assert!(!mask.is_empty()); + (u16::BITS - mask.leading_zeros() - 1) as u8 + } + + /// Returns the path of the child on top of the `child_stack`, or the root path if the stack is + /// empty. + fn last_child_path(&self) -> Nibbles { + // If there is no branch under construction then the top child must be the root child. + let Some(branch) = self.branch_stack.last() else { + return Nibbles::new(); + }; + + self.child_path_at(Self::highest_set_nibble(branch.state_mask)) + } + + /// Calls [`Self::commit_child`] on the last child of `child_stack`, replacing it with a + /// [`ProofTrieBranchChild::RlpNode`]. + /// + /// If `child_stack` is empty then this is a no-op. + /// + /// NOTE that this method call relies on the `state_mask` of the top branch of the + /// `branch_stack` to determine the last child's path. When committing the last child prior to + /// pushing a new child, it's important to set the new child's `state_mask` bit _after_ the call + /// to this method. + fn commit_last_child( + &mut self, + targets: &mut TargetsIter>, + ) -> Result<(), StateProofError> { + let Some(child) = self.child_stack.pop() else { return Ok(()) }; + + // If the child is already an `RlpNode` then there is nothing to do, push it back on with no + // changes. + if let ProofTrieBranchChild::RlpNode(_) = child { + self.child_stack.push(child); + return Ok(()) + } + + let child_path = self.last_child_path(); + // TODO theoretically `commit_child` only needs to convert to an `RlpNode` if it's going to + // retain the proof, otherwise we could leave the child as-is on the stack and convert it + // when popping the branch, giving more time to the DeferredEncoder to do async work. + let child_rlp_node = self.commit_child(targets, child_path, child)?; + + // Replace the child on the stack + self.child_stack.push(ProofTrieBranchChild::RlpNode(child_rlp_node)); + Ok(()) + } + + /// Creates a new leaf node on a branch, setting its `state_mask` bit and pushing the leaf onto + /// the `child_stack`. + /// + /// # Panics + /// + /// - If `branch_stack` is empty + /// - If the leaf's nibble is already set in the branch's `state_mask`. + fn push_new_leaf( + &mut self, + targets: &mut TargetsIter>, + leaf_nibble: u8, + leaf_short_key: Nibbles, + leaf_val: VE::DeferredEncoder, + ) -> Result<(), StateProofError> { + // Before pushing the new leaf onto the `child_stack` we need to commit the previous last + // child, so that only `child_stack`'s final child is a non-RlpNode. + self.commit_last_child(targets)?; + + // Once the last child is committed we set the new child's bit on the top branch's + // `state_mask` and push that new child. + let branch = self.branch_stack.last_mut().expect("branch_stack cannot be empty"); + + debug_assert!(!branch.state_mask.is_bit_set(leaf_nibble)); + branch.state_mask.set_bit(leaf_nibble); + + self.child_stack + .push(ProofTrieBranchChild::Leaf { short_key: leaf_short_key, value: leaf_val }); + + Ok(()) + } + + /// Pushes a new branch onto the `branch_stack` based on the path and short key of the last + /// child on the `child_stack` and the path of the next child which will be pushed on to the + /// stack after this call. + /// + /// Returns the nibble of the branch's `state_mask` which should be set for the new child, and + /// short key that the next child should use. + fn push_new_branch(&mut self, new_child_path: Nibbles) -> (u8, Nibbles) { + // First determine the new child's shortkey relative to the current branch. 
If there is no + // current branch then the short key is the full path. + let new_child_short_key = if self.branch_stack.is_empty() { + new_child_path + } else { + // When there is a current branch then trim off its path as well as the nibble that it + // has set for this leaf. + trim_nibbles_prefix(&new_child_path, self.branch_path.len() + 1) + }; + + // Get the new branch's first child, which is the child on the top of the stack with which + // the new child shares the same nibble on the current branch. + let first_child = self + .child_stack + .last_mut() + .expect("push_new_branch can't be called with empty child_stack"); + + let first_child_short_key = first_child.short_key(); + debug_assert!( + !first_child_short_key.is_empty(), + "push_new_branch called when top child on stack is not a leaf or extension with a short key", + ); + + // Determine how many nibbles are shared between the new branch's first child and the new + // child. This common prefix will be the extension of the new branch. + let common_prefix_len = first_child_short_key.common_prefix_length(&new_child_short_key); + + // Trim off the common prefix from the first child's short key, plus one nibble which will + // be stored by the new branch itself in its state mask. + let first_child_nibble = first_child_short_key.get_unchecked(common_prefix_len); + first_child.trim_short_key_prefix(common_prefix_len + 1); + + // Similarly, trim off the common prefix, plus one nibble for the new branch, from the new + // child's short key. + let new_child_nibble = new_child_short_key.get_unchecked(common_prefix_len); + let new_child_short_key = trim_nibbles_prefix(&new_child_short_key, common_prefix_len + 1); + + // Update the branch path to reflect the new branch about to be pushed. Its path will be + // the path of the previous branch, plus the nibble shared by each child, plus the parent + // extension (denoted by a non-zero `ext_len`). Since the new branch's path is a prefix of + // the original new_child_path we can just slice that. + // + // If the new branch is the first branch then we do not add the extra 1, as there is no + // nibble in a parent branch to account for. + let branch_path_len = + self.branch_path.len() + common_prefix_len + self.maybe_parent_nibble(); + self.branch_path = new_child_path.slice_unchecked(0, branch_path_len); + + // Push the new branch onto the `branch_stack`. We do not yet set the `state_mask` bit of + // the new child; whatever actually pushes the child onto the `child_stack` is expected to + // do that. + self.branch_stack.push(ProofTrieBranch { + ext_len: common_prefix_len as u8, + state_mask: TrieMask::new(1 << first_child_nibble), + masks: TrieMasks::none(), + }); + + trace!( + target: TRACE_TARGET, + ?new_child_path, + ?common_prefix_len, + ?first_child_nibble, + branch_path = ?self.branch_path, + "Pushed new branch", + ); + + (new_child_nibble, new_child_short_key) + } + + /// Pops the top branch off of the `branch_stack`, hashes its children on the `child_stack`, and + /// replaces those children on the `child_stack`. The `branch_path` field will be updated + /// accordingly. + /// + /// # Panics + /// + /// This method panics if `branch_stack` is empty.
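The core arithmetic of `push_new_branch` (find the common prefix of the existing child's short key and the incoming key, use it as the new branch's extension, and spend one nibble of each key on the branch's `state_mask`) can be shown with plain nibble slices. This is a worked example under assumed inputs, not the crate's `Nibbles` API.

```rust
fn common_prefix_len(a: &[u8], b: &[u8]) -> usize {
    a.iter().zip(b).take_while(|(x, y)| x == y).count()
}

fn main() {
    // Existing child's short key and the incoming key, relative to the current branch.
    let existing: [u8; 4] = [0x3, 0x7, 0x2, 0x9];
    let incoming: [u8; 3] = [0x3, 0x7, 0x5];

    let shared = common_prefix_len(&existing, &incoming);
    assert_eq!(shared, 2); // the new branch's extension would be [0x3, 0x7]

    // One nibble from each key goes into the new branch's state_mask ...
    assert_eq!(existing[shared], 0x2);
    assert_eq!(incoming[shared], 0x5);

    // ... and the remainders become the two children's short keys.
    assert_eq!(existing[shared + 1..].to_vec(), vec![0x9]);
    assert!(incoming[shared + 1..].is_empty());
}
```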
+ fn pop_branch( + &mut self, + targets: &mut TargetsIter>, + ) -> Result<(), StateProofError> { + trace!( + target: TRACE_TARGET, + branch = ?self.branch_stack.last(), + branch_path = ?self.branch_path, + child_stack_len = ?self.child_stack.len(), + "pop_branch: called", + ); + + // Ensure the final child on the child stack has been committed, as this method expects all + // children of the branch to have been committed. + self.commit_last_child(targets)?; + + let mut rlp_nodes_buf = self.take_rlp_nodes_buf(); + let branch = self.branch_stack.pop().expect("branch_stack cannot be empty"); + + // Take the branch's children off the stack, using the state mask to determine how many + // there are. + let num_children = branch.state_mask.count_ones() as usize; + debug_assert!(num_children > 1, "A branch must have at least two children"); + debug_assert!( + self.child_stack.len() >= num_children, + "Stack is missing necessary children ({num_children:?})" + ); + + // Collect children into an `RlpNode` Vec by committing and pushing each of them. + for (idx, child) in + self.child_stack.drain(self.child_stack.len() - num_children..).enumerate() + { + let ProofTrieBranchChild::RlpNode(child_rlp_node) = child else { + panic!( + "all branch children must have been committed, found {} at index {idx:?}", + std::any::type_name_of_val(&child) + ); + }; + rlp_nodes_buf.push(child_rlp_node); + } + + debug_assert_eq!( + rlp_nodes_buf.len(), + branch.state_mask.count_ones() as usize, + "children length must match number of bits set in state_mask" + ); + + // Calculate the short key of the parent extension (if the branch has a parent extension). + // It's important to calculate this short key prior to modifying the `branch_path`. + let short_key = trim_nibbles_prefix( + &self.branch_path, + self.branch_path.len() - branch.ext_len as usize, + ); + + // Wrap the `BranchNode` so it can be pushed onto the child stack. + let mut branch_as_child = ProofTrieBranchChild::Branch { + node: BranchNode::new(rlp_nodes_buf, branch.state_mask), + masks: branch.masks, + }; + + // If there is an extension then encode the branch as an `RlpNode` and use it to construct + // the extension in its place + if !short_key.is_empty() { + let branch_rlp_node = self.commit_child(targets, self.branch_path, branch_as_child)?; + branch_as_child = ProofTrieBranchChild::Extension { short_key, child: branch_rlp_node }; + }; + + self.child_stack.push(branch_as_child); + + // Update the branch_path. If this branch is the only branch then only its extension needs + // to be trimmed, otherwise we also need to remove its nibble from its parent. + let new_path_len = + self.branch_path.len() - branch.ext_len as usize - self.maybe_parent_nibble(); + + debug_assert!(self.branch_path.len() >= new_path_len); + self.branch_path = self.branch_path.slice_unchecked(0, new_path_len); + + Ok(()) + } + + /// Adds a single leaf for a key to the stack, possibly collapsing an existing branch and/or + /// creating a new one depending on the path of the key. + fn push_leaf( + &mut self, + targets: &mut TargetsIter>, + key: Nibbles, + val: VE::DeferredEncoder, + ) -> Result<(), StateProofError> { + loop { + trace!( + target: TRACE_TARGET, + ?key, + branch_stack_len = ?self.branch_stack.len(), + branch_path = ?self.branch_path, + child_stack_len = ?self.child_stack.len(), + "push_leaf: loop", + ); + + // Get the `state_mask` of the branch currently being built. 
If there are no branches + // on the stack then it means either the trie is empty or only a single leaf has been + // added previously. + let curr_branch_state_mask = match self.branch_stack.last() { + Some(curr_branch) => curr_branch.state_mask, + None if self.child_stack.is_empty() => { + // If the child stack is empty then this is the first leaf, push it and be done + self.child_stack + .push(ProofTrieBranchChild::Leaf { short_key: key, value: val }); + return Ok(()) + } + None => { + // If the child stack is not empty then it must only have a single other child + // which is either a leaf or extension with a non-zero short key. + debug_assert_eq!(self.child_stack.len(), 1); + debug_assert!(!self + .child_stack + .last() + .expect("already checked for emptiness") + .short_key() + .is_empty()); + let (nibble, short_key) = self.push_new_branch(key); + self.push_new_leaf(targets, nibble, short_key, val)?; + return Ok(()) + } + }; + + // Find the common prefix length, which is the number of nibbles shared between the + // current branch and the key. + let common_prefix_len = self.branch_path.common_prefix_length(&key); + + // If the current branch does not share all of its nibbles with the new key then it is + // not the parent of the new key. In this case the current branch will have no more + // children. We can pop it and loop back to the top to try again with its parent branch. + if common_prefix_len < self.branch_path.len() { + self.pop_branch(targets)?; + continue + } + + // If the current branch is a prefix of the new key then the leaf is a child of the + // branch. If the branch doesn't have the leaf's nibble set then the leaf can be added + // directly, otherwise a new branch must be created in-between this branch and that + // existing child. + let nibble = key.get_unchecked(common_prefix_len); + if curr_branch_state_mask.is_bit_set(nibble) { + // Push a new branch which splits the short key of the existing child at this + // nibble. + let (nibble, short_key) = self.push_new_branch(key); + // Push the new leaf onto the new branch. + self.push_new_leaf(targets, nibble, short_key, val)?; + } else { + let short_key = key.slice_unchecked(common_prefix_len + 1, key.len()); + self.push_new_leaf(targets, nibble, short_key, val)?; + } + + return Ok(()) + } + } + + /// Given the lower and upper bounds (exclusive) of a range of keys, iterates over the + /// `hashed_cursor` and calculates all trie nodes possible based on those keys. If the upper + /// bound is None then it is considered unbounded. + /// + /// It is expected that this method is "driven" by `next_uncached_key_range`, which decides + /// which ranges of keys need to be calculated based on what cached trie data is available. + #[instrument( + target = TRACE_TARGET, + level = "trace", + skip(self, value_encoder, targets, hashed_cursor_current), + )] + fn calculate_key_range( + &mut self, + value_encoder: &VE, + targets: &mut TargetsIter>, + hashed_cursor_current: &mut Option<(Nibbles, VE::DeferredEncoder)>, + lower_bound: Nibbles, + upper_bound: Option, + ) -> Result<(), StateProofError> { + // A helper closure for mapping entries returned from the `hashed_cursor`, converting the + // key to Nibbles and immediately creating the DeferredValueEncoder so that encoding of the + // leaf value can begin ASAP. + let map_hashed_cursor_entry = |(key_b256, val): (B256, _)| { + debug_assert_eq!(key_b256.len(), 32); + // SAFETY: key is a B256 and so is exactly 32-bytes. 
+ let key = unsafe { Nibbles::unpack_unchecked(key_b256.as_slice()) }; + let val = value_encoder.deferred_encoder(key_b256, val); + (key, val) + }; + + // If the cursor hasn't been used, or the last iterated key is prior to this range's + // key range, then seek forward to at least the first key. + if hashed_cursor_current.as_ref().is_none_or(|(key, _)| key < &lower_bound) { + let lower_key = B256::right_padding_from(&lower_bound.pack()); + *hashed_cursor_current = + self.hashed_cursor.seek(lower_key)?.map(map_hashed_cursor_entry); + } + + // Loop over all keys in the range, calling `push_leaf` on each. + while let Some((key, _)) = hashed_cursor_current.as_ref() && + upper_bound.is_none_or(|upper_bound| key < &upper_bound) + { + let (key, val) = + core::mem::take(hashed_cursor_current).expect("while-let checks for Some"); + self.push_leaf(targets, key, val)?; + *hashed_cursor_current = self.hashed_cursor.next()?.map(map_hashed_cursor_entry); + } + + Ok(()) + } + + /// Constructs and returns a new [`ProofTrieBranch`] based on an existing [`BranchNodeCompact`]. + #[inline] + const fn new_from_cached_branch( + cached_branch: &BranchNodeCompact, + ext_len: u8, + ) -> ProofTrieBranch { + ProofTrieBranch { + ext_len, + state_mask: TrieMask::new(0), + masks: TrieMasks { + tree_mask: Some(cached_branch.tree_mask), + hash_mask: Some(cached_branch.hash_mask), + }, + } + } + + /// Pushes a new branch onto the `branch_stack` which is based on a cached branch obtained via + /// the trie cursor. + /// + /// If there is already a child at the top branch of `branch_stack` occupying this new branch's + /// nibble then that child will have its short-key split with another new branch, and this + /// cached branch will be a child of that splitting branch. + fn push_cached_branch( + &mut self, + targets: &mut TargetsIter>, + cached_path: Nibbles, + cached_branch: &BranchNodeCompact, + ) -> Result<(), StateProofError> { + debug_assert!( + cached_path.starts_with(&self.branch_path), + "push_cached_branch called with path {cached_path:?} which is not a child of current branch {:?}", + self.branch_path, + ); + + let parent_branch = self.branch_stack.last(); + + // If both stacks are empty then there were no leaves before this cached branch, push it and + // be done; the extension of the branch will be its full path. + if self.child_stack.is_empty() && parent_branch.is_none() { + self.branch_path = cached_path; + self.branch_stack + .push(Self::new_from_cached_branch(cached_branch, cached_path.len() as u8)); + return Ok(()) + } + + // Get the nibble which should be set in the parent branch's `state_mask` for this new + // branch. + let cached_branch_nibble = cached_path.get_unchecked(self.branch_path.len()); + + // We calculate the `ext_len` of the new branch, and potentially update its nibble if a new + // parent branch is inserted here, based on the state of the parent branch. + let (cached_branch_nibble, ext_len) = if parent_branch + .is_none_or(|parent_branch| parent_branch.state_mask.is_bit_set(cached_branch_nibble)) + { + // If the `child_stack` is not empty but the `branch_stack` is then it implies that + // there must be a leaf or extension at the root of the trie whose short-key will get + // split by a new branch, which will become the parent of both that leaf/extension and + // this new branch. + // + // Similarly, if there is a branch on the `branch_stack` but its `state_mask` bit for + // this new branch is already set, then there must be a leaf/extension with a short-key + // to be split. 
+ debug_assert!(!self + .child_stack + .last() + .expect("already checked for emptiness") + .short_key() + .is_empty()); + + // Split that leaf/extension's short key with a new branch. + let (nibble, short_key) = self.push_new_branch(cached_path); + (nibble, short_key.len()) + } else { + // If there is a parent branch but its `state_mask` bit for this branch is not set + // then we can simply calculate the `ext_len` based on the difference of each, minus + // 1 to account for the nibble in the `state_mask`. + (cached_branch_nibble, cached_path.len() - self.branch_path.len() - 1) + }; + + // `commit_last_child` relies on the last set bit of the parent branch's `state_mask` to + // determine the path of the last child on the `child_stack`. Since we are about to + // change that mask we need to commit that last child first. + self.commit_last_child(targets)?; + + // When pushing a new branch we need to set its child nibble in the `state_mask` of + // its parent, if there is one. + if let Some(parent_branch) = self.branch_stack.last_mut() { + parent_branch.state_mask.set_bit(cached_branch_nibble); + } + + // Finally update the `branch_path` and push the new branch. + self.branch_path = cached_path; + self.branch_stack.push(Self::new_from_cached_branch(cached_branch, ext_len as u8)); + + trace!( + target: TRACE_TARGET, + branch=?self.branch_stack.last(), + branch_path=?self.branch_path, + "Pushed cached branch", + ); + + Ok(()) + } + + /// Attempts to pop off the top branch of the `cached_branch_stack`, returning + /// [`PopCachedBranchOutcome::Popped`] on success. Returns other variants to indicate that the + /// stack is empty and what to do about it. + /// + /// This method only returns [`PopCachedBranchOutcome::CalculateLeaves`] if there is a cached + /// branch on top of the stack. + #[inline] + fn try_pop_cached_branch( + &mut self, + trie_cursor_state: &mut TrieCursorState, + uncalculated_lower_bound: &Option, + ) -> Result { + // If there is a branch on top of the stack we use that. + if let Some(cached) = self.cached_branch_stack.pop() { + return Ok(PopCachedBranchOutcome::Popped(cached)); + } + + // There is no cached branch on the stack. It's possible that another one exists + // farther on in the trie, but we perform some checks first to prevent unnecessary + // attempts to find it. + + // If the `uncalculated_lower_bound` is None it indicates that there can be no more + // leaf data, so similarly there be no more branches. + let Some(uncalculated_lower_bound) = uncalculated_lower_bound else { + return Ok(PopCachedBranchOutcome::Exhausted) + }; + + // If [`TrieCursorState::path`] returns None it means that the cursor has been + // exhausted, so there can be no more cached data. + let Some(trie_cursor_path) = trie_cursor_state.path() else { + return Ok(PopCachedBranchOutcome::Exhausted) + }; + + // If the trie cursor is seeked to a branch whose leaves have already been processed + // then we can't use it, instead we seek forward and try again. + if trie_cursor_path < uncalculated_lower_bound { + *trie_cursor_state = + TrieCursorState::new(self.trie_cursor.seek(*uncalculated_lower_bound)?); + + // Having just seeked forward we need to check if the cursor is now exhausted. + if matches!(trie_cursor_state, TrieCursorState::Exhausted) { + return Ok(PopCachedBranchOutcome::Exhausted) + }; + } + + // At this point we can be sure that the cursor is in an `Available` state. 
We know for + // sure it's not `Exhausted` because of the call to `path` above, and we know it's not + // `Taken` because we push all taken branches onto the `cached_branch_stack`, and the + // stack is empty. + // + // We will use this `Available` cached branch as our next branch. + let cached = trie_cursor_state.take(); + trace!(target: TRACE_TARGET, cached=?cached, "Pushed next trie node onto cached_branch_stack"); + + // If the calculated range is not caught up to the next cached branch it means there + // are portions of the trie prior to that branch which may need to be calculated; + // return the uncalculated range up to that branch to make that happen. + // + // If the next cached branch's path is all zeros then we can skip this catch-up step, + // because there cannot be any keys prior to that range. + let cached_path = &cached.0; + if uncalculated_lower_bound < cached_path && !PATH_ALL_ZEROS.starts_with(cached_path) { + let range = (*uncalculated_lower_bound, Some(*cached_path)); + trace!(target: TRACE_TARGET, ?range, "Returning key range to calculate in order to catch up to cached branch"); + + // Push the cached branch onto the stack so it's available once the leaf range is done + // being calculated. + self.cached_branch_stack.push(cached); + + return Ok(PopCachedBranchOutcome::CalculateLeaves(range)); + } + + Ok(PopCachedBranchOutcome::Popped(cached)) + } + + /// Accepts the current state of both hashed and trie cursors, and determines the next range of + /// hashed keys which need to be processed using [`Self::push_leaf`]. + /// + /// This method will use cached branch node data from the trie cursor to skip over all possible + /// ranges of keys, to reduce computation as much as possible. + /// + /// # Returns + /// + /// - `None`: No more data to process, finish computation + /// + /// - `Some(lower, None)`: Indicates to call `push_leaf` on all keys starting at `lower`, with + /// no upper bound. This method won't be called again after this. + /// + /// - `Some(lower, Some(upper))`: Indicates to call `push_leaf` on all keys starting at `lower`, + /// up to but excluding `upper`, and then call this method once done. + #[instrument(target = TRACE_TARGET, level = "trace", skip_all)] + fn next_uncached_key_range( + &mut self, + targets: &mut TargetsIter>, + trie_cursor_state: &mut TrieCursorState, + hashed_key_current_path: Option, + ) -> Result)>, StateProofError> { + // Pop any under-construction branches that are now complete. + // All trie data prior to the current cached branch, if any, has been computed. Any branches + // which were under-construction previously, and which are not on the same path as this + // cached branch, can be assumed to be completed; they will not have any further keys added + // to them. + if let Some(cached_path) = self.cached_branch_stack.last().map(|kv| kv.0) { + while !cached_path.starts_with(&self.branch_path) { + self.pop_branch(targets)?; + } + } + + // `uncalculated_lower_bound` tracks the lower bound of node paths which have yet to be + // visited, either via the hashed key cursor (`calculate_key_range`) or trie cursor (this + // method). If this is None then there are no further nodes which could exist. + // + // This starts off being based on the hashed cursor's current position, which is the + // next hashed key which hasn't been processed. If that is None then we start from zero. + let mut uncalculated_lower_bound = Some(hashed_key_current_path.unwrap_or_default()); + + loop { + // Pop the currently cached branch node. 
+ // + // NOTE we pop off the `cached_branch_stack` because cloning the `BranchNodeCompact` + // means cloning an Arc, which incurs synchronization overhead. We have to be sure to + // push the cached branch back onto the stack once done. + let (cached_path, cached_branch) = match self + .try_pop_cached_branch(trie_cursor_state, &uncalculated_lower_bound)? + { + PopCachedBranchOutcome::Popped(cached) => cached, + PopCachedBranchOutcome::Exhausted => { + // If cached branches are exhausted it's possible that there is still an + // unbounded range of leaves to be processed. `uncalculated_lower_bound` is + // used to return that range. + trace!(target: TRACE_TARGET, ?uncalculated_lower_bound, "Exhausted cached trie nodes"); + return Ok(uncalculated_lower_bound.map(|lower| (lower, None))); + } + PopCachedBranchOutcome::CalculateLeaves(range) => { + return Ok(Some(range)); + } + }; + + trace!( + target: TRACE_TARGET, + branch_path = ?self.branch_path, + branch_state_mask = ?self.branch_stack.last().map(|b| b.state_mask), + ?cached_path, + cached_branch_state_mask = ?cached_branch.state_mask, + cached_branch_hash_mask = ?cached_branch.hash_mask, + "loop", + ); + + // Since we've popped all branches which don't start with cached_path, branch_path at + // this point must be equal to or shorter than cached_path. + debug_assert!( + self.branch_path.len() < cached_path.len() || self.branch_path == cached_path, + "branch_path {:?} is different-or-longer-than cached_path {cached_path:?}", + self.branch_path + ); + + // If the branch_path != cached_path it means the branch_stack is either empty, or the + // top branch is the parent of this cached branch. Either way we push a branch + // corresponding to the cached one onto the stack, so we can begin constructing it. + if self.branch_path != cached_path { + self.push_cached_branch(targets, cached_path, &cached_branch)?; + } + + // At this point the top of the branch stack is the same branch which was found in the + // cache. + let curr_branch = + self.branch_stack.last().expect("top of branch_stack corresponds to cached branch"); + + let cached_state_mask = cached_branch.state_mask.get(); + let curr_state_mask = curr_branch.state_mask.get(); + + // Determine all child nibbles which are set in the cached branch but not the + // under-construction branch. + let next_child_nibbles = curr_state_mask ^ cached_state_mask; + debug_assert_eq!( + cached_state_mask | next_child_nibbles, cached_state_mask, + "curr_branch has state_mask bits set which aren't set on cached_branch. curr_branch:{:?}", + curr_state_mask, + ); + + // If there are no further children to construct for this branch then pop it off both + // stacks and loop using the parent branch. + if next_child_nibbles == 0 { + trace!( + target: TRACE_TARGET, + path=?cached_path, + ?curr_branch, + ?cached_branch, + "No further children, popping branch", + ); + self.pop_branch(targets)?; + + // no need to pop from `cached_branch_stack`, the current cached branch is already + // popped (see note at the top of the loop). + + // The just-popped branch is completely processed; we know there can be no more keys + // with that prefix. Set the lower bound which can be returned from this method to + // be the next possible prefix, if any. + uncalculated_lower_bound = increment_and_strip_trailing_zeros(&cached_path); + + continue + } + + // Determine the next nibble of the branch which has not yet been constructed, and + // determine the child's full path. 
+ let child_nibble = next_child_nibbles.trailing_zeros() as u8; + let child_path = self.child_path_at(child_nibble); + + // If the `hash_mask` bit is set for the next child it means the child's hash is cached + // in the `cached_branch`. We can use that instead of re-calculating the hash of the + // entire sub-trie. + // + // If the child needs to be retained for a proof then we should not use the cached + // hash, and instead continue on to calculate its node manually. + if cached_branch.hash_mask.is_bit_set(child_nibble) { + // Commit the last child. We do this here for two reasons: + // - `commit_last_child` will check if the last child needs to be retained. We need + // to check that before the subsequent `should_retain` call here to prevent + // `targets` from being moved beyond the last child before it is checked. + // - If we do end up using the cached hash value, then we will need to commit the + // last child before pushing a new one onto the stack anyway. + self.commit_last_child(targets)?; + + if !self.should_retain(targets, &child_path) { + // Pull this child's hash out of the cached branch node. To get the hash's index + // we first need to calculate the mask of which cached hashes have already been + // used by this branch (if any). The number of set bits in that mask will be the + // index of the next hash in the array to use. + let curr_hashed_used_mask = cached_branch.hash_mask.get() & curr_state_mask; + let hash_idx = curr_hashed_used_mask.count_ones() as usize; + let hash = cached_branch.hashes[hash_idx]; + + trace!( + target: TRACE_TARGET, + ?child_path, + ?hash_idx, + ?hash, + "Using cached hash for child", + ); + + self.child_stack.push(ProofTrieBranchChild::RlpNode(RlpNode::word_rlp(&hash))); + self.branch_stack + .last_mut() + .expect("already asserted there is a last branch") + .state_mask + .set_bit(child_nibble); + + // Update the `uncalculated_lower_bound` to indicate that the child whose bit + // was just set is completely processed. + uncalculated_lower_bound = increment_and_strip_trailing_zeros(&child_path); + + // Push the current cached branch back onto the stack before looping. + self.cached_branch_stack.push((cached_path, cached_branch)); + + continue + } + } + + // We now want to check if there is a cached branch node at this child. The cached + // branch node may be the node at this child directly, or this child may be an + // extension and the cached branch is the child of that extension. + + // All trie nodes prior to `child_path` will not be modified further, so we can seek the + // trie cursor to the next cached node at-or-after `child_path`. + if trie_cursor_state.path().is_some_and(|path| path < &child_path) { + trace!(target: TRACE_TARGET, ?child_path, "Seeking trie cursor to child path"); + *trie_cursor_state = TrieCursorState::new(self.trie_cursor.seek(child_path)?); + } + + // If the next cached branch node is a child of `child_path` then we can assume it is + // the cached branch for this child. We push it onto the `cached_branch_stack` and loop + // back to the top. 
+ if let TrieCursorState::Available(next_cached_path, next_cached_branch) = + &trie_cursor_state && + next_cached_path.starts_with(&child_path) + { + // Push the current cached branch back on before pushing its child and then looping + self.cached_branch_stack.push((cached_path, cached_branch)); + + trace!( + target: TRACE_TARGET, + ?child_path, + ?next_cached_path, + ?next_cached_branch, + "Pushing cached branch for child", + ); + self.cached_branch_stack.push(trie_cursor_state.take()); + continue; + } + + // There is no cached data for the sub-trie at this child, we must recalculate the + // sub-trie root (this child) using the leaves. Return the range of keys based on the + // child path. + let child_path_upper = increment_and_strip_trailing_zeros(&child_path); + trace!( + target: TRACE_TARGET, + lower=?child_path, + upper=?child_path_upper, + "Returning sub-trie's key range to calculate", + ); + + // Push the current cached branch back onto the stack before returning. + self.cached_branch_stack.push((cached_path, cached_branch)); + + return Ok(Some((child_path, child_path_upper))); + } + } + + /// Internal implementation of proof calculation. Assumes both cursors have already been reset. + /// See docs on [`Self::proof`] for expected behavior. + fn proof_inner( + &mut self, + value_encoder: &VE, + targets: impl IntoIterator, + ) -> Result, StateProofError> { + trace!(target: TRACE_TARGET, "proof_inner: called"); + + // In debug builds, verify that targets are sorted + #[cfg(debug_assertions)] + let targets = { + let mut prev: Option = None; + targets.into_iter().inspect(move |target| { + if let Some(prev) = prev { + debug_assert!(&prev <= target, "prev:{prev:?} target:{target:?}"); + } + prev = Some(*target); + }) + }; + + #[cfg(not(debug_assertions))] + let targets = targets.into_iter(); + + // Convert B256 targets into Nibbles. + let targets = targets.into_iter().map(|key| { + // SAFETY: key is a B256 and so is exactly 32-bytes. + unsafe { Nibbles::unpack_unchecked(key.as_slice()) } + }); + + // Wrap targets into a `TargetsIter`. + let mut targets = WindowIter::new(targets).peekable(); + + // If there are no targets then nothing could be returned, return early. + if targets.peek().is_none() { + trace!(target: TRACE_TARGET, "Empty targets, returning"); + return Ok(Vec::new()) + } + + // Ensure initial state is cleared. By the end of the method call these should be empty once + // again. + debug_assert!(self.branch_stack.is_empty()); + debug_assert!(self.branch_path.is_empty()); + debug_assert!(self.child_stack.is_empty()); + + // Initialize the hashed cursor to None to indicate it hasn't been seeked yet. + let mut hashed_cursor_current: Option<(Nibbles, VE::DeferredEncoder)> = None; + + // Initialize the `trie_cursor_state` with the node closest to root. + let mut trie_cursor_state = TrieCursorState::new(self.trie_cursor.seek(Nibbles::new())?); + + loop { + // Determine the range of keys of the overall trie which need to be re-computed. + let Some((lower_bound, upper_bound)) = self.next_uncached_key_range( + &mut targets, + &mut trie_cursor_state, + hashed_cursor_current.as_ref().map(|kv| kv.0), + )? + else { + // If `next_uncached_key_range` determines that there can be no more keys then + // complete the computation. 
+ break; + }; + + // Calculate the trie for that range of keys + self.calculate_key_range( + value_encoder, + &mut targets, + &mut hashed_cursor_current, + lower_bound, + upper_bound, + )?; + + // Once outside `calculate_key_range`, `hashed_cursor_current` will be at the first key + // after the range. + // + // If the `hashed_cursor_current` is None then there are no more keys at all, meaning + // the trie couldn't possibly have more data and we should complete computation. + if hashed_cursor_current.is_none() { + break; + } + } + + // Once there's no more leaves we can pop the remaining branches, if any. + while !self.branch_stack.is_empty() { + self.pop_branch(&mut targets)?; + } + + // At this point the branch stack should be empty. If the child stack is empty it means no + // keys were ever iterated from the hashed cursor in the first place. Otherwise there should + // only be a single node left: the root node. + debug_assert!(self.branch_stack.is_empty()); + debug_assert!(self.branch_path.is_empty()); + debug_assert!(self.child_stack.len() < 2); + + // All targets match the root node, so always retain it. Determine the root node based on + // the child stack, and push the proof of the root node onto the result stack. + let root_node = if let Some(node) = self.child_stack.pop() { + self.rlp_encode_buf.clear(); + node.into_proof_trie_node(Nibbles::new(), &mut self.rlp_encode_buf)? + } else { + ProofTrieNode { + path: Nibbles::new(), // root path + node: TrieNode::EmptyRoot, + masks: TrieMasks::none(), + } + }; + self.retained_proofs.push(root_node); + + trace!( + target: TRACE_TARGET, + retained_proofs_len = ?self.retained_proofs.len(), + "proof_inner: returning", + ); + Ok(core::mem::take(&mut self.retained_proofs)) + } +} + +impl ProofCalculator +where + TC: TrieCursor, + HC: HashedCursor, + VE: LeafValueEncoder, +{ + /// Generate a proof for the given targets. + /// + /// Given lexicographically sorted targets, returns nodes whose paths are a prefix of any + /// target. The returned nodes will be sorted lexicographically by path. + /// + /// # Panics + /// + /// In debug builds, panics if the targets are not sorted lexicographically. + #[instrument(target = TRACE_TARGET, level = "trace", skip_all)] + pub fn proof( + &mut self, + value_encoder: &VE, + targets: impl IntoIterator, + ) -> Result, StateProofError> { + self.trie_cursor.reset(); + self.hashed_cursor.reset(); + self.proof_inner(value_encoder, targets) + } +} + +/// A proof calculator for storage tries. +pub type StorageProofCalculator = ProofCalculator; + +impl StorageProofCalculator +where + TC: TrieStorageCursor, + HC: HashedStorageCursor, +{ + /// Create a new [`StorageProofCalculator`] instance. + pub fn new_storage(trie_cursor: TC, hashed_cursor: HC) -> Self { + Self::new(trie_cursor, hashed_cursor) + } + + /// Generate a proof for a storage trie at the given hashed address. + /// + /// Given lexicographically sorted targets, returns nodes whose paths are a prefix of any + /// target. The returned nodes will be sorted lexicographically by path. + /// + /// # Panics + /// + /// In debug builds, panics if the targets are not sorted lexicographically. + #[instrument(target = TRACE_TARGET, level = "trace", skip(self, targets))] + pub fn storage_proof( + &mut self, + hashed_address: B256, + targets: impl IntoIterator, + ) -> Result, StateProofError> { + /// Static storage value encoder instance used by all storage proofs. 
+        static STORAGE_VALUE_ENCODER: StorageValueEncoder = StorageValueEncoder;
+
+        self.hashed_cursor.set_hashed_address(hashed_address);
+
+        // Shortcut: check if storage is empty
+        if self.hashed_cursor.is_storage_empty()? {
+            // Return a single EmptyRoot node at the root path
+            return Ok(vec![ProofTrieNode {
+                path: Nibbles::default(),
+                node: TrieNode::EmptyRoot,
+                masks: TrieMasks::none(),
+            }])
+        }
+
+        // Don't call `set_hashed_address` on the trie cursor until after the previous shortcut has
+        // been checked.
+        self.trie_cursor.set_hashed_address(hashed_address);
+
+        // Use the static StorageValueEncoder and pass it to proof_inner
+        self.proof_inner(&STORAGE_VALUE_ENCODER, targets)
+    }
+}
+
+/// `WindowIter` is a wrapper around an [`Iterator`] which allows viewing both the previous and the
+/// current item on every iteration. It is similar to `itertools::tuple_windows`, except that the
+/// final item returned will contain the previous item and `None` as the current.
+struct WindowIter<I: Iterator> {
+    iter: I,
+    prev: Option<I::Item>,
+}
+
+impl<I: Iterator> WindowIter<I> {
+    /// Wraps an iterator with a [`WindowIter`].
+    const fn new(iter: I) -> Self {
+        Self { iter, prev: None }
+    }
+}
+
+impl<I: Iterator<Item: Copy>> Iterator for WindowIter<I> {
+    /// The iterator returns the previous and current items, respectively. For items `[a, b, c]` it
+    /// yields `(a, Some(b))`, `(b, Some(c))`, then `(c, None)`. In other words, once the underlying
+    /// iterator is exhausted, `(prev, None)` is returned on the subsequent call to
+    /// `WindowIter::next`, and `None` from the call after that.
+    type Item = (I::Item, Option<I::Item>);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        loop {
+            match (self.prev, self.iter.next()) {
+                (None, None) => return None,
+                (None, Some(v)) => {
+                    self.prev = Some(v);
+                }
+                (Some(v), next) => {
+                    self.prev = next;
+                    return Some((v, next))
+                }
+            }
+        }
+    }
+}
+
+/// Used to track the state of the trie cursor, allowing us to differentiate between a branch
+/// having been taken (used as a cached branch) and the cursor having been exhausted.
+#[derive(Debug)]
+enum TrieCursorState {
+    /// Cursor is seeked to this path and the node has not been used yet.
+    Available(Nibbles, BranchNodeCompact),
+    /// Cursor is seeked to this path, but the node has been used.
+    Taken(Nibbles),
+    /// Cursor has been exhausted.
+    Exhausted,
+}
+
+impl TrieCursorState {
+    /// Creates a [`Self`] based on an entry returned from the cursor itself.
+    fn new(entry: Option<(Nibbles, BranchNodeCompact)>) -> Self {
+        entry.map_or(Self::Exhausted, |(path, node)| Self::Available(path, node))
+    }
+
+    /// Returns the path the cursor is seeked to, or None if it's exhausted.
+    const fn path(&self) -> Option<&Nibbles> {
+        match self {
+            Self::Available(path, _) | Self::Taken(path) => Some(path),
+            Self::Exhausted => None,
+        }
+    }
+
+    /// Takes the path and node from a [`Self::Available`]. Panics if not [`Self::Available`].
+    fn take(&mut self) -> (Nibbles, BranchNodeCompact) {
+        let Self::Available(path, _) = self else {
+            panic!("take called on non-Available: {self:?}")
+        };
+
+        let path = *path;
+        let Self::Available(path, node) = core::mem::replace(self, Self::Taken(path)) else {
+            unreachable!("already checked that self is Self::Available");
+        };
+
+        (path, node)
+    }
+}
+
+/// Describes the state of the currently cached branch node (if any).
+enum PopCachedBranchOutcome {
+    /// Cached branch has been popped from the `cached_branch_stack` and is ready to be used.
+    Popped((Nibbles, BranchNodeCompact)),
+    /// All cached branches have been exhausted.
+    Exhausted,
+    /// Need to calculate leaves from this range (exclusive upper) before the cached branch
+    /// (catch-up range).
If None then + CalculateLeaves((Nibbles, Option)), +} + +/// Increments the nibbles and strips any trailing zeros. +/// +/// This function wraps `Nibbles::increment` and when it returns a value with trailing zeros, +/// it strips those zeros using bit manipulation on the underlying U256. +fn increment_and_strip_trailing_zeros(nibbles: &Nibbles) -> Option { + let mut result = nibbles.increment()?; + + // If result is empty, just return it + if result.is_empty() { + return Some(result); + } + + // Get access to the underlying U256 to detect trailing zeros + let uint_val = *result.as_mut_uint_unchecked(); + let non_zero_prefix_len = 64 - (uint_val.trailing_zeros() / 4); + result.truncate(non_zero_prefix_len); + + Some(result) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + hashed_cursor::{ + mock::MockHashedCursorFactory, HashedCursorFactory, HashedCursorMetricsCache, + InstrumentedHashedCursor, + }, + proof::Proof, + trie_cursor::{ + depth_first, mock::MockTrieCursorFactory, InstrumentedTrieCursor, TrieCursorFactory, + TrieCursorMetricsCache, + }, + }; + use alloy_primitives::map::{B256Map, B256Set}; + use alloy_rlp::Decodable; + use assert_matches::assert_matches; + use itertools::Itertools; + use reth_primitives_traits::Account; + use reth_trie_common::{ + updates::{StorageTrieUpdates, TrieUpdates}, + HashedPostState, MultiProofTargets, TrieNode, + }; + + /// Target to use with the `tracing` crate. + static TRACE_TARGET: &str = "trie::proof_v2::tests"; + + /// A test harness for comparing `ProofCalculator` and legacy `Proof` implementations. + /// + /// This harness creates mock cursor factories from a `HashedPostState` and provides + /// a method to test that both proof implementations produce equivalent results. + struct ProofTestHarness { + /// Mock factory for trie cursors (empty by default for leaf-only tests) + trie_cursor_factory: MockTrieCursorFactory, + /// Mock factory for hashed cursors, populated from `HashedPostState` + hashed_cursor_factory: MockHashedCursorFactory, + } + + impl ProofTestHarness { + /// Creates a new test harness from a `HashedPostState`. + /// + /// The `HashedPostState` is used to populate the mock hashed cursor factory directly. + /// The trie cursor factory is initialized from `TrieUpdates` generated by `StateRoot`. + fn new(post_state: HashedPostState) -> Self { + // Create empty trie cursor factory to serve as the initial state for StateRoot + // Ensure that there's a storage trie dataset for every account, to make + // `MockTrieCursorFactory` happy. + let storage_tries: B256Map<_> = post_state + .accounts + .keys() + .copied() + .map(|addr| (addr, StorageTrieUpdates::default())) + .collect(); + + let empty_trie_cursor_factory = MockTrieCursorFactory::from_trie_updates(TrieUpdates { + storage_tries: storage_tries.clone(), + ..Default::default() + }); + + // Create mock hashed cursor factory from the post state + let hashed_cursor_factory = MockHashedCursorFactory::from_hashed_post_state(post_state); + + // Generate TrieUpdates using StateRoot + let (_root, mut trie_updates) = + crate::StateRoot::new(empty_trie_cursor_factory, hashed_cursor_factory.clone()) + .root_with_updates() + .expect("StateRoot should succeed"); + + // Continue using empty storage tries for each account, to keep `MockTrieCursorFactory` + // happy. 
+ trie_updates.storage_tries = storage_tries; + + // Initialize trie cursor factory from the generated TrieUpdates + let trie_cursor_factory = MockTrieCursorFactory::from_trie_updates(trie_updates); + + Self { trie_cursor_factory, hashed_cursor_factory } + } + + /// Asserts that `ProofCalculator` and legacy `Proof` produce equivalent results for account + /// proofs. + /// + /// This method calls both implementations with the given account targets and compares + /// the results. + fn assert_proof( + &self, + targets: impl IntoIterator, + ) -> Result<(), StateProofError> { + let targets_vec = targets.into_iter().sorted().collect::>(); + + // Convert B256 targets to MultiProofTargets for legacy implementation + // For account-only proofs, each account maps to an empty storage set + let legacy_targets = targets_vec + .iter() + .map(|addr| (*addr, B256Set::default())) + .collect::(); + + // Create ProofCalculator (proof_v2) with account cursors + let trie_cursor = self.trie_cursor_factory.account_trie_cursor()?; + let hashed_cursor = self.hashed_cursor_factory.hashed_account_cursor()?; + + // Collect metrics for cursors + let mut trie_cursor_metrics = TrieCursorMetricsCache::default(); + let trie_cursor = InstrumentedTrieCursor::new(trie_cursor, &mut trie_cursor_metrics); + let mut hashed_cursor_metrics = HashedCursorMetricsCache::default(); + let hashed_cursor = + InstrumentedHashedCursor::new(hashed_cursor, &mut hashed_cursor_metrics); + + // Call ProofCalculator::proof with account targets + let value_encoder = SyncAccountValueEncoder::new( + self.trie_cursor_factory.clone(), + self.hashed_cursor_factory.clone(), + ); + let mut proof_calculator = ProofCalculator::new(trie_cursor, hashed_cursor); + let proof_v2_result = proof_calculator.proof(&value_encoder, targets_vec.clone())?; + + // Output metrics + trace!(target: TRACE_TARGET, ?trie_cursor_metrics, "V2 trie cursor metrics"); + trace!(target: TRACE_TARGET, ?hashed_cursor_metrics, "V2 hashed cursor metrics"); + + // Call Proof::multiproof (legacy implementation) + let proof_legacy_result = + Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone()) + .with_branch_node_masks(true) + .multiproof(legacy_targets)?; + + // Decode and sort legacy proof nodes + let mut proof_legacy_nodes = proof_legacy_result + .account_subtree + .iter() + .map(|(path, node_enc)| { + let mut buf = node_enc.as_ref(); + let node = TrieNode::decode(&mut buf) + .expect("legacy implementation should not produce malformed proof nodes"); + + // The legacy proof calculator will calculate masks for the root node, even + // though we never store the root node so the masks for it aren't really valid. + let masks = if path.is_empty() { + TrieMasks::none() + } else { + TrieMasks { + hash_mask: proof_legacy_result + .branch_node_hash_masks + .get(path) + .copied(), + tree_mask: proof_legacy_result + .branch_node_tree_masks + .get(path) + .copied(), + } + }; + + ProofTrieNode { path: *path, node, masks } + }) + .sorted_by(|a, b| depth_first::cmp(&a.path, &b.path)) + .collect::>(); + + // When no targets are given the legacy implementation will still produce the root node + // in the proof. This differs from the V2 implementation, which produces nothing when + // given no targets. + if targets_vec.is_empty() { + assert_matches!( + proof_legacy_nodes.pop(), + Some(ProofTrieNode { path, .. 
}) if path.is_empty() + ); + assert!(proof_legacy_nodes.is_empty()); + } + + // Basic comparison: both should succeed and produce identical results + pretty_assertions::assert_eq!(proof_legacy_nodes, proof_v2_result); + + Ok(()) + } + } + + mod proptest_tests { + use super::*; + use alloy_primitives::{map::B256Map, U256}; + use proptest::prelude::*; + use reth_trie_common::HashedPostState; + + /// Generate a strategy for Account values + fn account_strategy() -> impl Strategy { + (any::(), any::(), any::<[u8; 32]>()).prop_map( + |(nonce, balance, code_hash)| Account { + nonce, + balance: U256::from(balance), + bytecode_hash: Some(B256::from(code_hash)), + }, + ) + } + + /// Generate a strategy for `HashedPostState` with random accounts + fn hashed_post_state_strategy() -> impl Strategy { + prop::collection::vec((any::<[u8; 32]>(), account_strategy()), 0..=100).prop_map( + |accounts| { + let account_map = accounts + .into_iter() + .map(|(addr_bytes, account)| (B256::from(addr_bytes), Some(account))) + .collect::>(); + + HashedPostState { accounts: account_map, ..Default::default() } + }, + ) + } + + /// Generate a strategy for proof targets that are 80% from the `HashedPostState` accounts + /// and 20% random keys. + fn proof_targets_strategy(account_keys: Vec) -> impl Strategy> { + let num_accounts = account_keys.len(); + + // Generate between 0 and (num_accounts + 5) targets + let target_count = 0..=(num_accounts + 5); + + target_count.prop_flat_map(move |count| { + let account_keys = account_keys.clone(); + prop::collection::vec( + prop::bool::weighted(0.8).prop_flat_map(move |from_accounts| { + if from_accounts && !account_keys.is_empty() { + // 80% chance: pick from existing account keys + prop::sample::select(account_keys.clone()).boxed() + } else { + // 20% chance: generate random B256 + any::<[u8; 32]>().prop_map(B256::from).boxed() + } + }), + count, + ) + }) + } + + proptest! { + #![proptest_config(ProptestConfig::with_cases(8000))] + #[test] + /// Tests that ProofCalculator produces valid proofs for randomly generated + /// HashedPostState with proof targets. + /// + /// This test: + /// - Generates random accounts in a HashedPostState + /// - Generates proof targets: 80% from existing account keys, 20% random + /// - Creates a test harness with the generated state + /// - Calls assert_proof with the generated targets + /// - Verifies both ProofCalculator and legacy Proof produce equivalent results + fn proptest_proof_with_targets( + (post_state, targets) in hashed_post_state_strategy() + .prop_flat_map(|post_state| { + let mut account_keys: Vec = post_state.accounts.keys().copied().collect(); + // Sort to ensure deterministic order when using PROPTEST_RNG_SEED + account_keys.sort_unstable(); + let targets_strategy = proof_targets_strategy(account_keys); + (Just(post_state), targets_strategy) + }) + ) { + reth_tracing::init_test_tracing(); + let harness = ProofTestHarness::new(post_state); + + // Pass generated targets to both implementations + harness.assert_proof(targets).expect("Proof generation failed"); + } + } + } + + #[test] + fn test_big_trie() { + use rand::prelude::*; + + reth_tracing::init_test_tracing(); + let mut rng = rand::rngs::SmallRng::seed_from_u64(1); + + let mut rand_b256 = || { + let mut buf: [u8; 32] = [0; 32]; + rng.fill_bytes(&mut buf); + B256::from_slice(&buf) + }; + + // Generate random HashedPostState. 
+        let mut post_state = HashedPostState::default();
+        for _ in 0..10240 {
+            let hashed_addr = rand_b256();
+            let account = Account { bytecode_hash: Some(hashed_addr), ..Default::default() };
+            post_state.accounts.insert(hashed_addr, Some(account));
+        }
+
+        // Collect targets; partially from real keys, partially random keys which probably won't
+        // exist.
+        let num_real_targets = post_state.accounts.len() * 5;
+        let mut targets =
+            post_state.accounts.keys().copied().sorted().take(num_real_targets).collect::<Vec<_>>();
+        for _ in 0..post_state.accounts.len() / 5 {
+            targets.push(rand_b256());
+        }
+        targets.sort();
+
+        // Create test harness
+        let harness = ProofTestHarness::new(post_state);
+
+        // Assert the proof
+        harness.assert_proof(targets).expect("Proof generation failed");
+    }
+
+    #[test]
+    fn test_increment_and_strip_trailing_zeros() {
+        let test_cases: Vec<(Nibbles, Option<Nibbles>)> = vec![
+            // Basic increment without trailing zeros
+            (Nibbles::from_nibbles([0x1, 0x2, 0x3]), Some(Nibbles::from_nibbles([0x1, 0x2, 0x4]))),
+            // Increment with trailing zeros - should be stripped
+            (Nibbles::from_nibbles([0x0, 0x0, 0xF]), Some(Nibbles::from_nibbles([0x0, 0x1]))),
+            (Nibbles::from_nibbles([0x0, 0xF, 0xF]), Some(Nibbles::from_nibbles([0x1]))),
+            // Overflow case
+            (Nibbles::from_nibbles([0xF, 0xF, 0xF]), None),
+            // Empty nibbles
+            (Nibbles::new(), None),
+            // Single nibble
+            (Nibbles::from_nibbles([0x5]), Some(Nibbles::from_nibbles([0x6]))),
+            // All Fs except the first - results in trailing zeros after increment
+            (Nibbles::from_nibbles([0xE, 0xF, 0xF]), Some(Nibbles::from_nibbles([0xF]))),
+        ];
+
+        for (input, expected) in test_cases {
+            let result = increment_and_strip_trailing_zeros(&input);
+            assert_eq!(result, expected, "Failed for input: {:?}", input);
+        }
+    }
+}
diff --git a/crates/trie/trie/src/proof_v2/node.rs b/crates/trie/trie/src/proof_v2/node.rs
new file mode 100644
index 0000000000..9300123fbe
--- /dev/null
+++ b/crates/trie/trie/src/proof_v2/node.rs
@@ -0,0 +1,223 @@
+use crate::proof_v2::DeferredValueEncoder;
+use alloy_rlp::Encodable;
+use alloy_trie::nodes::ExtensionNodeRef;
+use reth_execution_errors::trie::StateProofError;
+use reth_trie_common::{
+    BranchNode, ExtensionNode, LeafNode, LeafNodeRef, Nibbles, ProofTrieNode, RlpNode, TrieMask,
+    TrieMasks, TrieNode,
+};
+
+/// A trie node which is the child of a branch in the trie.
+#[derive(Debug)]
+pub(crate) enum ProofTrieBranchChild<RF> {
+    /// A leaf node whose value has yet to be calculated and encoded.
+    Leaf {
+        /// The short key of the leaf.
+        short_key: Nibbles,
+        /// The [`DeferredValueEncoder`] which will encode the leaf's value.
+        value: RF,
+    },
+    /// An extension node whose child branch has been converted to an [`RlpNode`].
+    Extension {
+        /// The short key of the extension.
+        short_key: Nibbles,
+        /// The [`RlpNode`] of the child branch.
+        child: RlpNode,
+    },
+    /// A branch node whose children have already been flattened into [`RlpNode`]s.
+    Branch {
+        /// The node itself, for use during RLP encoding.
+        node: BranchNode,
+        /// Bitmasks carried over from cached `BranchNodeCompact` values, if any.
+        masks: TrieMasks,
+    },
+    /// A node whose type is not known, as it has already been converted to an [`RlpNode`].
+    RlpNode(RlpNode),
+}
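For reference while reading `into_rlp` below, here is a stand-alone sketch of how a leaf child ends up as an `RlpNode` once its value bytes are known. It is illustrative only (not part of this diff), assumes the `alloy-trie`/`alloy-rlp` crates this file already depends on, and uses a separate scratch buffer instead of the in-place buffer split that `into_rlp` performs:

```rust
use alloy_rlp::Encodable;
use alloy_trie::{
    nodes::{LeafNodeRef, RlpNode},
    Nibbles,
};

/// Builds the `RlpNode` for a leaf from its short key and an already RLP-encoded value.
fn leaf_rlp_sketch(short_key: &Nibbles, value_rlp: &[u8]) -> RlpNode {
    // Encode the leaf node (hex-prefix-encoded short key + value RLP) into a scratch buffer.
    let leaf = LeafNodeRef::new(short_key, value_rlp);
    let mut out = Vec::with_capacity(leaf.length());
    leaf.encode(&mut out);

    // `RlpNode::from_rlp` keeps the encoding inline when it is shorter than 32 bytes and
    // stores its keccak256 hash otherwise, which is what a parent branch then embeds.
    RlpNode::from_rlp(&out)
}
```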
+
+impl<RF: DeferredValueEncoder> ProofTrieBranchChild<RF> {
+    /// Converts this child into its RLP node representation. This potentially also returns an
+    /// `RlpNode` buffer which can be re-used for other [`ProofTrieBranchChild`]s.
+    pub(crate) fn into_rlp(
+        self,
+        buf: &mut Vec<u8>,
+    ) -> Result<(RlpNode, Option<Vec<RlpNode>>), StateProofError> {
+        match self {
+            Self::Leaf { short_key, value } => {
+                // RLP encode the value itself
+                value.encode(buf)?;
+                let value_enc_len = buf.len();
+
+                // Determine the required buffer size for the encoded leaf
+                let leaf_enc_len = LeafNodeRef::new(&short_key, buf).length();
+
+                // We want to re-use buf for the encoding of the leaf node as well. To do this we
+                // will keep appending to it, leaving the already encoded value in-place. First we
+                // must ensure the buffer is big enough, then we'll split.
+                buf.resize(value_enc_len + leaf_enc_len, 0);
+
+                // SAFETY: we have just resized the buffer to at least `value_enc_len`, so the
+                // split index must be in-bounds.
+                let (value_buf, mut leaf_buf) =
+                    unsafe { buf.split_at_mut_unchecked(value_enc_len) };
+
+                // Encode the leaf into the right side of the split buffer, and return the RlpNode.
+                LeafNodeRef::new(&short_key, value_buf).encode(&mut leaf_buf);
+                Ok((RlpNode::from_rlp(&buf[value_enc_len..]), None))
+            }
+            Self::Extension { short_key, child } => {
+                ExtensionNodeRef::new(&short_key, child.as_slice()).encode(buf);
+                Ok((RlpNode::from_rlp(buf), None))
+            }
+            Self::Branch { node: branch_node, .. } => {
+                branch_node.encode(buf);
+                Ok((RlpNode::from_rlp(buf), Some(branch_node.stack)))
+            }
+            Self::RlpNode(rlp_node) => Ok((rlp_node, None)),
+        }
+    }
+
+    /// Converts this child into a [`ProofTrieNode`] having the given path.
+    ///
+    /// # Panics
+    ///
+    /// If called on a [`Self::RlpNode`].
+    pub(crate) fn into_proof_trie_node(
+        self,
+        path: Nibbles,
+        buf: &mut Vec<u8>,
+    ) -> Result<ProofTrieNode, StateProofError> {
+        let (node, masks) = match self {
+            Self::Leaf { short_key, value } => {
+                value.encode(buf)?;
+                // Counter-intuitively a clone is better here than a `core::mem::take`. If we take
+                // the buffer then future RLP-encodes will need to re-allocate a new one, and
+                // RLP-encodes after those may need a bigger buffer and therefore re-alloc again.
+                //
+                // By cloning here we do a single allocation of exactly the size we need to take
+                // this value, and the passed in buffer can remain with whatever large capacity it
+                // already has.
+                let rlp_val = buf.clone();
+                (TrieNode::Leaf(LeafNode::new(short_key, rlp_val)), TrieMasks::none())
+            }
+            Self::Extension { short_key, child } => {
+                (TrieNode::Extension(ExtensionNode { key: short_key, child }), TrieMasks::none())
+            }
+            Self::Branch { node, masks } => (TrieNode::Branch(node), masks),
+            Self::RlpNode(_) => panic!("Cannot call `into_proof_trie_node` on RlpNode"),
+        };
+
+        Ok(ProofTrieNode { node, path, masks })
+    }
+
+    /// Returns the short key of the child, if it is a leaf or extension, or empty if it's a
+    /// [`Self::Branch`] or [`Self::RlpNode`].
+    pub(crate) fn short_key(&self) -> &Nibbles {
+        match self {
+            Self::Leaf { short_key, .. } | Self::Extension { short_key, .. } => short_key,
+            Self::Branch { .. } | Self::RlpNode(_) => {
+                static EMPTY_NIBBLES: Nibbles = Nibbles::new();
+                &EMPTY_NIBBLES
+            }
+        }
+    }
+
+    /// Trims the given number of nibbles off the head of the short key.
+    ///
+    /// If the node is an extension and the given length is the same as its short key length, then
+    /// the node is replaced with its child.
+ /// + /// # Panics + /// + /// - If the given len is longer than the short key + /// - If the given len is the same as the length of a leaf's short key + /// - If the node is a [`Self::Branch`] or [`Self::RlpNode`] + pub(crate) fn trim_short_key_prefix(&mut self, len: usize) { + match self { + Self::Extension { short_key, child } if short_key.len() == len => { + *self = Self::RlpNode(core::mem::take(child)); + } + Self::Leaf { short_key, .. } | Self::Extension { short_key, .. } => { + *short_key = trim_nibbles_prefix(short_key, len); + } + Self::Branch { .. } | Self::RlpNode(_) => { + panic!("Cannot call `trim_short_key_prefix` on Branch or RlpNode") + } + } + } +} + +/// A single branch in the trie which is under construction. The actual child nodes of the branch +/// will be tracked as [`ProofTrieBranchChild`]s on a stack. +#[derive(Debug)] +pub(crate) struct ProofTrieBranch { + /// The length of the parent extension node's short key. If zero then the branch's parent is + /// not an extension but instead another branch. + pub(crate) ext_len: u8, + /// A mask tracking which child nibbles are set on the branch so far. There will be a single + /// child on the stack for each set bit. + pub(crate) state_mask: TrieMask, + /// Bitmasks which are subsets of `state_mask`. + pub(crate) masks: TrieMasks, +} + +/// Trims the first `len` nibbles from the head of the given `Nibbles`. +/// +/// # Panics +/// +/// Panics if the given `len` is greater than the length of the `Nibbles`. +pub(crate) fn trim_nibbles_prefix(n: &Nibbles, len: usize) -> Nibbles { + debug_assert!(n.len() >= len); + n.slice_unchecked(len, n.len()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_trim_nibbles_prefix_basic() { + // Create nibbles [1, 2, 3, 4, 5, 6] + let nibbles = Nibbles::from_nibbles([1, 2, 3, 4, 5, 6]); + + // Trim first 2 nibbles + let trimmed = trim_nibbles_prefix(&nibbles, 2); + assert_eq!(trimmed.len(), 4); + + // Verify the remaining nibbles are [3, 4, 5, 6] + assert_eq!(trimmed.get(0), Some(3)); + assert_eq!(trimmed.get(1), Some(4)); + assert_eq!(trimmed.get(2), Some(5)); + assert_eq!(trimmed.get(3), Some(6)); + } + + #[test] + fn test_trim_nibbles_prefix_zero() { + // Create nibbles [10, 11, 12, 13] + let nibbles = Nibbles::from_nibbles([10, 11, 12, 13]); + + // Trim zero nibbles - should return identical nibbles + let trimmed = trim_nibbles_prefix(&nibbles, 0); + assert_eq!(trimmed, nibbles); + } + + #[test] + fn test_trim_nibbles_prefix_all() { + // Create nibbles [1, 2, 3, 4] + let nibbles = Nibbles::from_nibbles([1, 2, 3, 4]); + + // Trim all nibbles - should return empty + let trimmed = trim_nibbles_prefix(&nibbles, 4); + assert!(trimmed.is_empty()); + } + + #[test] + fn test_trim_nibbles_prefix_empty() { + // Create empty nibbles + let nibbles = Nibbles::new(); + + // Trim zero from empty - should return empty + let trimmed = trim_nibbles_prefix(&nibbles, 0); + assert!(trimmed.is_empty()); + } +} diff --git a/crates/trie/trie/src/proof_v2/value.rs b/crates/trie/trie/src/proof_v2/value.rs new file mode 100644 index 0000000000..b97e7579d4 --- /dev/null +++ b/crates/trie/trie/src/proof_v2/value.rs @@ -0,0 +1,173 @@ +//! Generic value encoder types for proof calculation with lazy evaluation. 
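The intended call-side flow of the two traits defined in this module is two-phase: a deferred encoder is created as soon as a leaf is read from the hashed cursor, and the value is only RLP-encoded when the leaf is committed into a branch. A minimal sketch, assuming the traits and `StorageValueEncoder` defined below are in scope (the exact import path depends on how `proof_v2` re-exports them):

```rust
use alloy_primitives::{B256, U256};
use reth_execution_errors::trie::StateProofError;

/// Produces the RLP bytes for a storage leaf via the two-phase encoder API.
fn storage_leaf_rlp(slot: B256, value: U256) -> Result<Vec<u8>, StateProofError> {
    // Phase 1: capture the value as early as possible. For storage slots this is trivial;
    // for account leaves this is the hook where storage-root computation can be kicked off.
    let deferred = StorageValueEncoder.deferred_encoder(slot, value);

    // Phase 2: encode only when the leaf is actually committed, as late as possible.
    let mut buf = Vec::new();
    deferred.encode(&mut buf)?;
    Ok(buf)
}
```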
+
+use crate::{
+    hashed_cursor::HashedCursorFactory, proof_v2::ProofCalculator, trie_cursor::TrieCursorFactory,
+};
+use alloy_primitives::{B256, U256};
+use alloy_rlp::Encodable;
+use reth_execution_errors::trie::StateProofError;
+use reth_primitives_traits::Account;
+use std::rc::Rc;
+
+/// A trait for deferred RLP-encoding of leaf values.
+pub trait DeferredValueEncoder {
+    /// RLP encodes the value into the provided buffer.
+    ///
+    /// # Arguments
+    ///
+    /// * `buf` - A mutable buffer to encode the data into
+    fn encode(self, buf: &mut Vec<u8>) -> Result<(), StateProofError>;
+}
+
+/// A trait for RLP-encoding values for proof calculation. This trait is designed to allow the lazy
+/// computation of leaf values in a generic way.
+///
+/// When calculating a leaf value in a storage trie the [`DeferredValueEncoder`] simply holds onto
+/// the slot value, and the `encode` method synchronously RLP-encodes it.
+///
+/// When calculating a leaf value in the accounts trie we create a [`DeferredValueEncoder`] to
+/// initiate any asynchronous computation of the account's storage root we want to do. Later we
+/// call [`DeferredValueEncoder::encode`] to obtain the result of that computation and RLP-encode
+/// it.
+pub trait LeafValueEncoder {
+    /// The type of value being encoded (e.g., U256 for storage, Account for accounts).
+    type Value;
+
+    /// The type that will compute and encode the value when needed.
+    type DeferredEncoder: DeferredValueEncoder;
+
+    /// Returns an encoder that will RLP-encode the value when its `encode` method is called.
+    ///
+    /// # Arguments
+    ///
+    /// * `key` - The key the value was stored at in the DB
+    /// * `value` - The value to encode
+    ///
+    /// The returned deferred encoder will be called as late as possible in the algorithm to
+    /// maximize the time available for parallel computation (e.g., storage root calculation).
+    fn deferred_encoder(&self, key: B256, value: Self::Value) -> Self::DeferredEncoder;
+}
+
+/// An encoder for storage slot values.
+///
+/// This encoder simply RLP-encodes U256 storage values directly.
+#[derive(Debug, Clone, Copy, Default)]
+pub struct StorageValueEncoder;
+
+/// The deferred encoder for a storage slot value.
+#[derive(Debug, Clone, Copy)]
+pub struct StorageDeferredValueEncoder(U256);
+
+impl DeferredValueEncoder for StorageDeferredValueEncoder {
+    fn encode(self, buf: &mut Vec<u8>) -> Result<(), StateProofError> {
+        self.0.encode(buf);
+        Ok(())
+    }
+}
+
+impl LeafValueEncoder for StorageValueEncoder {
+    type Value = U256;
+    type DeferredEncoder = StorageDeferredValueEncoder;
+
+    fn deferred_encoder(&self, _key: B256, value: Self::Value) -> Self::DeferredEncoder {
+        StorageDeferredValueEncoder(value)
+    }
+}
+
+/// An account value encoder that synchronously computes storage roots.
+///
+/// This encoder contains factories for creating trie and hashed cursors. Storage roots are
+/// computed synchronously within the deferred encoder using a `StorageProofCalculator`.
+#[derive(Debug, Clone)]
+pub struct SyncAccountValueEncoder<T, H> {
+    /// Factory for creating trie cursors.
+    trie_cursor_factory: Rc<T>,
+    /// Factory for creating hashed cursors.
+    hashed_cursor_factory: Rc<H>,
+}
+
+impl<T, H> SyncAccountValueEncoder<T, H> {
+    /// Create a new account value encoder with the given factories.
+ pub fn new(trie_cursor_factory: T, hashed_cursor_factory: H) -> Self { + Self { + trie_cursor_factory: Rc::new(trie_cursor_factory), + hashed_cursor_factory: Rc::new(hashed_cursor_factory), + } + } +} + +/// The deferred encoder for an account value with synchronous storage root calculation. +#[derive(Debug, Clone)] +pub struct SyncAccountDeferredValueEncoder { + trie_cursor_factory: Rc, + hashed_cursor_factory: Rc, + hashed_address: B256, + account: Account, +} + +impl DeferredValueEncoder for SyncAccountDeferredValueEncoder +where + T: TrieCursorFactory, + H: HashedCursorFactory, +{ + // Synchronously computes the storage root for this account and RLP-encodes the resulting + // `TrieAccount` into `buf` + fn encode(self, buf: &mut Vec) -> Result<(), StateProofError> { + // Create cursors for storage proof calculation + let trie_cursor = self.trie_cursor_factory.storage_trie_cursor(self.hashed_address)?; + let hashed_cursor = + self.hashed_cursor_factory.hashed_storage_cursor(self.hashed_address)?; + + // Create storage proof calculator with StorageValueEncoder + let mut storage_proof_calculator = ProofCalculator::new_storage(trie_cursor, hashed_cursor); + + // Compute storage root by calling storage_proof with the root path as a target. + // This returns just the root node of the storage trie. + let storage_root = storage_proof_calculator + .storage_proof(self.hashed_address, [B256::ZERO]) + .map(|nodes| { + // Encode the root node to RLP and hash it + let root_node = + nodes.first().expect("storage_proof always returns at least the root"); + root_node.node.encode(buf); + + let storage_root = alloy_primitives::keccak256(buf.as_slice()); + + // Clear the buffer so we can re-use it to encode the TrieAccount + buf.clear(); + + storage_root + })?; + + // Combine account with storage root to create TrieAccount + let trie_account = self.account.into_trie_account(storage_root); + + // Encode the trie account + trie_account.encode(buf); + + Ok(()) + } +} + +impl LeafValueEncoder for SyncAccountValueEncoder +where + T: TrieCursorFactory, + H: HashedCursorFactory, +{ + type Value = Account; + type DeferredEncoder = SyncAccountDeferredValueEncoder; + + fn deferred_encoder( + &self, + hashed_address: B256, + account: Self::Value, + ) -> Self::DeferredEncoder { + // Return a deferred encoder that will synchronously compute the storage root when encode() + // is called. 
+ SyncAccountDeferredValueEncoder { + trie_cursor_factory: self.trie_cursor_factory.clone(), + hashed_cursor_factory: self.hashed_cursor_factory.clone(), + hashed_address, + account, + } + } +} diff --git a/crates/trie/trie/src/trie_cursor/in_memory.rs b/crates/trie/trie/src/trie_cursor/in_memory.rs index d9658150f3..941fbf9633 100644 --- a/crates/trie/trie/src/trie_cursor/in_memory.rs +++ b/crates/trie/trie/src/trie_cursor/in_memory.rs @@ -1,4 +1,4 @@ -use super::{TrieCursor, TrieCursorFactory}; +use super::{TrieCursor, TrieCursorFactory, TrieStorageCursor}; use crate::{forward_cursor::ForwardInMemoryCursor, updates::TrieUpdatesSorted}; use alloy_primitives::B256; use reth_storage_errors::db::DatabaseError; @@ -37,29 +37,16 @@ where fn account_trie_cursor(&self) -> Result, DatabaseError> { let cursor = self.cursor_factory.account_trie_cursor()?; - Ok(InMemoryTrieCursor::new(Some(cursor), self.trie_updates.as_ref().account_nodes_ref())) + Ok(InMemoryTrieCursor::new_account(cursor, self.trie_updates.as_ref())) } fn storage_trie_cursor( &self, hashed_address: B256, ) -> Result, DatabaseError> { - // if the storage trie has no updates then we use this as the in-memory overlay. - static EMPTY_UPDATES: Vec<(Nibbles, Option)> = Vec::new(); - - let storage_trie_updates = - self.trie_updates.as_ref().storage_tries_ref().get(&hashed_address); - let (storage_nodes, cleared) = storage_trie_updates - .map(|u| (u.storage_nodes_ref(), u.is_deleted())) - .unwrap_or((&EMPTY_UPDATES, false)); - - let cursor = if cleared { - None - } else { - Some(self.cursor_factory.storage_trie_cursor(hashed_address)?) - }; - - Ok(InMemoryTrieCursor::new(cursor, storage_nodes)) + let trie_updates = self.trie_updates.as_ref(); + let cursor = self.cursor_factory.storage_trie_cursor(hashed_address)?; + Ok(InMemoryTrieCursor::new_storage(cursor, trie_updates, hashed_address)) } } @@ -67,37 +54,74 @@ where /// It will always give precedence to the data from the trie updates. #[derive(Debug)] pub struct InMemoryTrieCursor<'a, C> { - /// The underlying cursor. If None then it is assumed there is no DB data. - cursor: Option, + /// The underlying cursor. + cursor: C, + /// Whether the underlying cursor should be ignored (when storage trie was wiped). + cursor_wiped: bool, /// Entry that `cursor` is currently pointing to. cursor_entry: Option<(Nibbles, BranchNodeCompact)>, /// Forward-only in-memory cursor over storage trie nodes. in_memory_cursor: ForwardInMemoryCursor<'a, Nibbles, Option>, /// The key most recently returned from the Cursor. last_key: Option, - #[cfg(debug_assertions)] /// Whether an initial seek was called. seeked: bool, + /// Reference to the full trie updates. + trie_updates: &'a TrieUpdatesSorted, } impl<'a, C: TrieCursor> InMemoryTrieCursor<'a, C> { - /// Create new trie cursor which combines a DB cursor (None to assume empty DB) and a set of - /// in-memory trie nodes. - pub fn new( - cursor: Option, - trie_updates: &'a [(Nibbles, Option)], - ) -> Self { - let in_memory_cursor = ForwardInMemoryCursor::new(trie_updates); + /// Create new account trie cursor which combines a DB cursor and the trie updates. + pub fn new_account(cursor: C, trie_updates: &'a TrieUpdatesSorted) -> Self { + let in_memory_cursor = ForwardInMemoryCursor::new(trie_updates.account_nodes_ref()); Self { cursor, + cursor_wiped: false, cursor_entry: None, in_memory_cursor, last_key: None, - #[cfg(debug_assertions)] seeked: false, + trie_updates, } } + /// Create new storage trie cursor with full trie updates reference. 
+ /// This allows the cursor to switch between storage tries when `set_hashed_address` is called. + pub fn new_storage( + cursor: C, + trie_updates: &'a TrieUpdatesSorted, + hashed_address: B256, + ) -> Self { + let (in_memory_cursor, cursor_wiped) = + Self::get_storage_overlay(trie_updates, hashed_address); + Self { + cursor, + cursor_wiped, + cursor_entry: None, + in_memory_cursor, + last_key: None, + seeked: false, + trie_updates, + } + } + + /// Returns the storage overlay for `hashed_address` and whether it was deleted. + fn get_storage_overlay( + trie_updates: &'a TrieUpdatesSorted, + hashed_address: B256, + ) -> (ForwardInMemoryCursor<'a, Nibbles, Option>, bool) { + let storage_trie_updates = trie_updates.storage_tries_ref().get(&hashed_address); + let cursor_wiped = storage_trie_updates.is_some_and(|u| u.is_deleted()); + let storage_nodes = storage_trie_updates.map(|u| u.storage_nodes_ref()).unwrap_or(&[]); + + (ForwardInMemoryCursor::new(storage_nodes), cursor_wiped) + } + + /// Returns a mutable reference to the underlying cursor if it's not wiped, None otherwise. + fn get_cursor_mut(&mut self) -> Option<&mut C> { + (!self.cursor_wiped).then_some(&mut self.cursor) + } + /// Asserts that the next entry to be returned from the cursor is not previous to the last entry /// returned. fn set_last_key(&mut self, next_entry: &Option<(Nibbles, BranchNodeCompact)>) { @@ -113,14 +137,16 @@ impl<'a, C: TrieCursor> InMemoryTrieCursor<'a, C> { /// Seeks the `cursor_entry` field of the struct using the cursor. fn cursor_seek(&mut self, key: Nibbles) -> Result<(), DatabaseError> { - if let Some(entry) = self.cursor_entry.as_ref() && - entry.0 >= key - { - // If already seeked to the given key then don't do anything. Also if we're seeked past - // the given key then don't anything, because `TrieCursor` is specifically a - // forward-only cursor. - } else { - self.cursor_entry = self.cursor.as_mut().map(|c| c.seek(key)).transpose()?.flatten(); + // Only seek if: + // 1. We have a cursor entry and need to seek forward (entry.0 < key), OR + // 2. We have no cursor entry and haven't seeked yet (!self.seeked) + let should_seek = match self.cursor_entry.as_ref() { + Some(entry) => entry.0 < key, + None => !self.seeked, + }; + + if should_seek { + self.cursor_entry = self.get_cursor_mut().map(|c| c.seek(key)).transpose()?.flatten(); } Ok(()) @@ -128,15 +154,12 @@ impl<'a, C: TrieCursor> InMemoryTrieCursor<'a, C> { /// Seeks the `cursor_entry` field of the struct to the subsequent entry using the cursor. fn cursor_next(&mut self) -> Result<(), DatabaseError> { - #[cfg(debug_assertions)] - { - debug_assert!(self.seeked); - } + debug_assert!(self.seeked); // If the previous entry is `None`, and we've done a seek previously, then the cursor is // exhausted and we shouldn't call `next` again. 
if self.cursor_entry.is_some() { - self.cursor_entry = self.cursor.as_mut().map(|c| c.next()).transpose()?.flatten(); + self.cursor_entry = self.get_cursor_mut().map(|c| c.next()).transpose()?.flatten(); } Ok(()) @@ -189,10 +212,7 @@ impl TrieCursor for InMemoryTrieCursor<'_, C> { self.cursor_seek(key)?; let mem_entry = self.in_memory_cursor.seek(&key); - #[cfg(debug_assertions)] - { - self.seeked = true; - } + self.seeked = true; let entry = match (mem_entry, &self.cursor_entry) { (Some((mem_key, entry_inner)), _) if mem_key == key => { @@ -213,10 +233,7 @@ impl TrieCursor for InMemoryTrieCursor<'_, C> { self.cursor_seek(key)?; self.in_memory_cursor.seek(&key); - #[cfg(debug_assertions)] - { - self.seeked = true; - } + self.seeked = true; let entry = self.choose_next_entry()?; self.set_last_key(&entry); @@ -224,10 +241,7 @@ impl TrieCursor for InMemoryTrieCursor<'_, C> { } fn next(&mut self) -> Result, DatabaseError> { - #[cfg(debug_assertions)] - { - debug_assert!(self.seeked, "Cursor must be seek'd before next is called"); - } + debug_assert!(self.seeked, "Cursor must be seek'd before next is called"); // A `last_key` of `None` indicates that the cursor is exhausted. let Some(last_key) = self.last_key else { @@ -256,9 +270,38 @@ impl TrieCursor for InMemoryTrieCursor<'_, C> { fn current(&mut self) -> Result, DatabaseError> { match &self.last_key { Some(key) => Ok(Some(*key)), - None => Ok(self.cursor.as_mut().map(|c| c.current()).transpose()?.flatten()), + None => Ok(self.get_cursor_mut().map(|c| c.current()).transpose()?.flatten()), } } + + fn reset(&mut self) { + let Self { + cursor, + cursor_wiped, + cursor_entry, + in_memory_cursor, + last_key, + seeked, + trie_updates: _, + } = self; + + cursor.reset(); + in_memory_cursor.reset(); + + *cursor_wiped = false; + *cursor_entry = None; + *last_key = None; + *seeked = false; + } +} + +impl TrieStorageCursor for InMemoryTrieCursor<'_, C> { + fn set_hashed_address(&mut self, hashed_address: B256) { + self.reset(); + self.cursor.set_hashed_address(hashed_address); + (self.in_memory_cursor, self.cursor_wiped) = + Self::get_storage_overlay(self.trie_updates, hashed_address); + } } #[cfg(test)] @@ -282,7 +325,8 @@ mod tests { let visited_keys = Arc::new(Mutex::new(Vec::new())); let mock_cursor = MockTrieCursor::new(db_nodes_arc, visited_keys); - let mut cursor = InMemoryTrieCursor::new(Some(mock_cursor), &test_case.in_memory_nodes); + let trie_updates = TrieUpdatesSorted::new(test_case.in_memory_nodes, Default::default()); + let mut cursor = InMemoryTrieCursor::new_account(mock_cursor, &trie_updates); let mut results = Vec::new(); @@ -465,7 +509,8 @@ mod tests { let visited_keys = Arc::new(Mutex::new(Vec::new())); let mock_cursor = MockTrieCursor::new(db_nodes_arc, visited_keys); - let mut cursor = InMemoryTrieCursor::new(Some(mock_cursor), &in_memory_nodes); + let trie_updates = TrieUpdatesSorted::new(in_memory_nodes, Default::default()); + let mut cursor = InMemoryTrieCursor::new_account(mock_cursor, &trie_updates); let result = cursor.seek_exact(Nibbles::from_nibbles([0x2])).unwrap(); assert_eq!( @@ -564,7 +609,8 @@ mod tests { let visited_keys = Arc::new(Mutex::new(Vec::new())); let mock_cursor = MockTrieCursor::new(db_nodes_arc, visited_keys); - let mut cursor = InMemoryTrieCursor::new(Some(mock_cursor), &in_memory_nodes); + let trie_updates = TrieUpdatesSorted::new(in_memory_nodes, Default::default()); + let mut cursor = InMemoryTrieCursor::new_account(mock_cursor, &trie_updates); assert_eq!(cursor.current().unwrap(), None); @@ -578,6 
+624,81 @@ mod tests { assert_eq!(cursor.current().unwrap(), Some(Nibbles::from_nibbles([0x3]))); } + #[test] + fn test_all_storage_slots_deleted_not_wiped_exact_keys() { + use tracing::debug; + reth_tracing::init_test_tracing(); + + // This test reproduces an edge case where: + // - cursor is not None (not wiped) + // - All in-memory entries are deletions (None values) + // - Database has corresponding entries + // - Expected: NO leaves should be returned (all deleted) + + // Generate 42 trie node entries with keys distributed across the keyspace + let mut db_nodes: Vec<(Nibbles, BranchNodeCompact)> = (0..10) + .map(|i| { + let key_bytes = vec![(i * 6) as u8, i as u8]; // Spread keys across keyspace + let nibbles = Nibbles::unpack(key_bytes); + (nibbles, BranchNodeCompact::new(i as u16, i as u16, 0, vec![], None)) + }) + .collect(); + + db_nodes.sort_by_key(|(key, _)| *key); + db_nodes.dedup_by_key(|(key, _)| *key); + + for (key, _) in &db_nodes { + debug!("node at {key:?}"); + } + + // Create in-memory entries with same keys but all None values (deletions) + let in_memory_nodes: Vec<(Nibbles, Option)> = + db_nodes.iter().map(|(key, _)| (*key, None)).collect(); + + let db_nodes_map: BTreeMap = db_nodes.into_iter().collect(); + let db_nodes_arc = Arc::new(db_nodes_map); + let visited_keys = Arc::new(Mutex::new(Vec::new())); + let mock_cursor = MockTrieCursor::new(db_nodes_arc, visited_keys); + + let trie_updates = TrieUpdatesSorted::new(in_memory_nodes, Default::default()); + let mut cursor = InMemoryTrieCursor::new_account(mock_cursor, &trie_updates); + + // Seek to beginning should return None (all nodes are deleted) + tracing::debug!("seeking to 0x"); + let result = cursor.seek(Nibbles::default()).unwrap(); + assert_eq!( + result, None, + "Expected no entries when all nodes are deleted, but got {:?}", + result + ); + + // Test seek operations at various positions - all should return None + let seek_keys = vec![ + Nibbles::unpack([0x00]), + Nibbles::unpack([0x5d]), + Nibbles::unpack([0x5e]), + Nibbles::unpack([0x5f]), + Nibbles::unpack([0xc2]), + Nibbles::unpack([0xc5]), + Nibbles::unpack([0xc9]), + Nibbles::unpack([0xf0]), + ]; + + for seek_key in seek_keys { + tracing::debug!("seeking to {seek_key:?}"); + let result = cursor.seek(seek_key).unwrap(); + assert_eq!( + result, None, + "Expected None when seeking to {:?} but got {:?}", + seek_key, result + ); + } + + // next() should also always return None + let result = cursor.next().unwrap(); + assert_eq!(result, None, "Expected None from next() but got {:?}", result); + } + mod proptest_tests { use super::*; use itertools::Itertools; @@ -628,7 +749,7 @@ mod tests { /// Generate a sorted vector of (Nibbles, `BranchNodeCompact`) entries fn sorted_db_nodes_strategy() -> impl Strategy> { prop::collection::vec( - (prop::collection::vec(any::(), 0..3), branch_node_strategy()), + (prop::collection::vec(any::(), 0..2), branch_node_strategy()), 0..20, ) .prop_map(|entries| { @@ -648,7 +769,7 @@ mod tests { ) -> impl Strategy)>> { prop::collection::vec( ( - prop::collection::vec(any::(), 0..3), + prop::collection::vec(any::(), 0..2), prop::option::of(branch_node_strategy()), ), 0..20, @@ -666,7 +787,7 @@ mod tests { } proptest! 
{ - #![proptest_config(ProptestConfig::with_cases(1000))] + #![proptest_config(ProptestConfig::with_cases(10000))] #[test] fn proptest_in_memory_trie_cursor( @@ -677,7 +798,12 @@ mod tests { reth_tracing::init_test_tracing(); use tracing::debug; - debug!("Starting proptest!"); + debug!( + db_paths=?db_nodes.iter().map(|(k, _)| k).collect::>(), + in_mem_nodes=?in_memory_nodes.iter().map(|(k, v)| (k, v.is_some())).collect::>(), + num_op_choices=?op_choices.len(), + "Starting proptest!", + ); // Create the expected results by merging the two sorted vectors, // properly handling deletions (None values in in_memory_nodes) @@ -699,7 +825,8 @@ mod tests { let db_nodes_arc = Arc::new(db_nodes_map); let visited_keys = Arc::new(Mutex::new(Vec::new())); let mock_cursor = MockTrieCursor::new(db_nodes_arc, visited_keys); - let mut test_cursor = InMemoryTrieCursor::new(Some(mock_cursor), &in_memory_nodes); + let trie_updates = TrieUpdatesSorted::new(in_memory_nodes, Default::default()); + let mut test_cursor = InMemoryTrieCursor::new_account(mock_cursor, &trie_updates); // Test: seek to the beginning first let control_first = control_cursor.seek(Nibbles::default()).unwrap(); @@ -757,7 +884,7 @@ mod tests { continue; } - let key = *valid_keys[(choice as usize / 3) % valid_keys.len()]; + let key = *valid_keys[choice as usize % valid_keys.len()]; let control_result = control_cursor.seek(key).unwrap(); let test_result = test_cursor.seek(key).unwrap(); @@ -791,7 +918,7 @@ mod tests { continue; } - let key = *valid_keys[(choice as usize / 3) % valid_keys.len()]; + let key = *valid_keys[choice as usize % valid_keys.len()]; let control_result = control_cursor.seek_exact(key).unwrap(); let test_result = test_cursor.seek_exact(key).unwrap(); diff --git a/crates/trie/trie/src/trie_cursor/metrics.rs b/crates/trie/trie/src/trie_cursor/metrics.rs new file mode 100644 index 0000000000..ebbe75002c --- /dev/null +++ b/crates/trie/trie/src/trie_cursor/metrics.rs @@ -0,0 +1,189 @@ +use super::{TrieCursor, TrieStorageCursor}; +use crate::{BranchNodeCompact, Nibbles}; +use alloy_primitives::B256; +use reth_storage_errors::db::DatabaseError; +use std::time::{Duration, Instant}; +use tracing::debug_span; + +#[cfg(feature = "metrics")] +use crate::TrieType; +#[cfg(feature = "metrics")] +use reth_metrics::metrics::{self, Histogram}; + +/// Prometheus metrics for trie cursor operations. +/// +/// Tracks the number of cursor operations for monitoring and performance analysis. +#[cfg(feature = "metrics")] +#[derive(Clone, Debug)] +pub struct TrieCursorMetrics { + /// Histogram tracking overall time spent in database operations + overall_duration: Histogram, + /// Histogram for `next()` operations + next_histogram: Histogram, + /// Histogram for `seek()` operations + seek_histogram: Histogram, + /// Histogram for `seek_exact()` operations + seek_exact_histogram: Histogram, +} + +#[cfg(feature = "metrics")] +impl TrieCursorMetrics { + /// Create a new metrics instance with the specified trie type label. 
+ pub fn new(trie_type: TrieType) -> Self { + let trie_type_str = trie_type.as_str(); + + Self { + overall_duration: metrics::histogram!( + "trie.cursor.overall_duration", + "type" => trie_type_str + ), + next_histogram: metrics::histogram!( + "trie.cursor.operations", + "type" => trie_type_str, + "operation" => "next" + ), + seek_histogram: metrics::histogram!( + "trie.cursor.operations", + "type" => trie_type_str, + "operation" => "seek" + ), + seek_exact_histogram: metrics::histogram!( + "trie.cursor.operations", + "type" => trie_type_str, + "operation" => "seek_exact" + ), + } + } + + /// Record the cached metrics from the provided cache and reset the cache counters. + /// + /// This method adds the current counter values from the cache to the Prometheus metrics + /// and then resets all cache counters to zero. + pub fn record(&mut self, cache: &mut TrieCursorMetricsCache) { + self.next_histogram.record(cache.next_count as f64); + self.seek_histogram.record(cache.seek_count as f64); + self.seek_exact_histogram.record(cache.seek_exact_count as f64); + self.overall_duration.record(cache.total_duration.as_secs_f64()); + cache.reset(); + } +} + +/// Cached metrics counters for trie cursor operations. +#[derive(Debug, Copy, Clone)] +pub struct TrieCursorMetricsCache { + /// Counter for `next()` calls + pub next_count: usize, + /// Counter for `seek()` calls + pub seek_count: usize, + /// Counter for `seek_exact()` calls + pub seek_exact_count: usize, + /// Total duration spent in database operations + pub total_duration: Duration, +} + +impl Default for TrieCursorMetricsCache { + fn default() -> Self { + Self { next_count: 0, seek_count: 0, seek_exact_count: 0, total_duration: Duration::ZERO } + } +} + +impl TrieCursorMetricsCache { + /// Reset all counters to zero. + pub const fn reset(&mut self) { + self.next_count = 0; + self.seek_count = 0; + self.seek_exact_count = 0; + self.total_duration = Duration::ZERO; + } + + /// Extend this cache by adding the counts from another cache. + /// + /// This accumulates the counter values from `other` into this cache. + pub fn extend(&mut self, other: &Self) { + self.next_count += other.next_count; + self.seek_count += other.seek_count; + self.seek_exact_count += other.seek_exact_count; + self.total_duration += other.total_duration; + } + + /// Record the span for metrics. + pub fn record_span(&self, name: &'static str) { + let _span = debug_span!( + target: "trie::trie_cursor", + "Trie cursor metrics", + name, + next_count = self.next_count, + seek_count = self.seek_count, + seek_exact_count = self.seek_exact_count, + total_duration = self.total_duration.as_secs_f64(), + ) + .entered(); + } +} + +/// A wrapper around a [`TrieCursor`] that tracks metrics for cursor operations. +/// +/// This implementation counts the number of times each cursor operation is called: +/// - `next()` - Move to the next entry +/// - `seek()` - Seek to a key or the next greater key +/// - `seek_exact()` - Seek to an exact key match +#[derive(Debug)] +pub struct InstrumentedTrieCursor<'metrics, C> { + /// The underlying cursor being wrapped + cursor: C, + /// Cached metrics counters + metrics: &'metrics mut TrieCursorMetricsCache, +} + +impl<'metrics, C> InstrumentedTrieCursor<'metrics, C> { + /// Create a new metrics cursor wrapping the given cursor. 
+ pub const fn new(cursor: C, metrics: &'metrics mut TrieCursorMetricsCache) -> Self { + Self { cursor, metrics } + } +} + +impl<'metrics, C: TrieCursor> TrieCursor for InstrumentedTrieCursor<'metrics, C> { + fn seek_exact( + &mut self, + key: Nibbles, + ) -> Result, DatabaseError> { + let start = Instant::now(); + self.metrics.seek_exact_count += 1; + let result = self.cursor.seek_exact(key); + self.metrics.total_duration += start.elapsed(); + result + } + + fn seek( + &mut self, + key: Nibbles, + ) -> Result, DatabaseError> { + let start = Instant::now(); + self.metrics.seek_count += 1; + let result = self.cursor.seek(key); + self.metrics.total_duration += start.elapsed(); + result + } + + fn next(&mut self) -> Result, DatabaseError> { + let start = Instant::now(); + self.metrics.next_count += 1; + let result = self.cursor.next(); + self.metrics.total_duration += start.elapsed(); + result + } + + fn current(&mut self) -> Result, DatabaseError> { + self.cursor.current() + } + + fn reset(&mut self) { + self.cursor.reset() + } +} + +impl<'metrics, C: TrieStorageCursor> TrieStorageCursor for InstrumentedTrieCursor<'metrics, C> { + fn set_hashed_address(&mut self, hashed_address: B256) { + self.cursor.set_hashed_address(hashed_address) + } +} diff --git a/crates/trie/trie/src/trie_cursor/mock.rs b/crates/trie/trie/src/trie_cursor/mock.rs index 313df0443e..5f29a6734b 100644 --- a/crates/trie/trie/src/trie_cursor/mock.rs +++ b/crates/trie/trie/src/trie_cursor/mock.rs @@ -2,24 +2,25 @@ use parking_lot::{Mutex, MutexGuard}; use std::{collections::BTreeMap, sync::Arc}; use tracing::instrument; -use super::{TrieCursor, TrieCursorFactory}; +use super::{TrieCursor, TrieCursorFactory, TrieStorageCursor}; use crate::{ mock::{KeyVisit, KeyVisitType}, BranchNodeCompact, Nibbles, }; use alloy_primitives::{map::B256Map, B256}; use reth_storage_errors::db::DatabaseError; +use reth_trie_common::updates::TrieUpdates; /// Mock trie cursor factory. #[derive(Clone, Default, Debug)] pub struct MockTrieCursorFactory { account_trie_nodes: Arc>, - storage_tries: B256Map>>, + storage_tries: Arc>>, /// List of keys that the account trie cursor has visited. visited_account_keys: Arc>>>, /// List of keys that the storage trie cursor has visited, per storage trie. - visited_storage_keys: B256Map>>>>, + visited_storage_keys: Arc>>>>, } impl MockTrieCursorFactory { @@ -31,12 +32,33 @@ impl MockTrieCursorFactory { let visited_storage_keys = storage_tries.keys().map(|k| (*k, Default::default())).collect(); Self { account_trie_nodes: Arc::new(account_trie_nodes), - storage_tries: storage_tries.into_iter().map(|(k, v)| (k, Arc::new(v))).collect(), + storage_tries: Arc::new(storage_tries), visited_account_keys: Default::default(), - visited_storage_keys, + visited_storage_keys: Arc::new(visited_storage_keys), } } + /// Creates a new mock trie cursor factory from `TrieUpdates`. + pub fn from_trie_updates(updates: TrieUpdates) -> Self { + // Convert account nodes from HashMap to BTreeMap + let account_trie_nodes: BTreeMap = + updates.account_nodes.into_iter().collect(); + + // Convert storage tries + let storage_tries: B256Map> = updates + .storage_tries + .into_iter() + .map(|(addr, storage_updates)| { + // Convert storage nodes from HashMap to BTreeMap + let storage_nodes: BTreeMap = + storage_updates.storage_nodes.into_iter().collect(); + (addr, storage_nodes) + }) + .collect(); + + Self::new(account_trie_nodes, storage_tries) + } + /// Returns a reference to the list of visited account keys. 
pub fn visited_account_keys(&self) -> MutexGuard<'_, Vec>> { self.visited_account_keys.lock() @@ -71,40 +93,94 @@ impl TrieCursorFactory for MockTrieCursorFactory { &self, hashed_address: B256, ) -> Result, DatabaseError> { - Ok(MockTrieCursor::new( - self.storage_tries - .get(&hashed_address) - .ok_or_else(|| { - DatabaseError::Other(format!("storage trie for {hashed_address:?} not found")) - })? - .clone(), - self.visited_storage_keys - .get(&hashed_address) - .ok_or_else(|| { - DatabaseError::Other(format!("storage trie for {hashed_address:?} not found")) - })? - .clone(), - )) + MockTrieCursor::new_storage( + self.storage_tries.clone(), + self.visited_storage_keys.clone(), + hashed_address, + ) } } +/// Mock trie cursor type - determines whether this is an account or storage cursor. +#[derive(Debug)] +enum MockTrieCursorType { + Account { + trie_nodes: Arc>, + visited_keys: Arc>>>, + }, + Storage { + all_storage_tries: Arc>>, + all_visited_storage_keys: Arc>>>>, + current_hashed_address: B256, + }, +} + /// Mock trie cursor. -#[derive(Default, Debug)] +#[derive(Debug)] #[non_exhaustive] pub struct MockTrieCursor { /// The current key. If set, it is guaranteed to exist in `trie_nodes`. current_key: Option, - trie_nodes: Arc>, - visited_keys: Arc>>>, + cursor_type: MockTrieCursorType, } impl MockTrieCursor { - /// Creates a new mock trie cursor with the given trie nodes and key tracking. - pub fn new( + /// Creates a new mock trie cursor for accounts with the given trie nodes and key tracking. + pub const fn new( trie_nodes: Arc>, visited_keys: Arc>>>, ) -> Self { - Self { current_key: None, trie_nodes, visited_keys } + Self { + current_key: None, + cursor_type: MockTrieCursorType::Account { trie_nodes, visited_keys }, + } + } + + /// Creates a new mock trie cursor for storage with access to all storage tries. + pub fn new_storage( + all_storage_tries: Arc>>, + all_visited_storage_keys: Arc>>>>, + hashed_address: B256, + ) -> Result { + if !all_storage_tries.contains_key(&hashed_address) { + return Err(DatabaseError::Other(format!( + "storage trie for {hashed_address:?} not found" + ))); + } + Ok(Self { + current_key: None, + cursor_type: MockTrieCursorType::Storage { + all_storage_tries, + all_visited_storage_keys, + current_hashed_address: hashed_address, + }, + }) + } + + /// Returns the trie nodes map for the current cursor type. + fn trie_nodes(&self) -> &BTreeMap { + match &self.cursor_type { + MockTrieCursorType::Account { trie_nodes, .. } => trie_nodes.as_ref(), + MockTrieCursorType::Storage { all_storage_tries, current_hashed_address, .. } => { + all_storage_tries + .get(current_hashed_address) + .expect("current_hashed_address should exist in all_storage_tries") + } + } + } + + /// Returns the visited keys mutex for the current cursor type. + fn visited_keys(&self) -> &Mutex>> { + match &self.cursor_type { + MockTrieCursorType::Account { visited_keys, .. } => visited_keys.as_ref(), + MockTrieCursorType::Storage { + all_visited_storage_keys, + current_hashed_address, + .. 
+ } => all_visited_storage_keys + .get(current_hashed_address) + .expect("current_hashed_address should exist in all_visited_storage_keys"), + } } } @@ -114,11 +190,11 @@ impl TrieCursor for MockTrieCursor { &mut self, key: Nibbles, ) -> Result, DatabaseError> { - let entry = self.trie_nodes.get(&key).cloned().map(|value| (key, value)); + let entry = self.trie_nodes().get(&key).cloned().map(|value| (key, value)); if let Some((key, _)) = &entry { self.current_key = Some(*key); } - self.visited_keys.lock().push(KeyVisit { + self.visited_keys().lock().push(KeyVisit { visit_type: KeyVisitType::SeekExact(key), visited_key: entry.as_ref().map(|(k, _)| *k), }); @@ -131,11 +207,12 @@ impl TrieCursor for MockTrieCursor { key: Nibbles, ) -> Result, DatabaseError> { // Find the first key that is greater than or equal to the given key. - let entry = self.trie_nodes.iter().find_map(|(k, v)| (k >= &key).then(|| (*k, v.clone()))); + let entry = + self.trie_nodes().iter().find_map(|(k, v)| (k >= &key).then(|| (*k, v.clone()))); if let Some((key, _)) = &entry { self.current_key = Some(*key); } - self.visited_keys.lock().push(KeyVisit { + self.visited_keys().lock().push(KeyVisit { visit_type: KeyVisitType::SeekNonExact(key), visited_key: entry.as_ref().map(|(k, _)| *k), }); @@ -144,7 +221,7 @@ impl TrieCursor for MockTrieCursor { #[instrument(skip(self), ret(level = "trace"))] fn next(&mut self) -> Result, DatabaseError> { - let mut iter = self.trie_nodes.iter(); + let mut iter = self.trie_nodes().iter(); // Jump to the first key that has a prefix of the current key if it's set, or to the first // key otherwise. iter.find(|(k, _)| self.current_key.as_ref().is_none_or(|current| k.starts_with(current))) @@ -154,7 +231,7 @@ impl TrieCursor for MockTrieCursor { if let Some((key, _)) = &entry { self.current_key = Some(*key); } - self.visited_keys.lock().push(KeyVisit { + self.visited_keys().lock().push(KeyVisit { visit_type: KeyVisitType::Next, visited_key: entry.as_ref().map(|(k, _)| *k), }); @@ -165,4 +242,22 @@ impl TrieCursor for MockTrieCursor { fn current(&mut self) -> Result, DatabaseError> { Ok(self.current_key) } + + fn reset(&mut self) { + self.current_key = None; + } +} + +impl TrieStorageCursor for MockTrieCursor { + fn set_hashed_address(&mut self, hashed_address: B256) { + self.reset(); + match &mut self.cursor_type { + MockTrieCursorType::Storage { current_hashed_address, .. } => { + *current_hashed_address = hashed_address; + } + MockTrieCursorType::Account { .. } => { + panic!("set_hashed_address called on account cursor") + } + } + } } diff --git a/crates/trie/trie/src/trie_cursor/mod.rs b/crates/trie/trie/src/trie_cursor/mod.rs index 05a6c09e94..ce6b852af4 100644 --- a/crates/trie/trie/src/trie_cursor/mod.rs +++ b/crates/trie/trie/src/trie_cursor/mod.rs @@ -15,9 +15,15 @@ pub mod noop; pub mod depth_first; /// Mock trie cursor implementations. -#[cfg(test)] +#[cfg(any(test, feature = "test-utils"))] pub mod mock; +/// Metrics tracking trie cursor implementations. +pub mod metrics; +#[cfg(feature = "metrics")] +pub use metrics::TrieCursorMetrics; +pub use metrics::{InstrumentedTrieCursor, TrieCursorMetricsCache}; + pub use self::{depth_first::DepthFirstTrieIterator, in_memory::*, subnode::CursorSubNode}; /// Factory for creating trie cursors. @@ -29,7 +35,7 @@ pub trait TrieCursorFactory { Self: 'a; /// The storage trie cursor type. 
- type StorageTrieCursor<'a>: TrieCursor + type StorageTrieCursor<'a>: TrieStorageCursor where Self: 'a; @@ -62,6 +68,26 @@ pub trait TrieCursor { /// Get the current entry. fn current(&mut self) -> Result, DatabaseError>; + + /// Reset the cursor to the beginning. + /// + /// # Important + /// + /// After calling this method, the subsequent operation MUST be a [`TrieCursor::seek`] or + /// [`TrieCursor::seek_exact`] call. + fn reset(&mut self); +} + +/// A cursor for traversing storage trie nodes. +#[auto_impl::auto_impl(&mut)] +pub trait TrieStorageCursor: TrieCursor { + /// Set the hashed address for the storage trie cursor. + /// + /// # Important + /// + /// After calling this method, the subsequent operation MUST be a [`TrieCursor::seek`] or + /// [`TrieCursor::seek_exact`] call. + fn set_hashed_address(&mut self, hashed_address: B256); } /// Iterator wrapper for `TrieCursor` types diff --git a/crates/trie/trie/src/trie_cursor/noop.rs b/crates/trie/trie/src/trie_cursor/noop.rs index a00a18e4f0..0a51fd806d 100644 --- a/crates/trie/trie/src/trie_cursor/noop.rs +++ b/crates/trie/trie/src/trie_cursor/noop.rs @@ -1,4 +1,4 @@ -use super::{TrieCursor, TrieCursorFactory}; +use super::{TrieCursor, TrieCursorFactory, TrieStorageCursor}; use crate::{BranchNodeCompact, Nibbles}; use alloy_primitives::B256; use reth_storage_errors::db::DatabaseError; @@ -60,6 +60,10 @@ impl TrieCursor for NoopAccountTrieCursor { fn current(&mut self) -> Result, DatabaseError> { Ok(None) } + + fn reset(&mut self) { + // Noop + } } /// Noop storage trie cursor. @@ -89,4 +93,14 @@ impl TrieCursor for NoopStorageTrieCursor { fn current(&mut self) -> Result, DatabaseError> { Ok(None) } + + fn reset(&mut self) { + // Noop + } +} + +impl TrieStorageCursor for NoopStorageTrieCursor { + fn set_hashed_address(&mut self, _hashed_address: B256) { + // Noop + } } diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index 871d599c76..b082a482b5 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -24,7 +24,7 @@ use reth_trie_sparse::{ provider::{RevealedNode, TrieNodeProvider, TrieNodeProviderFactory}, SerialSparseTrie, SparseStateTrie, }; -use std::sync::{mpsc, Arc}; +use std::sync::mpsc; /// State transition witness for the trie. #[derive(Debug)] @@ -95,8 +95,8 @@ impl TrieWitness { impl TrieWitness where - T: TrieCursorFactory + Clone + Send + Sync, - H: HashedCursorFactory + Clone + Send + Sync, + T: TrieCursorFactory + Clone, + H: HashedCursorFactory + Clone, { /// Compute the state transition witness for the trie. Gather all required nodes /// to apply `state` on top of the current trie state. 
@@ -147,11 +147,7 @@ where let (tx, rx) = mpsc::channel(); let blinded_provider_factory = WitnessTrieNodeProviderFactory::new( - ProofTrieNodeProviderFactory::new( - self.trie_cursor_factory, - self.hashed_cursor_factory, - Arc::new(self.prefix_sets), - ), + ProofTrieNodeProviderFactory::new(self.trie_cursor_factory, self.hashed_cursor_factory), tx, ); let mut sparse_trie = SparseStateTrie::::new(); diff --git a/docs/cli/help.rs b/docs/cli/help.rs index 0474d00e72..30c769b9ff 100755 --- a/docs/cli/help.rs +++ b/docs/cli/help.rs @@ -62,6 +62,10 @@ struct Args { #[arg(long)] root_summary: bool, + /// Whether to generate TypeScript sidebar files + #[arg(long)] + sidebar: bool, + /// Print verbose output #[arg(short, long)] verbose: bool, @@ -140,10 +144,18 @@ fn main() -> io::Result<()> { if args.verbose { println!("Updating root summary in \"{}\"", path.to_string_lossy()); } - // TODO: This is where we update the cli reference sidebar.ts update_root_summary(path, &root_summary)?; } + // Generate TypeScript sidebar files. + if args.sidebar { + let vocs_dir = Path::new(args.root_dir.as_str()).join("vocs"); + if args.verbose { + println!("Generating TypeScript sidebar files in \"{}\"", vocs_dir.display()); + } + generate_sidebar_files(&vocs_dir, &output, args.verbose)?; + } + Ok(()) } @@ -235,7 +247,7 @@ fn cmd_summary(cmd: &Cmd, indent: usize) -> String { let cmd_s = cmd.to_string(); let cmd_path = cmd_s.replace(" ", "/"); let indent_string = " ".repeat(indent + (cmd.subcommands.len() * 2)); - format!("{}- [`{}`](/cli/{})\n", indent_string, cmd_s, cmd_path) + format!("{}- [`{}`](./{}.mdx)\n", indent_string, cmd_s, cmd_path) } /// Overwrites the root SUMMARY.mdx file with the generated content. @@ -247,6 +259,186 @@ fn update_root_summary(root_dir: &Path, root_summary: &str) -> io::Result<()> { write_file(&summary_file, root_summary) } +/// Generates TypeScript sidebar files for each command. +fn generate_sidebar_files(vocs_dir: &Path, output: &[(Cmd, String)], verbose: bool) -> io::Result<()> { + // Group commands by their root command name (reth or op-reth) + // Also create a map of commands to their help output + let mut commands_by_root: std::collections::HashMap> = std::collections::HashMap::new(); + let mut help_map: std::collections::HashMap = std::collections::HashMap::new(); + + for (cmd, help_output) in output { + let root_name = cmd.command_name().to_string(); + commands_by_root.entry(root_name.clone()).or_insert_with(Vec::new).push(cmd); + // Store help output for each command using its string representation as key + help_map.insert(cmd.to_string(), help_output.clone()); + } + + // Generate sidebar file for each root command + for (root_name, cmds) in commands_by_root { + // Get root help output + let root_help = help_map.get(&root_name).map(|s| s.as_str()); + let sidebar_content = generate_sidebar_ts(&root_name, cmds, root_help, &help_map)?; + let file_name = match root_name.as_str() { + "reth" => "sidebar-cli-reth.ts", + "op-reth" => "sidebar-cli-op-reth.ts", + _ => { + if verbose { + println!("Skipping unknown command: {}", root_name); + } + continue; + } + }; + + let sidebar_file = vocs_dir.join(file_name); + if verbose { + println!("Writing sidebar file: {}", sidebar_file.display()); + } + write_file(&sidebar_file, &sidebar_content)?; + } + + Ok(()) +} + +/// Generates TypeScript code for a sidebar file. 
+fn generate_sidebar_ts( + root_name: &str, + commands: Vec<&Cmd>, + root_help: Option<&str>, + help_map: &std::collections::HashMap, +) -> io::Result { + // Find all top-level commands (commands with exactly one subcommand) + let mut top_level_commands: Vec<&Cmd> = commands + .iter() + .copied() + .filter(|cmd| cmd.subcommands.len() == 1) + .collect(); + + // Remove duplicates using a set + let mut seen = std::collections::HashSet::new(); + top_level_commands.retain(|cmd| { + let key = &cmd.subcommands[0]; + seen.insert(key.clone()) + }); + + // Sort by the order they appear in help output, not alphabetically + if let Some(help) = root_help { + let help_order = parse_sub_commands(help); + top_level_commands.sort_by(|a, b| { + let a_name = &a.subcommands[0]; + let b_name = &b.subcommands[0]; + let a_pos = help_order.iter().position(|x| x == a_name).unwrap_or(usize::MAX); + let b_pos = help_order.iter().position(|x| x == b_name).unwrap_or(usize::MAX); + a_pos.cmp(&b_pos) + }); + } + + // Generate TypeScript code + let var_name = match root_name { + "reth" => "rethCliSidebar", + "op-reth" => "opRethCliSidebar", + _ => "cliSidebar", + }; + + let mut ts_code = String::from("import { SidebarItem } from \"vocs\";\n\n"); + ts_code.push_str(&format!("export const {}: SidebarItem = {{\n", var_name)); + ts_code.push_str(&format!(" text: \"{}\",\n", root_name)); + ts_code.push_str(&format!(" link: \"/cli/{}\",\n", root_name)); + ts_code.push_str(" collapsed: false,\n"); + ts_code.push_str(" items: [\n"); + + for (idx, cmd) in top_level_commands.iter().enumerate() { + let is_last = idx == top_level_commands.len() - 1; + if let Some(item_str) = build_sidebar_item(root_name, cmd, &commands, 1, help_map, is_last) { + ts_code.push_str(&item_str); + } + } + + ts_code.push_str(" ]\n"); + ts_code.push_str("};\n\n"); + + Ok(ts_code) +} + +/// Builds a sidebar item for a command and its children. +/// Returns TypeScript code string. +fn build_sidebar_item( + root_name: &str, + cmd: &Cmd, + all_commands: &[&Cmd], + depth: usize, + help_map: &std::collections::HashMap, + is_last: bool, +) -> Option { + let full_cmd_name = cmd.to_string(); + let link_path = format!("/cli/{}", full_cmd_name.replace(" ", "/")); + + // Find all direct child commands (commands whose subcommands start with this command's subcommands) + let mut children: Vec<&Cmd> = all_commands + .iter() + .copied() + .filter(|other_cmd| { + other_cmd.subcommands.len() == cmd.subcommands.len() + 1 + && other_cmd.subcommands[..cmd.subcommands.len()] == cmd.subcommands[..] 
+ }) + .collect(); + + // Sort children by the order they appear in help output, not alphabetically + if children.len() > 1 { + // Get help output for this command to determine subcommand order + if let Some(help_output) = help_map.get(&full_cmd_name) { + let help_order = parse_sub_commands(help_output); + children.sort_by(|a, b| { + let a_name = a.subcommands.last().unwrap(); + let b_name = b.subcommands.last().unwrap(); + let a_pos = help_order.iter().position(|x| x == a_name).unwrap_or(usize::MAX); + let b_pos = help_order.iter().position(|x| x == b_name).unwrap_or(usize::MAX); + a_pos.cmp(&b_pos) + }); + } else { + // Fall back to alphabetical if we can't get help + children.sort_by(|a, b| { + a.subcommands.last().unwrap().cmp(b.subcommands.last().unwrap()) + }); + } + } + + let indent = " ".repeat(depth); + let mut item_str = String::new(); + + item_str.push_str(&format!("{}{{\n", indent)); + item_str.push_str(&format!("{} text: \"{}\",\n", indent, full_cmd_name)); + item_str.push_str(&format!("{} link: \"{}\"", indent, link_path)); + + if !children.is_empty() { + item_str.push_str(",\n"); + item_str.push_str(&format!("{} collapsed: true,\n", indent)); + item_str.push_str(&format!("{} items: [\n", indent)); + + for (idx, child_cmd) in children.iter().enumerate() { + let child_is_last = idx == children.len() - 1; + if let Some(child_str) = build_sidebar_item(root_name, child_cmd, all_commands, depth + 1, help_map, child_is_last) { + item_str.push_str(&child_str); + } + } + + item_str.push_str(&format!("{} ]\n", indent)); + if is_last { + item_str.push_str(&format!("{}}}\n", indent)); + } else { + item_str.push_str(&format!("{}}},\n", indent)); + } + } else { + item_str.push_str("\n"); + if is_last { + item_str.push_str(&format!("{}}}\n", indent)); + } else { + item_str.push_str(&format!("{}}},\n", indent)); + } + } + + Some(item_str) +} + /// Preprocesses the help output of a command. fn preprocess_help(s: &str) -> Cow<'_, str> { static REPLACEMENTS: LazyLock> = LazyLock::new(|| { diff --git a/docs/cli/update.sh b/docs/cli/update.sh index b75dbd789a..9ec5b9473c 100755 --- a/docs/cli/update.sh +++ b/docs/cli/update.sh @@ -3,8 +3,9 @@ set -eo pipefail DOCS_ROOT="$(dirname "$(dirname "$0")")" RETH=${1:-"$(dirname "$DOCS_ROOT")/target/debug/reth"} +OP_RETH=${2:-"$(dirname "$DOCS_ROOT")/target/debug/op-reth"} VOCS_PAGES_ROOT="$DOCS_ROOT/vocs/docs/pages" -echo "Generating CLI documentation for reth at $RETH" +echo "Generating CLI documentation for reth at $RETH and op-reth at $OP_RETH" echo "Using docs root: $DOCS_ROOT" echo "Using vocs pages root: $VOCS_PAGES_ROOT" @@ -13,9 +14,11 @@ cmd=( --root-dir "$DOCS_ROOT/" --root-indentation 2 --root-summary + --sidebar --verbose --out-dir "$VOCS_PAGES_ROOT/cli/" - "$RETH" + "$RETH" "$OP_RETH" ) echo "Running: $" "${cmd[*]}" "${cmd[@]}" + diff --git a/docs/crates/db.md b/docs/crates/db.md index abaa1c83bb..4e368cad77 100644 --- a/docs/crates/db.md +++ b/docs/crates/db.md @@ -8,12 +8,14 @@ The database is a central component to Reth, enabling persistent storage for dat Within Reth, the database is organized via "tables". A table is any struct that implements the `Table` trait. 
-[File: crates/storage/db-api/src/table.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/db-api/src/table.rs#L64-L93) +[File: crates/storage/db-api/src/table.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db-api/src/table.rs#L87-L101) ```rust ignore pub trait Table: Send + Sync + Debug + 'static { /// Return table name as it is present inside the MDBX. const NAME: &'static str; + /// Whether the table is also a `DUPSORT` table. + const DUPSORT: bool; /// Key element of `Table`. /// /// Sorting should be taken into account when encoding this. @@ -32,10 +34,10 @@ pub trait Value: Compress + Decompress + Serialize {} The `Table` trait has two generic values, `Key` and `Value`, which need to implement the `Key` and `Value` traits, respectively. The `Encode` trait is responsible for transforming data into bytes so it can be stored in the database, while the `Decode` trait transforms the bytes back into their original form. Similarly, the `Compress` and `Decompress` traits transform the data to and from a compressed format when storing or reading data from the database. -There are many tables within the node, all used to store different types of data from `Headers` to `Transactions` and more. Below is a list of all of the tables. You can follow [this link](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/db/src/tables/mod.rs#L274-L414) if you would like to see the table definitions for any of the tables below. +There are many tables within the node, all used to store different types of data from `Headers` to `Transactions` and more. Below is a list of all of the tables. You can follow [this link](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db-api/src/tables/mod.rs) if you would like to see the table definitions for any of the tables below. - CanonicalHeaders -- HeaderTerminalDifficulties +- HeaderTerminalDifficulties (deprecated) - HeaderNumbers - Headers - BlockBodyIndices @@ -56,26 +58,29 @@ There are many tables within the node, all used to store different types of data - HashedStorages - AccountsTrie - StoragesTrie +- AccountsTrieChangeSets +- StoragesTrieChangeSets - TransactionSenders - StageCheckpoints - StageCheckpointProgresses - PruneCheckpoints - VersionHistory - ChainState +- Metadata
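To make the `Table`/`Key`/`Value` relationship described above concrete, here is a small self-contained sketch. It is illustrative only: the real trait additionally requires `Decode`/`Decompress` and `Serialize` as shown above, plus the `DUPSORT` flag, and reth's actual table definitions live in `crates/storage/db-api`.

```rust
use std::fmt::Debug;

/// Keys must encode to bytes whose ordering matches the logical key ordering.
pub trait Encode {
    fn encode(&self) -> Vec<u8>;
}

/// Values must be convertible to (possibly compressed) bytes before storage.
pub trait Compress {
    fn compress(&self) -> Vec<u8>;
}

/// Simplified stand-in for the `Table` trait: a name plus a key and a value type.
pub trait Table: Send + Sync + Debug + 'static {
    const NAME: &'static str;
    type Key: Encode;
    type Value: Compress;
}

impl Encode for u64 {
    fn encode(&self) -> Vec<u8> {
        // Big-endian so byte order matches numeric order.
        self.to_be_bytes().to_vec()
    }
}

impl Compress for Vec<u8> {
    fn compress(&self) -> Vec<u8> {
        self.clone()
    }
}

/// A hypothetical table mapping block numbers to raw header bytes.
#[derive(Debug)]
pub struct ExampleHeaders;

impl Table for ExampleHeaders {
    const NAME: &'static str = "ExampleHeaders";
    type Key = u64;
    type Value = Vec<u8>;
}

fn main() {
    let key = 100u64.encode();
    println!("{} key bytes: {:?}", ExampleHeaders::NAME, key);
}
```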
## Database -Reth's database design revolves around its main [Database trait](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/db-api/src/database.rs#L8-L52), which implements the database's functionality across many types. Let's take a quick look at the `Database` trait and how it works. +Reth's database design revolves around its main [Database trait](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db-api/src/database.rs#L8-L52), which implements the database's functionality across many types. Let's take a quick look at the `Database` trait and how it works. -[File: crates/storage/db-api/src/database.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/db-api/src/database.rs#L8-L52) +[File: crates/storage/db-api/src/database.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db-api/src/database.rs#L8-L52) ```rust ignore /// Main Database trait that can open read-only and read-write transactions. /// /// Sealed trait which cannot be implemented by 3rd parties, exposed only for consumption. -pub trait Database: Send + Sync { +pub trait Database: Send + Sync + Debug { /// Read-Only database transaction type TX: DbTx + Send + Sync + Debug + 'static; /// Read-Write database transaction @@ -93,11 +98,11 @@ pub trait Database: Send + Sync { /// end of the execution. fn view(&self, f: F) -> Result where - F: FnOnce(&Self::TX) -> T, + F: FnOnce(&mut Self::TX) -> T, { - let tx = self.tx()?; + let mut tx = self.tx()?; - let res = f(&tx); + let res = f(&mut tx); tx.commit()?; Ok(res) @@ -119,50 +124,39 @@ pub trait Database: Send + Sync { } ``` -Any type that implements the `Database` trait can create a database transaction, as well as view or update existing transactions. As an example, let's revisit the `Transaction` struct from the `stages` crate. This struct contains a field named `db` which is a reference to a generic type `DB` that implements the `Database` trait. The `Transaction` struct can use the `db` field to store new headers, bodies and senders in the database. In the code snippet below, you can see the `Transaction::open()` method, which uses the `Database::tx_mut()` function to create a mutable transaction. - -[File: crates/stages/src/db.rs](https://github.com/paradigmxyz/reth/blob/00a49f5ee78b0a88fea409283e6bb9c96d4bb31e/crates/stages/src/db.rs#L28) +Any type that implements the `Database` trait can create a database transaction, as well as view or update existing transactions. For example, you can open a read-write transaction directly via `tx_mut()`, write to tables, and commit: ```rust ignore -pub struct Transaction<'this, DB: Database> { - /// A handle to the DB. - pub(crate) db: &'this DB, - tx: Option<::TXMut>, -} - -//--snip-- -impl<'this, DB> Transaction<'this, DB> -where - DB: Database, -{ - //--snip-- - - /// Open a new inner transaction. - pub fn open(&mut self) -> Result<(), Error> { - self.tx = Some(self.db.tx_mut()?); - Ok(()) - } -} +let tx = db.tx_mut()?; +tx.put::(block_number, block.hash())?; +tx.put::(block_number, header.clone())?; +tx.put::(block.hash(), block_number)?; +tx.commit()?; ``` The `Database` defines two associated types `TX` and `TXMut`. 
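As a rough sketch of how the two transaction types get used in practice — assuming a `db` handle implementing `Database` and the `tables` module from the snippets above; bounds and error handling are simplified:

```rust ignore
// Read-only access through `TX`: the `view` helper opens a transaction, runs the
// closure against it, and commits to free the transaction's memory pages.
let header = db.view(|tx| tx.get::<tables::Headers>(block_number))??;

// Read-write access through `TXMut`: open the transaction explicitly, write, commit.
let tx = db.tx_mut()?;
tx.put::<tables::HeaderNumbers>(block_hash, block_number)?;
tx.commit()?;
```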
-[File: crates/storage/db-api/src/database.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/db-api/src/database.rs#L54-L78) +[File: crates/storage/db-api/src/database.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db-api/src/database.rs) The `TX` type can be any type that implements the `DbTx` trait, which provides a set of functions to interact with read only transactions. -[File: crates/storage/db-api/src/transaction.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/db-api/src/transaction.rs#L7-L29) +[File: crates/storage/db-api/src/transaction.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db-api/src/transaction.rs#L11-L40) ```rust ignore /// Read only transaction -pub trait DbTx: Send + Sync { +pub trait DbTx: Debug + Send + Sync { /// Cursor type for this read-only transaction type Cursor: DbCursorRO + Send + Sync; /// `DupCursor` type for this read-only transaction type DupCursor: DbDupCursorRO + DbCursorRO + Send + Sync; - /// Get value + /// Get value by an owned key fn get(&self, key: T::Key) -> Result, DatabaseError>; + /// Get value by a reference to the encoded key (avoids cloning for raw keys) + fn get_by_encoded_key( + &self, + key: &::Encoded, + ) -> Result, DatabaseError>; /// Commit for read only transaction will consume and free transaction and allows /// freeing of memory pages fn commit(self) -> Result; @@ -181,7 +175,7 @@ pub trait DbTx: Send + Sync { The `TXMut` type can be any type that implements the `DbTxMut` trait, which provides a set of functions to interact with read/write transactions and the associated cursor types. -[File: crates/storage/db-api/src/transaction.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/db-api/src/transaction.rs#L31-L54) +[File: crates/storage/db-api/src/transaction.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db-api/src/transaction.rs) ```rust ignore /// Read write transaction that allows writing to database @@ -196,8 +190,12 @@ pub trait DbTxMut: Send + Sync { + Send + Sync; - /// Put value in database + /// Put value to database fn put(&self, key: T::Key, value: T::Value) -> Result<(), DatabaseError>; + /// Append value with the largest key to database (fast path) + fn append(&self, key: T::Key, value: T::Value) -> Result<(), DatabaseError> { + self.put::(key, value) + } /// Delete value from database fn delete(&self, key: T::Key, value: Option) -> Result; @@ -212,21 +210,16 @@ pub trait DbTxMut: Send + Sync { Let's take a look at the `DbTx` and `DbTxMut` traits in action. -Revisiting the `DatabaseProvider` struct as an example, the `DatabaseProvider::header_by_number()` function uses the `DbTx::get()` function to get a header from the `Headers` table. 
+Revisiting the `DatabaseProvider` struct as an example, the `DatabaseProvider::header_by_number()` function currently delegates to the static-file provider: -[File: crates/storage/provider/src/providers/database/provider.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/provider/src/providers/database/provider.rs#L1319-L1336) +[File: crates/storage/provider/src/providers/database/mod.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/provider/src/providers/database/mod.rs#L280-L282) ```rust ignore impl HeaderProvider for DatabaseProvider { //--snip-- - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { - self.static_file_provider.get_with_static_file_or_database( - StaticFileSegment::Headers, - num, - |static_file| static_file.header_by_number(num), - || Ok(self.tx.get::(num)?), - ) + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + self.static_file_provider.header_by_number(num) } //--snip-- @@ -235,7 +228,7 @@ impl HeaderProvider for DatabaseProvider { Notice that the function uses a [turbofish](https://techblog.tonsser.com/posts/what-is-rusts-turbofish) to define which table to use when passing in the `key` to the `DbTx::get()` function. Taking a quick look at the function definition, a generic `T` is defined that implements the `Table` trait mentioned at the beginning of this chapter. -[File: crates/storage/db-api/src/transaction.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/db-api/src/transaction.rs#L15) +[File: crates/storage/db-api/src/transaction.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db-api/src/transaction.rs) ```rust ignore fn get(&self, key: T::Key) -> Result, DatabaseError>; @@ -245,31 +238,29 @@ This design pattern is very powerful and allows Reth to use the methods availabl Let's take a look at a couple of examples before moving on. In the snippet below, the `DbTxMut::put()` method is used to insert values into the `CanonicalHeaders`, `Headers` and `HeaderNumbers` tables. -[File: crates/storage/provider/src/providers/database/provider.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/provider/src/providers/database/provider.rs#L2606-L2745) +[File: crates/storage/provider/src/providers/database/provider.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/provider/src/providers/database/provider.rs) ```rust ignore self.tx.put::(block_number, block.hash())?; -self.tx.put::(block_number, block.header.as_ref().clone())?; +self.tx.put::(block_number, block.header.clone())?; self.tx.put::(block.hash(), block_number)?; ``` Let's take a look at the `DatabaseProviderRW` struct, which is used to create a mutable transaction to interact with the database. The `DatabaseProviderRW` struct implements the `Deref` and `DerefMut` traits, which return a reference to its first field, which is a `TxMut`. Recall that `TxMut` is a generic type on the `Database` trait, which is defined as `type TXMut: DbTxMut + DbTx + Send + Sync;`, giving it access to all of the functions available to `DbTx`, including the `DbTx::get()` function. -This next example uses the `DbTx::cursor_read()` method to get a `Cursor`. The `Cursor` type provides a way to traverse through rows in a database table, one row at a time. A cursor enables the program to perform an operation (updating, deleting, etc) on each row in the table individually. 
The following code snippet gets a cursor for a few different tables in the database. +This next example shows reading headers from static files using the static-file provider. -[File: crates/static-file/static-file/src/segments/headers.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/static-file/static-file/src/segments/headers.rs#L22-L58) +[File: crates/storage/provider/src/providers/static_file/manager.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/provider/src/providers/static_file/manager.rs#L1680-L1690) ```rust ignore -# Get a cursor for the Headers table -let mut headers_cursor = provider.tx_ref().cursor_read::()?; -# Then we can walk the cursor to get the headers for a specific block range -let headers_walker = headers_cursor.walk_range(block_range.clone())?; +// Read headers for a specific block range from static files +let headers = provider.static_file_provider().headers_range(block_range.clone())?; ``` Let's look at an example of how cursors are used. The code snippet below contains the `unwind` method from the `BodyStage` defined in the `stages` crate. This function is responsible for unwinding any changes to the database if there is an error when executing the body stage within the Reth pipeline. -[File: crates/stages/stages/src/stages/bodies.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/stages/stages/src/stages/bodies.rs#L267-L345) +[File: crates/stages/stages/src/stages/bodies.rs](https://github.com/paradigmxyz/reth/blob/main/crates/stages/stages/src/stages/bodies.rs) ```rust ignore /// Unwind the stage. diff --git a/docs/crates/eth-wire.md b/docs/crates/eth-wire.md index cf62ab143e..3915e9241c 100644 --- a/docs/crates/eth-wire.md +++ b/docs/crates/eth-wire.md @@ -413,3 +413,6 @@ additional "satellite" protocols (e.g. `snap`) using negotiated `SharedCapabilit - Starting with ETH69: - `BlockRangeUpdate (0x11)` announces the historical block range served. - Receipts omit bloom: encoded as `Receipts69` instead of `Receipts`. +- Starting with ETH70 (EIP-7975): + - Status reuses the ETH69 format (no additional block range fields). + - Receipts continue to omit bloom; `GetReceipts`/`Receipts` add the eth/70 variants to support partial receipt ranges (`firstBlockReceiptIndex` and `lastBlockIncomplete`). diff --git a/docs/crates/stages.md b/docs/crates/stages.md index a6f107c2c0..fbc1641bd4 100644 --- a/docs/crates/stages.md +++ b/docs/crates/stages.md @@ -1,28 +1,56 @@ # Stages -The `stages` lib plays a central role in syncing the node, maintaining state, updating the database and more. The stages involved in the Reth pipeline are the `HeaderStage`, `BodyStage`, `SenderRecoveryStage`, and `ExecutionStage` (note that this list is non-exhaustive, and more pipeline stages will be added in the near future). Each of these stages is queued up and stored within the Reth pipeline. +The `stages` lib plays a central role in syncing the node, maintaining state, updating the database and more. The stages involved in the Reth pipeline are queued up and stored within the Reth pipeline. 
In the default configuration, the pipeline runs the following stages in order: + +- EraStage (optional, for ERA1 import) +- HeaderStage +- BodyStage +- SenderRecoveryStage +- ExecutionStage +- PruneSenderRecoveryStage (if pruning for sender recovery is enabled) +- MerkleStage (unwind) +- AccountHashingStage +- StorageHashingStage +- MerkleStage (execute) +- MerkleChangeSets +- TransactionLookupStage +- IndexStorageHistoryStage +- IndexAccountHistoryStage +- PruneStage +- FinishStage -When the node is first started, a new `Pipeline` is initialized and all of the stages are added into `Pipeline.stages`. Then, the `Pipeline::run` function is called, which starts the pipeline, executing all of the stages continuously in an infinite loop. This process syncs the chain, keeping everything up to date with the chain tip. +When the node is first started, a new `Pipeline` is initialized and all of the stages are added into `Pipeline.stages`. Then, the `Pipeline::run` function is called, which starts the pipeline, executing all of the stages continuously in an infinite loop. This process syncs the chain, keeping everything up to date with the chain tip. Each stage within the pipeline implements the `Stage` trait which provides function interfaces to get the stage id, execute the stage and unwind the changes to the database if there was an issue during the stage execution. -To get a better idea of what is happening at each part of the pipeline, let's walk through what is going on under the hood when a stage is executed, starting with `HeaderStage`. +To get a better idea of what is happening at each part of the pipeline, let's walk through what is going on under the hood when a stage is executed, starting with `EraStage`. + +
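+Before walking through the individual stages, here is a rough sketch of the shape of the `Stage` interface described above. The trait and type names are simplified for illustration and are not the exact signatures in `stages/api`:
+
+```rust ignore
+// Simplified, illustrative sketch of a pipeline stage (not the exact reth trait).
+trait Stage<Provider> {
+    /// Unique identifier of the stage, used for checkpoints and progress reporting.
+    fn id(&self) -> StageId;
+
+    /// Run the stage up to the target described by `input`, returning the new checkpoint.
+    fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result<ExecOutput, StageError>;
+
+    /// Revert the stage's changes down to the unwind target described by `input`.
+    fn unwind(&mut self, provider: &Provider, input: UnwindInput) -> Result<UnwindOutput, StageError>;
+}
+```
+
+The pipeline calls `execute` on each stage in order and, when an error or a reorg requires rolling back, calls `unwind` in reverse order.
+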
+ + +## EraStage + +The `EraStage` is an optional stage that imports pre-merge historical block data from [ERA1 files](https://github.com/eth-clients/e2store-format-specs/blob/main/formats/era1.md). ERA1 is a standardized format for storing Ethereum's historical chain data, allowing nodes to quickly bootstrap by importing pre-synced data instead of downloading it from peers. + +When enabled, the `EraStage` reads block headers and bodies from ERA1 files (either from a local directory or downloaded from a remote HTTP host) and writes them directly to static files. This provides a faster alternative to downloading historical data over P2P, especially useful for syncing the pre-merge portion of the chain. + +The stage processes ERA1 files sequentially, extracting headers and bodies from genesis up to the last pre-merge block. Note that receipts are not included in ERA1 files and will be generated later during the `ExecutionStage`. + +If no ERA1 source is configured or all ERA1 data has been imported, the stage simply passes through, allowing subsequent stages to continue with P2P-based syncing.
## HeaderStage - -The `HeaderStage` is responsible for syncing the block headers, validating the header integrity and writing the headers to the database. When the `execute()` function is called, the local head of the chain is updated to the most recent block height previously executed by the stage. At this point, the node status is also updated with that block's height, hash and total difficulty. These values are used during any new eth/65 handshakes. After updating the head, a stream is established with other peers in the network to sync the missing chain headers between the most recent state stored in the database and the chain tip. The `HeaderStage` contains a `downloader` attribute, which is a type that implements the `HeaderDownloader` trait. A `HeaderDownloader` is a `Stream` that returns batches of headers. +The `HeaderStage` is responsible for syncing the block headers, validating the header integrity and writing the headers to storage. When the stage runs, it determines the sync gap between the local head and the tip, then downloads headers in reverse (from tip down to the local head) using a `HeaderDownloader` stream. Headers are buffered in ETL collectors and then written to static files and to `HeaderNumbers` in the database in a single step. -The `HeaderStage` relies on the downloader stream to return the headers in descending order starting from the chain tip down to the latest block in the database. While other stages in the `Pipeline` start from the most recent block in the database up to the chain tip, the `HeaderStage` works in reverse to avoid [long-range attacks](https://messari.io/report/long-range-attack). When a node downloads headers in ascending order, it will not know if it is being subjected to a long-range attack until it reaches the most recent blocks. To combat this, the `HeaderStage` starts by getting the chain tip from the Consensus Layer, verifies the tip, and then walks backwards by the parent hash. +The `HeaderStage` relies on the downloader stream to return the headers in descending order starting from the chain tip down to the latest block in the database. While other stages in the `Pipeline` start from the most recent block in the database up to the chain tip, the `HeaderStage` works in reverse to avoid [long-range attacks](https://messari.io/report/long-range-attack). When a node downloads headers in ascending order, it will not know if it is being subjected to a long-range attack until it reaches the most recent blocks. To combat this, the `HeaderStage` starts by getting the chain tip, verifies the tip, and then walks backwards by the parent hash. - -Each header is then validated to ensure that it has the proper parent. Note that this is only a basic response validation, and the `HeaderDownloader` uses the `validate` method during the `stream`, so that each header is validated according to the consensus specification before the header is yielded from the stream. After this, each header is then written to the database. If a header is not valid or the stream encounters any other error, the error is propagated up through the stage execution, the changes to the database are unwound and the stage is resumed from the most recent valid state. +Each header is validated by the downloader before it is yielded, ensuring that it attaches correctly to its parent and conforms to consensus expectations. After download, headers are written to storage.
If a header is not valid or the stream encounters any other error, the error is propagated up through the stage execution, the changes to the database are unwound and the stage is resumed from the most recent valid state. -This process continues until all of the headers have been downloaded and written to the database. Finally, the total difficulty of the chain's head is updated and the function returns `Ok(ExecOutput { stage_progress, done: true })`, signaling that the header sync has been completed successfully. +This process continues until all of the headers have been downloaded and written to storage. Finally, the function returns, for example: `Ok(ExecOutput { checkpoint: StageCheckpoint::new(last_header_number).with_headers_stage_checkpoint(...), done: true })`, signaling that the header sync has been completed successfully.
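+
+As a rough sketch of the reverse walk described above, each header received from the downloader can be checked against the hash that its child committed to; the helper below is hypothetical and only illustrates the linking rule:
+
+```rust ignore
+// Hypothetical helper: headers arrive tip-first, so each header must be the
+// parent that the previously accepted header pointed to.
+fn attaches_descending(headers: &[SealedHeader], tip_hash: B256) -> bool {
+    let mut expected_hash = tip_hash;
+    for header in headers {
+        if header.hash() != expected_hash {
+            return false;
+        }
+        // The next header in the batch must hash to this header's parent.
+        expected_hash = header.parent_hash;
+    }
+    true
+}
+```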
@@ -36,9 +64,9 @@ The transactions root is a value that is calculated based on the transactions in When the `BodyStage` is looking at the headers to determine which block to download, it will skip the blocks where the `header.ommers_hash` and the `header.transaction_root` are empty, denoting that the block is empty as well. -Once the `BodyStage` determines which block bodies to fetch, a new `bodies_stream` is created which downloads all of the bodies from the `starting_block`, up until the `target_block` is specified. Each time the `bodies_stream` yields a value, a `SealedBlock` is created using the block header, the ommers hash and the newly downloaded block body. +Once the `BodyStage` determines which block bodies to fetch, a new `bodies_stream` is created which downloads all of the bodies from the `starting_block`, up until the `target_block` is specified. Each time the `bodies_stream` yields a value, a response is received indicating either an empty block or a full block body to be written. -The new block is then pre-validated, checking that the ommers hash and transactions root in the block header are the same in the block body. Following a successful pre-validation, the `BodyStage` loops through each transaction in the `block.body`, adding the transaction to the database. This process is repeated for every downloaded block body, with the `BodyStage` returning `Ok(ExecOutput { stage_progress, done: true })` signaling it successfully completed. +The `BodyStage` writes the received block bodies to storage. Validation of block body correctness relative to headers is enforced by the downloader and later by execution/consensus. This process is repeated for every downloaded block body, with the `BodyStage` returning `Ok(ExecOutput { checkpoint: StageCheckpoint::new(highest_block).with_entities_stage_checkpoint(...), done: ... })` signaling progress/completion.
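+
+To illustrate the empty-block check mentioned above, a header whose ommers hash and transactions root both equal the well-known empty commitments has no body to download. The constant import paths below are indicative and may vary between alloy versions:
+
+```rust ignore
+use alloy_consensus::constants::{EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH};
+
+// A body only needs to be fetched if the header commits to ommers or transactions.
+fn has_empty_body(header: &Header) -> bool {
+    header.ommers_hash == EMPTY_OMMER_ROOT_HASH && header.transactions_root == EMPTY_ROOT_HASH
+}
+```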
@@ -50,7 +78,7 @@ In an [ECDSA (Elliptic Curve Digital Signature Algorithm) signature](https://wik The "r" is the x-coordinate of a point on the elliptic curve that is calculated as part of the signature process. The "s" is the s-value that is calculated during the signature process. It is derived from the private key and the message being signed. Lastly, the "v" is the "recovery value" that is used to recover the public key from the signature, which is derived from the signature and the message that was signed. Together, the "r", "s", and "v" values make up an ECDSA signature, and they are used to verify the authenticity of the signed transaction. -Once the transaction signer has been recovered, the signer is then added to the database. This process is repeated for every transaction that was retrieved, and similarly to previous stages, `Ok(ExecOutput { stage_progress, done: true })` is returned to signal a successful completion of the stage. +Once the transaction signer has been recovered, the signer is then added to the database. This process is repeated for every transaction that was retrieved, and similarly to previous stages, `Ok(ExecOutput { checkpoint: StageCheckpoint::new(end_block).with_entities_stage_checkpoint(...), done: ... })` is returned to signal a successful completion of the stage.
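+
+As a standalone illustration of recovery itself, the sketch below uses the `k256` and `alloy-primitives` crates directly; reth's actual implementation sits behind its own crypto abstractions, so treat this as an example of the idea rather than the code path used by the stage:
+
+```rust ignore
+use alloy_primitives::{keccak256, Address};
+use k256::ecdsa::{RecoveryId, Signature, VerifyingKey};
+
+/// Recover the sender address from an ECDSA signature over the transaction's signing hash.
+fn recover_signer(sig: &Signature, v: RecoveryId, sighash: &[u8; 32]) -> Option<Address> {
+    // Recover the public key from (r, s) and the recovery value "v".
+    let key = VerifyingKey::recover_from_prehash(sighash, sig, v).ok()?;
+    // The address is the last 20 bytes of keccak256 over the uncompressed key (without the 0x04 prefix).
+    let uncompressed = key.to_encoded_point(false);
+    let hash = keccak256(&uncompressed.as_bytes()[1..]);
+    Some(Address::from_slice(&hash[12..]))
+}
+```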
@@ -58,15 +86,19 @@ Once the transaction signer has been recovered, the signer is then added to the Finally, after all headers, bodies and senders are added to the database, the `ExecutionStage` starts to execute. This stage is responsible for executing all of the transactions and updating the state stored in the database. -After all headers and their corresponding transactions have been executed, all of the resulting state changes are applied to the database, updating account balances, account bytecode and other state changes. After applying all of the execution state changes, if there was a block reward, it is applied to the validator's account. +After all headers and their corresponding transactions have been executed, all of the resulting state changes are applied to the database, updating account balances, account bytecode and other state changes. In post-Merge Ethereum, there is no inflationary block reward on the Execution Layer; fees/priority tips are handled within transaction execution. -At the end of the `execute()` function, a familiar value is returned, `Ok(ExecOutput { stage_progress, done: true })` signaling a successful completion of the `ExecutionStage`. +At the end of the `execute()` function, a familiar value is returned, `Ok(ExecOutput { checkpoint: StageCheckpoint::new(stage_progress).with_execution_stage_checkpoint(...), done: ... })` signaling a successful completion of the `ExecutionStage`.
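+
+To make the flow concrete, the stage's core loop can be pictured roughly like this; every identifier below (`executor`, `write_state`, and so on) is hypothetical shorthand, not reth's actual executor API:
+
+```rust ignore
+// Purely illustrative pseudocode for the execution loop.
+let mut accumulated_changes = StateChanges::default();
+for block in blocks_to_execute {
+    // Run every transaction in the block on top of the current state.
+    let outcome = executor.execute_block(&block)?;
+    // Collect balance, nonce, bytecode and storage updates plus receipts.
+    accumulated_changes.extend(outcome);
+}
+// Persist the accumulated changes to the database in one batch.
+provider.write_state(accumulated_changes)?;
+```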
## MerkleUnwindStage -The `MerkleUnwindStage` is responsible for unwinding the Merkle Patricia trie state when a reorg occurs or when there's a need to rollback state changes. This stage ensures that the state trie remains consistent with the chain's canonical history by reverting any state changes that need to be undone. It works closely with the `MerkleExecuteStage` to maintain state integrity. +The `MerkleUnwindStage` is responsible for unwinding the Merkle Patricia trie when reorgs occur or when there's a need to roll back state changes. This ensures the trie remains consistent with the chain's canonical history by reverting changes beyond the unwind point. It typically runs before the hashing stages to unwind trie state during reorgs or rollbacks. + +## MerkleExecuteStage + +The `MerkleExecuteStage` runs after `AccountHashingStage` and `StorageHashingStage` and is responsible for constructing or updating the state root based on the latest hashed account and storage data. It processes state changes from executed transactions and maintains the state root included in block headers.
@@ -82,9 +114,9 @@ The `StorageHashingStage` is responsible for computing hashes of contract storag
-## MerkleExecuteStage +## MerkleChangeSets -The `MerkleExecuteStage` handles the construction and updates of the Merkle Patricia trie, which is Ethereum's core data structure for storing state. This stage processes state changes from executed transactions and builds the corresponding branches in the state trie. It's responsible for maintaining the state root that's included in block headers. +The `MerkleChangeSets` stage consolidates and finalizes Merkle-related change sets after the `MerkleStage` execution mode has run, ensuring consistent trie updates and checkpoints.
@@ -106,6 +138,18 @@ The `IndexAccountHistoryStage` builds indices for account history, tracking how
+## PruneSenderRecoveryStage + +The `PruneSenderRecoveryStage` removes entries from `TransactionSenders` according to configured prune modes. It typically runs after `ExecutionStage` when pruning for sender recovery is enabled. + +
+ +## PruneStage + +The `PruneStage` performs pruning for the configured segments (such as history tables) based on `PruneModes`. It runs after hashing/merkle and history indexing stages. + +
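+As a rough illustration of how prune segments are expressed, something along the lines of the sketch below is handed to the pruner; the field and variant names mirror reth's `PruneModes`/`PruneMode` types but may not match the current API exactly:
+
+```rust ignore
+// Indicative sketch only; consult the pruning configuration docs for the exact fields.
+let prune_modes = PruneModes {
+    // Drop recovered senders entirely once they are no longer needed.
+    sender_recovery: Some(PruneMode::Full),
+    // Keep only the most recent 100,000 blocks of account and storage history.
+    account_history: Some(PruneMode::Distance(100_000)),
+    storage_history: Some(PruneMode::Distance(100_000)),
+    ..Default::default()
+};
+```
+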
+ ## FinishStage The `FinishStage` is the final stage in the pipeline that performs cleanup and verification tasks. It ensures that all previous stages have been completed successfully and that the node's state is consistent. This stage may also update various metrics and status indicators to reflect the completion of a sync cycle. @@ -116,4 +160,4 @@ The `FinishStage` is the final stage in the pipeline that performs cleanup and v Now that we have covered all of the stages that are currently included in the `Pipeline`, you know how the Reth client stays synced with the chain tip and updates the database with all of the new headers, bodies, senders and state changes. While this chapter provides an overview on how the pipeline stages work, the following chapters will dive deeper into the database, the networking stack and other exciting corners of the Reth codebase. Feel free to check out any parts of the codebase mentioned in this chapter, and when you are ready, the next chapter will dive into the `database`. -[Next Chapter]() +[Next Chapter](db.md) diff --git a/docs/design/database.md b/docs/design/database.md index e0874c2155..0d22bb3f9a 100644 --- a/docs/design/database.md +++ b/docs/design/database.md @@ -2,8 +2,8 @@ ## Abstractions -- We created a [Database trait abstraction](https://github.com/paradigmxyz/reth/blob/main/crates/cli/commands/src/db/mod.rs) using Rust Stable GATs which frees us from being bound to a single database implementation. We currently use MDBX, but are exploring [redb](https://github.com/cberner/redb) as an alternative. -- We then iterated on [`Transaction`](https://github.com/paradigmxyz/reth/blob/main/crates/storage/errors/src/db.rs) as a non-leaky abstraction with helpers for strictly-typed and unit-tested higher-level database abstractions. +- We created a [Database trait abstraction](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db-api/src/database.rs) using Rust Stable GATs which frees us from being bound to a single database implementation. We currently use MDBX, but are exploring [redb](https://github.com/cberner/redb) as an alternative. +- We then iterated on [`Transaction`](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db-api/src/transaction.rs) as a non-leaky abstraction with helpers for strictly-typed and unit-tested higher-level database abstractions. ## Codecs diff --git a/docs/design/review.md b/docs/design/review.md index 22a32ef904..1e639ad98c 100644 --- a/docs/design/review.md +++ b/docs/design/review.md @@ -4,7 +4,7 @@ This document contains some of our research on how other codebases designed vari ## P2P -* [`Sentry`](https://erigon.gitbook.io/erigon/advanced-usage/sentry), a pluggable p2p node following the [Erigon gRPC architecture](https://erigon.substack.com/p/current-status-of-silkworm-and-silkrpc): +* [`Sentry`](https://docs.erigon.tech/fundamentals/modules/sentry), a pluggable p2p node following the [Erigon gRPC architecture](https://erigon.substack.com/p/current-status-of-silkworm-and-silkrpc): * [`vorot93`](https://github.com/vorot93/) first started by implementing a rust devp2p stack in [`devp2p`](https://github.com/vorot93/devp2p) * vorot93 then started work on sentry, using devp2p, to satisfy the erigon architecture of modular components connected with gRPC. 
* The code from rust-ethereum/devp2p was merged into sentry, and rust-ethereum/devp2p was archived diff --git a/docs/repo/layout.md b/docs/repo/layout.md index 22aae4c351..93fbd28f3d 100644 --- a/docs/repo/layout.md +++ b/docs/repo/layout.md @@ -40,10 +40,19 @@ All binaries are stored in [`bin`](../../bin). These crates are related to the database. - [`storage/codecs`](../../crates/storage/codecs): Different storage codecs. -- [`storage/libmdbx-rs`](../../crates/storage/libmdbx-rs): Rust bindings for [libmdbx](https://libmdbx.dqdkfa.ru). A fork of an earlier Apache-licensed version of [libmdbx-rs][libmdbx-rs]. +- [`storage/codecs/derive`](../../crates/storage/codecs/derive): Derive macros for storage codecs. +- [`storage/libmdbx-rs`](../../crates/storage/libmdbx-rs): Rust bindings for [libmdbx](https://github.com/erthink/libmdbx). A fork of an earlier Apache-licensed version of [libmdbx-rs][libmdbx-rs]. - [`storage/db`](../../crates/storage/db): Strongly typed Database abstractions (transactions, cursors, tables) over lower level database backends. - Implemented backends: mdbx +- [`storage/db-api`](../../crates/storage/db-api): High-level database access traits used across storage crates. +- [`storage/db-common`](../../crates/storage/db-common): Shared database helpers and utilities. +- [`storage/db-models`](../../crates/storage/db-models): Typed database models for on-disk tables. +- [`storage/storage-api`](../../crates/storage/storage-api): Storage-facing APIs used by higher-level components. - [`storage/provider`](../../crates/storage/provider): Traits which provide a higher level api over the database to access the Ethereum state and historical data (transactions, blocks etc.) +- [`storage/rpc-provider`](../../crates/storage/rpc-provider): Storage provider implementations tailored for RPC access patterns. +- [`storage/errors`](../../crates/storage/errors): Common error types used by storage crates. +- [`storage/nippy-jar`](../../crates/storage/nippy-jar): Compressed columnar storage for historical data. +- [`storage/zstd-compressors`](../../crates/storage/zstd-compressors): Zstandard-based compressors used by storage components. ### Networking @@ -62,16 +71,21 @@ The networking component mainly lives in [`net/network`](../../crates/net/networ - Contains: Peer banlist. - [`net/network-api`](../../crates/net/network-api): Contains traits that define the networking component as a whole. Other components that interface with the network stack only need to depend on this crate for the relevant types. - [`net/nat`](../../crates/net/nat): A small helper crate that resolves the external IP of the running node using various methods (such as a manually provided IP, using UPnP etc.) +- [`net/network-types`](../../crates/net/network-types): Common networking types (peer identifiers, capabilities, messages, etc.). +- [`net/p2p`](../../crates/net/p2p): Higher-level P2P networking helpers and utilities. +- [`net/peers`](../../crates/net/peers): Peer set management, scoring and reputation support. #### Discovery - [`net/discv4`](../../crates/net/discv4): An implementation of the [discv4][discv4] protocol +- [`net/discv5`](../../crates/net/discv5): An implementation of the discv5 node discovery protocol. - [`net/dns`](../../crates/net/dns): An implementation of node discovery via DNS ([EIP-1459][eip-1459]) #### Protocol - [`net/eth-wire`](../../crates/net/eth-wire): Implements the `eth` wire protocol and the ``RLPx`` networking stack. 
- [`net/ecies`](../../crates/net/ecies): Implementation of the Elliptic Curve Integrated Encryption Scheme used in the ``RLPx`` handshake. +- [`net/eth-wire-types`](../../crates/net/eth-wire-types): Common types used by the `eth` wire protocol and RLPx networking stack. #### Downloaders @@ -81,7 +95,9 @@ The networking component mainly lives in [`net/network`](../../crates/net/networ Different consensus mechanisms. -- [`consensus/common`](../../crates/consensus/common): Common consensus functions and traits (e.g. fee calculation) +- [`consensus/common`](../../crates/consensus/common): Common consensus functions and traits (e.g. fee calculation). +- [`consensus/consensus`](../../crates/consensus/consensus): Core consensus engine interfaces and implementations. +- [`consensus/debug-client`](../../crates/consensus/debug-client): Utilities for interacting with the consensus engine in debugging and testing scenarios. ### Execution @@ -96,7 +112,9 @@ Crates related to transaction execution. These crates implement the main syncing drivers of reth. -- [`stages`](../../crates/stages): A pipelined sync, including implementation of various stages. This is used during initial sync and is faster than the tree-like structure for longer sync ranges. +- [`stages/api`](../../crates/stages/api): Public API for the staged sync pipeline. +- [`stages/stages`](../../crates/stages/stages): Implementations of the individual sync stages and the pipeline driver. This is used during initial sync and is faster than the tree-like structure for longer sync ranges. +- [`stages/types`](../../crates/stages/types): Shared types used by the staged sync pipeline. ### RPC @@ -146,6 +164,10 @@ Crates related to building and validating payloads (blocks). - [`transaction-pool`](../../crates/transaction-pool): An in-memory pending transactions pool. - [`payload/builder`](../../crates/payload/builder): Abstractions for payload building and a payload builder service that works with multiple kinds of payload resolvers. - [`payload/basic`](../../crates/payload/basic): A basic payload generator. +- [`payload/builder-primitives`](../../crates/payload/builder-primitives): Common primitives used by payload builders. +- [`payload/primitives`](../../crates/payload/primitives): Shared types used when building and validating payloads. +- [`payload/util`](../../crates/payload/util): Utility helpers used by payload building and validation logic. +- [`payload/validator`](../../crates/payload/validator): Payload validation helpers and utilities. ### Primitives @@ -159,6 +181,19 @@ These crates define primitive types or algorithms. Crates related to the Optimism rollup live in [optimism](../../crates/optimism/). +#### Ethereum-Specific Crates + +Ethereum mainnet-specific implementations and primitives live in `crates/ethereum/`. + +- **reth-ethereum-engine-primitives** (`crates/ethereum/engine-primitives`) + Ethereum-specific types for engine API, consensus messages, and payload attributes. + +- **reth-ethereum-forks** (`crates/ethereum/hardforks`) + Mainnet-specific hardfork definitions, activation schedules, and feature flags. + +- **reth-ethereum-payload-builder** (`crates/ethereum/payload`) + Ethereum-tailored payload builder implementing mainnet block production rules. + ### Misc Small utility crates. @@ -169,6 +204,12 @@ Small utility crates. 
- [`metrics/metrics-derive`](https://github.com/rkrasiuk/metrics-derive): A derive-style API for creating metrics - [`metrics/reth-node-metrics`](../../crates/node/metrics/): The implementation of metrics server, recorder, hooks. - [`tracing`](../../crates/tracing): A small utility crate to install a uniform [`tracing`][tracing] subscriber +- [`fs-util`](../../crates/fs-util): Small filesystem utilities shared across the node. +- [`tokio-util`](../../crates/tokio-util): Tokio-related utilities used by reth. +- [`static-file`](../../crates/static-file): Utilities for bundling and serving static files. +- [`tracing-otlp`](../../crates/tracing-otlp): Exporter for sending [`tracing`][tracing] spans to OTLP/OTel backends. +- [`errors`](../../crates/errors): Common error types shared across multiple crates. +- [`e2e-test-utils`](../../crates/e2e-test-utils): Helpers for end-to-end tests of the node. [libmdbx-rs]: https://crates.io/crates/libmdbx [discv4]: https://github.com/ethereum/devp2p/blob/master/discv4.md diff --git a/docs/vocs/docs/pages/cli/SUMMARY.mdx b/docs/vocs/docs/pages/cli/SUMMARY.mdx index 7f7012f4c1..02b174f29c 100644 --- a/docs/vocs/docs/pages/cli/SUMMARY.mdx +++ b/docs/vocs/docs/pages/cli/SUMMARY.mdx @@ -1,44 +1,104 @@ - - [`reth`](/cli/reth) - - [`reth node`](/cli/reth/node) - - [`reth init`](/cli/reth/init) - - [`reth init-state`](/cli/reth/init-state) - - [`reth import`](/cli/reth/import) - - [`reth import-era`](/cli/reth/import-era) - - [`reth export-era`](/cli/reth/export-era) - - [`reth dump-genesis`](/cli/reth/dump-genesis) - - [`reth db`](/cli/reth/db) - - [`reth db stats`](/cli/reth/db/stats) - - [`reth db list`](/cli/reth/db/list) - - [`reth db checksum`](/cli/reth/db/checksum) - - [`reth db diff`](/cli/reth/db/diff) - - [`reth db get`](/cli/reth/db/get) - - [`reth db get mdbx`](/cli/reth/db/get/mdbx) - - [`reth db get static-file`](/cli/reth/db/get/static-file) - - [`reth db drop`](/cli/reth/db/drop) - - [`reth db clear`](/cli/reth/db/clear) - - [`reth db clear mdbx`](/cli/reth/db/clear/mdbx) - - [`reth db clear static-file`](/cli/reth/db/clear/static-file) - - [`reth db repair-trie`](/cli/reth/db/repair-trie) - - [`reth db version`](/cli/reth/db/version) - - [`reth db path`](/cli/reth/db/path) - - [`reth download`](/cli/reth/download) - - [`reth stage`](/cli/reth/stage) - - [`reth stage run`](/cli/reth/stage/run) - - [`reth stage drop`](/cli/reth/stage/drop) - - [`reth stage dump`](/cli/reth/stage/dump) - - [`reth stage dump execution`](/cli/reth/stage/dump/execution) - - [`reth stage dump storage-hashing`](/cli/reth/stage/dump/storage-hashing) - - [`reth stage dump account-hashing`](/cli/reth/stage/dump/account-hashing) - - [`reth stage dump merkle`](/cli/reth/stage/dump/merkle) - - [`reth stage unwind`](/cli/reth/stage/unwind) - - [`reth stage unwind to-block`](/cli/reth/stage/unwind/to-block) - - [`reth stage unwind num-blocks`](/cli/reth/stage/unwind/num-blocks) - - [`reth p2p`](/cli/reth/p2p) - - [`reth p2p header`](/cli/reth/p2p/header) - - [`reth p2p body`](/cli/reth/p2p/body) - - [`reth p2p rlpx`](/cli/reth/p2p/rlpx) - - [`reth p2p rlpx ping`](/cli/reth/p2p/rlpx/ping) - - [`reth p2p bootnode`](/cli/reth/p2p/bootnode) - - [`reth config`](/cli/reth/config) - - [`reth prune`](/cli/reth/prune) - - [`reth re-execute`](/cli/reth/re-execute) \ No newline at end of file + - [`reth`](./reth.mdx) + - [`reth node`](./reth/node.mdx) + - [`reth init`](./reth/init.mdx) + - [`reth init-state`](./reth/init-state.mdx) + - [`reth import`](./reth/import.mdx) + - [`reth 
import-era`](./reth/import-era.mdx) + - [`reth export-era`](./reth/export-era.mdx) + - [`reth dump-genesis`](./reth/dump-genesis.mdx) + - [`reth db`](./reth/db.mdx) + - [`reth db stats`](./reth/db/stats.mdx) + - [`reth db list`](./reth/db/list.mdx) + - [`reth db checksum`](./reth/db/checksum.mdx) + - [`reth db diff`](./reth/db/diff.mdx) + - [`reth db get`](./reth/db/get.mdx) + - [`reth db get mdbx`](./reth/db/get/mdbx.mdx) + - [`reth db get static-file`](./reth/db/get/static-file.mdx) + - [`reth db drop`](./reth/db/drop.mdx) + - [`reth db clear`](./reth/db/clear.mdx) + - [`reth db clear mdbx`](./reth/db/clear/mdbx.mdx) + - [`reth db clear static-file`](./reth/db/clear/static-file.mdx) + - [`reth db repair-trie`](./reth/db/repair-trie.mdx) + - [`reth db static-file-header`](./reth/db/static-file-header.mdx) + - [`reth db static-file-header block`](./reth/db/static-file-header/block.mdx) + - [`reth db static-file-header path`](./reth/db/static-file-header/path.mdx) + - [`reth db version`](./reth/db/version.mdx) + - [`reth db path`](./reth/db/path.mdx) + - [`reth db settings`](./reth/db/settings.mdx) + - [`reth db settings get`](./reth/db/settings/get.mdx) + - [`reth db settings set`](./reth/db/settings/set.mdx) + - [`reth db settings set receipts_in_static_files`](./reth/db/settings/set/receipts_in_static_files.mdx) + - [`reth db settings set transaction_senders_in_static_files`](./reth/db/settings/set/transaction_senders_in_static_files.mdx) + - [`reth db account-storage`](./reth/db/account-storage.mdx) + - [`reth download`](./reth/download.mdx) + - [`reth stage`](./reth/stage.mdx) + - [`reth stage run`](./reth/stage/run.mdx) + - [`reth stage drop`](./reth/stage/drop.mdx) + - [`reth stage dump`](./reth/stage/dump.mdx) + - [`reth stage dump execution`](./reth/stage/dump/execution.mdx) + - [`reth stage dump storage-hashing`](./reth/stage/dump/storage-hashing.mdx) + - [`reth stage dump account-hashing`](./reth/stage/dump/account-hashing.mdx) + - [`reth stage dump merkle`](./reth/stage/dump/merkle.mdx) + - [`reth stage unwind`](./reth/stage/unwind.mdx) + - [`reth stage unwind to-block`](./reth/stage/unwind/to-block.mdx) + - [`reth stage unwind num-blocks`](./reth/stage/unwind/num-blocks.mdx) + - [`reth p2p`](./reth/p2p.mdx) + - [`reth p2p header`](./reth/p2p/header.mdx) + - [`reth p2p body`](./reth/p2p/body.mdx) + - [`reth p2p rlpx`](./reth/p2p/rlpx.mdx) + - [`reth p2p rlpx ping`](./reth/p2p/rlpx/ping.mdx) + - [`reth p2p bootnode`](./reth/p2p/bootnode.mdx) + - [`reth config`](./reth/config.mdx) + - [`reth prune`](./reth/prune.mdx) + - [`reth re-execute`](./reth/re-execute.mdx) + - [`op-reth`](./op-reth.mdx) + - [`op-reth node`](./op-reth/node.mdx) + - [`op-reth init`](./op-reth/init.mdx) + - [`op-reth init-state`](./op-reth/init-state.mdx) + - [`op-reth import-op`](./op-reth/import-op.mdx) + - [`op-reth import-receipts-op`](./op-reth/import-receipts-op.mdx) + - [`op-reth dump-genesis`](./op-reth/dump-genesis.mdx) + - [`op-reth db`](./op-reth/db.mdx) + - [`op-reth db stats`](./op-reth/db/stats.mdx) + - [`op-reth db list`](./op-reth/db/list.mdx) + - [`op-reth db checksum`](./op-reth/db/checksum.mdx) + - [`op-reth db diff`](./op-reth/db/diff.mdx) + - [`op-reth db get`](./op-reth/db/get.mdx) + - [`op-reth db get mdbx`](./op-reth/db/get/mdbx.mdx) + - [`op-reth db get static-file`](./op-reth/db/get/static-file.mdx) + - [`op-reth db drop`](./op-reth/db/drop.mdx) + - [`op-reth db clear`](./op-reth/db/clear.mdx) + - [`op-reth db clear mdbx`](./op-reth/db/clear/mdbx.mdx) + - [`op-reth db clear 
static-file`](./op-reth/db/clear/static-file.mdx) + - [`op-reth db repair-trie`](./op-reth/db/repair-trie.mdx) + - [`op-reth db static-file-header`](./op-reth/db/static-file-header.mdx) + - [`op-reth db static-file-header block`](./op-reth/db/static-file-header/block.mdx) + - [`op-reth db static-file-header path`](./op-reth/db/static-file-header/path.mdx) + - [`op-reth db version`](./op-reth/db/version.mdx) + - [`op-reth db path`](./op-reth/db/path.mdx) + - [`op-reth db settings`](./op-reth/db/settings.mdx) + - [`op-reth db settings get`](./op-reth/db/settings/get.mdx) + - [`op-reth db settings set`](./op-reth/db/settings/set.mdx) + - [`op-reth db settings set receipts_in_static_files`](./op-reth/db/settings/set/receipts_in_static_files.mdx) + - [`op-reth db settings set transaction_senders_in_static_files`](./op-reth/db/settings/set/transaction_senders_in_static_files.mdx) + - [`op-reth db account-storage`](./op-reth/db/account-storage.mdx) + - [`op-reth stage`](./op-reth/stage.mdx) + - [`op-reth stage run`](./op-reth/stage/run.mdx) + - [`op-reth stage drop`](./op-reth/stage/drop.mdx) + - [`op-reth stage dump`](./op-reth/stage/dump.mdx) + - [`op-reth stage dump execution`](./op-reth/stage/dump/execution.mdx) + - [`op-reth stage dump storage-hashing`](./op-reth/stage/dump/storage-hashing.mdx) + - [`op-reth stage dump account-hashing`](./op-reth/stage/dump/account-hashing.mdx) + - [`op-reth stage dump merkle`](./op-reth/stage/dump/merkle.mdx) + - [`op-reth stage unwind`](./op-reth/stage/unwind.mdx) + - [`op-reth stage unwind to-block`](./op-reth/stage/unwind/to-block.mdx) + - [`op-reth stage unwind num-blocks`](./op-reth/stage/unwind/num-blocks.mdx) + - [`op-reth p2p`](./op-reth/p2p.mdx) + - [`op-reth p2p header`](./op-reth/p2p/header.mdx) + - [`op-reth p2p body`](./op-reth/p2p/body.mdx) + - [`op-reth p2p rlpx`](./op-reth/p2p/rlpx.mdx) + - [`op-reth p2p rlpx ping`](./op-reth/p2p/rlpx/ping.mdx) + - [`op-reth p2p bootnode`](./op-reth/p2p/bootnode.mdx) + - [`op-reth config`](./op-reth/config.mdx) + - [`op-reth prune`](./op-reth/prune.mdx) + - [`op-reth re-execute`](./op-reth/re-execute.mdx) \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth.md b/docs/vocs/docs/pages/cli/op-reth.md index 2b56fa662c..27738005fb 100644 --- a/docs/vocs/docs/pages/cli/op-reth.md +++ b/docs/vocs/docs/pages/cli/op-reth.md @@ -78,7 +78,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off Display: @@ -93,4 +93,4 @@ Display: -q, --quiet Silence all log output -``` \ No newline at end of file +``` diff --git a/docs/vocs/docs/pages/cli/op-reth.mdx b/docs/vocs/docs/pages/cli/op-reth.mdx new file mode 100644 index 0000000000..d0bbd53e2b --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth.mdx @@ -0,0 +1,156 @@ +# op-reth + +Reth + +```bash +$ op-reth --help +``` +```txt +Usage: op-reth [OPTIONS] + +Commands: + node Start the node + init Initialize the database from a genesis file + init-state Initialize the database from a state dump file + import-op This syncs RLP encoded OP blocks below Bedrock from a file, without executing + import-receipts-op This imports RLP encoded receipts from a file + dump-genesis Dumps genesis block JSON configuration to stdout + db Database debugging utilities + stage Manipulate individual stages + p2p P2P Debugging utilities + config Write config to stdout + prune Prune according to the configuration without any limits + re-execute Re-execute blocks in parallel to verify historical sync correctness + help Print 
this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + + -V, --version + Print version + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. 
+ + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/config.mdx b/docs/vocs/docs/pages/cli/op-reth/config.mdx new file mode 100644 index 0000000000..a82594b7fd --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/config.mdx @@ -0,0 +1,144 @@ +# op-reth config + +Write config to stdout + +```bash +$ op-reth config --help +``` +```txt +Usage: op-reth config [OPTIONS] + +Options: + --config + The path to the configuration file to use. + + --default + Show the default config + + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. 
+ + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db.mdx b/docs/vocs/docs/pages/cli/op-reth/db.mdx new file mode 100644 index 0000000000..be7e33ec07 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db.mdx @@ -0,0 +1,258 @@ +# op-reth db + +Database debugging utilities + +```bash +$ op-reth db --help +``` +```txt +Usage: op-reth db [OPTIONS] + +Commands: + stats Lists all the tables, their entry count and their size + list Lists the contents of a table + checksum Calculates the content checksum of a table + diff Create a diff between two database tables or two entire databases + get Gets the content of a table for the given key + drop Deletes all database entries + clear Deletes all table entries + repair-trie Verifies trie consistency and outputs any inconsistencies + static-file-header Reads and displays the static file segment header + version Lists current and local database versions + path Returns the full database path + settings Manage storage settings + account-storage Gets storage size information for an account + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --db.max-readers + Maximum number of readers allowed to access the database concurrently + + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. 
+ + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. 
+ + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/account-storage.mdx b/docs/vocs/docs/pages/cli/op-reth/db/account-storage.mdx new file mode 100644 index 0000000000..4beaf21624 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/account-storage.mdx @@ -0,0 +1,152 @@ +# op-reth db account-storage + +Gets storage size information for an account + +```bash +$ op-reth db account-storage --help +``` +```txt +Usage: op-reth db account-storage [OPTIONS]

+ +Arguments: +
+ The account address to check storage for + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/checksum.mdx b/docs/vocs/docs/pages/cli/op-reth/db/checksum.mdx new file mode 100644 index 0000000000..c45597647f --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/checksum.mdx @@ -0,0 +1,161 @@ +# op-reth db checksum + +Calculates the content checksum of a table + +```bash +$ op-reth db checksum --help +``` +```txt +Usage: op-reth db checksum [OPTIONS] + +Arguments: +
+ The table name + +Options: + --start-key + The start of the range to checksum + + --end-key + The end of the range to checksum + + --limit + The maximum number of records that are queried and used to compute the checksum + + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. 
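# Sketch of a checksum invocation using the options documented above; `Headers` is
# an assumed example table name and 1000 is an arbitrary record limit:
op-reth db checksum Headers --limit 1000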
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/clear.mdx b/docs/vocs/docs/pages/cli/op-reth/db/clear.mdx new file mode 100644 index 0000000000..2700544377 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/clear.mdx @@ -0,0 +1,153 @@ +# op-reth db clear + +Deletes all table entries + +```bash +$ op-reth db clear --help +``` +```txt +Usage: op-reth db clear [OPTIONS] + +Commands: + mdbx Deletes all database table entries + static-file Deletes all static file segment entries + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
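# Sketch of clearing a single static file segment via the subcommands listed above;
# `headers` is one of the segment values documented for `db clear static-file`:
op-reth db clear static-file headers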
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/clear/mdbx.mdx b/docs/vocs/docs/pages/cli/op-reth/db/clear/mdbx.mdx new file mode 100644 index 0000000000..2468f94ca2 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/clear/mdbx.mdx @@ -0,0 +1,152 @@ +# op-reth db clear mdbx + +Deletes all database table entries + +```bash +$ op-reth db clear mdbx --help +``` +```txt +Usage: op-reth db clear mdbx [OPTIONS]
+ +Arguments: +
+ + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/op-reth/db/clear/static-file.mdx new file mode 100644 index 0000000000..18721bb9c8 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/clear/static-file.mdx @@ -0,0 +1,156 @@ +# op-reth db clear static-file + +Deletes all static file segment entries + +```bash +$ op-reth db clear static-file --help +``` +```txt +Usage: op-reth db clear static-file [OPTIONS] + +Arguments: + + Possible values: + - headers: Static File segment responsible for the `CanonicalHeaders`, `Headers`, `HeaderTerminalDifficulties` tables + - transactions: Static File segment responsible for the `Transactions` table + - receipts: Static File segment responsible for the `Receipts` table + - transaction-senders: Static File segment responsible for the `TransactionSenders` table + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
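# Sketch of the OTLP tracing flags described above on any op-reth command, assuming
# a local collector; endpoint, protocol and sample ratio follow the documented syntax:
op-reth db path --tracing-otlp=http://localhost:4318/v1/traces --tracing-otlp-protocol http --tracing-otlp.sample-ratio=0.1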
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/diff.mdx b/docs/vocs/docs/pages/cli/op-reth/db/diff.mdx new file mode 100644 index 0000000000..82be159fb5 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/diff.mdx @@ -0,0 +1,204 @@ +# op-reth db diff + +Create a diff between two database tables or two entire databases + +```bash +$ op-reth db diff --help +``` +```txt +Usage: op-reth db diff [OPTIONS] --secondary-datadir --output + +Options: + --secondary-datadir + The path to the data dir for all reth files and subdirectories. + + -h, --help + Print help (see a summary with '-h') + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --db.max-readers + Maximum number of readers allowed to access the database concurrently + + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + + --table
+ The table name to diff. If not specified, all tables are diffed. + + --output + The output directory for the diff report. + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. 
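# Sketch of a diff run using the required flags above; both paths are placeholder
# assumptions, and --table restricts the comparison to one (assumed) table:
op-reth db diff --secondary-datadir /path/to/other-datadir --output /tmp/reth-db-diff --table Headers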
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/drop.mdx b/docs/vocs/docs/pages/cli/op-reth/db/drop.mdx new file mode 100644 index 0000000000..51d5e6d274 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/drop.mdx @@ -0,0 +1,151 @@ +# op-reth db drop + +Deletes all database entries + +```bash +$ op-reth db drop --help +``` +```txt +Usage: op-reth db drop [OPTIONS] + +Options: + -f, --force + Bypasses the interactive confirmation and drops the database directly + + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
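# Sketch: drop the database non-interactively using the --force flag documented above:
op-reth db drop --force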
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/get.mdx b/docs/vocs/docs/pages/cli/op-reth/db/get.mdx new file mode 100644 index 0000000000..3bd6d445fc --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/get.mdx @@ -0,0 +1,153 @@ +# op-reth db get + +Gets the content of a table for the given key + +```bash +$ op-reth db get --help +``` +```txt +Usage: op-reth db get [OPTIONS] + +Commands: + mdbx Gets the content of a database table for the given key + static-file Gets the content of a static file segment for the given key + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/get/mdbx.mdx b/docs/vocs/docs/pages/cli/op-reth/db/get/mdbx.mdx new file mode 100644 index 0000000000..3e0f225bc8 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/get/mdbx.mdx @@ -0,0 +1,167 @@ +# op-reth db get mdbx + +Gets the content of a database table for the given key + +```bash +$ op-reth db get mdbx --help +``` +```txt +Usage: op-reth db get mdbx [OPTIONS]
[SUBKEY] [END_KEY] [END_SUBKEY] + +Arguments: +
+ + + + The key to get content for + + [SUBKEY] + The subkey to get content for + + [END_KEY] + Optional end key for range query (exclusive upper bound) + + [END_SUBKEY] + Optional end subkey for range query (exclusive upper bound) + +Options: + --raw + Output bytes instead of human-readable decoded value + + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) 
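# Sketch of fetching a single entry with the arguments above; `Headers` and the
# numeric key (a block number) are assumptions about table name and key encoding:
op-reth db get mdbx Headers 1000 --raw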
+ + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/op-reth/db/get/static-file.mdx new file mode 100644 index 0000000000..1cf808ced2 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/get/static-file.mdx @@ -0,0 +1,162 @@ +# op-reth db get static-file + +Gets the content of a static file segment for the given key + +```bash +$ op-reth db get static-file --help +``` +```txt +Usage: op-reth db get static-file [OPTIONS] + +Arguments: + + Possible values: + - headers: Static File segment responsible for the `CanonicalHeaders`, `Headers`, `HeaderTerminalDifficulties` tables + - transactions: Static File segment responsible for the `Transactions` table + - receipts: Static File segment responsible for the `Receipts` table + - transaction-senders: Static File segment responsible for the `TransactionSenders` table + + + The key to get content for + +Options: + --raw + Output bytes instead of human-readable decoded value + + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. 
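# Sketch using one of the segment values listed above; the numeric key (assumed to
# be a block number for the headers segment) is an example value:
op-reth db get static-file headers 1000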
+ + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/list.mdx b/docs/vocs/docs/pages/cli/op-reth/db/list.mdx new file mode 100644 index 0000000000..3c45dac546 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/list.mdx @@ -0,0 +1,194 @@ +# op-reth db list + +Lists the contents of a table + +```bash +$ op-reth db list --help +``` +```txt +Usage: op-reth db list [OPTIONS]
+ +Arguments: +
+ The table name + +Options: + -s, --skip + Skip first N entries + + [default: 0] + + -r, --reverse + Reverse the order of the entries. If enabled last table entries are read + + -l, --len + How many items to take from the walker + + [default: 5] + + --search + Search parameter for both keys and values. Prefix it with `0x` to search for binary data, and text otherwise. + + ATTENTION! For compressed tables (`Transactions` and `Receipts`), there might be missing results since the search uses the raw uncompressed value from the database. + + --min-row-size + Minimum size of row in bytes + + [default: 0] + + --min-key-size + Minimum size of key in bytes + + [default: 0] + + --min-value-size + Minimum size of value in bytes + + [default: 0] + + -c, --count + Returns the number of rows found + + -j, --json + Dump as JSON instead of using TUI + + --raw + Output bytes instead of human-readable decoded value + + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. 
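# Sketch of paging through a table with the flags above; `Headers` is an assumed
# example table name:
op-reth db list Headers --skip 100 --len 10 --json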
If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/path.mdx b/docs/vocs/docs/pages/cli/op-reth/db/path.mdx new file mode 100644 index 0000000000..3fd2245d9a --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/path.mdx @@ -0,0 +1,148 @@ +# op-reth db path + +Returns the full database path + +```bash +$ op-reth db path --help +``` +```txt +Usage: op-reth db path [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. 
+ + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/repair-trie.mdx b/docs/vocs/docs/pages/cli/op-reth/db/repair-trie.mdx new file mode 100644 index 0000000000..371c2e0e3a --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/repair-trie.mdx @@ -0,0 +1,156 @@ +# op-reth db repair-trie + +Verifies trie consistency and outputs any inconsistencies + +```bash +$ op-reth db repair-trie --help +``` +```txt +Usage: op-reth db repair-trie [OPTIONS] + +Options: + --dry-run + Only show inconsistencies without making any repairs + + --metrics + Enable Prometheus metrics. + + The metrics will be served at the given interface and port. + + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/settings.mdx b/docs/vocs/docs/pages/cli/op-reth/db/settings.mdx new file mode 100644 index 0000000000..5550c3c784 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/settings.mdx @@ -0,0 +1,153 @@ +# op-reth db settings + +Manage storage settings + +```bash +$ op-reth db settings --help +``` +```txt +Usage: op-reth db settings [OPTIONS] + +Commands: + get Get current storage settings from database + set Set storage settings in database + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. 
+ Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. 
+ + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/settings/get.mdx b/docs/vocs/docs/pages/cli/op-reth/db/settings/get.mdx new file mode 100644 index 0000000000..5f214b383a --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/settings/get.mdx @@ -0,0 +1,148 @@ +# op-reth db settings get + +Get current storage settings from database + +```bash +$ op-reth db settings get --help +``` +```txt +Usage: op-reth db settings get [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/settings/set.mdx b/docs/vocs/docs/pages/cli/op-reth/db/settings/set.mdx new file mode 100644 index 0000000000..fda9c48637 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/settings/set.mdx @@ -0,0 +1,153 @@ +# op-reth db settings set + +Set storage settings in database + +```bash +$ op-reth db settings set --help +``` +```txt +Usage: op-reth db settings set [OPTIONS] + +Commands: + receipts_in_static_files Store receipts in static files instead of the database + transaction_senders_in_static_files Store transaction senders in static files instead of the database + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. 
+ + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/settings/set/receipts_in_static_files.mdx b/docs/vocs/docs/pages/cli/op-reth/db/settings/set/receipts_in_static_files.mdx new file mode 100644 index 0000000000..490ee06ce9 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/settings/set/receipts_in_static_files.mdx @@ -0,0 +1,152 @@ +# op-reth db settings set receipts_in_static_files + +Store receipts in static files instead of the database + +```bash +$ op-reth db settings set receipts_in_static_files --help +``` +```txt +Usage: op-reth db settings set receipts_in_static_files [OPTIONS] + +Arguments: + + [possible values: true, false] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/settings/set/transaction_senders_in_static_files.mdx b/docs/vocs/docs/pages/cli/op-reth/db/settings/set/transaction_senders_in_static_files.mdx new file mode 100644 index 0000000000..1947c57293 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/settings/set/transaction_senders_in_static_files.mdx @@ -0,0 +1,152 @@ +# op-reth db settings set transaction_senders_in_static_files + +Store transaction senders in static files instead of the database + +```bash +$ op-reth db settings set transaction_senders_in_static_files --help +``` +```txt +Usage: op-reth db settings set transaction_senders_in_static_files [OPTIONS] + +Arguments: + + [possible values: true, false] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/static-file-header.mdx b/docs/vocs/docs/pages/cli/op-reth/db/static-file-header.mdx new file mode 100644 index 0000000000..8b5aae0156 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/static-file-header.mdx @@ -0,0 +1,153 @@ +# op-reth db static-file-header + +Reads and displays the static file segment header + +```bash +$ op-reth db static-file-header --help +``` +```txt +Usage: op-reth db static-file-header [OPTIONS] + +Commands: + block Query by segment and block number + path Query by path to static file + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. 
+ + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/block.mdx b/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/block.mdx new file mode 100644 index 0000000000..5374651ae2 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/block.mdx @@ -0,0 +1,161 @@ +# op-reth db static-file-header block + +Query by segment and block number + +```bash +$ op-reth db static-file-header block --help +``` +```txt +Usage: op-reth db static-file-header block [OPTIONS] + +Arguments: + + Static file segment + + Possible values: + - headers: Static File segment responsible for the `CanonicalHeaders`, `Headers`, `HeaderTerminalDifficulties` tables + - transactions: Static File segment responsible for the `Transactions` table + - receipts: Static File segment responsible for the `Receipts` table + - transaction-senders: Static File segment responsible for the `TransactionSenders` table + + + Block number to query + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/path.mdx b/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/path.mdx new file mode 100644 index 0000000000..abe179a08f --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/static-file-header/path.mdx @@ -0,0 +1,152 @@ +# op-reth db static-file-header path + +Query by path to static file + +```bash +$ op-reth db static-file-header path --help +``` +```txt +Usage: op-reth db static-file-header path [OPTIONS] + +Arguments: + + Path to the static file + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/stats.mdx b/docs/vocs/docs/pages/cli/op-reth/db/stats.mdx new file mode 100644 index 0000000000..0b8efa30ad --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/stats.mdx @@ -0,0 +1,164 @@ +# op-reth db stats + +Lists all the tables, their entry count and their size + +```bash +$ op-reth db stats --help +``` +```txt +Usage: op-reth db stats [OPTIONS] + +Options: + --skip-consistency-checks + Skip consistency checks for static files + + --detailed-sizes + Show only the total size for static files + + --detailed-segments + Show detailed information per static file segment + + --checksum + Show a checksum of each table in the database. + + WARNING: this option will take a long time to run, as it needs to traverse and hash the entire database. + + For individual table checksums, use the `reth db checksum` command. + + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. 
+ + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/db/version.mdx b/docs/vocs/docs/pages/cli/op-reth/db/version.mdx new file mode 100644 index 0000000000..56250ad6e1 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/db/version.mdx @@ -0,0 +1,148 @@ +# op-reth db version + +Lists current and local database versions + +```bash +$ op-reth db version --help +``` +```txt +Usage: op-reth db version [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/dump-genesis.mdx b/docs/vocs/docs/pages/cli/op-reth/dump-genesis.mdx new file mode 100644 index 0000000000..c91b9ae389 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/dump-genesis.mdx @@ -0,0 +1,147 @@ +# op-reth dump-genesis + +Dumps genesis block JSON configuration to stdout + +```bash +$ op-reth dump-genesis --help +``` +```txt +Usage: op-reth dump-genesis [OPTIONS] + +Options: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. 
+ + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/import-op.mdx b/docs/vocs/docs/pages/cli/op-reth/import-op.mdx new file mode 100644 index 0000000000..d60abada8d --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/import-op.mdx @@ -0,0 +1,251 @@ +# op-reth import-op + +This syncs RLP encoded OP blocks below Bedrock from a file, without executing + +```bash +$ op-reth import-op --help +``` +```txt +Usage: op-reth import-op [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. 
assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --db.max-readers + Maximum number of readers allowed to access the database concurrently + + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --chunk-len + Chunk byte length to read from file. + + + The path to a block file for import. + + The online stages (headers and bodies) are replaced by a file import, after which the + remaining stages are executed. + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx b/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx new file mode 100644 index 0000000000..85be78419d --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/import-receipts-op.mdx @@ -0,0 +1,251 @@ +# op-reth import-receipts-op + +This imports RLP encoded receipts from a file + +```bash +$ op-reth import-receipts-op --help +``` +```txt +Usage: op-reth import-receipts-op [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. 
+ + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --db.max-readers + Maximum number of readers allowed to access the database concurrently + + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --chunk-len + Chunk byte length to read from file. + + + The path to a receipts file for import. File must use `OpGethReceiptFileCodec` (used for + exporting OP chain segment below Bedrock block via testinprod/op-geth). + + + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/init-state.mdx b/docs/vocs/docs/pages/cli/op-reth/init-state.mdx new file mode 100644 index 0000000000..f00042eb94 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/init-state.mdx @@ -0,0 +1,281 @@ +# op-reth init-state + +Initialize the database from a state dump file + +```bash +$ op-reth init-state --help +``` +```txt +Usage: op-reth init-state [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --db.max-readers + Maximum number of readers allowed to access the database concurrently + + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. 
+ + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --without-evm + Specifies whether to initialize the state without relying on EVM historical data. + + When enabled, and before inserting the state, it creates a dummy chain up to the last EVM block specified. It then appends the first provided block. + + - **Note**: **Do not** import receipts and blocks beforehand, or this will fail or be ignored. + + --header + Header file containing the header in an RLP encoded format. + + --header-hash + Hash of the header. + + --without-ovm + Specifies whether to initialize the state without relying on OVM or EVM historical data. + + When enabled, and before inserting the state, it creates a dummy chain up to the last OVM block (#105235062) (14GB / 90 seconds). It then appends the Bedrock block. This is hardcoded for OP mainnet; for other OP chains you will need to pass in a header. + + - **Note**: **Do not** import receipts and blocks beforehand, or this will fail or be ignored. + + + JSONL file with state dump. + + Must contain accounts in the following format; additional account fields are ignored. Must + also contain { "root": \ } as the first line. + { + "balance": "\", + "nonce": \, + "code": "\", + "storage": { + "\": "\", + .. + }, + "address": "\", + } + + Allows init at a non-genesis block. Caution! Blocks must be manually imported up until + and including the non-genesis block to init chain at. See 'import' command. + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/init.mdx b/docs/vocs/docs/pages/cli/op-reth/init.mdx new file mode 100644 index 0000000000..03384822a9 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/init.mdx @@ -0,0 +1,242 @@ +# op-reth init + +Initialize the database from a genesis file + +```bash +$ op-reth init --help +``` +```txt +Usage: op-reth init [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --db.max-readers + Maximum number of readers allowed to access the database concurrently + + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. 
+ + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. 
+ + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/node.mdx b/docs/vocs/docs/pages/cli/op-reth/node.mdx new file mode 100644 index 0000000000..ee1adfee78 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/node.mdx @@ -0,0 +1,1175 @@ +# op-reth node + +Start the node + +```bash +$ op-reth node --help +``` +```txt +Usage: op-reth node [OPTIONS] + +Options: + --config + The path to the configuration file to use. + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + + --instance + Add a new instance of a node. + + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. + + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. + + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 - `IPC_PATH`: default + `-instance` + + --with-unused-ports + Sets all ports to unused, allowing the OS to choose random unused ports when sockets are bound. + + Mutually exclusive with `--instance`. + + -h, --help + Print help (see a summary with '-h') + +Metrics: + --metrics + Enable Prometheus metrics. + + The metrics will be served at the given interface and port. + + --metrics.prometheus.push.url + URL for pushing Prometheus metrics to a push gateway. + + If set, the node will periodically push metrics to the specified push gateway URL. + + --metrics.prometheus.push.interval + Interval in seconds for pushing metrics to push gateway. + + Default: 5 seconds + + [default: 5] + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --datadir.rocksdb + The absolute path to store `RocksDB` database in. 
+ +Networking: + -d, --disable-discovery + Disable the discovery service + + --disable-dns-discovery + Disable the DNS discovery + + --disable-discv4-discovery + Disable Discv4 discovery + + --enable-discv5-discovery + Enable Discv5 discovery + + --disable-nat + Disable Nat discovery + + --discovery.addr + The UDP address to use for devp2p peer discovery version 4 + + [default: 0.0.0.0] + + --discovery.port + The UDP port to use for devp2p peer discovery version 4 + + [default: 30303] + + --discovery.v5.addr + The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 + + --discovery.v5.addr.ipv6 + The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6 + + --discovery.v5.port + The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discovery.v5.addr` is set + + [default: 9200] + + --discovery.v5.port.ipv6 + The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv6, or `--discovery.addr.ipv6` is set + + [default: 9200] + + --discovery.v5.lookup-interval + The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program + + [default: 20] + + --discovery.v5.bootstrap.lookup-interval + The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap + + [default: 5] + + --discovery.v5.bootstrap.lookup-countdown + The number of times to carry out boost lookup queries at bootstrap + + [default: 200] + + --trusted-peers + Comma separated enode URLs of trusted peers for P2P connections. + + --trusted-peers enode://abcd@192.168.0.1:30303 + + --trusted-only + Connect to or accept from trusted peers only + + --bootnodes + Comma separated enode URLs for P2P discovery bootstrap. + + Will fall back to a network-specific default if not specified. + + --dns-retries + Amount of DNS resolution requests retries to perform when peering + + [default: 0] + + --peers-file + The path to the known peers file. Connected peers are dumped to this file on nodes + shutdown, and read on startup. Cannot be used with `--no-persist-peers`. + + --identity + Custom node identity + + [default: reth/-/] + + --p2p-secret-key + Secret key to use for this node. + + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. + + --p2p-secret-key-hex + Hex encoded secret key to use for this node. + + This will also deterministically set the peer ID. Cannot be used together with `--p2p-secret-key`. + + --no-persist-peers + Do not persist peers. + + --nat + NAT resolution method (any|none|upnp|publicip|extip:\) + + [default: any] + + --addr + Network listening address + + [default: 0.0.0.0] + + --port + Network listening port + + [default: 30303] + + --max-outbound-peers + Maximum number of outbound peers. default: 100 + + --max-inbound-peers + Maximum number of inbound peers. default: 30 + + --max-peers + Maximum number of total peers (inbound + outbound). + + Splits peers using approximately 2:1 inbound:outbound ratio. Cannot be used together with `--max-outbound-peers` or `--max-inbound-peers`. + + --max-tx-reqs + Max concurrent `GetPooledTransactions` requests. + + [default: 130] + + --max-tx-reqs-peer + Max concurrent `GetPooledTransactions` requests per peer. + + [default: 1] + + --max-seen-tx-history + Max number of seen transactions to remember per peer. 
+ + Default is 320 transaction hashes. + + [default: 320] + + --max-pending-imports + Max number of transactions to import concurrently. + + [default: 4096] + + --pooled-tx-response-soft-limit + Experimental, for usage in research. Sets the max accumulated byte size of transactions + to pack in one response. + Spec'd at 2MiB. + + [default: 2097152] + + --pooled-tx-pack-soft-limit + Experimental, for usage in research. Sets the max accumulated byte size of transactions to + request in one request. + + Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a + transaction announcement (see `RLPx` specs). This allows a node to request a specific size + response. + + By default, nodes request only 128 KiB worth of transactions, but should a peer request + more, up to 2 MiB, a node will answer with more than 128 KiB. + + Default is 128 KiB. + + [default: 131072] + + --max-tx-pending-fetch + Max capacity of cache of hashes for transactions pending fetch. + + [default: 25600] + + --net-if.experimental + Name of network interface used to communicate with peers. + + If flag is set, but no value is passed, the default interface for docker `eth0` is tried. + + --tx-propagation-policy + Transaction Propagation Policy + + The policy determines which peers transactions are gossiped to. + + [default: All] + + --tx-ingress-policy + Transaction ingress policy + + Determines which peers' transactions are accepted over P2P. + + [default: All] + + --disable-tx-gossip + Disable transaction pool gossip + + Disables gossiping of transactions in the mempool to peers. This can be omitted for personal nodes, though providers should always opt to enable this flag. + + --tx-propagation-mode + Sets the transaction propagation mode by determining how new pending transactions are propagated to other peers in full. + + Examples: sqrt, all, max:10 + + [default: sqrt] + + --required-block-hashes + Comma separated list of required block hashes or block number=hash pairs. Peers that don't have these blocks will be filtered out. Format: hash or `block_number=hash` (e.g., 23115201=0x1234...) + + --network-id + Optional network ID to override the chain specification's network ID for P2P connections + + --netrestrict + Restrict network communication to the given IP networks (CIDR masks). + + Comma separated list of CIDR network specifications. Only peers with IP addresses within these ranges will be allowed to connect. 
+ + Example: --netrestrict "192.168.0.0/16,10.0.0.0/8" + +RPC: + --http + Enable the HTTP-RPC server + + --http.addr + Http server address to listen on + + [default: 127.0.0.1] + + --http.port + Http server port to listen on + + [default: 8545] + + --http.disable-compression + Disable compression for HTTP responses + + --http.api + Rpc Modules to be configured for the HTTP server + + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots, miner, mev, testing] + + --http.corsdomain + Http Corsdomain to allow request from + + --ws + Enable the WS-RPC server + + --ws.addr + Ws server address to listen on + + [default: 127.0.0.1] + + --ws.port + Ws server port to listen on + + [default: 8546] + + --ws.origins + Origins from which to accept `WebSocket` requests + + --ws.api + Rpc Modules to be configured for the WS server + + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots, miner, mev, testing] + + --ipcdisable + Disable the IPC-RPC server + + --ipcpath + Filename for IPC socket/pipe within the datadir + + [default: .ipc] + + --ipc.permissions + Set the permissions for the IPC socket file, in octal format. + + If not specified, the permissions will be set by the system's umask. + + --authrpc.addr + Auth server address to listen on + + [default: 127.0.0.1] + + --authrpc.port + Auth server port to listen on + + [default: 8551] + + --authrpc.jwtsecret + Path to a JWT secret to use for the authenticated engine-API RPC server. + + This will enforce JWT authentication for all requests coming from the consensus layer. + + If no path is provided, a secret will be generated and stored in the datadir under `//jwt.hex`. For mainnet this would be `~/.reth/mainnet/jwt.hex` by default. + + --auth-ipc + Enable auth engine API over IPC + + --auth-ipc.path + Filename for auth IPC socket/pipe within the datadir + + [default: _engine_api.ipc] + + --disable-auth-server + Disable the auth/engine API server. + + This will prevent the authenticated engine-API server from starting. Use this if you're running a node that doesn't need to serve engine API requests. + + --rpc.jwtsecret + Hex encoded JWT secret to authenticate the regular RPC server(s), see `--http.api` and `--ws.api`. + + This is __not__ used for the authenticated engine-API RPC server, see `--authrpc.jwtsecret`. + + --rpc.max-request-size + Set the maximum RPC request payload size for both HTTP and WS in megabytes + + [default: 15] + + --rpc.max-response-size + Set the maximum RPC response payload size for both HTTP and WS in megabytes + + [default: 160] + [aliases: --rpc.returndata.limit] + + --rpc.max-subscriptions-per-connection + Set the maximum concurrent subscriptions per connection + + [default: 1024] + + --rpc.max-connections + Maximum number of RPC server connections + + [default: 500] + + --rpc.max-tracing-requests + Maximum number of concurrent tracing requests. + + By default this chooses a sensible value based on the number of available cores. Tracing requests are generally CPU bound. Choosing a value that is higher than the available CPU cores can have a negative impact on the performance of the node and affect the node's ability to maintain sync. + + [default: ] + + --rpc.max-blocking-io-requests + Maximum number of concurrent blocking IO requests. + + Blocking IO requests include `eth_call`, `eth_estimateGas`, and similar methods that require EVM execution. These are spawned as blocking tasks to avoid blocking the async runtime. 
+ + [default: 256] + + --rpc.max-trace-filter-blocks + Maximum number of blocks for `trace_filter` requests + + [default: 100] + + --rpc.max-blocks-per-filter + Maximum number of blocks that could be scanned per filter request. (0 = entire chain) + + [default: 100000] + + --rpc.max-logs-per-response + Maximum number of logs that can be returned in a single response. (0 = no limit) + + [default: 20000] + + --rpc.gascap + Maximum gas limit for `eth_call` and call tracing RPC methods + + [default: 50000000] + + --rpc.evm-memory-limit + Maximum memory the EVM can allocate per RPC request + + [default: 4294967295] + + --rpc.txfeecap + Maximum eth transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap) + + [default: 1.0] + + --rpc.max-simulate-blocks + Maximum number of blocks for `eth_simulateV1` call + + [default: 256] + + --rpc.eth-proof-window + The maximum proof window for historical proof generation. This value allows for generating historical proofs up to the configured number of blocks from the current tip (up to `tip - window`) + + [default: 0] + + --rpc.proof-permits + Maximum number of concurrent getproof requests + + [default: 25] + + --rpc.pending-block + Configures the pending block behavior for RPC responses. + + Options: full (include all transactions), empty (header only), none (disable pending blocks). + + [default: full] + + --rpc.forwarder + Endpoint to forward transactions to + + --builder.disallow + Path to file containing disallowed addresses, json-encoded list of strings. Block validation API will reject blocks containing transactions from these addresses + +RPC State Cache: + --rpc-cache.max-blocks + Max number of blocks in cache + + [default: 5000] + + --rpc-cache.max-receipts + Max number of receipts in cache + + [default: 2000] + + --rpc-cache.max-headers + Max number of headers in cache + + [default: 1000] + + --rpc-cache.max-concurrent-db-requests + Max number of concurrent database requests + + [default: 512] + +Gas Price Oracle: + --gpo.blocks + Number of recent blocks to check for gas price + + [default: 20] + + --gpo.ignoreprice + Gas Price below which gpo will ignore transactions + + [default: 2] + + --gpo.maxprice + Maximum transaction priority fee (or gasprice before London Fork) to be recommended by gpo + + [default: 500000000000] + + --gpo.percentile + The percentile of gas prices to use for the estimate + + [default: 60] + + --gpo.default-suggested-fee + The default gas price to use if there are no blocks to use + + --rpc.send-raw-transaction-sync-timeout + Timeout for `send_raw_transaction_sync` RPC method + + [default: 30s] + +TxPool: + --txpool.pending-max-count + Max number of transactions in the pending sub-pool + + [default: 10000] + + --txpool.pending-max-size + Max size of the pending sub-pool in megabytes + + [default: 20] + + --txpool.basefee-max-count + Max number of transactions in the basefee sub-pool + + [default: 10000] + + --txpool.basefee-max-size + Max size of the basefee sub-pool in megabytes + + [default: 20] + + --txpool.queued-max-count + Max number of transactions in the queued sub-pool + + [default: 10000] + + --txpool.queued-max-size + Max size of the queued sub-pool in megabytes + + [default: 20] + + --txpool.blobpool-max-count + Max number of transactions in the blobpool + + [default: 10000] + + --txpool.blobpool-max-size + Max size of the blobpool in megabytes + + [default: 20] + + --txpool.blob-cache-size + Max number of entries for the in memory cache of the blob store + + --txpool.disable-blobs-support + Disable EIP-4844 
blob transaction support + + --txpool.max-account-slots + Max number of executable transaction slots guaranteed per account + + [default: 16] + + --txpool.pricebump + Price bump (in %) for the transaction pool underpriced check + + [default: 10] + + --txpool.minimal-protocol-fee + Minimum base fee required by the protocol + + [default: 7] + + --txpool.minimum-priority-fee + Minimum priority fee required for transaction acceptance into the pool. Transactions with priority fee below this value will be rejected + + --txpool.gas-limit + The default enforced gas limit for transactions entering the pool + + [default: 30000000] + + --txpool.max-tx-gas + Maximum gas limit for individual transactions. Transactions exceeding this limit will be rejected by the transaction pool + + --blobpool.pricebump + Price bump percentage to replace an already existing blob transaction + + [default: 100] + + --txpool.max-tx-input-bytes + Max size in bytes of a single transaction allowed to enter the pool + + [default: 131072] + + --txpool.max-cached-entries + The maximum number of blobs to keep in the in memory blob cache + + [default: 100] + + --txpool.nolocals + Flag to disable local transaction exemptions + + --txpool.locals + Flag to allow certain addresses as local + + --txpool.no-local-transactions-propagation + Flag to toggle local transaction propagation + + --txpool.additional-validation-tasks + Number of additional transaction validation tasks to spawn + + [default: 1] + + --txpool.max-pending-txns + Maximum number of pending transactions from the network to buffer + + [default: 2048] + + --txpool.max-new-txns + Maximum number of new transactions to buffer + + [default: 1024] + + --txpool.max-new-pending-txs-notifications + How many new pending transactions to buffer and send to in progress pending transaction iterators + + [default: 200] + + --txpool.lifetime + Maximum amount of time non-executable transaction are queued + + [default: 10800] + + --txpool.transactions-backup + Path to store the local transaction backup at, to survive node restarts + + --txpool.disable-transactions-backup + Disables transaction backup to disk on node shutdown + + --txpool.max-batch-size + Max batch size for transaction pool insertions + + [default: 1] + +Builder: + --builder.extradata + Block extra data set by the payload builder + + [default: reth//] + + --builder.gaslimit + Target gas limit for built blocks + + --builder.interval + The interval at which the job should build a new payload after the last. + + Interval is specified in seconds or in milliseconds if the value ends with `ms`: * `50ms` -> 50 milliseconds * `1` -> 1 second + + [default: 1] + + --builder.deadline + The deadline for when the payload builder job should resolve + + [default: 12] + + --builder.max-tasks + Maximum number of tasks to spawn for building a payload + + [default: 3] + + --builder.max-blobs + Maximum number of blobs to include per block + +Debug: + --debug.terminate + Flag indicating whether the node should be terminated after the pipeline sync + + --debug.tip + Set the chain tip manually for testing purposes. + + NOTE: This is a temporary flag + + --debug.max-block + Runs the sync only up to the specified block + + --debug.etherscan [] + Runs a fake consensus client that advances the chain using recent block hashes on Etherscan. If specified, requires an `ETHERSCAN_API_KEY` environment variable + + --debug.rpc-consensus-url + Runs a fake consensus client using blocks fetched from an RPC endpoint. 
Supports both HTTP and `WebSocket` endpoints - `WebSocket` endpoints will use subscriptions, while HTTP endpoints will poll for new blocks + + --debug.skip-fcu + If provided, the engine will skip `n` consecutive FCUs + + --debug.skip-new-payload + If provided, the engine will skip `n` consecutive new payloads + + --debug.reorg-frequency + If provided, the chain will be reorged at specified frequency + + --debug.reorg-depth + The reorg depth for chain reorgs + + --debug.engine-api-store + The path to store engine API messages at. If specified, all of the intercepted engine API messages will be written to specified location + + --debug.invalid-block-hook + Determines which type of invalid block hook to install + + Example: `witness,prestate` + + [default: witness] + [possible values: witness, pre-state, opcode] + + --debug.healthy-node-rpc-url + The RPC URL of a healthy node to use for comparing invalid block hook results against. + + Debug setting that enables execution witness comparison for troubleshooting bad blocks. + When enabled, the node will collect execution witnesses from the specified source and + compare them against local execution when a bad block is encountered, helping identify + discrepancies in state execution. + + --ethstats + The URL of the ethstats server to connect to. Example: `nodename:secret@host:port` + + --debug.startup-sync-state-idle + Set the node to idle state when the backfill is not running. + + This makes the `eth_syncing` RPC return "Idle" when the node has just started or finished the backfill, but did not yet receive any new blocks. + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --db.max-readers + Maximum number of readers allowed to access the database concurrently + + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + +Dev testnet: + --dev + Start the node in dev mode + + This mode uses a local proof-of-authority consensus engine with either fixed block times + or automatically mined blocks. 
+ Disables network discovery and enables local http server. + Prefunds 20 accounts derived by mnemonic "test test test test test test test test test test + test junk" with 10 000 ETH each. + + --dev.block-max-transactions + How many transactions to mine per block + + --dev.block-time + Interval between blocks. + + Parses strings using [`humantime::parse_duration`] + --dev.block-time 12s + + --dev.mnemonic + Derive dev accounts from a fixed mnemonic instead of random ones. + + [default: "test test test test test test test test test test test junk"] + +Pruning: + --full + Run full node. Only the most recent [`MINIMUM_PRUNING_DISTANCE`] block states are stored + + --prune.block-interval + Minimum pruning interval measured in blocks + + --prune.sender-recovery.full + Prunes all sender recovery data + + --prune.sender-recovery.distance + Prune sender recovery data before the `head-N` block number. In other words, keep last N + 1 blocks + + --prune.sender-recovery.before + Prune sender recovery data before the specified block number. The specified block number is not pruned + + --prune.transaction-lookup.full + Prunes all transaction lookup data + + --prune.transaction-lookup.distance + Prune transaction lookup data before the `head-N` block number. In other words, keep last N + 1 blocks + + --prune.transaction-lookup.before + Prune transaction lookup data before the specified block number. The specified block number is not pruned + + --prune.receipts.full + Prunes all receipt data + + --prune.receipts.pre-merge + Prune receipts before the merge block + + --prune.receipts.distance + Prune receipts before the `head-N` block number. In other words, keep last N + 1 blocks + + --prune.receipts.before + Prune receipts before the specified block number. The specified block number is not pruned + + --prune.receiptslogfilter + Configure receipts log filter. Format: <`address`>:<`prune_mode`>... where <`prune_mode`> can be 'full', 'distance:<`blocks`>', or 'before:<`block_number`>' + + --prune.account-history.full + Prunes all account history + + --prune.account-history.distance + Prune account before the `head-N` block number. In other words, keep last N + 1 blocks + + --prune.account-history.before + Prune account history before the specified block number. The specified block number is not pruned + + --prune.storage-history.full + Prunes all storage history data + + --prune.storage-history.distance + Prune storage history before the `head-N` block number. In other words, keep last N + 1 blocks + + --prune.storage-history.before + Prune storage history before the specified block number. The specified block number is not pruned + + --prune.bodies.pre-merge + Prune bodies before the merge block + + --prune.bodies.distance + Prune bodies before the `head-N` block number. In other words, keep last N + 1 blocks + + --prune.bodies.before + Prune storage history before the specified block number. The specified block number is not pruned + +Engine: + --engine.persistence-threshold + Configure persistence threshold for the engine. This determines how many canonical blocks must be in-memory, ahead of the last persisted block, before flushing canonical blocks to disk again. + + To persist blocks as fast as the node receives them, set this value to zero. This will cause more frequent DB writes. 
+ + [default: 2] + + --engine.memory-block-buffer-target + Configure the target number of blocks to keep in memory + + [default: 0] + + --engine.legacy-state-root + Enable legacy state root + + --engine.disable-state-cache + Disable state cache + + --engine.disable-prewarming + Disable parallel prewarming + + --engine.disable-parallel-sparse-trie + Disable the parallel sparse trie in the engine + + --engine.state-provider-metrics + Enable state provider latency metrics. This allows the engine to collect and report stats about how long state provider calls took during execution, but this does introduce slight overhead to state provider calls + + --engine.cross-block-cache-size + Configure the size of cross-block cache in megabytes + + [default: 4096] + + --engine.state-root-task-compare-updates + Enable comparing trie updates from the state root task to the trie updates from the regular state root calculation + + --engine.accept-execution-requests-hash + Enables accepting requests hash instead of an array of requests in `engine_newPayloadV4` + + --engine.multiproof-chunking + Whether multiproof task should chunk proof targets + + --engine.multiproof-chunk-size + Multiproof task chunk size for proof targets + + [default: 60] + + --engine.reserved-cpu-cores + Configure the number of reserved CPU cores for non-reth processes + + [default: 1] + + --engine.disable-precompile-cache + Disable precompile cache + + --engine.state-root-fallback + Enable state root fallback, useful for testing + + --engine.always-process-payload-attributes-on-canonical-head + Always process payload attributes and begin a payload build process even if `forkchoiceState.headBlockHash` is already the canonical head or an ancestor. See `TreeConfig::always_process_payload_attributes_on_canonical_head` for more details. + + Note: This is a no-op on OP Stack. + + --engine.allow-unwind-canonical-header + Allow unwinding canonical header to ancestor during forkchoice updates. See `TreeConfig::unwind_canonical_header` for more details + + --engine.storage-worker-count + Configure the number of storage proof workers in the Tokio blocking pool. If not specified, defaults to 2x available parallelism, clamped between 2 and 64 + + --engine.account-worker-count + Configure the number of account proof workers in the Tokio blocking pool. If not specified, defaults to the same count as storage workers + +ERA: + --era.enable + Enable import from ERA1 files + + --era.path + The path to a directory for import. + + The ERA1 files are read from the local directory parsing headers and bodies. + + --era.url + The URL to a remote host where the ERA1 files are hosted. + + The ERA1 files are read from the remote host using HTTP GET requests parsing headers + and bodies. + +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. 
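+      # (Editorial aside, not part of the generated --help output.) A minimal
+      # sketch of opting into static-file storage on a fresh datadir, assuming
+      # the `node` subcommand and a hypothetical path /data/op-reth:
+      #
+      #   op-reth node --datadir /data/op-reth \
+      #     --static-files.receipts \
+      #     --static-files.transaction-senders
+      #
+      # As the notes above and below state, both flags only take effect at
+      # genesis initialization; changing them later requires a re-sync.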
+ + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + +Rollup: + --rollup.sequencer + Endpoint for the sequencer mempool (can be both HTTP and WS) + + [aliases: --rollup.sequencer-http, --rollup.sequencer-ws] + + --rollup.disable-tx-pool-gossip + Disable transaction pool gossip + + --rollup.compute-pending-block + By default the pending block equals the latest block to save resources and not leak txs from the tx-pool, this flag enables computing of the pending block from the tx-pool instead. + + If `compute_pending_block` is not enabled, the payload builder will use the payload attributes from the latest block. Note that this flag is not yet functional. + + --rollup.discovery.v4 + enables discovery v4 if provided + + --rollup.enable-tx-conditional + Enable transaction conditional support on sequencer + + --rollup.supervisor-http + HTTP endpoint for the supervisor + + [default: http://localhost:1337/] + + --rollup.supervisor-safety-level + Safety level for the supervisor + + [default: CrossUnsafe] + + --rollup.sequencer-headers + Optional headers to use when connecting to the sequencer + + --rollup.historicalrpc + RPC endpoint for historical data + + --min-suggested-priority-fee + Minimum suggested priority fee (tip) in wei, default `1_000_000` + + [default: 1000000] + + --flashblocks-url + A URL pointing to a secure websocket subscription that streams out flashblocks. + + If given, the flashblocks are received to build pending block. All request with "pending" block tag will use the pending state based on flashblocks. + + --flashblock-consensus + Enable flashblock consensus client to drive the chain forward + + When enabled, the flashblock consensus client will process flashblock sequences and submit them to the engine API to advance the chain. Requires `flashblocks_url` to be set. + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/p2p.mdx b/docs/vocs/docs/pages/cli/op-reth/p2p.mdx new file mode 100644 index 0000000000..07246ad198 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/p2p.mdx @@ -0,0 +1,145 @@ +# op-reth p2p + +P2P Debugging utilities + +```bash +$ op-reth p2p --help +``` +```txt +Usage: op-reth p2p [OPTIONS] + +Commands: + header Download block header + body Download block body + rlpx RLPx commands + bootnode Bootnode command + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/op-reth/p2p/body.mdx new file mode 100644 index 0000000000..de1b963862 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/p2p/body.mdx @@ -0,0 +1,392 @@ +# op-reth p2p body + +Download block body + +```bash +$ op-reth p2p body --help +``` +```txt +Usage: op-reth p2p body [OPTIONS] + +Options: + --retries + The number of retries per request + + [default: 5] + + -h, --help + Print help (see a summary with '-h') + +Networking: + -d, --disable-discovery + Disable the discovery service + + --disable-dns-discovery + Disable the DNS discovery + + --disable-discv4-discovery + Disable Discv4 discovery + + --enable-discv5-discovery + Enable Discv5 discovery + + --disable-nat + Disable Nat discovery + + --discovery.addr + The UDP address to use for devp2p peer discovery version 4 + + [default: 0.0.0.0] + + --discovery.port + The UDP port to use for devp2p peer discovery version 4 + + [default: 30303] + + --discovery.v5.addr + The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 + + --discovery.v5.addr.ipv6 + The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6 + + --discovery.v5.port + The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discovery.v5.addr` is set + + [default: 9200] + + --discovery.v5.port.ipv6 + The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv6, or `--discovery.addr.ipv6` is set + + [default: 9200] + + --discovery.v5.lookup-interval + The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program + + [default: 20] + + --discovery.v5.bootstrap.lookup-interval + The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap + + [default: 5] + + --discovery.v5.bootstrap.lookup-countdown + The number of times to carry out boost lookup queries at bootstrap + + [default: 200] + + --trusted-peers + Comma separated enode URLs of trusted peers for P2P connections. + + --trusted-peers enode://abcd@192.168.0.1:30303 + + --trusted-only + Connect to or accept from trusted peers only + + --bootnodes + Comma separated enode URLs for P2P discovery bootstrap. + + Will fall back to a network-specific default if not specified. + + --dns-retries + Amount of DNS resolution requests retries to perform when peering + + [default: 0] + + --peers-file + The path to the known peers file. Connected peers are dumped to this file on nodes + shutdown, and read on startup. Cannot be used with `--no-persist-peers`. + + --identity + Custom node identity + + [default: reth/-/] + + --p2p-secret-key + Secret key to use for this node. + + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. + + --p2p-secret-key-hex + Hex encoded secret key to use for this node. + + This will also deterministically set the peer ID. Cannot be used together with `--p2p-secret-key`. + + --no-persist-peers + Do not persist peers. + + --nat + NAT resolution method (any|none|upnp|publicip|extip:\) + + [default: any] + + --addr + Network listening address + + [default: 0.0.0.0] + + --port + Network listening port + + [default: 30303] + + --max-outbound-peers + Maximum number of outbound peers. 
default: 100 + + --max-inbound-peers + Maximum number of inbound peers. default: 30 + + --max-peers + Maximum number of total peers (inbound + outbound). + + Splits peers using approximately 2:1 inbound:outbound ratio. Cannot be used together with `--max-outbound-peers` or `--max-inbound-peers`. + + --max-tx-reqs + Max concurrent `GetPooledTransactions` requests. + + [default: 130] + + --max-tx-reqs-peer + Max concurrent `GetPooledTransactions` requests per peer. + + [default: 1] + + --max-seen-tx-history + Max number of seen transactions to remember per peer. + + Default is 320 transaction hashes. + + [default: 320] + + --max-pending-imports + Max number of transactions to import concurrently. + + [default: 4096] + + --pooled-tx-response-soft-limit + Experimental, for usage in research. Sets the max accumulated byte size of transactions + to pack in one response. + Spec'd at 2MiB. + + [default: 2097152] + + --pooled-tx-pack-soft-limit + Experimental, for usage in research. Sets the max accumulated byte size of transactions to + request in one request. + + Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a + transaction announcement (see `RLPx` specs). This allows a node to request a specific size + response. + + By default, nodes request only 128 KiB worth of transactions, but should a peer request + more, up to 2 MiB, a node will answer with more than 128 KiB. + + Default is 128 KiB. + + [default: 131072] + + --max-tx-pending-fetch + Max capacity of cache of hashes for transactions pending fetch. + + [default: 25600] + + --net-if.experimental + Name of network interface used to communicate with peers. + + If flag is set, but no value is passed, the default interface for docker `eth0` is tried. + + --tx-propagation-policy + Transaction Propagation Policy + + The policy determines which peers transactions are gossiped to. + + [default: All] + + --tx-ingress-policy + Transaction ingress policy + + Determines which peers' transactions are accepted over P2P. + + [default: All] + + --disable-tx-gossip + Disable transaction pool gossip + + Disables gossiping of transactions in the mempool to peers. This can be omitted for personal nodes, though providers should always opt to enable this flag. + + --tx-propagation-mode + Sets the transaction propagation mode by determining how new pending transactions are propagated to other peers in full. + + Examples: sqrt, all, max:10 + + [default: sqrt] + + --required-block-hashes + Comma separated list of required block hashes or block number=hash pairs. Peers that don't have these blocks will be filtered out. Format: hash or `block_number=hash` (e.g., 23115201=0x1234...) + + --network-id + Optional network ID to override the chain specification's network ID for P2P connections + + --netrestrict + Restrict network communication to the given IP networks (CIDR masks). + + Comma separated list of CIDR network specifications. Only peers with IP addresses within these ranges will be allowed to connect. + + Example: --netrestrict "192.168.0.0/16,10.0.0.0/8" + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --datadir.rocksdb + The absolute path to store `RocksDB` database in. 
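+      # (Editorial aside, not part of the generated --help output.) An
+      # illustrative invocation, assuming a reachable trusted peer; the enode
+      # URL and block number below are placeholders:
+      #
+      #   op-reth p2p body --chain base \
+      #     --trusted-peers enode://abcd@192.168.0.1:30303 --trusted-only \
+      #     --retries 3 23115201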
+ + --config + The path to the configuration file to use. + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + + + The block number or hash + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/p2p/bootnode.mdx b/docs/vocs/docs/pages/cli/op-reth/p2p/bootnode.mdx new file mode 100644 index 0000000000..a36d568ab8 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/p2p/bootnode.mdx @@ -0,0 +1,156 @@ +# op-reth p2p bootnode + +Bootnode command + +```bash +$ op-reth p2p bootnode --help +``` +```txt +Usage: op-reth p2p bootnode [OPTIONS] + +Options: + --addr + Listen address for the bootnode (default: "0.0.0.0:30301") + + [default: 0.0.0.0:30301] + + --p2p-secret-key + Secret key to use for the bootnode. + + This will also deterministically set the peer ID. If a path is provided but no key exists at that path, a new random secret will be generated and stored there. If no path is specified, a new ephemeral random secret will be used. + + --nat + NAT resolution method (any|none|upnp|publicip|extip:\) + + [default: any] + + --v5 + Run a v5 topic discovery bootnode + + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/op-reth/p2p/header.mdx new file mode 100644 index 0000000000..d176568dd5 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/p2p/header.mdx @@ -0,0 +1,392 @@ +# op-reth p2p header + +Download block header + +```bash +$ op-reth p2p header --help +``` +```txt +Usage: op-reth p2p header [OPTIONS] + +Options: + --retries + The number of retries per request + + [default: 5] + + -h, --help + Print help (see a summary with '-h') + +Networking: + -d, --disable-discovery + Disable the discovery service + + --disable-dns-discovery + Disable the DNS discovery + + --disable-discv4-discovery + Disable Discv4 discovery + + --enable-discv5-discovery + Enable Discv5 discovery + + --disable-nat + Disable Nat discovery + + --discovery.addr + The UDP address to use for devp2p peer discovery version 4 + + [default: 0.0.0.0] + + --discovery.port + The UDP port to use for devp2p peer discovery version 4 + + [default: 30303] + + --discovery.v5.addr + The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 + + --discovery.v5.addr.ipv6 + The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6 + + --discovery.v5.port + The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discovery.v5.addr` is set + + [default: 9200] + + --discovery.v5.port.ipv6 + The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv6, or `--discovery.addr.ipv6` is set + + [default: 9200] + + --discovery.v5.lookup-interval + The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program + + [default: 20] + + --discovery.v5.bootstrap.lookup-interval + The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap + + [default: 5] + + --discovery.v5.bootstrap.lookup-countdown + The number of times to carry out boost lookup queries at bootstrap + + [default: 200] + + --trusted-peers + Comma separated enode URLs of trusted peers for P2P connections. + + --trusted-peers enode://abcd@192.168.0.1:30303 + + --trusted-only + Connect to or accept from trusted peers only + + --bootnodes + Comma separated enode URLs for P2P discovery bootstrap. + + Will fall back to a network-specific default if not specified. + + --dns-retries + Amount of DNS resolution requests retries to perform when peering + + [default: 0] + + --peers-file + The path to the known peers file. Connected peers are dumped to this file on nodes + shutdown, and read on startup. Cannot be used with `--no-persist-peers`. + + --identity + Custom node identity + + [default: reth/-/] + + --p2p-secret-key + Secret key to use for this node. + + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. + + --p2p-secret-key-hex + Hex encoded secret key to use for this node. + + This will also deterministically set the peer ID. Cannot be used together with `--p2p-secret-key`. + + --no-persist-peers + Do not persist peers. + + --nat + NAT resolution method (any|none|upnp|publicip|extip:\) + + [default: any] + + --addr + Network listening address + + [default: 0.0.0.0] + + --port + Network listening port + + [default: 30303] + + --max-outbound-peers + Maximum number of outbound peers. 
default: 100 + + --max-inbound-peers + Maximum number of inbound peers. default: 30 + + --max-peers + Maximum number of total peers (inbound + outbound). + + Splits peers using approximately 2:1 inbound:outbound ratio. Cannot be used together with `--max-outbound-peers` or `--max-inbound-peers`. + + --max-tx-reqs + Max concurrent `GetPooledTransactions` requests. + + [default: 130] + + --max-tx-reqs-peer + Max concurrent `GetPooledTransactions` requests per peer. + + [default: 1] + + --max-seen-tx-history + Max number of seen transactions to remember per peer. + + Default is 320 transaction hashes. + + [default: 320] + + --max-pending-imports + Max number of transactions to import concurrently. + + [default: 4096] + + --pooled-tx-response-soft-limit + Experimental, for usage in research. Sets the max accumulated byte size of transactions + to pack in one response. + Spec'd at 2MiB. + + [default: 2097152] + + --pooled-tx-pack-soft-limit + Experimental, for usage in research. Sets the max accumulated byte size of transactions to + request in one request. + + Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a + transaction announcement (see `RLPx` specs). This allows a node to request a specific size + response. + + By default, nodes request only 128 KiB worth of transactions, but should a peer request + more, up to 2 MiB, a node will answer with more than 128 KiB. + + Default is 128 KiB. + + [default: 131072] + + --max-tx-pending-fetch + Max capacity of cache of hashes for transactions pending fetch. + + [default: 25600] + + --net-if.experimental + Name of network interface used to communicate with peers. + + If flag is set, but no value is passed, the default interface for docker `eth0` is tried. + + --tx-propagation-policy + Transaction Propagation Policy + + The policy determines which peers transactions are gossiped to. + + [default: All] + + --tx-ingress-policy + Transaction ingress policy + + Determines which peers' transactions are accepted over P2P. + + [default: All] + + --disable-tx-gossip + Disable transaction pool gossip + + Disables gossiping of transactions in the mempool to peers. This can be omitted for personal nodes, though providers should always opt to enable this flag. + + --tx-propagation-mode + Sets the transaction propagation mode by determining how new pending transactions are propagated to other peers in full. + + Examples: sqrt, all, max:10 + + [default: sqrt] + + --required-block-hashes + Comma separated list of required block hashes or block number=hash pairs. Peers that don't have these blocks will be filtered out. Format: hash or `block_number=hash` (e.g., 23115201=0x1234...) + + --network-id + Optional network ID to override the chain specification's network ID for P2P connections + + --netrestrict + Restrict network communication to the given IP networks (CIDR masks). + + Comma separated list of CIDR network specifications. Only peers with IP addresses within these ranges will be allowed to connect. + + Example: --netrestrict "192.168.0.0/16,10.0.0.0/8" + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --datadir.rocksdb + The absolute path to store `RocksDB` database in. 
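+      # (Editorial aside, not part of the generated --help output.) A sketch of
+      # fetching a single header over discv5 instead of discv4; the header
+      # number is a placeholder:
+      #
+      #   op-reth p2p header --disable-discv4-discovery \
+      #     --enable-discv5-discovery --discovery.v5.port 9200 23115201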
+ + --config + The path to the configuration file to use. + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + + + The header number or hash + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/debug.mdx b/docs/vocs/docs/pages/cli/op-reth/p2p/rlpx.mdx similarity index 57% rename from docs/vocs/docs/pages/cli/reth/debug.mdx rename to docs/vocs/docs/pages/cli/op-reth/p2p/rlpx.mdx index f56a60aa94..31aabc276b 100644 --- a/docs/vocs/docs/pages/cli/reth/debug.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/p2p/rlpx.mdx @@ -1,17 +1,16 @@ -# reth debug +# op-reth p2p rlpx -Various debug routines +RLPx commands ```bash -$ reth debug --help +$ op-reth p2p rlpx --help ``` ```txt -Usage: reth debug [OPTIONS] +Usage: op-reth p2p rlpx [OPTIONS] Commands: - merkle Debug the clean & incremental state root calculations - in-memory-merkle Debug in-memory state root calculation - help Print this message or the help of the given subcommand(s) + ping ping node + help Print this message or the help of the given subcommand(s) Options: -h, --help @@ -21,13 +20,13 @@ Logging: --log.stdout.format The format to use for logs written to stdout - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.stdout.filter The filter to use for logs written to stdout @@ -36,13 +35,13 @@ Logging: --log.file.format The format to use for logs written to the log file - [default: terminal] - Possible values: - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications - terminal: Represents terminal-friendly formatting for logs + [default: terminal] + --log.file.filter The filter to use for logs written to the log file @@ -53,6 +52,11 @@ Logging: [default: /logs] + --log.file.name + The prefix name of the log files + + [default: reth.log] + --log.file.max-size The maximum size (in MB) of one log file @@ -74,13 +78,13 @@ Logging: --color Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - [default: always] - Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off + [default: always] + Display: -v, --verbosity... Set the minimum log level. @@ -93,4 +97,46 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/recover.mdx b/docs/vocs/docs/pages/cli/op-reth/p2p/rlpx/ping.mdx similarity index 59% rename from docs/vocs/docs/pages/cli/reth/recover.mdx rename to docs/vocs/docs/pages/cli/op-reth/p2p/rlpx/ping.mdx index 880b8482d0..4f03545a90 100644 --- a/docs/vocs/docs/pages/cli/reth/recover.mdx +++ b/docs/vocs/docs/pages/cli/op-reth/p2p/rlpx/ping.mdx @@ -1,16 +1,16 @@ -# reth recover +# op-reth p2p rlpx ping -Scripts for node recovery +ping node ```bash -$ reth recover --help +$ op-reth p2p rlpx ping --help ``` ```txt -Usage: reth recover [OPTIONS] +Usage: op-reth p2p rlpx ping [OPTIONS] -Commands: - storage-tries Recover the node by deleting dangling storage tries - help Print this message or the help of the given subcommand(s) +Arguments: + + The node to ping Options: -h, --help @@ -80,7 +80,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -97,4 +97,46 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. 
+ + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/prune.mdx b/docs/vocs/docs/pages/cli/op-reth/prune.mdx new file mode 100644 index 0000000000..ef0783da58 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/prune.mdx @@ -0,0 +1,242 @@ +# op-reth prune + +Prune according to the configuration without any limits + +```bash +$ op-reth prune --help +``` +```txt +Usage: op-reth prune [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. 
assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --db.max-readers + Maximum number of readers allowed to access the database concurrently + + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx b/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx new file mode 100644 index 0000000000..bd95fbab68 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/re-execute.mdx @@ -0,0 +1,258 @@ +# op-reth re-execute + +Re-execute blocks in parallel to verify historical sync correctness + +```bash +$ op-reth re-execute --help +``` +```txt +Usage: op-reth re-execute [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. 
+ + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. 
+ + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --db.max-readers + Maximum number of readers allowed to access the database concurrently + + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --from + The height to start at + + [default: 1] + + --to + The height to end at. Defaults to the latest block + + --num-tasks + Number of tasks to run in parallel + + [default: 10] + + --skip-invalid-blocks + Continues with execution when an invalid block is encountered and collects these blocks + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/stage.mdx b/docs/vocs/docs/pages/cli/op-reth/stage.mdx new file mode 100644 index 0000000000..1322ab4579 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/stage.mdx @@ -0,0 +1,145 @@ +# op-reth stage + +Manipulate individual stages + +```bash +$ op-reth stage --help +``` +```txt +Usage: op-reth stage [OPTIONS] + +Commands: + run Run a single stage + drop Drop a stage's tables from the database + dump Dumps a stage from a range into a new database + unwind Unwinds a certain block range, deleting it from the database + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx new file mode 100644 index 0000000000..5f96ed8ab6 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/stage/drop.mdx @@ -0,0 +1,257 @@ +# op-reth stage drop + +Drop a stage's tables from the database + +```bash +$ op-reth stage drop --help +``` +```txt +Usage: op-reth stage drop [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. 
+ + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. 
+ + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --db.max-readers + Maximum number of readers allowed to access the database concurrently + + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + + Possible values: + - headers: The headers stage within the pipeline + - bodies: The bodies stage within the pipeline + - senders: The senders stage within the pipeline + - execution: The execution stage within the pipeline + - account-hashing: The account hashing stage within the pipeline + - storage-hashing: The storage hashing stage within the pipeline + - hashing: The account and storage hashing stages within the pipeline + - merkle: The merkle stage within the pipeline + - merkle-changesets: The merkle changesets stage within the pipeline + - tx-lookup: The transaction lookup stage within the pipeline + - account-history: The account history stage within the pipeline + - storage-history: The storage history stage within the pipeline + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx new file mode 100644 index 0000000000..910ab7666f --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/stage/dump.mdx @@ -0,0 +1,249 @@ +# op-reth stage dump + +Dumps a stage from a range into a new database + +```bash +$ op-reth stage dump --help +``` +```txt +Usage: op-reth stage dump [OPTIONS] + +Commands: + execution Execution stage + storage-hashing `StorageHashing` stage + account-hashing `AccountHashing` stage + merkle Merkle stage + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. 
+ + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. 
+ + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --db.max-readers + Maximum number of readers allowed to access the database concurrently + + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) 
+ + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/dump/account-hashing.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/dump/account-hashing.mdx new file mode 100644 index 0000000000..dd58e31fbb --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/stage/dump/account-hashing.mdx @@ -0,0 +1,160 @@ +# op-reth stage dump account-hashing + +`AccountHashing` stage + +```bash +$ op-reth stage dump account-hashing --help +``` +```txt +Usage: op-reth stage dump account-hashing [OPTIONS] --output-datadir --from --to + +Options: + --output-datadir + The path to the new datadir folder. + + -f, --from + From which block + + -t, --to + To which block + + -d, --dry-run + If passed, it will dry-run a stage execution from the newly created database right after dumping + + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/dump/execution.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/dump/execution.mdx new file mode 100644 index 0000000000..47740d0e06 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/stage/dump/execution.mdx @@ -0,0 +1,160 @@ +# op-reth stage dump execution + +Execution stage + +```bash +$ op-reth stage dump execution --help +``` +```txt +Usage: op-reth stage dump execution [OPTIONS] --output-datadir --from --to + +Options: + --output-datadir + The path to the new datadir folder. + + -f, --from + From which block + + -t, --to + To which block + + -d, --dry-run + If passed, it will dry-run a stage execution from the newly created database right after dumping + + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/dump/merkle.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/dump/merkle.mdx new file mode 100644 index 0000000000..3b02f7199a --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/stage/dump/merkle.mdx @@ -0,0 +1,160 @@ +# op-reth stage dump merkle + +Merkle stage + +```bash +$ op-reth stage dump merkle --help +``` +```txt +Usage: op-reth stage dump merkle [OPTIONS] --output-datadir --from --to + +Options: + --output-datadir + The path to the new datadir folder. + + -f, --from + From which block + + -t, --to + To which block + + -d, --dry-run + If passed, it will dry-run a stage execution from the newly created database right after dumping + + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. 
+ + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/dump/storage-hashing.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/dump/storage-hashing.mdx new file mode 100644 index 0000000000..07feb42a25 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/stage/dump/storage-hashing.mdx @@ -0,0 +1,160 @@ +# op-reth stage dump storage-hashing + +`StorageHashing` stage + +```bash +$ op-reth stage dump storage-hashing --help +``` +```txt +Usage: op-reth stage dump storage-hashing [OPTIONS] --output-datadir --from --to + +Options: + --output-datadir + The path to the new datadir folder. + + -f, --from + From which block + + -t, --to + To which block + + -d, --dry-run + If passed, it will dry-run a stage execution from the newly created database right after dumping + + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx new file mode 100644 index 0000000000..c57a221e5f --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/stage/run.mdx @@ -0,0 +1,504 @@ +# op-reth stage run + +Run a single stage. 
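+
+An illustrative invocation (not part of the generated help; the block range here is a placeholder), using the `--from`, `--to`, and `--commit` flags documented below:
+
+```bash
+$ op-reth stage run execution --from 1 --to 1000 --commit
+```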
+ +```bash +$ op-reth stage run --help +``` +```txt +Usage: op-reth stage run [OPTIONS] --from --to + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. 
+ + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --db.max-readers + Maximum number of readers allowed to access the database concurrently + + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --metrics + Enable Prometheus metrics. + + The metrics will be served at the given interface and port. + + --from + The height to start at + + -t, --to + The end of the stage + + --batch-size + Batch size for stage execution and unwind + + -s, --skip-unwind + Normally, running the stage requires unwinding for stages that already have been run, in order to not rewrite to the same database slots. + + You can optionally skip the unwinding phase if you're syncing a block range that has not been synced before. + + -c, --commit + Commits the changes in the database. WARNING: potentially destructive. + + Useful when you want to run diagnostics on the database. + + NOTE: This flag is currently required for the headers, bodies, and execution stages because they use static files and must commit to properly unwind and run. 
+ + --checkpoints + Save stage checkpoints + + + The name of the stage to run + + Possible values: + - headers: The headers stage within the pipeline + - bodies: The bodies stage within the pipeline + - senders: The senders stage within the pipeline + - execution: The execution stage within the pipeline + - account-hashing: The account hashing stage within the pipeline + - storage-hashing: The storage hashing stage within the pipeline + - hashing: The account and storage hashing stages within the pipeline + - merkle: The merkle stage within the pipeline + - merkle-changesets: The merkle changesets stage within the pipeline + - tx-lookup: The transaction lookup stage within the pipeline + - account-history: The account history stage within the pipeline + - storage-history: The storage history stage within the pipeline + +Networking: + -d, --disable-discovery + Disable the discovery service + + --disable-dns-discovery + Disable the DNS discovery + + --disable-discv4-discovery + Disable Discv4 discovery + + --enable-discv5-discovery + Enable Discv5 discovery + + --disable-nat + Disable Nat discovery + + --discovery.addr + The UDP address to use for devp2p peer discovery version 4 + + [default: 0.0.0.0] + + --discovery.port + The UDP port to use for devp2p peer discovery version 4 + + [default: 30303] + + --discovery.v5.addr + The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 + + --discovery.v5.addr.ipv6 + The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6 + + --discovery.v5.port + The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discovery.v5.addr` is set + + [default: 9200] + + --discovery.v5.port.ipv6 + The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv6, or `--discovery.addr.ipv6` is set + + [default: 9200] + + --discovery.v5.lookup-interval + The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program + + [default: 20] + + --discovery.v5.bootstrap.lookup-interval + The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap + + [default: 5] + + --discovery.v5.bootstrap.lookup-countdown + The number of times to carry out boost lookup queries at bootstrap + + [default: 200] + + --trusted-peers + Comma separated enode URLs of trusted peers for P2P connections. + + --trusted-peers enode://abcd@192.168.0.1:30303 + + --trusted-only + Connect to or accept from trusted peers only + + --bootnodes + Comma separated enode URLs for P2P discovery bootstrap. + + Will fall back to a network-specific default if not specified. + + --dns-retries + Amount of DNS resolution requests retries to perform when peering + + [default: 0] + + --peers-file + The path to the known peers file. Connected peers are dumped to this file on nodes + shutdown, and read on startup. Cannot be used with `--no-persist-peers`. + + --identity + Custom node identity + + [default: reth/-/] + + --p2p-secret-key + Secret key to use for this node. + + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. + + --p2p-secret-key-hex + Hex encoded secret key to use for this node. + + This will also deterministically set the peer ID. Cannot be used together with `--p2p-secret-key`. + + --no-persist-peers + Do not persist peers. 
+ + --nat + NAT resolution method (any|none|upnp|publicip|extip:\) + + [default: any] + + --addr + Network listening address + + [default: 0.0.0.0] + + --port + Network listening port + + [default: 30303] + + --max-outbound-peers + Maximum number of outbound peers. default: 100 + + --max-inbound-peers + Maximum number of inbound peers. default: 30 + + --max-peers + Maximum number of total peers (inbound + outbound). + + Splits peers using approximately 2:1 inbound:outbound ratio. Cannot be used together with `--max-outbound-peers` or `--max-inbound-peers`. + + --max-tx-reqs + Max concurrent `GetPooledTransactions` requests. + + [default: 130] + + --max-tx-reqs-peer + Max concurrent `GetPooledTransactions` requests per peer. + + [default: 1] + + --max-seen-tx-history + Max number of seen transactions to remember per peer. + + Default is 320 transaction hashes. + + [default: 320] + + --max-pending-imports + Max number of transactions to import concurrently. + + [default: 4096] + + --pooled-tx-response-soft-limit + Experimental, for usage in research. Sets the max accumulated byte size of transactions + to pack in one response. + Spec'd at 2MiB. + + [default: 2097152] + + --pooled-tx-pack-soft-limit + Experimental, for usage in research. Sets the max accumulated byte size of transactions to + request in one request. + + Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a + transaction announcement (see `RLPx` specs). This allows a node to request a specific size + response. + + By default, nodes request only 128 KiB worth of transactions, but should a peer request + more, up to 2 MiB, a node will answer with more than 128 KiB. + + Default is 128 KiB. + + [default: 131072] + + --max-tx-pending-fetch + Max capacity of cache of hashes for transactions pending fetch. + + [default: 25600] + + --net-if.experimental + Name of network interface used to communicate with peers. + + If flag is set, but no value is passed, the default interface for docker `eth0` is tried. + + --tx-propagation-policy + Transaction Propagation Policy + + The policy determines which peers transactions are gossiped to. + + [default: All] + + --tx-ingress-policy + Transaction ingress policy + + Determines which peers' transactions are accepted over P2P. + + [default: All] + + --disable-tx-gossip + Disable transaction pool gossip + + Disables gossiping of transactions in the mempool to peers. This can be omitted for personal nodes, though providers should always opt to enable this flag. + + --tx-propagation-mode + Sets the transaction propagation mode by determining how new pending transactions are propagated to other peers in full. + + Examples: sqrt, all, max:10 + + [default: sqrt] + + --required-block-hashes + Comma separated list of required block hashes or block number=hash pairs. Peers that don't have these blocks will be filtered out. Format: hash or `block_number=hash` (e.g., 23115201=0x1234...) + + --network-id + Optional network ID to override the chain specification's network ID for P2P connections + + --netrestrict + Restrict network communication to the given IP networks (CIDR masks). + + Comma separated list of CIDR network specifications. Only peers with IP addresses within these ranges will be allowed to connect. + + Example: --netrestrict "192.168.0.0/16,10.0.0.0/8" + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx new file mode 100644 index 0000000000..d53a6e5b46 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/stage/unwind.mdx @@ -0,0 +1,250 @@ +# op-reth stage unwind + +Unwinds a certain block range, deleting it from the database + +```bash +$ op-reth stage unwind --help +``` +```txt +Usage: op-reth stage unwind [OPTIONS] + +Commands: + to-block Unwinds the database from the latest block, until the given block number or hash has been reached, that block is not included + num-blocks Unwinds the database from the latest block, until the given number of blocks have been reached + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). 
+ + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --db.max-readers + Maximum number of readers allowed to access the database concurrently + + --db.sync-mode + Controls how aggressively the database synchronizes data to disk + +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --offline + If this is enabled, then all stages except headers, bodies, and sender recovery will be unwound + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/unwind/num-blocks.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/unwind/num-blocks.mdx new file mode 100644 index 0000000000..d204bdc5e7 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/stage/unwind/num-blocks.mdx @@ -0,0 +1,152 @@ +# op-reth stage unwind num-blocks + +Unwinds the database from the latest block, until the given number of blocks have been reached + +```bash +$ op-reth stage unwind num-blocks --help +``` +```txt +Usage: op-reth stage unwind num-blocks [OPTIONS] + +Arguments: + + + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. 
+ + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/op-reth/stage/unwind/to-block.mdx b/docs/vocs/docs/pages/cli/op-reth/stage/unwind/to-block.mdx new file mode 100644 index 0000000000..9577e0dd76 --- /dev/null +++ b/docs/vocs/docs/pages/cli/op-reth/stage/unwind/to-block.mdx @@ -0,0 +1,152 @@ +# op-reth stage unwind to-block + +Unwinds the database from the latest block, until the given block number or hash has been reached, that block is not included + +```bash +$ op-reth stage unwind to-block --help +``` +```txt +Usage: op-reth stage unwind to-block [OPTIONS] + +Arguments: + + + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + optimism, optimism_sepolia, optimism-sepolia, base, base_sepolia, base-sepolia, arena-z, arena-z-sepolia, automata, base-devnet-0-sepolia-dev-0, bob, boba-sepolia, boba, camp-sepolia, celo, creator-chain-testnet-sepolia, cyber, cyber-sepolia, ethernity, ethernity-sepolia, fraxtal, funki, funki-sepolia, hashkeychain, ink, ink-sepolia, lisk, lisk-sepolia, lyra, metal, metal-sepolia, mint, mode, mode-sepolia, oplabs-devnet-0-sepolia-dev-0, orderly, ozean-sepolia, pivotal-sepolia, polynomial, race, race-sepolia, radius_testnet-sepolia, redstone, rehearsal-0-bn-0-rehearsal-0-bn, rehearsal-0-bn-1-rehearsal-0-bn, settlus-mainnet, settlus-sepolia-sepolia, shape, shape-sepolia, silent-data-mainnet, snax, soneium, soneium-minato-sepolia, sseed, swan, swell, tbn, tbn-sepolia, unichain, unichain-sepolia, worldchain, worldchain-sepolia, xterio-eth, zora, zora-sepolia, dev + + [default: optimism] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth.mdx b/docs/vocs/docs/pages/cli/reth.mdx index c35216d6b5..0287f2e47f 100644 --- a/docs/vocs/docs/pages/cli/reth.mdx +++ b/docs/vocs/docs/pages/cli/reth.mdx @@ -146,4 +146,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/config.mdx b/docs/vocs/docs/pages/cli/reth/config.mdx index 6b3c9e4b65..07d09a16da 100644 --- a/docs/vocs/docs/pages/cli/reth/config.mdx +++ b/docs/vocs/docs/pages/cli/reth/config.mdx @@ -132,4 +132,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db.mdx b/docs/vocs/docs/pages/cli/reth/db.mdx index feb902d493..da580f7e8e 100644 --- a/docs/vocs/docs/pages/cli/reth/db.mdx +++ b/docs/vocs/docs/pages/cli/reth/db.mdx @@ -9,17 +9,20 @@ $ reth db --help Usage: reth db [OPTIONS] Commands: - stats Lists all the tables, their entry count and their size - list Lists the contents of a table - checksum Calculates the content checksum of a table - diff Create a diff between two database tables or two entire databases - get Gets the content of a table for the given key - drop Deletes all database entries - clear Deletes all table entries - repair-trie Verifies trie consistency and outputs any inconsistencies - version Lists current and local database versions - path Returns the full database path - help Print this message or the help of the given subcommand(s) + stats Lists all the tables, their entry count and their size + list Lists the contents of a table + checksum Calculates the content checksum of a table + diff Create a diff between two database tables or two entire databases + get Gets the content of a table for the given key + drop Deletes all database entries + clear Deletes all table entries + repair-trie Verifies trie consistency and outputs any inconsistencies + static-file-header Reads and displays the static file segment header + version Lists current and local database versions + path Returns the full database path + settings Manage storage settings + account-storage Gets storage size information for an account + help Print this message or the help of the given subcommand(s) Options: -h, --help @@ -40,6 +43,9 @@ Datadir: --datadir.static-files The absolute path to store static files in. + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + --config The path to the configuration file to use @@ -72,7 +78,20 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. 
--db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -86,6 +105,33 @@ Database: --db.sync-mode Controls how aggressively the database synchronizes data to disk +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + Logging: --log.stdout.format The format to use for logs written to stdout @@ -200,4 +246,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/account-storage.mdx b/docs/vocs/docs/pages/cli/reth/db/account-storage.mdx new file mode 100644 index 0000000000..380291feb3 --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/db/account-storage.mdx @@ -0,0 +1,152 @@ +# reth db account-storage + +Gets storage size information for an account + +```bash +$ reth db account-storage --help +``` +```txt +Usage: reth db account-storage [OPTIONS]
+ +Arguments: +
+ The account address to check storage for + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. 
+ + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx index 4b8b8ca2cc..21ce752c42 100644 --- a/docs/vocs/docs/pages/cli/reth/db/checksum.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/checksum.mdx @@ -149,4 +149,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/clear.mdx b/docs/vocs/docs/pages/cli/reth/db/clear.mdx index 1548558fe3..3483b71f46 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear.mdx @@ -141,4 +141,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx index b48ba18098..9fbfb1e825 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx @@ -140,4 +140,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx index 9f22178ec4..61ad643752 100644 --- a/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/clear/static-file.mdx @@ -11,9 +11,10 @@ Usage: reth db clear static-file [OPTIONS] Arguments: Possible values: - - headers: Static File segment responsible for the `CanonicalHeaders`, `Headers`, `HeaderTerminalDifficulties` tables - - transactions: Static File segment responsible for the `Transactions` table - - receipts: Static File segment responsible for the `Receipts` table + - headers: Static File segment responsible for the `CanonicalHeaders`, `Headers`, `HeaderTerminalDifficulties` tables + - transactions: Static File segment responsible for the `Transactions` table + - receipts: Static File segment responsible for the `Receipts` table + - transaction-senders: Static File segment responsible for the `TransactionSenders` table Options: -h, --help @@ -143,4 +144,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. 
+ + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/diff.mdx b/docs/vocs/docs/pages/cli/reth/db/diff.mdx index 27cb2198aa..745211efa4 100644 --- a/docs/vocs/docs/pages/cli/reth/db/diff.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/diff.mdx @@ -35,7 +35,20 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -179,4 +192,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/drop.mdx b/docs/vocs/docs/pages/cli/reth/db/drop.mdx index c778320f2d..7529225295 100644 --- a/docs/vocs/docs/pages/cli/reth/db/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/drop.mdx @@ -139,4 +139,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/get.mdx b/docs/vocs/docs/pages/cli/reth/db/get.mdx index dfcfcac188..abb95ab6b4 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get.mdx @@ -141,4 +141,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx index 981d0c9f9a..1983cfe7b2 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/mdbx.mdx @@ -6,7 +6,7 @@ Gets the content of a database table for the given key $ reth db get mdbx --help ``` ```txt -Usage: reth db get mdbx [OPTIONS]
<TABLE> <KEY> [SUBKEY]
+Usage: reth db get mdbx [OPTIONS] <TABLE> <KEY> [SUBKEY] [END_KEY] [END_SUBKEY]

Arguments:
  <TABLE>
@@ -18,6 +18,12 @@ Arguments: [SUBKEY] The subkey to get content for + [END_KEY] + Optional end key for range query (exclusive upper bound) + + [END_SUBKEY] + Optional end subkey for range query (exclusive upper bound) + Options: --raw Output bytes instead of human-readable decoded value @@ -149,4 +155,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx index 8e045a4cdf..3db4a946c4 100644 --- a/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/get/static-file.mdx @@ -11,9 +11,10 @@ Usage: reth db get static-file [OPTIONS] Arguments: Possible values: - - headers: Static File segment responsible for the `CanonicalHeaders`, `Headers`, `HeaderTerminalDifficulties` tables - - transactions: Static File segment responsible for the `Transactions` table - - receipts: Static File segment responsible for the `Receipts` table + - headers: Static File segment responsible for the `CanonicalHeaders`, `Headers`, `HeaderTerminalDifficulties` tables + - transactions: Static File segment responsible for the `Transactions` table + - receipts: Static File segment responsible for the `Receipts` table + - transaction-senders: Static File segment responsible for the `TransactionSenders` table The key to get content for @@ -149,4 +150,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/list.mdx b/docs/vocs/docs/pages/cli/reth/db/list.mdx index 3be1cd183b..0c1f4bc857 100644 --- a/docs/vocs/docs/pages/cli/reth/db/list.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/list.mdx @@ -182,4 +182,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/path.mdx b/docs/vocs/docs/pages/cli/reth/db/path.mdx index a954093dd5..b0b2c3c754 100644 --- a/docs/vocs/docs/pages/cli/reth/db/path.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/path.mdx @@ -136,4 +136,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx index 6436afc213..528a24b090 100644 --- a/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/repair-trie.mdx @@ -12,6 +12,11 @@ Options: --dry-run Only show inconsistencies without making any repairs + --metrics + Enable Prometheus metrics. + + The metrics will be served at the given interface and port. + -h, --help Print help (see a summary with '-h') @@ -139,4 +144,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/settings.mdx b/docs/vocs/docs/pages/cli/reth/db/settings.mdx new file mode 100644 index 0000000000..19d707370e --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/db/settings.mdx @@ -0,0 +1,153 @@ +# reth db settings + +Manage storage settings + +```bash +$ reth db settings --help +``` +```txt +Usage: reth db settings [OPTIONS] + +Commands: + get Get current storage settings from database + set Set storage settings in database + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/settings/get.mdx b/docs/vocs/docs/pages/cli/reth/db/settings/get.mdx new file mode 100644 index 0000000000..9df0654725 --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/db/settings/get.mdx @@ -0,0 +1,148 @@ +# reth db settings get + +Get current storage settings from database + +```bash +$ reth db settings get --help +``` +```txt +Usage: reth db settings get [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/settings/set.mdx b/docs/vocs/docs/pages/cli/reth/db/settings/set.mdx new file mode 100644 index 0000000000..e4412b6178 --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/db/settings/set.mdx @@ -0,0 +1,153 @@ +# reth db settings set + +Set storage settings in database + +```bash +$ reth db settings set --help +``` +```txt +Usage: reth db settings set [OPTIONS] + +Commands: + receipts_in_static_files Store receipts in static files instead of the database + transaction_senders_in_static_files Store transaction senders in static files instead of the database + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. 
+ + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/recover/storage-tries.mdx b/docs/vocs/docs/pages/cli/reth/db/settings/set/receipts_in_static_files.mdx similarity index 60% rename from docs/vocs/docs/pages/cli/reth/recover/storage-tries.mdx rename to docs/vocs/docs/pages/cli/reth/db/settings/set/receipts_in_static_files.mdx index 701dd39368..e9dadf91ef 100644 --- a/docs/vocs/docs/pages/cli/reth/recover/storage-tries.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/settings/set/receipts_in_static_files.mdx @@ -1,35 +1,22 @@ -# reth recover storage-tries +# reth db settings set receipts_in_static_files -Recover the node by deleting dangling storage tries +Store receipts in static files instead of the database ```bash -$ reth recover storage-tries --help +$ reth db settings set receipts_in_static_files --help ``` ```txt -Usage: reth recover storage-tries [OPTIONS] +Usage: reth db settings set receipts_in_static_files [OPTIONS] + +Arguments: + + [possible values: true, false] Options: -h, --help Print help (see a summary with '-h') Datadir: - --datadir - The path to the data dir for all reth files and subdirectories. - - Defaults to the OS-specific data directory: - - - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - - Windows: `{FOLDERID_RoamingAppData}/reth/` - - macOS: `$HOME/Library/Application Support/reth/` - - [default: default] - - --datadir.static-files - The absolute path to store static files in. - - --config - The path to the configuration file to use - --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. @@ -39,37 +26,6 @@ Datadir: [default: mainnet] -Database: - --db.log-level - Database logging level. Levels higher than "notice" require a debug build - - Possible values: - - fatal: Enables logging for critical conditions, i.e. assertion failures - - error: Enables logging for error conditions - - warn: Enables logging for warning conditions - - notice: Enables logging for normal but significant condition - - verbose: Enables logging for verbose informational - - debug: Enables logging for debug-level messages - - trace: Enables logging for trace debug-level messages - - extra: Enables logging for extra debug-level messages - - --db.exclusive - Open environment in exclusive/monopolistic mode. 
Makes it possible to open a database on an NFS volume - - [possible values: true, false] - - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - - --db.read-transaction-timeout - Read transaction timeout in seconds, 0 means no timeout - - --db.max-readers - Maximum number of readers allowed to access the database concurrently - Logging: --log.stdout.format The format to use for logs written to stdout @@ -134,7 +90,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off [default: always] @@ -151,4 +107,46 @@ Display: -q, --quiet Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/settings/set/transaction_senders_in_static_files.mdx b/docs/vocs/docs/pages/cli/reth/db/settings/set/transaction_senders_in_static_files.mdx new file mode 100644 index 0000000000..7dd4bb91af --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/db/settings/set/transaction_senders_in_static_files.mdx @@ -0,0 +1,152 @@ +# reth db settings set transaction_senders_in_static_files + +Store transaction senders in static files instead of the database + +```bash +$ reth db settings set transaction_senders_in_static_files --help +``` +```txt +Usage: reth db settings set transaction_senders_in_static_files [OPTIONS] + +Arguments: + + [possible values: true, false] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/static-file-header.mdx b/docs/vocs/docs/pages/cli/reth/db/static-file-header.mdx new file mode 100644 index 0000000000..0343097545 --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/db/static-file-header.mdx @@ -0,0 +1,153 @@ +# reth db static-file-header + +Reads and displays the static file segment header + +```bash +$ reth db static-file-header --help +``` +```txt +Usage: reth db static-file-header [OPTIONS] + +Commands: + block Query by segment and block number + path Query by path to static file + help Print this message or the help of the given subcommand(s) + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. 
+ + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/static-file-header/block.mdx b/docs/vocs/docs/pages/cli/reth/db/static-file-header/block.mdx new file mode 100644 index 0000000000..edb3fb58dd --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/db/static-file-header/block.mdx @@ -0,0 +1,161 @@ +# reth db static-file-header block + +Query by segment and block number + +```bash +$ reth db static-file-header block --help +``` +```txt +Usage: reth db static-file-header block [OPTIONS] + +Arguments: + + Static file segment + + Possible values: + - headers: Static File segment responsible for the `CanonicalHeaders`, `Headers`, `HeaderTerminalDifficulties` tables + - transactions: Static File segment responsible for the `Transactions` table + - receipts: Static File segment responsible for the `Receipts` table + - transaction-senders: Static File segment responsible for the `TransactionSenders` table + + + Block number to query + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. 
This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/static-file-header/path.mdx b/docs/vocs/docs/pages/cli/reth/db/static-file-header/path.mdx new file mode 100644 index 0000000000..404f711082 --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/db/static-file-header/path.mdx @@ -0,0 +1,152 @@ +# reth db static-file-header path + +Query by path to static file + +```bash +$ reth db static-file-header path --help +``` +```txt +Usage: reth db static-file-header path [OPTIONS] + +Arguments: + + Path to the static file + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + [default: terminal] + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.name + The prefix name of the log files + + [default: reth.log] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + Possible values: + - always: Colors on + - auto: Auto-detect + - never: Colors off + + [default: always] + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output + +Tracing: + --tracing-otlp[=] + Enable `Opentelemetry` tracing export to an OTLP endpoint. + + If no value provided, defaults based on protocol: - HTTP: `http://localhost:4318/v1/traces` - gRPC: `http://localhost:4317` + + Example: --tracing-otlp=http://collector:4318/v1/traces + + [env: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=] + + --tracing-otlp-protocol + OTLP transport protocol to use for exporting traces. + + - `http`: expects endpoint path to end with `/v1/traces` - `grpc`: expects endpoint without a path + + Defaults to HTTP if not specified. + + Possible values: + - http: HTTP/Protobuf transport, port 4318, requires `/v1/traces` path + - grpc: gRPC transport, port 4317 + + [env: OTEL_EXPORTER_OTLP_PROTOCOL=] + [default: http] + + --tracing-otlp.filter + Set a filter directive for the OTLP tracer. This controls the verbosity of spans and events sent to the OTLP endpoint. It follows the same syntax as the `RUST_LOG` environment variable. + + Example: --tracing-otlp.filter=info,reth=debug,hyper_util=off + + Defaults to TRACE if not specified. + + [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/stats.mdx b/docs/vocs/docs/pages/cli/reth/db/stats.mdx index 5bd316847c..36ebe7938f 100644 --- a/docs/vocs/docs/pages/cli/reth/db/stats.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/stats.mdx @@ -9,6 +9,9 @@ $ reth db stats --help Usage: reth db stats [OPTIONS] Options: + --skip-consistency-checks + Skip consistency checks for static files + --detailed-sizes Show only the total size for static files @@ -149,4 +152,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/db/version.mdx b/docs/vocs/docs/pages/cli/reth/db/version.mdx index c87496d910..5e983356be 100644 --- a/docs/vocs/docs/pages/cli/reth/db/version.mdx +++ b/docs/vocs/docs/pages/cli/reth/db/version.mdx @@ -136,4 +136,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index e7e3b6c0df..9d897b8561 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -27,6 +27,9 @@ Datadir: --datadir.static-files The absolute path to store static files in. + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + --config The path to the configuration file to use @@ -59,7 +62,20 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -73,6 +89,33 @@ Database: --db.sync-mode Controls how aggressively the database synchronizes data to disk +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. 
+ + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + -u, --url Specify a snapshot URL or let the command propose a default one. @@ -81,7 +124,7 @@ Database: - https://publicnode.com/snapshots (full nodes & testnets) If no URL is provided, the latest mainnet archive snapshot - will be proposed for download from merkle.io + will be proposed for download from https://downloads.merkle.io Logging: --log.stdout.format @@ -197,4 +240,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx index 7aeaa8db49..905f2fbf4a 100644 --- a/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx +++ b/docs/vocs/docs/pages/cli/reth/dump-genesis.mdx @@ -135,4 +135,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx index a873781d9c..f43b294766 100644 --- a/docs/vocs/docs/pages/cli/reth/export-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -27,6 +27,9 @@ Datadir: --datadir.static-files The absolute path to store static files in. + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + --config The path to the configuration file to use @@ -59,7 +62,20 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. 
--db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -73,6 +89,33 @@ Database: --db.sync-mode Controls how aggressively the database synchronizes data to disk +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + --first-block-number Optional first block number to export from the db. It is by default 0. @@ -203,4 +246,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/import-era.mdx b/docs/vocs/docs/pages/cli/reth/import-era.mdx index 77e7883e1b..0528b83157 100644 --- a/docs/vocs/docs/pages/cli/reth/import-era.mdx +++ b/docs/vocs/docs/pages/cli/reth/import-era.mdx @@ -27,6 +27,9 @@ Datadir: --datadir.static-files The absolute path to store static files in. + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + --config The path to the configuration file to use @@ -59,7 +62,20 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. 
--db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -73,6 +89,33 @@ Database: --db.sync-mode Controls how aggressively the database synchronizes data to disk +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + --path The path to a directory for import. @@ -198,4 +241,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/import.mdx b/docs/vocs/docs/pages/cli/reth/import.mdx index 3976205164..79fc123c1f 100644 --- a/docs/vocs/docs/pages/cli/reth/import.mdx +++ b/docs/vocs/docs/pages/cli/reth/import.mdx @@ -27,6 +27,9 @@ Datadir: --datadir.static-files The absolute path to store static files in. + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + --config The path to the configuration file to use @@ -59,7 +62,20 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. 
--db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -73,6 +89,33 @@ Database: --db.sync-mode Controls how aggressively the database synchronizes data to disk +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + --no-state Disables stages that require state. @@ -199,4 +242,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/init-state.mdx b/docs/vocs/docs/pages/cli/reth/init-state.mdx index 7e97d08716..471672c7a8 100644 --- a/docs/vocs/docs/pages/cli/reth/init-state.mdx +++ b/docs/vocs/docs/pages/cli/reth/init-state.mdx @@ -27,6 +27,9 @@ Datadir: --datadir.static-files The absolute path to store static files in. + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + --config The path to the configuration file to use @@ -59,7 +62,20 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. 
--db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -73,6 +89,33 @@ Database: --db.sync-mode Controls how aggressively the database synchronizes data to disk +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + --without-evm Specifies whether to initialize the state without relying on EVM historical data. @@ -219,4 +262,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/init.mdx b/docs/vocs/docs/pages/cli/reth/init.mdx index bf9dd671db..4e55d4f169 100644 --- a/docs/vocs/docs/pages/cli/reth/init.mdx +++ b/docs/vocs/docs/pages/cli/reth/init.mdx @@ -27,6 +27,9 @@ Datadir: --datadir.static-files The absolute path to store static files in. + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + --config The path to the configuration file to use @@ -59,7 +62,20 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. 
--db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -73,6 +89,33 @@ Database: --db.sync-mode Controls how aggressively the database synchronizes data to disk +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + Logging: --log.stdout.format The format to use for logs written to stdout @@ -187,4 +230,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 6f8b6ae88a..38b59183e4 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -71,6 +71,9 @@ Datadir: --datadir.static-files The absolute path to store static files in. + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + Networking: -d, --disable-discovery Disable the discovery service @@ -160,6 +163,11 @@ Networking: This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. + --p2p-secret-key-hex + Hex encoded secret key to use for this node. + + This will also deterministically set the peer ID. Cannot be used together with `--p2p-secret-key`. + --no-persist-peers Do not persist peers. @@ -179,10 +187,15 @@ Networking: [default: 30303] --max-outbound-peers - Maximum number of outbound requests. default: 100 + Maximum number of outbound peers. default: 100 --max-inbound-peers - Maximum number of inbound requests. default: 30 + Maximum number of inbound peers. default: 30 + + --max-peers + Maximum number of total peers (inbound + outbound). + + Splits peers using approximately 2:1 inbound:outbound ratio. Cannot be used together with `--max-outbound-peers` or `--max-inbound-peers`. --max-tx-reqs Max concurrent `GetPooledTransactions` requests. @@ -245,6 +258,13 @@ Networking: [default: All] + --tx-ingress-policy + Transaction ingress policy + + Determines which peers' transactions are accepted over P2P. + + [default: All] + --disable-tx-gossip Disable transaction pool gossip @@ -258,11 +278,18 @@ Networking: [default: sqrt] --required-block-hashes - Comma separated list of required block hashes. 
Peers that don't have these blocks will be filtered out + Comma separated list of required block hashes or block number=hash pairs. Peers that don't have these blocks will be filtered out. Format: hash or `block_number=hash` (e.g., 23115201=0x1234...) --network-id Optional network ID to override the chain specification's network ID for P2P connections + --netrestrict + Restrict network communication to the given IP networks (CIDR masks). + + Comma separated list of CIDR network specifications. Only peers with IP addresses within these ranges will be allowed to connect. + + Example: --netrestrict "192.168.0.0/16,10.0.0.0/8" + RPC: --http Enable the HTTP-RPC server @@ -283,7 +310,7 @@ RPC: --http.api Rpc Modules to be configured for the HTTP server - [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots, miner, mev] + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots, miner, mev, testing] --http.corsdomain Http Corsdomain to allow request from @@ -307,7 +334,7 @@ RPC: --ws.api Rpc Modules to be configured for the WS server - [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots, miner, mev] + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots, miner, mev, testing] --ipcdisable Disable the IPC-RPC server @@ -385,6 +412,13 @@ RPC: [default: ] + --rpc.max-blocking-io-requests + Maximum number of concurrent blocking IO requests. + + Blocking IO requests include `eth_call`, `eth_estimateGas`, and similar methods that require EVM execution. These are spawned as blocking tasks to avoid blocking the async runtime. + + [default: 256] + --rpc.max-trace-filter-blocks Maximum number of blocks for `trace_filter` requests @@ -405,6 +439,11 @@ RPC: [default: 50000000] + --rpc.evm-memory-limit + Maximum memory the EVM can allocate per RPC request + + [default: 4294967295] + --rpc.txfeecap Maximum eth transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap) @@ -532,6 +571,9 @@ TxPool: --txpool.blob-cache-size Max number of entries for the in memory cache of the blob store + --txpool.disable-blobs-support + Disable EIP-4844 blob transaction support + --txpool.max-account-slots Max number of executable transaction slots guaranteed per account @@ -644,6 +686,9 @@ Builder: [default: 3] + --builder.max-blobs + Maximum number of blobs to include per block + Debug: --debug.terminate Flag indicating whether the node should be terminated after the pipeline sync @@ -696,6 +741,11 @@ Debug: --ethstats The URL of the ethstats server to connect to. Example: `nodename:secret@host:port` + --debug.startup-sync-state-idle + Set the node to idle state when the backfill is not running. + + This makes the `eth_syncing` RPC return "Idle" when the node has just started or finished the backfill, but did not yet receive any new blocks. + Database: --db.log-level Database logging level. Levels higher than "notice" require a debug build @@ -716,7 +766,20 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. 
MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -791,6 +854,9 @@ Pruning: --prune.receipts.before Prune receipts before the specified block number. The specified block number is not pruned + --prune.receiptslogfilter + Configure receipts log filter. Format: <`address`>:<`prune_mode`>... where <`prune_mode`> can be 'full', 'distance:<`blocks`>', or 'before:<`block_number`>' + --prune.account-history.full Prunes all account history @@ -820,7 +886,9 @@ Pruning: Engine: --engine.persistence-threshold - Configure persistence threshold for engine experimental + Configure persistence threshold for the engine. This determines how many canonical blocks must be in-memory, ahead of the last persisted block, before flushing canonical blocks to disk again. + + To persist blocks as fast as the node receives them, set this value to zero. This will cause more frequent DB writes. [default: 2] @@ -832,6 +900,9 @@ Engine: --engine.legacy-state-root Enable legacy state root + --engine.disable-state-cache + Disable state cache + --engine.disable-prewarming Disable parallel prewarming @@ -858,7 +929,7 @@ Engine: --engine.multiproof-chunk-size Multiproof task chunk size for proof targets - [default: 10] + [default: 60] --engine.reserved-cpu-cores Configure the number of reserved CPU cores for non-reth processes @@ -900,6 +971,33 @@ ERA: The ERA1 files are read from the remote host using HTTP GET requests parsing headers and bodies. +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + Ress: --ress.enable Enable support for `ress` subprotocol @@ -1038,4 +1136,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p.mdx b/docs/vocs/docs/pages/cli/reth/p2p.mdx index 7b37fdfdaa..9ceba951c1 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p.mdx @@ -133,4 +133,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx index bbe6b375e5..ccbdfdcbfe 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx @@ -106,6 +106,11 @@ Networking: This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. + --p2p-secret-key-hex + Hex encoded secret key to use for this node. + + This will also deterministically set the peer ID. Cannot be used together with `--p2p-secret-key`. + --no-persist-peers Do not persist peers. @@ -125,10 +130,15 @@ Networking: [default: 30303] --max-outbound-peers - Maximum number of outbound requests. default: 100 + Maximum number of outbound peers. default: 100 --max-inbound-peers - Maximum number of inbound requests. default: 30 + Maximum number of inbound peers. default: 30 + + --max-peers + Maximum number of total peers (inbound + outbound). + + Splits peers using approximately 2:1 inbound:outbound ratio. Cannot be used together with `--max-outbound-peers` or `--max-inbound-peers`. --max-tx-reqs Max concurrent `GetPooledTransactions` requests. @@ -191,6 +201,13 @@ Networking: [default: All] + --tx-ingress-policy + Transaction ingress policy + + Determines which peers' transactions are accepted over P2P. + + [default: All] + --disable-tx-gossip Disable transaction pool gossip @@ -204,11 +221,18 @@ Networking: [default: sqrt] --required-block-hashes - Comma separated list of required block hashes. Peers that don't have these blocks will be filtered out + Comma separated list of required block hashes or block number=hash pairs. Peers that don't have these blocks will be filtered out. Format: hash or `block_number=hash` (e.g., 23115201=0x1234...) --network-id Optional network ID to override the chain specification's network ID for P2P connections + --netrestrict + Restrict network communication to the given IP networks (CIDR masks). + + Comma separated list of CIDR network specifications. Only peers with IP addresses within these ranges will be allowed to connect. + + Example: --netrestrict "192.168.0.0/16,10.0.0.0/8" + Datadir: --datadir The path to the data dir for all reth files and subdirectories. @@ -224,6 +248,9 @@ Datadir: --datadir.static-files The absolute path to store static files in. + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + --config The path to the configuration file to use. @@ -353,4 +380,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx index 324b01daac..79335dfd92 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx @@ -144,4 +144,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx index 533bd71de2..f83bb8f14a 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx @@ -106,6 +106,11 @@ Networking: This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. + --p2p-secret-key-hex + Hex encoded secret key to use for this node. + + This will also deterministically set the peer ID. Cannot be used together with `--p2p-secret-key`. + --no-persist-peers Do not persist peers. @@ -125,10 +130,15 @@ Networking: [default: 30303] --max-outbound-peers - Maximum number of outbound requests. default: 100 + Maximum number of outbound peers. default: 100 --max-inbound-peers - Maximum number of inbound requests. default: 30 + Maximum number of inbound peers. default: 30 + + --max-peers + Maximum number of total peers (inbound + outbound). + + Splits peers using approximately 2:1 inbound:outbound ratio. Cannot be used together with `--max-outbound-peers` or `--max-inbound-peers`. --max-tx-reqs Max concurrent `GetPooledTransactions` requests. @@ -191,6 +201,13 @@ Networking: [default: All] + --tx-ingress-policy + Transaction ingress policy + + Determines which peers' transactions are accepted over P2P. + + [default: All] + --disable-tx-gossip Disable transaction pool gossip @@ -204,11 +221,18 @@ Networking: [default: sqrt] --required-block-hashes - Comma separated list of required block hashes. Peers that don't have these blocks will be filtered out + Comma separated list of required block hashes or block number=hash pairs. Peers that don't have these blocks will be filtered out. Format: hash or `block_number=hash` (e.g., 23115201=0x1234...) --network-id Optional network ID to override the chain specification's network ID for P2P connections + --netrestrict + Restrict network communication to the given IP networks (CIDR masks). + + Comma separated list of CIDR network specifications. Only peers with IP addresses within these ranges will be allowed to connect. + + Example: --netrestrict "192.168.0.0/16,10.0.0.0/8" + Datadir: --datadir The path to the data dir for all reth files and subdirectories. @@ -224,6 +248,9 @@ Datadir: --datadir.static-files The absolute path to store static files in. + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + --config The path to the configuration file to use. @@ -353,4 +380,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. 
+ + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx index a8ac7fbd0d..ee5d70b5fa 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx @@ -130,4 +130,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx index 2d13663029..3bf3599145 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx @@ -130,4 +130,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/prune.mdx b/docs/vocs/docs/pages/cli/reth/prune.mdx index 2d586edd5c..e5b143e133 100644 --- a/docs/vocs/docs/pages/cli/reth/prune.mdx +++ b/docs/vocs/docs/pages/cli/reth/prune.mdx @@ -27,6 +27,9 @@ Datadir: --datadir.static-files The absolute path to store static files in. + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + --config The path to the configuration file to use @@ -59,7 +62,20 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -73,6 +89,33 @@ Database: --db.sync-mode Controls how aggressively the database synchronizes data to disk +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. 
+ + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + Logging: --log.stdout.format The format to use for logs written to stdout @@ -187,4 +230,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx index e07b3f542c..4b455a9bff 100644 --- a/docs/vocs/docs/pages/cli/reth/re-execute.mdx +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -27,6 +27,9 @@ Datadir: --datadir.static-files The absolute path to store static files in. + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + --config The path to the configuration file to use @@ -59,7 +62,20 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -73,6 +89,33 @@ Database: --db.sync-mode Controls how aggressively the database synchronizes data to disk +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. 
Once the node has been initialized, changing this flag requires re-syncing from scratch. + --from The height to start at @@ -86,6 +129,9 @@ Database: [default: 10] + --skip-invalid-blocks + Continues with execution when an invalid block is encountered and collects these blocks + Logging: --log.stdout.format The format to use for logs written to stdout @@ -200,4 +246,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage.mdx b/docs/vocs/docs/pages/cli/reth/stage.mdx index 006c6c7434..67ca5866f4 100644 --- a/docs/vocs/docs/pages/cli/reth/stage.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage.mdx @@ -133,4 +133,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx index c14db19c58..55ebb4725e 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/drop.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/drop.mdx @@ -27,6 +27,9 @@ Datadir: --datadir.static-files The absolute path to store static files in. + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + --config The path to the configuration file to use @@ -59,7 +62,20 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -73,6 +89,33 @@ Database: --db.sync-mode Controls how aggressively the database synchronizes data to disk +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. 
Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + Possible values: - headers: The headers stage within the pipeline @@ -202,4 +245,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx index c29547401b..d0f2666979 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump.mdx @@ -34,6 +34,9 @@ Datadir: --datadir.static-files The absolute path to store static files in. + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + --config The path to the configuration file to use @@ -66,7 +69,20 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -80,6 +96,33 @@ Database: --db.sync-mode Controls how aggressively the database synchronizes data to disk +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. 
+ Logging: --log.stdout.format The format to use for logs written to stdout @@ -194,4 +237,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx index 70fad94ea3..80348194ce 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx @@ -148,4 +148,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx index bed5d33329..a48c7e65db 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/execution.mdx @@ -148,4 +148,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx index 3bada103c8..203751e12f 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx @@ -148,4 +148,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx index 723a54e927..1431798792 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx @@ -148,4 +148,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/run.mdx b/docs/vocs/docs/pages/cli/reth/stage/run.mdx index f3e4ccc0e0..f0e4f06bba 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/run.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/run.mdx @@ -27,6 +27,9 @@ Datadir: --datadir.static-files The absolute path to store static files in. + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + --config The path to the configuration file to use @@ -59,7 +62,20 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. --db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -73,6 +89,33 @@ Database: --db.sync-mode Controls how aggressively the database synchronizes data to disk +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + --metrics Enable Prometheus metrics. @@ -97,6 +140,8 @@ Database: Useful when you want to run diagnostics on the database. + NOTE: This flag is currently required for the headers, bodies, and execution stages because they use static files and must commit to properly unwind and run. + --checkpoints Save stage checkpoints @@ -206,6 +251,11 @@ Networking: This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. + --p2p-secret-key-hex + Hex encoded secret key to use for this node. + + This will also deterministically set the peer ID. Cannot be used together with `--p2p-secret-key`. + --no-persist-peers Do not persist peers. @@ -225,10 +275,15 @@ Networking: [default: 30303] --max-outbound-peers - Maximum number of outbound requests. default: 100 + Maximum number of outbound peers. default: 100 --max-inbound-peers - Maximum number of inbound requests. 
default: 30 + Maximum number of inbound peers. default: 30 + + --max-peers + Maximum number of total peers (inbound + outbound). + + Splits peers using approximately 2:1 inbound:outbound ratio. Cannot be used together with `--max-outbound-peers` or `--max-inbound-peers`. --max-tx-reqs Max concurrent `GetPooledTransactions` requests. @@ -291,6 +346,13 @@ Networking: [default: All] + --tx-ingress-policy + Transaction ingress policy + + Determines which peers' transactions are accepted over P2P. + + [default: All] + --disable-tx-gossip Disable transaction pool gossip @@ -304,11 +366,18 @@ Networking: [default: sqrt] --required-block-hashes - Comma separated list of required block hashes. Peers that don't have these blocks will be filtered out + Comma separated list of required block hashes or block number=hash pairs. Peers that don't have these blocks will be filtered out. Format: hash or `block_number=hash` (e.g., 23115201=0x1234...) --network-id Optional network ID to override the chain specification's network ID for P2P connections + --netrestrict + Restrict network communication to the given IP networks (CIDR masks). + + Comma separated list of CIDR network specifications. Only peers with IP addresses within these ranges will be allowed to connect. + + Example: --netrestrict "192.168.0.0/16,10.0.0.0/8" + Logging: --log.stdout.format The format to use for logs written to stdout @@ -423,4 +492,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx index 8bb44279f8..2ba873f682 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind.mdx @@ -32,6 +32,9 @@ Datadir: --datadir.static-files The absolute path to store static files in. + --datadir.rocksdb + The absolute path to store `RocksDB` database in. + --config The path to the configuration file to use @@ -64,7 +67,20 @@ Database: [possible values: true, false] --db.max-size - Maximum database size (e.g., 4TB, 8MB) + Maximum database size (e.g., 4TB, 8TB). + + This sets the "map size" of the database. If the database grows beyond this limit, the node will stop with an "environment map size limit reached" error. + + The default value is 8TB. + + --db.page-size + Database page size (e.g., 4KB, 8KB, 16KB). + + Specifies the page size used by the MDBX database. + + The page size determines the maximum database size. MDBX supports up to 2^31 pages, so with the default 4KB page size, the maximum database size is 8TB. To allow larger databases, increase this value to 8KB or higher. + + WARNING: This setting is only configurable at database creation; changing it later requires re-syncing. 
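To make the relationship described for `--db.page-size` concrete, here is a small back-of-the-envelope sketch (not part of reth) that derives the size cap from the 2^31-page limit mentioned above:

```rust
fn main() {
    // MDBX can address at most 2^31 pages, so the page size caps the total database size.
    let max_pages: u64 = 1 << 31;
    for page_size_kib in [4u64, 8, 16] {
        let max_bytes = max_pages * page_size_kib * 1024;
        let max_tib = max_bytes / (1u64 << 40);
        println!("{page_size_kib} KB pages -> max database size {max_tib} TiB");
    }
}
```

With the default 4KB pages this comes out to 8TiB, matching the documented 8TB ceiling; 8KB and 16KB pages raise the cap to 16TiB and 32TiB respectively.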
--db.growth-step Database growth step (e.g., 4GB, 4KB) @@ -78,6 +94,33 @@ Database: --db.sync-mode Controls how aggressively the database synchronizes data to disk +Static Files: + --static-files.blocks-per-file.headers + Number of blocks per file for the headers segment + + --static-files.blocks-per-file.transactions + Number of blocks per file for the transactions segment + + --static-files.blocks-per-file.receipts + Number of blocks per file for the receipts segment + + --static-files.blocks-per-file.transaction-senders + Number of blocks per file for the transaction senders segment + + --static-files.receipts + Store receipts in static files instead of the database. + + When enabled, receipts will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + + --static-files.transaction-senders + Store transaction senders in static files instead of the database. + + When enabled, transaction senders will be written to static files on disk instead of the database. + + Note: This setting can only be configured at genesis initialization. Once the node has been initialized, changing this flag requires re-syncing from scratch. + --offline If this is enabled, then all stages except headers, bodies, and sender recovery will be unwound @@ -195,4 +238,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx index b04e1920b7..adad84db51 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx @@ -140,4 +140,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. + + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx index 2c22f8127c..133c0b0124 100644 --- a/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx +++ b/docs/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx @@ -140,4 +140,13 @@ Tracing: Defaults to TRACE if not specified. [default: debug] + + --tracing-otlp.sample-ratio + Trace sampling ratio to control the percentage of traces to export. + + Valid range: 0.0 to 1.0 - 1.0, default: Sample all traces - 0.01: Sample 1% of traces - 0.0: Disable sampling + + Example: --tracing-otlp.sample-ratio=0.0. 
+ + [env: OTEL_TRACES_SAMPLER_ARG=] ``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/test-vectors/tables.mdx b/docs/vocs/docs/pages/cli/reth/test-vectors/tables.mdx index 2a3023817b..d5623133bf 100644 --- a/docs/vocs/docs/pages/cli/reth/test-vectors/tables.mdx +++ b/docs/vocs/docs/pages/cli/reth/test-vectors/tables.mdx @@ -95,7 +95,7 @@ Logging: Possible values: - always: Colors on - - auto: Colors on + - auto: Auto-detect - never: Colors off Display: @@ -110,4 +110,4 @@ Display: -q, --quiet Silence all log output -``` \ No newline at end of file +``` diff --git a/docs/vocs/docs/pages/exex/hello-world.mdx b/docs/vocs/docs/pages/exex/hello-world.mdx index 30eac91ee9..25263ec0ea 100644 --- a/docs/vocs/docs/pages/exex/hello-world.mdx +++ b/docs/vocs/docs/pages/exex/hello-world.mdx @@ -73,7 +73,7 @@ Now, let's extend our simplest ExEx and start actually listening to new notifica Woah, there's a lot of new stuff here! Let's go through it step by step: -- First, we've added a `while let Some(notification) = ctx.notifications.recv().await` loop that waits for new notifications to come in. +- First, we've added a `while let Some(notification) = ctx.notifications.try_next().await?` loop that waits for new notifications to come in. - The main node is responsible for sending notifications to the ExEx, so we're waiting for them to come in. - Next, we've added a `match ¬ification { ... }` block that matches on the type of the notification. - In each case, we're logging the notification and the corresponding block range, be it a chain commit, revert, or reorg. diff --git a/docs/vocs/docs/pages/exex/how-it-works.mdx b/docs/vocs/docs/pages/exex/how-it-works.mdx index 21162a7562..72c7eb3dce 100644 --- a/docs/vocs/docs/pages/exex/how-it-works.mdx +++ b/docs/vocs/docs/pages/exex/how-it-works.mdx @@ -4,6 +4,35 @@ description: How Execution Extensions (ExExes) work in Reth. # How do ExExes work? +## Architecture + +```mermaid +sequenceDiagram + participant Reth + participant ExEx + + Note over Reth,ExEx: Normal Flow + Reth->>ExEx: ChainCommit Notification + activate ExEx + ExEx->>ExEx: Process Block Data + ExEx->>Reth: FinishedHeight Event + deactivate ExEx + + Note over Reth,ExEx: Reorg Flow + Reth->>ExEx: ChainReorg Notification + activate ExEx + ExEx->>ExEx: Rollback & Re-process + ExEx->>Reth: New FinishedHeight Event + deactivate ExEx + + Note over Reth,ExEx: Revert Flow + Reth->>ExEx: ChainRevert Notification + activate ExEx + ExEx->>ExEx: Rollback & Re-process + ExEx->>Reth: New FinishedHeight Event + deactivate ExEx +``` + ExExes are just [Futures](https://doc.rust-lang.org/std/future/trait.Future.html) that run indefinitely alongside Reth – as simple as that. diff --git a/docs/vocs/docs/pages/exex/overview.mdx b/docs/vocs/docs/pages/exex/overview.mdx index abfcc8f3b8..85ad5f259e 100644 --- a/docs/vocs/docs/pages/exex/overview.mdx +++ b/docs/vocs/docs/pages/exex/overview.mdx @@ -17,6 +17,27 @@ initiated by Reth. Read more about things you can build with Execution Extensions in the [Paradigm blog](https://www.paradigm.xyz/2024/05/reth-exex). 
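As a companion to the `ctx.notifications.try_next()` change described for the hello-world ExEx above, here is a minimal sketch of the notification loop it refers to. This is an illustration rather than the canonical example: the crate paths (`reth_exex`, `reth_node_api`, `reth_tracing`, `futures_util`, `eyre`) and the exact payload of `ExExEvent::FinishedHeight` may differ between reth versions.

```rust
use futures_util::TryStreamExt;
use reth_exex::{ExExContext, ExExEvent, ExExNotification};
use reth_node_api::FullNodeComponents;
use reth_tracing::tracing::info;

async fn my_exex<Node: FullNodeComponents>(mut ctx: ExExContext<Node>) -> eyre::Result<()> {
    // Wait for new notifications from the main node; `try_next` surfaces stream errors via `?`.
    while let Some(notification) = ctx.notifications.try_next().await? {
        match &notification {
            ExExNotification::ChainCommitted { new } => {
                info!(committed_chain = ?new.range(), "Received commit");
            }
            ExExNotification::ChainReorged { old, new } => {
                info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg");
            }
            ExExNotification::ChainReverted { old } => {
                info!(reverted_chain = ?old.range(), "Received revert");
            }
        };

        // Report the highest processed block back to the node (the `FinishedHeight` event
        // shown in the sequence diagram above), so it can safely prune older notifications.
        if let Some(committed_chain) = notification.committed_chain() {
            ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?;
        }
    }

    Ok(())
}
```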
+## Architecture + +```mermaid +graph LR + subgraph "Reth Process" + Reth[Reth Core] + + Reth -->|Notifications| ExEx1[ExEx 1] + Reth -->|Notifications| ExEx2[ExEx 2] + Reth -->|Notifications| ExEx3[ExEx N] + + ExEx1 -->|Events| Reth + ExEx2 -->|Events| Reth + ExEx3 -->|Events| Reth + end + + ExEx1 --> External1[External System 1] + ExEx2 --> External2[External System 2] + ExEx3 --> External3[External System N] +``` + ## What Execution Extensions are not Execution Extensions are not separate processes that connect to the main Reth node process. diff --git a/docs/vocs/docs/pages/installation/source.mdx b/docs/vocs/docs/pages/installation/source.mdx index a7e1a2c33c..0666b818de 100644 --- a/docs/vocs/docs/pages/installation/source.mdx +++ b/docs/vocs/docs/pages/installation/source.mdx @@ -37,7 +37,7 @@ operating system: These are needed to build bindings for Reth's database. -The Minimum Supported Rust Version (MSRV) of this project is 1.80.0. If you already have a version of Rust installed, +The Minimum Supported Rust Version (MSRV) of this project is 1.88.0. If you already have a version of Rust installed, you can check your version by running `rustc --version`. To update your version of Rust, run `rustup update`. ## Build Reth diff --git a/docs/vocs/docs/pages/jsonrpc/admin.mdx b/docs/vocs/docs/pages/jsonrpc/admin.mdx index 481a4f76d7..f8dd678ebc 100644 --- a/docs/vocs/docs/pages/jsonrpc/admin.mdx +++ b/docs/vocs/docs/pages/jsonrpc/admin.mdx @@ -114,6 +114,54 @@ These include general information about the node itself, as well as what protoco } ``` +## `admin_peers` + +Returns information about peers currently known to the node. + +| Client | Method invocation | +| ------ | ------------------------------ | +| RPC | `{"method": "admin_peers", "params": []}` | + +### Example + +```js +// > {"jsonrpc":"2.0","id":1,"method":"admin_peers","params":[]} +{"jsonrpc":"2.0","id":1,"result":[ + { + "id":"44826a5d6a55f88a18298bca4773fca...", + "name":"reth/v0.0.1/x86_64-unknown-linux-gnu", + "enode":"enode://44826a5d6a55f88a18298bca4773fca5749cdc3a5c9f308aa7d810e9b31123f3e7c5fba0b1d70aac5308426f47df2a128a6747040a3815cc7dd7167d03be320d@192.168.1.1:30303", + "enr":"enr:-IS4QHCYr...", + "caps":["eth/67"], + "network":{ + "remoteAddress":"192.168.1.1:30303", + "localAddress":"127.0.0.1:30303", + "inbound":false, + "trusted":false, + "staticNode":false + }, + "protocols":{ + "eth":{"version":67} + } + } +]} +``` + +## `admin_clearTxpool` + +Clears all transactions from the transaction pool. Returns the number of removed transactions. + +| Client | Method invocation | +| ------ | ----------------------------------------- | +| RPC | `{"method": "admin_clearTxpool", "params": []}` | + +### Example + +```js +// > {"jsonrpc":"2.0","id":1,"method":"admin_clearTxpool","params":[]} +{"jsonrpc":"2.0","id":1,"result":42} +``` + ## `admin_peerEvents`, `admin_peerEvents_unsubscribe` Subscribe to events received by peers over the network. This creates a subscription that emits notifications about peer connections and disconnections. diff --git a/docs/vocs/docs/pages/jsonrpc/debug.mdx b/docs/vocs/docs/pages/jsonrpc/debug.mdx index 5b435d7dca..a6541267e1 100644 --- a/docs/vocs/docs/pages/jsonrpc/debug.mdx +++ b/docs/vocs/docs/pages/jsonrpc/debug.mdx @@ -29,6 +29,14 @@ Returns an EIP-2718 binary-encoded transaction. 
| ------ | ------------------------------------------------------------ | | RPC | `{"method": "debug_getRawTransaction", "params": [tx_hash]}` | +## `debug_getRawTransactions` + +Returns an array of EIP-2718 binary-encoded transactions for the given block. + +| Client | Method invocation | +| ------ | -------------------------------------------------------------- | +| RPC | `{"method": "debug_getRawTransactions", "params": [block]}` | + ## `debug_getRawReceipts` Returns an array of EIP-2718 binary-encoded receipts. @@ -102,3 +110,47 @@ The block can optionally be specified either by hash or by number as the second | Client | Method invocation | | ------ | --------------------------------------------------------------------- | | RPC | `{"method": "debug_traceCall", "params": [call, block_number, opts]}` | + +## `debug_traceCallMany` + +The `debug_traceCallMany` method lets you run multiple `eth_call`s within the context of the given block execution using the final state of the parent block as the base, followed by n transactions. + +The first argument is a list of bundles. Each bundle can overwrite the block headers, which will affect all transactions in that bundle. The trace can be configured similar to `debug_traceTransaction`. + +This method returns nested lists of traces, where the outer list length is the number of bundles and the inner list length is the number of transactions in each bundle. + +| Client | Method invocation | +| ------ | ---------------------------------------------------------------------------------- | +| RPC | `{"method": "debug_traceCallMany", "params": [bundles, state_context, opts]}` | + +## `debug_executionWitness` + +Allows for re-execution of a block with the purpose of generating an execution witness. The witness comprises a map of all hashed trie nodes to their preimages that were required during the execution of the block, including during state root recomputation. + +| Client | Method invocation | +| ------ | ------------------------------------------------------------ | +| RPC | `{"method": "debug_executionWitness", "params": [block]}` | + +## `debug_executionWitnessByBlockHash` + +Similar to [`debug_executionWitness`](#debug_executionwitness), but accepts a block hash instead of a block number or tag. + +| Client | Method invocation | +| ------ | ---------------------------------------------------------------------- | +| RPC | `{"method": "debug_executionWitnessByBlockHash", "params": [hash]}` | + +## `debug_dbGet` + +Retrieves a raw value from the database. + +| Client | Method invocation | +| ------ | -------------------------------------------------- | +| RPC | `{"method": "debug_dbGet", "params": [key]}` | + +## `debug_storageRangeAt` + +Returns the storage at the given block height and transaction index. The result can be paged by providing a `maxResult` to cap the number of storage slots returned as well as specifying the offset via `keyStart`. 
+ +| Client | Method invocation | +| ------ | ------------------------------------------------------------------------------------------------- | +| RPC | `{"method": "debug_storageRangeAt", "params": [block_hash, tx_index, address, key_start, limit]}` | diff --git a/docs/vocs/docs/pages/jsonrpc/intro.mdx b/docs/vocs/docs/pages/jsonrpc/intro.mdx index 93cccf4692..89a9fa7ef5 100644 --- a/docs/vocs/docs/pages/jsonrpc/intro.mdx +++ b/docs/vocs/docs/pages/jsonrpc/intro.mdx @@ -26,8 +26,14 @@ The methods are grouped into namespaces, which are listed below: | [`trace`](/jsonrpc/trace) | The `trace` API provides several methods to inspect the Ethereum state, including Parity-style traces. | No | | [`admin`](/jsonrpc/admin) | The `admin` API allows you to configure your node. | **Yes** | | [`rpc`](/jsonrpc/rpc) | The `rpc` API provides information about the RPC server and its modules. | No | +| [`reth`](/jsonrpc/reth) | The `reth` API provides reth-specific methods like balance changes and chain notifications. | No | +| `ots` | The `ots` API provides Otterscan-compatible methods for block exploration. | No | +| `flashbots` | The `flashbots` API provides block submission validation methods for builders. | No | +| `miner` | The `miner` API allows you to configure miner/builder settings like extra data and gas limits. | **Yes** | +| `mev` | The `mev` API provides MEV bundle submission and simulation methods. | No | +| `testing` | The `testing` API provides methods for building blocks in a single call (testing only). | **Yes** | -Note that some APIs are sensitive, since they can be used to configure your node (`admin`), or access accounts stored on the node (`eth`). +Note that some APIs are sensitive, since they can be used to configure your node (`admin`, `miner`), access accounts stored on the node (`eth`), or perform testing operations (`testing`). Generally, it is advisable to not expose any JSONRPC namespace publicly, unless you know what you are doing. @@ -61,7 +67,7 @@ To enable JSON-RPC namespaces on the HTTP server, pass each namespace separated reth node --http --http.api eth,net,trace ``` -You can pass the `all` option, which is a convenient wrapper for all the JSON-RPC namespaces `admin,debug,eth,net,trace,txpool,web3,rpc` on the HTTP server: +You can pass the `all` option, which is a convenient wrapper for all the JSON-RPC namespaces `admin,debug,eth,net,trace,txpool,web3,rpc,reth,ots,flashbots,miner,mev,testing` on the HTTP server: ```bash reth node --http --http.api all @@ -117,7 +123,7 @@ You can use `curl`, a programming language with a low-level library, or a tool l As a reminder, you need to run the command below to enable all of these APIs using an HTTP transport: ```bash -reth node --http --http.api "admin,debug,eth,net,trace,txpool,web3,rpc" +reth node --http --http.api "admin,debug,eth,net,trace,txpool,web3,rpc,reth,ots,flashbots,miner,mev,testing" ``` This allows you to then call: diff --git a/docs/vocs/docs/pages/jsonrpc/reth.mdx b/docs/vocs/docs/pages/jsonrpc/reth.mdx new file mode 100644 index 0000000000..4a7f7d48f5 --- /dev/null +++ b/docs/vocs/docs/pages/jsonrpc/reth.mdx @@ -0,0 +1,89 @@ +--- +description: Reth-specific API for balance changes and chain notifications. +--- + +# `reth` Namespace + +The `reth` API provides reth-specific methods that are not part of the standard Ethereum JSON-RPC specification. These methods offer additional functionality for monitoring balance changes and subscribing to chain state notifications. 
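As a rough illustration of how a monitoring tool might consume this namespace, the sketch below calls `reth_getBalanceChangesInBlock` (documented next) over HTTP. It assumes a local node started with the namespace enabled (e.g. `--http --http.api reth`) plus the `reqwest` (blocking and json features) and `serde_json` crates; it is not part of the reth codebase.

```rust
use serde_json::{json, Value};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::blocking::Client::new();
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "reth_getBalanceChangesInBlock",
        "params": ["latest"]
    });

    let response: Value = client
        .post("http://localhost:8545")
        .json(&request)
        .send()?
        .json()?;

    // The result maps each touched address to its new balance after the block executed.
    if let Some(changes) = response.get("result").and_then(Value::as_object) {
        for (address, balance) in changes {
            println!("{address} -> {balance}");
        }
    }

    Ok(())
}
```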
+ +## `reth_getBalanceChangesInBlock` + +Returns all ETH balance changes that occurred in a specific block. + +This method is useful for tracking value transfers, mining rewards, and other balance modifications without having to trace every transaction in a block. + +The method accepts a block identifier (number, hash, or tag like `latest`) and returns a map of addresses to their new balances. + +| Client | Method invocation | +| ------ | -------------------------------------------------------------- | +| RPC | `{"method": "reth_getBalanceChangesInBlock", "params": [block]}` | + +### Example + +```js +// > {"jsonrpc":"2.0","id":1,"method":"reth_getBalanceChangesInBlock","params":["latest"]} +{"jsonrpc":"2.0","id":1,"result":{"0x95222290dd7278aa3ddd389cc1e1d165cc4bafe5":"0x1bc16d674ec80000","0x388c818ca8b9251b393131c08a736a67ccb19297":"0x0"}} +``` + +The result is a mapping of addresses to their new balance after the block was executed. Only addresses whose balance changed during block execution are included. + +## `reth_subscribeChainNotifications`, `reth_unsubscribeChainNotifications` + +Subscribe to canonical chain state notifications. This creates a subscription that emits notifications whenever the canonical chain state changes. + +Like other subscription methods, this returns the ID of the subscription, which is then used in all events subsequently. + +To unsubscribe from chain notifications, call `reth_unsubscribeChainNotifications` with the subscription ID. + +| Client | Method invocation | +| ------ | -------------------------------------------------------------------------- | +| RPC | `{"method": "reth_subscribeChainNotifications", "params": []}` | +| RPC | `{"method": "reth_unsubscribeChainNotifications", "params": [subscription_id]}` | + +### Event Types + +The subscription emits events with the following structure: + +```json +{ + "jsonrpc": "2.0", + "method": "reth_subscription", + "params": { + "subscription": "0xcd0c3e8af590364c09d0fa6a1210faf5", + "result": { + "Commit": { // or "Reorg" + "new": { + // Chain segment with blocks, receipts, etc. + }, + "old": { + // Only present for "Reorg": chain segment that was reverted + } + } + } + } +} +``` + +- **Commit**: New blocks are added to the canonical chain. Contains only `new` with the committed chain segment. +- **Reorg**: Chain reorganization occurred. Contains both `old` (reverted blocks) and `new` (replacement blocks). + +This is particularly useful for applications that need to react immediately to chain state changes, such as indexers, monitoring tools, or ExEx (Execution Extensions). + +### Example + +```js +// > {"jsonrpc":"2.0","id":1,"method":"reth_subscribeChainNotifications","params":[]} +// responds with subscription ID +{"jsonrpc":"2.0","id":1,"result":"0xcd0c3e8af590364c09d0fa6a1210faf5"} + +// Example notification when new blocks are committed +{"jsonrpc":"2.0","method":"reth_subscription","params":{"subscription":"0xcd0c3e8af590364c09d0fa6a1210faf5","result":{"Commit":{"new":{"blocks":[...],"receipts":[...],"first_block":1000,"last_block":1000}}}}} + +// Unsubscribe +// > {"jsonrpc":"2.0","id":2,"method":"reth_unsubscribeChainNotifications","params":["0xcd0c3e8af590364c09d0fa6a1210faf5"]} +{"jsonrpc":"2.0","id":2,"result":true} +``` + +:::note +This subscription is only available over WebSocket and IPC transports, as HTTP does not support server-initiated messages. 
+::: diff --git a/docs/vocs/docs/pages/jsonrpc/trace.mdx b/docs/vocs/docs/pages/jsonrpc/trace.mdx index 182b6c2f70..06a31c15d1 100644 --- a/docs/vocs/docs/pages/jsonrpc/trace.mdx +++ b/docs/vocs/docs/pages/jsonrpc/trace.mdx @@ -176,9 +176,13 @@ The second parameter is an array of one or more trace types (`vmTrace`, `trace`, The third and optional parameter is a block number, block hash, or a block tag (`latest`, `finalized`, `safe`, `earliest`, `pending`). -| Client | Method invocation | -| ------ | -------------------------------------------------------------- | -| RPC | `{"method": "trace_callMany", "params": [trace[], block]}` | +The fourth and optional parameter is a `stateOverrides` object that temporarily overrides account state used for the trace (balances, nonces, code, storage). + +The fifth and optional parameter is a `blockOverrides` object that temporarily overrides block fields used for the trace (for example `timestamp`, `baseFee`, `number`). + +| Client | Method invocation | +| ------ | ------------------------------------------------------------------------------------ | +| RPC | `{"method": "trace_call", "params": [tx, trace[], block, stateOverrides, blockOverrides]}` | ### Example @@ -222,7 +226,7 @@ The second and optional parameter is a block number, block hash, or a block tag | Client | Method invocation | | ------ | ---------------------------------------------------------- | -| RPC | `{"method": "trace_callMany", "params": [trace[], block]}` | +| RPC | `{"method": "trace_callMany", "params": [[[tx, type[]], ...], block]}` | ### Example @@ -502,7 +506,7 @@ Returns trace at given position. | Client | Method invocation | | ------ | -------------------------------------------------------- | -| RPC | `{"method": "trace_get", "params": [tx_hash,indices[]]}` | +| RPC | `{"method": "trace_get", "params": [tx_hash, indices[]]}` | ### Example @@ -521,13 +525,13 @@ Returns trace at given position. "value": "0x0" }, "blockHash": "0x7eb25504e4c202cf3d62fd585d3e238f592c780cca82dacb2ed3cb5b38883add", - "blockNumber": 3068185, - "result": { + "blockNumber": 3068185, + "result": { "gasUsed": "0x183", "output": "0x0000000000000000000000000000000000000000000000000000000000000001" }, "subtraces": 0, - "traceAddress": [ + "traceAddress": [ 0 ], "transactionHash": "0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3", @@ -579,3 +583,58 @@ Returns all traces of given transaction ] } ``` + +## `trace_transactionOpcodeGas` + +Returns opcode gas usage aggregated per opcode for a single transaction in no particular order. + +| Client | Method invocation | +| ------ | --------------------------------------------------------------------- | +| RPC | `{"method": "trace_transactionOpcodeGas", "params": [tx_hash]}` | + +### Example + +```js +// > {"jsonrpc":"2.0","id":1,"method":"trace_transactionOpcodeGas","params":["0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3"]} +{ + "id": 1, + "jsonrpc": "2.0", + "result": { + "transactionHash": "0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3", + "opcodeGas": [ + { "opcode": "PUSH1", "count": 10, "gasUsed": 30 }, + { "opcode": "CALL", "count": 1, "gasUsed": 700 } + ] + } +} +``` + +## `trace_blockOpcodeGas` + +Returns opcode gas usage aggregated per opcode for every transaction in a block. 
+ +| Client | Method invocation | +| ------ | ---------------------------------------------------------------- | +| RPC | `{"method": "trace_blockOpcodeGas", "params": [block]}` | + +### Example + +```js +// > {"jsonrpc":"2.0","id":1,"method":"trace_blockOpcodeGas","params":["latest"]} +{ + "id": 1, + "jsonrpc": "2.0", + "result": { + "blockHash": "0x7eb25504e4c202cf3d62fd585d3e238f592c780cca82dacb2ed3cb5b38883add", + "blockNumber": 3068185, + "transactions": [ + { + "transactionHash": "0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3", + "opcodeGas": [ + { "opcode": "PUSH1", "count": 10, "gasUsed": 30 } + ] + } + ] + } +} +``` diff --git a/docs/vocs/docs/pages/run/configuration.mdx b/docs/vocs/docs/pages/run/configuration.mdx index 8f34cfc691..4dfb9d6bb3 100644 --- a/docs/vocs/docs/pages/run/configuration.mdx +++ b/docs/vocs/docs/pages/run/configuration.mdx @@ -15,6 +15,7 @@ The default data directory is platform dependent: The configuration file contains the following sections: - [`[stages]`](#the-stages-section) -- Configuration of the individual sync stages + - [`era`](#era) - [`headers`](#headers) - [`bodies`](#bodies) - [`sender_recovery`](#sender_recovery) @@ -25,12 +26,15 @@ The configuration file contains the following sections: - [`transaction_lookup`](#transaction_lookup) - [`index_account_history`](#index_account_history) - [`index_storage_history`](#index_storage_history) + - [`etl`](#etl) + - [`prune`](#prune) - [`[peers]`](#the-peers-section) - [`connection_info`](#connection_info) - [`reputation_weights`](#reputation_weights) - [`backoff_durations`](#backoff_durations) - [`[sessions]`](#the-sessions-section) - [`[prune]`](#the-prune-section) +- [`[static_files]`](#the-static_files-section) ## The `[stages]` section @@ -38,6 +42,20 @@ The stages section is used to configure how individual stages in reth behave, wh The defaults shipped with Reth try to be relatively reasonable, but may not be optimal for your specific set of hardware. +### `era` + +The ERA stage configures pre-synced ERA1 data ingestion, either from a local directory or a remote host. + +```toml +[stages.era] +# Use a local directory containing ERA1 files (conflicts with `url`) +path = "/path/to/era1" +# Or download ERA1 files from a host (conflicts with `path`) +# url = "https://example.com/era1/" +# When using `url`, specify a temporary download folder +# folder = "/path/to/reth/era" +``` + ### `headers` The headers section controls both the behavior of the header stage, which downloads historical headers, as well as the primary downloader that fetches headers over P2P. @@ -111,7 +129,7 @@ The sender recovery stage recovers the address of transaction senders using tran # # Lower thresholds correspond to more frequent disk I/O (writes), # but lowers memory usage -commit_threshold = 100000 +commit_threshold = 5000000 ``` ### `execution` @@ -129,7 +147,7 @@ max_blocks = 500000 # The maximum number of state changes to keep in memory before the execution stage commits. max_changes = 5000000 # The maximum cumulative amount of gas to process before the execution stage commits. -max_cumulative_gas = 1500000000000 # 30_000_000 * 50_000_000 +max_cumulative_gas = 1500000000 # 30_000_000 * 50_000 # The maximum time spent on blocks processing before the execution stage commits. max_duration = '10m' ``` @@ -138,6 +156,16 @@ For all thresholds specified, the first to be hit will determine when the result Lower values correspond to more frequent disk writes, but also lower memory consumption. 
A lower value also negatively impacts sync speed, since reth keeps a cache around for the entire duration of blocks executed in the same range. +### `prune` + +Controls how frequently the prune stage commits its progress. + +```toml +[stages.prune] +# The maximum number of entries to prune before committing progress to the database. +commit_threshold = 1_000_000 +``` + ### `account_hashing` The account hashing stage builds a secondary table of accounts, where the key is the hash of the address instead of the raw address. @@ -180,10 +208,13 @@ The merkle stage uses the indexes built in the hashing stages (storage and accou ```toml [stages.merkle] +# The number of blocks to run the incremental root method for when catching up. +# When syncing a large number of blocks, incremental root building is limited +# to prevent memory issues. +incremental_threshold = 7000 # The threshold in number of blocks before the stage starts from scratch -# and re-computes the state root, discarding the trie that has already been built, -# as opposed to incrementally updating the trie. -clean_threshold = 5000 +# and rebuilds the entire trie, discarding the existing trie. +rebuild_threshold = 100000 ``` ### `transaction_lookup` @@ -231,6 +262,8 @@ An ETL (extract, transform, load) data collector. Used mainly to insert data int ```toml [stages.etl] +# Optional directory for temporary files used by ETL. Defaults to `datadir/etl-tmp` when unset. +# dir = "/path/to/reth/etl-tmp" # The maximum size in bytes of data held in memory before being flushed to disk as a file. # # Lower threshold corresponds to more frequent flushes, @@ -248,14 +281,20 @@ In the top level of the section you can configure trusted nodes, and how often r [peers] # How often reth will attempt to make outgoing connections, # if there is room for more peers -refill_slots_interval = '1s' +refill_slots_interval = '5s' # A list of ENRs for trusted peers, which are peers reth will always try to connect to. trusted_nodes = [] # Whether reth will only attempt to connect to the peers specified above, # or if it will connect to other peers in the network connect_trusted_nodes_only = false +# Maximum number of backoff attempts before we drop a non-trusted peer +max_backoff_count = 5 +# DNS resolution refresh interval for trusted nodes +trusted_nodes_resolution_interval = '1h' # The duration for which a badly behaving peer is banned ban_duration = '12h' +# Temporary per-IP throttle for inbound connection attempts +incoming_ip_throttle_duration = '30s' ``` ### `connection_info` @@ -268,6 +307,8 @@ This section configures how many peers reth will connect to. max_outbound = 100 # The maximum number of inbound peers (peers that connect to us) max_inbound = 30 +# The maximum number of concurrent outbound dials performed at once +max_concurrent_outbound_dials = 15 ``` ### `reputation_weights` @@ -288,6 +329,7 @@ timeout = -4096 bad_protocol = -2147483648 failed_to_connect = -25600 dropped = -4096 +bad_announcement = -1024 ``` ### `backoff_durations` @@ -332,6 +374,22 @@ secs = 120 nanos = 0 ``` +Additionally, you can configure when pending sessions time out, and enforce optional per-state limits. 
+ +```toml +# Timeout after which a pending session attempt is considered failed +[sessions.pending_session_timeout] +secs = 20 +nanos = 0 + +# Optional limits (no limits are enforced by default when unset) +[sessions.limits] +max_pending_inbound = 100 +max_pending_outbound = 50 +max_established_inbound = 100 +max_established_outbound = 50 +``` + ## The `[prune]` section The prune section configures the pruning configuration. @@ -348,9 +406,10 @@ No pruning, run as archive node. This configuration will: - Run pruning every 5 blocks -- Continuously prune all transaction senders, account history and storage history before the block `head-100_000`, +- Continuously prune all transaction senders, account history, storage history and bodies history before the block `head-100_000`, i.e. keep the data for the last `100_000` blocks - Prune all receipts before the block 1920000, i.e. keep receipts from the block 1920000 +- Keep the last 128 blocks of merkle changesets (default behavior) ```toml [prune] @@ -372,6 +431,14 @@ account_history = { distance = 100_000 } # Prune all historical account states b # Storage History pruning configuration storage_history = { distance = 100_000 } # Prune all historical storage states before the block `head-100000` + +# Bodies History pruning configuration +bodies_history = { distance = 100_000 } # Prune all historical block bodies before the block `head-100000` + +# Merkle Changesets pruning configuration +# Controls pruning of AccountsTrieChangeSets and StoragesTrieChangeSets. +# Default: { distance = 128 } - keeps the last 128 blocks of merkle changesets +merkle_changesets = { distance = 128 } ``` We can also prune receipts more granular, using the logs filtering: @@ -387,4 +454,18 @@ We can also prune receipts more granular, using the logs filtering: "0xdac17f958d2ee523a2206206994597c13d831ec7" = { distance = 1000 } ``` +## The `[static_files]` section + +Configure static file segmentation. + +```toml +[static_files.blocks_per_file] +# Number of blocks per file for each segment (optional) +# Values must be greater than 0 if set +headers = 8192 +transactions = 8192 +receipts = 8192 +transaction_senders = 8192 +``` + [TOML]: https://toml.io/ diff --git a/docs/vocs/docs/pages/run/ethereum.mdx b/docs/vocs/docs/pages/run/ethereum.mdx index ef6f558a97..b8e4671538 100644 --- a/docs/vocs/docs/pages/run/ethereum.mdx +++ b/docs/vocs/docs/pages/run/ethereum.mdx @@ -88,7 +88,7 @@ In the meantime, consider setting up [observability](/run/monitoring) to monitor {/* TODO: Add more logs to help node operators debug any weird CL to EL messages! */} -[installation]: ./../../installation/overview +[installation]: ../installation/overview [docs]: https://github.com/paradigmxyz/reth/tree/main/docs [metrics]: https://github.com/paradigmxyz/reth/blob/main/docs/design/metrics.md#metrics diff --git a/docs/vocs/docs/pages/run/faq/ports.mdx b/docs/vocs/docs/pages/run/faq/ports.mdx index f9a3ba9950..469eb73e00 100644 --- a/docs/vocs/docs/pages/run/faq/ports.mdx +++ b/docs/vocs/docs/pages/run/faq/ports.mdx @@ -13,6 +13,13 @@ This section provides essential information about the ports used by the system, - **Purpose:** Peering with other nodes for synchronization of blockchain data. Nodes communicate through this port to maintain network consensus and share updated information. - **Exposure Recommendation:** This port should be exposed to enable seamless interaction and synchronization with other nodes in the network. 
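To confirm that the peering port is actually reachable once the node is running, standard tooling is enough. A minimal sketch, assuming the default port `30303` and common Linux utilities (`ss`, `nc`); adjust the port if you changed it:

```bash
# Check that reth is listening locally on the default peering port (TCP and UDP)
ss -tuln | grep 30303

# From a different machine, verify the TCP side is reachable
# (<node-ip> is a placeholder for your node's public address)
nc -vz <node-ip> 30303
```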
+## Discovery v5 Port + +- **Port:** `9200` +- **Protocol:** UDP +- **Purpose:** Used for discv5 peer discovery protocol. This is a newer discovery protocol that can be enabled with `--enable-discv5-discovery`. It operates independently from the legacy discv4 discovery on port 30303. +- **Exposure Recommendation:** This port should be exposed if discv5 discovery is enabled to allow peer discovery. + ## Metrics Port - **Port:** `9001` diff --git a/docs/vocs/docs/pages/run/faq/profiling.mdx b/docs/vocs/docs/pages/run/faq/profiling.mdx index 123808ad2d..39f1ec706d 100644 --- a/docs/vocs/docs/pages/run/faq/profiling.mdx +++ b/docs/vocs/docs/pages/run/faq/profiling.mdx @@ -33,7 +33,7 @@ Reth includes a `jemalloc` feature to explicitly use jemalloc instead of the sys cargo build --features jemalloc ``` -While the `jemalloc` feature does enable jemalloc, reth has an additional feature, `profiling`, that must be used to enable heap profiling. This feature implicitly enables the `jemalloc` +While the `jemalloc` feature does enable jemalloc, reth has an additional feature, `jemalloc-prof`, that must be used to enable heap profiling. This feature implicitly enables the `jemalloc` feature as well: ``` diff --git a/docs/vocs/docs/pages/run/faq/pruning.mdx b/docs/vocs/docs/pages/run/faq/pruning.mdx index 6f646b2ee7..139b9a2233 100644 --- a/docs/vocs/docs/pages/run/faq/pruning.mdx +++ b/docs/vocs/docs/pages/run/faq/pruning.mdx @@ -127,18 +127,21 @@ The following tables describe RPC methods available in the full node. #### `debug` namespace -| RPC | Note | -| -------------------------- | ---------------------------------------------------------- | -| `debug_getRawBlock` | | -| `debug_getRawHeader` | | -| `debug_getRawReceipts` | Only for the last 10064 blocks and Beacon Deposit Contract | -| `debug_getRawTransaction` | | -| `debug_traceBlock` | Only for the last 10064 blocks | -| `debug_traceBlockByHash` | Only for the last 10064 blocks | -| `debug_traceBlockByNumber` | Only for the last 10064 blocks | -| `debug_traceCall` | Only for the last 10064 blocks | -| `debug_traceCallMany` | Only for the last 10064 blocks | -| `debug_traceTransaction` | Only for the last 10064 blocks | +| RPC | Note | +| ----------------------------------- | ---------------------------------------------------------- | +| `debug_executionWitness` | Only for the last 10064 blocks | +| `debug_executionWitnessByBlockHash` | Only for the last 10064 blocks | +| `debug_getBadBlocks` | | +| `debug_getRawBlock` | | +| `debug_getRawHeader` | | +| `debug_getRawReceipts` | Only for the last 10064 blocks and Beacon Deposit Contract | +| `debug_getRawTransaction` | | +| `debug_traceBlock` | Only for the last 10064 blocks | +| `debug_traceBlockByHash` | Only for the last 10064 blocks | +| `debug_traceBlockByNumber` | Only for the last 10064 blocks | +| `debug_traceCall` | Only for the last 10064 blocks | +| `debug_traceCallMany` | Only for the last 10064 blocks | +| `debug_traceTransaction` | Only for the last 10064 blocks | #### `eth` namespace @@ -201,13 +204,16 @@ The following tables describe RPC methods available in the full node. 
| RPC / Segment | Note | | ------------------------------- | ------------------------------ | | `trace_block` | Only for the last 10064 blocks | +| `trace_blockOpcodeGas` | Only for the last 10064 blocks | | `trace_call` | Only for the last 10064 blocks | | `trace_callMany` | Only for the last 10064 blocks | +| `trace_filter` | Only for the last 10064 blocks | | `trace_get` | Only for the last 10064 blocks | | `trace_rawTransaction` | Only for the last 10064 blocks | | `trace_replayBlockTransactions` | Only for the last 10064 blocks | | `trace_replayTransaction` | Only for the last 10064 blocks | | `trace_transaction` | Only for the last 10064 blocks | +| `trace_transactionOpcodeGas` | Only for the last 10064 blocks | #### `txpool` namespace @@ -227,18 +233,21 @@ The following tables describe the requirements for prune segments, per RPC metho #### `debug` namespace -| RPC / Segment | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | -| -------------------------- | --------------- | ------------------ | -------- | --------------- | --------------- | -| `debug_getRawBlock` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `debug_getRawHeader` | ✅ | ✅ | ✅ | ✅ | ✅ | -| `debug_getRawReceipts` | ✅ | ✅ | ❌ | ✅ | ✅ | -| `debug_getRawTransaction` | ✅ | ❌ | ✅ | ✅ | ✅ | -| `debug_traceBlock` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `debug_traceBlockByHash` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `debug_traceBlockByNumber` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `debug_traceCall` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `debug_traceCallMany` | ✅ | ✅ | ✅ | ❌ | ❌ | -| `debug_traceTransaction` | ✅ | ✅ | ✅ | ❌ | ❌ | +| RPC / Segment | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | +| ----------------------------------- | --------------- | ------------------ | -------- | --------------- | --------------- | +| `debug_executionWitness` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_executionWitnessByBlockHash` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_getBadBlocks` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `debug_getRawBlock` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `debug_getRawHeader` | ✅ | ✅ | ✅ | ✅ | ✅ | +| `debug_getRawReceipts` | ✅ | ✅ | ❌ | ✅ | ✅ | +| `debug_getRawTransaction` | ✅ | ❌ | ✅ | ✅ | ✅ | +| `debug_traceBlock` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_traceBlockByHash` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_traceBlockByNumber` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_traceCall` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_traceCallMany` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `debug_traceTransaction` | ✅ | ✅ | ✅ | ❌ | ❌ | #### `eth` namespace @@ -301,13 +310,16 @@ The following tables describe the requirements for prune segments, per RPC metho | RPC / Segment | Sender Recovery | Transaction Lookup | Receipts | Account History | Storage History | | ------------------------------- | --------------- | ------------------ | -------- | --------------- | --------------- | | `trace_block` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `trace_blockOpcodeGas` | ✅ | ✅ | ✅ | ❌ | ❌ | | `trace_call` | ✅ | ✅ | ✅ | ❌ | ❌ | | `trace_callMany` | ✅ | ✅ | ✅ | ❌ | ❌ | +| `trace_filter` | ✅ | ✅ | ✅ | ❌ | ❌ | | `trace_get` | ✅ | ❌ | ✅ | ❌ | ❌ | | `trace_rawTransaction` | ✅ | ✅ | ✅ | ❌ | ❌ | | `trace_replayBlockTransactions` | ✅ | ✅ | ✅ | ❌ | ❌ | | `trace_replayTransaction` | ✅ | ❌ | ✅ | ❌ | ❌ | | `trace_transaction` | ✅ | ❌ | ✅ | ❌ | ❌ | +| `trace_transactionOpcodeGas` | ✅ | ❌ | ✅ | ❌ | ❌ | #### `txpool` namespace diff --git a/docs/vocs/docs/pages/run/faq/troubleshooting.mdx b/docs/vocs/docs/pages/run/faq/troubleshooting.mdx index 1f26cba9da..1c8b4f0893 100644 --- a/docs/vocs/docs/pages/run/faq/troubleshooting.mdx +++ 
b/docs/vocs/docs/pages/run/faq/troubleshooting.mdx @@ -24,7 +24,7 @@ This page tries to answer how to deal with the most popular issues. Externally accessing a `datadir` inside a named docker volume will usually come with folder/file ownership/permissions issues. -**It is not recommended** to use the path to the named volume as it will trigger an error code 13. `RETH_DB_PATH: /var/lib/docker/volumes/named_volume/_data/eth/db cargo r --examples db-access --path ` is **DISCOURAGED** and a mounted volume with the right permissions should be used instead. +**It is not recommended** to use the path to the named volume as it will trigger an error code 13. For example, `RETH_DATADIR=/var/lib/docker/volumes/named_volume/_data/eth cargo run -p db-access` is **DISCOURAGED** and a mounted volume with the right permissions should be used instead. ### Error code 13 diff --git a/docs/vocs/docs/pages/run/monitoring.mdx b/docs/vocs/docs/pages/run/monitoring.mdx index d6c7343609..925a98bf7c 100644 --- a/docs/vocs/docs/pages/run/monitoring.mdx +++ b/docs/vocs/docs/pages/run/monitoring.mdx @@ -10,10 +10,10 @@ Reth exposes a number of metrics which can be enabled by adding the `--metrics` reth node --metrics 127.0.0.1:9001 ``` -Alternatively, you can export metrics to an OpenTelemetry collector using `--otlp-metrics`: +Additionally, you can export spans to an OpenTelemetry collector using `--tracing-otlp`: ```bash -reth node --otlp-metrics 127.0.0.1:4318 +reth node --tracing-otlp=http://localhost:4318/v1/traces ``` Now, as the node is running, you can `curl` the endpoint you provided to the `--metrics` flag to get a text dump of the metrics at that time: @@ -22,7 +22,7 @@ Now, as the node is running, you can `curl` the endpoint you provided to the `-- curl 127.0.0.1:9001 ``` -The response from this is quite descriptive, but it can be a bit verbose. Plus, it's just a static_file of the metrics at the time that you `curl`ed the endpoint. +The response from this is quite descriptive, but it can be a bit verbose. Plus, it's just a static file of the metrics at the time that you `curl`ed the endpoint. You can run the following command in a separate terminal to periodically poll the endpoint, and just print the values (without the header text) to the terminal: @@ -138,13 +138,41 @@ To configure the dashboard in Grafana, click on the squares icon in the upper le And voilà, you should see your dashboard! If you're not yet connected to any peers, the dashboard will look like it's in an empty state, but once you are, you should see it start populating with data. +## Observability with OTLP + +Reth supports OTLP via the `tracing` crate, meaning logs and traces can be exported to OpenTelemetry backends. For example, [Grafana Tempo](https://grafana.com/oss/tempo/) and [Jaeger Tracing](https://www.jaegertracing.io/) can be used to query and explore traces and logs from reth. + +If you already have a backend set up on your infrastructure, you can point reth to export its traces by providing the `--tracing-otlp` argument. + +To run Jaeger locally, you can read the [Jaeger setup docs](https://www.jaegertracing.io/docs/2.11/getting-started/). 
+This should run Jaeger with the otlp port `4318` and the dashboard port `16686`: +```bash +docker run --rm --name jaeger \ + -p 16686:16686 \ + -p 4317:4317 \ + -p 4318:4318 \ + -p 5778:5778 \ + -p 9411:9411 \ + cr.jaegertracing.io/jaegertracing/jaeger:2.11.0 +``` + +Now we can provide the `--tracing-otlp` argument with reth: + +```bash +reth node --tracing-otlp=http://localhost:4317 --tracing-otlp-protocol grpc +``` + +The traces reth exported should now be searchable and viewable on http://localhost:16686. + +For environments where reth is processing a high number of transactions or blocks, it may be a good idea to bump `OTEL_BLRP_MAX_QUEUE_SIZE`, which has a default of `2048`. This controls how many log records can be recorded before batching and exporting. If this is set to too low of a value, spans and events may be dropped by the exporter. + ## Conclusion In this runbook, we took you through starting the node, exposing different log levels, exporting metrics, and finally viewing those metrics in a Grafana dashboard. This will all be very useful to you, whether you're simply running a home node and want to keep an eye on its performance, or if you're a contributor and want to see the effect that your (or others') changes have on Reth's operations. -[installation]: ../installation/installation +[installation]: ../installation/overview [release-profile]: https://doc.rust-lang.org/cargo/reference/profiles.html#release [docs]: https://github.com/paradigmxyz/reth/tree/main/docs [metrics]: https://reth.rs/run/observability.html diff --git a/docs/vocs/docs/pages/run/opstack.mdx b/docs/vocs/docs/pages/run/opstack.mdx index d472485be6..3d929c3f94 100644 --- a/docs/vocs/docs/pages/run/opstack.mdx +++ b/docs/vocs/docs/pages/run/opstack.mdx @@ -57,16 +57,25 @@ For the sake of this tutorial, we'll use the reference implementation of the Rol op-reth supports additional OP Stack specific CLI arguments: -1. `--rollup.sequencer-http ` - The sequencer endpoint to connect to. Transactions sent to the `op-reth` EL are also forwarded to this sequencer endpoint for inclusion, as the sequencer is the entity that builds blocks on OP Stack chains. +1. `--rollup.sequencer ` - The sequencer endpoint to connect to. Transactions sent to the `op-reth` EL are also forwarded to this sequencer endpoint for inclusion, as the sequencer is the entity that builds blocks on OP Stack chains. Aliases: `--rollup.sequencer-http`, `--rollup.sequencer-ws`. 1. `--rollup.disable-tx-pool-gossip` - Disables gossiping of transactions in the mempool to peers. This can be omitted for personal nodes, though providers should always opt to enable this flag. 1. `--rollup.discovery.v4` - Enables the discovery v4 protocol for peer discovery. By default, op-reth, similar to op-geth, has discovery v5 enabled and discovery v4 disabled, whereas regular reth has discovery v4 enabled and discovery v5 disabled. +1. `--rollup.compute-pending-block` - Enables computing of the pending block from the tx-pool instead of using the latest block. By default the pending block equals the latest block to save resources and not leak txs from the tx-pool. +1. `--rollup.enable-tx-conditional` - Enable transaction conditional support on sequencer. +1. `--rollup.supervisor-http ` - HTTP endpoint for the interop supervisor. +1. `--rollup.supervisor-safety-level ` - Safety level for the supervisor (default: `CrossUnsafe`). +1. `--rollup.sequencer-headers ` - Optional headers to use when connecting to the sequencer. Requires `--rollup.sequencer`. +1. 
`--rollup.historicalrpc ` - RPC endpoint for historical data. Alias: `--rollup.historical-rpc`. +1. `--min-suggested-priority-fee ` - Minimum suggested priority fee (tip) in wei (default: `1000000`). +1. `--flashblocks-url ` - A URL pointing to a secure websocket subscription that streams out flashblocks. If given, the flashblocks are received to build pending block. +1. `--flashblock-consensus` - Enable flashblock consensus client to drive the chain forward. Requires `--flashblocks-url`. -First, ensure that your L1 archival node is running and synced to tip. Also make sure that the beacon node / consensus layer client is running and has http APIs enabled. Then, start `op-reth` with the `--rollup.sequencer-http` flag set to the `Base Mainnet` sequencer endpoint: +First, ensure that your L1 archival node is running and synced to tip. Also make sure that the beacon node / consensus layer client is running and has http APIs enabled. Then, start `op-reth` with the `--rollup.sequencer` flag set to the `Base Mainnet` sequencer endpoint: ```sh op-reth node \ --chain base \ - --rollup.sequencer-http https://mainnet-sequencer.base.org \ + --rollup.sequencer https://mainnet-sequencer.base.org \ --http \ --ws \ --authrpc.port 9551 \ diff --git a/docs/vocs/docs/pages/run/overview.mdx b/docs/vocs/docs/pages/run/overview.mdx index d603a7be64..1a0b888ba3 100644 --- a/docs/vocs/docs/pages/run/overview.mdx +++ b/docs/vocs/docs/pages/run/overview.mdx @@ -43,5 +43,5 @@ Find answers to common questions and troubleshooting tips: | Base Sepolia | 84532 | https://base-sepolia.drpc.org | :::tip -Want to add more networks to this table? Feel free to [contribute](https://github.com/paradigmxyz/reth/edit/main/book/vocs/docs/pages/run/overview.mdx) by submitting a PR with additional networks that Reth supports! +Want to add more networks to this table? Feel free to [contribute](https://github.com/paradigmxyz/reth/edit/main/docs/vocs/docs/pages/run/overview.mdx) by submitting a PR with additional networks that Reth supports! ::: diff --git a/docs/vocs/docs/pages/sdk/custom-node/transactions.mdx b/docs/vocs/docs/pages/sdk/custom-node/transactions.mdx index 52881a368f..407ebcfdec 100644 --- a/docs/vocs/docs/pages/sdk/custom-node/transactions.mdx +++ b/docs/vocs/docs/pages/sdk/custom-node/transactions.mdx @@ -67,7 +67,7 @@ pub enum CustomTransaction { /// A regular Optimism transaction as defined by [`OpTxEnvelope`]. #[envelope(flatten)] Op(OpTxEnvelope), - /// A [`TxPayment`] tagged with type 0x7E. + /// A [`TxPayment`] tagged with type 0x2A (decimal 42). #[envelope(ty = 42)] Payment(Signed), } @@ -178,7 +178,7 @@ pub enum CustomPooledTransaction { /// A regular Optimism transaction as defined by [`OpPooledTransaction`]. #[envelope(flatten)] Op(OpPooledTransaction), - /// A [`TxPayment`] tagged with type 0x7E. + /// A [`TxPayment`] tagged with type 0x2A (decimal 42). #[envelope(ty = 42)] Payment(Signed), } diff --git a/docs/vocs/docs/pages/sdk/node-components.mdx b/docs/vocs/docs/pages/sdk/node-components.mdx index d569d499dd..f53310698b 100644 --- a/docs/vocs/docs/pages/sdk/node-components.mdx +++ b/docs/vocs/docs/pages/sdk/node-components.mdx @@ -53,44 +53,56 @@ Provides external API access to the node: ## Component Customization -Each component can be customized through Reth's builder pattern: +Each component can be customized through Reth's builder pattern. 
You can replace individual components +while keeping the defaults for others using `EthereumNode::components()`: ```rust -use reth_ethereum::node::{EthereumNode, NodeBuilder}; +use reth_ethereum::{ + cli::interface::Cli, + node::{ + node::EthereumAddOns, + EthereumNode, + }, +}; -let node = NodeBuilder::new(config) - .with_types::() - .with_components(|ctx| { - // Use the ComponentBuilder to customize components - ctx.components_builder() - // Custom network configuration - .network(|network_builder| { - network_builder - .peer_manager(custom_peer_manager) - .build() - }) - // Custom transaction pool - .pool(|pool_builder| { - pool_builder - .validator(custom_validator) - .ordering(custom_ordering) - .build() - }) - // Custom consensus - .consensus(custom_consensus) - // Custom EVM configuration - .evm(|evm_builder| { - evm_builder - .with_precompiles(custom_precompiles) - .build() - }) - // Build all components - .build() - }) - .build() - .await?; +fn main() { + Cli::parse_args() + .run(|builder, _| async move { + let handle = builder + // Use the default ethereum node types + .with_types::() + // Configure the components of the node + // Use default ethereum components but replace specific ones + .with_components( + EthereumNode::components() + // Custom transaction pool + .pool(CustomPoolBuilder::default()) + // Other customizable components: + // .network(CustomNetworkBuilder::default()) + // .executor(CustomExecutorBuilder::default()) + // .consensus(CustomConsensusBuilder::default()) + // .payload(CustomPayloadBuilder::default()) + ) + .with_add_ons(EthereumAddOns::default()) + .launch() + .await?; + + handle.wait_for_node_exit().await + }) + .unwrap(); +} ``` +Custom component builders must implement their respective traits (`PoolBuilder`, `NetworkBuilder`, +`ExecutorBuilder`, `ConsensusBuilder`, `PayloadServiceBuilder`). Each trait requires implementing +an async `build_*` method that receives a `BuilderContext` with access to node configuration, +providers, and task executors. 
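A quick way to experiment with these traits is to build the example crates listed below directly from a reth checkout. The package name used here is an assumption based on the example's directory name (the same convention as the `db-access` example referenced elsewhere in these docs); check the workspace manifest if it differs:

```bash
# Fetch the reth repository; the examples are part of the main workspace
git clone https://github.com/paradigmxyz/reth
cd reth

# Build one of the component-customization examples.
# The package name is assumed to match its directory under examples/.
cargo build -p custom-node-components
```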
+ +For complete working examples with full trait implementations, see: +- [custom-node-components](https://github.com/paradigmxyz/reth/tree/main/examples/custom-node-components) - Custom transaction pool +- [custom-payload-builder](https://github.com/paradigmxyz/reth/tree/main/examples/custom-payload-builder) - Custom payload builder +- [custom-evm](https://github.com/paradigmxyz/reth/tree/main/examples/custom-evm) - Custom EVM configuration + ## Component Lifecycle Components follow a specific lifecycle starting from node builder initialization to shutdown: diff --git a/docs/vocs/docs/pages/sdk/node-components/evm.mdx b/docs/vocs/docs/pages/sdk/node-components/evm.mdx index 1460f8938f..97ad55617c 100644 --- a/docs/vocs/docs/pages/sdk/node-components/evm.mdx +++ b/docs/vocs/docs/pages/sdk/node-components/evm.mdx @@ -13,7 +13,18 @@ The EVM component manages: - State management and caching ## Architecture +```mermaid +graph TD + Config[EVM Configuration] --> Env[EVM Environment] + Config --> Executor[Block Executor] + Config --> Builder[Block Builder] + Executor --> State[(State Database)] + Builder --> State + + Builder --> Assembler[Block Assembler] + Assembler --> State +``` ## Key Concepts diff --git a/docs/vocs/docs/pages/sdk/node-components/network.mdx b/docs/vocs/docs/pages/sdk/node-components/network.mdx index f9af6f5ddc..210a3af965 100644 --- a/docs/vocs/docs/pages/sdk/node-components/network.mdx +++ b/docs/vocs/docs/pages/sdk/node-components/network.mdx @@ -41,15 +41,28 @@ The network uses multiple discovery mechanisms to find and connect to peers: ### Protocol Support - **ETH Protocol**: Core Ethereum wire protocol for blocks and transactions - + +### Network Mode +- **PoS networks**: Block broadcasting is disabled and considered a protocol violation. New blocks are obtained from the consensus layer and requested over devp2p. +- **PoW networks**: Block announcements are enabled to help propagate new blocks quickly. + ### Message Broadcasting The network efficiently propagates new blocks and transactions to peers using: - Transaction pooling and deduplication - Block announcement strategies - Bandwidth management +Note: In PoS networks, block broadcasting is disabled and considered a protocol violation. New blocks are obtained via the consensus layer (CL) and requested over devp2p. In PoW mode, block announcements are enabled. + +### Transaction Gossip Control +- A `tx_gossip_disabled` flag can be used to disable transaction gossip end-to-end. +- This is useful for private nodes, bandwidth-constrained deployments, or setups that rely on out-of-band transaction ingestion. + +### NAT and External IP Resolution +- Optional NAT resolver support helps determine and advertise the correct external IP and port for discovery and inbound connectivity. 
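On a running node, the NAT resolver described above is usually configured from the CLI. A minimal sketch, assuming the `--nat` option and the `extip:<ip>` value convention (verify the exact flags with `reth node --help` for your version):

```bash
# Advertise an explicit external IP for discovery and inbound connectivity,
# instead of relying on automatic NAT/UPnP resolution
reth node --nat extip:203.0.113.7
```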
+ ## Next Steps - Learn about the [Transaction Pool](/sdk/node-components/pool) - Understand [Consensus](/sdk/node-components/consensus) integration -- Explore [RPC](/sdk/node-components/rpc) server setup \ No newline at end of file +- Explore [RPC](/sdk/node-components/rpc) server setup diff --git a/docs/vocs/docs/pages/sdk/node-components/pool.mdx b/docs/vocs/docs/pages/sdk/node-components/pool.mdx index 301d794b3f..0b5a04670e 100644 --- a/docs/vocs/docs/pages/sdk/node-components/pool.mdx +++ b/docs/vocs/docs/pages/sdk/node-components/pool.mdx @@ -23,6 +23,7 @@ graph TD SubPools --> Pending[Pending Pool] SubPools --> Queued[Queued Pool] SubPools --> Base[Base Fee Pool] + SubPools --> Blob[Blob Pool] Pool --> Ordering[Transaction Ordering] Pool --> Listeners[Event Listeners] @@ -49,6 +50,7 @@ Transactions are ordered by their effective tip per gas to maximize block reward - **Pending**: Transactions ready for inclusion (correct nonce) - **Queued**: Future transactions (nonce gap exists) - **Base Fee**: Transactions priced below current base fee +- **Blob**: Blob transactions that currently do not meet base fee and/or blob fee requirements ### Pool Maintenance The pool requires periodic maintenance to: @@ -77,4 +79,4 @@ The pool supports an event system that allows other components to listen for tra - Learn about [Consensus](/sdk/node-components/consensus) validation - Explore [EVM](/sdk/node-components/evm) execution -- Understand [RPC](/sdk/node-components/rpc) server integration \ No newline at end of file +- Understand [RPC](/sdk/node-components/rpc) server integration diff --git a/docs/vocs/sidebar-cli-op-reth.ts b/docs/vocs/sidebar-cli-op-reth.ts new file mode 100644 index 0000000000..a35dab3e0b --- /dev/null +++ b/docs/vocs/sidebar-cli-op-reth.ts @@ -0,0 +1,242 @@ +import { SidebarItem } from "vocs"; + +export const opRethCliSidebar: SidebarItem = { + text: "op-reth", + link: "/cli/op-reth", + collapsed: false, + items: [ + { + text: "op-reth node", + link: "/cli/op-reth/node" + }, + { + text: "op-reth init", + link: "/cli/op-reth/init" + }, + { + text: "op-reth init-state", + link: "/cli/op-reth/init-state" + }, + { + text: "op-reth import-op", + link: "/cli/op-reth/import-op" + }, + { + text: "op-reth import-receipts-op", + link: "/cli/op-reth/import-receipts-op" + }, + { + text: "op-reth dump-genesis", + link: "/cli/op-reth/dump-genesis" + }, + { + text: "op-reth db", + link: "/cli/op-reth/db", + collapsed: true, + items: [ + { + text: "op-reth db stats", + link: "/cli/op-reth/db/stats" + }, + { + text: "op-reth db list", + link: "/cli/op-reth/db/list" + }, + { + text: "op-reth db checksum", + link: "/cli/op-reth/db/checksum" + }, + { + text: "op-reth db diff", + link: "/cli/op-reth/db/diff" + }, + { + text: "op-reth db get", + link: "/cli/op-reth/db/get", + collapsed: true, + items: [ + { + text: "op-reth db get mdbx", + link: "/cli/op-reth/db/get/mdbx" + }, + { + text: "op-reth db get static-file", + link: "/cli/op-reth/db/get/static-file" + } + ] + }, + { + text: "op-reth db drop", + link: "/cli/op-reth/db/drop" + }, + { + text: "op-reth db clear", + link: "/cli/op-reth/db/clear", + collapsed: true, + items: [ + { + text: "op-reth db clear mdbx", + link: "/cli/op-reth/db/clear/mdbx" + }, + { + text: "op-reth db clear static-file", + link: "/cli/op-reth/db/clear/static-file" + } + ] + }, + { + text: "op-reth db repair-trie", + link: "/cli/op-reth/db/repair-trie" + }, + { + text: "op-reth db static-file-header", + link: "/cli/op-reth/db/static-file-header", + collapsed: true, + 
items: [ + { + text: "op-reth db static-file-header block", + link: "/cli/op-reth/db/static-file-header/block" + }, + { + text: "op-reth db static-file-header path", + link: "/cli/op-reth/db/static-file-header/path" + } + ] + }, + { + text: "op-reth db version", + link: "/cli/op-reth/db/version" + }, + { + text: "op-reth db path", + link: "/cli/op-reth/db/path" + }, + { + text: "op-reth db settings", + link: "/cli/op-reth/db/settings", + collapsed: true, + items: [ + { + text: "op-reth db settings get", + link: "/cli/op-reth/db/settings/get" + }, + { + text: "op-reth db settings set", + link: "/cli/op-reth/db/settings/set", + collapsed: true, + items: [ + { + text: "op-reth db settings set receipts_in_static_files", + link: "/cli/op-reth/db/settings/set/receipts_in_static_files" + }, + { + text: "op-reth db settings set transaction_senders_in_static_files", + link: "/cli/op-reth/db/settings/set/transaction_senders_in_static_files" + } + ] + } + ] + }, + { + text: "op-reth db account-storage", + link: "/cli/op-reth/db/account-storage" + } + ] + }, + { + text: "op-reth stage", + link: "/cli/op-reth/stage", + collapsed: true, + items: [ + { + text: "op-reth stage run", + link: "/cli/op-reth/stage/run" + }, + { + text: "op-reth stage drop", + link: "/cli/op-reth/stage/drop" + }, + { + text: "op-reth stage dump", + link: "/cli/op-reth/stage/dump", + collapsed: true, + items: [ + { + text: "op-reth stage dump execution", + link: "/cli/op-reth/stage/dump/execution" + }, + { + text: "op-reth stage dump storage-hashing", + link: "/cli/op-reth/stage/dump/storage-hashing" + }, + { + text: "op-reth stage dump account-hashing", + link: "/cli/op-reth/stage/dump/account-hashing" + }, + { + text: "op-reth stage dump merkle", + link: "/cli/op-reth/stage/dump/merkle" + } + ] + }, + { + text: "op-reth stage unwind", + link: "/cli/op-reth/stage/unwind", + collapsed: true, + items: [ + { + text: "op-reth stage unwind to-block", + link: "/cli/op-reth/stage/unwind/to-block" + }, + { + text: "op-reth stage unwind num-blocks", + link: "/cli/op-reth/stage/unwind/num-blocks" + } + ] + } + ] + }, + { + text: "op-reth p2p", + link: "/cli/op-reth/p2p", + collapsed: true, + items: [ + { + text: "op-reth p2p header", + link: "/cli/op-reth/p2p/header" + }, + { + text: "op-reth p2p body", + link: "/cli/op-reth/p2p/body" + }, + { + text: "op-reth p2p rlpx", + link: "/cli/op-reth/p2p/rlpx", + collapsed: true, + items: [ + { + text: "op-reth p2p rlpx ping", + link: "/cli/op-reth/p2p/rlpx/ping" + } + ] + }, + { + text: "op-reth p2p bootnode", + link: "/cli/op-reth/p2p/bootnode" + } + ] + }, + { + text: "op-reth config", + link: "/cli/op-reth/config" + }, + { + text: "op-reth prune", + link: "/cli/op-reth/prune" + }, + { + text: "op-reth re-execute", + link: "/cli/op-reth/re-execute" + } + ] +}; diff --git a/docs/vocs/sidebar-cli-reth.ts b/docs/vocs/sidebar-cli-reth.ts new file mode 100644 index 0000000000..5e5e7f9303 --- /dev/null +++ b/docs/vocs/sidebar-cli-reth.ts @@ -0,0 +1,250 @@ +import { SidebarItem } from "vocs"; + +export const rethCliSidebar: SidebarItem = { + text: "reth", + link: "/cli/reth", + collapsed: false, + items: [ + { + text: "reth node", + link: "/cli/reth/node" + }, + { + text: "reth init", + link: "/cli/reth/init" + }, + { + text: "reth init-state", + link: "/cli/reth/init-state" + }, + { + text: "reth import", + link: "/cli/reth/import" + }, + { + text: "reth import-era", + link: "/cli/reth/import-era" + }, + { + text: "reth export-era", + link: "/cli/reth/export-era" + }, + { + text: "reth 
dump-genesis", + link: "/cli/reth/dump-genesis" + }, + { + text: "reth db", + link: "/cli/reth/db", + collapsed: true, + items: [ + { + text: "reth db stats", + link: "/cli/reth/db/stats" + }, + { + text: "reth db list", + link: "/cli/reth/db/list" + }, + { + text: "reth db checksum", + link: "/cli/reth/db/checksum" + }, + { + text: "reth db diff", + link: "/cli/reth/db/diff" + }, + { + text: "reth db get", + link: "/cli/reth/db/get", + collapsed: true, + items: [ + { + text: "reth db get mdbx", + link: "/cli/reth/db/get/mdbx" + }, + { + text: "reth db get static-file", + link: "/cli/reth/db/get/static-file" + } + ] + }, + { + text: "reth db drop", + link: "/cli/reth/db/drop" + }, + { + text: "reth db clear", + link: "/cli/reth/db/clear", + collapsed: true, + items: [ + { + text: "reth db clear mdbx", + link: "/cli/reth/db/clear/mdbx" + }, + { + text: "reth db clear static-file", + link: "/cli/reth/db/clear/static-file" + } + ] + }, + { + text: "reth db repair-trie", + link: "/cli/reth/db/repair-trie" + }, + { + text: "reth db static-file-header", + link: "/cli/reth/db/static-file-header", + collapsed: true, + items: [ + { + text: "reth db static-file-header block", + link: "/cli/reth/db/static-file-header/block" + }, + { + text: "reth db static-file-header path", + link: "/cli/reth/db/static-file-header/path" + } + ] + }, + { + text: "reth db version", + link: "/cli/reth/db/version" + }, + { + text: "reth db path", + link: "/cli/reth/db/path" + }, + { + text: "reth db settings", + link: "/cli/reth/db/settings", + collapsed: true, + items: [ + { + text: "reth db settings get", + link: "/cli/reth/db/settings/get" + }, + { + text: "reth db settings set", + link: "/cli/reth/db/settings/set", + collapsed: true, + items: [ + { + text: "reth db settings set receipts_in_static_files", + link: "/cli/reth/db/settings/set/receipts_in_static_files" + }, + { + text: "reth db settings set transaction_senders_in_static_files", + link: "/cli/reth/db/settings/set/transaction_senders_in_static_files" + } + ] + } + ] + }, + { + text: "reth db account-storage", + link: "/cli/reth/db/account-storage" + } + ] + }, + { + text: "reth download", + link: "/cli/reth/download" + }, + { + text: "reth stage", + link: "/cli/reth/stage", + collapsed: true, + items: [ + { + text: "reth stage run", + link: "/cli/reth/stage/run" + }, + { + text: "reth stage drop", + link: "/cli/reth/stage/drop" + }, + { + text: "reth stage dump", + link: "/cli/reth/stage/dump", + collapsed: true, + items: [ + { + text: "reth stage dump execution", + link: "/cli/reth/stage/dump/execution" + }, + { + text: "reth stage dump storage-hashing", + link: "/cli/reth/stage/dump/storage-hashing" + }, + { + text: "reth stage dump account-hashing", + link: "/cli/reth/stage/dump/account-hashing" + }, + { + text: "reth stage dump merkle", + link: "/cli/reth/stage/dump/merkle" + } + ] + }, + { + text: "reth stage unwind", + link: "/cli/reth/stage/unwind", + collapsed: true, + items: [ + { + text: "reth stage unwind to-block", + link: "/cli/reth/stage/unwind/to-block" + }, + { + text: "reth stage unwind num-blocks", + link: "/cli/reth/stage/unwind/num-blocks" + } + ] + } + ] + }, + { + text: "reth p2p", + link: "/cli/reth/p2p", + collapsed: true, + items: [ + { + text: "reth p2p header", + link: "/cli/reth/p2p/header" + }, + { + text: "reth p2p body", + link: "/cli/reth/p2p/body" + }, + { + text: "reth p2p rlpx", + link: "/cli/reth/p2p/rlpx", + collapsed: true, + items: [ + { + text: "reth p2p rlpx ping", + link: "/cli/reth/p2p/rlpx/ping" + } + ] + }, + { 
+ text: "reth p2p bootnode", + link: "/cli/reth/p2p/bootnode" + } + ] + }, + { + text: "reth config", + link: "/cli/reth/config" + }, + { + text: "reth prune", + link: "/cli/reth/prune" + }, + { + text: "reth re-execute", + link: "/cli/reth/re-execute" + } + ] +}; diff --git a/docs/vocs/sidebar.ts b/docs/vocs/sidebar.ts index e51af1c260..31d2d8d726 100644 --- a/docs/vocs/sidebar.ts +++ b/docs/vocs/sidebar.ts @@ -1,4 +1,6 @@ import { SidebarItem } from "vocs"; +import { rethCliSidebar } from "./sidebar-cli-reth"; +import { opRethCliSidebar } from "./sidebar-cli-op-reth"; export const sidebar: SidebarItem[] = [ { @@ -288,231 +290,8 @@ export const sidebar: SidebarItem[] = [ link: "/cli/cli", collapsed: false, items: [ - { - text: "reth", - link: "/cli/reth", - collapsed: false, - items: [ - { - text: "reth node", - link: "/cli/reth/node" - }, - { - text: "reth init", - link: "/cli/reth/init" - }, - { - text: "reth init-state", - link: "/cli/reth/init-state" - }, - { - text: "reth import", - link: "/cli/reth/import" - }, - { - text: "reth import-era", - link: "/cli/reth/import-era" - }, - { - text: "reth export-era", - link: "/cli/reth/export-era" - }, - { - text: "reth dump-genesis", - link: "/cli/reth/dump-genesis" - }, - { - text: "reth db", - link: "/cli/reth/db", - collapsed: true, - items: [ - { - text: "reth db stats", - link: "/cli/reth/db/stats" - }, - { - text: "reth db list", - link: "/cli/reth/db/list" - }, - { - text: "reth db checksum", - link: "/cli/reth/db/checksum" - }, - { - text: "reth db diff", - link: "/cli/reth/db/diff" - }, - { - text: "reth db get", - link: "/cli/reth/db/get", - collapsed: true, - items: [ - { - text: "reth db get mdbx", - link: "/cli/reth/db/get/mdbx" - }, - { - text: "reth db get static-file", - link: "/cli/reth/db/get/static-file" - } - ] - }, - { - text: "reth db drop", - link: "/cli/reth/db/drop" - }, - { - text: "reth db clear", - link: "/cli/reth/db/clear", - collapsed: true, - items: [ - { - text: "reth db clear mdbx", - link: "/cli/reth/db/clear/mdbx" - }, - { - text: "reth db clear static-file", - link: "/cli/reth/db/clear/static-file" - } - ] - }, - { - text: "reth db version", - link: "/cli/reth/db/version" - }, - { - text: "reth db path", - link: "/cli/reth/db/path" - } - ] - }, - { - text: "reth download", - link: "/cli/reth/download" - }, - { - text: "reth stage", - link: "/cli/reth/stage", - collapsed: true, - items: [ - { - text: "reth stage run", - link: "/cli/reth/stage/run" - }, - { - text: "reth stage drop", - link: "/cli/reth/stage/drop" - }, - { - text: "reth stage dump", - link: "/cli/reth/stage/dump", - collapsed: true, - items: [ - { - text: "reth stage dump execution", - link: "/cli/reth/stage/dump/execution" - }, - { - text: "reth stage dump storage-hashing", - link: "/cli/reth/stage/dump/storage-hashing" - }, - { - text: "reth stage dump account-hashing", - link: "/cli/reth/stage/dump/account-hashing" - }, - { - text: "reth stage dump merkle", - link: "/cli/reth/stage/dump/merkle" - } - ] - }, - { - text: "reth stage unwind", - link: "/cli/reth/stage/unwind", - collapsed: true, - items: [ - { - text: "reth stage unwind to-block", - link: "/cli/reth/stage/unwind/to-block" - }, - { - text: "reth stage unwind num-blocks", - link: "/cli/reth/stage/unwind/num-blocks" - } - ] - } - ] - }, - { - text: "reth p2p", - link: "/cli/reth/p2p", - collapsed: true, - items: [ - { - text: "reth p2p header", - link: "/cli/reth/p2p/header" - }, - { - text: "reth p2p body", - link: "/cli/reth/p2p/body" - }, - { - text: "reth p2p rlpx", - link: 
"/cli/reth/p2p/rlpx", - collapsed: true, - items: [ - { - text: "reth p2p rlpx ping", - link: "/cli/reth/p2p/rlpx/ping" - } - ] - } - ] - }, - { - text: "reth config", - link: "/cli/reth/config" - }, - { - text: "reth debug", - link: "/cli/reth/debug", - collapsed: true, - items: [ - { - text: "reth debug execution", - link: "/cli/reth/debug/execution" - }, - { - text: "reth debug merkle", - link: "/cli/reth/debug/merkle" - }, - { - text: "reth debug in-memory-merkle", - link: "/cli/reth/debug/in-memory-merkle" - }, - { - text: "reth debug build-block", - link: "/cli/reth/debug/build-block" - } - ] - }, - { - text: "reth recover", - link: "/cli/reth/recover", - collapsed: true, - items: [ - { - text: "reth recover storage-tries", - link: "/cli/reth/recover/storage-tries" - } - ] - }, - { - text: "reth prune", - link: "/cli/reth/prune" - } - ] - } + rethCliSidebar, + opRethCliSidebar ] }, ] \ No newline at end of file diff --git a/docs/vocs/vocs.config.ts b/docs/vocs/vocs.config.ts index 92aee41831..a2d7efa9ad 100644 --- a/docs/vocs/vocs.config.ts +++ b/docs/vocs/vocs.config.ts @@ -21,7 +21,7 @@ export default defineConfig({ }, { text: 'GitHub', link: 'https://github.com/paradigmxyz/reth' }, { - text: 'v1.8.2', + text: 'v1.9.3', items: [ { text: 'Releases', @@ -72,5 +72,35 @@ export default defineConfig({ }, editLink: { pattern: "https://github.com/paradigmxyz/reth/edit/main/docs/vocs/docs/pages/:path", + }, + vite: { + plugins: [ + { + name: 'transform-summary-links', + apply: 'serve', // only during dev for faster feedback + enforce: 'pre', + async load(id) { + if (id.endsWith('pages/cli/SUMMARY.mdx') || id.endsWith('pages/cli/summary.mdx')) { + const { readFileSync } = await import('node:fs') + let code = readFileSync(id, 'utf-8') + code = code.replace(/\]\(\.\/([^)]+)\.mdx\)/g, '](/cli/\$1)') + return code + } + } + }, + { + name: 'transform-summary-links-build', + apply: 'build', // only apply during build + enforce: 'pre', + async load(id) { + if (id.endsWith('pages/cli/SUMMARY.mdx') || id.endsWith('pages/cli/summary.mdx')) { + const { readFileSync } = await import('node:fs') + let code = readFileSync(id, 'utf-8') + code = code.replace(/\]\(\.\/([^)]+)\.mdx\)/g, '](/cli/\$1)') + return code + } + } + } + ] } }) diff --git a/etc/docker-compose.yml b/etc/docker-compose.yml index 73311616fd..7fab1ce75a 100644 --- a/etc/docker-compose.yml +++ b/etc/docker-compose.yml @@ -61,11 +61,13 @@ services: - ./grafana/datasources:/etc/grafana/provisioning/datasources - ./grafana/dashboards:/etc/grafana/provisioning_temp/dashboards # 1. Copy dashboards from temp directory to prevent modifying original host files - # 2. Replace Prometheus datasource placeholder with the actual name + # 2. Replace Prometheus datasource placeholders with the actual name # 3. Run Grafana entrypoint: > sh -c "cp -r /etc/grafana/provisioning_temp/dashboards/. 
/etc/grafana/provisioning/dashboards && find /etc/grafana/provisioning/dashboards/ -name '*.json' -exec sed -i 's/$${DS_PROMETHEUS}/Prometheus/g' {} \+ && + find /etc/grafana/provisioning/dashboards/ -name '*.json' -exec sed -i 's/$${datasource}/Prometheus/g' {} \+ && + find /etc/grafana/provisioning/dashboards/ -name '*.json' -exec sed -i 's/$${VAR_INSTANCE_LABEL}/instance/g' {} \+ && /run.sh" volumes: diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 46a465ca4a..ac65f47932 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -2,7 +2,7 @@ "__inputs": [ { "name": "DS_PROMETHEUS", - "label": "prometheus", + "label": "Prometheus", "description": "", "type": "datasource", "pluginId": "prometheus", @@ -46,7 +46,7 @@ "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "12.1.0-pre" + "version": "12.2.1" }, { "type": "panel", @@ -110,7 +110,6 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": null, "links": [], "panels": [ { @@ -164,9 +163,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -177,12 +174,12 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -234,9 +231,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -247,12 +242,12 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -304,9 +299,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -317,12 +310,12 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -374,9 +367,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -387,12 +378,12 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -444,9 +435,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -457,12 +446,12 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -514,9 +503,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - 
"lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -527,12 +514,12 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -596,9 +583,7 @@ "minVizWidth": 75, "orientation": "auto", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -606,12 +591,12 @@ "showThresholdMarkers": true, "sizing": "auto" }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, @@ -672,9 +657,7 @@ "namePlacement": "auto", "orientation": "horizontal", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -682,16 +665,16 @@ "sizing": "auto", "valueMode": "color" }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "exemplar": false, - "expr": "reth_sync_checkpoint{$instance_label=\"$instance\"}", + "expr": "max by (stage) (reth_sync_checkpoint{$instance_label=\"$instance\"})", "instant": true, "legendFormat": "{{stage}}", "range": false, @@ -774,9 +757,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -784,12 +765,12 @@ "textMode": "auto", "wideLayout": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(reth_db_table_size{$instance_label=\"$instance\"})", @@ -813,7 +794,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(reth_static_files_segment_size{$instance_label=\"$instance\"})", @@ -844,7 +825,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -875,6 +856,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": true, "stacking": { "group": "A", @@ -920,7 +902,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -928,7 +910,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": "reth_sync_entities_processed{$instance_label=\"$instance\"} / reth_sync_entities_total{$instance_label=\"$instance\"}", + "expr": "avg by (stage) (reth_sync_entities_processed{$instance_label=\"$instance\"} / reth_sync_entities_total{$instance_label=\"$instance\"})", "legendFormat": "{{stage}}", "range": true, "refId": "A" @@ -940,7 +922,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -971,6 +953,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -1017,7 +1000,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -1025,7 +1008,7 @@ "uid": "${datasource}" }, "editorMode": "builder", - "expr": 
"reth_sync_checkpoint{$instance_label=\"$instance\"}", + "expr": "max by (stage) (reth_sync_checkpoint{$instance_label=\"$instance\"})", "legendFormat": "{{stage}}", "range": true, "refId": "A" @@ -1037,7 +1020,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Latency histogram for the engine_forkchoiceUpdated RPC API", "fieldConfig": { @@ -1069,6 +1052,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -1137,7 +1121,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -1145,171 +1129,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{$instance_label=\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV1 min", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{$instance_label=\"$instance\", quantile=\"0.5\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_forkchoice_updated_latency{$instance_label=\"$instance\", quantile=\"0\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV1 p50", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{$instance_label=\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV1 p90", - "range": true, - "refId": "C", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{$instance_label=\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV1 p95", - "range": true, - "refId": "D", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v1{$instance_label=\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV1 p99", - "range": true, - "refId": "E", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{$instance_label=\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV2 min", - "range": true, - "refId": "F", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{$instance_label=\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - 
"hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV2 p50", - "range": true, - "refId": "G", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{$instance_label=\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV2 p90", - "range": true, - "refId": "H", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{$instance_label=\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV2 p95", - "range": true, - "refId": "I", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v2{$instance_label=\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV2 p99", - "range": true, - "refId": "J", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{$instance_label=\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV3 min", + "legendFormat": "min", "range": true, "refId": "K", "useBackend": false @@ -1317,15 +1142,15 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{$instance_label=\"$instance\", quantile=\"0.5\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_forkchoice_updated_latency{$instance_label=\"$instance\", quantile=\"0.5\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV3 p50", + "legendFormat": "p50", "range": true, "refId": "L", "useBackend": false @@ -1336,12 +1161,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{$instance_label=\"$instance\", quantile=\"0.9\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_forkchoice_updated_latency{$instance_label=\"$instance\", quantile=\"0.9\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV3 p90", + "legendFormat": "p90", "range": true, "refId": "M", "useBackend": false @@ -1349,15 +1174,15 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{$instance_label=\"$instance\", quantile=\"0.95\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_forkchoice_updated_latency{$instance_label=\"$instance\", quantile=\"0.95\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV3 p95", + 
"legendFormat": "p95", "range": true, "refId": "N", "useBackend": false @@ -1368,12 +1193,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_fork_choice_updated_v3{$instance_label=\"$instance\", quantile=\"0.99\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_forkchoice_updated_latency{$instance_label=\"$instance\", quantile=\"0.99\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_forkchoiceUpdatedV3 p99", + "legendFormat": "p99", "range": true, "refId": "O", "useBackend": false @@ -1385,7 +1210,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Latency histogram for the engine_newPayload RPC API", "fieldConfig": { @@ -1417,6 +1242,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -1485,7 +1311,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -1493,251 +1319,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{$instance_label=\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV1 min", - "range": true, - "refId": "A", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{$instance_label=\"$instance\", quantile=\"0.5\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_new_payload_latency{$instance_label=\"$instance\", quantile=\"0\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV1 p50", - "range": true, - "refId": "B", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{$instance_label=\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV1 p90", - "range": true, - "refId": "C", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{$instance_label=\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV1 p95", - "range": true, - "refId": "D", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v1{$instance_label=\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV1 p99", - "range": true, - "refId": "E", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{$instance_label=\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV2 min", - "range": 
true, - "refId": "F", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{$instance_label=\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV2 p50", - "range": true, - "refId": "G", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{$instance_label=\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV2 p90", - "range": true, - "refId": "H", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{$instance_label=\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV2 p95", - "range": true, - "refId": "I", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v2{$instance_label=\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV2 p99", - "range": true, - "refId": "J", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{$instance_label=\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV3 min", - "range": true, - "refId": "K", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{$instance_label=\"$instance\", quantile=\"0.5\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV3 p50", - "range": true, - "refId": "L", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{$instance_label=\"$instance\", quantile=\"0.9\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV3 p90", - "range": true, - "refId": "M", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{$instance_label=\"$instance\", quantile=\"0.95\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV3 p95", - "range": true, - "refId": "N", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v3{$instance_label=\"$instance\", quantile=\"0.99\"}", - "fullMetaSearch": false, - 
"hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV3 p99", - "range": true, - "refId": "O", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{$instance_label=\"$instance\", quantile=\"0\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV4 min", + "legendFormat": "min", "range": true, "refId": "P", "useBackend": false @@ -1748,12 +1335,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{$instance_label=\"$instance\", quantile=\"0.5\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_new_payload_latency{$instance_label=\"$instance\", quantile=\"0.5\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV4 p50", + "legendFormat": "p50", "range": true, "refId": "Q", "useBackend": false @@ -1761,15 +1348,15 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{$instance_label=\"$instance\", quantile=\"0.9\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_new_payload_latency{$instance_label=\"$instance\", quantile=\"0.9\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV4 p90", + "legendFormat": "p90", "range": true, "refId": "R", "useBackend": false @@ -1780,12 +1367,12 @@ "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{$instance_label=\"$instance\", quantile=\"0.95\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_new_payload_latency{$instance_label=\"$instance\", quantile=\"0.95\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV4 p95", + "legendFormat": "p95", "range": true, "refId": "S", "useBackend": false @@ -1793,15 +1380,15 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "reth_engine_rpc_new_payload_v4{$instance_label=\"$instance\", quantile=\"0.99\"}", + "editorMode": "code", + "expr": "reth_consensus_engine_beacon_new_payload_latency{$instance_label=\"$instance\", quantile=\"0.99\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_newPayloadV4 p99", + "legendFormat": "p99", "range": true, "refId": "T", "useBackend": false @@ -1845,6 +1432,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -1888,15 +1476,15 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.5\"}", + "expr": "reth_consensus_engine_beacon_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.5\"}", "legendFormat": "p50", "range": true, "refId": "A" @@ -1904,10 +1492,10 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, 
"editorMode": "code", - "expr": "reth_engine_rpc_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.9\"}", + "expr": "reth_consensus_engine_beacon_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.9\"}", "hide": false, "legendFormat": "p90", "range": true, @@ -1916,10 +1504,10 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.95\"}", + "expr": "reth_consensus_engine_beacon_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.95\"}", "hide": false, "legendFormat": "p95", "range": true, @@ -1928,10 +1516,10 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.99\"}", + "expr": "reth_consensus_engine_beacon_new_payload_total_gas{$instance_label=\"$instance\", quantile=\"0.99\"}", "hide": false, "legendFormat": "p99", "range": true, @@ -1976,6 +1564,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2019,15 +1608,15 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.5\"}", + "expr": "reth_consensus_engine_beacon_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.5\"}", "legendFormat": "p50", "range": true, "refId": "A" @@ -2035,10 +1624,10 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.9\"}", + "expr": "reth_consensus_engine_beacon_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.9\"}", "hide": false, "legendFormat": "p90", "range": true, @@ -2047,10 +1636,10 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.95\"}", + "expr": "reth_consensus_engine_beacon_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.95\"}", "hide": false, "legendFormat": "p95", "range": true, @@ -2059,10 +1648,10 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.99\"}", + "expr": "reth_consensus_engine_beacon_new_payload_gas_per_second{$instance_label=\"$instance\", quantile=\"0.99\"}", "hide": false, "legendFormat": "p99", "range": true, @@ -2107,6 +1696,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2150,12 +1740,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_sync_execution_gas_per_second{$instance_label=\"$instance\"}", @@ -2182,7 +1772,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + 
"uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2214,7 +1804,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2246,7 +1836,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2297,6 +1887,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2344,12 +1935,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2400,7 +1991,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Engine API messages received by the CL, either engine_newPayload or engine_forkchoiceUpdated", "fieldConfig": { @@ -2432,6 +2023,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2478,7 +2070,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -2494,7 +2086,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "rate(reth_consensus_engine_beacon_new_payload_messages{$instance_label=\"$instance\"}[$__rate_interval])", @@ -2542,6 +2134,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2589,12 +2182,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "rate(reth_consensus_engine_beacon_failed_new_payload_response_deliveries{$instance_label=\"$instance\"}[$__rate_interval])", @@ -2620,7 +2213,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Latency histogram for the engine_newPayload to engine_forkchoiceUpdated", "fieldConfig": { @@ -2652,6 +2245,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2699,7 +2293,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -2707,7 +2301,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_engine_rpc_new_payload_forkchoice_updated_time_diff{$instance_label=\"$instance\"}", + "expr": "reth_consensus_engine_beacon_new_payload_forkchoice_updated_time_diff{$instance_label=\"$instance\"}", "legendFormat": "p{{quantile}}", "range": true, "refId": "A" @@ -2719,7 +2313,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Latency histograms for the engine_getPayloadBodiesByHashV1 and engine_getPayloadBodiesByRangeV1 RPC APIs", "fieldConfig": { @@ -2751,6 +2345,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -2798,7 +2393,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -2819,7 +2414,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" 
+ "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2851,7 +2446,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2883,7 +2478,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2915,7 +2510,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2947,7 +2542,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -2998,6 +2593,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3041,12 +2637,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "rate(reth_engine_rpc_blobs_blob_count{$instance_label=\"$instance\"}[$__rate_interval])", @@ -3073,7 +2669,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -3104,6 +2700,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3151,7 +2748,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -3160,11 +2757,11 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_blobs_v1{$instance_label=\"$instance\", quantile=\"0.5\"}", + "expr": "reth_engine_rpc_get_blobs_v2{$instance_label=\"$instance\", quantile=\"0.5\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_getBlobsV1 p50", + "legendFormat": "engine_getBlobsV2 p50", "range": true, "refId": "A", "useBackend": false @@ -3172,15 +2769,15 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_blobs_v1{$instance_label=\"$instance\", quantile=\"0.95\"}", + "expr": "reth_engine_rpc_get_blobs_v2{$instance_label=\"$instance\", quantile=\"0.95\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_getBlobsV1 p95", + "legendFormat": "engine_getBlobsV2 p95", "range": true, "refId": "B", "useBackend": false @@ -3192,11 +2789,11 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_blobs_v1{$instance_label=\"$instance\", quantile=\"0.99\"}", + "expr": "reth_engine_rpc_get_blobs_v2{$instance_label=\"$instance\", quantile=\"0.99\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_getBlobsV1 p99", + "legendFormat": "engine_getBlobsV2 p99", "range": true, "refId": "C", "useBackend": false @@ -3204,15 +2801,15 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_blobs_v1{$instance_label=\"$instance\", quantile=\"0\"}", + "expr": "reth_engine_rpc_get_blobs_v2{$instance_label=\"$instance\", quantile=\"0\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - 
"legendFormat": "engine_getBlobsV1 min", + "legendFormat": "engine_getBlobsV2 min", "range": true, "refId": "D", "useBackend": false @@ -3224,11 +2821,11 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "reth_engine_rpc_get_blobs_v1{$instance_label=\"$instance\", quantile=\"1\"}", + "expr": "reth_engine_rpc_get_blobs_v2{$instance_label=\"$instance\", quantile=\"1\"}", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, - "legendFormat": "engine_getBlobsV1 max", + "legendFormat": "engine_getBlobsV2 max", "range": true, "refId": "E", "useBackend": false @@ -3240,7 +2837,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Total pipeline runs triggered by the sync controller", "fieldConfig": { @@ -3272,6 +2869,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3318,7 +2916,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -3338,7 +2936,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -3370,6 +2968,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3416,7 +3015,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -3450,7 +3049,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -3481,6 +3080,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3528,7 +3128,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -3549,7 +3149,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -3601,6 +3201,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3648,12 +3249,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -3687,7 +3288,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -3740,6 +3341,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3787,12 +3389,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -3844,6 +3446,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -3877,9 +3480,7 @@ "id": "byNames", "options": { "mode": "exclude", - "names": [ - "Precompile cache hits" - ], + "names": ["Precompile cache hits"], "prefix": "All except:", "readOnly": true } @@ -3889,7 +3490,7 @@ "id": "custom.hideFrom", "value": { "legend": false, - "tooltip": false, + "tooltip": true, "viz": true } } @@ -3917,12 +3518,12 @@ "sort": "none" } }, - 
"pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -3931,7 +3532,7 @@ "hide": false, "includeNullMetadata": true, "instant": false, - "legendFormat": "{{address}}", + "legendFormat": "Precompile cache hits", "range": true, "refId": "A", "useBackend": false @@ -3987,6 +3588,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4033,12 +3635,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_tree_root_proofs_processed_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4085,6 +3687,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4132,12 +3735,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_tree_root_proof_calculation_duration_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4184,6 +3787,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4231,19 +3835,110 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_tree_root_pending_multiproofs_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "reth_tree_root_pending_account_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.5\"}", "instant": false, - "legendFormat": "{{quantile}} percentile", + "legendFormat": "accounts p50", "range": true, "refId": "Branch Nodes" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_pending_account_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.9\"}", + "hide": false, + "instant": false, + "legendFormat": "accounts p90", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_pending_account_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.95\"}", + "hide": false, + "instant": false, + "legendFormat": "accounts p95", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_pending_account_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.99\"}", + "hide": false, + "instant": false, + "legendFormat": "accounts p99", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_pending_storage_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.5\"}", + "hide": false, + "instant": false, + "legendFormat": "storage p50", + "range": true, + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": 
"code", + "expr": "reth_tree_root_pending_storage_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.9\"}", + "hide": false, + "instant": false, + "legendFormat": "storage p90", + "range": true, + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_pending_storage_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.95\"}", + "hide": false, + "instant": false, + "legendFormat": "storage p95", + "range": true, + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_pending_storage_multiproofs_histogram{$instance_label=\"$instance\", quantile=\"0.99\"}", + "hide": false, + "instant": false, + "legendFormat": "storage p99", + "range": true, + "refId": "G" } ], "title": "Pending MultiProof requests", @@ -4283,6 +3978,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4330,22 +4026,112 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", - "expr": "reth_tree_root_inflight_multiproofs_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", + "expr": "reth_tree_root_active_account_workers_histogram{$instance_label=\"$instance\",quantile=\"0.5\"}", "instant": false, - "legendFormat": "{{quantile}} percentile", + "legendFormat": "accounts p50", "range": true, - "refId": "Branch Nodes" + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_active_account_workers_histogram{$instance_label=\"$instance\",quantile=\"0.9\"}", + "hide": false, + "instant": false, + "legendFormat": "accounts p90", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_active_account_workers_histogram{$instance_label=\"$instance\",quantile=\"0.95\"}", + "hide": false, + "instant": false, + "legendFormat": "accounts p95", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_active_account_workers_histogram{$instance_label=\"$instance\",quantile=\"0.99\"}", + "hide": false, + "instant": false, + "legendFormat": "accounts p99", + "range": true, + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_active_storage_workers_histogram{$instance_label=\"$instance\",quantile=\"0.5\"}", + "instant": false, + "legendFormat": "storages p50", + "range": true, + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_active_storage_workers_histogram{$instance_label=\"$instance\",quantile=\"0.9\"}", + "hide": false, + "instant": false, + "legendFormat": "storages p90", + "range": true, + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_active_storage_workers_histogram{$instance_label=\"$instance\",quantile=\"0.95\"}", + "hide": false, + "instant": false, + "legendFormat": "storages p95", + "range": true, + 
"refId": "G" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_tree_root_active_storage_workers_histogram{$instance_label=\"$instance\",quantile=\"0.99\"}", + "hide": false, + "instant": false, + "legendFormat": "storages p99", + "range": true, + "refId": "H" } ], - "title": "In-flight MultiProof requests", + "title": "Active multiproof workers", "type": "timeseries" }, { @@ -4382,6 +4168,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4429,12 +4216,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_sparse_state_trie_multiproof_total_account_nodes{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4481,6 +4268,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4528,12 +4316,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_sparse_state_trie_multiproof_total_storage_nodes{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4580,6 +4368,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4627,12 +4416,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_sparse_state_trie_multiproof_skipped_account_nodes{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4680,6 +4469,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4727,12 +4517,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_sparse_state_trie_multiproof_skipped_storage_nodes{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4780,6 +4570,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4827,12 +4618,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_tree_root_multiproof_task_total_duration_histogram{$instance_label=\"$instance\",quantile=~\"(0|0.5|0.9|0.95|1)\"}", @@ -4881,6 +4672,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -4928,12 +4720,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5000,6 +4792,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -5043,12 +4836,12 @@ "sort": "none" } }, - 
"pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -5133,12 +4926,12 @@ "unit": "percentunit" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -5189,6 +4982,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -5232,12 +5026,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -5287,6 +5081,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -5330,12 +5125,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -5388,6 +5183,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -5427,10 +5223,7 @@ { "id": "custom.lineStyle", "value": { - "dash": [ - 0, - 10 - ], + "dash": [0, 10], "fill": "dot" } }, @@ -5462,12 +5255,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -5548,6 +5341,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -5587,10 +5381,7 @@ { "id": "custom.lineStyle", "value": { - "dash": [ - 0, - 10 - ], + "dash": [0, 10], "fill": "dot" } }, @@ -5622,12 +5413,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -5705,37 +5496,32 @@ }, "id": 48, "options": { - "displayLabels": [ - "name" - ], + "displayLabels": ["name"], "legend": { "displayMode": "table", "placement": "right", "showLegend": true, - "values": [ - "value" - ] + "values": ["value"] }, "pieType": "pie", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, + "sort": "desc", "tooltip": { "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_db_table_size{$instance_label=\"$instance\"}", @@ -5783,6 +5569,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -5830,12 +5617,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -5885,30 +5672,27 @@ "displayMode": "table", "placement": "right", "showLegend": true, - "values": [ - "value" - ] + "values": 
["value"] }, "pieType": "pie", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, + "sort": "desc", "tooltip": { "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "sum by (type) ( reth_db_table_pages{$instance_label=\"$instance\"} )", @@ -5955,6 +5739,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -6003,12 +5788,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum by (job) ( reth_db_table_size{$instance_label=\"$instance\"} )", @@ -6055,6 +5840,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -6102,12 +5888,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum(reth_db_freelist{$instance_label=\"$instance\"}) by (job)", @@ -6134,6 +5920,9 @@ "cellOptions": { "type": "auto" }, + "footer": { + "reducers": [] + }, "inspect": false }, "mappings": [], @@ -6159,7 +5948,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6171,7 +5960,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6183,7 +5972,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6195,7 +5984,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6207,7 +5996,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6251,22 +6040,14 @@ "id": 58, "options": { "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, "showHeader": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -6325,37 +6106,32 @@ }, "id": 202, "options": { - "displayLabels": [ - "name" - ], + "displayLabels": ["name"], "legend": { "displayMode": "table", "placement": "right", "showLegend": true, - "values": [ - "value" - ] + "values": ["value"] }, "pieType": "pie", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, + "sort": "desc", "tooltip": { "hideZeros": false, "mode": "single", "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_static_files_segment_size{$instance_label=\"$instance\"}", @@ -6383,6 +6159,9 @@ "cellOptions": { "type": "auto" }, + "footer": { + "reducers": [] + }, "inspect": false }, "mappings": [], @@ -6436,7 +6215,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6448,7 +6227,7 @@ }, "properties": [ { - "id": 
"custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6460,7 +6239,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6472,7 +6251,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6488,22 +6267,14 @@ "id": 204, "options": { "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, "showHeader": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -6533,6 +6304,9 @@ "cellOptions": { "type": "auto" }, + "footer": { + "reducers": [] + }, "inspect": false }, "mappings": [], @@ -6586,7 +6360,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6598,7 +6372,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6610,7 +6384,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6622,7 +6396,7 @@ }, "properties": [ { - "id": "custom.hidden", + "id": "custom.hideFrom.viz", "value": true } ] @@ -6638,22 +6412,14 @@ "id": 205, "options": { "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, "showHeader": true }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -6703,6 +6469,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -6750,12 +6517,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "sum by (job) ( reth_static_files_segment_size{$instance_label=\"$instance\"} )", @@ -6802,6 +6569,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -6849,12 +6617,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "max(max_over_time(reth_static_files_jar_provider_write_duration_seconds{$instance_label=\"$instance\", operation=\"commit-writer\", quantile=\"1\"}[$__interval]) > 0) by (segment)", @@ -6915,6 +6683,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -6929,7 +6698,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -6960,12 +6730,12 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_blockchain_tree_canonical_chain_height{$instance_label=\"$instance\"}", @@ -7013,6 +6783,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -7027,7 +6798,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 
}, { "color": "red", @@ -7058,12 +6830,12 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_blockchain_tree_block_buffer_blocks{$instance_label=\"$instance\"}", @@ -7110,6 +6882,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -7124,7 +6897,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -7156,12 +6930,12 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "increase(reth_blockchain_tree_reorgs{$instance_label=\"$instance\"}[$__rate_interval])", @@ -7208,6 +6982,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -7222,7 +6997,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -7254,12 +7030,12 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_blockchain_tree_latest_reorg_depth{$instance_label=\"$instance\"}", @@ -7320,6 +7096,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -7392,12 +7169,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -7484,12 +7261,12 @@ "unit": "percentunit" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -7538,6 +7315,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -7585,12 +7363,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "max(max_over_time(reth_rpc_server_calls_time_seconds{$instance_label=\"$instance\"}[$__rate_interval])) by (method) > 0", @@ -7673,12 +7451,12 @@ "unit": "percentunit" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "exemplar": false, @@ -7727,6 +7505,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -7810,12 +7589,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -7848,7 +7627,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -7882,7 +7661,7 @@ { "datasource": { 
"type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -7916,7 +7695,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "builder", @@ -7954,7 +7733,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -7985,6 +7764,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8032,7 +7812,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -8040,7 +7820,7 @@ "uid": "${datasource}" }, "editorMode": "code", - "expr": "sum(rate(reth_rpc_server_calls_successful_total{instance =~ \"$instance\"}[$__rate_interval])) by (method) > 0", + "expr": "sum(rate(reth_rpc_server_calls_successful_total{$instance_label=\"$instance\"}[$__rate_interval])) by (method) > 0", "instant": false, "legendFormat": "{{method}}", "range": true, @@ -8067,7 +7847,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -8098,6 +7878,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8112,7 +7893,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -8168,7 +7950,7 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -8184,7 +7966,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_headers_total_flushed{$instance_label=\"$instance\"}", @@ -8209,7 +7991,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_headers_total_flushed{$instance_label=\"$instance\"}[$__rate_interval])", @@ -8257,6 +8039,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8271,7 +8054,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -8303,12 +8087,12 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_headers_timeout_errors{$instance_label=\"$instance\"}[$__rate_interval])", @@ -8331,7 +8115,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_headers_validation_errors{$instance_label=\"$instance\"}[$__rate_interval])", @@ -8379,6 +8163,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8393,7 +8178,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": 0 }, { "color": "red", @@ -8424,12 +8210,12 @@ "sort": "none" } }, - "pluginVersion": "11.5.1", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": 
"reth_downloaders_headers_in_flight_requests{$instance_label=\"$instance\"}", @@ -8470,7 +8256,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The internal state of the headers downloader: the number of downloaded headers, and the number of headers sent to the header stage.", "fieldConfig": { @@ -8502,6 +8288,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8582,7 +8369,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -8598,7 +8385,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_total_flushed{$instance_label=\"$instance\"}", @@ -8622,7 +8409,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_bodies_total_downloaded{$instance_label=\"$instance\"}[$__rate_interval])", @@ -8646,7 +8433,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_buffered_blocks{$instance_label=\"$instance\"}", @@ -8674,7 +8461,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Internal errors in the bodies downloader. These are expected to happen from time to time.", "fieldConfig": { @@ -8706,6 +8493,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8750,7 +8538,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -8766,7 +8554,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "rate(reth_downloaders_bodies_unexpected_errors{$instance_label=\"$instance\"}[$__rate_interval])", @@ -8794,7 +8582,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The number of connected peers and in-progress requests for bodies.", "fieldConfig": { @@ -8826,6 +8614,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -8872,7 +8661,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -8888,7 +8677,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_network_connected_peers{$instance_label=\"$instance\"}", @@ -8936,6 +8725,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9000,12 +8790,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_buffered_blocks_size_bytes{$instance_label=\"$instance\"}", @@ -9033,7 +8823,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The number of blocks in a request and size in bytes of those block responses", "fieldConfig": { @@ -9065,6 +8855,7 @@ "type": "linear" }, "showPoints": 
"auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9129,7 +8920,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9146,7 +8937,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_downloaders_bodies_response_response_length{$instance_label=\"$instance\"}", @@ -9188,7 +8979,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -9220,6 +9011,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9293,7 +9085,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9318,7 +9110,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -9350,6 +9142,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9423,7 +9216,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9448,7 +9241,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -9480,6 +9273,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9553,7 +9347,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9578,7 +9372,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -9610,6 +9404,7 @@ "type": "linear" }, "showPoints": "never", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9683,7 +9478,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9722,7 +9517,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Number of active jobs", "fieldConfig": { @@ -9754,6 +9549,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9800,7 +9596,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9820,7 +9616,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Total number of initiated jobs", "fieldConfig": { @@ -9852,6 +9648,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9898,7 +9695,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -9918,7 +9715,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Total number of failed jobs", "fieldConfig": { @@ -9950,6 +9747,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -9996,7 +9794,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -10029,7 +9827,7 @@ { "datasource": { "type": 
"prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -10060,6 +9858,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": true, "stacking": { "group": "A", @@ -10108,7 +9907,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -10129,7 +9928,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -10160,6 +9959,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": true, "stacking": { "group": "A", @@ -10208,7 +10008,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -10229,7 +10029,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -10260,6 +10060,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": true, "stacking": { "group": "A", @@ -10307,7 +10108,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -10342,7 +10143,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "fieldConfig": { "defaults": { @@ -10373,6 +10174,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -10433,7 +10235,7 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { @@ -10450,7 +10252,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_jemalloc_allocated{$instance_label=\"$instance\"}", @@ -10476,7 +10278,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_jemalloc_metadata{$instance_label=\"$instance\"}", @@ -10502,7 +10304,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_jemalloc_retained{$instance_label=\"$instance\"}", @@ -10551,6 +10353,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -10598,12 +10401,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_process_resident_memory_bytes{$instance_label=\"$instance\"}", @@ -10651,6 +10454,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -10698,12 +10502,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "avg(rate(reth_process_cpu_seconds_total{$instance_label=\"$instance\"}[1m]))", @@ -10751,6 +10555,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -10798,12 +10603,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, 
"editorMode": "builder", "expr": "reth_process_open_fds{$instance_label=\"$instance\"}", @@ -10851,6 +10656,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -10898,12 +10704,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_executor_spawn_critical_tasks_total{$instance_label=\"$instance\"}- reth_executor_spawn_finished_critical_tasks_total{$instance_label=\"$instance\"}", @@ -10952,6 +10758,7 @@ "type": "linear" }, "showPoints": "auto", + "showValues": false, "spanNulls": false, "stacking": { "group": "A", @@ -11012,12 +10819,12 @@ "sort": "none" } }, - "pluginVersion": "12.1.0-pre", + "pluginVersion": "12.2.1", "targets": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "disableTextWrap": false, "editorMode": "code", @@ -11049,20 +10856,153 @@ "title": "Task Executor regular tasks", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "description": "Tracks the number of regular blocking tasks currently ran by the executor.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "semi-dark-red", + "value": 80 + } + ] + }, + "unit": "tasks/s" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "C" + }, + "properties": [ + { + "id": "unit", + "value": "tasks" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 362 + }, + "id": 1007, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.2.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "rate(reth_executor_spawn_regular_blocking_tasks_total{$instance_label=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Tasks started", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "editorMode": "code", + "expr": "reth_executor_spawn_regular_blocking_tasks_total{$instance_label=\"$instance\"} - reth_executor_spawn_finished_regular_blocking_tasks_total{$instance_label=\"$instance\"}", + "hide": false, + "instant": false, + "legendFormat": "Tasks running", + "range": true, + "refId": "C" + } + ], 
+ "title": "Task Executor regular blocking tasks", + "type": "timeseries" + }, { "collapsed": true, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 362 + "y": 370 }, "id": 236, "panels": [ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The total number of canonical state notifications sent to ExExes.", "fieldConfig": { @@ -11158,7 +11098,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "The total number of events ExExes have sent to the manager.", "fieldConfig": { @@ -11254,7 +11194,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "Current and Maximum capacity of the internal state notifications buffer.", "fieldConfig": { @@ -11346,7 +11286,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "max_over_time(reth_exex_manager_max_capacity{$instance_label=\"$instance\"}[1h])", @@ -11442,7 +11382,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_exex_manager_buffer_size{$instance_label=\"$instance\"}", @@ -11497,9 +11437,7 @@ "orientation": "auto", "percentChangeColorMode": "standard", "reduceOptions": { - "calcs": [ - "lastNotNull" - ], + "calcs": ["lastNotNull"], "fields": "", "values": false }, @@ -11512,7 +11450,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "builder", "expr": "reth_exex_manager_num_exexs{$instance_label=\"$instance\"}", @@ -11535,7 +11473,7 @@ "h": 1, "w": 24, "x": 0, - "y": 363 + "y": 371 }, "id": 241, "panels": [ @@ -11623,7 +11561,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_exex_wal_lowest_committed_block_height{$instance_label=\"$instance\"}", @@ -11653,7 +11591,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "description": "", "fieldConfig": { @@ -11747,7 +11685,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_exex_wal_notifications_count{$instance_label=\"$instance\"}", @@ -11846,7 +11784,7 @@ { "datasource": { "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "uid": "${datasource}" }, "editorMode": "code", "expr": "reth_exex_wal_size_bytes{$instance_label=\"$instance\"}", @@ -11866,7 +11804,7 @@ } ], "refresh": "5s", - "schemaVersion": 41, + "schemaVersion": 42, "tags": [], "templating": { "list": [ @@ -11932,6 +11870,6 @@ "timezone": "", "title": "Reth", "uid": "2k8BXz24x", - "version": 3, + "version": 4, "weekStart": "" } diff --git a/etc/lighthouse.yml b/etc/lighthouse.yml index fc76b1fc77..b7b6c352ee 100644 --- a/etc/lighthouse.yml +++ b/etc/lighthouse.yml @@ -3,7 +3,7 @@ name: reth services: lighthouse: restart: unless-stopped - image: sigp/lighthouse:v7.0.1 + image: sigp/lighthouse:v8.0.1 depends_on: - reth ports: diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs index 56755b1e73..cc3ba9abf8 100644 --- a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs +++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs @@ -1,5 +1,5 @@ use crate::BeaconSidecarConfig; -use alloy_consensus::{BlockHeader, Signed, 
Transaction as _, TxEip4844WithSidecar, Typed2718}; +use alloy_consensus::{Signed, Transaction as _, TxEip4844WithSidecar, Typed2718}; use alloy_eips::eip7594::BlobTransactionSidecarVariant; use alloy_primitives::B256; use alloy_rpc_types_beacon::sidecar::{BeaconBlobBundle, SidecarIterator}; @@ -202,9 +202,9 @@ where .map(|tx| { let transaction_hash = *tx.tx_hash(); let block_metadata = BlockMetadata { - block_hash: new.tip().hash(), - block_number: new.tip().number(), - gas_used: new.tip().gas_used(), + block_hash: block.hash(), + block_number: block.number, + gas_used: block.gas_used, }; BlobTransactionEvent::Reorged(ReorgedBlob { transaction_hash, diff --git a/examples/bsc-p2p/src/block_import/service.rs b/examples/bsc-p2p/src/block_import/service.rs index 35003423e7..5b12d25b29 100644 --- a/examples/bsc-p2p/src/block_import/service.rs +++ b/examples/bsc-p2p/src/block_import/service.rs @@ -65,7 +65,7 @@ where impl ImportService where - Provider: BlockNumReader + Clone + 'static, + Provider: BlockNumReader + Sync + Clone + 'static, T: PayloadTypes, { /// Create a new block import service @@ -198,7 +198,7 @@ where impl Future for ImportService where - Provider: BlockNumReader + BlockHashReader + Clone + 'static + Unpin, + Provider: BlockNumReader + BlockHashReader + Sync + Clone + 'static + Unpin, T: PayloadTypes, { type Output = Result<(), Box>; diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index ca724e52af..a26ce1594a 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -41,7 +41,7 @@ use reth_ethereum::{ builder::{ components::{BasicPayloadServiceBuilder, ComponentsBuilder, PayloadBuilderBuilder}, rpc::{PayloadValidatorBuilder, RpcAddOns}, - BuilderContext, Node, NodeAdapter, NodeBuilder, + BuilderContext, Node, NodeAdapter, NodeBuilder, PayloadBuilderConfig, }, core::{args::RpcServerArgs, node_config::NodeConfig}, node::{ @@ -51,7 +51,7 @@ use reth_ethereum::{ EthEvmConfig, EthereumEthApiBuilder, }, pool::{PoolTransaction, TransactionPool}, - primitives::{Block, RecoveredBlock, SealedBlock}, + primitives::{Block, SealedBlock}, provider::{EthStorage, StateProviderFactory}, rpc::types::engine::ExecutionPayload, tasks::TaskManager, @@ -193,12 +193,11 @@ impl CustomEngineValidator { impl PayloadValidator for CustomEngineValidator { type Block = reth_ethereum::Block; - fn ensure_well_formed_payload( + fn convert_payload_to_block( &self, payload: ExecutionData, - ) -> Result, NewPayloadError> { - let sealed_block = self.inner.ensure_well_formed_payload(payload)?; - sealed_block.try_recover().map_err(|e| NewPayloadError::Other(e.into())) + ) -> Result, NewPayloadError> { + self.inner.ensure_well_formed_payload(payload).map_err(Into::into) } fn validate_payload_attributes_against_header( @@ -338,7 +337,8 @@ where ctx.provider().clone(), pool, evm_config, - EthereumBuilderConfig::new(), + EthereumBuilderConfig::new() + .with_extra_data(ctx.payload_builder_config().extra_data_bytes()), ), }; Ok(payload_builder) diff --git a/examples/custom-hardforks/Cargo.toml b/examples/custom-hardforks/Cargo.toml new file mode 100644 index 0000000000..78060f6af6 --- /dev/null +++ b/examples/custom-hardforks/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "custom-hardforks" +license.workspace = true +version = "0.1.0" +edition = "2021" +publish = false + +[dependencies] +# Core Reth dependencies for chain specs and hardforks +reth-chainspec.workspace = true +reth-network-peers.workspace = true 
+alloy-genesis.workspace = true +alloy-consensus.workspace = true +alloy-primitives.workspace = true +alloy-eips.workspace = true +serde = { version = "1.0", features = ["derive"] } diff --git a/examples/custom-hardforks/src/chainspec.rs b/examples/custom-hardforks/src/chainspec.rs new file mode 100644 index 0000000000..d51db59fdd --- /dev/null +++ b/examples/custom-hardforks/src/chainspec.rs @@ -0,0 +1,149 @@ +//! Custom chain specification integrating hardforks. +//! +//! This demonstrates how to build a `ChainSpec` with custom hardforks, +//! implementing required traits for integration with Reth's chain management. + +use alloy_eips::eip7840::BlobParams; +use alloy_genesis::Genesis; +use alloy_primitives::{B256, U256}; +use reth_chainspec::{ + hardfork, BaseFeeParams, Chain, ChainSpec, DepositContract, EthChainSpec, EthereumHardfork, + EthereumHardforks, ForkCondition, Hardfork, Hardforks, +}; +use reth_network_peers::NodeRecord; +use serde::{Deserialize, Serialize}; + +// Define custom hardfork variants using Reth's `hardfork!` macro. +// Each variant represents a protocol upgrade (e.g., enabling new features). +hardfork!( + /// Custom hardforks for the example chain. + /// + /// These are inspired by Ethereum's upgrades but customized for demonstration. + /// Add new variants here to extend the chain's hardfork set. + CustomHardfork { + /// Enables basic custom features (e.g., a new precompile). + BasicUpgrade, + /// Enables advanced features (e.g., state modifications). + AdvancedUpgrade, + } +); + +// Implement the `Hardfork` trait for each variant. +// This defines the name and any custom logic (e.g., feature toggles). +// Note: The hardfork! macro already implements Hardfork, so no manual impl needed. + +// Configuration for hardfork activation. +// This struct holds settings like activation blocks and is serializable for config files. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct CustomHardforkConfig { + /// Block number to activate BasicUpgrade. + pub basic_upgrade_block: u64, + /// Block number to activate AdvancedUpgrade. + pub advanced_upgrade_block: u64, +} + +// Custom chain spec wrapping Reth's `ChainSpec` with our hardforks. +#[derive(Debug, Clone)] +pub struct CustomChainSpec { + pub inner: ChainSpec, +} + +impl CustomChainSpec { + /// Creates a custom chain spec from a genesis file. + /// + /// This parses the [`ChainSpec`] and adds the custom hardforks. + pub fn from_genesis(genesis: Genesis) -> Self { + let extra = genesis.config.extra_fields.deserialize_as::().unwrap(); + + let mut inner = ChainSpec::from_genesis(genesis); + inner.hardforks.insert( + CustomHardfork::BasicUpgrade, + ForkCondition::Timestamp(extra.basic_upgrade_block), + ); + inner.hardforks.insert( + CustomHardfork::AdvancedUpgrade, + ForkCondition::Timestamp(extra.advanced_upgrade_block), + ); + Self { inner } + } +} + +// Implement `Hardforks` to integrate custom hardforks with Reth's system. 
+impl Hardforks for CustomChainSpec { + fn fork(&self, fork: H) -> ForkCondition { + self.inner.fork(fork) + } + + fn forks_iter(&self) -> impl Iterator { + self.inner.forks_iter() + } + + fn fork_id(&self, head: &reth_chainspec::Head) -> reth_chainspec::ForkId { + self.inner.fork_id(head) + } + + fn latest_fork_id(&self) -> reth_chainspec::ForkId { + self.inner.latest_fork_id() + } + + fn fork_filter(&self, head: reth_chainspec::Head) -> reth_chainspec::ForkFilter { + self.inner.fork_filter(head) + } +} + +// Implement `EthChainSpec` for compatibility with Ethereum-based nodes. +impl EthChainSpec for CustomChainSpec { + type Header = alloy_consensus::Header; + + fn chain(&self) -> Chain { + self.inner.chain() + } + + fn base_fee_params_at_timestamp(&self, timestamp: u64) -> BaseFeeParams { + self.inner.base_fee_params_at_timestamp(timestamp) + } + + fn blob_params_at_timestamp(&self, timestamp: u64) -> Option { + self.inner.blob_params_at_timestamp(timestamp) + } + + fn deposit_contract(&self) -> Option<&DepositContract> { + self.inner.deposit_contract() + } + + fn genesis_hash(&self) -> B256 { + self.inner.genesis_hash() + } + + fn prune_delete_limit(&self) -> usize { + self.inner.prune_delete_limit() + } + + fn display_hardforks(&self) -> Box { + Box::new(self.inner.display_hardforks()) + } + + fn genesis_header(&self) -> &Self::Header { + self.inner.genesis_header() + } + + fn genesis(&self) -> &Genesis { + self.inner.genesis() + } + + fn bootnodes(&self) -> Option> { + self.inner.bootnodes() + } + + fn final_paris_total_difficulty(&self) -> Option { + self.inner.final_paris_total_difficulty() + } +} + +// Implement `EthereumHardforks` to support Ethereum hardfork queries. +impl EthereumHardforks for CustomChainSpec { + fn ethereum_fork_activation(&self, fork: EthereumHardfork) -> ForkCondition { + self.inner.ethereum_fork_activation(fork) + } +} diff --git a/examples/custom-hardforks/src/main.rs b/examples/custom-hardforks/src/main.rs new file mode 100644 index 0000000000..588f260c61 --- /dev/null +++ b/examples/custom-hardforks/src/main.rs @@ -0,0 +1,5 @@ +//! Example that showcases how to inject custom hardforks. + +pub mod chainspec; + +fn main() {} diff --git a/examples/custom-inspector/src/main.rs b/examples/custom-inspector/src/main.rs index f7accf0e8c..0249a4a258 100644 --- a/examples/custom-inspector/src/main.rs +++ b/examples/custom-inspector/src/main.rs @@ -44,7 +44,7 @@ fn main() { // create a new subscription to pending transactions let mut pending_transactions = node.pool.new_pending_pool_transactions_listener(); - // get an instance of the `trace_` API handler + // get an instance of the `eth_` API handler let eth_api = node.rpc_registry.eth_api().clone(); println!("Spawning trace task!"); @@ -106,13 +106,13 @@ fn main() { /// Our custom cli args extension that adds one flag to reth default CLI. #[derive(Debug, Clone, Default, clap::Args)] struct RethCliTxpoolExt { - /// The addresses of the recipients that we want to trace. + /// The addresses of the recipients that we want to inspect. #[arg(long, value_delimiter = ',')] pub recipients: Vec
, } impl RethCliTxpoolExt { - /// Check if the recipient is in the list of recipients to trace. + /// Check if the recipient is in the list of recipients to inspect. pub fn is_match(&self, recipient: &Address) -> bool { self.recipients.is_empty() || self.recipients.contains(recipient) } diff --git a/examples/custom-node/Cargo.toml b/examples/custom-node/Cargo.toml index fe1f000625..8eb3dbd143 100644 --- a/examples/custom-node/Cargo.toml +++ b/examples/custom-node/Cargo.toml @@ -7,7 +7,6 @@ license.workspace = true [dependencies] # reth -reth-chain-state.workspace = true reth-codecs.workspace = true reth-network-peers.workspace = true reth-node-builder.workspace = true diff --git a/examples/custom-node/src/engine.rs b/examples/custom-node/src/engine.rs index 0c80e52a66..d6d363db35 100644 --- a/examples/custom-node/src/engine.rs +++ b/examples/custom-node/src/engine.rs @@ -5,17 +5,17 @@ use crate::{ CustomNode, }; use alloy_eips::eip2718::WithEncoded; +use alloy_primitives::Bytes; use op_alloy_rpc_types_engine::{OpExecutionData, OpExecutionPayload}; -use reth_chain_state::ExecutedBlock; use reth_engine_primitives::EngineApiValidator; use reth_ethereum::{ node::api::{ - validate_version_specific_fields, AddOnsContext, BuiltPayload, EngineApiMessageVersion, - EngineObjectValidationError, ExecutionPayload, FullNodeComponents, NewPayloadError, - NodePrimitives, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, - PayloadTypes, PayloadValidator, + validate_version_specific_fields, AddOnsContext, BuiltPayload, BuiltPayloadExecutedBlock, + EngineApiMessageVersion, EngineObjectValidationError, ExecutionPayload, FullNodeComponents, + NewPayloadError, NodePrimitives, PayloadAttributes, PayloadBuilderAttributes, + PayloadOrAttributes, PayloadTypes, PayloadValidator, }, - primitives::{RecoveredBlock, SealedBlock}, + primitives::SealedBlock, storage::StateProviderFactory, trie::{KeccakKeyHasher, KeyHasher}, }; @@ -55,6 +55,10 @@ impl ExecutionPayload for CustomExecutionData { None } + fn block_access_list(&self) -> Option<&Bytes> { + None + } + fn parent_beacon_block_root(&self) -> Option { self.inner.parent_beacon_block_root() } @@ -66,6 +70,21 @@ impl ExecutionPayload for CustomExecutionData { fn gas_used(&self) -> u64 { self.inner.gas_used() } + + fn transaction_count(&self) -> usize { + self.inner.payload.as_v1().transactions.len() + } +} + +impl TryFrom<&reth_optimism_flashblocks::FlashBlockCompleteSequence> for CustomExecutionData { + type Error = &'static str; + + fn try_from( + sequence: &reth_optimism_flashblocks::FlashBlockCompleteSequence, + ) -> Result { + let inner = OpExecutionData::try_from(sequence)?; + Ok(Self { inner, extension: sequence.last().diff.gas_used }) + } } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -167,7 +186,7 @@ impl BuiltPayload for CustomBuiltPayload { self.0.fees() } - fn executed_block(&self) -> Option> { + fn executed_block(&self) -> Option> { self.0.executed_block() } @@ -231,23 +250,6 @@ where { type Block = crate::primitives::block::Block; - fn ensure_well_formed_payload( - &self, - payload: CustomExecutionData, - ) -> Result, NewPayloadError> { - let sealed_block = PayloadValidator::::ensure_well_formed_payload( - &self.inner, - payload.inner, - )?; - let (block, senders) = sealed_block.split_sealed(); - let (header, body) = block.split_sealed_header_body(); - let header = CustomHeader { inner: header.into_header(), extension: payload.extension }; - let body = body.map_ommers(|_| CustomHeader::default()); - let block = 
SealedBlock::::from_parts_unhashed(header, body); - - Ok(block.with_senders(senders)) - } - fn validate_payload_attributes_against_header( &self, _attr: &CustomPayloadAttributes, @@ -256,6 +258,20 @@ where // skip default timestamp validation Ok(()) } + + fn convert_payload_to_block( + &self, + payload: CustomExecutionData, + ) -> Result, NewPayloadError> { + let sealed_block = PayloadValidator::::convert_payload_to_block( + &self.inner, + payload.inner, + )?; + let (header, body) = sealed_block.split_sealed_header_body(); + let header = CustomHeader { inner: header.into_header(), extension: payload.extension }; + let body = body.map_ommers(|_| CustomHeader::default()); + Ok(SealedBlock::::from_parts_unhashed(header, body)) + } } impl

EngineApiValidator for CustomEngineValidator

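Note on the engine.rs hunk above: `PayloadValidator::ensure_well_formed_payload` (which returned a recovered block) is replaced by `convert_payload_to_block`, which only converts the payload into a `SealedBlock`; sender recovery is no longer performed inside the validator. A minimal sketch of the new method for a validator that delegates to an inner validator, using only the signatures visible in this diff — the surrounding impl header and its generic parameters are omitted because they are not fully recoverable from this hunk, so treat this as illustrative rather than the canonical implementation:

    // Sketch, not the canonical implementation: mirrors the delegating body
    // shown in the custom-engine-types hunk earlier in this diff.
    fn convert_payload_to_block(
        &self,
        payload: ExecutionData,
    ) -> Result<SealedBlock<Self::Block>, NewPayloadError> {
        // Structural payload checks are delegated to the inner validator;
        // the result is a sealed block without recovered senders.
        self.inner.ensure_well_formed_payload(payload).map_err(Into::into)
    }
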
diff --git a/examples/custom-node/src/evm/config.rs b/examples/custom-node/src/evm/config.rs index a7dee31a83..c29ac07563 100644 --- a/examples/custom-node/src/evm/config.rs +++ b/examples/custom-node/src/evm/config.rs @@ -9,6 +9,7 @@ use alloy_eips::{eip2718::WithEncoded, Decodable2718}; use alloy_evm::EvmEnv; use alloy_op_evm::OpBlockExecutionCtx; use alloy_rpc_types_engine::PayloadError; +use op_alloy_rpc_types_engine::flashblock::OpFlashblockPayloadBase; use op_revm::OpSpecId; use reth_engine_primitives::ExecutableTxIterator; use reth_ethereum::{ @@ -23,8 +24,8 @@ use reth_op::{ node::{OpEvmConfig, OpNextBlockEnvAttributes, OpRethReceiptBuilder}, primitives::SignedTransaction, }; -use reth_optimism_flashblocks::ExecutionPayloadBaseV1; use reth_rpc_api::eth::helpers::pending_block::BuildPendingEnv; +use revm_primitives::Bytes; use std::sync::Arc; #[derive(Debug, Clone)] @@ -126,13 +127,15 @@ impl ConfigureEngineEvm for CustomEvmConfig { &self, payload: &CustomExecutionData, ) -> Result, Self::Error> { - Ok(payload.inner.payload.transactions().clone().into_iter().map(|encoded| { + let transactions = payload.inner.payload.transactions().clone(); + let convert = |encoded: Bytes| { let tx = CustomTransaction::decode_2718_exact(encoded.as_ref()) .map_err(Into::into) .map_err(PayloadError::Decode)?; let signer = tx.try_recover().map_err(NewPayloadError::other)?; Ok::<_, NewPayloadError>(WithEncoded::new(encoded, tx.with_signer(signer))) - })) + }; + Ok((transactions, convert)) } } @@ -143,8 +146,8 @@ pub struct CustomNextBlockEnvAttributes { extension: u64, } -impl From for CustomNextBlockEnvAttributes { - fn from(value: ExecutionPayloadBaseV1) -> Self { +impl From for CustomNextBlockEnvAttributes { + fn from(value: OpFlashblockPayloadBase) -> Self { Self { inner: value.into(), extension: 0 } } } diff --git a/examples/custom-node/src/primitives/tx_custom.rs b/examples/custom-node/src/primitives/tx_custom.rs index 8729378bd5..210696f49c 100644 --- a/examples/custom-node/src/primitives/tx_custom.rs +++ b/examples/custom-node/src/primitives/tx_custom.rs @@ -6,7 +6,6 @@ use alloy_consensus::{ use alloy_eips::{eip2930::AccessList, eip7702::SignedAuthorization, Typed2718}; use alloy_primitives::{Address, Bytes, ChainId, Signature, TxKind, B256, U256}; use alloy_rlp::{BufMut, Decodable, Encodable}; -use core::mem; use reth_ethereum::primitives::{serde_bincode_compat::RlpBincode, InMemorySize}; /// A transaction with a priority fee ([EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)). @@ -75,17 +74,10 @@ impl TxPayment { super::tx::TxTypeCustom::Payment } - /// Calculates a heuristic for the in-memory size of the [TxPayment] - /// transaction. + /// Calculates a heuristic for the in-memory size of the [TxPayment] transaction. #[inline] pub fn size(&self) -> usize { - mem::size_of::() + // chain_id - mem::size_of::() + // nonce - mem::size_of::() + // gas_limit - mem::size_of::() + // max_fee_per_gas - mem::size_of::() + // max_priority_fee_per_gas - mem::size_of::

() + // to - mem::size_of::() // value + size_of::() } } diff --git a/examples/custom-node/src/rpc.rs b/examples/custom-node/src/rpc.rs index 8259297367..b6dc7742d9 100644 --- a/examples/custom-node/src/rpc.rs +++ b/examples/custom-node/src/rpc.rs @@ -3,14 +3,15 @@ use crate::{ primitives::{CustomHeader, CustomTransaction}, }; use alloy_consensus::error::ValueError; +use alloy_evm::EvmEnv; use alloy_network::TxSigner; use op_alloy_consensus::OpTxEnvelope; use op_alloy_rpc_types::{OpTransactionReceipt, OpTransactionRequest}; use reth_op::rpc::RpcTypes; use reth_rpc_api::eth::{ - transaction::TryIntoTxEnv, EthTxEnvError, SignTxRequestError, SignableTxRequest, TryIntoSimTx, + EthTxEnvError, SignTxRequestError, SignableTxRequest, TryIntoSimTx, TryIntoTxEnv, }; -use revm::context::{BlockEnv, CfgEnv}; +use revm::context::BlockEnv; #[derive(Debug, Clone, Copy, Default)] #[non_exhaustive] @@ -34,10 +35,9 @@ impl TryIntoTxEnv for OpTransactionRequest { fn try_into_tx_env( self, - cfg_env: &CfgEnv, - block_env: &BlockEnv, + evm_env: &EvmEnv, ) -> Result { - Ok(CustomTxEnv::Op(self.try_into_tx_env(cfg_env, block_env)?)) + Ok(CustomTxEnv::Op(self.try_into_tx_env(evm_env)?)) } } diff --git a/examples/custom-payload-builder/src/main.rs b/examples/custom-payload-builder/src/main.rs index c38b46a5b9..3de8297090 100644 --- a/examples/custom-payload-builder/src/main.rs +++ b/examples/custom-payload-builder/src/main.rs @@ -62,7 +62,8 @@ where ctx.provider().clone(), pool, evm_config, - EthereumBuilderConfig::new(), + EthereumBuilderConfig::new() + .with_extra_data(ctx.payload_builder_config().extra_data_bytes()), ); let conf = ctx.payload_builder_config(); diff --git a/examples/custom-rpc-middleware/Cargo.toml b/examples/custom-rpc-middleware/Cargo.toml new file mode 100644 index 0000000000..92b5975e43 --- /dev/null +++ b/examples/custom-rpc-middleware/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "example-custom-rpc-middleware" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +reth-ethereum = { workspace = true, features = ["node", "rpc", "cli"] } + +clap = { workspace = true, features = ["derive"] } +jsonrpsee = { workspace = true, features = ["server", "macros"] } +tracing.workspace = true +tower.workspace = true diff --git a/examples/custom-rpc-middleware/src/main.rs b/examples/custom-rpc-middleware/src/main.rs new file mode 100644 index 0000000000..003111b671 --- /dev/null +++ b/examples/custom-rpc-middleware/src/main.rs @@ -0,0 +1,117 @@ +//! Example of how to create a node with custom middleware that alters a returned error message from +//! the RPC +//! +//! Run with +//! +//! ```sh +//! cargo run -p example-custom-rpc-middleware node --http --dev --dev.block-time 12s --http.api=debug,eth +//! ``` +//! +//! Then make an RPC request that will result in an error +//! +//! ```sh +//! curl -s -X POST http://localhost:8545 \ +//! -H "Content-Type: application/json" \ +//! -d '{ +//! "jsonrpc": "2.0", +//! "method": "debug_getRawBlock", +//! "params": ["2"], +//! "id": 1 +//! }' | jq +//! 
``` + +use clap::Parser; +use jsonrpsee::{ + core::{ + middleware::{Batch, Notification, RpcServiceT}, + server::MethodResponse, + }, + types::{ErrorObjectOwned, Id, Request}, +}; +use reth_ethereum::{ + cli::{chainspec::EthereumChainSpecParser, interface::Cli}, + node::{EthereumAddOns, EthereumNode}, +}; +use tower::Layer; + +fn main() { + Cli::::parse() + .run(|builder, _| async move { + let handle = builder + .with_types::() + .with_components(EthereumNode::components()) + .with_add_ons( + //create ethereum addons with our custom rpc middleware + EthereumAddOns::default().with_rpc_middleware(ResponseMutationLayer), + ) + .launch_with_debug_capabilities() + .await?; + + handle.wait_for_node_exit().await + }) + .unwrap(); +} + +#[derive(Clone)] +pub struct ResponseMutationLayer; + +impl Layer for ResponseMutationLayer { + type Service = ResponseMutationService; + + fn layer(&self, inner: S) -> Self::Service { + ResponseMutationService { service: inner } + } +} + +#[derive(Clone)] +pub struct ResponseMutationService { + service: S, +} + +impl RpcServiceT for ResponseMutationService +where + S: RpcServiceT< + MethodResponse = jsonrpsee::MethodResponse, + BatchResponse = jsonrpsee::MethodResponse, + NotificationResponse = jsonrpsee::MethodResponse, + > + Send + + Sync + + Clone + + 'static, +{ + type MethodResponse = S::MethodResponse; + type NotificationResponse = S::NotificationResponse; + type BatchResponse = S::BatchResponse; + + fn call<'a>(&self, req: Request<'a>) -> impl Future + Send + 'a { + tracing::info!("processed call {:?}", req); + let service = self.service.clone(); + Box::pin(async move { + let resp = service.call(req).await; + + //we can modify the response with our own custom error + if resp.is_error() { + let err = ErrorObjectOwned::owned( + -31404, + "CustomError", + Some("Our very own custom error message"), + ); + return MethodResponse::error(Id::Number(1), err); + } + + //otherwise just return the original response + resp + }) + } + + fn batch<'a>(&self, req: Batch<'a>) -> impl Future + Send + 'a { + self.service.batch(req) + } + + fn notification<'a>( + &self, + n: Notification<'a>, + ) -> impl Future + Send + 'a { + self.service.notification(n) + } +} diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index 339aa1ae3d..1042ac55be 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -1,13 +1,13 @@ #![warn(unused_crate_dependencies)] -use alloy_primitives::{Address, B256}; +use alloy_primitives::{keccak256, Address, B256}; use reth_ethereum::{ chainspec::ChainSpecBuilder, node::EthereumNode, primitives::{AlloyBlockHeader, SealedBlock, SealedHeader}, provider::{ - providers::ReadOnlyConfig, AccountReader, BlockReader, BlockSource, HeaderProvider, - ReceiptProvider, StateProvider, TransactionVariant, TransactionsProvider, + providers::ReadOnlyConfig, AccountReader, BlockNumReader, BlockReader, BlockSource, + HeaderProvider, ReceiptProvider, StateProvider, TransactionVariant, TransactionsProvider, }, rpc::eth::primitives::Filter, TransactionSigned, @@ -39,16 +39,13 @@ fn main() -> eyre::Result<()> { txs_provider_example(&provider)?; receipts_provider_example(&provider)?; + state_provider_example(factory.latest()?, &provider, provider.best_block_number()?)?; + state_provider_example(factory.history_by_block_number(block_num)?, &provider, block_num)?; + // Closes the RO transaction opened in the `factory.provider()` call. This is optional and // would happen anyway at the end of the function scope. 
drop(provider); - // Run the example against latest state - state_provider_example(factory.latest()?)?; - - // Run it with historical state - state_provider_example(factory.history_by_block_number(block_num)?)?; - Ok(()) } @@ -98,9 +95,6 @@ fn txs_provider_example let id = provider.transaction_id(*tx.tx_hash())?.ok_or(eyre::eyre!("txhash not found"))?; assert_eq!(id, txid); - // Can find the block of a transaction given its key - let _block = provider.transaction_block(txid)?; - // Can query the txs in the range [100, 200) let _txs_by_tx_range = provider.transactions_by_tx_range(100..200)?; // Can query the txs in the _block_ range [100, 200)] @@ -181,15 +175,21 @@ fn receipts_provider_example< let header = provider.header_by_number(header_num)?.unwrap(); let bloom = header.logs_bloom(); - // 2. Construct the address/topics filters - // For a hypothetical address, we'll want to filter down for a specific indexed topic (e.g. - // `from`). - let addr = Address::random(); - let topic = B256::random(); + // 2. Construct the address/topics filters. topic0 always refers to the event signature, so + // filter it with event_signature() (or use the .event() helper). The remaining helpers map to + // the indexed parameters in declaration order (topic1 -> first indexed param, etc). + let contract_addr = Address::random(); + let indexed_from = Address::random(); + let indexed_to = Address::random(); + let transfer_signature = keccak256("Transfer(address,address,uint256)"); - // TODO: Make it clearer how to choose between event_signature(topic0) (event name) and the - // other 3 indexed topics. This API is a bit clunky and not obvious to use at the moment. - let filter = Filter::new().address(addr).event_signature(topic); + // This matches ERC-20 Transfer events emitted by contract_addr where both indexed addresses are + // fixed. If your event declares a third indexed parameter, continue with topic3(...). + let filter = Filter::new() + .address(contract_addr) + .event_signature(transfer_signature) + .topic1(indexed_from) + .topic2(indexed_to); // 3. If the address & topics filters match do something. We use the outer check against the // bloom filter stored in the header to avoid having to query the receipts table when there @@ -207,16 +207,39 @@ fn receipts_provider_example< Ok(()) } -fn state_provider_example(provider: T) -> eyre::Result<()> { +/// The `StateProvider` allows querying the state tables. +fn state_provider_example( + provider: T, + headers: &H, + number: u64, +) -> eyre::Result<()> { let address = Address::random(); let storage_key = B256::random(); + let slots = [storage_key]; + + let header = headers.header_by_number(number)?.ok_or(eyre::eyre!("header not found"))?; + let state_root = header.state_root(); // Can get account / storage state with simple point queries - let _account = provider.basic_account(&address)?; - let _code = provider.account_code(&address)?; - let _storage = provider.storage(address, storage_key)?; - // TODO: unimplemented. 
- // let _proof = provider.proof(address, &[])?; + let account = provider.basic_account(&address)?; + let code = provider.account_code(&address)?; + let storage_value = provider.storage(address, storage_key)?; + + println!( + "state at block #{number}: addr={address:?}, nonce={}, balance={}, storage[{:?}]={:?}, has_code={}", + account.as_ref().map(|acc| acc.nonce).unwrap_or_default(), + account.as_ref().map(|acc| acc.balance).unwrap_or_default(), + storage_key, + storage_value, + code.is_some() + ); + + // Returns a bundled proof with the account's info + let proof = provider.proof(Default::default(), address, &slots)?; + + // Can verify the returned proof against the state root + proof.verify(state_root)?; + println!("account proof verified against state root {state_root:?}"); Ok(()) } diff --git a/examples/exex-hello-world/src/main.rs b/examples/exex-hello-world/src/main.rs index 2c89fb7262..3e86ee785a 100644 --- a/examples/exex-hello-world/src/main.rs +++ b/examples/exex-hello-world/src/main.rs @@ -81,7 +81,7 @@ where let _eth_pubsub = rpc_handle.eth_handlers().pubsub.clone(); // The TraceApi type that provides all the trace_ handlers let _trace_api = rpc_handle.trace_api(); - // The DebugApi type that provides all the trace_ handlers + // The DebugApi type that provides all the debug_ handlers let _debug_api = rpc_handle.debug_api(); while let Some(notification) = ctx.notifications.try_next().await? { diff --git a/examples/exex-subscription/src/main.rs b/examples/exex-subscription/src/main.rs index 2f0c38f385..a2b46686f7 100644 --- a/examples/exex-subscription/src/main.rs +++ b/examples/exex-subscription/src/main.rs @@ -1,13 +1,9 @@ -#![allow(dead_code)] - //! An ExEx example that installs a new RPC subscription endpoint that emits storage changes for a //! requested address. -#[allow(dead_code)] use alloy_primitives::{Address, U256}; use futures::TryStreamExt; use jsonrpsee::{ - core::SubscriptionResult, proc_macros::rpc, tracing, PendingSubscriptionSink, - SubscriptionMessage, + core::SubscriptionResult, proc_macros::rpc, PendingSubscriptionSink, SubscriptionMessage, }; use reth_ethereum::{ exex::{ExExContext, ExExEvent, ExExNotification}, @@ -168,8 +164,7 @@ async fn my_exex( fn main() -> eyre::Result<()> { reth_ethereum::cli::Cli::parse_args().run(|builder, _| async move { let (subscriptions_tx, subscriptions_rx) = mpsc::unbounded_channel::(); - - let rpc = StorageWatcherRpc::new(subscriptions_tx.clone()); + let rpc = StorageWatcherRpc::new(subscriptions_tx); let handle: NodeHandleFor = builder .node(EthereumNode::default()) diff --git a/examples/network-proxy/src/main.rs b/examples/network-proxy/src/main.rs index 51ba8e2b4a..50ef9e4e72 100644 --- a/examples/network-proxy/src/main.rs +++ b/examples/network-proxy/src/main.rs @@ -82,6 +82,7 @@ async fn main() -> eyre::Result<()> { IncomingEthRequest::GetNodeData { .. } => {} IncomingEthRequest::GetReceipts { .. } => {} IncomingEthRequest::GetReceipts69 { .. } => {} + IncomingEthRequest::GetReceipts70 { .. 
} => {} } } transaction_message = transactions_rx.recv() => { diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs index 97bd1debdc..4a0339fd00 100644 --- a/examples/rpc-db/src/main.rs +++ b/examples/rpc-db/src/main.rs @@ -24,7 +24,7 @@ use reth_ethereum::{ pool::noop::NoopTransactionPool, provider::{ db::{mdbx::DatabaseArguments, open_db_read_only, ClientVersion, DatabaseEnv}, - providers::{BlockchainProvider, StaticFileProvider}, + providers::{BlockchainProvider, RocksDBProvider, StaticFileProvider}, ProviderFactory, }, rpc::{ @@ -53,7 +53,8 @@ async fn main() -> eyre::Result<()> { db.clone(), spec.clone(), StaticFileProvider::read_only(db_path.join("static_files"), true)?, - ); + RocksDBProvider::builder(db_path.join("rocksdb")).build().unwrap(), + )?; // 2. Set up the blockchain provider using only the database provider and a noop for the tree to // satisfy trait bounds. Tree is not used in this example since we are only operating on the @@ -80,7 +81,7 @@ async fn main() -> eyre::Result<()> { // Pick which namespaces to expose. let config = TransportRpcModuleConfig::default().with_http([RethRpcModule::Eth]); - let mut server = rpc_builder.build(config, eth_api); + let mut server = rpc_builder.build(config, eth_api, Default::default()); // Add a custom rpc namespace let custom_rpc = MyRpcExt { provider }; diff --git a/flake.lock b/flake.lock index fd2bf9ac61..4efd90828f 100644 --- a/flake.lock +++ b/flake.lock @@ -23,11 +23,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1760942671, - "narHash": "sha256-LyO+TwzM7C8TJJkgbqC+BMnPiJX8XHQJmssTWS2Ze9k=", + "lastModified": 1761720242, + "narHash": "sha256-Zi9nWw68oUDMVOhf/+Z97wVbNV2K7eEAGZugQKqU7xw=", "owner": "nix-community", "repo": "fenix", - "rev": "b5e669194d67dbd4c659c40bb67476d9285b9a13", + "rev": "8e4d32f4cc12b3f106af6e4515b36ac046a1ec91", "type": "github" }, "original": { @@ -63,11 +63,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1760898410, - "narHash": "sha256-bTMk3D0V+6t3qOjXUfWSwjztEuLoAsgtAtqp6/wwfOc=", + "lastModified": 1761686505, + "narHash": "sha256-jX6UrGS/hABDaM4jdx3+xgH3KCHP2zKHeTa8CD5myEo=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "c7e7eb9dc42df01016d795b0fd3a9ae87b7ada1c", + "rev": "d08d54f3c10dfa41033eb780c3bddb50e09d30fc", "type": "github" }, "original": { diff --git a/testing/ef-tests/Cargo.toml b/testing/ef-tests/Cargo.toml index 745172cd82..e9cf465a98 100644 --- a/testing/ef-tests/Cargo.toml +++ b/testing/ef-tests/Cargo.toml @@ -32,7 +32,7 @@ reth-stateless = { workspace = true, features = ["secp256k1"] } reth-tracing.workspace = true reth-trie.workspace = true reth-trie-db.workspace = true -revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } +revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg", "memory_limit"] } alloy-rlp.workspace = true alloy-primitives.workspace = true diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index c54ef2ad7b..1ecbe9a3b1 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -218,7 +218,7 @@ fn run_case( .unwrap(); provider - .insert_block(genesis_block.clone()) + .insert_block(&genesis_block) .map_err(|err| Error::block_failed(0, Default::default(), err))?; // Increment block number for receipts static file @@ -249,7 +249,7 @@ fn run_case( // Insert the block into the database provider - .insert_block(block.clone()) + .insert_block(block) 
.map_err(|err| Error::block_failed(block_number, Default::default(), err))?; // Commit static files, so we can query the headers for stateless execution below provider @@ -310,9 +310,11 @@ fn run_case( // Compute and check the post state root let hashed_state = HashedPostState::from_bundle_state::(output.state.state()); - let (computed_state_root, _) = - StateRoot::overlay_root_with_updates(provider.tx_ref(), hashed_state.clone()) - .map_err(|err| Error::block_failed(block_number, program_inputs.clone(), err))?; + let (computed_state_root, _) = StateRoot::overlay_root_with_updates( + provider.tx_ref(), + &hashed_state.clone_into_sorted(), + ) + .map_err(|err| Error::block_failed(block_number, program_inputs.clone(), err))?; if computed_state_root != block.state_root { return Err(Error::block_failed( block_number, diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index 49c49bf193..bc9af3fab1 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -265,7 +265,7 @@ pub enum ForkSpec { FrontierToHomesteadAt5, /// Homestead Homestead, - /// Homestead to Tangerine + /// Homestead to DAO HomesteadToDaoAt5, /// Homestead to Tangerine HomesteadToEIP150At5, @@ -317,6 +317,8 @@ pub enum ForkSpec { CancunToPragueAtTime15k, /// Prague Prague, + /// Osaka + Osaka, } impl From for ChainSpec { @@ -371,6 +373,7 @@ impl From for ChainSpec { .cancun_activated() .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(15_000)), ForkSpec::Prague => spec_builder.prague_activated(), + ForkSpec::Osaka => spec_builder.osaka_activated(), } .build() } diff --git a/testing/prestate/tx-selfdestruct-prestate.json b/testing/prestate/tx-selfdestruct-prestate.json new file mode 100644 index 0000000000..5ab17b6c2c --- /dev/null +++ b/testing/prestate/tx-selfdestruct-prestate.json @@ -0,0 +1,43 @@ +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "0x5cf6756af36644fc13b2d5d8637a50ae7e6c5fef": { + "balance": "0x1b1989a1deedbd" + }, + "0x95222290dd7278aa3ddd389cc1e1d165cc4bafe5": { + "balance": "0xc8714456682467dc", + "nonce": 797368 + }, + "0xa7fb5ca286fc3fd67525629048a4de3ba24cba2e": { + "balance": "0x23af8fab959cdfb42", + "nonce": 236280 + }, + "0xc77ad0a71008d7094a62cfbd250a2eb2afdf2776": { + "balance": "0x0", + "code": 
"0x608060405234801561001057600080fd5b50600436106100935760003560e01c80638da5cb5b116100665780638da5cb5b146100f55780639f9fb96814610106578063f2fde38b14610127578063f3fef3a31461013a578063fc0c546a1461014d57600080fd5b806331d4fd77146100985780634e71e0c8146100ad5780635b51bec0146100b557806366d003ac146100d0575b600080fd5b6100ab6100a63660046104a1565b610160565b005b6100ab6101e5565b604051640302e362e360dc1b81526020015b60405180910390f35b6002546001600160a01b03165b6040516001600160a01b0390911681526020016100c7565b6000546001600160a01b03166100dd565b610119610114366004610505565b610283565b6040516100c792919061051d565b6100ab610135366004610480565b61030f565b6100ab6101483660046104dc565b610357565b6003546100dd906001600160a01b031681565b336101736000546001600160a01b031690565b6001600160a01b03161461018657600080fd5b6001600160a01b03831661019957600080fd5b600280546001600160a01b0319166001600160a01b0385161790556101be828261038b565b5050600054600280546001600160a01b0319166001600160a01b0390921691909117905550565b6001546001600160a01b0316331415610281576001546001600160a01b03166102166000546001600160a01b031690565b6001600160a01b03167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a360018054600080546001600160a01b0383166001600160a01b031991821681179092559182169092556002805490911690911790555b565b60606000806040518060200161029890610457565b601f1982820381018352601f90910116604081815282516020808501919091206001600160f81b0319828501526bffffffffffffffffffffffff193060601b166021850152603584019890985260558084019890985281518084039098018852607590920190528551950194909420939492505050565b336103226000546001600160a01b031690565b6001600160a01b03161461033557600080fd5b600180546001600160a01b0319166001600160a01b0392909216919091179055565b3361036a6000546001600160a01b031690565b6001600160a01b03161461037d57600080fd5b610387828261038b565b5050565b60008061039783610283565b600380546001600160a01b0319166001600160a01b038881169190911790915582519294509092508216319060009085906020860183f590506001600160a01b0381166103e357600080fd5b600380546001600160a01b03191690557fd1c19fbcd4551a5edfb66d43d2e337c04837afda3482b42bdf569a8fccdae5fb6104266002546001600160a01b031690565b604080516001600160a01b0392831681529186166020830152810184905260600160405180910390a1505050505050565b6103118061058283390190565b80356001600160a01b038116811461047b57600080fd5b919050565b600060208284031215610491578081fd5b61049a82610464565b9392505050565b6000806000606084860312156104b5578182fd5b6104be84610464565b92506104cc60208501610464565b9150604084013590509250925092565b600080604083850312156104ee578182fd5b6104f783610464565b946020939093013593505050565b600060208284031215610516578081fd5b5035919050565b6040815260008351806040840152815b8181101561054a576020818701810151606086840101520161052d565b8181111561055b5782606083860101525b506001600160a01b0393909316602083015250601f91909101601f19160160600191905056fe608060408190526319b400eb60e21b8152339060009082906366d003ac9060849060209060048186803b15801561003557600080fd5b505afa158015610049573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061006d9190610271565b90506000826001600160a01b031663fc0c546a6040518163ffffffff1660e01b815260040160206040518083038186803b1580156100aa57600080fd5b505afa1580156100be573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906100e29190610271565b90506001600160a01b0381161561018d576040516370a0823160e01b815230600482015261018d9083906001600160a01b038416906370a082319060240160206040518083038186803b15801561013857600080fd5b505afa15801561014c573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061017091906102
bf565b836001600160a01b031661019960201b610009179092919060201c565b816001600160a01b0316ff5b604080516001600160a01b038481166024830152604480830185905283518084039091018152606490920183526020820180516001600160e01b031663a9059cbb60e01b17905291516000928392908716916101f591906102d7565b6000604051808303816000865af19150503d8060008114610232576040519150601f19603f3d011682016040523d82523d6000602084013e610237565b606091505b5091509150818015610261575080511580610261575080806020019051810190610261919061029f565b61026a57600080fd5b5050505050565b600060208284031215610282578081fd5b81516001600160a01b0381168114610298578182fd5b9392505050565b6000602082840312156102b0578081fd5b81518015158114610298578182fd5b6000602082840312156102d0578081fd5b5051919050565b60008251815b818110156102f757602081860181015185830152016102dd565b818111156103055782828501525b50919091019291505056fea2646970667358221220820d083773baf3f84f3af74133087e936c58f2a05fdf46b525ba37dba6ae0e2d64736f6c63430008040033", + "codeHash": "0x1b064b625546024df8b0e61d74d84bba7e1f22e31ed3b3c1b37fbe533e33bd72", + "nonce": 118087, + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000a7fb5ca286fc3fd67525629048a4de3ba24cba2e", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000a7fb5ca286fc3fd67525629048a4de3ba24cba2e", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0xdac17f958d2ee523a2206206994597c13d831ec7": { + "balance": "0x1", + "code": "0x606060405260043610610196576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806306fdde031461019b5780630753c30c14610229578063095ea7b3146102625780630e136b19146102a45780630ecb93c0146102d157806318160ddd1461030a57806323b872dd1461033357806326976e3f1461039457806327e235e3146103e9578063313ce56714610436578063353907141461045f5780633eaaf86b146104885780633f4ba83a146104b157806359bf1abe146104c65780635c658165146105175780635c975abb1461058357806370a08231146105b05780638456cb59146105fd578063893d20e8146106125780638da5cb5b1461066757806395d89b41146106bc578063a9059cbb1461074a578063c0324c771461078c578063cc872b66146107b8578063db006a75146107db578063dd62ed3e146107fe578063dd644f721461086a578063e47d606014610893578063e4997dc5146108e4578063e5b5019a1461091d578063f2fde38b14610946578063f3bdc2281461097f575b600080fd5b34156101a657600080fd5b6101ae6109b8565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101ee5780820151818401526020810190506101d3565b50505050905090810190601f16801561021b5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561023457600080fd5b610260600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610a56565b005b341561026d57600080fd5b6102a2600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610b73565b005b34156102af57600080fd5b6102b7610cc1565b604051808215151515815260200191505060405180910390f35b34156102dc57600080fd5b610308600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610cd4565b005b341561031557600080fd5b61031d610ded565b6040518082815260200191505060405180910390f35b341561033e57600080fd5b610392600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610ebd565b005b341561039f57600080fd5b6103a761109d565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815
260200191505060405180910390f35b34156103f457600080fd5b610420600480803573ffffffffffffffffffffffffffffffffffffffff169060200190919050506110c3565b6040518082815260200191505060405180910390f35b341561044157600080fd5b6104496110db565b6040518082815260200191505060405180910390f35b341561046a57600080fd5b6104726110e1565b6040518082815260200191505060405180910390f35b341561049357600080fd5b61049b6110e7565b6040518082815260200191505060405180910390f35b34156104bc57600080fd5b6104c46110ed565b005b34156104d157600080fd5b6104fd600480803573ffffffffffffffffffffffffffffffffffffffff169060200190919050506111ab565b604051808215151515815260200191505060405180910390f35b341561052257600080fd5b61056d600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050611201565b6040518082815260200191505060405180910390f35b341561058e57600080fd5b610596611226565b604051808215151515815260200191505060405180910390f35b34156105bb57600080fd5b6105e7600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050611239565b6040518082815260200191505060405180910390f35b341561060857600080fd5b610610611348565b005b341561061d57600080fd5b610625611408565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561067257600080fd5b61067a611431565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156106c757600080fd5b6106cf611456565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561070f5780820151818401526020810190506106f4565b50505050905090810190601f16801561073c5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561075557600080fd5b61078a600480803573ffffffffffffffffffffffffffffffffffffffff169060200190919080359060200190919050506114f4565b005b341561079757600080fd5b6107b6600480803590602001909190803590602001909190505061169e565b005b34156107c357600080fd5b6107d96004808035906020019091905050611783565b005b34156107e657600080fd5b6107fc600480803590602001909190505061197a565b005b341561080957600080fd5b610854600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050611b0d565b6040518082815260200191505060405180910390f35b341561087557600080fd5b61087d611c52565b6040518082815260200191505060405180910390f35b341561089e57600080fd5b6108ca600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050611c58565b604051808215151515815260200191505060405180910390f35b34156108ef57600080fd5b61091b600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050611c78565b005b341561092857600080fd5b610930611d91565b6040518082815260200191505060405180910390f35b341561095157600080fd5b61097d600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050611db5565b005b341561098a57600080fd5b6109b6600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050611e8a565b005b60078054600181600116156101000203166002900480601f016020809104026020016040519081016040528092919081815260200182805460018160011615610100020316600290048015610a4e5780601f10610a2357610100808354040283529160200191610a4e565b820191906000526020600020905b815481529060010190602001808311610a3157829003601f168201915b505050505081565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141515610ab157600080fd5b6001600a60146101000a81548160ff0219169083151502179055
5080600a60006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055507fcc358699805e9a8b7f77b522628c7cb9abd07d9efb86b6fb616af1609036a99e81604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a150565b604060048101600036905010151515610b8b57600080fd5b600a60149054906101000a900460ff1615610cb157600a60009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663aee92d333385856040518463ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019350505050600060405180830381600087803b1515610c9857600080fd5b6102c65a03f11515610ca957600080fd5b505050610cbc565b610cbb838361200e565b5b505050565b600a60149054906101000a900460ff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141515610d2f57600080fd5b6001600660008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548160ff0219169083151502179055507f42e160154868087d6bfdc0ca23d96a1c1cfa32f1b72ba9ba27b69b98a0d819dc81604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a150565b6000600a60149054906101000a900460ff1615610eb457600a60009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166318160ddd6000604051602001526040518163ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401602060405180830381600087803b1515610e9257600080fd5b6102c65a03f11515610ea357600080fd5b505050604051805190509050610eba565b60015490505b90565b600060149054906101000a900460ff16151515610ed957600080fd5b600660008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900460ff16151515610f3257600080fd5b600a60149054906101000a900460ff161561108c57600a60009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16638b477adb338585856040518563ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001828152602001945050505050600060405180830381600087803b151561107357600080fd5b6102c65a03f1151561108457600080fd5b505050611098565b6110978383836121ab565b5b505050565b600a60009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60026020528060005260406000206000915090505481565b60095481565b60045481565b60015481565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561114857600080fd5b600060149054906101000a900460ff16151561116357600080fd5b60008060146101000a81548160ff0219169083151502179055507f7805862f689e2f13df9f062ff482ad3ad112aca9e0847911ed832e158c525b3360405160405180910390a1565b6000600660008373fffffffffffffffffff
fffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900460ff169050919050565b6005602052816000526040600020602052806000526040600020600091509150505481565b600060149054906101000a900460ff1681565b6000600a60149054906101000a900460ff161561133757600a60009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166370a08231836000604051602001526040518263ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001915050602060405180830381600087803b151561131557600080fd5b6102c65a03f1151561132657600080fd5b505050604051805190509050611343565b61134082612652565b90505b919050565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156113a357600080fd5b600060149054906101000a900460ff161515156113bf57600080fd5b6001600060146101000a81548160ff0219169083151502179055507f6985a02210a168e66602d3235cb6db0e70f92b3ba4d376a33c0f3d9434bff62560405160405180910390a1565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60088054600181600116156101000203166002900480601f0160208091040260200160405190810160405280929190818152602001828054600181600116156101000203166002900480156114ec5780601f106114c1576101008083540402835291602001916114ec565b820191906000526020600020905b8154815290600101906020018083116114cf57829003601f168201915b505050505081565b600060149054906101000a900460ff1615151561151057600080fd5b600660003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900460ff1615151561156957600080fd5b600a60149054906101000a900460ff161561168f57600a60009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16636e18980a3384846040518463ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019350505050600060405180830381600087803b151561167657600080fd5b6102c65a03f1151561168757600080fd5b50505061169a565b611699828261269b565b5b5050565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156116f957600080fd5b60148210151561170857600080fd5b60328110151561171757600080fd5b81600381905550611736600954600a0a82612a0390919063ffffffff16565b6004819055507fb044a1e409eac5c48e5af22d4af52670dd1a99059537a78b31b48c6500a6354e600354600454604051808381526020018281526020019250505060405180910390a15050565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156117de57600080fd5b60015481600154011115156117f257600080fd5b600260008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205481600260008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152
60200160002054011115156118c257600080fd5b80600260008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008282540192505081905550806001600082825401925050819055507fcb8241adb0c3fdb35b70c24ce35c5eb0c17af7431c99f827d44a445ca624176a816040518082815260200191505060405180910390a150565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156119d557600080fd5b80600154101515156119e657600080fd5b80600260008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410151515611a5557600080fd5b8060016000828254039250508190555080600260008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055507f702d5967f45f6513a38ffc42d6ba9bf230bd40e8f53b16363c7eb4fd2deb9a44816040518082815260200191505060405180910390a150565b6000600a60149054906101000a900460ff1615611c3f57600a60009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663dd62ed3e84846000604051602001526040518363ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200192505050602060405180830381600087803b1515611c1d57600080fd5b6102c65a03f11515611c2e57600080fd5b505050604051805190509050611c4c565b611c498383612a3e565b90505b92915050565b60035481565b60066020528060005260406000206000915054906101000a900460ff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141515611cd357600080fd5b6000600660008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548160ff0219169083151502179055507fd7e9ec6e6ecd65492dce6bf513cd6867560d49544421d0783ddf06e76c24470c81604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a150565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff81565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141515611e1057600080fd5b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16141515611e8757806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b50565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141515611ee757600080fd5b600660008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900460ff161515611f3f57600080fd5b611f4882611239565b90506000600260008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002081905550806001600082825403925050819055507
f61e6e66b0d6339b2980aecc6ccc0039736791f0ccde9ed512e789a7fbdd698c68282604051808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a15050565b60406004810160003690501015151561202657600080fd5b600082141580156120b457506000600560003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205414155b1515156120c057600080fd5b81600560003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925846040518082815260200191505060405180910390a3505050565b60008060006060600481016000369050101515156121c857600080fd5b600560008873ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054935061227061271061226260035488612a0390919063ffffffff16565b612ac590919063ffffffff16565b92506004548311156122825760045492505b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff84101561233e576122bd8585612ae090919063ffffffff16565b600560008973ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055505b6123518386612ae090919063ffffffff16565b91506123a585600260008a73ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054612ae090919063ffffffff16565b600260008973ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000208190555061243a82600260008973ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054612af990919063ffffffff16565b600260008873ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000208190555060008311156125e4576124f983600260008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054612af990919063ffffffff16565b600260008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168773ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef856040518082815260200191505060405180910390a35b8573ffffffffffffffffffffffffffffffffffffffff168773ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a350505050505050565b6000600260008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160
0020549050919050565b6000806040600481016000369050101515156126b657600080fd5b6126df6127106126d160035487612a0390919063ffffffff16565b612ac590919063ffffffff16565b92506004548311156126f15760045492505b6127048385612ae090919063ffffffff16565b915061275884600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054612ae090919063ffffffff16565b600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055506127ed82600260008873ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054612af990919063ffffffff16565b600260008773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055506000831115612997576128ac83600260008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054612af990919063ffffffff16565b600260008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef856040518082815260200191505060405180910390a35b8473ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a35050505050565b6000806000841415612a185760009150612a37565b8284029050828482811515612a2957fe5b04141515612a3357fe5b8091505b5092915050565b6000600560008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054905092915050565b6000808284811515612ad357fe5b0490508091505092915050565b6000828211151515612aee57fe5b818303905092915050565b6000808284019050838110151515612b0d57fe5b80915050929150505600a165627a7a72305820645ee12d73db47fd78ba77fa1f824c3c8f9184061b3b10386beb4dc9236abb280029", + "codeHash": "0xb44fb4e949d0f78f87f79ee46428f23a2a5713ce6fc6e0beb3dda78c2ac1ea55", + "nonce": 1, + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000c6cde7c39eb2f0f0095f41570af89efc2c1ea828", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000004": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x000000000000000000000000000000000000000000000000000000000000000a": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x04af465794ace36b644b505e744c3e5bb4a1136e86a6794cce220609f98e2c59": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x2ffe96b501e60292eb0316c8fc38442c42e408e4f13dbcfa5251330006d737ed": "0x000000000000000000000000000000000000000000000000000001cc47344a6e", + "0x7778dedb5c9fd9ac49539a0a3828992d817bdb68f3215a7557c2f9ce3d7e7dd8": "0x0000000000000000000000000000000000000000000000000000000003197500" + } + } + } +} diff --git 
a/testing/testing-utils/Cargo.toml b/testing/testing-utils/Cargo.toml
index 06e73631ef..35b4042427 100644
--- a/testing/testing-utils/Cargo.toml
+++ b/testing/testing-utils/Cargo.toml
@@ -14,7 +14,6 @@ workspace = true
 [dependencies]
 reth-ethereum-primitives = { workspace = true, features = ["arbitrary", "std"] }
 reth-primitives-traits = { workspace = true, features = ["secp256k1", "arbitrary"] }
-
 alloy-genesis.workspace = true
 alloy-primitives = { workspace = true, features = ["rand"] }
 alloy-consensus.workspace = true
@@ -23,3 +22,6 @@ alloy-eips.workspace = true
 rand.workspace = true
 secp256k1 = { workspace = true, features = ["rand"] }
 rand_08.workspace = true
+
+[dev-dependencies]
+alloy-rlp.workspace = true
diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs
index 52aa8eab66..f61cded524 100644
--- a/testing/testing-utils/src/generators.rs
+++ b/testing/testing-utils/src/generators.rs
@@ -441,7 +441,6 @@ pub fn random_contract_account_range(
     let mut accounts = Vec::with_capacity(acc_range.end.saturating_sub(acc_range.start) as usize);
     for _ in acc_range {
         let (address, eoa_account) = random_eoa_account(rng);
-        // todo: can a non-eoa account have a nonce > 0?
         let account = Account { bytecode_hash: Some(B256::random()), ..eoa_account };
         accounts.push((address, account))
     }
@@ -529,7 +528,7 @@ mod tests {
     #[test]
     fn test_sign_eip_155() {
         // reference: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-155.md#example
-        let transaction = Transaction::Legacy(TxLegacy {
+        let tx = TxLegacy {
             chain_id: Some(1),
             nonce: 9,
             gas_price: 20 * 10_u128.pow(9),
@@ -537,12 +536,11 @@
             to: TxKind::Call(hex!("3535353535353535353535353535353535353535").into()),
             value: U256::from(10_u128.pow(18)),
             input: Bytes::default(),
-        });
+        };
+        let transaction = Transaction::Legacy(tx.clone());

-        // TODO resolve dependency issue
-        // let expected =
-        // hex!("ec098504a817c800825208943535353535353535353535353535353535353535880de0b6b3a764000080018080");
-        // assert_eq!(expected, &alloy_rlp::encode(transaction));
+        let expected = hex!("ec098504a817c800825208943535353535353535353535353535353535353535880de0b6b3a764000080018080");
+        assert_eq!(expected.as_slice(), &alloy_rlp::encode(tx));

         let hash = transaction.signature_hash();
         let expected =
diff --git a/testing/testing-utils/src/lib.rs b/testing/testing-utils/src/lib.rs
index 8baf40d1b6..e493ce78f3 100644
--- a/testing/testing-utils/src/lib.rs
+++ b/testing/testing-utils/src/lib.rs
@@ -8,8 +8,7 @@
 #![cfg_attr(docsrs, feature(doc_cfg))]
 #![cfg_attr(not(test), warn(unused_crate_dependencies))]

+pub mod generators;
 pub mod genesis_allocator;
 pub use genesis_allocator::GenesisAllocator;
-
-pub mod generators;
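Note on the re-enabled assertion in test_sign_eip_155: the expected constant is the EIP-155 example signing payload, i.e. rlp([nonce, gas_price, gas_limit, to, value, data, chain_id, 0, 0]). The following standalone sketch is not part of the patch; it only assumes the hex! macro from alloy_primitives and rebuilds that constant byte by byte to show where each field lands in the expected hex string.

// Hypothetical sketch (not part of the patch): byte-by-byte breakdown of the
// EIP-155 example bytes asserted in `test_sign_eip_155`.
use alloy_primitives::hex;

fn main() {
    // Payload of the EIP-155 signing list:
    // (nonce, gas_price, gas_limit, to, value, data, chain_id, 0, 0)
    let mut payload: Vec<u8> = Vec::new();
    payload.push(0x09); // nonce = 9 (single byte < 0x80 encodes as itself)
    payload.extend_from_slice(&hex!("8504a817c800")); // gas_price = 20 gwei: 0x85 prefix + 5 bytes
    payload.extend_from_slice(&hex!("825208")); // gas_limit = 21000: 0x82 prefix + 2 bytes
    payload.push(0x94); // `to`: 20-byte string prefix (0x80 + 20)
    payload.extend_from_slice(&hex!("3535353535353535353535353535353535353535"));
    payload.extend_from_slice(&hex!("880de0b6b3a7640000")); // value = 10^18 wei: 0x88 prefix + 8 bytes
    payload.push(0x80); // empty calldata
    payload.push(0x01); // chain_id = 1
    payload.push(0x80); // 0 (EIP-155 placeholder)
    payload.push(0x80); // 0 (EIP-155 placeholder)

    // List header: payload is 44 bytes, so the short-list prefix is 0xc0 + 44 = 0xec.
    assert_eq!(payload.len(), 44);
    let mut encoded = vec![0xc0 + payload.len() as u8];
    encoded.extend_from_slice(&payload);

    let expected = hex!("ec098504a817c800825208943535353535353535353535353535353535353535880de0b6b3a764000080018080");
    assert_eq!(encoded, expected.as_slice());
}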