Merge remote-tracking branch 'origin/main' into alexey/engine-thread-priority

This commit is contained in:
Alexey Shekhirin
2025-12-12 16:03:52 +00:00
142 changed files with 4436 additions and 791 deletions

7
.github/actionlint.yaml vendored Normal file
View File

@@ -0,0 +1,7 @@
self-hosted-runner:
labels:
- depot-ubuntu-latest
- depot-ubuntu-latest-2
- depot-ubuntu-latest-4
- depot-ubuntu-latest-8
- depot-ubuntu-latest-16

View File

@@ -7,7 +7,7 @@ sim="${1}"
limit="${2}"
run_hive() {
hive --sim "${sim}" --sim.limit "${limit}" --sim.parallelism 8 --client reth 2>&1 | tee /tmp/log || true
hive --sim "${sim}" --sim.limit "${limit}" --sim.parallelism 16 --client reth 2>&1 | tee /tmp/log || true
}
check_log() {

View File

@@ -11,17 +11,19 @@ env:
CARGO_TERM_COLOR: always
BASELINE: base
SEED: reth
RUSTC_WRAPPER: "sccache"
name: bench
jobs:
codspeed:
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest
steps:
- uses: actions/checkout@v6
with:
submodules: true
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@stable
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true

View File

@@ -10,9 +10,12 @@ on:
types: [opened, reopened, synchronize, closed]
merge_group:
env:
RUSTC_WRAPPER: "sccache"
jobs:
build:
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest-8
timeout-minutes: 90
steps:
- name: Checkout
@@ -33,6 +36,8 @@ jobs:
- name: Install Rust nightly
uses: dtolnay/rust-toolchain@nightly
- uses: mozilla-actions/sccache-action@v0.0.9
- name: Build docs
run: cd docs/vocs && bash scripts/build-cargo-docs.sh

View File

@@ -13,11 +13,12 @@ on:
env:
CARGO_TERM_COLOR: always
RUSTC_WRAPPER: "sccache"
name: compact-codec
jobs:
compact-codec:
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest
strategy:
matrix:
bin:
@@ -26,6 +27,7 @@ jobs:
steps:
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@stable
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true

View File

@@ -11,6 +11,7 @@ on:
env:
CARGO_TERM_COLOR: always
SEED: rustethereumethereumrust
RUSTC_WRAPPER: "sccache"
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -19,13 +20,14 @@ concurrency:
jobs:
test:
name: e2e-testsuite
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest-4
env:
RUST_BACKTRACE: 1
timeout-minutes: 90
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@stable
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: taiki-e/install-action@nextest
- uses: Swatinem/rust-cache@v2
with:

View File

@@ -24,7 +24,7 @@ jobs:
prepare-hive:
if: github.repository == 'paradigmxyz/reth'
timeout-minutes: 45
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest-16
steps:
- uses: actions/checkout@v6
- name: Checkout hive tests
@@ -178,7 +178,7 @@ jobs:
- prepare-reth
- prepare-hive
name: run ${{ matrix.scenario.sim }}${{ matrix.scenario.limit && format(' - {0}', matrix.scenario.limit) }}
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest-16
permissions:
issues: write
steps:

View File

@@ -14,6 +14,7 @@ on:
env:
CARGO_TERM_COLOR: always
SEED: rustethereumethereumrust
RUSTC_WRAPPER: "sccache"
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -23,7 +24,7 @@ jobs:
test:
name: test / ${{ matrix.network }}
if: github.event_name != 'schedule'
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest-4
env:
RUST_BACKTRACE: 1
strategy:
@@ -37,6 +38,7 @@ jobs:
- name: Install Geth
run: .github/assets/install_geth.sh
- uses: taiki-e/install-action@nextest
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
@@ -74,6 +76,7 @@ jobs:
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@stable
- uses: taiki-e/install-action@nextest
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true

View File

@@ -32,7 +32,7 @@ jobs:
strategy:
fail-fast: false
name: run kurtosis
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest
needs:
- prepare-reth
steps:

View File

@@ -30,7 +30,7 @@ jobs:
strategy:
fail-fast: false
name: run kurtosis
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest
needs:
- prepare-reth
steps:

View File

@@ -8,11 +8,12 @@ on:
env:
CARGO_TERM_COLOR: always
RUSTC_WRAPPER: "sccache"
jobs:
clippy-binaries:
name: clippy binaries / ${{ matrix.type }}
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest
timeout-minutes: 30
strategy:
matrix:
@@ -26,6 +27,7 @@ jobs:
- uses: dtolnay/rust-toolchain@clippy
with:
components: clippy
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
@@ -40,7 +42,7 @@ jobs:
clippy:
name: clippy
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest
timeout-minutes: 30
steps:
- uses: actions/checkout@v6
@@ -48,6 +50,7 @@ jobs:
- uses: dtolnay/rust-toolchain@nightly
with:
components: clippy
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
@@ -56,7 +59,7 @@ jobs:
RUSTFLAGS: -D warnings
wasm:
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest
timeout-minutes: 30
steps:
- uses: actions/checkout@v6
@@ -65,6 +68,7 @@ jobs:
with:
target: wasm32-wasip1
- uses: taiki-e/install-action@cargo-hack
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
@@ -75,7 +79,7 @@ jobs:
.github/assets/check_wasm.sh
riscv:
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v6
@@ -84,6 +88,7 @@ jobs:
with:
target: riscv32imac-unknown-none-elf
- uses: taiki-e/install-action@cargo-hack
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
@@ -93,17 +98,18 @@ jobs:
crate-checks:
name: crate-checks (${{ matrix.partition }}/${{ matrix.total_partitions }})
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest-4
strategy:
matrix:
partition: [1, 2]
total_partitions: [2]
partition: [1, 2, 3]
total_partitions: [3]
timeout-minutes: 60
steps:
- uses: actions/checkout@v6
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@stable
- uses: taiki-e/install-action@cargo-hack
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
@@ -111,7 +117,7 @@ jobs:
msrv:
name: MSRV
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest
timeout-minutes: 30
strategy:
matrix:
@@ -124,6 +130,7 @@ jobs:
- uses: dtolnay/rust-toolchain@master
with:
toolchain: "1.88" # MSRV
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
@@ -133,12 +140,13 @@ jobs:
docs:
name: docs
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest-4
timeout-minutes: 30
steps:
- uses: actions/checkout@v6
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@nightly
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
@@ -150,7 +158,7 @@ jobs:
fmt:
name: fmt
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest
timeout-minutes: 30
steps:
- uses: actions/checkout@v6
@@ -158,17 +166,19 @@ jobs:
- uses: dtolnay/rust-toolchain@nightly
with:
components: rustfmt
- uses: mozilla-actions/sccache-action@v0.0.9
- name: Run fmt
run: cargo fmt --all --check
udeps:
name: udeps
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest
timeout-minutes: 30
steps:
- uses: actions/checkout@v6
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@nightly
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
@@ -177,12 +187,13 @@ jobs:
book:
name: book
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest
timeout-minutes: 30
steps:
- uses: actions/checkout@v6
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@nightly
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
@@ -232,31 +243,43 @@ jobs:
- name: Ensure no arbitrary or proptest dependency on default build
run: cargo tree --package reth -e=features,no-dev | grep -Eq "arbitrary|proptest" && exit 1 || exit 0
# Checks that selected rates can compile with power set of features
# Checks that selected crates can compile with power set of features
features:
name: features
runs-on: ubuntu-latest
name: features (${{ matrix.partition }}/${{ matrix.total_partitions }})
runs-on: depot-ubuntu-latest
strategy:
matrix:
partition: [1, 2]
total_partitions: [2]
timeout-minutes: 30
steps:
- uses: actions/checkout@v6
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@clippy
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
- name: cargo install cargo-hack
uses: taiki-e/install-action@cargo-hack
- run: make check-features
- run: |
cargo hack check \
--package reth-codecs \
--package reth-primitives-traits \
--package reth-primitives \
--feature-powerset \
--partition ${{ matrix.partition }}/${{ matrix.total_partitions }}
env:
RUSTFLAGS: -D warnings
# Check crates correctly propagate features
feature-propagation:
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest
timeout-minutes: 20
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@stable
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: rui314/setup-mold@v1
- uses: taiki-e/cache-cargo-install-action@v2
with:

View File

@@ -26,7 +26,7 @@ jobs:
prepare-reth:
if: github.repository == 'paradigmxyz/reth'
timeout-minutes: 45
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest
steps:
- uses: actions/checkout@v6
- run: mkdir artifacts

View File

@@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Update Homebrew formula
uses: dawidd6/action-homebrew-bump-formula@v6
uses: dawidd6/action-homebrew-bump-formula@v7
with:
token: ${{ secrets.HOMEBREW }}
no_fork: true

View File

@@ -22,6 +22,7 @@ env:
CARGO_TERM_COLOR: always
DOCKER_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/reth
DOCKER_OP_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/op-reth
RUSTC_WRAPPER: "sccache"
jobs:
dry-run:
@@ -51,6 +52,7 @@ jobs:
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@stable
- uses: mozilla-actions/sccache-action@v0.0.9
- name: Verify crate version matches tag
# Check that the Cargo version starts with the tag,
# so that Cargo version 1.4.8 can be matched against both v1.4.8 and v1.4.8-rc.1
@@ -104,6 +106,7 @@ jobs:
- uses: dtolnay/rust-toolchain@stable
with:
target: ${{ matrix.configs.target }}
- uses: mozilla-actions/sccache-action@v0.0.9
- name: Install cross main
id: cross_main
run: |

View File

@@ -12,6 +12,7 @@ env:
CARGO_TERM_COLOR: always
FROM_BLOCK: 0
TO_BLOCK: 50000
RUSTC_WRAPPER: "sccache"
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -22,7 +23,7 @@ jobs:
name: stage-run-test
# Only run stage commands test in merge groups
if: github.event_name == 'merge_group'
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest
env:
RUST_LOG: info,sync=error
RUST_BACKTRACE: 1
@@ -31,6 +32,7 @@ jobs:
- uses: actions/checkout@v6
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@stable
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true

View File

@@ -9,6 +9,7 @@ on:
env:
CARGO_TERM_COLOR: always
RUSTC_WRAPPER: "sccache"
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -17,7 +18,7 @@ concurrency:
jobs:
sync:
name: sync (${{ matrix.chain.bin }})
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest
env:
RUST_LOG: info,sync=error
RUST_BACKTRACE: 1
@@ -41,6 +42,7 @@ jobs:
- uses: actions/checkout@v6
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@stable
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true

View File

@@ -9,6 +9,7 @@ on:
env:
CARGO_TERM_COLOR: always
RUSTC_WRAPPER: "sccache"
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -17,7 +18,7 @@ concurrency:
jobs:
sync:
name: sync (${{ matrix.chain.bin }})
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest
env:
RUST_LOG: info,sync=error
RUST_BACKTRACE: 1
@@ -41,6 +42,7 @@ jobs:
- uses: actions/checkout@v6
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@stable
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true

View File

@@ -11,6 +11,7 @@ on:
env:
CARGO_TERM_COLOR: always
SEED: rustethereumethereumrust
RUSTC_WRAPPER: "sccache"
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -19,7 +20,7 @@ concurrency:
jobs:
test:
name: test / ${{ matrix.type }} (${{ matrix.partition }}/${{ matrix.total_partitions }})
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest-4
env:
RUST_BACKTRACE: 1
strategy:
@@ -46,6 +47,7 @@ jobs:
- uses: actions/checkout@v6
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@stable
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
@@ -64,7 +66,7 @@ jobs:
state:
name: Ethereum state tests
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest-4
env:
RUST_LOG: info,sync=error
RUST_BACKTRACE: 1
@@ -91,6 +93,7 @@ jobs:
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@stable
- uses: taiki-e/install-action@nextest
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
@@ -98,7 +101,7 @@ jobs:
doc:
name: doc tests
runs-on: ubuntu-latest
runs-on: depot-ubuntu-latest
env:
RUST_BACKTRACE: 1
timeout-minutes: 30
@@ -106,6 +109,7 @@ jobs:
- uses: actions/checkout@v6
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@stable
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true

View File

@@ -9,9 +9,12 @@ on:
branches: [main]
merge_group:
env:
RUSTC_WRAPPER: "sccache"
jobs:
check-reth:
runs-on: ubuntu-24.04
runs-on: depot-ubuntu-latest
timeout-minutes: 60
steps:
@@ -21,6 +24,7 @@ jobs:
with:
target: x86_64-pc-windows-gnu
- uses: taiki-e/install-action@cross
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
@@ -30,7 +34,7 @@ jobs:
run: cargo check --target x86_64-pc-windows-gnu
check-op-reth:
runs-on: ubuntu-24.04
runs-on: depot-ubuntu-latest
timeout-minutes: 60
steps:
@@ -40,6 +44,7 @@ jobs:
with:
target: x86_64-pc-windows-gnu
- uses: taiki-e/install-action@cross
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true

72
Cargo.lock generated
View File

@@ -238,6 +238,18 @@ dependencies = [
"thiserror 2.0.17",
]
[[package]]
name = "alloy-eip7928"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "926b2c0d34e641cf8b17bf54ce50fda16715b9f68ad878fa6128bae410c6f890"
dependencies = [
"alloy-primitives",
"alloy-rlp",
"borsh",
"serde",
]
[[package]]
name = "alloy-eips"
version = "1.1.3"
@@ -266,9 +278,9 @@ dependencies = [
[[package]]
name = "alloy-evm"
version = "0.24.2"
version = "0.25.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "01be36ba6f5e6e62563b369e03ca529eac46aea50677f84655084b4750816574"
checksum = "3e7b4fb2418490bca9978e74208215ed5fcb21a10aba7eea487abaa60fd588db"
dependencies = [
"alloy-consensus",
"alloy-eips",
@@ -383,9 +395,9 @@ dependencies = [
[[package]]
name = "alloy-op-evm"
version = "0.24.2"
version = "0.25.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "231262d7e06000f3fb642d32d38ca75e09e78e04977c10be0a07a5ee2c869cfd"
checksum = "56afbe3c3b66435c7c3846fe639a60e4cdbc31b9263596eb2f382408b1b160c4"
dependencies = [
"alloy-consensus",
"alloy-eips",
@@ -3671,6 +3683,17 @@ dependencies = [
"tracing",
]
[[package]]
name = "example-custom-rpc-middleware"
version = "0.0.0"
dependencies = [
"clap",
"jsonrpsee",
"reth-ethereum",
"tower",
"tracing",
]
[[package]]
name = "example-db-access"
version = "0.0.0"
@@ -6188,9 +6211,9 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e"
[[package]]
name = "op-alloy"
version = "0.22.4"
version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3b13412d297c1f9341f678b763750b120a73ffe998fa54a94d6eda98449e7ca"
checksum = "5f8cef53b364f406ed2be3a447b2d8f2f18b07a6ff1255c287debb4cda68095b"
dependencies = [
"op-alloy-consensus",
"op-alloy-network",
@@ -6201,9 +6224,9 @@ dependencies = [
[[package]]
name = "op-alloy-consensus"
version = "0.22.4"
version = "0.23.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "726da827358a547be9f1e37c2a756b9e3729cb0350f43408164794b370cad8ae"
checksum = "736381a95471d23e267263cfcee9e1d96d30b9754a94a2819148f83379de8a86"
dependencies = [
"alloy-consensus",
"alloy-eips",
@@ -6227,9 +6250,9 @@ checksum = "a79f352fc3893dcd670172e615afef993a41798a1d3fc0db88a3e60ef2e70ecc"
[[package]]
name = "op-alloy-network"
version = "0.22.4"
version = "0.23.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f63f27e65be273ec8fcb0b6af0fd850b550979465ab93423705ceb3dfddbd2ab"
checksum = "4034183dca6bff6632e7c24c92e75ff5f0eabb58144edb4d8241814851334d47"
dependencies = [
"alloy-consensus",
"alloy-network",
@@ -6243,9 +6266,9 @@ dependencies = [
[[package]]
name = "op-alloy-provider"
version = "0.22.4"
version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a71456699aa256dc20119736422ad9a44da8b9585036117afb936778122093b9"
checksum = "9f1c952895ad45087d35d323e3fb73c0b5de7c6852494d81ebe997030366196a"
dependencies = [
"alloy-network",
"alloy-primitives",
@@ -6258,9 +6281,9 @@ dependencies = [
[[package]]
name = "op-alloy-rpc-jsonrpsee"
version = "0.22.4"
version = "0.23.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ef9114426b16172254555aad34a8ea96c01895e40da92f5d12ea680a1baeaa7"
checksum = "c1c820ef9c802ebc732281a940bfb6ac2345af4d9fff041cbb64b4b546676686"
dependencies = [
"alloy-primitives",
"jsonrpsee",
@@ -6268,9 +6291,9 @@ dependencies = [
[[package]]
name = "op-alloy-rpc-types"
version = "0.22.4"
version = "0.23.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "562dd4462562c41f9fdc4d860858c40e14a25df7f983ae82047f15f08fce4d19"
checksum = "ddd87c6b9e5b6eee8d6b76f41b04368dca0e9f38d83338e5b00e730c282098a4"
dependencies = [
"alloy-consensus",
"alloy-eips",
@@ -6288,9 +6311,9 @@ dependencies = [
[[package]]
name = "op-alloy-rpc-types-engine"
version = "0.22.4"
version = "0.23.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8f24b8cb66e4b33e6c9e508bf46b8ecafc92eadd0b93fedd306c0accb477657"
checksum = "77727699310a18cdeed32da3928c709e2704043b6584ed416397d5da65694efc"
dependencies = [
"alloy-consensus",
"alloy-eips",
@@ -6304,6 +6327,7 @@ dependencies = [
"ethereum_ssz_derive",
"op-alloy-consensus",
"serde",
"sha2",
"snap",
"thiserror 2.0.17",
]
@@ -8196,6 +8220,7 @@ name = "reth-engine-tree"
version = "1.9.3"
dependencies = [
"alloy-consensus",
"alloy-eip7928",
"alloy-eips",
"alloy-evm",
"alloy-primitives",
@@ -9221,6 +9246,7 @@ dependencies = [
"alloy-sol-types",
"eyre",
"futures",
"jsonrpsee-core",
"rand 0.9.2",
"reth-chainspec",
"reth-db",
@@ -9256,6 +9282,7 @@ dependencies = [
"serde",
"serde_json",
"similar-asserts",
"tempfile",
"tokio",
]
@@ -10146,6 +10173,7 @@ dependencies = [
"reth-db-api",
"reth-engine-primitives",
"reth-errors",
"reth-ethereum-engine-primitives",
"reth-ethereum-primitives",
"reth-evm",
"reth-evm-ethereum",
@@ -10208,6 +10236,9 @@ dependencies = [
"reth-network-peers",
"reth-rpc-eth-api",
"reth-trie-common",
"serde",
"serde_json",
"tokio",
]
[[package]]
@@ -10272,6 +10303,7 @@ dependencies = [
"reth-rpc-server-types",
"reth-storage-api",
"reth-tasks",
"reth-tokio-util",
"reth-tracing",
"reth-transaction-pool",
"serde",
@@ -11165,9 +11197,9 @@ dependencies = [
[[package]]
name = "revm-inspectors"
version = "0.33.1"
version = "0.33.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c93974333e7acc4b2dc024b10def99707f7375a4d53db7a7f8351722d25673f"
checksum = "01def7351cd9af844150b8e88980bcd11304f33ce23c3d7c25f2a8dab87c1345"
dependencies = [
"alloy-primitives",
"alloy-rpc-types-eth",

View File

@@ -153,6 +153,7 @@ members = [
"examples/custom-node-components/",
"examples/custom-payload-builder/",
"examples/custom-rlpx-subprotocol",
"examples/custom-rpc-middleware",
"examples/custom-node",
"examples/db-access",
"examples/engine-api-access",
@@ -375,11 +376,11 @@ reth-era-utils = { path = "crates/era-utils" }
reth-errors = { path = "crates/errors" }
reth-eth-wire = { path = "crates/net/eth-wire" }
reth-eth-wire-types = { path = "crates/net/eth-wire-types" }
reth-ethereum-payload-builder = { path = "crates/ethereum/payload" }
reth-ethereum-cli = { path = "crates/ethereum/cli", default-features = false }
reth-ethereum-consensus = { path = "crates/ethereum/consensus", default-features = false }
reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives", default-features = false }
reth-ethereum-forks = { path = "crates/ethereum/hardforks", default-features = false }
reth-ethereum-payload-builder = { path = "crates/ethereum/payload" }
reth-ethereum-primitives = { path = "crates/ethereum/primitives", default-features = false }
reth-ethereum = { path = "crates/ethereum/reth" }
reth-etl = { path = "crates/etl" }
@@ -480,13 +481,14 @@ revm-primitives = { version = "21.0.2", default-features = false }
revm-interpreter = { version = "31.1.0", default-features = false }
revm-database-interface = { version = "8.0.5", default-features = false }
op-revm = { version = "14.1.0", default-features = false }
revm-inspectors = "0.33.1"
revm-inspectors = "0.33.2"
# eth
alloy-chains = { version = "0.2.5", default-features = false }
alloy-dyn-abi = "1.4.1"
alloy-eip2124 = { version = "0.2.0", default-features = false }
alloy-evm = { version = "0.24.1", default-features = false }
alloy-eip7928 = { version = "0.1.0" }
alloy-evm = { version = "0.25.1", default-features = false }
alloy-primitives = { version = "1.4.1", default-features = false, features = ["map-foldhash"] }
alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] }
alloy-sol-macro = "1.4.1"
@@ -524,13 +526,13 @@ alloy-transport-ipc = { version = "1.1.3", default-features = false }
alloy-transport-ws = { version = "1.1.3", default-features = false }
# op
alloy-op-evm = { version = "0.24.1", default-features = false }
alloy-op-evm = { version = "0.25.0", default-features = false }
alloy-op-hardforks = "0.4.4"
op-alloy-rpc-types = { version = "0.22.4", default-features = false }
op-alloy-rpc-types-engine = { version = "0.22.4", default-features = false }
op-alloy-network = { version = "0.22.4", default-features = false }
op-alloy-consensus = { version = "0.22.4", default-features = false }
op-alloy-rpc-jsonrpsee = { version = "0.22.4", default-features = false }
op-alloy-rpc-types = { version = "0.23.1", default-features = false }
op-alloy-rpc-types-engine = { version = "0.23.1", default-features = false }
op-alloy-network = { version = "0.23.1", default-features = false }
op-alloy-consensus = { version = "0.23.1", default-features = false }
op-alloy-rpc-jsonrpsee = { version = "0.23.1", default-features = false }
op-alloy-flz = { version = "0.13.1", default-features = false }
# misc

View File

@@ -523,8 +523,3 @@ pr:
make test
check-features:
cargo hack check \
--package reth-codecs \
--package reth-primitives-traits \
--package reth-primitives \
--feature-powerset

View File

@@ -506,8 +506,8 @@ async fn run_warmup_phase(
// Build additional args with conditional --debug.startup-sync-state-idle flag
let additional_args = args.build_additional_args("warmup", args.baseline_args.as_ref());
// Start reth node for warmup
let mut node_process =
// Start reth node for warmup (command is not stored for warmup phase)
let (mut node_process, _warmup_command) =
node_manager.start_node(&binary_path, warmup_ref, "warmup", &additional_args).await?;
// Wait for node to be ready and get its current tip
@@ -607,8 +607,8 @@ async fn run_benchmark_workflow(
// Build additional args with conditional --debug.startup-sync-state-idle flag
let additional_args = args.build_additional_args(ref_type, base_args_str);
// Start reth node
let mut node_process =
// Start reth node and capture the command for reporting
let (mut node_process, reth_command) =
node_manager.start_node(&binary_path, git_ref, ref_type, &additional_args).await?;
// Wait for node to be ready and get its current tip (wherever it is)
@@ -645,8 +645,9 @@ async fn run_benchmark_workflow(
// Store results for comparison
comparison_generator.add_ref_results(ref_type, &output_dir)?;
// Set the benchmark run timestamps
// Set the benchmark run timestamps and reth command
comparison_generator.set_ref_timestamps(ref_type, benchmark_start, benchmark_end)?;
comparison_generator.set_ref_command(ref_type, reth_command)?;
info!("Completed {} reference benchmark", ref_type);
}

View File

@@ -21,6 +21,8 @@ pub(crate) struct ComparisonGenerator {
feature_ref_name: String,
baseline_results: Option<BenchmarkResults>,
feature_results: Option<BenchmarkResults>,
baseline_command: Option<String>,
feature_command: Option<String>,
}
/// Represents the results from a single benchmark run
@@ -89,6 +91,7 @@ pub(crate) struct RefInfo {
pub summary: BenchmarkSummary,
pub start_timestamp: Option<DateTime<Utc>>,
pub end_timestamp: Option<DateTime<Utc>>,
pub reth_command: Option<String>,
}
/// Summary of the comparison between references.
@@ -142,6 +145,8 @@ impl ComparisonGenerator {
feature_ref_name: args.feature_ref.clone(),
baseline_results: None,
feature_results: None,
baseline_command: None,
feature_command: None,
}
}
@@ -206,6 +211,21 @@ impl ComparisonGenerator {
Ok(())
}
/// Set the reth command for a reference
pub(crate) fn set_ref_command(&mut self, ref_type: &str, command: String) -> Result<()> {
match ref_type {
"baseline" => {
self.baseline_command = Some(command);
}
"feature" => {
self.feature_command = Some(command);
}
_ => return Err(eyre!("Unknown reference type: {}", ref_type)),
}
Ok(())
}
/// Generate the final comparison report
pub(crate) async fn generate_comparison_report(&self) -> Result<()> {
info!("Generating comparison report...");
@@ -230,12 +250,14 @@ impl ComparisonGenerator {
summary: baseline.summary.clone(),
start_timestamp: baseline.start_timestamp,
end_timestamp: baseline.end_timestamp,
reth_command: self.baseline_command.clone(),
},
feature: RefInfo {
ref_name: feature.ref_name.clone(),
summary: feature.summary.clone(),
start_timestamp: feature.start_timestamp,
end_timestamp: feature.end_timestamp,
reth_command: self.feature_command.clone(),
},
comparison_summary,
per_block_comparisons,
@@ -599,6 +621,9 @@ impl ComparisonGenerator {
end.format("%Y-%m-%d %H:%M:%S UTC")
);
}
if let Some(ref cmd) = report.baseline.reth_command {
println!(" Command: {}", cmd);
}
println!();
println!("Feature Summary:");
@@ -628,6 +653,9 @@ impl ComparisonGenerator {
end.format("%Y-%m-%d %H:%M:%S UTC")
);
}
if let Some(ref cmd) = report.feature.reth_command {
println!(" Command: {}", cmd);
}
println!();
}
}

View File

@@ -240,19 +240,24 @@ impl NodeManager {
}
/// Start a reth node using the specified binary path and return the process handle
/// along with the formatted reth command string for reporting.
pub(crate) async fn start_node(
&mut self,
binary_path: &std::path::Path,
_git_ref: &str,
ref_type: &str,
additional_args: &[String],
) -> Result<tokio::process::Child> {
) -> Result<(tokio::process::Child, String)> {
// Store the binary path for later use (e.g., in unwind_to_block)
self.binary_path = Some(binary_path.to_path_buf());
let binary_path_str = binary_path.to_string_lossy();
let (reth_args, _) = self.build_reth_args(&binary_path_str, additional_args, ref_type);
// Format the reth command string for reporting
let reth_command = shlex::try_join(reth_args.iter().map(|s| s.as_str()))
.wrap_err("Failed to format reth command string")?;
// Log additional arguments if any
if !self.additional_reth_args.is_empty() {
info!("Using common additional reth arguments: {:?}", self.additional_reth_args);
@@ -346,7 +351,7 @@ impl NodeManager {
// Give the node a moment to start up
sleep(Duration::from_secs(5)).await;
Ok(child)
Ok((child, reth_command))
}
/// Wait for the node to be ready and return its current tip

View File

@@ -80,7 +80,7 @@ RUSTFLAGS="-C target-cpu=native" cargo build --profile profiling --no-default-fe
### Run the Benchmark:
First, start the reth node. Here is an example that runs `reth` compiled with the `profiling` profile, runs `samply`, and configures `reth` to run with metrics enabled:
```bash
samply record -p 3001 target/profiling/reth node --metrics localhost:9001 --authrpc.jwt-secret <jwt_file_path>
samply record -p 3001 target/profiling/reth node --metrics localhost:9001 --authrpc.jwtsecret <jwt_file_path>
```
```bash
@@ -143,5 +143,5 @@ To reproduce the benchmark, first re-set the node to the block that the benchmar
- **RPC Configuration**: The RPC endpoints should be accessible and configured correctly, specifically the RPC endpoint must support `eth_getBlockByNumber` and support fetching full transactions. The benchmark will make one RPC query per block as fast as possible, so ensure the RPC endpoint does not rate limit or block requests after a certain volume.
- **Reproducibility**: Ensure that the node is at the same state before attempting to retry a benchmark. The `new-payload-fcu` command specifically will commit to the database, so the node must be rolled back using `reth stage unwind` to reproducibly retry benchmarks.
- **Profiling tools**: If you are collecting CPU profiles, tools like [`samply`](https://github.com/mstange/samply) and [`perf`](https://perf.wiki.kernel.org/index.php/Main_Page) can be useful for analyzing node performance.
- **Benchmark Data**: `reth-bench` additionally contains a `--benchmark.output` flag, which will output gas used benchmarks across the benchmark range in CSV format. This may be useful for further data analysis.
- **Benchmark Data**: `reth-bench` additionally contains a `--output` flag, which will output gas used benchmarks across the benchmark range in CSV format. This may be useful for further data analysis.
- **Platform Information**: To ensure accurate and reproducible benchmarking, document the platform details, including hardware specifications, OS version, and any other relevant information before publishing any benchmarks.

View File

@@ -8,12 +8,17 @@ use reth_db::{
RawDupSort,
};
use reth_db_api::{
table::{Decompress, DupSort, Table},
tables, RawKey, RawTable, Receipts, TableViewer, Transactions,
cursor::{DbCursorRO, DbDupCursorRO},
database::Database,
table::{Compress, Decompress, DupSort, Table},
tables,
transaction::DbTx,
RawKey, RawTable, Receipts, TableViewer, Transactions,
};
use reth_db_common::DbTool;
use reth_node_api::{HeaderTy, ReceiptTy, TxTy};
use reth_node_builder::NodeTypesWithDB;
use reth_primitives_traits::ValueWithSubKey;
use reth_provider::{providers::ProviderNodeTypes, StaticFileProviderFactory};
use reth_static_file_types::StaticFileSegment;
use tracing::error;
@@ -39,6 +44,14 @@ enum Subcommand {
#[arg(value_parser = maybe_json_value_parser)]
subkey: Option<String>,
/// Optional end key for range query (exclusive upper bound)
#[arg(value_parser = maybe_json_value_parser)]
end_key: Option<String>,
/// Optional end subkey for range query (exclusive upper bound)
#[arg(value_parser = maybe_json_value_parser)]
end_subkey: Option<String>,
/// Output bytes instead of human-readable decoded value
#[arg(long)]
raw: bool,
@@ -61,8 +74,8 @@ impl Command {
/// Execute `db get` command
pub fn execute<N: ProviderNodeTypes>(self, tool: &DbTool<N>) -> eyre::Result<()> {
match self.subcommand {
Subcommand::Mdbx { table, key, subkey, raw } => {
table.view(&GetValueViewer { tool, key, subkey, raw })?
Subcommand::Mdbx { table, key, subkey, end_key, end_subkey, raw } => {
table.view(&GetValueViewer { tool, key, subkey, end_key, end_subkey, raw })?
}
Subcommand::StaticFile { segment, key, raw } => {
let (key, mask): (u64, _) = match segment {
@@ -154,6 +167,8 @@ struct GetValueViewer<'a, N: NodeTypesWithDB> {
tool: &'a DbTool<N>,
key: String,
subkey: Option<String>,
end_key: Option<String>,
end_subkey: Option<String>,
raw: bool,
}
@@ -163,53 +178,158 @@ impl<N: ProviderNodeTypes> TableViewer<()> for GetValueViewer<'_, N> {
fn view<T: Table>(&self) -> Result<(), Self::Error> {
let key = table_key::<T>(&self.key)?;
let content = if self.raw {
self.tool
.get::<RawTable<T>>(RawKey::from(key))?
.map(|content| hex::encode_prefixed(content.raw_value()))
} else {
self.tool.get::<T>(key)?.as_ref().map(serde_json::to_string_pretty).transpose()?
};
// A non-dupsort table cannot have subkeys. The `subkey` arg becomes the `end_key`. First we
// check that `end_key` and `end_subkey` weren't previously given, as that wouldn't be
// valid.
if self.end_key.is_some() || self.end_subkey.is_some() {
return Err(eyre::eyre!("Only END_KEY can be given for non-DUPSORT tables"));
}
match content {
Some(content) => {
println!("{content}");
}
None => {
error!(target: "reth::cli", "No content for the given table key.");
}
};
let end_key = self.subkey.clone();
// Check if we're doing a range query
if let Some(ref end_key_str) = end_key {
let end_key = table_key::<T>(end_key_str)?;
// Use walk_range to iterate over the range
self.tool.provider_factory.db_ref().view(|tx| {
let mut cursor = tx.cursor_read::<T>()?;
let walker = cursor.walk_range(key..end_key)?;
for result in walker {
let (k, v) = result?;
let json_val = if self.raw {
let raw_key = RawKey::from(k);
serde_json::json!({
"key": hex::encode_prefixed(raw_key.raw_key()),
"val": hex::encode_prefixed(v.compress().as_ref()),
})
} else {
serde_json::json!({
"key": &k,
"val": &v,
})
};
println!("{}", serde_json::to_string_pretty(&json_val)?);
}
Ok::<_, eyre::Report>(())
})??;
} else {
// Single key lookup
let content = if self.raw {
self.tool
.get::<RawTable<T>>(RawKey::from(key))?
.map(|content| hex::encode_prefixed(content.raw_value()))
} else {
self.tool.get::<T>(key)?.as_ref().map(serde_json::to_string_pretty).transpose()?
};
match content {
Some(content) => {
println!("{content}");
}
None => {
error!(target: "reth::cli", "No content for the given table key.");
}
};
}
Ok(())
}
fn view_dupsort<T: DupSort>(&self) -> Result<(), Self::Error> {
fn view_dupsort<T: DupSort>(&self) -> Result<(), Self::Error>
where
T::Value: reth_primitives_traits::ValueWithSubKey<SubKey = T::SubKey>,
{
// get a key for given table
let key = table_key::<T>(&self.key)?;
// process dupsort table
let subkey = table_subkey::<T>(self.subkey.as_deref())?;
let content = if self.raw {
self.tool
.get_dup::<RawDupSort<T>>(RawKey::from(key), RawKey::from(subkey))?
.map(|content| hex::encode_prefixed(content.raw_value()))
} else {
self.tool
.get_dup::<T>(key, subkey)?
// Check if we're doing a range query
if let Some(ref end_key_str) = self.end_key {
let end_key = table_key::<T>(end_key_str)?;
let start_subkey = table_subkey::<T>(Some(
self.subkey.as_ref().expect("must have been given if end_key is given").as_str(),
))?;
let end_subkey_parsed = self
.end_subkey
.as_ref()
.map(serde_json::to_string_pretty)
.transpose()?
};
.map(|s| table_subkey::<T>(Some(s.as_str())))
.transpose()?;
match content {
Some(content) => {
println!("{content}");
}
None => {
error!(target: "reth::cli", "No content for the given table subkey.");
}
};
self.tool.provider_factory.db_ref().view(|tx| {
let mut cursor = tx.cursor_dup_read::<T>()?;
// Seek to the starting key. If there is actually a key at the starting key then
// seek to the subkey within it.
if let Some((decoded_key, _)) = cursor.seek(key.clone())? &&
decoded_key == key
{
cursor.seek_by_key_subkey(key.clone(), start_subkey.clone())?;
}
// Get the current position to start iteration
let mut current = cursor.current()?;
while let Some((decoded_key, decoded_value)) = current {
// Extract the subkey using the ValueWithSubKey trait
let decoded_subkey = decoded_value.get_subkey();
// Check if we've reached the end (exclusive)
if (&decoded_key, Some(&decoded_subkey)) >=
(&end_key, end_subkey_parsed.as_ref())
{
break;
}
// Output the entry with both key and subkey
let json_val = if self.raw {
let raw_key = RawKey::from(decoded_key.clone());
serde_json::json!({
"key": hex::encode_prefixed(raw_key.raw_key()),
"val": hex::encode_prefixed(decoded_value.compress().as_ref()),
})
} else {
serde_json::json!({
"key": &decoded_key,
"val": &decoded_value,
})
};
println!("{}", serde_json::to_string_pretty(&json_val)?);
// Move to next entry
current = cursor.next()?;
}
Ok::<_, eyre::Report>(())
})??;
} else {
// Single key/subkey lookup
let subkey = table_subkey::<T>(self.subkey.as_deref())?;
let content = if self.raw {
self.tool
.get_dup::<RawDupSort<T>>(RawKey::from(key), RawKey::from(subkey))?
.map(|content| hex::encode_prefixed(content.raw_value()))
} else {
self.tool
.get_dup::<T>(key, subkey)?
.as_ref()
.map(serde_json::to_string_pretty)
.transpose()?
};
match content {
Some(content) => {
println!("{content}");
}
None => {
error!(target: "reth::cli", "No content for the given table subkey.");
}
};
}
Ok(())
}
}

View File

@@ -92,6 +92,8 @@ impl Command {
receipts_in_static_files: _,
transaction_senders_in_static_files: _,
storages_history_in_rocksdb: _,
transaction_hash_numbers_in_rocksdb: _,
account_history_in_rocksdb: _,
} = settings.unwrap_or_else(StorageSettings::legacy);
// Update the setting based on the key

View File

@@ -110,7 +110,7 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> InitStateC
static_file_provider.commit()?;
} else if last_block_number > 0 && last_block_number < header.number() {
return Err(eyre::eyre!(
"Data directory should be empty when calling init-state with --without-evm-history."
"Data directory should be empty when calling init-state with --without-evm."
));
}
}

View File

@@ -531,8 +531,12 @@ impl PruneConfig {
self.segments.receipts.is_some() || !self.segments.receipts_log_filter.is_empty()
}
/// Merges another `PruneConfig` into this one, taking values from the other config if and only
/// if the corresponding value in this config is not set.
/// Merges values from `other` into `self`.
/// - `Option<PruneMode>` fields: set from `other` only if `self` is `None`.
/// - `block_interval`: set from `other` only if `self.block_interval ==
/// DEFAULT_BLOCK_INTERVAL`.
/// - `merkle_changesets`: always set from `other`.
/// - `receipts_log_filter`: set from `other` only if `self` is empty and `other` is non-empty.
pub fn merge(&mut self, other: Self) {
let Self {
block_interval,
@@ -561,7 +565,7 @@ impl PruneConfig {
self.segments.account_history = self.segments.account_history.or(account_history);
self.segments.storage_history = self.segments.storage_history.or(storage_history);
self.segments.bodies_history = self.segments.bodies_history.or(bodies_history);
// Merkle changesets is not optional, so we just replace it if provided
// Merkle changesets is not optional; always take the value from `other`
self.segments.merkle_changesets = merkle_changesets;
if self.segments.receipts_log_filter.0.is_empty() && !receipts_log_filter.0.is_empty() {

View File

@@ -327,7 +327,7 @@ pub fn validate_against_parent_eip1559_base_fee<ChainSpec: EthChainSpec + Ethere
Ok(())
}
/// Validates the timestamp against the parent to make sure it is in the past.
/// Validates that the block timestamp is greater than the parent block timestamp.
#[inline]
pub fn validate_against_parent_timestamp<H: BlockHeader>(
header: &H,

View File

@@ -1,5 +1,5 @@
use crate::BlockProvider;
use alloy_provider::{Network, Provider, ProviderBuilder};
use alloy_provider::{ConnectionConfig, Network, Provider, ProviderBuilder, WebSocketConfig};
use alloy_transport::TransportResult;
use futures::{Stream, StreamExt};
use reth_node_api::Block;
@@ -25,7 +25,19 @@ impl<N: Network, PrimitiveBlock> RpcBlockProvider<N, PrimitiveBlock> {
convert: impl Fn(N::BlockResponse) -> PrimitiveBlock + Send + Sync + 'static,
) -> eyre::Result<Self> {
Ok(Self {
provider: Arc::new(ProviderBuilder::default().connect(rpc_url).await?),
provider: Arc::new(
ProviderBuilder::default()
.connect_with_config(
rpc_url,
ConnectionConfig::default().with_max_retries(u32::MAX).with_ws_config(
WebSocketConfig::default()
// allow larger messages/frames for big blocks
.max_frame_size(Some(128 * 1024 * 1024))
.max_message_size(Some(128 * 1024 * 1024)),
),
)
.await?,
),
url: rpc_url.to_string(),
convert: Arc::new(convert),
})

View File

@@ -1,5 +1,7 @@
//! Engine tree configuration.
use alloy_eips::merge::EPOCH_SLOTS;
/// Triggers persistence when the number of canonical blocks in memory exceeds this threshold.
pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 2;
@@ -40,7 +42,7 @@ pub const DEFAULT_RESERVED_CPU_CORES: usize = 1;
/// Default maximum concurrency for prewarm task.
pub const DEFAULT_PREWARM_MAX_CONCURRENCY: usize = 16;
const DEFAULT_BLOCK_BUFFER_LIMIT: u32 = 256;
const DEFAULT_BLOCK_BUFFER_LIMIT: u32 = EPOCH_SLOTS as u32 * 2;
const DEFAULT_MAX_INVALID_HEADER_CACHE_LENGTH: u32 = 256;
const DEFAULT_MAX_EXECUTE_BLOCK_BATCH_SIZE: usize = 4;
const DEFAULT_CROSS_BLOCK_CACHE_SIZE: u64 = 4 * 1024 * 1024 * 1024;

View File

@@ -39,6 +39,7 @@ reth-trie.workspace = true
alloy-evm.workspace = true
alloy-consensus.workspace = true
alloy-eips.workspace = true
alloy-eip7928.workspace = true
alloy-primitives.workspace = true
alloy-rlp.workspace = true
alloy-rpc-types-engine.workspace = true

File diff suppressed because it is too large Load Diff

View File

@@ -11,6 +11,7 @@ use crate::tree::{
StateProviderDatabase, TreeConfig,
};
use alloy_consensus::transaction::Either;
use alloy_eip7928::BlockAccessList;
use alloy_eips::{eip1898::BlockWithParent, NumHash};
use alloy_evm::Evm;
use alloy_primitives::B256;
@@ -1243,4 +1244,10 @@ impl<T: PayloadTypes> BlockOrPayload<T> {
Self::Block(_) => "block",
}
}
/// Returns the block access list if available.
pub const fn block_access_list(&self) -> Option<Result<BlockAccessList, alloy_rlp::Error>> {
// TODO decode and return `BlockAccessList`
None
}
}

View File

@@ -252,7 +252,7 @@ where
/// Extracts block headers and bodies from `iter` and appends them using `writer` and `provider`.
///
/// Adds on to `total_difficulty` and collects hash to height using `hash_collector`.
/// Collects hash to height using `hash_collector`.
///
/// Skips all blocks below the [`start_bound`] of `block_numbers` and stops when reaching past the
/// [`end_bound`] or the end of the file.

View File

@@ -5,7 +5,6 @@ use alloy_consensus::{
};
use alloy_eips::merge::BEACON_NONCE;
use alloy_evm::{block::BlockExecutorFactory, eth::EthBlockExecutionCtx};
use alloy_primitives::Bytes;
use reth_chainspec::{EthChainSpec, EthereumHardforks};
use reth_evm::execute::{BlockAssembler, BlockAssemblerInput, BlockExecutionError};
use reth_execution_types::BlockExecutionResult;
@@ -17,14 +16,12 @@ use revm::context::Block as _;
pub struct EthBlockAssembler<ChainSpec = reth_chainspec::ChainSpec> {
/// The chainspec.
pub chain_spec: Arc<ChainSpec>,
/// Extra data to use for the blocks.
pub extra_data: Bytes,
}
impl<ChainSpec> EthBlockAssembler<ChainSpec> {
/// Creates a new [`EthBlockAssembler`].
pub fn new(chain_spec: Arc<ChainSpec>) -> Self {
Self { chain_spec, extra_data: Default::default() }
pub const fn new(chain_spec: Arc<ChainSpec>) -> Self {
Self { chain_spec }
}
}
@@ -110,7 +107,7 @@ where
gas_limit: evm_env.block_env.gas_limit(),
difficulty: evm_env.block_env.difficulty(),
gas_used: *gas_used,
extra_data: self.extra_data.clone(),
extra_data: ctx.extra_data,
parent_beacon_block_root: ctx.parent_beacon_block_root,
blob_gas_used: block_blob_gas_used,
excess_blob_gas,

View File

@@ -116,12 +116,6 @@ impl<ChainSpec, EvmFactory> EthEvmConfig<ChainSpec, EvmFactory> {
pub const fn chain_spec(&self) -> &Arc<ChainSpec> {
self.executor_factory.spec()
}
/// Sets the extra data for the block assembler.
pub fn with_extra_data(mut self, extra_data: Bytes) -> Self {
self.block_assembler.extra_data = extra_data;
self
}
}
impl<ChainSpec, EvmF> ConfigureEvm for EthEvmConfig<ChainSpec, EvmF>
@@ -193,6 +187,7 @@ where
parent_beacon_block_root: block.header().parent_beacon_block_root,
ommers: &block.body().ommers,
withdrawals: block.body().withdrawals.as_ref().map(Cow::Borrowed),
extra_data: block.header().extra_data.clone(),
})
}
@@ -206,6 +201,7 @@ where
parent_beacon_block_root: attributes.parent_beacon_block_root,
ommers: &[],
withdrawals: attributes.withdrawals.map(Cow::Owned),
extra_data: attributes.extra_data,
})
}
}
@@ -282,6 +278,7 @@ where
parent_beacon_block_root: payload.sidecar.parent_beacon_block_root(),
ommers: &[],
withdrawals: payload.payload.withdrawals().map(|w| Cow::Owned(w.clone().into())),
extra_data: payload.payload.as_v1().extra_data.clone(),
})
}

View File

@@ -61,6 +61,8 @@ reth-node-core.workspace = true
reth-e2e-test-utils.workspace = true
reth-tasks.workspace = true
reth-testing-utils.workspace = true
tempfile.workspace = true
jsonrpsee-core.workspace = true
alloy-primitives.workspace = true
alloy-provider.workspace = true

View File

@@ -32,15 +32,15 @@ use reth_node_builder::{
EngineValidatorBuilder, EthApiBuilder, EthApiCtx, Identity, PayloadValidatorBuilder,
RethRpcAddOns, RpcAddOns, RpcHandle,
},
BuilderContext, DebugNode, Node, NodeAdapter, PayloadBuilderConfig,
BuilderContext, DebugNode, Node, NodeAdapter,
};
use reth_payload_primitives::PayloadTypes;
use reth_provider::{providers::ProviderFactoryBuilder, EthStorage};
use reth_rpc::{
eth::core::{EthApiFor, EthRpcConverterFor},
ValidationApi,
TestingApi, ValidationApi,
};
use reth_rpc_api::servers::BlockSubmissionValidationApiServer;
use reth_rpc_api::servers::{BlockSubmissionValidationApiServer, TestingApiServer};
use reth_rpc_builder::{config::RethRpcServerConfig, middleware::RethRpcMiddleware};
use reth_rpc_eth_api::{
helpers::{
@@ -313,6 +313,17 @@ where
.modules
.merge_if_module_configured(RethRpcModule::Eth, eth_config.into_rpc())?;
// testing_buildBlockV1: only wire when the hidden testing module is explicitly
// requested on any transport. Default stays disabled to honor security guidance.
let testing_api = TestingApi::new(
container.registry.eth_api().clone(),
container.registry.evm_config().clone(),
)
.into_rpc();
container
.modules
.merge_if_module_configured(RethRpcModule::Testing, testing_api)?;
Ok(())
})
.await
@@ -426,9 +437,7 @@ where
type EVM = EthEvmConfig<Types::ChainSpec>;
async fn build_evm(self, ctx: &BuilderContext<Node>) -> eyre::Result<Self::EVM> {
let evm_config = EthEvmConfig::new(ctx.chain_spec())
.with_extra_data(ctx.payload_builder_config().extra_data_bytes());
Ok(evm_config)
Ok(EthEvmConfig::new(ctx.chain_spec()))
}
}

View File

@@ -54,7 +54,8 @@ where
evm_config,
EthereumBuilderConfig::new()
.with_gas_limit(gas_limit)
.with_max_blobs_per_block(conf.max_blobs_per_block()),
.with_max_blobs_per_block(conf.max_blobs_per_block())
.with_extra_data(conf.extra_data_bytes()),
))
}
}

View File

@@ -2,5 +2,6 @@
mod builder;
mod exex;
mod testing;
const fn main() {}

View File

@@ -0,0 +1,84 @@
//! E2E tests for the testing RPC namespace.
use alloy_primitives::{Address, B256};
use alloy_rpc_types_engine::ExecutionPayloadEnvelopeV4;
use jsonrpsee_core::client::ClientT;
use reth_db::test_utils::create_test_rw_db;
use reth_ethereum_engine_primitives::EthPayloadAttributes;
use reth_node_builder::{NodeBuilder, NodeConfig};
use reth_node_core::{
args::DatadirArgs,
dirs::{DataDirPath, MaybePlatformPath},
};
use reth_node_ethereum::{node::EthereumAddOns, EthereumNode};
use reth_rpc_api::TestingBuildBlockRequestV1;
use reth_rpc_server_types::{RethRpcModule, RpcModuleSelection};
use reth_tasks::TaskManager;
use std::str::FromStr;
use tempfile::tempdir;
use tokio::sync::oneshot;
#[tokio::test(flavor = "multi_thread")]
async fn testing_rpc_build_block_works() -> eyre::Result<()> {
let tasks = TaskManager::current();
let mut rpc_args = reth_node_core::args::RpcServerArgs::default().with_http();
rpc_args.http_api = Some(RpcModuleSelection::from_iter([RethRpcModule::Testing]));
let tempdir = tempdir().expect("temp datadir");
let datadir_args = DatadirArgs {
datadir: MaybePlatformPath::<DataDirPath>::from_str(tempdir.path().to_str().unwrap())
.expect("valid datadir"),
static_files_path: Some(tempdir.path().join("static")),
};
let config = NodeConfig::test().with_datadir_args(datadir_args).with_rpc(rpc_args);
let db = create_test_rw_db();
let (tx, rx): (
oneshot::Sender<eyre::Result<ExecutionPayloadEnvelopeV4>>,
oneshot::Receiver<eyre::Result<ExecutionPayloadEnvelopeV4>>,
) = oneshot::channel();
let builder = NodeBuilder::new(config)
.with_database(db)
.with_launch_context(tasks.executor())
.with_types::<EthereumNode>()
.with_components(EthereumNode::components())
.with_add_ons(EthereumAddOns::default())
.on_rpc_started(move |ctx, handles| {
let Some(client) = handles.rpc.http_client() else { return Ok(()) };
let chain = ctx.config().chain.clone();
let parent_block_hash = chain.genesis_hash();
let payload_attributes = EthPayloadAttributes {
timestamp: chain.genesis().timestamp + 1,
prev_randao: B256::ZERO,
suggested_fee_recipient: Address::ZERO,
withdrawals: None,
parent_beacon_block_root: None,
};
let request = TestingBuildBlockRequestV1 {
parent_block_hash,
payload_attributes,
transactions: vec![],
extra_data: None,
};
tokio::spawn(async move {
let res: eyre::Result<ExecutionPayloadEnvelopeV4> =
client.request("testing_buildBlockV1", [request]).await.map_err(Into::into);
let _ = tx.send(res);
});
Ok(())
});
// Launch the node with the default engine launcher.
let launcher = builder.engine_api_launcher();
let _node = builder.launch_with(launcher).await?;
// Wait for the testing RPC call to return.
let res = rx.await.expect("testing_buildBlockV1 response");
assert!(res.is_ok(), "testing_buildBlockV1 failed: {:?}", res.err());
Ok(())
}

View File

@@ -1,4 +1,5 @@
use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT_30M;
use alloy_primitives::Bytes;
use reth_primitives_traits::constants::GAS_LIMIT_BOUND_DIVISOR;
/// Settings for the Ethereum builder.
@@ -13,6 +14,8 @@ pub struct EthereumBuilderConfig {
///
/// If `None`, defaults to the protocol maximum.
pub max_blobs_per_block: Option<u64>,
/// Extra data for built blocks.
pub extra_data: Bytes,
}
impl Default for EthereumBuilderConfig {
@@ -28,6 +31,7 @@ impl EthereumBuilderConfig {
desired_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M,
await_payload_on_missing: true,
max_blobs_per_block: None,
extra_data: Bytes::new(),
}
}
@@ -49,6 +53,12 @@ impl EthereumBuilderConfig {
self.max_blobs_per_block = max_blobs_per_block;
self
}
/// Set the extra data for built blocks.
pub fn with_extra_data(mut self, extra_data: Bytes) -> Self {
self.extra_data = extra_data;
self
}
}
impl EthereumBuilderConfig {

View File

@@ -168,6 +168,7 @@ where
gas_limit: builder_config.gas_limit(parent_header.gas_limit),
parent_beacon_block_root: attributes.parent_beacon_block_root(),
withdrawals: Some(attributes.withdrawals().clone()),
extra_data: builder_config.extra_data,
},
)
.map_err(PayloadBuilderError::other)?;

View File

@@ -28,7 +28,7 @@ use alloy_evm::{
block::{BlockExecutorFactory, BlockExecutorFor},
precompiles::PrecompilesMap,
};
use alloy_primitives::{Address, B256};
use alloy_primitives::{Address, Bytes, B256};
use core::{error::Error, fmt::Debug};
use execute::{BasicBlockExecutor, BlockAssembler, BlockBuilder};
use reth_execution_errors::BlockExecutionError;
@@ -501,6 +501,8 @@ pub struct NextBlockEnvAttributes {
pub parent_beacon_block_root: Option<B256>,
/// Withdrawals
pub withdrawals: Option<Withdrawals>,
/// Optional extra data.
pub extra_data: Bytes,
}
/// Abstraction over transaction environment.

View File

@@ -95,17 +95,33 @@ pub(super) mod serde_bincode_compat {
/// notification: ExExNotification<N>,
/// }
/// ```
///
/// This enum mirrors [`super::ExExNotification`] but uses borrowed [`Chain`] types
/// instead of `Arc<Chain>` for bincode compatibility.
#[derive(Debug, Serialize, Deserialize)]
#[expect(missing_docs)]
#[serde(bound = "")]
#[expect(clippy::large_enum_variant)]
pub enum ExExNotification<'a, N>
where
N: NodePrimitives,
{
ChainCommitted { new: Chain<'a, N> },
ChainReorged { old: Chain<'a, N>, new: Chain<'a, N> },
ChainReverted { old: Chain<'a, N> },
/// Chain got committed without a reorg, and only the new chain is returned.
ChainCommitted {
/// The new chain after commit.
new: Chain<'a, N>,
},
/// Chain got reorged, and both the old and the new chains are returned.
ChainReorged {
/// The old chain before reorg.
old: Chain<'a, N>,
/// The new chain after reorg.
new: Chain<'a, N>,
},
/// Chain got reverted, and only the old chain is returned.
ChainReverted {
/// The old chain before reversion.
old: Chain<'a, N>,
},
}
impl<'a, N> From<&'a super::ExExNotification<N>> for ExExNotification<'a, N>

View File

@@ -533,6 +533,27 @@ where
}
/// Modifies the addons with the given closure.
///
/// This method provides access to methods on the addons type that don't have
/// direct builder methods. It's useful for advanced configuration scenarios
/// where you need to call addon-specific methods.
///
/// # Examples
///
/// ```rust,ignore
/// use tower::layer::util::Identity;
///
/// let builder = NodeBuilder::new(config)
/// .with_types::<EthereumNode>()
/// .with_components(EthereumNode::components())
/// .with_add_ons(EthereumAddOns::default())
/// .map_add_ons(|addons| addons.with_rpc_middleware(Identity::default()));
/// ```
///
/// # See also
///
/// - [`NodeAddOns`] trait for available addon types
/// - [`crate::NodeBuilderWithComponents::extend_rpc_modules`] for RPC module configuration
pub fn map_add_ons<F>(self, f: F) -> Self
where
F: FnOnce(AO) -> AO,
@@ -579,10 +600,10 @@ where
/// .extend_rpc_modules(|ctx| {
/// // Access node components, so they can used by the CustomApi
/// let pool = ctx.pool().clone();
///
///
/// // Add custom RPC namespace
/// ctx.modules.merge_configured(CustomApi { pool }.into_rpc())?;
///
///
/// Ok(())
/// })
/// .build()?;
@@ -838,8 +859,8 @@ impl<Node: FullNodeTypes> BuilderContext<Node> {
.request_handler(self.provider().clone())
.split_with_handle();
self.executor.spawn_critical("p2p txpool", Box::pin(txpool));
self.executor.spawn_critical("p2p eth request handler", Box::pin(eth));
self.executor.spawn_critical_blocking("p2p txpool", Box::pin(txpool));
self.executor.spawn_critical_blocking("p2p eth request handler", Box::pin(eth));
let default_peers_path = self.config().datadir().known_peers();
let known_peers_file = self.config().network.persistent_peers_file(default_peers_path);

View File

@@ -235,6 +235,27 @@ where
}
/// Modifies the addons with the given closure.
///
/// This method provides access to methods on the addons type that don't have
/// direct builder methods. It's useful for advanced configuration scenarios
/// where you need to call addon-specific methods.
///
/// # Examples
///
/// ```rust,ignore
/// use tower::layer::util::Identity;
///
/// let builder = NodeBuilder::new(config)
/// .with_types::<EthereumNode>()
/// .with_components(EthereumNode::components())
/// .with_add_ons(EthereumAddOns::default())
/// .map_add_ons(|addons| addons.with_rpc_middleware(Identity::default()));
/// ```
///
/// # See also
///
/// - [`NodeAddOns`] trait for available addon types
/// - [`crate::NodeBuilderWithComponents::extend_rpc_modules`] for RPC module configuration
pub fn map_add_ons<F>(mut self, f: F) -> Self
where
F: FnOnce(AO) -> AO,

View File

@@ -1010,7 +1010,7 @@ where
.with_executor(Box::new(node.task_executor().clone()))
.with_evm_config(node.evm_config().clone())
.with_consensus(node.consensus().clone())
.build_with_auth_server(module_config, engine_api, eth_api);
.build_with_auth_server(module_config, engine_api, eth_api, engine_events.clone());
// in dev mode we generate 20 random dev-signer accounts
if config.dev.dev {
@@ -1179,6 +1179,7 @@ impl<'a, N: FullNodeComponents<Types: NodeTypes<ChainSpec: Hardforks + EthereumH
.proof_permits(self.config.proof_permits)
.gas_oracle_config(self.config.gas_oracle)
.max_batch_size(self.config.max_batch_size)
.max_blocking_io_requests(self.config.max_blocking_io_requests)
.pending_block_kind(self.config.pending_block_kind)
.raw_tx_forwarder(self.config.raw_tx_forwarder)
.evm_memory_limit(self.config.rpc_evm_memory_limit)
@@ -1188,10 +1189,7 @@ impl<'a, N: FullNodeComponents<Types: NodeTypes<ChainSpec: Hardforks + EthereumH
/// A `EthApi` that knows how to build `eth` namespace API from [`FullNodeComponents`].
pub trait EthApiBuilder<N: FullNodeComponents>: Default + Send + 'static {
/// The Ethapi implementation this builder will build.
type EthApi: EthApiTypes
+ FullEthApiServer<Provider = N::Provider, Pool = N::Pool>
+ Unpin
+ 'static;
type EthApi: FullEthApiServer<Provider = N::Provider, Pool = N::Pool>;
/// Builds the [`EthApiServer`](reth_rpc_api::eth::EthApiServer) from the given context.
fn build_eth_api(

View File

@@ -6,7 +6,7 @@ pub use network::{DiscoveryArgs, NetworkArgs};
/// RpcServerArg struct for configuring the RPC
mod rpc_server;
pub use rpc_server::RpcServerArgs;
pub use rpc_server::{DefaultRpcServerArgs, RpcServerArgs};
/// `RpcStateCacheArgs` struct for configuring RPC state cache
mod rpc_state_cache;

View File

@@ -119,6 +119,18 @@ pub struct NetworkArgs {
#[arg(long)]
pub max_inbound_peers: Option<usize>,
/// Maximum number of total peers (inbound + outbound).
///
/// Splits peers using approximately 2:1 inbound:outbound ratio. Cannot be used together with
/// `--max-outbound-peers` or `--max-inbound-peers`.
#[arg(
long,
value_name = "COUNT",
conflicts_with = "max_outbound_peers",
conflicts_with = "max_inbound_peers"
)]
pub max_peers: Option<usize>,
/// Max concurrent `GetPooledTransactions` requests.
#[arg(long = "max-tx-reqs", value_name = "COUNT", default_value_t = DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS, verbatim_doc_comment)]
pub max_concurrent_tx_requests: u32,
@@ -245,6 +257,34 @@ impl NetworkArgs {
bootnodes.into_iter().filter_map(|node| node.resolve_blocking().ok()).collect()
})
}
/// Returns the max inbound peers (2:1 ratio).
pub fn resolved_max_inbound_peers(&self) -> Option<usize> {
if let Some(max_peers) = self.max_peers {
if max_peers == 0 {
Some(0)
} else {
let outbound = (max_peers / 3).max(1);
Some(max_peers.saturating_sub(outbound))
}
} else {
self.max_inbound_peers
}
}
/// Returns the max outbound peers (1:2 ratio).
pub fn resolved_max_outbound_peers(&self) -> Option<usize> {
if let Some(max_peers) = self.max_peers {
if max_peers == 0 {
Some(0)
} else {
Some((max_peers / 3).max(1))
}
} else {
self.max_outbound_peers
}
}
/// Configures and returns a `TransactionsManagerConfig` based on the current settings.
pub const fn transactions_manager_config(&self) -> TransactionsManagerConfig {
TransactionsManagerConfig {
@@ -291,8 +331,8 @@ impl NetworkArgs {
.peers_config_with_basic_nodes_from_file(
self.persistent_peers_file(peers_file).as_deref(),
)
.with_max_inbound_opt(self.max_inbound_peers)
.with_max_outbound_opt(self.max_outbound_peers)
.with_max_inbound_opt(self.resolved_max_inbound_peers())
.with_max_outbound_opt(self.resolved_max_outbound_peers())
.with_ip_filter(ip_filter);
// Configure basic network stack
@@ -434,6 +474,7 @@ impl Default for NetworkArgs {
port: DEFAULT_DISCOVERY_PORT,
max_outbound_peers: None,
max_inbound_peers: None,
max_peers: None,
max_concurrent_tx_requests: DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS,
max_concurrent_tx_requests_per_peer: DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER,
soft_limit_byte_size_pooled_transactions_response:
@@ -758,6 +799,96 @@ mod tests {
assert!(args.disable_tx_gossip);
}
#[test]
fn parse_max_peers_flag() {
let args = CommandParser::<NetworkArgs>::parse_from(["reth", "--max-peers", "90"]).args;
assert_eq!(args.max_peers, Some(90));
assert_eq!(args.max_outbound_peers, None);
assert_eq!(args.max_inbound_peers, None);
assert_eq!(args.resolved_max_outbound_peers(), Some(30));
assert_eq!(args.resolved_max_inbound_peers(), Some(60));
}
#[test]
fn max_peers_conflicts_with_outbound() {
let result = CommandParser::<NetworkArgs>::try_parse_from([
"reth",
"--max-peers",
"90",
"--max-outbound-peers",
"50",
]);
assert!(
result.is_err(),
"Should fail when both --max-peers and --max-outbound-peers are used"
);
}
#[test]
fn max_peers_conflicts_with_inbound() {
let result = CommandParser::<NetworkArgs>::try_parse_from([
"reth",
"--max-peers",
"90",
"--max-inbound-peers",
"30",
]);
assert!(
result.is_err(),
"Should fail when both --max-peers and --max-inbound-peers are used"
);
}
#[test]
fn max_peers_split_calculation() {
let args = CommandParser::<NetworkArgs>::parse_from(["reth", "--max-peers", "90"]).args;
assert_eq!(args.max_peers, Some(90));
assert_eq!(args.resolved_max_outbound_peers(), Some(30));
assert_eq!(args.resolved_max_inbound_peers(), Some(60));
}
#[test]
fn max_peers_small_values() {
let args1 = CommandParser::<NetworkArgs>::parse_from(["reth", "--max-peers", "1"]).args;
assert_eq!(args1.resolved_max_outbound_peers(), Some(1));
assert_eq!(args1.resolved_max_inbound_peers(), Some(0));
let args2 = CommandParser::<NetworkArgs>::parse_from(["reth", "--max-peers", "2"]).args;
assert_eq!(args2.resolved_max_outbound_peers(), Some(1));
assert_eq!(args2.resolved_max_inbound_peers(), Some(1));
let args3 = CommandParser::<NetworkArgs>::parse_from(["reth", "--max-peers", "3"]).args;
assert_eq!(args3.resolved_max_outbound_peers(), Some(1));
assert_eq!(args3.resolved_max_inbound_peers(), Some(2));
}
#[test]
fn resolved_peers_without_max_peers() {
let args = CommandParser::<NetworkArgs>::parse_from([
"reth",
"--max-outbound-peers",
"75",
"--max-inbound-peers",
"15",
])
.args;
assert_eq!(args.max_peers, None);
assert_eq!(args.resolved_max_outbound_peers(), Some(75));
assert_eq!(args.resolved_max_inbound_peers(), Some(15));
}
#[test]
fn resolved_peers_with_defaults() {
let args = CommandParser::<NetworkArgs>::parse_from(["reth"]).args;
assert_eq!(args.max_peers, None);
assert_eq!(args.resolved_max_outbound_peers(), None);
assert_eq!(args.resolved_max_inbound_peers(), None);
}
#[test]
fn network_args_default_sanity_test() {
let default_args = NetworkArgs::default();

View File

@@ -7,7 +7,7 @@ use crate::args::{
use alloy_primitives::Address;
use alloy_rpc_types_engine::JwtSecret;
use clap::{
builder::{PossibleValue, RangedU64ValueParser, TypedValueParser},
builder::{PossibleValue, RangedU64ValueParser, Resettable, TypedValueParser},
Arg, Args, Command,
};
use rand::Rng;
@@ -19,12 +19,16 @@ use std::{
ffi::OsStr,
net::{IpAddr, Ipv4Addr},
path::PathBuf,
sync::OnceLock,
time::Duration,
};
use url::Url;
use super::types::MaxOr;
/// Global static RPC server defaults
static RPC_SERVER_DEFAULTS: OnceLock<DefaultRpcServerArgs> = OnceLock::new();
/// Default max number of subscriptions per connection.
pub(crate) const RPC_DEFAULT_MAX_SUBS_PER_CONN: u32 = 1024;
@@ -37,76 +41,442 @@ pub(crate) const RPC_DEFAULT_MAX_REQUEST_SIZE_MB: u32 = 15;
pub(crate) const RPC_DEFAULT_MAX_RESPONSE_SIZE_MB: u32 = 160;
/// Default number of incoming connections.
///
/// This restricts how many active connections (http, ws) the server accepts.
/// Once exceeded, the server can reject new connections.
pub(crate) const RPC_DEFAULT_MAX_CONNECTIONS: u32 = 500;
/// Default values for RPC server that can be customized
///
/// Global defaults can be set via [`DefaultRpcServerArgs::try_init`].
#[derive(Debug, Clone)]
pub struct DefaultRpcServerArgs {
http: bool,
http_addr: IpAddr,
http_port: u16,
http_disable_compression: bool,
http_api: Option<RpcModuleSelection>,
http_corsdomain: Option<String>,
ws: bool,
ws_addr: IpAddr,
ws_port: u16,
ws_allowed_origins: Option<String>,
ws_api: Option<RpcModuleSelection>,
ipcdisable: bool,
ipcpath: String,
ipc_socket_permissions: Option<String>,
auth_addr: IpAddr,
auth_port: u16,
auth_jwtsecret: Option<PathBuf>,
auth_ipc: bool,
auth_ipc_path: String,
disable_auth_server: bool,
rpc_jwtsecret: Option<JwtSecret>,
rpc_max_request_size: MaxU32,
rpc_max_response_size: MaxU32,
rpc_max_subscriptions_per_connection: MaxU32,
rpc_max_connections: MaxU32,
rpc_max_tracing_requests: usize,
rpc_max_blocking_io_requests: usize,
rpc_max_trace_filter_blocks: u64,
rpc_max_blocks_per_filter: ZeroAsNoneU64,
rpc_max_logs_per_response: ZeroAsNoneU64,
rpc_gas_cap: u64,
rpc_evm_memory_limit: u64,
rpc_tx_fee_cap: u128,
rpc_max_simulate_blocks: u64,
rpc_eth_proof_window: u64,
rpc_proof_permits: usize,
rpc_pending_block: PendingBlockKind,
rpc_forwarder: Option<Url>,
builder_disallow: Option<HashSet<Address>>,
rpc_state_cache: RpcStateCacheArgs,
gas_price_oracle: GasPriceOracleArgs,
rpc_send_raw_transaction_sync_timeout: Duration,
}
impl DefaultRpcServerArgs {
    /// Installs this configuration as the global RPC server defaults.
    ///
    /// Fails (returning the rejected configuration) if the global defaults were
    /// already set, either explicitly or lazily via [`Self::get_global`].
    pub fn try_init(self) -> Result<(), Self> {
        RPC_SERVER_DEFAULTS.set(self)
    }

    /// Returns the global RPC server defaults, lazily falling back to
    /// [`Self::default`] if [`Self::try_init`] was never called.
    pub fn get_global() -> &'static Self {
        RPC_SERVER_DEFAULTS.get_or_init(Self::default)
    }

    /// Sets whether the HTTP-RPC server is enabled by default.
    pub const fn with_http(mut self, value: bool) -> Self {
        self.http = value;
        self
    }

    /// Sets the default HTTP listen address.
    pub const fn with_http_addr(mut self, value: IpAddr) -> Self {
        self.http_addr = value;
        self
    }

    /// Sets the default HTTP listen port.
    pub const fn with_http_port(mut self, value: u16) -> Self {
        self.http_port = value;
        self
    }

    /// Sets whether HTTP response compression is disabled by default.
    pub const fn with_http_disable_compression(mut self, value: bool) -> Self {
        self.http_disable_compression = value;
        self
    }

    /// Sets the default HTTP API module selection.
    pub fn with_http_api(mut self, value: Option<RpcModuleSelection>) -> Self {
        self.http_api = value;
        self
    }

    /// Sets the default HTTP CORS domain.
    pub fn with_http_corsdomain(mut self, value: Option<String>) -> Self {
        self.http_corsdomain = value;
        self
    }

    /// Sets whether the WS-RPC server is enabled by default.
    pub const fn with_ws(mut self, value: bool) -> Self {
        self.ws = value;
        self
    }

    /// Sets the default WS listen address.
    pub const fn with_ws_addr(mut self, value: IpAddr) -> Self {
        self.ws_addr = value;
        self
    }

    /// Sets the default WS listen port.
    pub const fn with_ws_port(mut self, value: u16) -> Self {
        self.ws_port = value;
        self
    }

    /// Sets the default allowed `WebSocket` origins.
    pub fn with_ws_allowed_origins(mut self, value: Option<String>) -> Self {
        self.ws_allowed_origins = value;
        self
    }

    /// Sets the default WS API module selection.
    pub fn with_ws_api(mut self, value: Option<RpcModuleSelection>) -> Self {
        self.ws_api = value;
        self
    }

    /// Sets whether the IPC-RPC server is disabled by default.
    pub const fn with_ipcdisable(mut self, value: bool) -> Self {
        self.ipcdisable = value;
        self
    }

    /// Sets the default IPC socket/pipe filename.
    pub fn with_ipcpath(mut self, value: String) -> Self {
        self.ipcpath = value;
        self
    }

    /// Sets the default IPC socket file permissions.
    pub fn with_ipc_socket_permissions(mut self, value: Option<String>) -> Self {
        self.ipc_socket_permissions = value;
        self
    }

    /// Sets the default auth server listen address.
    pub const fn with_auth_addr(mut self, value: IpAddr) -> Self {
        self.auth_addr = value;
        self
    }

    /// Sets the default auth server listen port.
    pub const fn with_auth_port(mut self, value: u16) -> Self {
        self.auth_port = value;
        self
    }

    /// Sets the default auth JWT secret path.
    pub fn with_auth_jwtsecret(mut self, value: Option<PathBuf>) -> Self {
        self.auth_jwtsecret = value;
        self
    }

    /// Sets whether the auth engine API over IPC is enabled by default.
    pub const fn with_auth_ipc(mut self, value: bool) -> Self {
        self.auth_ipc = value;
        self
    }

    /// Sets the default auth IPC socket/pipe filename.
    pub fn with_auth_ipc_path(mut self, value: String) -> Self {
        self.auth_ipc_path = value;
        self
    }

    /// Sets whether the auth/engine API server is disabled by default.
    pub const fn with_disable_auth_server(mut self, value: bool) -> Self {
        self.disable_auth_server = value;
        self
    }

    /// Sets the default JWT secret for the regular RPC servers.
    pub const fn with_rpc_jwtsecret(mut self, value: Option<JwtSecret>) -> Self {
        self.rpc_jwtsecret = value;
        self
    }

    /// Sets the default max request payload size (MB).
    pub const fn with_rpc_max_request_size(mut self, value: MaxU32) -> Self {
        self.rpc_max_request_size = value;
        self
    }

    /// Sets the default max response payload size (MB).
    pub const fn with_rpc_max_response_size(mut self, value: MaxU32) -> Self {
        self.rpc_max_response_size = value;
        self
    }

    /// Sets the default max subscriptions per connection.
    pub const fn with_rpc_max_subscriptions_per_connection(mut self, value: MaxU32) -> Self {
        self.rpc_max_subscriptions_per_connection = value;
        self
    }

    /// Sets the default max number of RPC connections.
    pub const fn with_rpc_max_connections(mut self, value: MaxU32) -> Self {
        self.rpc_max_connections = value;
        self
    }

    /// Sets the default max concurrent tracing requests.
    pub const fn with_rpc_max_tracing_requests(mut self, value: usize) -> Self {
        self.rpc_max_tracing_requests = value;
        self
    }

    /// Sets the default max concurrent blocking IO requests.
    pub const fn with_rpc_max_blocking_io_requests(mut self, value: usize) -> Self {
        self.rpc_max_blocking_io_requests = value;
        self
    }

    /// Sets the default max blocks for `trace_filter` requests.
    pub const fn with_rpc_max_trace_filter_blocks(mut self, value: u64) -> Self {
        self.rpc_max_trace_filter_blocks = value;
        self
    }

    /// Sets the default max blocks scanned per filter request.
    pub const fn with_rpc_max_blocks_per_filter(mut self, value: ZeroAsNoneU64) -> Self {
        self.rpc_max_blocks_per_filter = value;
        self
    }

    /// Sets the default max logs per response.
    pub const fn with_rpc_max_logs_per_response(mut self, value: ZeroAsNoneU64) -> Self {
        self.rpc_max_logs_per_response = value;
        self
    }

    /// Sets the default gas cap for `eth_call` and call tracing.
    pub const fn with_rpc_gas_cap(mut self, value: u64) -> Self {
        self.rpc_gas_cap = value;
        self
    }

    /// Sets the default EVM memory limit.
    pub const fn with_rpc_evm_memory_limit(mut self, value: u64) -> Self {
        self.rpc_evm_memory_limit = value;
        self
    }

    /// Sets the default transaction fee cap (wei).
    pub const fn with_rpc_tx_fee_cap(mut self, value: u128) -> Self {
        self.rpc_tx_fee_cap = value;
        self
    }

    /// Sets the default max blocks for `eth_simulateV1`.
    pub const fn with_rpc_max_simulate_blocks(mut self, value: u64) -> Self {
        self.rpc_max_simulate_blocks = value;
        self
    }

    /// Sets the default eth proof window.
    pub const fn with_rpc_eth_proof_window(mut self, value: u64) -> Self {
        self.rpc_eth_proof_window = value;
        self
    }

    /// Sets the default number of proof permits.
    pub const fn with_rpc_proof_permits(mut self, value: usize) -> Self {
        self.rpc_proof_permits = value;
        self
    }

    /// Sets the default pending block kind.
    pub const fn with_rpc_pending_block(mut self, value: PendingBlockKind) -> Self {
        self.rpc_pending_block = value;
        self
    }

    /// Sets the default RPC forwarder URL.
    pub fn with_rpc_forwarder(mut self, value: Option<Url>) -> Self {
        self.rpc_forwarder = value;
        self
    }

    /// Sets the default set of disallowed builder addresses.
    pub fn with_builder_disallow(mut self, value: Option<HashSet<Address>>) -> Self {
        self.builder_disallow = value;
        self
    }

    /// Sets the default RPC state cache configuration.
    pub const fn with_rpc_state_cache(mut self, value: RpcStateCacheArgs) -> Self {
        self.rpc_state_cache = value;
        self
    }

    /// Sets the default gas price oracle configuration.
    pub const fn with_gas_price_oracle(mut self, value: GasPriceOracleArgs) -> Self {
        self.gas_price_oracle = value;
        self
    }

    /// Sets the default timeout for `eth_sendRawTransactionSync`.
    pub const fn with_rpc_send_raw_transaction_sync_timeout(mut self, value: Duration) -> Self {
        self.rpc_send_raw_transaction_sync_timeout = value;
        self
    }
}
impl Default for DefaultRpcServerArgs {
    /// Baseline defaults mirroring the constants this module ships with.
    fn default() -> Self {
        Self {
            // HTTP server: disabled, localhost, standard port, compression on.
            http: false,
            http_addr: Ipv4Addr::LOCALHOST.into(),
            http_port: constants::DEFAULT_HTTP_RPC_PORT,
            http_disable_compression: false,
            http_api: None,
            http_corsdomain: None,
            // WS server: disabled, localhost, standard port.
            ws: false,
            ws_addr: Ipv4Addr::LOCALHOST.into(),
            ws_port: constants::DEFAULT_WS_RPC_PORT,
            ws_allowed_origins: None,
            ws_api: None,
            // IPC: enabled at the standard endpoint, umask-controlled permissions.
            ipcdisable: false,
            ipcpath: constants::DEFAULT_IPC_ENDPOINT.to_string(),
            ipc_socket_permissions: None,
            // Auth/engine API server: enabled on localhost, generated JWT secret.
            auth_addr: Ipv4Addr::LOCALHOST.into(),
            auth_port: constants::DEFAULT_AUTH_PORT,
            auth_jwtsecret: None,
            auth_ipc: false,
            auth_ipc_path: constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string(),
            disable_auth_server: false,
            // Regular RPC server limits.
            rpc_jwtsecret: None,
            rpc_max_request_size: RPC_DEFAULT_MAX_REQUEST_SIZE_MB.into(),
            rpc_max_response_size: RPC_DEFAULT_MAX_RESPONSE_SIZE_MB.into(),
            rpc_max_subscriptions_per_connection: RPC_DEFAULT_MAX_SUBS_PER_CONN.into(),
            rpc_max_connections: RPC_DEFAULT_MAX_CONNECTIONS.into(),
            rpc_max_tracing_requests: constants::default_max_tracing_requests(),
            rpc_max_blocking_io_requests: constants::DEFAULT_MAX_BLOCKING_IO_REQUEST,
            rpc_max_trace_filter_blocks: constants::DEFAULT_MAX_TRACE_FILTER_BLOCKS,
            rpc_max_blocks_per_filter: constants::DEFAULT_MAX_BLOCKS_PER_FILTER.into(),
            rpc_max_logs_per_response: (constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64).into(),
            rpc_gas_cap: constants::gas_oracle::RPC_DEFAULT_GAS_CAP,
            // 2^32 - 1: mirrors the CLI default for the EVM memory limit.
            rpc_evm_memory_limit: (1 << 32) - 1,
            rpc_tx_fee_cap: constants::DEFAULT_TX_FEE_CAP_WEI,
            rpc_max_simulate_blocks: constants::DEFAULT_MAX_SIMULATE_BLOCKS,
            rpc_eth_proof_window: constants::DEFAULT_ETH_PROOF_WINDOW,
            rpc_proof_permits: constants::DEFAULT_PROOF_PERMITS,
            rpc_pending_block: PendingBlockKind::Full,
            rpc_forwarder: None,
            builder_disallow: None,
            rpc_state_cache: RpcStateCacheArgs::default(),
            gas_price_oracle: GasPriceOracleArgs::default(),
            rpc_send_raw_transaction_sync_timeout:
                constants::RPC_DEFAULT_SEND_RAW_TX_SYNC_TIMEOUT_SECS,
        }
    }
}
/// Parameters for configuring the rpc more granularity via CLI
#[derive(Debug, Clone, Args, PartialEq, Eq)]
#[command(next_help_heading = "RPC")]
pub struct RpcServerArgs {
/// Enable the HTTP-RPC server
#[arg(long, default_value_if("dev", "true", "true"))]
#[arg(long, default_value_if("dev", "true", "true"), default_value_t = DefaultRpcServerArgs::get_global().http)]
pub http: bool,
/// Http server address to listen on
#[arg(long = "http.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
#[arg(long = "http.addr", default_value_t = DefaultRpcServerArgs::get_global().http_addr)]
pub http_addr: IpAddr,
/// Http server port to listen on
#[arg(long = "http.port", default_value_t = constants::DEFAULT_HTTP_RPC_PORT)]
#[arg(long = "http.port", default_value_t = DefaultRpcServerArgs::get_global().http_port)]
pub http_port: u16,
/// Disable compression for HTTP responses
#[arg(long = "http.disable-compression", default_value_t = false)]
#[arg(long = "http.disable-compression", default_value_t = DefaultRpcServerArgs::get_global().http_disable_compression)]
pub http_disable_compression: bool,
/// Rpc Modules to be configured for the HTTP server
#[arg(long = "http.api", value_parser = RpcModuleSelectionValueParser::default())]
#[arg(long = "http.api", value_parser = RpcModuleSelectionValueParser::default(), default_value = Resettable::from(DefaultRpcServerArgs::get_global().http_api.as_ref().map(|v| v.to_string().into())))]
pub http_api: Option<RpcModuleSelection>,
/// Http Corsdomain to allow request from
#[arg(long = "http.corsdomain")]
#[arg(long = "http.corsdomain", default_value = Resettable::from(DefaultRpcServerArgs::get_global().http_corsdomain.as_ref().map(|v| v.to_string().into())))]
pub http_corsdomain: Option<String>,
/// Enable the WS-RPC server
#[arg(long)]
#[arg(long, default_value_t = DefaultRpcServerArgs::get_global().ws)]
pub ws: bool,
/// Ws server address to listen on
#[arg(long = "ws.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
#[arg(long = "ws.addr", default_value_t = DefaultRpcServerArgs::get_global().ws_addr)]
pub ws_addr: IpAddr,
/// Ws server port to listen on
#[arg(long = "ws.port", default_value_t = constants::DEFAULT_WS_RPC_PORT)]
#[arg(long = "ws.port", default_value_t = DefaultRpcServerArgs::get_global().ws_port)]
pub ws_port: u16,
/// Origins from which to accept `WebSocket` requests
#[arg(id = "ws.origins", long = "ws.origins", alias = "ws.corsdomain")]
#[arg(id = "ws.origins", long = "ws.origins", alias = "ws.corsdomain", default_value = Resettable::from(DefaultRpcServerArgs::get_global().ws_allowed_origins.as_ref().map(|v| v.to_string().into())))]
pub ws_allowed_origins: Option<String>,
/// Rpc Modules to be configured for the WS server
#[arg(long = "ws.api", value_parser = RpcModuleSelectionValueParser::default())]
#[arg(long = "ws.api", value_parser = RpcModuleSelectionValueParser::default(), default_value = Resettable::from(DefaultRpcServerArgs::get_global().ws_api.as_ref().map(|v| v.to_string().into())))]
pub ws_api: Option<RpcModuleSelection>,
/// Disable the IPC-RPC server
#[arg(long)]
#[arg(long, default_value_t = DefaultRpcServerArgs::get_global().ipcdisable)]
pub ipcdisable: bool,
/// Filename for IPC socket/pipe within the datadir
#[arg(long, default_value_t = constants::DEFAULT_IPC_ENDPOINT.to_string())]
#[arg(long, default_value_t = DefaultRpcServerArgs::get_global().ipcpath.clone())]
pub ipcpath: String,
/// Set the permissions for the IPC socket file, in octal format.
///
/// If not specified, the permissions will be set by the system's umask.
#[arg(long = "ipc.permissions")]
#[arg(long = "ipc.permissions", default_value = Resettable::from(DefaultRpcServerArgs::get_global().ipc_socket_permissions.as_ref().map(|v| v.to_string().into())))]
pub ipc_socket_permissions: Option<String>,
/// Auth server address to listen on
#[arg(long = "authrpc.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))]
#[arg(long = "authrpc.addr", default_value_t = DefaultRpcServerArgs::get_global().auth_addr)]
pub auth_addr: IpAddr,
/// Auth server port to listen on
#[arg(long = "authrpc.port", default_value_t = constants::DEFAULT_AUTH_PORT)]
#[arg(long = "authrpc.port", default_value_t = DefaultRpcServerArgs::get_global().auth_port)]
pub auth_port: u16,
/// Path to a JWT secret to use for the authenticated engine-API RPC server.
@@ -115,22 +485,22 @@ pub struct RpcServerArgs {
///
/// If no path is provided, a secret will be generated and stored in the datadir under
/// `<DIR>/<CHAIN_ID>/jwt.hex`. For mainnet this would be `~/.reth/mainnet/jwt.hex` by default.
#[arg(long = "authrpc.jwtsecret", value_name = "PATH", global = true, required = false)]
#[arg(long = "authrpc.jwtsecret", value_name = "PATH", global = true, required = false, default_value = Resettable::from(DefaultRpcServerArgs::get_global().auth_jwtsecret.as_ref().map(|v| v.to_string_lossy().into())))]
pub auth_jwtsecret: Option<PathBuf>,
/// Enable auth engine API over IPC
#[arg(long)]
#[arg(long, default_value_t = DefaultRpcServerArgs::get_global().auth_ipc)]
pub auth_ipc: bool,
/// Filename for auth IPC socket/pipe within the datadir
#[arg(long = "auth-ipc.path", default_value_t = constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string())]
#[arg(long = "auth-ipc.path", default_value_t = DefaultRpcServerArgs::get_global().auth_ipc_path.clone())]
pub auth_ipc_path: String,
/// Disable the auth/engine API server.
///
/// This will prevent the authenticated engine-API server from starting. Use this if you're
/// running a node that doesn't need to serve engine API requests.
#[arg(long = "disable-auth-server", alias = "disable-engine-api")]
#[arg(long = "disable-auth-server", alias = "disable-engine-api", default_value_t = DefaultRpcServerArgs::get_global().disable_auth_server)]
pub disable_auth_server: bool,
/// Hex encoded JWT secret to authenticate the regular RPC server(s), see `--http.api` and
@@ -138,23 +508,23 @@ pub struct RpcServerArgs {
///
/// This is __not__ used for the authenticated engine-API RPC server, see
/// `--authrpc.jwtsecret`.
#[arg(long = "rpc.jwtsecret", value_name = "HEX", global = true, required = false)]
#[arg(long = "rpc.jwtsecret", value_name = "HEX", global = true, required = false, default_value = Resettable::from(DefaultRpcServerArgs::get_global().rpc_jwtsecret.as_ref().map(|v| format!("{:?}", v).into())))]
pub rpc_jwtsecret: Option<JwtSecret>,
/// Set the maximum RPC request payload size for both HTTP and WS in megabytes.
#[arg(long = "rpc.max-request-size", alias = "rpc-max-request-size", default_value_t = RPC_DEFAULT_MAX_REQUEST_SIZE_MB.into())]
#[arg(long = "rpc.max-request-size", alias = "rpc-max-request-size", default_value_t = DefaultRpcServerArgs::get_global().rpc_max_request_size)]
pub rpc_max_request_size: MaxU32,
/// Set the maximum RPC response payload size for both HTTP and WS in megabytes.
#[arg(long = "rpc.max-response-size", alias = "rpc-max-response-size", visible_alias = "rpc.returndata.limit", default_value_t = RPC_DEFAULT_MAX_RESPONSE_SIZE_MB.into())]
#[arg(long = "rpc.max-response-size", alias = "rpc-max-response-size", visible_alias = "rpc.returndata.limit", default_value_t = DefaultRpcServerArgs::get_global().rpc_max_response_size)]
pub rpc_max_response_size: MaxU32,
/// Set the maximum concurrent subscriptions per connection.
#[arg(long = "rpc.max-subscriptions-per-connection", alias = "rpc-max-subscriptions-per-connection", default_value_t = RPC_DEFAULT_MAX_SUBS_PER_CONN.into())]
#[arg(long = "rpc.max-subscriptions-per-connection", alias = "rpc-max-subscriptions-per-connection", default_value_t = DefaultRpcServerArgs::get_global().rpc_max_subscriptions_per_connection)]
pub rpc_max_subscriptions_per_connection: MaxU32,
/// Maximum number of RPC server connections.
#[arg(long = "rpc.max-connections", alias = "rpc-max-connections", value_name = "COUNT", default_value_t = RPC_DEFAULT_MAX_CONNECTIONS.into())]
#[arg(long = "rpc.max-connections", alias = "rpc-max-connections", value_name = "COUNT", default_value_t = DefaultRpcServerArgs::get_global().rpc_max_connections)]
pub rpc_max_connections: MaxU32,
/// Maximum number of concurrent tracing requests.
@@ -163,19 +533,27 @@ pub struct RpcServerArgs {
/// Tracing requests are generally CPU bound.
/// Choosing a value that is higher than the available CPU cores can have a negative impact on
/// the performance of the node and affect the node's ability to maintain sync.
#[arg(long = "rpc.max-tracing-requests", alias = "rpc-max-tracing-requests", value_name = "COUNT", default_value_t = constants::default_max_tracing_requests())]
#[arg(long = "rpc.max-tracing-requests", alias = "rpc-max-tracing-requests", value_name = "COUNT", default_value_t = DefaultRpcServerArgs::get_global().rpc_max_tracing_requests)]
pub rpc_max_tracing_requests: usize,
/// Maximum number of concurrent blocking IO requests.
///
/// Blocking IO requests include `eth_call`, `eth_estimateGas`, and similar methods that
/// require EVM execution. These are spawned as blocking tasks to avoid blocking the async
/// runtime.
#[arg(long = "rpc.max-blocking-io-requests", alias = "rpc-max-blocking-io-requests", value_name = "COUNT", default_value_t = DefaultRpcServerArgs::get_global().rpc_max_blocking_io_requests)]
pub rpc_max_blocking_io_requests: usize,
/// Maximum number of blocks for `trace_filter` requests.
#[arg(long = "rpc.max-trace-filter-blocks", alias = "rpc-max-trace-filter-blocks", value_name = "COUNT", default_value_t = constants::DEFAULT_MAX_TRACE_FILTER_BLOCKS)]
#[arg(long = "rpc.max-trace-filter-blocks", alias = "rpc-max-trace-filter-blocks", value_name = "COUNT", default_value_t = DefaultRpcServerArgs::get_global().rpc_max_trace_filter_blocks)]
pub rpc_max_trace_filter_blocks: u64,
/// Maximum number of blocks that could be scanned per filter request. (0 = entire chain)
#[arg(long = "rpc.max-blocks-per-filter", alias = "rpc-max-blocks-per-filter", value_name = "COUNT", default_value_t = ZeroAsNoneU64::new(constants::DEFAULT_MAX_BLOCKS_PER_FILTER))]
#[arg(long = "rpc.max-blocks-per-filter", alias = "rpc-max-blocks-per-filter", value_name = "COUNT", default_value_t = DefaultRpcServerArgs::get_global().rpc_max_blocks_per_filter)]
pub rpc_max_blocks_per_filter: ZeroAsNoneU64,
/// Maximum number of logs that can be returned in a single response. (0 = no limit)
#[arg(long = "rpc.max-logs-per-response", alias = "rpc-max-logs-per-response", value_name = "COUNT", default_value_t = ZeroAsNoneU64::new(constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64))]
#[arg(long = "rpc.max-logs-per-response", alias = "rpc-max-logs-per-response", value_name = "COUNT", default_value_t = DefaultRpcServerArgs::get_global().rpc_max_logs_per_response)]
pub rpc_max_logs_per_response: ZeroAsNoneU64,
/// Maximum gas limit for `eth_call` and call tracing RPC methods.
@@ -184,7 +562,7 @@ pub struct RpcServerArgs {
alias = "rpc-gascap",
value_name = "GAS_CAP",
value_parser = MaxOr::new(RangedU64ValueParser::<u64>::new().range(1..)),
default_value_t = constants::gas_oracle::RPC_DEFAULT_GAS_CAP
default_value_t = DefaultRpcServerArgs::get_global().rpc_gas_cap
)]
pub rpc_gas_cap: u64,
@@ -194,7 +572,7 @@ pub struct RpcServerArgs {
alias = "rpc-evm-memory-limit",
value_name = "MEMORY_LIMIT",
value_parser = MaxOr::new(RangedU64ValueParser::<u64>::new().range(1..)),
default_value_t = (1 << 32) - 1
default_value_t = DefaultRpcServerArgs::get_global().rpc_evm_memory_limit
)]
pub rpc_evm_memory_limit: u64,
@@ -212,7 +590,7 @@ pub struct RpcServerArgs {
#[arg(
long = "rpc.max-simulate-blocks",
value_name = "BLOCKS_COUNT",
default_value_t = constants::DEFAULT_MAX_SIMULATE_BLOCKS
default_value_t = DefaultRpcServerArgs::get_global().rpc_max_simulate_blocks
)]
pub rpc_max_simulate_blocks: u64,
@@ -221,7 +599,7 @@ pub struct RpcServerArgs {
/// configured number of blocks from current tip (up to `tip - window`).
#[arg(
long = "rpc.eth-proof-window",
default_value_t = constants::DEFAULT_ETH_PROOF_WINDOW,
default_value_t = DefaultRpcServerArgs::get_global().rpc_eth_proof_window,
value_parser = RangedU64ValueParser::<u64>::new().range(..=constants::MAX_ETH_PROOF_WINDOW)
)]
pub rpc_eth_proof_window: u64,
@@ -243,7 +621,7 @@ pub struct RpcServerArgs {
/// Path to file containing disallowed addresses, json-encoded list of strings. Block
/// validation API will reject blocks containing transactions from these addresses.
#[arg(long = "builder.disallow", value_name = "PATH", value_parser = reth_cli_util::parsers::read_json_from_file::<HashSet<Address>>)]
#[arg(long = "builder.disallow", value_name = "PATH", value_parser = reth_cli_util::parsers::read_json_from_file::<HashSet<Address>>, default_value = Resettable::from(DefaultRpcServerArgs::get_global().builder_disallow.as_ref().map(|v| format!("{:?}", v).into())))]
pub builder_disallow: Option<HashSet<Address>>,
/// State cache configuration.
@@ -387,49 +765,93 @@ impl RpcServerArgs {
impl Default for RpcServerArgs {
fn default() -> Self {
let DefaultRpcServerArgs {
http,
http_addr,
http_port,
http_disable_compression,
http_api,
http_corsdomain,
ws,
ws_addr,
ws_port,
ws_allowed_origins,
ws_api,
ipcdisable,
ipcpath,
ipc_socket_permissions,
auth_addr,
auth_port,
auth_jwtsecret,
auth_ipc,
auth_ipc_path,
disable_auth_server,
rpc_jwtsecret,
rpc_max_request_size,
rpc_max_response_size,
rpc_max_subscriptions_per_connection,
rpc_max_connections,
rpc_max_tracing_requests,
rpc_max_blocking_io_requests,
rpc_max_trace_filter_blocks,
rpc_max_blocks_per_filter,
rpc_max_logs_per_response,
rpc_gas_cap,
rpc_evm_memory_limit,
rpc_tx_fee_cap,
rpc_max_simulate_blocks,
rpc_eth_proof_window,
rpc_proof_permits,
rpc_pending_block,
rpc_forwarder,
builder_disallow,
rpc_state_cache,
gas_price_oracle,
rpc_send_raw_transaction_sync_timeout,
} = DefaultRpcServerArgs::get_global().clone();
Self {
http: false,
http_addr: Ipv4Addr::LOCALHOST.into(),
http_port: constants::DEFAULT_HTTP_RPC_PORT,
http_disable_compression: false,
http_api: None,
http_corsdomain: None,
ws: false,
ws_addr: Ipv4Addr::LOCALHOST.into(),
ws_port: constants::DEFAULT_WS_RPC_PORT,
ws_allowed_origins: None,
ws_api: None,
ipcdisable: false,
ipcpath: constants::DEFAULT_IPC_ENDPOINT.to_string(),
ipc_socket_permissions: None,
auth_addr: Ipv4Addr::LOCALHOST.into(),
auth_port: constants::DEFAULT_AUTH_PORT,
auth_jwtsecret: None,
auth_ipc: false,
auth_ipc_path: constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string(),
disable_auth_server: false,
rpc_jwtsecret: None,
rpc_max_request_size: RPC_DEFAULT_MAX_REQUEST_SIZE_MB.into(),
rpc_max_response_size: RPC_DEFAULT_MAX_RESPONSE_SIZE_MB.into(),
rpc_max_subscriptions_per_connection: RPC_DEFAULT_MAX_SUBS_PER_CONN.into(),
rpc_max_connections: RPC_DEFAULT_MAX_CONNECTIONS.into(),
rpc_max_tracing_requests: constants::default_max_tracing_requests(),
rpc_max_trace_filter_blocks: constants::DEFAULT_MAX_TRACE_FILTER_BLOCKS,
rpc_max_blocks_per_filter: constants::DEFAULT_MAX_BLOCKS_PER_FILTER.into(),
rpc_max_logs_per_response: (constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64).into(),
rpc_gas_cap: constants::gas_oracle::RPC_DEFAULT_GAS_CAP,
rpc_evm_memory_limit: (1 << 32) - 1,
rpc_tx_fee_cap: constants::DEFAULT_TX_FEE_CAP_WEI,
rpc_max_simulate_blocks: constants::DEFAULT_MAX_SIMULATE_BLOCKS,
rpc_eth_proof_window: constants::DEFAULT_ETH_PROOF_WINDOW,
rpc_pending_block: PendingBlockKind::Full,
gas_price_oracle: GasPriceOracleArgs::default(),
rpc_state_cache: RpcStateCacheArgs::default(),
rpc_proof_permits: constants::DEFAULT_PROOF_PERMITS,
rpc_forwarder: None,
builder_disallow: Default::default(),
rpc_send_raw_transaction_sync_timeout:
constants::RPC_DEFAULT_SEND_RAW_TX_SYNC_TIMEOUT_SECS,
http,
http_addr,
http_port,
http_disable_compression,
http_api,
http_corsdomain,
ws,
ws_addr,
ws_port,
ws_allowed_origins,
ws_api,
ipcdisable,
ipcpath,
ipc_socket_permissions,
auth_addr,
auth_port,
auth_jwtsecret,
auth_ipc,
auth_ipc_path,
disable_auth_server,
rpc_jwtsecret,
rpc_max_request_size,
rpc_max_response_size,
rpc_max_subscriptions_per_connection,
rpc_max_connections,
rpc_max_tracing_requests,
rpc_max_blocking_io_requests,
rpc_max_trace_filter_blocks,
rpc_max_blocks_per_filter,
rpc_max_logs_per_response,
rpc_gas_cap,
rpc_evm_memory_limit,
rpc_tx_fee_cap,
rpc_max_simulate_blocks,
rpc_eth_proof_window,
rpc_proof_permits,
rpc_pending_block,
rpc_forwarder,
builder_disallow,
rpc_state_cache,
gas_price_oracle,
rpc_send_raw_transaction_sync_timeout,
}
}
}
@@ -542,4 +964,159 @@ mod tests {
let expected = 1_000_000_000_000_000_000u128;
assert_eq!(args.rpc_tx_fee_cap, expected); // 1 ETH default cap
}
#[test]
fn test_rpc_server_args() {
let args = RpcServerArgs {
http: true,
http_addr: "127.0.0.1".parse().unwrap(),
http_port: 8545,
http_disable_compression: false,
http_api: Some(RpcModuleSelection::try_from_selection(["eth", "admin"]).unwrap()),
http_corsdomain: Some("*".to_string()),
ws: true,
ws_addr: "127.0.0.1".parse().unwrap(),
ws_port: 8546,
ws_allowed_origins: Some("*".to_string()),
ws_api: Some(RpcModuleSelection::try_from_selection(["eth", "admin"]).unwrap()),
ipcdisable: false,
ipcpath: "reth.ipc".to_string(),
ipc_socket_permissions: Some("0o666".to_string()),
auth_addr: "127.0.0.1".parse().unwrap(),
auth_port: 8551,
auth_jwtsecret: Some(std::path::PathBuf::from("/tmp/jwt.hex")),
auth_ipc: false,
auth_ipc_path: "engine.ipc".to_string(),
disable_auth_server: false,
rpc_jwtsecret: Some(
JwtSecret::from_hex(
"0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
)
.unwrap(),
),
rpc_max_request_size: 15u32.into(),
rpc_max_response_size: 160u32.into(),
rpc_max_subscriptions_per_connection: 1024u32.into(),
rpc_max_connections: 500u32.into(),
rpc_max_tracing_requests: 16,
rpc_max_blocking_io_requests: 256,
rpc_max_trace_filter_blocks: 4000,
rpc_max_blocks_per_filter: 1000u64.into(),
rpc_max_logs_per_response: 10000u64.into(),
rpc_gas_cap: 50_000_000,
rpc_evm_memory_limit: 256,
rpc_tx_fee_cap: 2_000_000_000_000_000_000u128,
rpc_max_simulate_blocks: 256,
rpc_eth_proof_window: 100_000,
rpc_proof_permits: 16,
rpc_pending_block: PendingBlockKind::Full,
rpc_forwarder: Some("http://localhost:8545".parse().unwrap()),
builder_disallow: None,
rpc_state_cache: RpcStateCacheArgs {
max_blocks: 5000,
max_receipts: 2000,
max_headers: 1000,
max_concurrent_db_requests: 512,
},
gas_price_oracle: GasPriceOracleArgs {
blocks: 20,
ignore_price: 2,
max_price: 500_000_000_000,
percentile: 60,
default_suggested_fee: None,
},
rpc_send_raw_transaction_sync_timeout: std::time::Duration::from_secs(30),
};
let parsed_args = CommandParser::<RpcServerArgs>::parse_from([
"reth",
"--http",
"--http.addr",
"127.0.0.1",
"--http.port",
"8545",
"--http.api",
"eth,admin",
"--http.corsdomain",
"*",
"--ws",
"--ws.addr",
"127.0.0.1",
"--ws.port",
"8546",
"--ws.origins",
"*",
"--ws.api",
"eth,admin",
"--ipcpath",
"reth.ipc",
"--ipc.permissions",
"0o666",
"--authrpc.addr",
"127.0.0.1",
"--authrpc.port",
"8551",
"--authrpc.jwtsecret",
"/tmp/jwt.hex",
"--auth-ipc.path",
"engine.ipc",
"--rpc.jwtsecret",
"0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
"--rpc.max-request-size",
"15",
"--rpc.max-response-size",
"160",
"--rpc.max-subscriptions-per-connection",
"1024",
"--rpc.max-connections",
"500",
"--rpc.max-tracing-requests",
"16",
"--rpc.max-blocking-io-requests",
"256",
"--rpc.max-trace-filter-blocks",
"4000",
"--rpc.max-blocks-per-filter",
"1000",
"--rpc.max-logs-per-response",
"10000",
"--rpc.gascap",
"50000000",
"--rpc.evm-memory-limit",
"256",
"--rpc.txfeecap",
"2.0",
"--rpc.max-simulate-blocks",
"256",
"--rpc.eth-proof-window",
"100000",
"--rpc.proof-permits",
"16",
"--rpc.pending-block",
"full",
"--rpc.forwarder",
"http://localhost:8545",
"--rpc-cache.max-blocks",
"5000",
"--rpc-cache.max-receipts",
"2000",
"--rpc-cache.max-headers",
"1000",
"--rpc-cache.max-concurrent-db-requests",
"512",
"--gpo.blocks",
"20",
"--gpo.ignoreprice",
"2",
"--gpo.maxprice",
"500000000000",
"--gpo.percentile",
"60",
"--rpc.send-raw-transaction-sync-timeout",
"30s",
])
.args;
assert_eq!(parsed_args, args);
}
}

View File

@@ -70,7 +70,7 @@ use reth_chainspec::{
};
use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition};
use reth_network_peers::NodeRecord;
use reth_optimism_primitives::ADDRESS_L2_TO_L1_MESSAGE_PASSER;
use reth_optimism_primitives::L2_TO_L1_MESSAGE_PASSER_ADDRESS;
use reth_primitives_traits::{sync::LazyLock, SealedHeader};
/// Chain spec builder for a OP stack chain.
@@ -499,7 +499,7 @@ pub fn make_op_genesis_header(genesis: &Genesis, hardforks: &ChainHardforks) ->
// If Isthmus is active, overwrite the withdrawals root with the storage root of predeploy
// `L2ToL1MessagePasser.sol`
if hardforks.fork(OpHardfork::Isthmus).active_at_timestamp(header.timestamp) &&
let Some(predeploy) = genesis.alloc.get(&ADDRESS_L2_TO_L1_MESSAGE_PASSER) &&
let Some(predeploy) = genesis.alloc.get(&L2_TO_L1_MESSAGE_PASSER_ADDRESS) &&
let Some(storage) = &predeploy.storage
{
header.withdrawals_root =

View File

@@ -98,7 +98,7 @@ mod tests {
OP_SEPOLIA_CANYON_TIMESTAMP, OP_SEPOLIA_ECOTONE_TIMESTAMP, OP_SEPOLIA_ISTHMUS_TIMESTAMP,
OP_SEPOLIA_JOVIAN_TIMESTAMP,
};
use reth_optimism_primitives::ADDRESS_L2_TO_L1_MESSAGE_PASSER;
use reth_optimism_primitives::L2_TO_L1_MESSAGE_PASSER_ADDRESS;
use tar_no_std::TarArchiveRef;
#[test]
@@ -106,7 +106,7 @@ mod tests {
let genesis = read_superchain_genesis("unichain", "mainnet").unwrap();
assert_eq!(genesis.config.chain_id, 130);
assert_eq!(genesis.timestamp, 1730748359);
assert!(genesis.alloc.contains_key(&ADDRESS_L2_TO_L1_MESSAGE_PASSER));
assert!(genesis.alloc.contains_key(&L2_TO_L1_MESSAGE_PASSER_ADDRESS));
}
#[test]
@@ -114,7 +114,7 @@ mod tests {
let genesis = read_superchain_genesis("funki", "mainnet").unwrap();
assert_eq!(genesis.config.chain_id, 33979);
assert_eq!(genesis.timestamp, 1721211095);
assert!(genesis.alloc.contains_key(&ADDRESS_L2_TO_L1_MESSAGE_PASSER));
assert!(genesis.alloc.contains_key(&L2_TO_L1_MESSAGE_PASSER_ADDRESS));
}
#[test]

View File

@@ -241,7 +241,7 @@ mod tests {
use alloy_consensus::{BlockBody, Eip658Value, Header, Receipt, TxEip7702, TxReceipt};
use alloy_eips::{eip4895::Withdrawals, eip7685::Requests};
use alloy_primitives::{Address, Bytes, Signature, U256};
use alloy_primitives::{Address, Bytes, Log, Signature, U256};
use op_alloy_consensus::{
encode_holocene_extra_data, encode_jovian_extra_data, OpTypedTransaction,
};
@@ -367,7 +367,7 @@ mod tests {
let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec));
let receipt = OpReceipt::Eip7702(Receipt {
let receipt = OpReceipt::Eip7702(Receipt::<Log> {
status: Eip658Value::success(),
cumulative_gas_used: GAS_USED,
logs: vec![],
@@ -436,7 +436,7 @@ mod tests {
let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec));
let receipt = OpReceipt::Eip7702(Receipt {
let receipt = OpReceipt::Eip7702(Receipt::<Log> {
status: Eip658Value::success(),
cumulative_gas_used: GAS_USED,
logs: vec![],
@@ -451,7 +451,9 @@ mod tests {
)),
gas_used: GAS_USED,
timestamp: u64::MAX,
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)),
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(
&receipt.with_bloom_ref(),
)),
logs_bloom: receipt.bloom(),
..Default::default()
};
@@ -509,7 +511,7 @@ mod tests {
let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec));
let receipt = OpReceipt::Eip7702(Receipt {
let receipt = OpReceipt::Eip7702(Receipt::<Log> {
status: Eip658Value::success(),
cumulative_gas_used: 0,
logs: vec![],
@@ -526,7 +528,9 @@ mod tests {
)),
gas_used: 0,
timestamp: u64::MAX - 1,
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)),
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(
&receipt.with_bloom_ref(),
)),
logs_bloom: receipt.bloom(),
extra_data: encode_jovian_extra_data(
Default::default(),
@@ -549,7 +553,9 @@ mod tests {
)),
gas_used: 0,
timestamp: u64::MAX,
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)),
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(
&receipt.with_bloom_ref(),
)),
logs_bloom: receipt.bloom(),
parent_hash: parent.hash(),
..Default::default()
@@ -576,7 +582,7 @@ mod tests {
let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec));
let receipt = OpReceipt::Eip7702(Receipt {
let receipt = OpReceipt::Eip7702(Receipt::<Log> {
status: Eip658Value::success(),
cumulative_gas_used: 0,
logs: vec![],
@@ -593,7 +599,9 @@ mod tests {
)),
gas_used: 0,
timestamp: u64::MAX - 1,
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)),
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(
&receipt.with_bloom_ref(),
)),
logs_bloom: receipt.bloom(),
extra_data: encode_jovian_extra_data(
Default::default(),
@@ -616,7 +624,9 @@ mod tests {
)),
gas_used: 0,
timestamp: u64::MAX,
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)),
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(
&receipt.with_bloom_ref(),
)),
logs_bloom: receipt.bloom(),
parent_hash: parent.hash(),
..Default::default()
@@ -652,7 +662,7 @@ mod tests {
let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec));
let receipt = OpReceipt::Eip7702(Receipt {
let receipt = OpReceipt::Eip7702(Receipt::<Log> {
status: Eip658Value::success(),
cumulative_gas_used: 0,
logs: vec![],
@@ -669,7 +679,9 @@ mod tests {
)),
gas_used: 0,
timestamp: u64::MAX - 1,
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)),
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(
&receipt.with_bloom_ref(),
)),
logs_bloom: receipt.bloom(),
extra_data: encode_jovian_extra_data(
Default::default(),
@@ -693,7 +705,9 @@ mod tests {
)),
gas_used: 0,
timestamp: u64::MAX,
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)),
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(
&receipt.with_bloom_ref(),
)),
logs_bloom: receipt.bloom(),
parent_hash: parent.hash(),
..Default::default()
@@ -722,7 +736,7 @@ mod tests {
let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec));
let receipt = OpReceipt::Eip7702(Receipt {
let receipt = OpReceipt::Eip7702(Receipt::<Log> {
status: Eip658Value::success(),
cumulative_gas_used: 0,
logs: vec![],
@@ -739,7 +753,9 @@ mod tests {
)),
gas_used: 0,
timestamp: u64::MAX - 1,
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)),
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(
&receipt.with_bloom_ref(),
)),
logs_bloom: receipt.bloom(),
extra_data: encode_holocene_extra_data(Default::default(), BaseFeeParams::optimism())
.unwrap(),
@@ -759,7 +775,9 @@ mod tests {
)),
gas_used: 0,
timestamp: u64::MAX,
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(&receipt)),
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(
&receipt.with_bloom_ref(),
)),
logs_bloom: receipt.bloom(),
parent_hash: parent.hash(),
..Default::default()

View File

@@ -4,7 +4,7 @@ use crate::OpConsensusError;
use alloy_consensus::BlockHeader;
use alloy_primitives::B256;
use alloy_trie::EMPTY_ROOT_HASH;
use reth_optimism_primitives::ADDRESS_L2_TO_L1_MESSAGE_PASSER;
use reth_optimism_primitives::L2_TO_L1_MESSAGE_PASSER_ADDRESS;
use reth_storage_api::{errors::ProviderResult, StorageRootProvider};
use reth_trie_common::HashedStorage;
use revm::database::BundleState;
@@ -32,7 +32,7 @@ pub fn withdrawals_root<DB: StorageRootProvider>(
withdrawals_root_prehashed(
state_updates
.state()
.get(&ADDRESS_L2_TO_L1_MESSAGE_PASSER)
.get(&L2_TO_L1_MESSAGE_PASSER_ADDRESS)
.map(|acc| {
HashedStorage::from_plain_storage(
acc.status,
@@ -52,7 +52,7 @@ pub fn withdrawals_root_prehashed<DB: StorageRootProvider>(
hashed_storage_updates: HashedStorage,
state: DB,
) -> ProviderResult<B256> {
state.storage_root(ADDRESS_L2_TO_L1_MESSAGE_PASSER, hashed_storage_updates)
state.storage_root(L2_TO_L1_MESSAGE_PASSER_ADDRESS, hashed_storage_updates)
}
/// Verifies block header field `withdrawals_root` against storage root of
@@ -146,7 +146,7 @@ mod test {
#[test]
fn l2tol1_message_passer_no_withdrawals() {
let hashed_address = keccak256(ADDRESS_L2_TO_L1_MESSAGE_PASSER);
let hashed_address = keccak256(L2_TO_L1_MESSAGE_PASSER_ADDRESS);
// create account storage
let init_storage = HashedStorage::from_iter(

View File

@@ -486,14 +486,14 @@ mod tests {
block2.set_hash(block2_hash);
// Create a random receipt object, receipt1
let receipt1 = OpReceipt::Legacy(Receipt {
let receipt1 = OpReceipt::Legacy(Receipt::<Log> {
cumulative_gas_used: 46913,
logs: vec![],
status: true.into(),
});
// Create another random receipt object, receipt2
let receipt2 = OpReceipt::Legacy(Receipt {
let receipt2 = OpReceipt::Legacy(Receipt::<Log> {
cumulative_gas_used: 1325345,
logs: vec![],
status: true.into(),
@@ -544,7 +544,7 @@ mod tests {
);
// Create a Receipts object with a vector of receipt vectors
let receipts = vec![vec![Some(OpReceipt::Legacy(Receipt {
let receipts = vec![vec![Some(OpReceipt::Legacy(Receipt::<Log> {
cumulative_gas_used: 46913,
logs: vec![],
status: true.into(),
@@ -602,7 +602,7 @@ mod tests {
#[test]
fn test_block_number_to_index() {
// Create a Receipts object with a vector of receipt vectors
let receipts = vec![vec![Some(OpReceipt::Legacy(Receipt {
let receipts = vec![vec![Some(OpReceipt::Legacy(Receipt::<Log> {
cumulative_gas_used: 46913,
logs: vec![],
status: true.into(),
@@ -633,7 +633,7 @@ mod tests {
#[test]
fn test_get_logs() {
// Create a Receipts object with a vector of receipt vectors
let receipts = vec![vec![OpReceipt::Legacy(Receipt {
let receipts = vec![vec![OpReceipt::Legacy(Receipt::<Log> {
cumulative_gas_used: 46913,
logs: vec![Log::<LogData>::default()],
status: true.into(),
@@ -661,7 +661,7 @@ mod tests {
#[test]
fn test_receipts_by_block() {
// Create a Receipts object with a vector of receipt vectors
let receipts = vec![vec![Some(OpReceipt::Legacy(Receipt {
let receipts = vec![vec![Some(OpReceipt::Legacy(Receipt::<Log> {
cumulative_gas_used: 46913,
logs: vec![Log::<LogData>::default()],
status: true.into(),
@@ -685,7 +685,7 @@ mod tests {
// Assert that the receipts for block number 123 match the expected receipts
assert_eq!(
receipts_by_block,
vec![&Some(OpReceipt::Legacy(Receipt {
vec![&Some(OpReceipt::Legacy(Receipt::<Log> {
cumulative_gas_used: 46913,
logs: vec![Log::<LogData>::default()],
status: true.into(),
@@ -696,7 +696,7 @@ mod tests {
#[test]
fn test_receipts_len() {
// Create a Receipts object with a vector of receipt vectors
let receipts = vec![vec![Some(OpReceipt::Legacy(Receipt {
let receipts = vec![vec![Some(OpReceipt::Legacy(Receipt::<Log> {
cumulative_gas_used: 46913,
logs: vec![Log::<LogData>::default()],
status: true.into(),
@@ -741,7 +741,7 @@ mod tests {
#[test]
fn test_revert_to() {
// Create a random receipt object
let receipt = OpReceipt::Legacy(Receipt {
let receipt = OpReceipt::Legacy(Receipt::<Log> {
cumulative_gas_used: 46913,
logs: vec![],
status: true.into(),
@@ -786,7 +786,7 @@ mod tests {
#[test]
fn test_extend_execution_outcome() {
// Create a Receipt object with specific attributes.
let receipt = OpReceipt::Legacy(Receipt {
let receipt = OpReceipt::Legacy(Receipt::<Log> {
cumulative_gas_used: 46913,
logs: vec![],
status: true.into(),
@@ -826,7 +826,7 @@ mod tests {
#[test]
fn test_split_at_execution_outcome() {
// Create a random receipt object
let receipt = OpReceipt::Legacy(Receipt {
let receipt = OpReceipt::Legacy(Receipt::<Log> {
cumulative_gas_used: 46913,
logs: vec![],
status: true.into(),

View File

@@ -18,7 +18,7 @@ use reth_node_api::{
use reth_optimism_consensus::isthmus;
use reth_optimism_forks::OpHardforks;
use reth_optimism_payload_builder::{OpExecutionPayloadValidator, OpPayloadTypes};
use reth_optimism_primitives::{OpBlock, ADDRESS_L2_TO_L1_MESSAGE_PASSER};
use reth_optimism_primitives::{OpBlock, L2_TO_L1_MESSAGE_PASSER_ADDRESS};
use reth_primitives_traits::{Block, RecoveredBlock, SealedBlock, SignedTransaction};
use reth_provider::StateProviderFactory;
use reth_trie_common::{HashedPostState, KeyHasher};
@@ -76,7 +76,7 @@ pub struct OpEngineValidator<P, Tx, ChainSpec> {
impl<P, Tx, ChainSpec> OpEngineValidator<P, Tx, ChainSpec> {
/// Instantiates a new validator.
pub fn new<KH: KeyHasher>(chain_spec: Arc<ChainSpec>, provider: P) -> Self {
let hashed_addr_l2tol1_msg_passer = KH::hash_key(ADDRESS_L2_TO_L1_MESSAGE_PASSER);
let hashed_addr_l2tol1_msg_passer = KH::hash_key(L2_TO_L1_MESSAGE_PASSER_ADDRESS);
Self {
inner: OpExecutionPayloadValidator::new(chain_spec),
provider,

View File

@@ -20,7 +20,7 @@ use reth_evm::{
};
use reth_execution_types::ExecutionOutcome;
use reth_optimism_forks::OpHardforks;
use reth_optimism_primitives::{transaction::OpTransaction, ADDRESS_L2_TO_L1_MESSAGE_PASSER};
use reth_optimism_primitives::{transaction::OpTransaction, L2_TO_L1_MESSAGE_PASSER_ADDRESS};
use reth_optimism_txpool::{
estimated_da_size::DataAvailabilitySized,
interop::{is_valid_interop, MaybeInteropTransaction},
@@ -435,7 +435,7 @@ impl<Txs> OpBuilder<'_, Txs> {
if ctx.chain_spec.is_isthmus_active_at_timestamp(ctx.attributes().timestamp()) {
// force load `L2ToL1MessagePasser.sol` so l2 withdrawals root can be computed even if
// no l2 withdrawals in block
_ = db.load_cache_account(ADDRESS_L2_TO_L1_MESSAGE_PASSER)?;
_ = db.load_cache_account(L2_TO_L1_MESSAGE_PASSER_ADDRESS)?;
}
let ExecutionWitnessRecord { hashed_state, codes, keys, lowest_block_number: _ } =

View File

@@ -13,8 +13,8 @@ extern crate alloc;
pub mod bedrock;
pub mod predeploys;
pub use predeploys::ADDRESS_L2_TO_L1_MESSAGE_PASSER;
// Re-export predeploys from op-alloy-consensus
pub use op_alloy_consensus::L2_TO_L1_MESSAGE_PASSER_ADDRESS;
pub mod transaction;
pub use transaction::*;

View File

@@ -1,8 +0,0 @@
//! Addresses of OP pre-deploys.
// todo: move to op-alloy
use alloy_primitives::{address, Address};
/// The L2 contract `L2ToL1MessagePasser`, stores commitments to withdrawal transactions.
pub const ADDRESS_L2_TO_L1_MESSAGE_PASSER: Address =
address!("0x4200000000000000000000000000000000000016");

View File

@@ -176,7 +176,7 @@ mod tests {
let mut data = Vec::with_capacity(expected.length());
let receipt = ReceiptWithBloom {
receipt: OpReceipt::Legacy(Receipt {
receipt: OpReceipt::Legacy(Receipt::<Log> {
status: Eip658Value::Eip658(false),
cumulative_gas_used: 0x1,
logs: vec![Log::new_unchecked(
@@ -207,7 +207,7 @@ mod tests {
// EIP658Receipt
let expected = ReceiptWithBloom {
receipt: OpReceipt::Legacy(Receipt {
receipt: OpReceipt::Legacy(Receipt::<Log> {
status: Eip658Value::Eip658(false),
cumulative_gas_used: 0x1,
logs: vec![Log::new_unchecked(
@@ -235,7 +235,7 @@ mod tests {
// Deposit Receipt (post-regolith)
let expected = ReceiptWithBloom {
receipt: OpReceipt::Deposit(OpDepositReceipt {
inner: Receipt {
inner: Receipt::<Log> {
status: Eip658Value::Eip658(true),
cumulative_gas_used: 46913,
logs: vec![],
@@ -260,10 +260,10 @@ mod tests {
"b901117ef9010d0182b741b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0833d3bbf01"
);
// Deposit Receipt (post-regolith)
// Deposit Receipt (post-canyon)
let expected = ReceiptWithBloom {
receipt: OpReceipt::Deposit(OpDepositReceipt {
inner: Receipt {
inner: Receipt::<Log> {
status: Eip658Value::Eip658(true),
cumulative_gas_used: 46913,
logs: vec![],
@@ -284,7 +284,7 @@ mod tests {
#[test]
fn gigantic_receipt() {
let receipt = OpReceipt::Legacy(Receipt {
let receipt = OpReceipt::Legacy(Receipt::<Log> {
status: Eip658Value::Eip658(true),
cumulative_gas_used: 16747627,
logs: vec![
@@ -314,7 +314,7 @@ mod tests {
#[test]
fn test_encode_2718_length() {
let receipt = ReceiptWithBloom {
receipt: OpReceipt::Eip1559(Receipt {
receipt: OpReceipt::Eip1559(Receipt::<Log> {
status: Eip658Value::Eip658(true),
cumulative_gas_used: 21000,
logs: vec![],
@@ -331,7 +331,7 @@ mod tests {
// Test for legacy receipt as well
let legacy_receipt = ReceiptWithBloom {
receipt: OpReceipt::Legacy(Receipt {
receipt: OpReceipt::Legacy(Receipt::<Log> {
status: Eip658Value::Eip658(true),
cumulative_gas_used: 21000,
logs: vec![],

View File

@@ -272,6 +272,11 @@ where
fn tracing_task_guard(&self) -> &BlockingTaskGuard {
self.inner.eth_api.blocking_task_guard()
}
#[inline]
fn blocking_io_task_guard(&self) -> &Arc<tokio::sync::Semaphore> {
    // Delegates to the inner eth API's semaphore, which bounds the number of
    // concurrently running blocking-IO RPC requests.
    self.inner.eth_api.blocking_io_request_semaphore()
}
}
impl<N, Rpc> LoadFee for OpEthApi<N, Rpc>

View File

@@ -1,17 +1,16 @@
//! Loads and formats OP receipt RPC response.
use crate::{eth::RpcNodeCore, OpEthApi, OpEthApiError};
use alloy_consensus::{BlockHeader, Receipt, TxReceipt};
use alloy_consensus::{BlockHeader, Receipt, ReceiptWithBloom, TxReceipt};
use alloy_eips::eip2718::Encodable2718;
use alloy_rpc_types_eth::{Log, TransactionReceipt};
use op_alloy_consensus::{OpReceiptEnvelope, OpTransaction};
use op_alloy_consensus::{OpReceipt, OpTransaction};
use op_alloy_rpc_types::{L1BlockInfo, OpTransactionReceipt, OpTransactionReceiptFields};
use op_revm::estimate_tx_compressed_size;
use reth_chainspec::ChainSpecProvider;
use reth_node_api::NodePrimitives;
use reth_optimism_evm::RethL1BlockInfo;
use reth_optimism_forks::OpHardforks;
use reth_optimism_primitives::OpReceipt;
use reth_primitives_traits::SealedBlock;
use reth_rpc_eth_api::{
helpers::LoadReceipt,
@@ -270,7 +269,7 @@ impl OpReceiptFieldsBuilder {
#[derive(Debug)]
pub struct OpReceiptBuilder {
/// Core receipt, has all the fields of an L1 receipt and is the basis for the OP receipt.
pub core_receipt: TransactionReceipt<OpReceiptEnvelope<Log>>,
pub core_receipt: TransactionReceipt<ReceiptWithBloom<OpReceipt<Log>>>,
/// Additional OP receipt fields.
pub op_receipt_fields: OpTransactionReceiptFields,
}
@@ -294,24 +293,14 @@ impl OpReceiptBuilder {
let logs = Log::collect_for_receipt(next_log_index, meta, logs);
Receipt { status, cumulative_gas_used, logs }
};
match receipt {
OpReceipt::Legacy(receipt) => {
OpReceiptEnvelope::Legacy(map_logs(receipt).into_with_bloom())
}
OpReceipt::Eip2930(receipt) => {
OpReceiptEnvelope::Eip2930(map_logs(receipt).into_with_bloom())
}
OpReceipt::Eip1559(receipt) => {
OpReceiptEnvelope::Eip1559(map_logs(receipt).into_with_bloom())
}
OpReceipt::Eip7702(receipt) => {
OpReceiptEnvelope::Eip7702(map_logs(receipt).into_with_bloom())
}
OpReceipt::Deposit(receipt) => {
OpReceiptEnvelope::Deposit(receipt.map_inner(map_logs).into_with_bloom())
}
}
let mapped_receipt: OpReceipt<Log> = match receipt {
OpReceipt::Legacy(receipt) => OpReceipt::Legacy(map_logs(receipt)),
OpReceipt::Eip2930(receipt) => OpReceipt::Eip2930(map_logs(receipt)),
OpReceipt::Eip1559(receipt) => OpReceipt::Eip1559(map_logs(receipt)),
OpReceipt::Eip7702(receipt) => OpReceipt::Eip7702(map_logs(receipt)),
OpReceipt::Deposit(receipt) => OpReceipt::Deposit(receipt.map_inner(map_logs)),
};
mapped_receipt.into_with_bloom()
});
// In jovian, we're using the blob gas used field to store the current da

View File

@@ -3,7 +3,7 @@
use crate::{MessageValidationKind, PayloadAttributes};
use alloc::vec::Vec;
use alloy_eips::{eip1898::BlockWithParent, eip4895::Withdrawal, eip7685::Requests, BlockNumHash};
use alloy_primitives::B256;
use alloy_primitives::{Bytes, B256};
use alloy_rpc_types_engine::ExecutionData;
use core::fmt::Debug;
use serde::{de::DeserializeOwned, Serialize};
@@ -40,6 +40,11 @@ pub trait ExecutionPayload:
/// Returns `None` for pre-Shanghai blocks.
fn withdrawals(&self) -> Option<&Vec<Withdrawal>>;
/// Returns the access list included in this payload.
///
/// Returns `None` for pre-Amsterdam blocks.
fn block_access_list(&self) -> Option<&Bytes>;
/// Returns the beacon block root associated with this payload.
///
/// Returns `None` for pre-merge payloads.
@@ -69,6 +74,10 @@ impl ExecutionPayload for ExecutionData {
self.payload.withdrawals()
}
fn block_access_list(&self) -> Option<&Bytes> {
    // `ExecutionData` carries no block access list; per the trait contract this
    // is the pre-Amsterdam case, so always `None`.
    None
}
fn parent_beacon_block_root(&self) -> Option<B256> {
self.sidecar.parent_beacon_block_root()
}
@@ -172,6 +181,10 @@ impl ExecutionPayload for op_alloy_rpc_types_engine::OpExecutionData {
self.payload.as_v2().map(|p| &p.withdrawals)
}
fn block_access_list(&self) -> Option<&Bytes> {
    // OP execution payloads do not include a block access list, so always `None`.
    None
}
fn parent_beacon_block_root(&self) -> Option<B256> {
self.sidecar.parent_beacon_block_root()
}

View File

@@ -218,6 +218,27 @@ pub trait BlockBody:
.zip(signers)
.map(|(tx, signer)| Recovered::new_unchecked(tx, signer)))
}
/// Returns an iterator over `Recovered<&Self::Transaction>` for all transactions in the
/// block body _without ensuring that the signature has a low `s` value_.
///
/// Signers are recovered up front in bulk; the returned iterator then pairs each
/// borrowed transaction with its recovered signer. No transaction is cloned, which
/// makes this cheaper than recovery into owned transactions when owned values are
/// not required.
///
/// # Errors
///
/// Returns an error if any transaction's signature is invalid.
fn recover_transactions_unchecked_ref(
    &self,
) -> Result<impl Iterator<Item = Recovered<&Self::Transaction>> + '_, RecoveryError> {
    // Recover all signers first so a bad signature surfaces as an error here
    // rather than mid-iteration.
    let recovered_signers = self.recover_signers_unchecked()?;
    let borrowed_txs = self.transactions().iter();
    let paired = borrowed_txs
        .zip(recovered_signers)
        .map(|(transaction, signer)| Recovered::new_unchecked(transaction, signer));
    Ok(paired)
}
}
impl<T, H> BlockBody for alloy_consensus::BlockBody<T, H>

View File

@@ -164,7 +164,7 @@ pub use alloy_primitives::{logs_bloom, Log, LogData};
pub mod proofs;
mod storage;
pub use storage::StorageEntry;
pub use storage::{StorageEntry, ValueWithSubKey};
pub mod sync;

View File

@@ -1,5 +1,17 @@
use alloy_primitives::{B256, U256};
/// Trait for `DupSort` table values that contain a subkey.
///
/// This trait allows extracting the subkey from a value during database iteration,
/// enabling proper range queries and filtering on `DupSort` tables. The subkey is
/// presumably the field the database uses to order duplicate values under the same
/// primary key — confirm against the `DupSort` table definitions.
pub trait ValueWithSubKey {
    /// The type of the subkey.
    type SubKey;

    /// Extract the subkey from the value.
    fn get_subkey(&self) -> Self::SubKey;
}
/// Account storage entry.
///
/// `key` is the subkey when used as a value in the `StorageChangeSets` table.
@@ -21,6 +33,14 @@ impl StorageEntry {
}
}
/// A [`StorageEntry`] used as a `DupSort` value is sub-keyed by its storage slot,
/// i.e. the `key` field.
impl ValueWithSubKey for StorageEntry {
    type SubKey = B256;

    fn get_subkey(&self) -> Self::SubKey {
        // `B256` is `Copy`, so returning the slot by value is free.
        let Self { key, .. } = self;
        *key
    }
}
impl From<(B256, U256)> for StorageEntry {
fn from((key, value): (B256, U256)) -> Self {
Self { key, value }

View File

@@ -1,14 +1,14 @@
use crate::PruneLimiter;
use reth_db_api::{
cursor::{DbCursorRO, DbCursorRW, RangeWalker},
table::{Table, TableRow},
transaction::DbTxMut,
table::{DupSort, Table, TableRow},
transaction::{DbTx, DbTxMut},
DatabaseError,
};
use std::{fmt::Debug, ops::RangeBounds};
use tracing::debug;
pub(crate) trait DbTxPruneExt: DbTxMut {
pub(crate) trait DbTxPruneExt: DbTxMut + DbTx {
/// Prune the table for the specified pre-sorted key iterator.
///
/// Returns number of rows pruned.
@@ -123,9 +123,55 @@ pub(crate) trait DbTxPruneExt: DbTxMut {
Ok(false)
}
/// Prune a DUPSORT table for the specified key range.
///
/// Walks the range with a duplicate-aware write cursor and, for each key visited,
/// deletes **all** duplicate values stored under that key.
///
/// Returns a tuple of:
/// * the number of rows pruned (computed as the difference in total table entries
///   before and after the walk), and
/// * `true` if the walker exhausted the range, `false` if the limiter stopped it early.
fn prune_dupsort_table_with_range<T: DupSort>(
    &self,
    keys: impl RangeBounds<T::Key> + Clone + Debug,
    limiter: &mut PruneLimiter,
    // Invoked once per visited row, after its duplicates have been deleted.
    mut delete_callback: impl FnMut(TableRow<T>),
) -> Result<(usize, bool), DatabaseError> {
    // Snapshot entry count up front; the pruned-row count is derived from the
    // before/after difference rather than counted per delete.
    // NOTE(review): assumes no concurrent writers change the entry count during
    // the walk, otherwise the subtraction could be wrong — confirm tx isolation.
    let starting_entries = self.entries::<T>()?;
    let mut cursor = self.cursor_dup_write::<T>()?;
    let mut walker = cursor.walk_range(keys)?;
    let done = loop {
        // Check the limiter *before* each step so we never overshoot the
        // deleted-entries or time budget.
        if limiter.is_limit_reached() {
            debug!(
                target: "providers::db",
                ?limiter,
                deleted_entries_limit = %limiter.is_deleted_entries_limit_reached(),
                time_limit = %limiter.is_time_limit_reached(),
                table = %T::NAME,
                "Pruning limit reached"
            );
            break false
        }
        // Range exhausted: the whole requested key range was pruned.
        let Some(res) = walker.next() else { break true };
        let row = res?;
        // Deletes every duplicate value under the current key in one call.
        walker.delete_current_duplicates()?;
        // NOTE(review): this increments once per *key* while the call above may
        // remove several duplicate rows, so the limiter may undercount actual
        // deletions relative to the (entries-diff) return value — confirm intent.
        limiter.increment_deleted_entries_count();
        delete_callback(row);
    };
    debug!(
        target: "providers::db",
        table=?T::NAME,
        cursor_current=?cursor.current(),
        "done walking",
    );
    let ending_entries = self.entries::<T>()?;
    Ok((starting_entries - ending_entries, done))
}
}
impl<Tx> DbTxPruneExt for Tx where Tx: DbTxMut {}
impl<Tx> DbTxPruneExt for Tx where Tx: DbTxMut + DbTx {}
#[cfg(test)]
mod tests {

View File

@@ -71,10 +71,9 @@ where
let mut last_storages_pruned_block = None;
let (storages_pruned, done) =
provider.tx_ref().prune_table_with_range::<tables::StoragesTrieChangeSets>(
provider.tx_ref().prune_dupsort_table_with_range::<tables::StoragesTrieChangeSets>(
storage_range,
&mut limiter,
|_| false,
|(BlockNumberHashedAddress((block_number, _)), _)| {
last_storages_pruned_block = Some(block_number);
},
@@ -90,10 +89,9 @@ where
.unwrap_or(block_range_end);
let (accounts_pruned, done) =
provider.tx_ref().prune_table_with_range::<tables::AccountsTrieChangeSets>(
provider.tx_ref().prune_dupsort_table_with_range::<tables::AccountsTrieChangeSets>(
block_range,
&mut limiter,
|_| false,
|row| last_accounts_pruned_block = row.0,
)?;

View File

@@ -107,18 +107,13 @@ impl RpcServiceT for RpcService {
fn batch<'a>(&self, req: Batch<'a>) -> impl Future<Output = Self::BatchResponse> + Send + 'a {
let entries: Vec<_> = req.into_iter().collect();
let mut got_notif = false;
let mut batch_response = BatchResponseBuilder::new_with_limit(self.max_response_body_size);
let mut pending_calls: FuturesOrdered<_> = entries
.into_iter()
.filter_map(|v| match v {
Ok(BatchEntry::Call(call)) => Some(Either::Right(self.call(call))),
Ok(BatchEntry::Notification(_n)) => {
got_notif = true;
None
}
Ok(BatchEntry::Notification(_n)) => None,
Err(_err) => Some(Either::Left(async {
MethodResponse::error(Id::Null, ErrorObject::from(ErrorCode::InvalidRequest))
})),

View File

@@ -35,9 +35,11 @@ alloy-serde.workspace = true
alloy-rpc-types-beacon.workspace = true
alloy-rpc-types-engine.workspace = true
alloy-genesis.workspace = true
serde = { workspace = true, features = ["derive"] }
# misc
jsonrpsee = { workspace = true, features = ["server", "macros"] }
serde_json.workspace = true
[features]
client = [
@@ -45,3 +47,8 @@ client = [
"jsonrpsee/async-client",
"reth-rpc-eth-api/client",
]
[dev-dependencies]
serde_json = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
jsonrpsee = { workspace = true, features = ["client", "async-client", "http-client"] }

View File

@@ -3,7 +3,7 @@ use alloy_genesis::ChainConfig;
use alloy_json_rpc::RpcObject;
use alloy_primitives::{Address, Bytes, B256};
use alloy_rpc_types_debug::ExecutionWitness;
use alloy_rpc_types_eth::{Block, Bundle, StateContext};
use alloy_rpc_types_eth::{Bundle, StateContext};
use alloy_rpc_types_trace::geth::{
BlockTraceResult, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, TraceResult,
};
@@ -38,7 +38,7 @@ pub trait DebugApi<TxReq: RpcObject> {
/// Returns an array of recent bad blocks that the client has seen on the network.
#[method(name = "getBadBlocks")]
async fn bad_blocks(&self) -> RpcResult<Vec<Block>>;
async fn bad_blocks(&self) -> RpcResult<Vec<serde_json::Value>>;
/// Returns the structured logs created during the execution of EVM between two blocks
/// (excluding start) as a JSON object.

View File

@@ -25,11 +25,14 @@ mod net;
mod otterscan;
mod reth;
mod rpc;
mod testing;
mod trace;
mod txpool;
mod validation;
mod web3;
pub use testing::{TestingBuildBlockRequestV1, TESTING_BUILD_BLOCK_V1};
/// re-export of all server traits
pub use servers::*;
@@ -45,6 +48,7 @@ pub mod servers {
otterscan::OtterscanServer,
reth::RethApiServer,
rpc::RpcApiServer,
testing::TestingApiServer,
trace::TraceApiServer,
txpool::TxPoolApiServer,
validation::BlockSubmissionValidationApiServer,
@@ -75,6 +79,7 @@ pub mod clients {
otterscan::OtterscanClient,
reth::RethApiClient,
rpc::RpcApiServer,
testing::TestingApiClient,
trace::TraceApiClient,
txpool::TxPoolApiClient,
validation::BlockSubmissionValidationApiClient,

View File

@@ -0,0 +1,45 @@
//! Testing namespace for building a block in a single call.
//!
//! This follows the `testing_buildBlockV1` specification. **Highly sensitive:**
//! testing-only, powerful enough to include arbitrary transactions; must stay
//! disabled by default and never be exposed on public-facing RPC without an
//! explicit operator flag.
use alloy_primitives::{Bytes, B256};
use alloy_rpc_types_engine::{
ExecutionPayloadEnvelopeV5, PayloadAttributes as EthPayloadAttributes,
};
use jsonrpsee::proc_macros::rpc;
use serde::{Deserialize, Serialize};
/// Capability string for `testing_buildBlockV1`.
pub const TESTING_BUILD_BLOCK_V1: &str = "testing_buildBlockV1";
/// Request payload for `testing_buildBlockV1`.
///
/// Serialized with camelCase field names to match the JSON-RPC wire format.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TestingBuildBlockRequestV1 {
    /// Parent block hash of the block to build.
    pub parent_block_hash: B256,
    /// Payload attributes (Cancun version).
    pub payload_attributes: EthPayloadAttributes,
    /// Raw signed transactions to force-include in order.
    pub transactions: Vec<Bytes>,
    /// Optional extra data for the block header; omitted from the JSON object when `None`.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub extra_data: Option<Bytes>,
}
/// Testing RPC interface for building a block in a single call.
///
/// Registered under the `testing` namespace; the `client` feature additionally
/// generates a jsonrpsee client implementation next to the server trait.
#[cfg_attr(not(feature = "client"), rpc(server, namespace = "testing"))]
#[cfg_attr(feature = "client", rpc(server, client, namespace = "testing"))]
pub trait TestingApi {
    /// Builds a block using the provided parent, payload attributes, and transactions.
    ///
    /// See <https://github.com/marcindsobczak/execution-apis/blob/main/src/testing/testing_buildBlockV1.md>
    #[method(name = "buildBlockV1")]
    async fn build_block_v1(
        &self,
        request: TestingBuildBlockRequestV1,
    ) -> jsonrpsee::core::RpcResult<ExecutionPayloadEnvelopeV5>;
}

View File

@@ -17,6 +17,7 @@ reth-primitives-traits.workspace = true
reth-ipc.workspace = true
reth-chainspec.workspace = true
reth-consensus.workspace = true
reth-engine-primitives.workspace = true
reth-network-api.workspace = true
reth-node-core.workspace = true
reth-rpc.workspace = true
@@ -26,6 +27,7 @@ reth-rpc-layer.workspace = true
reth-rpc-eth-types.workspace = true
reth-rpc-server-types.workspace = true
reth-tasks = { workspace = true, features = ["rayon"] }
reth-tokio-util.workspace = true
reth-transaction-pool.workspace = true
reth-storage-api.workspace = true
reth-chain-state.workspace = true
@@ -63,7 +65,6 @@ reth-rpc-api = { workspace = true, features = ["client"] }
reth-rpc-engine-api.workspace = true
reth-tracing.workspace = true
reth-transaction-pool = { workspace = true, features = ["test-utils"] }
reth-engine-primitives.workspace = true
reth-node-ethereum.workspace = true
alloy-primitives.workspace = true

View File

@@ -94,6 +94,7 @@ impl RethRpcServerConfig for RpcServerArgs {
fn eth_config(&self) -> EthConfig {
EthConfig::default()
.max_tracing_requests(self.rpc_max_tracing_requests)
.max_blocking_io_requests(self.rpc_max_blocking_io_requests)
.max_trace_filter_blocks(self.rpc_max_trace_filter_blocks)
.max_blocks_per_filter(self.rpc_max_blocks_per_filter.unwrap_or_max())
.max_logs_per_response(self.rpc_max_logs_per_response.unwrap_or_max() as usize)

View File

@@ -32,6 +32,7 @@ use jsonrpsee::{
};
use reth_chainspec::{ChainSpecProvider, EthereumHardforks};
use reth_consensus::{ConsensusError, FullConsensus};
use reth_engine_primitives::ConsensusEngineEvent;
use reth_evm::ConfigureEvm;
use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers};
use reth_primitives_traits::{NodePrimitives, TxTy};
@@ -46,16 +47,18 @@ use reth_rpc_eth_api::{
TraceExt,
},
node::RpcNodeCoreAdapter,
EthApiServer, EthApiTypes, FullEthApiServer, RpcBlock, RpcConvert, RpcConverter, RpcHeader,
RpcNodeCore, RpcReceipt, RpcTransaction, RpcTxReq,
EthApiServer, EthApiTypes, FullEthApiServer, FullEthApiTypes, RpcBlock, RpcConvert,
RpcConverter, RpcHeader, RpcNodeCore, RpcReceipt, RpcTransaction, RpcTxReq,
};
use reth_rpc_eth_types::{receipt::EthReceiptConverter, EthConfig, EthSubscriptionIdProvider};
use reth_rpc_layer::{AuthLayer, Claims, CompressionLayer, JwtAuthValidator, JwtSecret};
pub use reth_rpc_server_types::RethRpcModule;
use reth_storage_api::{
AccountReader, BlockReader, ChangeSetReader, FullRpcProvider, ProviderBlock,
AccountReader, BlockReader, ChangeSetReader, FullRpcProvider, NodePrimitivesProvider,
StateProviderFactory,
};
use reth_tasks::{pool::BlockingTaskGuard, TaskSpawner, TokioTaskExecutor};
use reth_tokio_util::EventSender;
use reth_transaction_pool::{noop::NoopTransactionPool, TransactionPool};
use serde::{Deserialize, Serialize};
use std::{
@@ -74,7 +77,7 @@ use jsonrpsee::server::ServerConfigBuilder;
pub use reth_ipc::server::{
Builder as IpcServerBuilder, RpcServiceBuilder as IpcRpcServiceBuilder,
};
pub use reth_rpc_server_types::{constants, RethRpcModule, RpcModuleSelection};
pub use reth_rpc_server_types::{constants, RpcModuleSelection};
pub use tower::layer::util::{Identity, Stack};
/// Auth server utilities.
@@ -326,6 +329,7 @@ where
module_config: TransportRpcModuleConfig,
engine: impl IntoEngineApiRpcModule,
eth: EthApi,
engine_events: EventSender<ConsensusEngineEvent<N>>,
) -> (
TransportRpcModules,
AuthRpcModule,
@@ -334,16 +338,10 @@ where
where
EthApi: FullEthApiServer<Provider = Provider, Pool = Pool>,
{
let Self { provider, pool, network, executor, consensus, evm_config, .. } = self;
let config = module_config.config.clone().unwrap_or_default();
let mut registry = RpcRegistryInner::new(
provider, pool, network, executor, consensus, config, evm_config, eth,
);
let mut registry = self.into_registry(config, eth, engine_events);
let modules = registry.create_transport_rpc_modules(module_config);
let auth_module = registry.create_auth_module(engine);
(modules, auth_module, registry)
@@ -357,12 +355,23 @@ where
self,
config: RpcModuleConfig,
eth: EthApi,
engine_events: EventSender<ConsensusEngineEvent<N>>,
) -> RpcRegistryInner<Provider, Pool, Network, EthApi, EvmConfig, Consensus>
where
EthApi: EthApiTypes + 'static,
EthApi: FullEthApiServer<Provider = Provider, Pool = Pool>,
{
let Self { provider, pool, network, executor, consensus, evm_config, .. } = self;
RpcRegistryInner::new(provider, pool, network, executor, consensus, config, evm_config, eth)
RpcRegistryInner::new(
provider,
pool,
network,
executor,
consensus,
config,
evm_config,
eth,
engine_events,
)
}
/// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can
@@ -371,27 +380,17 @@ where
self,
module_config: TransportRpcModuleConfig,
eth: EthApi,
engine_events: EventSender<ConsensusEngineEvent<N>>,
) -> TransportRpcModules<()>
where
EthApi: FullEthApiServer<Provider = Provider, Pool = Pool>,
{
let mut modules = TransportRpcModules::default();
let Self { provider, pool, network, executor, consensus, evm_config, .. } = self;
if !module_config.is_empty() {
let TransportRpcModuleConfig { http, ws, ipc, config } = module_config.clone();
let mut registry = RpcRegistryInner::new(
provider,
pool,
network,
executor,
consensus,
config.unwrap_or_default(),
evm_config,
eth,
);
let mut registry = self.into_registry(config.unwrap_or_default(), eth, engine_events);
modules.config = module_config;
modules.http = registry.maybe_module(http.as_ref());
@@ -487,16 +486,8 @@ impl RpcModuleConfigBuilder {
}
/// A Helper type the holds instances of the configured modules.
#[derive(Debug, Clone)]
#[expect(dead_code)] // Consensus generic, might be useful in the future
pub struct RpcRegistryInner<
Provider: BlockReader,
Pool,
Network,
EthApi: EthApiTypes,
EvmConfig,
Consensus,
> {
#[derive(Debug)]
pub struct RpcRegistryInner<Provider, Pool, Network, EthApi: EthApiTypes, EvmConfig, Consensus> {
provider: Provider,
pool: Pool,
network: Network,
@@ -511,6 +502,9 @@ pub struct RpcRegistryInner<
modules: HashMap<RethRpcModule, Methods>,
/// eth config settings
eth_config: EthConfig,
/// Notification channel for engine API events
engine_events:
EventSender<ConsensusEngineEvent<<EthApi::RpcConvert as RpcConvert>::Primitives>>,
}
// === impl RpcRegistryInner ===
@@ -527,7 +521,7 @@ where
+ 'static,
Pool: Send + Sync + Clone + 'static,
Network: Clone + 'static,
EthApi: EthApiTypes + 'static,
EthApi: FullEthApiTypes + 'static,
EvmConfig: ConfigureEvm<Primitives = N>,
{
/// Creates a new, empty instance.
@@ -541,6 +535,9 @@ where
config: RpcModuleConfig,
evm_config: EvmConfig,
eth_api: EthApi,
engine_events: EventSender<
ConsensusEngineEvent<<EthApi::Provider as NodePrimitivesProvider>::Primitives>,
>,
) -> Self
where
EvmConfig: ConfigureEvm<Primitives = N>,
@@ -560,14 +557,14 @@ where
blocking_pool_guard,
eth_config: config.eth,
evm_config,
engine_events,
}
}
}
impl<Provider, Pool, Network, EthApi, BlockExecutor, Consensus>
RpcRegistryInner<Provider, Pool, Network, EthApi, BlockExecutor, Consensus>
impl<Provider, Pool, Network, EthApi, Evm, Consensus>
RpcRegistryInner<Provider, Pool, Network, EthApi, Evm, Consensus>
where
Provider: BlockReader,
EthApi: EthApiTypes,
{
/// Returns a reference to the installed [`EthApi`].
@@ -595,6 +592,11 @@ where
&self.provider
}
/// Returns a reference to the evm config
pub const fn evm_config(&self) -> &Evm {
&self.evm_config
}
/// Returns all installed methods
pub fn methods(&self) -> Vec<Methods> {
self.modules.values().cloned().collect()
@@ -706,8 +708,7 @@ where
/// If called outside of the tokio runtime. See also [`Self::eth_api`]
pub fn register_debug(&mut self) -> &mut Self
where
EthApi: EthApiSpec + EthTransactions + TraceExt,
EvmConfig::Primitives: NodePrimitives<Block = ProviderBlock<EthApi::Provider>>,
EthApi: EthTransactions + TraceExt,
{
let debug_api = self.debug_api();
self.modules.insert(RethRpcModule::Debug, debug_api.into_rpc().into());
@@ -814,8 +815,16 @@ where
/// # Panics
///
/// If called outside of the tokio runtime. See also [`Self::eth_api`]
pub fn debug_api(&self) -> DebugApi<EthApi> {
DebugApi::new(self.eth_api().clone(), self.blocking_pool_guard.clone())
pub fn debug_api(&self) -> DebugApi<EthApi>
where
EthApi: FullEthApiTypes,
{
DebugApi::new(
self.eth_api().clone(),
self.blocking_pool_guard.clone(),
self.tasks(),
self.engine_events.new_listener(),
)
}
/// Instantiates `NetApi`
@@ -933,11 +942,14 @@ where
)
.into_rpc()
.into(),
RethRpcModule::Debug => {
DebugApi::new(eth_api.clone(), self.blocking_pool_guard.clone())
.into_rpc()
.into()
}
RethRpcModule::Debug => DebugApi::new(
eth_api.clone(),
self.blocking_pool_guard.clone(),
&*self.executor,
self.engine_events.new_listener(),
)
.into_rpc()
.into(),
RethRpcModule::Eth => {
// merge all eth handlers
let mut module = eth_api.clone().into_rpc();
@@ -986,18 +998,18 @@ where
.into_rpc()
.into()
}
// only relevant for Ethereum and configured in `EthereumAddOns`
// implementation
// TODO: can we get rid of this here?
// Custom modules are not handled here - they should be registered via
// extend_rpc_modules
RethRpcModule::Flashbots | RethRpcModule::Other(_) => Default::default(),
RethRpcModule::Miner => MinerApi::default().into_rpc().into(),
RethRpcModule::Mev => {
EthSimBundle::new(eth_api.clone(), self.blocking_pool_guard.clone())
.into_rpc()
.into()
}
// these are implementation specific and need to be handled during
// initialization and should be registered via extend_rpc_modules in the
// nodebuilder rpc addon stack
RethRpcModule::Flashbots |
RethRpcModule::Testing |
RethRpcModule::Other(_) => Default::default(),
})
.clone()
})
@@ -1005,6 +1017,33 @@ where
}
}
impl<Provider, Pool, Network, EthApi, EvmConfig, Consensus> Clone
for RpcRegistryInner<Provider, Pool, Network, EthApi, EvmConfig, Consensus>
where
EthApi: EthApiTypes,
Provider: Clone,
Pool: Clone,
Network: Clone,
EvmConfig: Clone,
Consensus: Clone,
{
fn clone(&self) -> Self {
Self {
provider: self.provider.clone(),
pool: self.pool.clone(),
network: self.network.clone(),
executor: self.executor.clone(),
evm_config: self.evm_config.clone(),
consensus: self.consensus.clone(),
eth: self.eth.clone(),
blocking_pool_guard: self.blocking_pool_guard.clone(),
modules: self.modules.clone(),
eth_config: self.eth_config.clone(),
engine_events: self.engine_events.clone(),
}
}
}
/// A builder type for configuring and launching the servers that will handle RPC requests.
///
/// Supported server transports are:

View File

@@ -9,6 +9,7 @@ use reth_ethereum_primitives::TransactionSigned;
use reth_rpc_builder::{RpcServerConfig, TransportRpcModuleConfig};
use reth_rpc_eth_api::EthApiClient;
use reth_rpc_server_types::RpcModuleSelection;
use reth_tokio_util::EventSender;
use std::{
future::Future,
sync::{
@@ -73,8 +74,11 @@ where
async fn test_rpc_middleware() {
let builder = test_rpc_builder();
let eth_api = builder.bootstrap_eth_api();
let modules =
builder.build(TransportRpcModuleConfig::set_http(RpcModuleSelection::All), eth_api);
let modules = builder.build(
TransportRpcModuleConfig::set_http(RpcModuleSelection::All),
eth_api,
EventSender::new(1),
);
let mylayer = MyMiddlewareLayer::default();

View File

@@ -7,6 +7,7 @@ use reth_rpc_builder::{
RpcServerConfig, TransportRpcModuleConfig,
};
use reth_rpc_server_types::RethRpcModule;
use reth_tokio_util::EventSender;
use crate::utils::{
launch_http, launch_http_ws_same_port, launch_ws, test_address, test_rpc_builder,
@@ -27,8 +28,11 @@ async fn test_http_addr_in_use() {
let addr = handle.http_local_addr().unwrap();
let builder = test_rpc_builder();
let eth_api = builder.bootstrap_eth_api();
let server =
builder.build(TransportRpcModuleConfig::set_http(vec![RethRpcModule::Admin]), eth_api);
let server = builder.build(
TransportRpcModuleConfig::set_http(vec![RethRpcModule::Admin]),
eth_api,
EventSender::new(1),
);
let result =
RpcServerConfig::http(Default::default()).with_http_address(addr).start(&server).await;
let err = result.unwrap_err();
@@ -41,8 +45,11 @@ async fn test_ws_addr_in_use() {
let addr = handle.ws_local_addr().unwrap();
let builder = test_rpc_builder();
let eth_api = builder.bootstrap_eth_api();
let server =
builder.build(TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin]), eth_api);
let server = builder.build(
TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin]),
eth_api,
EventSender::new(1),
);
let result = RpcServerConfig::ws(Default::default()).with_ws_address(addr).start(&server).await;
let err = result.unwrap_err();
assert!(is_addr_in_use_kind(&err, ServerKind::WS(addr)), "{err}");
@@ -64,6 +71,7 @@ async fn test_launch_same_port_different_modules() {
TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin])
.with_http(vec![RethRpcModule::Eth]),
eth_api,
EventSender::new(1),
);
let addr = test_address();
let res = RpcServerConfig::ws(Default::default())
@@ -87,6 +95,7 @@ async fn test_launch_same_port_same_cors() {
TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Eth])
.with_http(vec![RethRpcModule::Eth]),
eth_api,
EventSender::new(1),
);
let addr = test_address();
let res = RpcServerConfig::ws(Default::default())
@@ -108,6 +117,7 @@ async fn test_launch_same_port_different_cors() {
TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Eth])
.with_http(vec![RethRpcModule::Eth]),
eth_api,
EventSender::new(1),
);
let addr = test_address();
let res = RpcServerConfig::ws(Default::default())

View File

@@ -4,6 +4,7 @@ use reth_consensus::noop::NoopConsensus;
use reth_engine_primitives::ConsensusEngineHandle;
use reth_ethereum_engine_primitives::EthEngineTypes;
use reth_ethereum_primitives::EthPrimitives;
use reth_tokio_util::EventSender;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use reth_evm_ethereum::EthEvmConfig;
@@ -62,7 +63,8 @@ pub async fn launch_auth(secret: JwtSecret) -> AuthServerHandle {
pub async fn launch_http(modules: impl Into<RpcModuleSelection>) -> RpcServerHandle {
let builder = test_rpc_builder();
let eth_api = builder.bootstrap_eth_api();
let server = builder.build(TransportRpcModuleConfig::set_http(modules), eth_api);
let server =
builder.build(TransportRpcModuleConfig::set_http(modules), eth_api, EventSender::new(1));
RpcServerConfig::http(Default::default())
.with_http_address(test_address())
.start(&server)
@@ -74,7 +76,8 @@ pub async fn launch_http(modules: impl Into<RpcModuleSelection>) -> RpcServerHan
pub async fn launch_ws(modules: impl Into<RpcModuleSelection>) -> RpcServerHandle {
let builder = test_rpc_builder();
let eth_api = builder.bootstrap_eth_api();
let server = builder.build(TransportRpcModuleConfig::set_ws(modules), eth_api);
let server =
builder.build(TransportRpcModuleConfig::set_ws(modules), eth_api, EventSender::new(1));
RpcServerConfig::ws(Default::default())
.with_ws_address(test_address())
.start(&server)
@@ -87,8 +90,11 @@ pub async fn launch_http_ws(modules: impl Into<RpcModuleSelection>) -> RpcServer
let builder = test_rpc_builder();
let eth_api = builder.bootstrap_eth_api();
let modules = modules.into();
let server = builder
.build(TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules), eth_api);
let server = builder.build(
TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules),
eth_api,
EventSender::new(1),
);
RpcServerConfig::ws(Default::default())
.with_ws_address(test_address())
.with_ws_address(test_address())
@@ -104,8 +110,11 @@ pub async fn launch_http_ws_same_port(modules: impl Into<RpcModuleSelection>) ->
let builder = test_rpc_builder();
let modules = modules.into();
let eth_api = builder.bootstrap_eth_api();
let server = builder
.build(TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules), eth_api);
let server = builder.build(
TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules),
eth_api,
EventSender::new(1),
);
let addr = test_address();
RpcServerConfig::ws(Default::default())
.with_ws_address(addr)

View File

@@ -35,7 +35,7 @@ impl TryFromReceiptResponse<op_alloy_network::Optimism> for reth_optimism_primit
fn from_receipt_response(
receipt_response: op_alloy_rpc_types::OpTransactionReceipt,
) -> Result<Self, Self::Error> {
Ok(receipt_response.inner.inner.map_logs(Into::into).into())
Ok(receipt_response.inner.inner.into_components().0.map_logs(Into::into))
}
}
@@ -70,14 +70,17 @@ mod tests {
#[cfg(feature = "op")]
#[test]
fn test_try_from_receipt_response_optimism() {
use op_alloy_consensus::OpReceiptEnvelope;
use alloy_consensus::ReceiptWithBloom;
use op_alloy_consensus::OpReceipt;
use op_alloy_network::Optimism;
use op_alloy_rpc_types::OpTransactionReceipt;
use reth_optimism_primitives::OpReceipt;
let op_receipt = OpTransactionReceipt {
inner: alloy_rpc_types_eth::TransactionReceipt {
inner: OpReceiptEnvelope::Eip1559(Default::default()),
inner: ReceiptWithBloom {
receipt: OpReceipt::Eip1559(Default::default()),
logs_bloom: Default::default(),
},
transaction_hash: Default::default(),
transaction_index: None,
block_hash: None,

View File

@@ -90,13 +90,14 @@ impl SignableTxRequest<op_alloy_consensus::OpTxEnvelope>
) -> Result<op_alloy_consensus::OpTxEnvelope, SignTxRequestError> {
let mut tx =
self.build_typed_tx().map_err(|_| SignTxRequestError::InvalidTransactionRequest)?;
let signature = signer.sign_transaction(&mut tx).await?;
// sanity check
// sanity check: deposit transactions must not be signed by the user
if tx.is_deposit() {
return Err(SignTxRequestError::InvalidTransactionRequest);
}
let signature = signer.sign_transaction(&mut tx).await?;
Ok(tx.into_signed(signature).into())
}
}

View File

@@ -7,18 +7,29 @@ use reth_tasks::{
pool::{BlockingTaskGuard, BlockingTaskPool},
TaskSpawner,
};
use tokio::sync::{oneshot, AcquireError, OwnedSemaphorePermit};
use std::sync::Arc;
use tokio::sync::{oneshot, AcquireError, OwnedSemaphorePermit, Semaphore};
use crate::EthApiTypes;
/// Executes code on a blocking thread.
/// Helpers for spawning blocking operations.
///
/// Operations can be blocking because they require lots of CPU work and/or IO.
///
/// This differentiates between workloads that are primarily CPU bound and heavier in general (such
/// as tracing tasks) and tasks that have a more balanced profile (io and cpu), such as `eth_call`
/// and alike.
///
/// This provides access to semaphores that permit how many of those are permitted concurrently.
/// It's expected that tracing related tasks are configured with a lower threshold, because not only
/// are they CPU heavy but they can also accumulate more memory for the traces.
pub trait SpawnBlocking: EthApiTypes + Clone + Send + Sync + 'static {
/// Returns a handle for spawning IO heavy blocking tasks.
///
/// Runtime access in default trait method implementations.
fn io_task_spawner(&self) -> impl TaskSpawner;
/// Returns a handle for spawning CPU heavy blocking tasks.
/// Returns a handle for spawning __CPU heavy__ blocking tasks, such as tracing requests.
///
/// Thread pool access in default trait method implementations.
fn tracing_task_pool(&self) -> &BlockingTaskPool;
@@ -26,21 +37,121 @@ pub trait SpawnBlocking: EthApiTypes + Clone + Send + Sync + 'static {
/// Returns handle to semaphore for pool of CPU heavy blocking tasks.
fn tracing_task_guard(&self) -> &BlockingTaskGuard;
/// Returns handle to semaphore for blocking IO tasks.
///
/// This semaphore is used to limit concurrent blocking IO operations like `eth_call`,
/// `eth_estimateGas`, and similar methods that require EVM execution.
fn blocking_io_task_guard(&self) -> &Arc<Semaphore>;
/// Acquires a permit from the tracing task semaphore.
///
/// This should be used for __CPU heavy__ operations like `debug_traceTransaction`,
/// `debug_traceCall`, and similar tracing methods. These tasks are typically:
/// - Primarily CPU bound with intensive computation
/// - Can accumulate significant memory for trace results
/// - Expected to have lower concurrency limits than general blocking IO tasks
///
/// For blocking IO tasks like `eth_call` or `eth_estimateGas`, use
/// [`acquire_owned_blocking_io`](Self::acquire_owned_blocking_io) instead.
///
/// See also [`Semaphore::acquire_owned`](`tokio::sync::Semaphore::acquire_owned`).
fn acquire_owned(
fn acquire_owned_tracing(
&self,
) -> impl Future<Output = Result<OwnedSemaphorePermit, AcquireError>> + Send {
self.tracing_task_guard().clone().acquire_owned()
}
/// Acquires multiple permits from the tracing task semaphore.
///
/// This should be used for particularly heavy tracing operations that require more resources
/// than a standard trace. The permit count should reflect the expected resource consumption
/// relative to a standard tracing operation.
///
/// Like [`acquire_owned_tracing`](Self::acquire_owned_tracing), this is specifically for
/// CPU-intensive tracing tasks, not general blocking IO operations.
///
/// See also [`Semaphore::acquire_many_owned`](`tokio::sync::Semaphore::acquire_many_owned`).
fn acquire_many_owned(
fn acquire_many_owned_tracing(
&self,
n: u32,
) -> impl Future<Output = Result<OwnedSemaphorePermit, AcquireError>> + Send {
self.tracing_task_guard().clone().acquire_many_owned(n)
}
/// Acquires a permit from the blocking IO request semaphore.
///
/// This should be used for operations like `eth_call`, `eth_estimateGas`, and similar methods
/// that require EVM execution and are spawned as blocking tasks.
///
/// See also [`Semaphore::acquire_owned`](`tokio::sync::Semaphore::acquire_owned`).
fn acquire_owned_blocking_io(
&self,
) -> impl Future<Output = Result<OwnedSemaphorePermit, AcquireError>> + Send {
self.blocking_io_task_guard().clone().acquire_owned()
}
/// Acquires multiple permits from the blocking IO request semaphore.
///
/// This should be used for operations that may require more resources than a single permit
/// allows.
///
/// See also [`Semaphore::acquire_many_owned`](`tokio::sync::Semaphore::acquire_many_owned`).
fn acquire_many_owned_blocking_io(
&self,
n: u32,
) -> impl Future<Output = Result<OwnedSemaphorePermit, AcquireError>> + Send {
self.blocking_io_task_guard().clone().acquire_many_owned(n)
}
/// Acquires permits from the blocking IO request semaphore based on a calculated weight.
///
/// The weight determines the maximum number of concurrent requests of this type that can run.
/// For example, if the semaphore has 256 total permits and `weight=10`, then at most 10
/// concurrent requests of this type are allowed.
///
/// The permits acquired per request is calculated as `total_permits / weight`, with an
/// adjustment: if this result is even, we add 1 to ensure that `weight - 1` permits are
/// always available for other tasks, preventing complete semaphore exhaustion.
///
/// This should be used to explicitly limit concurrent requests based on their expected
/// resource consumption:
///
/// - **Block range queries**: Higher weight for larger ranges (fewer concurrent requests)
/// - **Complex calls**: Higher weight for expensive operations
/// - **Batch operations**: Higher weight for larger batches
/// - **Historical queries**: Higher weight for deeper history lookups
///
/// # Examples
///
/// ```ignore
/// // For a heavy request, use higher weight to limit concurrency
/// let weight = 20; // Allow at most 20 concurrent requests of this type
/// let _permit = self.acquire_weighted_blocking_io(weight).await?;
/// ```
///
/// This helps prevent resource exhaustion from concurrent expensive operations while allowing
/// many cheap operations to run in parallel.
///
/// See also [`Semaphore::acquire_many_owned`](`tokio::sync::Semaphore::acquire_many_owned`).
fn acquire_weighted_blocking_io(
&self,
weight: u32,
) -> impl Future<Output = Result<OwnedSemaphorePermit, AcquireError>> + Send {
let guard = self.blocking_io_task_guard();
let total_permits = guard.available_permits().max(1) as u32;
let weight = weight.max(1);
let mut permits_to_acquire = (total_permits / weight).max(1);
// If total_permits divides evenly by weight, add 1 to ensure that when `weight`
// concurrent requests are running, at least `weight - 1` permits remain available
// for other tasks
if total_permits.is_multiple_of(weight) {
permits_to_acquire += 1;
}
guard.clone().acquire_many_owned(permits_to_acquire)
}
/// Executes the future on a new blocking task.
///
/// Note: This is expected for futures that are dominated by blocking IO operations, for tracing

View File

@@ -212,6 +212,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA
overrides: EvmOverrides,
) -> impl Future<Output = Result<Bytes, Self::Error>> + Send {
async move {
let _permit = self.acquire_owned_blocking_io().await;
let res =
self.transact_call_at(request, block_number.unwrap_or_default(), overrides).await?;

View File

@@ -110,7 +110,8 @@ pub trait EthFees:
// increasing and 0 <= p <= 100
// Note: The types used ensure that the percentiles are never < 0
if let Some(percentiles) = &reward_percentiles &&
percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.)
(percentiles.iter().any(|p| *p < 0.0 || *p > 100.0) ||
percentiles.windows(2).any(|w| w[0] > w[1]))
{
return Err(EthApiError::InvalidRewardPercentiles.into())
}

View File

@@ -420,6 +420,7 @@ impl<H: BlockHeader> BuildPendingEnv<H> for NextBlockEnvAttributes {
gas_limit: parent.gas_limit(),
parent_beacon_block_root: parent.parent_beacon_block_root(),
withdrawals: parent.withdrawals_root().map(|_| Default::default()),
extra_data: parent.extra_data().clone(),
}
}
}

View File

@@ -16,7 +16,8 @@ use reth_rpc_eth_types::{
error::FromEvmError, EthApiError, PendingBlockEnv, RpcInvalidTransactionError,
};
use reth_storage_api::{
BlockIdReader, BlockNumReader, StateProvider, StateProviderBox, StateProviderFactory,
BlockIdReader, BlockNumReader, BlockReaderIdExt, StateProvider, StateProviderBox,
StateProviderFactory,
};
use reth_transaction_pool::TransactionPool;
@@ -96,7 +97,7 @@ pub trait EthState: LoadState + SpawnBlocking {
{
Ok(async move {
let _permit = self
.acquire_owned()
.acquire_owned_tracing()
.await
.map_err(RethError::other)
.map_err(EthApiError::Internal)?;
@@ -273,21 +274,20 @@ pub trait LoadState:
let PendingBlockEnv { evm_env, origin } = self.pending_block_env_and_cfg()?;
Ok((evm_env, origin.state_block_id()))
} else {
// Use cached values if there is no pending block
let block_hash = RpcNodeCore::provider(self)
.block_hash_for_id(at)
// we can assume that the blockid will be predominantly `Latest` (e.g. for
// `eth_call`) and if requested by number or hash we can quickly fetch just the
// header
let header = RpcNodeCore::provider(self)
.sealed_header_by_id(at)
.map_err(Self::Error::from_eth_err)?
.ok_or(EthApiError::HeaderNotFound(at))?;
let header =
self.cache().get_header(block_hash).await.map_err(Self::Error::from_eth_err)?;
.ok_or_else(|| EthApiError::HeaderNotFound(at))?;
let evm_env = self
.evm_config()
.evm_env(&header)
.map_err(RethError::other)
.map_err(Self::Error::from_eth_err)?;
Ok((evm_env, block_hash.into()))
Ok((evm_env, header.hash().into()))
}
}
}

View File

@@ -8,9 +8,10 @@ use crate::{
};
use reqwest::Url;
use reth_rpc_server_types::constants::{
default_max_tracing_requests, DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_BLOCKS_PER_FILTER,
DEFAULT_MAX_LOGS_PER_RESPONSE, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_MAX_TRACE_FILTER_BLOCKS,
DEFAULT_PROOF_PERMITS, RPC_DEFAULT_SEND_RAW_TX_SYNC_TIMEOUT_SECS,
default_max_tracing_requests, DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_BLOCKING_IO_REQUEST,
DEFAULT_MAX_BLOCKS_PER_FILTER, DEFAULT_MAX_LOGS_PER_RESPONSE, DEFAULT_MAX_SIMULATE_BLOCKS,
DEFAULT_MAX_TRACE_FILTER_BLOCKS, DEFAULT_PROOF_PERMITS,
RPC_DEFAULT_SEND_RAW_TX_SYNC_TIMEOUT_SECS,
};
use serde::{Deserialize, Serialize};
@@ -68,6 +69,15 @@ pub struct EthConfig {
pub eth_proof_window: u64,
/// The maximum number of tracing calls that can be executed in concurrently.
pub max_tracing_requests: usize,
/// The maximum number of blocking IO calls that can be executed in concurrently.
///
/// Requests such as `eth_call`, `eth_estimateGas` and alike require evm execution, which is
/// considered blocking since it's usually more heavy on the IO side but also CPU constrained.
/// It is expected that these are spawned as short lived blocking tokio tasks. This config
/// determines how many can be spawned concurrently, to avoid a build up in the tokio's
/// blocking pool queue since there's only a limited number of threads available. This setting
/// restricts how many tasks are spawned concurrently.
pub max_blocking_io_requests: usize,
/// Maximum number of blocks for `trace_filter` requests.
pub max_trace_filter_blocks: u64,
/// Maximum number of blocks that could be scanned per filter request in `eth_getLogs` calls.
@@ -116,6 +126,7 @@ impl Default for EthConfig {
gas_oracle: GasPriceOracleConfig::default(),
eth_proof_window: DEFAULT_ETH_PROOF_WINDOW,
max_tracing_requests: default_max_tracing_requests(),
max_blocking_io_requests: DEFAULT_MAX_BLOCKING_IO_REQUEST,
max_trace_filter_blocks: DEFAULT_MAX_TRACE_FILTER_BLOCKS,
max_blocks_per_filter: DEFAULT_MAX_BLOCKS_PER_FILTER,
max_logs_per_response: DEFAULT_MAX_LOGS_PER_RESPONSE,
@@ -152,6 +163,12 @@ impl EthConfig {
self
}
/// Configures the maximum number of blocking IO requests
pub const fn max_blocking_io_requests(mut self, max_requests: usize) -> Self {
self.max_blocking_io_requests = max_requests;
self
}
/// Configures the maximum block length to scan per `eth_getLogs` request
pub const fn max_blocks_per_filter(mut self, max_blocks: u64) -> Self {
self.max_blocks_per_filter = max_blocks;

View File

@@ -11,6 +11,7 @@ use reth_errors::ProviderError;
use reth_primitives_traits::{BlockBody, RecoveredBlock, SignedTransaction};
use reth_storage_api::{BlockReader, ProviderBlock};
use std::sync::Arc;
use thiserror::Error;
/// Returns all matching of a block's receipts when the transaction hashes are known.
pub fn matching_block_logs_with_tx_hashes<'a, I, R>(
@@ -147,30 +148,40 @@ where
/// Computes the block range based on the filter range and current block numbers.
///
/// This returns `(min(best,from), min(best,to))`.
/// Returns an error for invalid ranges rather than silently clamping values.
pub fn get_filter_block_range(
from_block: Option<u64>,
to_block: Option<u64>,
start_block: u64,
info: ChainInfo,
) -> (u64, u64) {
let mut from_block_number = start_block;
let mut to_block_number = info.best_number;
) -> Result<(u64, u64), FilterBlockRangeError> {
let from_block_number = from_block.unwrap_or(start_block);
let to_block_number = to_block.unwrap_or(info.best_number);
// if a `from_block` argument is provided then the `from_block_number` is the converted value or
// the start block if the converted value is larger than the start block, since `from_block`
// can't be a future block: `min(head, from_block)`
if let Some(filter_from_block) = from_block {
from_block_number = start_block.min(filter_from_block)
// from > to is an invalid range
if from_block_number > to_block_number {
return Err(FilterBlockRangeError::InvalidBlockRange);
}
// upper end of the range is the converted `to_block` argument, restricted by the best block:
// `min(best_number,to_block_number)`
if let Some(filter_to_block) = to_block {
to_block_number = info.best_number.min(filter_to_block);
// we cannot query blocks that don't exist yet
if to_block_number > info.best_number {
return Err(FilterBlockRangeError::BlockRangeExceedsHead);
}
(from_block_number, to_block_number)
Ok((from_block_number, to_block_number))
}
/// Errors for filter block range validation.
///
/// See also <https://github.com/ethereum/go-ethereum/blob/master/eth/filters/filter.go#L224-L230>.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Error)]
pub enum FilterBlockRangeError {
/// `from_block > to_block`
#[error("invalid block range params")]
InvalidBlockRange,
/// Block range extends beyond current head
#[error("block range extends beyond current head block")]
BlockRangeExceedsHead,
}
#[cfg(test)]
@@ -184,44 +195,73 @@ mod tests {
let from = 14000000u64;
let to = 14000100u64;
let info = ChainInfo { best_number: 15000000, ..Default::default() };
let range = get_filter_block_range(Some(from), Some(to), info.best_number, info);
let range = get_filter_block_range(Some(from), Some(to), info.best_number, info).unwrap();
assert_eq!(range, (from, to));
}
#[test]
fn test_log_range_higher() {
let from = 15000001u64;
let to = 15000002u64;
let info = ChainInfo { best_number: 15000000, ..Default::default() };
let range = get_filter_block_range(Some(from), Some(to), info.best_number, info);
assert_eq!(range, (info.best_number, info.best_number));
}
#[test]
fn test_log_range_from() {
let from = 14000000u64;
let info = ChainInfo { best_number: 15000000, ..Default::default() };
let range = get_filter_block_range(Some(from), None, info.best_number, info);
let range = get_filter_block_range(Some(from), None, 0, info).unwrap();
assert_eq!(range, (from, info.best_number));
}
#[test]
fn test_log_range_to() {
let to = 14000000u64;
let start_block = 0u64;
let info = ChainInfo { best_number: 15000000, ..Default::default() };
let range = get_filter_block_range(None, Some(to), info.best_number, info);
assert_eq!(range, (info.best_number, to));
let range = get_filter_block_range(None, Some(to), start_block, info).unwrap();
assert_eq!(range, (start_block, to));
}
#[test]
fn test_log_range_higher_error() {
// Range extends beyond head -> should error instead of clamping
let from = 15000001u64;
let to = 15000002u64;
let info = ChainInfo { best_number: 15000000, ..Default::default() };
let err = get_filter_block_range(Some(from), Some(to), info.best_number, info).unwrap_err();
assert_eq!(err, FilterBlockRangeError::BlockRangeExceedsHead);
}
#[test]
fn test_log_range_to_below_start_error() {
// to_block < start_block, default from -> invalid range
let to = 14000000u64;
let info = ChainInfo { best_number: 15000000, ..Default::default() };
let err = get_filter_block_range(None, Some(to), info.best_number, info).unwrap_err();
assert_eq!(err, FilterBlockRangeError::InvalidBlockRange);
}
#[test]
fn test_log_range_empty() {
let info = ChainInfo { best_number: 15000000, ..Default::default() };
let range = get_filter_block_range(None, None, info.best_number, info);
let range = get_filter_block_range(None, None, info.best_number, info).unwrap();
// no range given -> head
assert_eq!(range, (info.best_number, info.best_number));
}
#[test]
fn test_invalid_block_range_error() {
let from = 100;
let to = 50;
let info = ChainInfo { best_number: 150, ..Default::default() };
let err = get_filter_block_range(Some(from), Some(to), 0, info).unwrap_err();
assert_eq!(err, FilterBlockRangeError::InvalidBlockRange);
}
#[test]
fn test_block_range_exceeds_head_error() {
let from = 100;
let to = 200;
let info = ChainInfo { best_number: 150, ..Default::default() };
let err = get_filter_block_range(Some(from), Some(to), 0, info).unwrap_err();
assert_eq!(err, FilterBlockRangeError::BlockRangeExceedsHead);
}
#[test]
fn parse_log_from_only() {
let s = r#"{"fromBlock":"0xf47a42","address":["0x7de93682b9b5d80d45cd371f7a14f74d49b0914c","0x0f00392fcb466c0e4e4310d81b941e07b4d5a079","0xebf67ab8cff336d3f609127e8bbf8bd6dd93cd81"],"topics":["0x0559884fd3a460db3073b7fc896cc77986f16e378210ded43186175bf646fc5f"]}"#;
@@ -242,7 +282,8 @@ mod tests {
to_block.and_then(alloy_rpc_types_eth::BlockNumberOrTag::as_number),
start_block,
info,
);
)
.unwrap();
assert_eq!(from_block_number, 16022082);
assert_eq!(to_block_number, best_number);
}

View File

@@ -18,6 +18,20 @@ pub const DEFAULT_MAX_LOGS_PER_RESPONSE: usize = 20_000;
/// The default maximum number of blocks for `trace_filter` requests.
pub const DEFAULT_MAX_TRACE_FILTER_BLOCKS: u64 = 100;
/// Setting for how many concurrent (heavier) _blocking_ IO requests are allowed.
///
/// What is considered a blocking IO request can depend on the RPC method. In general anything that
/// requires IO is considered blocking and should be spawned as blocking. This setting is however,
/// primarily intended for heavier blocking requests that require evm execution for example,
/// `eth_call` and alike. This is intended to be used with a semaphore that must be acquired before
/// a new task is spawned to avoid unnecessary pooling if the number of inflight requests exceeds
/// the available threads in the pool.
///
/// tokio's blocking pool, has a default of 512 and could grow unbounded, since requests like
/// `eth_call` also require a lot of cpu which will occupy the thread, we can set this to a lower
/// value.
pub const DEFAULT_MAX_BLOCKING_IO_REQUEST: usize = 256;
/// The default maximum number tracing requests we're allowing concurrently.
/// Tracing is mostly CPU bound so we're limiting the number of concurrent requests to something
/// lower that the number of cores, in order to minimize the impact on the rest of the system.

View File

@@ -323,6 +323,8 @@ pub enum RethRpcModule {
Miner,
/// `mev_` module
Mev,
/// `testing_` module
Testing,
/// Custom RPC module not part of the standard set
#[strum(default)]
#[serde(untagged)]
@@ -347,6 +349,7 @@ impl RethRpcModule {
Self::Flashbots,
Self::Miner,
Self::Mev,
Self::Testing,
];
/// Returns the number of standard variants (excludes Other)
@@ -406,6 +409,7 @@ impl AsRef<str> for RethRpcModule {
Self::Flashbots => "flashbots",
Self::Miner => "miner",
Self::Mev => "mev",
Self::Testing => "testing",
}
}
}
@@ -428,6 +432,7 @@ impl FromStr for RethRpcModule {
"flashbots" => Self::Flashbots,
"miner" => Self::Miner,
"mev" => Self::Mev,
"testing" => Self::Testing,
// Any unknown module becomes Other
other => Self::Other(other.to_string()),
})

View File

@@ -38,6 +38,8 @@ reth-rpc-server-types.workspace = true
reth-network-types.workspace = true
reth-consensus.workspace = true
reth-consensus-common.workspace = true
reth-ethereum-primitives.workspace = true
reth-ethereum-engine-primitives.workspace = true
reth-node-api.workspace = true
reth-trie-common.workspace = true

View File

@@ -7,25 +7,29 @@ use alloy_evm::env::BlockEnvironment;
use alloy_genesis::ChainConfig;
use alloy_primitives::{hex::decode, uint, Address, Bytes, B256};
use alloy_rlp::{Decodable, Encodable};
use alloy_rpc_types::BlockTransactionsKind;
use alloy_rpc_types_debug::ExecutionWitness;
use alloy_rpc_types_eth::{
state::EvmOverrides, Block as RpcBlock, BlockError, Bundle, StateContext,
};
use alloy_rpc_types_eth::{state::EvmOverrides, BlockError, Bundle, StateContext};
use alloy_rpc_types_trace::geth::{
BlockTraceResult, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, TraceResult,
};
use async_trait::async_trait;
use futures::Stream;
use jsonrpsee::core::RpcResult;
use parking_lot::RwLock;
use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks};
use reth_engine_primitives::ConsensusEngineEvent;
use reth_errors::RethError;
use reth_evm::{execute::Executor, ConfigureEvm, EvmEnvFor};
use reth_primitives_traits::{Block as _, BlockBody, ReceiptWithBloom, RecoveredBlock};
use reth_primitives_traits::{
Block as BlockTrait, BlockBody, BlockTy, ReceiptWithBloom, RecoveredBlock,
};
use reth_revm::{db::State, witness::ExecutionWitnessRecord};
use reth_rpc_api::DebugApiServer;
use reth_rpc_convert::RpcTxReq;
use reth_rpc_eth_api::{
helpers::{EthTransactions, TraceExt},
EthApiTypes, FromEthApiError, RpcNodeCore,
FromEthApiError, RpcConvert, RpcNodeCore,
};
use reth_rpc_eth_types::EthApiError;
use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult};
@@ -33,26 +37,52 @@ use reth_storage_api::{
BlockIdReader, BlockReaderIdExt, HeaderProvider, ProviderBlock, ReceiptProviderIdExt,
StateProofProvider, StateProviderFactory, StateRootProvider, TransactionVariant,
};
use reth_tasks::pool::BlockingTaskGuard;
use reth_tasks::{pool::BlockingTaskGuard, TaskSpawner};
use reth_trie_common::{updates::TrieUpdates, HashedPostState};
use revm::DatabaseCommit;
use revm_inspectors::tracing::{DebugInspector, TransactionContext};
use std::sync::Arc;
use serde::{Deserialize, Serialize};
use std::{collections::VecDeque, sync::Arc};
use tokio::sync::{AcquireError, OwnedSemaphorePermit};
use tokio_stream::StreamExt;
/// `debug` API implementation.
///
/// This type provides the functionality for handling `debug` related requests.
pub struct DebugApi<Eth> {
pub struct DebugApi<Eth: RpcNodeCore> {
inner: Arc<DebugApiInner<Eth>>,
}
// === impl DebugApi ===
impl<Eth> DebugApi<Eth> {
impl<Eth> DebugApi<Eth>
where
Eth: RpcNodeCore,
{
/// Create a new instance of the [`DebugApi`]
pub fn new(eth_api: Eth, blocking_task_guard: BlockingTaskGuard) -> Self {
let inner = Arc::new(DebugApiInner { eth_api, blocking_task_guard });
pub fn new(
eth_api: Eth,
blocking_task_guard: BlockingTaskGuard,
executor: impl TaskSpawner,
mut stream: impl Stream<Item = ConsensusEngineEvent<Eth::Primitives>> + Send + Unpin + 'static,
) -> Self {
let bad_block_store = BadBlockStore::default();
let inner = Arc::new(DebugApiInner {
eth_api,
blocking_task_guard,
bad_block_store: bad_block_store.clone(),
});
// Spawn a task caching bad blocks
executor.spawn(Box::pin(async move {
while let Some(event) = stream.next().await {
if let ConsensusEngineEvent::InvalidBlock(block) = event &&
let Ok(recovered) =
RecoveredBlock::try_recover_sealed(block.as_ref().clone())
{
bad_block_store.insert(recovered);
}
}
}));
Self { inner }
}
@@ -60,9 +90,7 @@ impl<Eth> DebugApi<Eth> {
pub fn eth_api(&self) -> &Eth {
&self.inner.eth_api
}
}
impl<Eth: RpcNodeCore> DebugApi<Eth> {
/// Access the underlying provider.
pub fn provider(&self) -> &Eth::Provider {
self.inner.eth_api.provider()
@@ -73,7 +101,7 @@ impl<Eth: RpcNodeCore> DebugApi<Eth> {
impl<Eth> DebugApi<Eth>
where
Eth: EthApiTypes + TraceExt + 'static,
Eth: TraceExt,
{
/// Acquires a permit to execute a tracing call.
async fn acquire_trace_permit(&self) -> Result<OwnedSemaphorePermit, AcquireError> {
@@ -610,7 +638,7 @@ where
#[async_trait]
impl<Eth> DebugApiServer<RpcTxReq<Eth::NetworkTypes>> for DebugApi<Eth>
where
Eth: EthApiTypes + EthTransactions + TraceExt + 'static,
Eth: EthTransactions + TraceExt,
{
/// Handler for `debug_getRawHeader`
async fn raw_header(&self, block_id: BlockId) -> RpcResult<Bytes> {
@@ -660,7 +688,7 @@ where
/// Handler for `debug_getRawTransactions`
/// Returns the bytes of the transaction for the given hash.
async fn raw_transactions(&self, block_id: BlockId) -> RpcResult<Vec<Bytes>> {
let block = self
let block: RecoveredBlock<BlockTy<Eth::Primitives>> = self
.provider()
.block_with_senders_by_id(block_id, TransactionVariant::NoHash)
.to_rpc_result()?
@@ -681,8 +709,36 @@ where
}
/// Handler for `debug_getBadBlocks`
async fn bad_blocks(&self) -> RpcResult<Vec<RpcBlock>> {
Ok(vec![])
async fn bad_blocks(&self) -> RpcResult<Vec<serde_json::Value>> {
let blocks = self.inner.bad_block_store.all();
let mut bad_blocks = Vec::with_capacity(blocks.len());
#[derive(Serialize, Deserialize)]
struct BadBlockSerde<T> {
block: T,
hash: B256,
rlp: Bytes,
}
for block in blocks {
let rlp = alloy_rlp::encode(block.sealed_block()).into();
let hash = block.hash();
let block = block
.clone_into_rpc_block(
BlockTransactionsKind::Full,
|tx, tx_info| self.eth_api().converter().fill(tx, tx_info),
|header, size| self.eth_api().converter().convert_header(header, size),
)
.map_err(|err| Eth::Error::from(err).into())?;
let bad_block = serde_json::to_value(BadBlockSerde { block, hash, rlp })
.map_err(|err| EthApiError::other(internal_rpc_err(err.to_string())))?;
bad_blocks.push(bad_block);
}
Ok(bad_blocks)
}
/// Handler for `debug_traceChain`
@@ -1045,21 +1101,66 @@ where
}
}
impl<Eth> std::fmt::Debug for DebugApi<Eth> {
impl<Eth: RpcNodeCore> std::fmt::Debug for DebugApi<Eth> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DebugApi").finish_non_exhaustive()
}
}
impl<Eth> Clone for DebugApi<Eth> {
impl<Eth: RpcNodeCore> Clone for DebugApi<Eth> {
fn clone(&self) -> Self {
Self { inner: Arc::clone(&self.inner) }
}
}
struct DebugApiInner<Eth> {
struct DebugApiInner<Eth: RpcNodeCore> {
/// The implementation of `eth` API
eth_api: Eth,
// restrict the number of concurrent calls to blocking calls
blocking_task_guard: BlockingTaskGuard,
/// Cache for bad blocks.
bad_block_store: BadBlockStore<BlockTy<Eth::Primitives>>,
}
/// A bounded, deduplicating store of recently observed bad blocks.
///
/// Cloning is cheap: all clones share the same underlying queue via [`Arc`].
#[derive(Clone, Debug)]
struct BadBlockStore<B: BlockTrait> {
    /// FIFO queue of recovered bad blocks, oldest at the front; shared across clones.
    inner: Arc<RwLock<VecDeque<Arc<RecoveredBlock<B>>>>>,
    /// Maximum number of blocks retained; once exceeded, the oldest entries are evicted.
    limit: usize,
}
impl<B: BlockTrait> BadBlockStore<B> {
    /// Creates a new store that retains at most `limit` blocks.
    fn new(limit: usize) -> Self {
        Self {
            inner: Arc::new(RwLock::new(VecDeque::with_capacity(limit))),
            limit,
        }
    }

    /// Records a recovered bad block.
    ///
    /// Blocks already present (matched by hash) are ignored, so the original insertion
    /// order is preserved. Once the store exceeds its capacity, the oldest entries are
    /// evicted first.
    fn insert(&self, block: RecoveredBlock<B>) {
        let hash = block.hash();
        let mut queue = self.inner.write();

        // Deduplicate by hash: a re-reported bad block keeps its original position.
        let already_known = queue.iter().any(|existing| existing.hash() == hash);
        if already_known {
            return;
        }

        queue.push_back(Arc::new(block));

        // Evict from the front (oldest) until we are back within the capacity bound.
        while queue.len() > self.limit {
            queue.pop_front();
        }
    }

    /// Returns every cached bad block, newest first.
    fn all(&self) -> Vec<Arc<RecoveredBlock<B>>> {
        self.inner.read().iter().rev().cloned().collect()
    }
}
impl<B: BlockTrait> Default for BadBlockStore<B> {
    /// Defaults to retaining the 64 most recently observed bad blocks.
    fn default() -> Self {
        Self::new(64)
    }
}

View File

@@ -15,7 +15,8 @@ use reth_rpc_eth_types::{
FeeHistoryCacheConfig, ForwardConfig, GasCap, GasPriceOracle, GasPriceOracleConfig,
};
use reth_rpc_server_types::constants::{
DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_PROOF_PERMITS,
DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_BLOCKING_IO_REQUEST, DEFAULT_MAX_SIMULATE_BLOCKS,
DEFAULT_PROOF_PERMITS,
};
use reth_tasks::{pool::BlockingTaskPool, TaskSpawner, TokioTaskExecutor};
use std::{sync::Arc, time::Duration};
@@ -41,6 +42,7 @@ pub struct EthApiBuilder<N: RpcNodeCore, Rpc, NextEnv = ()> {
task_spawner: Box<dyn TaskSpawner + 'static>,
next_env: NextEnv,
max_batch_size: usize,
max_blocking_io_requests: usize,
pending_block_kind: PendingBlockKind,
raw_tx_forwarder: ForwardConfig,
send_raw_transaction_sync_timeout: Duration,
@@ -92,6 +94,7 @@ impl<N: RpcNodeCore, Rpc, NextEnv> EthApiBuilder<N, Rpc, NextEnv> {
task_spawner,
next_env,
max_batch_size,
max_blocking_io_requests,
pending_block_kind,
raw_tx_forwarder,
send_raw_transaction_sync_timeout,
@@ -113,6 +116,7 @@ impl<N: RpcNodeCore, Rpc, NextEnv> EthApiBuilder<N, Rpc, NextEnv> {
task_spawner,
next_env,
max_batch_size,
max_blocking_io_requests,
pending_block_kind,
raw_tx_forwarder,
send_raw_transaction_sync_timeout,
@@ -145,6 +149,7 @@ where
eth_state_cache_config: Default::default(),
next_env: Default::default(),
max_batch_size: 1,
max_blocking_io_requests: DEFAULT_MAX_BLOCKING_IO_REQUEST,
pending_block_kind: PendingBlockKind::Full,
raw_tx_forwarder: ForwardConfig::default(),
send_raw_transaction_sync_timeout: Duration::from_secs(30),
@@ -184,6 +189,7 @@ where
gas_oracle_config,
next_env,
max_batch_size,
max_blocking_io_requests,
pending_block_kind,
raw_tx_forwarder,
send_raw_transaction_sync_timeout,
@@ -205,6 +211,7 @@ where
gas_oracle_config,
next_env,
max_batch_size,
max_blocking_io_requests,
pending_block_kind,
raw_tx_forwarder,
send_raw_transaction_sync_timeout,
@@ -233,6 +240,7 @@ where
gas_oracle_config,
next_env: _,
max_batch_size,
max_blocking_io_requests,
pending_block_kind,
raw_tx_forwarder,
send_raw_transaction_sync_timeout,
@@ -254,6 +262,7 @@ where
gas_oracle_config,
next_env,
max_batch_size,
max_blocking_io_requests,
pending_block_kind,
raw_tx_forwarder,
send_raw_transaction_sync_timeout,
@@ -335,6 +344,12 @@ where
self
}
/// Sets the maximum number of concurrent blocking IO requests.
///
/// Defaults to `DEFAULT_MAX_BLOCKING_IO_REQUEST` when not set.
pub const fn max_blocking_io_requests(mut self, max_blocking_io_requests: usize) -> Self {
    self.max_blocking_io_requests = max_blocking_io_requests;
    self
}
/// Sets the pending block kind
pub const fn pending_block_kind(mut self, pending_block_kind: PendingBlockKind) -> Self {
self.pending_block_kind = pending_block_kind;
@@ -482,6 +497,7 @@ where
task_spawner,
next_env,
max_batch_size,
max_blocking_io_requests,
pending_block_kind,
raw_tx_forwarder,
send_raw_transaction_sync_timeout,
@@ -523,6 +539,7 @@ where
rpc_converter,
next_env,
max_batch_size,
max_blocking_io_requests,
pending_block_kind,
raw_tx_forwarder.forwarder_client(),
send_raw_transaction_sync_timeout,

View File

@@ -33,7 +33,7 @@ use reth_transaction_pool::{
blobstore::BlobSidecarConverter, noop::NoopTransactionPool, AddedTransactionOutcome,
BatchTxProcessor, BatchTxRequest, TransactionPool,
};
use tokio::sync::{broadcast, mpsc, Mutex};
use tokio::sync::{broadcast, mpsc, Mutex, Semaphore};
const DEFAULT_BROADCAST_CAPACITY: usize = 2000;
@@ -152,6 +152,7 @@ where
proof_permits: usize,
rpc_converter: Rpc,
max_batch_size: usize,
max_blocking_io_requests: usize,
pending_block_kind: PendingBlockKind,
raw_tx_forwarder: ForwardConfig,
send_raw_transaction_sync_timeout: Duration,
@@ -171,6 +172,7 @@ where
rpc_converter,
(),
max_batch_size,
max_blocking_io_requests,
pending_block_kind,
raw_tx_forwarder.forwarder_client(),
send_raw_transaction_sync_timeout,
@@ -263,6 +265,11 @@ where
fn tracing_task_guard(&self) -> &BlockingTaskGuard {
self.inner.blocking_task_guard()
}
/// Returns the semaphore that limits the number of concurrent blocking IO requests
/// (`eth_call`, `eth_estimateGas`, etc.).
#[inline]
fn blocking_io_task_guard(&self) -> &std::sync::Arc<tokio::sync::Semaphore> {
    self.inner.blocking_io_request_semaphore()
}
}
/// Container type `EthApi`
@@ -296,6 +303,9 @@ pub struct EthApiInner<N: RpcNodeCore, Rpc: RpcConvert> {
/// Guard for getproof calls
blocking_task_guard: BlockingTaskGuard,
/// Semaphore to limit concurrent blocking IO requests (`eth_call`, `eth_estimateGas`, etc.)
blocking_io_request_semaphore: Arc<Semaphore>,
/// Transaction broadcast channel
raw_tx_sender: broadcast::Sender<Bytes>,
@@ -346,6 +356,7 @@ where
converter: Rpc,
next_env: impl PendingEnvBuilder<N::Evm>,
max_batch_size: usize,
max_blocking_io_requests: usize,
pending_block_kind: PendingBlockKind,
raw_tx_forwarder: Option<RpcClient>,
send_raw_transaction_sync_timeout: Duration,
@@ -384,6 +395,7 @@ where
blocking_task_pool,
fee_history_cache,
blocking_task_guard: BlockingTaskGuard::new(proof_permits),
blocking_io_request_semaphore: Arc::new(Semaphore::new(max_blocking_io_requests)),
raw_tx_sender,
raw_tx_forwarder,
converter,
@@ -440,6 +452,8 @@ where
}
/// Returns a handle to the blocking thread pool.
///
/// This is intended for tasks that are CPU bound.
#[inline]
pub const fn blocking_task_pool(&self) -> &BlockingTaskPool {
&self.blocking_task_pool
@@ -525,7 +539,7 @@ where
/// Returns the transaction batch sender
#[inline]
const fn tx_batch_sender(
pub const fn tx_batch_sender(
&self,
) -> &mpsc::UnboundedSender<BatchTxRequest<<N::Pool as TransactionPool>::Transaction>> {
&self.tx_batch_sender
@@ -576,6 +590,12 @@ where
pub const fn evm_memory_limit(&self) -> u64 {
self.evm_memory_limit
}
/// Returns a reference to the semaphore that bounds the number of concurrent
/// blocking IO requests (`eth_call`, `eth_estimateGas`, etc.).
#[inline]
pub const fn blocking_io_request_semaphore(&self) -> &Arc<Semaphore> {
    &self.blocking_io_request_semaphore
}
}
#[cfg(test)]

View File

@@ -267,7 +267,7 @@ where
.map(|num| self.provider().convert_block_number(num))
.transpose()?
.flatten();
logs_utils::get_filter_block_range(from, to, start_block, info)
logs_utils::get_filter_block_range(from, to, start_block, info)?
}
FilterBlockOption::AtBlockHash(_) => {
// blockHash is equivalent to fromBlock = toBlock = the block number with
@@ -561,7 +561,7 @@ where
}
let (from_block_number, to_block_number) =
logs_utils::get_filter_block_range(from, to, start_block, info);
logs_utils::get_filter_block_range(from, to, start_block, info)?;
self.get_logs_in_block_range(filter, from_block_number, to_block_number, limits)
.await
@@ -952,6 +952,15 @@ impl From<ProviderError> for EthFilterError {
}
}
/// Maps block-range validation failures from the logs utilities onto their
/// filter-level error equivalents.
impl From<logs_utils::FilterBlockRangeError> for EthFilterError {
    fn from(err: logs_utils::FilterBlockRangeError) -> Self {
        use logs_utils::FilterBlockRangeError::*;
        match err {
            InvalidBlockRange => Self::InvalidBlockRangeParams,
            BlockRangeExceedsHead => Self::BlockRangeExceedsHead,
        }
    }
}
/// Helper type for the common pattern of returning receipts, block and the original header that is
/// a match for the filter.
struct ReceiptBlockResult<P>

View File

@@ -42,6 +42,7 @@ mod net;
mod otterscan;
mod reth;
mod rpc;
mod testing;
mod trace;
mod txpool;
mod validation;
@@ -58,6 +59,7 @@ pub use otterscan::OtterscanApi;
pub use reth::RethApi;
pub use reth_rpc_convert::RpcTypes;
pub use rpc::RPCApi;
pub use testing::TestingApi;
pub use trace::TraceApi;
pub use txpool::TxPoolApi;
pub use validation::{ValidationApi, ValidationApiConfig};

Some files were not shown because too many files have changed in this diff Show More