Mirror of https://github.com/paradigmxyz/reth.git (synced 2026-01-08 23:08:19 -05:00)

Commit: Merge remote-tracking branch 'origin/main' into alexey/fix-local-docker-builds
@@ -15,3 +15,12 @@ slow-timeout = { period = "2m", terminate-after = 10 }
[[profile.default.overrides]]
filter = "binary(e2e_testsuite)"
slow-timeout = { period = "2m", terminate-after = 3 }

[[profile.default.overrides]]
filter = "package(reth-era) and binary(it)"
slow-timeout = { period = "2m", terminate-after = 10 }

# Allow slower ethereum node e2e tests (p2p + blobs) to run up to 5 minutes.
[[profile.default.overrides]]
filter = "package(reth-node-ethereum) and binary(e2e)"
slow-timeout = { period = "1m", terminate-after = 5 }
@@ -12,7 +12,7 @@ workflows:
# Check that `A` activates the features of `B`.
"propagate-feature",
# These are the features to check:
"--features=std,op,dev,asm-keccak,jemalloc,jemalloc-prof,tracy-allocator,serde-bincode-compat,serde,test-utils,arbitrary,bench,alloy-compat",
"--features=std,op,dev,asm-keccak,jemalloc,jemalloc-prof,tracy-allocator,serde-bincode-compat,serde,test-utils,arbitrary,bench,alloy-compat,min-error-logs,min-warn-logs,min-info-logs,min-debug-logs,min-trace-logs,otlp,js-tracer,portable,keccak-cache-global",
# Do not try to add a new section to `[features]` of `A` only because `B` exposes that feature. There are edge-cases where this is still needed, but we can add them manually.
"--left-side-feature-missing=ignore",
# Ignore the case that `A` it outside of the workspace. Otherwise it will report errors in external dependencies that we have no influence on.
@@ -4,6 +4,7 @@
# include source files
!/bin
!/crates
!/pkg
!/testing
!book.toml
!Cargo.lock
@@ -11,6 +12,7 @@
!Cross.toml
!deny.toml
!Makefile
!README.md

# include for vergen constants
!/.git
.github/CODEOWNERS | 1 (vendored)
@@ -40,5 +40,6 @@ crates/tasks/ @mattsse
crates/tokio-util/ @fgimenez
crates/transaction-pool/ @mattsse @yongkangc
crates/trie/ @Rjected @shekhirin @mediocregopher
bin/reth-bench-compare/ @mediocregopher @shekhirin @yongkangc
etc/ @Rjected @shekhirin
.github/ @gakonst @DaniPopes
.github/actionlint.yaml | 7 (vendored, new file)
@@ -0,0 +1,7 @@
self-hosted-runner:
  labels:
    - depot-ubuntu-latest
    - depot-ubuntu-latest-2
    - depot-ubuntu-latest-4
    - depot-ubuntu-latest-8
    - depot-ubuntu-latest-16
.github/assets/check_wasm.sh | 2 (vendored)
@@ -11,6 +11,7 @@ exclude_crates=(
# The following require investigation if they can be fixed
reth-basic-payload-builder
reth-bench
reth-bench-compare
reth-cli
reth-cli-commands
reth-cli-runner
@@ -68,6 +69,7 @@ exclude_crates=(
reth-payload-builder # reth-metrics
reth-provider # tokio
reth-prune # tokio
reth-prune-static-files # reth-provider
reth-stages-api # reth-provider, reth-prune
reth-static-file # tokio
reth-transaction-pool # c-kzg
.github/assets/hive/expected_failures.yaml | 48 (vendored)
@@ -30,7 +30,7 @@ engine-withdrawals:
- Corrupted Block Hash Payload (INVALID) (Paris) (reth)
- Withdrawals Fork on Canonical Block 8 / Side Block 7 - 10 Block Re-Org (Paris) (reth)

engine-api: []
engine-api: [ ]

# no fix due to https://github.com/paradigmxyz/reth/issues/8732
engine-cancun:
@@ -39,33 +39,34 @@ engine-cancun:
# in hive or its dependencies
- Blob Transaction Ordering, Multiple Clients (Cancun) (reth)

sync: []
sync: [ ]

engine-auth: []
engine-auth: [ ]

# tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage
# no fix: it's too expensive to check whether the storage is empty on each creation (? - need more context on WHY)
# EIP-7610 related tests (Revert creation in case of non-empty storage):
#
# tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment
# modified consolidation contract, not necessarily practical on mainnet (? - need more context)
# tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage
# The test artificially creates an empty account with storage, then tests EIP-7610's behavior.
# On mainnet, ~25 such accounts exist as contract addresses (derived from keccak(prefix, caller,
# nonce/salt), not from public keys). No private key exists for contract addresses. To trigger
# this with EIP-7702, you'd need to recover a private key from one of the already deployed contract addresses - mathematically impossible.
#
# tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_*
# Requires hash collision on create2 address to target already deployed accounts with storage.
# ~20-30 such accounts exist from before the state-clear EIP. Creating new accounts targeting
# these requires hash collision - mathematically impossible to trigger on mainnet.
# ref: https://github.com/ethereum/go-ethereum/pull/28666#issuecomment-1891997143
#
# System contract tests (already fixed and deployed):
#
# tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout and test_invalid_log_length
# system contract is already fixed and deployed; tests cover scenarios where contract is
# System contract is already fixed and deployed; tests cover scenarios where contract is
# malformed which can't happen retroactively. No point in adding checks.
#
# tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py::test_system_contract_deployment
# post-fork test contract deployment, should fix for spec compliance but not realistic on mainnet (? - need more context)
#
# tests/osaka/eip7594_peerdas/test_max_blob_per_tx.py::test_max_blobs_per_tx_fork_transition
# reth enforces 6 blob limit from EIP-7594, but EIP-7892 raises it to 9.
# Needs constant update in alloy. https://github.com/paradigmxyz/reth/issues/18975
#
# tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_*
# status (27th June 2024): was discussed in ACDT meeting, need to be raised in ACDE.
# tests require hash collision on already deployed accounts with storage - mathematically
# impossible to trigger on mainnet. ~20-30 such accounts exist from before the state-clear
# EIP, but creating new accounts targeting these requires hash collision.
# ref: https://github.com/ethereum/go-ethereum/pull/28666#issuecomment-1891997143
# tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment
# Post-fork system contract deployment tests. Should fix for spec compliance but not realistic
# on mainnet as these contracts are already deployed at the correct addresses.
eels/consume-engine:
- tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage[fork_Prague-blockchain_test_engine-zero_nonce]-reth
- tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test_engine-deploy_after_fork-nonzero_balance]-reth
@@ -146,6 +147,13 @@ eels/consume-engine:
- tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_1-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth
- tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Prague-tx_type_2-blockchain_test_engine_from_state_test-non-empty-balance-revert-initcode]-reth
- tests/paris/eip7610_create_collision/test_initcollision.py::test_init_collision_create_tx[fork_Shanghai-tx_type_0-blockchain_test_engine_from_state_test-non-empty-balance-correct-initcode]-reth

# Blob limit tests:
#
# tests/osaka/eip7594_peerdas/test_max_blob_per_tx.py::test_max_blobs_per_tx_fork_transition[fork_PragueToOsakaAtTime15k-blob_count_7-blockchain_test]
# this test inserts a chain via chain.rlp where the last block is invalid, but expects import to stop there, this doesn't work properly with our pipeline import approach hence the import fails when the invalid block is detected.
#. In other words, if this test fails, this means we're correctly rejecting the block.
#. The same test exists in the consume-engine simulator where it is passing as expected
eels/consume-rlp:
- tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage[fork_Prague-blockchain_test-zero_nonce]-reth
- tests/prague/eip7251_consolidations/test_modified_consolidation_contract.py::test_system_contract_errors[fork_Prague-blockchain_test_engine-system_contract_reaches_gas_limit-system_contract_0x0000bbddc7ce488642fb579f8b00f3a590007251]-reth
.github/assets/hive/run_simulator.sh | 2 (vendored)
@@ -7,7 +7,7 @@ sim="${1}"
limit="${2}"

run_hive() {
hive --sim "${sim}" --sim.limit "${limit}" --sim.parallelism 8 --client reth 2>&1 | tee /tmp/log || true
hive --sim "${sim}" --sim.limit "${limit}" --sim.parallelism 16 --client reth 2>&1 | tee /tmp/log || true
}

check_log() {
.github/workflows/bench.yml | 7 (vendored)
@@ -11,18 +11,19 @@ env:
|
||||
CARGO_TERM_COLOR: always
|
||||
BASELINE: base
|
||||
SEED: reth
|
||||
RUSTC_WRAPPER: "sccache"
|
||||
|
||||
name: bench
|
||||
jobs:
|
||||
codspeed:
|
||||
runs-on:
|
||||
group: Reth
|
||||
runs-on: depot-ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: true
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
|
||||
.github/workflows/book.yml | 9 (vendored)
@@ -10,13 +10,16 @@ on:
|
||||
types: [opened, reopened, synchronize, closed]
|
||||
merge_group:
|
||||
|
||||
env:
|
||||
RUSTC_WRAPPER: "sccache"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: depot-ubuntu-latest-8
|
||||
timeout-minutes: 90
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Install bun
|
||||
uses: oven-sh/setup-bun@v2
|
||||
@@ -33,6 +36,8 @@ jobs:
|
||||
- name: Install Rust nightly
|
||||
uses: dtolnay/rust-toolchain@nightly
|
||||
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
|
||||
- name: Build docs
|
||||
run: cd docs/vocs && bash scripts/build-cargo-docs.sh
|
||||
|
||||
|
||||
.github/workflows/compact.yml | 9 (vendored)
@@ -13,12 +13,12 @@ on:
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
RUSTC_WRAPPER: "sccache"
|
||||
|
||||
name: compact-codec
|
||||
jobs:
|
||||
compact-codec:
|
||||
runs-on:
|
||||
group: Reth
|
||||
runs-on: depot-ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
bin:
|
||||
@@ -27,11 +27,12 @@ jobs:
|
||||
steps:
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
- name: Checkout base
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
ref: ${{ github.base_ref || 'main' }}
|
||||
# On `main` branch, generates test vectors and serializes them to disk using `Compact`.
|
||||
@@ -39,7 +40,7 @@ jobs:
|
||||
run: |
|
||||
${{ matrix.bin }} -- test-vectors compact --write
|
||||
- name: Checkout PR
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
clean: false
|
||||
# On incoming merge try to read and decode previously generated vectors with `Compact`
|
||||
|
||||
.github/workflows/docker-git.yml | 2 (vendored)
@@ -33,7 +33,7 @@ jobs:
- name: 'Build and push the git-sha-tagged op-reth image'
command: 'make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME GIT_SHA=$GIT_SHA PROFILE=maxperf op-docker-build-push-git-sha'
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
.github/workflows/docker-nightly.yml | 2 (vendored)
@@ -35,7 +35,7 @@ jobs:
- name: 'Build and push the nightly profiling op-reth image'
command: 'make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=profiling op-docker-build-push-nightly-profiling'
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- name: Remove bloatware
uses: laverdet/remove-bloatware@v1.0.0
with:
.github/workflows/docker-tag-latest.yml | 73 (vendored, new file)

@@ -0,0 +1,73 @@
# Tag a specific Docker release version as latest

name: docker-tag-latest

on:
  workflow_dispatch:
    inputs:
      version:
        description: 'Release version to tag as latest (e.g., v1.8.4)'
        required: true
        type: string
      tag_reth:
        description: 'Tag reth image as latest'
        required: false
        type: boolean
        default: true
      tag_op_reth:
        description: 'Tag op-reth image as latest'
        required: false
        type: boolean
        default: false

env:
  DOCKER_USERNAME: ${{ github.actor }}

jobs:
  tag-reth-latest:
    name: Tag reth as latest
    runs-on: ubuntu-24.04
    if: ${{ inputs.tag_reth }}
    permissions:
      packages: write
      contents: read
    steps:
      - name: Log in to Docker
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin

      - name: Pull reth release image
        run: |
          docker pull ghcr.io/${{ github.repository_owner }}/reth:${{ inputs.version }}

      - name: Tag reth as latest
        run: |
          docker tag ghcr.io/${{ github.repository_owner }}/reth:${{ inputs.version }} ghcr.io/${{ github.repository_owner }}/reth:latest

      - name: Push reth latest tag
        run: |
          docker push ghcr.io/${{ github.repository_owner }}/reth:latest

  tag-op-reth-latest:
    name: Tag op-reth as latest
    runs-on: ubuntu-24.04
    if: ${{ inputs.tag_op_reth }}
    permissions:
      packages: write
      contents: read
    steps:
      - name: Log in to Docker
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin

      - name: Pull op-reth release image
        run: |
          docker pull ghcr.io/${{ github.repository_owner }}/op-reth:${{ inputs.version }}

      - name: Tag op-reth as latest
        run: |
          docker tag ghcr.io/${{ github.repository_owner }}/op-reth:${{ inputs.version }} ghcr.io/${{ github.repository_owner }}/op-reth:latest

      - name: Push op-reth latest tag
        run: |
          docker push ghcr.io/${{ github.repository_owner }}/op-reth:latest
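Note: a quick way to sanity-check the result of this workflow after a run (an illustrative sketch, not part of the commit; it assumes a Docker client logged in to ghcr.io, and uses the example version v1.8.4 from the input description above) is to inspect both tags and compare the digests they report:

# The two inspections should report the same digest if :latest now points at the chosen release.
docker buildx imagetools inspect ghcr.io/paradigmxyz/reth:v1.8.4
docker buildx imagetools inspect ghcr.io/paradigmxyz/reth:latest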
.github/workflows/docker.yml | 4 (vendored)
@@ -32,7 +32,7 @@ jobs:
|
||||
- name: "Build and push op-reth image"
|
||||
command: "make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=maxperf op-docker-build-push"
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
@@ -68,7 +68,7 @@ jobs:
|
||||
- name: "Build and push op-reth image"
|
||||
command: "make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=maxperf op-docker-build-push-latest"
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
|
||||
.github/workflows/e2e.yml | 8 (vendored)
@@ -11,6 +11,7 @@ on:
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
SEED: rustethereumethereumrust
|
||||
RUSTC_WRAPPER: "sccache"
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
@@ -19,14 +20,14 @@ concurrency:
|
||||
jobs:
|
||||
test:
|
||||
name: e2e-testsuite
|
||||
runs-on:
|
||||
group: Reth
|
||||
runs-on: depot-ubuntu-latest-4
|
||||
env:
|
||||
RUST_BACKTRACE: 1
|
||||
timeout-minutes: 90
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- uses: taiki-e/install-action@nextest
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
@@ -43,4 +44,3 @@ jobs:
|
||||
--exclude 'op-reth' \
|
||||
--exclude 'reth' \
|
||||
-E 'binary(e2e_testsuite)'
|
||||
|
||||
|
||||
.github/workflows/grafana.yml | 21 (vendored, new file)

@@ -0,0 +1,21 @@
name: grafana

on:
  pull_request:
  merge_group:
  push:
    branches: [main]

jobs:
  check-dashboard:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v6
      - name: Check for ${DS_PROMETHEUS} in overview.json
        run: |
          if grep -Fn '${DS_PROMETHEUS}' etc/grafana/dashboards/overview.json; then
            echo "Error: overview.json contains '\${DS_PROMETHEUS}' placeholder"
            echo "Please replace it with '\${datasource}'"
            exit 1
          fi
          echo "✓ overview.json does not contain '\${DS_PROMETHEUS}' placeholder"
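Note: if this check fails on a locally exported dashboard, one plausible local fix (a sketch, not part of the commit; it assumes GNU sed and the repository path used in the workflow above) is to rewrite the placeholder in place and re-run the same grep:

# Replace the exported-dashboard placeholder with the templated datasource variable.
# Assumes GNU sed; on macOS/BSD, use `sed -i ''` instead of `sed -i`.
sed -i 's/${DS_PROMETHEUS}/${datasource}/g' etc/grafana/dashboards/overview.json

# Re-run the workflow's grep; success here means the placeholder is gone.
! grep -Fn '${DS_PROMETHEUS}' etc/grafana/dashboards/overview.json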
.github/workflows/hive.yml | 25 (vendored)
@@ -24,12 +24,11 @@ jobs:
|
||||
prepare-hive:
|
||||
if: github.repository == 'paradigmxyz/reth'
|
||||
timeout-minutes: 45
|
||||
runs-on:
|
||||
group: Reth
|
||||
runs-on: depot-ubuntu-latest-16
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- name: Checkout hive tests
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
repository: ethereum/hive
|
||||
path: hivetests
|
||||
@@ -45,7 +44,7 @@ jobs:
|
||||
|
||||
- name: Restore hive assets cache
|
||||
id: cache-hive
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ./hive_assets
|
||||
key: hive-assets-${{ steps.hive-commit.outputs.hash }}-${{ hashFiles('.github/assets/hive/build_simulators.sh') }}
|
||||
@@ -68,7 +67,7 @@ jobs:
|
||||
chmod +x hive
|
||||
|
||||
- name: Upload hive assets
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v6
|
||||
with:
|
||||
name: hive_assets
|
||||
path: ./hive_assets
|
||||
@@ -179,23 +178,22 @@ jobs:
|
||||
- prepare-reth
|
||||
- prepare-hive
|
||||
name: run ${{ matrix.scenario.sim }}${{ matrix.scenario.limit && format(' - {0}', matrix.scenario.limit) }}
|
||||
runs-on:
|
||||
group: Reth
|
||||
runs-on: depot-ubuntu-latest-16
|
||||
permissions:
|
||||
issues: write
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Download hive assets
|
||||
uses: actions/download-artifact@v5
|
||||
uses: actions/download-artifact@v7
|
||||
with:
|
||||
name: hive_assets
|
||||
path: /tmp
|
||||
|
||||
- name: Download reth image
|
||||
uses: actions/download-artifact@v5
|
||||
uses: actions/download-artifact@v7
|
||||
with:
|
||||
name: artifacts
|
||||
path: /tmp
|
||||
@@ -209,7 +207,7 @@ jobs:
|
||||
chmod +x /usr/local/bin/hive
|
||||
|
||||
- name: Checkout hive tests
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
repository: ethereum/hive
|
||||
ref: master
|
||||
@@ -247,8 +245,7 @@ jobs:
|
||||
notify-on-error:
|
||||
needs: test
|
||||
if: failure()
|
||||
runs-on:
|
||||
group: Reth
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Slack Webhook Action
|
||||
uses: rtCamp/action-slack-notify@v2
|
||||
|
||||
.github/workflows/integration.yml | 12 (vendored)
@@ -14,6 +14,7 @@ on:
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
SEED: rustethereumethereumrust
|
||||
RUSTC_WRAPPER: "sccache"
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
@@ -23,8 +24,7 @@ jobs:
|
||||
test:
|
||||
name: test / ${{ matrix.network }}
|
||||
if: github.event_name != 'schedule'
|
||||
runs-on:
|
||||
group: Reth
|
||||
runs-on: depot-ubuntu-latest-4
|
||||
env:
|
||||
RUST_BACKTRACE: 1
|
||||
strategy:
|
||||
@@ -32,12 +32,13 @@ jobs:
|
||||
network: ["ethereum", "optimism"]
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- name: Install Geth
|
||||
run: .github/assets/install_geth.sh
|
||||
- uses: taiki-e/install-action@nextest
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
@@ -71,12 +72,13 @@ jobs:
|
||||
if: github.event_name == 'schedule'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: taiki-e/install-action@nextest
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
- name: run era1 files integration tests
|
||||
run: cargo nextest run --package reth-era --test it -- --ignored
|
||||
run: cargo nextest run --release --package reth-era --test it -- --ignored
|
||||
|
||||
.github/workflows/kurtosis-op.yml | 13 (vendored)
@@ -9,7 +9,7 @@ on:
|
||||
|
||||
push:
|
||||
tags:
|
||||
- '*'
|
||||
- "*"
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -32,17 +32,16 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
name: run kurtosis
|
||||
runs-on:
|
||||
group: Reth
|
||||
runs-on: depot-ubuntu-latest
|
||||
needs:
|
||||
- prepare-reth
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Download reth image
|
||||
uses: actions/download-artifact@v5
|
||||
uses: actions/download-artifact@v7
|
||||
with:
|
||||
name: artifacts
|
||||
path: /tmp
|
||||
@@ -83,12 +82,10 @@ jobs:
|
||||
kurtosis service logs -a op-devnet op-cl-2151908-2-op-node-op-reth-op-kurtosis
|
||||
exit 1
|
||||
|
||||
|
||||
notify-on-error:
|
||||
needs: test
|
||||
if: failure()
|
||||
runs-on:
|
||||
group: Reth
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Slack Webhook Action
|
||||
uses: rtCamp/action-slack-notify@v2
|
||||
|
||||
.github/workflows/kurtosis.yml | 14 (vendored)
@@ -9,7 +9,7 @@ on:
|
||||
|
||||
push:
|
||||
tags:
|
||||
- '*'
|
||||
- "*"
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -30,17 +30,16 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
name: run kurtosis
|
||||
runs-on:
|
||||
group: Reth
|
||||
runs-on: depot-ubuntu-latest
|
||||
needs:
|
||||
- prepare-reth
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Download reth image
|
||||
uses: actions/download-artifact@v5
|
||||
uses: actions/download-artifact@v7
|
||||
with:
|
||||
name: artifacts
|
||||
path: /tmp
|
||||
@@ -54,13 +53,12 @@ jobs:
|
||||
- name: Run kurtosis
|
||||
uses: ethpandaops/kurtosis-assertoor-github-action@v1
|
||||
with:
|
||||
ethereum_package_args: '.github/assets/kurtosis_network_params.yaml'
|
||||
ethereum_package_args: ".github/assets/kurtosis_network_params.yaml"
|
||||
|
||||
notify-on-error:
|
||||
needs: test
|
||||
if: failure()
|
||||
runs-on:
|
||||
group: Reth
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Slack Webhook Action
|
||||
uses: rtCamp/action-slack-notify@v2
|
||||
|
||||
.github/workflows/label-pr.yml | 2 (vendored)
@@ -11,7 +11,7 @@ jobs:
issues: write
pull-requests: write
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
with:
fetch-depth: 0
.github/workflows/lint-actions.yml | 2 (vendored)
@@ -12,7 +12,7 @@ jobs:
actionlint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- name: Download actionlint
id: get_actionlint
run: bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash)
.github/workflows/lint.yml | 93 (vendored)
@@ -8,11 +8,12 @@ on:
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
RUSTC_WRAPPER: "sccache"
|
||||
|
||||
jobs:
|
||||
clippy-binaries:
|
||||
name: clippy binaries / ${{ matrix.type }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: depot-ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
matrix:
|
||||
@@ -21,11 +22,12 @@ jobs:
|
||||
args: --workspace --lib --examples --tests --benches --locked
|
||||
features: "ethereum asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs"
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@clippy
|
||||
with:
|
||||
components: clippy
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
@@ -40,14 +42,15 @@ jobs:
|
||||
|
||||
clippy:
|
||||
name: clippy
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: depot-ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@nightly
|
||||
with:
|
||||
components: clippy
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
@@ -56,15 +59,16 @@ jobs:
|
||||
RUSTFLAGS: -D warnings
|
||||
|
||||
wasm:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: depot-ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
target: wasm32-wasip1
|
||||
- uses: taiki-e/install-action@cargo-hack
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
@@ -75,15 +79,16 @@ jobs:
|
||||
.github/assets/check_wasm.sh
|
||||
|
||||
riscv:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: depot-ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
target: riscv32imac-unknown-none-elf
|
||||
- uses: taiki-e/install-action@cargo-hack
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
@@ -92,21 +97,27 @@ jobs:
|
||||
run: .github/assets/check_rv32imac.sh
|
||||
|
||||
crate-checks:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
name: crate-checks (${{ matrix.partition }}/${{ matrix.total_partitions }})
|
||||
runs-on: depot-ubuntu-latest-4
|
||||
strategy:
|
||||
matrix:
|
||||
partition: [1, 2, 3]
|
||||
total_partitions: [3]
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: taiki-e/install-action@cargo-hack
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
- run: cargo hack check --workspace
|
||||
- run: cargo hack check --workspace --partition ${{ matrix.partition }}/${{ matrix.total_partitions }}
|
||||
|
||||
msrv:
|
||||
name: MSRV
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: depot-ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
matrix:
|
||||
@@ -114,11 +125,12 @@ jobs:
|
||||
- binary: reth
|
||||
- binary: op-reth
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: "1.88" # MSRV
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
@@ -128,12 +140,13 @@ jobs:
|
||||
|
||||
docs:
|
||||
name: docs
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: depot-ubuntu-latest-4
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@nightly
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
@@ -145,25 +158,27 @@ jobs:
|
||||
|
||||
fmt:
|
||||
name: fmt
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: depot-ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@nightly
|
||||
with:
|
||||
components: rustfmt
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- name: Run fmt
|
||||
run: cargo fmt --all --check
|
||||
|
||||
udeps:
|
||||
name: udeps
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: depot-ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@nightly
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
@@ -172,19 +187,21 @@ jobs:
|
||||
|
||||
book:
|
||||
name: book
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: depot-ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@nightly
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
- run: cargo build --bin reth --workspace --features ethereum
|
||||
- run: cargo build --bin reth --workspace
|
||||
- run: cargo build --bin op-reth --workspace
|
||||
env:
|
||||
RUSTFLAGS: -D warnings
|
||||
- run: ./docs/cli/update.sh target/debug/reth
|
||||
- run: ./docs/cli/update.sh target/debug/reth target/debug/op-reth
|
||||
- name: Check docs changes
|
||||
run: git diff --exit-code
|
||||
|
||||
@@ -192,7 +209,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: crate-ci/typos@v1
|
||||
|
||||
check-toml:
|
||||
@@ -200,7 +217,7 @@ jobs:
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
- name: Run dprint
|
||||
uses: dprint/check@v2.3
|
||||
with:
|
||||
@@ -210,7 +227,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- name: Check dashboard JSON with jq
|
||||
uses: sergeysova/jq-action@v2
|
||||
with:
|
||||
@@ -220,37 +237,45 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- name: Ensure no arbitrary or proptest dependency on default build
|
||||
run: cargo tree --package reth -e=features,no-dev | grep -Eq "arbitrary|proptest" && exit 1 || exit 0
|
||||
|
||||
# Checks that selected rates can compile with power set of features
|
||||
# Checks that selected crates can compile with power set of features
|
||||
features:
|
||||
name: features
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: depot-ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@clippy
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
- name: cargo install cargo-hack
|
||||
uses: taiki-e/install-action@cargo-hack
|
||||
- run: make check-features
|
||||
- run: |
|
||||
cargo hack check \
|
||||
--package reth-codecs \
|
||||
--package reth-primitives-traits \
|
||||
--package reth-primitives \
|
||||
--feature-powerset \
|
||||
--depth 2
|
||||
env:
|
||||
RUSTFLAGS: -D warnings
|
||||
|
||||
# Check crates correctly propagate features
|
||||
feature-propagation:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: depot-ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: taiki-e/cache-cargo-install-action@v2
|
||||
with:
|
||||
|
||||
.github/workflows/prepare-reth.yml | 7 (vendored)
@@ -26,10 +26,9 @@ jobs:
|
||||
prepare-reth:
|
||||
if: github.repository == 'paradigmxyz/reth'
|
||||
timeout-minutes: 45
|
||||
runs-on:
|
||||
group: Reth
|
||||
runs-on: depot-ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- run: mkdir artifacts
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
@@ -51,7 +50,7 @@ jobs:
|
||||
|
||||
- name: Upload reth image
|
||||
id: upload
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v6
|
||||
with:
|
||||
name: artifacts
|
||||
path: ./artifacts
|
||||
|
||||
.github/workflows/release-dist.yml | 2 (vendored)
@@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Update Homebrew formula
uses: dawidd6/action-homebrew-bump-formula@v5
uses: dawidd6/action-homebrew-bump-formula@v7
with:
token: ${{ secrets.HOMEBREW }}
no_fork: true
.github/workflows/release-reproducible.yml | 92 (vendored)
@@ -1,11 +1,11 @@
|
||||
# This workflow is for building and pushing reproducible Docker images for releases.
|
||||
# This workflow is for building and pushing reproducible artifacts for releases
|
||||
|
||||
name: release-reproducible
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- v*
|
||||
workflow_run:
|
||||
workflows: [release]
|
||||
types: [completed]
|
||||
|
||||
env:
|
||||
DOCKER_REPRODUCIBLE_IMAGE_NAME: ghcr.io/${{ github.repository_owner }}/reth-reproducible
|
||||
@@ -13,23 +13,41 @@ env:
|
||||
jobs:
|
||||
extract-version:
|
||||
name: extract version
|
||||
if: ${{ github.event.workflow_run.conclusion == 'success' }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Extract version
|
||||
run: echo "VERSION=$(echo ${GITHUB_REF#refs/tags/})" >> $GITHUB_OUTPUT
|
||||
- name: Extract version from triggering tag
|
||||
id: extract_version
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
# Get the tag that points to the head SHA of the triggering workflow
|
||||
TAG=$(gh api /repos/${{ github.repository }}/git/refs/tags \
|
||||
--jq '.[] | select(.object.sha == "${{ github.event.workflow_run.head_sha }}") | .ref' \
|
||||
| head -1 \
|
||||
| sed 's|refs/tags/||')
|
||||
|
||||
if [ -z "$TAG" ]; then
|
||||
echo "No tag found for SHA ${{ github.event.workflow_run.head_sha }}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "VERSION=$TAG" >> $GITHUB_OUTPUT
|
||||
outputs:
|
||||
VERSION: ${{ steps.extract_version.outputs.VERSION }}
|
||||
|
||||
build-reproducible:
|
||||
name: build and push reproducible image
|
||||
name: build and push reproducible image and binaries
|
||||
runs-on: ubuntu-latest
|
||||
needs: extract-version
|
||||
needs: [extract-version]
|
||||
permissions:
|
||||
packages: write
|
||||
contents: read
|
||||
contents: write
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
ref: ${{ needs.extract-version.outputs.VERSION }}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
@@ -40,20 +58,37 @@ jobs:
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract Rust version from Cargo.toml
|
||||
- name: Extract Rust version
|
||||
id: rust_version
|
||||
run: |
|
||||
RUST_VERSION=$(cargo metadata --format-version 1 | jq -r '.packages[] | select(.name == "reth") | .rust_version' || echo "1")
|
||||
echo "RUST_VERSION=$RUST_VERSION" >> $GITHUB_OUTPUT
|
||||
RUST_TOOLCHAIN=$(rustc --version | cut -d' ' -f2)
|
||||
echo "RUST_TOOLCHAIN=$RUST_TOOLCHAIN" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Build and push reproducible image
|
||||
- name: Build reproducible artifacts
|
||||
uses: docker/build-push-action@v6
|
||||
id: docker_build
|
||||
with:
|
||||
context: .
|
||||
file: ./Dockerfile.reproducible
|
||||
build-args: |
|
||||
RUST_TOOLCHAIN=${{ steps.rust_version.outputs.RUST_TOOLCHAIN }}
|
||||
VERSION=${{ needs.extract-version.outputs.VERSION }}
|
||||
target: artifacts
|
||||
outputs: type=local,dest=./docker-artifacts
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
env:
|
||||
DOCKER_BUILD_RECORD_UPLOAD: false
|
||||
|
||||
- name: Build and push final image
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: ./Dockerfile.reproducible
|
||||
push: true
|
||||
build-args: |
|
||||
RUST_VERSION=${{ steps.rust_version.outputs.RUST_VERSION }}
|
||||
RUST_TOOLCHAIN=${{ steps.rust_version.outputs.RUST_TOOLCHAIN }}
|
||||
VERSION=${{ needs.extract-version.outputs.VERSION }}
|
||||
tags: |
|
||||
${{ env.DOCKER_REPRODUCIBLE_IMAGE_NAME }}:${{ needs.extract-version.outputs.VERSION }}
|
||||
${{ env.DOCKER_REPRODUCIBLE_IMAGE_NAME }}:latest
|
||||
@@ -62,3 +97,30 @@ jobs:
|
||||
provenance: false
|
||||
env:
|
||||
DOCKER_BUILD_RECORD_UPLOAD: false
|
||||
|
||||
- name: Prepare artifacts from Docker build
|
||||
run: |
|
||||
mkdir reproducible-artifacts
|
||||
cp docker-artifacts/reth reproducible-artifacts/reth-reproducible-${{ needs.extract-version.outputs.VERSION }}-x86_64-unknown-linux-gnu
|
||||
cp docker-artifacts/*.deb reproducible-artifacts/reth-${{ needs.extract-version.outputs.VERSION }}-x86_64-unknown-linux-gnu-reproducible.deb
|
||||
|
||||
- name: Configure GPG and create artifacts
|
||||
env:
|
||||
GPG_SIGNING_KEY: ${{ secrets.GPG_SIGNING_KEY }}
|
||||
GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
|
||||
run: |
|
||||
export GPG_TTY=$(tty)
|
||||
echo -n "$GPG_SIGNING_KEY" | base64 --decode | gpg --batch --import
|
||||
|
||||
cd reproducible-artifacts
|
||||
tar -czf reth-reproducible-${{ needs.extract-version.outputs.VERSION }}-x86_64-unknown-linux-gnu.tar.gz reth-reproducible-${{ needs.extract-version.outputs.VERSION }}-x86_64-unknown-linux-gnu --remove-files
|
||||
echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab reth-reproducible-${{ needs.extract-version.outputs.VERSION }}-x86_64-unknown-linux-gnu.tar.gz
|
||||
echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab reth-${{ needs.extract-version.outputs.VERSION }}-x86_64-unknown-linux-gnu-reproducible.deb
|
||||
|
||||
- name: Upload reproducible artifacts to release
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
gh release upload ${{ needs.extract-version.outputs.VERSION }} \
|
||||
reproducible-artifacts/*
|
||||
|
||||
|
||||
.github/workflows/release.yml | 51 (vendored)
@@ -18,10 +18,11 @@ env:
|
||||
REPO_NAME: ${{ github.repository_owner }}/reth
|
||||
IMAGE_NAME: ${{ github.repository_owner }}/reth
|
||||
OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth
|
||||
REPRODUCIBLE_IMAGE_NAME: ${{ github.repository_owner }}/reth-reproducible
|
||||
CARGO_TERM_COLOR: always
|
||||
DOCKER_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/reth
|
||||
DOCKER_OP_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/op-reth
|
||||
DEB_SUPPORTED_TARGETS: x86_64-unknown-linux-gnu aarch64-unknown-linux-gnu riscv64gc-unknown-linux-gnu
|
||||
RUSTC_WRAPPER: "sccache"
|
||||
|
||||
jobs:
|
||||
dry-run:
|
||||
@@ -49,8 +50,9 @@ jobs:
|
||||
needs: extract-version
|
||||
if: ${{ github.event.inputs.dry_run != 'true' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- name: Verify crate version matches tag
|
||||
# Check that the Cargo version starts with the tag,
|
||||
# so that Cargo version 1.4.8 can be matched against both v1.4.8 and v1.4.8-rc.1
|
||||
@@ -78,7 +80,7 @@ jobs:
|
||||
profile: maxperf
|
||||
allow_fail: false
|
||||
- target: x86_64-apple-darwin
|
||||
os: macos-13
|
||||
os: macos-14
|
||||
profile: maxperf
|
||||
allow_fail: false
|
||||
- target: aarch64-apple-darwin
|
||||
@@ -99,11 +101,12 @@ jobs:
|
||||
- command: op-build
|
||||
binary: op-reth
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
target: ${{ matrix.configs.target }}
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- name: Install cross main
|
||||
id: cross_main
|
||||
run: |
|
||||
@@ -120,20 +123,11 @@ jobs:
|
||||
|
||||
- name: Build Reth
|
||||
run: make PROFILE=${{ matrix.configs.profile }} ${{ matrix.build.command }}-${{ matrix.configs.target }}
|
||||
|
||||
- name: Build Reth deb package
|
||||
if: ${{ matrix.build.binary == 'reth' && contains(env.DEB_SUPPORTED_TARGETS, matrix.configs.target) }}
|
||||
run: make build-deb-${{ matrix.configs.target }} PROFILE=${{ matrix.configs.profile }} VERSION=${{ needs.extract-version.outputs.VERSION }}
|
||||
|
||||
- name: Move binary
|
||||
run: |
|
||||
mkdir artifacts
|
||||
[[ "${{ matrix.configs.target }}" == *windows* ]] && ext=".exe"
|
||||
|
||||
# Move deb packages if they exist
|
||||
if [[ "${{ matrix.build.binary }}" == "reth" && "${{ env.DEB_SUPPORTED_TARGETS }}" == *"${{ matrix.configs.target }}"* ]]; then
|
||||
mv "target/${{ matrix.configs.target }}/${{ matrix.configs.profile }}/${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb" ./artifacts
|
||||
fi
|
||||
mv "target/${{ matrix.configs.target }}/${{ matrix.configs.profile }}/${{ matrix.build.binary }}${ext}" ./artifacts
|
||||
|
||||
- name: Configure GPG and create artifacts
|
||||
env:
|
||||
@@ -143,42 +137,25 @@ jobs:
|
||||
export GPG_TTY=$(tty)
|
||||
echo -n "$GPG_SIGNING_KEY" | base64 --decode | gpg --batch --import
|
||||
cd artifacts
|
||||
tar -czf ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz ${{ matrix.build.binary }}*[!.deb]
|
||||
tar -czf ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz ${{ matrix.build.binary }}*
|
||||
echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz
|
||||
if [[ -f "${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb" ]]; then
|
||||
echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb
|
||||
fi
|
||||
mv *tar.gz* *.deb* ..
|
||||
mv *tar.gz* ..
|
||||
shell: bash
|
||||
|
||||
- name: Upload artifact
|
||||
if: ${{ github.event.inputs.dry_run != 'true' }}
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v6
|
||||
with:
|
||||
name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz
|
||||
path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz
|
||||
|
||||
- name: Upload signature
|
||||
if: ${{ github.event.inputs.dry_run != 'true' }}
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v6
|
||||
with:
|
||||
name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz.asc
|
||||
path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}.tar.gz.asc
|
||||
|
||||
- name: Upload deb package
|
||||
if: ${{ github.event.inputs.dry_run != 'true' && matrix.build.binary == 'reth' && contains(env.DEB_SUPPORTED_TARGETS, matrix.configs.target) }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb
|
||||
path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb
|
||||
|
||||
- name: Upload deb package signature
|
||||
if: ${{ github.event.inputs.dry_run != 'true' && matrix.build.binary == 'reth' && contains(env.DEB_SUPPORTED_TARGETS, matrix.configs.target) }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb.asc
|
||||
path: ${{ matrix.build.binary }}-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.configs.target }}-${{ matrix.configs.profile }}.deb.asc
|
||||
|
||||
draft-release:
|
||||
name: draft release
|
||||
runs-on: ubuntu-latest
|
||||
@@ -192,11 +169,11 @@ jobs:
|
||||
steps:
|
||||
# This is necessary for generating the changelog.
|
||||
# It has to come before "Download Artifacts" or else it deletes the artifacts.
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Download artifacts
|
||||
uses: actions/download-artifact@v5
|
||||
uses: actions/download-artifact@v7
|
||||
- name: Generate full changelog
|
||||
id: changelog
|
||||
run: |
|
||||
|
||||
.github/workflows/reproducible-build.yml | 78 (vendored)
@@ -8,25 +8,73 @@ on:
|
||||
jobs:
|
||||
build:
|
||||
name: build reproducible binaries
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ${{ matrix.runner }}
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- runner: ubuntu-latest
|
||||
machine: machine-1
|
||||
- runner: ubuntu-22.04
|
||||
machine: machine-2
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
target: x86_64-unknown-linux-gnu
|
||||
- name: Install cargo-cache
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Build reproducible binary with Docker
|
||||
run: |
|
||||
cargo install cargo-cache
|
||||
- name: Build Reth
|
||||
RUST_TOOLCHAIN=$(rustc --version | cut -d' ' -f2)
|
||||
docker build \
|
||||
--build-arg "RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \
|
||||
-f Dockerfile.reproducible -t reth:release \
|
||||
--target artifacts \
|
||||
--output type=local,dest=./target .
|
||||
|
||||
- name: Calculate SHA256
|
||||
id: sha256
|
||||
run: |
|
||||
make build-reth-reproducible
|
||||
mv target/x86_64-unknown-linux-gnu/reproducible/reth reth-build-1
|
||||
- name: Clean cache
|
||||
run: make clean && cargo cache -a
|
||||
- name: Build Reth again
|
||||
sha256sum target/reth > checksum.sha256
|
||||
echo "Binaries SHA256 on ${{ matrix.machine }}: $(cat checksum.sha256)"
|
||||
|
||||
- name: Upload the hash
|
||||
uses: actions/upload-artifact@v6
|
||||
with:
|
||||
name: checksum-${{ matrix.machine }}
|
||||
path: |
|
||||
checksum.sha256
|
||||
retention-days: 1
|
||||
|
||||
compare:
|
||||
name: compare reproducible binaries
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Download artifacts from machine-1
|
||||
uses: actions/download-artifact@v7
|
||||
with:
|
||||
name: checksum-machine-1
|
||||
path: machine-1/
|
||||
- name: Download artifacts from machine-2
|
||||
uses: actions/download-artifact@v7
|
||||
with:
|
||||
name: checksum-machine-2
|
||||
path: machine-2/
|
||||
- name: Compare SHA256 hashes
|
||||
run: |
|
||||
make build-reth-reproducible
|
||||
mv target/x86_64-unknown-linux-gnu/reproducible/reth reth-build-2
|
||||
- name: Compare binaries
|
||||
run: cmp reth-build-1 reth-build-2
|
||||
echo "=== SHA256 Comparison ==="
|
||||
echo "Machine 1 hash:"
|
||||
cat machine-1/checksum.sha256
|
||||
echo "Machine 2 hash:"
|
||||
cat machine-2/checksum.sha256
|
||||
|
||||
if cmp -s machine-1/checksum.sha256 machine-2/checksum.sha256; then
|
||||
echo "✅ SUCCESS: Binaries are identical (reproducible build verified)"
|
||||
else
|
||||
echo "❌ FAILURE: Binaries differ (reproducible build failed)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
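Note: the hunk above checks reproducibility by building on two different runners and comparing SHA256 checksums. A rough local approximation (a sketch, not part of the commit; it assumes Docker with BuildKit, the Dockerfile.reproducible `artifacts` target shown above, and that the exported binary is named `reth`, as the workflow's `sha256sum target/reth` suggests; the build-1/build-2 directory names are placeholders) is to run the same build twice and compare the outputs byte for byte:

# Pin the toolchain the same way the workflow does, then build the artifacts target twice.
RUST_TOOLCHAIN=$(rustc --version | cut -d' ' -f2)
docker build --build-arg "RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \
  -f Dockerfile.reproducible --target artifacts --output type=local,dest=./build-1 .
docker build --no-cache --build-arg "RUST_TOOLCHAIN=${RUST_TOOLCHAIN}" \
  -f Dockerfile.reproducible --target artifacts --output type=local,dest=./build-2 .

# Identical binaries mean the build reproduced; any difference makes cmp exit non-zero.
cmp build-1/reth build-2/reth && echo "reproducible" || echo "binaries differ"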
.github/workflows/stage.yml | 7 (vendored)
@@ -12,6 +12,7 @@ env:
|
||||
CARGO_TERM_COLOR: always
|
||||
FROM_BLOCK: 0
|
||||
TO_BLOCK: 50000
|
||||
RUSTC_WRAPPER: "sccache"
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
@@ -22,16 +23,16 @@ jobs:
|
||||
name: stage-run-test
|
||||
# Only run stage commands test in merge groups
|
||||
if: github.event_name == 'merge_group'
|
||||
runs-on:
|
||||
group: Reth
|
||||
runs-on: depot-ubuntu-latest
|
||||
env:
|
||||
RUST_LOG: info,sync=error
|
||||
RUST_BACKTRACE: 1
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
|
||||
.github/workflows/sync-era.yml | 9 (vendored)
@@ -9,6 +9,7 @@ on:
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
RUSTC_WRAPPER: "sccache"
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
@@ -17,8 +18,7 @@ concurrency:
|
||||
jobs:
|
||||
sync:
|
||||
name: sync (${{ matrix.chain.bin }})
|
||||
runs-on:
|
||||
group: Reth
|
||||
runs-on: depot-ubuntu-latest
|
||||
env:
|
||||
RUST_LOG: info,sync=error
|
||||
RUST_BACKTRACE: 1
|
||||
@@ -39,9 +39,10 @@ jobs:
|
||||
block: 10000
|
||||
unwind-target: "0x118a6e922a8c6cab221fc5adfe5056d2b72d58c6580e9c5629de55299e2cf8de"
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
@@ -64,4 +65,4 @@ jobs:
|
||||
${{ matrix.chain.bin }} stage unwind num-blocks 100 --chain ${{ matrix.chain.chain }}
|
||||
- name: Run stage unwind to block hash
|
||||
run: |
|
||||
${{ matrix.chain.bin }} stage unwind to-block ${{ matrix.chain.unwind-target }} --chain ${{ matrix.chain.chain }}
|
||||
${{ matrix.chain.bin }} stage unwind to-block ${{ matrix.chain.unwind-target }} --chain ${{ matrix.chain.chain }}
|
||||
|
||||
.github/workflows/sync.yml | 9 (vendored)
@@ -9,6 +9,7 @@ on:
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
RUSTC_WRAPPER: "sccache"
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
@@ -17,8 +18,7 @@ concurrency:
|
||||
jobs:
|
||||
sync:
|
||||
name: sync (${{ matrix.chain.bin }})
|
||||
runs-on:
|
||||
group: Reth
|
||||
runs-on: depot-ubuntu-latest
|
||||
env:
|
||||
RUST_LOG: info,sync=error
|
||||
RUST_BACKTRACE: 1
|
||||
@@ -39,9 +39,10 @@ jobs:
|
||||
block: 10000
|
||||
unwind-target: "0x118a6e922a8c6cab221fc5adfe5056d2b72d58c6580e9c5629de55299e2cf8de"
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rui314/setup-mold@v1
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: mozilla-actions/sccache-action@v0.0.9
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
@@ -63,4 +64,4 @@ jobs:
|
||||
${{ matrix.chain.bin }} stage unwind num-blocks 100 --chain ${{ matrix.chain.chain }}
|
||||
- name: Run stage unwind to block hash
|
||||
run: |
|
||||
${{ matrix.chain.bin }} stage unwind to-block ${{ matrix.chain.unwind-target }} --chain ${{ matrix.chain.chain }}
|
||||
${{ matrix.chain.bin }} stage unwind to-block ${{ matrix.chain.unwind-target }} --chain ${{ matrix.chain.chain }}
|
||||
|
||||
.github/workflows/unit.yml | 21 (vendored)
@@ -11,6 +11,7 @@ on:
env:
CARGO_TERM_COLOR: always
SEED: rustethereumethereumrust
RUSTC_WRAPPER: "sccache"

concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -19,8 +20,7 @@ concurrency:
jobs:
test:
name: test / ${{ matrix.type }} (${{ matrix.partition }}/${{ matrix.total_partitions }})
runs-on:
group: Reth
runs-on: depot-ubuntu-latest-4
env:
RUST_BACKTRACE: 1
strategy:
@@ -44,9 +44,10 @@ jobs:
total_partitions: 2
timeout-minutes: 30
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@stable
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
@@ -65,16 +66,15 @@ jobs:

state:
name: Ethereum state tests
runs-on:
group: Reth
runs-on: depot-ubuntu-latest-4
env:
RUST_LOG: info,sync=error
RUST_BACKTRACE: 1
timeout-minutes: 30
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- name: Checkout ethereum/tests
uses: actions/checkout@v5
uses: actions/checkout@v6
with:
repository: ethereum/tests
ref: 81862e4848585a438d64f911a19b3825f0f4cd95
@@ -93,6 +93,7 @@ jobs:
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@stable
- uses: taiki-e/install-action@nextest
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
@@ -100,15 +101,15 @@ jobs:

doc:
name: doc tests
runs-on:
group: Reth
runs-on: depot-ubuntu-latest
env:
RUST_BACKTRACE: 1
timeout-minutes: 30
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@stable
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true

4 .github/workflows/update-superchain.yml (vendored)
@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6

- name: Install required tools
run: |
@@ -27,7 +27,7 @@ jobs:
./fetch_superchain_config.sh

- name: Create Pull Request
uses: peter-evans/create-pull-request@v7
uses: peter-evans/create-pull-request@v8
with:
commit-message: "chore: update superchain config"
title: "chore: update superchain config"

13 .github/workflows/windows.yml (vendored)
@@ -9,18 +9,22 @@ on:
branches: [main]
merge_group:

env:
RUSTC_WRAPPER: "sccache"

jobs:
check-reth:
runs-on: ubuntu-24.04
runs-on: depot-ubuntu-latest
timeout-minutes: 60

steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@stable
with:
target: x86_64-pc-windows-gnu
- uses: taiki-e/install-action@cross
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
@@ -30,16 +34,17 @@ jobs:
run: cargo check --target x86_64-pc-windows-gnu

check-op-reth:
runs-on: ubuntu-24.04
runs-on: depot-ubuntu-latest
timeout-minutes: 60

steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@stable
with:
target: x86_64-pc-windows-gnu
- uses: taiki-e/install-action@cross
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true

2300 Cargo.lock (generated)
File diff suppressed because it is too large
134 Cargo.toml
@@ -1,5 +1,5 @@
[workspace.package]
version = "1.8.2"
version = "1.9.3"
edition = "2024"
rust-version = "1.88"
license = "MIT OR Apache-2.0"
@@ -10,6 +10,7 @@ exclude = [".github/"]
[workspace]
members = [
"bin/reth-bench/",
"bin/reth-bench-compare/",
"bin/reth/",
"crates/storage/rpc-provider/",
"crates/chain-state/",
@@ -147,10 +148,12 @@ members = [
"examples/custom-node/",
"examples/custom-engine-types/",
"examples/custom-evm/",
"examples/custom-hardforks/",
"examples/custom-inspector/",
"examples/custom-node-components/",
"examples/custom-payload-builder/",
"examples/custom-rlpx-subprotocol",
"examples/custom-rpc-middleware",
"examples/custom-node",
"examples/db-access",
"examples/engine-api-access",
@@ -339,6 +342,7 @@ reth = { path = "bin/reth" }
reth-storage-rpc-provider = { path = "crates/storage/rpc-provider" }
reth-basic-payload-builder = { path = "crates/payload/basic" }
reth-bench = { path = "bin/reth-bench" }
reth-bench-compare = { path = "bin/reth-bench-compare" }
reth-chain-state = { path = "crates/chain-state" }
reth-chainspec = { path = "crates/chainspec", default-features = false }
reth-cli = { path = "crates/cli/cli" }
@@ -372,11 +376,11 @@ reth-era-utils = { path = "crates/era-utils" }
reth-errors = { path = "crates/errors" }
reth-eth-wire = { path = "crates/net/eth-wire" }
reth-eth-wire-types = { path = "crates/net/eth-wire-types" }
reth-ethereum-cli = { path = "crates/ethereum/cli" }
reth-ethereum-payload-builder = { path = "crates/ethereum/payload" }
reth-ethereum-cli = { path = "crates/ethereum/cli", default-features = false }
reth-ethereum-consensus = { path = "crates/ethereum/consensus", default-features = false }
reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives", default-features = false }
reth-ethereum-forks = { path = "crates/ethereum/hardforks", default-features = false }
reth-ethereum-payload-builder = { path = "crates/ethereum/payload" }
reth-ethereum-primitives = { path = "crates/ethereum/primitives", default-features = false }
reth-ethereum = { path = "crates/ethereum/reth" }
reth-etl = { path = "crates/etl" }
@@ -413,7 +417,7 @@ reth-optimism-node = { path = "crates/optimism/node" }
reth-node-types = { path = "crates/node/types" }
reth-op = { path = "crates/optimism/reth", default-features = false }
reth-optimism-chainspec = { path = "crates/optimism/chainspec", default-features = false }
reth-optimism-cli = { path = "crates/optimism/cli" }
reth-optimism-cli = { path = "crates/optimism/cli", default-features = false }
reth-optimism-consensus = { path = "crates/optimism/consensus", default-features = false }
reth-optimism-forks = { path = "crates/optimism/hardforks", default-features = false }
reth-optimism-payload-builder = { path = "crates/optimism/payload" }
@@ -469,68 +473,66 @@ reth-ress-protocol = { path = "crates/ress/protocol" }
reth-ress-provider = { path = "crates/ress/provider" }

# revm
revm = { version = "30.1.1", default-features = false }
revm-bytecode = { version = "7.0.2", default-features = false }
revm-database = { version = "9.0.0", default-features = false }
revm-state = { version = "8.0.0", default-features = false }
revm-primitives = { version = "21.0.0", default-features = false }
revm-interpreter = { version = "27.0.0", default-features = false }
revm-inspector = { version = "11.1.0", default-features = false }
revm-context = { version = "10.1.0", default-features = false }
revm-context-interface = { version = "11.1.0", default-features = false }
revm-database-interface = { version = "8.0.1", default-features = false }
op-revm = { version = "11.2.0", default-features = false }
revm-inspectors = "0.31.0"
revm = { version = "33.1.0", default-features = false }
revm-bytecode = { version = "7.1.1", default-features = false }
revm-database = { version = "9.0.5", default-features = false }
revm-state = { version = "8.1.1", default-features = false }
revm-primitives = { version = "21.0.2", default-features = false }
revm-interpreter = { version = "31.1.0", default-features = false }
revm-database-interface = { version = "8.0.5", default-features = false }
op-revm = { version = "14.1.0", default-features = false }
revm-inspectors = "0.33.2"

# eth
alloy-chains = { version = "0.2.5", default-features = false }
alloy-dyn-abi = "1.4.1"
alloy-eip2124 = { version = "0.2.0", default-features = false }
alloy-evm = { version = "0.22.4", default-features = false }
alloy-primitives = { version = "1.4.1", default-features = false, features = ["map-foldhash"] }
alloy-eip7928 = { version = "0.1.0" }
alloy-evm = { version = "0.25.1", default-features = false }
alloy-primitives = { version = "1.5.0", default-features = false, features = ["map-foldhash"] }
alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] }
alloy-sol-macro = "1.4.1"
alloy-sol-types = { version = "1.4.1", default-features = false }
alloy-sol-macro = "1.5.0"
alloy-sol-types = { version = "1.5.0", default-features = false }
alloy-trie = { version = "0.9.1", default-features = false }

alloy-hardforks = "0.4.0"
alloy-hardforks = "0.4.5"

alloy-consensus = { version = "1.0.41", default-features = false }
alloy-contract = { version = "1.0.41", default-features = false }
alloy-eips = { version = "1.0.41", default-features = false }
alloy-genesis = { version = "1.0.41", default-features = false }
alloy-json-rpc = { version = "1.0.41", default-features = false }
alloy-network = { version = "1.0.41", default-features = false }
alloy-network-primitives = { version = "1.0.41", default-features = false }
alloy-provider = { version = "1.0.41", features = ["reqwest"], default-features = false }
alloy-pubsub = { version = "1.0.41", default-features = false }
alloy-rpc-client = { version = "1.0.41", default-features = false }
alloy-rpc-types = { version = "1.0.41", features = ["eth"], default-features = false }
alloy-rpc-types-admin = { version = "1.0.41", default-features = false }
alloy-rpc-types-anvil = { version = "1.0.41", default-features = false }
alloy-rpc-types-beacon = { version = "1.0.41", default-features = false }
alloy-rpc-types-debug = { version = "1.0.41", default-features = false }
alloy-rpc-types-engine = { version = "1.0.41", default-features = false }
alloy-rpc-types-eth = { version = "1.0.41", default-features = false }
alloy-rpc-types-mev = { version = "1.0.41", default-features = false }
alloy-rpc-types-trace = { version = "1.0.41", default-features = false }
alloy-rpc-types-txpool = { version = "1.0.41", default-features = false }
alloy-serde = { version = "1.0.41", default-features = false }
alloy-signer = { version = "1.0.41", default-features = false }
alloy-signer-local = { version = "1.0.41", default-features = false }
alloy-transport = { version = "1.0.41" }
alloy-transport-http = { version = "1.0.41", features = ["reqwest-rustls-tls"], default-features = false }
alloy-transport-ipc = { version = "1.0.41", default-features = false }
alloy-transport-ws = { version = "1.0.41", default-features = false }
alloy-consensus = { version = "1.1.3", default-features = false }
alloy-contract = { version = "1.1.3", default-features = false }
alloy-eips = { version = "1.1.3", default-features = false }
alloy-genesis = { version = "1.1.3", default-features = false }
alloy-json-rpc = { version = "1.1.3", default-features = false }
alloy-network = { version = "1.1.3", default-features = false }
alloy-network-primitives = { version = "1.1.3", default-features = false }
alloy-provider = { version = "1.1.3", features = ["reqwest", "debug-api"], default-features = false }
alloy-pubsub = { version = "1.1.3", default-features = false }
alloy-rpc-client = { version = "1.1.3", default-features = false }
alloy-rpc-types = { version = "1.1.3", features = ["eth"], default-features = false }
alloy-rpc-types-admin = { version = "1.1.3", default-features = false }
alloy-rpc-types-anvil = { version = "1.1.3", default-features = false }
alloy-rpc-types-beacon = { version = "1.1.3", default-features = false }
alloy-rpc-types-debug = { version = "1.1.3", default-features = false }
alloy-rpc-types-engine = { version = "1.1.3", default-features = false }
alloy-rpc-types-eth = { version = "1.1.3", default-features = false }
alloy-rpc-types-mev = { version = "1.1.3", default-features = false }
alloy-rpc-types-trace = { version = "1.1.3", default-features = false }
alloy-rpc-types-txpool = { version = "1.1.3", default-features = false }
alloy-serde = { version = "1.1.3", default-features = false }
alloy-signer = { version = "1.1.3", default-features = false }
alloy-signer-local = { version = "1.1.3", default-features = false }
alloy-transport = { version = "1.1.3" }
alloy-transport-http = { version = "1.1.3", features = ["reqwest-rustls-tls"], default-features = false }
alloy-transport-ipc = { version = "1.1.3", default-features = false }
alloy-transport-ws = { version = "1.1.3", default-features = false }

# op
alloy-op-evm = { version = "0.22.4", default-features = false }
alloy-op-hardforks = "0.4.0"
op-alloy-rpc-types = { version = "0.21.0", default-features = false }
op-alloy-rpc-types-engine = { version = "0.21.0", default-features = false }
op-alloy-network = { version = "0.21.0", default-features = false }
op-alloy-consensus = { version = "0.21.0", default-features = false }
op-alloy-rpc-jsonrpsee = { version = "0.21.0", default-features = false }
alloy-op-evm = { version = "0.25.0", default-features = false }
alloy-op-hardforks = "0.4.4"
op-alloy-rpc-types = { version = "0.23.1", default-features = false }
op-alloy-rpc-types-engine = { version = "0.23.1", default-features = false }
op-alloy-network = { version = "0.23.1", default-features = false }
op-alloy-consensus = { version = "0.23.1", default-features = false }
op-alloy-rpc-jsonrpsee = { version = "0.23.1", default-features = false }
op-alloy-flz = { version = "0.13.1", default-features = false }

# misc
@@ -552,8 +554,6 @@ dirs-next = "2.0.0"
dyn-clone = "1.0.17"
eyre = "0.6"
fdlimit = "0.3.0"
# pinned until downstream crypto libs migrate to 1.0 because 0.14.8 marks all types as deprecated
generic-array = "=0.14.7"
humantime = "2.1"
humantime-serde = "1.1"
itertools = { version = "0.14", default-features = false }
@@ -574,6 +574,7 @@ serde_json = { version = "1.0", default-features = false, features = ["alloc"] }
serde_with = { version = "3", default-features = false, features = ["macros"] }
sha2 = { version = "0.10", default-features = false }
shellexpand = "3.0.0"
shlex = "1.3"
smallvec = "1"
strum = { version = "0.27", default-features = false }
strum_macros = "0.27"
@@ -586,6 +587,7 @@ url = { version = "2.3", default-features = false }
zstd = "0.13"
byteorder = "1"
mini-moka = "0.10"
moka = "0.12"
tar-no-std = { version = "0.3.2", default-features = false }
miniz_oxide = { version = "0.8.4", default-features = false }
chrono = "0.4.41"
@@ -623,8 +625,8 @@ tower = "0.5"
tower-http = "0.6"

# p2p
discv5 = "0.9"
if-addrs = "0.13"
discv5 = "0.10"
if-addrs = "0.14"

# rpc
jsonrpsee = "0.26.0"
@@ -648,11 +650,14 @@ secp256k1 = { version = "0.30", default-features = false, features = ["global-co
rand_08 = { package = "rand", version = "0.8" }

# for eip-4844
c-kzg = "2.1.4"
c-kzg = "2.1.5"

# config
toml = "0.8"

# rocksdb
rocksdb = { version = "0.24" }

# otlp obs
opentelemetry_sdk = "0.31"
opentelemetry = "0.31"
@@ -664,6 +669,7 @@ tracing-opentelemetry = "0.32"
arbitrary = "1.3"
assert_matches = "1.5.0"
criterion = { package = "codspeed-criterion-compat", version = "2.7" }
insta = "1.41"
proptest = "1.7"
proptest-derive = "0.5"
similar-asserts = { version = "1.5.0", features = ["serde"] }
@@ -694,6 +700,7 @@ concat-kdf = "0.1.0"
crossbeam-channel = "0.5.13"
crossterm = "0.28.0"
csv = "1.3.0"
ctrlc = "3.4"
ctr = "0.9.2"
data-encoding = "2"
delegate = "0.13"
@@ -723,6 +730,7 @@ socket2 = { version = "0.5", default-features = false }
sysinfo = { version = "0.33", default-features = false }
tracing-journald = "0.3"
tracing-logfmt = "0.3.3"
tracing-samply = "0.1"
tracing-subscriber = { version = "0.3", default-features = false }
triehash = "0.8"
typenum = "1.15.0"
@@ -731,6 +739,9 @@ visibility = "0.1.1"
walkdir = "2.3.3"
vergen-git2 = "1.0.5"

# networking
ipnet = "2.11"

# [patch.crates-io]
# alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" }
# alloy-contract = { git = "https://github.com/alloy-rs/alloy", rev = "3049f232fbb44d1909883e154eb38ec5962f53a3" }
@@ -773,3 +784,6 @@ vergen-git2 = "1.0.5"
# jsonrpsee-server = { git = "https://github.com/paradigmxyz/jsonrpsee", branch = "matt/make-rpc-service-pub" }
# jsonrpsee-http-client = { git = "https://github.com/paradigmxyz/jsonrpsee", branch = "matt/make-rpc-service-pub" }
# jsonrpsee-types = { git = "https://github.com/paradigmxyz/jsonrpsee", branch = "matt/make-rpc-service-pub" }

# alloy-evm = { git = "https://github.com/alloy-rs/evm", rev = "a69f0b45a6b0286e16072cb8399e02ce6ceca353" }
# alloy-op-evm = { git = "https://github.com/alloy-rs/evm", rev = "a69f0b45a6b0286e16072cb8399e02ce6ceca353" }

@@ -7,7 +7,7 @@ LABEL org.opencontainers.image.source=https://github.com/paradigmxyz/reth
LABEL org.opencontainers.image.licenses="MIT OR Apache-2.0"

# Install system dependencies
RUN apt-get update && apt-get -y upgrade && apt-get install -y libclang-dev pkg-config
RUN apt-get update && apt-get install -y libclang-dev pkg-config

# Builds a cargo-chef plan
FROM chef AS planner
@@ -18,7 +18,7 @@ FROM chef AS builder
COPY --from=planner /app/recipe.json recipe.json

# Build profile, release by default
ARG BUILD_PROFILE=release
ARG BUILD_PROFILE=maxperf
ENV BUILD_PROFILE=$BUILD_PROFILE

# Extra Cargo flags

@@ -1,20 +1,25 @@
ARG RUST_VERSION=1
ARG RUST_TOOLCHAIN=1.89.0
FROM docker.io/rust:$RUST_TOOLCHAIN-trixie AS builder

FROM rust:$RUST_VERSION-bookworm AS builder

RUN apt-get update && apt-get install -y \
git \
libclang-dev=1:14.0-55.7~deb12u1

# Copy the project to the container
COPY ./ /app
ARG PROFILE
ARG VERSION
# Switch to snapshot repository to pin dependencies
RUN sed -i '/^# http/{N;s|^# \(http[^ ]*\)\nURIs: .*|# \1\nURIs: \1|}' /etc/apt/sources.list.d/debian.sources
RUN apt-get -o Acquire::Check-Valid-Until=false update && \
apt-get install -y \
libjemalloc-dev \
libclang-dev \
mold
WORKDIR /app
COPY . .
RUN RUSTFLAGS_REPRODUCIBLE_EXTRA="-Clink-arg=-fuse-ld=mold" make build-reth-reproducible && \
PROFILE=${PROFILE:-reproducible} VERSION=$VERSION make build-deb-x86_64-unknown-linux-gnu

RUN make build-reth-reproducible
RUN mv /app/target/x86_64-unknown-linux-gnu/reproducible/reth /reth
FROM scratch AS artifacts
COPY --from=builder /app/target/x86_64-unknown-linux-gnu/reproducible/reth /reth
COPY --from=builder /app/target/x86_64-unknown-linux-gnu/reproducible/*.deb /

# Create a minimal final image with just the binary
FROM gcr.io/distroless/cc-debian12:nonroot-6755e21ccd99ddead6edc8106ba03888cbeed41a
COPY --from=builder /reth /reth
FROM gcr.io/distroless/cc-debian13:nonroot-239cdd2c8a6b275b6a6f6ed1428c57de2fff3e50
COPY --from=artifacts /reth /reth
EXPOSE 30303 30303/udp 9001 8545 8546
ENTRYPOINT [ "/reth" ]

@@ -14,7 +14,7 @@ RUN cargo chef prepare --recipe-path recipe.json
FROM chef AS builder
COPY --from=planner /app/recipe.json recipe.json

ARG BUILD_PROFILE=release
ARG BUILD_PROFILE=maxperf
ENV BUILD_PROFILE=$BUILD_PROFILE

ARG RUSTFLAGS=""
@@ -31,7 +31,7 @@ RUN cargo build --profile $BUILD_PROFILE --features "$FEATURES" --bin op-reth --
RUN ls -la /app/target/$BUILD_PROFILE/op-reth
RUN cp /app/target/$BUILD_PROFILE/op-reth /app/op-reth

FROM ubuntu:22.04 AS runtime
FROM ubuntu AS runtime

RUN apt-get update && \
apt-get install -y ca-certificates libssl-dev pkg-config strace && \

26 Makefile
@@ -64,14 +64,13 @@ install-op: ## Build and install the op-reth binary under `$(CARGO_HOME)/bin`.
build: ## Build the reth binary into `target` directory.
cargo build --bin reth --features "$(FEATURES)" --profile "$(PROFILE)"

.PHONY: build-reth
build-reth: ## Build the reth binary (alias for build target).
$(MAKE) build

# Environment variables for reproducible builds
# Set timestamp from last git commit for reproducible builds
SOURCE_DATE ?= $(shell git log -1 --pretty=%ct)

# Extra RUSTFLAGS for reproducible builds. Can be overridden via the environment.
RUSTFLAGS_REPRODUCIBLE_EXTRA ?=

# `reproducible` only supports reth on x86_64-unknown-linux-gnu
build-%-reproducible:
@if [ "$*" != "reth" ]; then \
@@ -79,14 +78,18 @@ build-%-reproducible:
exit 1; \
fi
SOURCE_DATE_EPOCH=$(SOURCE_DATE) \
RUSTFLAGS="-C symbol-mangling-version=v0 -C strip=none -C link-arg=-Wl,--build-id=none -C metadata='' --remap-path-prefix $$(pwd)=." \
RUSTFLAGS="-C symbol-mangling-version=v0 -C strip=none -C link-arg=-Wl,--build-id=none -C metadata='' --remap-path-prefix $$(pwd)=. $(RUSTFLAGS_REPRODUCIBLE_EXTRA)" \
LC_ALL=C \
TZ=UTC \
cargo build --bin reth --features "$(FEATURES)" --profile "reproducible" --locked --target x86_64-unknown-linux-gnu
JEMALLOC_OVERRIDE=/usr/lib/x86_64-linux-gnu/libjemalloc.a \
cargo build --bin reth --features "$(FEATURES) jemalloc-unprefixed" --profile "reproducible" --locked --target x86_64-unknown-linux-gnu

.PHONY: build-debug
build-debug: ## Build the reth binary into `target/debug` directory.
cargo build --bin reth --features "$(FEATURES)"
.PHONY: build-debug-op
build-debug-op: ## Build the op-reth binary into `target/debug` directory.
cargo build --bin op-reth --features "$(FEATURES)" --manifest-path crates/optimism/bin/Cargo.toml

.PHONY: build-op
build-op: ## Build the op-reth binary into `target` directory.
@@ -387,9 +390,9 @@ db-tools: ## Compile MDBX debugging tools.
@echo "Run \"$(DB_TOOLS_DIR)/mdbx_chk\" for the MDBX db file integrity check."

.PHONY: update-book-cli
update-book-cli: build-debug ## Update book cli documentation.
update-book-cli: build-debug build-debug-op## Update book cli documentation.
@echo "Updating book cli doc..."
@./docs/cli/update.sh $(CARGO_TARGET_DIR)/debug/reth
@./docs/cli/update.sh $(CARGO_TARGET_DIR)/debug/reth $(CARGO_TARGET_DIR)/debug/op-reth

.PHONY: profiling
profiling: ## Builds `reth` with optimisations, but also symbols.
@@ -518,10 +521,3 @@ pr:
make update-book-cli && \
cargo docs --document-private-items && \
make test

check-features:
cargo hack check \
--package reth-codecs \
--package reth-primitives-traits \
--package reth-primitives \
--feature-powerset

96 bin/reth-bench-compare/Cargo.toml (normal file)
@@ -0,0 +1,96 @@
[package]
name = "reth-bench-compare"
version.workspace = true
edition.workspace = true
rust-version.workspace = true
license.workspace = true
homepage.workspace = true
repository.workspace = true
description = "Automated reth benchmark comparison between git references"

[lints]
workspace = true

[[bin]]
name = "reth-bench-compare"
path = "src/main.rs"

[dependencies]
# reth
reth-cli-runner.workspace = true
reth-cli-util.workspace = true
reth-node-core.workspace = true
reth-tracing.workspace = true
reth-chainspec.workspace = true

# alloy
alloy-provider = { workspace = true, features = ["reqwest-rustls-tls"], default-features = false }
alloy-rpc-types-eth.workspace = true
alloy-primitives.workspace = true

# CLI and argument parsing
clap = { workspace = true, features = ["derive", "env"] }
eyre.workspace = true

# Async runtime
tokio = { workspace = true, features = ["full"] }
tracing.workspace = true

# Serialization
serde = { workspace = true, features = ["derive"] }
serde_json.workspace = true

# Time handling
chrono = { workspace = true, features = ["serde"] }

# Path manipulation
shellexpand.workspace = true

# CSV handling
csv.workspace = true

# Process management
ctrlc.workspace = true
shlex.workspace = true

[target.'cfg(unix)'.dependencies]
nix = { version = "0.29", features = ["signal", "process"] }

[features]
default = ["jemalloc"]

asm-keccak = [
"reth-node-core/asm-keccak",
"alloy-primitives/asm-keccak",
]

jemalloc = [
"reth-cli-util/jemalloc",
"reth-node-core/jemalloc",
]
jemalloc-prof = ["reth-cli-util/jemalloc-prof"]
tracy-allocator = ["reth-cli-util/tracy-allocator"]

min-error-logs = [
"tracing/release_max_level_error",
"reth-node-core/min-error-logs",
]
min-warn-logs = [
"tracing/release_max_level_warn",
"reth-node-core/min-warn-logs",
]
min-info-logs = [
"tracing/release_max_level_info",
"reth-node-core/min-info-logs",
]
min-debug-logs = [
"tracing/release_max_level_debug",
"reth-node-core/min-debug-logs",
]
min-trace-logs = [
"tracing/release_max_level_trace",
"reth-node-core/min-trace-logs",
]

# no-op feature flag for switching between the `optimism` and default functionality in CI matrices
ethereum = []
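The `min-*-logs` features above forward to `tracing`'s `release_max_level_*` feature flags, which cap the log level at compile time rather than at runtime. A minimal sketch of the effect at a call site (illustrative function only, not part of this commit):

use tracing::{debug, info};

/// With `--features min-info-logs` (i.e. `tracing/release_max_level_info`),
/// the `debug!` call below is statically disabled in release builds, so its
/// formatting work is skipped entirely; `info!` and above are kept.
fn report_block(number: u64, gas_used: u64) {
    debug!(number, gas_used, "per-block details, stripped in release builds");
    info!(number, "block processed");
}
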
298 bin/reth-bench-compare/src/benchmark.rs (normal file)
@@ -0,0 +1,298 @@
|
||||
//! Benchmark execution using reth-bench.
|
||||
|
||||
use crate::cli::Args;
|
||||
use eyre::{eyre, Result, WrapErr};
|
||||
use std::{
|
||||
path::Path,
|
||||
sync::{Arc, Mutex},
|
||||
};
|
||||
use tokio::{
|
||||
fs::File as AsyncFile,
|
||||
io::{AsyncBufReadExt, AsyncWriteExt, BufReader},
|
||||
process::Command,
|
||||
};
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
/// Manages benchmark execution using reth-bench
|
||||
pub(crate) struct BenchmarkRunner {
|
||||
rpc_url: String,
|
||||
jwt_secret: String,
|
||||
wait_time: Option<String>,
|
||||
warmup_blocks: u64,
|
||||
}
|
||||
|
||||
impl BenchmarkRunner {
|
||||
/// Create a new `BenchmarkRunner` from CLI arguments
|
||||
pub(crate) fn new(args: &Args) -> Self {
|
||||
Self {
|
||||
rpc_url: args.get_rpc_url(),
|
||||
jwt_secret: args.jwt_secret_path().to_string_lossy().to_string(),
|
||||
wait_time: args.wait_time.clone(),
|
||||
warmup_blocks: args.get_warmup_blocks(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Clear filesystem caches (page cache, dentries, and inodes)
|
||||
pub(crate) async fn clear_fs_caches() -> Result<()> {
|
||||
info!("Clearing filesystem caches...");
|
||||
|
||||
// First sync to ensure all pending writes are flushed
|
||||
let sync_output =
|
||||
Command::new("sync").output().await.wrap_err("Failed to execute sync command")?;
|
||||
|
||||
if !sync_output.status.success() {
|
||||
return Err(eyre!("sync command failed"));
|
||||
}
|
||||
|
||||
// Drop caches - requires sudo/root permissions
|
||||
// 3 = drop pagecache, dentries, and inodes
|
||||
let drop_caches_cmd = Command::new("sudo")
|
||||
.args(["-n", "sh", "-c", "echo 3 > /proc/sys/vm/drop_caches"])
|
||||
.output()
|
||||
.await;
|
||||
|
||||
match drop_caches_cmd {
|
||||
Ok(output) if output.status.success() => {
|
||||
info!("Successfully cleared filesystem caches");
|
||||
Ok(())
|
||||
}
|
||||
Ok(output) => {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
if stderr.contains("sudo: a password is required") {
|
||||
warn!("Unable to clear filesystem caches: sudo password required");
|
||||
warn!(
|
||||
"For optimal benchmarking, configure passwordless sudo for cache clearing:"
|
||||
);
|
||||
warn!(" echo '$USER ALL=(ALL) NOPASSWD: /bin/sh -c echo\\\\ [0-9]\\\\ \\\\>\\\\ /proc/sys/vm/drop_caches' | sudo tee /etc/sudoers.d/drop_caches");
|
||||
Ok(())
|
||||
} else {
|
||||
Err(eyre!("Failed to clear filesystem caches: {}", stderr))
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Unable to clear filesystem caches: {}", e);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Run a warmup benchmark for cache warming
|
||||
pub(crate) async fn run_warmup(&self, from_block: u64) -> Result<()> {
|
||||
let to_block = from_block + self.warmup_blocks;
|
||||
info!(
|
||||
"Running warmup benchmark from block {} to {} ({} blocks)",
|
||||
from_block, to_block, self.warmup_blocks
|
||||
);
|
||||
|
||||
// Build the reth-bench command for warmup (no output flag)
|
||||
let mut cmd = Command::new("reth-bench");
|
||||
cmd.args([
|
||||
"new-payload-fcu",
|
||||
"--rpc-url",
|
||||
&self.rpc_url,
|
||||
"--jwt-secret",
|
||||
&self.jwt_secret,
|
||||
"--from",
|
||||
&from_block.to_string(),
|
||||
"--to",
|
||||
&to_block.to_string(),
|
||||
]);
|
||||
|
||||
// Add wait-time argument if provided
|
||||
if let Some(ref wait_time) = self.wait_time {
|
||||
cmd.args(["--wait-time", wait_time]);
|
||||
}
|
||||
|
||||
cmd.env("RUST_LOG_STYLE", "never")
|
||||
.stdout(std::process::Stdio::piped())
|
||||
.stderr(std::process::Stdio::piped())
|
||||
.kill_on_drop(true);
|
||||
|
||||
// Set process group for consistent signal handling
|
||||
#[cfg(unix)]
|
||||
{
|
||||
cmd.process_group(0);
|
||||
}
|
||||
|
||||
debug!("Executing warmup reth-bench command: {:?}", cmd);
|
||||
|
||||
// Execute the warmup benchmark
|
||||
let mut child = cmd.spawn().wrap_err("Failed to start warmup reth-bench process")?;
|
||||
|
||||
// Stream output at debug level
|
||||
if let Some(stdout) = child.stdout.take() {
|
||||
tokio::spawn(async move {
|
||||
let reader = BufReader::new(stdout);
|
||||
let mut lines = reader.lines();
|
||||
while let Ok(Some(line)) = lines.next_line().await {
|
||||
debug!("[WARMUP] {}", line);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if let Some(stderr) = child.stderr.take() {
|
||||
tokio::spawn(async move {
|
||||
let reader = BufReader::new(stderr);
|
||||
let mut lines = reader.lines();
|
||||
while let Ok(Some(line)) = lines.next_line().await {
|
||||
debug!("[WARMUP] {}", line);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
let status = child.wait().await.wrap_err("Failed to wait for warmup reth-bench")?;
|
||||
|
||||
if !status.success() {
|
||||
return Err(eyre!("Warmup reth-bench failed with exit code: {:?}", status.code()));
|
||||
}
|
||||
|
||||
info!("Warmup completed successfully");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Run a benchmark for the specified block range
|
||||
pub(crate) async fn run_benchmark(
|
||||
&self,
|
||||
from_block: u64,
|
||||
to_block: u64,
|
||||
output_dir: &Path,
|
||||
) -> Result<()> {
|
||||
info!(
|
||||
"Running benchmark from block {} to {} (output: {:?})",
|
||||
from_block, to_block, output_dir
|
||||
);
|
||||
|
||||
// Ensure output directory exists
|
||||
std::fs::create_dir_all(output_dir)
|
||||
.wrap_err_with(|| format!("Failed to create output directory: {output_dir:?}"))?;
|
||||
|
||||
// Create log file path for reth-bench output
|
||||
let log_file_path = output_dir.join("reth_bench.log");
|
||||
info!("reth-bench logs will be saved to: {:?}", log_file_path);
|
||||
|
||||
// Build the reth-bench command
|
||||
let mut cmd = Command::new("reth-bench");
|
||||
cmd.args([
|
||||
"new-payload-fcu",
|
||||
"--rpc-url",
|
||||
&self.rpc_url,
|
||||
"--jwt-secret",
|
||||
&self.jwt_secret,
|
||||
"--from",
|
||||
&from_block.to_string(),
|
||||
"--to",
|
||||
&to_block.to_string(),
|
||||
"--output",
|
||||
&output_dir.to_string_lossy(),
|
||||
]);
|
||||
|
||||
// Add wait-time argument if provided
|
||||
if let Some(ref wait_time) = self.wait_time {
|
||||
cmd.args(["--wait-time", wait_time]);
|
||||
}
|
||||
|
||||
cmd.env("RUST_LOG_STYLE", "never")
|
||||
.stdout(std::process::Stdio::piped())
|
||||
.stderr(std::process::Stdio::piped())
|
||||
.kill_on_drop(true);
|
||||
|
||||
// Set process group for consistent signal handling
|
||||
#[cfg(unix)]
|
||||
{
|
||||
cmd.process_group(0);
|
||||
}
|
||||
|
||||
// Debug log the command
|
||||
debug!("Executing reth-bench command: {:?}", cmd);
|
||||
|
||||
// Execute the benchmark
|
||||
let mut child = cmd.spawn().wrap_err("Failed to start reth-bench process")?;
|
||||
|
||||
// Capture stdout and stderr for error reporting
|
||||
let stdout_lines = Arc::new(Mutex::new(Vec::new()));
|
||||
let stderr_lines = Arc::new(Mutex::new(Vec::new()));
|
||||
|
||||
// Stream stdout with prefix at debug level, capture for error reporting, and write to log
|
||||
// file
|
||||
if let Some(stdout) = child.stdout.take() {
|
||||
let stdout_lines_clone = stdout_lines.clone();
|
||||
let log_file = AsyncFile::create(&log_file_path)
|
||||
.await
|
||||
.wrap_err(format!("Failed to create log file: {:?}", log_file_path))?;
|
||||
tokio::spawn(async move {
|
||||
let reader = BufReader::new(stdout);
|
||||
let mut lines = reader.lines();
|
||||
let mut log_file = log_file;
|
||||
while let Ok(Some(line)) = lines.next_line().await {
|
||||
debug!("[RETH-BENCH] {}", line);
|
||||
if let Ok(mut captured) = stdout_lines_clone.lock() {
|
||||
captured.push(line.clone());
|
||||
}
|
||||
// Write to log file (reth-bench output already has timestamps if needed)
|
||||
let log_line = format!("{}\n", line);
|
||||
if let Err(e) = log_file.write_all(log_line.as_bytes()).await {
|
||||
debug!("Failed to write to log file: {}", e);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Stream stderr with prefix at debug level, capture for error reporting, and write to log
|
||||
// file
|
||||
if let Some(stderr) = child.stderr.take() {
|
||||
let stderr_lines_clone = stderr_lines.clone();
|
||||
let log_file = AsyncFile::options()
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open(&log_file_path)
|
||||
.await
|
||||
.wrap_err(format!("Failed to open log file for stderr: {:?}", log_file_path))?;
|
||||
tokio::spawn(async move {
|
||||
let reader = BufReader::new(stderr);
|
||||
let mut lines = reader.lines();
|
||||
let mut log_file = log_file;
|
||||
while let Ok(Some(line)) = lines.next_line().await {
|
||||
debug!("[RETH-BENCH] {}", line);
|
||||
if let Ok(mut captured) = stderr_lines_clone.lock() {
|
||||
captured.push(line.clone());
|
||||
}
|
||||
// Write to log file (reth-bench output already has timestamps if needed)
|
||||
let log_line = format!("{}\n", line);
|
||||
if let Err(e) = log_file.write_all(log_line.as_bytes()).await {
|
||||
debug!("Failed to write to log file: {}", e);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
let status = child.wait().await.wrap_err("Failed to wait for reth-bench")?;
|
||||
|
||||
if !status.success() {
|
||||
// Print all captured output when command fails
|
||||
error!("reth-bench failed with exit code: {:?}", status.code());
|
||||
|
||||
if let Ok(stdout) = stdout_lines.lock() &&
|
||||
!stdout.is_empty()
|
||||
{
|
||||
error!("reth-bench stdout:");
|
||||
for line in stdout.iter() {
|
||||
error!(" {}", line);
|
||||
}
|
||||
}
|
||||
|
||||
if let Ok(stderr) = stderr_lines.lock() &&
|
||||
!stderr.is_empty()
|
||||
{
|
||||
error!("reth-bench stderr:");
|
||||
for line in stderr.iter() {
|
||||
error!(" {}", line);
|
||||
}
|
||||
}
|
||||
|
||||
return Err(eyre!("reth-bench failed with exit code: {:?}", status.code()));
|
||||
}
|
||||
|
||||
info!("Benchmark completed");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
946 bin/reth-bench-compare/src/cli.rs (normal file)
@@ -0,0 +1,946 @@
|
||||
//! CLI argument parsing and main command orchestration.
|
||||
|
||||
use alloy_provider::{Provider, ProviderBuilder};
|
||||
use clap::Parser;
|
||||
use eyre::{eyre, Result, WrapErr};
|
||||
use reth_chainspec::Chain;
|
||||
use reth_cli_runner::CliContext;
|
||||
use reth_node_core::args::{DatadirArgs, LogArgs, TraceArgs};
|
||||
use reth_tracing::FileWorkerGuard;
|
||||
use std::{net::TcpListener, path::PathBuf, str::FromStr};
|
||||
use tokio::process::Command;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use crate::{
|
||||
benchmark::BenchmarkRunner, comparison::ComparisonGenerator, compilation::CompilationManager,
|
||||
git::GitManager, node::NodeManager,
|
||||
};
|
||||
|
||||
/// Target for disabling the --debug.startup-sync-state-idle flag
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub(crate) enum DisableStartupSyncStateIdle {
|
||||
/// Disable for baseline and warmup runs
|
||||
Baseline,
|
||||
/// Disable for feature runs only
|
||||
Feature,
|
||||
/// Disable for all runs
|
||||
All,
|
||||
}
|
||||
|
||||
impl FromStr for DisableStartupSyncStateIdle {
|
||||
type Err = String;
|
||||
|
||||
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
|
||||
match s.to_lowercase().as_str() {
|
||||
"baseline" => Ok(Self::Baseline),
|
||||
"feature" => Ok(Self::Feature),
|
||||
"all" => Ok(Self::All),
|
||||
_ => Err(format!("Invalid value '{}'. Expected 'baseline', 'feature', or 'all'", s)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for DisableStartupSyncStateIdle {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Self::Baseline => write!(f, "baseline"),
|
||||
Self::Feature => write!(f, "feature"),
|
||||
Self::All => write!(f, "all"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Automated reth benchmark comparison between git references
|
||||
#[derive(Debug, Parser)]
|
||||
#[command(
|
||||
name = "reth-bench-compare",
|
||||
about = "Compare reth performance between two git references (branches or tags)",
|
||||
version
|
||||
)]
|
||||
pub(crate) struct Args {
|
||||
/// Git reference (branch or tag) to use as baseline for comparison
|
||||
#[arg(long, value_name = "REF")]
|
||||
pub baseline_ref: String,
|
||||
|
||||
/// Git reference (branch or tag) to compare against the baseline
|
||||
#[arg(long, value_name = "REF")]
|
||||
pub feature_ref: String,
|
||||
|
||||
#[command(flatten)]
|
||||
pub datadir: DatadirArgs,
|
||||
|
||||
/// Number of blocks to benchmark
|
||||
#[arg(long, value_name = "N", default_value = "100")]
|
||||
pub blocks: u64,
|
||||
|
||||
/// RPC endpoint for fetching block data
|
||||
#[arg(long, value_name = "URL")]
|
||||
pub rpc_url: Option<String>,
|
||||
|
||||
/// JWT secret file path
|
||||
///
|
||||
/// If not provided, defaults to `<datadir>/<chain>/jwt.hex`.
|
||||
/// If the file doesn't exist, it will be created automatically.
|
||||
#[arg(long, value_name = "PATH")]
|
||||
pub jwt_secret: Option<PathBuf>,
|
||||
|
||||
/// Output directory for benchmark results
|
||||
#[arg(long, value_name = "PATH", default_value = "./reth-bench-compare")]
|
||||
pub output_dir: String,
|
||||
|
||||
/// Skip git branch validation (useful for testing)
|
||||
#[arg(long)]
|
||||
pub skip_git_validation: bool,
|
||||
|
||||
/// Port for reth metrics endpoint
|
||||
#[arg(long, value_name = "PORT", default_value = "5005")]
|
||||
pub metrics_port: u16,
|
||||
|
||||
/// The chain this node is running.
|
||||
///
|
||||
/// Possible values are either a built-in chain name or numeric chain ID.
|
||||
#[arg(long, value_name = "CHAIN", default_value = "mainnet", required = false)]
|
||||
pub chain: Chain,
|
||||
|
||||
/// Run reth binary with sudo (for elevated privileges)
|
||||
#[arg(long)]
|
||||
pub sudo: bool,
|
||||
|
||||
/// Generate comparison charts using Python script
|
||||
#[arg(long)]
|
||||
pub draw: bool,
|
||||
|
||||
/// Enable CPU profiling with samply during benchmark runs
|
||||
#[arg(long)]
|
||||
pub profile: bool,
|
||||
|
||||
/// Wait time between engine API calls (passed to reth-bench)
|
||||
#[arg(long, value_name = "DURATION")]
|
||||
pub wait_time: Option<String>,
|
||||
|
||||
/// Number of blocks to run for cache warmup after clearing caches.
|
||||
/// If not specified, defaults to the same as --blocks
|
||||
#[arg(long, value_name = "N")]
|
||||
pub warmup_blocks: Option<u64>,
|
||||
|
||||
/// Disable filesystem cache clearing before warmup phase.
|
||||
/// By default, filesystem caches are cleared before warmup to ensure consistent benchmarks.
|
||||
#[arg(long)]
|
||||
pub no_clear_cache: bool,
|
||||
|
||||
#[command(flatten)]
|
||||
pub logs: LogArgs,
|
||||
|
||||
#[command(flatten)]
|
||||
pub traces: TraceArgs,
|
||||
|
||||
/// Maximum queue size for OTLP Batch Span Processor (traces).
|
||||
/// Higher values prevent trace drops when benchmarking many blocks.
|
||||
#[arg(
|
||||
long,
|
||||
value_name = "OTLP_BUFFER_SIZE",
|
||||
default_value = "32768",
|
||||
help_heading = "Tracing"
|
||||
)]
|
||||
pub otlp_max_queue_size: usize,
|
||||
|
||||
/// Additional arguments to pass to baseline reth node command
|
||||
///
|
||||
/// Example: `--baseline-args "--debug.tip 0xabc..."`
|
||||
#[arg(long, value_name = "ARGS")]
|
||||
pub baseline_args: Option<String>,
|
||||
|
||||
/// Additional arguments to pass to feature reth node command
|
||||
///
|
||||
/// Example: `--feature-args "--debug.tip 0xdef..."`
|
||||
#[arg(long, value_name = "ARGS")]
|
||||
pub feature_args: Option<String>,
|
||||
|
||||
/// Additional arguments to pass to reth node command (applied to both baseline and feature)
|
||||
///
|
||||
/// All arguments after `--` will be passed directly to the reth node command.
|
||||
/// Example: `reth-bench-compare --baseline-ref main --feature-ref pr/123 -- --debug.tip
|
||||
/// 0xabc...`
|
||||
#[arg(trailing_var_arg = true, allow_hyphen_values = true)]
|
||||
pub reth_args: Vec<String>,
|
||||
|
||||
/// Comma-separated list of features to enable during reth compilation
|
||||
///
|
||||
/// Example: `jemalloc,asm-keccak`
|
||||
#[arg(long, value_name = "FEATURES", default_value = "jemalloc,asm-keccak")]
|
||||
pub features: String,
|
||||
|
||||
/// Disable automatic --debug.startup-sync-state-idle flag for specific runs.
|
||||
/// Can be "baseline", "feature", or "all".
|
||||
/// By default, the flag is passed to warmup, baseline, and feature runs.
|
||||
/// When "baseline" is specified, the flag is NOT passed to warmup OR baseline.
|
||||
/// When "feature" is specified, the flag is NOT passed to feature.
|
||||
/// When "all" is specified, the flag is NOT passed to any run.
|
||||
#[arg(long, value_name = "TARGET")]
|
||||
pub disable_startup_sync_state_idle: Option<DisableStartupSyncStateIdle>,
|
||||
}
|
||||
|
||||
impl Args {
|
||||
/// Initializes tracing with the configured options.
|
||||
pub(crate) fn init_tracing(&self) -> Result<Option<FileWorkerGuard>> {
|
||||
let guard = self.logs.init_tracing()?;
|
||||
Ok(guard)
|
||||
}
|
||||
|
||||
/// Build additional arguments for a specific ref type, conditionally including
|
||||
/// --debug.startup-sync-state-idle based on the configuration
|
||||
pub(crate) fn build_additional_args(
|
||||
&self,
|
||||
ref_type: &str,
|
||||
base_args_str: Option<&String>,
|
||||
) -> Vec<String> {
|
||||
// Parse the base arguments string if provided
|
||||
let mut args = base_args_str.map(|s| parse_args_string(s)).unwrap_or_default();
|
||||
|
||||
// Determine if we should add the --debug.startup-sync-state-idle flag
|
||||
let should_add_flag = match self.disable_startup_sync_state_idle {
|
||||
None => true, // By default, add the flag
|
||||
Some(DisableStartupSyncStateIdle::All) => false,
|
||||
Some(DisableStartupSyncStateIdle::Baseline) => {
|
||||
ref_type != "baseline" && ref_type != "warmup"
|
||||
}
|
||||
Some(DisableStartupSyncStateIdle::Feature) => ref_type != "feature",
|
||||
};
|
||||
|
||||
if should_add_flag {
|
||||
args.push("--debug.startup-sync-state-idle".to_string());
|
||||
debug!("Adding --debug.startup-sync-state-idle flag for ref_type: {}", ref_type);
|
||||
} else {
|
||||
debug!("Skipping --debug.startup-sync-state-idle flag for ref_type: {}", ref_type);
|
||||
}
|
||||
|
||||
args
|
||||
}
|
||||
|
||||
/// Get the default RPC URL for a given chain
|
||||
const fn get_default_rpc_url(chain: &Chain) -> &'static str {
|
||||
match chain.id() {
|
||||
8453 => "https://base-mainnet.rpc.ithaca.xyz", // base
|
||||
84532 => "https://base-sepolia.rpc.ithaca.xyz", // base-sepolia
|
||||
27082 => "https://rpc.hoodi.ethpandaops.io", // hoodi
|
||||
_ => "https://reth-ethereum.ithaca.xyz/rpc", // mainnet and fallback
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the RPC URL, using chain-specific default if not provided
|
||||
pub(crate) fn get_rpc_url(&self) -> String {
|
||||
self.rpc_url.clone().unwrap_or_else(|| Self::get_default_rpc_url(&self.chain).to_string())
|
||||
}
|
||||
|
||||
/// Get the JWT secret path - either provided or derived from datadir
|
||||
pub(crate) fn jwt_secret_path(&self) -> PathBuf {
|
||||
match &self.jwt_secret {
|
||||
Some(path) => {
|
||||
let jwt_secret_str = path.to_string_lossy();
|
||||
let expanded = shellexpand::tilde(&jwt_secret_str);
|
||||
PathBuf::from(expanded.as_ref())
|
||||
}
|
||||
None => {
|
||||
// Use the same logic as reth: <datadir>/<chain>/jwt.hex
|
||||
let chain_path = self.datadir.clone().resolve_datadir(self.chain);
|
||||
chain_path.jwt()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the resolved datadir path using the chain
|
||||
pub(crate) fn datadir_path(&self) -> PathBuf {
|
||||
let chain_path = self.datadir.clone().resolve_datadir(self.chain);
|
||||
chain_path.data_dir().to_path_buf()
|
||||
}
|
||||
|
||||
/// Get the expanded output directory path
|
||||
pub(crate) fn output_dir_path(&self) -> PathBuf {
|
||||
let expanded = shellexpand::tilde(&self.output_dir);
|
||||
PathBuf::from(expanded.as_ref())
|
||||
}
|
||||
|
||||
/// Get the effective warmup blocks value - either specified or defaults to blocks
|
||||
pub(crate) fn get_warmup_blocks(&self) -> u64 {
|
||||
self.warmup_blocks.unwrap_or(self.blocks)
|
||||
}
|
||||
}
|
||||
|
||||
/// Validate that the RPC endpoint chain ID matches the specified chain
|
||||
async fn validate_rpc_chain_id(rpc_url: &str, expected_chain: &Chain) -> Result<()> {
|
||||
// Create Alloy provider
|
||||
let url = rpc_url.parse().map_err(|e| eyre!("Invalid RPC URL '{}': {}", rpc_url, e))?;
|
||||
let provider = ProviderBuilder::new().connect_http(url);
|
||||
|
||||
// Query chain ID using Alloy
|
||||
let rpc_chain_id = provider
|
||||
.get_chain_id()
|
||||
.await
|
||||
.map_err(|e| eyre!("Failed to get chain ID from RPC endpoint {}: {:?}", rpc_url, e))?;
|
||||
|
||||
let expected_chain_id = expected_chain.id();
|
||||
|
||||
if rpc_chain_id != expected_chain_id {
|
||||
return Err(eyre!(
|
||||
"RPC endpoint chain ID mismatch!\n\
|
||||
Expected: {} (chain: {})\n\
|
||||
Found: {} at RPC endpoint: {}\n\n\
|
||||
Please use an RPC endpoint for the correct network or change the --chain argument.",
|
||||
expected_chain_id,
|
||||
expected_chain,
|
||||
rpc_chain_id,
|
||||
rpc_url
|
||||
));
|
||||
}
|
||||
|
||||
info!("Validated RPC endpoint chain ID");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Main comparison workflow execution
|
||||
pub(crate) async fn run_comparison(args: Args, _ctx: CliContext) -> Result<()> {
|
||||
// Create a new process group for this process and all its children
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use nix::unistd::{getpid, setpgid};
|
||||
if let Err(e) = setpgid(getpid(), getpid()) {
|
||||
warn!("Failed to create process group: {e}");
|
||||
}
|
||||
}
|
||||
|
||||
info!(
|
||||
"Starting benchmark comparison between '{}' and '{}'",
|
||||
args.baseline_ref, args.feature_ref
|
||||
);
|
||||
|
||||
if args.sudo {
|
||||
info!("Running in sudo mode - reth commands will use elevated privileges");
|
||||
}
|
||||
|
||||
// Initialize Git manager
|
||||
let git_manager = GitManager::new()?;
|
||||
// Fetch all branches, tags, and commits
|
||||
git_manager.fetch_all()?;
|
||||
|
||||
// Initialize compilation manager
|
||||
let output_dir = args.output_dir_path();
|
||||
let compilation_manager = CompilationManager::new(
|
||||
git_manager.repo_root().to_string(),
|
||||
output_dir.clone(),
|
||||
git_manager.clone(),
|
||||
args.features.clone(),
|
||||
args.profile,
|
||||
)?;
|
||||
// Initialize node manager
|
||||
let mut node_manager = NodeManager::new(&args);
|
||||
|
||||
let benchmark_runner = BenchmarkRunner::new(&args);
|
||||
let mut comparison_generator = ComparisonGenerator::new(&args);
|
||||
|
||||
// Set the comparison directory in node manager to align with results directory
|
||||
node_manager.set_comparison_dir(comparison_generator.get_output_dir());
|
||||
|
||||
// Store original git state for restoration
|
||||
let original_ref = git_manager.get_current_ref()?;
|
||||
info!("Current git reference: {}", original_ref);
|
||||
|
||||
// Validate git state
|
||||
if !args.skip_git_validation {
|
||||
git_manager.validate_clean_state()?;
|
||||
git_manager.validate_refs(&[&args.baseline_ref, &args.feature_ref])?;
|
||||
}
|
||||
|
||||
// Validate RPC endpoint chain ID matches the specified chain
|
||||
let rpc_url = args.get_rpc_url();
|
||||
validate_rpc_chain_id(&rpc_url, &args.chain).await?;
|
||||
|
||||
// Setup signal handling for cleanup
|
||||
let git_manager_cleanup = git_manager.clone();
|
||||
let original_ref_cleanup = original_ref.clone();
|
||||
ctrlc::set_handler(move || {
|
||||
eprintln!("Received interrupt signal, cleaning up...");
|
||||
|
||||
// Send SIGTERM to entire process group to ensure all children exit
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use nix::{
|
||||
sys::signal::{kill, Signal},
|
||||
unistd::Pid,
|
||||
};
|
||||
|
||||
// Send SIGTERM to our process group (negative PID = process group)
|
||||
let current_pid = std::process::id() as i32;
|
||||
let pgid = Pid::from_raw(-current_pid);
|
||||
if let Err(e) = kill(pgid, Signal::SIGTERM) {
|
||||
eprintln!("Failed to send SIGTERM to process group: {e}");
|
||||
}
|
||||
}
|
||||
|
||||
// Give a moment for any ongoing git operations to complete
|
||||
std::thread::sleep(std::time::Duration::from_millis(200));
|
||||
|
||||
if let Err(e) = git_manager_cleanup.switch_ref(&original_ref_cleanup) {
|
||||
eprintln!("Failed to restore original git reference: {e}");
|
||||
eprintln!("You may need to manually run: git checkout {original_ref_cleanup}");
|
||||
}
|
||||
std::process::exit(1);
|
||||
})?;
|
||||
|
||||
let result = run_benchmark_workflow(
|
||||
&git_manager,
|
||||
&compilation_manager,
|
||||
&mut node_manager,
|
||||
&benchmark_runner,
|
||||
&mut comparison_generator,
|
||||
&args,
|
||||
)
|
||||
.await;
|
||||
|
||||
// Always restore original git reference
|
||||
info!("Restoring original git reference: {}", original_ref);
|
||||
git_manager.switch_ref(&original_ref)?;
|
||||
|
||||
// Handle any errors from the workflow
|
||||
result?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Parse a string of arguments into a vector of strings
|
||||
fn parse_args_string(args_str: &str) -> Vec<String> {
|
||||
shlex::split(args_str).unwrap_or_else(|| {
|
||||
// Fallback to simple whitespace splitting if shlex fails
|
||||
args_str.split_whitespace().map(|s| s.to_string()).collect()
|
||||
})
|
||||
}
|
||||
|
||||
/// Run compilation phase for both baseline and feature binaries
|
||||
async fn run_compilation_phase(
|
||||
git_manager: &GitManager,
|
||||
compilation_manager: &CompilationManager,
|
||||
args: &Args,
|
||||
is_optimism: bool,
|
||||
) -> Result<(String, String)> {
|
||||
info!("=== Running compilation phase ===");
|
||||
|
||||
// Ensure required tools are available (only need to check once)
|
||||
compilation_manager.ensure_reth_bench_available()?;
|
||||
if args.profile {
|
||||
compilation_manager.ensure_samply_available()?;
|
||||
}
|
||||
|
||||
let refs = [&args.baseline_ref, &args.feature_ref];
|
||||
let ref_types = ["baseline", "feature"];
|
||||
|
||||
// First, resolve all refs to commits using a HashMap to avoid race conditions where a ref is
|
||||
// pushed to mid-run.
|
||||
let mut ref_commits = std::collections::HashMap::new();
|
||||
for &git_ref in &refs {
|
||||
if !ref_commits.contains_key(git_ref) {
|
||||
git_manager.switch_ref(git_ref)?;
|
||||
let commit = git_manager.get_current_commit()?;
|
||||
ref_commits.insert(git_ref.clone(), commit);
|
||||
info!("Reference {} resolves to commit: {}", git_ref, &ref_commits[git_ref][..8]);
|
||||
}
|
||||
}
|
||||
|
||||
// Now compile each ref using the resolved commits
|
||||
for (i, &git_ref) in refs.iter().enumerate() {
|
||||
let ref_type = ref_types[i];
|
||||
let commit = &ref_commits[git_ref];
|
||||
|
||||
info!(
|
||||
"Compiling {} binary for reference: {} (commit: {})",
|
||||
ref_type,
|
||||
git_ref,
|
||||
&commit[..8]
|
||||
);
|
||||
|
||||
// Switch to target reference
|
||||
git_manager.switch_ref(git_ref)?;
|
||||
|
||||
// Compile reth (with caching)
|
||||
compilation_manager.compile_reth(commit, is_optimism)?;
|
||||
|
||||
info!("Completed compilation for {} reference", ref_type);
|
||||
}
|
||||
|
||||
let baseline_commit = ref_commits[&args.baseline_ref].clone();
|
||||
let feature_commit = ref_commits[&args.feature_ref].clone();
|
||||
|
||||
info!("Compilation phase completed");
|
||||
Ok((baseline_commit, feature_commit))
|
||||
}
|
||||
|
||||
/// Run warmup phase to warm up caches before benchmarking
|
||||
async fn run_warmup_phase(
|
||||
git_manager: &GitManager,
|
||||
compilation_manager: &CompilationManager,
|
||||
node_manager: &mut NodeManager,
|
||||
benchmark_runner: &BenchmarkRunner,
|
||||
args: &Args,
|
||||
is_optimism: bool,
|
||||
baseline_commit: &str,
|
||||
) -> Result<()> {
|
||||
info!("=== Running warmup phase ===");
|
||||
|
||||
// Use baseline for warmup
|
||||
let warmup_ref = &args.baseline_ref;
|
||||
|
||||
// Switch to baseline reference
|
||||
git_manager.switch_ref(warmup_ref)?;
|
||||
|
||||
// Get the cached binary path for baseline (should already be compiled)
|
||||
let binary_path =
|
||||
compilation_manager.get_cached_binary_path_for_commit(baseline_commit, is_optimism);
|
||||
|
||||
// Verify the cached binary exists
|
||||
if !binary_path.exists() {
|
||||
return Err(eyre!(
|
||||
"Cached baseline binary not found at {:?}. Compilation phase should have created it.",
|
||||
binary_path
|
||||
));
|
||||
}
|
||||
|
||||
info!("Using cached baseline binary for warmup (commit: {})", &baseline_commit[..8]);
|
||||
|
||||
// Build additional args with conditional --debug.startup-sync-state-idle flag
|
||||
let additional_args = args.build_additional_args("warmup", args.baseline_args.as_ref());
|
||||
|
||||
// Start reth node for warmup (command is not stored for warmup phase)
|
||||
let (mut node_process, _warmup_command) =
|
||||
node_manager.start_node(&binary_path, warmup_ref, "warmup", &additional_args).await?;
|
||||
|
||||
// Wait for node to be ready and get its current tip
|
||||
let current_tip = node_manager.wait_for_node_ready_and_get_tip().await?;
|
||||
info!("Warmup node is ready at tip: {}", current_tip);
|
||||
|
||||
// Store the tip we'll unwind back to
|
||||
let original_tip = current_tip;
|
||||
|
||||
// Clear filesystem caches before warmup run only (unless disabled)
|
||||
if args.no_clear_cache {
|
||||
info!("Skipping filesystem cache clearing (--no-clear-cache flag set)");
|
||||
} else {
|
||||
BenchmarkRunner::clear_fs_caches().await?;
|
||||
}
|
||||
|
||||
// Run warmup to warm up caches
|
||||
benchmark_runner.run_warmup(current_tip).await?;
|
||||
|
||||
// Stop node before unwinding (node must be stopped to release database lock)
|
||||
node_manager.stop_node(&mut node_process).await?;
|
||||
|
||||
// Unwind back to starting block after warmup
|
||||
node_manager.unwind_to_block(original_tip).await?;
|
||||
|
||||
info!("Warmup phase completed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Execute the complete benchmark workflow for both branches
|
||||
async fn run_benchmark_workflow(
|
||||
git_manager: &GitManager,
|
||||
compilation_manager: &CompilationManager,
|
||||
node_manager: &mut NodeManager,
|
||||
benchmark_runner: &BenchmarkRunner,
|
||||
comparison_generator: &mut ComparisonGenerator,
|
||||
args: &Args,
|
||||
) -> Result<()> {
|
||||
// Detect if this is an Optimism chain once at the beginning
|
||||
let rpc_url = args.get_rpc_url();
|
||||
let is_optimism = compilation_manager.detect_optimism_chain(&rpc_url).await?;
|
||||
|
||||
// Run compilation phase for both binaries
|
||||
let (baseline_commit, feature_commit) =
|
||||
run_compilation_phase(git_manager, compilation_manager, args, is_optimism).await?;
|
||||
|
||||
// Run warmup phase before benchmarking (skip if warmup_blocks is 0)
|
||||
if args.get_warmup_blocks() > 0 {
|
||||
run_warmup_phase(
|
||||
git_manager,
|
||||
compilation_manager,
|
||||
node_manager,
|
||||
benchmark_runner,
|
||||
args,
|
||||
is_optimism,
|
||||
&baseline_commit,
|
||||
)
|
||||
.await?;
|
||||
} else {
|
||||
info!("Skipping warmup phase (warmup_blocks is 0)");
|
||||
}
|
||||
|
||||
let refs = [&args.baseline_ref, &args.feature_ref];
|
||||
let ref_types = ["baseline", "feature"];
|
||||
let commits = [&baseline_commit, &feature_commit];
|
||||
|
||||
for (i, &git_ref) in refs.iter().enumerate() {
|
||||
let ref_type = ref_types[i];
|
||||
let commit = commits[i];
|
||||
info!("=== Processing {} reference: {} ===", ref_type, git_ref);
|
||||
|
||||
// Switch to target reference
|
||||
git_manager.switch_ref(git_ref)?;
|
||||
|
||||
// Get the cached binary path for this git reference (should already be compiled)
|
||||
let binary_path =
|
||||
compilation_manager.get_cached_binary_path_for_commit(commit, is_optimism);
|
||||
|
||||
// Verify the cached binary exists
|
||||
if !binary_path.exists() {
|
||||
return Err(eyre!(
|
||||
"Cached {} binary not found at {:?}. Compilation phase should have created it.",
|
||||
ref_type,
|
||||
binary_path
|
||||
));
|
||||
}
|
||||
|
||||
info!("Using cached {} binary (commit: {})", ref_type, &commit[..8]);
|
||||
|
||||
// Get reference-specific base arguments string
|
||||
let base_args_str = match ref_type {
|
||||
"baseline" => args.baseline_args.as_ref(),
|
||||
"feature" => args.feature_args.as_ref(),
|
||||
_ => None,
|
||||
};
|
||||
|
||||
// Build additional args with conditional --debug.startup-sync-state-idle flag
|
||||
let additional_args = args.build_additional_args(ref_type, base_args_str);
|
||||
|
||||
// Start reth node and capture the command for reporting
|
||||
let (mut node_process, reth_command) =
|
||||
node_manager.start_node(&binary_path, git_ref, ref_type, &additional_args).await?;
|
||||
|
||||
// Wait for node to be ready and get its current tip (wherever it is)
|
||||
let current_tip = node_manager.wait_for_node_ready_and_get_tip().await?;
|
||||
info!("Node is ready at tip: {}", current_tip);
|
||||
|
||||
// Store the tip we'll unwind back to
|
||||
let original_tip = current_tip;
|
||||
|
||||
// Calculate benchmark range
|
||||
// Note: reth-bench has an off-by-one error where it consumes the first block
|
||||
// of the range, so we add 1 to compensate and get exactly args.blocks blocks
|
||||
let from_block = original_tip;
|
||||
let to_block = original_tip + args.blocks;
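// Worked example with hypothetical numbers, per the note above: if the node's tip is 1_000 and
// `args.blocks` is 100, the range handed to reth-bench is 1000..=1100; the first block of the
// range is consumed, so exactly blocks 1001..=1100 (100 payloads) are replayed.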
|
||||
|
||||
// Run benchmark
|
||||
let output_dir = comparison_generator.get_ref_output_dir(ref_type);
|
||||
|
||||
// Capture start timestamp for the benchmark run
|
||||
let benchmark_start = chrono::Utc::now();
|
||||
|
||||
// Run benchmark (comparison logic is handled separately by ComparisonGenerator)
|
||||
benchmark_runner.run_benchmark(from_block, to_block, &output_dir).await?;
|
||||
|
||||
// Capture end timestamp for the benchmark run
|
||||
let benchmark_end = chrono::Utc::now();
|
||||
|
||||
// Stop node
|
||||
node_manager.stop_node(&mut node_process).await?;
|
||||
|
||||
// Unwind back to original tip
|
||||
node_manager.unwind_to_block(original_tip).await?;
|
||||
|
||||
// Store results for comparison
|
||||
comparison_generator.add_ref_results(ref_type, &output_dir)?;
|
||||
|
||||
// Set the benchmark run timestamps and reth command
|
||||
comparison_generator.set_ref_timestamps(ref_type, benchmark_start, benchmark_end)?;
|
||||
comparison_generator.set_ref_command(ref_type, reth_command)?;
|
||||
|
||||
info!("Completed {} reference benchmark", ref_type);
|
||||
}
|
||||
|
||||
// Generate comparison report
|
||||
comparison_generator.generate_comparison_report().await?;
|
||||
|
||||
// Generate charts if requested
|
||||
if args.draw {
|
||||
generate_comparison_charts(comparison_generator).await?;
|
||||
}
|
||||
|
||||
// Start samply servers if profiling was enabled
|
||||
if args.profile {
|
||||
start_samply_servers(args).await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Generate comparison charts using the Python script
|
||||
async fn generate_comparison_charts(comparison_generator: &ComparisonGenerator) -> Result<()> {
|
||||
info!("Generating comparison charts with Python script...");
|
||||
|
||||
let baseline_output_dir = comparison_generator.get_ref_output_dir("baseline");
|
||||
let feature_output_dir = comparison_generator.get_ref_output_dir("feature");
|
||||
|
||||
let baseline_csv = baseline_output_dir.join("combined_latency.csv");
|
||||
let feature_csv = feature_output_dir.join("combined_latency.csv");
|
||||
|
||||
// Check if CSV files exist
|
||||
if !baseline_csv.exists() {
|
||||
return Err(eyre!("Baseline CSV not found: {:?}", baseline_csv));
|
||||
}
|
||||
if !feature_csv.exists() {
|
||||
return Err(eyre!("Feature CSV not found: {:?}", feature_csv));
|
||||
}
|
||||
|
||||
let output_dir = comparison_generator.get_output_dir();
|
||||
let chart_output = output_dir.join("latency_comparison.png");
|
||||
|
||||
let script_path = "bin/reth-bench/scripts/compare_newpayload_latency.py";
|
||||
|
||||
info!("Running Python comparison script with uv...");
|
||||
let mut cmd = Command::new("uv");
|
||||
cmd.args([
|
||||
"run",
|
||||
script_path,
|
||||
&baseline_csv.to_string_lossy(),
|
||||
&feature_csv.to_string_lossy(),
|
||||
"-o",
|
||||
&chart_output.to_string_lossy(),
|
||||
]);
|
||||
|
||||
// Set process group for consistent signal handling
|
||||
#[cfg(unix)]
|
||||
{
|
||||
cmd.process_group(0);
|
||||
}
|
||||
|
||||
let output = cmd.output().await.map_err(|e| {
|
||||
eyre!("Failed to execute Python script with uv: {}. Make sure uv is installed.", e)
|
||||
})?;
|
||||
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
let stdout = String::from_utf8_lossy(&output.stdout);
|
||||
return Err(eyre!(
|
||||
"Python script failed with exit code {:?}:\nstdout: {}\nstderr: {}",
|
||||
output.status.code(),
|
||||
stdout,
|
||||
stderr
|
||||
));
|
||||
}
|
||||
|
||||
let stdout = String::from_utf8_lossy(&output.stdout);
|
||||
if !stdout.trim().is_empty() {
|
||||
info!("Python script output:\n{}", stdout);
|
||||
}
|
||||
|
||||
info!("Comparison chart generated: {:?}", chart_output);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Start samply servers for viewing profiles
|
||||
async fn start_samply_servers(args: &Args) -> Result<()> {
|
||||
info!("Starting samply servers for profile viewing...");
|
||||
|
||||
let output_dir = args.output_dir_path();
|
||||
let profiles_dir = output_dir.join("profiles");
|
||||
|
||||
// Build profile paths
|
||||
let baseline_profile = profiles_dir.join("baseline.json.gz");
|
||||
let feature_profile = profiles_dir.join("feature.json.gz");
|
||||
|
||||
// Check if profiles exist
|
||||
if !baseline_profile.exists() {
|
||||
warn!("Baseline profile not found: {:?}", baseline_profile);
|
||||
return Ok(());
|
||||
}
|
||||
if !feature_profile.exists() {
|
||||
warn!("Feature profile not found: {:?}", feature_profile);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Find two consecutive available ports starting from 3000
|
||||
let (baseline_port, feature_port) = find_consecutive_ports(3000)?;
|
||||
info!("Found available ports: {} and {}", baseline_port, feature_port);
|
||||
|
||||
// Get samply path
|
||||
let samply_path = get_samply_path().await?;
|
||||
|
||||
// Start baseline server
|
||||
info!("Starting samply server for baseline '{}' on port {}", args.baseline_ref, baseline_port);
|
||||
let mut baseline_cmd = Command::new(&samply_path);
|
||||
baseline_cmd
|
||||
.args(["load", "--port", &baseline_port.to_string(), &baseline_profile.to_string_lossy()])
|
||||
.kill_on_drop(true);
|
||||
|
||||
// Set process group for consistent signal handling
|
||||
#[cfg(unix)]
|
||||
{
|
||||
baseline_cmd.process_group(0);
|
||||
}
|
||||
|
||||
// Conditionally pipe output based on log level
|
||||
if tracing::enabled!(tracing::Level::DEBUG) {
|
||||
baseline_cmd.stdout(std::process::Stdio::piped()).stderr(std::process::Stdio::piped());
|
||||
} else {
|
||||
baseline_cmd.stdout(std::process::Stdio::null()).stderr(std::process::Stdio::null());
|
||||
}
|
||||
|
||||
// Debug log the command
|
||||
debug!("Executing samply load command: {:?}", baseline_cmd);
|
||||
|
||||
let mut baseline_child =
|
||||
baseline_cmd.spawn().wrap_err("Failed to start samply server for baseline")?;
|
||||
|
||||
// Stream baseline samply output if debug logging is enabled
|
||||
if tracing::enabled!(tracing::Level::DEBUG) {
|
||||
if let Some(stdout) = baseline_child.stdout.take() {
|
||||
tokio::spawn(async move {
|
||||
use tokio::io::{AsyncBufReadExt, BufReader};
|
||||
let reader = BufReader::new(stdout);
|
||||
let mut lines = reader.lines();
|
||||
while let Ok(Some(line)) = lines.next_line().await {
|
||||
debug!("[SAMPLY-BASELINE] {}", line);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if let Some(stderr) = baseline_child.stderr.take() {
|
||||
tokio::spawn(async move {
|
||||
use tokio::io::{AsyncBufReadExt, BufReader};
|
||||
let reader = BufReader::new(stderr);
|
||||
let mut lines = reader.lines();
|
||||
while let Ok(Some(line)) = lines.next_line().await {
|
||||
debug!("[SAMPLY-BASELINE] {}", line);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Start feature server
|
||||
info!("Starting samply server for feature '{}' on port {}", args.feature_ref, feature_port);
|
||||
let mut feature_cmd = Command::new(&samply_path);
|
||||
feature_cmd
|
||||
.args(["load", "--port", &feature_port.to_string(), &feature_profile.to_string_lossy()])
|
||||
.kill_on_drop(true);
|
||||
|
||||
// Set process group for consistent signal handling
|
||||
#[cfg(unix)]
|
||||
{
|
||||
feature_cmd.process_group(0);
|
||||
}
|
||||
|
||||
// Conditionally pipe output based on log level
|
||||
if tracing::enabled!(tracing::Level::DEBUG) {
|
||||
feature_cmd.stdout(std::process::Stdio::piped()).stderr(std::process::Stdio::piped());
|
||||
} else {
|
||||
feature_cmd.stdout(std::process::Stdio::null()).stderr(std::process::Stdio::null());
|
||||
}
|
||||
|
||||
// Debug log the command
|
||||
debug!("Executing samply load command: {:?}", feature_cmd);
|
||||
|
||||
let mut feature_child =
|
||||
feature_cmd.spawn().wrap_err("Failed to start samply server for feature")?;
|
||||
|
||||
// Stream feature samply output if debug logging is enabled
|
||||
if tracing::enabled!(tracing::Level::DEBUG) {
|
||||
if let Some(stdout) = feature_child.stdout.take() {
|
||||
tokio::spawn(async move {
|
||||
use tokio::io::{AsyncBufReadExt, BufReader};
|
||||
let reader = BufReader::new(stdout);
|
||||
let mut lines = reader.lines();
|
||||
while let Ok(Some(line)) = lines.next_line().await {
|
||||
debug!("[SAMPLY-FEATURE] {}", line);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if let Some(stderr) = feature_child.stderr.take() {
|
||||
tokio::spawn(async move {
|
||||
use tokio::io::{AsyncBufReadExt, BufReader};
|
||||
let reader = BufReader::new(stderr);
|
||||
let mut lines = reader.lines();
|
||||
while let Ok(Some(line)) = lines.next_line().await {
|
||||
debug!("[SAMPLY-FEATURE] {}", line);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Give servers time to start
|
||||
tokio::time::sleep(std::time::Duration::from_secs(2)).await;
|
||||
|
||||
// Print access information
|
||||
println!("\n=== SAMPLY PROFILE SERVERS STARTED ===");
|
||||
println!("Baseline '{}': http://127.0.0.1:{}", args.baseline_ref, baseline_port);
|
||||
println!("Feature '{}': http://127.0.0.1:{}", args.feature_ref, feature_port);
|
||||
println!("\nOpen the URLs in your browser to view the profiles.");
|
||||
println!("Press Ctrl+C to stop the servers and exit.");
|
||||
println!("=========================================\n");
|
||||
|
||||
// Wait for Ctrl+C or process termination
|
||||
let ctrl_c = tokio::signal::ctrl_c();
|
||||
let baseline_wait = baseline_child.wait();
|
||||
let feature_wait = feature_child.wait();
|
||||
|
||||
tokio::select! {
|
||||
_ = ctrl_c => {
|
||||
info!("Received Ctrl+C, shutting down samply servers...");
|
||||
}
|
||||
result = baseline_wait => {
|
||||
match result {
|
||||
Ok(status) => info!("Baseline samply server exited with status: {}", status),
|
||||
Err(e) => warn!("Baseline samply server error: {}", e),
|
||||
}
|
||||
}
|
||||
result = feature_wait => {
|
||||
match result {
|
||||
Ok(status) => info!("Feature samply server exited with status: {}", status),
|
||||
Err(e) => warn!("Feature samply server error: {}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure both processes are terminated
|
||||
let _ = baseline_child.kill().await;
|
||||
let _ = feature_child.kill().await;
|
||||
|
||||
info!("Samply servers stopped.");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Find two consecutive available ports starting from the given port
|
||||
fn find_consecutive_ports(start_port: u16) -> Result<(u16, u16)> {
|
||||
for port in start_port..=65533 {
|
||||
// Check if both port and port+1 are available
|
||||
if is_port_available(port) && is_port_available(port + 1) {
|
||||
return Ok((port, port + 1));
|
||||
}
|
||||
}
|
||||
Err(eyre!("Could not find two consecutive available ports starting from {}", start_port))
|
||||
}
|
||||
|
||||
/// Check if a port is available by attempting to bind to it
|
||||
fn is_port_available(port: u16) -> bool {
|
||||
TcpListener::bind(("127.0.0.1", port)).is_ok()
|
||||
}
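// A minimal sketch of how the two helpers above interact (hypothetical port state): if port
// 3000 is already bound but 3001 and 3002 are free, the scan skips 3000 and returns
// (3001, 3002), so the baseline and feature samply servers always receive adjacent ports.
//
// let (baseline_port, feature_port) = find_consecutive_ports(3000)?;
// assert_eq!(feature_port, baseline_port + 1);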
|
||||
|
||||
/// Get the absolute path to samply using the `which` command
|
||||
async fn get_samply_path() -> Result<String> {
|
||||
let output = Command::new("which")
|
||||
.arg("samply")
|
||||
.output()
|
||||
.await
|
||||
.wrap_err("Failed to execute 'which samply' command")?;
|
||||
|
||||
if !output.status.success() {
|
||||
return Err(eyre!("samply not found in PATH"));
|
||||
}
|
||||
|
||||
let samply_path = String::from_utf8(output.stdout)
|
||||
.wrap_err("samply path is not valid UTF-8")?
|
||||
.trim()
|
||||
.to_string();
|
||||
|
||||
if samply_path.is_empty() {
|
||||
return Err(eyre!("which samply returned empty path"));
|
||||
}
|
||||
|
||||
Ok(samply_path)
|
||||
}
|
||||
710
bin/reth-bench-compare/src/comparison.rs
Normal file
@@ -0,0 +1,710 @@
|
||||
//! Results comparison and report generation.
|
||||
|
||||
use crate::cli::Args;
|
||||
use chrono::{DateTime, Utc};
|
||||
use csv::Reader;
|
||||
use eyre::{eyre, Result, WrapErr};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
cmp::Ordering,
|
||||
collections::HashMap,
|
||||
fs,
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
use tracing::{info, warn};
|
||||
|
||||
/// Manages comparison between baseline and feature reference results
|
||||
pub(crate) struct ComparisonGenerator {
|
||||
output_dir: PathBuf,
|
||||
timestamp: String,
|
||||
baseline_ref_name: String,
|
||||
feature_ref_name: String,
|
||||
baseline_results: Option<BenchmarkResults>,
|
||||
feature_results: Option<BenchmarkResults>,
|
||||
baseline_command: Option<String>,
|
||||
feature_command: Option<String>,
|
||||
}
|
||||
|
||||
/// Represents the results from a single benchmark run
|
||||
#[derive(Debug, Clone)]
|
||||
pub(crate) struct BenchmarkResults {
|
||||
pub ref_name: String,
|
||||
pub combined_latency_data: Vec<CombinedLatencyRow>,
|
||||
pub summary: BenchmarkSummary,
|
||||
pub start_timestamp: Option<DateTime<Utc>>,
|
||||
pub end_timestamp: Option<DateTime<Utc>>,
|
||||
}
|
||||
|
||||
/// Combined latency CSV row structure
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
pub(crate) struct CombinedLatencyRow {
|
||||
pub block_number: u64,
|
||||
pub transaction_count: u64,
|
||||
pub gas_used: u64,
|
||||
pub new_payload_latency: u128,
|
||||
}
|
||||
|
||||
/// Total gas CSV row structure
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
pub(crate) struct TotalGasRow {
|
||||
pub block_number: u64,
|
||||
pub transaction_count: u64,
|
||||
pub gas_used: u64,
|
||||
pub time: u128,
|
||||
}
|
||||
|
||||
/// Summary statistics for a benchmark run.
|
||||
///
|
||||
/// Latencies are derived from per-block `engine_newPayload` timings (converted from µs to ms):
|
||||
/// - `mean_new_payload_latency_ms`: arithmetic mean latency across blocks.
|
||||
/// - `median_new_payload_latency_ms`: p50 latency across blocks.
|
||||
/// - `p90_new_payload_latency_ms` / `p99_new_payload_latency_ms`: tail latencies across blocks.
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub(crate) struct BenchmarkSummary {
|
||||
pub total_blocks: u64,
|
||||
pub total_gas_used: u64,
|
||||
pub total_duration_ms: u128,
|
||||
pub mean_new_payload_latency_ms: f64,
|
||||
pub median_new_payload_latency_ms: f64,
|
||||
pub p90_new_payload_latency_ms: f64,
|
||||
pub p99_new_payload_latency_ms: f64,
|
||||
pub gas_per_second: f64,
|
||||
pub blocks_per_second: f64,
|
||||
pub min_block_number: u64,
|
||||
pub max_block_number: u64,
|
||||
}
|
||||
|
||||
/// Comparison report between two benchmark runs
|
||||
#[derive(Debug, Serialize)]
|
||||
pub(crate) struct ComparisonReport {
|
||||
pub timestamp: String,
|
||||
pub baseline: RefInfo,
|
||||
pub feature: RefInfo,
|
||||
pub comparison_summary: ComparisonSummary,
|
||||
pub per_block_comparisons: Vec<BlockComparison>,
|
||||
}
|
||||
|
||||
/// Information about a reference in the comparison
|
||||
#[derive(Debug, Serialize)]
|
||||
pub(crate) struct RefInfo {
|
||||
pub ref_name: String,
|
||||
pub summary: BenchmarkSummary,
|
||||
pub start_timestamp: Option<DateTime<Utc>>,
|
||||
pub end_timestamp: Option<DateTime<Utc>>,
|
||||
pub reth_command: Option<String>,
|
||||
}
|
||||
|
||||
/// Summary of the comparison between references.
|
||||
///
|
||||
/// Percent deltas are `(feature - baseline) / baseline * 100`:
|
||||
/// - `new_payload_latency_p50_change_percent` / p90 / p99: percent changes of the respective
|
||||
/// per-block percentiles.
|
||||
/// - `per_block_latency_change_mean_percent` / `per_block_latency_change_median_percent` are the
|
||||
/// mean and median of per-block percent deltas (feature vs baseline), capturing block-level
|
||||
/// drift.
|
||||
/// - `per_block_latency_change_std_dev_percent`: standard deviation of per-block percent changes,
|
||||
/// measuring consistency of performance changes across blocks.
|
||||
/// - `new_payload_total_latency_change_percent` is the percent change of the total newPayload time
|
||||
/// across the run.
|
||||
///
|
||||
/// Positive means slower/higher; negative means faster/lower.
|
||||
#[derive(Debug, Serialize)]
|
||||
pub(crate) struct ComparisonSummary {
|
||||
pub per_block_latency_change_mean_percent: f64,
|
||||
pub per_block_latency_change_median_percent: f64,
|
||||
pub per_block_latency_change_std_dev_percent: f64,
|
||||
pub new_payload_total_latency_change_percent: f64,
|
||||
pub new_payload_latency_p50_change_percent: f64,
|
||||
pub new_payload_latency_p90_change_percent: f64,
|
||||
pub new_payload_latency_p99_change_percent: f64,
|
||||
pub gas_per_second_change_percent: f64,
|
||||
pub blocks_per_second_change_percent: f64,
|
||||
}
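// Worked example of the delta convention above (hypothetical figures): a baseline p50 of 40 ms
// and a feature p50 of 50 ms yields (50 - 40) / 40 * 100 = +25.0, i.e. the feature is 25%
// slower; a gas_per_second change of -10.0 means the feature processes 10% less gas per second.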
|
||||
|
||||
/// Per-block comparison data
|
||||
#[derive(Debug, Serialize)]
|
||||
pub(crate) struct BlockComparison {
|
||||
pub block_number: u64,
|
||||
pub transaction_count: u64,
|
||||
pub gas_used: u64,
|
||||
pub baseline_new_payload_latency: u128,
|
||||
pub feature_new_payload_latency: u128,
|
||||
pub new_payload_latency_change_percent: f64,
|
||||
}
|
||||
|
||||
impl ComparisonGenerator {
|
||||
/// Create a new comparison generator
|
||||
pub(crate) fn new(args: &Args) -> Self {
|
||||
let now: DateTime<Utc> = Utc::now();
|
||||
let timestamp = now.format("%Y%m%d_%H%M%S").to_string();
|
||||
|
||||
Self {
|
||||
output_dir: args.output_dir_path(),
|
||||
timestamp,
|
||||
baseline_ref_name: args.baseline_ref.clone(),
|
||||
feature_ref_name: args.feature_ref.clone(),
|
||||
baseline_results: None,
|
||||
feature_results: None,
|
||||
baseline_command: None,
|
||||
feature_command: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the output directory for a specific reference
|
||||
pub(crate) fn get_ref_output_dir(&self, ref_type: &str) -> PathBuf {
|
||||
self.output_dir.join("results").join(&self.timestamp).join(ref_type)
|
||||
}
|
||||
|
||||
/// Get the main output directory for this comparison run
|
||||
pub(crate) fn get_output_dir(&self) -> PathBuf {
|
||||
self.output_dir.join("results").join(&self.timestamp)
|
||||
}
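    // Resulting layout for a hypothetical run with timestamp "20250102_120000":
    //   <output_dir>/results/20250102_120000/            <- comparison report and charts
    //   <output_dir>/results/20250102_120000/baseline/   <- reth-bench CSVs for the baseline ref
    //   <output_dir>/results/20250102_120000/feature/    <- reth-bench CSVs for the feature ref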
|
||||
|
||||
/// Add benchmark results for a reference
|
||||
pub(crate) fn add_ref_results(&mut self, ref_type: &str, output_path: &Path) -> Result<()> {
|
||||
let ref_name = match ref_type {
|
||||
"baseline" => &self.baseline_ref_name,
|
||||
"feature" => &self.feature_ref_name,
|
||||
_ => return Err(eyre!("Unknown reference type: {}", ref_type)),
|
||||
};
|
||||
|
||||
let results = self.load_benchmark_results(ref_name, output_path)?;
|
||||
|
||||
match ref_type {
|
||||
"baseline" => self.baseline_results = Some(results),
|
||||
"feature" => self.feature_results = Some(results),
|
||||
_ => return Err(eyre!("Unknown reference type: {}", ref_type)),
|
||||
}
|
||||
|
||||
info!("Loaded benchmark results for {} reference", ref_type);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Set the benchmark run timestamps for a reference
|
||||
pub(crate) fn set_ref_timestamps(
|
||||
&mut self,
|
||||
ref_type: &str,
|
||||
start: DateTime<Utc>,
|
||||
end: DateTime<Utc>,
|
||||
) -> Result<()> {
|
||||
match ref_type {
|
||||
"baseline" => {
|
||||
if let Some(ref mut results) = self.baseline_results {
|
||||
results.start_timestamp = Some(start);
|
||||
results.end_timestamp = Some(end);
|
||||
} else {
|
||||
return Err(eyre!("Baseline results not loaded yet"));
|
||||
}
|
||||
}
|
||||
"feature" => {
|
||||
if let Some(ref mut results) = self.feature_results {
|
||||
results.start_timestamp = Some(start);
|
||||
results.end_timestamp = Some(end);
|
||||
} else {
|
||||
return Err(eyre!("Feature results not loaded yet"));
|
||||
}
|
||||
}
|
||||
_ => return Err(eyre!("Unknown reference type: {}", ref_type)),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Set the reth command for a reference
|
||||
pub(crate) fn set_ref_command(&mut self, ref_type: &str, command: String) -> Result<()> {
|
||||
match ref_type {
|
||||
"baseline" => {
|
||||
self.baseline_command = Some(command);
|
||||
}
|
||||
"feature" => {
|
||||
self.feature_command = Some(command);
|
||||
}
|
||||
_ => return Err(eyre!("Unknown reference type: {}", ref_type)),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Generate the final comparison report
|
||||
pub(crate) async fn generate_comparison_report(&self) -> Result<()> {
|
||||
info!("Generating comparison report...");
|
||||
|
||||
let baseline =
|
||||
self.baseline_results.as_ref().ok_or_else(|| eyre!("Baseline results not loaded"))?;
|
||||
|
||||
let feature =
|
||||
self.feature_results.as_ref().ok_or_else(|| eyre!("Feature results not loaded"))?;
|
||||
|
||||
let per_block_comparisons = self.calculate_per_block_comparisons(baseline, feature)?;
|
||||
let comparison_summary = self.calculate_comparison_summary(
|
||||
&baseline.summary,
|
||||
&feature.summary,
|
||||
&per_block_comparisons,
|
||||
)?;
|
||||
|
||||
let report = ComparisonReport {
|
||||
timestamp: self.timestamp.clone(),
|
||||
baseline: RefInfo {
|
||||
ref_name: baseline.ref_name.clone(),
|
||||
summary: baseline.summary.clone(),
|
||||
start_timestamp: baseline.start_timestamp,
|
||||
end_timestamp: baseline.end_timestamp,
|
||||
reth_command: self.baseline_command.clone(),
|
||||
},
|
||||
feature: RefInfo {
|
||||
ref_name: feature.ref_name.clone(),
|
||||
summary: feature.summary.clone(),
|
||||
start_timestamp: feature.start_timestamp,
|
||||
end_timestamp: feature.end_timestamp,
|
||||
reth_command: self.feature_command.clone(),
|
||||
},
|
||||
comparison_summary,
|
||||
per_block_comparisons,
|
||||
};
|
||||
|
||||
// Write reports
|
||||
self.write_comparison_reports(&report).await?;
|
||||
|
||||
// Print summary to console
|
||||
self.print_comparison_summary(&report);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Load benchmark results from CSV files
|
||||
fn load_benchmark_results(
|
||||
&self,
|
||||
ref_name: &str,
|
||||
output_path: &Path,
|
||||
) -> Result<BenchmarkResults> {
|
||||
let combined_latency_path = output_path.join("combined_latency.csv");
|
||||
let total_gas_path = output_path.join("total_gas.csv");
|
||||
|
||||
let combined_latency_data = self.load_combined_latency_csv(&combined_latency_path)?;
|
||||
let total_gas_data = self.load_total_gas_csv(&total_gas_path)?;
|
||||
|
||||
let summary = self.calculate_summary(&combined_latency_data, &total_gas_data)?;
|
||||
|
||||
Ok(BenchmarkResults {
|
||||
ref_name: ref_name.to_string(),
|
||||
combined_latency_data,
|
||||
summary,
|
||||
start_timestamp: None,
|
||||
end_timestamp: None,
|
||||
})
|
||||
}
|
||||
|
||||
/// Load combined latency CSV data
|
||||
fn load_combined_latency_csv(&self, path: &Path) -> Result<Vec<CombinedLatencyRow>> {
|
||||
let mut reader = Reader::from_path(path)
|
||||
.wrap_err_with(|| format!("Failed to open combined latency CSV: {path:?}"))?;
|
||||
|
||||
let mut rows = Vec::new();
|
||||
for result in reader.deserialize() {
|
||||
let row: CombinedLatencyRow = result
|
||||
.wrap_err_with(|| format!("Failed to parse combined latency row in {path:?}"))?;
|
||||
rows.push(row);
|
||||
}
|
||||
|
||||
if rows.is_empty() {
|
||||
return Err(eyre!("No data found in combined latency CSV: {:?}", path));
|
||||
}
|
||||
|
||||
Ok(rows)
|
||||
}
|
||||
|
||||
/// Load total gas CSV data
|
||||
fn load_total_gas_csv(&self, path: &Path) -> Result<Vec<TotalGasRow>> {
|
||||
let mut reader = Reader::from_path(path)
|
||||
.wrap_err_with(|| format!("Failed to open total gas CSV: {path:?}"))?;
|
||||
|
||||
let mut rows = Vec::new();
|
||||
for result in reader.deserialize() {
|
||||
let row: TotalGasRow =
|
||||
result.wrap_err_with(|| format!("Failed to parse total gas row in {path:?}"))?;
|
||||
rows.push(row);
|
||||
}
|
||||
|
||||
if rows.is_empty() {
|
||||
return Err(eyre!("No data found in total gas CSV: {:?}", path));
|
||||
}
|
||||
|
||||
Ok(rows)
|
||||
}
|
||||
|
||||
/// Calculate summary statistics for a benchmark run.
|
||||
///
|
||||
/// Computes latency statistics from per-block `new_payload_latency` values in `combined_data`
|
||||
/// (converting from µs to ms), and throughput metrics using the total run duration from
|
||||
/// `total_gas_data`. Percentiles (p50/p90/p99) use linear interpolation on sorted latencies.
|
||||
fn calculate_summary(
|
||||
&self,
|
||||
combined_data: &[CombinedLatencyRow],
|
||||
total_gas_data: &[TotalGasRow],
|
||||
) -> Result<BenchmarkSummary> {
|
||||
if combined_data.is_empty() || total_gas_data.is_empty() {
|
||||
return Err(eyre!("Cannot calculate summary for empty data"));
|
||||
}
|
||||
|
||||
let total_blocks = combined_data.len() as u64;
|
||||
let total_gas_used: u64 = combined_data.iter().map(|r| r.gas_used).sum();
|
||||
|
||||
let total_duration_ms = total_gas_data.last().unwrap().time / 1000; // Convert microseconds to milliseconds
|
||||
|
||||
let latencies_ms: Vec<f64> =
|
||||
combined_data.iter().map(|r| r.new_payload_latency as f64 / 1000.0).collect();
|
||||
let mean_new_payload_latency_ms: f64 =
|
||||
latencies_ms.iter().sum::<f64>() / total_blocks as f64;
|
||||
|
||||
let mut sorted_latencies_ms = latencies_ms;
|
||||
sorted_latencies_ms.sort_by(|a, b| a.partial_cmp(b).unwrap_or(Ordering::Equal));
|
||||
let median_new_payload_latency_ms = percentile(&sorted_latencies_ms, 0.5);
|
||||
let p90_new_payload_latency_ms = percentile(&sorted_latencies_ms, 0.9);
|
||||
let p99_new_payload_latency_ms = percentile(&sorted_latencies_ms, 0.99);
|
||||
|
||||
let total_duration_seconds = total_duration_ms as f64 / 1000.0;
|
||||
let gas_per_second = if total_duration_seconds > f64::EPSILON {
|
||||
total_gas_used as f64 / total_duration_seconds
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
|
||||
let blocks_per_second = if total_duration_seconds > f64::EPSILON {
|
||||
total_blocks as f64 / total_duration_seconds
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
|
||||
let min_block_number = combined_data.first().unwrap().block_number;
|
||||
let max_block_number = combined_data.last().unwrap().block_number;
|
||||
|
||||
Ok(BenchmarkSummary {
|
||||
total_blocks,
|
||||
total_gas_used,
|
||||
total_duration_ms,
|
||||
mean_new_payload_latency_ms,
|
||||
median_new_payload_latency_ms,
|
||||
p90_new_payload_latency_ms,
|
||||
p99_new_payload_latency_ms,
|
||||
gas_per_second,
|
||||
blocks_per_second,
|
||||
min_block_number,
|
||||
max_block_number,
|
||||
})
|
||||
}
|
||||
|
||||
/// Calculate comparison summary between baseline and feature
|
||||
fn calculate_comparison_summary(
|
||||
&self,
|
||||
baseline: &BenchmarkSummary,
|
||||
feature: &BenchmarkSummary,
|
||||
per_block_comparisons: &[BlockComparison],
|
||||
) -> Result<ComparisonSummary> {
|
||||
let calc_percent_change = |baseline: f64, feature: f64| -> f64 {
|
||||
if baseline.abs() > f64::EPSILON {
|
||||
((feature - baseline) / baseline) * 100.0
|
||||
} else {
|
||||
0.0
|
||||
}
|
||||
};
|
||||
|
||||
// Calculate per-block statistics. "Per-block" means: for each block, compute the percent
|
||||
// change (feature - baseline) / baseline * 100, then calculate statistics across those
|
||||
// per-block percent changes. This captures how consistently the feature performs relative
|
||||
// to baseline across all blocks.
|
||||
let per_block_percent_changes: Vec<f64> =
|
||||
per_block_comparisons.iter().map(|c| c.new_payload_latency_change_percent).collect();
|
||||
let per_block_latency_change_mean_percent = if per_block_percent_changes.is_empty() {
|
||||
0.0
|
||||
} else {
|
||||
per_block_percent_changes.iter().sum::<f64>() / per_block_percent_changes.len() as f64
|
||||
};
|
||||
let per_block_latency_change_median_percent = if per_block_percent_changes.is_empty() {
|
||||
0.0
|
||||
} else {
|
||||
let mut sorted = per_block_percent_changes.clone();
|
||||
sorted.sort_by(|a, b| a.partial_cmp(b).unwrap_or(Ordering::Equal));
|
||||
percentile(&sorted, 0.5)
|
||||
};
|
||||
let per_block_latency_change_std_dev_percent =
|
||||
calculate_std_dev(&per_block_percent_changes, per_block_latency_change_mean_percent);
|
||||
|
||||
let baseline_total_latency_ms =
|
||||
baseline.mean_new_payload_latency_ms * baseline.total_blocks as f64;
|
||||
let feature_total_latency_ms =
|
||||
feature.mean_new_payload_latency_ms * feature.total_blocks as f64;
|
||||
let new_payload_total_latency_change_percent =
|
||||
calc_percent_change(baseline_total_latency_ms, feature_total_latency_ms);
|
||||
|
||||
Ok(ComparisonSummary {
|
||||
per_block_latency_change_mean_percent,
|
||||
per_block_latency_change_median_percent,
|
||||
per_block_latency_change_std_dev_percent,
|
||||
new_payload_total_latency_change_percent,
|
||||
new_payload_latency_p50_change_percent: calc_percent_change(
|
||||
baseline.median_new_payload_latency_ms,
|
||||
feature.median_new_payload_latency_ms,
|
||||
),
|
||||
new_payload_latency_p90_change_percent: calc_percent_change(
|
||||
baseline.p90_new_payload_latency_ms,
|
||||
feature.p90_new_payload_latency_ms,
|
||||
),
|
||||
new_payload_latency_p99_change_percent: calc_percent_change(
|
||||
baseline.p99_new_payload_latency_ms,
|
||||
feature.p99_new_payload_latency_ms,
|
||||
),
|
||||
gas_per_second_change_percent: calc_percent_change(
|
||||
baseline.gas_per_second,
|
||||
feature.gas_per_second,
|
||||
),
|
||||
blocks_per_second_change_percent: calc_percent_change(
|
||||
baseline.blocks_per_second,
|
||||
feature.blocks_per_second,
|
||||
),
|
||||
})
|
||||
}
|
||||
|
||||
/// Calculate per-block comparisons
|
||||
fn calculate_per_block_comparisons(
|
||||
&self,
|
||||
baseline: &BenchmarkResults,
|
||||
feature: &BenchmarkResults,
|
||||
) -> Result<Vec<BlockComparison>> {
|
||||
let mut baseline_map: HashMap<u64, &CombinedLatencyRow> = HashMap::new();
|
||||
for row in &baseline.combined_latency_data {
|
||||
baseline_map.insert(row.block_number, row);
|
||||
}
|
||||
|
||||
let mut comparisons = Vec::new();
|
||||
for feature_row in &feature.combined_latency_data {
|
||||
if let Some(baseline_row) = baseline_map.get(&feature_row.block_number) {
|
||||
let calc_percent_change = |baseline: u128, feature: u128| -> f64 {
|
||||
if baseline > 0 {
|
||||
((feature as f64 - baseline as f64) / baseline as f64) * 100.0
|
||||
} else {
|
||||
0.0
|
||||
}
|
||||
};
|
||||
|
||||
let comparison = BlockComparison {
|
||||
block_number: feature_row.block_number,
|
||||
transaction_count: feature_row.transaction_count,
|
||||
gas_used: feature_row.gas_used,
|
||||
baseline_new_payload_latency: baseline_row.new_payload_latency,
|
||||
feature_new_payload_latency: feature_row.new_payload_latency,
|
||||
new_payload_latency_change_percent: calc_percent_change(
|
||||
baseline_row.new_payload_latency,
|
||||
feature_row.new_payload_latency,
|
||||
),
|
||||
};
|
||||
comparisons.push(comparison);
|
||||
} else {
|
||||
warn!("Block {} not found in baseline data", feature_row.block_number);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(comparisons)
|
||||
}
|
||||
|
||||
/// Write comparison reports to files
|
||||
async fn write_comparison_reports(&self, report: &ComparisonReport) -> Result<()> {
|
||||
let report_dir = self.output_dir.join("results").join(&self.timestamp);
|
||||
fs::create_dir_all(&report_dir)
|
||||
.wrap_err_with(|| format!("Failed to create report directory: {report_dir:?}"))?;
|
||||
|
||||
// Write JSON report
|
||||
let json_path = report_dir.join("comparison_report.json");
|
||||
let json_content = serde_json::to_string_pretty(report)
|
||||
.wrap_err("Failed to serialize comparison report to JSON")?;
|
||||
fs::write(&json_path, json_content)
|
||||
.wrap_err_with(|| format!("Failed to write JSON report: {json_path:?}"))?;
|
||||
|
||||
// Write CSV report for per-block comparisons
|
||||
let csv_path = report_dir.join("per_block_comparison.csv");
|
||||
let mut writer = csv::Writer::from_path(&csv_path)
|
||||
.wrap_err_with(|| format!("Failed to create CSV writer: {csv_path:?}"))?;
|
||||
|
||||
for comparison in &report.per_block_comparisons {
|
||||
writer.serialize(comparison).wrap_err("Failed to write comparison row to CSV")?;
|
||||
}
|
||||
writer.flush().wrap_err("Failed to flush CSV writer")?;
|
||||
|
||||
info!("Comparison reports written to: {:?}", report_dir);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Print comparison summary to console
|
||||
fn print_comparison_summary(&self, report: &ComparisonReport) {
|
||||
// Parse and format timestamp nicely
|
||||
let formatted_timestamp = if let Ok(dt) = chrono::DateTime::parse_from_str(
|
||||
&format!("{} +0000", report.timestamp.replace('_', " ")),
|
||||
"%Y%m%d %H%M%S %z",
|
||||
) {
|
||||
dt.format("%Y-%m-%d %H:%M:%S UTC").to_string()
|
||||
} else {
|
||||
// Fallback to original if parsing fails
|
||||
report.timestamp.clone()
|
||||
};
|
||||
|
||||
println!("\n=== BENCHMARK COMPARISON SUMMARY ===");
|
||||
println!("Timestamp: {formatted_timestamp}");
|
||||
println!("Baseline: {}", report.baseline.ref_name);
|
||||
println!("Feature: {}", report.feature.ref_name);
|
||||
println!();
|
||||
|
||||
let summary = &report.comparison_summary;
|
||||
|
||||
println!("Performance Changes:");
|
||||
println!(
|
||||
" NewPayload Latency per-block mean change: {:+.2}%",
|
||||
summary.per_block_latency_change_mean_percent
|
||||
);
|
||||
println!(
|
||||
" NewPayload Latency per-block median change: {:+.2}%",
|
||||
summary.per_block_latency_change_median_percent
|
||||
);
|
||||
println!(
|
||||
" NewPayload Latency per-block std dev: {:.2}%",
|
||||
summary.per_block_latency_change_std_dev_percent
|
||||
);
|
||||
println!(
|
||||
" Total newPayload time change: {:+.2}%",
|
||||
summary.new_payload_total_latency_change_percent
|
||||
);
|
||||
println!(
|
||||
" NewPayload Latency p50: {:+.2}%",
|
||||
summary.new_payload_latency_p50_change_percent
|
||||
);
|
||||
println!(
|
||||
" NewPayload Latency p90: {:+.2}%",
|
||||
summary.new_payload_latency_p90_change_percent
|
||||
);
|
||||
println!(
|
||||
" NewPayload Latency p99: {:+.2}%",
|
||||
summary.new_payload_latency_p99_change_percent
|
||||
);
|
||||
println!(
|
||||
" Gas/Second: {:+.2}%",
|
||||
summary.gas_per_second_change_percent
|
||||
);
|
||||
println!(
|
||||
" Blocks/Second: {:+.2}%",
|
||||
summary.blocks_per_second_change_percent
|
||||
);
|
||||
println!();
|
||||
|
||||
println!("Baseline Summary:");
|
||||
let baseline = &report.baseline.summary;
|
||||
println!(
|
||||
" Blocks: {} (blocks {} to {}), Gas: {}, Duration: {:.2}s",
|
||||
baseline.total_blocks,
|
||||
baseline.min_block_number,
|
||||
baseline.max_block_number,
|
||||
baseline.total_gas_used,
|
||||
baseline.total_duration_ms as f64 / 1000.0
|
||||
);
|
||||
println!(" NewPayload latency (ms):");
|
||||
println!(
|
||||
" mean: {:.2}, p50: {:.2}, p90: {:.2}, p99: {:.2}",
|
||||
baseline.mean_new_payload_latency_ms,
|
||||
baseline.median_new_payload_latency_ms,
|
||||
baseline.p90_new_payload_latency_ms,
|
||||
baseline.p99_new_payload_latency_ms
|
||||
);
|
||||
if let (Some(start), Some(end)) =
|
||||
(&report.baseline.start_timestamp, &report.baseline.end_timestamp)
|
||||
{
|
||||
println!(
|
||||
" Started: {}, Ended: {}",
|
||||
start.format("%Y-%m-%d %H:%M:%S UTC"),
|
||||
end.format("%Y-%m-%d %H:%M:%S UTC")
|
||||
);
|
||||
}
|
||||
if let Some(ref cmd) = report.baseline.reth_command {
|
||||
println!(" Command: {}", cmd);
|
||||
}
|
||||
println!();
|
||||
|
||||
println!("Feature Summary:");
|
||||
let feature = &report.feature.summary;
|
||||
println!(
|
||||
" Blocks: {} (blocks {} to {}), Gas: {}, Duration: {:.2}s",
|
||||
feature.total_blocks,
|
||||
feature.min_block_number,
|
||||
feature.max_block_number,
|
||||
feature.total_gas_used,
|
||||
feature.total_duration_ms as f64 / 1000.0
|
||||
);
|
||||
println!(" NewPayload latency (ms):");
|
||||
println!(
|
||||
" mean: {:.2}, p50: {:.2}, p90: {:.2}, p99: {:.2}",
|
||||
feature.mean_new_payload_latency_ms,
|
||||
feature.median_new_payload_latency_ms,
|
||||
feature.p90_new_payload_latency_ms,
|
||||
feature.p99_new_payload_latency_ms
|
||||
);
|
||||
if let (Some(start), Some(end)) =
|
||||
(&report.feature.start_timestamp, &report.feature.end_timestamp)
|
||||
{
|
||||
println!(
|
||||
" Started: {}, Ended: {}",
|
||||
start.format("%Y-%m-%d %H:%M:%S UTC"),
|
||||
end.format("%Y-%m-%d %H:%M:%S UTC")
|
||||
);
|
||||
}
|
||||
if let Some(ref cmd) = report.feature.reth_command {
|
||||
println!(" Command: {}", cmd);
|
||||
}
|
||||
println!();
|
||||
}
|
||||
}
|
||||
|
||||
/// Calculate standard deviation from a set of values and their mean.
|
||||
///
|
||||
/// Computes the population standard deviation using the formula:
|
||||
/// `sqrt(sum((x - mean)²) / n)`
|
||||
///
|
||||
/// Returns 0.0 for empty input.
|
||||
fn calculate_std_dev(values: &[f64], mean: f64) -> f64 {
|
||||
if values.is_empty() {
|
||||
return 0.0;
|
||||
}
|
||||
|
||||
let variance = values
|
||||
.iter()
|
||||
.map(|x| {
|
||||
let diff = x - mean;
|
||||
diff * diff
|
||||
})
|
||||
.sum::<f64>() /
|
||||
values.len() as f64;
|
||||
|
||||
variance.sqrt()
|
||||
}
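// A minimal check of the population formula above with hypothetical values: for
// [1.0, 2.0, 3.0] and mean 2.0, the squared deviations sum to 2.0, the variance is
// 2.0 / 3 ≈ 0.667, and the standard deviation is ≈ 0.816.
//
// assert!((calculate_std_dev(&[1.0, 2.0, 3.0], 2.0) - 0.8165).abs() < 1e-3);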
|
||||
|
||||
/// Calculate percentile using linear interpolation on a sorted slice.
|
||||
///
|
||||
/// Computes `rank = percentile × (n - 1)` where n is the array length. If the rank falls
|
||||
/// between two indices, linearly interpolates between those values. For example, with 100 values,
|
||||
/// p90 computes rank = 0.9 × 99 = 89.1, then returns `values[89] × 0.9 + values[90] × 0.1`.
|
||||
///
|
||||
/// Returns 0.0 for empty input.
|
||||
fn percentile(sorted_values: &[f64], percentile: f64) -> f64 {
|
||||
if sorted_values.is_empty() {
|
||||
return 0.0;
|
||||
}
|
||||
|
||||
let clamped = percentile.clamp(0.0, 1.0);
|
||||
let max_index = sorted_values.len() - 1;
|
||||
let rank = clamped * max_index as f64;
|
||||
let lower = rank.floor() as usize;
|
||||
let upper = rank.ceil() as usize;
|
||||
|
||||
if lower == upper {
|
||||
sorted_values[lower]
|
||||
} else {
|
||||
let weight = rank - lower as f64;
|
||||
sorted_values[lower].mul_add(1.0 - weight, sorted_values[upper] * weight)
|
||||
}
|
||||
}
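// A minimal sketch (hypothetical values) of the interpolation rule documented above; it simply
// exercises `percentile` at p50 and p90 on a five-element sorted slice and on empty input.
#[cfg(test)]
mod percentile_sketch {
    use super::percentile;

    #[test]
    fn interpolates_between_sorted_neighbors() {
        let sorted = [10.0, 20.0, 30.0, 40.0, 50.0];
        // rank = 0.5 * 4 = 2.0 -> exactly index 2
        assert!((percentile(&sorted, 0.5) - 30.0).abs() < 1e-9);
        // rank = 0.9 * 4 = 3.6 -> 40.0 * 0.4 + 50.0 * 0.6 = 46.0
        assert!((percentile(&sorted, 0.9) - 46.0).abs() < 1e-6);
        // empty input falls back to 0.0
        assert!(percentile(&[], 0.9).abs() < f64::EPSILON);
    }
}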
|
||||
365
bin/reth-bench-compare/src/compilation.rs
Normal file
@@ -0,0 +1,365 @@
|
||||
//! Compilation operations for reth and reth-bench.
|
||||
|
||||
use crate::git::GitManager;
|
||||
use alloy_primitives::address;
|
||||
use alloy_provider::{Provider, ProviderBuilder};
|
||||
use eyre::{eyre, Result, WrapErr};
|
||||
use std::{fs, path::PathBuf, process::Command};
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
/// Manages compilation operations for reth components
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct CompilationManager {
|
||||
repo_root: String,
|
||||
output_dir: PathBuf,
|
||||
git_manager: GitManager,
|
||||
features: String,
|
||||
enable_profiling: bool,
|
||||
}
|
||||
|
||||
impl CompilationManager {
|
||||
/// Create a new `CompilationManager`
|
||||
pub(crate) const fn new(
|
||||
repo_root: String,
|
||||
output_dir: PathBuf,
|
||||
git_manager: GitManager,
|
||||
features: String,
|
||||
enable_profiling: bool,
|
||||
) -> Result<Self> {
|
||||
Ok(Self { repo_root, output_dir, git_manager, features, enable_profiling })
|
||||
}
|
||||
|
||||
/// Detect if the RPC endpoint is an Optimism chain
|
||||
pub(crate) async fn detect_optimism_chain(&self, rpc_url: &str) -> Result<bool> {
|
||||
info!("Detecting chain type from RPC endpoint...");
|
||||
|
||||
// Create Alloy provider
|
||||
let url = rpc_url.parse().map_err(|e| eyre!("Invalid RPC URL '{}': {}", rpc_url, e))?;
|
||||
let provider = ProviderBuilder::new().connect_http(url);
|
||||
|
||||
// Check for Optimism predeploy at address 0x420000000000000000000000000000000000000F
|
||||
let is_optimism = !provider
|
||||
.get_code_at(address!("0x420000000000000000000000000000000000000F"))
|
||||
.await?
|
||||
.is_empty();
|
||||
|
||||
if is_optimism {
|
||||
info!("Detected Optimism chain");
|
||||
} else {
|
||||
info!("Detected Ethereum chain");
|
||||
}
|
||||
|
||||
Ok(is_optimism)
|
||||
}
|
||||
|
||||
/// Get the path to the cached binary using explicit commit hash
|
||||
pub(crate) fn get_cached_binary_path_for_commit(
|
||||
&self,
|
||||
commit: &str,
|
||||
is_optimism: bool,
|
||||
) -> PathBuf {
|
||||
let identifier = &commit[..8]; // Use first 8 chars of commit
|
||||
|
||||
let binary_name = if is_optimism {
|
||||
format!("op-reth_{}", identifier)
|
||||
} else {
|
||||
format!("reth_{}", identifier)
|
||||
};
|
||||
|
||||
self.output_dir.join("bin").join(binary_name)
|
||||
}
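    // Example path for a hypothetical commit "abcdef1234...": an Ethereum build is cached as
    //   <output_dir>/bin/reth_abcdef12
    // and an Optimism build as
    //   <output_dir>/bin/op-reth_abcdef12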
|
||||
|
||||
/// Compile reth using cargo build and cache the binary
|
||||
pub(crate) fn compile_reth(&self, commit: &str, is_optimism: bool) -> Result<()> {
|
||||
// Validate that current git commit matches the expected commit
|
||||
let current_commit = self.git_manager.get_current_commit()?;
|
||||
if current_commit != commit {
|
||||
return Err(eyre!(
|
||||
"Git commit mismatch! Expected: {}, but currently at: {}",
|
||||
&commit[..8],
|
||||
&current_commit[..8]
|
||||
));
|
||||
}
|
||||
|
||||
let cached_path = self.get_cached_binary_path_for_commit(commit, is_optimism);
|
||||
|
||||
// Check if cached binary already exists (since path contains commit hash, it's valid)
|
||||
if cached_path.exists() {
|
||||
info!("Using cached binary (commit: {})", &commit[..8]);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
info!("No cached binary found, compiling (commit: {})...", &commit[..8]);
|
||||
|
||||
let binary_name = if is_optimism { "op-reth" } else { "reth" };
|
||||
|
||||
info!(
|
||||
"Compiling {} with profiling configuration (commit: {})...",
|
||||
binary_name,
|
||||
&commit[..8]
|
||||
);
|
||||
|
||||
let mut cmd = Command::new("cargo");
|
||||
cmd.arg("build").arg("--profile").arg("profiling");
|
||||
|
||||
// Append samply feature when profiling to enable tracing span markers.
|
||||
// NOTE: The `samply` feature must exist in the branch being compiled. If comparing
|
||||
// against an older branch that predates the samply integration, compilation will fail
|
||||
// or markers won't appear. In that case, omit --profile or ensure both branches
|
||||
// include the samply feature support.
|
||||
let features = if self.enable_profiling && !self.features.contains("samply") {
|
||||
format!("{},samply", self.features)
|
||||
} else {
|
||||
self.features.clone()
|
||||
};
|
||||
cmd.arg("--features").arg(&features);
|
||||
info!("Using features: {}", features);
|
||||
|
||||
// Add bin-specific arguments for optimism
|
||||
if is_optimism {
|
||||
cmd.arg("--bin")
|
||||
.arg("op-reth")
|
||||
.arg("--manifest-path")
|
||||
.arg("crates/optimism/bin/Cargo.toml");
|
||||
}
|
||||
|
||||
cmd.current_dir(&self.repo_root);
|
||||
|
||||
// Set RUSTFLAGS for native CPU optimization
|
||||
cmd.env("RUSTFLAGS", "-C target-cpu=native");
|
||||
|
||||
// Debug log the command
|
||||
debug!("Executing cargo command: {:?}", cmd);
|
||||
|
||||
let output = cmd.output().wrap_err("Failed to execute cargo build command")?;
|
||||
|
||||
// Print stdout and stderr with prefixes at debug level
|
||||
let stdout = String::from_utf8_lossy(&output.stdout);
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
|
||||
for line in stdout.lines() {
|
||||
if !line.trim().is_empty() {
|
||||
debug!("[CARGO] {}", line);
|
||||
}
|
||||
}
|
||||
|
||||
for line in stderr.lines() {
|
||||
if !line.trim().is_empty() {
|
||||
debug!("[CARGO] {}", line);
|
||||
}
|
||||
}
|
||||
|
||||
if !output.status.success() {
|
||||
// Print all output when compilation fails
|
||||
error!("Cargo build failed with exit code: {:?}", output.status.code());
|
||||
|
||||
if !stdout.trim().is_empty() {
|
||||
error!("Cargo stdout:");
|
||||
for line in stdout.lines() {
|
||||
error!(" {}", line);
|
||||
}
|
||||
}
|
||||
|
||||
if !stderr.trim().is_empty() {
|
||||
error!("Cargo stderr:");
|
||||
for line in stderr.lines() {
|
||||
error!(" {}", line);
|
||||
}
|
||||
}
|
||||
|
||||
return Err(eyre!("Compilation failed with exit code: {:?}", output.status.code()));
|
||||
}
|
||||
|
||||
info!("{} compilation completed", binary_name);
|
||||
|
||||
// Copy the compiled binary to cache
|
||||
let source_path =
|
||||
PathBuf::from(&self.repo_root).join(format!("target/profiling/{}", binary_name));
|
||||
if !source_path.exists() {
|
||||
return Err(eyre!("Compiled binary not found at {:?}", source_path));
|
||||
}
|
||||
|
||||
// Create bin directory if it doesn't exist
|
||||
let bin_dir = self.output_dir.join("bin");
|
||||
fs::create_dir_all(&bin_dir).wrap_err("Failed to create bin directory")?;
|
||||
|
||||
// Copy binary to cache
|
||||
fs::copy(&source_path, &cached_path).wrap_err("Failed to copy binary to cache")?;
|
||||
|
||||
// Make the cached binary executable
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
let mut perms = fs::metadata(&cached_path)?.permissions();
|
||||
perms.set_mode(0o755);
|
||||
fs::set_permissions(&cached_path, perms)?;
|
||||
}
|
||||
|
||||
info!("Cached compiled binary at: {:?}", cached_path);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Check if reth-bench is available in PATH
|
||||
pub(crate) fn is_reth_bench_available(&self) -> bool {
|
||||
match Command::new("which").arg("reth-bench").output() {
|
||||
Ok(output) => {
|
||||
if output.status.success() {
|
||||
let path = String::from_utf8_lossy(&output.stdout);
|
||||
info!("Found reth-bench: {}", path.trim());
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
Err(_) => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if samply is available in PATH
|
||||
pub(crate) fn is_samply_available(&self) -> bool {
|
||||
match Command::new("which").arg("samply").output() {
|
||||
Ok(output) => {
|
||||
if output.status.success() {
|
||||
let path = String::from_utf8_lossy(&output.stdout);
|
||||
info!("Found samply: {}", path.trim());
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
Err(_) => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Install samply using cargo
|
||||
pub(crate) fn install_samply(&self) -> Result<()> {
|
||||
info!("Installing samply via cargo...");
|
||||
|
||||
let mut cmd = Command::new("cargo");
|
||||
cmd.args(["install", "--locked", "samply"]);
|
||||
|
||||
// Debug log the command
|
||||
debug!("Executing cargo command: {:?}", cmd);
|
||||
|
||||
let output = cmd.output().wrap_err("Failed to execute cargo install samply command")?;
|
||||
|
||||
// Print stdout and stderr with prefixes at debug level
|
||||
let stdout = String::from_utf8_lossy(&output.stdout);
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
|
||||
for line in stdout.lines() {
|
||||
if !line.trim().is_empty() {
|
||||
debug!("[CARGO-SAMPLY] {}", line);
|
||||
}
|
||||
}
|
||||
|
||||
for line in stderr.lines() {
|
||||
if !line.trim().is_empty() {
|
||||
debug!("[CARGO-SAMPLY] {}", line);
|
||||
}
|
||||
}
|
||||
|
||||
if !output.status.success() {
|
||||
// Print all output when installation fails
|
||||
error!("Cargo install samply failed with exit code: {:?}", output.status.code());
|
||||
|
||||
if !stdout.trim().is_empty() {
|
||||
error!("Cargo stdout:");
|
||||
for line in stdout.lines() {
|
||||
error!(" {}", line);
|
||||
}
|
||||
}
|
||||
|
||||
if !stderr.trim().is_empty() {
|
||||
error!("Cargo stderr:");
|
||||
for line in stderr.lines() {
|
||||
error!(" {}", line);
|
||||
}
|
||||
}
|
||||
|
||||
return Err(eyre!(
|
||||
"samply installation failed with exit code: {:?}",
|
||||
output.status.code()
|
||||
));
|
||||
}
|
||||
|
||||
info!("Samply installation completed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Ensure samply is available, installing if necessary
|
||||
pub(crate) fn ensure_samply_available(&self) -> Result<()> {
|
||||
if self.is_samply_available() {
|
||||
Ok(())
|
||||
} else {
|
||||
warn!("samply not found in PATH, installing...");
|
||||
self.install_samply()
|
||||
}
|
||||
}
|
||||
|
||||
/// Ensure reth-bench is available, compiling if necessary
|
||||
pub(crate) fn ensure_reth_bench_available(&self) -> Result<()> {
|
||||
if self.is_reth_bench_available() {
|
||||
Ok(())
|
||||
} else {
|
||||
warn!("reth-bench not found in PATH, compiling and installing...");
|
||||
self.compile_reth_bench()
|
||||
}
|
||||
}
|
||||
|
||||
/// Compile and install reth-bench using `make install-reth-bench`
|
||||
pub(crate) fn compile_reth_bench(&self) -> Result<()> {
|
||||
info!("Compiling and installing reth-bench...");
|
||||
|
||||
let mut cmd = Command::new("make");
|
||||
cmd.arg("install-reth-bench").current_dir(&self.repo_root);
|
||||
|
||||
// Debug log the command
|
||||
debug!("Executing make command: {:?}", cmd);
|
||||
|
||||
let output = cmd.output().wrap_err("Failed to execute make install-reth-bench command")?;
|
||||
|
||||
// Print stdout and stderr with prefixes at debug level
|
||||
let stdout = String::from_utf8_lossy(&output.stdout);
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
|
||||
for line in stdout.lines() {
|
||||
if !line.trim().is_empty() {
|
||||
debug!("[MAKE-BENCH] {}", line);
|
||||
}
|
||||
}
|
||||
|
||||
for line in stderr.lines() {
|
||||
if !line.trim().is_empty() {
|
||||
debug!("[MAKE-BENCH] {}", line);
|
||||
}
|
||||
}
|
||||
|
||||
if !output.status.success() {
|
||||
// Print all output when compilation fails
|
||||
error!("Make install-reth-bench failed with exit code: {:?}", output.status.code());
|
||||
|
||||
if !stdout.trim().is_empty() {
|
||||
error!("Make stdout:");
|
||||
for line in stdout.lines() {
|
||||
error!(" {}", line);
|
||||
}
|
||||
}
|
||||
|
||||
if !stderr.trim().is_empty() {
|
||||
error!("Make stderr:");
|
||||
for line in stderr.lines() {
|
||||
error!(" {}", line);
|
||||
}
|
||||
}
|
||||
|
||||
return Err(eyre!(
|
||||
"reth-bench compilation failed with exit code: {:?}",
|
||||
output.status.code()
|
||||
));
|
||||
}
|
||||
|
||||
info!("Reth-bench compilation completed");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
328
bin/reth-bench-compare/src/git.rs
Normal file
@@ -0,0 +1,328 @@
|
||||
//! Git operations for branch management.
|
||||
|
||||
use eyre::{eyre, Result, WrapErr};
|
||||
use std::process::Command;
|
||||
use tracing::{info, warn};
|
||||
|
||||
/// Manages git operations for branch switching
|
||||
#[derive(Debug, Clone)]
|
||||
pub(crate) struct GitManager {
|
||||
repo_root: String,
|
||||
}
|
||||
|
||||
impl GitManager {
|
||||
/// Create a new `GitManager`, detecting the repository root
|
||||
pub(crate) fn new() -> Result<Self> {
|
||||
let output = Command::new("git")
|
||||
.args(["rev-parse", "--show-toplevel"])
|
||||
.output()
|
||||
.wrap_err("Failed to execute git command - is git installed?")?;
|
||||
|
||||
if !output.status.success() {
|
||||
return Err(eyre!("Not in a git repository or git command failed"));
|
||||
}
|
||||
|
||||
let repo_root = String::from_utf8(output.stdout)
|
||||
.wrap_err("Git output is not valid UTF-8")?
|
||||
.trim()
|
||||
.to_string();
|
||||
|
||||
let manager = Self { repo_root };
|
||||
info!(
|
||||
"Detected git repository at: {}, current reference: {}",
|
||||
manager.repo_root(),
|
||||
manager.get_current_ref()?
|
||||
);
|
||||
|
||||
Ok(manager)
|
||||
}
|
||||
|
||||
/// Get the current git branch name
|
||||
pub(crate) fn get_current_branch(&self) -> Result<String> {
|
||||
let output = Command::new("git")
|
||||
.args(["branch", "--show-current"])
|
||||
.current_dir(&self.repo_root)
|
||||
.output()
|
||||
.wrap_err("Failed to get current branch")?;
|
||||
|
||||
if !output.status.success() {
|
||||
return Err(eyre!("Failed to determine current branch"));
|
||||
}
|
||||
|
||||
let branch = String::from_utf8(output.stdout)
|
||||
.wrap_err("Branch name is not valid UTF-8")?
|
||||
.trim()
|
||||
.to_string();
|
||||
|
||||
if branch.is_empty() {
|
||||
return Err(eyre!("Not on a named branch (detached HEAD?)"));
|
||||
}
|
||||
|
||||
Ok(branch)
|
||||
}
|
||||
|
||||
/// Get the current git reference (branch name, tag, or commit hash)
|
||||
pub(crate) fn get_current_ref(&self) -> Result<String> {
|
||||
// First try to get branch name
|
||||
if let Ok(branch) = self.get_current_branch() {
|
||||
return Ok(branch);
|
||||
}
|
||||
|
||||
// If not on a branch, check if we're on a tag
|
||||
let tag_output = Command::new("git")
|
||||
.args(["describe", "--exact-match", "--tags", "HEAD"])
|
||||
.current_dir(&self.repo_root)
|
||||
.output()
|
||||
.wrap_err("Failed to check for tag")?;
|
||||
|
||||
if tag_output.status.success() {
|
||||
let tag = String::from_utf8(tag_output.stdout)
|
||||
.wrap_err("Tag name is not valid UTF-8")?
|
||||
.trim()
|
||||
.to_string();
|
||||
return Ok(tag);
|
||||
}
|
||||
|
||||
// If not on a branch or tag, return the commit hash
|
||||
let commit_output = Command::new("git")
|
||||
.args(["rev-parse", "HEAD"])
|
||||
.current_dir(&self.repo_root)
|
||||
.output()
|
||||
.wrap_err("Failed to get current commit")?;
|
||||
|
||||
if !commit_output.status.success() {
|
||||
return Err(eyre!("Failed to get current commit hash"));
|
||||
}
|
||||
|
||||
let commit_hash = String::from_utf8(commit_output.stdout)
|
||||
.wrap_err("Commit hash is not valid UTF-8")?
|
||||
.trim()
|
||||
.to_string();
|
||||
|
||||
Ok(commit_hash)
|
||||
}
|
||||
|
||||
/// Check if the git working directory has uncommitted changes to tracked files
|
||||
pub(crate) fn validate_clean_state(&self) -> Result<()> {
|
||||
let output = Command::new("git")
|
||||
.args(["status", "--porcelain"])
|
||||
.current_dir(&self.repo_root)
|
||||
.output()
|
||||
.wrap_err("Failed to check git status")?;
|
||||
|
||||
if !output.status.success() {
|
||||
return Err(eyre!("Git status command failed"));
|
||||
}
|
||||
|
||||
let status_output =
|
||||
String::from_utf8(output.stdout).wrap_err("Git status output is not valid UTF-8")?;
|
||||
|
||||
// Check for uncommitted changes to tracked files
|
||||
// Status codes: M = modified, A = added, D = deleted, R = renamed, C = copied, U = updated
|
||||
// ?? = untracked files (we want to ignore these)
|
||||
let has_uncommitted_changes = status_output.lines().any(|line| {
|
||||
if line.len() >= 2 {
|
||||
let status = &line[0..2];
|
||||
// Ignore untracked files (??) and ignored files (!!)
|
||||
!matches!(status, "??" | "!!")
|
||||
} else {
|
||||
false
|
||||
}
|
||||
});
|
||||
|
||||
if has_uncommitted_changes {
|
||||
warn!("Git working directory has uncommitted changes to tracked files:");
|
||||
for line in status_output.lines() {
|
||||
if line.len() >= 2 && !matches!(&line[0..2], "??" | "!!") {
|
||||
warn!(" {}", line);
|
||||
}
|
||||
}
|
||||
return Err(eyre!(
|
||||
"Git working directory has uncommitted changes to tracked files. Please commit or stash changes before running benchmark comparison."
|
||||
));
|
||||
}
|
||||
|
||||
// Check if there are untracked files and log them as info
|
||||
let untracked_files: Vec<&str> =
|
||||
status_output.lines().filter(|line| line.starts_with("??")).collect();
|
||||
|
||||
if !untracked_files.is_empty() {
|
||||
info!(
|
||||
"Git working directory has {} untracked files (this is OK)",
|
||||
untracked_files.len()
|
||||
);
|
||||
}
|
||||
|
||||
info!("Git working directory is clean (no uncommitted changes to tracked files)");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Fetch all refs from remote to ensure we have latest branches and tags
|
||||
pub(crate) fn fetch_all(&self) -> Result<()> {
|
||||
let output = Command::new("git")
|
||||
.args(["fetch", "--all", "--tags", "--quiet", "--force"])
|
||||
.current_dir(&self.repo_root)
|
||||
.output()
|
||||
.wrap_err("Failed to fetch latest refs")?;
|
||||
|
||||
if output.status.success() {
|
||||
info!("Fetched latest refs");
|
||||
} else {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
// Only warn if there's actual error content, not just fetch progress
|
||||
if !stderr.trim().is_empty() && !stderr.contains("-> origin/") {
|
||||
warn!("Git fetch encountered issues (continuing anyway): {}", stderr);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Validate that the specified git references exist (branches, tags, or commits)
|
||||
pub(crate) fn validate_refs(&self, refs: &[&str]) -> Result<()> {
|
||||
for &git_ref in refs {
|
||||
// Try to resolve the ref similar to `git checkout` by peeling to a commit.
|
||||
// First try the ref as-is with ^{commit}, then fall back to origin/{ref}^{commit}.
|
||||
let as_is = format!("{git_ref}^{{commit}}");
|
||||
let ref_check = Command::new("git")
|
||||
.args(["rev-parse", "--verify", &as_is])
|
||||
.current_dir(&self.repo_root)
|
||||
.output();
|
||||
|
||||
let found = if let Ok(output) = ref_check &&
|
||||
output.status.success()
|
||||
{
|
||||
info!("Validated reference exists: {}", git_ref);
|
||||
true
|
||||
} else {
|
||||
// Try remote-only branches via origin/{ref}
|
||||
let origin_ref = format!("origin/{git_ref}^{{commit}}");
|
||||
let origin_check = Command::new("git")
|
||||
.args(["rev-parse", "--verify", &origin_ref])
|
||||
.current_dir(&self.repo_root)
|
||||
.output();
|
||||
|
||||
if let Ok(output) = origin_check &&
|
||||
output.status.success()
|
||||
{
|
||||
info!("Validated remote reference exists: origin/{}", git_ref);
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
};
|
||||
|
||||
if !found {
|
||||
return Err(eyre!(
|
||||
"Git reference '{}' does not exist as branch, tag, or commit (tried '{}' and 'origin/{}^{{commit}}')",
|
||||
git_ref,
|
||||
format!("{git_ref}^{{commit}}"),
|
||||
git_ref,
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Switch to the specified git reference (branch, tag, or commit)
|
||||
pub(crate) fn switch_ref(&self, git_ref: &str) -> Result<()> {
|
||||
// First checkout the reference
|
||||
let output = Command::new("git")
|
||||
.args(["checkout", git_ref])
|
||||
.current_dir(&self.repo_root)
|
||||
.output()
|
||||
.wrap_err_with(|| format!("Failed to switch to reference '{git_ref}'"))?;
|
||||
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
return Err(eyre!("Failed to switch to reference '{}': {}", git_ref, stderr));
|
||||
}
|
||||
|
||||
// Check if this is a branch that tracks a remote and pull latest changes
|
||||
let is_branch = Command::new("git")
|
||||
.args(["show-ref", "--verify", "--quiet", &format!("refs/heads/{git_ref}")])
|
||||
.current_dir(&self.repo_root)
|
||||
.status()
|
||||
.map(|s| s.success())
|
||||
.unwrap_or(false);
|
||||
|
||||
if is_branch {
|
||||
// Check if the branch tracks a remote
|
||||
let tracking_output = Command::new("git")
|
||||
.args([
|
||||
"rev-parse",
|
||||
"--abbrev-ref",
|
||||
"--symbolic-full-name",
|
||||
&format!("{git_ref}@{{upstream}}"),
|
||||
])
|
||||
.current_dir(&self.repo_root)
|
||||
.output();
|
||||
|
||||
if let Ok(output) = tracking_output &&
|
||||
output.status.success()
|
||||
{
|
||||
let upstream = String::from_utf8_lossy(&output.stdout).trim().to_string();
|
||||
if !upstream.is_empty() && upstream != format!("{git_ref}@{{upstream}}") {
|
||||
// Branch tracks a remote, pull latest changes
|
||||
info!("Pulling latest changes for branch: {}", git_ref);
|
||||
|
||||
let pull_output = Command::new("git")
|
||||
.args(["pull", "--ff-only"])
|
||||
.current_dir(&self.repo_root)
|
||||
.output()
|
||||
.wrap_err_with(|| {
|
||||
format!("Failed to pull latest changes for branch '{git_ref}'")
|
||||
})?;
|
||||
|
||||
if pull_output.status.success() {
|
||||
info!("Successfully pulled latest changes for branch: {}", git_ref);
|
||||
} else {
|
||||
let stderr = String::from_utf8_lossy(&pull_output.stderr);
|
||||
warn!("Failed to pull latest changes for branch '{}': {}", git_ref, stderr);
|
||||
// Continue anyway, we'll use whatever version we have
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Verify the checkout succeeded by checking the current commit
|
||||
let current_commit_output = Command::new("git")
|
||||
.args(["rev-parse", "HEAD"])
|
||||
.current_dir(&self.repo_root)
|
||||
.output()
|
||||
.wrap_err("Failed to get current commit")?;
|
||||
|
||||
if !current_commit_output.status.success() {
|
||||
return Err(eyre!("Failed to verify git checkout"));
|
||||
}
|
||||
|
||||
info!("Switched to reference: {}", git_ref);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get the current commit hash
|
||||
pub(crate) fn get_current_commit(&self) -> Result<String> {
|
||||
let output = Command::new("git")
|
||||
.args(["rev-parse", "HEAD"])
|
||||
.current_dir(&self.repo_root)
|
||||
.output()
|
||||
.wrap_err("Failed to get current commit")?;
|
||||
|
||||
if !output.status.success() {
|
||||
return Err(eyre!("Failed to get current commit hash"));
|
||||
}
|
||||
|
||||
let commit_hash = String::from_utf8(output.stdout)
|
||||
.wrap_err("Commit hash is not valid UTF-8")?
|
||||
.trim()
|
||||
.to_string();
|
||||
|
||||
Ok(commit_hash)
|
||||
}
|
||||
|
||||
/// Get the repository root path
|
||||
pub(crate) fn repo_root(&self) -> &str {
|
||||
&self.repo_root
|
||||
}
|
||||
}
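The `GitManager` above shells out to plain git for every step. As a rough aid for debugging, the same checks can be reproduced by hand; this is a minimal sketch of the equivalent commands (`<ref>` is a placeholder, not a flag defined by this tool):

```bash
# Locate the repository root and confirm the working tree is clean (untracked files are tolerated).
git rev-parse --show-toplevel
git status --porcelain

# Refresh remote branches and tags before validating references.
git fetch --all --tags --quiet --force

# Resolve a reference the same way validate_refs does: peel it to a commit, then fall back to origin/.
git rev-parse --verify "<ref>^{commit}" || git rev-parse --verify "origin/<ref>^{commit}"

# Switch to the reference and fast-forward if it tracks a remote branch.
git checkout <ref>
git pull --ff-only
```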
|
||||
45
bin/reth-bench-compare/src/main.rs
Normal file
@@ -0,0 +1,45 @@
|
||||
//! # reth-bench-compare
|
||||
//!
|
||||
//! Automated tool for comparing reth performance between two git branches.
|
||||
//! This tool automates the complete workflow of compiling, running, and benchmarking
|
||||
//! reth on different branches to provide meaningful performance comparisons.
|
||||
|
||||
#![doc(
|
||||
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
|
||||
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
|
||||
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
|
||||
)]
|
||||
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
|
||||
|
||||
#[global_allocator]
|
||||
static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator();
|
||||
|
||||
mod benchmark;
|
||||
mod cli;
|
||||
mod comparison;
|
||||
mod compilation;
|
||||
mod git;
|
||||
mod node;
|
||||
|
||||
use clap::Parser;
|
||||
use cli::{run_comparison, Args};
|
||||
use eyre::Result;
|
||||
use reth_cli_runner::CliRunner;
|
||||
|
||||
fn main() -> Result<()> {
|
||||
// Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided.
|
||||
if std::env::var_os("RUST_BACKTRACE").is_none() {
|
||||
unsafe {
|
||||
std::env::set_var("RUST_BACKTRACE", "1");
|
||||
}
|
||||
}
|
||||
|
||||
let args = Args::parse();
|
||||
|
||||
// Initialize tracing
|
||||
let _guard = args.init_tracing()?;
|
||||
|
||||
// Run until either exit or sigint or sigterm
|
||||
let runner = CliRunner::try_default_runtime()?;
|
||||
runner.run_command_until_exit(|ctx| run_comparison(args, ctx))
|
||||
}
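Since `Args` derives `clap::Parser`, the binary exposes the usual clap-generated help. A hedged way to inspect the available flags without reading `cli.rs` (assuming the package name matches the `bin/reth-bench-compare` directory):

```bash
# Print the auto-generated CLI help; the concrete flags live in cli.rs, which is not shown here.
cargo run -p reth-bench-compare -- --help
```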
|
||||
554
bin/reth-bench-compare/src/node.rs
Normal file
@@ -0,0 +1,554 @@
|
||||
//! Node management for starting, stopping, and controlling reth instances.
|
||||
|
||||
use crate::cli::Args;
|
||||
use alloy_provider::{Provider, ProviderBuilder};
|
||||
use alloy_rpc_types_eth::SyncStatus;
|
||||
use eyre::{eyre, OptionExt, Result, WrapErr};
|
||||
#[cfg(unix)]
|
||||
use nix::sys::signal::{killpg, Signal};
|
||||
#[cfg(unix)]
|
||||
use nix::unistd::Pid;
|
||||
use reth_chainspec::Chain;
|
||||
use std::{fs, path::PathBuf, time::Duration};
|
||||
use tokio::{
|
||||
fs::File as AsyncFile,
|
||||
io::{AsyncBufReadExt, AsyncWriteExt, BufReader as AsyncBufReader},
|
||||
process::Command,
|
||||
time::{sleep, timeout},
|
||||
};
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
/// Manages reth node lifecycle and operations
|
||||
pub(crate) struct NodeManager {
|
||||
datadir: Option<String>,
|
||||
metrics_port: u16,
|
||||
chain: Chain,
|
||||
use_sudo: bool,
|
||||
binary_path: Option<std::path::PathBuf>,
|
||||
enable_profiling: bool,
|
||||
output_dir: PathBuf,
|
||||
additional_reth_args: Vec<String>,
|
||||
comparison_dir: Option<PathBuf>,
|
||||
tracing_endpoint: Option<String>,
|
||||
otlp_max_queue_size: usize,
|
||||
}
|
||||
|
||||
impl NodeManager {
|
||||
/// Create a new `NodeManager` with configuration from CLI args
|
||||
pub(crate) fn new(args: &Args) -> Self {
|
||||
Self {
|
||||
datadir: Some(args.datadir_path().to_string_lossy().to_string()),
|
||||
metrics_port: args.metrics_port,
|
||||
chain: args.chain,
|
||||
use_sudo: args.sudo,
|
||||
binary_path: None,
|
||||
enable_profiling: args.profile,
|
||||
output_dir: args.output_dir_path(),
|
||||
// Filter out empty strings to prevent invalid arguments being passed to reth node
|
||||
additional_reth_args: args
|
||||
.reth_args
|
||||
.iter()
|
||||
.filter(|s| !s.is_empty())
|
||||
.cloned()
|
||||
.collect(),
|
||||
comparison_dir: None,
|
||||
tracing_endpoint: args.traces.otlp.as_ref().map(|u| u.to_string()),
|
||||
otlp_max_queue_size: args.otlp_max_queue_size,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the comparison directory path for logging
|
||||
pub(crate) fn set_comparison_dir(&mut self, dir: PathBuf) {
|
||||
self.comparison_dir = Some(dir);
|
||||
}
|
||||
|
||||
/// Get the log file path for a given reference type
|
||||
fn get_log_file_path(&self, ref_type: &str) -> Result<PathBuf> {
|
||||
let comparison_dir = self
|
||||
.comparison_dir
|
||||
.as_ref()
|
||||
.ok_or_eyre("Comparison directory not set. Call set_comparison_dir first.")?;
|
||||
|
||||
// The comparison directory already contains the full path to results/<timestamp>
|
||||
let log_dir = comparison_dir.join(ref_type);
|
||||
|
||||
// Create the directory if it doesn't exist
|
||||
fs::create_dir_all(&log_dir)
|
||||
.wrap_err(format!("Failed to create log directory: {:?}", log_dir))?;
|
||||
|
||||
let log_file = log_dir.join("reth_node.log");
|
||||
Ok(log_file)
|
||||
}
|
||||
|
||||
/// Get the perf event max sample rate from the system, capped at 10000
|
||||
fn get_perf_sample_rate(&self) -> Option<String> {
|
||||
let perf_rate_file = "/proc/sys/kernel/perf_event_max_sample_rate";
|
||||
if let Ok(content) = fs::read_to_string(perf_rate_file) {
|
||||
let rate_str = content.trim();
|
||||
if !rate_str.is_empty() {
|
||||
if let Ok(system_rate) = rate_str.parse::<u32>() {
|
||||
let capped_rate = std::cmp::min(system_rate, 10000);
|
||||
info!(
|
||||
"Detected perf_event_max_sample_rate: {}, using: {}",
|
||||
system_rate, capped_rate
|
||||
);
|
||||
return Some(capped_rate.to_string());
|
||||
}
|
||||
warn!("Failed to parse perf_event_max_sample_rate: {}", rate_str);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Get the absolute path to samply using 'which' command
|
||||
async fn get_samply_path(&self) -> Result<String> {
|
||||
let output = Command::new("which")
|
||||
.arg("samply")
|
||||
.output()
|
||||
.await
|
||||
.wrap_err("Failed to execute 'which samply' command")?;
|
||||
|
||||
if !output.status.success() {
|
||||
return Err(eyre!("samply not found in PATH"));
|
||||
}
|
||||
|
||||
let samply_path = String::from_utf8(output.stdout)
|
||||
.wrap_err("samply path is not valid UTF-8")?
|
||||
.trim()
|
||||
.to_string();
|
||||
|
||||
if samply_path.is_empty() {
|
||||
return Err(eyre!("which samply returned empty path"));
|
||||
}
|
||||
|
||||
Ok(samply_path)
|
||||
}
|
||||
|
||||
/// Build reth arguments as a vector of strings
|
||||
fn build_reth_args(
|
||||
&self,
|
||||
binary_path_str: &str,
|
||||
additional_args: &[String],
|
||||
ref_type: &str,
|
||||
) -> (Vec<String>, String) {
|
||||
let mut reth_args = vec![binary_path_str.to_string(), "node".to_string()];
|
||||
|
||||
// Add chain argument (skip for mainnet as it's the default)
|
||||
let chain_str = self.chain.to_string();
|
||||
if chain_str != "mainnet" {
|
||||
reth_args.extend_from_slice(&["--chain".to_string(), chain_str.clone()]);
|
||||
}
|
||||
|
||||
// Add datadir if specified
|
||||
if let Some(ref datadir) = self.datadir {
|
||||
reth_args.extend_from_slice(&["--datadir".to_string(), datadir.clone()]);
|
||||
}
|
||||
|
||||
// Add reth-specific arguments
|
||||
let metrics_arg = format!("0.0.0.0:{}", self.metrics_port);
|
||||
reth_args.extend_from_slice(&[
|
||||
"--engine.accept-execution-requests-hash".to_string(),
|
||||
"--metrics".to_string(),
|
||||
metrics_arg,
|
||||
"--http".to_string(),
|
||||
"--http.api".to_string(),
|
||||
"eth".to_string(),
|
||||
"--disable-discovery".to_string(),
|
||||
"--trusted-only".to_string(),
|
||||
]);
|
||||
|
||||
// Add tracing arguments if OTLP endpoint is configured
|
||||
if let Some(ref endpoint) = self.tracing_endpoint {
|
||||
info!("Enabling OTLP tracing export to: {} (service: reth-{})", endpoint, ref_type);
|
||||
// Endpoint requires equals per clap settings in reth
|
||||
reth_args.push(format!("--tracing-otlp={}", endpoint));
|
||||
}
|
||||
|
||||
// Add any additional arguments passed via command line (common to both baseline and
|
||||
// feature)
|
||||
reth_args.extend_from_slice(&self.additional_reth_args);
|
||||
|
||||
// Add reference-specific additional arguments
|
||||
reth_args.extend_from_slice(additional_args);
|
||||
|
||||
(reth_args, chain_str)
|
||||
}
|
||||
|
||||
/// Create a command for profiling mode
|
||||
async fn create_profiling_command(
|
||||
&self,
|
||||
ref_type: &str,
|
||||
reth_args: &[String],
|
||||
) -> Result<Command> {
|
||||
// Create profiles directory if it doesn't exist
|
||||
let profile_dir = self.output_dir.join("profiles");
|
||||
fs::create_dir_all(&profile_dir).wrap_err("Failed to create profiles directory")?;
|
||||
|
||||
let profile_path = profile_dir.join(format!("{}.json.gz", ref_type));
|
||||
info!("Starting reth node with samply profiling...");
|
||||
info!("Profile output: {:?}", profile_path);
|
||||
|
||||
// Get absolute path to samply
|
||||
let samply_path = self.get_samply_path().await?;
|
||||
|
||||
let mut cmd = if self.use_sudo {
|
||||
let mut sudo_cmd = Command::new("sudo");
|
||||
sudo_cmd.arg(&samply_path);
|
||||
sudo_cmd
|
||||
} else {
|
||||
Command::new(&samply_path)
|
||||
};
|
||||
|
||||
// Add samply arguments
|
||||
cmd.args(["record", "--save-only", "-o", &profile_path.to_string_lossy()]);
|
||||
|
||||
// Add rate argument if available
|
||||
if let Some(rate) = self.get_perf_sample_rate() {
|
||||
cmd.args(["--rate", &rate]);
|
||||
}
|
||||
|
||||
// Add separator and complete reth command
|
||||
cmd.arg("--");
|
||||
cmd.args(reth_args);
|
||||
|
||||
// Set environment variable to disable log styling
|
||||
cmd.env("RUST_LOG_STYLE", "never");
|
||||
|
||||
Ok(cmd)
|
||||
}
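For reference, the command assembled here ends up shaped roughly like the following; the output path, sample rate, and trailing reth flags are illustrative values taken from the construction above, not fixed defaults:

```bash
# samply wraps the reth invocation; everything after `--` is passed to reth unchanged.
samply record --save-only -o profiles/baseline.json.gz --rate 10000 -- \
    target/profiling/reth node --metrics 0.0.0.0:9001 --http --http.api eth
```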
|
||||
|
||||
/// Create a command for direct reth execution
|
||||
fn create_direct_command(&self, reth_args: &[String]) -> Command {
|
||||
let binary_path = &reth_args[0];
|
||||
|
||||
let mut cmd = if self.use_sudo {
|
||||
info!("Starting reth node with sudo...");
|
||||
let mut sudo_cmd = Command::new("sudo");
|
||||
sudo_cmd.args(reth_args);
|
||||
sudo_cmd
|
||||
} else {
|
||||
info!("Starting reth node...");
|
||||
let mut reth_cmd = Command::new(binary_path);
|
||||
reth_cmd.args(&reth_args[1..]); // Skip the binary path since it's the command
|
||||
reth_cmd
|
||||
};
|
||||
|
||||
// Set environment variable to disable log styling
|
||||
cmd.env("RUST_LOG_STYLE", "never");
|
||||
|
||||
cmd
|
||||
}
|
||||
|
||||
/// Start a reth node using the specified binary path and return the process handle
|
||||
/// along with the formatted reth command string for reporting.
|
||||
pub(crate) async fn start_node(
|
||||
&mut self,
|
||||
binary_path: &std::path::Path,
|
||||
_git_ref: &str,
|
||||
ref_type: &str,
|
||||
additional_args: &[String],
|
||||
) -> Result<(tokio::process::Child, String)> {
|
||||
// Store the binary path for later use (e.g., in unwind_to_block)
|
||||
self.binary_path = Some(binary_path.to_path_buf());
|
||||
|
||||
let binary_path_str = binary_path.to_string_lossy();
|
||||
let (reth_args, _) = self.build_reth_args(&binary_path_str, additional_args, ref_type);
|
||||
|
||||
// Format the reth command string for reporting
|
||||
let reth_command = shlex::try_join(reth_args.iter().map(|s| s.as_str()))
|
||||
.wrap_err("Failed to format reth command string")?;
|
||||
|
||||
// Log additional arguments if any
|
||||
if !self.additional_reth_args.is_empty() {
|
||||
info!("Using common additional reth arguments: {:?}", self.additional_reth_args);
|
||||
}
|
||||
if !additional_args.is_empty() {
|
||||
info!("Using reference-specific additional reth arguments: {:?}", additional_args);
|
||||
}
|
||||
|
||||
let mut cmd = if self.enable_profiling {
|
||||
self.create_profiling_command(ref_type, &reth_args).await?
|
||||
} else {
|
||||
self.create_direct_command(&reth_args)
|
||||
};
|
||||
|
||||
// Set process group for better signal handling
|
||||
#[cfg(unix)]
|
||||
{
|
||||
cmd.process_group(0);
|
||||
}
|
||||
|
||||
// Set high queue size to prevent trace dropping during benchmarks
|
||||
if self.tracing_endpoint.is_some() {
|
||||
cmd.env("OTEL_BSP_MAX_QUEUE_SIZE", self.otlp_max_queue_size.to_string()); // Traces
|
||||
cmd.env("OTEL_BLRP_MAX_QUEUE_SIZE", "10000"); // Logs
|
||||
|
||||
// Set service name to differentiate baseline vs feature runs in Jaeger
|
||||
cmd.env("OTEL_SERVICE_NAME", format!("reth-{}", ref_type));
|
||||
}
|
||||
|
||||
debug!("Executing reth command: {cmd:?}");
|
||||
|
||||
let mut child = cmd
|
||||
.stdout(std::process::Stdio::piped())
|
||||
.stderr(std::process::Stdio::piped())
|
||||
.kill_on_drop(true) // Kill on drop so that on Ctrl-C for parent process we stop all child processes
|
||||
.spawn()
|
||||
.wrap_err("Failed to start reth node")?;
|
||||
|
||||
info!(
|
||||
"Reth node started with PID: {:?} (binary: {})",
|
||||
child.id().ok_or_eyre("Reth node is not running")?,
|
||||
binary_path_str
|
||||
);
|
||||
|
||||
// Prepare log file path
|
||||
let log_file_path = self.get_log_file_path(ref_type)?;
|
||||
info!("Reth node logs will be saved to: {:?}", log_file_path);
|
||||
|
||||
// Stream stdout and stderr with prefixes at debug level and to log file
|
||||
if let Some(stdout) = child.stdout.take() {
|
||||
let log_file = AsyncFile::create(&log_file_path)
|
||||
.await
|
||||
.wrap_err(format!("Failed to create log file: {:?}", log_file_path))?;
|
||||
tokio::spawn(async move {
|
||||
let reader = AsyncBufReader::new(stdout);
|
||||
let mut lines = reader.lines();
|
||||
let mut log_file = log_file;
|
||||
while let Ok(Some(line)) = lines.next_line().await {
|
||||
debug!("[RETH] {}", line);
|
||||
// Write to log file (reth already includes timestamps)
|
||||
let log_line = format!("{}\n", line);
|
||||
if let Err(e) = log_file.write_all(log_line.as_bytes()).await {
|
||||
debug!("Failed to write to log file: {}", e);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if let Some(stderr) = child.stderr.take() {
|
||||
let log_file = AsyncFile::options()
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open(&log_file_path)
|
||||
.await
|
||||
.wrap_err(format!("Failed to open log file for stderr: {:?}", log_file_path))?;
|
||||
tokio::spawn(async move {
|
||||
let reader = AsyncBufReader::new(stderr);
|
||||
let mut lines = reader.lines();
|
||||
let mut log_file = log_file;
|
||||
while let Ok(Some(line)) = lines.next_line().await {
|
||||
debug!("[RETH] {}", line);
|
||||
// Write to log file (reth already includes timestamps)
|
||||
let log_line = format!("{}\n", line);
|
||||
if let Err(e) = log_file.write_all(log_line.as_bytes()).await {
|
||||
debug!("Failed to write to log file: {}", e);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Give the node a moment to start up
|
||||
sleep(Duration::from_secs(5)).await;
|
||||
|
||||
Ok((child, reth_command))
|
||||
}
|
||||
|
||||
/// Wait for the node to be ready and return its current tip
|
||||
pub(crate) async fn wait_for_node_ready_and_get_tip(&self) -> Result<u64> {
|
||||
info!("Waiting for node to be ready and synced...");
|
||||
|
||||
let max_wait = Duration::from_secs(120); // 2 minutes to allow for sync
|
||||
let check_interval = Duration::from_secs(2);
|
||||
let rpc_url = "http://localhost:8545";
|
||||
|
||||
// Create Alloy provider
|
||||
let url = rpc_url.parse().map_err(|e| eyre!("Invalid RPC URL '{}': {}", rpc_url, e))?;
|
||||
let provider = ProviderBuilder::new().connect_http(url);
|
||||
|
||||
timeout(max_wait, async {
|
||||
loop {
|
||||
// First check if RPC is up and node is not syncing
|
||||
match provider.syncing().await {
|
||||
Ok(sync_result) => {
|
||||
match sync_result {
|
||||
SyncStatus::Info(sync_info) => {
|
||||
debug!("Node is still syncing {sync_info:?}, waiting...");
|
||||
}
|
||||
_ => {
|
||||
// Node is not syncing, now get the tip
|
||||
match provider.get_block_number().await {
|
||||
Ok(tip) => {
|
||||
info!("Node is ready and not syncing at block: {}", tip);
|
||||
return Ok(tip);
|
||||
}
|
||||
Err(e) => {
|
||||
debug!("Failed to get block number: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
debug!("Node RPC not ready yet or failed to check sync status: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
sleep(check_interval).await;
|
||||
}
|
||||
})
|
||||
.await
|
||||
.wrap_err("Timed out waiting for node to be ready and synced")?
|
||||
}
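The readiness probe above is ordinary JSON-RPC against the node's HTTP endpoint, so it can also be checked by hand. A minimal sketch using `curl` against the hardcoded `http://localhost:8545`:

```bash
# eth_syncing returns false once the node reports it is no longer syncing.
curl -s -X POST -H 'Content-Type: application/json' \
    --data '{"jsonrpc":"2.0","id":1,"method":"eth_syncing","params":[]}' http://localhost:8545

# eth_blockNumber then yields the tip the benchmark will start from.
curl -s -X POST -H 'Content-Type: application/json' \
    --data '{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}' http://localhost:8545
```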
|
||||
|
||||
/// Stop the reth node gracefully
|
||||
pub(crate) async fn stop_node(&self, child: &mut tokio::process::Child) -> Result<()> {
|
||||
let pid = child.id().expect("Child process ID should be available");
|
||||
|
||||
// Check if the process has already exited
|
||||
match child.try_wait() {
|
||||
Ok(Some(status)) => {
|
||||
info!("Reth node (PID: {}) has already exited with status: {:?}", pid, status);
|
||||
return Ok(());
|
||||
}
|
||||
Ok(None) => {
|
||||
// Process is still running, proceed to stop it
|
||||
info!("Stopping process gracefully with SIGINT (PID: {})...", pid);
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(eyre!("Failed to check process status: {}", e));
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
{
|
||||
// Send SIGINT to process group to mimic Ctrl-C behavior
|
||||
let nix_pgid = Pid::from_raw(pid as i32);
|
||||
|
||||
match killpg(nix_pgid, Signal::SIGINT) {
|
||||
Ok(()) => {}
|
||||
Err(nix::errno::Errno::ESRCH) => {
|
||||
info!("Process group {} has already exited", pid);
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(eyre!("Failed to send SIGINT to process group {}: {}", pid, e));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
{
|
||||
// On non-Unix systems, fall back to using external kill command
|
||||
let output = Command::new("taskkill")
|
||||
.args(["/PID", &pid.to_string(), "/F"])
|
||||
.output()
|
||||
.await
|
||||
.wrap_err("Failed to execute taskkill command")?;
|
||||
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
// Check if the error is because the process doesn't exist
|
||||
if stderr.contains("not found") || stderr.contains("not exist") {
|
||||
info!("Process {} has already exited", pid);
|
||||
} else {
|
||||
return Err(eyre!("Failed to kill process {}: {}", pid, stderr));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for the process to exit
|
||||
match child.wait().await {
|
||||
Ok(status) => {
|
||||
info!("Reth node (PID: {}) exited with status: {:?}", pid, status);
|
||||
}
|
||||
Err(e) => {
|
||||
// If we get an error here, it might be because the process already exited
|
||||
debug!("Error waiting for process exit (may have already exited): {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Unwind the node to a specific block
|
||||
pub(crate) async fn unwind_to_block(&self, block_number: u64) -> Result<()> {
|
||||
if self.use_sudo {
|
||||
info!("Unwinding node to block: {} (with sudo)", block_number);
|
||||
} else {
|
||||
info!("Unwinding node to block: {}", block_number);
|
||||
}
|
||||
|
||||
// Use the binary path from the last start_node call, or fallback to default
|
||||
let binary_path = self
|
||||
.binary_path
|
||||
.as_ref()
|
||||
.map(|p| p.to_string_lossy().to_string())
|
||||
.unwrap_or_else(|| "./target/profiling/reth".to_string());
|
||||
|
||||
let mut cmd = if self.use_sudo {
|
||||
let mut sudo_cmd = Command::new("sudo");
|
||||
sudo_cmd.args([&binary_path, "stage", "unwind"]);
|
||||
sudo_cmd
|
||||
} else {
|
||||
let mut reth_cmd = Command::new(&binary_path);
|
||||
reth_cmd.args(["stage", "unwind"]);
|
||||
reth_cmd
|
||||
};
|
||||
|
||||
// Add chain argument (skip for mainnet as it's the default)
|
||||
let chain_str = self.chain.to_string();
|
||||
if chain_str != "mainnet" {
|
||||
cmd.args(["--chain", &chain_str]);
|
||||
}
|
||||
|
||||
// Add datadir if specified
|
||||
if let Some(ref datadir) = self.datadir {
|
||||
cmd.args(["--datadir", datadir]);
|
||||
}
|
||||
|
||||
cmd.args(["to-block", &block_number.to_string()]);
|
||||
|
||||
// Set environment variable to disable log styling
|
||||
cmd.env("RUST_LOG_STYLE", "never");
|
||||
|
||||
// Debug log the command
|
||||
debug!("Executing reth unwind command: {:?}", cmd);
|
||||
|
||||
let mut child = cmd
|
||||
.stdout(std::process::Stdio::piped())
|
||||
.stderr(std::process::Stdio::piped())
|
||||
.spawn()
|
||||
.wrap_err("Failed to start unwind command")?;
|
||||
|
||||
// Stream stdout and stderr with prefixes in real-time
|
||||
if let Some(stdout) = child.stdout.take() {
|
||||
tokio::spawn(async move {
|
||||
let reader = AsyncBufReader::new(stdout);
|
||||
let mut lines = reader.lines();
|
||||
while let Ok(Some(line)) = lines.next_line().await {
|
||||
debug!("[RETH-UNWIND] {}", line);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if let Some(stderr) = child.stderr.take() {
|
||||
tokio::spawn(async move {
|
||||
let reader = AsyncBufReader::new(stderr);
|
||||
let mut lines = reader.lines();
|
||||
while let Ok(Some(line)) = lines.next_line().await {
|
||||
debug!("[RETH-UNWIND] {}", line);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Wait for the command to complete
|
||||
let status = child.wait().await.wrap_err("Failed to wait for unwind command")?;
|
||||
|
||||
if !status.success() {
|
||||
return Err(eyre!("Unwind command failed with exit code: {:?}", status.code()));
|
||||
}
|
||||
|
||||
info!("Unwound to block: {}", block_number);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -81,11 +81,26 @@ jemalloc = [
|
||||
jemalloc-prof = ["reth-cli-util/jemalloc-prof"]
|
||||
tracy-allocator = ["reth-cli-util/tracy-allocator"]
|
||||
|
||||
min-error-logs = ["tracing/release_max_level_error"]
|
||||
min-warn-logs = ["tracing/release_max_level_warn"]
|
||||
min-info-logs = ["tracing/release_max_level_info"]
|
||||
min-debug-logs = ["tracing/release_max_level_debug"]
|
||||
min-trace-logs = ["tracing/release_max_level_trace"]
|
||||
min-error-logs = [
|
||||
"tracing/release_max_level_error",
|
||||
"reth-node-core/min-error-logs",
|
||||
]
|
||||
min-warn-logs = [
|
||||
"tracing/release_max_level_warn",
|
||||
"reth-node-core/min-warn-logs",
|
||||
]
|
||||
min-info-logs = [
|
||||
"tracing/release_max_level_info",
|
||||
"reth-node-core/min-info-logs",
|
||||
]
|
||||
min-debug-logs = [
|
||||
"tracing/release_max_level_debug",
|
||||
"reth-node-core/min-debug-logs",
|
||||
]
|
||||
min-trace-logs = [
|
||||
"tracing/release_max_level_trace",
|
||||
"reth-node-core/min-trace-logs",
|
||||
]
|
||||
|
||||
# no-op feature flag for switching between the `optimism` and default functionality in CI matrices
|
||||
ethereum = []
|
||||
|
||||
@@ -80,7 +80,7 @@ RUSTFLAGS="-C target-cpu=native" cargo build --profile profiling --no-default-fe
### Run the Benchmark:
First, start the reth node. Here is an example that runs `reth` compiled with the `profiling` profile, runs `samply`, and configures `reth` to run with metrics enabled:
```bash
samply record -p 3001 target/profiling/reth node --metrics localhost:9001 --authrpc.jwt-secret <jwt_file_path>
samply record -p 3001 target/profiling/reth node --metrics localhost:9001 --authrpc.jwtsecret <jwt_file_path>
```

```bash
@@ -143,5 +143,5 @@ To reproduce the benchmark, first re-set the node to the block that the benchmar
- **RPC Configuration**: The RPC endpoints should be accessible and configured correctly, specifically the RPC endpoint must support `eth_getBlockByNumber` and support fetching full transactions. The benchmark will make one RPC query per block as fast as possible, so ensure the RPC endpoint does not rate limit or block requests after a certain volume.
- **Reproducibility**: Ensure that the node is at the same state before attempting to retry a benchmark. The `new-payload-fcu` command specifically will commit to the database, so the node must be rolled back using `reth stage unwind` to reproducibly retry benchmarks.
- **Profiling tools**: If you are collecting CPU profiles, tools like [`samply`](https://github.com/mstange/samply) and [`perf`](https://perf.wiki.kernel.org/index.php/Main_Page) can be useful for analyzing node performance.
- **Benchmark Data**: `reth-bench` additionally contains a `--benchmark.output` flag, which will output gas used benchmarks across the benchmark range in CSV format. This may be useful for further data analysis.
- **Benchmark Data**: `reth-bench` additionally contains a `--output` flag, which will output gas used benchmarks across the benchmark range in CSV format. This may be useful for further data analysis.
- **Platform Information**: To ensure accurate and reproducible benchmarking, document the platform details, including hardware specifications, OS version, and any other relevant information before publishing any benchmarks.
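The reproducibility note above relies on `reth stage unwind`, which is the same subcommand `reth-bench-compare` invokes between runs (see `unwind_to_block` in node.rs). A hedged sketch of resetting the node by hand before re-running a benchmark; the datadir and block number are placeholders:

```bash
# Roll the node back to the block the benchmark started from, then re-run reth-bench.
reth stage unwind --datadir <datadir> to-block <start_block>
```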
|
||||
|
||||
@@ -30,8 +30,8 @@ pub struct Command {
|
||||
rpc_url: String,
|
||||
|
||||
/// How long to wait after a forkchoice update before sending the next payload.
|
||||
#[arg(long, value_name = "WAIT_TIME", value_parser = parse_duration, verbatim_doc_comment)]
|
||||
wait_time: Option<Duration>,
|
||||
#[arg(long, value_name = "WAIT_TIME", value_parser = parse_duration, default_value = "250ms", verbatim_doc_comment)]
|
||||
wait_time: Duration,
|
||||
|
||||
/// The size of the block buffer (channel capacity) for prefetching blocks from the RPC
|
||||
/// endpoint.
|
||||
@@ -79,22 +79,13 @@ impl Command {
|
||||
break;
|
||||
}
|
||||
};
|
||||
let header = block.header.clone();
|
||||
|
||||
let (version, params) = match block_to_new_payload(block, is_optimism) {
|
||||
Ok(result) => result,
|
||||
Err(e) => {
|
||||
tracing::error!("Failed to convert block to new payload: {e}");
|
||||
let _ = error_sender.send(e);
|
||||
break;
|
||||
}
|
||||
};
|
||||
let head_block_hash = header.hash;
|
||||
let safe_block_hash =
|
||||
block_provider.get_block_by_number(header.number.saturating_sub(32).into());
|
||||
let head_block_hash = block.header.hash;
|
||||
let safe_block_hash = block_provider
|
||||
.get_block_by_number(block.header.number.saturating_sub(32).into());
|
||||
|
||||
let finalized_block_hash =
|
||||
block_provider.get_block_by_number(header.number.saturating_sub(64).into());
|
||||
let finalized_block_hash = block_provider
|
||||
.get_block_by_number(block.header.number.saturating_sub(64).into());
|
||||
|
||||
let (safe, finalized) = tokio::join!(safe_block_hash, finalized_block_hash,);
|
||||
|
||||
@@ -110,14 +101,7 @@ impl Command {
|
||||
|
||||
next_block += 1;
|
||||
if let Err(e) = sender
|
||||
.send((
|
||||
header,
|
||||
version,
|
||||
params,
|
||||
head_block_hash,
|
||||
safe_block_hash,
|
||||
finalized_block_hash,
|
||||
))
|
||||
.send((block, head_block_hash, safe_block_hash, finalized_block_hash))
|
||||
.await
|
||||
{
|
||||
tracing::error!("Failed to send block data: {e}");
|
||||
@@ -131,15 +115,16 @@ impl Command {
|
||||
let total_benchmark_duration = Instant::now();
|
||||
let mut total_wait_time = Duration::ZERO;
|
||||
|
||||
while let Some((header, version, params, head, safe, finalized)) = {
|
||||
while let Some((block, head, safe, finalized)) = {
|
||||
let wait_start = Instant::now();
|
||||
let result = receiver.recv().await;
|
||||
total_wait_time += wait_start.elapsed();
|
||||
result
|
||||
} {
|
||||
// just put gas used here
|
||||
let gas_used = header.gas_used;
|
||||
let block_number = header.number;
|
||||
let gas_used = block.header.gas_used;
|
||||
let block_number = block.header.number;
|
||||
let transaction_count = block.transactions.len() as u64;
|
||||
|
||||
debug!(target: "reth-bench", ?block_number, "Sending payload",);
|
||||
|
||||
@@ -150,6 +135,7 @@ impl Command {
|
||||
finalized_block_hash: finalized,
|
||||
};
|
||||
|
||||
let (version, params) = block_to_new_payload(block, is_optimism)?;
|
||||
let start = Instant::now();
|
||||
call_new_payload(&auth_provider, version, params).await?;
|
||||
|
||||
@@ -160,8 +146,13 @@ impl Command {
|
||||
// calculate the total duration and the fcu latency, record
|
||||
let total_latency = start.elapsed();
|
||||
let fcu_latency = total_latency - new_payload_result.latency;
|
||||
let combined_result =
|
||||
CombinedResult { block_number, new_payload_result, fcu_latency, total_latency };
|
||||
let combined_result = CombinedResult {
|
||||
block_number,
|
||||
transaction_count,
|
||||
new_payload_result,
|
||||
fcu_latency,
|
||||
total_latency,
|
||||
};
|
||||
|
||||
// current duration since the start of the benchmark minus the time
|
||||
// waiting for blocks
|
||||
@@ -170,13 +161,12 @@ impl Command {
|
||||
// convert gas used to gigagas, then compute gigagas per second
|
||||
info!(%combined_result);
|
||||
|
||||
// wait if we need to
|
||||
if let Some(wait_time) = self.wait_time {
|
||||
tokio::time::sleep(wait_time).await;
|
||||
}
|
||||
// wait before sending the next payload
|
||||
tokio::time::sleep(self.wait_time).await;
|
||||
|
||||
// record the current result
|
||||
let gas_row = TotalGasRow { block_number, gas_used, time: current_duration };
|
||||
let gas_row =
|
||||
TotalGasRow { block_number, transaction_count, gas_used, time: current_duration };
|
||||
results.push((gas_row, combined_result));
|
||||
}
|
||||
|
||||
|
||||
@@ -72,19 +72,9 @@ impl Command {
|
||||
break;
|
||||
}
|
||||
};
|
||||
let header = block.header.clone();
|
||||
|
||||
let (version, params) = match block_to_new_payload(block, is_optimism) {
|
||||
Ok(result) => result,
|
||||
Err(e) => {
|
||||
tracing::error!("Failed to convert block to new payload: {e}");
|
||||
let _ = error_sender.send(e);
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
next_block += 1;
|
||||
if let Err(e) = sender.send((header, version, params)).await {
|
||||
if let Err(e) = sender.send(block).await {
|
||||
tracing::error!("Failed to send block data: {e}");
|
||||
break;
|
||||
}
|
||||
@@ -96,23 +86,24 @@ impl Command {
|
||||
let total_benchmark_duration = Instant::now();
|
||||
let mut total_wait_time = Duration::ZERO;
|
||||
|
||||
while let Some((header, version, params)) = {
|
||||
while let Some(block) = {
|
||||
let wait_start = Instant::now();
|
||||
let result = receiver.recv().await;
|
||||
total_wait_time += wait_start.elapsed();
|
||||
result
|
||||
} {
|
||||
// just put gas used here
|
||||
let gas_used = header.gas_used;
|
||||
|
||||
let block_number = header.number;
|
||||
let block_number = block.header.number;
|
||||
let transaction_count = block.transactions.len() as u64;
|
||||
let gas_used = block.header.gas_used;
|
||||
|
||||
debug!(
|
||||
target: "reth-bench",
|
||||
number=?header.number,
|
||||
number=?block.header.number,
|
||||
"Sending payload to engine",
|
||||
);
|
||||
|
||||
let (version, params) = block_to_new_payload(block, is_optimism)?;
|
||||
|
||||
let start = Instant::now();
|
||||
call_new_payload(&auth_provider, version, params).await?;
|
||||
|
||||
@@ -124,7 +115,8 @@ impl Command {
|
||||
let current_duration = total_benchmark_duration.elapsed() - total_wait_time;
|
||||
|
||||
// record the current result
|
||||
let row = TotalGasRow { block_number, gas_used, time: current_duration };
|
||||
let row =
|
||||
TotalGasRow { block_number, transaction_count, gas_used, time: current_duration };
|
||||
results.push((row, new_payload_result));
|
||||
}
|
||||
|
||||
|
||||
@@ -67,6 +67,8 @@ impl Serialize for NewPayloadResult {
|
||||
pub(crate) struct CombinedResult {
|
||||
/// The block number of the block being processed.
|
||||
pub(crate) block_number: u64,
|
||||
/// The number of transactions in the block.
|
||||
pub(crate) transaction_count: u64,
|
||||
/// The `newPayload` result.
|
||||
pub(crate) new_payload_result: NewPayloadResult,
|
||||
/// The latency of the `forkchoiceUpdated` call.
|
||||
@@ -108,10 +110,11 @@ impl Serialize for CombinedResult {
|
||||
let fcu_latency = self.fcu_latency.as_micros();
|
||||
let new_payload_latency = self.new_payload_result.latency.as_micros();
|
||||
let total_latency = self.total_latency.as_micros();
|
||||
let mut state = serializer.serialize_struct("CombinedResult", 5)?;
|
||||
let mut state = serializer.serialize_struct("CombinedResult", 6)?;
|
||||
|
||||
// flatten the new payload result because this is meant for CSV writing
|
||||
state.serialize_field("block_number", &self.block_number)?;
|
||||
state.serialize_field("transaction_count", &self.transaction_count)?;
|
||||
state.serialize_field("gas_used", &self.new_payload_result.gas_used)?;
|
||||
state.serialize_field("new_payload_latency", &new_payload_latency)?;
|
||||
state.serialize_field("fcu_latency", &fcu_latency)?;
|
||||
@@ -125,6 +128,8 @@ impl Serialize for CombinedResult {
|
||||
pub(crate) struct TotalGasRow {
|
||||
/// The block number of the block being processed.
|
||||
pub(crate) block_number: u64,
|
||||
/// The number of transactions in the block.
|
||||
pub(crate) transaction_count: u64,
|
||||
/// The total gas used in the block.
|
||||
pub(crate) gas_used: u64,
|
||||
/// Time since the start of the benchmark.
|
||||
@@ -172,8 +177,9 @@ impl Serialize for TotalGasRow {
|
||||
{
|
||||
// convert the time to microseconds
|
||||
let time = self.time.as_micros();
|
||||
let mut state = serializer.serialize_struct("TotalGasRow", 3)?;
|
||||
let mut state = serializer.serialize_struct("TotalGasRow", 4)?;
|
||||
state.serialize_field("block_number", &self.block_number)?;
|
||||
state.serialize_field("transaction_count", &self.transaction_count)?;
|
||||
state.serialize_field("gas_used", &self.gas_used)?;
|
||||
state.serialize_field("time", &time)?;
|
||||
state.end()
|
||||
@@ -188,7 +194,12 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_write_total_gas_row_csv() {
|
||||
let row = TotalGasRow { block_number: 1, gas_used: 1_000, time: Duration::from_secs(1) };
|
||||
let row = TotalGasRow {
|
||||
block_number: 1,
|
||||
transaction_count: 10,
|
||||
gas_used: 1_000,
|
||||
time: Duration::from_secs(1),
|
||||
};
|
||||
|
||||
let mut writer = Writer::from_writer(vec![]);
|
||||
writer.serialize(row).unwrap();
|
||||
@@ -198,11 +209,11 @@ mod tests {
|
||||
let mut result = result.as_slice().lines();
|
||||
|
||||
// assert header
|
||||
let expected_first_line = "block_number,gas_used,time";
|
||||
let expected_first_line = "block_number,transaction_count,gas_used,time";
|
||||
let first_line = result.next().unwrap().unwrap();
|
||||
assert_eq!(first_line, expected_first_line);
|
||||
|
||||
let expected_second_line = "1,1000,1000000";
|
||||
let expected_second_line = "1,10,1000,1000000";
|
||||
let second_line = result.next().unwrap().unwrap();
|
||||
assert_eq!(second_line, expected_second_line);
|
||||
}
|
||||
|
||||
@@ -54,7 +54,7 @@ reth-node-api.workspace = true
|
||||
reth-node-core.workspace = true
|
||||
reth-ethereum-payload-builder.workspace = true
|
||||
reth-ethereum-primitives.workspace = true
|
||||
reth-node-ethereum = { workspace = true, features = ["js-tracer"] }
|
||||
reth-node-ethereum.workspace = true
|
||||
reth-node-builder.workspace = true
|
||||
reth-node-metrics.workspace = true
|
||||
reth-consensus.workspace = true
|
||||
@@ -81,7 +81,22 @@ backon.workspace = true
|
||||
tempfile.workspace = true
|
||||
|
||||
[features]
|
||||
default = ["jemalloc", "reth-revm/portable"]
|
||||
default = ["jemalloc", "otlp", "reth-revm/portable", "js-tracer", "keccak-cache-global", "asm-keccak"]
|
||||
|
||||
otlp = [
|
||||
"reth-ethereum-cli/otlp",
|
||||
"reth-node-core/otlp",
|
||||
]
|
||||
samply = [
|
||||
"reth-ethereum-cli/samply",
|
||||
"reth-node-core/samply",
|
||||
]
|
||||
js-tracer = [
|
||||
"reth-node-builder/js-tracer",
|
||||
"reth-node-ethereum/js-tracer",
|
||||
"reth-rpc/js-tracer",
|
||||
"reth-rpc-eth-types/js-tracer",
|
||||
]
|
||||
|
||||
dev = ["reth-ethereum-cli/dev"]
|
||||
|
||||
@@ -91,7 +106,10 @@ asm-keccak = [
|
||||
"reth-ethereum-cli/asm-keccak",
|
||||
"reth-node-ethereum/asm-keccak",
|
||||
]
|
||||
|
||||
keccak-cache-global = [
|
||||
"reth-node-core/keccak-cache-global",
|
||||
"reth-node-ethereum/keccak-cache-global",
|
||||
]
|
||||
jemalloc = [
|
||||
"reth-cli-util/jemalloc",
|
||||
"reth-node-core/jemalloc",
|
||||
@@ -103,6 +121,12 @@ jemalloc-prof = [
|
||||
"reth-cli-util/jemalloc-prof",
|
||||
"reth-ethereum-cli/jemalloc-prof",
|
||||
]
|
||||
jemalloc-unprefixed = [
|
||||
"reth-cli-util/jemalloc-unprefixed",
|
||||
"reth-node-core/jemalloc",
|
||||
"reth-node-metrics/jemalloc",
|
||||
"reth-ethereum-cli/jemalloc",
|
||||
]
|
||||
tracy-allocator = [
|
||||
"reth-cli-util/tracy-allocator",
|
||||
"reth-ethereum-cli/tracy-allocator",
|
||||
@@ -123,22 +147,27 @@ snmalloc-native = [
|
||||
min-error-logs = [
|
||||
"tracing/release_max_level_error",
|
||||
"reth-ethereum-cli/min-error-logs",
|
||||
"reth-node-core/min-error-logs",
|
||||
]
|
||||
min-warn-logs = [
|
||||
"tracing/release_max_level_warn",
|
||||
"reth-ethereum-cli/min-warn-logs",
|
||||
"reth-node-core/min-warn-logs",
|
||||
]
|
||||
min-info-logs = [
|
||||
"tracing/release_max_level_info",
|
||||
"reth-ethereum-cli/min-info-logs",
|
||||
"reth-node-core/min-info-logs",
|
||||
]
|
||||
min-debug-logs = [
|
||||
"tracing/release_max_level_debug",
|
||||
"reth-ethereum-cli/min-debug-logs",
|
||||
"reth-node-core/min-debug-logs",
|
||||
]
|
||||
min-trace-logs = [
|
||||
"tracing/release_max_level_trace",
|
||||
"reth-ethereum-cli/min-trace-logs",
|
||||
"reth-node-core/min-trace-logs",
|
||||
]
|
||||
|
||||
[[bin]]
|
||||
|
||||
444
crates/chain-state/src/deferred_trie.rs
Normal file
@@ -0,0 +1,444 @@
|
||||
use alloy_primitives::B256;
|
||||
use parking_lot::Mutex;
|
||||
use reth_metrics::{metrics::Counter, Metrics};
|
||||
use reth_trie::{
|
||||
updates::{TrieUpdates, TrieUpdatesSorted},
|
||||
HashedPostState, HashedPostStateSorted, TrieInputSorted,
|
||||
};
|
||||
use std::{
|
||||
fmt,
|
||||
sync::{Arc, LazyLock},
|
||||
};
|
||||
use tracing::instrument;
|
||||
|
||||
/// Shared handle to asynchronously populated trie data.
|
||||
///
|
||||
/// Uses a try-lock + fallback computation approach for deadlock-free access.
|
||||
/// If the deferred task hasn't completed, computes trie data synchronously
|
||||
/// from stored unsorted inputs rather than blocking.
|
||||
#[derive(Clone)]
|
||||
pub struct DeferredTrieData {
|
||||
/// Shared deferred state holding either raw inputs (pending) or computed result (ready).
|
||||
state: Arc<Mutex<DeferredState>>,
|
||||
}
|
||||
|
||||
/// Sorted trie data computed for an executed block.
|
||||
/// These represent the complete set of sorted trie data required to persist
|
||||
/// block state for, and generate proofs on top of, a block.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct ComputedTrieData {
|
||||
/// Sorted hashed post-state produced by execution.
|
||||
pub hashed_state: Arc<HashedPostStateSorted>,
|
||||
/// Sorted trie updates produced by state root computation.
|
||||
pub trie_updates: Arc<TrieUpdatesSorted>,
|
||||
/// Trie input bundled with its anchor hash, if available.
|
||||
pub anchored_trie_input: Option<AnchoredTrieInput>,
|
||||
}
|
||||
|
||||
/// Trie input bundled with its anchor hash.
|
||||
///
|
||||
/// This is used to store the trie input and anchor hash for a block together.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct AnchoredTrieInput {
|
||||
/// The persisted ancestor hash this trie input is anchored to.
|
||||
pub anchor_hash: B256,
|
||||
/// Trie input constructed from in-memory overlays.
|
||||
pub trie_input: Arc<TrieInputSorted>,
|
||||
}
|
||||
|
||||
/// Metrics for deferred trie computation.
|
||||
#[derive(Metrics)]
|
||||
#[metrics(scope = "sync.block_validation")]
|
||||
struct DeferredTrieMetrics {
|
||||
/// Number of times deferred trie data was ready (async task completed first).
|
||||
deferred_trie_async_ready: Counter,
|
||||
/// Number of times deferred trie data required synchronous computation (fallback path).
|
||||
deferred_trie_sync_fallback: Counter,
|
||||
}
|
||||
|
||||
static DEFERRED_TRIE_METRICS: LazyLock<DeferredTrieMetrics> =
|
||||
LazyLock::new(DeferredTrieMetrics::default);
|
||||
|
||||
/// Internal state for deferred trie data.
|
||||
enum DeferredState {
|
||||
/// Data is not yet available; raw inputs stored for fallback computation.
|
||||
Pending(PendingInputs),
|
||||
/// Data has been computed and is ready.
|
||||
Ready(ComputedTrieData),
|
||||
}
|
||||
|
||||
/// Inputs kept while a deferred trie computation is pending.
|
||||
#[derive(Clone, Debug)]
|
||||
struct PendingInputs {
|
||||
/// Unsorted hashed post-state from execution.
|
||||
hashed_state: Arc<HashedPostState>,
|
||||
/// Unsorted trie updates from state root computation.
|
||||
trie_updates: Arc<TrieUpdates>,
|
||||
/// The persisted ancestor hash this trie input is anchored to.
|
||||
anchor_hash: B256,
|
||||
/// Deferred trie data from ancestor blocks for merging.
|
||||
ancestors: Vec<DeferredTrieData>,
|
||||
}
|
||||
|
||||
impl fmt::Debug for DeferredTrieData {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let state = self.state.lock();
|
||||
match &*state {
|
||||
DeferredState::Pending(_) => {
|
||||
f.debug_struct("DeferredTrieData").field("state", &"pending").finish()
|
||||
}
|
||||
DeferredState::Ready(_) => {
|
||||
f.debug_struct("DeferredTrieData").field("state", &"ready").finish()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DeferredTrieData {
|
||||
/// Create a new pending handle with fallback inputs for synchronous computation.
|
||||
///
|
||||
/// If the async task hasn't completed when `wait_cloned` is called, the trie data
|
||||
/// will be computed synchronously from these inputs. This eliminates deadlock risk.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `hashed_state` - Unsorted hashed post-state from execution
|
||||
/// * `trie_updates` - Unsorted trie updates from state root computation
|
||||
/// * `anchor_hash` - The persisted ancestor hash this trie input is anchored to
|
||||
/// * `ancestors` - Deferred trie data from ancestor blocks for merging
|
||||
pub fn pending(
|
||||
hashed_state: Arc<HashedPostState>,
|
||||
trie_updates: Arc<TrieUpdates>,
|
||||
anchor_hash: B256,
|
||||
ancestors: Vec<Self>,
|
||||
) -> Self {
|
||||
Self {
|
||||
state: Arc::new(Mutex::new(DeferredState::Pending(PendingInputs {
|
||||
hashed_state,
|
||||
trie_updates,
|
||||
anchor_hash,
|
||||
ancestors,
|
||||
}))),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a handle that is already populated with the given [`ComputedTrieData`].
|
||||
///
|
||||
/// Useful when trie data is available immediately.
|
||||
/// [`Self::wait_cloned`] will return without any computation.
|
||||
pub fn ready(bundle: ComputedTrieData) -> Self {
|
||||
Self { state: Arc::new(Mutex::new(DeferredState::Ready(bundle))) }
|
||||
}
|
||||
|
||||
/// Sort block execution outputs and build a [`TrieInputSorted`] overlay.
|
||||
///
|
||||
/// The trie input overlay accumulates sorted hashed state (account/storage changes) and
|
||||
/// trie node updates from all in-memory ancestor blocks. This overlay is required for:
|
||||
/// - Computing state roots on top of in-memory blocks
|
||||
/// - Generating storage/account proofs for unpersisted state
|
||||
///
|
||||
/// # Process
|
||||
/// 1. Sort the current block's hashed state and trie updates
|
||||
/// 2. Merge ancestor overlays (oldest -> newest, so later state takes precedence)
|
||||
/// 3. Extend the merged overlay with this block's sorted data
|
||||
///
|
||||
/// Used by both the async background task and the synchronous fallback path.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `hashed_state` - Unsorted hashed post-state (account/storage changes) from execution
|
||||
/// * `trie_updates` - Unsorted trie node updates from state root computation
|
||||
/// * `anchor_hash` - The persisted ancestor hash this trie input is anchored to
|
||||
/// * `ancestors` - Deferred trie data from ancestor blocks for merging
|
||||
pub fn sort_and_build_trie_input(
|
||||
hashed_state: &HashedPostState,
|
||||
trie_updates: &TrieUpdates,
|
||||
anchor_hash: B256,
|
||||
ancestors: &[Self],
|
||||
) -> ComputedTrieData {
|
||||
// Sort the current block's hashed state and trie updates
|
||||
let sorted_hashed_state = Arc::new(hashed_state.clone_into_sorted());
|
||||
let sorted_trie_updates = Arc::new(trie_updates.clone().into_sorted());
|
||||
|
||||
// Merge trie data from ancestors (oldest -> newest so later state takes precedence)
|
||||
let mut overlay = TrieInputSorted::default();
|
||||
for ancestor in ancestors {
|
||||
let ancestor_data = ancestor.wait_cloned();
|
||||
{
|
||||
let state_mut = Arc::make_mut(&mut overlay.state);
|
||||
state_mut.extend_ref(ancestor_data.hashed_state.as_ref());
|
||||
}
|
||||
{
|
||||
let nodes_mut = Arc::make_mut(&mut overlay.nodes);
|
||||
nodes_mut.extend_ref(ancestor_data.trie_updates.as_ref());
|
||||
}
|
||||
}
|
||||
|
||||
// Extend overlay with current block's sorted data
|
||||
{
|
||||
let state_mut = Arc::make_mut(&mut overlay.state);
|
||||
state_mut.extend_ref(sorted_hashed_state.as_ref());
|
||||
}
|
||||
{
|
||||
let nodes_mut = Arc::make_mut(&mut overlay.nodes);
|
||||
nodes_mut.extend_ref(sorted_trie_updates.as_ref());
|
||||
}
|
||||
|
||||
ComputedTrieData::with_trie_input(
|
||||
sorted_hashed_state,
|
||||
sorted_trie_updates,
|
||||
anchor_hash,
|
||||
Arc::new(overlay),
|
||||
)
|
||||
}
|
||||
|
||||
/// Returns trie data, computing synchronously if the async task hasn't completed.
|
||||
///
|
||||
/// - If the async task has completed (`Ready`), returns the cached result.
|
||||
/// - If pending, computes synchronously from stored inputs.
|
||||
///
|
||||
/// Deadlock is avoided as long as the provided ancestors form a true ancestor chain (a DAG):
|
||||
/// - Each block only waits on its ancestors (blocks on the path to the persisted root)
|
||||
/// - Sibling blocks (forks) are never in each other's ancestor lists
|
||||
/// - A block never waits on its descendants
|
||||
///
|
||||
/// Given that invariant, circular wait dependencies are impossible.
|
||||
#[instrument(level = "debug", target = "engine::tree::deferred_trie", skip_all)]
|
||||
pub fn wait_cloned(&self) -> ComputedTrieData {
|
||||
let mut state = self.state.lock();
|
||||
match &*state {
|
||||
// If the deferred trie data is ready, return the cached result.
|
||||
DeferredState::Ready(bundle) => {
|
||||
DEFERRED_TRIE_METRICS.deferred_trie_async_ready.increment(1);
|
||||
bundle.clone()
|
||||
}
|
||||
// If the deferred trie data is pending, compute the trie data synchronously and return
|
||||
// the result. This is the fallback path if the async task hasn't completed.
|
||||
DeferredState::Pending(inputs) => {
|
||||
DEFERRED_TRIE_METRICS.deferred_trie_sync_fallback.increment(1);
|
||||
let computed = Self::sort_and_build_trie_input(
|
||||
&inputs.hashed_state,
|
||||
&inputs.trie_updates,
|
||||
inputs.anchor_hash,
|
||||
&inputs.ancestors,
|
||||
);
|
||||
*state = DeferredState::Ready(computed.clone());
|
||||
computed
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ComputedTrieData {
|
||||
/// Construct a bundle that includes trie input anchored to a persisted ancestor.
|
||||
pub const fn with_trie_input(
|
||||
hashed_state: Arc<HashedPostStateSorted>,
|
||||
trie_updates: Arc<TrieUpdatesSorted>,
|
||||
anchor_hash: B256,
|
||||
trie_input: Arc<TrieInputSorted>,
|
||||
) -> Self {
|
||||
Self {
|
||||
hashed_state,
|
||||
trie_updates,
|
||||
anchored_trie_input: Some(AnchoredTrieInput { anchor_hash, trie_input }),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct a bundle without trie input or anchor information.
|
||||
///
|
||||
/// Unlike [`Self::with_trie_input`], this constructor omits the accumulated trie input overlay
|
||||
/// and its anchor hash. Use this when the trie input is not needed, such as in block builders
|
||||
/// or sequencers that don't require proof generation on top of in-memory state.
|
||||
///
|
||||
/// The trie input anchor identifies the persisted block hash from which the in-memory overlay
|
||||
/// was built. Without it, consumers cannot determine which on-disk state to combine with.
|
||||
pub const fn without_trie_input(
|
||||
hashed_state: Arc<HashedPostStateSorted>,
|
||||
trie_updates: Arc<TrieUpdatesSorted>,
|
||||
) -> Self {
|
||||
Self { hashed_state, trie_updates, anchored_trie_input: None }
|
||||
}
|
||||
|
||||
/// Returns the anchor hash, if present.
|
||||
pub fn anchor_hash(&self) -> Option<B256> {
|
||||
self.anchored_trie_input.as_ref().map(|anchored| anchored.anchor_hash)
|
||||
}
|
||||
|
||||
/// Returns the trie input, if present.
|
||||
pub fn trie_input(&self) -> Option<&Arc<TrieInputSorted>> {
|
||||
self.anchored_trie_input.as_ref().map(|anchored| &anchored.trie_input)
|
||||
}
|
||||
}
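// Hedged sketch (not part of the diff): the two ways a bundle is typically built.
// `example_bundles` is an illustrative name only.
#[allow(dead_code)]
fn example_bundles(
    hashed_state: Arc<HashedPostStateSorted>,
    trie_updates: Arc<TrieUpdatesSorted>,
    anchor_hash: B256,
    trie_input: Arc<TrieInputSorted>,
) -> (ComputedTrieData, ComputedTrieData) {
    // With an overlay anchored to a persisted block: needed when proofs must be
    // generated on top of in-memory state.
    let anchored = ComputedTrieData::with_trie_input(
        hashed_state.clone(),
        trie_updates.clone(),
        anchor_hash,
        trie_input,
    );
    // Without the overlay: cheaper, for consumers that never generate proofs.
    let bare = ComputedTrieData::without_trie_input(hashed_state, trie_updates);
    (anchored, bare)
}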
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use alloy_primitives::{map::B256Map, U256};
|
||||
use reth_primitives_traits::Account;
|
||||
use reth_trie::updates::TrieUpdates;
|
||||
use std::{
|
||||
sync::Arc,
|
||||
thread,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
fn empty_bundle() -> ComputedTrieData {
|
||||
ComputedTrieData {
|
||||
hashed_state: Arc::default(),
|
||||
trie_updates: Arc::default(),
|
||||
anchored_trie_input: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn empty_pending() -> DeferredTrieData {
|
||||
empty_pending_with_anchor(B256::ZERO)
|
||||
}
|
||||
|
||||
fn empty_pending_with_anchor(anchor: B256) -> DeferredTrieData {
|
||||
DeferredTrieData::pending(
|
||||
Arc::new(HashedPostState::default()),
|
||||
Arc::new(TrieUpdates::default()),
|
||||
anchor,
|
||||
Vec::new(),
|
||||
)
|
||||
}
|
||||
|
||||
/// Verifies that a ready handle returns immediately without computation.
|
||||
#[test]
|
||||
fn ready_returns_immediately() {
|
||||
let bundle = empty_bundle();
|
||||
let deferred = DeferredTrieData::ready(bundle.clone());
|
||||
|
||||
let start = Instant::now();
|
||||
let result = deferred.wait_cloned();
|
||||
let elapsed = start.elapsed();
|
||||
|
||||
assert_eq!(result.hashed_state, bundle.hashed_state);
|
||||
assert_eq!(result.trie_updates, bundle.trie_updates);
|
||||
assert_eq!(result.anchor_hash(), bundle.anchor_hash());
|
||||
assert!(elapsed < Duration::from_millis(20));
|
||||
}
|
||||
|
||||
/// Verifies that a pending handle computes trie data synchronously via fallback.
|
||||
#[test]
|
||||
fn pending_computes_fallback() {
|
||||
let deferred = empty_pending();
|
||||
|
||||
// wait_cloned should compute from inputs without blocking
|
||||
let start = Instant::now();
|
||||
let result = deferred.wait_cloned();
|
||||
let elapsed = start.elapsed();
|
||||
|
||||
// Should return quickly (fallback computation)
|
||||
assert!(elapsed < Duration::from_millis(100));
|
||||
assert!(result.hashed_state.is_empty());
|
||||
}
|
||||
|
||||
/// Verifies that fallback computation result is cached for subsequent calls.
|
||||
#[test]
|
||||
fn fallback_result_is_cached() {
|
||||
let deferred = empty_pending();
|
||||
|
||||
// First call computes and should stash the result
|
||||
let first = deferred.wait_cloned();
|
||||
// Second call should reuse the cached result (same Arc pointer)
|
||||
let second = deferred.wait_cloned();
|
||||
|
||||
assert!(Arc::ptr_eq(&first.hashed_state, &second.hashed_state));
|
||||
assert!(Arc::ptr_eq(&first.trie_updates, &second.trie_updates));
|
||||
assert_eq!(first.anchor_hash(), second.anchor_hash());
|
||||
}
|
||||
|
||||
/// Verifies that concurrent `wait_cloned` calls result in only one computation,
|
||||
/// with all callers receiving the same cached result.
|
||||
#[test]
|
||||
fn concurrent_wait_cloned_computes_once() {
|
||||
let deferred = empty_pending();
|
||||
|
||||
// Spawn multiple threads that all call wait_cloned concurrently
|
||||
let handles: Vec<_> = (0..10)
|
||||
.map(|_| {
|
||||
let d = deferred.clone();
|
||||
thread::spawn(move || d.wait_cloned())
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Collect all results
|
||||
let results: Vec<_> = handles.into_iter().map(|h| h.join().unwrap()).collect();
|
||||
|
||||
// All results should share the same Arc pointers (same computed result)
|
||||
let first = &results[0];
|
||||
for result in &results[1..] {
|
||||
assert!(Arc::ptr_eq(&first.hashed_state, &result.hashed_state));
|
||||
assert!(Arc::ptr_eq(&first.trie_updates, &result.trie_updates));
|
||||
}
|
||||
}
|
||||
|
||||
/// Tests that ancestor trie data is merged during fallback computation and that the
|
||||
/// resulting `ComputedTrieData` uses the current block's anchor hash, not the ancestor's.
|
||||
#[test]
|
||||
fn ancestors_are_merged() {
|
||||
// Create ancestor with some data
|
||||
let ancestor_bundle = ComputedTrieData {
|
||||
hashed_state: Arc::default(),
|
||||
trie_updates: Arc::default(),
|
||||
anchored_trie_input: Some(AnchoredTrieInput {
|
||||
anchor_hash: B256::with_last_byte(1),
|
||||
trie_input: Arc::new(TrieInputSorted::default()),
|
||||
}),
|
||||
};
|
||||
let ancestor = DeferredTrieData::ready(ancestor_bundle);
|
||||
|
||||
// Create pending with ancestor
|
||||
let deferred = DeferredTrieData::pending(
|
||||
Arc::new(HashedPostState::default()),
|
||||
Arc::new(TrieUpdates::default()),
|
||||
B256::with_last_byte(2),
|
||||
vec![ancestor],
|
||||
);
|
||||
|
||||
let result = deferred.wait_cloned();
|
||||
// Should have the current block's anchor, not the ancestor's
|
||||
assert_eq!(result.anchor_hash(), Some(B256::with_last_byte(2)));
|
||||
}
|
||||
|
||||
/// Ensures ancestor overlays are merged oldest -> newest so latest state wins (no overwrite by
|
||||
/// older ancestors).
|
||||
#[test]
|
||||
fn ancestors_merge_in_chronological_order() {
|
||||
let key = B256::with_last_byte(1);
|
||||
// Oldest ancestor sets nonce to 1
|
||||
let oldest_state = HashedPostStateSorted::new(
|
||||
vec![(key, Some(Account { nonce: 1, balance: U256::ZERO, bytecode_hash: None }))],
|
||||
B256Map::default(),
|
||||
);
|
||||
// Newest ancestor overwrites nonce to 2
|
||||
let newest_state = HashedPostStateSorted::new(
|
||||
vec![(key, Some(Account { nonce: 2, balance: U256::ZERO, bytecode_hash: None }))],
|
||||
B256Map::default(),
|
||||
);
|
||||
|
||||
let oldest = ComputedTrieData {
|
||||
hashed_state: Arc::new(oldest_state),
|
||||
trie_updates: Arc::default(),
|
||||
anchored_trie_input: None,
|
||||
};
|
||||
let newest = ComputedTrieData {
|
||||
hashed_state: Arc::new(newest_state),
|
||||
trie_updates: Arc::default(),
|
||||
anchored_trie_input: None,
|
||||
};
|
||||
|
||||
// Pass ancestors oldest -> newest; newest should take precedence
|
||||
let deferred = DeferredTrieData::pending(
|
||||
Arc::new(HashedPostState::default()),
|
||||
Arc::new(TrieUpdates::default()),
|
||||
B256::ZERO,
|
||||
vec![DeferredTrieData::ready(oldest), DeferredTrieData::ready(newest)],
|
||||
);
|
||||
|
||||
let result = deferred.wait_cloned();
|
||||
let overlay_state = &result.anchored_trie_input.as_ref().unwrap().trie_input.state.accounts;
|
||||
assert_eq!(overlay_state.len(), 1);
|
||||
let (_, account) = &overlay_state[0];
|
||||
assert_eq!(account.unwrap().nonce, 2);
|
||||
}
|
||||
}
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
use crate::{
|
||||
CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications,
|
||||
ChainInfoTracker, MemoryOverlayStateProvider,
|
||||
ChainInfoTracker, ComputedTrieData, DeferredTrieData, MemoryOverlayStateProvider,
|
||||
};
|
||||
use alloy_consensus::{transaction::TransactionMeta, BlockHeader};
|
||||
use alloy_eips::{BlockHashOrNumber, BlockNumHash};
|
||||
@@ -17,8 +17,8 @@ use reth_primitives_traits::{
|
||||
SignedTransaction,
|
||||
};
|
||||
use reth_storage_api::StateProviderBox;
|
||||
use reth_trie::{updates::TrieUpdates, HashedPostState};
|
||||
use std::{collections::BTreeMap, sync::Arc, time::Instant};
|
||||
use reth_trie::{updates::TrieUpdatesSorted, HashedPostStateSorted, TrieInputSorted};
|
||||
use std::{collections::BTreeMap, ops::Deref, sync::Arc, time::Instant};
|
||||
use tokio::sync::{broadcast, watch};
|
||||
|
||||
/// Size of the broadcast channel used to notify canonical state events.
|
||||
@@ -565,7 +565,7 @@ impl<N: NodePrimitives> CanonicalInMemoryState<N> {
|
||||
|
||||
/// State after applying the given block. This block is part of the canonical chain that is partially
|
||||
/// stored in memory and can be traced back to a canonical block on disk.
|
||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct BlockState<N: NodePrimitives = EthPrimitives> {
|
||||
/// The executed block that determines the state after this block has been executed.
|
||||
block: ExecutedBlock<N>,
|
||||
@@ -573,6 +573,12 @@ pub struct BlockState<N: NodePrimitives = EthPrimitives> {
|
||||
parent: Option<Arc<Self>>,
|
||||
}
|
||||
|
||||
impl<N: NodePrimitives> PartialEq for BlockState<N> {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.block == other.block && self.parent == other.parent
|
||||
}
|
||||
}
|
||||
|
||||
impl<N: NodePrimitives> BlockState<N> {
|
||||
/// [`BlockState`] constructor.
|
||||
pub const fn new(block: ExecutedBlock<N>) -> Self {
|
||||
@@ -628,6 +634,8 @@ impl<N: NodePrimitives> BlockState<N> {
|
||||
/// We assume that the `Receipts` in the executed block `ExecutionOutcome`
|
||||
/// has only one element corresponding to the executed block associated to
|
||||
/// the state.
|
||||
///
|
||||
/// This clones the vector of receipts. To avoid it, use [`Self::executed_block_receipts_ref`].
|
||||
pub fn executed_block_receipts(&self) -> Vec<N::Receipt> {
|
||||
let receipts = self.receipts();
|
||||
|
||||
@@ -640,22 +648,30 @@ impl<N: NodePrimitives> BlockState<N> {
|
||||
receipts.first().cloned().unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Returns a vector of __parent__ `BlockStates`.
|
||||
/// Returns a slice of the `Receipt`s of the executed block that determines the state.
|
||||
/// We assume that the `Receipts` in the executed block `ExecutionOutcome`
|
||||
/// has only one element corresponding to the executed block associated to
|
||||
/// the state.
|
||||
pub fn executed_block_receipts_ref(&self) -> &[N::Receipt] {
|
||||
let receipts = self.receipts();
|
||||
|
||||
debug_assert!(
|
||||
receipts.len() <= 1,
|
||||
"Expected at most one block's worth of receipts, found {}",
|
||||
receipts.len()
|
||||
);
|
||||
|
||||
receipts.first().map(|receipts| receipts.deref()).unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Returns an iterator over __parent__ `BlockStates`.
|
||||
///
|
||||
/// The block state order in the output vector is newest to oldest (highest to lowest):
|
||||
/// The block state order is newest to oldest (highest to lowest):
|
||||
/// `[5,4,3,2,1]`
|
||||
///
|
||||
/// Note: This does not include self.
|
||||
pub fn parent_state_chain(&self) -> Vec<&Self> {
|
||||
let mut parents = Vec::new();
|
||||
let mut current = self.parent.as_deref();
|
||||
|
||||
while let Some(parent) = current {
|
||||
parents.push(parent);
|
||||
current = parent.parent.as_deref();
|
||||
}
|
||||
|
||||
parents
|
||||
pub fn parent_state_chain(&self) -> impl Iterator<Item = &Self> + '_ {
|
||||
std::iter::successors(self.parent.as_deref(), |state| state.parent.as_deref())
|
||||
}
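// Hedged sketch (not part of the diff): collecting the ancestors, newest first.
// `collect_ancestors` is an illustrative name only.
#[allow(dead_code)]
fn collect_ancestors(state: &Self) -> Vec<&Self> {
    // Walks parent links until the oldest in-memory block is reached.
    state.parent_state_chain().collect()
}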
|
||||
|
||||
/// Returns a vector of `BlockStates` representing the entire in memory chain.
|
||||
@@ -666,6 +682,11 @@ impl<N: NodePrimitives> BlockState<N> {
|
||||
}
|
||||
|
||||
/// Appends the parent chain of this [`BlockState`] to the given vector.
|
||||
///
|
||||
/// Parents are appended in order from newest to oldest (highest to lowest).
|
||||
/// This does not include self, only the parent states.
|
||||
///
|
||||
/// This is a convenience method equivalent to `chain.extend(self.parent_state_chain())`.
|
||||
pub fn append_parent_chain<'a>(&'a self, chain: &mut Vec<&'a Self>) {
|
||||
chain.extend(self.parent_state_chain());
|
||||
}
|
||||
@@ -719,16 +740,17 @@ impl<N: NodePrimitives> BlockState<N> {
|
||||
}
|
||||
|
||||
/// Represents an executed block stored in-memory.
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ExecutedBlock<N: NodePrimitives = EthPrimitives> {
|
||||
/// Recovered Block
|
||||
pub recovered_block: Arc<RecoveredBlock<N::Block>>,
|
||||
/// Block's execution outcome.
|
||||
pub execution_output: Arc<ExecutionOutcome<N::Receipt>>,
|
||||
/// Block's hashed state.
|
||||
pub hashed_state: Arc<HashedPostState>,
|
||||
/// Trie updates that result from calculating the state root for the block.
|
||||
pub trie_updates: Arc<TrieUpdates>,
|
||||
/// Deferred trie data produced by execution.
|
||||
///
|
||||
/// This allows deferring the computation of the trie data which can be expensive.
|
||||
/// The data can be populated asynchronously after the block was validated.
|
||||
pub trie_data: DeferredTrieData,
|
||||
}
|
||||
|
||||
impl<N: NodePrimitives> Default for ExecutedBlock<N> {
|
||||
@@ -736,13 +758,54 @@ impl<N: NodePrimitives> Default for ExecutedBlock<N> {
|
||||
Self {
|
||||
recovered_block: Default::default(),
|
||||
execution_output: Default::default(),
|
||||
hashed_state: Default::default(),
|
||||
trie_updates: Default::default(),
|
||||
trie_data: DeferredTrieData::ready(ComputedTrieData::default()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<N: NodePrimitives> PartialEq for ExecutedBlock<N> {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
// Trie data is computed asynchronously and doesn't define block identity.
|
||||
self.recovered_block == other.recovered_block &&
|
||||
self.execution_output == other.execution_output
|
||||
}
|
||||
}
|
||||
|
||||
impl<N: NodePrimitives> ExecutedBlock<N> {
|
||||
/// Create a new [`ExecutedBlock`] with already-computed trie data.
|
||||
///
|
||||
/// Use this constructor when trie data is available immediately (e.g., sequencers,
|
||||
/// payload builders). This is the safe default path.
|
||||
pub fn new(
|
||||
recovered_block: Arc<RecoveredBlock<N::Block>>,
|
||||
execution_output: Arc<ExecutionOutcome<N::Receipt>>,
|
||||
trie_data: ComputedTrieData,
|
||||
) -> Self {
|
||||
Self { recovered_block, execution_output, trie_data: DeferredTrieData::ready(trie_data) }
|
||||
}
|
||||
|
||||
/// Create a new [`ExecutedBlock`] with deferred trie data.
|
||||
///
|
||||
/// This is useful if the trie data is populated somewhere else, e.g. asynchronously
|
||||
/// after the block was validated.
|
||||
///
|
||||
/// The [`DeferredTrieData`] handle allows expensive trie operations (sorting hashed state,
|
||||
/// sorting trie updates, and building the accumulated trie input overlay) to be performed
|
||||
/// outside the critical validation path. This can improve latency for time-sensitive
|
||||
/// operations like block validation.
|
||||
///
|
||||
/// If the data hasn't been populated when [`Self::trie_data()`] is called, computation
|
||||
/// occurs synchronously from stored inputs, so there is no blocking or deadlock risk.
|
||||
///
|
||||
/// Use [`Self::new()`] instead when trie data is already computed and available immediately.
|
||||
pub const fn with_deferred_trie_data(
|
||||
recovered_block: Arc<RecoveredBlock<N::Block>>,
|
||||
execution_output: Arc<ExecutionOutcome<N::Receipt>>,
|
||||
trie_data: DeferredTrieData,
|
||||
) -> Self {
|
||||
Self { recovered_block, execution_output, trie_data }
|
||||
}
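// Hedged sketch (not part of the diff): wiring a block whose trie data is produced by
// a background task. `example_deferred` is an illustrative name only.
#[allow(dead_code)]
fn example_deferred(
    recovered_block: Arc<RecoveredBlock<N::Block>>,
    execution_output: Arc<ExecutionOutcome<N::Receipt>>,
    trie_data: DeferredTrieData,
) -> Self {
    // The handle is cheap to clone and store; the expensive sorting work only runs
    // when the data is first resolved via `wait_cloned`.
    Self::with_deferred_trie_data(recovered_block, execution_output, trie_data)
}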
|
||||
|
||||
/// Returns a reference to an inner [`SealedBlock`]
|
||||
#[inline]
|
||||
pub fn sealed_block(&self) -> &SealedBlock<N::Block> {
|
||||
@@ -761,16 +824,55 @@ impl<N: NodePrimitives> ExecutedBlock<N> {
|
||||
&self.execution_output
|
||||
}
|
||||
|
||||
/// Returns a reference to the hashed state result of the execution outcome
|
||||
/// Returns the trie data, computing it synchronously if not already cached.
|
||||
///
|
||||
/// Delegates to [`DeferredTrieData::wait_cloned`] internally:
|
||||
/// - If already computed: returns cached result immediately
|
||||
/// - If not computed: first caller computes, others wait for that result
|
||||
#[inline]
|
||||
pub fn hashed_state(&self) -> &HashedPostState {
|
||||
&self.hashed_state
|
||||
#[tracing::instrument(level = "debug", target = "engine::tree", name = "trie_data", skip_all)]
|
||||
pub fn trie_data(&self) -> ComputedTrieData {
|
||||
self.trie_data.wait_cloned()
|
||||
}
|
||||
|
||||
/// Returns a reference to the trie updates resulting from the execution outcome
|
||||
/// Returns a clone of the deferred trie data handle.
|
||||
///
|
||||
/// A handle is a lightweight reference that can be passed to descendants without
|
||||
/// forcing trie data to be computed immediately. The actual work runs when
|
||||
/// `wait_cloned()` is called by a consumer (e.g. when merging overlays).
|
||||
#[inline]
|
||||
pub fn trie_updates(&self) -> &TrieUpdates {
|
||||
&self.trie_updates
|
||||
pub fn trie_data_handle(&self) -> DeferredTrieData {
|
||||
self.trie_data.clone()
|
||||
}
|
||||
|
||||
/// Returns the hashed state result of the execution outcome.
|
||||
///
|
||||
/// May compute trie data synchronously if the deferred task hasn't completed.
|
||||
#[inline]
|
||||
pub fn hashed_state(&self) -> Arc<HashedPostStateSorted> {
|
||||
self.trie_data().hashed_state
|
||||
}
|
||||
|
||||
/// Returns the trie updates resulting from the execution outcome.
|
||||
///
|
||||
/// May compute trie data synchronously if the deferred task hasn't completed.
|
||||
#[inline]
|
||||
pub fn trie_updates(&self) -> Arc<TrieUpdatesSorted> {
|
||||
self.trie_data().trie_updates
|
||||
}
|
||||
|
||||
/// Returns the trie input anchored to the persisted ancestor.
|
||||
///
|
||||
/// May compute trie data synchronously if the deferred task hasn't completed.
|
||||
#[inline]
|
||||
pub fn trie_input(&self) -> Option<Arc<TrieInputSorted>> {
|
||||
self.trie_data().trie_input().cloned()
|
||||
}
|
||||
|
||||
/// Returns the anchor hash of the trie input, if present.
|
||||
#[inline]
|
||||
pub fn anchor_hash(&self) -> Option<B256> {
|
||||
self.trie_data().anchor_hash()
|
||||
}
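// Hedged sketch (not part of the diff): reading the pieces a persistence task needs.
// `example_persist_inputs` is an illustrative name only.
#[allow(dead_code)]
fn example_persist_inputs(block: &Self) -> (Arc<HashedPostStateSorted>, Arc<TrieUpdatesSorted>) {
    // Both accessors resolve the same deferred bundle, so at most one synchronous
    // fallback computation happens; the second call reuses the cached result.
    (block.hashed_state(), block.trie_updates())
}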
|
||||
|
||||
/// Returns a [`BlockNumber`] of the block.
|
||||
@@ -875,8 +977,8 @@ mod tests {
|
||||
StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider,
|
||||
};
|
||||
use reth_trie::{
|
||||
AccountProof, HashedStorage, MultiProof, MultiProofTargets, StorageMultiProof,
|
||||
StorageProof, TrieInput,
|
||||
updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof,
|
||||
MultiProofTargets, StorageMultiProof, StorageProof, TrieInput,
|
||||
};
|
||||
|
||||
fn create_mock_state(
|
||||
@@ -1348,18 +1450,18 @@ mod tests {
|
||||
let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default();
|
||||
let chain = create_mock_state_chain(&mut test_block_builder, 4);
|
||||
|
||||
let parents = chain[3].parent_state_chain();
|
||||
let parents: Vec<_> = chain[3].parent_state_chain().collect();
|
||||
assert_eq!(parents.len(), 3);
|
||||
assert_eq!(parents[0].block().recovered_block().number, 3);
|
||||
assert_eq!(parents[1].block().recovered_block().number, 2);
|
||||
assert_eq!(parents[2].block().recovered_block().number, 1);
|
||||
|
||||
let parents = chain[2].parent_state_chain();
|
||||
let parents: Vec<_> = chain[2].parent_state_chain().collect();
|
||||
assert_eq!(parents.len(), 2);
|
||||
assert_eq!(parents[0].block().recovered_block().number, 2);
|
||||
assert_eq!(parents[1].block().recovered_block().number, 1);
|
||||
|
||||
let parents = chain[0].parent_state_chain();
|
||||
let parents: Vec<_> = chain[0].parent_state_chain().collect();
|
||||
assert_eq!(parents.len(), 0);
|
||||
}
|
||||
|
||||
@@ -1371,7 +1473,7 @@ mod tests {
|
||||
create_mock_state(&mut test_block_builder, single_block_number, B256::random());
|
||||
let single_block_hash = single_block.block().recovered_block().hash();
|
||||
|
||||
let parents = single_block.parent_state_chain();
|
||||
let parents: Vec<_> = single_block.parent_state_chain().collect();
|
||||
assert_eq!(parents.len(), 0);
|
||||
|
||||
let block_state_chain = single_block.chain().collect::<Vec<_>>();
|
||||
|
||||
@@ -11,6 +11,9 @@
|
||||
mod in_memory;
|
||||
pub use in_memory::*;
|
||||
|
||||
mod deferred_trie;
|
||||
pub use deferred_trie::*;
|
||||
|
||||
mod noop;
|
||||
|
||||
mod chain_info;
|
||||
|
||||
@@ -5,14 +5,14 @@ use reth_errors::ProviderResult;
|
||||
use reth_primitives_traits::{Account, Bytecode, NodePrimitives};
|
||||
use reth_storage_api::{
|
||||
AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, StateProofProvider,
|
||||
StateProvider, StateRootProvider, StorageRootProvider,
|
||||
StateProvider, StateProviderBox, StateRootProvider, StorageRootProvider,
|
||||
};
|
||||
use reth_trie::{
|
||||
updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof,
|
||||
MultiProofTargets, StorageMultiProof, TrieInput,
|
||||
};
|
||||
use revm_database::BundleState;
|
||||
use std::sync::OnceLock;
|
||||
use std::{borrow::Cow, sync::OnceLock};
|
||||
|
||||
/// A state provider that stores references to in-memory blocks along with their state as well as a
|
||||
/// reference to the historical state provider for fallback lookups.
|
||||
@@ -24,15 +24,11 @@ pub struct MemoryOverlayStateProviderRef<
|
||||
/// Historical state provider for state lookups that are not found in memory blocks.
|
||||
pub(crate) historical: Box<dyn StateProvider + 'a>,
|
||||
/// The collection of executed parent blocks. Expected order is newest to oldest.
|
||||
pub(crate) in_memory: Vec<ExecutedBlock<N>>,
|
||||
pub(crate) in_memory: Cow<'a, [ExecutedBlock<N>]>,
|
||||
/// Lazy-loaded in-memory trie data.
|
||||
pub(crate) trie_input: OnceLock<TrieInput>,
|
||||
}
|
||||
|
||||
/// A state provider that stores references to in-memory blocks along with their state as well as
|
||||
/// the historical state provider for fallback lookups.
|
||||
pub type MemoryOverlayStateProvider<N> = MemoryOverlayStateProviderRef<'static, N>;
|
||||
|
||||
impl<'a, N: NodePrimitives> MemoryOverlayStateProviderRef<'a, N> {
|
||||
/// Create new memory overlay state provider.
|
||||
///
|
||||
@@ -42,7 +38,7 @@ impl<'a, N: NodePrimitives> MemoryOverlayStateProviderRef<'a, N> {
|
||||
/// - `historical` - a historical state provider for the latest ancestor block stored in the
|
||||
/// database.
|
||||
pub fn new(historical: Box<dyn StateProvider + 'a>, in_memory: Vec<ExecutedBlock<N>>) -> Self {
|
||||
Self { historical, in_memory, trie_input: OnceLock::new() }
|
||||
Self { historical, in_memory: Cow::Owned(in_memory), trie_input: OnceLock::new() }
|
||||
}
|
||||
|
||||
/// Turn this state provider into a state provider
|
||||
@@ -53,11 +49,10 @@ impl<'a, N: NodePrimitives> MemoryOverlayStateProviderRef<'a, N> {
|
||||
/// Returns lazy-loaded trie state aggregated from the in-memory blocks.
|
||||
fn trie_input(&self) -> &TrieInput {
|
||||
self.trie_input.get_or_init(|| {
|
||||
TrieInput::from_blocks(
|
||||
self.in_memory
|
||||
.iter()
|
||||
.rev()
|
||||
.map(|block| (block.hashed_state.as_ref(), block.trie_updates.as_ref())),
|
||||
let bundles: Vec<_> =
|
||||
self.in_memory.iter().rev().map(|block| block.trie_data()).collect();
|
||||
TrieInput::from_blocks_sorted(
|
||||
bundles.iter().map(|data| (data.hashed_state.as_ref(), data.trie_updates.as_ref())),
|
||||
)
|
||||
})
|
||||
}
|
||||
@@ -72,7 +67,7 @@ impl<'a, N: NodePrimitives> MemoryOverlayStateProviderRef<'a, N> {
|
||||
|
||||
impl<N: NodePrimitives> BlockHashReader for MemoryOverlayStateProviderRef<'_, N> {
|
||||
fn block_hash(&self, number: BlockNumber) -> ProviderResult<Option<B256>> {
|
||||
for block in &self.in_memory {
|
||||
for block in self.in_memory.iter() {
|
||||
if block.recovered_block().number() == number {
|
||||
return Ok(Some(block.recovered_block().hash()));
|
||||
}
|
||||
@@ -91,7 +86,7 @@ impl<N: NodePrimitives> BlockHashReader for MemoryOverlayStateProviderRef<'_, N>
|
||||
let mut in_memory_hashes = Vec::with_capacity(range.size_hint().0);
|
||||
|
||||
// iterate in ascending order (oldest to newest = low to high)
|
||||
for block in &self.in_memory {
|
||||
for block in self.in_memory.iter() {
|
||||
let block_num = block.recovered_block().number();
|
||||
if range.contains(&block_num) {
|
||||
in_memory_hashes.push(block.recovered_block().hash());
|
||||
@@ -113,7 +108,7 @@ impl<N: NodePrimitives> BlockHashReader for MemoryOverlayStateProviderRef<'_, N>
|
||||
|
||||
impl<N: NodePrimitives> AccountReader for MemoryOverlayStateProviderRef<'_, N> {
|
||||
fn basic_account(&self, address: &Address) -> ProviderResult<Option<Account>> {
|
||||
for block in &self.in_memory {
|
||||
for block in self.in_memory.iter() {
|
||||
if let Some(account) = block.execution_output.account(address) {
|
||||
return Ok(account);
|
||||
}
|
||||
@@ -217,7 +212,7 @@ impl<N: NodePrimitives> StateProvider for MemoryOverlayStateProviderRef<'_, N> {
|
||||
address: Address,
|
||||
storage_key: StorageKey,
|
||||
) -> ProviderResult<Option<StorageValue>> {
|
||||
for block in &self.in_memory {
|
||||
for block in self.in_memory.iter() {
|
||||
if let Some(value) = block.execution_output.storage(&address, storage_key.into()) {
|
||||
return Ok(Some(value));
|
||||
}
|
||||
@@ -229,7 +224,7 @@ impl<N: NodePrimitives> StateProvider for MemoryOverlayStateProviderRef<'_, N> {
|
||||
|
||||
impl<N: NodePrimitives> BytecodeReader for MemoryOverlayStateProviderRef<'_, N> {
|
||||
fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult<Option<Bytecode>> {
|
||||
for block in &self.in_memory {
|
||||
for block in self.in_memory.iter() {
|
||||
if let Some(contract) = block.execution_output.bytecode(code_hash) {
|
||||
return Ok(Some(contract));
|
||||
}
|
||||
@@ -238,3 +233,46 @@ impl<N: NodePrimitives> BytecodeReader for MemoryOverlayStateProviderRef<'_, N>
|
||||
self.historical.bytecode_by_hash(code_hash)
|
||||
}
|
||||
}
|
||||
|
||||
/// An owned state provider that stores references to in-memory blocks along with their state as
|
||||
/// well as a reference to the historical state provider for fallback lookups.
|
||||
#[expect(missing_debug_implementations)]
|
||||
pub struct MemoryOverlayStateProvider<N: NodePrimitives = reth_ethereum_primitives::EthPrimitives> {
|
||||
/// Historical state provider for state lookups that are not found in memory blocks.
|
||||
pub(crate) historical: StateProviderBox,
|
||||
/// The collection of executed parent blocks. Expected order is newest to oldest.
|
||||
pub(crate) in_memory: Vec<ExecutedBlock<N>>,
|
||||
/// Lazy-loaded in-memory trie data.
|
||||
pub(crate) trie_input: OnceLock<TrieInput>,
|
||||
}
|
||||
|
||||
impl<N: NodePrimitives> MemoryOverlayStateProvider<N> {
|
||||
/// Create new memory overlay state provider.
|
||||
///
|
||||
/// ## Arguments
|
||||
///
|
||||
/// - `in_memory` - the collection of executed ancestor blocks in reverse.
|
||||
/// - `historical` - a historical state provider for the latest ancestor block stored in the
|
||||
/// database.
|
||||
pub fn new(historical: StateProviderBox, in_memory: Vec<ExecutedBlock<N>>) -> Self {
|
||||
Self { historical, in_memory, trie_input: OnceLock::new() }
|
||||
}
|
||||
|
||||
/// Returns a new provider that borrows this provider's state by reference
|
||||
#[inline(always)]
|
||||
fn as_ref(&self) -> MemoryOverlayStateProviderRef<'_, N> {
|
||||
MemoryOverlayStateProviderRef {
|
||||
historical: Box::new(self.historical.as_ref()),
|
||||
in_memory: Cow::Borrowed(&self.in_memory),
|
||||
trie_input: self.trie_input.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Wraps the [`Self`] in a `Box`.
|
||||
pub fn boxed(self) -> StateProviderBox {
|
||||
Box::new(self)
|
||||
}
|
||||
}
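// Hedged sketch (not part of the diff): building an owned overlay provider over a
// historical provider and erasing it behind `StateProviderBox`. `example_overlay` is
// an illustrative name only.
#[allow(dead_code)]
fn example_overlay<N: NodePrimitives>(
    historical: StateProviderBox,
    in_memory: Vec<ExecutedBlock<N>>,
) -> StateProviderBox {
    // Blocks are expected newest to oldest; lookups fall back to `historical` when a
    // value is not found in any in-memory block.
    MemoryOverlayStateProvider::new(historical, in_memory).boxed()
}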
|
||||
|
||||
// Delegates all provider impls to [`MemoryOverlayStateProviderRef`]
|
||||
reth_storage_api::macros::delegate_provider_impls!(MemoryOverlayStateProvider<N> where [N: NodePrimitives]);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use crate::{
|
||||
in_memory::ExecutedBlock, CanonStateNotification, CanonStateNotifications,
|
||||
CanonStateSubscriptions,
|
||||
CanonStateSubscriptions, ComputedTrieData,
|
||||
};
|
||||
use alloy_consensus::{Header, SignableTransaction, TxEip1559, TxReceipt, EMPTY_ROOT_HASH};
|
||||
use alloy_eips::{
|
||||
@@ -23,7 +23,7 @@ use reth_primitives_traits::{
|
||||
SignedTransaction,
|
||||
};
|
||||
use reth_storage_api::NodePrimitivesProvider;
|
||||
use reth_trie::{root::state_root_unhashed, updates::TrieUpdates, HashedPostState};
|
||||
use reth_trie::root::state_root_unhashed;
|
||||
use revm_database::BundleState;
|
||||
use revm_state::AccountInfo;
|
||||
use std::{
|
||||
@@ -92,7 +92,7 @@ impl<N: NodePrimitives> TestBlockBuilder<N> {
|
||||
&mut self,
|
||||
number: BlockNumber,
|
||||
parent_hash: B256,
|
||||
) -> RecoveredBlock<reth_ethereum_primitives::Block> {
|
||||
) -> SealedBlock<reth_ethereum_primitives::Block> {
|
||||
let mut rng = rand::rng();
|
||||
|
||||
let mock_tx = |nonce: u64| -> Recovered<_> {
|
||||
@@ -167,17 +167,14 @@ impl<N: NodePrimitives> TestBlockBuilder<N> {
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let block = SealedBlock::from_sealed_parts(
|
||||
SealedBlock::from_sealed_parts(
|
||||
SealedHeader::seal_slow(header),
|
||||
BlockBody {
|
||||
transactions: transactions.into_iter().map(|tx| tx.into_inner()).collect(),
|
||||
ommers: Vec::new(),
|
||||
withdrawals: Some(vec![].into()),
|
||||
},
|
||||
);
|
||||
|
||||
RecoveredBlock::try_recover_sealed_with_senders(block, vec![self.signer; num_txs as usize])
|
||||
.unwrap()
|
||||
)
|
||||
}
|
||||
|
||||
/// Creates a fork chain with the given base block.
|
||||
@@ -191,7 +188,9 @@ impl<N: NodePrimitives> TestBlockBuilder<N> {
|
||||
|
||||
for _ in 0..length {
|
||||
let block = self.generate_random_block(parent.number + 1, parent.hash());
|
||||
parent = block.clone_sealed_block();
|
||||
parent = block.clone();
|
||||
let senders = vec![self.signer; block.body().transactions.len()];
|
||||
let block = block.with_senders(senders);
|
||||
fork.push(block);
|
||||
}
|
||||
|
||||
@@ -205,20 +204,19 @@ impl<N: NodePrimitives> TestBlockBuilder<N> {
|
||||
receipts: Vec<Vec<Receipt>>,
|
||||
parent_hash: B256,
|
||||
) -> ExecutedBlock {
|
||||
let block_with_senders = self.generate_random_block(block_number, parent_hash);
|
||||
|
||||
let (block, senders) = block_with_senders.split_sealed();
|
||||
ExecutedBlock {
|
||||
recovered_block: Arc::new(RecoveredBlock::new_sealed(block, senders)),
|
||||
execution_output: Arc::new(ExecutionOutcome::new(
|
||||
let block = self.generate_random_block(block_number, parent_hash);
|
||||
let senders = vec![self.signer; block.body().transactions.len()];
|
||||
let trie_data = ComputedTrieData::default();
|
||||
ExecutedBlock::new(
|
||||
Arc::new(RecoveredBlock::new_sealed(block, senders)),
|
||||
Arc::new(ExecutionOutcome::new(
|
||||
BundleState::default(),
|
||||
receipts,
|
||||
block_number,
|
||||
vec![Requests::default()],
|
||||
)),
|
||||
hashed_state: Arc::new(HashedPostState::default()),
|
||||
trie_updates: Arc::new(TrieUpdates::default()),
|
||||
}
|
||||
trie_data,
|
||||
)
|
||||
}
|
||||
|
||||
/// Generates an [`ExecutedBlock`] that includes the given receipts.
|
||||
|
||||
@@ -30,8 +30,9 @@ pub use info::ChainInfo;
|
||||
#[cfg(any(test, feature = "test-utils"))]
|
||||
pub use spec::test_fork_ids;
|
||||
pub use spec::{
|
||||
make_genesis_header, BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder,
|
||||
ChainSpecProvider, DepositContract, ForkBaseFeeParams, DEV, HOLESKY, HOODI, MAINNET, SEPOLIA,
|
||||
blob_params_to_schedule, create_chain_config, mainnet_chain_config, make_genesis_header,
|
||||
BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, ChainSpecProvider,
|
||||
DepositContract, ForkBaseFeeParams, DEV, HOLESKY, HOODI, MAINNET, SEPOLIA,
|
||||
};
|
||||
|
||||
use reth_primitives_traits::sync::OnceLock;
|
||||
|
||||
@@ -4,13 +4,20 @@ use alloy_evm::eth::spec::EthExecutorSpec;
|
||||
use crate::{
|
||||
constants::{MAINNET_DEPOSIT_CONTRACT, MAINNET_PRUNE_DELETE_LIMIT},
|
||||
ethereum::SEPOLIA_PARIS_TTD,
|
||||
holesky, hoodi,
|
||||
holesky, hoodi, mainnet,
|
||||
mainnet::{MAINNET_PARIS_BLOCK, MAINNET_PARIS_TTD},
|
||||
sepolia,
|
||||
sepolia::SEPOLIA_PARIS_BLOCK,
|
||||
EthChainSpec,
|
||||
};
|
||||
use alloc::{boxed::Box, sync::Arc, vec::Vec};
|
||||
use alloc::{
|
||||
boxed::Box,
|
||||
collections::BTreeMap,
|
||||
format,
|
||||
string::{String, ToString},
|
||||
sync::Arc,
|
||||
vec::Vec,
|
||||
};
|
||||
use alloy_chains::{Chain, NamedChain};
|
||||
use alloy_consensus::{
|
||||
constants::{
|
||||
@@ -23,7 +30,7 @@ use alloy_eips::{
|
||||
eip1559::INITIAL_BASE_FEE, eip7685::EMPTY_REQUESTS_HASH, eip7840::BlobParams,
|
||||
eip7892::BlobScheduleBlobParams,
|
||||
};
|
||||
use alloy_genesis::Genesis;
|
||||
use alloy_genesis::{ChainConfig, Genesis};
|
||||
use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256};
|
||||
use alloy_trie::root::state_root_ref_unhashed;
|
||||
use core::fmt::Debug;
|
||||
@@ -73,6 +80,8 @@ pub fn make_genesis_header(genesis: &Genesis, hardforks: &ChainHardforks) -> Hea
|
||||
.then_some(EMPTY_REQUESTS_HASH);
|
||||
|
||||
Header {
|
||||
number: genesis.number.unwrap_or_default(),
|
||||
parent_hash: genesis.parent_hash.unwrap_or_default(),
|
||||
gas_limit: genesis.gas_limit,
|
||||
difficulty: genesis.difficulty,
|
||||
nonce: genesis.nonce.into(),
|
||||
@@ -113,7 +122,10 @@ pub static MAINNET: LazyLock<Arc<ChainSpec>> = LazyLock::new(|| {
|
||||
deposit_contract: Some(MAINNET_DEPOSIT_CONTRACT),
|
||||
base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()),
|
||||
prune_delete_limit: MAINNET_PRUNE_DELETE_LIMIT,
|
||||
blob_params: BlobScheduleBlobParams::default(),
|
||||
blob_params: BlobScheduleBlobParams::default().with_scheduled([
|
||||
(mainnet::MAINNET_BPO1_TIMESTAMP, BlobParams::bpo1()),
|
||||
(mainnet::MAINNET_BPO2_TIMESTAMP, BlobParams::bpo2()),
|
||||
]),
|
||||
};
|
||||
spec.genesis.config.dao_fork_support = true;
|
||||
spec.into()
|
||||
@@ -237,6 +249,111 @@ pub static DEV: LazyLock<Arc<ChainSpec>> = LazyLock::new(|| {
|
||||
.into()
|
||||
});
|
||||
|
||||
/// Creates a [`ChainConfig`] from the given chain, hardforks, deposit contract address, and blob
|
||||
/// schedule.
|
||||
pub fn create_chain_config(
|
||||
chain: Option<Chain>,
|
||||
hardforks: &ChainHardforks,
|
||||
deposit_contract_address: Option<Address>,
|
||||
blob_schedule: BTreeMap<String, BlobParams>,
|
||||
) -> ChainConfig {
|
||||
// Helper to extract block number from a hardfork condition
|
||||
let block_num = |fork: EthereumHardfork| hardforks.fork(fork).block_number();
|
||||
|
||||
// Helper to extract timestamp from a hardfork condition
|
||||
let timestamp = |fork: EthereumHardfork| -> Option<u64> {
|
||||
match hardforks.fork(fork) {
|
||||
ForkCondition::Timestamp(t) => Some(t),
|
||||
_ => None,
|
||||
}
|
||||
};
|
||||
|
||||
// Extract TTD from Paris fork
|
||||
let (terminal_total_difficulty, terminal_total_difficulty_passed) =
|
||||
match hardforks.fork(EthereumHardfork::Paris) {
|
||||
ForkCondition::TTD { total_difficulty, .. } => (Some(total_difficulty), true),
|
||||
_ => (None, false),
|
||||
};
|
||||
|
||||
// Check if DAO fork is supported (it has an activation block)
|
||||
let dao_fork_support = hardforks.fork(EthereumHardfork::Dao) != ForkCondition::Never;
|
||||
|
||||
ChainConfig {
|
||||
chain_id: chain.map(|c| c.id()).unwrap_or(0),
|
||||
homestead_block: block_num(EthereumHardfork::Homestead),
|
||||
dao_fork_block: block_num(EthereumHardfork::Dao),
|
||||
dao_fork_support,
|
||||
eip150_block: block_num(EthereumHardfork::Tangerine),
|
||||
eip155_block: block_num(EthereumHardfork::SpuriousDragon),
|
||||
eip158_block: block_num(EthereumHardfork::SpuriousDragon),
|
||||
byzantium_block: block_num(EthereumHardfork::Byzantium),
|
||||
constantinople_block: block_num(EthereumHardfork::Constantinople),
|
||||
petersburg_block: block_num(EthereumHardfork::Petersburg),
|
||||
istanbul_block: block_num(EthereumHardfork::Istanbul),
|
||||
muir_glacier_block: block_num(EthereumHardfork::MuirGlacier),
|
||||
berlin_block: block_num(EthereumHardfork::Berlin),
|
||||
london_block: block_num(EthereumHardfork::London),
|
||||
arrow_glacier_block: block_num(EthereumHardfork::ArrowGlacier),
|
||||
gray_glacier_block: block_num(EthereumHardfork::GrayGlacier),
|
||||
merge_netsplit_block: None,
|
||||
shanghai_time: timestamp(EthereumHardfork::Shanghai),
|
||||
cancun_time: timestamp(EthereumHardfork::Cancun),
|
||||
prague_time: timestamp(EthereumHardfork::Prague),
|
||||
osaka_time: timestamp(EthereumHardfork::Osaka),
|
||||
bpo1_time: timestamp(EthereumHardfork::Bpo1),
|
||||
bpo2_time: timestamp(EthereumHardfork::Bpo2),
|
||||
bpo3_time: timestamp(EthereumHardfork::Bpo3),
|
||||
bpo4_time: timestamp(EthereumHardfork::Bpo4),
|
||||
bpo5_time: timestamp(EthereumHardfork::Bpo5),
|
||||
terminal_total_difficulty,
|
||||
terminal_total_difficulty_passed,
|
||||
ethash: None,
|
||||
clique: None,
|
||||
parlia: None,
|
||||
extra_fields: Default::default(),
|
||||
deposit_contract_address,
|
||||
blob_schedule,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a [`ChainConfig`] for the current Ethereum mainnet chain.
|
||||
pub fn mainnet_chain_config() -> ChainConfig {
|
||||
let hardforks: ChainHardforks = EthereumHardfork::mainnet().into();
|
||||
let blob_schedule = blob_params_to_schedule(&MAINNET.blob_params, &hardforks);
|
||||
create_chain_config(
|
||||
Some(Chain::mainnet()),
|
||||
&hardforks,
|
||||
Some(MAINNET_DEPOSIT_CONTRACT.address),
|
||||
blob_schedule,
|
||||
)
|
||||
}
|
||||
|
||||
/// Converts the given [`BlobScheduleBlobParams`] into a blob schedule keyed by fork name.
|
||||
pub fn blob_params_to_schedule(
|
||||
params: &BlobScheduleBlobParams,
|
||||
hardforks: &ChainHardforks,
|
||||
) -> BTreeMap<String, BlobParams> {
|
||||
let mut schedule = BTreeMap::new();
|
||||
schedule.insert("cancun".to_string(), params.cancun);
|
||||
schedule.insert("prague".to_string(), params.prague);
|
||||
schedule.insert("osaka".to_string(), params.osaka);
|
||||
|
||||
// Map scheduled entries back to bpo fork names by matching timestamps
|
||||
let bpo_forks = EthereumHardfork::bpo_variants();
|
||||
for (timestamp, blob_params) in ¶ms.scheduled {
|
||||
for bpo_fork in bpo_forks {
|
||||
if let ForkCondition::Timestamp(fork_ts) = hardforks.fork(bpo_fork) &&
|
||||
fork_ts == *timestamp
|
||||
{
|
||||
schedule.insert(bpo_fork.name().to_lowercase(), *blob_params);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
schedule
|
||||
}
|
||||
|
||||
/// A wrapper around [`BaseFeeParams`] that allows for specifying constant or dynamic EIP-1559
|
||||
/// parameters based on the active [Hardfork].
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
@@ -440,7 +557,26 @@ impl<H: BlockHeader> ChainSpec<H> {
|
||||
|
||||
/// Returns the hardfork display helper.
|
||||
pub fn display_hardforks(&self) -> DisplayHardforks {
|
||||
DisplayHardforks::new(self.hardforks.forks_iter())
|
||||
// Create an iterator with hardfork, condition, and optional blob metadata
|
||||
let hardforks_with_meta = self.hardforks.forks_iter().map(|(fork, condition)| {
|
||||
// Generate blob metadata for timestamp-based hardforks that have blob params
|
||||
let metadata = match condition {
|
||||
ForkCondition::Timestamp(timestamp) => {
|
||||
// Try to get blob params for this timestamp
|
||||
// This automatically handles all hardforks with blob support
|
||||
EthChainSpec::blob_params_at_timestamp(self, timestamp).map(|params| {
|
||||
format!(
|
||||
"blob: (target: {}, max: {}, fraction: {})",
|
||||
params.target_blob_count, params.max_blob_count, params.update_fraction
|
||||
)
|
||||
})
|
||||
}
|
||||
_ => None,
|
||||
};
|
||||
(fork, condition, metadata)
|
||||
});
|
||||
|
||||
DisplayHardforks::with_meta(hardforks_with_meta)
|
||||
}
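// Hedged sketch (not part of the diff): rendering the hardfork overview (including the
// blob metadata added above), as the display test below exercises. `example_print_hardforks`
// is an illustrative name only.
#[allow(dead_code)]
fn example_print_hardforks(spec: &Self) -> String {
    spec.display_hardforks().to_string()
}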
|
||||
|
||||
/// Get the fork id for the given hardfork.
|
||||
@@ -492,8 +628,15 @@ impl<H: BlockHeader> ChainSpec<H> {
|
||||
|
||||
/// Compute the [`ForkId`] for the given [`Head`] following eip-6122 spec.
|
||||
///
|
||||
/// Note: In case there are multiple hardforks activated at the same block or timestamp, only
|
||||
/// the first gets applied.
|
||||
/// The fork hash is computed by starting from the genesis hash and iteratively adding
|
||||
/// block numbers (for block-based forks) or timestamps (for timestamp-based forks) of
|
||||
/// active forks. The `next` field indicates the next fork activation point, or `0` if
|
||||
/// all forks are active.
|
||||
///
|
||||
/// Block-based forks are processed first, then timestamp-based forks. If multiple hardforks are
/// activated at the same block or timestamp, only the first one is applied.
|
||||
///
|
||||
/// See: <https://eips.ethereum.org/EIPS/eip-6122>
|
||||
pub fn fork_id(&self, head: &Head) -> ForkId {
|
||||
let mut forkhash = ForkHash::from(self.genesis_hash());
|
||||
|
||||
@@ -550,6 +693,10 @@ impl<H: BlockHeader> ChainSpec<H> {
|
||||
}
|
||||
|
||||
/// An internal helper function that returns a head block that satisfies a given Fork condition.
|
||||
///
|
||||
/// Creates a [`Head`] representation for a fork activation point, used by [`Self::fork_id`] to
|
||||
/// compute fork IDs. For timestamp-based forks, includes the last block-based fork number
|
||||
/// before the merge (if any).
|
||||
pub(crate) fn satisfy(&self, cond: ForkCondition) -> Head {
|
||||
match cond {
|
||||
ForkCondition::Block(number) => Head { number, ..Default::default() },
|
||||
@@ -823,7 +970,7 @@ impl<H: BlockHeader> EthereumHardforks for ChainSpec<H> {
|
||||
|
||||
/// A trait for reading the current chainspec.
|
||||
#[auto_impl::auto_impl(&, Arc)]
|
||||
pub trait ChainSpecProvider: Debug + Send + Sync {
|
||||
pub trait ChainSpecProvider: Debug + Send {
|
||||
/// The chain spec type.
|
||||
type ChainSpec: EthChainSpec + 'static;
|
||||
|
||||
@@ -883,7 +1030,7 @@ impl ChainSpecBuilder {
|
||||
|
||||
/// Remove the given fork from the spec.
|
||||
pub fn without_fork<H: Hardfork>(mut self, fork: H) -> Self {
|
||||
self.hardforks.remove(fork);
|
||||
self.hardforks.remove(&fork);
|
||||
self
|
||||
}
|
||||
|
||||
@@ -903,9 +1050,16 @@ impl ChainSpecBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Enable Dao at genesis.
|
||||
pub fn dao_activated(mut self) -> Self {
|
||||
self = self.frontier_activated();
|
||||
self.hardforks.insert(EthereumHardfork::Dao, ForkCondition::Block(0));
|
||||
self
|
||||
}
|
||||
|
||||
/// Enable Homestead at genesis.
|
||||
pub fn homestead_activated(mut self) -> Self {
|
||||
self = self.frontier_activated();
|
||||
self = self.dao_activated();
|
||||
self.hardforks.insert(EthereumHardfork::Homestead, ForkCondition::Block(0));
|
||||
self
|
||||
}
|
||||
@@ -952,9 +1106,16 @@ impl ChainSpecBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Enable Muir Glacier at genesis.
|
||||
pub fn muirglacier_activated(mut self) -> Self {
|
||||
self = self.istanbul_activated();
|
||||
self.hardforks.insert(EthereumHardfork::MuirGlacier, ForkCondition::Block(0));
|
||||
self
|
||||
}
|
||||
|
||||
/// Enable Berlin at genesis.
|
||||
pub fn berlin_activated(mut self) -> Self {
|
||||
self = self.istanbul_activated();
|
||||
self = self.muirglacier_activated();
|
||||
self.hardforks.insert(EthereumHardfork::Berlin, ForkCondition::Block(0));
|
||||
self
|
||||
}
|
||||
@@ -966,9 +1127,23 @@ impl ChainSpecBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Enable Arrow Glacier at genesis.
|
||||
pub fn arrowglacier_activated(mut self) -> Self {
|
||||
self = self.london_activated();
|
||||
self.hardforks.insert(EthereumHardfork::ArrowGlacier, ForkCondition::Block(0));
|
||||
self
|
||||
}
|
||||
|
||||
/// Enable Gray Glacier at genesis.
|
||||
pub fn grayglacier_activated(mut self) -> Self {
|
||||
self = self.arrowglacier_activated();
|
||||
self.hardforks.insert(EthereumHardfork::GrayGlacier, ForkCondition::Block(0));
|
||||
self
|
||||
}
|
||||
|
||||
/// Enable Paris at genesis.
|
||||
pub fn paris_activated(mut self) -> Self {
|
||||
self = self.london_activated();
|
||||
self = self.grayglacier_activated();
|
||||
self.hardforks.insert(
|
||||
EthereumHardfork::Paris,
|
||||
ForkCondition::TTD {
|
||||
@@ -1157,8 +1332,11 @@ Merge hard forks:
|
||||
- Paris @58750000000000000000000 (network is known to be merged)
|
||||
Post-merge hard forks (timestamp based):
|
||||
- Shanghai @1681338455
|
||||
- Cancun @1710338135
|
||||
- Prague @1746612311"
|
||||
- Cancun @1710338135 blob: (target: 3, max: 6, fraction: 3338477)
|
||||
- Prague @1746612311 blob: (target: 6, max: 9, fraction: 5007716)
|
||||
- Osaka @1764798551 blob: (target: 6, max: 9, fraction: 5007716)
|
||||
- Bpo1 @1765290071 blob: (target: 10, max: 15, fraction: 8346193)
|
||||
- Bpo2 @1767747671 blob: (target: 14, max: 21, fraction: 11684671)"
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1338,71 +1516,74 @@ Post-merge hard forks (timestamp based):
|
||||
&[
|
||||
(
|
||||
EthereumHardfork::Frontier,
|
||||
ForkId { hash: ForkHash([0xfc, 0x64, 0xec, 0x04]), next: 1150000 },
|
||||
ForkId { hash: ForkHash(hex!("0xfc64ec04")), next: 1150000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Homestead,
|
||||
ForkId { hash: ForkHash([0x97, 0xc2, 0xc3, 0x4c]), next: 1920000 },
|
||||
ForkId { hash: ForkHash(hex!("0x97c2c34c")), next: 1920000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Dao,
|
||||
ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 },
|
||||
ForkId { hash: ForkHash(hex!("0x91d1f948")), next: 2463000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Tangerine,
|
||||
ForkId { hash: ForkHash([0x7a, 0x64, 0xda, 0x13]), next: 2675000 },
|
||||
ForkId { hash: ForkHash(hex!("0x7a64da13")), next: 2675000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::SpuriousDragon,
|
||||
ForkId { hash: ForkHash([0x3e, 0xdd, 0x5b, 0x10]), next: 4370000 },
|
||||
ForkId { hash: ForkHash(hex!("0x3edd5b10")), next: 4370000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Byzantium,
|
||||
ForkId { hash: ForkHash([0xa0, 0x0b, 0xc3, 0x24]), next: 7280000 },
|
||||
ForkId { hash: ForkHash(hex!("0xa00bc324")), next: 7280000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Constantinople,
|
||||
ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 },
|
||||
ForkId { hash: ForkHash(hex!("0x668db0af")), next: 9069000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Petersburg,
|
||||
ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 },
|
||||
ForkId { hash: ForkHash(hex!("0x668db0af")), next: 9069000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Istanbul,
|
||||
ForkId { hash: ForkHash([0x87, 0x9d, 0x6e, 0x30]), next: 9200000 },
|
||||
ForkId { hash: ForkHash(hex!("0x879d6e30")), next: 9200000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::MuirGlacier,
|
||||
ForkId { hash: ForkHash([0xe0, 0x29, 0xe9, 0x91]), next: 12244000 },
|
||||
ForkId { hash: ForkHash(hex!("0xe029e991")), next: 12244000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Berlin,
|
||||
ForkId { hash: ForkHash([0x0e, 0xb4, 0x40, 0xf6]), next: 12965000 },
|
||||
ForkId { hash: ForkHash(hex!("0x0eb440f6")), next: 12965000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::London,
|
||||
ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 13773000 },
|
||||
ForkId { hash: ForkHash(hex!("0xb715077d")), next: 13773000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::ArrowGlacier,
|
||||
ForkId { hash: ForkHash([0x20, 0xc3, 0x27, 0xfc]), next: 15050000 },
|
||||
ForkId { hash: ForkHash(hex!("0x20c327fc")), next: 15050000 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::GrayGlacier,
|
||||
ForkId { hash: ForkHash([0xf0, 0xaf, 0xd0, 0xe3]), next: 1681338455 },
|
||||
ForkId { hash: ForkHash(hex!("0xf0afd0e3")), next: 1681338455 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Shanghai,
|
||||
ForkId { hash: ForkHash([0xdc, 0xe9, 0x6c, 0x2d]), next: 1710338135 },
|
||||
ForkId { hash: ForkHash(hex!("0xdce96c2d")), next: 1710338135 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Cancun,
|
||||
ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 1746612311 },
|
||||
ForkId { hash: ForkHash(hex!("0x9f3d2254")), next: 1746612311 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Prague,
|
||||
ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 },
|
||||
ForkId {
|
||||
hash: ForkHash(hex!("0xc376cf8b")),
|
||||
next: mainnet::MAINNET_OSAKA_TIMESTAMP,
|
||||
},
|
||||
),
|
||||
],
|
||||
);
|
||||
@@ -1415,60 +1596,60 @@ Post-merge hard forks (timestamp based):
|
||||
&[
|
||||
(
|
||||
EthereumHardfork::Frontier,
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Homestead,
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Tangerine,
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::SpuriousDragon,
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Byzantium,
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Constantinople,
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Petersburg,
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Istanbul,
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Berlin,
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::London,
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Paris,
|
||||
ForkId { hash: ForkHash([0xb9, 0x6c, 0xbd, 0x13]), next: 1677557088 },
|
||||
ForkId { hash: ForkHash(hex!("0xb96cbd13")), next: 1677557088 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Shanghai,
|
||||
ForkId { hash: ForkHash([0xf7, 0xf9, 0xbc, 0x08]), next: 1706655072 },
|
||||
ForkId { hash: ForkHash(hex!("0xf7f9bc08")), next: 1706655072 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Cancun,
|
||||
ForkId { hash: ForkHash([0x88, 0xcf, 0x81, 0xd9]), next: 1741159776 },
|
||||
ForkId { hash: ForkHash(hex!("0x88cf81d9")), next: 1741159776 },
|
||||
),
|
||||
(
|
||||
EthereumHardfork::Prague,
|
||||
ForkId {
|
||||
hash: ForkHash([0xed, 0x88, 0xb5, 0xfd]),
|
||||
hash: ForkHash(hex!("0xed88b5fd")),
|
||||
next: sepolia::SEPOLIA_OSAKA_TIMESTAMP,
|
||||
},
|
||||
),
|
||||
@@ -1483,75 +1664,85 @@ Post-merge hard forks (timestamp based):
|
||||
&[
|
||||
(
|
||||
Head { number: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xfc, 0x64, 0xec, 0x04]), next: 1150000 },
|
||||
ForkId { hash: ForkHash(hex!("0xfc64ec04")), next: 1150000 },
|
||||
),
|
||||
(
|
||||
Head { number: 1150000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x97, 0xc2, 0xc3, 0x4c]), next: 1920000 },
|
||||
ForkId { hash: ForkHash(hex!("0x97c2c34c")), next: 1920000 },
|
||||
),
|
||||
(
|
||||
Head { number: 1920000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 },
|
||||
ForkId { hash: ForkHash(hex!("0x91d1f948")), next: 2463000 },
|
||||
),
|
||||
(
|
||||
Head { number: 2463000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x7a, 0x64, 0xda, 0x13]), next: 2675000 },
|
||||
ForkId { hash: ForkHash(hex!("0x7a64da13")), next: 2675000 },
|
||||
),
|
||||
(
|
||||
Head { number: 2675000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x3e, 0xdd, 0x5b, 0x10]), next: 4370000 },
|
||||
ForkId { hash: ForkHash(hex!("0x3edd5b10")), next: 4370000 },
|
||||
),
|
||||
(
|
||||
Head { number: 4370000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xa0, 0x0b, 0xc3, 0x24]), next: 7280000 },
|
||||
ForkId { hash: ForkHash(hex!("0xa00bc324")), next: 7280000 },
|
||||
),
|
||||
(
|
||||
Head { number: 7280000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 },
|
||||
ForkId { hash: ForkHash(hex!("0x668db0af")), next: 9069000 },
|
||||
),
|
||||
(
|
||||
Head { number: 9069000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x87, 0x9d, 0x6e, 0x30]), next: 9200000 },
|
||||
ForkId { hash: ForkHash(hex!("0x879d6e30")), next: 9200000 },
|
||||
),
|
||||
(
|
||||
Head { number: 9200000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xe0, 0x29, 0xe9, 0x91]), next: 12244000 },
|
||||
ForkId { hash: ForkHash(hex!("0xe029e991")), next: 12244000 },
|
||||
),
|
||||
(
|
||||
Head { number: 12244000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x0e, 0xb4, 0x40, 0xf6]), next: 12965000 },
|
||||
ForkId { hash: ForkHash(hex!("0x0eb440f6")), next: 12965000 },
|
||||
),
|
||||
(
|
||||
Head { number: 12965000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 13773000 },
|
||||
ForkId { hash: ForkHash(hex!("0xb715077d")), next: 13773000 },
|
||||
),
|
||||
(
|
||||
Head { number: 13773000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x20, 0xc3, 0x27, 0xfc]), next: 15050000 },
|
||||
ForkId { hash: ForkHash(hex!("0x20c327fc")), next: 15050000 },
|
||||
),
|
||||
(
|
||||
Head { number: 15050000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xf0, 0xaf, 0xd0, 0xe3]), next: 1681338455 },
|
||||
ForkId { hash: ForkHash(hex!("0xf0afd0e3")), next: 1681338455 },
|
||||
),
|
||||
// First Shanghai block
|
||||
(
|
||||
Head { number: 20000000, timestamp: 1681338455, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xdc, 0xe9, 0x6c, 0x2d]), next: 1710338135 },
|
||||
ForkId { hash: ForkHash(hex!("0xdce96c2d")), next: 1710338135 },
|
||||
),
|
||||
// First Cancun block
|
||||
(
|
||||
Head { number: 20000001, timestamp: 1710338135, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 1746612311 },
|
||||
ForkId { hash: ForkHash(hex!("0x9f3d2254")), next: 1746612311 },
|
||||
),
|
||||
// First Prague block
|
||||
(
|
||||
Head { number: 20000002, timestamp: 1746612311, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 },
|
||||
Head { number: 20000004, timestamp: 1746612311, ..Default::default() },
|
||||
ForkId {
|
||||
hash: ForkHash(hex!("0xc376cf8b")),
|
||||
next: mainnet::MAINNET_OSAKA_TIMESTAMP,
|
||||
},
|
||||
),
|
||||
// Future Prague block
|
||||
// Osaka block
|
||||
(
|
||||
Head { number: 20000002, timestamp: 2000000000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 },
|
||||
Head {
|
||||
number: 20000004,
|
||||
timestamp: mainnet::MAINNET_OSAKA_TIMESTAMP,
|
||||
..Default::default()
|
||||
},
|
||||
ForkId {
|
||||
hash: ForkHash(hex!("0x5167e2a6")),
|
||||
next: mainnet::MAINNET_BPO1_TIMESTAMP,
|
||||
},
|
||||
),
|
||||
],
|
||||
);
|
||||
@@ -1564,13 +1755,13 @@ Post-merge hard forks (timestamp based):
|
||||
&[
|
||||
(
|
||||
Head { number: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xbe, 0xf7, 0x1d, 0x30]), next: 1742999832 },
|
||||
ForkId { hash: ForkHash(hex!("0xbef71d30")), next: 1742999832 },
|
||||
),
|
||||
// First Prague block
|
||||
(
|
||||
Head { number: 0, timestamp: 1742999833, ..Default::default() },
|
||||
ForkId {
|
||||
hash: ForkHash([0x09, 0x29, 0xe2, 0x4e]),
|
||||
hash: ForkHash(hex!("0x0929e24e")),
|
||||
next: hoodi::HOODI_OSAKA_TIMESTAMP,
|
||||
},
|
||||
),
|
||||
@@ -1597,43 +1788,43 @@ Post-merge hard forks (timestamp based):
|
||||
&[
|
||||
(
|
||||
Head { number: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xc6, 0x1a, 0x60, 0x98]), next: 1696000704 },
|
||||
ForkId { hash: ForkHash(hex!("0xc61a6098")), next: 1696000704 },
|
||||
),
|
||||
// First MergeNetsplit block
|
||||
(
|
||||
Head { number: 123, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xc6, 0x1a, 0x60, 0x98]), next: 1696000704 },
|
||||
ForkId { hash: ForkHash(hex!("0xc61a6098")), next: 1696000704 },
|
||||
),
|
||||
// Last MergeNetsplit block
|
||||
(
|
||||
Head { number: 123, timestamp: 1696000703, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xc6, 0x1a, 0x60, 0x98]), next: 1696000704 },
|
||||
ForkId { hash: ForkHash(hex!("0xc61a6098")), next: 1696000704 },
|
||||
),
|
||||
// First Shanghai block
|
||||
(
|
||||
Head { number: 123, timestamp: 1696000704, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xfd, 0x4f, 0x01, 0x6b]), next: 1707305664 },
|
||||
ForkId { hash: ForkHash(hex!("0xfd4f016b")), next: 1707305664 },
|
||||
),
|
||||
// Last Shanghai block
|
||||
(
|
||||
Head { number: 123, timestamp: 1707305663, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xfd, 0x4f, 0x01, 0x6b]), next: 1707305664 },
|
||||
ForkId { hash: ForkHash(hex!("0xfd4f016b")), next: 1707305664 },
|
||||
),
|
||||
// First Cancun block
|
||||
(
|
||||
Head { number: 123, timestamp: 1707305664, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x9b, 0x19, 0x2a, 0xd0]), next: 1740434112 },
|
||||
ForkId { hash: ForkHash(hex!("0x9b192ad0")), next: 1740434112 },
|
||||
),
|
||||
// Last Cancun block
|
||||
(
|
||||
Head { number: 123, timestamp: 1740434111, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x9b, 0x19, 0x2a, 0xd0]), next: 1740434112 },
|
||||
ForkId { hash: ForkHash(hex!("0x9b192ad0")), next: 1740434112 },
|
||||
),
|
||||
// First Prague block
|
||||
(
|
||||
Head { number: 123, timestamp: 1740434112, ..Default::default() },
|
||||
ForkId {
|
||||
hash: ForkHash([0xdf, 0xbd, 0x9b, 0xed]),
|
||||
hash: ForkHash(hex!("0xdfbd9bed")),
|
||||
next: holesky::HOLESKY_OSAKA_TIMESTAMP,
|
||||
},
|
||||
),
|
||||
@@ -1660,45 +1851,45 @@ Post-merge hard forks (timestamp based):
|
||||
&[
|
||||
(
|
||||
Head { number: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
Head { number: 1735370, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 },
|
||||
ForkId { hash: ForkHash(hex!("0xfe3366e7")), next: 1735371 },
|
||||
),
|
||||
(
|
||||
Head { number: 1735371, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xb9, 0x6c, 0xbd, 0x13]), next: 1677557088 },
|
||||
ForkId { hash: ForkHash(hex!("0xb96cbd13")), next: 1677557088 },
|
||||
),
|
||||
(
|
||||
Head { number: 1735372, timestamp: 1677557087, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xb9, 0x6c, 0xbd, 0x13]), next: 1677557088 },
|
||||
ForkId { hash: ForkHash(hex!("0xb96cbd13")), next: 1677557088 },
|
||||
),
|
||||
// First Shanghai block
|
||||
(
|
||||
Head { number: 1735373, timestamp: 1677557088, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xf7, 0xf9, 0xbc, 0x08]), next: 1706655072 },
|
||||
ForkId { hash: ForkHash(hex!("0xf7f9bc08")), next: 1706655072 },
|
||||
),
|
||||
// Last Shanghai block
|
||||
(
|
||||
Head { number: 1735374, timestamp: 1706655071, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xf7, 0xf9, 0xbc, 0x08]), next: 1706655072 },
|
||||
ForkId { hash: ForkHash(hex!("0xf7f9bc08")), next: 1706655072 },
|
||||
),
|
||||
// First Cancun block
|
||||
(
|
||||
Head { number: 1735375, timestamp: 1706655072, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x88, 0xcf, 0x81, 0xd9]), next: 1741159776 },
|
||||
ForkId { hash: ForkHash(hex!("0x88cf81d9")), next: 1741159776 },
|
||||
),
|
||||
// Last Cancun block
|
||||
(
|
||||
Head { number: 1735376, timestamp: 1741159775, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x88, 0xcf, 0x81, 0xd9]), next: 1741159776 },
|
||||
ForkId { hash: ForkHash(hex!("0x88cf81d9")), next: 1741159776 },
|
||||
),
|
||||
// First Prague block
|
||||
(
|
||||
Head { number: 1735377, timestamp: 1741159776, ..Default::default() },
|
||||
ForkId {
|
||||
hash: ForkHash([0xed, 0x88, 0xb5, 0xfd]),
|
||||
hash: ForkHash(hex!("0xed88b5fd")),
|
||||
next: sepolia::SEPOLIA_OSAKA_TIMESTAMP,
|
||||
},
|
||||
),
|
||||
@@ -1724,7 +1915,7 @@ Post-merge hard forks (timestamp based):
|
||||
&DEV,
|
||||
&[(
|
||||
Head { number: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x0b, 0x1a, 0x4e, 0xf7]), next: 0 },
|
||||
ForkId { hash: ForkHash(hex!("0x0b1a4ef7")), next: 0 },
|
||||
)],
|
||||
)
|
||||
}
|
||||
@@ -1740,131 +1931,142 @@ Post-merge hard forks (timestamp based):
|
||||
&[
|
||||
(
|
||||
Head { number: 0, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xfc, 0x64, 0xec, 0x04]), next: 1150000 },
|
||||
ForkId { hash: ForkHash(hex!("0xfc64ec04")), next: 1150000 },
|
||||
), // Unsynced
|
||||
(
|
||||
Head { number: 1149999, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xfc, 0x64, 0xec, 0x04]), next: 1150000 },
|
||||
ForkId { hash: ForkHash(hex!("0xfc64ec04")), next: 1150000 },
|
||||
), // Last Frontier block
|
||||
(
|
||||
Head { number: 1150000, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x97, 0xc2, 0xc3, 0x4c]), next: 1920000 },
|
||||
ForkId { hash: ForkHash(hex!("0x97c2c34c")), next: 1920000 },
|
||||
), // First Homestead block
|
||||
(
|
||||
Head { number: 1919999, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x97, 0xc2, 0xc3, 0x4c]), next: 1920000 },
|
||||
ForkId { hash: ForkHash(hex!("0x97c2c34c")), next: 1920000 },
|
||||
), // Last Homestead block
|
||||
(
|
||||
Head { number: 1920000, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 },
|
||||
ForkId { hash: ForkHash(hex!("0x91d1f948")), next: 2463000 },
|
||||
), // First DAO block
|
||||
(
|
||||
Head { number: 2462999, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 },
|
||||
ForkId { hash: ForkHash(hex!("0x91d1f948")), next: 2463000 },
|
||||
), // Last DAO block
|
||||
(
|
||||
Head { number: 2463000, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x7a, 0x64, 0xda, 0x13]), next: 2675000 },
|
||||
ForkId { hash: ForkHash(hex!("0x7a64da13")), next: 2675000 },
|
||||
), // First Tangerine block
|
||||
(
|
||||
Head { number: 2674999, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x7a, 0x64, 0xda, 0x13]), next: 2675000 },
|
||||
ForkId { hash: ForkHash(hex!("0x7a64da13")), next: 2675000 },
|
||||
), // Last Tangerine block
|
||||
(
|
||||
Head { number: 2675000, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x3e, 0xdd, 0x5b, 0x10]), next: 4370000 },
|
||||
ForkId { hash: ForkHash(hex!("0x3edd5b10")), next: 4370000 },
|
||||
), // First Spurious block
|
||||
(
|
||||
Head { number: 4369999, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x3e, 0xdd, 0x5b, 0x10]), next: 4370000 },
|
||||
ForkId { hash: ForkHash(hex!("0x3edd5b10")), next: 4370000 },
|
||||
), // Last Spurious block
|
||||
(
|
||||
Head { number: 4370000, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xa0, 0x0b, 0xc3, 0x24]), next: 7280000 },
|
||||
ForkId { hash: ForkHash(hex!("0xa00bc324")), next: 7280000 },
|
||||
), // First Byzantium block
|
||||
(
|
||||
Head { number: 7279999, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xa0, 0x0b, 0xc3, 0x24]), next: 7280000 },
|
||||
ForkId { hash: ForkHash(hex!("0xa00bc324")), next: 7280000 },
|
||||
), // Last Byzantium block
|
||||
(
|
||||
Head { number: 7280000, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 },
|
||||
ForkId { hash: ForkHash(hex!("0x668db0af")), next: 9069000 },
|
||||
), // First and last Constantinople, first Petersburg block
|
||||
(
|
||||
Head { number: 9068999, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 },
|
||||
ForkId { hash: ForkHash(hex!("0x668db0af")), next: 9069000 },
|
||||
), // Last Petersburg block
|
||||
(
|
||||
Head { number: 9069000, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x87, 0x9d, 0x6e, 0x30]), next: 9200000 },
|
||||
ForkId { hash: ForkHash(hex!("0x879d6e30")), next: 9200000 },
|
||||
), // First Istanbul and first Muir Glacier block
|
||||
(
|
||||
Head { number: 9199999, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x87, 0x9d, 0x6e, 0x30]), next: 9200000 },
|
||||
ForkId { hash: ForkHash(hex!("0x879d6e30")), next: 9200000 },
|
||||
), // Last Istanbul and first Muir Glacier block
|
||||
(
|
||||
Head { number: 9200000, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xe0, 0x29, 0xe9, 0x91]), next: 12244000 },
|
||||
ForkId { hash: ForkHash(hex!("0xe029e991")), next: 12244000 },
|
||||
), // First Muir Glacier block
|
||||
(
|
||||
Head { number: 12243999, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xe0, 0x29, 0xe9, 0x91]), next: 12244000 },
|
||||
ForkId { hash: ForkHash(hex!("0xe029e991")), next: 12244000 },
|
||||
), // Last Muir Glacier block
|
||||
(
|
||||
Head { number: 12244000, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x0e, 0xb4, 0x40, 0xf6]), next: 12965000 },
|
||||
ForkId { hash: ForkHash(hex!("0x0eb440f6")), next: 12965000 },
|
||||
), // First Berlin block
|
||||
(
|
||||
Head { number: 12964999, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x0e, 0xb4, 0x40, 0xf6]), next: 12965000 },
|
||||
ForkId { hash: ForkHash(hex!("0x0eb440f6")), next: 12965000 },
|
||||
), // Last Berlin block
|
||||
(
|
||||
Head { number: 12965000, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 13773000 },
|
||||
ForkId { hash: ForkHash(hex!("0xb715077d")), next: 13773000 },
|
||||
), // First London block
|
||||
(
|
||||
Head { number: 13772999, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 13773000 },
|
||||
ForkId { hash: ForkHash(hex!("0xb715077d")), next: 13773000 },
|
||||
), // Last London block
|
||||
(
|
||||
Head { number: 13773000, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x20, 0xc3, 0x27, 0xfc]), next: 15050000 },
|
||||
ForkId { hash: ForkHash(hex!("0x20c327fc")), next: 15050000 },
|
||||
), // First Arrow Glacier block
|
||||
(
|
||||
Head { number: 15049999, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x20, 0xc3, 0x27, 0xfc]), next: 15050000 },
|
||||
ForkId { hash: ForkHash(hex!("0x20c327fc")), next: 15050000 },
|
||||
), // Last Arrow Glacier block
|
||||
(
|
||||
Head { number: 15050000, timestamp: 0, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xf0, 0xaf, 0xd0, 0xe3]), next: 1681338455 },
|
||||
ForkId { hash: ForkHash(hex!("0xf0afd0e3")), next: 1681338455 },
|
||||
), // First Gray Glacier block
|
||||
(
|
||||
Head { number: 19999999, timestamp: 1667999999, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xf0, 0xaf, 0xd0, 0xe3]), next: 1681338455 },
|
||||
ForkId { hash: ForkHash(hex!("0xf0afd0e3")), next: 1681338455 },
|
||||
), // Last Gray Glacier block
|
||||
(
|
||||
Head { number: 20000000, timestamp: 1681338455, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xdc, 0xe9, 0x6c, 0x2d]), next: 1710338135 },
|
||||
ForkId { hash: ForkHash(hex!("0xdce96c2d")), next: 1710338135 },
|
||||
), // Last Shanghai block
|
||||
(
|
||||
Head { number: 20000001, timestamp: 1710338134, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xdc, 0xe9, 0x6c, 0x2d]), next: 1710338135 },
|
||||
ForkId { hash: ForkHash(hex!("0xdce96c2d")), next: 1710338135 },
|
||||
), // First Cancun block
|
||||
(
|
||||
Head { number: 20000002, timestamp: 1710338135, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 1746612311 },
|
||||
ForkId { hash: ForkHash(hex!("0x9f3d2254")), next: 1746612311 },
|
||||
), // Last Cancun block
|
||||
(
|
||||
Head { number: 20000003, timestamp: 1746612310, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 1746612311 },
|
||||
ForkId { hash: ForkHash(hex!("0x9f3d2254")), next: 1746612311 },
|
||||
), // First Prague block
|
||||
(
|
||||
Head { number: 20000004, timestamp: 1746612311, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 },
|
||||
), // Future Prague block
|
||||
ForkId {
|
||||
hash: ForkHash(hex!("0xc376cf8b")),
|
||||
next: mainnet::MAINNET_OSAKA_TIMESTAMP,
|
||||
},
|
||||
),
|
||||
// Osaka block
|
||||
(
|
||||
Head { number: 20000004, timestamp: 2000000000, ..Default::default() },
|
||||
ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 },
|
||||
Head {
|
||||
number: 20000004,
|
||||
timestamp: mainnet::MAINNET_OSAKA_TIMESTAMP,
|
||||
..Default::default()
|
||||
},
|
||||
ForkId {
|
||||
hash: ForkHash(hex!("0x5167e2a6")),
|
||||
next: mainnet::MAINNET_BPO1_TIMESTAMP,
|
||||
},
|
||||
),
|
||||
],
|
||||
);
|
||||
@@ -2320,7 +2522,7 @@ Post-merge hard forks (timestamp based):
|
||||
let chainspec = ChainSpec::from(genesis);
|
||||
|
||||
// make sure we are at ForkHash("bc0c2605") with Head post-cancun
|
||||
let expected_forkid = ForkId { hash: ForkHash([0xbc, 0x0c, 0x26, 0x05]), next: 0 };
|
||||
let expected_forkid = ForkId { hash: ForkHash(hex!("0xbc0c2605")), next: 0 };
|
||||
let got_forkid =
|
||||
chainspec.fork_id(&Head { number: 73, timestamp: 840, ..Default::default() });
|
||||
|
||||
@@ -2430,7 +2632,7 @@ Post-merge hard forks (timestamp based):
|
||||
assert_eq!(genesis_hash, expected_hash);
|
||||
|
||||
// check that the forkhash is correct
|
||||
let expected_forkhash = ForkHash(hex!("8062457a"));
|
||||
let expected_forkhash = ForkHash(hex!("0x8062457a"));
|
||||
assert_eq!(ForkHash::from(genesis_hash), expected_forkhash);
|
||||
}
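Context for the assertion above: per EIP-2124 the genesis fork hash is the CRC32 checksum of the genesis hash bytes, which is why the expected hex literal can be compared directly against ForkHash::from(genesis_hash). A minimal sketch of that relationship (illustrative only, assumes the crc32fast crate; not part of this diff):

// Hedged sketch: EIP-2124 fork hash of a genesis hash.
fn genesis_fork_hash(genesis_hash: [u8; 32]) -> [u8; 4] {
    // CRC32 over the raw genesis hash bytes, big-endian,
    // e.g. mainnet's genesis yields 0xfc64ec04 as in the tables above.
    crc32fast::hash(&genesis_hash).to_be_bytes()
}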
|
||||
|
||||
@@ -2521,10 +2723,8 @@ Post-merge hard forks (timestamp based):
|
||||
|
||||
#[test]
|
||||
fn latest_eth_mainnet_fork_id() {
|
||||
assert_eq!(
|
||||
ForkId { hash: ForkHash([0xc3, 0x76, 0xcf, 0x8b]), next: 0 },
|
||||
MAINNET.latest_fork_id()
|
||||
)
|
||||
// BPO2
|
||||
assert_eq!(ForkId { hash: ForkHash(hex!("0x07c9462e")), next: 0 }, MAINNET.latest_fork_id())
|
||||
}
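The hex!-literal spelling used throughout these test updates is byte-for-byte the same value as the old array form: hex! (re-exported by alloy_primitives from const-hex) expands to a fixed-size byte array at compile time and accepts the 0x prefix. A small sanity check, as a sketch reusing the surrounding test module's imports:

// Hedged sketch: both spellings denote the same ForkHash value.
#[test]
fn hex_literal_matches_array_form() {
    assert_eq!(ForkHash(hex!("0xc376cf8b")), ForkHash([0xc3, 0x76, 0xcf, 0x8b]));
}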
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -49,6 +49,7 @@ reth-stages.workspace = true
|
||||
reth-stages-types = { workspace = true, optional = true }
|
||||
reth-static-file-types = { workspace = true, features = ["clap"] }
|
||||
reth-static-file.workspace = true
|
||||
reth-tasks.workspace = true
|
||||
reth-trie = { workspace = true, features = ["metrics"] }
|
||||
reth-trie-db = { workspace = true, features = ["metrics"] }
|
||||
reth-trie-common.workspace = true
|
||||
@@ -82,6 +83,7 @@ backon.workspace = true
|
||||
secp256k1 = { workspace = true, features = ["global-context", "std", "recovery"] }
|
||||
tokio-stream.workspace = true
|
||||
reqwest.workspace = true
|
||||
metrics.workspace = true
|
||||
|
||||
# io
|
||||
fdlimit.workspace = true
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
//! Contains common `reth` arguments
|
||||
|
||||
pub use reth_primitives_traits::header::HeaderMut;
|
||||
|
||||
use alloy_primitives::B256;
|
||||
use clap::Parser;
|
||||
use reth_chainspec::EthChainSpec;
|
||||
@@ -7,7 +9,7 @@ use reth_cli::chainspec::ChainSpecParser;
|
||||
use reth_config::{config::EtlConfig, Config};
|
||||
use reth_consensus::noop::NoopConsensus;
|
||||
use reth_db::{init_db, open_db_read_only, DatabaseEnv};
|
||||
use reth_db_common::init::init_genesis;
|
||||
use reth_db_common::init::init_genesis_with_settings;
|
||||
use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader};
|
||||
use reth_eth_wire::NetPrimitivesFor;
|
||||
use reth_evm::{noop::NoopEvmConfig, ConfigureEvm};
|
||||
@@ -17,11 +19,14 @@ use reth_node_builder::{
|
||||
Node, NodeComponents, NodeComponentsBuilder, NodeTypes, NodeTypesWithDBAdapter,
|
||||
};
|
||||
use reth_node_core::{
|
||||
args::{DatabaseArgs, DatadirArgs},
|
||||
args::{DatabaseArgs, DatadirArgs, StaticFilesArgs},
|
||||
dirs::{ChainPath, DataDirPath},
|
||||
};
|
||||
use reth_provider::{
|
||||
providers::{BlockchainProvider, NodeTypesForProvider, StaticFileProvider},
|
||||
providers::{
|
||||
BlockchainProvider, NodeTypesForProvider, RocksDBProvider, StaticFileProvider,
|
||||
StaticFileProviderBuilder,
|
||||
},
|
||||
ProviderFactory, StaticFileProviderFactory,
|
||||
};
|
||||
use reth_stages::{sets::DefaultStages, Pipeline, PipelineTarget};
|
||||
@@ -57,6 +62,10 @@ pub struct EnvironmentArgs<C: ChainSpecParser> {
|
||||
/// All database related arguments
|
||||
#[command(flatten)]
|
||||
pub db: DatabaseArgs,
|
||||
|
||||
/// All static files related arguments
|
||||
#[command(flatten)]
|
||||
pub static_files: StaticFilesArgs,
|
||||
}
|
||||
|
||||
impl<C: ChainSpecParser> EnvironmentArgs<C> {
|
||||
@@ -69,10 +78,12 @@ impl<C: ChainSpecParser> EnvironmentArgs<C> {
|
||||
let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain());
|
||||
let db_path = data_dir.db();
|
||||
let sf_path = data_dir.static_files();
|
||||
let rocksdb_path = data_dir.rocksdb();
|
||||
|
||||
if access.is_read_write() {
|
||||
reth_fs_util::create_dir_all(&db_path)?;
|
||||
reth_fs_util::create_dir_all(&sf_path)?;
|
||||
reth_fs_util::create_dir_all(&rocksdb_path)?;
|
||||
}
|
||||
|
||||
let config_path = self.config.clone().unwrap_or_else(|| data_dir.config());
|
||||
@@ -92,21 +103,35 @@ impl<C: ChainSpecParser> EnvironmentArgs<C> {
|
||||
}
|
||||
|
||||
info!(target: "reth::cli", ?db_path, ?sf_path, "Opening storage");
|
||||
let genesis_block_number = self.chain.genesis().number.unwrap_or_default();
|
||||
let (db, sfp) = match access {
|
||||
AccessRights::RW => (
|
||||
Arc::new(init_db(db_path, self.db.database_args())?),
|
||||
StaticFileProvider::read_write(sf_path)?,
|
||||
),
|
||||
AccessRights::RO => (
|
||||
Arc::new(open_db_read_only(&db_path, self.db.database_args())?),
|
||||
StaticFileProvider::read_only(sf_path, false)?,
|
||||
StaticFileProviderBuilder::read_write(sf_path)?
|
||||
.with_genesis_block_number(genesis_block_number)
|
||||
.build()?,
|
||||
),
|
||||
AccessRights::RO | AccessRights::RoInconsistent => {
|
||||
(Arc::new(open_db_read_only(&db_path, self.db.database_args())?), {
|
||||
let provider = StaticFileProviderBuilder::read_only(sf_path)?
|
||||
.with_genesis_block_number(genesis_block_number)
|
||||
.build()?;
|
||||
provider.watch_directory();
|
||||
provider
|
||||
})
|
||||
}
|
||||
};
|
||||
// TransactionDB only supports read-write mode
|
||||
let rocksdb_provider = RocksDBProvider::builder(data_dir.rocksdb())
|
||||
.with_default_tables()
|
||||
.with_database_log_level(self.db.log_level)
|
||||
.build()?;
|
||||
|
||||
let provider_factory = self.create_provider_factory(&config, db, sfp)?;
|
||||
let provider_factory =
|
||||
self.create_provider_factory(&config, db, sfp, rocksdb_provider, access)?;
|
||||
if access.is_read_write() {
|
||||
debug!(target: "reth::cli", chain=%self.chain.chain(), genesis=?self.chain.genesis_hash(), "Initializing genesis");
|
||||
init_genesis(&provider_factory)?;
|
||||
init_genesis_with_settings(&provider_factory, self.static_files.to_settings())?;
|
||||
}
|
||||
|
||||
Ok(Environment { config, provider_factory, data_dir })
|
||||
@@ -122,23 +147,25 @@ impl<C: ChainSpecParser> EnvironmentArgs<C> {
|
||||
config: &Config,
|
||||
db: Arc<DatabaseEnv>,
|
||||
static_file_provider: StaticFileProvider<N::Primitives>,
|
||||
rocksdb_provider: RocksDBProvider,
|
||||
access: AccessRights,
|
||||
) -> eyre::Result<ProviderFactory<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>>
|
||||
where
|
||||
C: ChainSpecParser<ChainSpec = N::ChainSpec>,
|
||||
{
|
||||
let has_receipt_pruning = config.prune.has_receipts_pruning();
|
||||
let prune_modes = config.prune.segments.clone();
|
||||
let factory = ProviderFactory::<NodeTypesWithDBAdapter<N, Arc<DatabaseEnv>>>::new(
|
||||
db,
|
||||
self.chain.clone(),
|
||||
static_file_provider,
|
||||
)
|
||||
rocksdb_provider,
|
||||
)?
|
||||
.with_prune_modes(prune_modes.clone());
|
||||
|
||||
// Check for consistency between database and static files.
|
||||
if let Some(unwind_target) = factory
|
||||
.static_file_provider()
|
||||
.check_consistency(&factory.provider()?, has_receipt_pruning)?
|
||||
if !access.is_read_only_inconsistent() &&
|
||||
let Some(unwind_target) =
|
||||
factory.static_file_provider().check_consistency(&factory.provider()?)?
|
||||
{
|
||||
if factory.db_ref().is_read_only()? {
|
||||
warn!(target: "reth::cli", ?unwind_target, "Inconsistent storage. Restart node to heal.");
|
||||
@@ -199,6 +226,8 @@ pub enum AccessRights {
|
||||
RW,
|
||||
/// Read-only access
|
||||
RO,
|
||||
/// Read-only access with possibly inconsistent data
|
||||
RoInconsistent,
|
||||
}
|
||||
|
||||
impl AccessRights {
|
||||
@@ -206,6 +235,12 @@ impl AccessRights {
|
||||
pub const fn is_read_write(&self) -> bool {
|
||||
matches!(self, Self::RW)
|
||||
}
|
||||
|
||||
/// Returns `true` if it requires read-only access to the environment and tolerates possibly
/// inconsistent data.
|
||||
pub const fn is_read_only_inconsistent(&self) -> bool {
|
||||
matches!(self, Self::RoInconsistent)
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper alias to satisfy `FullNodeTypes` bound on [`Node`] trait generic.
|
||||
@@ -215,17 +250,6 @@ type FullTypesAdapter<T> = FullNodeTypesAdapter<
|
||||
BlockchainProvider<NodeTypesWithDBAdapter<T, Arc<DatabaseEnv>>>,
|
||||
>;
|
||||
|
||||
/// Trait for block headers that can be modified through CLI operations.
|
||||
pub trait CliHeader {
|
||||
fn set_number(&mut self, number: u64);
|
||||
}
|
||||
|
||||
impl CliHeader for alloy_consensus::Header {
|
||||
fn set_number(&mut self, number: u64) {
|
||||
self.number = number;
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper trait with a common set of requirements for the
|
||||
/// [`NodeTypes`] in CLI.
|
||||
pub trait CliNodeTypes: Node<FullTypesAdapter<Self>> + NodeTypesForProvider {
|
||||
|
||||
@@ -22,13 +22,14 @@ impl Command {
|
||||
let config = if self.default {
|
||||
Config::default()
|
||||
} else {
|
||||
let path = self.config.clone().unwrap_or_default();
|
||||
// Check if the file exists
|
||||
let path = match self.config.as_ref() {
|
||||
Some(path) => path,
|
||||
None => bail!("No config file provided. Use --config <FILE> or pass --default"),
|
||||
};
|
||||
if !path.exists() {
|
||||
bail!("Config file does not exist: {}", path.display());
|
||||
}
|
||||
// Read the configuration file
|
||||
Config::from_path(&path)
|
||||
Config::from_path(path)
|
||||
.wrap_err_with(|| format!("Could not load config file: {}", path.display()))?
|
||||
};
|
||||
println!("{}", toml::to_string_pretty(&config)?);
|
||||
|
||||
92
crates/cli/commands/src/db/account_storage.rs
Normal file
@@ -0,0 +1,92 @@
|
||||
use alloy_primitives::{keccak256, Address};
|
||||
use clap::Parser;
|
||||
use human_bytes::human_bytes;
|
||||
use reth_codecs::Compact;
|
||||
use reth_db_api::{cursor::DbDupCursorRO, database::Database, tables, transaction::DbTx};
|
||||
use reth_db_common::DbTool;
|
||||
use reth_node_builder::NodeTypesWithDB;
|
||||
use std::time::{Duration, Instant};
|
||||
use tracing::info;
|
||||
|
||||
/// Log progress every 5 seconds
|
||||
const LOG_INTERVAL: Duration = Duration::from_secs(5);
|
||||
|
||||
/// The arguments for the `reth db account-storage` command
|
||||
#[derive(Parser, Debug)]
|
||||
pub struct Command {
|
||||
/// The account address to check storage for
|
||||
address: Address,
|
||||
}
|
||||
|
||||
impl Command {
|
||||
/// Execute `db account-storage` command
|
||||
pub fn execute<N: NodeTypesWithDB>(self, tool: &DbTool<N>) -> eyre::Result<()> {
|
||||
let address = self.address;
|
||||
let (slot_count, plain_size) = tool.provider_factory.db_ref().view(|tx| {
|
||||
let mut cursor = tx.cursor_dup_read::<tables::PlainStorageState>()?;
|
||||
let mut count = 0usize;
|
||||
let mut total_value_bytes = 0usize;
|
||||
let mut last_log = Instant::now();
|
||||
|
||||
// Walk all storage entries for this address
|
||||
let walker = cursor.walk_dup(Some(address), None)?;
|
||||
for entry in walker {
|
||||
let (_, storage_entry) = entry?;
|
||||
count += 1;
|
||||
// StorageEntry encodes as: 32 bytes (key/subkey uncompressed) + compressed U256
|
||||
let mut buf = Vec::new();
|
||||
let entry_len = storage_entry.to_compact(&mut buf);
|
||||
total_value_bytes += entry_len;
|
||||
|
||||
if last_log.elapsed() >= LOG_INTERVAL {
|
||||
info!(
|
||||
target: "reth::cli",
|
||||
address = %address,
|
||||
slots = count,
|
||||
key = %storage_entry.key,
|
||||
"Processing storage slots"
|
||||
);
|
||||
last_log = Instant::now();
|
||||
}
|
||||
}
|
||||
|
||||
// Add 20 bytes for the Address key (stored once per account in dupsort)
|
||||
let total_size = if count > 0 { 20 + total_value_bytes } else { 0 };
|
||||
|
||||
Ok::<_, eyre::Report>((count, total_size))
|
||||
})??;
|
||||
|
||||
// Estimate hashed storage size: 32-byte B256 key instead of 20-byte Address
|
||||
let hashed_size_estimate = if slot_count > 0 { plain_size + 12 } else { 0 };
|
||||
let total_estimate = plain_size + hashed_size_estimate;
|
||||
|
||||
let hashed_address = keccak256(address);
|
||||
|
||||
println!("Account: {address}");
|
||||
println!("Hashed address: {hashed_address}");
|
||||
println!("Storage slots: {slot_count}");
|
||||
println!("Plain storage size: {} (estimated)", human_bytes(plain_size as f64));
|
||||
println!("Hashed storage size: {} (estimated)", human_bytes(hashed_size_estimate as f64));
|
||||
println!("Total estimated size: {}", human_bytes(total_estimate as f64));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn parse_address_arg() {
|
||||
let cmd = Command::try_parse_from([
|
||||
"account-storage",
|
||||
"0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045",
|
||||
])
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
cmd.address,
|
||||
"0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045".parse::<Address>().unwrap()
|
||||
);
|
||||
}
|
||||
}
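The size figures printed above are plain arithmetic over the dupsort layout: the 20-byte Address key is counted once, each slot contributes its Compact-encoded StorageEntry length, and the hashed-table figure only swaps the 20-byte key for a 32-byte keccak256(address), i.e. plain + 12 bytes. A hedged sketch of the same calculation with a hypothetical helper, plus an example invocation using the address from the test:

// Illustrative only; mirrors the estimate computed by the command above.
fn estimate_sizes(slots: usize, total_entry_bytes: usize) -> (usize, usize) {
    let plain = if slots > 0 { 20 + total_entry_bytes } else { 0 };
    let hashed = if slots > 0 { plain + 12 } else { 0 }; // 32-byte hashed key vs 20-byte address
    (plain, hashed)
}

// Assumed CLI shape (kebab-case subcommand):
// reth db account-storage 0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045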
|
||||
@@ -6,8 +6,9 @@ use reth_db_api::{
|
||||
transaction::{DbTx, DbTxMut},
|
||||
TableViewer, Tables,
|
||||
};
|
||||
use reth_db_common::DbTool;
|
||||
use reth_node_builder::NodeTypesWithDB;
|
||||
use reth_provider::{ProviderFactory, StaticFileProviderFactory};
|
||||
use reth_provider::StaticFileProviderFactory;
|
||||
use reth_static_file_types::StaticFileSegment;
|
||||
|
||||
/// The arguments for the `reth db clear` command
|
||||
@@ -19,16 +20,13 @@ pub struct Command {
|
||||
|
||||
impl Command {
|
||||
/// Execute `db clear` command
|
||||
pub fn execute<N: NodeTypesWithDB>(
|
||||
self,
|
||||
provider_factory: ProviderFactory<N>,
|
||||
) -> eyre::Result<()> {
|
||||
pub fn execute<N: NodeTypesWithDB>(self, tool: &DbTool<N>) -> eyre::Result<()> {
|
||||
match self.subcommand {
|
||||
Subcommands::Mdbx { table } => {
|
||||
table.view(&ClearViewer { db: provider_factory.db_ref() })?
|
||||
table.view(&ClearViewer { db: tool.provider_factory.db_ref() })?
|
||||
}
|
||||
Subcommands::StaticFile { segment } => {
|
||||
let static_file_provider = provider_factory.static_file_provider();
|
||||
let static_file_provider = tool.provider_factory.static_file_provider();
|
||||
let static_files = iter_static_files(static_file_provider.directory())?;
|
||||
|
||||
if let Some(segment_static_files) = static_files.get(&segment) {
|
||||
|
||||
@@ -3,16 +3,22 @@ use clap::Parser;
|
||||
use reth_db::{
|
||||
static_file::{
|
||||
ColumnSelectorOne, ColumnSelectorTwo, HeaderWithHashMask, ReceiptMask, TransactionMask,
|
||||
TransactionSenderMask,
|
||||
},
|
||||
RawDupSort,
|
||||
};
|
||||
use reth_db_api::{
|
||||
table::{Decompress, DupSort, Table},
|
||||
tables, RawKey, RawTable, Receipts, TableViewer, Transactions,
|
||||
cursor::{DbCursorRO, DbDupCursorRO},
|
||||
database::Database,
|
||||
table::{Compress, Decompress, DupSort, Table},
|
||||
tables,
|
||||
transaction::DbTx,
|
||||
RawKey, RawTable, Receipts, TableViewer, Transactions,
|
||||
};
|
||||
use reth_db_common::DbTool;
|
||||
use reth_node_api::{HeaderTy, ReceiptTy, TxTy};
|
||||
use reth_node_builder::NodeTypesWithDB;
|
||||
use reth_primitives_traits::ValueWithSubKey;
|
||||
use reth_provider::{providers::ProviderNodeTypes, StaticFileProviderFactory};
|
||||
use reth_static_file_types::StaticFileSegment;
|
||||
use tracing::error;
|
||||
@@ -38,6 +44,14 @@ enum Subcommand {
|
||||
#[arg(value_parser = maybe_json_value_parser)]
|
||||
subkey: Option<String>,
|
||||
|
||||
/// Optional end key for range query (exclusive upper bound)
|
||||
#[arg(value_parser = maybe_json_value_parser)]
|
||||
end_key: Option<String>,
|
||||
|
||||
/// Optional end subkey for range query (exclusive upper bound)
|
||||
#[arg(value_parser = maybe_json_value_parser)]
|
||||
end_subkey: Option<String>,
|
||||
|
||||
/// Output bytes instead of human-readable decoded value
|
||||
#[arg(long)]
|
||||
raw: bool,
|
||||
@@ -60,8 +74,8 @@ impl Command {
|
||||
/// Execute `db get` command
|
||||
pub fn execute<N: ProviderNodeTypes>(self, tool: &DbTool<N>) -> eyre::Result<()> {
|
||||
match self.subcommand {
|
||||
Subcommand::Mdbx { table, key, subkey, raw } => {
|
||||
table.view(&GetValueViewer { tool, key, subkey, raw })?
|
||||
Subcommand::Mdbx { table, key, subkey, end_key, end_subkey, raw } => {
|
||||
table.view(&GetValueViewer { tool, key, subkey, end_key, end_subkey, raw })?
|
||||
}
|
||||
Subcommand::StaticFile { segment, key, raw } => {
|
||||
let (key, mask): (u64, _) = match segment {
|
||||
@@ -75,19 +89,21 @@ impl Command {
|
||||
StaticFileSegment::Receipts => {
|
||||
(table_key::<tables::Receipts>(&key)?, <ReceiptMask<ReceiptTy<N>>>::MASK)
|
||||
}
|
||||
StaticFileSegment::TransactionSenders => (
|
||||
table_key::<tables::TransactionSenders>(&key)?,
|
||||
<TransactionSenderMask>::MASK,
|
||||
),
|
||||
};
|
||||
|
||||
let content = tool.provider_factory.static_file_provider().find_static_file(
|
||||
segment,
|
||||
|provider| {
|
||||
let mut cursor = provider.cursor()?;
|
||||
cursor.get(key.into(), mask).map(|result| {
|
||||
result.map(|vec| {
|
||||
vec.iter().map(|slice| slice.to_vec()).collect::<Vec<_>>()
|
||||
})
|
||||
})
|
||||
},
|
||||
)?;
|
||||
let content = tool
|
||||
.provider_factory
|
||||
.static_file_provider()
|
||||
.get_segment_provider(segment, key)?
|
||||
.cursor()?
|
||||
.get(key.into(), mask)
|
||||
.map(|result| {
|
||||
result.map(|vec| vec.iter().map(|slice| slice.to_vec()).collect::<Vec<_>>())
|
||||
})?;
|
||||
|
||||
match content {
|
||||
Some(content) => {
|
||||
@@ -116,6 +132,13 @@ impl Command {
|
||||
)?;
|
||||
println!("{}", serde_json::to_string_pretty(&receipt)?);
|
||||
}
|
||||
StaticFileSegment::TransactionSenders => {
|
||||
let sender =
|
||||
<<tables::TransactionSenders as Table>::Value>::decompress(
|
||||
content[0].as_slice(),
|
||||
)?;
|
||||
println!("{}", serde_json::to_string_pretty(&sender)?);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -144,6 +167,8 @@ struct GetValueViewer<'a, N: NodeTypesWithDB> {
|
||||
tool: &'a DbTool<N>,
|
||||
key: String,
|
||||
subkey: Option<String>,
|
||||
end_key: Option<String>,
|
||||
end_subkey: Option<String>,
|
||||
raw: bool,
|
||||
}
|
||||
|
||||
@@ -153,53 +178,158 @@ impl<N: ProviderNodeTypes> TableViewer<()> for GetValueViewer<'_, N> {
|
||||
fn view<T: Table>(&self) -> Result<(), Self::Error> {
|
||||
let key = table_key::<T>(&self.key)?;
|
||||
|
||||
let content = if self.raw {
|
||||
self.tool
|
||||
.get::<RawTable<T>>(RawKey::from(key))?
|
||||
.map(|content| hex::encode_prefixed(content.raw_value()))
|
||||
} else {
|
||||
self.tool.get::<T>(key)?.as_ref().map(serde_json::to_string_pretty).transpose()?
|
||||
};
|
||||
// A non-dupsort table has no subkeys, so the `subkey` argument is reinterpreted as the `end_key`.
// First make sure `end_key` and `end_subkey` weren't also provided, since that combination is
// invalid.
|
||||
if self.end_key.is_some() || self.end_subkey.is_some() {
|
||||
return Err(eyre::eyre!("Only END_KEY can be given for non-DUPSORT tables"));
|
||||
}
|
||||
|
||||
match content {
|
||||
Some(content) => {
|
||||
println!("{content}");
|
||||
}
|
||||
None => {
|
||||
error!(target: "reth::cli", "No content for the given table key.");
|
||||
}
|
||||
};
|
||||
let end_key = self.subkey.clone();
|
||||
|
||||
// Check if we're doing a range query
|
||||
if let Some(ref end_key_str) = end_key {
|
||||
let end_key = table_key::<T>(end_key_str)?;
|
||||
|
||||
// Use walk_range to iterate over the range
|
||||
self.tool.provider_factory.db_ref().view(|tx| {
|
||||
let mut cursor = tx.cursor_read::<T>()?;
|
||||
let walker = cursor.walk_range(key..end_key)?;
|
||||
|
||||
for result in walker {
|
||||
let (k, v) = result?;
|
||||
let json_val = if self.raw {
|
||||
let raw_key = RawKey::from(k);
|
||||
serde_json::json!({
|
||||
"key": hex::encode_prefixed(raw_key.raw_key()),
|
||||
"val": hex::encode_prefixed(v.compress().as_ref()),
|
||||
})
|
||||
} else {
|
||||
serde_json::json!({
|
||||
"key": &k,
|
||||
"val": &v,
|
||||
})
|
||||
};
|
||||
|
||||
println!("{}", serde_json::to_string_pretty(&json_val)?);
|
||||
}
|
||||
|
||||
Ok::<_, eyre::Report>(())
|
||||
})??;
|
||||
} else {
|
||||
// Single key lookup
|
||||
let content = if self.raw {
|
||||
self.tool
|
||||
.get::<RawTable<T>>(RawKey::from(key))?
|
||||
.map(|content| hex::encode_prefixed(content.raw_value()))
|
||||
} else {
|
||||
self.tool.get::<T>(key)?.as_ref().map(serde_json::to_string_pretty).transpose()?
|
||||
};
|
||||
|
||||
match content {
|
||||
Some(content) => {
|
||||
println!("{content}");
|
||||
}
|
||||
None => {
|
||||
error!(target: "reth::cli", "No content for the given table key.");
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn view_dupsort<T: DupSort>(&self) -> Result<(), Self::Error> {
|
||||
fn view_dupsort<T: DupSort>(&self) -> Result<(), Self::Error>
|
||||
where
|
||||
T::Value: reth_primitives_traits::ValueWithSubKey<SubKey = T::SubKey>,
|
||||
{
|
||||
// get a key for given table
|
||||
let key = table_key::<T>(&self.key)?;
|
||||
|
||||
// process dupsort table
|
||||
let subkey = table_subkey::<T>(self.subkey.as_deref())?;
|
||||
|
||||
let content = if self.raw {
|
||||
self.tool
|
||||
.get_dup::<RawDupSort<T>>(RawKey::from(key), RawKey::from(subkey))?
|
||||
.map(|content| hex::encode_prefixed(content.raw_value()))
|
||||
} else {
|
||||
self.tool
|
||||
.get_dup::<T>(key, subkey)?
|
||||
// Check if we're doing a range query
|
||||
if let Some(ref end_key_str) = self.end_key {
|
||||
let end_key = table_key::<T>(end_key_str)?;
|
||||
let start_subkey = table_subkey::<T>(Some(
|
||||
self.subkey.as_ref().expect("must have been given if end_key is given").as_str(),
|
||||
))?;
|
||||
let end_subkey_parsed = self
|
||||
.end_subkey
|
||||
.as_ref()
|
||||
.map(serde_json::to_string_pretty)
|
||||
.transpose()?
|
||||
};
|
||||
.map(|s| table_subkey::<T>(Some(s.as_str())))
|
||||
.transpose()?;
|
||||
|
||||
match content {
|
||||
Some(content) => {
|
||||
println!("{content}");
|
||||
}
|
||||
None => {
|
||||
error!(target: "reth::cli", "No content for the given table subkey.");
|
||||
}
|
||||
};
|
||||
self.tool.provider_factory.db_ref().view(|tx| {
|
||||
let mut cursor = tx.cursor_dup_read::<T>()?;
|
||||
|
||||
// Seek to the starting key. If there is actually a key at the starting key then
|
||||
// seek to the subkey within it.
|
||||
if let Some((decoded_key, _)) = cursor.seek(key.clone())? &&
|
||||
decoded_key == key
|
||||
{
|
||||
cursor.seek_by_key_subkey(key.clone(), start_subkey.clone())?;
|
||||
}
|
||||
|
||||
// Get the current position to start iteration
|
||||
let mut current = cursor.current()?;
|
||||
|
||||
while let Some((decoded_key, decoded_value)) = current {
|
||||
// Extract the subkey using the ValueWithSubKey trait
|
||||
let decoded_subkey = decoded_value.get_subkey();
|
||||
|
||||
// Check if we've reached the end (exclusive)
|
||||
if (&decoded_key, Some(&decoded_subkey)) >=
|
||||
(&end_key, end_subkey_parsed.as_ref())
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
// Output the entry with both key and subkey
|
||||
let json_val = if self.raw {
|
||||
let raw_key = RawKey::from(decoded_key.clone());
|
||||
serde_json::json!({
|
||||
"key": hex::encode_prefixed(raw_key.raw_key()),
|
||||
"val": hex::encode_prefixed(decoded_value.compress().as_ref()),
|
||||
})
|
||||
} else {
|
||||
serde_json::json!({
|
||||
"key": &decoded_key,
|
||||
"val": &decoded_value,
|
||||
})
|
||||
};
|
||||
|
||||
println!("{}", serde_json::to_string_pretty(&json_val)?);
|
||||
|
||||
// Move to next entry
|
||||
current = cursor.next()?;
|
||||
}
|
||||
|
||||
Ok::<_, eyre::Report>(())
|
||||
})??;
|
||||
} else {
|
||||
// Single key/subkey lookup
|
||||
let subkey = table_subkey::<T>(self.subkey.as_deref())?;
|
||||
|
||||
let content = if self.raw {
|
||||
self.tool
|
||||
.get_dup::<RawDupSort<T>>(RawKey::from(key), RawKey::from(subkey))?
|
||||
.map(|content| hex::encode_prefixed(content.raw_value()))
|
||||
} else {
|
||||
self.tool
|
||||
.get_dup::<T>(key, subkey)?
|
||||
.as_ref()
|
||||
.map(serde_json::to_string_pretty)
|
||||
.transpose()?
|
||||
};
|
||||
|
||||
match content {
|
||||
Some(content) => {
|
||||
println!("{content}");
|
||||
}
|
||||
None => {
|
||||
error!(target: "reth::cli", "No content for the given table subkey.");
|
||||
}
|
||||
};
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
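The exclusive upper bound in the dupsort walk above relies on Rust's lexicographic tuple ordering together with None sorting before Some(_): omitting the end subkey therefore makes the end key itself fully exclusive, while supplying one only cuts off subkeys at or above that value. A sketch with integers standing in for the key and subkey types (illustrative only):

// Hedged illustration of the bound check used in view_dupsort above.
#[test]
fn end_bound_is_exclusive() {
    let (key, subkey, end_key) = (5u64, 7u64, 5u64);
    // No end subkey given: any entry at the end key terminates the walk.
    let no_end_subkey: Option<u64> = None;
    assert!((&key, Some(&subkey)) >= (&end_key, no_end_subkey.as_ref()));
    // End subkey given: entries below it at the end key are still emitted.
    let end_subkey = Some(9u64);
    assert!((&key, Some(&subkey)) < (&end_key, end_subkey.as_ref()));
}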
|
||||
|
||||
@@ -3,7 +3,7 @@ use alloy_primitives::hex;
|
||||
use clap::Parser;
|
||||
use eyre::WrapErr;
|
||||
use reth_chainspec::EthereumHardforks;
|
||||
use reth_db::DatabaseEnv;
|
||||
use reth_db::{transaction::DbTx, DatabaseEnv};
|
||||
use reth_db_api::{database::Database, table::Table, RawValue, TableViewer, Tables};
|
||||
use reth_db_common::{DbTool, ListFilter};
|
||||
use reth_node_builder::{NodeTypes, NodeTypesWithDBAdapter};
|
||||
@@ -96,6 +96,9 @@ impl<N: NodeTypes> TableViewer<()> for ListTableViewer<'_, N> {
|
||||
|
||||
fn view<T: Table>(&self) -> Result<(), Self::Error> {
|
||||
self.tool.provider_factory.db_ref().view(|tx| {
|
||||
// We may be using the tui for a long time
|
||||
tx.disable_long_read_transaction_safety();
|
||||
|
||||
let table_db = tx.inner.open_db(Some(self.args.table.name())).wrap_err("Could not open db.")?;
|
||||
let stats = tx.inner.db_stat(&table_db).wrap_err(format!("Could not find table: {}", self.args.table.name()))?;
|
||||
let total_entries = stats.entries();
|
||||
|
||||
@@ -2,18 +2,22 @@ use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
|
||||
use clap::{Parser, Subcommand};
|
||||
use reth_chainspec::{EthChainSpec, EthereumHardforks};
|
||||
use reth_cli::chainspec::ChainSpecParser;
|
||||
use reth_cli_runner::CliContext;
|
||||
use reth_db::version::{get_db_version, DatabaseVersionError, DB_VERSION};
|
||||
use reth_db_common::DbTool;
|
||||
use std::{
|
||||
io::{self, Write},
|
||||
sync::Arc,
|
||||
};
|
||||
mod account_storage;
|
||||
mod checksum;
|
||||
mod clear;
|
||||
mod diff;
|
||||
mod get;
|
||||
mod list;
|
||||
mod repair_trie;
|
||||
mod settings;
|
||||
mod static_file_header;
|
||||
mod stats;
|
||||
/// DB List TUI
|
||||
mod tui;
|
||||
@@ -51,16 +55,23 @@ pub enum Subcommands {
|
||||
Clear(clear::Command),
|
||||
/// Verifies trie consistency and outputs any inconsistencies
|
||||
RepairTrie(repair_trie::Command),
|
||||
/// Reads and displays the static file segment header
|
||||
StaticFileHeader(static_file_header::Command),
|
||||
/// Lists current and local database versions
|
||||
Version,
|
||||
/// Returns the full database path
|
||||
Path,
|
||||
/// Manage storage settings
|
||||
Settings(settings::Command),
|
||||
/// Gets storage size information for an account
|
||||
AccountStorage(account_storage::Command),
|
||||
}
|
||||
|
||||
/// `db_ro_exec` opens a database in read-only mode, and then execute with the provided command
|
||||
macro_rules! db_ro_exec {
|
||||
($env:expr, $tool:ident, $N:ident, $command:block) => {
|
||||
let Environment { provider_factory, .. } = $env.init::<$N>(AccessRights::RO)?;
|
||||
/// Initializes a provider factory with the specified access rights, and then executes the provided
/// command
|
||||
macro_rules! db_exec {
|
||||
($env:expr, $tool:ident, $N:ident, $access_rights:expr, $command:block) => {
|
||||
let Environment { provider_factory, .. } = $env.init::<$N>($access_rights)?;
|
||||
|
||||
let $tool = DbTool::new(provider_factory)?;
|
||||
$command;
|
||||
@@ -69,7 +80,10 @@ macro_rules! db_ro_exec {
|
||||
|
||||
impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Command<C> {
|
||||
/// Execute `db` command
|
||||
pub async fn execute<N: CliNodeTypes<ChainSpec = C::ChainSpec>>(self) -> eyre::Result<()> {
|
||||
pub async fn execute<N: CliNodeTypes<ChainSpec = C::ChainSpec>>(
|
||||
self,
|
||||
ctx: CliContext,
|
||||
) -> eyre::Result<()> {
|
||||
let data_dir = self.env.datadir.clone().resolve_datadir(self.env.chain.chain());
|
||||
let db_path = data_dir.db();
|
||||
let static_files_path = data_dir.static_files();
|
||||
@@ -88,27 +102,32 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Command<C>
|
||||
match self.command {
|
||||
// TODO: We'll need to add this on the DB trait.
|
||||
Subcommands::Stats(command) => {
|
||||
db_ro_exec!(self.env, tool, N, {
|
||||
let access_rights = if command.skip_consistency_checks {
|
||||
AccessRights::RoInconsistent
|
||||
} else {
|
||||
AccessRights::RO
|
||||
};
|
||||
db_exec!(self.env, tool, N, access_rights, {
|
||||
command.execute(data_dir, &tool)?;
|
||||
});
|
||||
}
|
||||
Subcommands::List(command) => {
|
||||
db_ro_exec!(self.env, tool, N, {
|
||||
db_exec!(self.env, tool, N, AccessRights::RO, {
|
||||
command.execute(&tool)?;
|
||||
});
|
||||
}
|
||||
Subcommands::Checksum(command) => {
|
||||
db_ro_exec!(self.env, tool, N, {
|
||||
db_exec!(self.env, tool, N, AccessRights::RO, {
|
||||
command.execute(&tool)?;
|
||||
});
|
||||
}
|
||||
Subcommands::Diff(command) => {
|
||||
db_ro_exec!(self.env, tool, N, {
|
||||
db_exec!(self.env, tool, N, AccessRights::RO, {
|
||||
command.execute(&tool)?;
|
||||
});
|
||||
}
|
||||
Subcommands::Get(command) => {
|
||||
db_ro_exec!(self.env, tool, N, {
|
||||
db_exec!(self.env, tool, N, AccessRights::RO, {
|
||||
command.execute(&tool)?;
|
||||
});
|
||||
}
|
||||
@@ -130,19 +149,26 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Command<C>
|
||||
}
|
||||
}
|
||||
|
||||
let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RW)?;
|
||||
let tool = DbTool::new(provider_factory)?;
|
||||
tool.drop(db_path, static_files_path, exex_wal_path)?;
|
||||
db_exec!(self.env, tool, N, AccessRights::RW, {
|
||||
tool.drop(db_path, static_files_path, exex_wal_path)?;
|
||||
});
|
||||
}
|
||||
Subcommands::Clear(command) => {
|
||||
let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RW)?;
|
||||
command.execute(provider_factory)?;
|
||||
db_exec!(self.env, tool, N, AccessRights::RW, {
|
||||
command.execute(&tool)?;
|
||||
});
|
||||
}
|
||||
Subcommands::RepairTrie(command) => {
|
||||
let access_rights =
|
||||
if command.dry_run { AccessRights::RO } else { AccessRights::RW };
|
||||
let Environment { provider_factory, .. } = self.env.init::<N>(access_rights)?;
|
||||
command.execute(provider_factory)?;
|
||||
db_exec!(self.env, tool, N, access_rights, {
|
||||
command.execute(&tool, ctx.task_executor.clone())?;
|
||||
});
|
||||
}
|
||||
Subcommands::StaticFileHeader(command) => {
|
||||
db_exec!(self.env, tool, N, AccessRights::RoInconsistent, {
|
||||
command.execute(&tool)?;
|
||||
});
|
||||
}
|
||||
Subcommands::Version => {
|
||||
let local_db_version = match get_db_version(&db_path) {
|
||||
@@ -162,6 +188,16 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> Command<C>
|
||||
Subcommands::Path => {
|
||||
println!("{}", db_path.display());
|
||||
}
|
||||
Subcommands::Settings(command) => {
|
||||
db_exec!(self.env, tool, N, command.access_rights(), {
|
||||
command.execute(&tool)?;
|
||||
});
|
||||
}
|
||||
Subcommands::AccountStorage(command) => {
|
||||
db_exec!(self.env, tool, N, AccessRights::RO, {
|
||||
command.execute(&tool)?;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -1,20 +1,34 @@
|
||||
use clap::Parser;
|
||||
use metrics::{self, Counter};
|
||||
use reth_chainspec::EthChainSpec;
|
||||
use reth_cli_util::parse_socket_address;
|
||||
use reth_db_api::{
|
||||
cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO},
|
||||
database::Database,
|
||||
tables,
|
||||
transaction::{DbTx, DbTxMut},
|
||||
};
|
||||
use reth_node_builder::NodeTypesWithDB;
|
||||
use reth_provider::{providers::ProviderNodeTypes, ProviderFactory, StageCheckpointReader};
|
||||
use reth_db_common::DbTool;
|
||||
use reth_node_core::version::version_metadata;
|
||||
use reth_node_metrics::{
|
||||
chain::ChainSpecInfo,
|
||||
hooks::Hooks,
|
||||
server::{MetricServer, MetricServerConfig},
|
||||
version::VersionInfo,
|
||||
};
|
||||
use reth_provider::{providers::ProviderNodeTypes, ChainSpecProvider, StageCheckpointReader};
|
||||
use reth_stages::StageId;
|
||||
use reth_tasks::TaskExecutor;
|
||||
use reth_trie::{
|
||||
verify::{Output, Verifier},
|
||||
Nibbles,
|
||||
};
|
||||
use reth_trie_common::{StorageTrieEntry, StoredNibbles, StoredNibblesSubKey};
|
||||
use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory};
|
||||
use std::time::{Duration, Instant};
|
||||
use std::{
|
||||
net::SocketAddr,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use tracing::{info, warn};
|
||||
|
||||
const PROGRESS_PERIOD: Duration = Duration::from_secs(5);
|
||||
@@ -25,27 +39,74 @@ pub struct Command {
|
||||
/// Only show inconsistencies without making any repairs
|
||||
#[arg(long)]
|
||||
pub(crate) dry_run: bool,
|
||||
|
||||
/// Enable Prometheus metrics.
|
||||
///
|
||||
/// The metrics will be served at the given interface and port.
|
||||
#[arg(long = "metrics", value_name = "ADDR:PORT", value_parser = parse_socket_address)]
|
||||
pub(crate) metrics: Option<SocketAddr>,
|
||||
}
|
||||
|
||||
impl Command {
|
||||
/// Execute `db repair-trie` command
|
||||
pub fn execute<N: ProviderNodeTypes>(
|
||||
self,
|
||||
provider_factory: ProviderFactory<N>,
|
||||
tool: &DbTool<N>,
|
||||
task_executor: TaskExecutor,
|
||||
) -> eyre::Result<()> {
|
||||
if self.dry_run {
|
||||
verify_only(provider_factory)?
|
||||
// Set up metrics server if requested
|
||||
let _metrics_handle = if let Some(listen_addr) = self.metrics {
|
||||
let chain_name = tool.provider_factory.chain_spec().chain().to_string();
|
||||
let executor = task_executor.clone();
|
||||
|
||||
let handle = task_executor.spawn_critical("metrics server", async move {
|
||||
let config = MetricServerConfig::new(
|
||||
listen_addr,
|
||||
VersionInfo {
|
||||
version: version_metadata().cargo_pkg_version.as_ref(),
|
||||
build_timestamp: version_metadata().vergen_build_timestamp.as_ref(),
|
||||
cargo_features: version_metadata().vergen_cargo_features.as_ref(),
|
||||
git_sha: version_metadata().vergen_git_sha.as_ref(),
|
||||
target_triple: version_metadata().vergen_cargo_target_triple.as_ref(),
|
||||
build_profile: version_metadata().build_profile_name.as_ref(),
|
||||
},
|
||||
ChainSpecInfo { name: chain_name },
|
||||
executor,
|
||||
Hooks::builder().build(),
|
||||
);
|
||||
|
||||
// Spawn the metrics server
|
||||
if let Err(e) = MetricServer::new(config).serve().await {
|
||||
tracing::error!("Metrics server error: {}", e);
|
||||
}
|
||||
});
|
||||
|
||||
Some(handle)
|
||||
} else {
|
||||
verify_and_repair(provider_factory)?
|
||||
None
|
||||
};
|
||||
|
||||
if self.dry_run {
|
||||
verify_only(tool)?
|
||||
} else {
|
||||
verify_and_repair(tool)?
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
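With the metrics flag, the long-running verification can be observed from Prometheus while it walks the trie. Hedged invocation examples (flag names taken from the arg definitions above; the command path assumes clap's default kebab-case naming):

reth db repair-trie --dry-run --metrics 127.0.0.1:9001
reth db repair-trie --metrics 127.0.0.1:9001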
|
||||
|
||||
fn verify_only<N: NodeTypesWithDB>(provider_factory: ProviderFactory<N>) -> eyre::Result<()> {
|
||||
fn verify_only<N: ProviderNodeTypes>(tool: &DbTool<N>) -> eyre::Result<()> {
|
||||
// Log the database block tip from Finish stage checkpoint
|
||||
let finish_checkpoint = tool
|
||||
.provider_factory
|
||||
.provider()?
|
||||
.get_stage_checkpoint(StageId::Finish)?
|
||||
.unwrap_or_default();
|
||||
info!("Database block tip: {}", finish_checkpoint.block_number);
|
||||
|
||||
// Get a database transaction directly from the database
|
||||
let db = provider_factory.db_ref();
|
||||
let db = tool.provider_factory.db_ref();
|
||||
let mut tx = db.tx()?;
|
||||
tx.disable_long_read_transaction_safety();
|
||||
|
||||
@@ -54,6 +115,8 @@ fn verify_only<N: NodeTypesWithDB>(provider_factory: ProviderFactory<N>) -> eyre
|
||||
let trie_cursor_factory = DatabaseTrieCursorFactory::new(&tx);
|
||||
let verifier = Verifier::new(&trie_cursor_factory, hashed_cursor_factory)?;
|
||||
|
||||
let metrics = RepairTrieMetrics::new();
|
||||
|
||||
let mut inconsistent_nodes = 0;
|
||||
let start_time = Instant::now();
|
||||
let mut last_progress_time = Instant::now();
|
||||
@@ -70,6 +133,21 @@ fn verify_only<N: NodeTypesWithDB>(provider_factory: ProviderFactory<N>) -> eyre
|
||||
} else {
|
||||
warn!("Inconsistency found: {output:?}");
|
||||
inconsistent_nodes += 1;
|
||||
|
||||
// Record metrics based on output type
|
||||
match output {
|
||||
Output::AccountExtra(_, _) |
|
||||
Output::AccountWrong { .. } |
|
||||
Output::AccountMissing(_, _) => {
|
||||
metrics.account_inconsistencies.increment(1);
|
||||
}
|
||||
Output::StorageExtra(_, _, _) |
|
||||
Output::StorageWrong { .. } |
|
||||
Output::StorageMissing(_, _, _) => {
|
||||
metrics.storage_inconsistencies.increment(1);
|
||||
}
|
||||
Output::Progress(_) => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -114,11 +192,13 @@ fn verify_checkpoints(provider: impl StageCheckpointReader) -> eyre::Result<()>
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn verify_and_repair<N: ProviderNodeTypes>(
|
||||
provider_factory: ProviderFactory<N>,
|
||||
) -> eyre::Result<()> {
|
||||
fn verify_and_repair<N: ProviderNodeTypes>(tool: &DbTool<N>) -> eyre::Result<()> {
|
||||
// Get a read-write database provider
|
||||
let mut provider_rw = provider_factory.provider_rw()?;
|
||||
let mut provider_rw = tool.provider_factory.provider_rw()?;
|
||||
|
||||
// Log the database block tip from Finish stage checkpoint
|
||||
let finish_checkpoint = provider_rw.get_stage_checkpoint(StageId::Finish)?.unwrap_or_default();
|
||||
info!("Database block tip: {}", finish_checkpoint.block_number);
|
||||
|
||||
// Check that a pipeline sync isn't in progress.
|
||||
verify_checkpoints(provider_rw.as_ref())?;
|
||||
@@ -138,6 +218,8 @@ fn verify_and_repair<N: ProviderNodeTypes>(
|
||||
// Create the verifier
|
||||
let verifier = Verifier::new(&trie_cursor_factory, hashed_cursor_factory)?;
|
||||
|
||||
let metrics = RepairTrieMetrics::new();
|
||||
|
||||
let mut inconsistent_nodes = 0;
|
||||
let start_time = Instant::now();
|
||||
let mut last_progress_time = Instant::now();
|
||||
@@ -149,6 +231,21 @@ fn verify_and_repair<N: ProviderNodeTypes>(
|
||||
if !matches!(output, Output::Progress(_)) {
|
||||
warn!("Inconsistency found, will repair: {output:?}");
|
||||
inconsistent_nodes += 1;
|
||||
|
||||
// Record metrics based on output type
|
||||
match &output {
|
||||
Output::AccountExtra(_, _) |
|
||||
Output::AccountWrong { .. } |
|
||||
Output::AccountMissing(_, _) => {
|
||||
metrics.account_inconsistencies.increment(1);
|
||||
}
|
||||
Output::StorageExtra(_, _, _) |
|
||||
Output::StorageWrong { .. } |
|
||||
Output::StorageMissing(_, _, _) => {
|
||||
metrics.storage_inconsistencies.increment(1);
|
||||
}
|
||||
Output::Progress(_) => {}
|
||||
}
|
||||
}
|
||||
|
||||
match output {
|
||||
@@ -247,3 +344,25 @@ fn output_progress(last_account: Nibbles, start_time: Instant, inconsistent_node
|
||||
"Repairing trie tables",
|
||||
);
|
||||
}
|
||||
|
||||
/// Metrics for tracking trie repair inconsistencies
|
||||
#[derive(Debug)]
|
||||
struct RepairTrieMetrics {
|
||||
account_inconsistencies: Counter,
|
||||
storage_inconsistencies: Counter,
|
||||
}
|
||||
|
||||
impl RepairTrieMetrics {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
account_inconsistencies: metrics::counter!(
|
||||
"db.repair_trie.inconsistencies_found",
|
||||
"type" => "account"
|
||||
),
|
||||
storage_inconsistencies: metrics::counter!(
|
||||
"db.repair_trie.inconsistencies_found",
|
||||
"type" => "storage"
|
||||
),
|
||||
}
|
||||
}
|
||||
}
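Both counters share the metric name db.repair_trie.inconsistencies_found and differ only in the type label, so a scrape exposes them as two series of a single metric. A hypothetical exposition excerpt, for illustration only and assuming the usual exporter mapping of dots to underscores:

# hypothetical scrape output
# db_repair_trie_inconsistencies_found{type="account"} 3
# db_repair_trie_inconsistencies_found{type="storage"} 12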
|
||||
|
||||
127
crates/cli/commands/src/db/settings.rs
Normal file
@@ -0,0 +1,127 @@
|
||||
//! `reth db settings` command for managing storage settings
|
||||
|
||||
use clap::{ArgAction, Parser, Subcommand};
|
||||
use reth_db_common::DbTool;
|
||||
use reth_provider::{
|
||||
providers::ProviderNodeTypes, DBProvider, DatabaseProviderFactory, MetadataProvider,
|
||||
MetadataWriter, StorageSettings,
|
||||
};
|
||||
|
||||
use crate::common::AccessRights;
|
||||
|
||||
/// `reth db settings` subcommand
|
||||
#[derive(Debug, Parser)]
|
||||
pub struct Command {
|
||||
#[command(subcommand)]
|
||||
command: Subcommands,
|
||||
}
|
||||
|
||||
impl Command {
|
||||
/// Returns database access rights required for the command.
|
||||
pub fn access_rights(&self) -> AccessRights {
|
||||
match self.command {
|
||||
Subcommands::Get => AccessRights::RO,
|
||||
Subcommands::Set(_) => AccessRights::RW,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Subcommand)]
|
||||
enum Subcommands {
|
||||
/// Get current storage settings from database
|
||||
Get,
|
||||
/// Set storage settings in database
|
||||
#[clap(subcommand)]
|
||||
Set(SetCommand),
|
||||
}
|
||||
|
||||
/// Set storage settings
|
||||
#[derive(Debug, Clone, Copy, Subcommand)]
|
||||
#[clap(rename_all = "snake_case")]
|
||||
pub enum SetCommand {
|
||||
/// Store receipts in static files instead of the database
|
||||
ReceiptsInStaticFiles {
|
||||
#[clap(action(ArgAction::Set))]
|
||||
value: bool,
|
||||
},
|
||||
/// Store transaction senders in static files instead of the database
|
||||
TransactionSendersInStaticFiles {
|
||||
#[clap(action(ArgAction::Set))]
|
||||
value: bool,
|
||||
},
|
||||
}
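Because of the snake_case rename on the enum above, the set subcommands keep their field-style names on the CLI. Hedged examples, assuming the surrounding `reth db settings` command path:

reth db settings get
reth db settings set receipts_in_static_files true
reth db settings set transaction_senders_in_static_files false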
|
||||
|
||||
impl Command {
|
||||
/// Execute the command
|
||||
pub fn execute<N: ProviderNodeTypes>(self, tool: &DbTool<N>) -> eyre::Result<()> {
|
||||
match self.command {
|
||||
Subcommands::Get => self.get(tool),
|
||||
Subcommands::Set(cmd) => self.set(cmd, tool),
|
||||
}
|
||||
}
|
||||
|
||||
fn get<N: ProviderNodeTypes>(&self, tool: &DbTool<N>) -> eyre::Result<()> {
|
||||
// Read storage settings
|
||||
let provider = tool.provider_factory.provider()?;
|
||||
let storage_settings = provider.storage_settings()?;
|
||||
|
||||
// Display settings
|
||||
match storage_settings {
|
||||
Some(settings) => {
|
||||
println!("Current storage settings:");
|
||||
println!("{settings:#?}");
|
||||
}
|
||||
None => {
|
||||
println!("No storage settings found.");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn set<N: ProviderNodeTypes>(&self, cmd: SetCommand, tool: &DbTool<N>) -> eyre::Result<()> {
|
||||
// Read storage settings
|
||||
let provider_rw = tool.provider_factory.database_provider_rw()?;
|
||||
// Destructure the settings struct so newly added fields can't be silently missed here
|
||||
let settings = provider_rw.storage_settings()?;
|
||||
if settings.is_none() {
|
||||
println!("No storage settings found, creating new settings.");
|
||||
}
|
||||
|
||||
let mut settings @ StorageSettings {
|
||||
receipts_in_static_files: _,
|
||||
transaction_senders_in_static_files: _,
|
||||
storages_history_in_rocksdb: _,
|
||||
transaction_hash_numbers_in_rocksdb: _,
|
||||
account_history_in_rocksdb: _,
|
||||
} = settings.unwrap_or_else(StorageSettings::legacy);
|
||||
|
||||
// Update the setting based on the key
|
||||
match cmd {
|
||||
SetCommand::ReceiptsInStaticFiles { value } => {
|
||||
if settings.receipts_in_static_files == value {
|
||||
println!("receipts_in_static_files is already set to {}", value);
|
||||
return Ok(());
|
||||
}
|
||||
settings.receipts_in_static_files = value;
|
||||
println!("Set receipts_in_static_files = {}", value);
|
||||
}
|
||||
SetCommand::TransactionSendersInStaticFiles { value } => {
|
||||
if settings.transaction_senders_in_static_files == value {
|
||||
println!("transaction_senders_in_static_files is already set to {}", value);
|
||||
return Ok(());
|
||||
}
|
||||
settings.transaction_senders_in_static_files = value;
|
||||
println!("Set transaction_senders_in_static_files = {}", value);
|
||||
}
|
||||
}
|
||||
|
||||
// Write updated settings
|
||||
provider_rw.write_storage_settings(settings)?;
|
||||
provider_rw.commit()?;
|
||||
|
||||
println!("Storage settings updated successfully.");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
63
crates/cli/commands/src/db/static_file_header.rs
Normal file
@@ -0,0 +1,63 @@
|
||||
use clap::{Parser, Subcommand};
|
||||
use reth_db_common::DbTool;
|
||||
use reth_provider::{providers::ProviderNodeTypes, StaticFileProviderFactory};
|
||||
use reth_static_file_types::StaticFileSegment;
|
||||
use std::path::PathBuf;
|
||||
use tracing::warn;
|
||||
|
||||
/// The arguments for the `reth db static-file-header` command
|
||||
#[derive(Parser, Debug)]
|
||||
pub struct Command {
|
||||
#[command(subcommand)]
|
||||
source: Source,
|
||||
}
|
||||
|
||||
/// Source for locating the static file
|
||||
#[derive(Subcommand, Debug)]
|
||||
enum Source {
|
||||
/// Query by segment and block number
|
||||
Block {
|
||||
/// Static file segment
|
||||
#[arg(value_enum)]
|
||||
segment: StaticFileSegment,
|
||||
/// Block number to query
|
||||
block: u64,
|
||||
},
|
||||
/// Query by path to static file
|
||||
Path {
|
||||
/// Path to the static file
|
||||
path: PathBuf,
|
||||
},
|
||||
}
|
||||
|
||||
impl Command {
|
||||
/// Execute `db static-file-header` command
|
||||
pub fn execute<N: ProviderNodeTypes>(self, tool: &DbTool<N>) -> eyre::Result<()> {
|
||||
let static_file_provider = tool.provider_factory.static_file_provider();
|
||||
if let Err(err) = static_file_provider.check_consistency(&tool.provider_factory.provider()?)
|
||||
{
|
||||
warn!("Error checking consistency of static files: {err}");
|
||||
}
|
||||
|
||||
// Get the provider based on the source
|
||||
let provider = match self.source {
|
||||
Source::Path { path } => {
|
||||
static_file_provider.get_segment_provider_for_path(&path)?.ok_or_else(|| {
|
||||
eyre::eyre!("Could not find static file segment for path: {}", path.display())
|
||||
})?
|
||||
}
|
||||
Source::Block { segment, block } => {
|
||||
static_file_provider.get_segment_provider(segment, block)?
|
||||
}
|
||||
};
|
||||
|
||||
let header = provider.user_header();
|
||||
|
||||
println!("Segment: {}", header.segment());
|
||||
println!("Expected Block Range: {}", header.expected_block_range());
|
||||
println!("Block Range: {:?}", header.block_range());
|
||||
println!("Transaction Range: {:?}", header.tx_range());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -18,6 +18,10 @@ use std::{sync::Arc, time::Duration};
|
||||
#[derive(Parser, Debug)]
|
||||
/// The arguments for the `reth db stats` command
|
||||
pub struct Command {
|
||||
/// Skip consistency checks for static files.
|
||||
#[arg(long, default_value_t = false)]
|
||||
pub(crate) skip_consistency_checks: bool,
|
||||
|
||||
/// Show only the total size for static files.
|
||||
#[arg(long, default_value_t = false)]
|
||||
detailed_sizes: bool,
|
||||
@@ -191,10 +195,11 @@ impl Command {
|
||||
mut segment_config_size,
|
||||
) = (0, 0, 0, 0, 0, 0);
|
||||
|
||||
for (block_range, tx_range) in &ranges {
|
||||
let fixed_block_range = static_file_provider.find_fixed_range(block_range.start());
|
||||
for (block_range, header) in &ranges {
|
||||
let fixed_block_range =
|
||||
static_file_provider.find_fixed_range(segment, block_range.start());
|
||||
let jar_provider = static_file_provider
|
||||
.get_segment_provider(segment, || Some(fixed_block_range), None)?
|
||||
.get_segment_provider_for_range(segment, || Some(fixed_block_range), None)?
|
||||
.ok_or_else(|| {
|
||||
eyre::eyre!("Failed to get segment provider for segment: {}", segment)
|
||||
})?;
|
||||
@@ -220,7 +225,7 @@ impl Command {
|
||||
row.add_cell(Cell::new(segment))
|
||||
.add_cell(Cell::new(format!("{block_range}")))
|
||||
.add_cell(Cell::new(
|
||||
tx_range.map_or("N/A".to_string(), |tx_range| format!("{tx_range}")),
|
||||
header.tx_range().map_or("N/A".to_string(), |range| format!("{range}")),
|
||||
))
|
||||
.add_cell(Cell::new(format!("{columns} x {rows}")));
|
||||
if self.detailed_sizes {
|
||||
@@ -270,10 +275,12 @@ impl Command {
|
||||
let tx_range = {
|
||||
let start = ranges
|
||||
.iter()
|
||||
.find_map(|(_, tx_range)| tx_range.map(|r| r.start()))
|
||||
.find_map(|(_, header)| header.tx_range().map(|range| range.start()))
|
||||
.unwrap_or_default();
|
||||
let end =
|
||||
ranges.iter().rev().find_map(|(_, tx_range)| tx_range.map(|r| r.end()));
|
||||
let end = ranges
|
||||
.iter()
|
||||
.rev()
|
||||
.find_map(|(_, header)| header.tx_range().map(|range| range.end()));
|
||||
end.map(|end| SegmentRangeInclusive::new(start, end))
|
||||
};
|
||||
|
||||
|
||||
@@ -7,9 +7,10 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks};
|
||||
use reth_cli::chainspec::ChainSpecParser;
|
||||
use reth_fs_util as fs;
|
||||
use std::{
|
||||
borrow::Cow,
|
||||
io::{self, Read, Write},
|
||||
path::Path,
|
||||
sync::Arc,
|
||||
sync::{Arc, OnceLock},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use tar::Archive;
|
||||
@@ -22,24 +23,109 @@ const MERKLE_BASE_URL: &str = "https://downloads.merkle.io";
|
||||
const EXTENSION_TAR_LZ4: &str = ".tar.lz4";
|
||||
const EXTENSION_TAR_ZSTD: &str = ".tar.zst";
|
||||
|
||||
/// Global static download defaults
|
||||
static DOWNLOAD_DEFAULTS: OnceLock<DownloadDefaults> = OnceLock::new();
|
||||
|
||||
/// Download configuration defaults
|
||||
///
|
||||
/// Global defaults can be set via [`DownloadDefaults::try_init`].
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct DownloadDefaults {
|
||||
/// List of available snapshot sources
|
||||
pub available_snapshots: Vec<Cow<'static, str>>,
|
||||
/// Default base URL for snapshots
|
||||
pub default_base_url: Cow<'static, str>,
|
||||
/// Optional custom long help text that overrides the generated help
|
||||
pub long_help: Option<String>,
|
||||
}
|
||||
|
||||
impl DownloadDefaults {
|
||||
/// Initialize the global download defaults with this configuration
|
||||
pub fn try_init(self) -> Result<(), Self> {
|
||||
DOWNLOAD_DEFAULTS.set(self)
|
||||
}
|
||||
|
||||
/// Get a reference to the global download defaults
|
||||
pub fn get_global() -> &'static DownloadDefaults {
|
||||
DOWNLOAD_DEFAULTS.get_or_init(DownloadDefaults::default_download_defaults)
|
||||
}
|
||||
|
||||
/// Default download configuration with defaults from merkle.io and publicnode
|
||||
pub fn default_download_defaults() -> Self {
|
||||
Self {
|
||||
available_snapshots: vec![
|
||||
Cow::Borrowed("https://www.merkle.io/snapshots (default, mainnet archive)"),
|
||||
Cow::Borrowed("https://publicnode.com/snapshots (full nodes & testnets)"),
|
||||
],
|
||||
default_base_url: Cow::Borrowed(MERKLE_BASE_URL),
|
||||
long_help: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Generates the long help text for the download URL argument using these defaults.
|
||||
///
|
||||
/// If a custom long_help is set, it will be returned. Otherwise, help text is generated
|
||||
/// from the available_snapshots list.
|
||||
pub fn long_help(&self) -> String {
|
||||
if let Some(ref custom_help) = self.long_help {
|
||||
return custom_help.clone();
|
||||
}
|
||||
|
||||
let mut help = String::from(
|
||||
"Specify a snapshot URL or let the command propose a default one.\n\nAvailable snapshot sources:\n",
|
||||
);
|
||||
|
||||
for source in &self.available_snapshots {
|
||||
help.push_str("- ");
|
||||
help.push_str(source);
|
||||
help.push('\n');
|
||||
}
|
||||
|
||||
help.push_str(
|
||||
"\nIf no URL is provided, the latest mainnet archive snapshot\nwill be proposed for download from ",
|
||||
);
|
||||
help.push_str(self.default_base_url.as_ref());
|
||||
help
|
||||
}
|
||||
|
||||
/// Add a snapshot source to the list
|
||||
pub fn with_snapshot(mut self, source: impl Into<Cow<'static, str>>) -> Self {
|
||||
self.available_snapshots.push(source.into());
|
||||
self
|
||||
}
|
||||
|
||||
/// Replace all snapshot sources
|
||||
pub fn with_snapshots(mut self, sources: Vec<Cow<'static, str>>) -> Self {
|
||||
self.available_snapshots = sources;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the default base URL, e.g. `https://downloads.merkle.io`.
|
||||
pub fn with_base_url(mut self, url: impl Into<Cow<'static, str>>) -> Self {
|
||||
self.default_base_url = url.into();
|
||||
self
|
||||
}
|
||||
|
||||
/// Builder: Set custom long help text, overriding the generated help
|
||||
pub fn with_long_help(mut self, help: impl Into<String>) -> Self {
|
||||
self.long_help = Some(help.into());
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for DownloadDefaults {
|
||||
fn default() -> Self {
|
||||
Self::default_download_defaults()
|
||||
}
|
||||
}
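// Sketch (not part of the committed change): a downstream CLI could install its own defaults
// before clap parses arguments, so the generated `--url` long help lists its snapshot sources.
// The function name and URLs below are hypothetical placeholders.
fn install_example_download_defaults() {
    let defaults = DownloadDefaults::default()
        .with_base_url("https://snapshots.example.org")
        .with_snapshot("https://snapshots.example.org/archive (example source)");
    // `try_init` can only succeed once per process; a later call returns the rejected value.
    if defaults.try_init().is_err() {
        eprintln!("download defaults were already initialized, keeping existing values");
    }
    // Argument parsing happens after this point, so `DownloadDefaults::get_global()` picks up
    // the overridden values when building the `--url` long help.
}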
|
||||
|
||||
#[derive(Debug, Parser)]
|
||||
pub struct DownloadCommand<C: ChainSpecParser> {
|
||||
#[command(flatten)]
|
||||
env: EnvironmentArgs<C>,
|
||||
|
||||
#[arg(
|
||||
long,
|
||||
short,
|
||||
help = "Custom URL to download the snapshot from",
|
||||
long_help = "Specify a snapshot URL or let the command propose a default one.\n\
|
||||
\n\
|
||||
Available snapshot sources:\n\
|
||||
- https://www.merkle.io/snapshots (default, mainnet archive)\n\
|
||||
- https://publicnode.com/snapshots (full nodes & testnets)\n\
|
||||
\n\
|
||||
If no URL is provided, the latest mainnet archive snapshot\n\
|
||||
will be proposed for download from merkle.io"
|
||||
)]
|
||||
/// Custom URL to download the snapshot from
|
||||
#[arg(long, short, long_help = DownloadDefaults::get_global().long_help())]
|
||||
url: Option<String>,
|
||||
}
|
||||
|
||||
@@ -207,9 +293,10 @@ async fn stream_and_extract(url: &str, target_dir: &Path) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Builds default URL for latest mainnet archive snapshot
|
||||
// Builds default URL for latest mainnet archive snapshot using configured defaults
|
||||
async fn get_latest_snapshot_url() -> Result<String> {
|
||||
let latest_url = format!("{MERKLE_BASE_URL}/latest.txt");
|
||||
let base_url = &DownloadDefaults::get_global().default_base_url;
|
||||
let latest_url = format!("{base_url}/latest.txt");
|
||||
let filename = Client::new()
|
||||
.get(latest_url)
|
||||
.send()
|
||||
@@ -220,5 +307,64 @@ async fn get_latest_snapshot_url() -> Result<String> {
|
||||
.trim()
|
||||
.to_string();
|
||||
|
||||
Ok(format!("{MERKLE_BASE_URL}/{filename}"))
|
||||
Ok(format!("{base_url}/{filename}"))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_download_defaults_builder() {
|
||||
let defaults = DownloadDefaults::default()
|
||||
.with_snapshot("https://example.com/snapshots (example)")
|
||||
.with_base_url("https://example.com");
|
||||
|
||||
assert_eq!(defaults.default_base_url, "https://example.com");
|
||||
assert_eq!(defaults.available_snapshots.len(), 3); // 2 defaults + 1 added
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_download_defaults_replace_snapshots() {
|
||||
let defaults = DownloadDefaults::default().with_snapshots(vec![
|
||||
Cow::Borrowed("https://custom1.com"),
|
||||
Cow::Borrowed("https://custom2.com"),
|
||||
]);
|
||||
|
||||
assert_eq!(defaults.available_snapshots.len(), 2);
|
||||
assert_eq!(defaults.available_snapshots[0], "https://custom1.com");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_long_help_generation() {
|
||||
let defaults = DownloadDefaults::default();
|
||||
let help = defaults.long_help();
|
||||
|
||||
assert!(help.contains("Available snapshot sources:"));
|
||||
assert!(help.contains("merkle.io"));
|
||||
assert!(help.contains("publicnode.com"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_long_help_override() {
|
||||
let custom_help = "This is custom help text for downloading snapshots.";
|
||||
let defaults = DownloadDefaults::default().with_long_help(custom_help);
|
||||
|
||||
let help = defaults.long_help();
|
||||
assert_eq!(help, custom_help);
|
||||
assert!(!help.contains("Available snapshot sources:"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_builder_chaining() {
|
||||
let defaults = DownloadDefaults::default()
|
||||
.with_base_url("https://custom.example.com")
|
||||
.with_snapshot("https://snapshot1.com")
|
||||
.with_snapshot("https://snapshot2.com")
|
||||
.with_long_help("Custom help for snapshots");
|
||||
|
||||
assert_eq!(defaults.default_base_url, "https://custom.example.com");
|
||||
assert_eq!(defaults.available_snapshots.len(), 4); // 2 defaults + 2 added
|
||||
assert_eq!(defaults.long_help, Some("Custom help for snapshots".to_string()));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@ use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
|
||||
use clap::{Args, Parser};
|
||||
use reth_chainspec::{EthChainSpec, EthereumHardforks};
|
||||
use reth_cli::chainspec::ChainSpecParser;
|
||||
use reth_era::execution_types::MAX_BLOCKS_PER_ERA1;
|
||||
use reth_era::era1::types::execution::MAX_BLOCKS_PER_ERA1;
|
||||
use reth_era_utils as era1;
|
||||
use reth_provider::DatabaseProviderFactory;
|
||||
use std::{path::PathBuf, sync::Arc};
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
//! Command that initializes the node from a genesis file.
|
||||
|
||||
use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
|
||||
use alloy_consensus::BlockHeader;
|
||||
use clap::Parser;
|
||||
use reth_chainspec::{EthChainSpec, EthereumHardforks};
|
||||
use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks};
|
||||
use reth_cli::chainspec::ChainSpecParser;
|
||||
use reth_provider::BlockHashReader;
|
||||
use std::sync::Arc;
|
||||
@@ -22,8 +23,9 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> InitComman
|
||||
|
||||
let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RW)?;
|
||||
|
||||
let genesis_block_number = provider_factory.chain_spec().genesis_header().number();
|
||||
let hash = provider_factory
|
||||
.block_hash(0)?
|
||||
.block_hash(genesis_block_number)?
|
||||
.ok_or_else(|| eyre::eyre!("Genesis hash not found."))?;
|
||||
|
||||
info!(target: "reth::cli", hash = ?hash, "Genesis block written");
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
//! Command that initializes the node from a genesis file.
|
||||
|
||||
use crate::common::{AccessRights, CliHeader, CliNodeTypes, Environment, EnvironmentArgs};
|
||||
use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
|
||||
use alloy_consensus::BlockHeader as AlloyBlockHeader;
|
||||
use alloy_primitives::{Sealable, B256};
|
||||
use clap::Parser;
|
||||
@@ -8,7 +8,7 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks};
|
||||
use reth_cli::chainspec::ChainSpecParser;
|
||||
use reth_db_common::init::init_from_state_dump;
|
||||
use reth_node_api::NodePrimitives;
|
||||
use reth_primitives_traits::{BlockHeader, SealedHeader};
|
||||
use reth_primitives_traits::{header::HeaderMut, SealedHeader};
|
||||
use reth_provider::{
|
||||
BlockNumReader, DBProvider, DatabaseProviderFactory, StaticFileProviderFactory,
|
||||
StaticFileWriter,
|
||||
@@ -69,7 +69,7 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> InitStateC
|
||||
where
|
||||
N: CliNodeTypes<
|
||||
ChainSpec = C::ChainSpec,
|
||||
Primitives: NodePrimitives<BlockHeader: BlockHeader + CliHeader>,
|
||||
Primitives: NodePrimitives<BlockHeader: HeaderMut>,
|
||||
>,
|
||||
{
|
||||
info!(target: "reth::cli", "Reth init-state starting");
|
||||
@@ -110,7 +110,7 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> InitStateC
|
||||
static_file_provider.commit()?;
|
||||
} else if last_block_number > 0 && last_block_number < header.number() {
|
||||
return Err(eyre::eyre!(
|
||||
"Data directory should be empty when calling init-state with --without-evm-history."
|
||||
"Data directory should be empty when calling init-state with --without-evm."
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -79,7 +79,7 @@ where
|
||||
+ StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Compact>>,
|
||||
{
|
||||
provider_rw.insert_block(
|
||||
SealedBlock::<<Provider::Primitives as NodePrimitives>::Block>::from_sealed_parts(
|
||||
&SealedBlock::<<Provider::Primitives as NodePrimitives>::Block>::from_sealed_parts(
|
||||
header.clone(),
|
||||
Default::default(),
|
||||
)
|
||||
|
||||
@@ -10,7 +10,7 @@ use reth_node_builder::NodeBuilder;
|
||||
use reth_node_core::{
|
||||
args::{
|
||||
DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, EraArgs, MetricArgs,
|
||||
NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs,
|
||||
NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, StaticFilesArgs, TxPoolArgs,
|
||||
},
|
||||
node_config::NodeConfig,
|
||||
version,
|
||||
@@ -110,6 +110,10 @@ pub struct NodeCommand<C: ChainSpecParser, Ext: clap::Args + fmt::Debug = NoArgs
|
||||
#[command(flatten, next_help_heading = "ERA")]
|
||||
pub era: EraArgs,
|
||||
|
||||
/// All static files related arguments
|
||||
#[command(flatten, next_help_heading = "Static Files")]
|
||||
pub static_files: StaticFilesArgs,
|
||||
|
||||
/// Additional cli arguments
|
||||
#[command(flatten, next_help_heading = "Extension")]
|
||||
pub ext: Ext,
|
||||
@@ -145,7 +149,7 @@ where
|
||||
where
|
||||
L: Launcher<C, Ext>,
|
||||
{
|
||||
tracing::info!(target: "reth::cli", version = ?version::version_metadata().short_version, "Starting reth");
|
||||
tracing::info!(target: "reth::cli", version = ?version::version_metadata().short_version, "Starting {}", version::version_metadata().name_client);
|
||||
|
||||
let Self {
|
||||
datadir,
|
||||
@@ -162,9 +166,10 @@ where
|
||||
db,
|
||||
dev,
|
||||
pruning,
|
||||
ext,
|
||||
engine,
|
||||
era,
|
||||
static_files,
|
||||
ext,
|
||||
} = self;
|
||||
|
||||
// set up node config
|
||||
@@ -184,6 +189,7 @@ where
|
||||
pruning,
|
||||
engine,
|
||||
era,
|
||||
static_files,
|
||||
};
|
||||
|
||||
let data_dir = node_config.datadir();
|
||||
|
||||
@@ -60,7 +60,7 @@ impl Command {
|
||||
if self.v5 {
|
||||
info!("Starting discv5");
|
||||
let config = Config::builder(self.addr).build();
|
||||
let (_discv5, updates, _local_enr_discv5) = Discv5::start(&sk, config).await?;
|
||||
let (_discv5, updates) = Discv5::start(&sk, config).await?;
|
||||
discv5_updates = Some(updates);
|
||||
};
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ use backon::{ConstantBuilder, Retryable};
|
||||
use clap::{Parser, Subcommand};
|
||||
use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks};
|
||||
use reth_cli::chainspec::ChainSpecParser;
|
||||
use reth_cli_util::{get_secret_key, hash_or_num_value_parser};
|
||||
use reth_cli_util::hash_or_num_value_parser;
|
||||
use reth_config::Config;
|
||||
use reth_network::{BlockDownloaderProvider, NetworkConfigBuilder};
|
||||
use reth_network_p2p::bodies::client::BodiesClient;
|
||||
@@ -72,7 +72,7 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
|
||||
.split();
|
||||
if result.len() != 1 {
|
||||
eyre::bail!(
|
||||
"Invalid number of headers received. Expected: 1. Received: {}",
|
||||
"Invalid number of bodies received. Expected: 1. Received: {}",
|
||||
result.len()
|
||||
)
|
||||
}
|
||||
@@ -183,15 +183,13 @@ impl<C: ChainSpecParser> DownloadArgs<C> {
|
||||
config.peers.trusted_nodes_only = self.network.trusted_only;
|
||||
|
||||
let default_secret_key_path = data_dir.p2p_secret();
|
||||
let secret_key_path =
|
||||
self.network.p2p_secret_key.clone().unwrap_or(default_secret_key_path);
|
||||
let p2p_secret_key = get_secret_key(&secret_key_path)?;
|
||||
let p2p_secret_key = self.network.secret_key(default_secret_key_path)?;
|
||||
let rlpx_socket = (self.network.addr, self.network.port).into();
|
||||
let boot_nodes = self.chain.bootnodes().unwrap_or_default();
|
||||
|
||||
let net = NetworkConfigBuilder::<N::NetworkPrimitives>::new(p2p_secret_key)
|
||||
.peer_config(config.peers_config_with_basic_nodes_from_file(None))
|
||||
.external_ip_resolver(self.network.nat)
|
||||
.external_ip_resolver(self.network.nat.clone())
|
||||
.network_id(self.network.network_id)
|
||||
.boot_nodes(boot_nodes.clone())
|
||||
.apply(|builder| {
|
||||
|
||||
@@ -9,6 +9,7 @@ use clap::Parser;
|
||||
use eyre::WrapErr;
|
||||
use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks};
|
||||
use reth_cli::chainspec::ChainSpecParser;
|
||||
use reth_cli_util::cancellation::CancellationToken;
|
||||
use reth_consensus::FullConsensus;
|
||||
use reth_evm::{execute::Executor, ConfigureEvm};
|
||||
use reth_primitives_traits::{format_gas_throughput, BlockBody, GotExpected};
|
||||
@@ -44,6 +45,10 @@ pub struct Command<C: ChainSpecParser> {
|
||||
/// Number of tasks to run in parallel
|
||||
#[arg(long, default_value = "10")]
|
||||
num_tasks: u64,
|
||||
|
||||
/// Continues with execution when an invalid block is encountered and collects these blocks.
|
||||
#[arg(long)]
|
||||
skip_invalid_blocks: bool,
|
||||
}
|
||||
|
||||
impl<C: ChainSpecParser> Command<C> {
|
||||
@@ -61,11 +66,23 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
|
||||
{
|
||||
let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RO)?;
|
||||
|
||||
let provider = provider_factory.database_provider_ro()?;
|
||||
let components = components(provider_factory.chain_spec());
|
||||
|
||||
let min_block = self.from;
|
||||
let max_block = self.to.unwrap_or(provider.best_block_number()?);
|
||||
let best_block = DatabaseProviderFactory::database_provider_ro(&provider_factory)?
|
||||
.best_block_number()?;
|
||||
let mut max_block = best_block;
|
||||
if let Some(to) = self.to {
|
||||
if to > best_block {
|
||||
warn!(
|
||||
requested = to,
|
||||
best_block,
|
||||
"Requested --to is beyond available chain head; clamping to best block"
|
||||
);
|
||||
} else {
|
||||
max_block = to;
|
||||
}
|
||||
};
|
||||
|
||||
let total_blocks = max_block - min_block;
|
||||
let total_gas = calculate_gas_used_from_headers(
|
||||
@@ -83,7 +100,11 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
|
||||
}
|
||||
};
|
||||
|
||||
let skip_invalid_blocks = self.skip_invalid_blocks;
|
||||
let (stats_tx, mut stats_rx) = mpsc::unbounded_channel();
|
||||
let (info_tx, mut info_rx) = mpsc::unbounded_channel();
|
||||
let cancellation = CancellationToken::new();
|
||||
let _guard = cancellation.drop_guard();
|
||||
|
||||
let mut tasks = JoinSet::new();
|
||||
for i in 0..self.num_tasks {
|
||||
@@ -97,17 +118,40 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
|
||||
let consensus = components.consensus().clone();
|
||||
let db_at = db_at.clone();
|
||||
let stats_tx = stats_tx.clone();
|
||||
let info_tx = info_tx.clone();
|
||||
let cancellation = cancellation.clone();
|
||||
tasks.spawn_blocking(move || {
|
||||
let mut executor = evm_config.batch_executor(db_at(start_block - 1));
|
||||
for block in start_block..end_block {
|
||||
let mut executor_created = Instant::now();
|
||||
let executor_lifetime = Duration::from_secs(120);
|
||||
|
||||
'blocks: for block in start_block..end_block {
|
||||
if cancellation.is_cancelled() {
|
||||
// exit if the program is being terminated
|
||||
break
|
||||
}
|
||||
|
||||
let block = provider_factory
|
||||
.recovered_block(block.into(), TransactionVariant::NoHash)?
|
||||
.unwrap();
|
||||
let result = executor.execute_one(&block)?;
|
||||
|
||||
let result = match executor.execute_one(&block) {
|
||||
Ok(result) => result,
|
||||
Err(err) => {
|
||||
if skip_invalid_blocks {
|
||||
executor = evm_config.batch_executor(db_at(block.number()));
|
||||
let _ = info_tx.send((block, eyre::Report::new(err)));
|
||||
continue
|
||||
}
|
||||
return Err(err.into())
|
||||
}
|
||||
};
|
||||
|
||||
if let Err(err) = consensus
|
||||
.validate_block_post_execution(&block, &result)
|
||||
.wrap_err_with(|| format!("Failed to validate block {}", block.number()))
|
||||
.wrap_err_with(|| {
|
||||
format!("Failed to validate block {} {}", block.number(), block.hash())
|
||||
})
|
||||
{
|
||||
let correct_receipts =
|
||||
provider_factory.receipts_by_block(block.number().into())?.unwrap();
|
||||
@@ -143,6 +187,11 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
|
||||
};
|
||||
|
||||
error!(number=?block.number(), ?mismatch, "Gas usage mismatch");
|
||||
if skip_invalid_blocks {
|
||||
executor = evm_config.batch_executor(db_at(block.number()));
|
||||
let _ = info_tx.send((block, err));
|
||||
continue 'blocks;
|
||||
}
|
||||
return Err(err);
|
||||
}
|
||||
} else {
|
||||
@@ -154,9 +203,12 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
|
||||
}
|
||||
let _ = stats_tx.send(block.gas_used());
|
||||
|
||||
// Reset DB once in a while to avoid OOM
|
||||
if executor.size_hint() > 1_000_000 {
|
||||
// Reset DB once in a while to avoid OOM or read tx timeouts
|
||||
if executor.size_hint() > 1_000_000 ||
|
||||
executor_created.elapsed() > executor_lifetime
|
||||
{
|
||||
executor = evm_config.batch_executor(db_at(block.number()));
|
||||
executor_created = Instant::now();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -171,6 +223,7 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
|
||||
let mut last_logged_gas = 0;
|
||||
let mut last_logged_blocks = 0;
|
||||
let mut last_logged_time = Instant::now();
|
||||
let mut invalid_blocks = Vec::new();
|
||||
|
||||
let mut interval = tokio::time::interval(Duration::from_secs(10));
|
||||
|
||||
@@ -180,6 +233,10 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
|
||||
total_executed_blocks += 1;
|
||||
total_executed_gas += gas_used;
|
||||
}
|
||||
Some((block, err)) = info_rx.recv() => {
|
||||
error!(?err, block=?block.num_hash(), "Invalid block");
|
||||
invalid_blocks.push(block.num_hash());
|
||||
}
|
||||
result = tasks.join_next() => {
|
||||
if let Some(result) = result {
|
||||
if matches!(result, Err(_) | Ok(Err(_))) {
|
||||
@@ -210,12 +267,25 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
|
||||
}
|
||||
}
|
||||
|
||||
info!(
|
||||
start_block = min_block,
|
||||
end_block = max_block,
|
||||
throughput=?format_gas_throughput(total_executed_gas, instant.elapsed()),
|
||||
"Re-executed successfully"
|
||||
);
|
||||
if invalid_blocks.is_empty() {
|
||||
info!(
|
||||
start_block = min_block,
|
||||
end_block = max_block,
|
||||
%total_executed_blocks,
|
||||
throughput=?format_gas_throughput(total_executed_gas, instant.elapsed()),
|
||||
"Re-executed successfully"
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
start_block = min_block,
|
||||
end_block = max_block,
|
||||
%total_executed_blocks,
|
||||
invalid_block_count = invalid_blocks.len(),
|
||||
?invalid_blocks,
|
||||
throughput=?format_gas_throughput(total_executed_gas, instant.elapsed()),
|
||||
"Re-executed with invalid blocks"
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,10 +1,9 @@
|
||||
//! Database debugging tool
|
||||
use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
|
||||
use clap::Parser;
|
||||
use itertools::Itertools;
|
||||
use reth_chainspec::EthChainSpec;
|
||||
use reth_cli::chainspec::ChainSpecParser;
|
||||
use reth_db::{mdbx::tx::Tx, static_file::iter_static_files, DatabaseError};
|
||||
use reth_db::{mdbx::tx::Tx, DatabaseError};
|
||||
use reth_db_api::{
|
||||
tables,
|
||||
transaction::{DbTx, DbTxMut},
|
||||
@@ -15,7 +14,9 @@ use reth_db_common::{
|
||||
};
|
||||
use reth_node_api::{HeaderTy, ReceiptTy, TxTy};
|
||||
use reth_node_core::args::StageEnum;
|
||||
use reth_provider::{DBProvider, DatabaseProviderFactory, StaticFileProviderFactory, TrieWriter};
|
||||
use reth_provider::{
|
||||
DBProvider, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, TrieWriter,
|
||||
};
|
||||
use reth_prune::PruneSegment;
|
||||
use reth_stages::StageId;
|
||||
use reth_static_file_types::StaticFileSegment;
|
||||
@@ -44,21 +45,48 @@ impl<C: ChainSpecParser> Command<C> {
|
||||
StageEnum::Headers => Some(StaticFileSegment::Headers),
|
||||
StageEnum::Bodies => Some(StaticFileSegment::Transactions),
|
||||
StageEnum::Execution => Some(StaticFileSegment::Receipts),
|
||||
StageEnum::Senders => Some(StaticFileSegment::TransactionSenders),
|
||||
_ => None,
|
||||
};
|
||||
|
||||
// Delete static file segment data before inserting the genesis header below
|
||||
// Calling `StaticFileProviderRW::prune_*` will instruct the writer to prune rows only
|
||||
// when `StaticFileProviderRW::commit` is called. We need to do that instead of
|
||||
// deleting the jar files; otherwise, if the task were interrupted after deleting them
// but before committing the checkpoints to the database, we would lose essential data.
|
||||
if let Some(static_file_segment) = static_file_segment {
|
||||
let static_file_provider = tool.provider_factory.static_file_provider();
|
||||
let static_files = iter_static_files(static_file_provider.directory())?;
|
||||
if let Some(segment_static_files) = static_files.get(&static_file_segment) {
|
||||
// Delete static files from the highest to the lowest block range
|
||||
for (block_range, _) in segment_static_files
|
||||
.iter()
|
||||
.sorted_by_key(|(block_range, _)| block_range.start())
|
||||
.rev()
|
||||
{
|
||||
static_file_provider.delete_jar(static_file_segment, block_range.start())?;
|
||||
if let Some(highest_block) =
|
||||
static_file_provider.get_highest_static_file_block(static_file_segment)
|
||||
{
|
||||
let mut writer = static_file_provider.latest_writer(static_file_segment)?;
|
||||
|
||||
match static_file_segment {
|
||||
StaticFileSegment::Headers => {
|
||||
// Prune all headers leaving genesis intact.
|
||||
writer.prune_headers(highest_block)?;
|
||||
}
|
||||
StaticFileSegment::Transactions => {
|
||||
let to_delete = static_file_provider
|
||||
.get_highest_static_file_tx(static_file_segment)
|
||||
.map(|tx_num| tx_num + 1)
|
||||
.unwrap_or_default();
|
||||
writer.prune_transactions(to_delete, 0)?;
|
||||
}
|
||||
StaticFileSegment::Receipts => {
|
||||
let to_delete = static_file_provider
|
||||
.get_highest_static_file_tx(static_file_segment)
|
||||
.map(|tx_num| tx_num + 1)
|
||||
.unwrap_or_default();
|
||||
writer.prune_receipts(to_delete, 0)?;
|
||||
}
|
||||
StaticFileSegment::TransactionSenders => {
|
||||
let to_delete = static_file_provider
|
||||
.get_highest_static_file_tx(static_file_segment)
|
||||
.map(|tx_num| tx_num + 1)
|
||||
.unwrap_or_default();
|
||||
writer.prune_transaction_senders(to_delete, 0)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ use reth_evm::ConfigureEvm;
|
||||
use reth_node_builder::NodeTypesWithDB;
|
||||
use reth_node_core::dirs::{ChainPath, DataDirPath};
|
||||
use reth_provider::{
|
||||
providers::{ProviderNodeTypes, StaticFileProvider},
|
||||
providers::{ProviderNodeTypes, RocksDBProvider, StaticFileProvider},
|
||||
DatabaseProviderFactory, ProviderFactory,
|
||||
};
|
||||
use reth_stages::{stages::ExecutionStage, Stage, StageCheckpoint, UnwindInput};
|
||||
@@ -42,7 +42,8 @@ where
|
||||
Arc::new(output_db),
|
||||
db_tool.chain(),
|
||||
StaticFileProvider::read_write(output_datadir.static_files())?,
|
||||
),
|
||||
RocksDBProvider::builder(output_datadir.rocksdb()).build()?,
|
||||
)?,
|
||||
to,
|
||||
from,
|
||||
evm_config,
|
||||
|
||||
@@ -6,7 +6,7 @@ use reth_db_api::{database::Database, table::TableImporter, tables};
|
||||
use reth_db_common::DbTool;
|
||||
use reth_node_core::dirs::{ChainPath, DataDirPath};
|
||||
use reth_provider::{
|
||||
providers::{ProviderNodeTypes, StaticFileProvider},
|
||||
providers::{ProviderNodeTypes, RocksDBProvider, StaticFileProvider},
|
||||
DatabaseProviderFactory, ProviderFactory,
|
||||
};
|
||||
use reth_stages::{stages::AccountHashingStage, Stage, StageCheckpoint, UnwindInput};
|
||||
@@ -39,7 +39,8 @@ pub(crate) async fn dump_hashing_account_stage<N: ProviderNodeTypes<DB = Arc<Dat
|
||||
Arc::new(output_db),
|
||||
db_tool.chain(),
|
||||
StaticFileProvider::read_write(output_datadir.static_files())?,
|
||||
),
|
||||
RocksDBProvider::builder(output_datadir.rocksdb()).build()?,
|
||||
)?,
|
||||
to,
|
||||
from,
|
||||
)?;
|
||||
|
||||
@@ -5,7 +5,7 @@ use reth_db_api::{database::Database, table::TableImporter, tables};
|
||||
use reth_db_common::DbTool;
|
||||
use reth_node_core::dirs::{ChainPath, DataDirPath};
|
||||
use reth_provider::{
|
||||
providers::{ProviderNodeTypes, StaticFileProvider},
|
||||
providers::{ProviderNodeTypes, RocksDBProvider, StaticFileProvider},
|
||||
DatabaseProviderFactory, ProviderFactory,
|
||||
};
|
||||
use reth_stages::{stages::StorageHashingStage, Stage, StageCheckpoint, UnwindInput};
|
||||
@@ -29,7 +29,8 @@ pub(crate) async fn dump_hashing_storage_stage<N: ProviderNodeTypes<DB = Arc<Dat
|
||||
Arc::new(output_db),
|
||||
db_tool.chain(),
|
||||
StaticFileProvider::read_write(output_datadir.static_files())?,
|
||||
),
|
||||
RocksDBProvider::builder(output_datadir.rocksdb()).build()?,
|
||||
)?,
|
||||
to,
|
||||
from,
|
||||
)?;
|
||||
|
||||
@@ -1,18 +1,18 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::setup;
|
||||
use alloy_primitives::BlockNumber;
|
||||
use alloy_primitives::{Address, BlockNumber};
|
||||
use eyre::Result;
|
||||
use reth_config::config::EtlConfig;
|
||||
use reth_consensus::{ConsensusError, FullConsensus};
|
||||
use reth_db::DatabaseEnv;
|
||||
use reth_db_api::{database::Database, table::TableImporter, tables};
|
||||
use reth_db_api::{database::Database, models::BlockNumberAddress, table::TableImporter, tables};
|
||||
use reth_db_common::DbTool;
|
||||
use reth_evm::ConfigureEvm;
|
||||
use reth_exex::ExExManagerHandle;
|
||||
use reth_node_core::dirs::{ChainPath, DataDirPath};
|
||||
use reth_provider::{
|
||||
providers::{ProviderNodeTypes, StaticFileProvider},
|
||||
providers::{ProviderNodeTypes, RocksDBProvider, StaticFileProvider},
|
||||
DatabaseProviderFactory, ProviderFactory,
|
||||
};
|
||||
use reth_stages::{
|
||||
@@ -62,7 +62,8 @@ where
|
||||
Arc::new(output_db),
|
||||
db_tool.chain(),
|
||||
StaticFileProvider::read_write(output_datadir.static_files())?,
|
||||
),
|
||||
RocksDBProvider::builder(output_datadir.rocksdb()).build()?,
|
||||
)?,
|
||||
to,
|
||||
from,
|
||||
)?;
|
||||
@@ -135,9 +136,13 @@ fn unwind_and_copy<N: ProviderNodeTypes>(
|
||||
|
||||
let unwind_inner_tx = provider.into_tx();
|
||||
|
||||
// TODO: optimize; we could fetch only the entries we actually need
|
||||
output_db
|
||||
.update(|tx| tx.import_dupsort::<tables::StorageChangeSets, _>(&unwind_inner_tx))??;
|
||||
output_db.update(|tx| {
|
||||
tx.import_table_with_range::<tables::StorageChangeSets, _>(
|
||||
&unwind_inner_tx,
|
||||
Some(BlockNumberAddress((from, Address::ZERO))),
|
||||
BlockNumberAddress((to, Address::repeat_byte(0xff))),
|
||||
)
|
||||
})??;
|
||||
|
||||
output_db.update(|tx| tx.import_table::<tables::HashedAccounts, _>(&unwind_inner_tx))??;
|
||||
output_db.update(|tx| tx.import_dupsort::<tables::HashedStorages, _>(&unwind_inner_tx))??;
|
||||
|
||||
@@ -84,6 +84,9 @@ pub struct Command<C: ChainSpecParser> {
|
||||
/// Commits the changes in the database. WARNING: potentially destructive.
|
||||
///
|
||||
/// Useful when you want to run diagnostics on the database.
|
||||
///
|
||||
/// NOTE: This flag is currently required for the headers, bodies, and execution stages because
|
||||
/// they use static files and must commit to properly unwind and run.
|
||||
// TODO: We should consider allowing hooks to be run at the end of the stage run,
|
||||
// e.g. query the DB size, or any table data.
|
||||
#[arg(long, short)]
|
||||
@@ -105,6 +108,14 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
|
||||
Comp: CliNodeComponents<N>,
|
||||
F: FnOnce(Arc<C::ChainSpec>) -> Comp,
|
||||
{
|
||||
// Quit early if the stage requires a commit and `--commit` is not provided.
|
||||
if self.requires_commit() && !self.commit {
|
||||
return Err(eyre::eyre!(
|
||||
"The stage {} requires overwriting existing static files and must commit, but `--commit` was not provided. Please pass `--commit` and try again.",
|
||||
self.stage.to_string()
|
||||
));
|
||||
}
|
||||
|
||||
// Raise the fd limit of the process.
|
||||
// Does not do anything on windows.
|
||||
let _ = fdlimit::raise_fd_limit();
|
||||
@@ -116,7 +127,6 @@ impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>
|
||||
let components = components(provider_factory.chain_spec());
|
||||
|
||||
if let Some(listen_addr) = self.metrics {
|
||||
info!(target: "reth::cli", "Starting metrics endpoint at {}", listen_addr);
|
||||
let config = MetricServerConfig::new(
|
||||
listen_addr,
|
||||
VersionInfo {
|
||||
@@ -384,4 +394,13 @@ impl<C: ChainSpecParser> Command<C> {
|
||||
pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
|
||||
Some(&self.env.chain)
|
||||
}
|
||||
|
||||
/// Returns whether or not the configured stage requires committing.
|
||||
///
|
||||
/// This is the case for stages that mainly modify static files, as there is no way to unwind
|
||||
/// these stages without committing anyway. This is because static files do not have
|
||||
/// transactions and we cannot change the view of headers without writing.
|
||||
pub fn requires_commit(&self) -> bool {
|
||||
matches!(self.stage, StageEnum::Headers | StageEnum::Bodies | StageEnum::Execution)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -97,6 +97,57 @@ impl CliRunner {
|
||||
command_res
|
||||
}
|
||||
|
||||
/// Executes a command in a blocking context with access to `CliContext`.
|
||||
///
|
||||
/// See [`Runtime::spawn_blocking`](tokio::runtime::Runtime::spawn_blocking).
|
||||
pub fn run_blocking_command_until_exit<F, E>(
|
||||
self,
|
||||
command: impl FnOnce(CliContext) -> F + Send + 'static,
|
||||
) -> Result<(), E>
|
||||
where
|
||||
F: Future<Output = Result<(), E>> + Send + 'static,
|
||||
E: Send + Sync + From<std::io::Error> + From<reth_tasks::PanickedTaskError> + 'static,
|
||||
{
|
||||
let AsyncCliRunner { context, mut task_manager, tokio_runtime } =
|
||||
AsyncCliRunner::new(self.tokio_runtime);
|
||||
|
||||
// Spawn the command on the blocking thread pool
|
||||
let handle = tokio_runtime.handle().clone();
|
||||
let command_handle =
|
||||
tokio_runtime.handle().spawn_blocking(move || handle.block_on(command(context)));
|
||||
|
||||
// Wait for the command to complete or ctrl-c
|
||||
let command_res = tokio_runtime.block_on(run_to_completion_or_panic(
|
||||
&mut task_manager,
|
||||
run_until_ctrl_c(
|
||||
async move { command_handle.await.expect("Failed to join blocking task") },
|
||||
),
|
||||
));
|
||||
|
||||
if command_res.is_err() {
|
||||
error!(target: "reth::cli", "shutting down due to error");
|
||||
} else {
|
||||
debug!(target: "reth::cli", "shutting down gracefully");
|
||||
task_manager.graceful_shutdown_with_timeout(Duration::from_secs(5));
|
||||
}
|
||||
|
||||
// Shut down the runtime on a separate thread
|
||||
let (tx, rx) = mpsc::channel();
|
||||
std::thread::Builder::new()
|
||||
.name("tokio-runtime-shutdown".to_string())
|
||||
.spawn(move || {
|
||||
drop(tokio_runtime);
|
||||
let _ = tx.send(());
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
let _ = rx.recv_timeout(Duration::from_secs(5)).inspect_err(|err| {
|
||||
debug!(target: "reth::cli", %err, "tokio runtime shutdown timed out");
|
||||
});
|
||||
|
||||
command_res
|
||||
}
|
||||
|
||||
/// Executes a regular future until completion or until external signal received.
|
||||
pub fn run_until_ctrl_c<F, E>(self, fut: F) -> Result<(), E>
|
||||
where
|
||||
|
||||
@@ -42,6 +42,9 @@ jemalloc = ["dep:tikv-jemallocator"]
|
||||
# Enables jemalloc profiling features
|
||||
jemalloc-prof = ["jemalloc", "tikv-jemallocator?/profiling"]
|
||||
|
||||
# Enables unprefixed malloc (reproducible builds support)
|
||||
jemalloc-unprefixed = ["jemalloc", "tikv-jemallocator?/unprefixed_malloc_on_supported_platforms"]
|
||||
|
||||
# Wraps the selected allocator in the tracy profiling allocator
|
||||
tracy-allocator = ["dep:tracy-client"]
|
||||
|
||||
|
||||
103
crates/cli/util/src/cancellation.rs
Normal file
@@ -0,0 +1,103 @@
|
||||
//! Thread-safe cancellation primitives for cooperative task cancellation.
|
||||
|
||||
use std::sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
};
|
||||
|
||||
/// A thread-safe cancellation token that can be shared across threads.
|
||||
///
|
||||
/// This token allows cooperative cancellation by providing a way to signal
|
||||
/// cancellation and check cancellation status. The token can be cloned and
|
||||
/// shared across multiple threads, with all clones sharing the same cancellation state.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use reth_cli_util::cancellation::CancellationToken;
|
||||
/// use std::{thread, time::Duration};
|
||||
///
|
||||
/// let token = CancellationToken::new();
|
||||
/// let worker_token = token.clone();
|
||||
///
|
||||
/// let handle = thread::spawn(move || {
|
||||
/// while !worker_token.is_cancelled() {
|
||||
/// // Do work...
|
||||
/// thread::sleep(Duration::from_millis(100));
|
||||
/// }
|
||||
/// });
|
||||
///
|
||||
/// // Cancel from main thread
|
||||
/// token.cancel();
|
||||
/// handle.join().unwrap();
|
||||
/// ```
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct CancellationToken {
|
||||
cancelled: Arc<AtomicBool>,
|
||||
}
|
||||
|
||||
impl CancellationToken {
|
||||
/// Creates a new cancellation token in the non-cancelled state.
|
||||
pub fn new() -> Self {
|
||||
Self { cancelled: Arc::new(AtomicBool::new(false)) }
|
||||
}
|
||||
|
||||
/// Signals cancellation to all holders of this token and its clones.
|
||||
///
|
||||
/// Once cancelled, the token cannot be reset. This operation is thread-safe
|
||||
/// and can be called multiple times without issue.
|
||||
pub fn cancel(&self) {
|
||||
self.cancelled.store(true, Ordering::Release);
|
||||
}
|
||||
|
||||
/// Checks whether cancellation has been requested.
|
||||
///
|
||||
/// Returns `true` if [`cancel`](Self::cancel) has been called on this token
|
||||
/// or any of its clones.
|
||||
pub fn is_cancelled(&self) -> bool {
|
||||
self.cancelled.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
/// Creates a guard that automatically cancels this token when dropped.
|
||||
///
|
||||
/// This is useful for ensuring cancellation happens when a scope exits,
|
||||
/// either normally or via panic.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use reth_cli_util::cancellation::CancellationToken;
|
||||
///
|
||||
/// let token = CancellationToken::new();
|
||||
/// {
|
||||
/// let _guard = token.drop_guard();
|
||||
/// assert!(!token.is_cancelled());
|
||||
/// // Guard dropped here, triggering cancellation
|
||||
/// }
|
||||
/// assert!(token.is_cancelled());
|
||||
/// ```
|
||||
pub fn drop_guard(&self) -> CancellationGuard {
|
||||
CancellationGuard { token: self.clone() }
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for CancellationToken {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// A guard that cancels its associated [`CancellationToken`] when dropped.
|
||||
///
|
||||
/// Created by calling [`CancellationToken::drop_guard`]. When this guard is dropped,
|
||||
/// it automatically calls [`cancel`](CancellationToken::cancel) on the token.
|
||||
#[derive(Debug)]
|
||||
pub struct CancellationGuard {
|
||||
token: CancellationToken,
|
||||
}
|
||||
|
||||
impl Drop for CancellationGuard {
|
||||
fn drop(&mut self) {
|
||||
self.token.cancel();
|
||||
}
|
||||
}
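// Sketch (not part of the committed change): combining `drop_guard` with cloned tokens handed
// to worker threads, mirroring how the re-execution command uses cancellation. The worker loop
// and function name are illustrative only.
fn run_workers_until_scope_exit() {
    let token = CancellationToken::new();
    // Cancels automatically when this function returns or unwinds.
    let _guard = token.drop_guard();

    let handles: Vec<_> = (0..4)
        .map(|_| {
            let worker_token = token.clone();
            std::thread::spawn(move || {
                while !worker_token.is_cancelled() {
                    // Do a unit of work, then re-check for cancellation.
                    std::thread::sleep(std::time::Duration::from_millis(50));
                }
            })
        })
        .collect();

    // Explicit cancel is optional here; dropping `_guard` has the same effect.
    token.cancel();
    for handle in handles {
        handle.join().unwrap();
    }
}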
|
||||
@@ -9,10 +9,11 @@
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
|
||||
pub mod allocator;
|
||||
pub mod cancellation;
|
||||
|
||||
/// Helper function to load a secret key from a file.
|
||||
pub mod load_secret_key;
|
||||
pub use load_secret_key::get_secret_key;
|
||||
pub use load_secret_key::{get_secret_key, parse_secret_key_from_hex};
|
||||
|
||||
/// Cli parsers functions.
|
||||
pub mod parsers;
|
||||
|
||||
@@ -30,6 +30,10 @@ pub enum SecretKeyError {
|
||||
/// Path to the secret key file.
|
||||
secret_file: PathBuf,
|
||||
},
|
||||
|
||||
/// Invalid hex string format.
|
||||
#[error("invalid hex string: {0}")]
|
||||
InvalidHexString(String),
|
||||
}
|
||||
|
||||
/// Attempts to load a [`SecretKey`] from a specified path. If no file exists there, then it
|
||||
@@ -60,3 +64,75 @@ pub fn get_secret_key(secret_key_path: &Path) -> Result<SecretKey, SecretKeyErro
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// Parses a [`SecretKey`] from a hex string.
|
||||
///
|
||||
/// The hex string can optionally start with "0x".
|
||||
pub fn parse_secret_key_from_hex(hex_str: &str) -> Result<SecretKey, SecretKeyError> {
|
||||
// Remove "0x" prefix if present
|
||||
let hex_str = hex_str.strip_prefix("0x").unwrap_or(hex_str);
|
||||
|
||||
// Decode the hex string
|
||||
let bytes = alloy_primitives::hex::decode(hex_str)
|
||||
.map_err(|e| SecretKeyError::InvalidHexString(e.to_string()))?;
|
||||
|
||||
// Parse into SecretKey
|
||||
SecretKey::from_slice(&bytes).map_err(SecretKeyError::SecretKeyDecodeError)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_parse_secret_key_from_hex_without_prefix() {
|
||||
// Valid 32-byte hex string (64 characters)
|
||||
let hex = "4c0883a69102937d6231471b5dbb6204fe512961708279f8c5c58b3b9c4e8b8f";
|
||||
let result = parse_secret_key_from_hex(hex);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let secret_key = result.unwrap();
|
||||
assert_eq!(alloy_primitives::hex::encode(secret_key.secret_bytes()), hex);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_secret_key_from_hex_with_0x_prefix() {
|
||||
// Valid 32-byte hex string with 0x prefix
|
||||
let hex = "0x4c0883a69102937d6231471b5dbb6204fe512961708279f8c5c58b3b9c4e8b8f";
|
||||
let result = parse_secret_key_from_hex(hex);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let secret_key = result.unwrap();
|
||||
let expected = "4c0883a69102937d6231471b5dbb6204fe512961708279f8c5c58b3b9c4e8b8f";
|
||||
assert_eq!(alloy_primitives::hex::encode(secret_key.secret_bytes()), expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_secret_key_from_hex_invalid_length() {
|
||||
// Invalid length (not 32 bytes)
|
||||
let hex = "4c0883a69102937d";
|
||||
let result = parse_secret_key_from_hex(hex);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_secret_key_from_hex_invalid_chars() {
|
||||
// Invalid hex characters
|
||||
let hex = "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz";
|
||||
let result = parse_secret_key_from_hex(hex);
|
||||
assert!(result.is_err());
|
||||
|
||||
if let Err(SecretKeyError::InvalidHexString(_)) = result {
|
||||
// Expected error type
|
||||
} else {
|
||||
panic!("Expected InvalidHexString error");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_secret_key_from_hex_empty() {
|
||||
let hex = "";
|
||||
let result = parse_secret_key_from_hex(hex);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -31,6 +31,16 @@ pub fn parse_duration_from_secs_or_ms(
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper to format a [`Duration`] into the format that can be parsed by
|
||||
/// [`parse_duration_from_secs_or_ms`].
|
||||
pub fn format_duration_as_secs_or_ms(duration: Duration) -> String {
|
||||
if duration.as_millis().is_multiple_of(1000) {
|
||||
format!("{}", duration.as_secs())
|
||||
} else {
|
||||
format!("{}ms", duration.as_millis())
|
||||
}
|
||||
}
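// Sketch (not part of the committed change): expected output of the formatter above. Whole
// seconds collapse to a bare seconds value; anything else keeps explicit millisecond units.
fn format_duration_examples() {
    use std::time::Duration;
    assert_eq!(format_duration_as_secs_or_ms(Duration::from_secs(2)), "2");
    assert_eq!(format_duration_as_secs_or_ms(Duration::from_millis(1500)), "1500ms");
    assert_eq!(format_duration_as_secs_or_ms(Duration::from_millis(0)), "0");
}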
|
||||
|
||||
/// Parse [`BlockHashOrNumber`]
|
||||
pub fn hash_or_num_value_parser(value: &str) -> eyre::Result<BlockHashOrNumber, eyre::Error> {
|
||||
match B256::from_str(value) {
|
||||
|
||||
@@ -126,7 +126,8 @@ pub fn install() {
|
||||
libc::sigaltstack(&raw const alt_stack, ptr::null_mut());
|
||||
|
||||
let mut sa: libc::sigaction = mem::zeroed();
|
||||
sa.sa_sigaction = print_stack_trace as libc::sighandler_t;
|
||||
sa.sa_sigaction =
|
||||
print_stack_trace as unsafe extern "C" fn(libc::c_int) as libc::sighandler_t;
|
||||
sa.sa_flags = libc::SA_NODEFER | libc::SA_RESETHAND | libc::SA_ONSTACK;
|
||||
libc::sigemptyset(&raw mut sa.sa_mask);
|
||||
libc::sigaction(libc::SIGSEGV, &raw const sa, ptr::null_mut());
|
||||
|
||||
@@ -15,6 +15,7 @@ workspace = true
|
||||
reth-network-types.workspace = true
|
||||
reth-prune-types.workspace = true
|
||||
reth-stages-types.workspace = true
|
||||
reth-static-file-types.workspace = true
|
||||
|
||||
# serde
|
||||
serde = { workspace = true, optional = true }
|
||||
@@ -22,7 +23,7 @@ humantime-serde = { workspace = true, optional = true }
|
||||
|
||||
# toml
|
||||
toml = { workspace = true, optional = true }
|
||||
eyre = { workspace = true, optional = true }
|
||||
eyre.workspace = true
|
||||
|
||||
# value objects
|
||||
url.workspace = true
|
||||
@@ -31,7 +32,6 @@ url.workspace = true
|
||||
serde = [
|
||||
"dep:serde",
|
||||
"dep:toml",
|
||||
"dep:eyre",
|
||||
"dep:humantime-serde",
|
||||
"reth-network-types/serde",
|
||||
"reth-prune-types/serde",
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
use reth_network_types::{PeersConfig, SessionsConfig};
|
||||
use reth_prune_types::PruneModes;
|
||||
use reth_stages_types::ExecutionStageThresholds;
|
||||
use reth_static_file_types::StaticFileSegment;
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
path::{Path, PathBuf},
|
||||
time::Duration,
|
||||
};
|
||||
@@ -29,11 +31,14 @@ pub struct Config {
|
||||
pub peers: PeersConfig,
|
||||
/// Configuration for peer sessions.
|
||||
pub sessions: SessionsConfig,
|
||||
/// Configuration for static files.
|
||||
#[cfg_attr(feature = "serde", serde(default))]
|
||||
pub static_files: StaticFilesConfig,
|
||||
}
|
||||
|
||||
impl Config {
|
||||
/// Sets the pruning configuration.
|
||||
pub const fn set_prune_config(&mut self, prune_config: PruneConfig) {
|
||||
pub fn set_prune_config(&mut self, prune_config: PruneConfig) {
|
||||
self.prune = prune_config;
|
||||
}
|
||||
}
|
||||
@@ -411,6 +416,77 @@ impl EtlConfig {
|
||||
}
|
||||
}
|
||||
|
||||
/// Static files configuration.
|
||||
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||
#[cfg_attr(feature = "serde", serde(default))]
|
||||
pub struct StaticFilesConfig {
|
||||
/// Number of blocks per file for each segment.
|
||||
pub blocks_per_file: BlocksPerFileConfig,
|
||||
}
|
||||
|
||||
/// Configuration for the number of blocks per file for each segment.
|
||||
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||
#[cfg_attr(feature = "serde", serde(default))]
|
||||
pub struct BlocksPerFileConfig {
|
||||
/// Number of blocks per file for the headers segment.
|
||||
pub headers: Option<u64>,
|
||||
/// Number of blocks per file for the transactions segment.
|
||||
pub transactions: Option<u64>,
|
||||
/// Number of blocks per file for the receipts segment.
|
||||
pub receipts: Option<u64>,
|
||||
/// Number of blocks per file for the transaction senders segment.
|
||||
pub transaction_senders: Option<u64>,
|
||||
}
|
||||
|
||||
impl StaticFilesConfig {
|
||||
/// Validates the static files configuration.
|
||||
///
|
||||
/// Returns an error if any blocks per file value is zero.
|
||||
pub fn validate(&self) -> eyre::Result<()> {
|
||||
let BlocksPerFileConfig { headers, transactions, receipts, transaction_senders } =
|
||||
self.blocks_per_file;
|
||||
eyre::ensure!(headers != Some(0), "Headers segment blocks per file must be greater than 0");
|
||||
eyre::ensure!(
|
||||
transactions != Some(0),
|
||||
"Transactions segment blocks per file must be greater than 0"
|
||||
);
|
||||
eyre::ensure!(
|
||||
receipts != Some(0),
|
||||
"Receipts segment blocks per file must be greater than 0"
|
||||
);
|
||||
eyre::ensure!(
|
||||
transaction_senders != Some(0),
|
||||
"Transaction senders segment blocks per file must be greater than 0"
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Converts the blocks per file configuration into a [`HashMap`] per segment.
|
||||
pub fn as_blocks_per_file_map(&self) -> HashMap<StaticFileSegment, u64> {
|
||||
let BlocksPerFileConfig { headers, transactions, receipts, transaction_senders } =
|
||||
self.blocks_per_file;
|
||||
|
||||
let mut map = HashMap::new();
|
||||
// Iterating over all possible segments lets us match exhaustively here, so that
// newly added segments cannot be forgotten in the future.
|
||||
for segment in StaticFileSegment::iter() {
|
||||
let blocks_per_file = match segment {
|
||||
StaticFileSegment::Headers => headers,
|
||||
StaticFileSegment::Transactions => transactions,
|
||||
StaticFileSegment::Receipts => receipts,
|
||||
StaticFileSegment::TransactionSenders => transaction_senders,
|
||||
};
|
||||
|
||||
if let Some(blocks_per_file) = blocks_per_file {
|
||||
map.insert(segment, blocks_per_file);
|
||||
}
|
||||
}
|
||||
map
|
||||
}
|
||||
}
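// Sketch (not part of the committed change): how the new config section might be consumed.
// Only explicitly configured segments end up in the per-segment map, and a zero value is
// rejected by `validate`. The function name is hypothetical.
fn static_files_config_example() -> eyre::Result<()> {
    let config = StaticFilesConfig {
        blocks_per_file: BlocksPerFileConfig { headers: Some(100_000), ..Default::default() },
    };
    config.validate()?;

    let per_segment = config.as_blocks_per_file_map();
    assert_eq!(per_segment.get(&StaticFileSegment::Headers), Some(&100_000));
    // Segments left as `None` are absent from the map and fall back to provider defaults.
    assert_eq!(per_segment.len(), 1);
    Ok(())
}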
|
||||
|
||||
/// History stage configuration.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||
@@ -451,14 +527,17 @@ impl PruneConfig {
|
||||
}
|
||||
|
||||
/// Returns whether there is any kind of receipt pruning configuration.
|
||||
pub const fn has_receipts_pruning(&self) -> bool {
|
||||
self.segments.receipts.is_some()
|
||||
pub fn has_receipts_pruning(&self) -> bool {
|
||||
self.segments.receipts.is_some() || !self.segments.receipts_log_filter.is_empty()
|
||||
}
|
||||
|
||||
/// Merges another `PruneConfig` into this one, taking values from the other config if and only
|
||||
/// if the corresponding value in this config is not set.
|
||||
/// Merges values from `other` into `self`.
|
||||
/// - `Option<PruneMode>` fields: set from `other` only if `self` is `None`.
|
||||
/// - `block_interval`: set from `other` only if `self.block_interval ==
|
||||
/// DEFAULT_BLOCK_INTERVAL`.
|
||||
/// - `merkle_changesets`: always set from `other`.
|
||||
/// - `receipts_log_filter`: set from `other` only if `self` is empty and `other` is non-empty.
|
||||
pub fn merge(&mut self, other: Self) {
|
||||
#[expect(deprecated)]
|
||||
let Self {
|
||||
block_interval,
|
||||
segments:
|
||||
@@ -470,7 +549,7 @@ impl PruneConfig {
|
||||
storage_history,
|
||||
bodies_history,
|
||||
merkle_changesets,
|
||||
receipts_log_filter: (),
|
||||
receipts_log_filter,
|
||||
},
|
||||
} = other;
|
||||
|
||||
@@ -486,8 +565,12 @@ impl PruneConfig {
|
||||
self.segments.account_history = self.segments.account_history.or(account_history);
|
||||
self.segments.storage_history = self.segments.storage_history.or(storage_history);
|
||||
self.segments.bodies_history = self.segments.bodies_history.or(bodies_history);
|
||||
// Merkle changesets is not optional, so we just replace it if provided
|
||||
// Merkle changesets is not optional; always take the value from `other`
|
||||
self.segments.merkle_changesets = merkle_changesets;
|
||||
|
||||
if self.segments.receipts_log_filter.0.is_empty() && !receipts_log_filter.0.is_empty() {
|
||||
self.segments.receipts_log_filter = receipts_log_filter;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -514,9 +597,10 @@ where
mod tests {
    use super::{Config, EXTENSION};
    use crate::PruneConfig;
    use alloy_primitives::Address;
    use reth_network_peers::TrustedPeer;
    use reth_prune_types::{PruneMode, PruneModes};
    use std::{path::Path, str::FromStr, time::Duration};
    use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig};
    use std::{collections::BTreeMap, path::Path, str::FromStr, time::Duration};

    fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) {
        let temp_dir = tempfile::tempdir().unwrap();
@@ -1005,8 +1089,10 @@ receipts = 'full'
                storage_history: Some(PruneMode::Before(5000)),
                bodies_history: None,
                merkle_changesets: PruneMode::Before(0),
                #[expect(deprecated)]
                receipts_log_filter: (),
                receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([(
                    Address::random(),
                    PruneMode::Full,
                )])),
            },
        };

@@ -1020,11 +1106,14 @@ receipts = 'full'
                storage_history: Some(PruneMode::Distance(3000)),
                bodies_history: None,
                merkle_changesets: PruneMode::Distance(10000),
                #[expect(deprecated)]
                receipts_log_filter: (),
                receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([
                    (Address::random(), PruneMode::Distance(1000)),
                    (Address::random(), PruneMode::Before(2000)),
                ])),
            },
        };

        let original_filter = config1.segments.receipts_log_filter.clone();
        config1.merge(config2);

        // Check that the configuration has been merged. Any configuration present in config1
@@ -1036,6 +1125,7 @@ receipts = 'full'
        assert_eq!(config1.segments.account_history, Some(PruneMode::Distance(2000)));
        assert_eq!(config1.segments.storage_history, Some(PruneMode::Before(5000)));
        assert_eq!(config1.segments.merkle_changesets, PruneMode::Distance(10000));
        assert_eq!(config1.segments.receipts_log_filter, original_filter);
    }

    #[test]

@@ -1,8 +1,6 @@
//! Collection of methods for block validation.

use alloy_consensus::{
    constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader as _, Transaction, EMPTY_OMMER_ROOT_HASH,
};
use alloy_consensus::{BlockHeader as _, Transaction, EMPTY_OMMER_ROOT_HASH};
use alloy_eips::{eip4844::DATA_GAS_PER_BLOB, eip7840::BlobParams};
use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks};
use reth_consensus::{ConsensusError, TxGasLimitTooHighErr};

@@ -225,13 +223,9 @@ where
/// Validates that the EIP-4844 header fields exist and conform to the spec. This ensures that:
///
/// * `blob_gas_used` exists as a header field
/// * `excess_blob_gas` exists as a header field
/// * `parent_beacon_block_root` exists as a header field
/// * `blob_gas_used` is a multiple of `DATA_GAS_PER_BLOB`
/// * `excess_blob_gas` is a multiple of `DATA_GAS_PER_BLOB`
/// * `blob_gas_used` doesn't exceed the max allowed blob gas based on the given params
///
/// Note: This does not enforce any restrictions on `blob_gas_used`
pub fn validate_4844_header_standalone<H: BlockHeader>(
    header: &H,
    blob_params: BlobParams,

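The blob-gas bullet points above come down to simple arithmetic on `DATA_GAS_PER_BLOB` (0x20000 = 131072 gas). A small sketch of the numbers involved; the six-blob ceiling is only an example stand-in for whatever the given `BlobParams` allows, not a value this diff fixes:

// Illustrative arithmetic only. DATA_GAS_PER_BLOB is the standard EIP-4844 constant;
// the six-blob ceiling is an example in place of the actual BlobParams limit.
use alloy_eips::eip4844::DATA_GAS_PER_BLOB; // 131_072 gas per blob

let max_blob_gas = 6 * DATA_GAS_PER_BLOB; // 786_432 with a six-blob limit

// Three blobs' worth of gas: a multiple of DATA_GAS_PER_BLOB and under the ceiling.
assert_eq!(3 * DATA_GAS_PER_BLOB, 393_216);
assert!(3 * DATA_GAS_PER_BLOB <= max_blob_gas);

// Seven blobs' worth exceeds the ceiling, and any value that is not a multiple of
// DATA_GAS_PER_BLOB fails the "multiple of" checks listed above.
assert!(7 * DATA_GAS_PER_BLOB > max_blob_gas);
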
@@ -264,9 +258,12 @@ pub fn validate_4844_header_standalone<H: BlockHeader>(
/// From yellow paper: extraData: An arbitrary byte array containing data relevant to this block.
/// This must be 32 bytes or fewer; formally Hx.
#[inline]
pub fn validate_header_extra_data<H: BlockHeader>(header: &H) -> Result<(), ConsensusError> {
pub fn validate_header_extra_data<H: BlockHeader>(
    header: &H,
    max_size: usize,
) -> Result<(), ConsensusError> {
    let extra_data_len = header.extra_data().len();
    if extra_data_len > MAXIMUM_EXTRA_DATA_SIZE {
    if extra_data_len > max_size {
        Err(ConsensusError::ExtraDataExceedsMax { len: extra_data_len })
    } else {
        Ok(())

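With the new `max_size` parameter the 32-byte mainnet cap is no longer hard-coded; callers pass whatever limit applies. A usage sketch (whether call sites keep sourcing the default from `MAXIMUM_EXTRA_DATA_SIZE` is my assumption, not something this hunk shows):

// Usage sketch only. MAXIMUM_EXTRA_DATA_SIZE (32) still exists in alloy_consensus;
// this diff only removes the hard-coded use of it inside the validator.
use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, Header};
use alloy_primitives::Bytes;

let header = Header { extra_data: Bytes::from(vec![0u8; 40]), ..Default::default() };

// 40 bytes of extraData breaks the historical 32-byte cap...
assert!(validate_header_extra_data(&header, MAXIMUM_EXTRA_DATA_SIZE).is_err());
// ...while a chain that allows a larger field simply passes a larger limit.
assert!(validate_header_extra_data(&header, 64).is_ok());
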
@@ -282,20 +279,28 @@ pub fn validate_against_parent_hash_number<H: BlockHeader>(
    header: &H,
    parent: &SealedHeader<H>,
) -> Result<(), ConsensusError> {
    // Parent number is consistent.
    if parent.number() + 1 != header.number() {
        return Err(ConsensusError::ParentBlockNumberMismatch {
            parent_block_number: parent.number(),
            block_number: header.number(),
        })
    }

    if parent.hash() != header.parent_hash() {
        return Err(ConsensusError::ParentHashMismatch(
            GotExpected { got: header.parent_hash(), expected: parent.hash() }.into(),
        ))
    }

    let Some(parent_number) = parent.number().checked_add(1) else {
        // parent block already reached the maximum
        return Err(ConsensusError::ParentBlockNumberMismatch {
            parent_block_number: parent.number(),
            block_number: u64::MAX,
        })
    };

    // Parent number is consistent.
    if parent_number != header.number() {
        return Err(ConsensusError::ParentBlockNumberMismatch {
            parent_block_number: parent.number(),
            block_number: header.number(),
        })
    }

    Ok(())
}

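The rewritten number check uses `checked_add` so that a parent already at `u64::MAX` yields a `ParentBlockNumberMismatch` instead of an arithmetic overflow. A quick plain-integer illustration of why the old `parent.number() + 1` form was fragile:

// Plain-integer illustration of the overflow case the checked_add guard covers.
let parent_number: u64 = u64::MAX;
assert_eq!(parent_number.checked_add(1), None); // new code: reports a consensus error
assert_eq!(parent_number.wrapping_add(1), 0);   // old `+ 1`: wraps in release, panics in debug
assert_eq!(41u64.checked_add(1), Some(42));     // normal case: falls through to the comparison
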
@@ -330,7 +335,7 @@ pub fn validate_against_parent_eip1559_base_fee<ChainSpec: EthChainSpec + Ethere
    Ok(())
}

/// Validates the timestamp against the parent to make sure it is in the past.
/// Validates that the block timestamp is greater than the parent block timestamp.
#[inline]
pub fn validate_against_parent_timestamp<H: BlockHeader>(
    header: &H,

@@ -503,4 +508,21 @@ mod tests {
            }))
        );
    }

    #[test]
    fn validate_header_extra_data_with_custom_limit() {
        // Test with default 32 bytes - should pass
        let header_32 = Header { extra_data: Bytes::from(vec![0; 32]), ..Default::default() };
        assert!(validate_header_extra_data(&header_32, 32).is_ok());

        // Test exceeding default - should fail
        let header_33 = Header { extra_data: Bytes::from(vec![0; 33]), ..Default::default() };
        assert_eq!(
            validate_header_extra_data(&header_33, 32),
            Err(ConsensusError::ExtraDataExceedsMax { len: 33 })
        );

        // Test with custom larger limit - should pass
        assert!(validate_header_extra_data(&header_33, 64).is_ok());
    }
}

@@ -16,7 +16,7 @@ use alloy_consensus::Header;
use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256};
use reth_execution_types::BlockExecutionResult;
use reth_primitives_traits::{
    constants::{MAXIMUM_GAS_LIMIT_BLOCK, MINIMUM_GAS_LIMIT},
    constants::{GAS_LIMIT_BOUND_DIVISOR, MAXIMUM_GAS_LIMIT_BLOCK, MINIMUM_GAS_LIMIT},
    transaction::error::InvalidTransactionError,
    Block, GotExpected, GotExpectedBoxed, NodePrimitives, RecoveredBlock, SealedBlock,
    SealedHeader,

@@ -349,7 +349,7 @@ pub enum ConsensusError {
    },

    /// Error when the child gas limit exceeds the maximum allowed increase.
    #[error("child gas_limit {child_gas_limit} max increase is {parent_gas_limit}/1024")]
    #[error("child gas_limit {child_gas_limit} exceeds the max allowed increase ({parent_gas_limit}/{GAS_LIMIT_BOUND_DIVISOR})")]
    GasLimitInvalidIncrease {
        /// The parent gas limit.
        parent_gas_limit: u64,

@@ -378,7 +378,7 @@ pub enum ConsensusError {
    },

    /// Error when the child gas limit exceeds the maximum allowed decrease.
    #[error("child gas_limit {child_gas_limit} max decrease is {parent_gas_limit}/1024")]
    #[error("child gas_limit {child_gas_limit} is below the max allowed decrease ({parent_gas_limit}/{GAS_LIMIT_BOUND_DIVISOR})")]
    GasLimitInvalidDecrease {
        /// The parent gas limit.
        parent_gas_limit: u64,
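The reworded messages spell out the bound: a child header may move the gas limit by less than `parent_gas_limit / GAS_LIMIT_BOUND_DIVISOR`. A worked example of the magnitude involved (the divisor value of 1024 is taken from the old "/1024" message text; the exact inclusive/exclusive boundary stays with the validator, not this sketch):

// Back-of-the-envelope numbers for the gas limit bound referenced in the errors above.
const GAS_LIMIT_BOUND_DIVISOR: u64 = 1024; // value from the previous "/1024" message text

let parent_gas_limit: u64 = 30_000_000;
let max_delta = parent_gas_limit / GAS_LIMIT_BOUND_DIVISOR; // integer division

assert_eq!(max_delta, 29_296);
// A child limit of 30_029_000 moves by 29_000 and stays within the bound;
// 30_030_000 moves by 30_000 and would trigger GasLimitInvalidIncrease.
assert!(30_029_000 - parent_gas_limit < max_delta);
assert!(30_030_000 - parent_gas_limit > max_delta);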