mirror of https://github.com/paradigmxyz/reth.git (synced 2026-02-19 03:04:27 -05:00)
chore: remove op-reth from repository (#21532)
Co-authored-by: Matthias Seitz <matthias.seitz@outlook.de>

@@ -11,7 +11,7 @@ format = "per-crate"
# Fixed groups: all always share the same version
# reth binaries share version
[[fixed]]
members = ["reth", "op-reth"]
members = ["reth"]

# Packages to ignore (internal/test-only crates)
ignore = [

@@ -1,6 +1,5 @@
---
reth: patch
op-reth: patch
---

Added automated changelog generation infrastructure using wevm/changelogs-rs with Claude Code integration. Configured per-crate changelog format with fixed version groups for reth binaries and exclusions for internal test utilities.

1 .github/CODEOWNERS vendored
@@ -19,7 +19,6 @@ crates/metrics/ @mattsse @Rjected
crates/net/ @mattsse @Rjected
crates/net/downloaders/ @Rjected
crates/node/ @mattsse @Rjected @klkvr
crates/optimism/ @mattsse @Rjected
crates/payload/ @mattsse @Rjected
crates/primitives-traits/ @Rjected @mattsse @klkvr
crates/primitives/ @Rjected @mattsse @klkvr

36 .github/assets/kurtosis_op_network_params.yaml vendored
@@ -1,36 +0,0 @@
ethereum_package:
participants:
- el_type: reth
el_extra_params:
- "--rpc.eth-proof-window=100"
cl_type: teku
network_params:
preset: minimal
genesis_delay: 5
additional_preloaded_contracts: '
{
"0x4e59b44847b379578588920cA78FbF26c0B4956C": {
"balance": "0ETH",
"code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3",
"storage": {},
"nonce": "1"
}
}'
optimism_package:
chains:
chain0:
participants:
node0:
el:
type: op-geth
cl:
type: op-node
node1:
el:
type: op-reth
image: "ghcr.io/paradigmxyz/op-reth:kurtosis-ci"
cl:
type: op-node
network_params:
holocene_time_offset: 0
isthmus_time_offset: 0

7 .github/scripts/check_rv32imac.sh vendored
@@ -29,13 +29,6 @@ crates_to_check=(
reth-ethereum-primitives
reth-ethereum-consensus
reth-stateless

## optimism
reth-optimism-chainspec
reth-optimism-forks
reth-optimism-consensus
reth-optimism-primitives
reth-optimism-evm
)

# Array to hold the results

7 .github/scripts/check_wasm.sh vendored
@@ -40,12 +40,6 @@ exclude_crates=(
reth-node-ethereum
reth-node-events
reth-node-metrics
reth-optimism-cli
reth-optimism-flashblocks
reth-optimism-node
reth-optimism-payload-builder
reth-optimism-rpc
reth-optimism-storage
reth-rpc
reth-rpc-api
reth-rpc-api-testing-util
@@ -77,7 +71,6 @@ exclude_crates=(
reth-trie-parallel # tokio
reth-trie-sparse-parallel # rayon
reth-testing-utils
reth-optimism-txpool # reth-transaction-pool
reth-era-downloader # tokio
reth-era-utils # tokio
reth-tracing-otlp

8 .github/scripts/verify_image_arch.sh vendored
@@ -2,7 +2,7 @@
# Verifies that Docker images have the expected architectures.
#
# Usage:
# ./verify_image_arch.sh <targets> <registry> <ethereum_tags> <optimism_tags>
# ./verify_image_arch.sh <targets> <registry> <ethereum_tags>
#
# Environment:
# DRY_RUN=true - Skip actual verification, just print what would be checked.
@@ -12,7 +12,6 @@ set -euo pipefail
TARGETS="${1:-}"
REGISTRY="${2:-}"
ETHEREUM_TAGS="${3:-}"
OPTIMISM_TAGS="${4:-}"
DRY_RUN="${DRY_RUN:-false}"

verify_image() {
@@ -43,17 +42,12 @@ verify_image() {

if [[ "$TARGETS" == *"nightly"* ]]; then
verify_image "${REGISTRY}/reth:nightly" amd64 arm64
verify_image "${REGISTRY}/op-reth:nightly" amd64 arm64
verify_image "${REGISTRY}/reth:nightly-profiling" amd64
verify_image "${REGISTRY}/reth:nightly-edge-profiling" amd64
verify_image "${REGISTRY}/op-reth:nightly-profiling" amd64
else
for tag in $(echo "$ETHEREUM_TAGS" | tr ',' ' '); do
verify_image "$tag" amd64 arm64
done
for tag in $(echo "$OPTIMISM_TAGS" | tr ',' ' '); do
verify_image "$tag" amd64 arm64
done
fi

echo "All image architectures verified successfully"

1 .github/workflows/compact.yml vendored
@@ -23,7 +23,6 @@ jobs:
matrix:
bin:
- cargo run --bin reth --features "dev"
- cargo run --bin op-reth --features "dev" --manifest-path crates/optimism/bin/Cargo.toml
steps:
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@stable

30 .github/workflows/docker-tag-latest.yml vendored
@@ -14,12 +14,6 @@ on:
required: false
type: boolean
default: true
tag_op_reth:
description: 'Tag op-reth image as latest'
required: false
type: boolean
default: false

env:
DOCKER_USERNAME: ${{ github.actor }}

@@ -47,27 +41,3 @@ jobs:
- name: Push reth latest tag
run: |
docker push ghcr.io/${{ github.repository_owner }}/reth:latest

tag-op-reth-latest:
name: Tag op-reth as latest
runs-on: ubuntu-24.04
if: ${{ inputs.tag_op_reth }}
permissions:
packages: write
contents: read
steps:
- name: Log in to Docker
run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username ${DOCKER_USERNAME} --password-stdin

- name: Pull op-reth release image
run: |
docker pull ghcr.io/${{ github.repository_owner }}/op-reth:${{ inputs.version }}

- name: Tag op-reth as latest
run: |
docker tag ghcr.io/${{ github.repository_owner }}/op-reth:${{ inputs.version }} ghcr.io/${{ github.repository_owner }}/op-reth:latest

- name: Push op-reth latest tag
run: |
docker push ghcr.io/${{ github.repository_owner }}/op-reth:latest

12 .github/workflows/docker.yml vendored
@@ -64,27 +64,23 @@ jobs:

if [[ "${{ github.event_name }}" == "push" ]]; then
VERSION="${GITHUB_REF#refs/tags/}"
echo "targets=ethereum optimism" >> "$GITHUB_OUTPUT"
echo "targets=ethereum" >> "$GITHUB_OUTPUT"

# Add 'latest' tag for non-RC releases
if [[ ! "$VERSION" =~ -rc ]]; then
echo "ethereum_tags=${REGISTRY}/reth:${VERSION},${REGISTRY}/reth:latest" >> "$GITHUB_OUTPUT"
echo "optimism_tags=${REGISTRY}/op-reth:${VERSION},${REGISTRY}/op-reth:latest" >> "$GITHUB_OUTPUT"
else
echo "ethereum_tags=${REGISTRY}/reth:${VERSION}" >> "$GITHUB_OUTPUT"
echo "optimism_tags=${REGISTRY}/op-reth:${VERSION}" >> "$GITHUB_OUTPUT"
fi

elif [[ "${{ github.event_name }}" == "schedule" ]] || [[ "${{ inputs.build_type }}" == "nightly" ]]; then
echo "targets=nightly" >> "$GITHUB_OUTPUT"
echo "ethereum_tags=${REGISTRY}/reth:nightly" >> "$GITHUB_OUTPUT"
echo "optimism_tags=${REGISTRY}/op-reth:nightly" >> "$GITHUB_OUTPUT"

else
# git-sha build
echo "targets=ethereum optimism" >> "$GITHUB_OUTPUT"
echo "targets=ethereum" >> "$GITHUB_OUTPUT"
echo "ethereum_tags=${REGISTRY}/reth:${{ github.sha }}" >> "$GITHUB_OUTPUT"
echo "optimism_tags=${REGISTRY}/op-reth:${{ github.sha }}" >> "$GITHUB_OUTPUT"
fi

- name: Build and push images
@@ -101,7 +97,6 @@ jobs:
push: ${{ !(github.event_name == 'workflow_dispatch' && inputs.dry_run) }}
set: |
ethereum.tags=${{ steps.params.outputs.ethereum_tags }}
optimism.tags=${{ steps.params.outputs.optimism_tags }}

- name: Verify image architectures
env:
@@ -110,8 +105,7 @@ jobs:
./.github/scripts/verify_image_arch.sh \
"${{ steps.params.outputs.targets }}" \
"ghcr.io/${{ github.repository_owner }}" \
"${{ steps.params.outputs.ethereum_tags }}" \
"${{ steps.params.outputs.optimism_tags }}"
"${{ steps.params.outputs.ethereum_tags }}"

notify:
name: Notify on failure

1 .github/workflows/e2e.yml vendored
@@ -41,7 +41,6 @@ jobs:
--exclude 'exex-subscription' \
--exclude 'reth-bench' \
--exclude 'ef-tests' \
--exclude 'op-reth' \
--exclude 'reth' \
-E 'binary(e2e_testsuite)'

13 .github/workflows/integration.yml vendored
@@ -29,11 +29,8 @@ jobs:
RUST_BACKTRACE: 1
strategy:
matrix:
network: ["ethereum", "optimism"]
network: ["ethereum"]
storage: ["stable", "edge"]
exclude:
- network: optimism
storage: edge
timeout-minutes: 60
steps:
- uses: actions/checkout@v6
@@ -46,18 +43,12 @@ jobs:
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
- if: matrix.network == 'ethereum'
name: Run tests
- name: Run tests
run: |
cargo nextest run \
--locked --features "asm-keccak ${{ matrix.network }} ${{ matrix.storage == 'edge' && 'edge' || '' }}" \
--workspace --exclude ef-tests \
-E "kind(test) and not binary(e2e_testsuite)"
- if: matrix.network == 'optimism'
name: Run tests
run: |
cargo nextest run \
--locked -p reth-optimism-node

integration-success:
name: integration success

95 .github/workflows/kurtosis-op.yml vendored
@@ -1,95 +0,0 @@
# Runs simple OP stack setup in Kurtosis

name: kurtosis-op

on:
workflow_dispatch:
schedule:
- cron: "0 */6 * * *"

push:
tags:
- "*"

env:
CARGO_TERM_COLOR: always

concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true

jobs:
prepare-reth:
uses: ./.github/workflows/prepare-reth.yml
with:
image_tag: ghcr.io/paradigmxyz/op-reth:kurtosis-ci
binary_name: op-reth
cargo_features: asm-keccak
cargo_package: crates/optimism/bin/Cargo.toml

test:
timeout-minutes: 60
strategy:
fail-fast: false
name: run kurtosis
runs-on: depot-ubuntu-latest
needs:
- prepare-reth
steps:
- uses: actions/checkout@v6
with:
fetch-depth: 0

- name: Download reth image
uses: actions/download-artifact@v7
with:
name: artifacts
path: /tmp

- name: Load Docker image
run: |
docker load -i /tmp/reth_image.tar &
wait
docker image ls -a

- name: Install Foundry
uses: foundry-rs/foundry-toolchain@v1

- name: Run kurtosis
run: |
echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
sudo apt update
sudo apt install kurtosis-cli
kurtosis engine start
kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/kurtosis_op_network_params.yaml
ENCLAVE_ID=$(curl http://127.0.0.1:9779/api/enclaves | jq --raw-output 'keys[0]')
GETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-2151908-node0-op-geth".public_ports.rpc.number')
RETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-2151908-node1-op-reth".public_ports.rpc.number')
echo "GETH_RPC=http://127.0.0.1:$GETH_PORT" >> $GITHUB_ENV
echo "RETH_RPC=http://127.0.0.1:$RETH_PORT" >> $GITHUB_ENV

- name: Assert that clients advance
run: |
for i in {1..100}; do
sleep 5
BLOCK_GETH=$(cast bn --rpc-url $GETH_RPC)
BLOCK_RETH=$(cast bn --rpc-url $RETH_RPC)

if [ $BLOCK_GETH -ge 100 ] && [ $BLOCK_RETH -ge 100 ] ; then exit 0; fi
echo "Waiting for clients to advance..., Reth: $BLOCK_RETH Geth: $BLOCK_GETH"
done
kurtosis service logs -a op-devnet op-el-2151908-2-op-reth-op-node-op-kurtosis
kurtosis service logs -a op-devnet op-cl-2151908-2-op-node-op-reth-op-kurtosis
exit 1

notify-on-error:
needs: test
if: failure()
runs-on: ubuntu-latest
steps:
- name: Slack Webhook Action
uses: rtCamp/action-slack-notify@v2
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Failed run: https://github.com/paradigmxyz/reth/actions/runs/${{ github.run_id }}"
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }}

10 .github/workflows/lint.yml vendored
@@ -119,11 +119,6 @@ jobs:
name: MSRV
runs-on: depot-ubuntu-latest
timeout-minutes: 30
strategy:
matrix:
include:
- binary: reth
- binary: op-reth
steps:
- uses: actions/checkout@v6
- uses: rui314/setup-mold@v1
@@ -134,7 +129,7 @@ jobs:
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
- run: cargo build --bin "${{ matrix.binary }}" --workspace
- run: cargo build --bin reth --workspace
env:
RUSTFLAGS: -D warnings

@@ -198,10 +193,9 @@ jobs:
with:
cache-on-failure: true
- run: cargo build --bin reth --workspace
- run: cargo build --bin op-reth --workspace
env:
RUSTFLAGS: -D warnings
- run: ./docs/cli/update.sh target/debug/reth target/debug/op-reth
- run: ./docs/cli/update.sh target/debug/reth
- name: Check docs changes
run: git diff --exit-code

2 .github/workflows/prepare-reth.yml vendored
@@ -11,7 +11,7 @@ on:
required: false
type: string
default: "reth"
description: "Binary name to build (reth or op-reth)"
description: "Binary name to build"
cargo_features:
required: false
type: string

15 .github/workflows/release.yml vendored
@@ -17,11 +17,9 @@ on:
env:
REPO_NAME: ${{ github.repository_owner }}/reth
IMAGE_NAME: ${{ github.repository_owner }}/reth
OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth
REPRODUCIBLE_IMAGE_NAME: ${{ github.repository_owner }}/reth-reproducible
CARGO_TERM_COLOR: always
DOCKER_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/reth
DOCKER_OP_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/op-reth
RUSTC_WRAPPER: "sccache"

jobs:
@@ -98,8 +96,6 @@ jobs:
build:
- command: build
binary: reth
- command: op-build
binary: op-reth
steps:
- uses: actions/checkout@v6
- uses: rui314/setup-mold@v1
@@ -248,17 +244,6 @@ jobs:
| <img src="https://www.svgrepo.com/download/511330/apple-173.svg" width="50"/> | x86_64 | [reth-${{ env.VERSION }}-x86_64-apple-darwin.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/reth-${{ env.VERSION }}-x86_64-apple-darwin.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/reth-${{ env.VERSION }}-x86_64-apple-darwin.tar.gz.asc) |
| <img src="https://www.svgrepo.com/download/511330/apple-173.svg" width="50"/> | aarch64 | [reth-${{ env.VERSION }}-aarch64-apple-darwin.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/reth-${{ env.VERSION }}-aarch64-apple-darwin.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/reth-${{ env.VERSION }}-aarch64-apple-darwin.tar.gz.asc) |
| <img src="https://www.svgrepo.com/download/473589/docker.svg" width="50"/> | Docker | [${{ env.IMAGE_NAME }}](${{ env.DOCKER_IMAGE_NAME_URL }}) | - |

### OP-Reth

| System | Architecture | Binary | PGP Signature |
|:---:|:---:|:---:|:---|
| <img src="https://www.svgrepo.com/download/473700/linux.svg" width="50"/> | x86_64 | [op-reth-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/op-reth-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/op-reth-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz.asc) |
| <img src="https://www.svgrepo.com/download/473700/linux.svg" width="50"/> | aarch64 | [op-reth-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/op-reth-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/op-reth-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz.asc) |
| <img src="https://www.svgrepo.com/download/513083/windows-174.svg" width="50"/> | x86_64 | [op-reth-${{ env.VERSION }}-x86_64-pc-windows-gnu.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/op-reth-${{ env.VERSION }}-x86_64-pc-windows-gnu.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/op-reth-${{ env.VERSION }}-x86_64-pc-windows-gnu.tar.gz.asc) |
| <img src="https://www.svgrepo.com/download/511330/apple-173.svg" width="50"/> | x86_64 | [op-reth-${{ env.VERSION }}-x86_64-apple-darwin.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/op-reth-${{ env.VERSION }}-x86_64-apple-darwin.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/op-reth-${{ env.VERSION }}-x86_64-apple-darwin.tar.gz.asc) |
| <img src="https://www.svgrepo.com/download/511330/apple-173.svg" width="50"/> | aarch64 | [op-reth-${{ env.VERSION }}-aarch64-apple-darwin.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/op-reth-${{ env.VERSION }}-aarch64-apple-darwin.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/op-reth-${{ env.VERSION }}-aarch64-apple-darwin.tar.gz.asc) |
| <img src="https://www.svgrepo.com/download/473589/docker.svg" width="50"/> | Docker | [${{ env.OP_IMAGE_NAME }}](${{ env.DOCKER_OP_IMAGE_NAME_URL }}) | - |
ENDBODY
)
assets=()

6 .github/workflows/sync-era.yml vendored
@@ -32,12 +32,6 @@ jobs:
tip: "0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4"
block: 100000
unwind-target: "0x52e0509d33a988ef807058e2980099ee3070187f7333aae12b64d4d675f34c5a"
- build: install-op
bin: op-reth
chain: base
tip: "0xbb9b85352c7ebca6ba8efc63bd66cecd038c92ec8ebd02e153a3e0b197e672b7"
block: 10000
unwind-target: "0x118a6e922a8c6cab221fc5adfe5056d2b72d58c6580e9c5629de55299e2cf8de"
steps:
- uses: actions/checkout@v6
- uses: rui314/setup-mold@v1

6 .github/workflows/sync.yml vendored
@@ -32,12 +32,6 @@ jobs:
tip: "0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4"
block: 100000
unwind-target: "0x52e0509d33a988ef807058e2980099ee3070187f7333aae12b64d4d675f34c5a"
- build: install-op
bin: op-reth
chain: base
tip: "0xbb9b85352c7ebca6ba8efc63bd66cecd038c92ec8ebd02e153a3e0b197e672b7"
block: 10000
unwind-target: "0x118a6e922a8c6cab221fc5adfe5056d2b72d58c6580e9c5629de55299e2cf8de"
steps:
- uses: actions/checkout@v6
- uses: rui314/setup-mold@v1

5 .github/workflows/unit.yml vendored
@@ -26,15 +26,12 @@ jobs:
EDGE_FEATURES: ${{ matrix.storage == 'edge' && 'edge' || '' }}
strategy:
matrix:
type: [ethereum, optimism]
type: [ethereum]
storage: [stable, edge]
include:
- type: ethereum
features: asm-keccak ethereum
exclude_args: ""
- type: optimism
features: asm-keccak
exclude_args: --exclude reth --exclude reth-bench --exclude "example-*" --exclude "reth-ethereum-*" --exclude "*-ethereum"
timeout-minutes: 30
steps:
- uses: actions/checkout@v6

36 .github/workflows/update-superchain.yml vendored
@@ -1,36 +0,0 @@
name: Update Superchain Config

on:
schedule:
- cron: '0 3 * * 0'
workflow_dispatch:

permissions:
contents: write

jobs:
update-superchain:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v6

- name: Install required tools
run: |
sudo apt-get update
sudo apt-get install -y jq zstd qpdf yq

- name: Run fetch_superchain_config.sh
run: |
chmod +x crates/optimism/chainspec/res/fetch_superchain_config.sh
cd crates/optimism/chainspec/res
./fetch_superchain_config.sh

- name: Create Pull Request
uses: peter-evans/create-pull-request@v8
with:
commit-message: "chore: update superchain config"
title: "chore: update superchain config"
body: "This PR updates the superchain configs via scheduled workflow."
branch: "ci/update-superchain-config"
delete-branch: true

20 .github/workflows/windows.yml vendored
@@ -32,23 +32,3 @@ jobs:
run: sudo apt-get install -y mingw-w64
- name: Check Reth
run: cargo check --target x86_64-pc-windows-gnu

check-op-reth:
runs-on: depot-ubuntu-latest
timeout-minutes: 60

steps:
- uses: actions/checkout@v6
- uses: rui314/setup-mold@v1
- uses: dtolnay/rust-toolchain@stable
with:
target: x86_64-pc-windows-gnu
- uses: taiki-e/install-action@cross
- uses: mozilla-actions/sccache-action@v0.0.9
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
- name: mingw-w64
run: sudo apt-get install -y mingw-w64
- name: Check OP-Reth
run: cargo check -p op-reth --target x86_64-pc-windows-gnu

@@ -24,7 +24,7 @@ Reth is a high-performance Ethereum execution client written in Rust, focusing o

- **Modularity**: Each crate can be used as a standalone library
- **Performance**: Extensive use of parallelism, memory-mapped I/O, and optimized data structures
- **Extensibility**: Traits and generic types allow for different implementations (Ethereum, Optimism, etc.)
- **Extensibility**: Traits and generic types allow for different chain implementations
- **Type Safety**: Strong typing throughout with minimal use of dynamic dispatch

## Development Workflow
@@ -179,7 +179,6 @@ Before submitting changes, ensure:
Label PRs appropriately, first check the available labels and then apply the relevant ones:
* when changes are RPC related, add A-rpc label
* when changes are docs related, add C-docs label
* when changes are optimism related (e.g. new feature or exclusive changes to crates/optimism), add A-op-reth label
* ... and so on, check the available labels for more options.
* if being tasked to open a pr, ensure that all changes are properly formatted: `cargo +nightly fmt --all`

@@ -233,7 +232,7 @@ Tests often need expansion for:
Common refactoring pattern:
- Replace concrete types with generics
- Add trait bounds for flexibility
- Enable reuse across different chain types (Ethereum, Optimism)
- Enable reuse across different chain types

#### When to Comment

618 Cargo.lock generated
@@ -404,24 +404,6 @@ dependencies = [
"serde",
]

[[package]]
name = "alloy-op-evm"
version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "874bfd6cacd006d05e70560f3af7faa670e31166203f9ba14fae4b169227360b"
dependencies = [
"alloy-consensus",
"alloy-eips",
"alloy-evm",
"alloy-op-hardforks",
"alloy-primitives",
"auto_impl",
"op-alloy",
"op-revm",
"revm",
"thiserror 2.0.18",
]

[[package]]
name = "alloy-op-hardforks"
version = "0.4.7"
@@ -432,7 +414,6 @@ dependencies = [
"alloy-hardforks",
"alloy-primitives",
"auto_impl",
"serde",
]

[[package]]
@@ -1522,7 +1503,7 @@ dependencies = [
"addr2line",
"cfg-if",
"libc",
"miniz_oxide 0.8.9",
"miniz_oxide",
"object",
"rustc-demangle",
"windows-link",
@@ -3662,48 +3643,6 @@ dependencies = [
"reth-ethereum",
]

[[package]]
name = "example-custom-node"
version = "0.0.0"
dependencies = [
"alloy-consensus",
"alloy-eips",
"alloy-evm",
"alloy-genesis",
"alloy-network",
"alloy-op-evm",
"alloy-primitives",
"alloy-rlp",
"alloy-rpc-types-engine",
"alloy-rpc-types-eth",
"alloy-serde",
"async-trait",
"derive_more",
"eyre",
"jsonrpsee",
"modular-bitfield",
"op-alloy-consensus",
"op-alloy-rpc-types",
"op-alloy-rpc-types-engine",
"op-revm",
"reth-codecs",
"reth-db-api",
"reth-engine-primitives",
"reth-ethereum",
"reth-network-peers",
"reth-node-builder",
"reth-op",
"reth-optimism-flashblocks",
"reth-optimism-forks",
"reth-payload-builder",
"reth-rpc-api",
"reth-rpc-engine-api",
"revm",
"revm-primitives",
"serde",
"thiserror 2.0.18",
]

[[package]]
name = "example-custom-node-components"
version = "0.0.0"
@@ -3760,30 +3699,6 @@ dependencies = [
"reth-ethereum",
]

[[package]]
name = "example-engine-api-access"
version = "0.0.0"
dependencies = [
"reth-db",
"reth-node-builder",
"reth-optimism-chainspec",
"reth-optimism-node",
"tokio",
]

[[package]]
name = "example-exex-hello-world"
version = "0.0.0"
dependencies = [
"clap",
"eyre",
"futures",
"reth-ethereum",
"reth-op",
"reth-tracing",
"tokio",
]

[[package]]
name = "example-exex-test"
version = "0.0.0"
@@ -3875,14 +3790,6 @@ dependencies = [
"reth-ethereum",
]

[[package]]
name = "example-op-db-access"
version = "0.0.0"
dependencies = [
"eyre",
"reth-op",
]

[[package]]
name = "example-polygon-p2p"
version = "0.0.0"
@@ -4104,7 +4011,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b375d6465b98090a5f25b1c7703f3859783755aa9a80433b36e0379a3ec2f369"
dependencies = [
"crc32fast",
"miniz_oxide 0.8.9",
"miniz_oxide",
]

[[package]]
@@ -5965,16 +5872,6 @@ dependencies = [
"simd-adler32",
]

[[package]]
name = "miniz_oxide"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5faa9f23e86bd5768d76def086192ff5f869fb088da12a976ea21e9796b975f6"
dependencies = [
"adler2",
"serde",
]

[[package]]
name = "mio"
version = "1.1.1"
@@ -6386,12 +6283,6 @@ dependencies = [
"thiserror 2.0.18",
]

[[package]]
name = "op-alloy-flz"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a79f352fc3893dcd670172e615afef993a41798a1d3fc0db88a3e60ef2e70ecc"

[[package]]
name = "op-alloy-network"
version = "0.23.1"
@@ -6423,16 +6314,6 @@ dependencies = [
"op-alloy-rpc-types-engine",
]

[[package]]
name = "op-alloy-rpc-jsonrpsee"
version = "0.23.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1c820ef9c802ebc732281a940bfb6ac2345af4d9fff041cbb64b4b546676686"
dependencies = [
"alloy-primitives",
"jsonrpsee",
]

[[package]]
name = "op-alloy-rpc-types"
version = "0.23.1"
@@ -6445,7 +6326,6 @@ dependencies = [
"alloy-primitives",
"alloy-rpc-types-eth",
"alloy-serde",
"arbitrary",
"derive_more",
"op-alloy-consensus",
"serde",
@@ -6465,7 +6345,6 @@ dependencies = [
"alloy-rlp",
"alloy-rpc-types-engine",
"alloy-serde",
"arbitrary",
"derive_more",
"ethereum_ssz 0.9.1",
"ethereum_ssz_derive 0.9.1",
@@ -6476,24 +6355,6 @@ dependencies = [
"thiserror 2.0.18",
]

[[package]]
name = "op-reth"
version = "1.10.2"
dependencies = [
"clap",
"reth-cli-util",
"reth-optimism-chainspec",
"reth-optimism-cli",
"reth-optimism-consensus",
"reth-optimism-evm",
"reth-optimism-forks",
"reth-optimism-node",
"reth-optimism-payload-builder",
"reth-optimism-primitives",
"reth-optimism-rpc",
"tracing",
]

[[package]]
name = "op-revm"
version = "15.0.0"
@@ -9621,470 +9482,6 @@ dependencies = [
"reth-primitives-traits",
]

[[package]]
name = "reth-op"
version = "1.10.2"
dependencies = [
"reth-chainspec",
"reth-cli-util",
"reth-codecs",
"reth-consensus",
"reth-consensus-common",
"reth-db",
"reth-engine-local",
"reth-eth-wire",
"reth-evm",
"reth-exex",
"reth-network",
"reth-network-api",
"reth-node-api",
"reth-node-builder",
"reth-node-core",
"reth-optimism-chainspec",
"reth-optimism-cli",
"reth-optimism-consensus",
"reth-optimism-evm",
"reth-optimism-node",
"reth-optimism-primitives",
"reth-optimism-rpc",
"reth-primitives-traits",
"reth-provider",
"reth-revm",
"reth-rpc",
"reth-rpc-api",
"reth-rpc-builder",
"reth-rpc-eth-types",
"reth-storage-api",
"reth-tasks",
"reth-transaction-pool",
"reth-trie",
"reth-trie-db",
]

[[package]]
name = "reth-optimism-chainspec"
version = "1.10.2"
dependencies = [
"alloy-chains",
"alloy-consensus",
"alloy-eips",
"alloy-genesis",
"alloy-hardforks",
"alloy-op-hardforks",
"alloy-primitives",
"derive_more",
"miniz_oxide 0.9.0",
"op-alloy-consensus",
"op-alloy-rpc-types",
"paste",
"reth-chainspec",
"reth-ethereum-forks",
"reth-network-peers",
"reth-optimism-forks",
"reth-optimism-primitives",
"reth-primitives-traits",
"serde",
"serde_json",
"tar-no-std",
"thiserror 2.0.18",
]

[[package]]
name = "reth-optimism-cli"
version = "1.10.2"
dependencies = [
"alloy-consensus",
"alloy-eips",
"alloy-primitives",
"alloy-rlp",
"clap",
"derive_more",
"eyre",
"futures-util",
"op-alloy-consensus",
"proptest",
"reth-chainspec",
"reth-cli",
"reth-cli-commands",
"reth-cli-runner",
"reth-consensus",
"reth-db",
"reth-db-api",
"reth-db-common",
"reth-downloaders",
"reth-execution-types",
"reth-fs-util",
"reth-node-builder",
"reth-node-core",
"reth-node-events",
"reth-node-metrics",
"reth-optimism-chainspec",
"reth-optimism-consensus",
"reth-optimism-evm",
"reth-optimism-node",
"reth-optimism-primitives",
"reth-primitives-traits",
"reth-provider",
"reth-prune",
"reth-rpc-server-types",
"reth-stages",
"reth-static-file",
"reth-static-file-types",
"reth-tracing",
"serde",
"tempfile",
"tokio",
"tokio-util",
"tracing",
]

[[package]]
name = "reth-optimism-consensus"
version = "1.10.2"
dependencies = [
"alloy-chains",
"alloy-consensus",
"alloy-eips",
"alloy-primitives",
"alloy-trie",
"op-alloy-consensus",
"reth-chainspec",
"reth-consensus",
"reth-consensus-common",
"reth-db-common",
"reth-execution-types",
"reth-optimism-chainspec",
"reth-optimism-forks",
"reth-optimism-node",
"reth-optimism-primitives",
"reth-primitives-traits",
"reth-provider",
"reth-revm",
"reth-storage-api",
"reth-storage-errors",
"reth-trie",
"reth-trie-common",
"revm",
"thiserror 2.0.18",
"tracing",
]

[[package]]
name = "reth-optimism-evm"
version = "1.10.2"
dependencies = [
"alloy-consensus",
"alloy-eips",
"alloy-evm",
"alloy-genesis",
"alloy-op-evm",
"alloy-primitives",
"op-alloy-consensus",
"op-alloy-rpc-types-engine",
"op-revm",
"reth-chainspec",
"reth-evm",
"reth-execution-errors",
"reth-execution-types",
"reth-optimism-chainspec",
"reth-optimism-consensus",
"reth-optimism-forks",
"reth-optimism-primitives",
"reth-primitives-traits",
"reth-revm",
"reth-rpc-eth-api",
"reth-storage-errors",
"revm",
"thiserror 2.0.18",
]

[[package]]
name = "reth-optimism-flashblocks"
version = "1.10.2"
dependencies = [
"alloy-consensus",
"alloy-eips",
"alloy-primitives",
"alloy-rpc-types-engine",
"brotli",
"derive_more",
"eyre",
"futures-util",
"metrics",
"op-alloy-consensus",
"op-alloy-rpc-types-engine",
"reth-chain-state",
"reth-engine-primitives",
"reth-errors",
"reth-evm",
"reth-execution-types",
"reth-metrics",
"reth-optimism-payload-builder",
"reth-optimism-primitives",
"reth-payload-primitives",
"reth-primitives-traits",
"reth-revm",
"reth-rpc-eth-types",
"reth-storage-api",
"reth-tasks",
"ringbuffer",
"serde_json",
"test-case",
"tokio",
"tokio-tungstenite 0.28.0",
"tracing",
"url",
]

[[package]]
name = "reth-optimism-forks"
version = "1.10.2"
dependencies = [
"alloy-op-hardforks",
"alloy-primitives",
"once_cell",
"reth-ethereum-forks",
]

[[package]]
name = "reth-optimism-node"
version = "1.10.2"
dependencies = [
"alloy-consensus",
"alloy-genesis",
"alloy-network",
"alloy-op-hardforks",
"alloy-primitives",
"alloy-rpc-types-engine",
"alloy-rpc-types-eth",
"clap",
"eyre",
"futures",
"op-alloy-consensus",
"op-alloy-network",
"op-alloy-rpc-types-engine",
"op-revm",
"reth-chainspec",
"reth-consensus",
"reth-db",
"reth-e2e-test-utils",
"reth-engine-local",
"reth-evm",
"reth-network",
"reth-node-api",
"reth-node-builder",
"reth-node-core",
"reth-optimism-chainspec",
"reth-optimism-consensus",
"reth-optimism-evm",
"reth-optimism-forks",
"reth-optimism-node",
"reth-optimism-payload-builder",
"reth-optimism-primitives",
"reth-optimism-rpc",
"reth-optimism-storage",
"reth-optimism-txpool",
"reth-payload-builder",
"reth-payload-util",
"reth-primitives-traits",
"reth-provider",
"reth-revm",
"reth-rpc",
"reth-rpc-api",
"reth-rpc-engine-api",
"reth-rpc-eth-types",
"reth-rpc-server-types",
"reth-stages-types",
"reth-tasks",
"reth-tracing",
"reth-transaction-pool",
"reth-trie-common",
"reth-trie-db",
"revm",
"serde",
"serde_json",
"tokio",
"url",
]

[[package]]
name = "reth-optimism-payload-builder"
version = "1.10.2"
dependencies = [
"alloy-consensus",
"alloy-eips",
"alloy-evm",
"alloy-primitives",
"alloy-rlp",
"alloy-rpc-types-debug",
"alloy-rpc-types-engine",
"derive_more",
"either",
"op-alloy-consensus",
"op-alloy-rpc-types-engine",
"reth-basic-payload-builder",
"reth-chainspec",
"reth-evm",
"reth-execution-types",
"reth-optimism-evm",
"reth-optimism-forks",
"reth-optimism-primitives",
"reth-optimism-txpool",
"reth-payload-builder",
"reth-payload-builder-primitives",
"reth-payload-primitives",
"reth-payload-util",
"reth-payload-validator",
"reth-primitives-traits",
"reth-revm",
"reth-storage-api",
"reth-transaction-pool",
"revm",
"serde",
"sha2",
"thiserror 2.0.18",
"tracing",
]

[[package]]
name = "reth-optimism-primitives"
version = "1.10.2"
dependencies = [
"alloy-consensus",
"alloy-eips",
"alloy-primitives",
"alloy-rlp",
"arbitrary",
"bincode 1.3.3",
"bytes",
"modular-bitfield",
"op-alloy-consensus",
"proptest",
"proptest-arbitrary-interop",
"rand 0.8.5",
"rand 0.9.2",
"reth-codecs",
"reth-primitives-traits",
"reth-zstd-compressors",
"rstest",
"secp256k1 0.30.0",
"serde",
"serde_json",
"serde_with",
]

[[package]]
name = "reth-optimism-rpc"
version = "1.10.2"
dependencies = [
"alloy-consensus",
"alloy-eips",
"alloy-json-rpc",
"alloy-op-hardforks",
"alloy-primitives",
"alloy-rpc-client",
"alloy-rpc-types-debug",
"alloy-rpc-types-engine",
"alloy-rpc-types-eth",
"alloy-transport",
"alloy-transport-http",
"async-trait",
"derive_more",
"eyre",
"futures",
"jsonrpsee",
"jsonrpsee-core",
"jsonrpsee-types",
"metrics",
"op-alloy-consensus",
"op-alloy-network",
"op-alloy-rpc-jsonrpsee",
"op-alloy-rpc-types",
"op-alloy-rpc-types-engine",
"op-revm",
"reqwest",
"reth-chain-state",
"reth-chainspec",
"reth-evm",
"reth-metrics",
"reth-node-api",
"reth-node-builder",
"reth-optimism-chainspec",
"reth-optimism-evm",
"reth-optimism-flashblocks",
"reth-optimism-forks",
"reth-optimism-payload-builder",
"reth-optimism-primitives",
"reth-optimism-txpool",
"reth-primitives-traits",
"reth-rpc",
"reth-rpc-api",
"reth-rpc-engine-api",
"reth-rpc-eth-api",
"reth-rpc-eth-types",
"reth-rpc-server-types",
"reth-storage-api",
"reth-tasks",
"reth-transaction-pool",
"revm",
"serde_json",
"thiserror 2.0.18",
"tokio",
"tokio-stream",
"tower",
"tracing",
]

[[package]]
name = "reth-optimism-storage"
version = "1.10.2"
dependencies = [
"alloy-consensus",
"reth-codecs",
"reth-optimism-primitives",
"reth-prune-types",
"reth-stages-types",
"reth-storage-api",
]

[[package]]
name = "reth-optimism-txpool"
version = "1.10.2"
dependencies = [
"alloy-consensus",
"alloy-eips",
"alloy-json-rpc",
"alloy-primitives",
"alloy-rpc-client",
"alloy-rpc-types-eth",
"alloy-serde",
"c-kzg",
"derive_more",
"futures-util",
"metrics",
"op-alloy-consensus",
"op-alloy-flz",
"op-alloy-rpc-types",
"op-revm",
"parking_lot",
"reth-chain-state",
"reth-chainspec",
"reth-evm",
"reth-metrics",
"reth-optimism-chainspec",
"reth-optimism-evm",
"reth-optimism-forks",
"reth-optimism-primitives",
"reth-primitives-traits",
"reth-provider",
"reth-storage-api",
"reth-transaction-pool",
"serde",
"thiserror 2.0.18",
"tokio",
"tracing",
]

[[package]]
name = "reth-payload-builder"
version = "1.10.2"
@@ -12628,17 +12025,6 @@ dependencies = [
"xattr",
]

[[package]]
name = "tar-no-std"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "715f9a4586706a61c571cb5ee1c3ac2bbb2cf63e15bce772307b95befef5f5ee"
dependencies = [
"bitflags 2.10.0",
"log",
"num-traits",
]

[[package]]
name = "tempfile"
version = "3.24.0"

32 Cargo.toml
@@ -72,24 +72,6 @@ members = [
"crates/node/events/",
"crates/node/metrics",
"crates/node/types",
"crates/optimism/bin",
"crates/optimism/chainspec",
"crates/optimism/cli",
"crates/optimism/consensus",
"crates/optimism/evm/",
"crates/optimism/examples/custom-node",
"crates/optimism/examples/engine-api-access",
"crates/optimism/examples/exex-hello-world",
"crates/optimism/examples/op-db-access",
"crates/optimism/flashblocks/",
"crates/optimism/hardforks/",
"crates/optimism/node/",
"crates/optimism/payload/",
"crates/optimism/primitives/",
"crates/optimism/reth/",
"crates/optimism/rpc/",
"crates/optimism/storage",
"crates/optimism/txpool/",
"crates/payload/basic/",
"crates/payload/builder/",
"crates/payload/builder-primitives/",
@@ -336,7 +318,6 @@ incremental = false

[workspace.dependencies]
# reth
op-reth = { path = "crates/optimism/bin" }
reth = { path = "bin/reth" }
reth-storage-rpc-provider = { path = "crates/storage/rpc-provider" }
reth-basic-payload-builder = { path = "crates/payload/basic" }
@@ -385,7 +366,6 @@ reth-ethereum = { path = "crates/ethereum/reth" }
reth-etl = { path = "crates/etl" }
reth-evm = { path = "crates/evm/evm", default-features = false }
reth-evm-ethereum = { path = "crates/ethereum/evm", default-features = false }
reth-optimism-evm = { path = "crates/optimism/evm", default-features = false }
reth-execution-errors = { path = "crates/evm/execution-errors", default-features = false }
reth-execution-types = { path = "crates/evm/execution-types", default-features = false }
reth-exex = { path = "crates/exex/exex" }
@@ -412,18 +392,7 @@ reth-node-ethereum = { path = "crates/ethereum/node" }
reth-node-ethstats = { path = "crates/node/ethstats" }
reth-node-events = { path = "crates/node/events" }
reth-node-metrics = { path = "crates/node/metrics" }
reth-optimism-node = { path = "crates/optimism/node" }
reth-node-types = { path = "crates/node/types" }
reth-op = { path = "crates/optimism/reth", default-features = false }
reth-optimism-chainspec = { path = "crates/optimism/chainspec", default-features = false }
reth-optimism-cli = { path = "crates/optimism/cli", default-features = false }
reth-optimism-consensus = { path = "crates/optimism/consensus", default-features = false }
reth-optimism-forks = { path = "crates/optimism/hardforks", default-features = false }
reth-optimism-payload-builder = { path = "crates/optimism/payload" }
reth-optimism-primitives = { path = "crates/optimism/primitives", default-features = false }
reth-optimism-rpc = { path = "crates/optimism/rpc" }
reth-optimism-storage = { path = "crates/optimism/storage" }
reth-optimism-txpool = { path = "crates/optimism/txpool" }
reth-payload-builder = { path = "crates/payload/builder" }
reth-payload-builder-primitives = { path = "crates/payload/builder-primitives" }
reth-payload-primitives = { path = "crates/payload/primitives" }
@@ -444,7 +413,6 @@ reth-rpc-engine-api = { path = "crates/rpc/rpc-engine-api" }
reth-rpc-eth-api = { path = "crates/rpc/rpc-eth-api" }
reth-rpc-eth-types = { path = "crates/rpc/rpc-eth-types", default-features = false }
reth-rpc-layer = { path = "crates/rpc/rpc-layer" }
reth-optimism-flashblocks = { path = "crates/optimism/flashblocks" }
reth-rpc-server-types = { path = "crates/rpc/rpc-server-types" }
reth-rpc-convert = { path = "crates/rpc/rpc-convert" }
reth-stages = { path = "crates/stages/stages" }

@@ -1,9 +1,8 @@
# syntax=docker/dockerfile:1

# Unified Dockerfile for reth and op-reth, optimized for Depot builds
# Dockerfile for reth, optimized for Depot builds
# Usage:
# reth: --build-arg BINARY=reth
# op-reth: --build-arg BINARY=op-reth --build-arg MANIFEST_PATH=crates/optimism/bin
# reth: --build-arg BINARY=reth

FROM rust:1 AS builder
WORKDIR /app
@@ -19,7 +18,7 @@ ENV RUSTC_WRAPPER=sccache
ENV SCCACHE_DIR=/sccache
ENV SCCACHE_WEBDAV_ENDPOINT=https://cache.depot.dev

# Binary to build (reth or op-reth)
# Binary to build
ARG BINARY=reth

# Manifest path for the binary

46 DockerfileOp
@@ -1,46 +0,0 @@
FROM lukemathwalker/cargo-chef:latest-rust-1 AS chef
WORKDIR /app

LABEL org.opencontainers.image.source=https://github.com/paradigmxyz/reth
LABEL org.opencontainers.image.licenses="MIT OR Apache-2.0"

RUN apt-get update && apt-get -y upgrade && apt-get install -y libclang-dev pkg-config

# Builds a cargo-chef plan
FROM chef AS planner
COPY . .
RUN cargo chef prepare --recipe-path recipe.json

FROM chef AS builder
COPY --from=planner /app/recipe.json recipe.json

ARG BUILD_PROFILE=maxperf
ENV BUILD_PROFILE=$BUILD_PROFILE

ARG RUSTFLAGS=""
ENV RUSTFLAGS="$RUSTFLAGS"

ARG FEATURES=""
ENV FEATURES=$FEATURES

RUN cargo chef cook --profile $BUILD_PROFILE --features "$FEATURES" --recipe-path recipe.json --manifest-path /app/crates/optimism/bin/Cargo.toml

COPY . .
RUN cargo build --profile $BUILD_PROFILE --features "$FEATURES" --bin op-reth --manifest-path /app/crates/optimism/bin/Cargo.toml

RUN ls -la /app/target/$BUILD_PROFILE/op-reth
RUN cp /app/target/$BUILD_PROFILE/op-reth /app/op-reth

FROM ubuntu AS runtime

RUN apt-get update && \
apt-get install -y ca-certificates libssl-dev pkg-config strace && \
rm -rf /var/lib/apt/lists/*

WORKDIR /app
COPY --from=builder /app/op-reth /usr/local/bin/
RUN chmod +x /usr/local/bin/op-reth
COPY LICENSE-* ./

EXPOSE 30303 30303/udp 9001 8545 8546 7545 8551
ENTRYPOINT ["/usr/local/bin/op-reth"]

@@ -24,18 +24,3 @@
parameters.
- Update version specific validation checks in the `EngineValidator` trait.

## Op-Reth changes

### Updates to the engine API

Opstack tries to stay as close to the L1 engine API as possible. Isthmus (the Prague equivalent) introduced the first deviation from the L1 engine API with an additional field in the `ExecutionPayload`. For this reason the OP engine API has its own server trait, `OpEngineApi`. Adding a new versioned endpoint requires the same changes as for L1, just with the dedicated OP types.
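
In practice that pattern looks roughly like the sketch below. This is a hypothetical illustration only: the struct, trait, and method names are invented for the example and are not the actual `OpEngineApi` definitions; it merely mirrors the idea of a dedicated OP server trait whose versioned methods take OP-specific payload types.

```rust
// Hypothetical sketch -- not the real `OpEngineApi`; names are illustrative.

/// OP execution payload: the same shape as the L1 payload, plus the extra
/// field that Isthmus introduced (simplified here to a single 32-byte root).
pub struct OpExecutionPayloadSketch {
    pub block_hash: [u8; 32],
    pub transactions: Vec<Vec<u8>>,
    /// The Isthmus-specific deviation from the L1 `ExecutionPayload`.
    pub withdrawals_root: [u8; 32],
}

/// Simplified result type for the sketch.
pub enum PayloadStatus {
    Valid,
    Invalid(String),
}

/// A dedicated OP engine API server trait. Adding a new versioned endpoint
/// means adding one more method here, exactly as on the L1 trait, but with
/// the OP payload types (v4, v5, ... would each get their own method).
pub trait OpEngineApiSketch {
    fn new_payload(&self, payload: OpExecutionPayloadSketch) -> PayloadStatus;
}
```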

### Hardforks

Opstack has dedicated hardforks (e.g. Isthmus) that can be entirely opstack-specific (e.g. Holocene) or can be the equivalent of an L1 hardfork. Since opstack sticks to the L1 header primitive, a new L1-equivalent hardfork also requires the equivalent new consensus checks. For this reason, each such `OpHardfork` must be mapped to an L1 `EthereumHardfork`; for example, `OpHardfork::Isthmus` corresponds to `EthereumHardfork::Prague`. These mappings must be defined in the `ChainSpec`.
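
The mapping itself can be pictured with the minimal sketch below. The enum definitions and the helper are hypothetical (reth's real `OpHardfork`, `EthereumHardfork`, and chainspec APIs are richer); the sketch only illustrates the OP-to-L1 correspondence described above.

```rust
// Hypothetical sketch -- not reth's actual types or chainspec API.

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum EthereumHardfork {
    Shanghai,
    Cancun,
    Prague,
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum OpHardfork {
    Canyon,   // Shanghai equivalent
    Ecotone,  // Cancun equivalent
    Holocene, // opstack-only, no L1 equivalent
    Isthmus,  // Prague equivalent
}

/// Map an OP hardfork to the L1 hardfork whose consensus rules it inherits.
/// Opstack-only forks (e.g. Holocene) have no L1 equivalent, hence `Option`.
fn l1_equivalent(fork: OpHardfork) -> Option<EthereumHardfork> {
    match fork {
        OpHardfork::Canyon => Some(EthereumHardfork::Shanghai),
        OpHardfork::Ecotone => Some(EthereumHardfork::Cancun),
        OpHardfork::Holocene => None,
        OpHardfork::Isthmus => Some(EthereumHardfork::Prague),
    }
}

fn main() {
    // Isthmus activates the Prague-equivalent consensus checks.
    assert_eq!(l1_equivalent(OpHardfork::Isthmus), Some(EthereumHardfork::Prague));
}
```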

53 Makefile
@@ -50,13 +50,6 @@ install: ## Build and install the reth binary under `$(CARGO_HOME)/bin`.
--profile "$(PROFILE)" \
$(CARGO_INSTALL_EXTRA_FLAGS)

.PHONY: install-op
install-op: ## Build and install the op-reth binary under `$(CARGO_HOME)/bin`.
cargo install --path crates/optimism/bin --bin op-reth --force --locked \
--features "$(FEATURES)" \
--profile "$(PROFILE)" \
$(CARGO_INSTALL_EXTRA_FLAGS)

.PHONY: build
build: ## Build the reth binary into `target` directory.
cargo build --bin reth --features "$(FEATURES)" --profile "$(PROFILE)"
@@ -84,21 +77,10 @@ build-%-reproducible:
.PHONY: build-debug
build-debug: ## Build the reth binary into `target/debug` directory.
cargo build --bin reth --features "$(FEATURES)"
.PHONY: build-debug-op
build-debug-op: ## Build the op-reth binary into `target/debug` directory.
cargo build --bin op-reth --features "$(FEATURES)" --manifest-path crates/optimism/bin/Cargo.toml

.PHONY: build-op
build-op: ## Build the op-reth binary into `target` directory.
cargo build --bin op-reth --features "$(FEATURES)" --profile "$(PROFILE)" --manifest-path crates/optimism/bin/Cargo.toml

# Builds the reth binary natively.
build-native-%:
cargo build --bin reth --target $* --features "$(FEATURES)" --profile "$(PROFILE)"

op-build-native-%:
cargo build --bin op-reth --target $* --features "$(FEATURES)" --profile "$(PROFILE)" --manifest-path crates/optimism/bin/Cargo.toml

# The following commands use `cross` to build a cross-compile.
#
# These commands require that:
@@ -115,11 +97,9 @@ op-build-native-%:
# on other systems. JEMALLOC_SYS_WITH_LG_PAGE=16 tells jemalloc to use 64-KiB
# pages. See: https://github.com/paradigmxyz/reth/issues/6742
build-aarch64-unknown-linux-gnu: export JEMALLOC_SYS_WITH_LG_PAGE=16
op-build-aarch64-unknown-linux-gnu: export JEMALLOC_SYS_WITH_LG_PAGE=16

# No jemalloc on Windows
build-x86_64-pc-windows-gnu: FEATURES := $(filter-out jemalloc jemalloc-prof,$(FEATURES))
op-build-x86_64-pc-windows-gnu: FEATURES := $(filter-out jemalloc jemalloc-prof,$(FEATURES))

# Note: The additional rustc compiler flags are for intrinsics needed by MDBX.
# See: https://github.com/cross-rs/cross/wiki/FAQ#undefined-reference-with-build-std
@@ -127,10 +107,6 @@ build-%:
RUSTFLAGS="-C link-arg=-lgcc -Clink-arg=-static-libgcc" \
cross build --bin reth --target $* --features "$(FEATURES)" --profile "$(PROFILE)"

op-build-%:
RUSTFLAGS="-C link-arg=-lgcc -Clink-arg=-static-libgcc" \
cross build --bin op-reth --target $* --features "$(FEATURES)" --profile "$(PROFILE)" --manifest-path crates/optimism/bin/Cargo.toml

# Unfortunately we can't easily use cross to build for Darwin because of licensing issues.
# If we wanted to, we would need to build a custom Docker image with the SDK available.
#
@@ -141,11 +117,6 @@ build-x86_64-apple-darwin:
$(MAKE) build-native-x86_64-apple-darwin
build-aarch64-apple-darwin:
$(MAKE) build-native-aarch64-apple-darwin
op-build-x86_64-apple-darwin:
$(MAKE) op-build-native-x86_64-apple-darwin
op-build-aarch64-apple-darwin:
$(MAKE) op-build-native-aarch64-apple-darwin

build-deb-%:
@case "$*" in \
x86_64-unknown-linux-gnu|aarch64-unknown-linux-gnu|riscv64gc-unknown-linux-gnu) \
@@ -266,26 +237,18 @@ db-tools: ## Compile MDBX debugging tools.
@echo "Run \"$(DB_TOOLS_DIR)/mdbx_chk\" for the MDBX db file integrity check."

.PHONY: update-book-cli
update-book-cli: build-debug build-debug-op## Update book cli documentation.
update-book-cli: build-debug ## Update book cli documentation.
@echo "Updating book cli doc..."
@./docs/cli/update.sh $(CARGO_TARGET_DIR)/debug/reth $(CARGO_TARGET_DIR)/debug/op-reth
@./docs/cli/update.sh $(CARGO_TARGET_DIR)/debug/reth

.PHONY: profiling
profiling: ## Builds `reth` with optimisations, but also symbols.
RUSTFLAGS="-C target-cpu=native" cargo build --profile profiling --features jemalloc,asm-keccak

.PHONY: profiling-op
profiling-op: ## Builds `op-reth` with optimisations, but also symbols.
RUSTFLAGS="-C target-cpu=native" cargo build --profile profiling --features jemalloc,asm-keccak --bin op-reth --manifest-path crates/optimism/bin/Cargo.toml

.PHONY: maxperf
maxperf: ## Builds `reth` with the most aggressive optimisations.
RUSTFLAGS="-C target-cpu=native" cargo build --profile maxperf --features jemalloc,asm-keccak

.PHONY: maxperf-op
maxperf-op: ## Builds `op-reth` with the most aggressive optimisations.
RUSTFLAGS="-C target-cpu=native" cargo build --profile maxperf --features jemalloc,asm-keccak --bin op-reth --manifest-path crates/optimism/bin/Cargo.toml

.PHONY: maxperf-no-asm
maxperf-no-asm: ## Builds `reth` with the most aggressive optimisations, minus the "asm-keccak" feature.
RUSTFLAGS="-C target-cpu=native" cargo build --profile maxperf --features jemalloc
@@ -304,17 +267,6 @@ clippy:
--all-features \
-- -D warnings

clippy-op-dev:
cargo +nightly clippy \
--bin op-reth \
--workspace \
--lib \
--examples \
--tests \
--benches \
--locked \
--all-features

lint-typos: ensure-typos
typos

@@ -379,7 +331,6 @@ rustdocs: ## Runs `cargo docs` to generate the Rust documents in the `target/doc
cargo-test:
cargo test \
--workspace \
--bin "op-reth" \
--lib --examples \
--tests \
--benches \
37
README.md
@@ -18,6 +18,11 @@
[gh-lint]: https://github.com/paradigmxyz/reth/actions/workflows/lint.yml
[tg-badge]: https://img.shields.io/endpoint?color=neon&logo=telegram&label=chat&url=https%3A%2F%2Ftg.sumanjay.workers.dev%2Fparadigm%5Freth

> **Note: OP-Reth has moved**
>
> The Optimism (op-reth) crates have been moved to [ethereum-optimism/optimism](https://github.com/ethereum-optimism/optimism).
> Git contribution history has been preserved. If you are looking for op-reth, please see the new repository.

## What is Reth?

Reth (short for Rust Ethereum, [pronunciation](https://x.com/kelvinfichter/status/1597653609411268608)) is a new Ethereum full node implementation that is focused on being user-friendly, highly modular, as well as being fast and efficient. Reth is an Execution Layer (EL) and is compatible with all Ethereum Consensus Layer (CL) implementations that support the [Engine API](https://github.com/ethereum/execution-apis/tree/a0d03086564ab1838b462befbc083f873dcf0c0f/src/engine). It is originally built and driven forward by [Paradigm](https://paradigm.xyz/), and is licensed under the Apache and MIT licenses.
@@ -32,7 +37,7 @@ More concretely, our goals are:
2. **Performance**: Reth aims to be fast, so we use Rust and the [Erigon staged-sync](https://erigon.substack.com/p/erigon-stage-sync-and-control-flows) node architecture. We also use our Ethereum libraries (including [Alloy](https://github.com/alloy-rs/alloy/) and [revm](https://github.com/bluealloy/revm/)) which we've battle-tested and optimized via [Foundry](https://github.com/foundry-rs/foundry/).
3. **Free for anyone to use any way they want**: Reth is free open source software, built for the community, by the community. By licensing the software under the Apache/MIT license, we want developers to use it without being bound by business licenses, or having to think about the implications of GPL-like licenses.
4. **Client Diversity**: The Ethereum protocol becomes more antifragile when no node implementation dominates. This ensures that if there's a software bug, the network does not finalize a bad block. By building a new client, we hope to contribute to Ethereum's antifragility.
5. **Support as many EVM chains as possible**: We aspire that Reth can full-sync not only Ethereum, but also other chains like Optimism, Polygon, BNB Smart Chain, and more. If you're working on any of these projects, please reach out.
5. **Support as many EVM chains as possible**: We aspire that Reth can full-sync not only Ethereum, but also other chains like Optimism, Polygon, BNB Smart Chain, and more. If you're working on any of these projects, please reach out. Note: OP-Reth has moved to [ethereum-optimism/optimism](https://github.com/ethereum-optimism/optimism).
6. **Configurability**: We want to solve for node operators that care about fast historical queries, but also for hobbyists who cannot operate on large hardware. We also want to support teams and individuals who want both sync from genesis and via "fast sync". We envision that Reth will be configurable enough and provide configurable "profiles" for the tradeoffs that each team faces.

## Status
@@ -41,13 +46,13 @@ Reth is production ready, and suitable for usage in mission-critical environment

More historical context below:

- We released 1.0 "production-ready" stable Reth in June 2024.
- Reth completed an audit with [Sigma Prime](https://sigmaprime.io/), the developers of [Lighthouse](https://github.com/sigp/lighthouse), the Rust Consensus Layer implementation. Find it [here](./audit/sigma_prime_audit_v2.pdf).
- Revm (the EVM used in Reth) underwent an audit with [Guido Vranken](https://x.com/guidovranken) (#1 [Ethereum Bug Bounty](https://ethereum.org/en/bug-bounty)). We will publish the results soon.
- We released multiple iterative beta versions, up to [beta.9](https://github.com/paradigmxyz/reth/releases/tag/v0.2.0-beta.9) on Monday June 3, 2024, the last beta release.
- We released [beta](https://github.com/paradigmxyz/reth/releases/tag/v0.2.0-beta.1) on Monday March 4, 2024, our first breaking change to the database model, providing faster query speed, smaller database footprint, and allowing "history" to be mounted on separate drives.
- We shipped iterative improvements until the last alpha release on February 28, 2024, [0.1.0-alpha.21](https://github.com/paradigmxyz/reth/releases/tag/v0.1.0-alpha.21).
- We [initially announced](https://www.paradigm.xyz/2023/06/reth-alpha) [0.1.0-alpha.1](https://github.com/paradigmxyz/reth/releases/tag/v0.1.0-alpha.1) on June 20, 2023.
- We released 1.0 "production-ready" stable Reth in June 2024.
- Reth completed an audit with [Sigma Prime](https://sigmaprime.io/), the developers of [Lighthouse](https://github.com/sigp/lighthouse), the Rust Consensus Layer implementation. Find it [here](./audit/sigma_prime_audit_v2.pdf).
- Revm (the EVM used in Reth) underwent an audit with [Guido Vranken](https://x.com/guidovranken) (#1 [Ethereum Bug Bounty](https://ethereum.org/en/bug-bounty)). We will publish the results soon.
- We released multiple iterative beta versions, up to [beta.9](https://github.com/paradigmxyz/reth/releases/tag/v0.2.0-beta.9) on Monday June 3, 2024, the last beta release.
- We released [beta](https://github.com/paradigmxyz/reth/releases/tag/v0.2.0-beta.1) on Monday March 4, 2024, our first breaking change to the database model, providing faster query speed, smaller database footprint, and allowing "history" to be mounted on separate drives.
- We shipped iterative improvements until the last alpha release on February 28, 2024, [0.1.0-alpha.21](https://github.com/paradigmxyz/reth/releases/tag/v0.1.0-alpha.21).
- We [initially announced](https://www.paradigm.xyz/2023/06/reth-alpha) [0.1.0-alpha.1](https://github.com/paradigmxyz/reth/releases/tag/v0.1.0-alpha.1) on June 20, 2023.

### Database compatibility

@@ -77,8 +82,8 @@ For a general overview of the crates, see [Project Layout](./docs/repo/layout.md

If you want to contribute, or follow along with contributor discussion, you can use our [main telegram](https://t.me/paradigm_reth) to chat with us about the development of Reth!

- Our contributor guidelines can be found in [`CONTRIBUTING.md`](./CONTRIBUTING.md).
- See our [contributor docs](./docs) for more information on the project. A good starting point is [Project Layout](./docs/repo/layout.md).
- Our contributor guidelines can be found in [`CONTRIBUTING.md`](./CONTRIBUTING.md).
- See our [contributor docs](./docs) for more information on the project. A good starting point is [Project Layout](./docs/repo/layout.md).

### Building and testing

@@ -123,9 +128,9 @@ If you have any questions, first see if the answer to your question can be found

If the answer is not there:

- Join the [Telegram][tg-url] to get help, or
- Open a [discussion](https://github.com/paradigmxyz/reth/discussions/new) with your question, or
- Open an issue with [the bug](https://github.com/paradigmxyz/reth/issues/new?assignees=&labels=C-bug%2CS-needs-triage&projects=&template=bug.yml)
- Join the [Telegram][tg-url] to get help, or
- Open a [discussion](https://github.com/paradigmxyz/reth/discussions/new) with your question, or
- Open an issue with [the bug](https://github.com/paradigmxyz/reth/issues/new?assignees=&labels=C-bug%2CS-needs-triage&projects=&template=bug.yml)

## Security

@@ -137,9 +142,9 @@ Reth is a new implementation of the Ethereum protocol. In the process of develop

None of this would have been possible without them, so big shoutout to the teams below:

- [Geth](https://github.com/ethereum/go-ethereum/): We would like to express our heartfelt gratitude to the go-ethereum team for their outstanding contributions to Ethereum over the years. Their tireless efforts and dedication have helped to shape the Ethereum ecosystem and make it the vibrant and innovative community it is today. Thank you for your hard work and commitment to the project.
- [Erigon](https://github.com/ledgerwatch/erigon) (fka Turbo-Geth): Erigon pioneered the ["Staged Sync" architecture](https://erigon.substack.com/p/erigon-stage-sync-and-control-flows) that Reth is using, as well as [introduced MDBX](https://github.com/ledgerwatch/erigon/wiki/Choice-of-storage-engine) as the database of choice. We thank Erigon for pushing the state of the art research on the performance limits of Ethereum nodes.
- [Akula](https://github.com/akula-bft/akula/): Reth uses forks of the Apache versions of Akula's [MDBX Bindings](https://github.com/paradigmxyz/reth/pull/132), [FastRLP](https://github.com/paradigmxyz/reth/pull/63) and [ECIES](https://github.com/paradigmxyz/reth/pull/80). Given that these packages were already released under the Apache License, and they implement standardized solutions, we decided not to reimplement them to iterate faster. We thank the Akula team for their contributions to the Rust Ethereum ecosystem and for publishing these packages.
- [Geth](https://github.com/ethereum/go-ethereum/): We would like to express our heartfelt gratitude to the go-ethereum team for their outstanding contributions to Ethereum over the years. Their tireless efforts and dedication have helped to shape the Ethereum ecosystem and make it the vibrant and innovative community it is today. Thank you for your hard work and commitment to the project.
- [Erigon](https://github.com/ledgerwatch/erigon) (fka Turbo-Geth): Erigon pioneered the ["Staged Sync" architecture](https://erigon.substack.com/p/erigon-stage-sync-and-control-flows) that Reth is using, as well as [introduced MDBX](https://github.com/ledgerwatch/erigon/wiki/Choice-of-storage-engine) as the database of choice. We thank Erigon for pushing the state of the art research on the performance limits of Ethereum nodes.
- [Akula](https://github.com/akula-bft/akula/): Reth uses forks of the Apache versions of Akula's [MDBX Bindings](https://github.com/paradigmxyz/reth/pull/132), [FastRLP](https://github.com/paradigmxyz/reth/pull/63) and [ECIES](https://github.com/paradigmxyz/reth/pull/80). Given that these packages were already released under the Apache License, and they implement standardized solutions, we decided not to reimplement them to iterate faster. We thank the Akula team for their contributions to the Rust Ethereum ecosystem and for publishing these packages.

## Warning


@@ -98,5 +98,5 @@ min-trace-logs = [
"reth-node-core/min-trace-logs",
]

# no-op feature flag for switching between the `optimism` and default functionality in CI matrices
# no-op feature flag for CI matrices
ethereum = []

@@ -474,7 +474,6 @@ async fn run_compilation_phase(
|
||||
git_manager: &GitManager,
|
||||
compilation_manager: &CompilationManager,
|
||||
args: &Args,
|
||||
is_optimism: bool,
|
||||
) -> Result<(String, String)> {
|
||||
info!("=== Running compilation phase ===");
|
||||
|
||||
@@ -527,7 +526,7 @@ async fn run_compilation_phase(
|
||||
git_manager.switch_ref(git_ref)?;
|
||||
|
||||
// Compile reth (with caching)
|
||||
compilation_manager.compile_reth(commit, is_optimism, features, rustflags)?;
|
||||
compilation_manager.compile_reth(commit, features, rustflags)?;
|
||||
|
||||
info!("Completed compilation for {} reference", ref_type);
|
||||
}
|
||||
@@ -547,7 +546,6 @@ async fn run_warmup_phase(
|
||||
node_manager: &mut NodeManager,
|
||||
benchmark_runner: &BenchmarkRunner,
|
||||
args: &Args,
|
||||
is_optimism: bool,
|
||||
baseline_commit: &str,
|
||||
starting_tip: u64,
|
||||
) -> Result<()> {
|
||||
@@ -565,8 +563,7 @@ async fn run_warmup_phase(
|
||||
git_manager.switch_ref(warmup_ref)?;
|
||||
|
||||
// Get the cached binary path for baseline (should already be compiled)
|
||||
let binary_path =
|
||||
compilation_manager.get_cached_binary_path_for_commit(baseline_commit, is_optimism);
|
||||
let binary_path = compilation_manager.get_cached_binary_path_for_commit(baseline_commit);
|
||||
|
||||
// Verify the cached binary exists
|
||||
if !binary_path.exists() {
|
||||
@@ -619,18 +616,13 @@ async fn run_benchmark_workflow(
|
||||
comparison_generator: &mut ComparisonGenerator,
|
||||
args: &Args,
|
||||
) -> Result<()> {
|
||||
// Detect if this is an Optimism chain once at the beginning
|
||||
let rpc_url = args.get_rpc_url();
|
||||
let is_optimism = compilation_manager.detect_optimism_chain(&rpc_url).await?;
|
||||
|
||||
// Run compilation phase for both binaries
|
||||
let (baseline_commit, feature_commit) =
|
||||
run_compilation_phase(git_manager, compilation_manager, args, is_optimism).await?;
|
||||
run_compilation_phase(git_manager, compilation_manager, args).await?;
|
||||
|
||||
// Switch to baseline reference and get the starting tip
|
||||
git_manager.switch_ref(&args.baseline_ref)?;
|
||||
let binary_path =
|
||||
compilation_manager.get_cached_binary_path_for_commit(&baseline_commit, is_optimism);
|
||||
let binary_path = compilation_manager.get_cached_binary_path_for_commit(&baseline_commit);
|
||||
if !binary_path.exists() {
|
||||
return Err(eyre!(
|
||||
"Cached baseline binary not found at {:?}. Compilation phase should have created it.",
|
||||
@@ -660,7 +652,6 @@ async fn run_benchmark_workflow(
|
||||
node_manager,
|
||||
benchmark_runner,
|
||||
args,
|
||||
is_optimism,
|
||||
&baseline_commit,
|
||||
starting_tip,
|
||||
)
|
||||
@@ -686,8 +677,7 @@ async fn run_benchmark_workflow(
|
||||
git_manager.switch_ref(git_ref)?;
|
||||
|
||||
// Get the cached binary path for this git reference (should already be compiled)
|
||||
let binary_path =
|
||||
compilation_manager.get_cached_binary_path_for_commit(commit, is_optimism);
|
||||
let binary_path = compilation_manager.get_cached_binary_path_for_commit(commit);
|
||||
|
||||
// Verify the cached binary exists
|
||||
if !binary_path.exists() {
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
//! Compilation operations for reth and reth-bench.
|
||||
|
||||
use crate::git::GitManager;
|
||||
use alloy_primitives::address;
|
||||
use alloy_provider::{Provider, ProviderBuilder};
|
||||
use eyre::{eyre, Result, WrapErr};
|
||||
use std::{fs, path::PathBuf, process::Command};
|
||||
use tracing::{debug, error, info, warn};
|
||||
@@ -25,54 +23,14 @@ impl CompilationManager {
|
||||
Ok(Self { repo_root, output_dir, git_manager })
|
||||
}
|
||||
|
||||
/// Detect if the RPC endpoint is an Optimism chain
|
||||
pub(crate) async fn detect_optimism_chain(&self, rpc_url: &str) -> Result<bool> {
|
||||
info!("Detecting chain type from RPC endpoint...");
|
||||
|
||||
// Create Alloy provider
|
||||
let url = rpc_url.parse().map_err(|e| eyre!("Invalid RPC URL '{}': {}", rpc_url, e))?;
|
||||
let provider = ProviderBuilder::new().connect_http(url);
|
||||
|
||||
// Check for Optimism predeploy at address 0x420000000000000000000000000000000000000F
|
||||
let is_optimism = !provider
|
||||
.get_code_at(address!("0x420000000000000000000000000000000000000F"))
|
||||
.await?
|
||||
.is_empty();
|
||||
|
||||
if is_optimism {
|
||||
info!("Detected Optimism chain");
|
||||
} else {
|
||||
info!("Detected Ethereum chain");
|
||||
}
|
||||
|
||||
Ok(is_optimism)
|
||||
}
|
||||
|
||||
/// Get the path to the cached binary using explicit commit hash
|
||||
pub(crate) fn get_cached_binary_path_for_commit(
|
||||
&self,
|
||||
commit: &str,
|
||||
is_optimism: bool,
|
||||
) -> PathBuf {
|
||||
pub(crate) fn get_cached_binary_path_for_commit(&self, commit: &str) -> PathBuf {
|
||||
let identifier = &commit[..8]; // Use first 8 chars of commit
|
||||
|
||||
let binary_name = if is_optimism {
|
||||
format!("op-reth_{}", identifier)
|
||||
} else {
|
||||
format!("reth_{}", identifier)
|
||||
};
|
||||
|
||||
self.output_dir.join("bin").join(binary_name)
|
||||
self.output_dir.join("bin").join(format!("reth_{identifier}"))
|
||||
}
|
||||
|
||||
/// Compile reth using cargo build and cache the binary
|
||||
pub(crate) fn compile_reth(
|
||||
&self,
|
||||
commit: &str,
|
||||
is_optimism: bool,
|
||||
features: &str,
|
||||
rustflags: &str,
|
||||
) -> Result<()> {
|
||||
pub(crate) fn compile_reth(&self, commit: &str, features: &str, rustflags: &str) -> Result<()> {
|
||||
// Validate that current git commit matches the expected commit
|
||||
let current_commit = self.git_manager.get_current_commit()?;
|
||||
if current_commit != commit {
|
||||
@@ -83,7 +41,7 @@ impl CompilationManager {
|
||||
));
|
||||
}
|
||||
|
||||
let cached_path = self.get_cached_binary_path_for_commit(commit, is_optimism);
|
||||
let cached_path = self.get_cached_binary_path_for_commit(commit);
|
||||
|
||||
// Check if cached binary already exists (since path contains commit hash, it's valid)
|
||||
if cached_path.exists() {
|
||||
@@ -93,7 +51,7 @@ impl CompilationManager {
|
||||
|
||||
info!("No cached binary found, compiling (commit: {})...", &commit[..8]);
|
||||
|
||||
let binary_name = if is_optimism { "op-reth" } else { "reth" };
|
||||
let binary_name = "reth";
|
||||
|
||||
info!(
|
||||
"Compiling {} with profiling configuration (commit: {})...",
|
||||
@@ -107,14 +65,6 @@ impl CompilationManager {
|
||||
cmd.arg("--features").arg(features);
|
||||
info!("Using features: {features}");
|
||||
|
||||
// Add bin-specific arguments for optimism
|
||||
if is_optimism {
|
||||
cmd.arg("--bin")
|
||||
.arg("op-reth")
|
||||
.arg("--manifest-path")
|
||||
.arg("crates/optimism/bin/Cargo.toml");
|
||||
}
|
||||
|
||||
cmd.current_dir(&self.repo_root);
|
||||
|
||||
// Set RUSTFLAGS
|
||||
|
||||
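The caching scheme shown in the `reth-bench` changes above is deliberately simple: the cached path embeds the first eight characters of the commit hash, so a file that already exists at that path is, by construction, the right build for that commit. A self-contained sketch of the same idea (std only; the names and the `build` closure are illustrative, not the reth-bench API):

```rust
use std::path::{Path, PathBuf};

/// Returns the cache path for a commit, mirroring the `reth_<first 8 chars>` naming above.
fn cached_binary_path(output_dir: &Path, commit: &str) -> PathBuf {
    let identifier = &commit[..8];
    output_dir.join("bin").join(format!("reth_{identifier}"))
}

/// Builds only if the commit-addressed path does not already exist.
fn ensure_built(output_dir: &Path, commit: &str, build: impl FnOnce(&Path)) -> PathBuf {
    let path = cached_binary_path(output_dir, commit);
    if !path.exists() {
        // Because the path embeds the commit, a missing file is the only reason to rebuild.
        build(&path);
    }
    path
}

fn main() {
    let out = Path::new("/tmp/reth-bench-cache");
    let path = ensure_built(out, "59e22d265b7a423b", |p| {
        println!("would build into {}", p.display());
    });
    println!("cached binary: {}", path.display());
}
```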
@@ -117,7 +117,7 @@ min-trace-logs = [
"reth-node-core/min-trace-logs",
]

# no-op feature flag for switching between the `optimism` and default functionality in CI matrices
# no-op feature flag for CI matrices
ethereum = []

[[bin]]

@@ -27,7 +27,7 @@ pub trait RethCli: Sized {
/// The associated `ChainSpecParser` type
type ChainSpecParser: ChainSpecParser;

/// The name of the implementation, eg. `reth`, `op-reth`, etc.
/// The name of the implementation, eg. `reth`.
fn name(&self) -> Cow<'static, str>;

/// The version of the node, such as `reth/v1.0.0`

@@ -38,7 +38,6 @@ cargo nextest run --workspace \
--exclude 'exex-subscription' \
--exclude 'reth-bench' \
--exclude 'ef-tests' \
--exclude 'op-reth' \
--exclude 'reth' \
-E 'binary(e2e_testsuite)'
```

@@ -25,7 +25,7 @@ mod test {
use reth_cli_commands::NodeCommand;

#[test]
#[ignore = "reth cmd will print op-reth output if optimism feature enabled"]
#[ignore = "reth cmd output differs when optimism feature enabled"]
fn parse_dev() {
let cmd: NodeCommand<EthereumChainSpecParser> = NodeCommand::parse_from(["reth", "--dev"]);
let chain = DEV.clone();

@@ -79,7 +79,7 @@ pub type RethFullAdapter<DB, Types> =
/// configured components and can interact with the node.
///
/// There are convenience functions for networks that come with a preset of types and components via
/// the [`Node`] trait, see `reth_node_ethereum::EthereumNode` or `reth_optimism_node::OpNode`.
/// the [`Node`] trait, see `reth_node_ethereum::EthereumNode`.
///
/// The [`NodeBuilder::node`] function configures the node's types and components in one step.
///

@@ -1,64 +0,0 @@
|
||||
[package]
|
||||
name = "op-reth"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
rust-version.workspace = true
|
||||
license.workspace = true
|
||||
homepage.workspace = true
|
||||
repository.workspace = true
|
||||
exclude.workspace = true
|
||||
|
||||
[dependencies]
|
||||
reth-cli-util.workspace = true
|
||||
reth-optimism-cli.workspace = true
|
||||
reth-optimism-rpc.workspace = true
|
||||
reth-optimism-node.workspace = true
|
||||
reth-optimism-chainspec.workspace = true
|
||||
reth-optimism-consensus.workspace = true
|
||||
reth-optimism-evm.workspace = true
|
||||
reth-optimism-payload-builder.workspace = true
|
||||
reth-optimism-primitives.workspace = true
|
||||
reth-optimism-forks.workspace = true
|
||||
|
||||
clap = { workspace = true, features = ["derive", "env"] }
|
||||
tracing.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[features]
|
||||
default = ["jemalloc", "otlp", "reth-optimism-evm/portable", "js-tracer", "keccak-cache-global", "asm-keccak"]
|
||||
|
||||
otlp = ["reth-optimism-cli/otlp"]
|
||||
|
||||
js-tracer = [
|
||||
"reth-optimism-node/js-tracer",
|
||||
]
|
||||
|
||||
jemalloc = ["reth-cli-util/jemalloc", "reth-optimism-cli/jemalloc"]
|
||||
jemalloc-prof = ["jemalloc", "reth-cli-util/jemalloc-prof", "reth-optimism-cli/jemalloc-prof"]
|
||||
jemalloc-symbols = ["jemalloc-prof", "reth-optimism-cli/jemalloc-symbols"]
|
||||
tracy-allocator = ["reth-cli-util/tracy-allocator", "tracy"]
|
||||
tracy = ["reth-optimism-cli/tracy"]
|
||||
|
||||
asm-keccak = ["reth-optimism-cli/asm-keccak", "reth-optimism-node/asm-keccak"]
|
||||
keccak-cache-global = [
|
||||
"reth-optimism-cli/keccak-cache-global",
|
||||
"reth-optimism-node/keccak-cache-global",
|
||||
]
|
||||
dev = [
|
||||
"reth-optimism-cli/dev",
|
||||
"reth-optimism-primitives/arbitrary",
|
||||
]
|
||||
|
||||
min-error-logs = ["tracing/release_max_level_error"]
|
||||
min-warn-logs = ["tracing/release_max_level_warn"]
|
||||
min-info-logs = ["tracing/release_max_level_info"]
|
||||
min-debug-logs = ["tracing/release_max_level_debug"]
|
||||
min-trace-logs = ["tracing/release_max_level_trace"]
|
||||
|
||||
edge = ["reth-optimism-cli/edge"]
|
||||
|
||||
[[bin]]
|
||||
name = "op-reth"
|
||||
path = "src/main.rs"
|
||||
@@ -1,71 +0,0 @@
|
||||
//! Rust Optimism (op-reth) binary executable.
|
||||
//!
|
||||
//! ## Feature Flags
|
||||
//!
|
||||
//! - `jemalloc`: Uses [jemallocator](https://github.com/tikv/jemallocator) as the global allocator.
|
||||
//! This is **not recommended on Windows**. See [here](https://rust-lang.github.io/rfcs/1974-global-allocators.html#jemalloc)
|
||||
//! for more info.
|
||||
//! - `jemalloc-prof`: Enables [jemallocator's](https://github.com/tikv/jemallocator) heap profiling
|
||||
//! and leak detection functionality. See [jemalloc's opt.prof](https://jemalloc.net/jemalloc.3.html#opt.prof)
|
||||
//! documentation for usage details. This is **not recommended on Windows**. See [here](https://rust-lang.github.io/rfcs/1974-global-allocators.html#jemalloc)
|
||||
//! for more info.
|
||||
//! - `asm-keccak`: replaces the default, pure-Rust implementation of Keccak256 with one implemented
|
||||
//! in assembly; see [the `keccak-asm` crate](https://github.com/DaniPopes/keccak-asm) for more
|
||||
//! details and supported targets
|
||||
//! - `min-error-logs`: Disables all logs below `error` level.
|
||||
//! - `min-warn-logs`: Disables all logs below `warn` level.
|
||||
//! - `min-info-logs`: Disables all logs below `info` level. This can speed up the node, since fewer
|
||||
//! calls to the logging component are made.
|
||||
//! - `min-debug-logs`: Disables all logs below `debug` level.
|
||||
//! - `min-trace-logs`: Disables all logs below `trace` level.
|
||||
#![doc(
|
||||
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
|
||||
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
|
||||
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
|
||||
)]
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
|
||||
/// Re-exported from `reth_optimism_cli`.
|
||||
pub mod cli {
|
||||
pub use reth_optimism_cli::*;
|
||||
}
|
||||
|
||||
/// Re-exported from `reth_optimism_chainspec`.
|
||||
pub mod chainspec {
|
||||
pub use reth_optimism_chainspec::*;
|
||||
}
|
||||
|
||||
/// Re-exported from `reth_optimism_consensus`.
|
||||
pub mod consensus {
|
||||
pub use reth_optimism_consensus::*;
|
||||
}
|
||||
|
||||
/// Re-exported from `reth_optimism_evm`.
|
||||
pub mod evm {
|
||||
pub use reth_optimism_evm::*;
|
||||
}
|
||||
|
||||
/// Re-exported from `reth_optimism_forks`.
|
||||
pub mod forks {
|
||||
pub use reth_optimism_forks::*;
|
||||
}
|
||||
|
||||
/// Re-exported from `reth_optimism_node`.
|
||||
pub mod node {
|
||||
pub use reth_optimism_node::*;
|
||||
}
|
||||
|
||||
/// Re-exported from `reth_optimism_payload_builder`.
|
||||
pub mod payload {
|
||||
pub use reth_optimism_payload_builder::*;
|
||||
}
|
||||
|
||||
/// Re-exported from `reth_optimism_primitives`.
|
||||
pub mod primitives {
|
||||
pub use reth_optimism_primitives::*;
|
||||
}
|
||||
|
||||
/// Re-exported from `reth_optimism_rpc`.
|
||||
pub mod rpc {
|
||||
pub use reth_optimism_rpc::*;
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
#![allow(missing_docs, rustdoc::missing_crate_level_docs)]
|
||||
|
||||
use clap::Parser;
|
||||
use reth_optimism_cli::{chainspec::OpChainSpecParser, Cli};
|
||||
use reth_optimism_node::{args::RollupArgs, OpNode};
|
||||
use tracing::info;
|
||||
|
||||
#[global_allocator]
|
||||
static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator();
|
||||
|
||||
#[cfg(all(feature = "jemalloc-prof", unix))]
|
||||
#[unsafe(export_name = "_rjem_malloc_conf")]
|
||||
static MALLOC_CONF: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:19\0";
|
||||
|
||||
fn main() {
|
||||
reth_cli_util::sigsegv_handler::install();
|
||||
|
||||
// Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided.
|
||||
if std::env::var_os("RUST_BACKTRACE").is_none() {
|
||||
unsafe {
|
||||
std::env::set_var("RUST_BACKTRACE", "1");
|
||||
}
|
||||
}
|
||||
|
||||
if let Err(err) =
|
||||
Cli::<OpChainSpecParser, RollupArgs>::parse().run(async move |builder, rollup_args| {
|
||||
info!(target: "reth::cli", "Launching node");
|
||||
let handle =
|
||||
builder.node(OpNode::new(rollup_args)).launch_with_debug_capabilities().await?;
|
||||
handle.node_exit_future.await
|
||||
})
|
||||
{
|
||||
eprintln!("Error: {err:?}");
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
@@ -1,90 +0,0 @@
|
||||
[package]
|
||||
name = "reth-optimism-chainspec"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
rust-version.workspace = true
|
||||
license.workspace = true
|
||||
homepage.workspace = true
|
||||
repository.workspace = true
|
||||
description = "EVM chain spec implementation for optimism."
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
# reth
|
||||
reth-chainspec.workspace = true
|
||||
reth-ethereum-forks.workspace = true
|
||||
reth-primitives-traits.workspace = true
|
||||
reth-network-peers.workspace = true
|
||||
|
||||
# op-reth
|
||||
reth-optimism-forks.workspace = true
|
||||
reth-optimism-primitives.workspace = true
|
||||
|
||||
# ethereum
|
||||
alloy-chains.workspace = true
|
||||
alloy-genesis.workspace = true
|
||||
alloy-primitives.workspace = true
|
||||
alloy-consensus.workspace = true
|
||||
alloy-eips.workspace = true
|
||||
alloy-hardforks.workspace = true
|
||||
|
||||
# op
|
||||
op-alloy-rpc-types.workspace = true
|
||||
|
||||
serde = { workspace = true, optional = true }
|
||||
serde_json.workspace = true
|
||||
|
||||
# io
|
||||
tar-no-std = { workspace = true, optional = true }
|
||||
miniz_oxide = { workspace = true, features = ["with-alloc"], optional = true }
|
||||
|
||||
# misc
|
||||
derive_more.workspace = true
|
||||
paste = { workspace = true, optional = true }
|
||||
thiserror = { workspace = true, optional = true }
|
||||
op-alloy-consensus.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
reth-chainspec = { workspace = true, features = ["test-utils"] }
|
||||
alloy-op-hardforks.workspace = true
|
||||
|
||||
[features]
|
||||
default = ["std"]
|
||||
superchain-configs = ["miniz_oxide", "paste", "tar-no-std", "thiserror", "thiserror", "dep:serde"]
|
||||
std = [
|
||||
"alloy-chains/std",
|
||||
"alloy-genesis/std",
|
||||
"alloy-primitives/std",
|
||||
"alloy-eips/std",
|
||||
"op-alloy-rpc-types/std",
|
||||
"reth-chainspec/std",
|
||||
"reth-ethereum-forks/std",
|
||||
"reth-primitives-traits/std",
|
||||
"reth-optimism-forks/std",
|
||||
"reth-optimism-primitives/std",
|
||||
"alloy-consensus/std",
|
||||
"derive_more/std",
|
||||
"reth-network-peers/std",
|
||||
"serde_json/std",
|
||||
"serde?/std",
|
||||
"miniz_oxide?/std",
|
||||
"thiserror?/std",
|
||||
"op-alloy-consensus/std",
|
||||
]
|
||||
serde = [
|
||||
"alloy-chains/serde",
|
||||
"alloy-consensus/serde",
|
||||
"alloy-eips/serde",
|
||||
"alloy-hardforks/serde",
|
||||
"alloy-primitives/serde",
|
||||
"miniz_oxide?/serde",
|
||||
"op-alloy-rpc-types/serde",
|
||||
"reth-ethereum-forks/serde",
|
||||
"reth-optimism-forks/serde",
|
||||
"reth-optimism-primitives/serde",
|
||||
"reth-primitives-traits/serde",
|
||||
"op-alloy-consensus/serde",
|
||||
"alloy-op-hardforks/serde",
|
||||
]
|
||||
@@ -1,140 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Usage: ./fetch_superchain_config.sh
|
||||
# Switch to the same directory as the script and run it.
|
||||
# This will checkout the latest superchain registry commit and create three files.
|
||||
# - superchain-configs.tar: A tar archive containing all superchain configs
|
||||
# - ../src/superchain/chain_specs.rs: A Rust file containing all chain specs
|
||||
|
||||
# Requires:
|
||||
# - MacOS: brew install qpdf zstd yq
|
||||
|
||||
SCRIPT_DIR=$(pwd)
|
||||
TEMP_DIR=$(mktemp -d)
|
||||
|
||||
# Clone the repository and go to the directory
|
||||
git clone --depth 1 https://github.com/ethereum-optimism/superchain-registry.git "$TEMP_DIR"
|
||||
# shellcheck disable=SC2164
|
||||
cd "$TEMP_DIR"
|
||||
|
||||
|
||||
DICT_FILE="$TEMP_DIR/superchain/extra/dictionary"
|
||||
TARGET_PATH="$TEMP_DIR/result"
|
||||
GENESIS_TARGET_PATH="$TARGET_PATH/genesis"
|
||||
CONFIGS_TARGET_PATH="$TARGET_PATH/configs"
|
||||
|
||||
GENESIS_SRC_DIR="$TEMP_DIR/superchain/extra/genesis"
|
||||
CONFIGS_SRC_DIR="$TEMP_DIR/superchain/configs"
|
||||
mkdir -p "$GENESIS_TARGET_PATH"
|
||||
mkdir -p "$CONFIGS_TARGET_PATH"
|
||||
|
||||
echo "Convert TOML files to JSON..."
|
||||
# JSON makes the handling in no-std environments easier
|
||||
find "$CONFIGS_SRC_DIR" -type f -name "*.toml" | while read -r file; do
|
||||
|
||||
# Compute destination file path
|
||||
REL_PATH="${file#"$CONFIGS_SRC_DIR"/}"
|
||||
DEST_PATH="$CONFIGS_TARGET_PATH/${REL_PATH%.toml}"
|
||||
|
||||
# Ensure destination directory exists
|
||||
mkdir -p "$(dirname "$DEST_PATH")"
|
||||
|
||||
# Convert the toml file to json
|
||||
yq -p toml -o json "$file" > "$DEST_PATH.json"
|
||||
done
|
||||
|
||||
echo "Extract and compress the genesis files..."
|
||||
# We compress the genesis files with zlib-flate to save space and to a compression
|
||||
# format that makes it easier to handle them in no-std environments
|
||||
find "$GENESIS_SRC_DIR" -type f -name "*.json.zst" | while read -r file; do
|
||||
|
||||
# Compute destination file path
|
||||
REL_PATH="${file#"$GENESIS_SRC_DIR"/}"
|
||||
DEST_PATH="$GENESIS_TARGET_PATH/${REL_PATH%.zst}"
|
||||
|
||||
# Ensure destination directory exists
|
||||
mkdir -p "$(dirname "$DEST_PATH")"
|
||||
|
||||
# Extract the file
|
||||
zstd -q -d -D="$DICT_FILE" "$file" -o "$DEST_PATH"
|
||||
|
||||
# Remove "config" field from genesis files, because it is not consistent populated.
|
||||
# We will add it back in from the chain config file during runtime.
|
||||
# See: https://github.com/ethereum-optimism/superchain-registry/issues/901
|
||||
jq -c 'del(.config)' "$DEST_PATH" > "$DEST_PATH.tmp"
|
||||
mv "$DEST_PATH.tmp" "$DEST_PATH"
|
||||
|
||||
# Compress with zlib-flate and remove the original file
|
||||
zlib-flate -compress < "$DEST_PATH" > "$DEST_PATH.zz"
|
||||
rm "$DEST_PATH"
|
||||
|
||||
done
|
||||
|
||||
# Save revision
|
||||
git rev-parse HEAD > "$TARGET_PATH/superchain_registry_commit"
|
||||
git rev-parse HEAD > "$SCRIPT_DIR/superchain_registry_commit"
|
||||
|
||||
# Copy the LICENSE file
|
||||
cp "$TEMP_DIR/LICENSE" "$TARGET_PATH/LICENSE"
|
||||
|
||||
# Set the modification time of all files to 1980-01-01 to ensure the archive is deterministic
|
||||
find "$TARGET_PATH" -exec touch -t 198001010000.00 {} +
|
||||
|
||||
# shellcheck disable=SC2164
|
||||
cd "$TARGET_PATH"
|
||||
# Create a tar archive excluding files that are not relevant superchain directory
|
||||
# shellcheck disable=SC2035
|
||||
COPYFILE_DISABLE=1 tar --no-acls --no-xattrs -cf superchain-configs.tar --exclude "._COMMIT" *
|
||||
|
||||
# Move result to the script directory
|
||||
mv superchain-configs.tar "$SCRIPT_DIR"
|
||||
|
||||
echo "Get chain name and identifiers from chainList.json"
|
||||
# shellcheck disable=SC2002
|
||||
JSON_DATA=$(cat "$TEMP_DIR/chainList.json" | jq -r 'sort_by(.parent.chain, .identifier | split("/")[1])')
|
||||
|
||||
# Extract network and chain names
|
||||
PARENT_CHAINS=$(echo "$JSON_DATA" | jq -r '.[].parent.chain')
|
||||
IDENTIFIERS=$(echo "$JSON_DATA" | jq -r '.[].identifier | split("/")[1]')
|
||||
# shellcheck disable=SC2206
|
||||
PARENT_CHAINS_ARRAY=($PARENT_CHAINS)
|
||||
# shellcheck disable=SC2206
|
||||
IDENTIFIERS_ARRAY=($IDENTIFIERS)
|
||||
|
||||
RESULTS=()
|
||||
RESULT_RS="// Generated by fetch_superchain_config.sh\nuse crate::create_superchain_specs;\n"
|
||||
ENUM_RS=""
|
||||
|
||||
echo "Generate chain_specs.rs..."
|
||||
for i in "${!PARENT_CHAINS_ARRAY[@]}"; do
|
||||
NAME="${IDENTIFIERS_ARRAY[$i]}"
|
||||
ENVIRONMENT="${PARENT_CHAINS_ARRAY[$i]}"
|
||||
# Skip Optimism and Base here because it is implemented separately
|
||||
if [ "$NAME" == "op" ] || [ "$NAME" == "base" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
# Validate file existence in our target path <environment>/<name>.json.zst
|
||||
FILE_PATH="$GENESIS_TARGET_PATH/$ENVIRONMENT/$NAME.json.zz"
|
||||
if [ -f "$FILE_PATH" ]; then
|
||||
RESULTS+=("{\"name\": \"$NAME\", \"environment\": \"$ENVIRONMENT\"}")
|
||||
ENUM_RS+=" (\"$NAME\", \"$ENVIRONMENT\"),\n"
|
||||
else
|
||||
echo "Error: File not found: $FILE_PATH" >&2
|
||||
fi
|
||||
done
|
||||
|
||||
|
||||
# Write chain_specs.rs
|
||||
echo -e "$RESULT_RS" > "$SCRIPT_DIR/../src/superchain/chain_specs.rs"
|
||||
|
||||
# Append the enum creation to chain_specs.rs
|
||||
echo -e "create_superchain_specs!(\n${ENUM_RS});" \
|
||||
>> "$SCRIPT_DIR/../src/superchain/chain_specs.rs"
|
||||
|
||||
# Clean up
|
||||
# shellcheck disable=SC2164
|
||||
cd "$TEMP_DIR/../"
|
||||
rm -rf "$TEMP_DIR"
|
||||
|
||||
echo "Done."
|
||||
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
|
||||
{"nonce":"0x0","timestamp":"0x6490fdd2","extraData":"0x","gasLimit":"0x1c9c380","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","stateRoot":"0x5eb6e371a698b8d68f665192350ffcecbbbf322916f4b51bd79bb6887da3f494","alloc":{"0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266":{"balance":"0xD3C21BCECCEDA1000000"},"0x70997970C51812dc3A010C7d01b50e0d17dc79C8":{"balance":"0xD3C21BCECCEDA1000000"},"0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC":{"balance":"0xD3C21BCECCEDA1000000"},"0x90F79bf6EB2c4f870365E785982E1f101E93b906":{"balance":"0xD3C21BCECCEDA1000000"},"0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65":{"balance":"0xD3C21BCECCEDA1000000"},"0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc":{"balance":"0xD3C21BCECCEDA1000000"},"0x976EA74026E726554dB657fA54763abd0C3a0aa9":{"balance":"0xD3C21BCECCEDA1000000"},"0x14dC79964da2C08b23698B3D3cc7Ca32193d9955":{"balance":"0xD3C21BCECCEDA1000000"},"0x23618e81E3f5cdF7f54C3d65f7FBc0aBf5B21E8f":{"balance":"0xD3C21BCECCEDA1000000"},"0xa0Ee7A142d267C1f36714E4a8F75612F20a79720":{"balance":"0xD3C21BCECCEDA1000000"},"0xBcd4042DE499D14e55001CcbB24a551F3b954096":{"balance":"0xD3C21BCECCEDA1000000"},"0x71bE63f3384f5fb98995898A86B02Fb2426c5788":{"balance":"0xD3C21BCECCEDA1000000"},"0xFABB0ac9d68B0B445fB7357272Ff202C5651694a":{"balance":"0xD3C21BCECCEDA1000000"},"0x1CBd3b2770909D4e10f157cABC84C7264073C9Ec":{"balance":"0xD3C21BCECCEDA1000000"},"0xdF3e18d64BC6A983f673Ab319CCaE4f1a57C7097":{"balance":"0xD3C21BCECCEDA1000000"},"0xcd3B766CCDd6AE721141F452C550Ca635964ce71":{"balance":"0xD3C21BCECCEDA1000000"},"0x2546BcD3c84621e976D8185a91A922aE77ECEc30":{"balance":"0xD3C21BCECCEDA1000000"},"0xbDA5747bFD65F08deb54cb465eB87D40e51B197E":{"balance":"0xD3C21BCECCEDA1000000"},"0xdD2FD4581271e230360230F9337D5c0430Bf44C0":{"balance":"0xD3C21BCECCEDA1000000"},"0x8626f6940E2eb28930eFb4CeF49B2d1F2C9C1199":{"balance":"0xD3C21BCECCEDA1000000"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}
|
||||
@@ -1 +0,0 @@
|
||||
{"config":{"ChainName":"optimism-mainnet","chainId":10,"homesteadBlock":0,"eip150Block":0,"eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0,"istanbulBlock":0,"muirGlacierBlock":0,"berlinBlock":3950000,"londonBlock":105235063,"arrowGlacierBlock":105235063,"grayGlacierBlock":105235063,"mergeNetsplitBlock":105235063,"bedrockBlock":105235063,"terminalTotalDifficulty":0,"terminalTotalDifficultyPassed":true,"optimism":{"eip1559Elasticity":6,"eip1559Denominator":50},"regolithTime":0},"difficulty":"1","gasLimit":"15000000","extradata":"0x000000000000000000000000000000000000000000000000000000000000000000000398232e2064f896018496b4b44b3d62751f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","stateRoot":"0xeddb4c1786789419153a27c4c80ff44a2226b6eda04f7e22ce5bae892ea568eb","alloc":{}}
|
||||
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -1 +0,0 @@
|
||||
59e22d265b7a423b7f51a67a722471a6f3c3cc39
|
||||
@@ -1,40 +0,0 @@
|
||||
//! Chain specification for the Base Mainnet network.
|
||||
|
||||
use alloc::{sync::Arc, vec};
|
||||
|
||||
use alloy_chains::Chain;
|
||||
use alloy_primitives::{b256, U256};
|
||||
use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec};
|
||||
use reth_ethereum_forks::{EthereumHardfork, Hardfork};
|
||||
use reth_optimism_forks::{OpHardfork, BASE_MAINNET_HARDFORKS};
|
||||
use reth_primitives_traits::SealedHeader;
|
||||
|
||||
use crate::{make_op_genesis_header, LazyLock, OpChainSpec};
|
||||
|
||||
/// The Base mainnet spec
|
||||
pub static BASE_MAINNET: LazyLock<Arc<OpChainSpec>> = LazyLock::new(|| {
|
||||
let genesis = serde_json::from_str(include_str!("../res/genesis/base.json"))
|
||||
.expect("Can't deserialize Base genesis json");
|
||||
let hardforks = BASE_MAINNET_HARDFORKS.clone();
|
||||
OpChainSpec {
|
||||
inner: ChainSpec {
|
||||
chain: Chain::base_mainnet(),
|
||||
genesis_header: SealedHeader::new(
|
||||
make_op_genesis_header(&genesis, &hardforks),
|
||||
b256!("0xf712aa9241cc24369b143cf6dce85f0902a9731e70d66818a3a5845b296c73dd"),
|
||||
),
|
||||
genesis,
|
||||
paris_block_and_final_difficulty: Some((0, U256::from(0))),
|
||||
hardforks,
|
||||
base_fee_params: BaseFeeParamsKind::Variable(
|
||||
vec![
|
||||
(EthereumHardfork::London.boxed(), BaseFeeParams::optimism()),
|
||||
(OpHardfork::Canyon.boxed(), BaseFeeParams::optimism_canyon()),
|
||||
]
|
||||
.into(),
|
||||
),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
.into()
|
||||
});
|
||||
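Each of the chain spec modules removed in this commit follows the same construction: a `LazyLock` static that deserializes an embedded genesis JSON on first access and shares the result behind an `Arc`. A minimal standalone sketch of that pattern, with a made-up `DemoChainSpec` type and an inline JSON string instead of `include_str!` (assumes `serde_json` as the only dependency):

```rust
use std::sync::{Arc, LazyLock};

// Stand-in for the embedded `res/genesis/*.json` files used above.
const GENESIS_JSON: &str = r#"{ "chainId": 8453, "gasLimit": "0x2faf080" }"#;

/// Minimal stand-in for the chain spec type; the real `OpChainSpec` carries much more.
struct DemoChainSpec {
    genesis: serde_json::Value,
}

/// Parsed once on first access, then shared behind an `Arc`, mirroring the statics above.
static DEMO_SPEC: LazyLock<Arc<DemoChainSpec>> = LazyLock::new(|| {
    let genesis =
        serde_json::from_str(GENESIS_JSON).expect("Can't deserialize demo genesis json");
    Arc::new(DemoChainSpec { genesis })
});

fn main() {
    // The parse happens here, on first dereference, and only once.
    println!("chain id: {}", DEMO_SPEC.genesis["chainId"]);
}
```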
@@ -1,41 +0,0 @@
|
||||
//! Chain specification for the Base Sepolia testnet network.
|
||||
|
||||
use alloc::{sync::Arc, vec};
|
||||
|
||||
use alloy_chains::Chain;
|
||||
use alloy_primitives::{b256, U256};
|
||||
use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec, Hardfork};
|
||||
use reth_ethereum_forks::EthereumHardfork;
|
||||
use reth_optimism_forks::{OpHardfork, BASE_SEPOLIA_HARDFORKS};
|
||||
use reth_primitives_traits::SealedHeader;
|
||||
|
||||
use crate::{make_op_genesis_header, LazyLock, OpChainSpec};
|
||||
|
||||
/// The Base Sepolia spec
|
||||
pub static BASE_SEPOLIA: LazyLock<Arc<OpChainSpec>> = LazyLock::new(|| {
|
||||
let genesis = serde_json::from_str(include_str!("../res/genesis/sepolia_base.json"))
|
||||
.expect("Can't deserialize Base Sepolia genesis json");
|
||||
let hardforks = BASE_SEPOLIA_HARDFORKS.clone();
|
||||
OpChainSpec {
|
||||
inner: ChainSpec {
|
||||
chain: Chain::base_sepolia(),
|
||||
genesis_header: SealedHeader::new(
|
||||
make_op_genesis_header(&genesis, &hardforks),
|
||||
b256!("0x0dcc9e089e30b90ddfc55be9a37dd15bc551aeee999d2e2b51414c54eaf934e4"),
|
||||
),
|
||||
genesis,
|
||||
paris_block_and_final_difficulty: Some((0, U256::from(0))),
|
||||
hardforks,
|
||||
base_fee_params: BaseFeeParamsKind::Variable(
|
||||
vec![
|
||||
(EthereumHardfork::London.boxed(), BaseFeeParams::base_sepolia()),
|
||||
(OpHardfork::Canyon.boxed(), BaseFeeParams::base_sepolia_canyon()),
|
||||
]
|
||||
.into(),
|
||||
),
|
||||
prune_delete_limit: 10000,
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
.into()
|
||||
});
|
||||
@@ -1,199 +0,0 @@
|
||||
//! Base fee related utilities for Optimism chains.
|
||||
|
||||
use core::cmp::max;
|
||||
|
||||
use alloy_consensus::BlockHeader;
|
||||
use alloy_eips::calc_next_block_base_fee;
|
||||
use op_alloy_consensus::{decode_holocene_extra_data, decode_jovian_extra_data, EIP1559ParamError};
|
||||
use reth_chainspec::{BaseFeeParams, EthChainSpec};
|
||||
use reth_optimism_forks::OpHardforks;
|
||||
|
||||
/// Extracts the Holocene EIP-1559 parameters from the extra data encoded in the parent header.
|
||||
///
|
||||
/// Caution: Caller must ensure that holocene is active in the parent header.
|
||||
///
|
||||
/// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation)
|
||||
pub fn decode_holocene_base_fee<H>(
|
||||
chain_spec: impl EthChainSpec + OpHardforks,
|
||||
parent: &H,
|
||||
timestamp: u64,
|
||||
) -> Result<u64, EIP1559ParamError>
|
||||
where
|
||||
H: BlockHeader,
|
||||
{
|
||||
let (elasticity, denominator) = decode_holocene_extra_data(parent.extra_data())?;
|
||||
|
||||
let base_fee_params = if elasticity == 0 && denominator == 0 {
|
||||
chain_spec.base_fee_params_at_timestamp(timestamp)
|
||||
} else {
|
||||
BaseFeeParams::new(denominator as u128, elasticity as u128)
|
||||
};
|
||||
|
||||
Ok(parent.next_block_base_fee(base_fee_params).unwrap_or_default())
|
||||
}
|
||||
|
||||
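The Holocene rule documented above boils down to one fallback: an all-zero (elasticity, denominator) pair in the parent's extra data means "use the chain spec's base fee params for this timestamp", and anything else overrides them per block. A small self-contained restatement with plain integer tuples (not the reth/alloy types or signatures):

```rust
/// Simplified restatement of the Holocene fallback rule above; types are illustrative.
fn effective_base_fee_params(
    decoded: (u32, u32),        // (elasticity, denominator) from the parent extra data
    spec_default: (u128, u128), // (elasticity, denominator) from the chain spec
) -> (u128, u128) {
    match decoded {
        // An all-zero encoding means "no override": fall back to the chain spec.
        (0, 0) => spec_default,
        (elasticity, denominator) => (elasticity as u128, denominator as u128),
    }
}

fn main() {
    // With zeroed extra data the chain-spec params win...
    assert_eq!(effective_base_fee_params((0, 0), (6, 250)), (6, 250));
    // ...otherwise the per-block values from the parent header are used.
    assert_eq!(effective_base_fee_params((4, 50), (6, 250)), (4, 50));
    println!("ok");
}
```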
/// Extracts the Jovian EIP-1559 parameters from the extra data encoded in the parent header.
/// In addition to [`decode_holocene_base_fee`], if the computed next block base fee is less than
/// the minimum base fee, the minimum base fee is returned.
|
||||
///
|
||||
/// Caution: Caller must ensure that jovian is active in the parent header.
|
||||
///
|
||||
/// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/jovian/exec-engine.md#base-fee-computation)
|
||||
/// and [Minimum base fee in block header](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/jovian/exec-engine.md#minimum-base-fee-in-block-header)
|
||||
pub fn compute_jovian_base_fee<H>(
|
||||
chain_spec: impl EthChainSpec + OpHardforks,
|
||||
parent: &H,
|
||||
timestamp: u64,
|
||||
) -> Result<u64, EIP1559ParamError>
|
||||
where
|
||||
H: BlockHeader,
|
||||
{
|
||||
let (elasticity, denominator, min_base_fee) = decode_jovian_extra_data(parent.extra_data())?;
|
||||
|
||||
let base_fee_params = if elasticity == 0 && denominator == 0 {
|
||||
chain_spec.base_fee_params_at_timestamp(timestamp)
|
||||
} else {
|
||||
BaseFeeParams::new(denominator as u128, elasticity as u128)
|
||||
};
|
||||
|
||||
// Starting from Jovian, we use the maximum of the gas used and the blob gas used to calculate
|
||||
// the next base fee.
|
||||
let gas_used = max(parent.gas_used(), parent.blob_gas_used().unwrap_or_default());
|
||||
|
||||
let next_base_fee = calc_next_block_base_fee(
|
||||
gas_used,
|
||||
parent.gas_limit(),
|
||||
parent.base_fee_per_gas().unwrap_or_default(),
|
||||
base_fee_params,
|
||||
);
|
||||
|
||||
if next_base_fee < min_base_fee {
|
||||
return Ok(min_base_fee);
|
||||
}
|
||||
|
||||
Ok(next_base_fee)
|
||||
}
|
||||
|
||||
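Stripped of the surrounding types, the Jovian change is: feed the larger of execution gas and blob gas into the usual EIP-1559 update, then clamp the result to the minimum base fee taken from the parent's extra data. An illustrative sketch (plain integers, with the EIP-1559 update passed in as a closure), consistent with the tests that follow:

```rust
/// Simplified restatement of the Jovian rule above; names and types are illustrative.
fn jovian_next_base_fee(
    gas_used: u64,
    blob_gas_used: u64,
    min_base_fee: u64,
    eip1559_update: impl Fn(u64) -> u64, // ordinary EIP-1559 update over the effective gas used
) -> u64 {
    // Jovian feeds the larger of execution gas and blob gas into the update...
    let effective_gas = gas_used.max(blob_gas_used);
    // ...and never returns less than the minimum base fee from the parent's extra data.
    eip1559_update(effective_gas).max(min_base_fee)
}

fn main() {
    // Toy update: +1 wei per 1_000_000 gas, just to make the clamp visible.
    let update = |gas: u64| 1_000_000_000 + gas / 1_000_000;
    // Blob gas dominates and the result is above the minimum: no clamping.
    assert_eq!(jovian_next_base_fee(1_000, 5_000_000, 10, update), 1_000_000_005);
    // Minimum base fee dominates: clamped.
    assert_eq!(jovian_next_base_fee(1_000, 5_000_000, 2_000_000_000, update), 2_000_000_000);
    println!("ok");
}
```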
#[cfg(test)]
|
||||
mod tests {
|
||||
use alloc::sync::Arc;
|
||||
|
||||
use op_alloy_consensus::encode_jovian_extra_data;
|
||||
use reth_chainspec::{ChainSpec, ForkCondition, Hardfork};
|
||||
use reth_optimism_forks::OpHardfork;
|
||||
|
||||
use crate::{OpChainSpec, BASE_SEPOLIA};
|
||||
|
||||
use super::*;
|
||||
|
||||
const JOVIAN_TIMESTAMP: u64 = 1900000000;
|
||||
|
||||
fn get_chainspec() -> Arc<OpChainSpec> {
|
||||
let mut base_sepolia_spec = BASE_SEPOLIA.inner.clone();
|
||||
base_sepolia_spec
|
||||
.hardforks
|
||||
.insert(OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(JOVIAN_TIMESTAMP));
|
||||
Arc::new(OpChainSpec {
|
||||
inner: ChainSpec {
|
||||
chain: base_sepolia_spec.chain,
|
||||
genesis: base_sepolia_spec.genesis,
|
||||
genesis_header: base_sepolia_spec.genesis_header,
|
||||
..Default::default()
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_next_base_fee_jovian_blob_gas_used_greater_than_gas_used() {
|
||||
let chain_spec = get_chainspec();
|
||||
let mut parent = chain_spec.genesis_header().clone();
|
||||
let timestamp = JOVIAN_TIMESTAMP;
|
||||
|
||||
const GAS_LIMIT: u64 = 10_000_000_000;
|
||||
const BLOB_GAS_USED: u64 = 5_000_000_000;
|
||||
const GAS_USED: u64 = 1_000_000_000;
|
||||
const MIN_BASE_FEE: u64 = 100_000_000;
|
||||
|
||||
parent.extra_data =
|
||||
encode_jovian_extra_data([0; 8].into(), BaseFeeParams::base_sepolia(), MIN_BASE_FEE)
|
||||
.unwrap();
|
||||
parent.blob_gas_used = Some(BLOB_GAS_USED);
|
||||
parent.gas_used = GAS_USED;
|
||||
parent.gas_limit = GAS_LIMIT;
|
||||
|
||||
let expected_base_fee = calc_next_block_base_fee(
|
||||
BLOB_GAS_USED,
|
||||
parent.gas_limit(),
|
||||
parent.base_fee_per_gas().unwrap_or_default(),
|
||||
BaseFeeParams::base_sepolia(),
|
||||
);
|
||||
assert_eq!(
|
||||
expected_base_fee,
|
||||
compute_jovian_base_fee(chain_spec, &parent, timestamp).unwrap()
|
||||
);
|
||||
assert_ne!(
|
||||
expected_base_fee,
|
||||
calc_next_block_base_fee(
|
||||
GAS_USED,
|
||||
parent.gas_limit(),
|
||||
parent.base_fee_per_gas().unwrap_or_default(),
|
||||
BaseFeeParams::base_sepolia(),
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_next_base_fee_jovian_blob_gas_used_less_than_gas_used() {
|
||||
let chain_spec = get_chainspec();
|
||||
let mut parent = chain_spec.genesis_header().clone();
|
||||
let timestamp = JOVIAN_TIMESTAMP;
|
||||
|
||||
const GAS_LIMIT: u64 = 10_000_000_000;
|
||||
const BLOB_GAS_USED: u64 = 100_000_000;
|
||||
const GAS_USED: u64 = 1_000_000_000;
|
||||
const MIN_BASE_FEE: u64 = 100_000_000;
|
||||
|
||||
parent.extra_data =
|
||||
encode_jovian_extra_data([0; 8].into(), BaseFeeParams::base_sepolia(), MIN_BASE_FEE)
|
||||
.unwrap();
|
||||
parent.blob_gas_used = Some(BLOB_GAS_USED);
|
||||
parent.gas_used = GAS_USED;
|
||||
parent.gas_limit = GAS_LIMIT;
|
||||
|
||||
let expected_base_fee = calc_next_block_base_fee(
|
||||
GAS_USED,
|
||||
parent.gas_limit(),
|
||||
parent.base_fee_per_gas().unwrap_or_default(),
|
||||
BaseFeeParams::base_sepolia(),
|
||||
);
|
||||
assert_eq!(
|
||||
expected_base_fee,
|
||||
compute_jovian_base_fee(chain_spec, &parent, timestamp).unwrap()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_next_base_fee_jovian_min_base_fee() {
|
||||
let chain_spec = get_chainspec();
|
||||
let mut parent = chain_spec.genesis_header().clone();
|
||||
let timestamp = JOVIAN_TIMESTAMP;
|
||||
|
||||
const GAS_LIMIT: u64 = 10_000_000_000;
|
||||
const BLOB_GAS_USED: u64 = 100_000_000;
|
||||
const GAS_USED: u64 = 1_000_000_000;
|
||||
const MIN_BASE_FEE: u64 = 5_000_000_000;
|
||||
|
||||
parent.extra_data =
|
||||
encode_jovian_extra_data([0; 8].into(), BaseFeeParams::base_sepolia(), MIN_BASE_FEE)
|
||||
.unwrap();
|
||||
parent.blob_gas_used = Some(BLOB_GAS_USED);
|
||||
parent.gas_used = GAS_USED;
|
||||
parent.gas_limit = GAS_LIMIT;
|
||||
|
||||
let expected_base_fee = MIN_BASE_FEE;
|
||||
assert_eq!(
|
||||
expected_base_fee,
|
||||
compute_jovian_base_fee(chain_spec, &parent, timestamp).unwrap()
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1,11 +0,0 @@
//! OP stack variation of chain spec constants.

//------------------------------- BASE MAINNET -------------------------------//

/// Max gas limit on Base: <https://basescan.org/block/17208876>
pub const BASE_MAINNET_MAX_GAS_LIMIT: u64 = 105_000_000;

//------------------------------- BASE SEPOLIA -------------------------------//

/// Max gas limit on Base Sepolia: <https://sepolia.basescan.org/block/12506483>
pub const BASE_SEPOLIA_MAX_GAS_LIMIT: u64 = 45_000_000;
@@ -1,34 +0,0 @@
|
||||
//! Chain specification in dev mode for custom chain.
|
||||
|
||||
use alloc::sync::Arc;
|
||||
|
||||
use alloy_chains::Chain;
|
||||
use alloy_primitives::U256;
|
||||
use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec};
|
||||
use reth_optimism_forks::DEV_HARDFORKS;
|
||||
use reth_primitives_traits::SealedHeader;
|
||||
|
||||
use crate::{make_op_genesis_header, LazyLock, OpChainSpec};
|
||||
|
||||
/// OP dev testnet specification
|
||||
///
|
||||
/// Includes 20 prefunded accounts with `10_000` ETH each derived from mnemonic "test test test test
|
||||
/// test test test test test test test junk".
|
||||
pub static OP_DEV: LazyLock<Arc<OpChainSpec>> = LazyLock::new(|| {
|
||||
let genesis = serde_json::from_str(include_str!("../res/genesis/dev.json"))
|
||||
.expect("Can't deserialize Dev testnet genesis json");
|
||||
let hardforks = DEV_HARDFORKS.clone();
|
||||
let genesis_header = SealedHeader::seal_slow(make_op_genesis_header(&genesis, &hardforks));
|
||||
OpChainSpec {
|
||||
inner: ChainSpec {
|
||||
chain: Chain::dev(),
|
||||
genesis_header,
|
||||
genesis,
|
||||
paris_block_and_final_difficulty: Some((0, U256::from(0))),
|
||||
hardforks,
|
||||
base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
.into()
|
||||
});
|
||||
File diff suppressed because it is too large
@@ -1,41 +0,0 @@
|
||||
//! Chain specification for the Optimism Mainnet network.
|
||||
|
||||
use crate::{make_op_genesis_header, LazyLock, OpChainSpec};
|
||||
use alloc::{sync::Arc, vec};
|
||||
use alloy_chains::Chain;
|
||||
use alloy_primitives::{b256, U256};
|
||||
use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec, Hardfork};
|
||||
use reth_ethereum_forks::EthereumHardfork;
|
||||
use reth_optimism_forks::{OpHardfork, OP_MAINNET_HARDFORKS};
|
||||
use reth_primitives_traits::SealedHeader;
|
||||
|
||||
/// The Optimism Mainnet spec
|
||||
pub static OP_MAINNET: LazyLock<Arc<OpChainSpec>> = LazyLock::new(|| {
|
||||
// genesis contains empty alloc field because state at first bedrock block is imported
|
||||
// manually from trusted source
|
||||
let genesis = serde_json::from_str(include_str!("../res/genesis/optimism.json"))
|
||||
.expect("Can't deserialize Optimism Mainnet genesis json");
|
||||
let hardforks = OP_MAINNET_HARDFORKS.clone();
|
||||
OpChainSpec {
|
||||
inner: ChainSpec {
|
||||
chain: Chain::optimism_mainnet(),
|
||||
genesis_header: SealedHeader::new(
|
||||
make_op_genesis_header(&genesis, &hardforks),
|
||||
b256!("0x7ca38a1916c42007829c55e69d3e9a73265554b586a499015373241b8a3fa48b"),
|
||||
),
|
||||
genesis,
|
||||
paris_block_and_final_difficulty: Some((0, U256::from(0))),
|
||||
hardforks,
|
||||
base_fee_params: BaseFeeParamsKind::Variable(
|
||||
vec![
|
||||
(EthereumHardfork::London.boxed(), BaseFeeParams::optimism()),
|
||||
(OpHardfork::Canyon.boxed(), BaseFeeParams::optimism_canyon()),
|
||||
]
|
||||
.into(),
|
||||
),
|
||||
prune_delete_limit: 10000,
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
.into()
|
||||
});
|
||||
@@ -1,39 +0,0 @@
|
||||
//! Chain specification for the Optimism Sepolia testnet network.
|
||||
|
||||
use crate::{make_op_genesis_header, LazyLock, OpChainSpec};
|
||||
use alloc::{sync::Arc, vec};
|
||||
use alloy_chains::{Chain, NamedChain};
|
||||
use alloy_primitives::{b256, U256};
|
||||
use reth_chainspec::{BaseFeeParams, BaseFeeParamsKind, ChainSpec, Hardfork};
|
||||
use reth_ethereum_forks::EthereumHardfork;
|
||||
use reth_optimism_forks::{OpHardfork, OP_SEPOLIA_HARDFORKS};
|
||||
use reth_primitives_traits::SealedHeader;
|
||||
|
||||
/// The OP Sepolia spec
|
||||
pub static OP_SEPOLIA: LazyLock<Arc<OpChainSpec>> = LazyLock::new(|| {
|
||||
let genesis = serde_json::from_str(include_str!("../res/genesis/sepolia_op.json"))
|
||||
.expect("Can't deserialize OP Sepolia genesis json");
|
||||
let hardforks = OP_SEPOLIA_HARDFORKS.clone();
|
||||
OpChainSpec {
|
||||
inner: ChainSpec {
|
||||
chain: Chain::from_named(NamedChain::OptimismSepolia),
|
||||
genesis_header: SealedHeader::new(
|
||||
make_op_genesis_header(&genesis, &hardforks),
|
||||
b256!("0x102de6ffb001480cc9b8b548fd05c34cd4f46ae4aa91759393db90ea0409887d"),
|
||||
),
|
||||
genesis,
|
||||
paris_block_and_final_difficulty: Some((0, U256::from(0))),
|
||||
hardforks,
|
||||
base_fee_params: BaseFeeParamsKind::Variable(
|
||||
vec![
|
||||
(EthereumHardfork::London.boxed(), BaseFeeParams::optimism_sepolia()),
|
||||
(OpHardfork::Canyon.boxed(), BaseFeeParams::optimism_sepolia_canyon()),
|
||||
]
|
||||
.into(),
|
||||
),
|
||||
prune_delete_limit: 10000,
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
.into()
|
||||
});
|
||||
@@ -1,324 +0,0 @@
|
||||
use alloy_chains::NamedChain;
|
||||
use alloy_genesis::ChainConfig;
|
||||
use alloy_primitives::{ChainId, U256};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// The chain metadata stored in a superchain toml config file.
|
||||
/// Referring here as `ChainMetadata` to avoid confusion with `ChainConfig`.
|
||||
/// Find configs here: `<https://github.com/ethereum-optimism/superchain-registry/tree/main/superchain/configs>`
|
||||
/// This struct is stripped down to only include the necessary fields. We use JSON instead of
|
||||
/// TOML to make it easier to work in a no-std environment.
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub(crate) struct ChainMetadata {
|
||||
pub chain_id: ChainId,
|
||||
pub hardforks: HardforkConfig,
|
||||
pub optimism: Option<OptimismConfig>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub(crate) struct HardforkConfig {
|
||||
pub canyon_time: Option<u64>,
|
||||
pub delta_time: Option<u64>,
|
||||
pub ecotone_time: Option<u64>,
|
||||
pub fjord_time: Option<u64>,
|
||||
pub granite_time: Option<u64>,
|
||||
pub holocene_time: Option<u64>,
|
||||
pub isthmus_time: Option<u64>,
|
||||
pub jovian_time: Option<u64>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub(crate) struct OptimismConfig {
|
||||
pub eip1559_elasticity: u64,
|
||||
pub eip1559_denominator: u64,
|
||||
pub eip1559_denominator_canyon: Option<u64>,
|
||||
}
|
||||
|
||||
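For reference, the JSON shape those three deserialization structs expect looks like the snippet below. The struct definitions here are trimmed copies for illustration only (the full ones are above), the field values are placeholders, and `serde` (with derive) plus `serde_json` are assumed as dependencies:

```rust
use serde::Deserialize;

// Trimmed copies of the structs above, just enough to show the expected JSON shape.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
struct ChainMetadata {
    chain_id: u64,
    hardforks: HardforkConfig,
    optimism: Option<OptimismConfig>,
}

#[derive(Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
struct HardforkConfig {
    canyon_time: Option<u64>,
    ecotone_time: Option<u64>,
}

#[derive(Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
struct OptimismConfig {
    eip1559_elasticity: u64,
    eip1559_denominator: u64,
    eip1559_denominator_canyon: Option<u64>,
}

fn main() {
    // A config converted from the superchain registry's TOML to JSON, as the script above does.
    // Values are placeholders for illustration.
    let json = r#"{
        "chain_id": 10,
        "hardforks": { "canyon_time": 1700000000, "ecotone_time": 1710000000 },
        "optimism": { "eip1559_elasticity": 6, "eip1559_denominator": 50 }
    }"#;
    let meta: ChainMetadata = serde_json::from_str(json).expect("valid chain metadata");
    println!("{meta:?}");
}
```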
#[derive(Clone, Debug, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub(crate) struct ChainConfigExtraFields {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub bedrock_block: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub regolith_time: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub canyon_time: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub delta_time: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub ecotone_time: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub fjord_time: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub granite_time: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub holocene_time: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub isthmus_time: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub jovian_time: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub optimism: Option<ChainConfigExtraFieldsOptimism>,
|
||||
}
|
||||
|
||||
// Helper struct to serialize field for extra fields in ChainConfig
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub(crate) struct ChainConfigExtraFieldsOptimism {
|
||||
pub eip1559_elasticity: u64,
|
||||
pub eip1559_denominator: u64,
|
||||
pub eip1559_denominator_canyon: Option<u64>,
|
||||
}
|
||||
|
||||
impl From<&OptimismConfig> for ChainConfigExtraFieldsOptimism {
|
||||
fn from(value: &OptimismConfig) -> Self {
|
||||
Self {
|
||||
eip1559_elasticity: value.eip1559_elasticity,
|
||||
eip1559_denominator: value.eip1559_denominator,
|
||||
eip1559_denominator_canyon: value.eip1559_denominator_canyon,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a [`ChainConfig`] filled from [`ChainMetadata`] with extra fields and handling
|
||||
/// special case for Optimism chain.
|
||||
// Mimic the behavior from https://github.com/ethereum-optimism/op-geth/blob/35e2c852/params/superchain.go#L26
|
||||
pub(crate) fn to_genesis_chain_config(chain_config: &ChainMetadata) -> ChainConfig {
|
||||
let mut res = ChainConfig {
|
||||
chain_id: chain_config.chain_id,
|
||||
homestead_block: Some(0),
|
||||
dao_fork_block: None,
|
||||
dao_fork_support: false,
|
||||
eip150_block: Some(0),
|
||||
eip155_block: Some(0),
|
||||
eip158_block: Some(0),
|
||||
byzantium_block: Some(0),
|
||||
constantinople_block: Some(0),
|
||||
petersburg_block: Some(0),
|
||||
istanbul_block: Some(0),
|
||||
muir_glacier_block: Some(0),
|
||||
berlin_block: Some(0),
|
||||
london_block: Some(0),
|
||||
arrow_glacier_block: Some(0),
|
||||
gray_glacier_block: Some(0),
|
||||
merge_netsplit_block: Some(0),
|
||||
shanghai_time: chain_config.hardforks.canyon_time, // Shanghai activates with Canyon
|
||||
cancun_time: chain_config.hardforks.ecotone_time, // Cancun activates with Ecotone
|
||||
prague_time: chain_config.hardforks.isthmus_time, // Prague activates with Isthmus
|
||||
osaka_time: None,
|
||||
terminal_total_difficulty: Some(U256::ZERO),
|
||||
terminal_total_difficulty_passed: true,
|
||||
ethash: None,
|
||||
clique: None,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// Special case for Optimism chain
|
||||
if chain_config.chain_id == NamedChain::Optimism as ChainId {
|
||||
res.berlin_block = Some(3950000);
|
||||
res.london_block = Some(105235063);
|
||||
res.arrow_glacier_block = Some(105235063);
|
||||
res.gray_glacier_block = Some(105235063);
|
||||
res.merge_netsplit_block = Some(105235063);
|
||||
}
|
||||
|
||||
// Add extra fields for ChainConfig from Genesis
|
||||
let extra_fields = ChainConfigExtraFields {
|
||||
bedrock_block: if chain_config.chain_id == NamedChain::Optimism as ChainId {
|
||||
Some(105235063)
|
||||
} else {
|
||||
Some(0)
|
||||
},
|
||||
regolith_time: Some(0),
|
||||
canyon_time: chain_config.hardforks.canyon_time,
|
||||
delta_time: chain_config.hardforks.delta_time,
|
||||
ecotone_time: chain_config.hardforks.ecotone_time,
|
||||
fjord_time: chain_config.hardforks.fjord_time,
|
||||
granite_time: chain_config.hardforks.granite_time,
|
||||
holocene_time: chain_config.hardforks.holocene_time,
|
||||
isthmus_time: chain_config.hardforks.isthmus_time,
|
||||
jovian_time: chain_config.hardforks.jovian_time,
|
||||
optimism: chain_config.optimism.as_ref().map(|o| o.into()),
|
||||
};
|
||||
res.extra_fields =
|
||||
serde_json::to_value(extra_fields).unwrap_or_default().try_into().unwrap_or_default();
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
const BASE_CHAIN_METADATA: &str = r#"
|
||||
{
|
||||
"chain_id": 8453,
|
||||
"hardforks": {
|
||||
"canyon_time": 1704992401,
|
||||
"delta_time": 1708560000,
|
||||
"ecotone_time": 1710374401,
|
||||
"fjord_time": 1720627201,
|
||||
"granite_time": 1726070401,
|
||||
"holocene_time": 1736445601,
|
||||
"isthmus_time": 1746806401
|
||||
},
|
||||
"optimism": {
|
||||
"eip1559_elasticity": 6,
|
||||
"eip1559_denominator": 50,
|
||||
"eip1559_denominator_canyon": 250
|
||||
}
|
||||
}
|
||||
"#;
|
||||
|
||||
#[test]
|
||||
fn test_deserialize_chain_config() {
|
||||
let config: ChainMetadata = serde_json::from_str(BASE_CHAIN_METADATA).unwrap();
|
||||
assert_eq!(config.chain_id, 8453);
|
||||
// hardforks
|
||||
assert_eq!(config.hardforks.canyon_time, Some(1704992401));
|
||||
assert_eq!(config.hardforks.delta_time, Some(1708560000));
|
||||
assert_eq!(config.hardforks.ecotone_time, Some(1710374401));
|
||||
assert_eq!(config.hardforks.fjord_time, Some(1720627201));
|
||||
assert_eq!(config.hardforks.granite_time, Some(1726070401));
|
||||
assert_eq!(config.hardforks.holocene_time, Some(1736445601));
|
||||
assert_eq!(config.hardforks.isthmus_time, Some(1746806401));
|
||||
// optimism
|
||||
assert_eq!(config.optimism.as_ref().unwrap().eip1559_elasticity, 6);
|
||||
assert_eq!(config.optimism.as_ref().unwrap().eip1559_denominator, 50);
|
||||
assert_eq!(config.optimism.as_ref().unwrap().eip1559_denominator_canyon, Some(250));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_chain_config_extra_fields() {
|
||||
let extra_fields = ChainConfigExtraFields {
|
||||
bedrock_block: Some(105235063),
|
||||
regolith_time: Some(0),
|
||||
canyon_time: Some(1704992401),
|
||||
delta_time: Some(1708560000),
|
||||
ecotone_time: Some(1710374401),
|
||||
fjord_time: Some(1720627201),
|
||||
granite_time: Some(1726070401),
|
||||
holocene_time: Some(1736445601),
|
||||
isthmus_time: Some(1746806401),
|
||||
jovian_time: None,
|
||||
optimism: Option::from(ChainConfigExtraFieldsOptimism {
|
||||
eip1559_elasticity: 6,
|
||||
eip1559_denominator: 50,
|
||||
eip1559_denominator_canyon: Some(250),
|
||||
}),
|
||||
};
|
||||
let value = serde_json::to_value(extra_fields).unwrap();
|
||||
assert_eq!(value.get("bedrockBlock").unwrap(), 105235063);
|
||||
assert_eq!(value.get("regolithTime").unwrap(), 0);
|
||||
assert_eq!(value.get("canyonTime").unwrap(), 1704992401);
|
||||
assert_eq!(value.get("deltaTime").unwrap(), 1708560000);
|
||||
assert_eq!(value.get("ecotoneTime").unwrap(), 1710374401);
|
||||
assert_eq!(value.get("fjordTime").unwrap(), 1720627201);
|
||||
assert_eq!(value.get("graniteTime").unwrap(), 1726070401);
|
||||
assert_eq!(value.get("holoceneTime").unwrap(), 1736445601);
|
||||
assert_eq!(value.get("isthmusTime").unwrap(), 1746806401);
|
||||
assert_eq!(value.get("jovianTime"), None);
|
||||
let optimism = value.get("optimism").unwrap();
|
||||
assert_eq!(optimism.get("eip1559Elasticity").unwrap(), 6);
|
||||
assert_eq!(optimism.get("eip1559Denominator").unwrap(), 50);
|
||||
assert_eq!(optimism.get("eip1559DenominatorCanyon").unwrap(), 250);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_convert_to_genesis_chain_config() {
|
||||
let config: ChainMetadata = serde_json::from_str(BASE_CHAIN_METADATA).unwrap();
|
||||
let chain_config = to_genesis_chain_config(&config);
|
||||
assert_eq!(chain_config.chain_id, 8453);
|
||||
assert_eq!(chain_config.homestead_block, Some(0));
|
||||
assert_eq!(chain_config.dao_fork_block, None);
|
||||
assert!(!chain_config.dao_fork_support);
|
||||
assert_eq!(chain_config.eip150_block, Some(0));
|
||||
assert_eq!(chain_config.eip155_block, Some(0));
|
||||
assert_eq!(chain_config.eip158_block, Some(0));
|
||||
assert_eq!(chain_config.byzantium_block, Some(0));
|
||||
assert_eq!(chain_config.constantinople_block, Some(0));
|
||||
assert_eq!(chain_config.petersburg_block, Some(0));
|
||||
assert_eq!(chain_config.istanbul_block, Some(0));
|
||||
assert_eq!(chain_config.muir_glacier_block, Some(0));
|
||||
assert_eq!(chain_config.berlin_block, Some(0));
|
||||
assert_eq!(chain_config.london_block, Some(0));
|
||||
assert_eq!(chain_config.arrow_glacier_block, Some(0));
|
||||
assert_eq!(chain_config.gray_glacier_block, Some(0));
|
||||
assert_eq!(chain_config.merge_netsplit_block, Some(0));
|
||||
assert_eq!(chain_config.shanghai_time, Some(1704992401));
|
||||
assert_eq!(chain_config.cancun_time, Some(1710374401));
|
||||
assert_eq!(chain_config.prague_time, Some(1746806401));
|
||||
assert_eq!(chain_config.osaka_time, None);
|
||||
assert_eq!(chain_config.terminal_total_difficulty, Some(U256::ZERO));
|
||||
assert!(chain_config.terminal_total_difficulty_passed);
|
||||
assert_eq!(chain_config.ethash, None);
|
||||
assert_eq!(chain_config.clique, None);
|
||||
assert_eq!(chain_config.extra_fields.get("bedrockBlock").unwrap(), 0);
|
||||
assert_eq!(chain_config.extra_fields.get("regolithTime").unwrap(), 0);
|
||||
assert_eq!(chain_config.extra_fields.get("canyonTime").unwrap(), 1704992401);
|
||||
assert_eq!(chain_config.extra_fields.get("deltaTime").unwrap(), 1708560000);
|
||||
assert_eq!(chain_config.extra_fields.get("ecotoneTime").unwrap(), 1710374401);
|
||||
assert_eq!(chain_config.extra_fields.get("fjordTime").unwrap(), 1720627201);
|
||||
assert_eq!(chain_config.extra_fields.get("graniteTime").unwrap(), 1726070401);
|
||||
assert_eq!(chain_config.extra_fields.get("holoceneTime").unwrap(), 1736445601);
|
||||
assert_eq!(chain_config.extra_fields.get("isthmusTime").unwrap(), 1746806401);
|
||||
assert_eq!(chain_config.extra_fields.get("jovianTime"), None);
|
||||
let optimism = chain_config.extra_fields.get("optimism").unwrap();
|
||||
assert_eq!(optimism.get("eip1559Elasticity").unwrap(), 6);
|
||||
assert_eq!(optimism.get("eip1559Denominator").unwrap(), 50);
|
||||
assert_eq!(optimism.get("eip1559DenominatorCanyon").unwrap(), 250);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_convert_to_genesis_chain_config_op() {
|
||||
const OP_CHAIN_METADATA: &str = r#"
|
||||
{
|
||||
"chain_id": 10,
|
||||
"hardforks": {
|
||||
"canyon_time": 1704992401,
|
||||
"delta_time": 1708560000,
|
||||
"ecotone_time": 1710374401,
|
||||
"fjord_time": 1720627201,
|
||||
"granite_time": 1726070401,
|
||||
"holocene_time": 1736445601,
|
||||
"isthmus_time": 1746806401
|
||||
},
|
||||
"optimism": {
|
||||
"eip1559_elasticity": 6,
|
||||
"eip1559_denominator": 50,
|
||||
"eip1559_denominator_canyon": 250
|
||||
}
|
||||
}
|
||||
"#;
|
||||
let config: ChainMetadata = serde_json::from_str(OP_CHAIN_METADATA).unwrap();
|
||||
assert_eq!(config.hardforks.canyon_time, Some(1704992401));
|
||||
let chain_config = to_genesis_chain_config(&config);
|
||||
assert_eq!(chain_config.chain_id, 10);
|
||||
assert_eq!(chain_config.shanghai_time, Some(1704992401));
|
||||
assert_eq!(chain_config.cancun_time, Some(1710374401));
|
||||
assert_eq!(chain_config.prague_time, Some(1746806401));
|
||||
assert_eq!(chain_config.berlin_block, Some(3950000));
|
||||
assert_eq!(chain_config.london_block, Some(105235063));
|
||||
assert_eq!(chain_config.arrow_glacier_block, Some(105235063));
|
||||
assert_eq!(chain_config.gray_glacier_block, Some(105235063));
|
||||
assert_eq!(chain_config.merge_netsplit_block, Some(105235063));
|
||||
assert_eq!(chain_config.extra_fields.get("bedrockBlock").unwrap(), 105235063);
|
||||
assert_eq!(chain_config.extra_fields.get("regolithTime").unwrap(), 0);
|
||||
assert_eq!(chain_config.extra_fields.get("canyonTime").unwrap(), 1704992401);
|
||||
assert_eq!(chain_config.extra_fields.get("deltaTime").unwrap(), 1708560000);
|
||||
assert_eq!(chain_config.extra_fields.get("ecotoneTime").unwrap(), 1710374401);
|
||||
assert_eq!(chain_config.extra_fields.get("fjordTime").unwrap(), 1720627201);
|
||||
assert_eq!(chain_config.extra_fields.get("graniteTime").unwrap(), 1726070401);
|
||||
assert_eq!(chain_config.extra_fields.get("holoceneTime").unwrap(), 1736445601);
|
||||
assert_eq!(chain_config.extra_fields.get("isthmusTime").unwrap(), 1746806401);
|
||||
assert_eq!(chain_config.extra_fields.get("jovianTime"), None);
|
||||
|
||||
let optimism = chain_config.extra_fields.get("optimism").unwrap();
|
||||
assert_eq!(optimism.get("eip1559Elasticity").unwrap(), 6);
|
||||
assert_eq!(optimism.get("eip1559Denominator").unwrap(), 50);
|
||||
assert_eq!(optimism.get("eip1559DenominatorCanyon").unwrap(), 250);
|
||||
}
|
||||
}
|
||||
@@ -1,104 +0,0 @@
|
||||
/// Create a chain spec for a given superchain and environment.
|
||||
#[macro_export]
|
||||
macro_rules! create_chain_spec {
|
||||
($name:expr, $environment:expr) => {
|
||||
paste::paste! {
|
||||
/// The Optimism $name $environment spec
|
||||
pub static [<$name:upper _ $environment:upper>]: $crate::LazyLock<alloc::sync::Arc<$crate::OpChainSpec>> = $crate::LazyLock::new(|| {
|
||||
$crate::OpChainSpec::from_genesis($crate::superchain::configs::read_superchain_genesis($name, $environment)
|
||||
.unwrap_or_else(|e| panic!("Can't read {}-{} genesis: {}", $name, $environment, e)))
|
||||
.into()
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Generates the key string for a given name and environment pair.
|
||||
#[macro_export]
|
||||
macro_rules! key_for {
|
||||
($name:expr, "mainnet") => {
|
||||
$name
|
||||
};
|
||||
($name:expr, $env:expr) => {
|
||||
concat!($name, "-", $env)
|
||||
};
|
||||
}
|
||||
|
||||
/// Create chain specs and an enum of every superchain (name, environment) pair.
|
||||
#[macro_export]
|
||||
macro_rules! create_superchain_specs {
|
||||
( $( ($name:expr, $env:expr) ),+ $(,)? ) => {
|
||||
$(
|
||||
$crate::create_chain_spec!($name, $env);
|
||||
)+
|
||||
|
||||
paste::paste! {
|
||||
/// All available superchains as an enum
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
||||
#[allow(non_camel_case_types)]
|
||||
pub enum Superchain {
|
||||
$(
|
||||
#[doc = concat!("Superchain variant for `", $name, "-", $env, "`.")]
|
||||
[<$name:camel _ $env:camel>],
|
||||
)+
|
||||
}
|
||||
|
||||
impl Superchain {
|
||||
/// A slice of every superchain enum variant
|
||||
pub const ALL: &'static [Self] = &[
|
||||
$(
|
||||
Self::[<$name:camel _ $env:camel>],
|
||||
)+
|
||||
];
|
||||
|
||||
/// Returns the original name
|
||||
pub const fn name(self) -> &'static str {
|
||||
match self {
|
||||
$(
|
||||
Self::[<$name:camel _ $env:camel>] => $name,
|
||||
)+
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the original environment
|
||||
pub const fn environment(self) -> &'static str {
|
||||
match self {
|
||||
$(
|
||||
Self::[<$name:camel _ $env:camel>] => $env,
|
||||
)+
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// All supported superchains, including both older and newer naming,
|
||||
/// for backwards compatibility
|
||||
pub const SUPPORTED_CHAINS: &'static [&'static str] = &[
|
||||
"optimism",
|
||||
"optimism_sepolia",
|
||||
"optimism-sepolia",
|
||||
"base",
|
||||
"base_sepolia",
|
||||
"base-sepolia",
|
||||
$(
|
||||
$crate::key_for!($name, $env),
|
||||
)+
|
||||
"dev",
|
||||
];
|
||||
|
||||
/// Parses the chain into an [`$crate::OpChainSpec`], if recognized.
|
||||
pub fn generated_chain_value_parser(s: &str) -> Option<alloc::sync::Arc<$crate::OpChainSpec>> {
|
||||
match s {
|
||||
"dev" => Some($crate::OP_DEV.clone()),
|
||||
"optimism" => Some($crate::OP_MAINNET.clone()),
|
||||
"optimism_sepolia" | "optimism-sepolia" => Some($crate::OP_SEPOLIA.clone()),
|
||||
"base" => Some($crate::BASE_MAINNET.clone()),
|
||||
"base_sepolia" | "base-sepolia" => Some($crate::BASE_SEPOLIA.clone()),
|
||||
$(
|
||||
$crate::key_for!($name, $env) => Some($crate::[<$name:upper _ $env:upper>].clone()),
|
||||
)+
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -1,64 +0,0 @@
|
||||
// Generated by fetch_superchain_config.sh
|
||||
use crate::create_superchain_specs;
|
||||
|
||||
create_superchain_specs!(
|
||||
("arena-z", "mainnet"),
|
||||
("arena-z", "sepolia"),
|
||||
("automata", "mainnet"),
|
||||
("base-devnet-0", "sepolia-dev-0"),
|
||||
("bob", "mainnet"),
|
||||
("boba", "sepolia"),
|
||||
("boba", "mainnet"),
|
||||
("camp", "sepolia"),
|
||||
("celo", "mainnet"),
|
||||
("creator-chain-testnet", "sepolia"),
|
||||
("cyber", "mainnet"),
|
||||
("cyber", "sepolia"),
|
||||
("ethernity", "mainnet"),
|
||||
("ethernity", "sepolia"),
|
||||
("fraxtal", "mainnet"),
|
||||
("funki", "mainnet"),
|
||||
("funki", "sepolia"),
|
||||
("hashkeychain", "mainnet"),
|
||||
("ink", "mainnet"),
|
||||
("ink", "sepolia"),
|
||||
("lisk", "mainnet"),
|
||||
("lisk", "sepolia"),
|
||||
("lyra", "mainnet"),
|
||||
("metal", "mainnet"),
|
||||
("metal", "sepolia"),
|
||||
("mint", "mainnet"),
|
||||
("mode", "mainnet"),
|
||||
("mode", "sepolia"),
|
||||
("oplabs-devnet-0", "sepolia-dev-0"),
|
||||
("orderly", "mainnet"),
|
||||
("ozean", "sepolia"),
|
||||
("pivotal", "sepolia"),
|
||||
("polynomial", "mainnet"),
|
||||
("race", "mainnet"),
|
||||
("race", "sepolia"),
|
||||
("radius_testnet", "sepolia"),
|
||||
("redstone", "mainnet"),
|
||||
("rehearsal-0-bn-0", "rehearsal-0-bn"),
|
||||
("rehearsal-0-bn-1", "rehearsal-0-bn"),
|
||||
("settlus-mainnet", "mainnet"),
|
||||
("settlus-sepolia", "sepolia"),
|
||||
("shape", "mainnet"),
|
||||
("shape", "sepolia"),
|
||||
("silent-data-mainnet", "mainnet"),
|
||||
("snax", "mainnet"),
|
||||
("soneium", "mainnet"),
|
||||
("soneium-minato", "sepolia"),
|
||||
("sseed", "mainnet"),
|
||||
("swan", "mainnet"),
|
||||
("swell", "mainnet"),
|
||||
("tbn", "mainnet"),
|
||||
("tbn", "sepolia"),
|
||||
("unichain", "mainnet"),
|
||||
("unichain", "sepolia"),
|
||||
("worldchain", "mainnet"),
|
||||
("worldchain", "sepolia"),
|
||||
("xterio-eth", "mainnet"),
|
||||
("zora", "mainnet"),
|
||||
("zora", "sepolia"),
|
||||
);
|
||||
@@ -1,298 +0,0 @@
|
||||
use crate::superchain::chain_metadata::{to_genesis_chain_config, ChainMetadata};
|
||||
use alloc::{
|
||||
format,
|
||||
string::{String, ToString},
|
||||
vec::Vec,
|
||||
};
|
||||
use alloy_genesis::Genesis;
|
||||
use miniz_oxide::inflate::decompress_to_vec_zlib_with_limit;
|
||||
use tar_no_std::{CorruptDataError, TarArchiveRef};
|
||||
|
||||
/// A genesis file can be up to 100MiB. This is a reasonable limit for the genesis file size.
|
||||
const MAX_GENESIS_SIZE: usize = 100 * 1024 * 1024; // 100MiB
|
||||
|
||||
/// The tar file contains the chain configs and genesis files for all chains.
|
||||
const SUPER_CHAIN_CONFIGS_TAR_BYTES: &[u8] = include_bytes!("../../res/superchain-configs.tar");
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub(crate) enum SuperchainConfigError {
|
||||
#[error("Error reading archive due to corrupt data: {0}")]
|
||||
CorruptDataError(CorruptDataError),
|
||||
#[error("Error converting bytes to UTF-8 String: {0}")]
|
||||
FromUtf8Error(#[from] alloc::string::FromUtf8Error),
|
||||
#[error("Error reading file: {0}")]
|
||||
Utf8Error(#[from] core::str::Utf8Error),
|
||||
#[error("Error deserializing JSON: {0}")]
|
||||
JsonError(#[from] serde_json::Error),
|
||||
#[error("File {0} not found in archive")]
|
||||
FileNotFound(String),
|
||||
#[error("Error decompressing file: {0}")]
|
||||
DecompressError(String),
|
||||
}
|
||||
|
||||
/// Reads the [`Genesis`] from the superchain config tar file for a superchain.
|
||||
/// For example, `read_genesis_from_superchain_config("unichain", "mainnet")`.
|
||||
pub(crate) fn read_superchain_genesis(
|
||||
name: &str,
|
||||
environment: &str,
|
||||
) -> Result<Genesis, SuperchainConfigError> {
|
||||
// Open the archive.
|
||||
let archive = TarArchiveRef::new(SUPER_CHAIN_CONFIGS_TAR_BYTES)
|
||||
.map_err(SuperchainConfigError::CorruptDataError)?;
|
||||
// Read and decompress the genesis file.
|
||||
let compressed_genesis_file =
|
||||
read_file(&archive, &format!("genesis/{environment}/{name}.json.zz"))?;
|
||||
let genesis_file =
|
||||
decompress_to_vec_zlib_with_limit(&compressed_genesis_file, MAX_GENESIS_SIZE)
|
||||
.map_err(|e| SuperchainConfigError::DecompressError(format!("{e}")))?;
|
||||
|
||||
// Load the genesis file.
|
||||
let mut genesis: Genesis = serde_json::from_slice(&genesis_file)?;
|
||||
|
||||
// The "config" field is stripped (see fetch_superchain_config.sh) from the genesis file
|
||||
// because it is not always populated. For that reason, we read the config from the chain
|
||||
// metadata file. See: https://github.com/ethereum-optimism/superchain-registry/issues/901
|
||||
genesis.config =
|
||||
to_genesis_chain_config(&read_superchain_metadata(name, environment, &archive)?);
|
||||
|
||||
Ok(genesis)
|
||||
}
|
||||
|
||||
/// Reads the [`ChainMetadata`] from the superchain config tar file for a superchain.
|
||||
/// For example, `read_superchain_config("unichain", "mainnet")`.
|
||||
fn read_superchain_metadata(
|
||||
name: &str,
|
||||
environment: &str,
|
||||
archive: &TarArchiveRef<'_>,
|
||||
) -> Result<ChainMetadata, SuperchainConfigError> {
|
||||
let config_file = read_file(archive, &format!("configs/{environment}/{name}.json"))?;
|
||||
let config_content = String::from_utf8(config_file)?;
|
||||
let chain_config: ChainMetadata = serde_json::from_str(&config_content)?;
|
||||
Ok(chain_config)
|
||||
}
|
||||
|
||||
/// Reads a file from the tar archive. The file path is relative to the root of the tar archive.
|
||||
fn read_file(
|
||||
archive: &TarArchiveRef<'_>,
|
||||
file_path: &str,
|
||||
) -> Result<Vec<u8>, SuperchainConfigError> {
|
||||
for entry in archive.entries() {
|
||||
if entry.filename().as_str()? == file_path {
|
||||
return Ok(entry.data().to_vec())
|
||||
}
|
||||
}
|
||||
Err(SuperchainConfigError::FileNotFound(file_path.to_string()))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{generated_chain_value_parser, superchain::Superchain, SUPPORTED_CHAINS};
|
||||
use alloy_chains::NamedChain;
|
||||
use alloy_op_hardforks::{
|
||||
OpHardfork, BASE_MAINNET_CANYON_TIMESTAMP, BASE_MAINNET_ECOTONE_TIMESTAMP,
|
||||
BASE_MAINNET_ISTHMUS_TIMESTAMP, BASE_MAINNET_JOVIAN_TIMESTAMP,
|
||||
BASE_SEPOLIA_CANYON_TIMESTAMP, BASE_SEPOLIA_ECOTONE_TIMESTAMP,
|
||||
BASE_SEPOLIA_ISTHMUS_TIMESTAMP, BASE_SEPOLIA_JOVIAN_TIMESTAMP, OP_MAINNET_CANYON_TIMESTAMP,
|
||||
OP_MAINNET_ECOTONE_TIMESTAMP, OP_MAINNET_ISTHMUS_TIMESTAMP, OP_MAINNET_JOVIAN_TIMESTAMP,
|
||||
OP_SEPOLIA_CANYON_TIMESTAMP, OP_SEPOLIA_ECOTONE_TIMESTAMP, OP_SEPOLIA_ISTHMUS_TIMESTAMP,
|
||||
OP_SEPOLIA_JOVIAN_TIMESTAMP,
|
||||
};
|
||||
use reth_optimism_primitives::L2_TO_L1_MESSAGE_PASSER_ADDRESS;
|
||||
use tar_no_std::TarArchiveRef;
|
||||
|
||||
#[test]
|
||||
fn test_read_superchain_genesis() {
|
||||
let genesis = read_superchain_genesis("unichain", "mainnet").unwrap();
|
||||
assert_eq!(genesis.config.chain_id, 130);
|
||||
assert_eq!(genesis.timestamp, 1730748359);
|
||||
assert!(genesis.alloc.contains_key(&L2_TO_L1_MESSAGE_PASSER_ADDRESS));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_read_superchain_genesis_with_workaround() {
|
||||
let genesis = read_superchain_genesis("funki", "mainnet").unwrap();
|
||||
assert_eq!(genesis.config.chain_id, 33979);
|
||||
assert_eq!(genesis.timestamp, 1721211095);
|
||||
assert!(genesis.alloc.contains_key(&L2_TO_L1_MESSAGE_PASSER_ADDRESS));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_read_superchain_metadata() {
|
||||
let archive = TarArchiveRef::new(SUPER_CHAIN_CONFIGS_TAR_BYTES).unwrap();
|
||||
let chain_config = read_superchain_metadata("funki", "mainnet", &archive).unwrap();
|
||||
assert_eq!(chain_config.chain_id, 33979);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_read_all_genesis_files() {
|
||||
let archive = TarArchiveRef::new(SUPER_CHAIN_CONFIGS_TAR_BYTES).unwrap();
|
||||
// Check that all genesis files can be read without errors.
|
||||
for entry in archive.entries() {
|
||||
let filename = entry
|
||||
.filename()
|
||||
.as_str()
|
||||
.unwrap()
|
||||
.split('/')
|
||||
.map(|s| s.to_string())
|
||||
.collect::<Vec<String>>();
|
||||
if filename.first().unwrap().ne(&"genesis") {
|
||||
continue
|
||||
}
|
||||
read_superchain_metadata(
|
||||
&filename.get(2).unwrap().replace(".json.zz", ""),
|
||||
filename.get(1).unwrap(),
|
||||
&archive,
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_genesis_exists_for_all_available_chains() {
|
||||
for &chain in Superchain::ALL {
|
||||
let genesis = read_superchain_genesis(chain.name(), chain.environment());
|
||||
assert!(
|
||||
genesis.is_ok(),
|
||||
"Genesis not found for chain: {}-{}",
|
||||
chain.name(),
|
||||
chain.environment()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hardfork_timestamps() {
|
||||
for &chain in SUPPORTED_CHAINS {
|
||||
let metadata = generated_chain_value_parser(chain).unwrap();
|
||||
|
||||
match metadata.chain().named() {
|
||||
Some(NamedChain::Optimism) => {
|
||||
assert_eq!(
|
||||
metadata.hardforks.get(OpHardfork::Jovian).unwrap().as_timestamp().unwrap(),
|
||||
OP_MAINNET_JOVIAN_TIMESTAMP
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
metadata
|
||||
.hardforks
|
||||
.get(OpHardfork::Isthmus)
|
||||
.unwrap()
|
||||
.as_timestamp()
|
||||
.unwrap(),
|
||||
OP_MAINNET_ISTHMUS_TIMESTAMP
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
metadata.hardforks.get(OpHardfork::Canyon).unwrap().as_timestamp().unwrap(),
|
||||
OP_MAINNET_CANYON_TIMESTAMP
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
metadata
|
||||
.hardforks
|
||||
.get(OpHardfork::Ecotone)
|
||||
.unwrap()
|
||||
.as_timestamp()
|
||||
.unwrap(),
|
||||
OP_MAINNET_ECOTONE_TIMESTAMP
|
||||
);
|
||||
}
|
||||
Some(NamedChain::OptimismSepolia) => {
|
||||
assert_eq!(
|
||||
metadata.hardforks.get(OpHardfork::Jovian).unwrap().as_timestamp().unwrap(),
|
||||
OP_SEPOLIA_JOVIAN_TIMESTAMP
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
metadata
|
||||
.hardforks
|
||||
.get(OpHardfork::Isthmus)
|
||||
.unwrap()
|
||||
.as_timestamp()
|
||||
.unwrap(),
|
||||
OP_SEPOLIA_ISTHMUS_TIMESTAMP
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
metadata.hardforks.get(OpHardfork::Canyon).unwrap().as_timestamp().unwrap(),
|
||||
OP_SEPOLIA_CANYON_TIMESTAMP
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
metadata
|
||||
.hardforks
|
||||
.get(OpHardfork::Ecotone)
|
||||
.unwrap()
|
||||
.as_timestamp()
|
||||
.unwrap(),
|
||||
OP_SEPOLIA_ECOTONE_TIMESTAMP
|
||||
);
|
||||
}
|
||||
Some(NamedChain::Base) => {
|
||||
assert_eq!(
|
||||
metadata.hardforks.get(OpHardfork::Jovian).unwrap().as_timestamp().unwrap(),
|
||||
BASE_MAINNET_JOVIAN_TIMESTAMP
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
metadata
|
||||
.hardforks
|
||||
.get(OpHardfork::Isthmus)
|
||||
.unwrap()
|
||||
.as_timestamp()
|
||||
.unwrap(),
|
||||
BASE_MAINNET_ISTHMUS_TIMESTAMP
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
metadata.hardforks.get(OpHardfork::Canyon).unwrap().as_timestamp().unwrap(),
|
||||
BASE_MAINNET_CANYON_TIMESTAMP
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
metadata
|
||||
.hardforks
|
||||
.get(OpHardfork::Ecotone)
|
||||
.unwrap()
|
||||
.as_timestamp()
|
||||
.unwrap(),
|
||||
BASE_MAINNET_ECOTONE_TIMESTAMP
|
||||
);
|
||||
}
|
||||
Some(NamedChain::BaseSepolia) => {
|
||||
assert_eq!(
|
||||
metadata.hardforks.get(OpHardfork::Jovian).unwrap().as_timestamp().unwrap(),
|
||||
BASE_SEPOLIA_JOVIAN_TIMESTAMP
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
metadata
|
||||
.hardforks
|
||||
.get(OpHardfork::Isthmus)
|
||||
.unwrap()
|
||||
.as_timestamp()
|
||||
.unwrap(),
|
||||
BASE_SEPOLIA_ISTHMUS_TIMESTAMP
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
metadata.hardforks.get(OpHardfork::Canyon).unwrap().as_timestamp().unwrap(),
|
||||
BASE_SEPOLIA_CANYON_TIMESTAMP
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
metadata
|
||||
.hardforks
|
||||
.get(OpHardfork::Ecotone)
|
||||
.unwrap()
|
||||
.as_timestamp()
|
||||
.unwrap(),
|
||||
BASE_SEPOLIA_ECOTONE_TIMESTAMP
|
||||
);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,32 +0,0 @@
|
||||
//! Support for superchain registry.
|
||||
|
||||
mod chain_metadata;
|
||||
mod chain_spec_macro;
|
||||
mod chain_specs;
|
||||
mod configs;
|
||||
|
||||
pub use chain_specs::*;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::Superchain;
|
||||
|
||||
#[test]
|
||||
fn round_trip_superchain_enum_name_and_env() {
|
||||
for &chain in Superchain::ALL {
|
||||
let name = chain.name();
|
||||
let env = chain.environment();
|
||||
|
||||
assert!(!name.is_empty(), "name() must not be empty");
|
||||
assert!(!env.is_empty(), "environment() must not be empty");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn superchain_enum_has_funki_mainnet() {
|
||||
assert!(
|
||||
Superchain::ALL.iter().any(|&c| c.name() == "funki" && c.environment() == "mainnet"),
|
||||
"Expected funki/mainnet in ALL"
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1,127 +0,0 @@
|
||||
[package]
|
||||
name = "reth-optimism-cli"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
rust-version.workspace = true
|
||||
license.workspace = true
|
||||
homepage.workspace = true
|
||||
repository.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
reth-static-file-types = { workspace = true, features = ["clap"] }
|
||||
reth-cli.workspace = true
|
||||
reth-cli-commands.workspace = true
|
||||
reth-consensus.workspace = true
|
||||
reth-rpc-server-types.workspace = true
|
||||
reth-primitives-traits.workspace = true
|
||||
reth-db = { workspace = true, features = ["mdbx", "op"] }
|
||||
reth-db-api.workspace = true
|
||||
reth-db-common.workspace = true
|
||||
reth-downloaders.workspace = true
|
||||
reth-provider.workspace = true
|
||||
reth-prune.workspace = true
|
||||
reth-stages.workspace = true
|
||||
reth-static-file.workspace = true
|
||||
reth-execution-types.workspace = true
|
||||
reth-node-core.workspace = true
|
||||
reth-optimism-node.workspace = true
|
||||
reth-fs-util.workspace = true
|
||||
|
||||
# so jemalloc metrics can be included
|
||||
reth-node-metrics.workspace = true
|
||||
|
||||
## optimism
|
||||
reth-optimism-primitives.workspace = true
|
||||
reth-optimism-chainspec = { workspace = true, features = ["superchain-configs"] }
|
||||
reth-optimism-consensus.workspace = true
|
||||
|
||||
reth-chainspec.workspace = true
|
||||
reth-node-events.workspace = true
|
||||
reth-optimism-evm.workspace = true
|
||||
reth-cli-runner.workspace = true
|
||||
reth-node-builder = { workspace = true, features = ["op"] }
|
||||
reth-tracing.workspace = true
|
||||
|
||||
# eth
|
||||
alloy-eips.workspace = true
|
||||
alloy-consensus.workspace = true
|
||||
alloy-primitives.workspace = true
|
||||
alloy-rlp.workspace = true
|
||||
|
||||
# misc
|
||||
futures-util.workspace = true
|
||||
derive_more.workspace = true
|
||||
serde.workspace = true
|
||||
clap = { workspace = true, features = ["derive", "env"] }
|
||||
|
||||
tokio = { workspace = true, features = ["sync", "macros", "time", "rt-multi-thread"] }
|
||||
tokio-util = { workspace = true, features = ["codec"] }
|
||||
tracing.workspace = true
|
||||
eyre.workspace = true
|
||||
|
||||
# reth test-vectors
|
||||
proptest = { workspace = true, optional = true }
|
||||
op-alloy-consensus.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile.workspace = true
|
||||
reth-stages = { workspace = true, features = ["test-utils"] }
|
||||
|
||||
[build-dependencies]
|
||||
reth-optimism-chainspec = { workspace = true, features = ["std", "superchain-configs"] }
|
||||
|
||||
[features]
|
||||
default = []
|
||||
|
||||
# Opentelemetry feature to activate tracing and logs export
|
||||
otlp = ["reth-tracing/otlp", "reth-node-core/otlp"]
|
||||
otlp-logs = ["reth-tracing/otlp-logs", "reth-node-core/otlp-logs"]
|
||||
|
||||
asm-keccak = [
|
||||
"alloy-primitives/asm-keccak",
|
||||
"reth-node-core/asm-keccak",
|
||||
"reth-optimism-node/asm-keccak",
|
||||
]
|
||||
|
||||
keccak-cache-global = [
|
||||
"alloy-primitives/keccak-cache-global",
|
||||
"reth-node-core/keccak-cache-global",
|
||||
"reth-optimism-node/keccak-cache-global",
|
||||
]
|
||||
|
||||
# Jemalloc feature for vergen to generate correct env vars
|
||||
jemalloc = [
|
||||
"reth-node-core/jemalloc",
|
||||
"reth-node-metrics/jemalloc",
|
||||
]
|
||||
jemalloc-prof = [
|
||||
"jemalloc",
|
||||
"reth-node-metrics/jemalloc-prof",
|
||||
]
|
||||
jemalloc-symbols = [
|
||||
"jemalloc-prof",
|
||||
"reth-node-metrics/jemalloc-symbols",
|
||||
]
|
||||
|
||||
tracy = ["reth-tracing/tracy", "reth-node-core/tracy"]
|
||||
|
||||
dev = [
|
||||
"dep:proptest",
|
||||
"reth-cli-commands/arbitrary",
|
||||
]
|
||||
|
||||
serde = [
|
||||
"alloy-consensus/serde",
|
||||
"alloy-eips/serde",
|
||||
"alloy-primitives/serde",
|
||||
"op-alloy-consensus/serde",
|
||||
"reth-execution-types/serde",
|
||||
"reth-optimism-primitives/serde",
|
||||
"reth-primitives-traits/serde",
|
||||
"reth-optimism-chainspec/serde",
|
||||
]
|
||||
|
||||
edge = ["reth-cli-commands/edge", "reth-node-core/edge"]
|
||||
@@ -1,156 +0,0 @@
|
||||
use crate::{Cli, Commands};
|
||||
use eyre::{eyre, Result};
|
||||
use reth_cli::chainspec::ChainSpecParser;
|
||||
use reth_cli_commands::launcher::Launcher;
|
||||
use reth_cli_runner::CliRunner;
|
||||
use reth_node_core::args::{OtlpInitStatus, OtlpLogsStatus};
|
||||
use reth_node_metrics::recorder::install_prometheus_recorder;
|
||||
use reth_optimism_chainspec::OpChainSpec;
|
||||
use reth_optimism_consensus::OpBeaconConsensus;
|
||||
use reth_optimism_node::{OpExecutorProvider, OpNode};
|
||||
use reth_rpc_server_types::RpcModuleValidator;
|
||||
use reth_tracing::{FileWorkerGuard, Layers};
|
||||
use std::{fmt, sync::Arc};
|
||||
use tracing::{info, warn};
|
||||
|
||||
/// A wrapper around a parsed CLI that handles command execution.
|
||||
#[derive(Debug)]
|
||||
pub struct CliApp<Spec: ChainSpecParser, Ext: clap::Args + fmt::Debug, Rpc: RpcModuleValidator> {
|
||||
cli: Cli<Spec, Ext, Rpc>,
|
||||
runner: Option<CliRunner>,
|
||||
layers: Option<Layers>,
|
||||
guard: Option<FileWorkerGuard>,
|
||||
}
|
||||
|
||||
impl<C, Ext, Rpc> CliApp<C, Ext, Rpc>
|
||||
where
|
||||
C: ChainSpecParser<ChainSpec = OpChainSpec>,
|
||||
Ext: clap::Args + fmt::Debug,
|
||||
Rpc: RpcModuleValidator,
|
||||
{
|
||||
pub(crate) fn new(cli: Cli<C, Ext, Rpc>) -> Self {
|
||||
Self { cli, runner: None, layers: Some(Layers::new()), guard: None }
|
||||
}
|
||||
|
||||
/// Sets the runner for the CLI commander.
|
||||
///
|
||||
/// This replaces any existing runner with the provided one.
|
||||
pub fn set_runner(&mut self, runner: CliRunner) {
|
||||
self.runner = Some(runner);
|
||||
}
|
||||
|
||||
/// Access to tracing layers.
|
||||
///
|
||||
/// Returns a mutable reference to the tracing layers, or error
|
||||
/// if tracing initialized and layers have detached already.
|
||||
pub fn access_tracing_layers(&mut self) -> Result<&mut Layers> {
|
||||
self.layers.as_mut().ok_or_else(|| eyre!("Tracing already initialized"))
|
||||
}
|
||||
|
||||
/// Execute the configured cli command.
|
||||
///
|
||||
/// This accepts a closure that is used to launch the node via the
|
||||
/// [`NodeCommand`](reth_cli_commands::node::NodeCommand).
|
||||
pub fn run(mut self, launcher: impl Launcher<C, Ext>) -> Result<()> {
|
||||
let runner = match self.runner.take() {
|
||||
Some(runner) => runner,
|
||||
None => CliRunner::try_default_runtime()?,
|
||||
};
|
||||
|
||||
// add network name to logs dir
|
||||
// Add network name if available to the logs dir
|
||||
if let Some(chain_spec) = self.cli.command.chain_spec() {
|
||||
self.cli.logs.log_file_directory =
|
||||
self.cli.logs.log_file_directory.join(chain_spec.chain.to_string());
|
||||
}
|
||||
|
||||
self.init_tracing(&runner)?;
|
||||
|
||||
// Install the prometheus recorder to be sure to record all metrics
|
||||
install_prometheus_recorder();
|
||||
|
||||
let components = |spec: Arc<OpChainSpec>| {
|
||||
(OpExecutorProvider::optimism(spec.clone()), Arc::new(OpBeaconConsensus::new(spec)))
|
||||
};
|
||||
|
||||
match self.cli.command {
|
||||
Commands::Node(command) => {
|
||||
// Validate RPC modules using the configured validator
|
||||
if let Some(http_api) = &command.rpc.http_api {
|
||||
Rpc::validate_selection(http_api, "http.api").map_err(|e| eyre!("{e}"))?;
|
||||
}
|
||||
if let Some(ws_api) = &command.rpc.ws_api {
|
||||
Rpc::validate_selection(ws_api, "ws.api").map_err(|e| eyre!("{e}"))?;
|
||||
}
|
||||
|
||||
runner.run_command_until_exit(|ctx| command.execute(ctx, launcher))
|
||||
}
|
||||
Commands::Init(command) => {
|
||||
runner.run_blocking_until_ctrl_c(command.execute::<OpNode>())
|
||||
}
|
||||
Commands::InitState(command) => {
|
||||
runner.run_blocking_until_ctrl_c(command.execute::<OpNode>())
|
||||
}
|
||||
Commands::ImportOp(command) => {
|
||||
runner.run_blocking_until_ctrl_c(command.execute::<OpNode>())
|
||||
}
|
||||
Commands::ImportReceiptsOp(command) => {
|
||||
runner.run_blocking_until_ctrl_c(command.execute::<OpNode>())
|
||||
}
|
||||
Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()),
|
||||
Commands::Db(command) => {
|
||||
runner.run_blocking_command_until_exit(|ctx| command.execute::<OpNode>(ctx))
|
||||
}
|
||||
Commands::Stage(command) => {
|
||||
runner.run_command_until_exit(|ctx| command.execute::<OpNode, _>(ctx, components))
|
||||
}
|
||||
Commands::P2P(command) => runner.run_until_ctrl_c(command.execute::<OpNode>()),
|
||||
Commands::Config(command) => runner.run_until_ctrl_c(command.execute()),
|
||||
Commands::Prune(command) => {
|
||||
runner.run_command_until_exit(|ctx| command.execute::<OpNode>(ctx))
|
||||
}
|
||||
#[cfg(feature = "dev")]
|
||||
Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()),
|
||||
Commands::ReExecute(command) => {
|
||||
runner.run_until_ctrl_c(command.execute::<OpNode>(components))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Initializes tracing with the configured options.
|
||||
///
|
||||
/// If file logging is enabled, this function stores guard to the struct.
|
||||
/// For gRPC OTLP, it requires tokio runtime context.
|
||||
pub fn init_tracing(&mut self, runner: &CliRunner) -> Result<()> {
|
||||
if self.guard.is_none() {
|
||||
let mut layers = self.layers.take().unwrap_or_default();
|
||||
|
||||
let otlp_status = runner.block_on(self.cli.traces.init_otlp_tracing(&mut layers))?;
|
||||
let otlp_logs_status = runner.block_on(self.cli.traces.init_otlp_logs(&mut layers))?;
|
||||
|
||||
self.guard = self.cli.logs.init_tracing_with_layers(layers)?;
|
||||
info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.cli.logs.log_file_directory);
|
||||
|
||||
match otlp_status {
|
||||
OtlpInitStatus::Started(endpoint) => {
|
||||
info!(target: "reth::cli", "Started OTLP {:?} tracing export to {endpoint}", self.cli.traces.protocol);
|
||||
}
|
||||
OtlpInitStatus::NoFeature => {
|
||||
warn!(target: "reth::cli", "Provided OTLP tracing arguments do not have effect, compile with the `otlp` feature")
|
||||
}
|
||||
OtlpInitStatus::Disabled => {}
|
||||
}
|
||||
|
||||
match otlp_logs_status {
|
||||
OtlpLogsStatus::Started(endpoint) => {
|
||||
info!(target: "reth::cli", "Started OTLP {:?} logs export to {endpoint}", self.cli.traces.protocol);
|
||||
}
|
||||
OtlpLogsStatus::NoFeature => {
|
||||
warn!(target: "reth::cli", "Provided OTLP logs arguments do not have effect, compile with the `otlp-logs` feature")
|
||||
}
|
||||
OtlpLogsStatus::Disabled => {}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -1,45 +0,0 @@
|
||||
use reth_cli::chainspec::{parse_genesis, ChainSpecParser};
|
||||
use reth_optimism_chainspec::{generated_chain_value_parser, OpChainSpec, SUPPORTED_CHAINS};
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Optimism chain specification parser.
|
||||
#[derive(Debug, Clone, Default)]
|
||||
#[non_exhaustive]
|
||||
pub struct OpChainSpecParser;
|
||||
|
||||
impl ChainSpecParser for OpChainSpecParser {
|
||||
type ChainSpec = OpChainSpec;
|
||||
|
||||
const SUPPORTED_CHAINS: &'static [&'static str] = SUPPORTED_CHAINS;
|
||||
|
||||
fn parse(s: &str) -> eyre::Result<Arc<Self::ChainSpec>> {
|
||||
chain_value_parser(s)
|
||||
}
|
||||
}
|
||||
|
||||
/// Clap value parser for [`OpChainSpec`]s.
|
||||
///
|
||||
/// The value parser matches either a known chain, the path
|
||||
/// to a json file, or a json formatted string in-memory. The json needs to be a Genesis struct.
|
||||
pub fn chain_value_parser(s: &str) -> eyre::Result<Arc<OpChainSpec>, eyre::Error> {
|
||||
if let Some(op_chain_spec) = generated_chain_value_parser(s) {
|
||||
Ok(op_chain_spec)
|
||||
} else {
|
||||
Ok(Arc::new(parse_genesis(s)?.into()))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn parse_known_chain_spec() {
|
||||
for &chain in OpChainSpecParser::SUPPORTED_CHAINS {
|
||||
assert!(
|
||||
<OpChainSpecParser as ChainSpecParser>::parse(chain).is_ok(),
|
||||
"Failed to parse {chain}"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,169 +0,0 @@
|
||||
//! Command that initializes the node by importing OP Mainnet chain segment below Bedrock, from a
|
||||
//! file.
|
||||
use clap::Parser;
|
||||
use reth_cli::chainspec::ChainSpecParser;
|
||||
use reth_cli_commands::{
|
||||
common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs},
|
||||
import::build_import_pipeline,
|
||||
};
|
||||
use reth_consensus::noop::NoopConsensus;
|
||||
use reth_db_api::{tables, transaction::DbTx};
|
||||
use reth_downloaders::file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE};
|
||||
use reth_node_builder::BlockTy;
|
||||
use reth_node_core::version::version_metadata;
|
||||
use reth_optimism_chainspec::OpChainSpec;
|
||||
use reth_optimism_evm::OpExecutorProvider;
|
||||
use reth_optimism_primitives::{bedrock::is_dup_tx, OpPrimitives};
|
||||
use reth_provider::{BlockNumReader, ChainSpecProvider, HeaderProvider, StageCheckpointReader};
|
||||
use reth_prune::PruneModes;
|
||||
use reth_stages::StageId;
|
||||
use reth_static_file::StaticFileProducer;
|
||||
use std::{path::PathBuf, sync::Arc};
|
||||
use tracing::{debug, error, info};
|
||||
|
||||
/// Syncs RLP encoded blocks from a file.
|
||||
#[derive(Debug, Parser)]
|
||||
pub struct ImportOpCommand<C: ChainSpecParser> {
|
||||
#[command(flatten)]
|
||||
env: EnvironmentArgs<C>,
|
||||
|
||||
/// Chunk byte length to read from file.
|
||||
#[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)]
|
||||
chunk_len: Option<u64>,
|
||||
|
||||
/// The path to a block file for import.
|
||||
///
|
||||
/// The online stages (headers and bodies) are replaced by a file import, after which the
|
||||
/// remaining stages are executed.
|
||||
#[arg(value_name = "IMPORT_PATH", verbatim_doc_comment)]
|
||||
path: PathBuf,
|
||||
}
|
||||
|
||||
impl<C: ChainSpecParser<ChainSpec = OpChainSpec>> ImportOpCommand<C> {
|
||||
/// Execute `import` command
|
||||
pub async fn execute<N: CliNodeTypes<ChainSpec = C::ChainSpec, Primitives = OpPrimitives>>(
|
||||
self,
|
||||
) -> eyre::Result<()> {
|
||||
info!(target: "reth::cli", "reth {} starting", version_metadata().short_version);
|
||||
|
||||
info!(target: "reth::cli",
|
||||
"Disabled stages requiring state, since cannot execute OVM state changes"
|
||||
);
|
||||
|
||||
debug!(target: "reth::cli",
|
||||
chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE),
|
||||
"Chunking chain import"
|
||||
);
|
||||
|
||||
let Environment { provider_factory, config, .. } = self.env.init::<N>(AccessRights::RW)?;
|
||||
|
||||
// we use noop here because we expect the inputs to be valid
|
||||
let consensus = Arc::new(NoopConsensus::default());
|
||||
|
||||
// open file
|
||||
let mut reader = ChunkedFileReader::new(&self.path, self.chunk_len).await?;
|
||||
|
||||
let mut total_decoded_blocks = 0;
|
||||
let mut total_decoded_txns = 0;
|
||||
let mut total_filtered_out_dup_txns = 0;
|
||||
|
||||
let mut sealed_header = provider_factory
|
||||
.sealed_header(provider_factory.last_block_number()?)?
|
||||
.expect("should have genesis");
|
||||
|
||||
let static_file_producer =
|
||||
StaticFileProducer::new(provider_factory.clone(), PruneModes::default());
|
||||
|
||||
while let Some(mut file_client) =
|
||||
reader.next_chunk::<BlockTy<N>>(consensus.clone(), Some(sealed_header)).await?
|
||||
{
|
||||
// create a new FileClient from chunk read from file
|
||||
info!(target: "reth::cli",
|
||||
"Importing chain file chunk"
|
||||
);
|
||||
|
||||
let tip = file_client.tip().ok_or_else(|| eyre::eyre!("file client has no tip"))?;
|
||||
info!(target: "reth::cli", "Chain file chunk read");
|
||||
|
||||
total_decoded_blocks += file_client.headers_len();
|
||||
total_decoded_txns += file_client.total_transactions();
|
||||
|
||||
for (block_number, body) in file_client.bodies_iter_mut() {
|
||||
body.transactions.retain(|_| {
|
||||
if is_dup_tx(block_number) {
|
||||
total_filtered_out_dup_txns += 1;
|
||||
return false
|
||||
}
|
||||
true
|
||||
})
|
||||
}
|
||||
|
||||
let (mut pipeline, events) = build_import_pipeline(
|
||||
&config,
|
||||
provider_factory.clone(),
|
||||
&consensus,
|
||||
Arc::new(file_client),
|
||||
static_file_producer.clone(),
|
||||
true,
|
||||
OpExecutorProvider::optimism(provider_factory.chain_spec()),
|
||||
)?;
|
||||
|
||||
// override the tip
|
||||
pipeline.set_tip(tip);
|
||||
debug!(target: "reth::cli", ?tip, "Tip manually set");
|
||||
|
||||
let provider = provider_factory.provider()?;
|
||||
|
||||
let latest_block_number =
|
||||
provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number);
|
||||
tokio::spawn(reth_node_events::node::handle_events(None, latest_block_number, events));
|
||||
|
||||
// Run pipeline
|
||||
info!(target: "reth::cli", "Starting sync pipeline");
|
||||
tokio::select! {
|
||||
res = pipeline.run() => res?,
|
||||
_ = tokio::signal::ctrl_c() => {},
|
||||
}
|
||||
|
||||
sealed_header = provider_factory
|
||||
.sealed_header(provider_factory.last_block_number()?)?
|
||||
.expect("should have genesis");
|
||||
}
|
||||
|
||||
let provider = provider_factory.provider()?;
|
||||
|
||||
let total_imported_blocks = provider.tx_ref().entries::<tables::HeaderNumbers>()?;
|
||||
let total_imported_txns = provider.tx_ref().entries::<tables::TransactionHashNumbers>()?;
|
||||
|
||||
if total_decoded_blocks != total_imported_blocks ||
|
||||
total_decoded_txns != total_imported_txns + total_filtered_out_dup_txns
|
||||
{
|
||||
error!(target: "reth::cli",
|
||||
total_decoded_blocks,
|
||||
total_imported_blocks,
|
||||
total_decoded_txns,
|
||||
total_filtered_out_dup_txns,
|
||||
total_imported_txns,
|
||||
"Chain was partially imported"
|
||||
);
|
||||
}
|
||||
|
||||
info!(target: "reth::cli",
|
||||
total_imported_blocks,
|
||||
total_imported_txns,
|
||||
total_decoded_blocks,
|
||||
total_decoded_txns,
|
||||
total_filtered_out_dup_txns,
|
||||
"Chain file imported"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<C: ChainSpecParser> ImportOpCommand<C> {
|
||||
/// Returns the underlying chain being used to run this command
|
||||
pub const fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
|
||||
Some(&self.env.chain)
|
||||
}
|
||||
}
|
||||
@@ -1,323 +0,0 @@
|
||||
//! Command that imports OP mainnet receipts from Bedrock datadir, exported via
|
||||
//! <https://github.com/testinprod-io/op-geth/pull/1>.
|
||||
|
||||
use crate::receipt_file_codec::OpGethReceiptFileCodec;
|
||||
use clap::Parser;
|
||||
use reth_cli::chainspec::ChainSpecParser;
|
||||
use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
|
||||
use reth_db_api::tables;
|
||||
use reth_downloaders::{
|
||||
file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE},
|
||||
receipt_file_client::ReceiptFileClient,
|
||||
};
|
||||
use reth_execution_types::ExecutionOutcome;
|
||||
use reth_node_builder::ReceiptTy;
|
||||
use reth_node_core::version::version_metadata;
|
||||
use reth_optimism_chainspec::OpChainSpec;
|
||||
use reth_optimism_primitives::{bedrock::is_dup_tx, OpPrimitives, OpReceipt};
|
||||
use reth_primitives_traits::NodePrimitives;
|
||||
use reth_provider::{
|
||||
providers::ProviderNodeTypes, DBProvider, DatabaseProviderFactory, OriginalValuesKnown,
|
||||
ProviderFactory, StageCheckpointReader, StageCheckpointWriter, StateWriteConfig, StateWriter,
|
||||
StaticFileProviderFactory, StatsReader,
|
||||
};
|
||||
use reth_stages::{StageCheckpoint, StageId};
|
||||
use reth_static_file_types::StaticFileSegment;
|
||||
use std::{
|
||||
path::{Path, PathBuf},
|
||||
sync::Arc,
|
||||
};
|
||||
use tracing::{debug, info, trace, warn};
|
||||
|
||||
/// Initializes the database with the genesis block.
|
||||
#[derive(Debug, Parser)]
|
||||
pub struct ImportReceiptsOpCommand<C: ChainSpecParser> {
|
||||
#[command(flatten)]
|
||||
env: EnvironmentArgs<C>,
|
||||
|
||||
/// Chunk byte length to read from file.
|
||||
#[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)]
|
||||
chunk_len: Option<u64>,
|
||||
|
||||
/// The path to a receipts file for import. File must use `OpGethReceiptFileCodec` (used for
|
||||
/// exporting OP chain segment below Bedrock block via testinprod/op-geth).
|
||||
///
|
||||
/// <https://github.com/testinprod-io/op-geth/pull/1>
|
||||
#[arg(value_name = "IMPORT_PATH", verbatim_doc_comment)]
|
||||
path: PathBuf,
|
||||
}
|
||||
|
||||
impl<C: ChainSpecParser<ChainSpec = OpChainSpec>> ImportReceiptsOpCommand<C> {
|
||||
/// Execute `import` command
|
||||
pub async fn execute<N: CliNodeTypes<ChainSpec = C::ChainSpec, Primitives = OpPrimitives>>(
|
||||
self,
|
||||
) -> eyre::Result<()> {
|
||||
info!(target: "reth::cli", "reth {} starting", version_metadata().short_version);
|
||||
|
||||
debug!(target: "reth::cli",
|
||||
chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE),
|
||||
"Chunking receipts import"
|
||||
);
|
||||
|
||||
let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RW)?;
|
||||
|
||||
import_receipts_from_file(
|
||||
provider_factory,
|
||||
self.path,
|
||||
self.chunk_len,
|
||||
|first_block, receipts| {
|
||||
let mut total_filtered_out_dup_txns = 0;
|
||||
for (index, receipts_for_block) in receipts.iter_mut().enumerate() {
|
||||
if is_dup_tx(first_block + index as u64) {
|
||||
receipts_for_block.clear();
|
||||
total_filtered_out_dup_txns += 1;
|
||||
}
|
||||
}
|
||||
|
||||
total_filtered_out_dup_txns
|
||||
},
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl<C: ChainSpecParser> ImportReceiptsOpCommand<C> {
|
||||
/// Returns the underlying chain being used to run this command
|
||||
pub const fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
|
||||
Some(&self.env.chain)
|
||||
}
|
||||
}
|
||||
|
||||
/// Imports receipts to static files from file in chunks. See [`import_receipts_from_reader`].
|
||||
pub async fn import_receipts_from_file<N, P, F>(
|
||||
provider_factory: ProviderFactory<N>,
|
||||
path: P,
|
||||
chunk_len: Option<u64>,
|
||||
filter: F,
|
||||
) -> eyre::Result<()>
|
||||
where
|
||||
N: ProviderNodeTypes<ChainSpec = OpChainSpec, Primitives: NodePrimitives<Receipt = OpReceipt>>,
|
||||
P: AsRef<Path>,
|
||||
F: FnMut(u64, &mut Vec<Vec<OpReceipt>>) -> usize,
|
||||
{
|
||||
for stage in StageId::ALL {
|
||||
let checkpoint = provider_factory.database_provider_ro()?.get_stage_checkpoint(stage)?;
|
||||
trace!(target: "reth::cli",
|
||||
?stage,
|
||||
?checkpoint,
|
||||
"Read stage checkpoints from db"
|
||||
);
|
||||
}
|
||||
|
||||
// open file
|
||||
let reader = ChunkedFileReader::new(&path, chunk_len).await?;
|
||||
|
||||
// import receipts
|
||||
let _ = import_receipts_from_reader(&provider_factory, reader, filter).await?;
|
||||
|
||||
info!(target: "reth::cli",
|
||||
"Receipt file imported"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Imports receipts to static files. Takes a filter callback as parameter, that returns the total
|
||||
/// number of filtered out receipts.
|
||||
///
|
||||
/// Caution! Filter callback must replace completely filtered out receipts for a block, with empty
|
||||
/// vectors, rather than `vec!(None)`. This is since the code for writing to static files, expects
|
||||
/// indices in the receipts list, to map to sequential block numbers.
|
||||
pub async fn import_receipts_from_reader<N, F>(
|
||||
provider_factory: &ProviderFactory<N>,
|
||||
mut reader: ChunkedFileReader,
|
||||
mut filter: F,
|
||||
) -> eyre::Result<ImportReceiptsResult>
|
||||
where
|
||||
N: ProviderNodeTypes<Primitives: NodePrimitives<Receipt = OpReceipt>>,
|
||||
F: FnMut(u64, &mut Vec<Vec<ReceiptTy<N>>>) -> usize,
|
||||
{
|
||||
let static_file_provider = provider_factory.static_file_provider();
|
||||
|
||||
// Ensure that receipts hasn't been initialized apart from `init_genesis`.
|
||||
if let Some(num_receipts) =
|
||||
static_file_provider.get_highest_static_file_tx(StaticFileSegment::Receipts) &&
|
||||
num_receipts > 0
|
||||
{
|
||||
eyre::bail!("Expected no receipts in storage, but found {num_receipts}.");
|
||||
}
|
||||
match static_file_provider.get_highest_static_file_block(StaticFileSegment::Receipts) {
|
||||
Some(receipts_block) => {
|
||||
if receipts_block > 0 {
|
||||
eyre::bail!("Expected highest receipt block to be 0, but found {receipts_block}.");
|
||||
}
|
||||
}
|
||||
None => {
|
||||
eyre::bail!(
|
||||
"Receipts was not initialized. Please import blocks and transactions before calling this command."
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
let provider = provider_factory.database_provider_rw()?;
|
||||
let mut total_decoded_receipts = 0;
|
||||
let mut total_receipts = 0;
|
||||
let mut total_filtered_out_dup_txns = 0;
|
||||
let mut highest_block_receipts = 0;
|
||||
|
||||
let highest_block_transactions = static_file_provider
|
||||
.get_highest_static_file_block(StaticFileSegment::Transactions)
|
||||
.expect("transaction static files must exist before importing receipts");
|
||||
|
||||
while let Some(file_client) =
|
||||
reader.next_receipts_chunk::<ReceiptFileClient<OpGethReceiptFileCodec<OpReceipt>>>().await?
|
||||
{
|
||||
if highest_block_receipts == highest_block_transactions {
|
||||
warn!(target: "reth::cli", highest_block_receipts, highest_block_transactions, "Ignoring all other blocks in the file since we have reached the desired height");
|
||||
break
|
||||
}
|
||||
|
||||
// create a new file client from chunk read from file
|
||||
let ReceiptFileClient {
|
||||
mut receipts,
|
||||
mut first_block,
|
||||
total_receipts: total_receipts_chunk,
|
||||
..
|
||||
} = file_client;
|
||||
|
||||
// mark these as decoded
|
||||
total_decoded_receipts += total_receipts_chunk;
|
||||
|
||||
total_filtered_out_dup_txns += filter(first_block, &mut receipts);
|
||||
|
||||
info!(target: "reth::cli",
|
||||
first_receipts_block=?first_block,
|
||||
total_receipts_chunk,
|
||||
"Importing receipt file chunk"
|
||||
);
|
||||
|
||||
// It is possible for the first receipt returned by the file client to be the genesis
|
||||
// block. In this case, we just prepend empty receipts to the current list of receipts.
|
||||
// When initially writing to static files, the provider expects the first block to be block
|
||||
// one. So, if the first block returned by the file client is the genesis block, we remove
|
||||
// those receipts.
|
||||
if first_block == 0 {
|
||||
// remove the first empty receipts
|
||||
let genesis_receipts = receipts.remove(0);
|
||||
debug_assert!(genesis_receipts.is_empty());
|
||||
// this ensures the execution outcome and static file producer start at block 1
|
||||
first_block = 1;
|
||||
}
|
||||
highest_block_receipts = first_block + receipts.len() as u64 - 1;
|
||||
|
||||
// RLP file may have too many blocks. We ignore the excess, but warn the user.
|
||||
if highest_block_receipts > highest_block_transactions {
|
||||
let excess = highest_block_receipts - highest_block_transactions;
|
||||
highest_block_receipts -= excess;
|
||||
|
||||
// Remove the last `excess` blocks
|
||||
receipts.truncate(receipts.len() - excess as usize);
|
||||
|
||||
warn!(target: "reth::cli", highest_block_receipts, "Too many decoded blocks, ignoring the last {excess}.");
|
||||
}
|
||||
|
||||
// Update total_receipts after all filtering
|
||||
total_receipts += receipts.iter().map(|v| v.len()).sum::<usize>();
|
||||
|
||||
let execution_outcome =
|
||||
ExecutionOutcome::new(Default::default(), receipts, first_block, Default::default());
|
||||
|
||||
// finally, write the receipts
|
||||
provider.write_state(
|
||||
&execution_outcome,
|
||||
OriginalValuesKnown::Yes,
|
||||
StateWriteConfig::default(),
|
||||
)?;
|
||||
}
|
||||
|
||||
// Only commit if we have imported as many receipts as the number of transactions.
|
||||
let total_imported_txns = static_file_provider
|
||||
.count_entries::<tables::Transactions>()
|
||||
.expect("transaction static files must exist before importing receipts");
|
||||
|
||||
if total_receipts != total_imported_txns {
|
||||
eyre::bail!(
|
||||
"Number of receipts ({total_receipts}) inconsistent with transactions {total_imported_txns}"
|
||||
)
|
||||
}
|
||||
|
||||
// Only commit if the receipt block height matches the one from transactions.
|
||||
if highest_block_receipts != highest_block_transactions {
|
||||
eyre::bail!(
|
||||
"Receipt block height ({highest_block_receipts}) inconsistent with transactions' {highest_block_transactions}"
|
||||
)
|
||||
}
|
||||
|
||||
// Required, otherwise any provider factory opened with write access will attempt to unwind to 0.
|
||||
provider
|
||||
.save_stage_checkpoint(StageId::Execution, StageCheckpoint::new(highest_block_receipts))?;
|
||||
|
||||
provider.commit()?;
|
||||
|
||||
Ok(ImportReceiptsResult { total_decoded_receipts, total_filtered_out_dup_txns })
|
||||
}
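
The chunk loop above never lets receipts outrun the transactions already present in static files: it trims blocks past the highest transaction block, then refuses to commit unless both the receipt count and the block height line up. A minimal, self-contained sketch of that trimming arithmetic and the final consistency check, using plain Vec<Vec<u64>> as a stand-in for per-block receipt lists (all names below are illustrative, not reth APIs):

/// Trim per-block receipt lists so they never extend past `highest_tx_block`.
/// Returns the highest block number that still has receipts after trimming.
fn trim_excess_blocks(receipts: &mut Vec<Vec<u64>>, first_block: u64, highest_tx_block: u64) -> u64 {
    let mut highest = first_block + receipts.len() as u64 - 1;
    if highest > highest_tx_block {
        let excess = (highest - highest_tx_block) as usize;
        receipts.truncate(receipts.len() - excess);
        highest = highest_tx_block;
    }
    highest
}

fn main() {
    // Three blocks of receipts starting at block 1, but transactions only exist up to block 2.
    let mut receipts = vec![vec![1, 2], vec![3], vec![4, 5]];
    let highest = trim_excess_blocks(&mut receipts, 1, 2);
    assert_eq!(highest, 2);

    // Mirror of the final import check: total receipts must equal total imported transactions.
    let total_receipts: usize = receipts.iter().map(Vec::len).sum();
    assert_eq!(total_receipts, 3);
}
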
|
||||
|
||||
/// Result of importing receipts in chunks.
|
||||
#[derive(Debug)]
|
||||
pub struct ImportReceiptsResult {
|
||||
/// Total decoded receipts.
|
||||
pub total_decoded_receipts: usize,
|
||||
/// Total filtered out receipts.
|
||||
pub total_filtered_out_dup_txns: usize,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use alloy_primitives::hex;
|
||||
use reth_db_common::init::init_genesis;
|
||||
use reth_optimism_chainspec::OP_MAINNET;
|
||||
use reth_optimism_node::OpNode;
|
||||
use reth_provider::test_utils::create_test_provider_factory_with_node_types;
|
||||
use reth_stages::test_utils::TestStageDB;
|
||||
use tempfile::tempfile;
|
||||
use tokio::{
|
||||
fs::File,
|
||||
io::{AsyncSeekExt, AsyncWriteExt, SeekFrom},
|
||||
};
|
||||
|
||||
use crate::receipt_file_codec::test::{
|
||||
HACK_RECEIPT_ENCODED_BLOCK_1, HACK_RECEIPT_ENCODED_BLOCK_2, HACK_RECEIPT_ENCODED_BLOCK_3,
|
||||
};
|
||||
|
||||
use super::*;
|
||||
|
||||
/// No receipts for genesis block
|
||||
const EMPTY_RECEIPTS_GENESIS_BLOCK: &[u8] = &hex!("c0");
|
||||
|
||||
#[ignore]
|
||||
#[tokio::test]
|
||||
async fn filter_out_genesis_block_receipts() {
|
||||
let mut f: File = tempfile().unwrap().into();
|
||||
f.write_all(EMPTY_RECEIPTS_GENESIS_BLOCK).await.unwrap();
|
||||
f.write_all(HACK_RECEIPT_ENCODED_BLOCK_1).await.unwrap();
|
||||
f.write_all(HACK_RECEIPT_ENCODED_BLOCK_2).await.unwrap();
|
||||
f.write_all(HACK_RECEIPT_ENCODED_BLOCK_3).await.unwrap();
|
||||
f.flush().await.unwrap();
|
||||
f.seek(SeekFrom::Start(0)).await.unwrap();
|
||||
|
||||
let reader = ChunkedFileReader::from_file(f, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE, false)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let db = TestStageDB::default();
|
||||
init_genesis(&db.factory).unwrap();
|
||||
|
||||
let provider_factory =
|
||||
create_test_provider_factory_with_node_types::<OpNode>(OP_MAINNET.clone());
|
||||
let ImportReceiptsResult { total_decoded_receipts, total_filtered_out_dup_txns } =
|
||||
import_receipts_from_reader(&provider_factory, reader, |_, _| 0).await.unwrap();
|
||||
|
||||
assert_eq!(total_decoded_receipts, 3);
|
||||
assert_eq!(total_filtered_out_dup_txns, 0);
|
||||
}
|
||||
}
|
||||
@@ -1,114 +0,0 @@
|
||||
//! Command that initializes the node from a genesis file.
|
||||
|
||||
use alloy_consensus::Header;
|
||||
use clap::Parser;
|
||||
use reth_cli::chainspec::ChainSpecParser;
|
||||
use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment};
|
||||
use reth_db_common::init::init_from_state_dump;
|
||||
use reth_optimism_chainspec::OpChainSpec;
|
||||
use reth_optimism_primitives::{
|
||||
bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH},
|
||||
OpPrimitives,
|
||||
};
|
||||
use reth_primitives_traits::{header::HeaderMut, SealedHeader};
|
||||
use reth_provider::{
|
||||
BlockNumReader, DBProvider, DatabaseProviderFactory, StaticFileProviderFactory,
|
||||
StaticFileWriter,
|
||||
};
|
||||
use std::{io::BufReader, sync::Arc};
|
||||
use tracing::info;
|
||||
|
||||
/// Initializes the database with the genesis block.
|
||||
#[derive(Debug, Parser)]
|
||||
pub struct InitStateCommandOp<C: ChainSpecParser> {
|
||||
#[command(flatten)]
|
||||
init_state: reth_cli_commands::init_state::InitStateCommand<C>,
|
||||
|
||||
/// Specifies whether to initialize the state without relying on OVM or EVM historical data.
|
||||
///
|
||||
/// When enabled, and before inserting the state, it creates a dummy chain up to the last OVM
/// block (#105235062) (14GB / 90 seconds). It then appends the Bedrock block. This is
/// hardcoded for OP mainnet; for other OP chains you will need to pass in a header.
|
||||
///
|
||||
/// - **Note**: **Do not** import receipts and blocks beforehand, or this will fail or be
|
||||
/// ignored.
|
||||
#[arg(long, default_value = "false")]
|
||||
without_ovm: bool,
|
||||
}
|
||||
|
||||
impl<C: ChainSpecParser<ChainSpec = OpChainSpec>> InitStateCommandOp<C> {
|
||||
/// Execute the `init` command
|
||||
pub async fn execute<N: CliNodeTypes<ChainSpec = C::ChainSpec, Primitives = OpPrimitives>>(
|
||||
mut self,
|
||||
) -> eyre::Result<()> {
|
||||
// If using --without-ovm for OP mainnet, handle the special case with hardcoded Bedrock
|
||||
// header. Otherwise delegate to the base InitStateCommand implementation.
|
||||
if self.without_ovm {
|
||||
if self.init_state.env.chain.is_optimism_mainnet() {
|
||||
return self.execute_with_bedrock_header::<N>();
|
||||
}
|
||||
|
||||
// For non-mainnet OP chains with --without-ovm, use the base implementation
|
||||
// by setting the without_evm flag
|
||||
self.init_state.without_evm = true;
|
||||
}
|
||||
|
||||
self.init_state.execute::<N>().await
|
||||
}
|
||||
|
||||
/// Execute init-state with hardcoded Bedrock header for OP mainnet.
|
||||
fn execute_with_bedrock_header<
|
||||
N: CliNodeTypes<ChainSpec = C::ChainSpec, Primitives = OpPrimitives>,
|
||||
>(
|
||||
self,
|
||||
) -> eyre::Result<()> {
|
||||
info!(target: "reth::cli", "Reth init-state starting for OP mainnet");
|
||||
let env = self.init_state.env.init::<N>(AccessRights::RW)?;
|
||||
|
||||
let Environment { config, provider_factory, .. } = env;
|
||||
let static_file_provider = provider_factory.static_file_provider();
|
||||
let provider_rw = provider_factory.database_provider_rw()?;
|
||||
|
||||
let last_block_number = provider_rw.last_block_number()?;
|
||||
|
||||
if last_block_number == 0 {
|
||||
reth_cli_commands::init_state::without_evm::setup_without_evm(
|
||||
&provider_rw,
|
||||
SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH),
|
||||
|number| {
|
||||
let mut header = Header::default();
|
||||
header.set_number(number);
|
||||
header
|
||||
},
|
||||
)?;
|
||||
|
||||
// SAFETY: it's safe to commit static files, since in the event of a crash, they
|
||||
// will be unwound according to database checkpoints.
|
||||
//
|
||||
// Necessary to commit, so the BEDROCK_HEADER is accessible to provider_rw and
|
||||
// init_state_dump
|
||||
static_file_provider.commit()?;
|
||||
} else if last_block_number > 0 && last_block_number < BEDROCK_HEADER.number {
|
||||
return Err(eyre::eyre!(
|
||||
"Data directory should be empty when calling init-state with --without-ovm."
|
||||
))
|
||||
}
|
||||
|
||||
info!(target: "reth::cli", "Initiating state dump");
|
||||
|
||||
let reader = BufReader::new(reth_fs_util::open(self.init_state.state)?);
|
||||
let hash = init_from_state_dump(reader, &provider_rw, config.stages.etl)?;
|
||||
|
||||
provider_rw.commit()?;
|
||||
|
||||
info!(target: "reth::cli", hash = ?hash, "Genesis block written");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<C: ChainSpecParser> InitStateCommandOp<C> {
|
||||
/// Returns the underlying chain being used to run this command
|
||||
pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
|
||||
self.init_state.chain_spec()
|
||||
}
|
||||
}
|
||||
@@ -1,90 +0,0 @@
|
||||
use crate::chainspec::OpChainSpecParser;
|
||||
use clap::Subcommand;
|
||||
use import::ImportOpCommand;
|
||||
use import_receipts::ImportReceiptsOpCommand;
|
||||
use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks};
|
||||
use reth_cli::chainspec::ChainSpecParser;
|
||||
use reth_cli_commands::{
|
||||
config_cmd, db, dump_genesis, init_cmd,
|
||||
node::{self, NoArgs},
|
||||
p2p, prune, re_execute, stage,
|
||||
};
|
||||
use std::{fmt, sync::Arc};
|
||||
|
||||
pub mod import;
|
||||
pub mod import_receipts;
|
||||
pub mod init_state;
|
||||
|
||||
#[cfg(feature = "dev")]
|
||||
pub mod test_vectors;
|
||||
|
||||
/// Commands to be executed
|
||||
#[derive(Debug, Subcommand)]
|
||||
pub enum Commands<Spec: ChainSpecParser = OpChainSpecParser, Ext: clap::Args + fmt::Debug = NoArgs>
|
||||
{
|
||||
/// Start the node
|
||||
#[command(name = "node")]
|
||||
Node(Box<node::NodeCommand<Spec, Ext>>),
|
||||
/// Initialize the database from a genesis file.
|
||||
#[command(name = "init")]
|
||||
Init(init_cmd::InitCommand<Spec>),
|
||||
/// Initialize the database from a state dump file.
|
||||
#[command(name = "init-state")]
|
||||
InitState(init_state::InitStateCommandOp<Spec>),
|
||||
/// This syncs RLP encoded OP blocks below Bedrock from a file, without executing.
|
||||
#[command(name = "import-op")]
|
||||
ImportOp(ImportOpCommand<Spec>),
|
||||
/// This imports RLP encoded receipts from a file.
|
||||
#[command(name = "import-receipts-op")]
|
||||
ImportReceiptsOp(ImportReceiptsOpCommand<Spec>),
|
||||
/// Dumps genesis block JSON configuration to stdout.
|
||||
DumpGenesis(dump_genesis::DumpGenesisCommand<Spec>),
|
||||
/// Database debugging utilities
|
||||
#[command(name = "db")]
|
||||
Db(db::Command<Spec>),
|
||||
/// Manipulate individual stages.
|
||||
#[command(name = "stage")]
|
||||
Stage(Box<stage::Command<Spec>>),
|
||||
/// P2P Debugging utilities
|
||||
#[command(name = "p2p")]
|
||||
P2P(Box<p2p::Command<Spec>>),
|
||||
/// Write config to stdout
|
||||
#[command(name = "config")]
|
||||
Config(config_cmd::Command),
|
||||
/// Prune according to the configuration without any limits
|
||||
#[command(name = "prune")]
|
||||
Prune(prune::PruneCommand<Spec>),
|
||||
/// Generate Test Vectors
|
||||
#[cfg(feature = "dev")]
|
||||
#[command(name = "test-vectors")]
|
||||
TestVectors(test_vectors::Command),
|
||||
/// Re-execute blocks in parallel to verify historical sync correctness.
|
||||
#[command(name = "re-execute")]
|
||||
ReExecute(re_execute::Command<Spec>),
|
||||
}
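
As a quick orientation for how these variants are reached, the sketch below parses a node invocation and matches on the resulting command. It mirrors the parse_node test in this crate's lib.rs; the flag values and the crate path are illustrative.

fn main() {
    // `Cli` defaults to the OP chain spec parser and `RollupArgs`, so no type annotations are needed.
    let cli = reth_optimism_cli::Cli::try_parse_args_from(["op-reth", "node", "--chain", "base", "--http"])
        .expect("arguments should parse");

    match cli.command {
        reth_optimism_cli::commands::Commands::Node(cmd) => {
            println!("starting node for chain {}", cmd.chain.chain)
        }
        _ => println!("some other subcommand"),
    }
}
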
|
||||
|
||||
impl<
|
||||
C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>,
|
||||
Ext: clap::Args + fmt::Debug,
|
||||
> Commands<C, Ext>
|
||||
{
|
||||
/// Returns the underlying chain being used for commands
|
||||
pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
|
||||
match self {
|
||||
Self::Node(cmd) => cmd.chain_spec(),
|
||||
Self::Init(cmd) => cmd.chain_spec(),
|
||||
Self::InitState(cmd) => cmd.chain_spec(),
|
||||
Self::DumpGenesis(cmd) => cmd.chain_spec(),
|
||||
Self::Db(cmd) => cmd.chain_spec(),
|
||||
Self::Stage(cmd) => cmd.chain_spec(),
|
||||
Self::P2P(cmd) => cmd.chain_spec(),
|
||||
Self::Config(_) => None,
|
||||
Self::Prune(cmd) => cmd.chain_spec(),
|
||||
Self::ImportOp(cmd) => cmd.chain_spec(),
|
||||
Self::ImportReceiptsOp(cmd) => cmd.chain_spec(),
|
||||
#[cfg(feature = "dev")]
|
||||
Self::TestVectors(_) => None,
|
||||
Self::ReExecute(cmd) => cmd.chain_spec(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,78 +0,0 @@
|
||||
//! Command for generating test vectors.
|
||||
|
||||
use clap::{Parser, Subcommand};
|
||||
use op_alloy_consensus::TxDeposit;
|
||||
use proptest::test_runner::TestRunner;
|
||||
use reth_chainspec::ChainSpec;
|
||||
use reth_cli_commands::{
|
||||
compact_types,
|
||||
test_vectors::{
|
||||
compact,
|
||||
compact::{
|
||||
generate_vector, read_vector, GENERATE_VECTORS as ETH_GENERATE_VECTORS,
|
||||
READ_VECTORS as ETH_READ_VECTORS,
|
||||
},
|
||||
tables,
|
||||
},
|
||||
};
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Generate test-vectors for different data types.
|
||||
#[derive(Debug, Parser)]
|
||||
pub struct Command {
|
||||
#[command(subcommand)]
|
||||
command: Subcommands,
|
||||
}
|
||||
|
||||
#[derive(Subcommand, Debug)]
|
||||
/// `reth test-vectors` subcommands
|
||||
pub enum Subcommands {
|
||||
/// Generates test vectors for the specified tables. If no table is specified, vectors are generated for all tables.
|
||||
Tables {
|
||||
/// List of table names. Case-sensitive.
|
||||
names: Vec<String>,
|
||||
},
|
||||
/// Generates test vectors for `Compact` types with `--write`. Reads and checks generated
|
||||
/// vectors with `--read`.
|
||||
#[group(multiple = false, required = true)]
|
||||
Compact {
|
||||
/// Write test vectors to a file.
|
||||
#[arg(long)]
|
||||
write: bool,
|
||||
|
||||
/// Read test vectors from a file.
|
||||
#[arg(long)]
|
||||
read: bool,
|
||||
},
|
||||
}
|
||||
|
||||
impl Command {
|
||||
/// Execute the command
|
||||
pub async fn execute(self) -> eyre::Result<()> {
|
||||
match self.command {
|
||||
Subcommands::Tables { names } => {
|
||||
tables::generate_vectors(names)?;
|
||||
}
|
||||
Subcommands::Compact { write, .. } => {
|
||||
compact_types!(
|
||||
regular: [
|
||||
TxDeposit
|
||||
], identifier: []
|
||||
);
|
||||
|
||||
if write {
|
||||
compact::generate_vectors_with(ETH_GENERATE_VECTORS)?;
|
||||
compact::generate_vectors_with(GENERATE_VECTORS)?;
|
||||
} else {
|
||||
compact::read_vectors_with(ETH_READ_VECTORS)?;
|
||||
compact::read_vectors_with(READ_VECTORS)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
/// Returns the underlying chain being used to run this command
|
||||
pub const fn chain_spec(&self) -> Option<&Arc<ChainSpec>> {
|
||||
None
|
||||
}
|
||||
}
|
||||
@@ -1,215 +0,0 @@
|
||||
//! OP-Reth CLI implementation.
|
||||
|
||||
#![doc(
|
||||
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
|
||||
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
|
||||
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
|
||||
)]
|
||||
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
|
||||
/// A configurable App on top of the cli parser.
|
||||
pub mod app;
|
||||
/// Optimism chain specification parser.
|
||||
pub mod chainspec;
|
||||
/// Optimism CLI commands.
|
||||
pub mod commands;
|
||||
/// Module with a codec for reading and encoding receipts in files.
|
||||
///
|
||||
/// Enables decoding and encoding `OpGethReceipt` type. See <https://github.com/testinprod-io/op-geth/pull/1>.
|
||||
///
|
||||
/// Currently configured to use codec [`OpGethReceipt`](receipt_file_codec::OpGethReceipt) based on
|
||||
/// export of below Bedrock data using <https://github.com/testinprod-io/op-geth/pull/1>. Codec can
|
||||
/// be replaced with regular encoding of receipts for export.
|
||||
///
|
||||
/// NOTE: receipts can be exported using regular op-geth encoding for `Receipt` type, to fit
|
||||
/// reth's needs for importing. However, this would require patching the diff in <https://github.com/testinprod-io/op-geth/pull/1> to export the `Receipt` and not `OpGethReceipt` type (originally
|
||||
/// made for op-erigon's import needs).
|
||||
pub mod receipt_file_codec;
|
||||
|
||||
/// OVM block, the same as the EVM block at Bedrock, except that deposit transactions did not
/// carry a signature at the time.
/// Enables decoding and encoding `Block` types within file contexts.
|
||||
pub mod ovm_file_codec;
|
||||
|
||||
pub use app::CliApp;
|
||||
pub use commands::{import::ImportOpCommand, import_receipts::ImportReceiptsOpCommand};
|
||||
use reth_optimism_chainspec::OpChainSpec;
|
||||
use reth_rpc_server_types::{DefaultRpcModuleValidator, RpcModuleValidator};
|
||||
|
||||
use std::{ffi::OsString, fmt, marker::PhantomData};
|
||||
|
||||
use chainspec::OpChainSpecParser;
|
||||
use clap::Parser;
|
||||
use commands::Commands;
|
||||
use futures_util::Future;
|
||||
use reth_cli::chainspec::ChainSpecParser;
|
||||
use reth_cli_commands::launcher::FnLauncher;
|
||||
use reth_cli_runner::CliRunner;
|
||||
use reth_db::DatabaseEnv;
|
||||
use reth_node_builder::{NodeBuilder, WithLaunchContext};
|
||||
use reth_node_core::{
|
||||
args::{LogArgs, TraceArgs},
|
||||
version::version_metadata,
|
||||
};
|
||||
use reth_optimism_node::args::RollupArgs;
|
||||
|
||||
// This allows us to manually enable node metrics features, required for proper jemalloc metric
|
||||
// reporting
|
||||
use reth_node_metrics as _;
|
||||
|
||||
/// The main op-reth cli interface.
|
||||
///
|
||||
/// This is the entrypoint to the executable.
|
||||
#[derive(Debug, Parser)]
|
||||
#[command(author, name = version_metadata().name_client.as_ref(), version = version_metadata().short_version.as_ref(), long_version = version_metadata().long_version.as_ref(), about = "Reth", long_about = None)]
|
||||
pub struct Cli<
|
||||
Spec: ChainSpecParser = OpChainSpecParser,
|
||||
Ext: clap::Args + fmt::Debug = RollupArgs,
|
||||
Rpc: RpcModuleValidator = DefaultRpcModuleValidator,
|
||||
> {
|
||||
/// The command to run
|
||||
#[command(subcommand)]
|
||||
pub command: Commands<Spec, Ext>,
|
||||
|
||||
/// The logging configuration for the CLI.
|
||||
#[command(flatten)]
|
||||
pub logs: LogArgs,
|
||||
|
||||
/// The metrics configuration for the CLI.
|
||||
#[command(flatten)]
|
||||
pub traces: TraceArgs,
|
||||
|
||||
/// Type marker for the RPC module validator
|
||||
#[arg(skip)]
|
||||
_phantom: PhantomData<Rpc>,
|
||||
}
|
||||
|
||||
impl Cli {
|
||||
/// Parses only the default CLI arguments
|
||||
pub fn parse_args() -> Self {
|
||||
Self::parse()
|
||||
}
|
||||
|
||||
/// Parses only the default CLI arguments from the given iterator
|
||||
pub fn try_parse_args_from<I, T>(itr: I) -> Result<Self, clap::error::Error>
|
||||
where
|
||||
I: IntoIterator<Item = T>,
|
||||
T: Into<OsString> + Clone,
|
||||
{
|
||||
Self::try_parse_from(itr)
|
||||
}
|
||||
}
|
||||
|
||||
impl<C, Ext, Rpc> Cli<C, Ext, Rpc>
|
||||
where
|
||||
C: ChainSpecParser<ChainSpec = OpChainSpec>,
|
||||
Ext: clap::Args + fmt::Debug,
|
||||
Rpc: RpcModuleValidator,
|
||||
{
|
||||
/// Configures the CLI and returns a [`CliApp`] instance.
|
||||
///
|
||||
/// This method is used to prepare the CLI for execution by wrapping it in a
|
||||
/// [`CliApp`] that can be further configured before running.
|
||||
pub fn configure(self) -> CliApp<C, Ext, Rpc> {
|
||||
CliApp::new(self)
|
||||
}
|
||||
|
||||
/// Execute the configured cli command.
|
||||
///
|
||||
/// This accepts a closure that is used to launch the node via the
|
||||
/// [`NodeCommand`](reth_cli_commands::node::NodeCommand).
|
||||
pub fn run<L, Fut>(self, launcher: L) -> eyre::Result<()>
|
||||
where
|
||||
L: FnOnce(WithLaunchContext<NodeBuilder<DatabaseEnv, C::ChainSpec>>, Ext) -> Fut,
|
||||
Fut: Future<Output = eyre::Result<()>>,
|
||||
{
|
||||
self.with_runner(CliRunner::try_default_runtime()?, launcher)
|
||||
}
|
||||
|
||||
/// Execute the configured cli command with the provided [`CliRunner`].
|
||||
pub fn with_runner<L, Fut>(self, runner: CliRunner, launcher: L) -> eyre::Result<()>
|
||||
where
|
||||
L: FnOnce(WithLaunchContext<NodeBuilder<DatabaseEnv, C::ChainSpec>>, Ext) -> Fut,
|
||||
Fut: Future<Output = eyre::Result<()>>,
|
||||
{
|
||||
let mut this = self.configure();
|
||||
this.set_runner(runner);
|
||||
this.run(FnLauncher::new::<C, Ext>(async move |builder, chain_spec| {
|
||||
launcher(builder, chain_spec).await
|
||||
}))
|
||||
}
|
||||
}
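
For completeness, this is roughly the shape of the op-reth binary's entrypoint: parse the CLI, then hand run a launcher closure that builds and launches the OP node. The builder calls below follow the usual reth node-builder pattern but are a hedged sketch, not verified against a specific reth version.

fn main() -> eyre::Result<()> {
    reth_optimism_cli::Cli::parse_args().run(async move |builder, rollup_args| {
        // Build the OP node from the parsed rollup args and wait for it to exit.
        let handle = builder.node(reth_optimism_node::OpNode::new(rollup_args)).launch().await?;
        handle.node_exit_future.await
    })
}
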
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crate::{chainspec::OpChainSpecParser, commands::Commands, Cli};
|
||||
use clap::Parser;
|
||||
use reth_cli_commands::{node::NoArgs, NodeCommand};
|
||||
use reth_optimism_chainspec::{BASE_MAINNET, OP_DEV};
|
||||
use reth_optimism_node::args::RollupArgs;
|
||||
|
||||
#[test]
|
||||
fn parse_dev() {
|
||||
let cmd = NodeCommand::<OpChainSpecParser, NoArgs>::parse_from(["op-reth", "--dev"]);
|
||||
let chain = OP_DEV.clone();
|
||||
assert_eq!(cmd.chain.chain, chain.chain);
|
||||
assert_eq!(cmd.chain.genesis_hash(), chain.genesis_hash());
|
||||
assert_eq!(
|
||||
cmd.chain.paris_block_and_final_difficulty,
|
||||
chain.paris_block_and_final_difficulty
|
||||
);
|
||||
assert_eq!(cmd.chain.hardforks, chain.hardforks);
|
||||
|
||||
assert!(cmd.rpc.http);
|
||||
assert!(cmd.network.discovery.disable_discovery);
|
||||
|
||||
assert!(cmd.dev.dev);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_node() {
|
||||
let cmd = Cli::<OpChainSpecParser, RollupArgs>::parse_from([
|
||||
"op-reth",
|
||||
"node",
|
||||
"--chain",
|
||||
"base",
|
||||
"--datadir",
|
||||
"/mnt/datadirs/base",
|
||||
"--instance",
|
||||
"2",
|
||||
"--http",
|
||||
"--http.addr",
|
||||
"0.0.0.0",
|
||||
"--ws",
|
||||
"--ws.addr",
|
||||
"0.0.0.0",
|
||||
"--http.api",
|
||||
"admin,debug,eth,net,trace,txpool,web3,rpc,reth,ots",
|
||||
"--rollup.sequencer-http",
|
||||
"https://mainnet-sequencer.base.org",
|
||||
"--rpc-max-tracing-requests",
|
||||
"1000000",
|
||||
"--rpc.gascap",
|
||||
"18446744073709551615",
|
||||
"--rpc.max-connections",
|
||||
"429496729",
|
||||
"--rpc.max-logs-per-response",
|
||||
"0",
|
||||
"--rpc.max-subscriptions-per-connection",
|
||||
"10000",
|
||||
"--metrics",
|
||||
"9003",
|
||||
"--tracing-otlp=http://localhost:4318/v1/traces",
|
||||
"--log.file.max-size",
|
||||
"100",
|
||||
]);
|
||||
|
||||
match cmd.command {
|
||||
Commands::Node(command) => {
|
||||
assert_eq!(command.chain.as_ref(), BASE_MAINNET.as_ref());
|
||||
}
|
||||
_ => panic!("unexpected command"),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,383 +0,0 @@
|
||||
use alloy_consensus::{
|
||||
transaction::{from_eip155_value, RlpEcdsaDecodableTx, RlpEcdsaEncodableTx},
|
||||
Header, TxEip1559, TxEip2930, TxEip7702, TxLegacy,
|
||||
};
|
||||
use alloy_eips::{
|
||||
eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718},
|
||||
eip4895::Withdrawals,
|
||||
Typed2718,
|
||||
};
|
||||
use alloy_primitives::{
|
||||
bytes::{Buf, BytesMut},
|
||||
keccak256, Signature, TxHash, B256, U256,
|
||||
};
|
||||
use alloy_rlp::{Decodable, Error as RlpError, RlpDecodable};
|
||||
use derive_more::{AsRef, Deref};
|
||||
use op_alloy_consensus::{OpTxType, OpTypedTransaction, TxDeposit};
|
||||
use reth_downloaders::file_client::FileClientError;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio_util::codec::Decoder;
|
||||
|
||||
#[expect(dead_code)]
|
||||
/// Specific codec for reading raw block bodies from a file
|
||||
/// with optimism-specific signature handling
|
||||
pub(crate) struct OvmBlockFileCodec;
|
||||
|
||||
impl Decoder for OvmBlockFileCodec {
|
||||
type Item = OvmBlock;
|
||||
type Error = FileClientError;
|
||||
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
if src.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let buf_slice = &mut src.as_ref();
|
||||
let body =
|
||||
OvmBlock::decode(buf_slice).map_err(|err| FileClientError::Rlp(err, src.to_vec()))?;
|
||||
src.advance(src.len() - buf_slice.len());
|
||||
|
||||
Ok(Some(body))
|
||||
}
|
||||
}
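
The decode implementation above follows the usual tokio-util decoder pattern: decode against a shadow slice, then advance the real buffer by exactly the bytes consumed so partial frames stay buffered for the next read. A self-contained illustration of that buffer mechanic with a trivial fixed-size frame (not reth code):

use bytes::{Buf, BytesMut};

/// Pretend each frame is a 4-byte big-endian u32; return None until a full frame is buffered.
fn decode_frame(src: &mut BytesMut) -> Option<u32> {
    let buf = src.as_ref();
    if buf.len() < 4 {
        return None;
    }
    let value = u32::from_be_bytes([buf[0], buf[1], buf[2], buf[3]]);
    // Consume only the bytes that belonged to the decoded frame; the remainder stays buffered.
    src.advance(4);
    Some(value)
}

fn main() {
    let mut src = BytesMut::from(&[0u8, 0, 0, 7, 0xff][..]);
    assert_eq!(decode_frame(&mut src), Some(7));
    assert_eq!(src.as_ref(), &[0xff][..]); // the leftover byte is kept for the next call
}
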
|
||||
|
||||
/// OVM block, the same as the EVM block but with different transaction signature handling.
/// Pre-bedrock system transactions on Optimism were sent from the zero address
/// with an empty signature.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, RlpDecodable)]
|
||||
pub struct OvmBlock {
|
||||
/// Block header
|
||||
pub header: Header,
|
||||
/// Block body
|
||||
pub body: OvmBlockBody,
|
||||
}
|
||||
|
||||
impl OvmBlock {
|
||||
/// Decodes a `Block` from the given byte slice.
|
||||
pub fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
|
||||
let header = Header::decode(buf)?;
|
||||
let body = OvmBlockBody::decode(buf)?;
|
||||
Ok(Self { header, body })
|
||||
}
|
||||
}
|
||||
|
||||
/// The body of a block for OVM
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Default, RlpDecodable)]
|
||||
#[rlp(trailing)]
|
||||
pub struct OvmBlockBody {
|
||||
/// Transactions in the block
|
||||
pub transactions: Vec<OvmTransactionSigned>,
|
||||
/// Uncle headers for the given block
|
||||
pub ommers: Vec<Header>,
|
||||
/// Withdrawals in the block.
|
||||
pub withdrawals: Option<Withdrawals>,
|
||||
}
|
||||
|
||||
/// Signed transaction pre bedrock.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)]
|
||||
pub struct OvmTransactionSigned {
|
||||
/// Transaction hash
|
||||
pub hash: TxHash,
|
||||
/// The transaction signature values
|
||||
pub signature: Signature,
|
||||
/// Raw transaction info
|
||||
#[deref]
|
||||
#[as_ref]
|
||||
pub transaction: OpTypedTransaction,
|
||||
}
|
||||
|
||||
impl AsRef<Self> for OvmTransactionSigned {
|
||||
fn as_ref(&self) -> &Self {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl OvmTransactionSigned {
|
||||
/// Calculate the transaction hash. An EIP-2718 transaction does not contain an RLP header and
/// starts with the tx type.
|
||||
pub fn recalculate_hash(&self) -> B256 {
|
||||
keccak256(self.encoded_2718())
|
||||
}
|
||||
|
||||
/// Create a new signed transaction from a transaction and its signature.
|
||||
///
|
||||
/// This will also calculate the transaction hash using its encoding.
|
||||
pub fn from_transaction_and_signature(
|
||||
transaction: OpTypedTransaction,
|
||||
signature: Signature,
|
||||
) -> Self {
|
||||
let mut initial_tx = Self { transaction, hash: Default::default(), signature };
|
||||
initial_tx.hash = initial_tx.recalculate_hash();
|
||||
initial_tx
|
||||
}
|
||||
|
||||
/// Decodes legacy transaction from the data buffer into a tuple.
|
||||
///
|
||||
/// This expects `rlp(legacy_tx)`
|
||||
///
|
||||
/// Refer to the docs for [`Self::decode_rlp_legacy_transaction`] for details on the exact
|
||||
/// format expected.
|
||||
pub(crate) fn decode_rlp_legacy_transaction_tuple(
|
||||
data: &mut &[u8],
|
||||
) -> alloy_rlp::Result<(TxLegacy, TxHash, Signature)> {
|
||||
let original_encoding = *data;
|
||||
|
||||
let header = alloy_rlp::Header::decode(data)?;
|
||||
let remaining_len = data.len();
|
||||
|
||||
let transaction_payload_len = header.payload_length;
|
||||
|
||||
if transaction_payload_len > remaining_len {
|
||||
return Err(RlpError::InputTooShort);
|
||||
}
|
||||
|
||||
let mut transaction = TxLegacy {
|
||||
nonce: Decodable::decode(data)?,
|
||||
gas_price: Decodable::decode(data)?,
|
||||
gas_limit: Decodable::decode(data)?,
|
||||
to: Decodable::decode(data)?,
|
||||
value: Decodable::decode(data)?,
|
||||
input: Decodable::decode(data)?,
|
||||
chain_id: None,
|
||||
};
|
||||
|
||||
let v = Decodable::decode(data)?;
|
||||
let r: U256 = Decodable::decode(data)?;
|
||||
let s: U256 = Decodable::decode(data)?;
|
||||
|
||||
let tx_length = header.payload_length + header.length();
|
||||
let hash = keccak256(&original_encoding[..tx_length]);
|
||||
|
||||
// Handle both pre-bedrock and regular cases
|
||||
let (signature, chain_id) = if v == 0 && r.is_zero() && s.is_zero() {
|
||||
// Pre-bedrock system transactions case
|
||||
(Signature::new(r, s, false), None)
|
||||
} else {
|
||||
// Regular transaction case
|
||||
let (parity, chain_id) = from_eip155_value(v)
|
||||
.ok_or(alloy_rlp::Error::Custom("invalid parity for legacy transaction"))?;
|
||||
(Signature::new(r, s, parity), chain_id)
|
||||
};
|
||||
|
||||
// Set chain ID and verify length
|
||||
transaction.chain_id = chain_id;
|
||||
let decoded = remaining_len - data.len();
|
||||
if decoded != transaction_payload_len {
|
||||
return Err(RlpError::UnexpectedLength);
|
||||
}
|
||||
|
||||
Ok((transaction, hash, signature))
|
||||
}
|
||||
|
||||
/// Decodes legacy transaction from the data buffer.
|
||||
///
|
||||
/// This should _only_ be used in general transaction decoding methods, which have
|
||||
/// already ensured that the input is a legacy transaction with the following format:
|
||||
/// `rlp(legacy_tx)`
|
||||
///
|
||||
/// Legacy transactions are encoded as lists, so the input should start with a RLP list header.
|
||||
///
|
||||
/// This expects `rlp(legacy_tx)`
|
||||
// TODO: make buf advancement semantics consistent with `decode_enveloped_typed_transaction`,
|
||||
// so decoding methods do not need to manually advance the buffer
|
||||
pub fn decode_rlp_legacy_transaction(data: &mut &[u8]) -> alloy_rlp::Result<Self> {
|
||||
let (transaction, hash, signature) = Self::decode_rlp_legacy_transaction_tuple(data)?;
|
||||
let signed = Self { transaction: OpTypedTransaction::Legacy(transaction), hash, signature };
|
||||
Ok(signed)
|
||||
}
|
||||
}
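
The legacy decoding above distinguishes pre-bedrock system transactions (all-zero v, r and s) from ordinary EIP-155 signatures via from_eip155_value. For reference, a stand-alone sketch of the EIP-155 arithmetic that helper performs (the standard formula, written out here without the alloy helper):

/// Recover (y_parity, chain_id) from a legacy signature `v` value.
/// Returns None for values that are neither pre-EIP-155 (27/28) nor valid EIP-155 encodings.
fn eip155_parity_and_chain_id(v: u64) -> Option<(bool, Option<u64>)> {
    match v {
        27 => Some((false, None)),
        28 => Some((true, None)),
        // EIP-155: v = chain_id * 2 + 35 + y_parity
        v if v >= 35 => Some((((v - 35) % 2) == 1, Some((v - 35) / 2))),
        _ => None,
    }
}

fn main() {
    // OP mainnet (chain id 10): v is 55 for parity 0 and 56 for parity 1, matching the
    // chain_id assertions in the tests below.
    assert_eq!(eip155_parity_and_chain_id(56), Some((true, Some(10))));
    assert_eq!(eip155_parity_and_chain_id(27), Some((false, None)));
    assert_eq!(eip155_parity_and_chain_id(0), None);
}
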
|
||||
|
||||
impl Decodable for OvmTransactionSigned {
|
||||
/// This `Decodable` implementation only supports decoding RLP encoded transactions, as it is used
/// by p2p.
|
||||
///
|
||||
/// The p2p encoding format always includes an RLP header, although the type RLP header depends
|
||||
/// on whether or not the transaction is a legacy transaction.
|
||||
///
|
||||
/// If the transaction is a legacy transaction, it is just encoded as a RLP list:
|
||||
/// `rlp(tx-data)`.
|
||||
///
|
||||
/// If the transaction is a typed transaction, it is encoded as a RLP string:
|
||||
/// `rlp(tx-type || rlp(tx-data))`
|
||||
///
|
||||
/// This can be used for decoding all signed transactions in p2p `BlockBodies` responses.
|
||||
///
|
||||
/// This cannot be used for decoding EIP-4844 transactions from p2p `PooledTransactions`, since
/// [`OvmTransactionSigned`] has no EIP-4844 variant carrying the blob sidecar.
|
||||
///
|
||||
/// For a method suitable for decoding pooled transactions, see \[`PooledTransaction`\].
|
||||
///
|
||||
/// CAUTION: Due to a quirk in [`Header::decode`], this method will succeed even if a typed
|
||||
/// transaction is encoded in this format, and does not start with a RLP header:
|
||||
/// `tx-type || rlp(tx-data)`.
|
||||
///
|
||||
/// This is because [`Header::decode`] does not advance the buffer, and returns a length-1
|
||||
/// string header if the first byte is less than `0xf7`.
|
||||
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
|
||||
Self::network_decode(buf).map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
impl Typed2718 for OvmTransactionSigned {
|
||||
fn ty(&self) -> u8 {
|
||||
self.transaction.tx_type() as u8
|
||||
}
|
||||
}
|
||||
|
||||
impl Encodable2718 for OvmTransactionSigned {
|
||||
fn type_flag(&self) -> Option<u8> {
|
||||
match self.transaction.tx_type() {
|
||||
OpTxType::Legacy => None,
|
||||
tx_type => Some(tx_type as u8),
|
||||
}
|
||||
}
|
||||
|
||||
fn encode_2718_len(&self) -> usize {
|
||||
match &self.transaction {
|
||||
OpTypedTransaction::Legacy(legacy_tx) => {
|
||||
legacy_tx.eip2718_encoded_length(&self.signature)
|
||||
}
|
||||
OpTypedTransaction::Eip2930(access_list_tx) => {
|
||||
access_list_tx.eip2718_encoded_length(&self.signature)
|
||||
}
|
||||
OpTypedTransaction::Eip1559(dynamic_fee_tx) => {
|
||||
dynamic_fee_tx.eip2718_encoded_length(&self.signature)
|
||||
}
|
||||
OpTypedTransaction::Eip7702(set_code_tx) => {
|
||||
set_code_tx.eip2718_encoded_length(&self.signature)
|
||||
}
|
||||
OpTypedTransaction::Deposit(deposit_tx) => deposit_tx.eip2718_encoded_length(),
|
||||
}
|
||||
}
|
||||
|
||||
fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) {
|
||||
self.transaction.eip2718_encode(&self.signature, out)
|
||||
}
|
||||
}
|
||||
|
||||
impl Decodable2718 for OvmTransactionSigned {
|
||||
fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result<Self> {
|
||||
match ty.try_into().map_err(|_| Eip2718Error::UnexpectedType(ty))? {
|
||||
OpTxType::Legacy => Err(Eip2718Error::UnexpectedType(0)),
|
||||
OpTxType::Eip2930 => {
|
||||
let (tx, signature, hash) = TxEip2930::rlp_decode_signed(buf)?.into_parts();
|
||||
Ok(Self { transaction: OpTypedTransaction::Eip2930(tx), signature, hash })
|
||||
}
|
||||
OpTxType::Eip1559 => {
|
||||
let (tx, signature, hash) = TxEip1559::rlp_decode_signed(buf)?.into_parts();
|
||||
Ok(Self { transaction: OpTypedTransaction::Eip1559(tx), signature, hash })
|
||||
}
|
||||
OpTxType::Eip7702 => {
|
||||
let (tx, signature, hash) = TxEip7702::rlp_decode_signed(buf)?.into_parts();
|
||||
Ok(Self { transaction: OpTypedTransaction::Eip7702(tx), signature, hash })
|
||||
}
|
||||
OpTxType::Deposit => Ok(Self::from_transaction_and_signature(
|
||||
OpTypedTransaction::Deposit(TxDeposit::rlp_decode(buf)?),
|
||||
TxDeposit::signature(),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result<Self> {
|
||||
Ok(Self::decode_rlp_legacy_transaction(buf)?)
|
||||
}
|
||||
}
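
The Decodable2718 implementation encodes a simple dispatch rule: a first byte that is a valid type flag is routed to typed_decode, while anything that parses as an RLP list header falls back to legacy decoding. A minimal stand-alone illustration of that first-byte classification (plain byte inspection, not the alloy decoder):

/// Classify a raw EIP-2718 payload by its first byte: 0x01..=0x7f is a typed-transaction
/// type flag, while 0xc0..=0xff is an RLP list header, i.e. a legacy transaction.
fn classify(payload: &[u8]) -> &'static str {
    match payload.first() {
        Some(b) if (0x01u8..=0x7f).contains(b) => "typed envelope",
        Some(b) if *b >= 0xc0 => "legacy (RLP list)",
        _ => "invalid or empty",
    }
}

fn main() {
    assert_eq!(classify(&[0x7e, 0xf8]), "typed envelope"); // 0x7e is the OP deposit tx type
    assert_eq!(classify(&[0xf8, 0x88]), "legacy (RLP list)"); // start of an RLP list payload
    assert_eq!(classify(&[]), "invalid or empty");
}
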
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::ovm_file_codec::OvmTransactionSigned;
|
||||
use alloy_consensus::Typed2718;
|
||||
use alloy_primitives::{address, b256, hex, TxKind, U256};
|
||||
use op_alloy_consensus::OpTypedTransaction;
|
||||
const DEPOSIT_FUNCTION_SELECTOR: [u8; 4] = [0xb6, 0xb5, 0x5f, 0x25];
|
||||
use alloy_rlp::Decodable;
|
||||
|
||||
#[test]
|
||||
fn test_decode_legacy_transactions() {
|
||||
// Test Case 1: contract deposit - regular L2 transaction calling deposit() function
|
||||
// tx: https://optimistic.etherscan.io/getRawTx?tx=0x7860252963a2df21113344f323035ef59648638a571eef742e33d789602c7a1c
|
||||
let deposit_tx_bytes = hex!(
|
||||
"f88881f0830f481c830c6e4594a75127121d28a9bf848f3b70e7eea26570aa770080a4b6b55f2500000000000000000000000000000000000000000000000000000000000710b238a0d5c622d92ddf37f9c18a3465a572f74d8b1aeaf50c1cfb10b3833242781fd45fa02c4f1d5819bf8b70bf651e7a063b7db63c55bd336799c6ae3e5bc72ad6ef3def"
|
||||
);
|
||||
let deposit_decoded = OvmTransactionSigned::decode(&mut &deposit_tx_bytes[..]).unwrap();
|
||||
|
||||
// Verify deposit transaction
|
||||
let deposit_tx = match &deposit_decoded.transaction {
|
||||
OpTypedTransaction::Legacy(tx) => tx,
|
||||
_ => panic!("Expected legacy transaction for NFT deposit"),
|
||||
};
|
||||
|
||||
assert_eq!(
|
||||
deposit_tx.to,
|
||||
TxKind::Call(address!("0xa75127121d28a9bf848f3b70e7eea26570aa7700"))
|
||||
);
|
||||
assert_eq!(deposit_tx.nonce, 240);
|
||||
assert_eq!(deposit_tx.gas_price, 1001500);
|
||||
assert_eq!(deposit_tx.gas_limit, 814661);
|
||||
assert_eq!(deposit_tx.value, U256::ZERO);
|
||||
assert_eq!(&deposit_tx.input.as_ref()[0..4], DEPOSIT_FUNCTION_SELECTOR);
|
||||
assert_eq!(deposit_tx.chain_id, Some(10));
|
||||
assert_eq!(
|
||||
deposit_decoded.signature.r(),
|
||||
U256::from_str_radix(
|
||||
"d5c622d92ddf37f9c18a3465a572f74d8b1aeaf50c1cfb10b3833242781fd45f",
|
||||
16
|
||||
)
|
||||
.unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
deposit_decoded.signature.s(),
|
||||
U256::from_str_radix(
|
||||
"2c4f1d5819bf8b70bf651e7a063b7db63c55bd336799c6ae3e5bc72ad6ef3def",
|
||||
16
|
||||
)
|
||||
.unwrap()
|
||||
);
|
||||
|
||||
// Test Case 2: pre-bedrock system transaction from block 105235052
|
||||
// tx: https://optimistic.etherscan.io/getRawTx?tx=0xe20b11349681dd049f8df32f5cdbb4c68d46b537685defcd86c7fa42cfe75b9e
|
||||
let system_tx_bytes = hex!(
|
||||
"f9026c830d899383124f808302a77e94a0cc33dd6f4819d473226257792afe230ec3c67f80b902046c459a280000000000000000000000004d73adb72bc3dd368966edd0f0b2148401a178e2000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000647fac7f00000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000000084704316e5000000000000000000000000000000000000000000000000000000000000006e10975631049de3c008989b0d8c19fc720dc556ca01abfbd794c6eb5075dd000d000000000000000000000000000000000000000000000000000000000000001410975631049de3c008989b0d8c19fc720dc556ca01abfbd794c6eb5075dd000d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082a39325251d44e11f3b6d92f9382438eb6c8b5068d4a488d4f177b26f2ca20db34ae53467322852afcc779f25eafd124c5586f54b9026497ba934403d4c578e3c1b5aa754c918ee2ecd25402df656c2419717e4017a7aecb84af3914fd3c7bf6930369c4e6ff76950246b98e354821775f02d33cdbee5ef6aed06c15b75691692d31c00000000000000000000000000000000000000000000000000000000000038a0e8991e95e66d809f4b6fb0af27c31368ca0f30e657165c428aa681ec5ea25bbea013ed325bd97365087ec713e9817d252b59113ea18430b71a5890c4eeb6b9efc4"
|
||||
);
|
||||
let system_decoded = OvmTransactionSigned::decode(&mut &system_tx_bytes[..]).unwrap();
|
||||
|
||||
// Verify system transaction
|
||||
assert!(system_decoded.is_legacy());
|
||||
|
||||
let system_tx = match &system_decoded.transaction {
|
||||
OpTypedTransaction::Legacy(tx) => tx,
|
||||
_ => panic!("Expected Legacy transaction"),
|
||||
};
|
||||
|
||||
assert_eq!(system_tx.nonce, 887187);
|
||||
assert_eq!(system_tx.gas_price, 1200000);
|
||||
assert_eq!(system_tx.gas_limit, 173950);
|
||||
assert_eq!(
|
||||
system_tx.to,
|
||||
TxKind::Call(address!("0xa0cc33dd6f4819d473226257792afe230ec3c67f"))
|
||||
);
|
||||
assert_eq!(system_tx.value, U256::ZERO);
|
||||
assert_eq!(system_tx.chain_id, Some(10));
|
||||
|
||||
assert_eq!(
|
||||
system_decoded.signature.r(),
|
||||
U256::from_str_radix(
|
||||
"e8991e95e66d809f4b6fb0af27c31368ca0f30e657165c428aa681ec5ea25bbe",
|
||||
16
|
||||
)
|
||||
.unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
system_decoded.signature.s(),
|
||||
U256::from_str_radix(
|
||||
"13ed325bd97365087ec713e9817d252b59113ea18430b71a5890c4eeb6b9efc4",
|
||||
16
|
||||
)
|
||||
.unwrap()
|
||||
);
|
||||
assert_eq!(
|
||||
system_decoded.hash,
|
||||
b256!("0xe20b11349681dd049f8df32f5cdbb4c68d46b537685defcd86c7fa42cfe75b9e")
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1,324 +0,0 @@
|
||||
//! Codec for reading raw receipts from a file.
|
||||
|
||||
use alloy_consensus::Receipt;
|
||||
use alloy_primitives::{
|
||||
bytes::{Buf, BytesMut},
|
||||
Address, Bloom, Bytes, Log, B256,
|
||||
};
|
||||
use alloy_rlp::{Decodable, RlpDecodable};
|
||||
use op_alloy_consensus::{OpDepositReceipt, OpTxType};
|
||||
use reth_optimism_primitives::OpReceipt;
|
||||
use tokio_util::codec::Decoder;
|
||||
|
||||
use reth_downloaders::{file_client::FileClientError, receipt_file_client::ReceiptWithBlockNumber};
|
||||
|
||||
/// Codec for reading raw receipts from a file.
|
||||
///
|
||||
/// If using with [`FramedRead`](tokio_util::codec::FramedRead), the user should make sure the
|
||||
/// framed reader has capacity for the entire receipts file. Otherwise, the decoder will return
|
||||
/// [`InputTooShort`](alloy_rlp::Error::InputTooShort), because RLP receipts can only be
|
||||
/// decoded if the internal buffer is large enough to contain the entire receipt.
|
||||
///
|
||||
/// Without ensuring the framed reader has capacity for the entire file, a receipt is likely to
/// fall across two read buffers; the decoder will then be unable to decode the receipt and will
/// fail.
|
||||
///
|
||||
/// It's recommended to use [`with_capacity`](tokio_util::codec::FramedRead::with_capacity) to set
|
||||
/// the capacity of the framed reader to the size of the file.
|
||||
#[derive(Debug)]
|
||||
pub struct OpGethReceiptFileCodec<R = Receipt>(core::marker::PhantomData<R>);
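
The capacity requirement described above is the main pitfall when wiring this codec up. A hedged sketch of how a caller might honor it, sizing the FramedRead buffer to the whole file; the actual import path in this crate goes through ChunkedFileReader instead, and the error conversion into eyre here is an assumption for illustration.

use reth_optimism_primitives::OpReceipt;
use tokio_stream::StreamExt;
use tokio_util::codec::FramedRead;

async fn read_receipts(path: &std::path::Path) -> eyre::Result<()> {
    let file = tokio::fs::File::open(path).await?;
    // Size the read buffer to the entire file so no receipt straddles two reads.
    let capacity = file.metadata().await?.len() as usize;
    let mut framed =
        FramedRead::with_capacity(file, OpGethReceiptFileCodec::<OpReceipt>::default(), capacity);

    while let Some(item) = framed.next().await {
        if let Some(decoded) = item? {
            println!("decoded receipt for block {}", decoded.number);
        }
    }
    Ok(())
}
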
|
||||
|
||||
impl<R> Default for OpGethReceiptFileCodec<R> {
|
||||
fn default() -> Self {
|
||||
Self(Default::default())
|
||||
}
|
||||
}
|
||||
|
||||
impl<R> Decoder for OpGethReceiptFileCodec<R>
|
||||
where
|
||||
R: TryFrom<OpGethReceipt, Error: Into<FileClientError>>,
|
||||
{
|
||||
type Item = Option<ReceiptWithBlockNumber<R>>;
|
||||
type Error = FileClientError;
|
||||
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
if src.is_empty() {
|
||||
return Ok(None)
|
||||
}
|
||||
|
||||
let buf_slice = &mut src.as_ref();
|
||||
let receipt = OpGethReceiptContainer::decode(buf_slice)
|
||||
.map_err(|err| Self::Error::Rlp(err, src.to_vec()))?
|
||||
.0;
|
||||
src.advance(src.len() - buf_slice.len());
|
||||
|
||||
Ok(Some(
|
||||
receipt
|
||||
.map(|receipt| {
|
||||
let number = receipt.block_number;
|
||||
receipt
|
||||
.try_into()
|
||||
.map_err(Into::into)
|
||||
.map(|receipt| ReceiptWithBlockNumber { receipt, number })
|
||||
})
|
||||
.transpose()?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// See <https://github.com/testinprod-io/op-geth/pull/1>
|
||||
#[derive(Debug, PartialEq, Eq, RlpDecodable)]
|
||||
pub struct OpGethReceipt {
|
||||
tx_type: u8,
|
||||
post_state: Bytes,
|
||||
status: u64,
|
||||
cumulative_gas_used: u64,
|
||||
bloom: Bloom,
|
||||
/// <https://github.com/testinprod-io/op-geth/blob/29062eb0fac595eeeddd3a182a25326405c66e05/core/types/log.go#L67-L72>
|
||||
logs: Vec<Log>,
|
||||
tx_hash: B256,
|
||||
contract_address: Address,
|
||||
gas_used: u64,
|
||||
block_hash: B256,
|
||||
block_number: u64,
|
||||
transaction_index: u32,
|
||||
l1_gas_price: u64,
|
||||
l1_gas_used: u64,
|
||||
l1_fee: u64,
|
||||
fee_scalar: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, RlpDecodable)]
|
||||
#[rlp(trailing)]
|
||||
struct OpGethReceiptContainer(Option<OpGethReceipt>);
|
||||
|
||||
impl TryFrom<OpGethReceipt> for OpReceipt {
|
||||
type Error = FileClientError;
|
||||
|
||||
fn try_from(exported_receipt: OpGethReceipt) -> Result<Self, Self::Error> {
|
||||
let OpGethReceipt { tx_type, status, cumulative_gas_used, logs, .. } = exported_receipt;
|
||||
|
||||
let tx_type = OpTxType::try_from(tx_type.to_be_bytes()[0])
|
||||
.map_err(|e| FileClientError::Rlp(e.into(), vec![tx_type]))?;
|
||||
|
||||
let receipt =
|
||||
alloy_consensus::Receipt { status: (status != 0).into(), cumulative_gas_used, logs };
|
||||
|
||||
match tx_type {
|
||||
OpTxType::Legacy => Ok(Self::Legacy(receipt)),
|
||||
OpTxType::Eip2930 => Ok(Self::Eip2930(receipt)),
|
||||
OpTxType::Eip1559 => Ok(Self::Eip1559(receipt)),
|
||||
OpTxType::Eip7702 => Ok(Self::Eip7702(receipt)),
|
||||
OpTxType::Deposit => Ok(Self::Deposit(OpDepositReceipt {
|
||||
inner: receipt,
|
||||
deposit_nonce: None,
|
||||
deposit_receipt_version: None,
|
||||
})),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod test {
|
||||
use alloy_consensus::{Receipt, TxReceipt};
|
||||
use alloy_primitives::{address, b256, hex, LogData};
|
||||
|
||||
use super::*;
|
||||
|
||||
pub(crate) const HACK_RECEIPT_ENCODED_BLOCK_1: &[u8] = &hex!(
|
||||
"f9030ff9030c8080018303183db9010000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000400000000000100000000000000200000000002000000000000001000000000000000000004000000000000000000000000000040000400000100400000000000000100000000000000000000000000000020000000000000000000000000000000000000000000000001000000000000000000000100000000000000000000000000000000000000000000000000000000000000088000000080000000000010000000000000000000000000000800008000120000000000000000000000000000000002000f90197f89b948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff863a00109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271a00000000000000000000000000000000000000000000000000000000000014218a000000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2da000000000000000000000000000000000000000000000000000000000618d8837f89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68ca000000000000000000000000000000000000000000000000000000000d0e3ebf0a00000000000000000000000000000000000000000000000000000000000014218a000000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d80f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234fa000000000000000000000000000000000000000000000007edc6ca0bb6834800080a05e77a04531c7c107af1882d76cbff9486d0a9aa53701c30888509d4f5f2b003a9400000000000000000000000000000000000000008303183da0bee7192e575af30420cae0c7776304ac196077ee72b048970549e4f08e8754530180018212c2821c2383312e35"
|
||||
);
|
||||
|
||||
pub(crate) const HACK_RECEIPT_ENCODED_BLOCK_2: &[u8] = &hex!(
|
||||
"f90271f9026e8080018301c60db9010000080000000200000000000000000008000000000000000000000100008000000000000000000000000000000000000000000000000000000000400000000000100000000000000000000000020000000000000000000000000000000000004000000000000000000000000000000000400000000400000000000000100000000000000000000000000000020000000000000000000000000000000000000000100000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000008400000000000000000010000000000000000020000000020000000000000000000000000000000000000000000002000f8faf89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68ca000000000000000000000000000000000000000000000000000000000d0ea0e40a00000000000000000000000000000000000000000000000000000000000014218a0000000000000000000000000e5e7492282fd1e3bfac337a0beccd29b15b7b24080f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234fa000000000000000000000000000000000000000000000007eda7867e0c7d4800080a0af6ed8a6864d44989adc47c84f6fe0aeb1819817505c42cde6cbbcd5e14dd3179400000000000000000000000000000000000000008301c60da045fd6ce41bb8ebb2bccdaa92dd1619e287704cb07722039901a7eba63dea1d130280018212c2821c2383312e35"
|
||||
);
|
||||
|
||||
pub(crate) const HACK_RECEIPT_ENCODED_BLOCK_3: &[u8] = &hex!(
|
||||
"f90271f9026e8080018301c60db9010000000000000000000000000000000000000000400000000000000000008000000000000000000000000000000000004000000000000000000000400004000000100000000000000000000000000000000000000000000000000000000000004000000000000000000000040000000000400080000400000000000000100000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000008100000000000000000000000000000000000004000000000000000000000000008000000000000000000010000000000000000000000000000400000000000000001000000000000000000000000002000f8faf89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68ca000000000000000000000000000000000000000000000000000000000d101e54ba00000000000000000000000000000000000000000000000000000000000014218a0000000000000000000000000fa011d8d6c26f13abe2cefed38226e401b2b8a9980f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234fa000000000000000000000000000000000000000000000007ed8842f062774800080a08fab01dcec1da547e90a77597999e9153ff788fa6451d1cc942064427bd995019400000000000000000000000000000000000000008301c60da0da4509fe0ca03202ddbe4f68692c132d689ee098433691040ece18c3a45d44c50380018212c2821c2383312e35"
|
||||
);
|
||||
|
||||
fn hack_receipt_1() -> OpGethReceipt {
|
||||
let receipt = receipt_block_1();
|
||||
|
||||
OpGethReceipt {
|
||||
tx_type: receipt.receipt.tx_type() as u8,
|
||||
post_state: Bytes::default(),
|
||||
status: receipt.receipt.status() as u64,
|
||||
cumulative_gas_used: receipt.receipt.cumulative_gas_used(),
|
||||
bloom: Bloom::from(hex!(
|
||||
"00000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000400000000000100000000000000200000000002000000000000001000000000000000000004000000000000000000000000000040000400000100400000000000000100000000000000000000000000000020000000000000000000000000000000000000000000000001000000000000000000000100000000000000000000000000000000000000000000000000000000000000088000000080000000000010000000000000000000000000000800008000120000000000000000000000000000000002000"
|
||||
)),
|
||||
logs: receipt.receipt.into_logs(),
|
||||
tx_hash: b256!("0x5e77a04531c7c107af1882d76cbff9486d0a9aa53701c30888509d4f5f2b003a"),
contract_address: Address::ZERO,
gas_used: 202813,
|
||||
block_hash: b256!("0xbee7192e575af30420cae0c7776304ac196077ee72b048970549e4f08e875453"),
|
||||
block_number: receipt.number,
|
||||
transaction_index: 0,
|
||||
l1_gas_price: 1,
|
||||
l1_gas_used: 4802,
|
||||
l1_fee: 7203,
|
||||
fee_scalar: String::from("1.5"),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn receipt_block_1() -> ReceiptWithBlockNumber<OpReceipt> {
|
||||
let log_1 = Log {
|
||||
address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"),
|
||||
data: LogData::new(
|
||||
vec![
|
||||
b256!("0x0109fc6f55cf40689f02fbaad7af7fe7bbac8a3d2186600afc7d3e10cac60271"),
|
||||
b256!("0x0000000000000000000000000000000000000000000000000000000000014218"),
|
||||
b256!("0x00000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d"),
|
||||
],
|
||||
Bytes::from(hex!(
|
||||
"00000000000000000000000000000000000000000000000000000000618d8837"
|
||||
)),
|
||||
)
|
||||
.unwrap(),
|
||||
};
|
||||
|
||||
let log_2 = Log {
|
||||
address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"),
|
||||
data: LogData::new(
|
||||
vec![
|
||||
b256!("0x92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68c"),
|
||||
b256!("0x00000000000000000000000000000000000000000000000000000000d0e3ebf0"),
|
||||
b256!("0x0000000000000000000000000000000000000000000000000000000000014218"),
|
||||
b256!("0x00000000000000000000000070b17c0fe982ab4a7ac17a4c25485643151a1f2d"),
|
||||
],
|
||||
Bytes::default(),
|
||||
)
|
||||
.unwrap(),
|
||||
};
|
||||
|
||||
let log_3 = Log {
|
||||
address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"),
|
||||
data: LogData::new(
|
||||
vec![
|
||||
b256!("0xfe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f"),
|
||||
b256!("0x00000000000000000000000000000000000000000000007edc6ca0bb68348000"),
|
||||
],
|
||||
Bytes::default(),
|
||||
)
|
||||
.unwrap(),
|
||||
};
|
||||
|
||||
let receipt = OpReceipt::Legacy(Receipt {
|
||||
status: true.into(),
|
||||
cumulative_gas_used: 202813,
|
||||
logs: vec![log_1, log_2, log_3],
|
||||
});
|
||||
|
||||
ReceiptWithBlockNumber { receipt, number: 1 }
|
||||
}
|
||||
|
||||
pub(crate) fn receipt_block_2() -> ReceiptWithBlockNumber<OpReceipt> {
|
||||
let log_1 = Log {
|
||||
address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"),
|
||||
data: LogData::new(
|
||||
vec![
|
||||
b256!("0x92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68c"),
|
||||
b256!("0x00000000000000000000000000000000000000000000000000000000d0ea0e40"),
|
||||
b256!("0x0000000000000000000000000000000000000000000000000000000000014218"),
|
||||
b256!("0x000000000000000000000000e5e7492282fd1e3bfac337a0beccd29b15b7b240"),
|
||||
],
|
||||
Bytes::default(),
|
||||
)
|
||||
.unwrap(),
|
||||
};
|
||||
|
||||
let log_2 = Log {
|
||||
address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"),
|
||||
data: LogData::new(
|
||||
vec![
|
||||
b256!("0xfe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f"),
|
||||
b256!("0x00000000000000000000000000000000000000000000007eda7867e0c7d48000"),
|
||||
],
|
||||
Bytes::default(),
|
||||
)
|
||||
.unwrap(),
|
||||
};
|
||||
|
||||
let receipt = OpReceipt::Legacy(Receipt {
|
||||
status: true.into(),
|
||||
cumulative_gas_used: 116237,
|
||||
logs: vec![log_1, log_2],
|
||||
});
|
||||
|
||||
ReceiptWithBlockNumber { receipt, number: 2 }
|
||||
}
|
||||
|
||||
pub(crate) fn receipt_block_3() -> ReceiptWithBlockNumber<OpReceipt> {
|
||||
let log_1 = Log {
|
||||
address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"),
|
||||
data: LogData::new(
|
||||
vec![
|
||||
b256!("0x92e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68c"),
|
||||
b256!("0x00000000000000000000000000000000000000000000000000000000d101e54b"),
|
||||
b256!("0x0000000000000000000000000000000000000000000000000000000000014218"),
|
||||
b256!("0x000000000000000000000000fa011d8d6c26f13abe2cefed38226e401b2b8a99"),
|
||||
],
|
||||
Bytes::default(),
|
||||
)
|
||||
.unwrap(),
|
||||
};
|
||||
|
||||
let log_2 = Log {
|
||||
address: address!("0x8ce8c13d816fe6daf12d6fd9e4952e1fc88850af"),
|
||||
data: LogData::new(
|
||||
vec![
|
||||
b256!("0xfe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234f"),
|
||||
b256!("0x00000000000000000000000000000000000000000000007ed8842f0627748000"),
|
||||
],
|
||||
Bytes::default(),
|
||||
)
|
||||
.unwrap(),
|
||||
};
|
||||
|
||||
let receipt = OpReceipt::Legacy(Receipt {
|
||||
status: true.into(),
|
||||
cumulative_gas_used: 116237,
|
||||
logs: vec![log_1, log_2],
|
||||
});
|
||||
|
||||
ReceiptWithBlockNumber { receipt, number: 3 }
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn decode_hack_receipt() {
|
||||
let receipt = hack_receipt_1();
|
||||
|
||||
let decoded = OpGethReceiptContainer::decode(&mut &HACK_RECEIPT_ENCODED_BLOCK_1[..])
|
||||
.unwrap()
|
||||
.0
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(receipt, decoded);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn receipts_codec() {
|
||||
// rig
|
||||
|
||||
let mut receipt_1_to_3 = HACK_RECEIPT_ENCODED_BLOCK_1.to_vec();
|
||||
receipt_1_to_3.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_2);
|
||||
receipt_1_to_3.extend_from_slice(HACK_RECEIPT_ENCODED_BLOCK_3);
|
||||
|
||||
let encoded = &mut BytesMut::from(&receipt_1_to_3[..]);
|
||||
|
||||
let mut codec = OpGethReceiptFileCodec::default();
|
||||
|
||||
// test
|
||||
|
||||
let first_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap();
|
||||
|
||||
assert_eq!(receipt_block_1(), first_decoded_receipt);
|
||||
|
||||
let second_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap();
|
||||
|
||||
assert_eq!(receipt_block_2(), second_decoded_receipt);
|
||||
|
||||
let third_decoded_receipt = codec.decode(encoded).unwrap().unwrap().unwrap();
|
||||
|
||||
assert_eq!(receipt_block_3(), third_decoded_receipt);
|
||||
}
|
||||
}
|
||||
@@ -1,76 +0,0 @@
|
||||
[package]
|
||||
name = "reth-optimism-consensus"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
rust-version.workspace = true
|
||||
license.workspace = true
|
||||
homepage.workspace = true
|
||||
repository.workspace = true
|
||||
exclude.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
# reth
|
||||
reth-execution-types.workspace = true
|
||||
reth-chainspec.workspace = true
|
||||
reth-consensus-common.workspace = true
|
||||
reth-consensus.workspace = true
|
||||
reth-primitives-traits.workspace = true
|
||||
reth-storage-api.workspace = true
|
||||
reth-storage-errors.workspace = true
|
||||
reth-trie-common.workspace = true
|
||||
|
||||
# op-reth
|
||||
reth-optimism-forks.workspace = true
|
||||
reth-optimism-primitives.workspace = true
|
||||
|
||||
# ethereum
|
||||
alloy-eips.workspace = true
|
||||
alloy-primitives.workspace = true
|
||||
alloy-consensus.workspace = true
|
||||
alloy-trie.workspace = true
|
||||
revm.workspace = true
|
||||
|
||||
# misc
|
||||
tracing.workspace = true
|
||||
thiserror.workspace = true
|
||||
reth-optimism-chainspec.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
reth-provider = { workspace = true, features = ["test-utils"] }
|
||||
reth-db-common.workspace = true
|
||||
reth-revm.workspace = true
|
||||
reth-trie.workspace = true
|
||||
reth-optimism-node.workspace = true
|
||||
|
||||
alloy-chains.workspace = true
|
||||
|
||||
op-alloy-consensus.workspace = true
|
||||
|
||||
[features]
|
||||
default = ["std"]
|
||||
std = [
|
||||
"reth-chainspec/std",
|
||||
"reth-consensus/std",
|
||||
"reth-consensus-common/std",
|
||||
"reth-primitives-traits/std",
|
||||
"reth-optimism-forks/std",
|
||||
"reth-optimism-chainspec/std",
|
||||
"reth-optimism-primitives/std",
|
||||
"reth-storage-api/std",
|
||||
"reth-storage-errors/std",
|
||||
"reth-trie-common/std",
|
||||
"alloy-chains/std",
|
||||
"alloy-eips/std",
|
||||
"alloy-primitives/std",
|
||||
"alloy-consensus/std",
|
||||
"alloy-trie/std",
|
||||
"reth-revm/std",
|
||||
"revm/std",
|
||||
"tracing/std",
|
||||
"thiserror/std",
|
||||
"reth-execution-types/std",
|
||||
"op-alloy-consensus/std",
|
||||
]
|
||||
@@ -1,30 +0,0 @@
|
||||
//! Optimism consensus errors
|
||||
|
||||
use alloy_primitives::B256;
|
||||
use reth_consensus::ConsensusError;
|
||||
use reth_storage_errors::provider::ProviderError;
|
||||
|
||||
/// Optimism consensus error.
|
||||
#[derive(Debug, Clone, thiserror::Error)]
|
||||
pub enum OpConsensusError {
|
||||
/// Block body has non-empty withdrawals list (l1 withdrawals).
|
||||
#[error("non-empty block body withdrawals list")]
|
||||
WithdrawalsNonEmpty,
|
||||
/// Failed to compute L2 withdrawals storage root.
|
||||
#[error("compute L2 withdrawals root failed: {_0}")]
|
||||
L2WithdrawalsRootCalculationFail(#[from] ProviderError),
|
||||
/// L2 withdrawals root missing in block header.
|
||||
#[error("L2 withdrawals root missing from block header")]
|
||||
L2WithdrawalsRootMissing,
|
||||
/// L2 withdrawals root in the block header doesn't match the local storage root of the predeploy.
|
||||
#[error("L2 withdrawals root mismatch, header: {header}, exec_res: {exec_res}")]
|
||||
L2WithdrawalsRootMismatch {
|
||||
/// Storage root of pre-deploy in block.
|
||||
header: B256,
|
||||
/// Storage root of pre-deploy loaded from local state.
|
||||
exec_res: B256,
|
||||
},
|
||||
/// L1 [`ConsensusError`], that also occurs on L2.
|
||||
#[error(transparent)]
|
||||
Eth(#[from] ConsensusError),
|
||||
}
|
||||
@@ -1,789 +0,0 @@
//! Optimism Consensus implementation.

#![doc(
    html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
    html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
    issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]

extern crate alloc;

use alloc::{format, sync::Arc};
use alloy_consensus::{
    constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader as _, EMPTY_OMMER_ROOT_HASH,
};
use alloy_primitives::B64;
use core::fmt::Debug;
use reth_chainspec::EthChainSpec;
use reth_consensus::{Consensus, ConsensusError, FullConsensus, HeaderValidator, ReceiptRootBloom};
use reth_consensus_common::validation::{
    validate_against_parent_eip1559_base_fee, validate_against_parent_hash_number,
    validate_against_parent_timestamp, validate_cancun_gas, validate_header_base_fee,
    validate_header_extra_data, validate_header_gas,
};
use reth_execution_types::BlockExecutionResult;
use reth_optimism_forks::OpHardforks;
use reth_optimism_primitives::DepositReceipt;
use reth_primitives_traits::{
    Block, BlockBody, BlockHeader, GotExpected, NodePrimitives, RecoveredBlock, SealedBlock,
    SealedHeader,
};

mod proof;
pub use proof::calculate_receipt_root_no_memo_optimism;

pub mod validation;
pub use validation::{canyon, isthmus, validate_block_post_execution};

pub mod error;
pub use error::OpConsensusError;

/// Optimism consensus implementation.
///
/// Provides basic checks as outlined in the execution specs.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct OpBeaconConsensus<ChainSpec> {
    /// Configuration
    chain_spec: Arc<ChainSpec>,
    /// Maximum allowed extra data size in bytes
    max_extra_data_size: usize,
}

impl<ChainSpec> OpBeaconConsensus<ChainSpec> {
    /// Create a new instance of [`OpBeaconConsensus`]
    pub const fn new(chain_spec: Arc<ChainSpec>) -> Self {
        Self { chain_spec, max_extra_data_size: MAXIMUM_EXTRA_DATA_SIZE }
    }

    /// Returns the maximum allowed extra data size.
    pub const fn max_extra_data_size(&self) -> usize {
        self.max_extra_data_size
    }

    /// Sets the maximum allowed extra data size and returns the updated instance.
    pub const fn with_max_extra_data_size(mut self, size: usize) -> Self {
        self.max_extra_data_size = size;
        self
    }
}

impl<N, ChainSpec> FullConsensus<N> for OpBeaconConsensus<ChainSpec>
where
    N: NodePrimitives<Receipt: DepositReceipt>,
    ChainSpec: EthChainSpec<Header = N::BlockHeader> + OpHardforks + Debug + Send + Sync,
{
    fn validate_block_post_execution(
        &self,
        block: &RecoveredBlock<N::Block>,
        result: &BlockExecutionResult<N::Receipt>,
        receipt_root_bloom: Option<ReceiptRootBloom>,
    ) -> Result<(), ConsensusError> {
        validate_block_post_execution(block.header(), &self.chain_spec, result, receipt_root_bloom)
    }
}

impl<B, ChainSpec> Consensus<B> for OpBeaconConsensus<ChainSpec>
where
    B: Block,
    ChainSpec: EthChainSpec<Header = B::Header> + OpHardforks + Debug + Send + Sync,
{
    fn validate_body_against_header(
        &self,
        body: &B::Body,
        header: &SealedHeader<B::Header>,
    ) -> Result<(), ConsensusError> {
        validation::validate_body_against_header_op(&self.chain_spec, body, header.header())
    }

    fn validate_block_pre_execution(&self, block: &SealedBlock<B>) -> Result<(), ConsensusError> {
        // Check ommers hash
        let ommers_hash = block.body().calculate_ommers_root();
        if Some(block.ommers_hash()) != ommers_hash {
            return Err(ConsensusError::BodyOmmersHashDiff(
                GotExpected {
                    got: ommers_hash.unwrap_or(EMPTY_OMMER_ROOT_HASH),
                    expected: block.ommers_hash(),
                }
                .into(),
            ))
        }

        // Check transaction root
        if let Err(error) = block.ensure_transaction_root_valid() {
            return Err(ConsensusError::BodyTransactionRootDiff(error.into()))
        }

        // Check empty shanghai-withdrawals
        if self.chain_spec.is_canyon_active_at_timestamp(block.timestamp()) {
            canyon::ensure_empty_shanghai_withdrawals(block.body()).map_err(|err| {
                ConsensusError::Other(format!("failed to verify block {}: {err}", block.number()))
            })?
        } else {
            return Ok(())
        }

        // Blob gas used validation
        // In Jovian, the blob gas used computation has changed. We are moving the blob base fee
        // validation to post-execution since the DA footprint calculation is stateful.
        // Pre-execution we only validate that the blob gas used is present in the header.
        if self.chain_spec.is_jovian_active_at_timestamp(block.timestamp()) {
            block.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?;
        } else if self.chain_spec.is_ecotone_active_at_timestamp(block.timestamp()) {
            validate_cancun_gas(block)?;
        }

        // Check withdrawals root field in header
        if self.chain_spec.is_isthmus_active_at_timestamp(block.timestamp()) {
            // storage root of withdrawals pre-deploy is verified post-execution
            isthmus::ensure_withdrawals_storage_root_is_some(block.header()).map_err(|err| {
                ConsensusError::Other(format!("failed to verify block {}: {err}", block.number()))
            })?
        } else {
            // canyon is active, else would have returned already
            canyon::ensure_empty_withdrawals_root(block.header())?
        }

        Ok(())
    }
}

impl<H, ChainSpec> HeaderValidator<H> for OpBeaconConsensus<ChainSpec>
where
    H: BlockHeader,
    ChainSpec: EthChainSpec<Header = H> + OpHardforks + Debug + Send + Sync,
{
    fn validate_header(&self, header: &SealedHeader<H>) -> Result<(), ConsensusError> {
        let header = header.header();
        // with OP-stack Bedrock activation number determines when TTD (eth Merge) has been reached.
        debug_assert!(
            self.chain_spec.is_bedrock_active_at_block(header.number()),
            "manually import OVM blocks"
        );

        if header.nonce() != Some(B64::ZERO) {
            return Err(ConsensusError::TheMergeNonceIsNotZero)
        }

        if header.ommers_hash() != EMPTY_OMMER_ROOT_HASH {
            return Err(ConsensusError::TheMergeOmmerRootIsNotEmpty)
        }

        // Post-merge, the consensus layer is expected to perform checks such that the block
        // timestamp is a function of the slot. This is different from pre-merge, where blocks
        // are only allowed to be in the future (compared to the system's clock) by a certain
        // threshold.
        //
        // Block validation with respect to the parent should ensure that the block timestamp
        // is greater than its parent timestamp.

        // validate header extra data for all networks post merge
        validate_header_extra_data(header, self.max_extra_data_size)?;
        validate_header_gas(header)?;
        validate_header_base_fee(header, &self.chain_spec)
    }

    fn validate_header_against_parent(
        &self,
        header: &SealedHeader<H>,
        parent: &SealedHeader<H>,
    ) -> Result<(), ConsensusError> {
        validate_against_parent_hash_number(header.header(), parent)?;

        if self.chain_spec.is_bedrock_active_at_block(header.number()) {
            validate_against_parent_timestamp(header.header(), parent.header())?;
        }

        validate_against_parent_eip1559_base_fee(
            header.header(),
            parent.header(),
            &self.chain_spec,
        )?;

        // Ensure that the blob gas fields for this block are correctly set.
        // In the op-stack, the excess blob gas is always 0 for all blocks after ecotone.
        // The blob gas used and the excess blob gas should both be set after ecotone.
        // After Jovian, the blob gas used contains the current DA footprint.
        if self.chain_spec.is_ecotone_active_at_timestamp(header.timestamp()) {
            let blob_gas_used = header.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?;

            // Before Jovian and after ecotone, the blob gas used should be 0.
            if !self.chain_spec.is_jovian_active_at_timestamp(header.timestamp()) &&
                blob_gas_used != 0
            {
                return Err(ConsensusError::BlobGasUsedDiff(GotExpected {
                    got: blob_gas_used,
                    expected: 0,
                }));
            }

            let excess_blob_gas =
                header.excess_blob_gas().ok_or(ConsensusError::ExcessBlobGasMissing)?;
            if excess_blob_gas != 0 {
                return Err(ConsensusError::ExcessBlobGasDiff {
                    diff: GotExpected { got: excess_blob_gas, expected: 0 },
                    parent_excess_blob_gas: parent.excess_blob_gas().unwrap_or(0),
                    parent_blob_gas_used: parent.blob_gas_used().unwrap_or(0),
                })
            }
        }

        Ok(())
    }
}

#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::Arc;
|
||||
|
||||
use alloy_consensus::{BlockBody, Eip658Value, Header, Receipt, TxEip7702, TxReceipt};
|
||||
use alloy_eips::{eip4895::Withdrawals, eip7685::Requests};
|
||||
use alloy_primitives::{Address, Bytes, Log, Signature, U256};
|
||||
use op_alloy_consensus::{
|
||||
encode_holocene_extra_data, encode_jovian_extra_data, OpTypedTransaction,
|
||||
};
|
||||
use reth_chainspec::BaseFeeParams;
|
||||
use reth_consensus::{Consensus, ConsensusError, FullConsensus, HeaderValidator};
|
||||
use reth_optimism_chainspec::{OpChainSpec, OpChainSpecBuilder, OP_MAINNET};
|
||||
use reth_optimism_primitives::{OpPrimitives, OpReceipt, OpTransactionSigned};
|
||||
use reth_primitives_traits::{proofs, RecoveredBlock, SealedBlock, SealedHeader};
|
||||
use reth_provider::BlockExecutionResult;
|
||||
|
||||
use crate::OpBeaconConsensus;
|
||||
|
||||
fn mock_tx(nonce: u64) -> OpTransactionSigned {
|
||||
let tx = TxEip7702 {
|
||||
chain_id: 1u64,
|
||||
nonce,
|
||||
max_fee_per_gas: 0x28f000fff,
|
||||
max_priority_fee_per_gas: 0x28f000fff,
|
||||
gas_limit: 10,
|
||||
to: Address::default(),
|
||||
value: U256::from(3_u64),
|
||||
input: Bytes::from(vec![1, 2]),
|
||||
access_list: Default::default(),
|
||||
authorization_list: Default::default(),
|
||||
};
|
||||
|
||||
let signature = Signature::new(U256::default(), U256::default(), true);
|
||||
|
||||
OpTransactionSigned::new_unhashed(OpTypedTransaction::Eip7702(tx), signature)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_block_blob_gas_used_validation_isthmus() {
|
||||
let chain_spec = OpChainSpecBuilder::default()
|
||||
.isthmus_activated()
|
||||
.genesis(OP_MAINNET.genesis.clone())
|
||||
.chain(OP_MAINNET.chain)
|
||||
.build();
|
||||
|
||||
// create a tx
|
||||
let transaction = mock_tx(0);
|
||||
|
||||
let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec));
|
||||
|
||||
let header = Header {
|
||||
base_fee_per_gas: Some(1337),
|
||||
withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])),
|
||||
blob_gas_used: Some(0),
|
||||
transactions_root: proofs::calculate_transaction_root(std::slice::from_ref(
|
||||
&transaction,
|
||||
)),
|
||||
timestamp: u64::MAX,
|
||||
..Default::default()
|
||||
};
|
||||
let body = BlockBody {
|
||||
transactions: vec![transaction],
|
||||
ommers: vec![],
|
||||
withdrawals: Some(Withdrawals::default()),
|
||||
};
|
||||
|
||||
let block = SealedBlock::seal_slow(alloy_consensus::Block { header, body });
|
||||
|
||||
// validate blob, it should pass blob gas used validation
|
||||
let pre_execution = beacon_consensus.validate_block_pre_execution(&block);
|
||||
|
||||
assert!(pre_execution.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_block_blob_gas_used_validation_failure_isthmus() {
|
||||
let chain_spec = OpChainSpecBuilder::default()
|
||||
.isthmus_activated()
|
||||
.genesis(OP_MAINNET.genesis.clone())
|
||||
.chain(OP_MAINNET.chain)
|
||||
.build();
|
||||
|
||||
// create a tx
|
||||
let transaction = mock_tx(0);
|
||||
|
||||
let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec));
|
||||
|
||||
let header = Header {
|
||||
base_fee_per_gas: Some(1337),
|
||||
withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])),
|
||||
blob_gas_used: Some(10),
|
||||
transactions_root: proofs::calculate_transaction_root(std::slice::from_ref(
|
||||
&transaction,
|
||||
)),
|
||||
timestamp: u64::MAX,
|
||||
..Default::default()
|
||||
};
|
||||
let body = BlockBody {
|
||||
transactions: vec![transaction],
|
||||
ommers: vec![],
|
||||
withdrawals: Some(Withdrawals::default()),
|
||||
};
|
||||
|
||||
let block = SealedBlock::seal_slow(alloy_consensus::Block { header, body });
|
||||
|
||||
// validate blob, it should fail blob gas used validation
|
||||
let pre_execution = beacon_consensus.validate_block_pre_execution(&block);
|
||||
|
||||
assert!(matches!(
|
||||
pre_execution.unwrap_err(),
|
||||
ConsensusError::BlobGasUsedDiff(diff) if diff.got == 10 && diff.expected == 0
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_block_blob_gas_used_validation_jovian() {
|
||||
const BLOB_GAS_USED: u64 = 1000;
|
||||
const GAS_USED: u64 = 10;
|
||||
|
||||
let chain_spec = OpChainSpecBuilder::default()
|
||||
.jovian_activated()
|
||||
.genesis(OP_MAINNET.genesis.clone())
|
||||
.chain(OP_MAINNET.chain)
|
||||
.build();
|
||||
|
||||
// create a tx
|
||||
let transaction = mock_tx(0);
|
||||
|
||||
let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec));
|
||||
|
||||
let receipt = OpReceipt::Eip7702(Receipt::<Log> {
|
||||
status: Eip658Value::success(),
|
||||
cumulative_gas_used: GAS_USED,
|
||||
logs: vec![],
|
||||
});
|
||||
|
||||
let header = Header {
|
||||
base_fee_per_gas: Some(1337),
|
||||
withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])),
|
||||
blob_gas_used: Some(BLOB_GAS_USED),
|
||||
transactions_root: proofs::calculate_transaction_root(std::slice::from_ref(
|
||||
&transaction,
|
||||
)),
|
||||
timestamp: u64::MAX,
|
||||
gas_used: GAS_USED,
|
||||
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(
|
||||
&receipt.with_bloom_ref(),
|
||||
)),
|
||||
logs_bloom: receipt.bloom(),
|
||||
..Default::default()
|
||||
};
|
||||
let body = BlockBody {
|
||||
transactions: vec![transaction],
|
||||
ommers: vec![],
|
||||
withdrawals: Some(Withdrawals::default()),
|
||||
};
|
||||
|
||||
let block = SealedBlock::seal_slow(alloy_consensus::Block { header, body });
|
||||
|
||||
let result = BlockExecutionResult::<OpReceipt> {
|
||||
blob_gas_used: BLOB_GAS_USED,
|
||||
receipts: vec![receipt],
|
||||
requests: Requests::default(),
|
||||
gas_used: GAS_USED,
|
||||
};
|
||||
|
||||
// validate blob, it should pass blob gas used validation
|
||||
let pre_execution = beacon_consensus.validate_block_pre_execution(&block);
|
||||
|
||||
assert!(pre_execution.is_ok());
|
||||
|
||||
let block = RecoveredBlock::new_sealed(block, vec![Address::default()]);
|
||||
|
||||
let post_execution = <OpBeaconConsensus<OpChainSpec> as FullConsensus<OpPrimitives>>::validate_block_post_execution(
|
||||
&beacon_consensus,
|
||||
&block,
|
||||
&result,
|
||||
None,
|
||||
);
|
||||
|
||||
// validate blob, it should pass blob gas used validation
|
||||
assert!(post_execution.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_block_blob_gas_used_validation_failure_jovian() {
|
||||
const BLOB_GAS_USED: u64 = 1000;
|
||||
const GAS_USED: u64 = 10;
|
||||
|
||||
let chain_spec = OpChainSpecBuilder::default()
|
||||
.jovian_activated()
|
||||
.genesis(OP_MAINNET.genesis.clone())
|
||||
.chain(OP_MAINNET.chain)
|
||||
.build();
|
||||
|
||||
// create a tx
|
||||
let transaction = mock_tx(0);
|
||||
|
||||
let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec));
|
||||
|
||||
let receipt = OpReceipt::Eip7702(Receipt::<Log> {
|
||||
status: Eip658Value::success(),
|
||||
cumulative_gas_used: GAS_USED,
|
||||
logs: vec![],
|
||||
});
|
||||
|
||||
let header = Header {
|
||||
base_fee_per_gas: Some(1337),
|
||||
withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])),
|
||||
blob_gas_used: Some(BLOB_GAS_USED),
|
||||
transactions_root: proofs::calculate_transaction_root(std::slice::from_ref(
|
||||
&transaction,
|
||||
)),
|
||||
gas_used: GAS_USED,
|
||||
timestamp: u64::MAX,
|
||||
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(
|
||||
&receipt.with_bloom_ref(),
|
||||
)),
|
||||
logs_bloom: receipt.bloom(),
|
||||
..Default::default()
|
||||
};
|
||||
let body = BlockBody {
|
||||
transactions: vec![transaction],
|
||||
ommers: vec![],
|
||||
withdrawals: Some(Withdrawals::default()),
|
||||
};
|
||||
|
||||
let block = SealedBlock::seal_slow(alloy_consensus::Block { header, body });
|
||||
|
||||
let result = BlockExecutionResult::<OpReceipt> {
|
||||
blob_gas_used: BLOB_GAS_USED + 1,
|
||||
receipts: vec![receipt],
|
||||
requests: Requests::default(),
|
||||
gas_used: GAS_USED,
|
||||
};
|
||||
|
||||
// validate blob, it should pass blob gas used validation
|
||||
let pre_execution = beacon_consensus.validate_block_pre_execution(&block);
|
||||
|
||||
assert!(pre_execution.is_ok());
|
||||
|
||||
let block = RecoveredBlock::new_sealed(block, vec![Address::default()]);
|
||||
|
||||
let post_execution = <OpBeaconConsensus<OpChainSpec> as FullConsensus<OpPrimitives>>::validate_block_post_execution(
|
||||
&beacon_consensus,
|
||||
&block,
|
||||
&result,
|
||||
None,
|
||||
);
|
||||
|
||||
// validate blob, it should fail blob gas used validation post execution.
|
||||
assert!(matches!(
|
||||
post_execution.unwrap_err(),
|
||||
ConsensusError::BlobGasUsedDiff(diff)
|
||||
if diff.got == BLOB_GAS_USED + 1 && diff.expected == BLOB_GAS_USED
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_header_min_base_fee_validation() {
|
||||
const MIN_BASE_FEE: u64 = 1000;
|
||||
|
||||
let chain_spec = OpChainSpecBuilder::default()
|
||||
.jovian_activated()
|
||||
.genesis(OP_MAINNET.genesis.clone())
|
||||
.chain(OP_MAINNET.chain)
|
||||
.build();
|
||||
|
||||
// create a tx
|
||||
let transaction = mock_tx(0);
|
||||
|
||||
let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec));
|
||||
|
||||
let receipt = OpReceipt::Eip7702(Receipt::<Log> {
|
||||
status: Eip658Value::success(),
|
||||
cumulative_gas_used: 0,
|
||||
logs: vec![],
|
||||
});
|
||||
|
||||
let parent = Header {
|
||||
number: 0,
|
||||
base_fee_per_gas: Some(MIN_BASE_FEE / 10),
|
||||
withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])),
|
||||
blob_gas_used: Some(0),
|
||||
excess_blob_gas: Some(0),
|
||||
transactions_root: proofs::calculate_transaction_root(std::slice::from_ref(
|
||||
&transaction,
|
||||
)),
|
||||
gas_used: 0,
|
||||
timestamp: u64::MAX - 1,
|
||||
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(
|
||||
&receipt.with_bloom_ref(),
|
||||
)),
|
||||
logs_bloom: receipt.bloom(),
|
||||
extra_data: encode_jovian_extra_data(
|
||||
Default::default(),
|
||||
BaseFeeParams::optimism(),
|
||||
MIN_BASE_FEE,
|
||||
)
|
||||
.unwrap(),
|
||||
..Default::default()
|
||||
};
|
||||
let parent = SealedHeader::seal_slow(parent);
|
||||
|
||||
let header = Header {
|
||||
number: 1,
|
||||
base_fee_per_gas: Some(MIN_BASE_FEE),
|
||||
withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])),
|
||||
blob_gas_used: Some(0),
|
||||
excess_blob_gas: Some(0),
|
||||
transactions_root: proofs::calculate_transaction_root(std::slice::from_ref(
|
||||
&transaction,
|
||||
)),
|
||||
gas_used: 0,
|
||||
timestamp: u64::MAX,
|
||||
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(
|
||||
&receipt.with_bloom_ref(),
|
||||
)),
|
||||
logs_bloom: receipt.bloom(),
|
||||
parent_hash: parent.hash(),
|
||||
..Default::default()
|
||||
};
|
||||
let header = SealedHeader::seal_slow(header);
|
||||
|
||||
let result = beacon_consensus.validate_header_against_parent(&header, &parent);
|
||||
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_header_min_base_fee_validation_failure() {
|
||||
const MIN_BASE_FEE: u64 = 1000;
|
||||
|
||||
let chain_spec = OpChainSpecBuilder::default()
|
||||
.jovian_activated()
|
||||
.genesis(OP_MAINNET.genesis.clone())
|
||||
.chain(OP_MAINNET.chain)
|
||||
.build();
|
||||
|
||||
// create a tx
|
||||
let transaction = mock_tx(0);
|
||||
|
||||
let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec));
|
||||
|
||||
let receipt = OpReceipt::Eip7702(Receipt::<Log> {
|
||||
status: Eip658Value::success(),
|
||||
cumulative_gas_used: 0,
|
||||
logs: vec![],
|
||||
});
|
||||
|
||||
let parent = Header {
|
||||
number: 0,
|
||||
base_fee_per_gas: Some(MIN_BASE_FEE / 10),
|
||||
withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])),
|
||||
blob_gas_used: Some(0),
|
||||
excess_blob_gas: Some(0),
|
||||
transactions_root: proofs::calculate_transaction_root(std::slice::from_ref(
|
||||
&transaction,
|
||||
)),
|
||||
gas_used: 0,
|
||||
timestamp: u64::MAX - 1,
|
||||
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(
|
||||
&receipt.with_bloom_ref(),
|
||||
)),
|
||||
logs_bloom: receipt.bloom(),
|
||||
extra_data: encode_jovian_extra_data(
|
||||
Default::default(),
|
||||
BaseFeeParams::optimism(),
|
||||
MIN_BASE_FEE,
|
||||
)
|
||||
.unwrap(),
|
||||
..Default::default()
|
||||
};
|
||||
let parent = SealedHeader::seal_slow(parent);
|
||||
|
||||
let header = Header {
|
||||
number: 1,
|
||||
base_fee_per_gas: Some(MIN_BASE_FEE - 1),
|
||||
withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])),
|
||||
blob_gas_used: Some(0),
|
||||
excess_blob_gas: Some(0),
|
||||
transactions_root: proofs::calculate_transaction_root(std::slice::from_ref(
|
||||
&transaction,
|
||||
)),
|
||||
gas_used: 0,
|
||||
timestamp: u64::MAX,
|
||||
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(
|
||||
&receipt.with_bloom_ref(),
|
||||
)),
|
||||
logs_bloom: receipt.bloom(),
|
||||
parent_hash: parent.hash(),
|
||||
..Default::default()
|
||||
};
|
||||
let header = SealedHeader::seal_slow(header);
|
||||
|
||||
let result = beacon_consensus.validate_header_against_parent(&header, &parent);
|
||||
|
||||
assert!(matches!(
|
||||
result.unwrap_err(),
|
||||
ConsensusError::BaseFeeDiff(diff)
|
||||
if diff.got == MIN_BASE_FEE - 1 && diff.expected == MIN_BASE_FEE
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_header_da_footprint_validation() {
|
||||
const MIN_BASE_FEE: u64 = 100_000;
|
||||
const DA_FOOTPRINT: u64 = GAS_LIMIT - 1;
|
||||
const GAS_LIMIT: u64 = 100_000_000;
|
||||
|
||||
let chain_spec = OpChainSpecBuilder::default()
|
||||
.jovian_activated()
|
||||
.genesis(OP_MAINNET.genesis.clone())
|
||||
.chain(OP_MAINNET.chain)
|
||||
.build();
|
||||
|
||||
// create a tx
|
||||
let transaction = mock_tx(0);
|
||||
|
||||
let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec));
|
||||
|
||||
let receipt = OpReceipt::Eip7702(Receipt::<Log> {
|
||||
status: Eip658Value::success(),
|
||||
cumulative_gas_used: 0,
|
||||
logs: vec![],
|
||||
});
|
||||
|
||||
let parent = Header {
|
||||
number: 0,
|
||||
base_fee_per_gas: Some(MIN_BASE_FEE),
|
||||
withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])),
|
||||
blob_gas_used: Some(DA_FOOTPRINT),
|
||||
excess_blob_gas: Some(0),
|
||||
transactions_root: proofs::calculate_transaction_root(std::slice::from_ref(
|
||||
&transaction,
|
||||
)),
|
||||
gas_used: 0,
|
||||
timestamp: u64::MAX - 1,
|
||||
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(
|
||||
&receipt.with_bloom_ref(),
|
||||
)),
|
||||
logs_bloom: receipt.bloom(),
|
||||
extra_data: encode_jovian_extra_data(
|
||||
Default::default(),
|
||||
BaseFeeParams::optimism(),
|
||||
MIN_BASE_FEE,
|
||||
)
|
||||
.unwrap(),
|
||||
gas_limit: GAS_LIMIT,
|
||||
..Default::default()
|
||||
};
|
||||
let parent = SealedHeader::seal_slow(parent);
|
||||
|
||||
let header = Header {
|
||||
number: 1,
|
||||
base_fee_per_gas: Some(MIN_BASE_FEE + MIN_BASE_FEE / 10),
|
||||
withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])),
|
||||
blob_gas_used: Some(DA_FOOTPRINT),
|
||||
excess_blob_gas: Some(0),
|
||||
transactions_root: proofs::calculate_transaction_root(std::slice::from_ref(
|
||||
&transaction,
|
||||
)),
|
||||
gas_used: 0,
|
||||
timestamp: u64::MAX,
|
||||
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(
|
||||
&receipt.with_bloom_ref(),
|
||||
)),
|
||||
logs_bloom: receipt.bloom(),
|
||||
parent_hash: parent.hash(),
|
||||
..Default::default()
|
||||
};
|
||||
let header = SealedHeader::seal_slow(header);
|
||||
|
||||
let result = beacon_consensus.validate_header_against_parent(&header, &parent);
|
||||
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_header_isthmus_validation() {
|
||||
const MIN_BASE_FEE: u64 = 100_000;
|
||||
const DA_FOOTPRINT: u64 = GAS_LIMIT - 1;
|
||||
const GAS_LIMIT: u64 = 100_000_000;
|
||||
|
||||
let chain_spec = OpChainSpecBuilder::default()
|
||||
.isthmus_activated()
|
||||
.genesis(OP_MAINNET.genesis.clone())
|
||||
.chain(OP_MAINNET.chain)
|
||||
.build();
|
||||
|
||||
// create a tx
|
||||
let transaction = mock_tx(0);
|
||||
|
||||
let beacon_consensus = OpBeaconConsensus::new(Arc::new(chain_spec));
|
||||
|
||||
let receipt = OpReceipt::Eip7702(Receipt::<Log> {
|
||||
status: Eip658Value::success(),
|
||||
cumulative_gas_used: 0,
|
||||
logs: vec![],
|
||||
});
|
||||
|
||||
let parent = Header {
|
||||
number: 0,
|
||||
base_fee_per_gas: Some(MIN_BASE_FEE),
|
||||
withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])),
|
||||
blob_gas_used: Some(DA_FOOTPRINT),
|
||||
excess_blob_gas: Some(0),
|
||||
transactions_root: proofs::calculate_transaction_root(std::slice::from_ref(
|
||||
&transaction,
|
||||
)),
|
||||
gas_used: 0,
|
||||
timestamp: u64::MAX - 1,
|
||||
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(
|
||||
&receipt.with_bloom_ref(),
|
||||
)),
|
||||
logs_bloom: receipt.bloom(),
|
||||
extra_data: encode_holocene_extra_data(Default::default(), BaseFeeParams::optimism())
|
||||
.unwrap(),
|
||||
gas_limit: GAS_LIMIT,
|
||||
..Default::default()
|
||||
};
|
||||
let parent = SealedHeader::seal_slow(parent);
|
||||
|
||||
let header = Header {
|
||||
number: 1,
|
||||
base_fee_per_gas: Some(MIN_BASE_FEE - 2 * MIN_BASE_FEE / 100),
|
||||
withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])),
|
||||
blob_gas_used: Some(DA_FOOTPRINT),
|
||||
excess_blob_gas: Some(0),
|
||||
transactions_root: proofs::calculate_transaction_root(std::slice::from_ref(
|
||||
&transaction,
|
||||
)),
|
||||
gas_used: 0,
|
||||
timestamp: u64::MAX,
|
||||
receipts_root: proofs::calculate_receipt_root(std::slice::from_ref(
|
||||
&receipt.with_bloom_ref(),
|
||||
)),
|
||||
logs_bloom: receipt.bloom(),
|
||||
parent_hash: parent.hash(),
|
||||
..Default::default()
|
||||
};
|
||||
let header = SealedHeader::seal_slow(header);
|
||||
|
||||
let result = beacon_consensus.validate_header_against_parent(&header, &parent);
|
||||
|
||||
assert!(matches!(
|
||||
result.unwrap_err(),
|
||||
ConsensusError::BlobGasUsedDiff(diff)
|
||||
if diff.got == DA_FOOTPRINT && diff.expected == 0
|
||||
));
|
||||
}
|
||||
}
|
||||
@@ -1,338 +0,0 @@
|
||||
//! Helper function for Receipt root calculation for Optimism hardforks.
|
||||
|
||||
use alloc::vec::Vec;
|
||||
use alloy_consensus::ReceiptWithBloom;
|
||||
use alloy_eips::eip2718::Encodable2718;
|
||||
use alloy_primitives::B256;
|
||||
use alloy_trie::root::ordered_trie_root_with_encoder;
|
||||
use reth_optimism_forks::OpHardforks;
|
||||
use reth_optimism_primitives::DepositReceipt;
|
||||
|
||||
/// Calculates the receipt root for a header.
|
||||
pub(crate) fn calculate_receipt_root_optimism<R: DepositReceipt>(
|
||||
receipts: &[ReceiptWithBloom<&R>],
|
||||
chain_spec: impl OpHardforks,
|
||||
timestamp: u64,
|
||||
) -> B256 {
|
||||
// There is a minor bug in op-geth and op-erigon where in the Regolith hardfork,
|
||||
// the receipt root calculation does not include the deposit nonce in the receipt
|
||||
// encoding. In the Regolith Hardfork, we must strip the deposit nonce from the
|
||||
// receipts before calculating the receipt root. This was corrected in the Canyon
|
||||
// hardfork.
|
||||
if chain_spec.is_regolith_active_at_timestamp(timestamp) &&
|
||||
!chain_spec.is_canyon_active_at_timestamp(timestamp)
|
||||
{
|
||||
let receipts = receipts
|
||||
.iter()
|
||||
.map(|receipt| {
|
||||
let mut receipt = receipt.clone().map_receipt(|r| r.clone());
|
||||
if let Some(receipt) = receipt.receipt.as_deposit_receipt_mut() {
|
||||
receipt.deposit_nonce = None;
|
||||
}
|
||||
receipt
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
return ordered_trie_root_with_encoder(receipts.as_slice(), |r, buf| r.encode_2718(buf))
|
||||
}
|
||||
|
||||
ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_2718(buf))
|
||||
}
|
||||
|
||||
/// Calculates the receipt root for a header for the reference type of an OP receipt.
|
||||
///
|
||||
/// NOTE: Prefer calculate receipt root optimism if you have log blooms memoized.
|
||||
pub fn calculate_receipt_root_no_memo_optimism<R: DepositReceipt>(
|
||||
receipts: &[R],
|
||||
chain_spec: impl OpHardforks,
|
||||
timestamp: u64,
|
||||
) -> B256 {
|
||||
// There is a minor bug in op-geth and op-erigon where in the Regolith hardfork,
|
||||
// the receipt root calculation does not include the deposit nonce in the receipt
|
||||
// encoding. In the Regolith Hardfork, we must strip the deposit nonce from the
|
||||
// receipts before calculating the receipt root. This was corrected in the Canyon
|
||||
// hardfork.
|
||||
if chain_spec.is_regolith_active_at_timestamp(timestamp) &&
|
||||
!chain_spec.is_canyon_active_at_timestamp(timestamp)
|
||||
{
|
||||
let receipts = receipts
|
||||
.iter()
|
||||
.map(|r| {
|
||||
let mut r = (*r).clone();
|
||||
if let Some(receipt) = r.as_deposit_receipt_mut() {
|
||||
receipt.deposit_nonce = None;
|
||||
}
|
||||
r
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
return ordered_trie_root_with_encoder(&receipts, |r, buf| {
|
||||
r.with_bloom_ref().encode_2718(buf);
|
||||
})
|
||||
}
|
||||
|
||||
ordered_trie_root_with_encoder(receipts, |r, buf| {
|
||||
r.with_bloom_ref().encode_2718(buf);
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use alloy_consensus::{Receipt, ReceiptWithBloom, TxReceipt};
|
||||
use alloy_primitives::{b256, bloom, hex, Address, Bytes, Log, LogData};
|
||||
use op_alloy_consensus::OpDepositReceipt;
|
||||
use reth_optimism_chainspec::BASE_SEPOLIA;
|
||||
use reth_optimism_primitives::OpReceipt;
|
||||
|
||||
/// Tests that the receipt root is computed correctly for the regolith block.
|
||||
/// This was implemented due to a minor bug in op-geth and op-erigon where in
|
||||
/// the Regolith hardfork, the receipt root calculation does not include the
|
||||
/// deposit nonce in the receipt encoding.
|
||||
/// To fix this an op-reth patch was applied to the receipt root calculation
|
||||
/// to strip the deposit nonce from each receipt before calculating the root.
|
||||
#[test]
|
||||
fn check_optimism_receipt_root() {
|
||||
let cases = [
|
||||
// Deposit nonces didn't exist in Bedrock; No need to strip. For the purposes of this
|
||||
// test, we do have them, so we should get the same root as Canyon.
|
||||
(
|
||||
"bedrock",
|
||||
1679079599,
|
||||
b256!("0xe255fed45eae7ede0556fe4fabc77b0d294d18781a5a581cab09127bc4cd9ffb"),
|
||||
),
|
||||
// Deposit nonces introduced in Regolith. They weren't included in the receipt RLP,
|
||||
// so we need to strip them - the receipt root will differ.
|
||||
(
|
||||
"regolith",
|
||||
1679079600,
|
||||
b256!("0xe255fed45eae7ede0556fe4fabc77b0d294d18781a5a581cab09127bc4cd9ffb"),
|
||||
),
|
||||
// Receipt root hashing bug fixed in Canyon. Back to including the deposit nonce
|
||||
// in the receipt RLP when computing the receipt root.
|
||||
(
|
||||
"canyon",
|
||||
1699981200,
|
||||
b256!("0x6eefbb5efb95235476654a8bfbf8cb64a4f5f0b0c80b700b0c5964550beee6d7"),
|
||||
),
|
||||
];
|
||||
|
||||
for case in cases {
|
||||
let receipts = [
|
||||
// 0xb0d6ee650637911394396d81172bd1c637d568ed1fbddab0daddfca399c58b53
|
||||
OpReceipt::Deposit(OpDepositReceipt {
|
||||
inner: Receipt {
|
||||
status: true.into(),
|
||||
cumulative_gas_used: 46913,
|
||||
logs: vec![],
|
||||
},
|
||||
deposit_nonce: Some(4012991u64),
|
||||
deposit_receipt_version: None,
|
||||
}),
|
||||
// 0x2f433586bae30573c393adfa02bc81d2a1888a3d6c9869f473fb57245166bd9a
|
||||
OpReceipt::Eip1559(Receipt {
|
||||
status: true.into(),
|
||||
cumulative_gas_used: 118083,
|
||||
logs: vec![
|
||||
Log {
|
||||
address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(),
|
||||
data: LogData::new_unchecked(
|
||||
vec![
|
||||
b256!("0xc3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"),
|
||||
b256!("0x000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"),
|
||||
b256!("0x000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"),
|
||||
b256!("0x0000000000000000000000000000000000000000000000000000000000000000"),
|
||||
],
|
||||
Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001"))
|
||||
)
|
||||
},
|
||||
Log {
|
||||
address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(),
|
||||
data: LogData::new_unchecked(
|
||||
vec![
|
||||
b256!("0xc3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"),
|
||||
b256!("0x000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"),
|
||||
b256!("0x0000000000000000000000000000000000000000000000000000000000000000"),
|
||||
b256!("0x000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"),
|
||||
],
|
||||
Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000001"))
|
||||
)
|
||||
},
|
||||
Log {
|
||||
address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(),
|
||||
data: LogData::new_unchecked(
|
||||
vec![
|
||||
b256!("0x0eb774bb9698a73583fe07b6972cf2dcc08d1d97581a22861f45feb86b395820"),
|
||||
b256!("0x000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"),
|
||||
b256!("0x000000000000000000000000c498902843af527e674846bb7edefa8ad62b8fb9"),
|
||||
], Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000003")))
|
||||
},
|
||||
]}),
|
||||
// 0x6c33676e8f6077f46a62eabab70bc6d1b1b18a624b0739086d77093a1ecf8266
|
||||
OpReceipt::Eip1559(Receipt {
|
||||
status: true.into(),
|
||||
cumulative_gas_used: 189253,
|
||||
logs: vec![
|
||||
Log {
|
||||
address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(),
|
||||
data: LogData::new_unchecked(vec![
|
||||
b256!("0xc3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"),
|
||||
b256!("0x0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"),
|
||||
b256!("0x0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"),
|
||||
b256!("0x0000000000000000000000000000000000000000000000000000000000000000"),
|
||||
],
|
||||
Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001")))
|
||||
},
|
||||
Log {
|
||||
address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(),
|
||||
data: LogData::new_unchecked(vec![
|
||||
b256!("0xc3d58168c5ae7397731d063d5bbf3d657854427343f4c083240f7aacaa2d0f62"),
|
||||
b256!("0x0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"),
|
||||
b256!("0x0000000000000000000000000000000000000000000000000000000000000000"),
|
||||
b256!("0x0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"),
|
||||
],
|
||||
Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000001")))
|
||||
},
|
||||
Log {
|
||||
address: hex!("ddb6dcce6b794415145eb5caa6cd335aeda9c272").into(),
|
||||
data: LogData::new_unchecked(vec![
|
||||
b256!("0x0eb774bb9698a73583fe07b6972cf2dcc08d1d97581a22861f45feb86b395820"),
|
||||
b256!("0x0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"),
|
||||
b256!("0x0000000000000000000000009d521a04bee134ff8136d2ec957e5bc8c50394ec"),
|
||||
],
|
||||
Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000003")))
|
||||
},
|
||||
],
|
||||
}),
|
||||
// 0x4d3ecbef04ba7ce7f5ab55be0c61978ca97c117d7da448ed9771d4ff0c720a3f
|
||||
OpReceipt::Eip1559(Receipt {
|
||||
status: true.into(),
|
||||
cumulative_gas_used: 346969,
|
||||
logs: vec![
|
||||
Log {
|
||||
address: hex!("4200000000000000000000000000000000000006").into(),
|
||||
data: LogData::new_unchecked( vec![
|
||||
b256!("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"),
|
||||
b256!("0x000000000000000000000000c3feb4ef4c2a5af77add15c95bd98f6b43640cc8"),
|
||||
b256!("0x0000000000000000000000002992607c1614484fe6d865088e5c048f0650afd4"),
|
||||
],
|
||||
Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000018de76816d8000")))
|
||||
},
|
||||
Log {
|
||||
address: hex!("cf8e7e6b26f407dee615fc4db18bf829e7aa8c09").into(),
|
||||
data: LogData::new_unchecked( vec![
|
||||
b256!("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"),
|
||||
b256!("0x0000000000000000000000002992607c1614484fe6d865088e5c048f0650afd4"),
|
||||
b256!("0x0000000000000000000000008dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09"),
|
||||
],
|
||||
Bytes::from_static(&hex!("000000000000000000000000000000000000000000000002d24d8e9ac1aa79e2")))
|
||||
},
|
||||
Log {
|
||||
address: hex!("2992607c1614484fe6d865088e5c048f0650afd4").into(),
|
||||
data: LogData::new_unchecked( vec![
|
||||
b256!("0x1c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1"),
|
||||
],
|
||||
Bytes::from_static(&hex!("000000000000000000000000000000000000000000000009bd50642785c15736000000000000000000000000000000000000000000011bb7ac324f724a29bbbf")))
|
||||
},
|
||||
Log {
|
||||
address: hex!("2992607c1614484fe6d865088e5c048f0650afd4").into(),
|
||||
data: LogData::new_unchecked( vec![
|
||||
b256!("0xd78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822"),
|
||||
b256!("0x00000000000000000000000029843613c7211d014f5dd5718cf32bcd314914cb"),
|
||||
b256!("0x0000000000000000000000008dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09"),
|
||||
],
|
||||
Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000018de76816d800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002d24d8e9ac1aa79e2")))
|
||||
},
|
||||
Log {
|
||||
address: hex!("6d0f8d488b669aa9ba2d0f0b7b75a88bf5051cd3").into(),
|
||||
data: LogData::new_unchecked( vec![
|
||||
b256!("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"),
|
||||
b256!("0x0000000000000000000000008dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09"),
|
||||
b256!("0x000000000000000000000000c3feb4ef4c2a5af77add15c95bd98f6b43640cc8"),
|
||||
],
|
||||
Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000014bc73062aea8093")))
|
||||
},
|
||||
Log {
|
||||
address: hex!("8dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09").into(),
|
||||
data: LogData::new_unchecked( vec![
|
||||
b256!("0x1c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1"),
|
||||
],
|
||||
Bytes::from_static(&hex!("00000000000000000000000000000000000000000000002f122cfadc1ca82a35000000000000000000000000000000000000000000000665879dc0609945d6d1")))
|
||||
},
|
||||
Log {
|
||||
address: hex!("8dbffe4c8bf3caf5deae3a99b50cfcf3648cbc09").into(),
|
||||
data: LogData::new_unchecked( vec![
|
||||
b256!("0xd78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822"),
|
||||
b256!("0x00000000000000000000000029843613c7211d014f5dd5718cf32bcd314914cb"),
|
||||
b256!("0x000000000000000000000000c3feb4ef4c2a5af77add15c95bd98f6b43640cc8"),
|
||||
],
|
||||
Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002d24d8e9ac1aa79e200000000000000000000000000000000000000000000000014bc73062aea80930000000000000000000000000000000000000000000000000000000000000000")))
|
||||
},
|
||||
],
|
||||
}),
|
||||
// 0xf738af5eb00ba23dbc1be2dbce41dbc0180f0085b7fb46646e90bf737af90351
|
||||
OpReceipt::Eip1559(Receipt {
|
||||
status: true.into(),
|
||||
cumulative_gas_used: 623249,
|
||||
logs: vec![
|
||||
Log {
|
||||
address: hex!("ac6564f3718837caadd42eed742d75c12b90a052").into(),
|
||||
data: LogData::new_unchecked( vec![
|
||||
b256!("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"),
|
||||
b256!("0x0000000000000000000000000000000000000000000000000000000000000000"),
|
||||
b256!("0x000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e"),
|
||||
b256!("0x000000000000000000000000000000000000000000000000000000000011a1d3"),
|
||||
],
|
||||
Default::default())
|
||||
},
|
||||
Log {
|
||||
address: hex!("ac6564f3718837caadd42eed742d75c12b90a052").into(),
|
||||
data: LogData::new_unchecked( vec![
|
||||
b256!("0x9d89e36eadf856db0ad9ffb5a569e07f95634dddd9501141ecf04820484ad0dc"),
|
||||
b256!("0x000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e"),
|
||||
b256!("0x000000000000000000000000000000000000000000000000000000000011a1d3"),
|
||||
],
|
||||
Bytes::from_static(&hex!("00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000037697066733a2f2f516d515141646b33736538396b47716577395256567a316b68643548375562476d4d4a485a62566f386a6d346f4a2f30000000000000000000")))
|
||||
},
|
||||
Log {
|
||||
address: hex!("ac6564f3718837caadd42eed742d75c12b90a052").into(),
|
||||
data: LogData::new_unchecked( vec![
|
||||
b256!("0x110d160a1bedeea919a88fbc4b2a9fb61b7e664084391b6ca2740db66fef80fe"),
|
||||
b256!("0x00000000000000000000000084d47f6eea8f8d87910448325519d1bb45c2972a"),
|
||||
b256!("0x000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e"),
|
||||
b256!("0x000000000000000000000000000000000000000000000000000000000011a1d3"),
|
||||
],
|
||||
Bytes::from_static(&hex!("0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000a4fa7f3fbf0677f254ebdb1646146864c305b76e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007717500762343034303661353035646234633961386163316433306335633332303265370000000000000000000000000000000000000000000000000000000000000037697066733a2f2f516d515141646b33736538396b47716577395256567a316b68643548375562476d4d4a485a62566f386a6d346f4a2f30000000000000000000")))
|
||||
},
|
||||
],
|
||||
}),
|
||||
];
|
||||
let root = calculate_receipt_root_optimism(
|
||||
&receipts.iter().map(TxReceipt::with_bloom_ref).collect::<Vec<_>>(),
|
||||
BASE_SEPOLIA.as_ref(),
|
||||
case.1,
|
||||
);
|
||||
assert_eq!(root, case.2);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn check_receipt_root_optimism() {
|
||||
let logs = vec![Log {
|
||||
address: Address::ZERO,
|
||||
data: LogData::new_unchecked(vec![], Default::default()),
|
||||
}];
|
||||
let logs_bloom = bloom!(
|
||||
"00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"
|
||||
);
|
||||
let inner =
|
||||
OpReceipt::Eip2930(Receipt { status: true.into(), cumulative_gas_used: 102068, logs });
|
||||
let receipt = ReceiptWithBloom { receipt: &inner, logs_bloom };
|
||||
let receipt = vec![receipt];
|
||||
let root = calculate_receipt_root_optimism(&receipt, BASE_SEPOLIA.as_ref(), 0);
|
||||
assert_eq!(
|
||||
root,
|
||||
b256!("0xfe70ae4a136d98944951b2123859698d59ad251a381abc9960fa81cae3d0d4a0")
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1,41 +0,0 @@
//! Canyon consensus rule checks.

use alloy_consensus::BlockHeader;
use alloy_trie::EMPTY_ROOT_HASH;
use reth_consensus::ConsensusError;
use reth_primitives_traits::{BlockBody, GotExpected};

use crate::OpConsensusError;

/// Verifies that withdrawals root in block header (Shanghai) is always [`EMPTY_ROOT_HASH`] in
/// Canyon.
#[inline]
pub fn ensure_empty_withdrawals_root<H: BlockHeader>(header: &H) -> Result<(), ConsensusError> {
    // Shanghai rule
    let header_withdrawals_root =
        &header.withdrawals_root().ok_or(ConsensusError::WithdrawalsRootMissing)?;

    // Canyon rules
    if *header_withdrawals_root != EMPTY_ROOT_HASH {
        return Err(ConsensusError::BodyWithdrawalsRootDiff(
            GotExpected { got: *header_withdrawals_root, expected: EMPTY_ROOT_HASH }.into(),
        ));
    }

    Ok(())
}

/// Verifies that withdrawals in block body (Shanghai) is always empty in Canyon.
/// <https://specs.optimism.io/protocol/rollup-node-p2p.html#block-validation>
#[inline]
pub fn ensure_empty_shanghai_withdrawals<T: BlockBody>(body: &T) -> Result<(), OpConsensusError> {
    // Shanghai rule
    let withdrawals = body.withdrawals().ok_or(ConsensusError::BodyWithdrawalsMissing)?;

    // Canyon rule
    if !withdrawals.as_ref().is_empty() {
        return Err(OpConsensusError::WithdrawalsNonEmpty)
    }

    Ok(())
}
@@ -1,195 +0,0 @@
//! Block verification w.r.t. consensus rules new in Isthmus hardfork.

use crate::OpConsensusError;
use alloy_consensus::BlockHeader;
use alloy_primitives::B256;
use alloy_trie::EMPTY_ROOT_HASH;
use reth_optimism_primitives::L2_TO_L1_MESSAGE_PASSER_ADDRESS;
use reth_storage_api::{errors::ProviderResult, StorageRootProvider};
use reth_trie_common::HashedStorage;
use revm::database::BundleState;
use tracing::warn;

/// Verifies that `withdrawals_root` (i.e. `l2tol1-msg-passer` storage root since Isthmus) field is
/// set in block header.
pub fn ensure_withdrawals_storage_root_is_some<H: BlockHeader>(
    header: H,
) -> Result<(), OpConsensusError> {
    header.withdrawals_root().ok_or(OpConsensusError::L2WithdrawalsRootMissing)?;

    Ok(())
}

/// Computes the storage root of predeploy `L2ToL1MessagePasser.sol`.
///
/// Uses state updates from block execution. See also [`withdrawals_root_prehashed`].
pub fn withdrawals_root<DB: StorageRootProvider>(
    state_updates: &BundleState,
    state: DB,
) -> ProviderResult<B256> {
    // if l2 withdrawals transactions were executed there will be storage updates for
    // `L2ToL1MessagePasser.sol` predeploy
    withdrawals_root_prehashed(
        state_updates
            .state()
            .get(&L2_TO_L1_MESSAGE_PASSER_ADDRESS)
            .map(|acc| {
                HashedStorage::from_plain_storage(
                    acc.status,
                    acc.storage.iter().map(|(slot, value)| (slot, &value.present_value)),
                )
            })
            .unwrap_or_default(),
        state,
    )
}

/// Computes the storage root of predeploy `L2ToL1MessagePasser.sol`.
///
/// Uses pre-hashed storage updates of `L2ToL1MessagePasser.sol` predeploy, resulting from
/// execution of L2 withdrawals transactions. If none, takes empty [`HashedStorage::default`].
pub fn withdrawals_root_prehashed<DB: StorageRootProvider>(
    hashed_storage_updates: HashedStorage,
    state: DB,
) -> ProviderResult<B256> {
    state.storage_root(L2_TO_L1_MESSAGE_PASSER_ADDRESS, hashed_storage_updates)
}

/// Verifies block header field `withdrawals_root` against storage root of
/// `L2ToL1MessagePasser.sol` predeploy post block execution.
///
/// Takes state updates resulting from execution of block.
///
/// See <https://specs.optimism.io/protocol/isthmus/exec-engine.html#l2tol1messagepasser-storage-root-in-header>.
pub fn verify_withdrawals_root<DB, H>(
    state_updates: &BundleState,
    state: DB,
    header: H,
) -> Result<(), OpConsensusError>
where
    DB: StorageRootProvider,
    H: BlockHeader,
{
    let header_storage_root =
        header.withdrawals_root().ok_or(OpConsensusError::L2WithdrawalsRootMissing)?;

    let storage_root = withdrawals_root(state_updates, state)
        .map_err(OpConsensusError::L2WithdrawalsRootCalculationFail)?;

    if storage_root == EMPTY_ROOT_HASH {
        // if there was no MessagePasser contract storage, something is wrong
        // (it should at least store an implementation address and owner address)
        warn!("isthmus: no storage root for L2ToL1MessagePasser contract");
    }

    if header_storage_root != storage_root {
        return Err(OpConsensusError::L2WithdrawalsRootMismatch {
            header: header_storage_root,
            exec_res: storage_root,
        })
    }

    Ok(())
}

/// Verifies block header field `withdrawals_root` against storage root of
/// `L2ToL1MessagePasser.sol` predeploy post block execution.
///
/// Takes pre-hashed storage updates of `L2ToL1MessagePasser.sol` predeploy, resulting from
/// execution of block, if any. Otherwise takes empty [`HashedStorage::default`].
///
/// See <https://specs.optimism.io/protocol/isthmus/exec-engine.html#l2tol1messagepasser-storage-root-in-header>.
pub fn verify_withdrawals_root_prehashed<DB, H>(
    hashed_storage_updates: HashedStorage,
    state: DB,
    header: H,
) -> Result<(), OpConsensusError>
where
    DB: StorageRootProvider,
    H: BlockHeader,
{
    let header_storage_root =
        header.withdrawals_root().ok_or(OpConsensusError::L2WithdrawalsRootMissing)?;

    let storage_root = withdrawals_root_prehashed(hashed_storage_updates, state)
        .map_err(OpConsensusError::L2WithdrawalsRootCalculationFail)?;

    if header_storage_root != storage_root {
        return Err(OpConsensusError::L2WithdrawalsRootMismatch {
            header: header_storage_root,
            exec_res: storage_root,
        })
    }

    Ok(())
}

#[cfg(test)]
mod test {
    use super::*;
    use alloc::sync::Arc;
    use alloy_chains::Chain;
    use alloy_consensus::Header;
    use alloy_primitives::{keccak256, B256, U256};
    use core::str::FromStr;
    use reth_db_common::init::init_genesis;
    use reth_optimism_chainspec::OpChainSpecBuilder;
    use reth_optimism_node::OpNode;
    use reth_provider::{
        providers::BlockchainProvider, test_utils::create_test_provider_factory_with_node_types,
        StateWriter,
    };
    use reth_revm::db::BundleState;
    use reth_storage_api::StateProviderFactory;
    use reth_trie::{test_utils::storage_root_prehashed, HashedStorage};
    use reth_trie_common::HashedPostState;

    #[test]
    fn l2tol1_message_passer_no_withdrawals() {
        let hashed_address = keccak256(L2_TO_L1_MESSAGE_PASSER_ADDRESS);

        // create account storage
        let init_storage = HashedStorage::from_iter(
            false,
            [
                "50000000000000000000000000000004253371b55351a08cb3267d4d265530b6",
                "512428ed685fff57294d1a9cbb147b18ae5db9cf6ae4b312fa1946ba0561882e",
                "51e6784c736ef8548f856909870b38e49ef7a4e3e77e5e945e0d5e6fcaa3037f",
            ]
            .into_iter()
            .map(|str| (B256::from_str(str).unwrap(), U256::from(1))),
        );
        let mut state = HashedPostState::default();
        state.storages.insert(hashed_address, init_storage.clone());

        // init test db
        // note: must be empty (default) chain spec to ensure storage is empty after init genesis,
        // otherwise can't use `storage_root_prehashed` to determine storage root later
        let provider_factory = create_test_provider_factory_with_node_types::<OpNode>(Arc::new(
            OpChainSpecBuilder::default().chain(Chain::dev()).genesis(Default::default()).build(),
        ));
        let _ = init_genesis(&provider_factory).unwrap();

        // write account storage to database
        let provider_rw = provider_factory.provider_rw().unwrap();
        provider_rw.write_hashed_state(&state.clone().into_sorted()).unwrap();
        provider_rw.commit().unwrap();

        // create block header with withdrawals root set to storage root of l2tol1-msg-passer
        let header = Header {
            withdrawals_root: Some(storage_root_prehashed(init_storage.storage)),
            ..Default::default()
        };

        // create state provider factory
        let state_provider_factory = BlockchainProvider::new(provider_factory).unwrap();

        // validate block against existing state by passing empty state updates
        verify_withdrawals_root(
            &BundleState::default(),
            state_provider_factory.latest().expect("load state"),
            &header,
        )
        .unwrap();
    }
}
@@ -1,588 +0,0 @@
|
||||
//! Verification of blocks w.r.t. Optimism hardforks.
|
||||
|
||||
pub mod canyon;
|
||||
pub mod isthmus;
|
||||
|
||||
// Re-export the decode_holocene_base_fee function for compatibility
|
||||
use reth_execution_types::BlockExecutionResult;
|
||||
pub use reth_optimism_chainspec::decode_holocene_base_fee;
|
||||
|
||||
use crate::proof::calculate_receipt_root_optimism;
|
||||
use alloc::vec::Vec;
|
||||
use alloy_consensus::{BlockHeader, TxReceipt, EMPTY_OMMER_ROOT_HASH};
|
||||
use alloy_eips::Encodable2718;
|
||||
use alloy_primitives::{Bloom, Bytes, B256};
|
||||
use alloy_trie::EMPTY_ROOT_HASH;
|
||||
use reth_consensus::ConsensusError;
|
||||
use reth_optimism_forks::OpHardforks;
|
||||
use reth_optimism_primitives::DepositReceipt;
|
||||
use reth_primitives_traits::{receipt::gas_spent_by_transactions, BlockBody, GotExpected};
|
||||
|
||||
/// Ensures the block response data matches the header.
|
||||
///
|
||||
/// This ensures the body response items match the header's hashes:
|
||||
/// - ommer hash
|
||||
/// - transaction root
|
||||
/// - withdrawals root: the body's withdrawals root must only match the header's before isthmus
|
||||
pub fn validate_body_against_header_op<B, H>(
|
||||
chain_spec: impl OpHardforks,
|
||||
body: &B,
|
||||
header: &H,
|
||||
) -> Result<(), ConsensusError>
|
||||
where
|
||||
B: BlockBody,
|
||||
H: reth_primitives_traits::BlockHeader,
|
||||
{
|
||||
let ommers_hash = body.calculate_ommers_root();
|
||||
if Some(header.ommers_hash()) != ommers_hash {
|
||||
return Err(ConsensusError::BodyOmmersHashDiff(
|
||||
GotExpected {
|
||||
got: ommers_hash.unwrap_or(EMPTY_OMMER_ROOT_HASH),
|
||||
expected: header.ommers_hash(),
|
||||
}
|
||||
.into(),
|
||||
))
|
||||
}
|
||||
|
||||
let tx_root = body.calculate_tx_root();
|
||||
if header.transactions_root() != tx_root {
|
||||
return Err(ConsensusError::BodyTransactionRootDiff(
|
||||
GotExpected { got: tx_root, expected: header.transactions_root() }.into(),
|
||||
))
|
||||
}
|
||||
|
||||
match (header.withdrawals_root(), body.calculate_withdrawals_root()) {
|
||||
(Some(header_withdrawals_root), Some(withdrawals_root)) => {
|
||||
// after isthmus, the withdrawals root field is repurposed and no longer mirrors the
|
||||
// withdrawals root computed from the body
|
||||
if chain_spec.is_isthmus_active_at_timestamp(header.timestamp()) {
|
||||
// After isthmus we only ensure that the body has empty withdrawals
|
||||
if withdrawals_root != EMPTY_ROOT_HASH {
|
||||
return Err(ConsensusError::BodyWithdrawalsRootDiff(
|
||||
GotExpected { got: withdrawals_root, expected: EMPTY_ROOT_HASH }.into(),
|
||||
))
|
||||
}
|
||||
} else {
|
||||
// before isthmus we ensure that the header root matches the body
|
||||
if withdrawals_root != header_withdrawals_root {
|
||||
return Err(ConsensusError::BodyWithdrawalsRootDiff(
|
||||
GotExpected { got: withdrawals_root, expected: header_withdrawals_root }
|
||||
.into(),
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
(None, None) => {
|
||||
// this is ok because we assume the fork is not active in this case
|
||||
}
|
||||
_ => return Err(ConsensusError::WithdrawalsRootUnexpected),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
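// A minimal call sketch (hedged; the chain spec and block values are illustrative — see the
// `body_against_header_isthmus` test below for a concrete invocation):
//
// validate_body_against_header_op(&isthmus_chainspec(), &block.body, &block.header)?;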
|
||||
|
||||
/// Validate a block with regard to execution results:
|
||||
///
|
||||
/// - Compares the receipts root in the block header to the block body
|
||||
/// - Compares the gas used in the block header to the actual gas usage after execution
|
||||
///
|
||||
/// If `receipt_root_bloom` is provided, the pre-computed receipt root and logs bloom are used
|
||||
/// instead of computing them from the receipts.
|
||||
pub fn validate_block_post_execution<R: DepositReceipt>(
|
||||
header: impl BlockHeader,
|
||||
chain_spec: impl OpHardforks,
|
||||
result: &BlockExecutionResult<R>,
|
||||
receipt_root_bloom: Option<(B256, Bloom)>,
|
||||
) -> Result<(), ConsensusError> {
|
||||
// Validate that the blob gas used is present and correctly computed if Jovian is active.
|
||||
if chain_spec.is_jovian_active_at_timestamp(header.timestamp()) {
|
||||
let computed_blob_gas_used = result.blob_gas_used;
|
||||
let header_blob_gas_used =
|
||||
header.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?;
|
||||
|
||||
if computed_blob_gas_used != header_blob_gas_used {
|
||||
return Err(ConsensusError::BlobGasUsedDiff(GotExpected {
|
||||
got: computed_blob_gas_used,
|
||||
expected: header_blob_gas_used,
|
||||
}));
|
||||
}
|
||||
}
|
||||
|
||||
let receipts = &result.receipts;
|
||||
|
||||
// Before Byzantium, receipts contained a state root, which meant the expensive
// hashing required for the state root was computed for every transaction.
// This was replaced with the is_success flag.
// See more about the EIP here: https://eips.ethereum.org/EIPS/eip-658
|
||||
if chain_spec.is_byzantium_active_at_block(header.number()) {
|
||||
let result = if let Some((receipts_root, logs_bloom)) = receipt_root_bloom {
|
||||
compare_receipts_root_and_logs_bloom(
|
||||
receipts_root,
|
||||
logs_bloom,
|
||||
header.receipts_root(),
|
||||
header.logs_bloom(),
|
||||
)
|
||||
} else {
|
||||
verify_receipts_optimism(
|
||||
header.receipts_root(),
|
||||
header.logs_bloom(),
|
||||
receipts,
|
||||
chain_spec,
|
||||
header.timestamp(),
|
||||
)
|
||||
};
|
||||
|
||||
if let Err(error) = result {
|
||||
let receipts = receipts
|
||||
.iter()
|
||||
.map(|r| Bytes::from(r.with_bloom_ref().encoded_2718()))
|
||||
.collect::<Vec<_>>();
|
||||
tracing::debug!(%error, ?receipts, "receipts verification failed");
|
||||
return Err(error)
|
||||
}
|
||||
}
|
||||
|
||||
// Check if gas used matches the value set in header.
|
||||
let cumulative_gas_used =
|
||||
receipts.last().map(|receipt| receipt.cumulative_gas_used()).unwrap_or(0);
|
||||
if header.gas_used() != cumulative_gas_used {
|
||||
return Err(ConsensusError::BlockGasUsed {
|
||||
gas: GotExpected { got: cumulative_gas_used, expected: header.gas_used() },
|
||||
gas_spent_by_tx: gas_spent_by_transactions(receipts),
|
||||
})
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Verify the calculated receipts root against the expected receipts root.
|
||||
fn verify_receipts_optimism<R: DepositReceipt>(
|
||||
expected_receipts_root: B256,
|
||||
expected_logs_bloom: Bloom,
|
||||
receipts: &[R],
|
||||
chain_spec: impl OpHardforks,
|
||||
timestamp: u64,
|
||||
) -> Result<(), ConsensusError> {
|
||||
// Calculate receipts root.
|
||||
let receipts_with_bloom = receipts.iter().map(TxReceipt::with_bloom_ref).collect::<Vec<_>>();
|
||||
let receipts_root =
|
||||
calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp);
|
||||
|
||||
// Calculate header logs bloom.
|
||||
let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom_ref());
|
||||
|
||||
compare_receipts_root_and_logs_bloom(
|
||||
receipts_root,
|
||||
logs_bloom,
|
||||
expected_receipts_root,
|
||||
expected_logs_bloom,
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Compare the calculated receipts root with the expected receipts root, also compare
|
||||
/// the calculated logs bloom with the expected logs bloom.
|
||||
fn compare_receipts_root_and_logs_bloom(
|
||||
calculated_receipts_root: B256,
|
||||
calculated_logs_bloom: Bloom,
|
||||
expected_receipts_root: B256,
|
||||
expected_logs_bloom: Bloom,
|
||||
) -> Result<(), ConsensusError> {
|
||||
if calculated_receipts_root != expected_receipts_root {
|
||||
return Err(ConsensusError::BodyReceiptRootDiff(
|
||||
GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(),
|
||||
))
|
||||
}
|
||||
|
||||
if calculated_logs_bloom != expected_logs_bloom {
|
||||
return Err(ConsensusError::BodyBloomLogDiff(
|
||||
GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(),
|
||||
))
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use alloy_consensus::Header;
|
||||
use alloy_eips::eip7685::Requests;
|
||||
use alloy_primitives::{b256, hex, Bytes, U256};
|
||||
use op_alloy_consensus::OpTxEnvelope;
|
||||
use reth_chainspec::{BaseFeeParams, ChainSpec, EthChainSpec, ForkCondition, Hardfork};
|
||||
use reth_optimism_chainspec::{OpChainSpec, BASE_SEPOLIA};
|
||||
use reth_optimism_forks::{OpHardfork, BASE_SEPOLIA_HARDFORKS};
|
||||
use reth_optimism_primitives::OpReceipt;
|
||||
use std::sync::Arc;
|
||||
|
||||
const HOLOCENE_TIMESTAMP: u64 = 1700000000;
|
||||
const ISTHMUS_TIMESTAMP: u64 = 1750000000;
|
||||
const JOVIAN_TIMESTAMP: u64 = 1800000000;
|
||||
const BLOCK_TIME_SECONDS: u64 = 2;
|
||||
|
||||
fn holocene_chainspec() -> Arc<OpChainSpec> {
|
||||
let mut hardforks = BASE_SEPOLIA_HARDFORKS.clone();
|
||||
hardforks
|
||||
.insert(OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(HOLOCENE_TIMESTAMP));
|
||||
Arc::new(OpChainSpec {
|
||||
inner: ChainSpec {
|
||||
chain: BASE_SEPOLIA.inner.chain,
|
||||
genesis: BASE_SEPOLIA.inner.genesis.clone(),
|
||||
genesis_header: BASE_SEPOLIA.inner.genesis_header.clone(),
|
||||
paris_block_and_final_difficulty: Some((0, U256::from(0))),
|
||||
hardforks,
|
||||
base_fee_params: BASE_SEPOLIA.inner.base_fee_params.clone(),
|
||||
prune_delete_limit: 10000,
|
||||
..Default::default()
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
fn isthmus_chainspec() -> OpChainSpec {
|
||||
let mut chainspec = BASE_SEPOLIA.as_ref().clone();
|
||||
chainspec
|
||||
.inner
|
||||
.hardforks
|
||||
.insert(OpHardfork::Isthmus.boxed(), ForkCondition::Timestamp(ISTHMUS_TIMESTAMP));
|
||||
chainspec
|
||||
}
|
||||
|
||||
fn jovian_chainspec() -> OpChainSpec {
|
||||
let mut chainspec = BASE_SEPOLIA.as_ref().clone();
|
||||
chainspec
|
||||
.inner
|
||||
.hardforks
|
||||
.insert(OpHardfork::Jovian.boxed(), ForkCondition::Timestamp(JOVIAN_TIMESTAMP));
|
||||
chainspec
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_base_fee_pre_holocene() {
|
||||
let op_chain_spec = BASE_SEPOLIA.clone();
|
||||
let parent = Header {
|
||||
base_fee_per_gas: Some(1),
|
||||
gas_used: 15763614,
|
||||
gas_limit: 144000000,
|
||||
..Default::default()
|
||||
};
|
||||
let base_fee =
|
||||
reth_optimism_chainspec::OpChainSpec::next_block_base_fee(&op_chain_spec, &parent, 0);
|
||||
assert_eq!(
|
||||
base_fee.unwrap(),
|
||||
op_chain_spec.next_block_base_fee(&parent, 0).unwrap_or_default()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_base_fee_holocene_extra_data_not_set() {
|
||||
let op_chain_spec = holocene_chainspec();
|
||||
let parent = Header {
|
||||
base_fee_per_gas: Some(1),
|
||||
gas_used: 15763614,
|
||||
gas_limit: 144000000,
|
||||
timestamp: HOLOCENE_TIMESTAMP + 3,
|
||||
extra_data: Bytes::from_static(&[0, 0, 0, 0, 0, 0, 0, 0, 0]),
|
||||
..Default::default()
|
||||
};
|
||||
let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee(
|
||||
&op_chain_spec,
|
||||
&parent,
|
||||
HOLOCENE_TIMESTAMP + 5,
|
||||
);
|
||||
assert_eq!(
|
||||
base_fee.unwrap(),
|
||||
op_chain_spec.next_block_base_fee(&parent, 0).unwrap_or_default()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_base_fee_holocene_extra_data_set() {
|
||||
let parent = Header {
|
||||
base_fee_per_gas: Some(1),
|
||||
gas_used: 15763614,
|
||||
gas_limit: 144000000,
|
||||
extra_data: Bytes::from_static(&[0, 0, 0, 0, 8, 0, 0, 0, 8]),
|
||||
timestamp: HOLOCENE_TIMESTAMP + 3,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee(
|
||||
&holocene_chainspec(),
|
||||
&parent,
|
||||
HOLOCENE_TIMESTAMP + 5,
|
||||
);
|
||||
assert_eq!(
|
||||
base_fee.unwrap(),
|
||||
parent
|
||||
.next_block_base_fee(BaseFeeParams::new(0x00000008, 0x00000008))
|
||||
.unwrap_or_default()
|
||||
);
|
||||
}
|
||||
|
||||
// <https://sepolia.basescan.org/block/19773628>
|
||||
#[test]
|
||||
fn test_get_base_fee_holocene_extra_data_set_base_sepolia() {
|
||||
let parent = Header {
|
||||
base_fee_per_gas: Some(507),
|
||||
gas_used: 4847634,
|
||||
gas_limit: 60000000,
|
||||
extra_data: hex!("00000000fa0000000a").into(),
|
||||
timestamp: 1735315544,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee(
|
||||
&*BASE_SEPOLIA,
|
||||
&parent,
|
||||
1735315546,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(base_fee, 507);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_base_fee_holocene_extra_data_set_and_min_base_fee_set() {
|
||||
const MIN_BASE_FEE: u64 = 10;
|
||||
|
||||
let mut extra_data = Vec::new();
|
||||
// eip1559 params
|
||||
extra_data.append(&mut hex!("00000000fa0000000a").to_vec());
|
||||
// min base fee
|
||||
extra_data.append(&mut MIN_BASE_FEE.to_be_bytes().to_vec());
|
||||
let extra_data = Bytes::from(extra_data);
|
||||
|
||||
let parent = Header {
|
||||
base_fee_per_gas: Some(507),
|
||||
gas_used: 4847634,
|
||||
gas_limit: 60000000,
|
||||
extra_data,
|
||||
timestamp: 1735315544,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee(
|
||||
&*BASE_SEPOLIA,
|
||||
&parent,
|
||||
1735315546,
|
||||
);
|
||||
assert_eq!(base_fee, None);
|
||||
}
|
||||
|
||||
/// The version byte for Jovian is 1.
|
||||
const JOVIAN_EXTRA_DATA_VERSION_BYTE: u8 = 1;
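// As exercised by the Jovian tests below (and the Holocene extra-data tests above), the
// `extra_data` layout assumed here is:
//   byte 0       version byte (1)
//   bytes 1..9   EIP-1559 params (u32 denominator, u32 elasticity), big-endian
//   bytes 9..17  minimum base fee (u64), big-endian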
|
||||
|
||||
#[test]
|
||||
fn test_get_base_fee_jovian_extra_data_and_min_base_fee_not_set() {
|
||||
let op_chain_spec = jovian_chainspec();
|
||||
|
||||
let mut extra_data = Vec::new();
|
||||
extra_data.push(JOVIAN_EXTRA_DATA_VERSION_BYTE);
|
||||
// eip1559 params
|
||||
extra_data.append(&mut [0_u8; 8].to_vec());
|
||||
let extra_data = Bytes::from(extra_data);
|
||||
|
||||
let parent = Header {
|
||||
base_fee_per_gas: Some(1),
|
||||
gas_used: 15763614,
|
||||
gas_limit: 144000000,
|
||||
timestamp: JOVIAN_TIMESTAMP,
|
||||
extra_data,
|
||||
..Default::default()
|
||||
};
|
||||
let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee(
|
||||
&op_chain_spec,
|
||||
&parent,
|
||||
JOVIAN_TIMESTAMP + BLOCK_TIME_SECONDS,
|
||||
);
|
||||
assert_eq!(base_fee, None);
|
||||
}
|
||||
|
||||
/// After Jovian, the next block base fee cannot be less than the minimum base fee.
|
||||
#[test]
|
||||
fn test_get_base_fee_jovian_default_extra_data_and_min_base_fee() {
|
||||
const CURR_BASE_FEE: u64 = 1;
|
||||
const MIN_BASE_FEE: u64 = 10;
|
||||
|
||||
let mut extra_data = Vec::new();
|
||||
extra_data.push(JOVIAN_EXTRA_DATA_VERSION_BYTE);
|
||||
// eip1559 params
|
||||
extra_data.append(&mut [0_u8; 8].to_vec());
|
||||
// min base fee
|
||||
extra_data.append(&mut MIN_BASE_FEE.to_be_bytes().to_vec());
|
||||
let extra_data = Bytes::from(extra_data);
|
||||
|
||||
let op_chain_spec = jovian_chainspec();
|
||||
let parent = Header {
|
||||
base_fee_per_gas: Some(CURR_BASE_FEE),
|
||||
gas_used: 15763614,
|
||||
gas_limit: 144000000,
|
||||
timestamp: JOVIAN_TIMESTAMP,
|
||||
extra_data,
|
||||
..Default::default()
|
||||
};
|
||||
let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee(
|
||||
&op_chain_spec,
|
||||
&parent,
|
||||
JOVIAN_TIMESTAMP + BLOCK_TIME_SECONDS,
|
||||
);
|
||||
assert_eq!(base_fee, Some(MIN_BASE_FEE));
|
||||
}
|
||||
|
||||
/// After Jovian, the next block base fee cannot be less than the minimum base fee.
|
||||
#[test]
|
||||
fn test_jovian_min_base_fee_cannot_decrease() {
|
||||
const MIN_BASE_FEE: u64 = 10;
|
||||
|
||||
let mut extra_data = Vec::new();
|
||||
extra_data.push(JOVIAN_EXTRA_DATA_VERSION_BYTE);
|
||||
// eip1559 params
|
||||
extra_data.append(&mut [0_u8; 8].to_vec());
|
||||
// min base fee
|
||||
extra_data.append(&mut MIN_BASE_FEE.to_be_bytes().to_vec());
|
||||
let extra_data = Bytes::from(extra_data);
|
||||
|
||||
let op_chain_spec = jovian_chainspec();
|
||||
|
||||
// If we're currently at the minimum base fee, the next block base fee cannot decrease.
|
||||
let parent = Header {
|
||||
base_fee_per_gas: Some(MIN_BASE_FEE),
|
||||
gas_used: 10,
|
||||
gas_limit: 144000000,
|
||||
timestamp: JOVIAN_TIMESTAMP,
|
||||
extra_data: extra_data.clone(),
|
||||
..Default::default()
|
||||
};
|
||||
let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee(
|
||||
&op_chain_spec,
|
||||
&parent,
|
||||
JOVIAN_TIMESTAMP + BLOCK_TIME_SECONDS,
|
||||
);
|
||||
assert_eq!(base_fee, Some(MIN_BASE_FEE));
|
||||
|
||||
// The next block can increase the base fee
|
||||
let parent = Header {
|
||||
base_fee_per_gas: Some(MIN_BASE_FEE),
|
||||
gas_used: 144000000,
|
||||
gas_limit: 144000000,
|
||||
timestamp: JOVIAN_TIMESTAMP,
|
||||
extra_data,
|
||||
..Default::default()
|
||||
};
|
||||
let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee(
|
||||
&op_chain_spec,
|
||||
&parent,
|
||||
JOVIAN_TIMESTAMP + 2 * BLOCK_TIME_SECONDS,
|
||||
);
|
||||
assert_eq!(base_fee, Some(MIN_BASE_FEE + 1));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_jovian_base_fee_can_decrease_if_above_min_base_fee() {
|
||||
const MIN_BASE_FEE: u64 = 10;
|
||||
|
||||
let mut extra_data = Vec::new();
|
||||
extra_data.push(JOVIAN_EXTRA_DATA_VERSION_BYTE);
|
||||
// eip1559 params
|
||||
extra_data.append(&mut [0_u8; 8].to_vec());
|
||||
// min base fee
|
||||
extra_data.append(&mut MIN_BASE_FEE.to_be_bytes().to_vec());
|
||||
let extra_data = Bytes::from(extra_data);
|
||||
|
||||
let op_chain_spec = jovian_chainspec();
|
||||
|
||||
let parent = Header {
|
||||
base_fee_per_gas: Some(100 * MIN_BASE_FEE),
|
||||
gas_used: 10,
|
||||
gas_limit: 144000000,
|
||||
timestamp: JOVIAN_TIMESTAMP,
|
||||
extra_data,
|
||||
..Default::default()
|
||||
};
|
||||
let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee(
|
||||
&op_chain_spec,
|
||||
&parent,
|
||||
JOVIAN_TIMESTAMP + BLOCK_TIME_SECONDS,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
base_fee,
|
||||
op_chain_spec
|
||||
.inner
|
||||
.next_block_base_fee(&parent, JOVIAN_TIMESTAMP + BLOCK_TIME_SECONDS)
|
||||
.unwrap()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn body_against_header_isthmus() {
|
||||
let chainspec = isthmus_chainspec();
|
||||
let header = Header {
|
||||
base_fee_per_gas: Some(507),
|
||||
gas_used: 4847634,
|
||||
gas_limit: 60000000,
|
||||
extra_data: hex!("00000000fa0000000a").into(),
|
||||
timestamp: 1800000000,
|
||||
withdrawals_root: Some(b256!(
|
||||
"0x611e1d75cbb77fa782d79485a8384e853bc92e56883c313a51e3f9feef9a9a71"
|
||||
)),
|
||||
..Default::default()
|
||||
};
|
||||
let mut body = alloy_consensus::BlockBody::<OpTxEnvelope> {
|
||||
transactions: vec![],
|
||||
ommers: vec![],
|
||||
withdrawals: Some(Default::default()),
|
||||
};
|
||||
validate_body_against_header_op(&chainspec, &body, &header).unwrap();
|
||||
|
||||
body.withdrawals.take();
|
||||
validate_body_against_header_op(&chainspec, &body, &header).unwrap_err();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_jovian_blob_gas_used_validation() {
|
||||
const BLOB_GAS_USED: u64 = 1000;
|
||||
const GAS_USED: u64 = 5000;
|
||||
|
||||
let chainspec = jovian_chainspec();
|
||||
let header = Header {
|
||||
timestamp: JOVIAN_TIMESTAMP,
|
||||
blob_gas_used: Some(BLOB_GAS_USED),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let result = BlockExecutionResult::<OpReceipt> {
|
||||
blob_gas_used: BLOB_GAS_USED,
|
||||
receipts: vec![],
|
||||
requests: Requests::default(),
|
||||
gas_used: GAS_USED,
|
||||
};
|
||||
validate_block_post_execution(&header, &chainspec, &result, None).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_jovian_blob_gas_used_validation_mismatched() {
|
||||
const BLOB_GAS_USED: u64 = 1000;
|
||||
const GAS_USED: u64 = 5000;
|
||||
|
||||
let chainspec = jovian_chainspec();
|
||||
let header = Header {
|
||||
timestamp: JOVIAN_TIMESTAMP,
|
||||
blob_gas_used: Some(BLOB_GAS_USED + 1),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let result = BlockExecutionResult::<OpReceipt> {
|
||||
blob_gas_used: BLOB_GAS_USED,
|
||||
receipts: vec![],
|
||||
requests: Requests::default(),
|
||||
gas_used: GAS_USED,
|
||||
};
|
||||
assert!(matches!(
|
||||
validate_block_post_execution(&header, &chainspec, &result, None).unwrap_err(),
|
||||
ConsensusError::BlobGasUsedDiff(diff)
|
||||
if diff.got == BLOB_GAS_USED && diff.expected == BLOB_GAS_USED + 1
|
||||
));
|
||||
}
|
||||
}
|
||||
@@ -1,83 +0,0 @@
|
||||
[package]
|
||||
name = "reth-optimism-evm"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
rust-version.workspace = true
|
||||
license.workspace = true
|
||||
homepage.workspace = true
|
||||
repository.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Reth
|
||||
reth-chainspec.workspace = true
|
||||
reth-evm = { workspace = true, features = ["op"] }
|
||||
reth-primitives-traits.workspace = true
|
||||
reth-execution-errors.workspace = true
|
||||
reth-execution-types.workspace = true
|
||||
reth-storage-errors.workspace = true
|
||||
|
||||
reth-rpc-eth-api = { workspace = true, optional = true }
|
||||
|
||||
# ethereum
|
||||
alloy-eips.workspace = true
|
||||
alloy-evm.workspace = true
|
||||
alloy-primitives.workspace = true
|
||||
alloy-op-evm.workspace = true
|
||||
op-alloy-consensus.workspace = true
|
||||
op-alloy-rpc-types-engine.workspace = true
|
||||
alloy-consensus.workspace = true
|
||||
|
||||
# Optimism
|
||||
reth-optimism-chainspec.workspace = true
|
||||
reth-optimism-consensus.workspace = true
|
||||
reth-optimism-forks.workspace = true
|
||||
reth-optimism-primitives.workspace = true
|
||||
|
||||
# revm
|
||||
revm.workspace = true
|
||||
op-revm.workspace = true
|
||||
|
||||
# misc
|
||||
thiserror.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
reth-evm = { workspace = true, features = ["test-utils"] }
|
||||
reth-revm = { workspace = true, features = ["test-utils"] }
|
||||
alloy-genesis.workspace = true
|
||||
reth-optimism-primitives = { workspace = true, features = ["arbitrary"] }
|
||||
|
||||
[features]
|
||||
default = ["std"]
|
||||
std = [
|
||||
"reth-revm/std",
|
||||
"alloy-consensus/std",
|
||||
"alloy-eips/std",
|
||||
"alloy-genesis/std",
|
||||
"alloy-primitives/std",
|
||||
"reth-primitives-traits/std",
|
||||
"revm/std",
|
||||
"reth-optimism-primitives/std",
|
||||
"reth-optimism-forks/std",
|
||||
"thiserror/std",
|
||||
"op-alloy-consensus/std",
|
||||
"reth-chainspec/std",
|
||||
"reth-optimism-consensus/std",
|
||||
"reth-optimism-chainspec/std",
|
||||
"reth-execution-errors/std",
|
||||
"reth-execution-types/std",
|
||||
"alloy-evm/std",
|
||||
"alloy-op-evm/std",
|
||||
"op-revm/std",
|
||||
"reth-evm/std",
|
||||
"op-alloy-rpc-types-engine/std",
|
||||
"reth-storage-errors/std",
|
||||
]
|
||||
portable = [
|
||||
"reth-revm/portable",
|
||||
"op-revm/portable",
|
||||
"revm/portable",
|
||||
]
|
||||
rpc = ["reth-rpc-eth-api", "reth-optimism-primitives/serde", "reth-optimism-primitives/reth-codec", "alloy-evm/rpc"]
|
||||
@@ -1,154 +0,0 @@
|
||||
use alloc::sync::Arc;
|
||||
use alloy_consensus::{
|
||||
constants::EMPTY_WITHDRAWALS, proofs, Block, BlockBody, Header, TxReceipt,
|
||||
EMPTY_OMMER_ROOT_HASH,
|
||||
};
|
||||
use alloy_eips::{eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE};
|
||||
use alloy_evm::block::BlockExecutorFactory;
|
||||
use alloy_op_evm::OpBlockExecutionCtx;
|
||||
use alloy_primitives::logs_bloom;
|
||||
use reth_evm::execute::{BlockAssembler, BlockAssemblerInput};
|
||||
use reth_execution_errors::BlockExecutionError;
|
||||
use reth_execution_types::BlockExecutionResult;
|
||||
use reth_optimism_consensus::{calculate_receipt_root_no_memo_optimism, isthmus};
|
||||
use reth_optimism_forks::OpHardforks;
|
||||
use reth_optimism_primitives::DepositReceipt;
|
||||
use reth_primitives_traits::{Receipt, SignedTransaction};
|
||||
use revm::context::Block as _;
|
||||
|
||||
/// Block builder for Optimism.
|
||||
#[derive(Debug)]
|
||||
pub struct OpBlockAssembler<ChainSpec> {
|
||||
chain_spec: Arc<ChainSpec>,
|
||||
}
|
||||
|
||||
impl<ChainSpec> OpBlockAssembler<ChainSpec> {
|
||||
/// Creates a new [`OpBlockAssembler`].
|
||||
pub const fn new(chain_spec: Arc<ChainSpec>) -> Self {
|
||||
Self { chain_spec }
|
||||
}
|
||||
}
|
||||
|
||||
impl<ChainSpec: OpHardforks> OpBlockAssembler<ChainSpec> {
|
||||
/// Builds a block for `input` without any bounds on header `H`.
|
||||
pub fn assemble_block<
|
||||
F: for<'a> BlockExecutorFactory<
|
||||
ExecutionCtx<'a>: Into<OpBlockExecutionCtx>,
|
||||
Transaction: SignedTransaction,
|
||||
Receipt: Receipt + DepositReceipt,
|
||||
>,
|
||||
H,
|
||||
>(
|
||||
&self,
|
||||
input: BlockAssemblerInput<'_, '_, F, H>,
|
||||
) -> Result<Block<F::Transaction>, BlockExecutionError> {
|
||||
let BlockAssemblerInput {
|
||||
evm_env,
|
||||
execution_ctx: ctx,
|
||||
transactions,
|
||||
output: BlockExecutionResult { receipts, gas_used, blob_gas_used, requests: _ },
|
||||
bundle_state,
|
||||
state_root,
|
||||
state_provider,
|
||||
..
|
||||
} = input;
|
||||
let ctx = ctx.into();
|
||||
|
||||
let timestamp = evm_env.block_env.timestamp().saturating_to();
|
||||
|
||||
let transactions_root = proofs::calculate_transaction_root(&transactions);
|
||||
let receipts_root =
|
||||
calculate_receipt_root_no_memo_optimism(receipts, &self.chain_spec, timestamp);
|
||||
let logs_bloom = logs_bloom(receipts.iter().flat_map(|r| r.logs()));
|
||||
|
||||
let mut requests_hash = None;
|
||||
|
||||
let withdrawals_root = if self.chain_spec.is_isthmus_active_at_timestamp(timestamp) {
|
||||
// always empty requests hash post isthmus
|
||||
requests_hash = Some(EMPTY_REQUESTS_HASH);
|
||||
|
||||
// withdrawals root field in block header is used for storage root of L2 predeploy
|
||||
// `l2tol1-message-passer`
|
||||
Some(
|
||||
isthmus::withdrawals_root(bundle_state, state_provider)
|
||||
.map_err(BlockExecutionError::other)?,
|
||||
)
|
||||
} else if self.chain_spec.is_canyon_active_at_timestamp(timestamp) {
|
||||
Some(EMPTY_WITHDRAWALS)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let (excess_blob_gas, blob_gas_used) =
|
||||
if self.chain_spec.is_jovian_active_at_timestamp(timestamp) {
|
||||
// In jovian, we're using the blob gas used field to store the current da
|
||||
// footprint's value.
|
||||
(Some(0), Some(*blob_gas_used))
|
||||
} else if self.chain_spec.is_ecotone_active_at_timestamp(timestamp) {
|
||||
(Some(0), Some(0))
|
||||
} else {
|
||||
(None, None)
|
||||
};
|
||||
|
||||
let header = Header {
|
||||
parent_hash: ctx.parent_hash,
|
||||
ommers_hash: EMPTY_OMMER_ROOT_HASH,
|
||||
beneficiary: evm_env.block_env.beneficiary(),
|
||||
state_root,
|
||||
transactions_root,
|
||||
receipts_root,
|
||||
withdrawals_root,
|
||||
logs_bloom,
|
||||
timestamp,
|
||||
mix_hash: evm_env.block_env.prevrandao().unwrap_or_default(),
|
||||
nonce: BEACON_NONCE.into(),
|
||||
base_fee_per_gas: Some(evm_env.block_env.basefee()),
|
||||
number: evm_env.block_env.number().saturating_to(),
|
||||
gas_limit: evm_env.block_env.gas_limit(),
|
||||
difficulty: evm_env.block_env.difficulty(),
|
||||
gas_used: *gas_used,
|
||||
extra_data: ctx.extra_data,
|
||||
parent_beacon_block_root: ctx.parent_beacon_block_root,
|
||||
blob_gas_used,
|
||||
excess_blob_gas,
|
||||
requests_hash,
|
||||
};
|
||||
|
||||
Ok(Block::new(
|
||||
header,
|
||||
BlockBody {
|
||||
transactions,
|
||||
ommers: Default::default(),
|
||||
withdrawals: self
|
||||
.chain_spec
|
||||
.is_canyon_active_at_timestamp(timestamp)
|
||||
.then(Default::default),
|
||||
},
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl<ChainSpec> Clone for OpBlockAssembler<ChainSpec> {
|
||||
fn clone(&self) -> Self {
|
||||
Self { chain_spec: self.chain_spec.clone() }
|
||||
}
|
||||
}
|
||||
|
||||
impl<F, ChainSpec> BlockAssembler<F> for OpBlockAssembler<ChainSpec>
|
||||
where
|
||||
ChainSpec: OpHardforks,
|
||||
F: for<'a> BlockExecutorFactory<
|
||||
ExecutionCtx<'a> = OpBlockExecutionCtx,
|
||||
Transaction: SignedTransaction,
|
||||
Receipt: Receipt + DepositReceipt,
|
||||
>,
|
||||
{
|
||||
type Block = Block<F::Transaction>;
|
||||
|
||||
fn assemble_block(
|
||||
&self,
|
||||
input: BlockAssemblerInput<'_, '_, F>,
|
||||
) -> Result<Self::Block, BlockExecutionError> {
|
||||
self.assemble_block(input)
|
||||
}
|
||||
}
|
||||
@@ -1,51 +0,0 @@
|
||||
pub use alloy_op_evm::{
|
||||
spec as revm_spec, spec_by_timestamp_after_bedrock as revm_spec_by_timestamp_after_bedrock,
|
||||
};
|
||||
use op_alloy_rpc_types_engine::OpFlashblockPayloadBase;
|
||||
use revm::primitives::{Address, Bytes, B256};
|
||||
|
||||
/// Context relevant for execution of the next block w.r.t. OP.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct OpNextBlockEnvAttributes {
|
||||
/// The timestamp of the next block.
|
||||
pub timestamp: u64,
|
||||
/// The suggested fee recipient for the next block.
|
||||
pub suggested_fee_recipient: Address,
|
||||
/// The randomness value for the next block.
|
||||
pub prev_randao: B256,
|
||||
/// Block gas limit.
|
||||
pub gas_limit: u64,
|
||||
/// The parent beacon block root.
|
||||
pub parent_beacon_block_root: Option<B256>,
|
||||
/// Encoded EIP-1559 parameters to include in the block's `extra_data` field.
|
||||
pub extra_data: Bytes,
|
||||
}
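// Construction sketch (the field values below are illustrative placeholders, not defaults
// taken from this crate):
//
// let attrs = OpNextBlockEnvAttributes {
//     timestamp: 1_700_000_002,
//     suggested_fee_recipient: Address::ZERO,
//     prev_randao: B256::ZERO,
//     gas_limit: 30_000_000,
//     parent_beacon_block_root: None,
//     extra_data: Bytes::new(),
// };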
|
||||
|
||||
#[cfg(feature = "rpc")]
|
||||
impl<H: alloy_consensus::BlockHeader> reth_rpc_eth_api::helpers::pending_block::BuildPendingEnv<H>
|
||||
for OpNextBlockEnvAttributes
|
||||
{
|
||||
fn build_pending_env(parent: &crate::SealedHeader<H>) -> Self {
|
||||
Self {
|
||||
timestamp: parent.timestamp().saturating_add(12),
|
||||
suggested_fee_recipient: parent.beneficiary(),
|
||||
prev_randao: B256::random(),
|
||||
gas_limit: parent.gas_limit(),
|
||||
parent_beacon_block_root: parent.parent_beacon_block_root(),
|
||||
extra_data: parent.extra_data().clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<OpFlashblockPayloadBase> for OpNextBlockEnvAttributes {
|
||||
fn from(base: OpFlashblockPayloadBase) -> Self {
|
||||
Self {
|
||||
timestamp: base.timestamp,
|
||||
suggested_fee_recipient: base.fee_recipient,
|
||||
prev_randao: base.prev_randao,
|
||||
gas_limit: base.gas_limit,
|
||||
parent_beacon_block_root: Some(base.parent_beacon_block_root),
|
||||
extra_data: base.extra_data,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,67 +0,0 @@
|
||||
//! Error types for the Optimism EVM module.
|
||||
|
||||
use reth_evm::execute::BlockExecutionError;
|
||||
|
||||
/// L1 Block Info specific errors
|
||||
#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)]
|
||||
pub enum L1BlockInfoError {
|
||||
/// Could not find L1 block info transaction in the L2 block
|
||||
#[error("could not find l1 block info tx in the L2 block")]
|
||||
MissingTransaction,
|
||||
/// Invalid L1 block info transaction calldata
|
||||
#[error("invalid l1 block info transaction calldata in the L2 block")]
|
||||
InvalidCalldata,
|
||||
/// Unexpected L1 block info transaction calldata length
|
||||
#[error("unexpected l1 block info tx calldata length found")]
|
||||
UnexpectedCalldataLength,
|
||||
/// Base fee conversion error
|
||||
#[error("could not convert l1 base fee")]
|
||||
BaseFeeConversion,
|
||||
/// Fee overhead conversion error
|
||||
#[error("could not convert l1 fee overhead")]
|
||||
FeeOverheadConversion,
|
||||
/// Fee scalar conversion error
|
||||
#[error("could not convert l1 fee scalar")]
|
||||
FeeScalarConversion,
|
||||
/// Base Fee Scalar conversion error
|
||||
#[error("could not convert base fee scalar")]
|
||||
BaseFeeScalarConversion,
|
||||
/// Blob base fee conversion error
|
||||
#[error("could not convert l1 blob base fee")]
|
||||
BlobBaseFeeConversion,
|
||||
/// Blob base fee scalar conversion error
|
||||
#[error("could not convert l1 blob base fee scalar")]
|
||||
BlobBaseFeeScalarConversion,
|
||||
/// Operator fee scalar conversion error
|
||||
#[error("could not convert operator fee scalar")]
|
||||
OperatorFeeScalarConversion,
|
||||
/// Operator fee constant conversion error
|
||||
#[error("could not convert operator fee constant")]
|
||||
OperatorFeeConstantConversion,
|
||||
/// DA footprint gas scalar constant conversion error
|
||||
#[error("could not convert DA footprint gas scalar constant")]
|
||||
DaFootprintGasScalarConversion,
|
||||
/// Optimism hardforks not active
|
||||
#[error("Optimism hardforks are not active")]
|
||||
HardforksNotActive,
|
||||
}
|
||||
|
||||
/// Optimism Block Executor Errors
|
||||
#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)]
|
||||
pub enum OpBlockExecutionError {
|
||||
/// Error when trying to parse L1 block info
|
||||
#[error(transparent)]
|
||||
L1BlockInfo(#[from] L1BlockInfoError),
|
||||
/// Thrown when force deploy of create2deployer code fails.
|
||||
#[error("failed to force create2deployer account code")]
|
||||
ForceCreate2DeployerFail,
|
||||
/// Thrown when a database account could not be loaded.
|
||||
#[error("failed to load account {_0}")]
|
||||
AccountLoadFailed(alloy_primitives::Address),
|
||||
}
|
||||
|
||||
impl From<OpBlockExecutionError> for BlockExecutionError {
|
||||
fn from(err: OpBlockExecutionError) -> Self {
|
||||
Self::other(err)
|
||||
}
|
||||
}
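// Sketch of how the conversion above is typically used: an OP-specific error can be
// propagated with `?` or `.into()` from a function returning the generic
// `BlockExecutionError` (the function below is hypothetical):
//
// fn require_account(addr: alloy_primitives::Address) -> Result<(), BlockExecutionError> {
//     Err(OpBlockExecutionError::AccountLoadFailed(addr).into())
// }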
|
||||
@@ -1,198 +0,0 @@
|
||||
//! Optimism block execution strategy.
|
||||
|
||||
/// Helper type with backwards compatible methods to obtain executor providers.
|
||||
pub type OpExecutorProvider = crate::OpEvmConfig;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::{OpEvmConfig, OpRethReceiptBuilder};
|
||||
use alloc::sync::Arc;
|
||||
use alloy_consensus::{Block, BlockBody, Header, SignableTransaction, TxEip1559};
|
||||
use alloy_primitives::{b256, Address, Signature, StorageKey, StorageValue, U256};
|
||||
use op_alloy_consensus::TxDeposit;
|
||||
use op_revm::constants::L1_BLOCK_CONTRACT;
|
||||
use reth_chainspec::MIN_TRANSACTION_GAS;
|
||||
use reth_evm::execute::{BasicBlockExecutor, Executor};
|
||||
use reth_optimism_chainspec::{OpChainSpec, OpChainSpecBuilder};
|
||||
use reth_optimism_primitives::{OpReceipt, OpTransactionSigned};
|
||||
use reth_primitives_traits::{Account, RecoveredBlock};
|
||||
use reth_revm::{database::StateProviderDatabase, test_utils::StateProviderTest};
|
||||
use std::{collections::HashMap, str::FromStr};
|
||||
|
||||
fn create_op_state_provider() -> StateProviderTest {
|
||||
let mut db = StateProviderTest::default();
|
||||
|
||||
let l1_block_contract_account =
|
||||
Account { balance: U256::ZERO, bytecode_hash: None, nonce: 1 };
|
||||
|
||||
let mut l1_block_storage = HashMap::default();
|
||||
// base fee
|
||||
l1_block_storage.insert(StorageKey::with_last_byte(1), StorageValue::from(1000000000));
|
||||
// l1 fee overhead
|
||||
l1_block_storage.insert(StorageKey::with_last_byte(5), StorageValue::from(188));
|
||||
// l1 fee scalar
|
||||
l1_block_storage.insert(StorageKey::with_last_byte(6), StorageValue::from(684000));
|
||||
// l1 fee scalars post ecotone
|
||||
l1_block_storage.insert(
|
||||
StorageKey::with_last_byte(3),
|
||||
StorageValue::from_str(
|
||||
"0x0000000000000000000000000000000000001db0000d27300000000000000005",
|
||||
)
|
||||
.unwrap(),
|
||||
);
|
||||
|
||||
db.insert_account(L1_BLOCK_CONTRACT, l1_block_contract_account, None, l1_block_storage);
|
||||
|
||||
db
|
||||
}
|
||||
|
||||
fn evm_config(chain_spec: Arc<OpChainSpec>) -> OpEvmConfig {
|
||||
OpEvmConfig::new(chain_spec, OpRethReceiptBuilder::default())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn op_deposit_fields_pre_canyon() {
|
||||
let header = Header {
|
||||
timestamp: 1,
|
||||
number: 1,
|
||||
gas_limit: 1_000_000,
|
||||
gas_used: 42_000,
|
||||
receipts_root: b256!(
|
||||
"0x83465d1e7d01578c0d609be33570f91242f013e9e295b0879905346abbd63731"
|
||||
),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mut db = create_op_state_provider();
|
||||
|
||||
let addr = Address::ZERO;
|
||||
let account = Account { balance: U256::MAX, ..Account::default() };
|
||||
db.insert_account(addr, account, None, HashMap::default());
|
||||
|
||||
let chain_spec = Arc::new(OpChainSpecBuilder::base_mainnet().regolith_activated().build());
|
||||
|
||||
let tx: OpTransactionSigned = TxEip1559 {
|
||||
chain_id: chain_spec.chain.id(),
|
||||
nonce: 0,
|
||||
gas_limit: MIN_TRANSACTION_GAS,
|
||||
to: addr.into(),
|
||||
..Default::default()
|
||||
}
|
||||
.into_signed(Signature::test_signature())
|
||||
.into();
|
||||
|
||||
let tx_deposit: OpTransactionSigned = TxDeposit {
|
||||
from: addr,
|
||||
to: addr.into(),
|
||||
gas_limit: MIN_TRANSACTION_GAS,
|
||||
..Default::default()
|
||||
}
|
||||
.into();
|
||||
|
||||
let provider = evm_config(chain_spec);
|
||||
let mut executor = BasicBlockExecutor::new(provider, StateProviderDatabase::new(&db));
|
||||
|
||||
// make sure the L1 block contract state is preloaded.
|
||||
executor.with_state_mut(|state| {
|
||||
state.load_cache_account(L1_BLOCK_CONTRACT).unwrap();
|
||||
});
|
||||
|
||||
// Attempt to execute a block with one deposit and one non-deposit transaction
|
||||
let output = executor
|
||||
.execute(&RecoveredBlock::new_unhashed(
|
||||
Block {
|
||||
header,
|
||||
body: BlockBody { transactions: vec![tx, tx_deposit], ..Default::default() },
|
||||
},
|
||||
vec![addr, addr],
|
||||
))
|
||||
.unwrap();
|
||||
|
||||
let receipts = &output.receipts;
|
||||
let tx_receipt = &receipts[0];
|
||||
let deposit_receipt = &receipts[1];
|
||||
|
||||
assert!(!matches!(tx_receipt, OpReceipt::Deposit(_)));
|
||||
// deposit_nonce is present only in deposit transactions
|
||||
let OpReceipt::Deposit(deposit_receipt) = deposit_receipt else {
|
||||
panic!("expected deposit")
|
||||
};
|
||||
assert!(deposit_receipt.deposit_nonce.is_some());
|
||||
// deposit_receipt_version is not present in pre canyon transactions
|
||||
assert!(deposit_receipt.deposit_receipt_version.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn op_deposit_fields_post_canyon() {
|
||||
// ensure_create2_deployer will fail if timestamp is set to less than 2
|
||||
let header = Header {
|
||||
timestamp: 2,
|
||||
number: 1,
|
||||
gas_limit: 1_000_000,
|
||||
gas_used: 42_000,
|
||||
receipts_root: b256!(
|
||||
"0xfffc85c4004fd03c7bfbe5491fae98a7473126c099ac11e8286fd0013f15f908"
|
||||
),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mut db = create_op_state_provider();
|
||||
let addr = Address::ZERO;
|
||||
let account = Account { balance: U256::MAX, ..Account::default() };
|
||||
|
||||
db.insert_account(addr, account, None, HashMap::default());
|
||||
|
||||
let chain_spec = Arc::new(OpChainSpecBuilder::base_mainnet().canyon_activated().build());
|
||||
|
||||
let tx: OpTransactionSigned = TxEip1559 {
|
||||
chain_id: chain_spec.chain.id(),
|
||||
nonce: 0,
|
||||
gas_limit: MIN_TRANSACTION_GAS,
|
||||
to: addr.into(),
|
||||
..Default::default()
|
||||
}
|
||||
.into_signed(Signature::test_signature())
|
||||
.into();
|
||||
|
||||
let tx_deposit: OpTransactionSigned = TxDeposit {
|
||||
from: addr,
|
||||
to: addr.into(),
|
||||
gas_limit: MIN_TRANSACTION_GAS,
|
||||
..Default::default()
|
||||
}
|
||||
.into();
|
||||
|
||||
let provider = evm_config(chain_spec);
|
||||
let mut executor = BasicBlockExecutor::new(provider, StateProviderDatabase::new(&db));
|
||||
|
||||
// make sure the L1 block contract state is preloaded.
|
||||
executor.with_state_mut(|state| {
|
||||
state.load_cache_account(L1_BLOCK_CONTRACT).unwrap();
|
||||
});
|
||||
|
||||
// attempt to execute a block with one deposit and one non-deposit transaction, this should not fail post canyon
|
||||
let output = executor
|
||||
.execute(&RecoveredBlock::new_unhashed(
|
||||
Block {
|
||||
header,
|
||||
body: BlockBody { transactions: vec![tx, tx_deposit], ..Default::default() },
|
||||
},
|
||||
vec![addr, addr],
|
||||
))
|
||||
.expect("Executing a block while canyon is active should not fail");
|
||||
|
||||
let receipts = &output.receipts;
|
||||
let tx_receipt = &receipts[0];
|
||||
let deposit_receipt = &receipts[1];
|
||||
|
||||
// deposit_receipt_version is set to 1 for post canyon deposit transactions
|
||||
assert!(!matches!(tx_receipt, OpReceipt::Deposit(_)));
|
||||
let OpReceipt::Deposit(deposit_receipt) = deposit_receipt else {
|
||||
panic!("expected deposit")
|
||||
};
|
||||
assert_eq!(deposit_receipt.deposit_receipt_version, Some(1));
|
||||
|
||||
// deposit_nonce is present only in deposit transactions
|
||||
assert!(deposit_receipt.deposit_nonce.is_some());
|
||||
}
|
||||
}
|
||||
@@ -1,519 +0,0 @@
|
||||
//! Optimism-specific implementation and utilities for the executor
|
||||
|
||||
use crate::{error::L1BlockInfoError, revm_spec_by_timestamp_after_bedrock, OpBlockExecutionError};
|
||||
use alloy_consensus::Transaction;
|
||||
use alloy_primitives::{hex, U16, U256};
|
||||
use op_revm::L1BlockInfo;
|
||||
use reth_execution_errors::BlockExecutionError;
|
||||
use reth_optimism_forks::OpHardforks;
|
||||
use reth_primitives_traits::BlockBody;
|
||||
|
||||
/// The function selector of the "setL1BlockValuesEcotone" function in the `L1Block` contract.
|
||||
const L1_BLOCK_ECOTONE_SELECTOR: [u8; 4] = hex!("440a5e20");
|
||||
|
||||
/// The function selector of the "setL1BlockValuesIsthmus" function in the `L1Block` contract.
|
||||
const L1_BLOCK_ISTHMUS_SELECTOR: [u8; 4] = hex!("098999be");
|
||||
|
||||
/// The function selector of the "setL1BlockValuesJovian" function in the `L1Block` contract.
|
||||
/// This is the first 4 bytes of `keccak256("setL1BlockValuesJovian()")`.
|
||||
const L1_BLOCK_JOVIAN_SELECTOR: [u8; 4] = hex!("3db6be2b");
|
||||
|
||||
/// Extracts the [`L1BlockInfo`] from the L2 block. The L1 info transaction is always the first
|
||||
/// transaction in the L2 block.
|
||||
///
|
||||
/// Returns an error if the L1 info transaction is not found, i.e. if the block is empty.
|
||||
pub fn extract_l1_info<B: BlockBody>(body: &B) -> Result<L1BlockInfo, OpBlockExecutionError> {
|
||||
let l1_info_tx = body
|
||||
.transactions()
|
||||
.first()
|
||||
.ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::MissingTransaction))?;
|
||||
extract_l1_info_from_tx(l1_info_tx)
|
||||
}
|
||||
|
||||
/// Extracts the [`L1BlockInfo`] from the L1 info transaction (first transaction) in the L2
|
||||
/// block.
|
||||
///
|
||||
/// Returns an error if the calldata is shorter than 4 bytes.
|
||||
pub fn extract_l1_info_from_tx<T: Transaction>(
|
||||
tx: &T,
|
||||
) -> Result<L1BlockInfo, OpBlockExecutionError> {
|
||||
let l1_info_tx_data = tx.input();
|
||||
if l1_info_tx_data.len() < 4 {
|
||||
return Err(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::InvalidCalldata));
|
||||
}
|
||||
|
||||
parse_l1_info(l1_info_tx_data)
|
||||
}
|
||||
|
||||
/// Parses the input of the first transaction in the L2 block, into [`L1BlockInfo`].
|
||||
///
|
||||
/// Returns an error if the data has an incorrect length.
|
||||
///
|
||||
/// Caution: this expects that the input is the calldata of the [`L1BlockInfo`] transaction (first
|
||||
/// transaction) in the L2 block.
|
||||
///
|
||||
/// # Panics
|
||||
/// If the input is shorter than 4 bytes.
|
||||
pub fn parse_l1_info(input: &[u8]) -> Result<L1BlockInfo, OpBlockExecutionError> {
|
||||
// Parse the L1 info transaction into an L1BlockInfo struct, depending on the function selector.
|
||||
// There are currently 4 variants:
|
||||
// - Jovian
|
||||
// - Isthmus
|
||||
// - Ecotone
|
||||
// - Bedrock
|
||||
if input[0..4] == L1_BLOCK_JOVIAN_SELECTOR {
|
||||
parse_l1_info_tx_jovian(input[4..].as_ref())
|
||||
} else if input[0..4] == L1_BLOCK_ISTHMUS_SELECTOR {
|
||||
parse_l1_info_tx_isthmus(input[4..].as_ref())
|
||||
} else if input[0..4] == L1_BLOCK_ECOTONE_SELECTOR {
|
||||
parse_l1_info_tx_ecotone(input[4..].as_ref())
|
||||
} else {
|
||||
parse_l1_info_tx_bedrock(input[4..].as_ref())
|
||||
}
|
||||
}
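// Note: the selector dispatch above expects the full calldata including the 4-byte selector;
// `extract_l1_info_from_tx` therefore passes `tx.input()` through unchanged after its length check.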
|
||||
|
||||
/// Parses the calldata of the [`L1BlockInfo`] transaction pre-Ecotone hardfork.
|
||||
pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result<L1BlockInfo, OpBlockExecutionError> {
|
||||
// The setL1BlockValues tx calldata must be exactly 260 bytes long; since the first 4 bytes
// (the function selector) have already been removed here, `data` must be exactly 256 bytes. Detailed breakdown:
|
||||
// 32 bytes for the block number
|
||||
// + 32 bytes for the block timestamp
|
||||
// + 32 bytes for the base fee
|
||||
// + 32 bytes for the block hash
|
||||
// + 32 bytes for the block sequence number
|
||||
// + 32 bytes for the batcher hash
|
||||
// + 32 bytes for the fee overhead
|
||||
// + 32 bytes for the fee scalar
|
||||
if data.len() != 256 {
|
||||
return Err(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::UnexpectedCalldataLength));
|
||||
}
|
||||
|
||||
let l1_base_fee = U256::try_from_be_slice(&data[64..96])
|
||||
.ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BaseFeeConversion))?;
|
||||
let l1_fee_overhead = U256::try_from_be_slice(&data[192..224])
|
||||
.ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::FeeOverheadConversion))?;
|
||||
let l1_fee_scalar = U256::try_from_be_slice(&data[224..256])
|
||||
.ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::FeeScalarConversion))?;
|
||||
|
||||
Ok(L1BlockInfo {
|
||||
l1_base_fee,
|
||||
l1_fee_overhead: Some(l1_fee_overhead),
|
||||
l1_base_fee_scalar: l1_fee_scalar,
|
||||
..Default::default()
|
||||
})
|
||||
}
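// Offsets used above (relative to `data`, i.e. after the selector): base fee at 64..96,
// fee overhead at 192..224, fee scalar at 224..256, matching the breakdown in the comment.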
|
||||
|
||||
/// Updates the L1 block values for an Ecotone upgraded chain.
|
||||
/// Params are packed and passed in as raw msg.data instead of ABI to reduce calldata size.
|
||||
/// Params are expected to be in the following order:
|
||||
/// 1. _baseFeeScalar L1 base fee scalar
|
||||
/// 2. _blobBaseFeeScalar L1 blob base fee scalar
|
||||
/// 3. _sequenceNumber Number of L2 blocks since epoch start.
|
||||
/// 4. _timestamp L1 timestamp.
|
||||
/// 5. _number L1 blocknumber.
|
||||
/// 6. _basefee L1 base fee.
|
||||
/// 7. _blobBaseFee L1 blob base fee.
|
||||
/// 8. _hash L1 blockhash.
|
||||
/// 9. _batcherHash Versioned hash to authenticate batcher by.
|
||||
///
|
||||
/// <https://github.com/ethereum-optimism/optimism/blob/957e13dd504fb336a4be40fb5dd0d8ba0276be34/packages/contracts-bedrock/src/L2/L1Block.sol#L136>
|
||||
pub fn parse_l1_info_tx_ecotone(data: &[u8]) -> Result<L1BlockInfo, OpBlockExecutionError> {
|
||||
if data.len() != 160 {
|
||||
return Err(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::UnexpectedCalldataLength));
|
||||
}
|
||||
|
||||
// https://github.com/ethereum-optimism/op-geth/blob/60038121c7571a59875ff9ed7679c48c9f73405d/core/types/rollup_cost.go#L317-L328
|
||||
//
|
||||
// data layout assumed for Ecotone:
|
||||
// offset type varname
|
||||
// 0 <selector>
|
||||
// 4 uint32 _basefeeScalar (start offset in this scope)
|
||||
// 8 uint32 _blobBaseFeeScalar
|
||||
// 12 uint64 _sequenceNumber,
|
||||
// 20 uint64 _timestamp,
|
||||
// 28 uint64 _l1BlockNumber
|
||||
// 36 uint256 _basefee,
|
||||
// 68 uint256 _blobBaseFee,
|
||||
// 100 bytes32 _hash,
|
||||
// 132 bytes32 _batcherHash,
|
||||
|
||||
let l1_base_fee_scalar = U256::try_from_be_slice(&data[..4])
|
||||
.ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BaseFeeScalarConversion))?;
|
||||
let l1_blob_base_fee_scalar = U256::try_from_be_slice(&data[4..8]).ok_or({
|
||||
OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BlobBaseFeeScalarConversion)
|
||||
})?;
|
||||
let l1_base_fee = U256::try_from_be_slice(&data[32..64])
|
||||
.ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BaseFeeConversion))?;
|
||||
let l1_blob_base_fee = U256::try_from_be_slice(&data[64..96])
|
||||
.ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BlobBaseFeeConversion))?;
|
||||
|
||||
Ok(L1BlockInfo {
|
||||
l1_base_fee,
|
||||
l1_base_fee_scalar,
|
||||
l1_blob_base_fee: Some(l1_blob_base_fee),
|
||||
l1_blob_base_fee_scalar: Some(l1_blob_base_fee_scalar),
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
/// Updates the L1 block values for an Isthmus upgraded chain.
|
||||
/// Params are packed and passed in as raw msg.data instead of ABI to reduce calldata size.
|
||||
/// Params are expected to be in the following order:
|
||||
/// 1. _baseFeeScalar L1 base fee scalar
|
||||
/// 2. _blobBaseFeeScalar L1 blob base fee scalar
|
||||
/// 3. _sequenceNumber Number of L2 blocks since epoch start.
|
||||
/// 4. _timestamp L1 timestamp.
|
||||
/// 5. _number L1 blocknumber.
|
||||
/// 6. _basefee L1 base fee.
|
||||
/// 7. _blobBaseFee L1 blob base fee.
|
||||
/// 8. _hash L1 blockhash.
|
||||
/// 9. _batcherHash Versioned hash to authenticate batcher by.
|
||||
/// 10. _operatorFeeScalar Operator fee scalar
|
||||
/// 11. _operatorFeeConstant Operator fee constant
|
||||
pub fn parse_l1_info_tx_isthmus(data: &[u8]) -> Result<L1BlockInfo, OpBlockExecutionError> {
|
||||
if data.len() != 172 {
|
||||
return Err(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::UnexpectedCalldataLength));
|
||||
}
|
||||
|
||||
// https://github.com/ethereum-optimism/op-geth/blob/60038121c7571a59875ff9ed7679c48c9f73405d/core/types/rollup_cost.go#L317-L328
|
||||
//
|
||||
// data layout assumed for Isthmus:
|
||||
// offset type varname
|
||||
// 0 <selector>
|
||||
// 4 uint32 _basefeeScalar (start offset in this scope)
|
||||
// 8 uint32 _blobBaseFeeScalar
|
||||
// 12 uint64 _sequenceNumber,
|
||||
// 20 uint64 _timestamp,
|
||||
// 28 uint64 _l1BlockNumber
|
||||
// 36 uint256 _basefee,
|
||||
// 68 uint256 _blobBaseFee,
|
||||
// 100 bytes32 _hash,
|
||||
// 132 bytes32 _batcherHash,
|
||||
// 164 uint32 _operatorFeeScalar
|
||||
// 168 uint64 _operatorFeeConstant
|
||||
|
||||
let l1_base_fee_scalar = U256::try_from_be_slice(&data[..4])
|
||||
.ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BaseFeeScalarConversion))?;
|
||||
let l1_blob_base_fee_scalar = U256::try_from_be_slice(&data[4..8]).ok_or({
|
||||
OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BlobBaseFeeScalarConversion)
|
||||
})?;
|
||||
let l1_base_fee = U256::try_from_be_slice(&data[32..64])
|
||||
.ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BaseFeeConversion))?;
|
||||
let l1_blob_base_fee = U256::try_from_be_slice(&data[64..96])
|
||||
.ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BlobBaseFeeConversion))?;
|
||||
let operator_fee_scalar = U256::try_from_be_slice(&data[160..164]).ok_or({
|
||||
OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::OperatorFeeScalarConversion)
|
||||
})?;
|
||||
let operator_fee_constant = U256::try_from_be_slice(&data[164..172]).ok_or({
|
||||
OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::OperatorFeeConstantConversion)
|
||||
})?;
|
||||
|
||||
Ok(L1BlockInfo {
|
||||
l1_base_fee,
|
||||
l1_base_fee_scalar,
|
||||
l1_blob_base_fee: Some(l1_blob_base_fee),
|
||||
l1_blob_base_fee_scalar: Some(l1_blob_base_fee_scalar),
|
||||
operator_fee_scalar: Some(operator_fee_scalar),
|
||||
operator_fee_constant: Some(operator_fee_constant),
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
/// Updates the L1 block values for a Jovian upgraded chain.
|
||||
/// Params are packed and passed in as raw msg.data instead of ABI to reduce calldata size.
|
||||
/// Params are expected to be in the following order:
|
||||
/// 1. _baseFeeScalar L1 base fee scalar
|
||||
/// 2. _blobBaseFeeScalar L1 blob base fee scalar
|
||||
/// 3. _sequenceNumber Number of L2 blocks since epoch start.
|
||||
/// 4. _timestamp L1 timestamp.
|
||||
/// 5. _number L1 blocknumber.
|
||||
/// 6. _basefee L1 base fee.
|
||||
/// 7. _blobBaseFee L1 blob base fee.
|
||||
/// 8. _hash L1 blockhash.
|
||||
/// 9. _batcherHash Versioned hash to authenticate batcher by.
|
||||
/// 10. _operatorFeeScalar Operator fee scalar
|
||||
/// 11. _operatorFeeConstant Operator fee constant
|
||||
/// 12. _daFootprintGasScalar DA footprint gas scalar
|
||||
pub fn parse_l1_info_tx_jovian(data: &[u8]) -> Result<L1BlockInfo, OpBlockExecutionError> {
|
||||
if data.len() != 174 {
|
||||
return Err(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::UnexpectedCalldataLength));
|
||||
}
|
||||
|
||||
// https://github.com/ethereum-optimism/op-geth/blob/60038121c7571a59875ff9ed7679c48c9f73405d/core/types/rollup_cost.go#L317-L328
|
||||
//
|
||||
// data layout assumed for Jovian:
|
||||
// offset type varname
|
||||
// 0 <selector>
|
||||
// 4 uint32 _basefeeScalar (start offset in this scope)
|
||||
// 8 uint32 _blobBaseFeeScalar
|
||||
// 12 uint64 _sequenceNumber,
|
||||
// 20 uint64 _timestamp,
|
||||
// 28 uint64 _l1BlockNumber
|
||||
// 36 uint256 _basefee,
|
||||
// 68 uint256 _blobBaseFee,
|
||||
// 100 bytes32 _hash,
|
||||
// 132 bytes32 _batcherHash,
|
||||
// 164 uint32 _operatorFeeScalar
|
||||
// 168 uint64 _operatorFeeConstant
|
||||
// 176 uint16 _daFootprintGasScalar
|
||||
|
||||
let l1_base_fee_scalar = U256::try_from_be_slice(&data[..4])
|
||||
.ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BaseFeeScalarConversion))?;
|
||||
let l1_blob_base_fee_scalar = U256::try_from_be_slice(&data[4..8]).ok_or({
|
||||
OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BlobBaseFeeScalarConversion)
|
||||
})?;
|
||||
let l1_base_fee = U256::try_from_be_slice(&data[32..64])
|
||||
.ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BaseFeeConversion))?;
|
||||
let l1_blob_base_fee = U256::try_from_be_slice(&data[64..96])
|
||||
.ok_or(OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::BlobBaseFeeConversion))?;
|
||||
let operator_fee_scalar = U256::try_from_be_slice(&data[160..164]).ok_or({
|
||||
OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::OperatorFeeScalarConversion)
|
||||
})?;
|
||||
let operator_fee_constant = U256::try_from_be_slice(&data[164..172]).ok_or({
|
||||
OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::OperatorFeeConstantConversion)
|
||||
})?;
|
||||
let da_footprint_gas_scalar: u16 = U16::try_from_be_slice(&data[172..174])
|
||||
.ok_or({
|
||||
OpBlockExecutionError::L1BlockInfo(L1BlockInfoError::DaFootprintGasScalarConversion)
|
||||
})?
|
||||
.to();
|
||||
|
||||
Ok(L1BlockInfo {
|
||||
l1_base_fee,
|
||||
l1_base_fee_scalar,
|
||||
l1_blob_base_fee: Some(l1_blob_base_fee),
|
||||
l1_blob_base_fee_scalar: Some(l1_blob_base_fee_scalar),
|
||||
operator_fee_scalar: Some(operator_fee_scalar),
|
||||
operator_fee_constant: Some(operator_fee_constant),
|
||||
da_footprint_gas_scalar: Some(da_footprint_gas_scalar),
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
/// An extension trait for [`L1BlockInfo`] that allows us to calculate the L1 cost of a transaction
|
||||
/// based off of the chain spec's activated hardfork.
|
||||
pub trait RethL1BlockInfo {
|
||||
/// Forwards an L1 transaction calculation to revm and returns the gas cost.
|
||||
///
|
||||
/// ### Takes
|
||||
/// - `chain_spec`: The chain spec for the node.
|
||||
/// - `timestamp`: The timestamp of the current block.
|
||||
/// - `input`: The calldata of the transaction.
|
||||
/// - `is_deposit`: Whether or not the transaction is a deposit.
|
||||
fn l1_tx_data_fee(
|
||||
&mut self,
|
||||
chain_spec: impl OpHardforks,
|
||||
timestamp: u64,
|
||||
input: &[u8],
|
||||
is_deposit: bool,
|
||||
) -> Result<U256, BlockExecutionError>;
|
||||
|
||||
/// Computes the data gas cost for an L2 transaction.
|
||||
///
|
||||
/// ### Takes
|
||||
/// - `chain_spec`: The chain spec for the node.
|
||||
/// - `timestamp`: The timestamp of the current block.
|
||||
/// - `input`: The calldata of the transaction.
|
||||
fn l1_data_gas(
|
||||
&self,
|
||||
chain_spec: impl OpHardforks,
|
||||
timestamp: u64,
|
||||
input: &[u8],
|
||||
) -> Result<U256, BlockExecutionError>;
|
||||
}
|
||||
|
||||
impl RethL1BlockInfo for L1BlockInfo {
|
||||
fn l1_tx_data_fee(
|
||||
&mut self,
|
||||
chain_spec: impl OpHardforks,
|
||||
timestamp: u64,
|
||||
input: &[u8],
|
||||
is_deposit: bool,
|
||||
) -> Result<U256, BlockExecutionError> {
|
||||
if is_deposit {
|
||||
return Ok(U256::ZERO);
|
||||
}
|
||||
|
||||
let spec_id = revm_spec_by_timestamp_after_bedrock(&chain_spec, timestamp);
|
||||
Ok(self.calculate_tx_l1_cost(input, spec_id))
|
||||
}
|
||||
|
||||
fn l1_data_gas(
|
||||
&self,
|
||||
chain_spec: impl OpHardforks,
|
||||
timestamp: u64,
|
||||
input: &[u8],
|
||||
) -> Result<U256, BlockExecutionError> {
|
||||
let spec_id = revm_spec_by_timestamp_after_bedrock(&chain_spec, timestamp);
|
||||
Ok(self.data_gas(input, spec_id))
|
||||
}
|
||||
}
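// A minimal usage sketch (hedged; `block` and `tx` are illustrative, `OP_MAINNET` is the
// chain spec used in the tests below):
//
// let mut info = extract_l1_info(&block.body)?;
// let data_gas = info.l1_data_gas(&*OP_MAINNET, block.header.timestamp, tx.input())?;
// let data_fee = info.l1_tx_data_fee(&*OP_MAINNET, block.header.timestamp, tx.input(), false)?;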
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use alloy_consensus::{Block, BlockBody};
|
||||
use alloy_eips::eip2718::Decodable2718;
|
||||
use alloy_primitives::keccak256;
|
||||
use reth_optimism_chainspec::OP_MAINNET;
|
||||
use reth_optimism_forks::OpHardforks;
|
||||
use reth_optimism_primitives::OpTransactionSigned;
|
||||
|
||||
#[test]
|
||||
fn sanity_l1_block() {
|
||||
use alloy_consensus::Header;
|
||||
use alloy_primitives::{hex_literal::hex, Bytes};
|
||||
|
||||
let bytes = Bytes::from_static(&hex!(
|
||||
"7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240"
        ));
        let l1_info_tx = OpTransactionSigned::decode_2718(&mut bytes.as_ref()).unwrap();
        let mock_block = Block {
            header: Header::default(),
            body: BlockBody { transactions: vec![l1_info_tx], ..Default::default() },
        };

        let l1_info: L1BlockInfo = extract_l1_info(&mock_block.body).unwrap();
        assert_eq!(l1_info.l1_base_fee, U256::from(652_114));
        assert_eq!(l1_info.l1_fee_overhead, Some(U256::from(2100)));
        assert_eq!(l1_info.l1_base_fee_scalar, U256::from(1_000_000));
        assert_eq!(l1_info.l1_blob_base_fee, None);
        assert_eq!(l1_info.l1_blob_base_fee_scalar, None);
    }

    #[test]
    fn test_verify_set_jovian() {
        let hash = &keccak256("setL1BlockValuesJovian()")[..4];
        assert_eq!(hash, L1_BLOCK_JOVIAN_SELECTOR)
    }

    #[test]
    fn sanity_l1_block_ecotone() {
        // rig

        // OP mainnet ecotone block 118024092
        // <https://optimistic.etherscan.io/block/118024092>
        const TIMESTAMP: u64 = 1711603765;
        assert!(OP_MAINNET.is_ecotone_active_at_timestamp(TIMESTAMP));

        // First transaction in OP mainnet block 118024092
        //
        // https://optimistic.etherscan.io/getRawTx?tx=0x88501da5d5ca990347c2193be90a07037af1e3820bb40774c8154871c7669150
        const TX: [u8; 251] = hex!(
"7ef8f8a0a539eb753df3b13b7e386e147d45822b67cb908c9ddc5618e3dbaa22ed00850b94deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e2000000558000c5fc50000000000000000000000006605a89f00000000012a10d90000000000000000000000000000000000000000000000000000000af39ac3270000000000000000000000000000000000000000000000000000000d5ea528d24e582fa68786f080069bdbfe06a43f8e67bfd31b8e4d8a8837ba41da9a82a54a0000000000000000000000006887246668a3b87f54deb3b94ba47a6f63f32985"
        );

        let tx = OpTransactionSigned::decode_2718(&mut TX.as_slice()).unwrap();
        let block: Block<OpTransactionSigned> = Block {
            body: BlockBody { transactions: vec![tx], ..Default::default() },
            ..Default::default()
        };

        // expected l1 block info
        let expected_l1_base_fee = U256::from_be_bytes(hex!(
"0000000000000000000000000000000000000000000000000000000af39ac327" // 47036678951
        ));
        let expected_l1_base_fee_scalar = U256::from(1368);
        let expected_l1_blob_base_fee = U256::from_be_bytes(hex!(
"0000000000000000000000000000000000000000000000000000000d5ea528d2" // 57422457042
        ));
        let expected_l1_blob_base_fee_scalar = U256::from(810949);

        // test

        let l1_block_info: L1BlockInfo = extract_l1_info(&block.body).unwrap();

        assert_eq!(l1_block_info.l1_base_fee, expected_l1_base_fee);
        assert_eq!(l1_block_info.l1_base_fee_scalar, expected_l1_base_fee_scalar);
        assert_eq!(l1_block_info.l1_blob_base_fee, Some(expected_l1_blob_base_fee));
        assert_eq!(l1_block_info.l1_blob_base_fee_scalar, Some(expected_l1_blob_base_fee_scalar));
    }

    #[test]
    fn parse_l1_info_fjord() {
        // rig

        // L1 block info for OP mainnet block 124665056 (stored in input of tx at index 0)
        //
        // https://optimistic.etherscan.io/tx/0x312e290cf36df704a2217b015d6455396830b0ce678b860ebfcc30f41403d7b1
        const DATA: &[u8] = &hex!(
"440a5e200000146b000f79c500000000000000040000000066d052e700000000013ad8a3000000000000000000000000000000000000000000000000000000003ef1278700000000000000000000000000000000000000000000000000000000000000012fdf87b89884a61e74b322bbcf60386f543bfae7827725efaaf0ab1de2294a590000000000000000000000006887246668a3b87f54deb3b94ba47a6f63f32985"
        );

        // expected l1 block info verified against expected l1 fee for tx. l1 tx fee listed on OP
        // mainnet block scanner
        //
        // https://github.com/bluealloy/revm/blob/fa5650ee8a4d802f4f3557014dd157adfb074460/crates/revm/src/optimism/l1block.rs#L414-L443
        let l1_base_fee = U256::from(1055991687);
        let l1_base_fee_scalar = U256::from(5227);
        let l1_blob_base_fee = Some(U256::from(1));
        let l1_blob_base_fee_scalar = Some(U256::from(1014213));

        // test

        let l1_block_info = parse_l1_info(DATA).unwrap();

        assert_eq!(l1_block_info.l1_base_fee, l1_base_fee);
        assert_eq!(l1_block_info.l1_base_fee_scalar, l1_base_fee_scalar);
        assert_eq!(l1_block_info.l1_blob_base_fee, l1_blob_base_fee);
        assert_eq!(l1_block_info.l1_blob_base_fee_scalar, l1_blob_base_fee_scalar);
    }

    #[test]
    fn parse_l1_info_isthmus() {
        // rig

        // L1 block info from a devnet with Isthmus activated
        const DATA: &[u8] = &hex!(
"098999be00000558000c5fc500000000000000030000000067a9f765000000000000002900000000000000000000000000000000000000000000000000000000006a6d09000000000000000000000000000000000000000000000000000000000000000172fcc8e8886636bdbe96ba0e4baab67ea7e7811633f52b52e8cf7a5123213b6f000000000000000000000000d3f2c5afb2d76f5579f326b0cd7da5f5a4126c3500004e2000000000000001f4"
        );

        // expected l1 block info verified against expected l1 fee and operator fee for tx.
        let l1_base_fee = U256::from(6974729);
        let l1_base_fee_scalar = U256::from(1368);
        let l1_blob_base_fee = Some(U256::from(1));
        let l1_blob_base_fee_scalar = Some(U256::from(810949));
        let operator_fee_scalar = Some(U256::from(20000));
        let operator_fee_constant = Some(U256::from(500));

        // test

        let l1_block_info = parse_l1_info(DATA).unwrap();

        assert_eq!(l1_block_info.l1_base_fee, l1_base_fee);
        assert_eq!(l1_block_info.l1_base_fee_scalar, l1_base_fee_scalar);
        assert_eq!(l1_block_info.l1_blob_base_fee, l1_blob_base_fee);
        assert_eq!(l1_block_info.l1_blob_base_fee_scalar, l1_blob_base_fee_scalar);
        assert_eq!(l1_block_info.operator_fee_scalar, operator_fee_scalar);
        assert_eq!(l1_block_info.operator_fee_constant, operator_fee_constant);
    }

    #[test]
    fn parse_l1_info_jovian() {
        // L1 block info from a devnet with Jovian activated
        const DATA: &[u8] = &hex!(
"3db6be2b00000558000c5fc500000000000000030000000067a9f765000000000000002900000000000000000000000000000000000000000000000000000000006a6d09000000000000000000000000000000000000000000000000000000000000000172fcc8e8886636bdbe96ba0e4baab67ea7e7811633f52b52e8cf7a5123213b6f000000000000000000000000d3f2c5afb2d76f5579f326b0cd7da5f5a4126c3500004e2000000000000001f4dead"
        );

        // expected l1 block info verified against expected l1 fee and operator fee for tx.
        let l1_base_fee = U256::from(6974729);
        let l1_base_fee_scalar = U256::from(1368);
        let l1_blob_base_fee = Some(U256::from(1));
        let l1_blob_base_fee_scalar = Some(U256::from(810949));
        let operator_fee_scalar = Some(U256::from(20000));
        let operator_fee_constant = Some(U256::from(500));
        let da_footprint_gas_scalar: Option<u16> = Some(U16::from(0xdead).to());

        // test

        let l1_block_info = parse_l1_info(DATA).unwrap();

        assert_eq!(l1_block_info.l1_base_fee, l1_base_fee);
        assert_eq!(l1_block_info.l1_base_fee_scalar, l1_base_fee_scalar);
        assert_eq!(l1_block_info.l1_blob_base_fee, l1_blob_base_fee);
        assert_eq!(l1_block_info.l1_blob_base_fee_scalar, l1_blob_base_fee_scalar);
        assert_eq!(l1_block_info.operator_fee_scalar, operator_fee_scalar);
        assert_eq!(l1_block_info.operator_fee_constant, operator_fee_constant);
        assert_eq!(l1_block_info.da_footprint_gas_scalar, da_footprint_gas_scalar);
    }
}
@@ -1,906 +0,0 @@

//! EVM config for vanilla optimism.

#![doc(
    html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
    html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
    issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(not(test), warn(unused_crate_dependencies))]

extern crate alloc;

use alloc::sync::Arc;
use alloy_consensus::{BlockHeader, Header};
use alloy_evm::{EvmFactory, FromRecoveredTx, FromTxWithEncoded};
use alloy_op_evm::block::{receipt_builder::OpReceiptBuilder, OpTxEnv};
use core::fmt::Debug;
use op_alloy_consensus::EIP1559ParamError;
use op_revm::{OpSpecId, OpTransaction};
use reth_chainspec::EthChainSpec;
use reth_evm::{
    eth::NextEvmEnvAttributes, precompiles::PrecompilesMap, ConfigureEvm, EvmEnv, TransactionEnv,
};
use reth_optimism_chainspec::OpChainSpec;
use reth_optimism_forks::OpHardforks;
use reth_optimism_primitives::{DepositReceipt, OpPrimitives};
use reth_primitives_traits::{NodePrimitives, SealedBlock, SealedHeader, SignedTransaction};
use revm::context::{BlockEnv, TxEnv};

#[allow(unused_imports)]
use {
    alloy_eips::Decodable2718,
    alloy_primitives::{Bytes, U256},
    op_alloy_rpc_types_engine::OpExecutionData,
    reth_evm::{EvmEnvFor, ExecutionCtxFor},
    reth_primitives_traits::{TxTy, WithEncoded},
    reth_storage_errors::any::AnyError,
    revm::{
        context::CfgEnv, context_interface::block::BlobExcessGasAndPrice,
        primitives::hardfork::SpecId,
    },
};

#[cfg(feature = "std")]
use reth_evm::{ConfigureEngineEvm, ExecutableTxIterator};

mod config;
pub use config::{revm_spec, revm_spec_by_timestamp_after_bedrock, OpNextBlockEnvAttributes};
mod execute;
pub use execute::*;
pub mod l1;
pub use l1::*;
mod receipts;
pub use receipts::*;
mod build;
pub use build::OpBlockAssembler;

mod error;
pub use error::OpBlockExecutionError;

pub use alloy_op_evm::{OpBlockExecutionCtx, OpBlockExecutorFactory, OpEvm, OpEvmFactory};

/// Optimism-related EVM configuration.
#[derive(Debug)]
pub struct OpEvmConfig<
    ChainSpec = OpChainSpec,
    N: NodePrimitives = OpPrimitives,
    R = OpRethReceiptBuilder,
    EvmFactory = OpEvmFactory,
> {
    /// Inner [`OpBlockExecutorFactory`].
    pub executor_factory: OpBlockExecutorFactory<R, Arc<ChainSpec>, EvmFactory>,
    /// Optimism block assembler.
    pub block_assembler: OpBlockAssembler<ChainSpec>,
    #[doc(hidden)]
    pub _pd: core::marker::PhantomData<N>,
}

impl<ChainSpec, N: NodePrimitives, R: Clone, EvmFactory: Clone> Clone
    for OpEvmConfig<ChainSpec, N, R, EvmFactory>
{
    fn clone(&self) -> Self {
        Self {
            executor_factory: self.executor_factory.clone(),
            block_assembler: self.block_assembler.clone(),
            _pd: self._pd,
        }
    }
}

impl<ChainSpec: OpHardforks> OpEvmConfig<ChainSpec> {
    /// Creates a new [`OpEvmConfig`] with the given chain spec for OP chains.
    pub fn optimism(chain_spec: Arc<ChainSpec>) -> Self {
        Self::new(chain_spec, OpRethReceiptBuilder::default())
    }
}

impl<ChainSpec: OpHardforks, N: NodePrimitives, R> OpEvmConfig<ChainSpec, N, R> {
    /// Creates a new [`OpEvmConfig`] with the given chain spec.
    pub fn new(chain_spec: Arc<ChainSpec>, receipt_builder: R) -> Self {
        Self {
            block_assembler: OpBlockAssembler::new(chain_spec.clone()),
            executor_factory: OpBlockExecutorFactory::new(
                receipt_builder,
                chain_spec,
                OpEvmFactory::default(),
            ),
            _pd: core::marker::PhantomData,
        }
    }
}

impl<ChainSpec, N, R, EvmFactory> OpEvmConfig<ChainSpec, N, R, EvmFactory>
where
    ChainSpec: OpHardforks,
    N: NodePrimitives,
{
    /// Returns the chain spec associated with this configuration.
    pub const fn chain_spec(&self) -> &Arc<ChainSpec> {
        self.executor_factory.spec()
    }
}
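
/// Illustrative sketch (hypothetical helper, not part of the removed file): constructing
/// the default OP EVM configuration and deriving the EVM environment for an existing
/// header. Assumes `reth_optimism_chainspec::OP_MAINNET`; any `Arc<OpChainSpec>` can be
/// passed to `OpEvmConfig::optimism` instead.
fn example_evm_env(header: &Header) -> EvmEnv<OpSpecId> {
    let evm_config = OpEvmConfig::optimism(reth_optimism_chainspec::OP_MAINNET.clone());
    // `ConfigureEvm::evm_env` picks the active `OpSpecId` from the header timestamp and
    // fills the block environment (number, basefee, gas limit, prevrandao, ...).
    evm_config.evm_env(header).expect("header yields a valid EVM environment")
}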

impl<ChainSpec, N, R, EvmF> ConfigureEvm for OpEvmConfig<ChainSpec, N, R, EvmF>
where
    ChainSpec: EthChainSpec<Header = Header> + OpHardforks,
    N: NodePrimitives<
        Receipt = R::Receipt,
        SignedTx = R::Transaction,
        BlockHeader = Header,
        BlockBody = alloy_consensus::BlockBody<R::Transaction>,
        Block = alloy_consensus::Block<R::Transaction>,
    >,
    OpTransaction<TxEnv>: FromRecoveredTx<N::SignedTx> + FromTxWithEncoded<N::SignedTx>,
    R: OpReceiptBuilder<Receipt: DepositReceipt, Transaction: SignedTransaction>,
    EvmF: EvmFactory<
        Tx: FromRecoveredTx<R::Transaction>
            + FromTxWithEncoded<R::Transaction>
            + TransactionEnv
            + OpTxEnv,
        Precompiles = PrecompilesMap,
        Spec = OpSpecId,
        BlockEnv = BlockEnv,
    > + Debug,
    Self: Send + Sync + Unpin + Clone + 'static,
{
    type Primitives = N;
    type Error = EIP1559ParamError;
    type NextBlockEnvCtx = OpNextBlockEnvAttributes;
    type BlockExecutorFactory = OpBlockExecutorFactory<R, Arc<ChainSpec>, EvmF>;
    type BlockAssembler = OpBlockAssembler<ChainSpec>;

    fn block_executor_factory(&self) -> &Self::BlockExecutorFactory {
        &self.executor_factory
    }

    fn block_assembler(&self) -> &Self::BlockAssembler {
        &self.block_assembler
    }

    fn evm_env(&self, header: &Header) -> Result<EvmEnv<OpSpecId>, Self::Error> {
        Ok(EvmEnv::for_op_block(header, self.chain_spec(), self.chain_spec().chain().id()))
    }

    fn next_evm_env(
        &self,
        parent: &Header,
        attributes: &Self::NextBlockEnvCtx,
    ) -> Result<EvmEnv<OpSpecId>, Self::Error> {
        Ok(EvmEnv::for_op_next_block(
            parent,
            NextEvmEnvAttributes {
                timestamp: attributes.timestamp,
                suggested_fee_recipient: attributes.suggested_fee_recipient,
                prev_randao: attributes.prev_randao,
                gas_limit: attributes.gas_limit,
            },
            self.chain_spec().next_block_base_fee(parent, attributes.timestamp).unwrap_or_default(),
            self.chain_spec(),
            self.chain_spec().chain().id(),
        ))
    }

    fn context_for_block(
        &self,
        block: &'_ SealedBlock<N::Block>,
    ) -> Result<OpBlockExecutionCtx, Self::Error> {
        Ok(OpBlockExecutionCtx {
            parent_hash: block.header().parent_hash(),
            parent_beacon_block_root: block.header().parent_beacon_block_root(),
            extra_data: block.header().extra_data().clone(),
        })
    }

    fn context_for_next_block(
        &self,
        parent: &SealedHeader<N::BlockHeader>,
        attributes: Self::NextBlockEnvCtx,
    ) -> Result<OpBlockExecutionCtx, Self::Error> {
        Ok(OpBlockExecutionCtx {
            parent_hash: parent.hash(),
            parent_beacon_block_root: attributes.parent_beacon_block_root,
            extra_data: attributes.extra_data,
        })
    }
}

#[cfg(feature = "std")]
impl<ChainSpec, N, R> ConfigureEngineEvm<OpExecutionData> for OpEvmConfig<ChainSpec, N, R>
where
    ChainSpec: EthChainSpec<Header = Header> + OpHardforks,
    N: NodePrimitives<
        Receipt = R::Receipt,
        SignedTx = R::Transaction,
        BlockHeader = Header,
        BlockBody = alloy_consensus::BlockBody<R::Transaction>,
        Block = alloy_consensus::Block<R::Transaction>,
    >,
    OpTransaction<TxEnv>: FromRecoveredTx<N::SignedTx> + FromTxWithEncoded<N::SignedTx>,
    R: OpReceiptBuilder<Receipt: DepositReceipt, Transaction: SignedTransaction>,
    Self: Send + Sync + Unpin + Clone + 'static,
{
    fn evm_env_for_payload(
        &self,
        payload: &OpExecutionData,
    ) -> Result<EvmEnvFor<Self>, Self::Error> {
        let timestamp = payload.payload.timestamp();
        let block_number = payload.payload.block_number();

        let spec = revm_spec_by_timestamp_after_bedrock(self.chain_spec(), timestamp);

        let cfg_env = CfgEnv::new()
            .with_chain_id(self.chain_spec().chain().id())
            .with_spec_and_mainnet_gas_params(spec);

        let blob_excess_gas_and_price = spec
            .into_eth_spec()
            .is_enabled_in(SpecId::CANCUN)
            .then_some(BlobExcessGasAndPrice { excess_blob_gas: 0, blob_gasprice: 1 });

        let block_env = BlockEnv {
            number: U256::from(block_number),
            beneficiary: payload.payload.as_v1().fee_recipient,
            timestamp: U256::from(timestamp),
            difficulty: if spec.into_eth_spec() >= SpecId::MERGE {
                U256::ZERO
            } else {
                payload.payload.as_v1().prev_randao.into()
            },
            prevrandao: (spec.into_eth_spec() >= SpecId::MERGE)
                .then(|| payload.payload.as_v1().prev_randao),
            gas_limit: payload.payload.as_v1().gas_limit,
            basefee: payload.payload.as_v1().base_fee_per_gas.to(),
            // EIP-4844 excess blob gas of this block, introduced in Cancun
            blob_excess_gas_and_price,
        };

        Ok(EvmEnv { cfg_env, block_env })
    }

    fn context_for_payload<'a>(
        &self,
        payload: &'a OpExecutionData,
    ) -> Result<ExecutionCtxFor<'a, Self>, Self::Error> {
        Ok(OpBlockExecutionCtx {
            parent_hash: payload.parent_hash(),
            parent_beacon_block_root: payload.sidecar.parent_beacon_block_root(),
            extra_data: payload.payload.as_v1().extra_data.clone(),
        })
    }

    fn tx_iterator_for_payload(
        &self,
        payload: &OpExecutionData,
    ) -> Result<impl ExecutableTxIterator<Self>, Self::Error> {
        let transactions = payload.payload.transactions().clone();
        let convert = |encoded: Bytes| {
            let tx = TxTy::<Self::Primitives>::decode_2718_exact(encoded.as_ref())
                .map_err(AnyError::new)?;
            let signer = tx.try_recover().map_err(AnyError::new)?;
            Ok::<_, AnyError>(WithEncoded::new(encoded, tx.with_signer(signer)))
        };

        Ok((transactions, convert))
    }
}
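
/// Illustrative sketch (hypothetical helper, not part of the removed file): how the
/// engine-facing methods above are meant to be used when a payload arrives over the
/// Engine API. `payload` is the `OpExecutionData` delivered by `engine_newPayload`; the
/// configuration derives everything needed to execute it without assembling a block first.
#[cfg(feature = "std")]
fn example_payload_setup(
    evm_config: &OpEvmConfig,
    payload: &OpExecutionData,
) -> Result<(), EIP1559ParamError> {
    // EVM environment (spec id, chain id, block env) taken straight from the payload fields.
    let _evm_env = evm_config.evm_env_for_payload(payload)?;
    // Execution context: parent hash, parent beacon block root and extra data.
    let _ctx = evm_config.context_for_payload(payload)?;
    // Transactions decoded from their raw EIP-2718 bytes with signers recovered.
    let _txs = evm_config.tx_iterator_for_payload(payload)?;
    Ok(())
}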
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use alloc::collections::BTreeMap;
|
||||
use alloy_consensus::{Header, Receipt};
|
||||
use alloy_eips::eip7685::Requests;
|
||||
use alloy_genesis::Genesis;
|
||||
use alloy_primitives::{
|
||||
bytes,
|
||||
map::{AddressMap, B256Map, HashMap},
|
||||
Address, LogData, B256,
|
||||
};
|
||||
use op_revm::OpSpecId;
|
||||
use reth_chainspec::ChainSpec;
|
||||
use reth_evm::execute::ProviderError;
|
||||
use reth_execution_types::{
|
||||
AccountRevertInit, BundleStateInit, Chain, ExecutionOutcome, RevertsInit,
|
||||
};
|
||||
use reth_optimism_chainspec::{OpChainSpec, BASE_MAINNET};
|
||||
use reth_optimism_primitives::{OpBlock, OpPrimitives, OpReceipt};
|
||||
use reth_primitives_traits::{Account, RecoveredBlock};
|
||||
use revm::{
|
||||
database::{BundleState, CacheDB},
|
||||
database_interface::EmptyDBTyped,
|
||||
inspector::NoOpInspector,
|
||||
primitives::Log,
|
||||
state::AccountInfo,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
|
||||
fn test_evm_config() -> OpEvmConfig {
|
||||
OpEvmConfig::optimism(BASE_MAINNET.clone())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fill_cfg_and_block_env() {
|
||||
// Create a default header
|
||||
let header = Header::default();
|
||||
|
||||
// Build the ChainSpec for Ethereum mainnet, activating London, Paris, and Shanghai
|
||||
// hardforks
|
||||
let chain_spec = ChainSpec::builder()
|
||||
.chain(0.into())
|
||||
.genesis(Genesis::default())
|
||||
.london_activated()
|
||||
.paris_activated()
|
||||
.shanghai_activated()
|
||||
.build();
|
||||
|
||||
// Use the `OpEvmConfig` to create the `cfg_env` and `block_env` based on the ChainSpec,
|
||||
// Header, and total difficulty
|
||||
let EvmEnv { cfg_env, .. } =
|
||||
OpEvmConfig::optimism(Arc::new(OpChainSpec { inner: chain_spec.clone() }))
|
||||
.evm_env(&header)
|
||||
.unwrap();
|
||||
|
||||
// Assert that the chain ID in the `cfg_env` is correctly set to the chain ID of the
|
||||
// ChainSpec
|
||||
assert_eq!(cfg_env.chain_id, chain_spec.chain().id());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_evm_with_env_default_spec() {
|
||||
let evm_config = test_evm_config();
|
||||
|
||||
let db = CacheDB::<EmptyDBTyped<ProviderError>>::default();
|
||||
|
||||
let evm_env = EvmEnv::default();
|
||||
|
||||
let evm = evm_config.evm_with_env(db, evm_env.clone());
|
||||
|
||||
// Check that the EVM environment
|
||||
assert_eq!(evm.cfg, evm_env.cfg_env);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_evm_with_env_custom_cfg() {
|
||||
let evm_config = test_evm_config();
|
||||
|
||||
let db = CacheDB::<EmptyDBTyped<ProviderError>>::default();
|
||||
|
||||
// Create a custom configuration environment with a chain ID of 111
|
||||
let cfg =
|
||||
CfgEnv::new().with_chain_id(111).with_spec_and_mainnet_gas_params(OpSpecId::default());
|
||||
|
||||
let evm_env = EvmEnv { cfg_env: cfg.clone(), ..Default::default() };
|
||||
|
||||
let evm = evm_config.evm_with_env(db, evm_env);
|
||||
|
||||
// Check that the EVM environment is initialized with the custom environment
|
||||
assert_eq!(evm.cfg, cfg);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_evm_with_env_custom_block_and_tx() {
|
||||
let evm_config = test_evm_config();
|
||||
|
||||
let db = CacheDB::<EmptyDBTyped<ProviderError>>::default();
|
||||
|
||||
// Create customs block and tx env
|
||||
let block = BlockEnv {
|
||||
basefee: 1000,
|
||||
gas_limit: 10_000_000,
|
||||
number: U256::from(42),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let evm_env = EvmEnv { block_env: block, ..Default::default() };
|
||||
|
||||
let evm = evm_config.evm_with_env(db, evm_env.clone());
|
||||
|
||||
// Verify that the block and transaction environments are set correctly
|
||||
assert_eq!(evm.block, evm_env.block_env);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_evm_with_spec_id() {
|
||||
let evm_config = test_evm_config();
|
||||
|
||||
let db = CacheDB::<EmptyDBTyped<ProviderError>>::default();
|
||||
|
||||
let evm_env = EvmEnv {
|
||||
cfg_env: CfgEnv::new().with_spec_and_mainnet_gas_params(OpSpecId::ECOTONE),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let evm = evm_config.evm_with_env(db, evm_env.clone());
|
||||
|
||||
assert_eq!(evm.cfg, evm_env.cfg_env);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_evm_with_env_and_default_inspector() {
|
||||
let evm_config = test_evm_config();
|
||||
let db = CacheDB::<EmptyDBTyped<ProviderError>>::default();
|
||||
|
||||
let evm_env = EvmEnv { cfg_env: Default::default(), ..Default::default() };
|
||||
|
||||
let evm = evm_config.evm_with_env_and_inspector(db, evm_env.clone(), NoOpInspector {});
|
||||
|
||||
// Check that the EVM environment is set to default values
|
||||
assert_eq!(evm.block, evm_env.block_env);
|
||||
assert_eq!(evm.cfg, evm_env.cfg_env);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_evm_with_env_inspector_and_custom_cfg() {
|
||||
let evm_config = test_evm_config();
|
||||
let db = CacheDB::<EmptyDBTyped<ProviderError>>::default();
|
||||
|
||||
let cfg =
|
||||
CfgEnv::new().with_chain_id(111).with_spec_and_mainnet_gas_params(OpSpecId::default());
|
||||
let block = BlockEnv::default();
|
||||
let evm_env = EvmEnv { block_env: block, cfg_env: cfg.clone() };
|
||||
|
||||
let evm = evm_config.evm_with_env_and_inspector(db, evm_env.clone(), NoOpInspector {});
|
||||
|
||||
// Check that the EVM environment is set with custom configuration
|
||||
assert_eq!(evm.cfg, cfg);
|
||||
assert_eq!(evm.block, evm_env.block_env);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_evm_with_env_inspector_and_custom_block_tx() {
|
||||
let evm_config = test_evm_config();
|
||||
let db = CacheDB::<EmptyDBTyped<ProviderError>>::default();
|
||||
|
||||
// Create custom block and tx environment
|
||||
let block = BlockEnv {
|
||||
basefee: 1000,
|
||||
gas_limit: 10_000_000,
|
||||
number: U256::from(42),
|
||||
..Default::default()
|
||||
};
|
||||
let evm_env = EvmEnv { block_env: block, ..Default::default() };
|
||||
|
||||
let evm = evm_config.evm_with_env_and_inspector(db, evm_env.clone(), NoOpInspector {});
|
||||
|
||||
// Verify that the block and transaction environments are set correctly
|
||||
assert_eq!(evm.block, evm_env.block_env);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_evm_with_env_inspector_and_spec_id() {
|
||||
let evm_config = test_evm_config();
|
||||
let db = CacheDB::<EmptyDBTyped<ProviderError>>::default();
|
||||
|
||||
let evm_env = EvmEnv {
|
||||
cfg_env: CfgEnv::new().with_spec_and_mainnet_gas_params(OpSpecId::ECOTONE),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let evm = evm_config.evm_with_env_and_inspector(db, evm_env.clone(), NoOpInspector {});
|
||||
|
||||
// Check that the spec ID is set properly
|
||||
assert_eq!(evm.cfg, evm_env.cfg_env);
|
||||
assert_eq!(evm.block, evm_env.block_env);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn receipts_by_block_hash() {
|
||||
// Create a default recovered block
|
||||
let block: RecoveredBlock<OpBlock> = Default::default();
|
||||
|
||||
// Define block hashes for block1 and block2
|
||||
let block1_hash = B256::new([0x01; 32]);
|
||||
let block2_hash = B256::new([0x02; 32]);
|
||||
|
||||
// Clone the default block into block1 and block2
|
||||
let mut block1 = block.clone();
|
||||
let mut block2 = block;
|
||||
|
||||
// Set the hashes of block1 and block2
|
||||
block1.set_block_number(10);
|
||||
block1.set_hash(block1_hash);
|
||||
|
||||
block2.set_block_number(11);
|
||||
block2.set_hash(block2_hash);
|
||||
|
||||
// Create a random receipt object, receipt1
|
||||
let receipt1 = OpReceipt::Legacy(Receipt::<Log> {
|
||||
cumulative_gas_used: 46913,
|
||||
logs: vec![],
|
||||
status: true.into(),
|
||||
});
|
||||
|
||||
// Create another random receipt object, receipt2
|
||||
let receipt2 = OpReceipt::Legacy(Receipt::<Log> {
|
||||
cumulative_gas_used: 1325345,
|
||||
logs: vec![],
|
||||
status: true.into(),
|
||||
});
|
||||
|
||||
// Create a Receipts object with a vector of receipt vectors
|
||||
let receipts = vec![vec![receipt1.clone()], vec![receipt2]];
|
||||
|
||||
// Create an ExecutionOutcome object with the created bundle, receipts, an empty requests
|
||||
// vector, and first_block set to 10
|
||||
let execution_outcome = ExecutionOutcome::<OpReceipt> {
|
||||
bundle: Default::default(),
|
||||
receipts,
|
||||
requests: vec![],
|
||||
first_block: 10,
|
||||
};
|
||||
|
||||
// Create a Chain object with a BTreeMap of blocks mapped to their block numbers,
|
||||
// including block1_hash and block2_hash, and the execution_outcome
|
||||
let chain: Chain<OpPrimitives> =
|
||||
Chain::new([block1, block2], execution_outcome.clone(), BTreeMap::new());
|
||||
|
||||
// Assert that the proper receipt vector is returned for block1_hash
|
||||
assert_eq!(chain.receipts_by_block_hash(block1_hash), Some(vec![&receipt1]));
|
||||
|
||||
// Create an ExecutionOutcome object with a single receipt vector containing receipt1
|
||||
let execution_outcome1 = ExecutionOutcome {
|
||||
bundle: Default::default(),
|
||||
receipts: vec![vec![receipt1]],
|
||||
requests: vec![],
|
||||
first_block: 10,
|
||||
};
|
||||
|
||||
// Assert that the execution outcome at the first block contains only the first receipt
|
||||
assert_eq!(chain.execution_outcome_at_block(10), Some(execution_outcome1));
|
||||
|
||||
// Assert that the execution outcome at the tip block contains the whole execution outcome
|
||||
assert_eq!(chain.execution_outcome_at_block(11), Some(execution_outcome));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_initialization() {
|
||||
// Create a new BundleState object with initial data
|
||||
let bundle = BundleState::new(
|
||||
vec![(Address::new([2; 20]), None, Some(AccountInfo::default()), HashMap::default())],
|
||||
vec![vec![(Address::new([2; 20]), None, vec![])]],
|
||||
vec![],
|
||||
);
|
||||
|
||||
// Create a Receipts object with a vector of receipt vectors
|
||||
let receipts = vec![vec![Some(OpReceipt::Legacy(Receipt::<Log> {
|
||||
cumulative_gas_used: 46913,
|
||||
logs: vec![],
|
||||
status: true.into(),
|
||||
}))]];
|
||||
|
||||
// Create a Requests object with a vector of requests
|
||||
let requests = vec![Requests::new(vec![bytes!("dead"), bytes!("beef"), bytes!("beebee")])];
|
||||
|
||||
// Define the first block number
|
||||
let first_block = 123;
|
||||
|
||||
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
|
||||
// first_block
|
||||
let exec_res = ExecutionOutcome {
|
||||
bundle: bundle.clone(),
|
||||
receipts: receipts.clone(),
|
||||
requests: requests.clone(),
|
||||
first_block,
|
||||
};
|
||||
|
||||
// Assert that creating a new ExecutionOutcome using the constructor matches exec_res
|
||||
assert_eq!(
|
||||
ExecutionOutcome::new(bundle, receipts.clone(), first_block, requests.clone()),
|
||||
exec_res
|
||||
);
|
||||
|
||||
// Create a BundleStateInit object and insert initial data
|
||||
let mut state_init: BundleStateInit = AddressMap::default();
|
||||
state_init
|
||||
.insert(Address::new([2; 20]), (None, Some(Account::default()), B256Map::default()));
|
||||
|
||||
// Create an AddressMap for account reverts and insert initial data
|
||||
let mut revert_inner: AddressMap<AccountRevertInit> = AddressMap::default();
|
||||
revert_inner.insert(Address::new([2; 20]), (None, vec![]));
|
||||
|
||||
// Create a RevertsInit object and insert the revert_inner data
|
||||
let mut revert_init: RevertsInit = HashMap::default();
|
||||
revert_init.insert(123, revert_inner);
|
||||
|
||||
// Assert that creating a new ExecutionOutcome using the new_init method matches
|
||||
// exec_res
|
||||
assert_eq!(
|
||||
ExecutionOutcome::new_init(
|
||||
state_init,
|
||||
revert_init,
|
||||
vec![],
|
||||
receipts,
|
||||
first_block,
|
||||
requests,
|
||||
),
|
||||
exec_res
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_block_number_to_index() {
|
||||
// Create a Receipts object with a vector of receipt vectors
|
||||
let receipts = vec![vec![Some(OpReceipt::Legacy(Receipt::<Log> {
|
||||
cumulative_gas_used: 46913,
|
||||
logs: vec![],
|
||||
status: true.into(),
|
||||
}))]];
|
||||
|
||||
// Define the first block number
|
||||
let first_block = 123;
|
||||
|
||||
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
|
||||
// first_block
|
||||
let exec_res = ExecutionOutcome {
|
||||
bundle: Default::default(),
|
||||
receipts,
|
||||
requests: vec![],
|
||||
first_block,
|
||||
};
|
||||
|
||||
// Test before the first block
|
||||
assert_eq!(exec_res.block_number_to_index(12), None);
|
||||
|
||||
// Test after the first block but index larger than receipts length
|
||||
assert_eq!(exec_res.block_number_to_index(133), None);
|
||||
|
||||
// Test after the first block
|
||||
assert_eq!(exec_res.block_number_to_index(123), Some(0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_logs() {
|
||||
// Create a Receipts object with a vector of receipt vectors
|
||||
let receipts = vec![vec![OpReceipt::Legacy(Receipt::<Log> {
|
||||
cumulative_gas_used: 46913,
|
||||
logs: vec![Log::<LogData>::default()],
|
||||
status: true.into(),
|
||||
})]];
|
||||
|
||||
// Define the first block number
|
||||
let first_block = 123;
|
||||
|
||||
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
|
||||
// first_block
|
||||
let exec_res = ExecutionOutcome {
|
||||
bundle: Default::default(),
|
||||
receipts,
|
||||
requests: vec![],
|
||||
first_block,
|
||||
};
|
||||
|
||||
// Get logs for block number 123
|
||||
let logs: Vec<&Log> = exec_res.logs(123).unwrap().collect();
|
||||
|
||||
// Assert that the logs match the expected logs
|
||||
assert_eq!(logs, vec![&Log::<LogData>::default()]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_receipts_by_block() {
|
||||
// Create a Receipts object with a vector of receipt vectors
|
||||
let receipts = vec![vec![Some(OpReceipt::Legacy(Receipt::<Log> {
|
||||
cumulative_gas_used: 46913,
|
||||
logs: vec![Log::<LogData>::default()],
|
||||
status: true.into(),
|
||||
}))]];
|
||||
|
||||
// Define the first block number
|
||||
let first_block = 123;
|
||||
|
||||
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
|
||||
// first_block
|
||||
let exec_res = ExecutionOutcome {
|
||||
bundle: Default::default(), // Default value for bundle
|
||||
receipts, // Include the created receipts
|
||||
requests: vec![], // Empty vector for requests
|
||||
first_block, // Set the first block number
|
||||
};
|
||||
|
||||
// Get receipts for block number 123 and convert the result into a vector
|
||||
let receipts_by_block: Vec<_> = exec_res.receipts_by_block(123).iter().collect();
|
||||
|
||||
// Assert that the receipts for block number 123 match the expected receipts
|
||||
assert_eq!(
|
||||
receipts_by_block,
|
||||
vec![&Some(OpReceipt::Legacy(Receipt::<Log> {
|
||||
cumulative_gas_used: 46913,
|
||||
logs: vec![Log::<LogData>::default()],
|
||||
status: true.into(),
|
||||
}))]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_receipts_len() {
|
||||
// Create a Receipts object with a vector of receipt vectors
|
||||
let receipts = vec![vec![Some(OpReceipt::Legacy(Receipt::<Log> {
|
||||
cumulative_gas_used: 46913,
|
||||
logs: vec![Log::<LogData>::default()],
|
||||
status: true.into(),
|
||||
}))]];
|
||||
|
||||
// Create an empty Receipts object
|
||||
let receipts_empty = vec![];
|
||||
|
||||
// Define the first block number
|
||||
let first_block = 123;
|
||||
|
||||
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
|
||||
// first_block
|
||||
let exec_res = ExecutionOutcome {
|
||||
bundle: Default::default(), // Default value for bundle
|
||||
receipts, // Include the created receipts
|
||||
requests: vec![], // Empty vector for requests
|
||||
first_block, // Set the first block number
|
||||
};
|
||||
|
||||
// Assert that the length of receipts in exec_res is 1
|
||||
assert_eq!(exec_res.len(), 1);
|
||||
|
||||
// Assert that exec_res is not empty
|
||||
assert!(!exec_res.is_empty());
|
||||
|
||||
// Create a ExecutionOutcome object with an empty Receipts object
|
||||
let exec_res_empty_receipts: ExecutionOutcome<OpReceipt> = ExecutionOutcome {
|
||||
bundle: Default::default(), // Default value for bundle
|
||||
receipts: receipts_empty, // Include the empty receipts
|
||||
requests: vec![], // Empty vector for requests
|
||||
first_block, // Set the first block number
|
||||
};
|
||||
|
||||
// Assert that the length of receipts in exec_res_empty_receipts is 0
|
||||
assert_eq!(exec_res_empty_receipts.len(), 0);
|
||||
|
||||
// Assert that exec_res_empty_receipts is empty
|
||||
assert!(exec_res_empty_receipts.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_revert_to() {
|
||||
// Create a random receipt object
|
||||
let receipt = OpReceipt::Legacy(Receipt::<Log> {
|
||||
cumulative_gas_used: 46913,
|
||||
logs: vec![],
|
||||
status: true.into(),
|
||||
});
|
||||
|
||||
// Create a Receipts object with a vector of receipt vectors
|
||||
let receipts = vec![vec![Some(receipt.clone())], vec![Some(receipt.clone())]];
|
||||
|
||||
// Define the first block number
|
||||
let first_block = 123;
|
||||
|
||||
// Create a request.
|
||||
let request = bytes!("deadbeef");
|
||||
|
||||
// Create a vector of Requests containing the request.
|
||||
let requests =
|
||||
vec![Requests::new(vec![request.clone()]), Requests::new(vec![request.clone()])];
|
||||
|
||||
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
|
||||
// first_block
|
||||
let mut exec_res =
|
||||
ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block };
|
||||
|
||||
// Assert that the revert_to method returns true when reverting to the initial block number.
|
||||
assert!(exec_res.revert_to(123));
|
||||
|
||||
// Assert that the receipts are properly cut after reverting to the initial block number.
|
||||
assert_eq!(exec_res.receipts, vec![vec![Some(receipt)]]);
|
||||
|
||||
// Assert that the requests are properly cut after reverting to the initial block number.
|
||||
assert_eq!(exec_res.requests, vec![Requests::new(vec![request])]);
|
||||
|
||||
// Assert that the revert_to method returns false when attempting to revert to a block
|
||||
// number greater than the initial block number.
|
||||
assert!(!exec_res.revert_to(133));
|
||||
|
||||
// Assert that the revert_to method returns false when attempting to revert to a block
|
||||
// number less than the initial block number.
|
||||
assert!(!exec_res.revert_to(10));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extend_execution_outcome() {
|
||||
// Create a Receipt object with specific attributes.
|
||||
let receipt = OpReceipt::Legacy(Receipt::<Log> {
|
||||
cumulative_gas_used: 46913,
|
||||
logs: vec![],
|
||||
status: true.into(),
|
||||
});
|
||||
|
||||
// Create a Receipts object containing the receipt.
|
||||
let receipts = vec![vec![Some(receipt.clone())]];
|
||||
|
||||
// Create a request.
|
||||
let request = bytes!("deadbeef");
|
||||
|
||||
// Create a vector of Requests containing the request.
|
||||
let requests = vec![Requests::new(vec![request.clone()])];
|
||||
|
||||
// Define the initial block number.
|
||||
let first_block = 123;
|
||||
|
||||
// Create an ExecutionOutcome object.
|
||||
let mut exec_res =
|
||||
ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block };
|
||||
|
||||
// Extend the ExecutionOutcome object by itself.
|
||||
exec_res.extend(exec_res.clone());
|
||||
|
||||
// Assert the extended ExecutionOutcome matches the expected outcome.
|
||||
assert_eq!(
|
||||
exec_res,
|
||||
ExecutionOutcome {
|
||||
bundle: Default::default(),
|
||||
receipts: vec![vec![Some(receipt.clone())], vec![Some(receipt)]],
|
||||
requests: vec![Requests::new(vec![request.clone()]), Requests::new(vec![request])],
|
||||
first_block: 123,
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_split_at_execution_outcome() {
|
||||
// Create a random receipt object
|
||||
let receipt = OpReceipt::Legacy(Receipt::<Log> {
|
||||
cumulative_gas_used: 46913,
|
||||
logs: vec![],
|
||||
status: true.into(),
|
||||
});
|
||||
|
||||
// Create a Receipts object with a vector of receipt vectors
|
||||
let receipts = vec![
|
||||
vec![Some(receipt.clone())],
|
||||
vec![Some(receipt.clone())],
|
||||
vec![Some(receipt.clone())],
|
||||
];
|
||||
|
||||
// Define the first block number
|
||||
let first_block = 123;
|
||||
|
||||
// Create a request.
|
||||
let request = bytes!("deadbeef");
|
||||
|
||||
// Create a vector of Requests containing the request.
|
||||
let requests = vec![
|
||||
Requests::new(vec![request.clone()]),
|
||||
Requests::new(vec![request.clone()]),
|
||||
Requests::new(vec![request.clone()]),
|
||||
];
|
||||
|
||||
// Create a ExecutionOutcome object with the created bundle, receipts, requests, and
|
||||
// first_block
|
||||
let exec_res =
|
||||
ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block };
|
||||
|
||||
// Split the ExecutionOutcome at block number 124
|
||||
let result = exec_res.clone().split_at(124);
|
||||
|
||||
// Define the expected lower ExecutionOutcome after splitting
|
||||
let lower_execution_outcome = ExecutionOutcome {
|
||||
bundle: Default::default(),
|
||||
receipts: vec![vec![Some(receipt.clone())]],
|
||||
requests: vec![Requests::new(vec![request.clone()])],
|
||||
first_block,
|
||||
};
|
||||
|
||||
// Define the expected higher ExecutionOutcome after splitting
|
||||
let higher_execution_outcome = ExecutionOutcome {
|
||||
bundle: Default::default(),
|
||||
receipts: vec![vec![Some(receipt.clone())], vec![Some(receipt)]],
|
||||
requests: vec![Requests::new(vec![request.clone()]), Requests::new(vec![request])],
|
||||
first_block: 124,
|
||||
};
|
||||
|
||||
// Assert that the split result matches the expected lower and higher outcomes
|
||||
assert_eq!(result.0, Some(lower_execution_outcome));
|
||||
assert_eq!(result.1, higher_execution_outcome);
|
||||
|
||||
// Assert that splitting at the first block number returns None for the lower outcome
|
||||
assert_eq!(exec_res.clone().split_at(123), (None, exec_res));
|
||||
}
|
||||
}
|
||||
@@ -1,47 +0,0 @@
use alloy_consensus::{Eip658Value, Receipt};
use alloy_evm::eth::receipt_builder::ReceiptBuilderCtx;
use alloy_op_evm::block::receipt_builder::OpReceiptBuilder;
use op_alloy_consensus::{OpDepositReceipt, OpTxType};
use reth_evm::Evm;
use reth_optimism_primitives::{OpReceipt, OpTransactionSigned};

/// A builder that operates on op-reth primitive types, specifically [`OpTransactionSigned`] and
/// [`OpReceipt`].
#[derive(Debug, Default, Clone, Copy)]
#[non_exhaustive]
pub struct OpRethReceiptBuilder;

impl OpReceiptBuilder for OpRethReceiptBuilder {
    type Transaction = OpTransactionSigned;
    type Receipt = OpReceipt;

    fn build_receipt<'a, E: Evm>(
        &self,
        ctx: ReceiptBuilderCtx<'a, OpTxType, E>,
    ) -> Result<Self::Receipt, ReceiptBuilderCtx<'a, OpTxType, E>> {
        match ctx.tx_type {
            OpTxType::Deposit => Err(ctx),
            ty => {
                let receipt = Receipt {
                    // Success flag was added in `EIP-658: Embedding transaction status code in
                    // receipts`.
                    status: Eip658Value::Eip658(ctx.result.is_success()),
                    cumulative_gas_used: ctx.cumulative_gas_used,
                    logs: ctx.result.into_logs(),
                };

                Ok(match ty {
                    OpTxType::Legacy => OpReceipt::Legacy(receipt),
                    OpTxType::Eip1559 => OpReceipt::Eip1559(receipt),
                    OpTxType::Eip2930 => OpReceipt::Eip2930(receipt),
                    OpTxType::Eip7702 => OpReceipt::Eip7702(receipt),
                    OpTxType::Deposit => unreachable!(),
                })
            }
        }
    }

    fn build_deposit_receipt(&self, inner: OpDepositReceipt) -> Self::Receipt {
        OpReceipt::Deposit(inner)
    }
}
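
/// Illustrative sketch (hypothetical helper, not part of the removed file): how an
/// executor is expected to drive the builder above. Non-deposit transactions become a
/// plain receipt directly; for deposits, `build_receipt` hands the context back so the
/// caller can first assemble an `OpDepositReceipt` (deposit nonce, receipt version) and
/// finish via `build_deposit_receipt`.
fn example_build_receipt<'a, E: Evm>(
    builder: &OpRethReceiptBuilder,
    ctx: ReceiptBuilderCtx<'a, OpTxType, E>,
    build_deposit: impl FnOnce(ReceiptBuilderCtx<'a, OpTxType, E>) -> OpDepositReceipt,
) -> OpReceipt {
    match builder.build_receipt(ctx) {
        // Legacy / EIP-2930 / EIP-1559 / EIP-7702 receipts are built directly.
        Ok(receipt) => receipt,
        // Deposit transactions need the deposit-specific fields filled in by the caller.
        Err(ctx) => builder.build_deposit_receipt(build_deposit(ctx)),
    }
}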
|
||||
@@ -1,71 +0,0 @@
|
||||
[package]
|
||||
name = "example-custom-node"
|
||||
version = "0.0.0"
|
||||
publish = false
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# reth
|
||||
reth-codecs.workspace = true
|
||||
reth-network-peers.workspace = true
|
||||
reth-node-builder.workspace = true
|
||||
reth-optimism-forks.workspace = true
|
||||
reth-optimism-flashblocks.workspace = true
|
||||
reth-db-api.workspace = true
|
||||
reth-op = { workspace = true, features = ["node", "pool", "rpc"] }
|
||||
reth-payload-builder.workspace = true
|
||||
reth-rpc-api.workspace = true
|
||||
reth-engine-primitives.workspace = true
|
||||
reth-rpc-engine-api.workspace = true
|
||||
reth-ethereum = { workspace = true, features = ["node-api", "network", "evm", "pool", "trie", "storage-api", "provider"] }
|
||||
|
||||
# revm
|
||||
revm.workspace = true
|
||||
revm-primitives.workspace = true
|
||||
|
||||
# alloy
|
||||
alloy-consensus = { workspace = true, features = ["serde"] }
|
||||
alloy-eips.workspace = true
|
||||
alloy-evm.workspace = true
|
||||
alloy-genesis.workspace = true
|
||||
alloy-op-evm.workspace = true
|
||||
alloy-primitives.workspace = true
|
||||
alloy-rlp.workspace = true
|
||||
alloy-serde.workspace = true
|
||||
alloy-network.workspace = true
|
||||
alloy-rpc-types-engine.workspace = true
|
||||
alloy-rpc-types-eth.workspace = true
|
||||
op-alloy-consensus.workspace = true
|
||||
op-alloy-rpc-types-engine.workspace = true
|
||||
op-alloy-rpc-types.workspace = true
|
||||
op-revm.workspace = true
|
||||
|
||||
# misc
|
||||
async-trait.workspace = true
|
||||
derive_more.workspace = true
|
||||
eyre.workspace = true
|
||||
jsonrpsee.workspace = true
|
||||
serde.workspace = true
|
||||
thiserror.workspace = true
|
||||
modular-bitfield.workspace = true
|
||||
|
||||
[features]
|
||||
arbitrary = [
|
||||
"alloy-consensus/arbitrary",
|
||||
"alloy-eips/arbitrary",
|
||||
"alloy-primitives/arbitrary",
|
||||
"alloy-serde/arbitrary",
|
||||
"op-alloy-consensus/arbitrary",
|
||||
"op-alloy-rpc-types-engine/arbitrary",
|
||||
"reth-codecs/arbitrary",
|
||||
"reth-op/arbitrary",
|
||||
"revm-primitives/arbitrary",
|
||||
"revm/arbitrary",
|
||||
"reth-ethereum/arbitrary",
|
||||
"alloy-rpc-types-engine/arbitrary",
|
||||
"reth-db-api/arbitrary",
|
||||
"alloy-rpc-types-eth/arbitrary",
|
||||
"op-alloy-rpc-types/arbitrary",
|
||||
]
|
||||
default = []
|
||||
@@ -1,117 +0,0 @@
|
||||
use crate::primitives::CustomHeader;
|
||||
use alloy_genesis::Genesis;
|
||||
use reth_ethereum::{
|
||||
chainspec::{EthChainSpec, EthereumHardforks, Hardfork, Hardforks},
|
||||
primitives::SealedHeader,
|
||||
};
|
||||
use reth_network_peers::NodeRecord;
|
||||
use reth_op::chainspec::OpChainSpec;
|
||||
use reth_optimism_forks::OpHardforks;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CustomChainSpec {
|
||||
inner: OpChainSpec,
|
||||
genesis_header: SealedHeader<CustomHeader>,
|
||||
}
|
||||
|
||||
impl CustomChainSpec {
|
||||
pub const fn inner(&self) -> &OpChainSpec {
|
||||
&self.inner
|
||||
}
|
||||
}
|
||||
|
||||
impl Hardforks for CustomChainSpec {
|
||||
fn fork<H: Hardfork>(&self, fork: H) -> reth_ethereum::chainspec::ForkCondition {
|
||||
self.inner.fork(fork)
|
||||
}
|
||||
|
||||
fn forks_iter(
|
||||
&self,
|
||||
) -> impl Iterator<Item = (&dyn Hardfork, reth_ethereum::chainspec::ForkCondition)> {
|
||||
self.inner.forks_iter()
|
||||
}
|
||||
|
||||
fn fork_id(&self, head: &reth_ethereum::chainspec::Head) -> reth_ethereum::chainspec::ForkId {
|
||||
self.inner.fork_id(head)
|
||||
}
|
||||
|
||||
fn latest_fork_id(&self) -> reth_ethereum::chainspec::ForkId {
|
||||
self.inner.latest_fork_id()
|
||||
}
|
||||
|
||||
fn fork_filter(
|
||||
&self,
|
||||
head: reth_ethereum::chainspec::Head,
|
||||
) -> reth_ethereum::chainspec::ForkFilter {
|
||||
self.inner.fork_filter(head)
|
||||
}
|
||||
}
|
||||
|
||||
impl EthChainSpec for CustomChainSpec {
|
||||
type Header = CustomHeader;
|
||||
|
||||
fn chain(&self) -> reth_ethereum::chainspec::Chain {
|
||||
self.inner.chain()
|
||||
}
|
||||
|
||||
fn base_fee_params_at_timestamp(
|
||||
&self,
|
||||
timestamp: u64,
|
||||
) -> reth_ethereum::chainspec::BaseFeeParams {
|
||||
self.inner.base_fee_params_at_timestamp(timestamp)
|
||||
}
|
||||
|
||||
fn blob_params_at_timestamp(&self, timestamp: u64) -> Option<alloy_eips::eip7840::BlobParams> {
|
||||
self.inner.blob_params_at_timestamp(timestamp)
|
||||
}
|
||||
|
||||
fn deposit_contract(&self) -> Option<&reth_ethereum::chainspec::DepositContract> {
|
||||
self.inner.deposit_contract()
|
||||
}
|
||||
|
||||
fn genesis_hash(&self) -> revm_primitives::B256 {
|
||||
self.genesis_header.hash()
|
||||
}
|
||||
|
||||
fn prune_delete_limit(&self) -> usize {
|
||||
self.inner.prune_delete_limit()
|
||||
}
|
||||
|
||||
fn display_hardforks(&self) -> Box<dyn std::fmt::Display> {
|
||||
self.inner.display_hardforks()
|
||||
}
|
||||
|
||||
fn genesis_header(&self) -> &Self::Header {
|
||||
&self.genesis_header
|
||||
}
|
||||
|
||||
fn genesis(&self) -> &Genesis {
|
||||
self.inner.genesis()
|
||||
}
|
||||
|
||||
fn bootnodes(&self) -> Option<Vec<NodeRecord>> {
|
||||
self.inner.bootnodes()
|
||||
}
|
||||
|
||||
fn final_paris_total_difficulty(&self) -> Option<revm_primitives::U256> {
|
||||
self.inner.get_final_paris_total_difficulty()
|
||||
}
|
||||
}
|
||||
|
||||
impl EthereumHardforks for CustomChainSpec {
|
||||
fn ethereum_fork_activation(
|
||||
&self,
|
||||
fork: reth_ethereum::chainspec::EthereumHardfork,
|
||||
) -> reth_ethereum::chainspec::ForkCondition {
|
||||
self.inner.ethereum_fork_activation(fork)
|
||||
}
|
||||
}
|
||||
|
||||
impl OpHardforks for CustomChainSpec {
|
||||
fn op_fork_activation(
|
||||
&self,
|
||||
fork: reth_optimism_forks::OpHardfork,
|
||||
) -> reth_ethereum::chainspec::ForkCondition {
|
||||
self.inner.op_fork_activation(fork)
|
||||
}
|
||||
}
|
||||
@@ -1,335 +0,0 @@
|
||||
use crate::{
|
||||
chainspec::CustomChainSpec,
|
||||
evm::CustomEvmConfig,
|
||||
primitives::{CustomHeader, CustomNodePrimitives, CustomTransaction},
|
||||
CustomNode,
|
||||
};
|
||||
use alloy_eips::eip2718::WithEncoded;
|
||||
use alloy_primitives::Bytes;
|
||||
use op_alloy_rpc_types_engine::{OpExecutionData, OpExecutionPayload};
|
||||
use reth_engine_primitives::EngineApiValidator;
|
||||
use reth_ethereum::{
|
||||
node::api::{
|
||||
validate_version_specific_fields, AddOnsContext, BuiltPayload, BuiltPayloadExecutedBlock,
|
||||
EngineApiMessageVersion, EngineObjectValidationError, ExecutionPayload, FullNodeComponents,
|
||||
NewPayloadError, NodePrimitives, PayloadAttributes, PayloadBuilderAttributes,
|
||||
PayloadOrAttributes, PayloadTypes, PayloadValidator,
|
||||
},
|
||||
primitives::SealedBlock,
|
||||
storage::StateProviderFactory,
|
||||
trie::{KeccakKeyHasher, KeyHasher},
|
||||
};
|
||||
use reth_node_builder::{rpc::PayloadValidatorBuilder, InvalidPayloadAttributesError};
|
||||
use reth_op::node::{
|
||||
engine::OpEngineValidator, payload::OpAttributes, OpBuiltPayload, OpEngineTypes,
|
||||
OpPayloadAttributes, OpPayloadBuilderAttributes,
|
||||
};
|
||||
use revm_primitives::U256;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
use thiserror::Error;
|
||||
|
||||
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
|
||||
pub struct CustomPayloadTypes;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct CustomExecutionData {
|
||||
pub inner: OpExecutionData,
|
||||
pub extension: u64,
|
||||
}
|
||||
|
||||
impl ExecutionPayload for CustomExecutionData {
|
||||
fn parent_hash(&self) -> revm_primitives::B256 {
|
||||
self.inner.parent_hash()
|
||||
}
|
||||
|
||||
fn block_hash(&self) -> revm_primitives::B256 {
|
||||
self.inner.block_hash()
|
||||
}
|
||||
|
||||
fn block_number(&self) -> u64 {
|
||||
self.inner.block_number()
|
||||
}
|
||||
|
||||
fn withdrawals(&self) -> Option<&Vec<alloy_eips::eip4895::Withdrawal>> {
|
||||
None
|
||||
}
|
||||
|
||||
fn block_access_list(&self) -> Option<&Bytes> {
|
||||
None
|
||||
}
|
||||
|
||||
fn parent_beacon_block_root(&self) -> Option<revm_primitives::B256> {
|
||||
self.inner.parent_beacon_block_root()
|
||||
}
|
||||
|
||||
fn timestamp(&self) -> u64 {
|
||||
self.inner.timestamp()
|
||||
}
|
||||
|
||||
fn gas_used(&self) -> u64 {
|
||||
self.inner.gas_used()
|
||||
}
|
||||
|
||||
fn transaction_count(&self) -> usize {
|
||||
self.inner.payload.as_v1().transactions.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<&reth_optimism_flashblocks::FlashBlockCompleteSequence> for CustomExecutionData {
|
||||
type Error = &'static str;
|
||||
|
||||
fn try_from(
|
||||
sequence: &reth_optimism_flashblocks::FlashBlockCompleteSequence,
|
||||
) -> Result<Self, Self::Error> {
|
||||
let inner = OpExecutionData::try_from(sequence)?;
|
||||
Ok(Self { inner, extension: sequence.last().diff.gas_used })
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct CustomPayloadAttributes {
|
||||
#[serde(flatten)]
|
||||
inner: OpPayloadAttributes,
|
||||
extension: u64,
|
||||
}
|
||||
|
||||
impl PayloadAttributes for CustomPayloadAttributes {
|
||||
fn timestamp(&self) -> u64 {
|
||||
self.inner.timestamp()
|
||||
}
|
||||
|
||||
fn withdrawals(&self) -> Option<&Vec<alloy_eips::eip4895::Withdrawal>> {
|
||||
self.inner.withdrawals()
|
||||
}
|
||||
|
||||
fn parent_beacon_block_root(&self) -> Option<revm_primitives::B256> {
|
||||
self.inner.parent_beacon_block_root()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CustomPayloadBuilderAttributes {
|
||||
pub inner: OpPayloadBuilderAttributes<CustomTransaction>,
|
||||
pub extension: u64,
|
||||
}
|
||||
|
||||
impl PayloadBuilderAttributes for CustomPayloadBuilderAttributes {
|
||||
type RpcPayloadAttributes = CustomPayloadAttributes;
|
||||
type Error = alloy_rlp::Error;
|
||||
|
||||
fn try_new(
|
||||
parent: revm_primitives::B256,
|
||||
rpc_payload_attributes: Self::RpcPayloadAttributes,
|
||||
version: u8,
|
||||
) -> Result<Self, Self::Error>
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
let CustomPayloadAttributes { inner, extension } = rpc_payload_attributes;
|
||||
|
||||
Ok(Self { inner: OpPayloadBuilderAttributes::try_new(parent, inner, version)?, extension })
|
||||
}
|
||||
|
||||
fn payload_id(&self) -> alloy_rpc_types_engine::PayloadId {
|
||||
self.inner.payload_id()
|
||||
}
|
||||
|
||||
fn parent(&self) -> revm_primitives::B256 {
|
||||
self.inner.parent()
|
||||
}
|
||||
|
||||
fn timestamp(&self) -> u64 {
|
||||
self.inner.timestamp()
|
||||
}
|
||||
|
||||
fn parent_beacon_block_root(&self) -> Option<revm_primitives::B256> {
|
||||
self.inner.parent_beacon_block_root()
|
||||
}
|
||||
|
||||
fn suggested_fee_recipient(&self) -> revm_primitives::Address {
|
||||
self.inner.suggested_fee_recipient()
|
||||
}
|
||||
|
||||
fn prev_randao(&self) -> revm_primitives::B256 {
|
||||
self.inner.prev_randao()
|
||||
}
|
||||
|
||||
fn withdrawals(&self) -> &alloy_eips::eip4895::Withdrawals {
|
||||
self.inner.withdrawals()
|
||||
}
|
||||
}
|
||||
|
||||
impl OpAttributes for CustomPayloadBuilderAttributes {
|
||||
type Transaction = CustomTransaction;
|
||||
|
||||
fn no_tx_pool(&self) -> bool {
|
||||
self.inner.no_tx_pool
|
||||
}
|
||||
|
||||
fn sequencer_transactions(&self) -> &[WithEncoded<Self::Transaction>] {
|
||||
&self.inner.transactions
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CustomBuiltPayload(pub OpBuiltPayload<CustomNodePrimitives>);
|
||||
|
||||
impl BuiltPayload for CustomBuiltPayload {
|
||||
type Primitives = CustomNodePrimitives;
|
||||
|
||||
fn block(&self) -> &SealedBlock<<Self::Primitives as NodePrimitives>::Block> {
|
||||
self.0.block()
|
||||
}
|
||||
|
||||
fn fees(&self) -> U256 {
|
||||
self.0.fees()
|
||||
}
|
||||
|
||||
fn executed_block(&self) -> Option<BuiltPayloadExecutedBlock<Self::Primitives>> {
|
||||
self.0.executed_block()
|
||||
}
|
||||
|
||||
fn requests(&self) -> Option<alloy_eips::eip7685::Requests> {
|
||||
self.0.requests()
|
||||
}
|
||||
}
|
||||
|
||||
impl From<CustomBuiltPayload>
|
||||
for alloy_consensus::Block<<CustomNodePrimitives as NodePrimitives>::SignedTx>
|
||||
{
|
||||
fn from(value: CustomBuiltPayload) -> Self {
|
||||
value.0.into_sealed_block().into_block().map_header(|header| header.inner)
|
||||
}
|
||||
}
|
||||
|
||||
impl PayloadTypes for CustomPayloadTypes {
|
||||
type ExecutionData = CustomExecutionData;
|
||||
type BuiltPayload = OpBuiltPayload<CustomNodePrimitives>;
|
||||
type PayloadAttributes = CustomPayloadAttributes;
|
||||
type PayloadBuilderAttributes = CustomPayloadBuilderAttributes;
|
||||
|
||||
fn block_to_payload(
|
||||
block: SealedBlock<
|
||||
<<Self::BuiltPayload as BuiltPayload>::Primitives as NodePrimitives>::Block,
|
||||
>,
|
||||
) -> Self::ExecutionData {
|
||||
let extension = block.header().extension;
|
||||
let block_hash = block.hash();
|
||||
let block = block.into_block().map_header(|header| header.inner);
|
||||
let (payload, sidecar) = OpExecutionPayload::from_block_unchecked(block_hash, &block);
|
||||
CustomExecutionData { inner: OpExecutionData { payload, sidecar }, extension }
|
||||
}
|
||||
}
|
||||
|
||||
/// Custom engine validator
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CustomEngineValidator<P> {
|
||||
inner: OpEngineValidator<P, CustomTransaction, CustomChainSpec>,
|
||||
}
|
||||
|
||||
impl<P> CustomEngineValidator<P>
|
||||
where
|
||||
P: Send + Sync + Unpin + 'static,
|
||||
{
|
||||
/// Instantiates a new validator.
|
||||
pub fn new<KH: KeyHasher>(chain_spec: Arc<CustomChainSpec>, provider: P) -> Self {
|
||||
Self { inner: OpEngineValidator::new::<KH>(chain_spec, provider) }
|
||||
}
|
||||
|
||||
/// Returns the chain spec used by the validator.
|
||||
#[inline]
|
||||
fn chain_spec(&self) -> &CustomChainSpec {
|
||||
self.inner.chain_spec()
|
||||
}
|
||||
}
|
||||
|
||||
impl<P> PayloadValidator<CustomPayloadTypes> for CustomEngineValidator<P>
|
||||
where
|
||||
P: StateProviderFactory + Send + Sync + Unpin + 'static,
|
||||
{
|
||||
type Block = crate::primitives::block::Block;
|
||||
|
||||
fn validate_payload_attributes_against_header(
|
||||
&self,
|
||||
_attr: &CustomPayloadAttributes,
|
||||
_header: &<Self::Block as reth_ethereum::primitives::Block>::Header,
|
||||
) -> Result<(), InvalidPayloadAttributesError> {
|
||||
// skip default timestamp validation
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn convert_payload_to_block(
|
||||
&self,
|
||||
payload: CustomExecutionData,
|
||||
) -> Result<SealedBlock<Self::Block>, NewPayloadError> {
|
||||
let sealed_block = PayloadValidator::<OpEngineTypes>::convert_payload_to_block(
|
||||
&self.inner,
|
||||
payload.inner,
|
||||
)?;
|
||||
let (header, body) = sealed_block.split_sealed_header_body();
|
||||
let header = CustomHeader { inner: header.into_header(), extension: payload.extension };
|
||||
let body = body.map_ommers(|_| CustomHeader::default());
|
||||
Ok(SealedBlock::<Self::Block>::from_parts_unhashed(header, body))
|
||||
}
|
||||
}
|
||||
|
||||
impl<P> EngineApiValidator<CustomPayloadTypes> for CustomEngineValidator<P>
|
||||
where
|
||||
P: StateProviderFactory + Send + Sync + Unpin + 'static,
|
||||
{
|
||||
fn validate_version_specific_fields(
|
||||
&self,
|
||||
version: EngineApiMessageVersion,
|
||||
payload_or_attrs: PayloadOrAttributes<'_, CustomExecutionData, CustomPayloadAttributes>,
|
||||
) -> Result<(), EngineObjectValidationError> {
|
||||
validate_version_specific_fields(self.chain_spec(), version, payload_or_attrs)
|
||||
}
|
||||
|
||||
fn ensure_well_formed_attributes(
|
||||
&self,
|
||||
version: EngineApiMessageVersion,
|
||||
attributes: &CustomPayloadAttributes,
|
||||
) -> Result<(), EngineObjectValidationError> {
|
||||
validate_version_specific_fields(
|
||||
self.chain_spec(),
|
||||
version,
|
||||
PayloadOrAttributes::<CustomExecutionData, _>::PayloadAttributes(attributes),
|
||||
)?;
|
||||
|
||||
// custom validation logic - ensure that the custom field is not zero
|
||||
// if attributes.extension == 0 {
|
||||
// return Err(EngineObjectValidationError::invalid_params(
|
||||
// CustomError::CustomFieldIsNotZero,
|
||||
// ))
|
||||
// }
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Custom error type used in payload attributes validation
|
||||
#[derive(Debug, Error)]
|
||||
pub enum CustomError {
|
||||
#[error("Custom field is not zero")]
|
||||
CustomFieldIsNotZero,
|
||||
}
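// A hedged sketch of the check described by the commented-out block above, pulled into a
// free function so it could be unit tested on its own. It assumes only what that block
// already implies: `CustomPayloadAttributes` exposes a numeric `extension` field here.
#[allow(dead_code)]
fn ensure_extension_is_non_zero(
    attributes: &CustomPayloadAttributes,
) -> Result<(), EngineObjectValidationError> {
    if attributes.extension == 0 {
        return Err(EngineObjectValidationError::invalid_params(CustomError::CustomFieldIsNotZero))
    }
    Ok(())
}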
|
||||
|
||||
/// Custom engine validator builder
|
||||
#[derive(Debug, Default, Clone, Copy)]
|
||||
#[non_exhaustive]
|
||||
pub struct CustomEngineValidatorBuilder;
|
||||
|
||||
impl<N> PayloadValidatorBuilder<N> for CustomEngineValidatorBuilder
|
||||
where
|
||||
N: FullNodeComponents<Types = CustomNode, Evm = CustomEvmConfig>,
|
||||
{
|
||||
type Validator = CustomEngineValidator<N::Provider>;
|
||||
|
||||
async fn build(self, ctx: &AddOnsContext<'_, N>) -> eyre::Result<Self::Validator> {
|
||||
Ok(CustomEngineValidator::new::<KeccakKeyHasher>(
|
||||
ctx.config.chain.clone(),
|
||||
ctx.node.provider().clone(),
|
||||
))
|
||||
}
|
||||
}
|
||||
@@ -1,144 +0,0 @@
|
||||
use crate::{
|
||||
engine::{CustomExecutionData, CustomPayloadAttributes, CustomPayloadTypes},
|
||||
primitives::CustomNodePrimitives,
|
||||
CustomNode,
|
||||
};
|
||||
use alloy_rpc_types_engine::{
|
||||
ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus,
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use jsonrpsee::{core::RpcResult, proc_macros::rpc, RpcModule};
|
||||
use reth_ethereum::node::api::{
|
||||
AddOnsContext, ConsensusEngineHandle, EngineApiMessageVersion, FullNodeComponents,
|
||||
};
|
||||
use reth_node_builder::rpc::EngineApiBuilder;
|
||||
use reth_op::node::OpBuiltPayload;
|
||||
use reth_payload_builder::PayloadStore;
|
||||
use reth_rpc_api::IntoEngineApiRpcModule;
|
||||
use reth_rpc_engine_api::EngineApiError;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(serde::Deserialize)]
|
||||
pub struct CustomExecutionPayloadInput {}
|
||||
|
||||
#[derive(Clone, serde::Serialize)]
|
||||
pub struct CustomExecutionPayloadEnvelope {
|
||||
execution_payload: ExecutionPayloadV3,
|
||||
extension: u64,
|
||||
}
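// Note: with the plain `Serialize` derive above, the envelope is emitted as an
// `execution_payload` object (the standard V3 payload) plus a sibling `extension`
// number, which is how the custom field becomes visible to `getPayload` callers.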
|
||||
|
||||
impl From<OpBuiltPayload<CustomNodePrimitives>> for CustomExecutionPayloadEnvelope {
|
||||
fn from(value: OpBuiltPayload<CustomNodePrimitives>) -> Self {
|
||||
let sealed_block = value.into_sealed_block();
|
||||
let hash = sealed_block.hash();
|
||||
let extension = sealed_block.header().extension;
|
||||
let block = sealed_block.into_block();
|
||||
|
||||
Self {
|
||||
execution_payload: ExecutionPayloadV3::from_block_unchecked(hash, &block),
|
||||
extension,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[rpc(server, namespace = "engine")]
|
||||
pub trait CustomEngineApi {
|
||||
#[method(name = "newPayload")]
|
||||
async fn new_payload(&self, payload: CustomExecutionData) -> RpcResult<PayloadStatus>;
|
||||
|
||||
#[method(name = "forkchoiceUpdated")]
|
||||
async fn fork_choice_updated(
|
||||
&self,
|
||||
fork_choice_state: ForkchoiceState,
|
||||
payload_attributes: Option<CustomPayloadAttributes>,
|
||||
) -> RpcResult<ForkchoiceUpdated>;
|
||||
|
||||
#[method(name = "getPayload")]
|
||||
async fn get_payload(&self, payload_id: PayloadId)
|
||||
-> RpcResult<CustomExecutionPayloadEnvelope>;
|
||||
}
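// With `namespace = "engine"`, jsonrpsee exposes these methods as `engine_newPayload`,
// `engine_forkchoiceUpdated` and `engine_getPayload`, so this example's engine server
// speaks these unversioned custom methods rather than the standard versioned endpoints.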
|
||||
|
||||
pub struct CustomEngineApi {
|
||||
inner: Arc<CustomEngineApiInner>,
|
||||
}
|
||||
|
||||
struct CustomEngineApiInner {
|
||||
beacon_consensus: ConsensusEngineHandle<CustomPayloadTypes>,
|
||||
payload_store: PayloadStore<CustomPayloadTypes>,
|
||||
}
|
||||
|
||||
impl CustomEngineApiInner {
|
||||
fn new(
|
||||
beacon_consensus: ConsensusEngineHandle<CustomPayloadTypes>,
|
||||
payload_store: PayloadStore<CustomPayloadTypes>,
|
||||
) -> Self {
|
||||
Self { beacon_consensus, payload_store }
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl CustomEngineApiServer for CustomEngineApi {
|
||||
async fn new_payload(&self, payload: CustomExecutionData) -> RpcResult<PayloadStatus> {
|
||||
Ok(self
|
||||
.inner
|
||||
.beacon_consensus
|
||||
.new_payload(payload)
|
||||
.await
|
||||
.map_err(EngineApiError::NewPayload)?)
|
||||
}
|
||||
|
||||
async fn fork_choice_updated(
|
||||
&self,
|
||||
fork_choice_state: ForkchoiceState,
|
||||
payload_attributes: Option<CustomPayloadAttributes>,
|
||||
) -> RpcResult<ForkchoiceUpdated> {
|
||||
Ok(self
|
||||
.inner
|
||||
.beacon_consensus
|
||||
.fork_choice_updated(fork_choice_state, payload_attributes, EngineApiMessageVersion::V3)
|
||||
.await
|
||||
.map_err(EngineApiError::ForkChoiceUpdate)?)
|
||||
}
|
||||
|
||||
async fn get_payload(
|
||||
&self,
|
||||
payload_id: PayloadId,
|
||||
) -> RpcResult<CustomExecutionPayloadEnvelope> {
|
||||
Ok(self
|
||||
.inner
|
||||
.payload_store
|
||||
.resolve(payload_id)
|
||||
.await
|
||||
.ok_or(EngineApiError::UnknownPayload)?
|
||||
.map_err(|_| EngineApiError::UnknownPayload)?
|
||||
.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl IntoEngineApiRpcModule for CustomEngineApi
|
||||
where
|
||||
Self: CustomEngineApiServer,
|
||||
{
|
||||
fn into_rpc_module(self) -> RpcModule<()> {
|
||||
self.into_rpc().remove_context()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct CustomEngineApiBuilder {}
|
||||
|
||||
impl<N> EngineApiBuilder<N> for CustomEngineApiBuilder
|
||||
where
|
||||
N: FullNodeComponents<Types = CustomNode>,
|
||||
{
|
||||
type EngineApi = CustomEngineApi;
|
||||
|
||||
async fn build_engine_api(self, ctx: &AddOnsContext<'_, N>) -> eyre::Result<Self::EngineApi> {
|
||||
Ok(CustomEngineApi {
|
||||
inner: Arc::new(CustomEngineApiInner::new(
|
||||
ctx.beacon_engine_handle.clone(),
|
||||
PayloadStore::new(ctx.node.payload_builder_handle().clone()),
|
||||
)),
|
||||
})
|
||||
}
|
||||
}
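// The builder is not registered imperatively: `CustomNode::AddOns` in lib.rs names
// `CustomEngineApiBuilder` as a type parameter of `OpAddOns`, so `build_engine_api`
// runs when the node's add-ons are installed.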
|
||||
@@ -1,126 +0,0 @@
|
||||
use crate::evm::{CustomTxEnv, PaymentTxEnv};
|
||||
use alloy_evm::{precompiles::PrecompilesMap, Database, Evm, EvmEnv, EvmFactory};
|
||||
use alloy_op_evm::{OpEvm, OpEvmFactory};
|
||||
use alloy_primitives::{Address, Bytes};
|
||||
use op_revm::{
|
||||
precompiles::OpPrecompiles, L1BlockInfo, OpContext, OpHaltReason, OpSpecId, OpTransaction,
|
||||
OpTransactionError,
|
||||
};
|
||||
use reth_ethereum::evm::revm::{
|
||||
context::{result::ResultAndState, BlockEnv, CfgEnv},
|
||||
handler::PrecompileProvider,
|
||||
interpreter::InterpreterResult,
|
||||
Context, Inspector, Journal,
|
||||
};
|
||||
use revm::{context_interface::result::EVMError, inspector::NoOpInspector};
|
||||
use std::error::Error;
|
||||
|
||||
/// The EVM context that carries the data the EVM needs to execute a [`CustomTxEnv`].
|
||||
pub type CustomContext<DB> =
|
||||
Context<BlockEnv, OpTransaction<PaymentTxEnv>, CfgEnv<OpSpecId>, DB, Journal<DB>, L1BlockInfo>;
|
||||
|
||||
pub struct CustomEvm<DB: Database, I, P = OpPrecompiles> {
|
||||
inner: OpEvm<DB, I, P>,
|
||||
}
|
||||
|
||||
impl<DB: Database, I, P> CustomEvm<DB, I, P> {
|
||||
pub fn new(op: OpEvm<DB, I, P>) -> Self {
|
||||
Self { inner: op }
|
||||
}
|
||||
}
|
||||
|
||||
impl<DB, I, P> Evm for CustomEvm<DB, I, P>
|
||||
where
|
||||
DB: Database,
|
||||
I: Inspector<OpContext<DB>>,
|
||||
P: PrecompileProvider<OpContext<DB>, Output = InterpreterResult>,
|
||||
{
|
||||
type DB = DB;
|
||||
type Tx = CustomTxEnv;
|
||||
type Error = EVMError<DB::Error, OpTransactionError>;
|
||||
type HaltReason = OpHaltReason;
|
||||
type Spec = OpSpecId;
|
||||
type BlockEnv = BlockEnv;
|
||||
type Precompiles = P;
|
||||
type Inspector = I;
|
||||
|
||||
fn block(&self) -> &BlockEnv {
|
||||
self.inner.block()
|
||||
}
|
||||
|
||||
fn chain_id(&self) -> u64 {
|
||||
self.inner.chain_id()
|
||||
}
|
||||
|
||||
fn transact_raw(
|
||||
&mut self,
|
||||
tx: Self::Tx,
|
||||
) -> Result<ResultAndState<Self::HaltReason>, Self::Error> {
|
||||
match tx {
|
||||
CustomTxEnv::Op(tx) => self.inner.transact_raw(tx),
|
||||
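// Payment execution is intentionally left open in this example; a full
// implementation would lower `PaymentTxEnv` into something the wrapped
// `OpEvm` can run (or execute it with bespoke logic) instead of panicking.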
CustomTxEnv::Payment(..) => todo!(),
|
||||
}
|
||||
}
|
||||
|
||||
fn transact_system_call(
|
||||
&mut self,
|
||||
caller: Address,
|
||||
contract: Address,
|
||||
data: Bytes,
|
||||
) -> Result<ResultAndState<Self::HaltReason>, Self::Error> {
|
||||
self.inner.transact_system_call(caller, contract, data)
|
||||
}
|
||||
|
||||
fn finish(self) -> (Self::DB, EvmEnv<Self::Spec>) {
|
||||
self.inner.finish()
|
||||
}
|
||||
|
||||
fn set_inspector_enabled(&mut self, enabled: bool) {
|
||||
self.inner.set_inspector_enabled(enabled)
|
||||
}
|
||||
|
||||
fn components(&self) -> (&Self::DB, &Self::Inspector, &Self::Precompiles) {
|
||||
self.inner.components()
|
||||
}
|
||||
|
||||
fn components_mut(&mut self) -> (&mut Self::DB, &mut Self::Inspector, &mut Self::Precompiles) {
|
||||
self.inner.components_mut()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Debug, Clone, Copy)]
|
||||
pub struct CustomEvmFactory(pub OpEvmFactory);
|
||||
|
||||
impl CustomEvmFactory {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
}
|
||||
|
||||
impl EvmFactory for CustomEvmFactory {
|
||||
type Evm<DB: Database, I: Inspector<OpContext<DB>>> = CustomEvm<DB, I, Self::Precompiles>;
|
||||
type Context<DB: Database> = OpContext<DB>;
|
||||
type Tx = CustomTxEnv;
|
||||
type Error<DBError: Error + Send + Sync + 'static> = EVMError<DBError, OpTransactionError>;
|
||||
type HaltReason = OpHaltReason;
|
||||
type Spec = OpSpecId;
|
||||
type BlockEnv = BlockEnv;
|
||||
type Precompiles = PrecompilesMap;
|
||||
|
||||
fn create_evm<DB: Database>(
|
||||
&self,
|
||||
db: DB,
|
||||
input: EvmEnv<Self::Spec>,
|
||||
) -> Self::Evm<DB, NoOpInspector> {
|
||||
CustomEvm::new(self.0.create_evm(db, input))
|
||||
}
|
||||
|
||||
fn create_evm_with_inspector<DB: Database, I: Inspector<Self::Context<DB>>>(
|
||||
&self,
|
||||
db: DB,
|
||||
input: EvmEnv<Self::Spec>,
|
||||
inspector: I,
|
||||
) -> Self::Evm<DB, I> {
|
||||
CustomEvm::new(self.0.create_evm_with_inspector(db, input, inspector))
|
||||
}
|
||||
}
|
||||
@@ -1,41 +0,0 @@
|
||||
use crate::{
|
||||
chainspec::CustomChainSpec,
|
||||
evm::executor::CustomBlockExecutionCtx,
|
||||
primitives::{Block, CustomHeader, CustomTransaction},
|
||||
};
|
||||
use alloy_evm::block::{BlockExecutionError, BlockExecutorFactory};
|
||||
use reth_ethereum::{
|
||||
evm::primitives::execute::{BlockAssembler, BlockAssemblerInput},
|
||||
primitives::Receipt,
|
||||
};
|
||||
use reth_op::{node::OpBlockAssembler, DepositReceipt};
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct CustomBlockAssembler {
|
||||
block_assembler: OpBlockAssembler<CustomChainSpec>,
|
||||
}
|
||||
|
||||
impl CustomBlockAssembler {
|
||||
pub const fn new(chain_spec: Arc<CustomChainSpec>) -> Self {
|
||||
Self { block_assembler: OpBlockAssembler::new(chain_spec) }
|
||||
}
|
||||
}
|
||||
|
||||
impl<F> BlockAssembler<F> for CustomBlockAssembler
|
||||
where
|
||||
F: for<'a> BlockExecutorFactory<
|
||||
ExecutionCtx<'a> = CustomBlockExecutionCtx,
|
||||
Transaction = CustomTransaction,
|
||||
Receipt: Receipt + DepositReceipt,
|
||||
>,
|
||||
{
|
||||
type Block = Block;
|
||||
|
||||
fn assemble_block(
|
||||
&self,
|
||||
input: BlockAssemblerInput<'_, '_, F, CustomHeader>,
|
||||
) -> Result<Self::Block, BlockExecutionError> {
|
||||
Ok(self.block_assembler.assemble_block(input)?.map_header(From::from))
|
||||
}
|
||||
}
|
||||
@@ -1,22 +0,0 @@
|
||||
use crate::{chainspec::CustomChainSpec, evm::CustomEvmConfig, primitives::CustomNodePrimitives};
|
||||
use reth_ethereum::node::api::FullNodeTypes;
|
||||
use reth_node_builder::{components::ExecutorBuilder, BuilderContext, NodeTypes};
|
||||
use std::{future, future::Future};
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
#[non_exhaustive]
|
||||
pub struct CustomExecutorBuilder;
|
||||
|
||||
impl<Node: FullNodeTypes> ExecutorBuilder<Node> for CustomExecutorBuilder
|
||||
where
|
||||
Node::Types: NodeTypes<ChainSpec = CustomChainSpec, Primitives = CustomNodePrimitives>,
|
||||
{
|
||||
type EVM = CustomEvmConfig;
|
||||
|
||||
fn build_evm(
|
||||
self,
|
||||
ctx: &BuilderContext<Node>,
|
||||
) -> impl Future<Output = eyre::Result<Self::EVM>> + Send {
|
||||
future::ready(Ok(CustomEvmConfig::new(ctx.chain_spec())))
|
||||
}
|
||||
}
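// Wired into the node in lib.rs via `.executor(CustomExecutorBuilder::default())` on the
// `ComponentsBuilder`, which is what makes `CustomEvmConfig` the node's EVM.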
|
||||
@@ -1,180 +0,0 @@
|
||||
use crate::{
|
||||
chainspec::CustomChainSpec,
|
||||
engine::{CustomExecutionData, CustomPayloadBuilderAttributes},
|
||||
evm::{alloy::CustomEvmFactory, executor::CustomBlockExecutionCtx, CustomBlockAssembler},
|
||||
primitives::{Block, CustomHeader, CustomNodePrimitives, CustomTransaction},
|
||||
};
|
||||
use alloy_consensus::BlockHeader;
|
||||
use alloy_eips::{eip2718::WithEncoded, Decodable2718};
|
||||
use alloy_evm::EvmEnv;
|
||||
use alloy_op_evm::OpBlockExecutionCtx;
|
||||
use alloy_rpc_types_engine::PayloadError;
|
||||
use op_alloy_rpc_types_engine::flashblock::OpFlashblockPayloadBase;
|
||||
use op_revm::OpSpecId;
|
||||
use reth_engine_primitives::ExecutableTxIterator;
|
||||
use reth_ethereum::{
|
||||
chainspec::EthChainSpec,
|
||||
node::api::{BuildNextEnv, ConfigureEvm, PayloadBuilderError},
|
||||
primitives::{SealedBlock, SealedHeader},
|
||||
};
|
||||
use reth_node_builder::{ConfigureEngineEvm, NewPayloadError};
|
||||
use reth_op::{
|
||||
chainspec::OpHardforks,
|
||||
evm::primitives::{EvmEnvFor, ExecutionCtxFor},
|
||||
node::{OpEvmConfig, OpNextBlockEnvAttributes, OpRethReceiptBuilder},
|
||||
primitives::SignedTransaction,
|
||||
};
|
||||
use reth_rpc_api::eth::helpers::pending_block::BuildPendingEnv;
|
||||
use revm_primitives::Bytes;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CustomEvmConfig {
|
||||
pub(super) inner: OpEvmConfig,
|
||||
pub(super) block_assembler: CustomBlockAssembler,
|
||||
pub(super) custom_evm_factory: CustomEvmFactory,
|
||||
}
|
||||
|
||||
impl CustomEvmConfig {
|
||||
pub fn new(chain_spec: Arc<CustomChainSpec>) -> Self {
|
||||
Self {
|
||||
inner: OpEvmConfig::new(
|
||||
Arc::new(chain_spec.inner().clone()),
|
||||
OpRethReceiptBuilder::default(),
|
||||
),
|
||||
block_assembler: CustomBlockAssembler::new(chain_spec),
|
||||
custom_evm_factory: CustomEvmFactory::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ConfigureEvm for CustomEvmConfig {
|
||||
type Primitives = CustomNodePrimitives;
|
||||
type Error = <OpEvmConfig as ConfigureEvm>::Error;
|
||||
type NextBlockEnvCtx = CustomNextBlockEnvAttributes;
|
||||
type BlockExecutorFactory = Self;
|
||||
type BlockAssembler = CustomBlockAssembler;
|
||||
|
||||
fn block_executor_factory(&self) -> &Self::BlockExecutorFactory {
|
||||
self
|
||||
}
|
||||
|
||||
fn block_assembler(&self) -> &Self::BlockAssembler {
|
||||
&self.block_assembler
|
||||
}
|
||||
|
||||
fn evm_env(&self, header: &CustomHeader) -> Result<EvmEnv<OpSpecId>, Self::Error> {
|
||||
self.inner.evm_env(header)
|
||||
}
|
||||
|
||||
fn next_evm_env(
|
||||
&self,
|
||||
parent: &CustomHeader,
|
||||
attributes: &CustomNextBlockEnvAttributes,
|
||||
) -> Result<EvmEnv<OpSpecId>, Self::Error> {
|
||||
self.inner.next_evm_env(parent, &attributes.inner)
|
||||
}
|
||||
|
||||
fn context_for_block(
|
||||
&self,
|
||||
block: &SealedBlock<Block>,
|
||||
) -> Result<CustomBlockExecutionCtx, Self::Error> {
|
||||
Ok(CustomBlockExecutionCtx {
|
||||
inner: OpBlockExecutionCtx {
|
||||
parent_hash: block.header().parent_hash(),
|
||||
parent_beacon_block_root: block.header().parent_beacon_block_root(),
|
||||
extra_data: block.header().extra_data().clone(),
|
||||
},
|
||||
extension: block.extension,
|
||||
})
|
||||
}
|
||||
|
||||
fn context_for_next_block(
|
||||
&self,
|
||||
parent: &SealedHeader<CustomHeader>,
|
||||
attributes: Self::NextBlockEnvCtx,
|
||||
) -> Result<CustomBlockExecutionCtx, Self::Error> {
|
||||
Ok(CustomBlockExecutionCtx {
|
||||
inner: OpBlockExecutionCtx {
|
||||
parent_hash: parent.hash(),
|
||||
parent_beacon_block_root: attributes.inner.parent_beacon_block_root,
|
||||
extra_data: attributes.inner.extra_data,
|
||||
},
|
||||
extension: attributes.extension,
|
||||
})
|
||||
}
|
||||
}
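// The custom `extension` value threads through every execution context:
// `context_for_block` reads it from the sealed block's header, `context_for_next_block`
// takes it from the next-block attributes, and `context_for_payload` below recovers it
// from the incoming `CustomExecutionData`.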
|
||||
|
||||
impl ConfigureEngineEvm<CustomExecutionData> for CustomEvmConfig {
|
||||
fn evm_env_for_payload(
|
||||
&self,
|
||||
payload: &CustomExecutionData,
|
||||
) -> Result<EvmEnvFor<Self>, Self::Error> {
|
||||
self.inner.evm_env_for_payload(&payload.inner)
|
||||
}
|
||||
|
||||
fn context_for_payload<'a>(
|
||||
&self,
|
||||
payload: &'a CustomExecutionData,
|
||||
) -> Result<ExecutionCtxFor<'a, Self>, Self::Error> {
|
||||
Ok(CustomBlockExecutionCtx {
|
||||
inner: self.inner.context_for_payload(&payload.inner)?,
|
||||
extension: payload.extension,
|
||||
})
|
||||
}
|
||||
|
||||
fn tx_iterator_for_payload(
|
||||
&self,
|
||||
payload: &CustomExecutionData,
|
||||
) -> Result<impl ExecutableTxIterator<Self>, Self::Error> {
|
||||
let transactions = payload.inner.payload.transactions().clone();
|
||||
let convert = |encoded: Bytes| {
|
||||
let tx = CustomTransaction::decode_2718_exact(encoded.as_ref())
|
||||
.map_err(Into::into)
|
||||
.map_err(PayloadError::Decode)?;
|
||||
let signer = tx.try_recover().map_err(NewPayloadError::other)?;
|
||||
Ok::<_, NewPayloadError>(WithEncoded::new(encoded, tx.with_signer(signer)))
|
||||
};
|
||||
Ok((transactions, convert))
|
||||
}
|
||||
}
|
||||
|
||||
/// Additional parameters required for executing the next block of custom transactions.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CustomNextBlockEnvAttributes {
|
||||
inner: OpNextBlockEnvAttributes,
|
||||
extension: u64,
|
||||
}
|
||||
|
||||
impl From<OpFlashblockPayloadBase> for CustomNextBlockEnvAttributes {
|
||||
fn from(value: OpFlashblockPayloadBase) -> Self {
|
||||
Self { inner: value.into(), extension: 0 }
|
||||
}
|
||||
}
|
||||
|
||||
impl BuildPendingEnv<CustomHeader> for CustomNextBlockEnvAttributes {
|
||||
fn build_pending_env(parent: &SealedHeader<CustomHeader>) -> Self {
|
||||
Self {
|
||||
inner: OpNextBlockEnvAttributes::build_pending_env(parent),
|
||||
extension: parent.extension,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<H, ChainSpec> BuildNextEnv<CustomPayloadBuilderAttributes, H, ChainSpec>
|
||||
for CustomNextBlockEnvAttributes
|
||||
where
|
||||
H: BlockHeader,
|
||||
ChainSpec: EthChainSpec + OpHardforks,
|
||||
{
|
||||
fn build_next_env(
|
||||
attributes: &CustomPayloadBuilderAttributes,
|
||||
parent: &SealedHeader<H>,
|
||||
chain_spec: &ChainSpec,
|
||||
) -> Result<Self, PayloadBuilderError> {
|
||||
let inner =
|
||||
OpNextBlockEnvAttributes::build_next_env(&attributes.inner, parent, chain_spec)?;
|
||||
|
||||
Ok(CustomNextBlockEnvAttributes { inner, extension: attributes.extension })
|
||||
}
|
||||
}
|
||||
@@ -1,340 +0,0 @@
|
||||
use crate::primitives::{CustomTransaction, TxPayment};
|
||||
use alloy_eips::{eip2930::AccessList, Typed2718};
|
||||
use alloy_evm::{FromRecoveredTx, FromTxWithEncoded, IntoTxEnv};
|
||||
use alloy_op_evm::block::OpTxEnv;
|
||||
use alloy_primitives::{Address, Bytes, TxKind, B256, U256};
|
||||
use op_alloy_consensus::OpTxEnvelope;
|
||||
use op_revm::OpTransaction;
|
||||
use reth_ethereum::evm::{primitives::TransactionEnv, revm::context::TxEnv};
|
||||
|
||||
/// An Optimism transaction environment, extended with a [`PaymentTxEnv`] variant, that can be fed to [`Evm`].
|
||||
///
|
||||
/// [`Evm`]: alloy_evm::Evm
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum CustomTxEnv {
|
||||
Op(OpTransaction<TxEnv>),
|
||||
Payment(PaymentTxEnv),
|
||||
}
|
||||
|
||||
/// A transaction environment is a set of information related to an Ethereum transaction that can be
|
||||
/// fed to [`Evm`] for execution.
|
||||
///
|
||||
/// [`Evm`]: alloy_evm::Evm
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct PaymentTxEnv(pub TxEnv);
|
||||
|
||||
impl revm::context::Transaction for CustomTxEnv {
|
||||
type AccessListItem<'a>
|
||||
= <TxEnv as revm::context::Transaction>::AccessListItem<'a>
|
||||
where
|
||||
Self: 'a;
|
||||
type Authorization<'a>
|
||||
= <TxEnv as revm::context::Transaction>::Authorization<'a>
|
||||
where
|
||||
Self: 'a;
|
||||
|
||||
fn tx_type(&self) -> u8 {
|
||||
match self {
|
||||
Self::Op(tx) => tx.tx_type(),
|
||||
Self::Payment(tx) => tx.tx_type(),
|
||||
}
|
||||
}
|
||||
|
||||
fn caller(&self) -> Address {
|
||||
match self {
|
||||
Self::Op(tx) => tx.caller(),
|
||||
Self::Payment(tx) => tx.caller(),
|
||||
}
|
||||
}
|
||||
|
||||
fn gas_limit(&self) -> u64 {
|
||||
match self {
|
||||
Self::Op(tx) => tx.gas_limit(),
|
||||
Self::Payment(tx) => tx.gas_limit(),
|
||||
}
|
||||
}
|
||||
|
||||
fn value(&self) -> U256 {
|
||||
match self {
|
||||
Self::Op(tx) => tx.value(),
|
||||
Self::Payment(tx) => tx.value(),
|
||||
}
|
||||
}
|
||||
|
||||
fn input(&self) -> &Bytes {
|
||||
match self {
|
||||
Self::Op(tx) => tx.input(),
|
||||
Self::Payment(tx) => tx.input(),
|
||||
}
|
||||
}
|
||||
|
||||
fn nonce(&self) -> u64 {
|
||||
match self {
|
||||
Self::Op(tx) => revm::context::Transaction::nonce(tx),
|
||||
Self::Payment(tx) => revm::context::Transaction::nonce(tx),
|
||||
}
|
||||
}
|
||||
|
||||
fn kind(&self) -> TxKind {
|
||||
match self {
|
||||
Self::Op(tx) => tx.kind(),
|
||||
Self::Payment(tx) => tx.kind(),
|
||||
}
|
||||
}
|
||||
|
||||
fn chain_id(&self) -> Option<u64> {
|
||||
match self {
|
||||
Self::Op(tx) => tx.chain_id(),
|
||||
Self::Payment(tx) => tx.chain_id(),
|
||||
}
|
||||
}
|
||||
|
||||
fn gas_price(&self) -> u128 {
|
||||
match self {
|
||||
Self::Op(tx) => tx.gas_price(),
|
||||
Self::Payment(tx) => tx.gas_price(),
|
||||
}
|
||||
}
|
||||
|
||||
fn access_list(&self) -> Option<impl Iterator<Item = Self::AccessListItem<'_>>> {
|
||||
Some(match self {
|
||||
Self::Op(tx) => tx.base.access_list.iter(),
|
||||
Self::Payment(tx) => tx.0.access_list.iter(),
|
||||
})
|
||||
}
|
||||
|
||||
fn blob_versioned_hashes(&self) -> &[B256] {
|
||||
match self {
|
||||
Self::Op(tx) => tx.blob_versioned_hashes(),
|
||||
Self::Payment(tx) => tx.blob_versioned_hashes(),
|
||||
}
|
||||
}
|
||||
|
||||
fn max_fee_per_blob_gas(&self) -> u128 {
|
||||
match self {
|
||||
Self::Op(tx) => tx.max_fee_per_blob_gas(),
|
||||
Self::Payment(tx) => tx.max_fee_per_blob_gas(),
|
||||
}
|
||||
}
|
||||
|
||||
fn authorization_list_len(&self) -> usize {
|
||||
match self {
|
||||
Self::Op(tx) => tx.authorization_list_len(),
|
||||
Self::Payment(tx) => tx.authorization_list_len(),
|
||||
}
|
||||
}
|
||||
|
||||
fn authorization_list(&self) -> impl Iterator<Item = Self::Authorization<'_>> {
|
||||
match self {
|
||||
Self::Op(tx) => tx.base.authorization_list.iter(),
|
||||
Self::Payment(tx) => tx.0.authorization_list.iter(),
|
||||
}
|
||||
}
|
||||
|
||||
fn max_priority_fee_per_gas(&self) -> Option<u128> {
|
||||
match self {
|
||||
Self::Op(tx) => tx.max_priority_fee_per_gas(),
|
||||
Self::Payment(tx) => tx.max_priority_fee_per_gas(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl revm::context::Transaction for PaymentTxEnv {
|
||||
type AccessListItem<'a>
|
||||
= <TxEnv as revm::context::Transaction>::AccessListItem<'a>
|
||||
where
|
||||
Self: 'a;
|
||||
type Authorization<'a>
|
||||
= <TxEnv as revm::context::Transaction>::Authorization<'a>
|
||||
where
|
||||
Self: 'a;
|
||||
|
||||
fn tx_type(&self) -> u8 {
|
||||
self.0.tx_type()
|
||||
}
|
||||
|
||||
fn caller(&self) -> Address {
|
||||
self.0.caller()
|
||||
}
|
||||
|
||||
fn gas_limit(&self) -> u64 {
|
||||
self.0.gas_limit()
|
||||
}
|
||||
|
||||
fn value(&self) -> U256 {
|
||||
self.0.value()
|
||||
}
|
||||
|
||||
fn input(&self) -> &Bytes {
|
||||
self.0.input()
|
||||
}
|
||||
|
||||
fn nonce(&self) -> u64 {
|
||||
revm::context::Transaction::nonce(&self.0)
|
||||
}
|
||||
|
||||
fn kind(&self) -> TxKind {
|
||||
self.0.kind()
|
||||
}
|
||||
|
||||
fn chain_id(&self) -> Option<u64> {
|
||||
self.0.chain_id()
|
||||
}
|
||||
|
||||
fn gas_price(&self) -> u128 {
|
||||
self.0.gas_price()
|
||||
}
|
||||
|
||||
fn access_list(&self) -> Option<impl Iterator<Item = Self::AccessListItem<'_>>> {
|
||||
self.0.access_list()
|
||||
}
|
||||
|
||||
fn blob_versioned_hashes(&self) -> &[B256] {
|
||||
self.0.blob_versioned_hashes()
|
||||
}
|
||||
|
||||
fn max_fee_per_blob_gas(&self) -> u128 {
|
||||
self.0.max_fee_per_blob_gas()
|
||||
}
|
||||
|
||||
fn authorization_list_len(&self) -> usize {
|
||||
self.0.authorization_list_len()
|
||||
}
|
||||
|
||||
fn authorization_list(&self) -> impl Iterator<Item = Self::Authorization<'_>> {
|
||||
self.0.authorization_list()
|
||||
}
|
||||
|
||||
fn max_priority_fee_per_gas(&self) -> Option<u128> {
|
||||
self.0.max_priority_fee_per_gas()
|
||||
}
|
||||
}
|
||||
|
||||
impl TransactionEnv for PaymentTxEnv {
|
||||
fn set_gas_limit(&mut self, gas_limit: u64) {
|
||||
self.0.set_gas_limit(gas_limit);
|
||||
}
|
||||
|
||||
fn nonce(&self) -> u64 {
|
||||
self.0.nonce()
|
||||
}
|
||||
|
||||
fn set_nonce(&mut self, nonce: u64) {
|
||||
self.0.set_nonce(nonce);
|
||||
}
|
||||
|
||||
fn set_access_list(&mut self, access_list: AccessList) {
|
||||
self.0.set_access_list(access_list);
|
||||
}
|
||||
}
|
||||
|
||||
impl TransactionEnv for CustomTxEnv {
|
||||
fn set_gas_limit(&mut self, gas_limit: u64) {
|
||||
match self {
|
||||
Self::Op(tx) => tx.set_gas_limit(gas_limit),
|
||||
Self::Payment(tx) => tx.set_gas_limit(gas_limit),
|
||||
}
|
||||
}
|
||||
|
||||
fn nonce(&self) -> u64 {
|
||||
match self {
|
||||
Self::Op(tx) => tx.nonce(),
|
||||
Self::Payment(tx) => tx.nonce(),
|
||||
}
|
||||
}
|
||||
|
||||
fn set_nonce(&mut self, nonce: u64) {
|
||||
match self {
|
||||
Self::Op(tx) => tx.set_nonce(nonce),
|
||||
Self::Payment(tx) => tx.set_nonce(nonce),
|
||||
}
|
||||
}
|
||||
|
||||
fn set_access_list(&mut self, access_list: AccessList) {
|
||||
match self {
|
||||
Self::Op(tx) => tx.set_access_list(access_list),
|
||||
Self::Payment(tx) => tx.set_access_list(access_list),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl FromRecoveredTx<TxPayment> for TxEnv {
|
||||
fn from_recovered_tx(tx: &TxPayment, caller: Address) -> Self {
|
||||
let TxPayment {
|
||||
chain_id,
|
||||
nonce,
|
||||
gas_limit,
|
||||
max_fee_per_gas,
|
||||
max_priority_fee_per_gas,
|
||||
to,
|
||||
value,
|
||||
} = tx;
|
||||
Self {
|
||||
tx_type: tx.ty(),
|
||||
caller,
|
||||
gas_limit: *gas_limit,
|
||||
gas_price: *max_fee_per_gas,
|
||||
gas_priority_fee: Some(*max_priority_fee_per_gas),
|
||||
kind: TxKind::Call(*to),
|
||||
value: *value,
|
||||
nonce: *nonce,
|
||||
chain_id: Some(*chain_id),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl FromTxWithEncoded<TxPayment> for TxEnv {
|
||||
fn from_encoded_tx(tx: &TxPayment, sender: Address, _encoded: Bytes) -> Self {
|
||||
Self::from_recovered_tx(tx, sender)
|
||||
}
|
||||
}
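// A minimal, test-style sketch of the conversion above. It assumes only what the
// destructuring in `from_recovered_tx` already implies: `TxPayment` exposes exactly these
// seven fields here. The concrete numbers and `Address::ZERO` are placeholder values.
#[cfg(test)]
mod payment_tx_env_tests {
    use super::*;

    #[test]
    fn payment_tx_maps_into_tx_env() {
        let tx = TxPayment {
            chain_id: 10,
            nonce: 1,
            gas_limit: 21_000,
            max_fee_per_gas: 1_000_000_000,
            max_priority_fee_per_gas: 100_000_000,
            to: Address::ZERO,
            value: U256::from(1u64),
        };
        let env = TxEnv::from_recovered_tx(&tx, Address::ZERO);
        assert_eq!(env.kind, TxKind::Call(Address::ZERO));
        assert_eq!(env.nonce, 1);
        assert_eq!(env.chain_id, Some(10));
        assert_eq!(env.gas_limit, 21_000);
    }

    #[test]
    fn payment_tx_env_has_no_encoded_bytes() {
        // `OpTxEnv::encoded_bytes` (implemented further down in this module) only
        // forwards for the Op variant, so a payment transaction reports no raw encoding.
        let tx_env = CustomTxEnv::Payment(PaymentTxEnv::default());
        assert!(tx_env.encoded_bytes().is_none());
    }
}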
|
||||
|
||||
impl FromRecoveredTx<OpTxEnvelope> for CustomTxEnv {
|
||||
fn from_recovered_tx(tx: &OpTxEnvelope, sender: Address) -> Self {
|
||||
Self::Op(OpTransaction::from_recovered_tx(tx, sender))
|
||||
}
|
||||
}
|
||||
|
||||
impl FromTxWithEncoded<OpTxEnvelope> for CustomTxEnv {
|
||||
fn from_encoded_tx(tx: &OpTxEnvelope, sender: Address, encoded: Bytes) -> Self {
|
||||
Self::Op(OpTransaction::from_encoded_tx(tx, sender, encoded))
|
||||
}
|
||||
}
|
||||
|
||||
impl FromRecoveredTx<CustomTransaction> for CustomTxEnv {
|
||||
fn from_recovered_tx(tx: &CustomTransaction, sender: Address) -> Self {
|
||||
match tx {
|
||||
CustomTransaction::Op(tx) => Self::from_recovered_tx(tx, sender),
|
||||
CustomTransaction::Payment(tx) => {
|
||||
Self::Payment(PaymentTxEnv(TxEnv::from_recovered_tx(tx.tx(), sender)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl FromTxWithEncoded<CustomTransaction> for CustomTxEnv {
|
||||
fn from_encoded_tx(tx: &CustomTransaction, sender: Address, encoded: Bytes) -> Self {
|
||||
match tx {
|
||||
CustomTransaction::Op(tx) => Self::from_encoded_tx(tx, sender, encoded),
|
||||
CustomTransaction::Payment(tx) => {
|
||||
Self::Payment(PaymentTxEnv(TxEnv::from_encoded_tx(tx.tx(), sender, encoded)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl IntoTxEnv<Self> for CustomTxEnv {
|
||||
fn into_tx_env(self) -> Self {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl OpTxEnv for CustomTxEnv {
|
||||
fn encoded_bytes(&self) -> Option<&Bytes> {
|
||||
match self {
|
||||
Self::Op(tx) => tx.encoded_bytes(),
|
||||
Self::Payment(_) => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,120 +0,0 @@
|
||||
use crate::{
|
||||
evm::{
|
||||
alloy::{CustomEvm, CustomEvmFactory},
|
||||
CustomEvmConfig, CustomTxEnv,
|
||||
},
|
||||
primitives::CustomTransaction,
|
||||
};
|
||||
use alloy_consensus::transaction::Recovered;
|
||||
use alloy_evm::{
|
||||
block::{
|
||||
BlockExecutionError, BlockExecutionResult, BlockExecutor, BlockExecutorFactory,
|
||||
BlockExecutorFor, ExecutableTx, OnStateHook,
|
||||
},
|
||||
precompiles::PrecompilesMap,
|
||||
Database, Evm, RecoveredTx,
|
||||
};
|
||||
use alloy_op_evm::{block::OpTxResult, OpBlockExecutionCtx, OpBlockExecutor};
|
||||
use reth_ethereum::evm::primitives::InspectorFor;
|
||||
use reth_op::{chainspec::OpChainSpec, node::OpRethReceiptBuilder, OpReceipt, OpTxType};
|
||||
use revm::database::State;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub struct CustomBlockExecutor<Evm> {
|
||||
inner: OpBlockExecutor<Evm, OpRethReceiptBuilder, Arc<OpChainSpec>>,
|
||||
}
|
||||
|
||||
impl<'db, DB, E> BlockExecutor for CustomBlockExecutor<E>
|
||||
where
|
||||
DB: Database + 'db,
|
||||
E: Evm<DB = &'db mut State<DB>, Tx = CustomTxEnv>,
|
||||
{
|
||||
type Transaction = CustomTransaction;
|
||||
type Receipt = OpReceipt;
|
||||
type Evm = E;
|
||||
type Result = OpTxResult<E::HaltReason, OpTxType>;
|
||||
|
||||
fn apply_pre_execution_changes(&mut self) -> Result<(), BlockExecutionError> {
|
||||
self.inner.apply_pre_execution_changes()
|
||||
}
|
||||
|
||||
fn receipts(&self) -> &[Self::Receipt] {
|
||||
self.inner.receipts()
|
||||
}
|
||||
|
||||
fn execute_transaction_without_commit(
|
||||
&mut self,
|
||||
tx: impl ExecutableTx<Self>,
|
||||
) -> Result<Self::Result, BlockExecutionError> {
|
||||
let tx = tx.into_parts().1;
|
||||
match tx.tx() {
|
||||
CustomTransaction::Op(op_tx) => self
|
||||
.inner
|
||||
.execute_transaction_without_commit(Recovered::new_unchecked(op_tx, *tx.signer())),
|
||||
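// As in `CustomEvm::transact_raw`, payment transactions are not executable yet; a
// complete block executor would handle them here rather than panicking.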
CustomTransaction::Payment(..) => todo!(),
|
||||
}
|
||||
}
|
||||
|
||||
fn commit_transaction(&mut self, output: Self::Result) -> Result<u64, BlockExecutionError> {
|
||||
self.inner.commit_transaction(output)
|
||||
}
|
||||
|
||||
fn finish(self) -> Result<(Self::Evm, BlockExecutionResult<OpReceipt>), BlockExecutionError> {
|
||||
self.inner.finish()
|
||||
}
|
||||
|
||||
fn set_state_hook(&mut self, _hook: Option<Box<dyn OnStateHook>>) {
|
||||
self.inner.set_state_hook(_hook)
|
||||
}
|
||||
|
||||
fn evm_mut(&mut self) -> &mut Self::Evm {
|
||||
self.inner.evm_mut()
|
||||
}
|
||||
|
||||
fn evm(&self) -> &Self::Evm {
|
||||
self.inner.evm()
|
||||
}
|
||||
}
|
||||
|
||||
impl BlockExecutorFactory for CustomEvmConfig {
|
||||
type EvmFactory = CustomEvmFactory;
|
||||
type ExecutionCtx<'a> = CustomBlockExecutionCtx;
|
||||
type Transaction = CustomTransaction;
|
||||
type Receipt = OpReceipt;
|
||||
|
||||
fn evm_factory(&self) -> &Self::EvmFactory {
|
||||
&self.custom_evm_factory
|
||||
}
|
||||
|
||||
fn create_executor<'a, DB, I>(
|
||||
&'a self,
|
||||
evm: CustomEvm<&'a mut State<DB>, I, PrecompilesMap>,
|
||||
ctx: CustomBlockExecutionCtx,
|
||||
) -> impl BlockExecutorFor<'a, Self, DB, I>
|
||||
where
|
||||
DB: Database + 'a,
|
||||
I: InspectorFor<Self, &'a mut State<DB>> + 'a,
|
||||
{
|
||||
CustomBlockExecutor {
|
||||
inner: OpBlockExecutor::new(
|
||||
evm,
|
||||
ctx.inner,
|
||||
self.inner.chain_spec().clone(),
|
||||
*self.inner.executor_factory.receipt_builder(),
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Additional parameters for executing custom transactions.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CustomBlockExecutionCtx {
|
||||
pub inner: OpBlockExecutionCtx,
|
||||
pub extension: u64,
|
||||
}
|
||||
|
||||
impl From<CustomBlockExecutionCtx> for OpBlockExecutionCtx {
|
||||
fn from(value: CustomBlockExecutionCtx) -> Self {
|
||||
value.inner
|
||||
}
|
||||
}
|
||||
@@ -1,13 +0,0 @@
|
||||
mod alloy;
|
||||
mod assembler;
|
||||
mod builder;
|
||||
mod config;
|
||||
mod env;
|
||||
mod executor;
|
||||
|
||||
pub use alloy::{CustomContext, CustomEvm};
|
||||
pub use assembler::CustomBlockAssembler;
|
||||
pub use builder::CustomExecutorBuilder;
|
||||
pub use config::CustomEvmConfig;
|
||||
pub use env::{CustomTxEnv, PaymentTxEnv};
|
||||
pub use executor::CustomBlockExecutor;
|
||||
@@ -1,86 +0,0 @@
|
||||
//! This example shows how to implement a custom node.
|
||||
//!
|
||||
//! A node consists of:
|
||||
//! - primitives: block, header, transactions
|
||||
//! - components: network, pool, evm
|
||||
//! - engine: advances the node
|
||||
|
||||
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
|
||||
|
||||
use crate::{
|
||||
engine::{CustomEngineValidatorBuilder, CustomPayloadTypes},
|
||||
engine_api::CustomEngineApiBuilder,
|
||||
evm::CustomExecutorBuilder,
|
||||
pool::CustomPooledTransaction,
|
||||
primitives::CustomTransaction,
|
||||
rpc::CustomRpcTypes,
|
||||
};
|
||||
use chainspec::CustomChainSpec;
|
||||
use primitives::CustomNodePrimitives;
|
||||
use reth_ethereum::node::api::{FullNodeTypes, NodeTypes};
|
||||
use reth_node_builder::{
|
||||
components::{BasicPayloadServiceBuilder, ComponentsBuilder},
|
||||
Node, NodeAdapter,
|
||||
};
|
||||
use reth_op::{
|
||||
node::{
|
||||
node::{OpConsensusBuilder, OpNetworkBuilder, OpPayloadBuilder, OpPoolBuilder},
|
||||
txpool, OpAddOns, OpNode,
|
||||
},
|
||||
rpc::OpEthApiBuilder,
|
||||
};
|
||||
|
||||
pub mod chainspec;
|
||||
pub mod engine;
|
||||
pub mod engine_api;
|
||||
pub mod evm;
|
||||
pub mod pool;
|
||||
pub mod primitives;
|
||||
pub mod rpc;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CustomNode {
|
||||
inner: OpNode,
|
||||
}
|
||||
|
||||
impl NodeTypes for CustomNode {
|
||||
type Primitives = CustomNodePrimitives;
|
||||
type ChainSpec = CustomChainSpec;
|
||||
type Storage = <OpNode as NodeTypes>::Storage;
|
||||
type Payload = CustomPayloadTypes;
|
||||
}
|
||||
|
||||
impl<N> Node<N> for CustomNode
|
||||
where
|
||||
N: FullNodeTypes<Types = Self>,
|
||||
{
|
||||
type ComponentsBuilder = ComponentsBuilder<
|
||||
N,
|
||||
OpPoolBuilder<txpool::OpPooledTransaction<CustomTransaction, CustomPooledTransaction>>,
|
||||
BasicPayloadServiceBuilder<OpPayloadBuilder>,
|
||||
OpNetworkBuilder,
|
||||
CustomExecutorBuilder,
|
||||
OpConsensusBuilder,
|
||||
>;
|
||||
|
||||
type AddOns = OpAddOns<
|
||||
NodeAdapter<N>,
|
||||
OpEthApiBuilder<CustomRpcTypes>,
|
||||
CustomEngineValidatorBuilder,
|
||||
CustomEngineApiBuilder,
|
||||
>;
|
||||
|
||||
fn components_builder(&self) -> Self::ComponentsBuilder {
|
||||
ComponentsBuilder::default()
|
||||
.node_types::<N>()
|
||||
.pool(OpPoolBuilder::default())
|
||||
.executor(CustomExecutorBuilder::default())
|
||||
.payload(BasicPayloadServiceBuilder::new(OpPayloadBuilder::new(false)))
|
||||
.network(OpNetworkBuilder::new(false, false))
|
||||
.consensus(OpConsensusBuilder::default())
|
||||
}
|
||||
|
||||
fn add_ons(&self) -> Self::AddOns {
|
||||
self.inner.add_ons_builder().build()
|
||||
}
|
||||
}
|
||||