Merge branch 'main' into changes-to-enable-validation-api-extension

Matthias Seitz
2023-09-30 13:55:03 +02:00
454 changed files with 5862 additions and 7715 deletions

View File

@@ -62,7 +62,7 @@ jobs:
bench-success:
if: always()
name: bench success
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
steps:
# Note: This check is a dummy because we don't have any bench checks enabled.
- run: echo OK.

View File

@@ -8,7 +8,7 @@ on:
jobs:
test:
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
name: test
timeout-minutes: 60
@@ -31,7 +31,7 @@ jobs:
run: mdbook test
lint:
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
name: lint
timeout-minutes: 60
@@ -50,7 +50,7 @@ jobs:
run: mdbook-linkcheck --standalone
build:
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
@@ -69,6 +69,8 @@ jobs:
echo $(pwd)/mdbook-template >> $GITHUB_PATH
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
- name: Build book
run: mdbook build
@@ -111,7 +113,7 @@ jobs:
deploy:
# Only deploy if a push to main
if: github.ref_name == 'main' && github.event_name == 'push'
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
needs: [test, lint, build]
# Grant GITHUB_TOKEN the permissions required to make a Pages deployment

View File

@@ -17,6 +17,8 @@ jobs:
- uses: actions/checkout@v3
- uses: dtolnay/rust-toolchain@clippy
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
- run: cargo clippy --workspace --all-targets --all-features
env:
RUSTFLAGS: -D warnings
@@ -29,6 +31,8 @@ jobs:
- uses: actions/checkout@v3
- uses: dtolnay/rust-toolchain@nightly
- uses: Swatinem/rust-cache@v2
with:
cache-on-failure: true
- run: cargo docs --document-private-items
env:
# Keep in sync with ./book.yml:jobs.build

View File

@@ -18,7 +18,7 @@ concurrency: deny-${{ github.head_ref || github.run_id }}
jobs:
deny:
name: deny
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: EmbarkStudios/cargo-deny-action@v1

View File

@@ -64,7 +64,7 @@ jobs:
fuzz-success:
if: always()
name: fuzz success
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
needs: all
steps:
# Note: This check is a dummy because we currently have fuzz tests disabled.

View File

@@ -27,6 +27,7 @@ jobs:
with:
context: .
tags: paradigmxyz/reth:main
build-args: BUILD_PROFILE=hivetests
outputs: type=docker,dest=./artifacts/reth_image.tar
cache-from: type=gha
cache-to: type=gha,mode=max

View File

@@ -15,38 +15,42 @@ env:
jobs:
extract-version:
name: extract version
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
steps:
- name: Extract version
run: echo "VERSION=$(echo ${GITHUB_REF#refs/tags/})" >> $GITHUB_OUTPUT
id: extract_version
outputs:
VERSION: ${{ steps.extract_version.outputs.VERSION }}
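As an aside, the shell expansion in the extract-version step is just a prefix strip; the same logic expressed in Rust, with a hypothetical tag for illustration:

```rust
// Equivalent of `${GITHUB_REF#refs/tags/}`: recover the tag name from the
// full ref. "v0.1.0" is an illustrative tag, not a real release.
fn main() {
    let github_ref = "refs/tags/v0.1.0";
    let version = github_ref.strip_prefix("refs/tags/").unwrap_or(github_ref);
    assert_eq!(version, "v0.1.0");
}
```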
build:
name: build release
strategy:
matrix:
arch: [aarch64-unknown-linux-gnu,
x86_64-unknown-linux-gnu,
x86_64-apple-darwin,
aarch64-apple-darwin,
x86_64-pc-windows-gnu]
arch:
[
aarch64-unknown-linux-gnu,
x86_64-unknown-linux-gnu,
x86_64-apple-darwin,
aarch64-apple-darwin,
x86_64-pc-windows-gnu,
]
include:
- arch: aarch64-unknown-linux-gnu
platform: ubuntu-20.04
profile: maxperf
- arch: x86_64-unknown-linux-gnu
platform: ubuntu-20.04
profile: maxperf
- arch: x86_64-apple-darwin
platform: macos-latest
profile: maxperf
- arch: aarch64-apple-darwin
platform: macos-latest
profile: maxperf
- arch: x86_64-pc-windows-gnu
platform: ubuntu-20.04
profile: maxperf
- arch: aarch64-unknown-linux-gnu
platform: ubuntu-20.04
profile: maxperf
- arch: x86_64-unknown-linux-gnu
platform: ubuntu-20.04
profile: maxperf
- arch: x86_64-apple-darwin
platform: macos-latest
profile: maxperf
- arch: aarch64-apple-darwin
platform: macos-latest
profile: maxperf
- arch: x86_64-pc-windows-gnu
platform: ubuntu-20.04
profile: maxperf
runs-on: ${{ matrix.platform }}
needs: extract-version
@@ -76,18 +80,18 @@ jobs:
# ==============================
- name: Build reth for ${{ matrix.arch }}
run: |
run: |
cargo install cross
env PROFILE=${{ matrix.profile }} make build-${{ matrix.arch }}
- name: Move cross-compiled binary
if: matrix.arch != 'x86_64-pc-windows-gnu'
if: matrix.arch != 'x86_64-pc-windows-gnu'
run: |
mkdir artifacts
mv target/${{ matrix.arch }}/${{ matrix.profile }}/reth ./artifacts
- name: Move cross-compiled binary (Windows)
if: matrix.arch == 'x86_64-pc-windows-gnu'
if: matrix.arch == 'x86_64-pc-windows-gnu'
run: |
mkdir artifacts
mv target/${{ matrix.arch }}/${{ matrix.profile }}/reth.exe ./artifacts
@@ -113,8 +117,8 @@ jobs:
# Upload artifacts
# This is required to share artifacts between different jobs
# =======================================================================
- name: Upload artifact
uses: actions/upload-artifact@v3
- name: Upload artifact
uses: actions/upload-artifact@v3
with:
name: reth-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz
path: reth-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz
@@ -127,10 +131,10 @@ jobs:
draft-release:
name: draft release
needs: [build, extract-version]
runs-on: ubuntu-20.04
needs: [build, extract-version]
runs-on: ubuntu-latest
env:
VERSION: ${{ needs.extract-version.outputs.VERSION }}
VERSION: ${{ needs.extract-version.outputs.VERSION }}
permissions:
# Required to post the release
contents: write
@@ -162,54 +166,55 @@ jobs:
GITHUB_USER: ${{ github.repository_owner }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# The formatting here is borrowed from Lighthouse (which is borrowed from OpenEthereum): https://github.com/openethereum/openethereum/blob/main/.github/workflows/build.yml
# The formatting here is borrowed from Lighthouse (which is borrowed from OpenEthereum):
# https://github.com/openethereum/openethereum/blob/main/.github/workflows/build.yml
run: |
body=$(cat <<- "ENDBODY"
<Release Name>
## Testing Checklist (DELETE ME)
- [ ] Run on testnet for 1-3 days.
- [ ] Resync a mainnet node.
- [ ] Ensure all CI checks pass.
## Release Checklist (DELETE ME)
- [ ] Ensure all crates have had their versions bumped.
- [ ] Write the summary.
- [ ] Fill out the update priority.
- [ ] Ensure all binaries have been added.
- [ ] Prepare release posts (Twitter, ...).
## Summary
Add a summary, including:
- Critical bug fixes
- New features
- Any breaking changes (and what to expect)
## Update Priority
This table provides priorities for which classes of users should update particular components.
| User Class | Priority |
|----------------------|-----------------|
| Payload Builders | <TODO> |
| Non-Payload Builders | <TODO> |
*See [Update Priorities](https://paradigmxyz.github.io/reth/installation/priorities.html) for more information about this table.*
## All Changes
${{ steps.changelog.outputs.CHANGELOG }}
## Binaries
[See pre-built binaries documentation.](https://paradigmxyz.github.io/reth/installation/binaries.html)
The binaries are signed with the PGP key: `A3AE 097C 8909 3A12 4049 DF1F 5391 A3C4 1005 30B4`
| System | Architecture | Binary | PGP Signature |
|:---:|:---:|:---:|:---|
| <img src="https://simpleicons.org/icons/linux.svg" style="width: 32px;"/> | x86_64 | [reth-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/reth-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/reth-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz.asc) |

View File

@@ -15,7 +15,7 @@ env:
name: sanity
jobs:
dep-version-constraints:
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
name: dep version constraints test (partition ${{ matrix.partition }}/${{ strategy.job-total }})
strategy:
matrix:
@@ -64,7 +64,7 @@ jobs:
filename: .github/SANITY_DEPS_ISSUE_TEMPLATE.md
unused-deps:
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
name: unused dependencies
steps:
- name: Checkout sources

View File

@@ -6,7 +6,7 @@ on:
jobs:
close-issues:
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write

View File

@@ -69,7 +69,7 @@ jobs:
repository: ethereum/tests
path: testing/ef-tests/ethereum-tests
submodules: recursive
depth: 1
fetch-depth: 1
- name: Install toolchain
uses: actions-rs/toolchain@v1
@@ -102,7 +102,7 @@ jobs:
unit-success:
if: always()
name: unit success
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
needs: [test, eth-blockchain, doc-test]
timeout-minutes: 60
steps:

Cargo.lock (generated): 749 changes

File diff suppressed because it is too large

View File

@@ -25,8 +25,6 @@ members = [
"crates/revm",
"crates/revm/revm-primitives",
"crates/revm/revm-inspectors",
"crates/rlp",
"crates/rlp/rlp-derive",
"crates/rpc/ipc",
"crates/rpc/rpc",
"crates/rpc/rpc-api",
@@ -34,6 +32,7 @@ members = [
"crates/rpc/rpc-engine-api",
"crates/rpc/rpc-types",
"crates/rpc/rpc-testing-util",
"crates/snapshot",
"crates/stages",
"crates/storage/codecs",
"crates/storage/db",
@@ -72,6 +71,12 @@ exclude = [".github/"]
inherits = "release"
debug = true
# Meant for testing - all optimizations, but with debug assertions and overflow
# checks
[profile.hivetests]
inherits = "test"
opt-level = 3
[profile.maxperf]
inherits = "release"
lto = "fat"
@@ -85,7 +90,6 @@ reth-primitives = { path = "./crates/primitives" }
reth-interfaces = { path = "./crates/interfaces" }
reth-provider = { path = "./crates/storage/provider" }
reth-db = { path = "./crates/storage/db" }
reth-rlp = { path = "./crates/rlp" }
reth-rpc-types = { path = "./crates/rpc/rpc-types" }
reth-rpc-builder = { path = "./crates/rpc/rpc-builder" }
reth-blockchain-tree = { path = "./crates/blockchain-tree" }
@@ -103,15 +107,19 @@ reth-eth-wire = { path = "./crates/net/eth-wire" }
reth-ecies = { path = "./crates/net/ecies" }
# revm
revm = { git = "https://github.com/bluealloy/revm", rev = "516f62cc" }
revm-primitives = { git = "https://github.com/bluealloy/revm", rev = "516f62cc" }
# TODO: Switch back to bluealloy/revm once #724 lands
revm = { git = "https://github.com/Evalir/revm/", branch = "reintroduce-alloy-rebased" }
revm-primitives = { git = "https://github.com/Evalir/revm/", branch = "reintroduce-alloy-rebased" }
## eth
alloy-primitives = "0.4"
alloy-dyn-abi = "0.4"
alloy-sol-types = "0.4"
alloy-rlp = "0.3"
ethers-core = { version = "2.0", default-features = false }
ethers-providers = { version = "2.0", default-features = false }
ethers-signers = { version = "2.0", default-features = false }
ethers-middleware = { version = "2.0", default-features = false }
discv5 = { git = "https://github.com/sigp/discv5", rev = "d2e30e04ee62418b9e57278cee907c02b99d5bd1" }
igd = { git = "https://github.com/stevefan1999-personal/rust-igd", rev = "c2d1f83eb1612a462962453cb0703bc93258b173" }
@@ -133,7 +141,7 @@ strum = "0.25"
rayon = "1.7"
itertools = "0.11"
parking_lot = "0.12"
metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation
metrics = "0.21.1" # Needed for `metrics-macro` to resolve the crate using `::metrics` notation
hex-literal = "0.4"
### proc-macros
@@ -164,7 +172,7 @@ secp256k1 = { version = "0.27.0", default-features = false, features = [
] }
enr = { version = "0.9", default-features = false, features = ["k256"] }
# for eip-4844
c-kzg = { git = "https://github.com/ethereum/c-kzg-4844", rev = "f5f6f863d475847876a2bd5ee252058d37c3a15d" }
c-kzg = "0.1.1"
## config
confy = "0.5"

View File

@@ -26,12 +26,16 @@ RUN cargo chef cook --profile $BUILD_PROFILE --recipe-path recipe.json
COPY . .
RUN cargo build --profile $BUILD_PROFILE --locked --bin reth
# ARG is not resolved in COPY so we have to hack around it by copying the
# binary to a temporary location
RUN cp /app/target/$BUILD_PROFILE/reth /app/reth
# Use Ubuntu as the release image
FROM ubuntu AS runtime
WORKDIR /app
# Copy reth over from the build stage
COPY --from=builder /app/target/release/reth /usr/local/bin
COPY --from=builder /app/reth /usr/local/bin
# Copy licenses
COPY LICENSE-* ./

View File

@@ -38,7 +38,6 @@ reth-rpc = { path = "../../crates/rpc/rpc" }
reth-rpc-types = { path = "../../crates/rpc/rpc-types" }
reth-rpc-types-compat = { path = "../../crates/rpc/rpc-types-compat" }
reth-rpc-api = { path = "../../crates/rpc/rpc-api" }
reth-rlp.workspace = true
reth-network = { path = "../../crates/net/network", features = ["serde"] }
reth-network-api.workspace = true
reth-downloaders = { path = "../../crates/net/downloaders", features = ["test-utils"] }
@@ -49,9 +48,11 @@ reth-payload-builder.workspace = true
reth-basic-payload-builder = { path = "../../crates/payload/basic" }
reth-discv4 = { path = "../../crates/net/discv4" }
reth-prune = { path = "../../crates/prune" }
reth-snapshot = { path = "../../crates/snapshot" }
reth-trie = { path = "../../crates/trie" }
# crypto
alloy-rlp.workspace = true
secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] }
# tracing
@@ -96,7 +97,6 @@ eyre = "0.6.8"
clap = { version = "4", features = ["derive"] }
tempfile = { version = "3.3.0" }
backon = "0.4"
hex = "0.4"
thiserror.workspace = true
pretty_assertions = "1.3.0"
humantime = "2.1.0"

View File

@@ -1,7 +1,7 @@
//! clap [Args](clap::Args) for debugging purposes
use clap::Args;
use reth_primitives::{TxHash, H256};
use reth_primitives::{TxHash, B256};
/// Parameters for debugging purposes
#[derive(Debug, Args, PartialEq, Default)]
@@ -21,7 +21,7 @@ pub struct DebugArgs {
///
/// NOTE: This is a temporary flag
#[arg(long = "debug.tip", help_heading = "Debug", conflicts_with = "continuous")]
pub tip: Option<H256>,
pub tip: Option<B256>,
/// Runs the sync only up to the specified block.
#[arg(long = "debug.max-block", help_heading = "Debug")]

View File

@@ -3,11 +3,12 @@
use crate::version::P2P_CLIENT_VERSION;
use clap::Args;
use reth_config::Config;
use reth_discv4::{DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT};
use reth_net_nat::NatResolver;
use reth_network::{HelloMessage, NetworkConfigBuilder};
use reth_primitives::{mainnet_nodes, ChainSpec, NodeRecord};
use secp256k1::SecretKey;
use std::{path::PathBuf, sync::Arc};
use std::{net::Ipv4Addr, path::PathBuf, sync::Arc};
/// Parameters for configuring the network with more granularity via CLI
#[derive(Debug, Args)]
@@ -57,9 +58,13 @@ pub struct NetworkArgs {
#[arg(long, default_value = "any")]
pub nat: NatResolver,
/// Network listening port. default: 30303
#[arg(long = "port", value_name = "PORT")]
pub port: Option<u16>,
/// Network listening address
#[arg(long = "addr", value_name = "ADDR", default_value_t = DEFAULT_DISCOVERY_ADDR)]
pub addr: Ipv4Addr,
/// Network listening port
#[arg(long = "port", value_name = "PORT", default_value_t = DEFAULT_DISCOVERY_PORT)]
pub port: u16,
/// Maximum number of outbound requests. default: 100
#[arg(long)]
@@ -133,9 +138,13 @@ pub struct DiscoveryArgs {
#[arg(long, conflicts_with = "disable_discovery")]
pub disable_discv4_discovery: bool,
/// The UDP port to use for P2P discovery/networking. default: 30303
#[arg(long = "discovery.port", name = "discovery.port", value_name = "DISCOVERY_PORT")]
pub port: Option<u16>,
/// The UDP address to use for P2P discovery/networking
#[arg(long = "discovery.addr", name = "discovery.addr", value_name = "DISCOVERY_ADDR", default_value_t = DEFAULT_DISCOVERY_ADDR)]
pub addr: Ipv4Addr,
/// The UDP port to use for P2P discovery/networking
#[arg(long = "discovery.port", name = "discovery.port", value_name = "DISCOVERY_PORT", default_value_t = DEFAULT_DISCOVERY_PORT)]
pub port: u16,
}
impl DiscoveryArgs {
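For context, the shape of this change in a self-contained sketch (assuming clap 4's derive API; the 0.0.0.0/30303 defaults mirror DEFAULT_DISCOVERY_ADDR and DEFAULT_DISCOVERY_PORT as exercised by the tests later in this diff):

```rust
// Replacing Option<u16> with a concrete default via `default_value_t`:
// clap fills the value in when the flag is absent, so downstream code no
// longer needs `unwrap_or(DEFAULT_DISCOVERY_PORT)`.
use clap::Parser;
use std::net::Ipv4Addr;

#[derive(Debug, Parser)]
struct Args {
    /// Network listening address
    #[arg(long = "addr", default_value_t = Ipv4Addr::UNSPECIFIED)]
    addr: Ipv4Addr,
    /// Network listening port
    #[arg(long = "port", default_value_t = 30303)]
    port: u16,
}

fn main() {
    let args = Args::parse_from(["reth"]);
    assert_eq!((args.addr, args.port), (Ipv4Addr::UNSPECIFIED, 30303));
}
```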

View File

@@ -11,8 +11,8 @@ use std::sync::Arc;
#[derive(Debug, Args, PartialEq, Default)]
#[command(next_help_heading = "Pruning")]
pub struct PruningArgs {
/// Run full node. Only the most recent 128 block states are stored. This flag takes
/// priority over pruning configuration in reth.toml.
/// Run full node. Only the most recent [`MINIMUM_PRUNING_DISTANCE`] block states are stored.
/// This flag takes priority over pruning configuration in reth.toml.
#[arg(long, default_value_t = false)]
pub full: bool,
}

View File

@@ -24,6 +24,7 @@ use reth_rpc::{
},
JwtError, JwtSecret,
};
use reth_rpc_builder::{
auth::{AuthServerConfig, AuthServerHandle},
constants,
@@ -50,7 +51,7 @@ pub(crate) const RPC_DEFAULT_MAX_REQUEST_SIZE_MB: u32 = 15;
/// This is only relevant for very large trace responses.
pub(crate) const RPC_DEFAULT_MAX_RESPONSE_SIZE_MB: u32 = 115;
/// Default number of incoming connections.
pub(crate) const RPC_DEFAULT_MAX_CONNECTIONS: u32 = 100;
pub(crate) const RPC_DEFAULT_MAX_CONNECTIONS: u32 = 500;
/// Parameters for configuring the rpc with more granularity via CLI
#[derive(Debug, Args)]

View File

@@ -1,6 +1,5 @@
use hex::encode as hex_encode;
use reth_network::config::rng_secret_key;
use reth_primitives::{fs, fs::FsPathError};
use reth_primitives::{fs, fs::FsPathError, hex::encode as hex_encode};
use secp256k1::{Error as SecretKeyBaseError, SecretKey};
use std::{
io,

View File

@@ -1,9 +1,9 @@
//! Clap parser utilities
use reth_primitives::{
fs, AllGenesisFormats, BlockHashOrNumber, ChainSpec, DEV, GOERLI, HOLESKY, MAINNET, SEPOLIA,
fs, AllGenesisFormats, BlockHashOrNumber, ChainSpec, B256, DEV, GOERLI, HOLESKY, MAINNET,
SEPOLIA,
};
use reth_revm::primitives::B256 as H256;
use std::{
net::{IpAddr, Ipv4Addr, SocketAddr, ToSocketAddrs},
path::PathBuf,
@@ -53,7 +53,7 @@ pub fn genesis_value_parser(s: &str) -> eyre::Result<Arc<ChainSpec>, eyre::Error
/// Parse [BlockHashOrNumber]
pub fn hash_or_num_value_parser(value: &str) -> eyre::Result<BlockHashOrNumber, eyre::Error> {
match H256::from_str(value) {
match B256::from_str(value) {
Ok(hash) => Ok(BlockHashOrNumber::Hash(hash)),
Err(_) => Ok(BlockHashOrNumber::Number(value.parse()?)),
}
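Reduced to a self-contained sketch, the fallback logic of `hash_or_num_value_parser` looks like this (`B256` is stubbed here as a raw `[u8; 32]`; reth uses alloy's `B256`, whose `FromStr` handles the hex decoding):

```rust
// Try a 32-byte hex hash first, then fall back to a block number.
use std::str::FromStr;

#[derive(Debug)]
enum BlockHashOrNumber {
    Hash([u8; 32]),
    Number(u64),
}

fn hash_or_num(value: &str) -> Result<BlockHashOrNumber, std::num::ParseIntError> {
    let hex = value.strip_prefix("0x").unwrap_or(value);
    if hex.len() == 64 && hex.chars().all(|c| c.is_ascii_hexdigit()) {
        let mut hash = [0u8; 32];
        for (i, byte) in hash.iter_mut().enumerate() {
            *byte = u8::from_str_radix(&hex[2 * i..2 * i + 2], 16).unwrap();
        }
        return Ok(BlockHashOrNumber::Hash(hash));
    }
    Ok(BlockHashOrNumber::Number(u64::from_str(value)?))
}

fn main() {
    assert!(matches!(hash_or_num("1000"), Ok(BlockHashOrNumber::Number(1000))));
    assert!(matches!(
        hash_or_num("0x0000000000000000000000000000000000000000000000000000000000000003"),
        Ok(BlockHashOrNumber::Hash(_))
    ));
}
```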

View File

@@ -18,7 +18,7 @@ use reth_downloaders::{
headers::reverse_headers::ReverseHeadersDownloaderBuilder, test_utils::FileClient,
};
use reth_interfaces::consensus::Consensus;
use reth_primitives::{stage::StageId, ChainSpec, H256};
use reth_primitives::{stage::StageId, ChainSpec, B256};
use reth_stages::{
prelude::*,
stages::{
@@ -55,6 +55,7 @@ pub struct ImportCommand {
/// - mainnet
/// - goerli
/// - sepolia
/// - holesky
#[arg(
long,
value_name = "CHAIN_OR_PATH",
@@ -156,7 +157,7 @@ impl ImportCommand {
.build(file_client.clone(), consensus.clone(), db.clone())
.into_task();
let (tip_tx, tip_rx) = watch::channel(H256::zero());
let (tip_tx, tip_rx) = watch::channel(B256::ZERO);
let factory = reth_revm::Factory::new(self.chain.clone());
let max_block = file_client.max_block().unwrap_or(0);

View File

@@ -30,6 +30,7 @@ pub struct InitCommand {
/// - mainnet
/// - goerli
/// - sepolia
/// - holesky
#[arg(
long,
value_name = "CHAIN_OR_PATH",

View File

@@ -1,7 +1,7 @@
//! Config traits for various node components.
use reth_revm::primitives::bytes::BytesMut;
use reth_rlp::Encodable;
use alloy_rlp::Encodable;
use reth_primitives::{Bytes, BytesMut};
use reth_rpc::{eth::gas_oracle::GasPriceOracleConfig, JwtError, JwtSecret};
use reth_rpc_builder::{
auth::AuthServerConfig, error::RpcError, EthConfig, IpcServerBuilder, RpcServerConfig,
@@ -72,10 +72,10 @@ pub trait PayloadBuilderConfig {
fn extradata(&self) -> Cow<'_, str>;
/// Returns the rlp-encoded extradata bytes.
fn extradata_rlp_bytes(&self) -> reth_primitives::bytes::Bytes {
fn extradata_rlp_bytes(&self) -> Bytes {
let mut extradata = BytesMut::new();
self.extradata().as_bytes().encode(&mut extradata);
extradata.freeze()
extradata.freeze().into()
}
/// The interval at which the job should build a new payload after the last.
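Pulled out of the trait, the reworked method amounts to this standalone sketch (assuming `alloy-rlp` 0.3 and the `bytes` crate, per the workspace dependencies elsewhere in this diff):

```rust
// RLP-encode the extradata string's bytes, then freeze the buffer into
// immutable Bytes. A 4-byte string gets a 1-byte length prefix (0x84).
use alloy_rlp::Encodable;
use bytes::{Bytes, BytesMut};

fn extradata_rlp_bytes(extradata: &str) -> Bytes {
    let mut out = BytesMut::new();
    extradata.as_bytes().encode(&mut out);
    out.freeze()
}

fn main() {
    assert_eq!(extradata_rlp_bytes("reth").len(), 5);
}
```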

View File

@@ -40,6 +40,7 @@ pub struct Cli<Ext: RethCliExt = ()> {
/// - mainnet
/// - goerli
/// - sepolia
/// - holesky
#[arg(
long,
value_name = "CHAIN_OR_PATH",

View File

@@ -81,7 +81,7 @@ mod tests {
models::{storage_sharded_key::StorageShardedKey, ShardedKey},
AccountHistory, HashedAccount, Headers, StorageHistory, SyncStage,
};
use reth_primitives::{H160, H256};
use reth_primitives::{Address, B256};
use std::str::FromStr;
/// A helper type to parse Args more easily
@@ -104,7 +104,7 @@ mod tests {
.args;
assert_eq!(
args.table_key::<HashedAccount>().unwrap(),
H256::from_str("0x0ac361fe774b78f8fc4e86c1916930d150865c3fc2e21dca2e58833557608bac")
B256::from_str("0x0ac361fe774b78f8fc4e86c1916930d150865c3fc2e21dca2e58833557608bac")
.unwrap()
);
}
@@ -122,8 +122,8 @@ mod tests {
assert_eq!(
args.table_key::<StorageHistory>().unwrap(),
StorageShardedKey::new(
H160::from_str("0x01957911244e546ce519fbac6f798958fafadb41").unwrap(),
H256::from_str(
Address::from_str("0x01957911244e546ce519fbac6f798958fafadb41").unwrap(),
B256::from_str(
"0x0000000000000000000000000000000000000000000000000000000000000003"
)
.unwrap(),
@@ -138,7 +138,7 @@ mod tests {
assert_eq!(
args.table_key::<AccountHistory>().unwrap(),
ShardedKey::new(
H160::from_str("0x4448e1273fd5a8bfdb9ed111e96889c960eee145").unwrap(),
Address::from_str("0x4448e1273fd5a8bfdb9ed111e96889c960eee145").unwrap(),
18446744073709551615
)
);

View File

@@ -3,6 +3,7 @@ use crate::utils::{DbTool, ListFilter};
use clap::Parser;
use eyre::WrapErr;
use reth_db::{database::Database, table::Table, DatabaseEnvRO, TableType, TableViewer, Tables};
use reth_primitives::hex;
use std::cell::RefCell;
use tracing::error;

View File

@@ -48,6 +48,7 @@ pub struct Command {
/// - mainnet
/// - goerli
/// - sepolia
/// - holesky
#[arg(
long,
value_name = "CHAIN_OR_PATH",

View File

@@ -12,7 +12,6 @@ use futures::{stream::select as stream_select, StreamExt};
use reth_beacon_consensus::BeaconConsensus;
use reth_config::Config;
use reth_db::{database::Database, init_db, DatabaseEnv};
use reth_discv4::DEFAULT_DISCOVERY_PORT;
use reth_downloaders::{
bodies::bodies::BodiesDownloaderBuilder,
headers::reverse_headers::ReverseHeadersDownloaderBuilder,
@@ -23,7 +22,7 @@ use reth_interfaces::{
};
use reth_network::NetworkHandle;
use reth_network_api::NetworkInfo;
use reth_primitives::{fs, stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, H256};
use reth_primitives::{fs, stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, B256};
use reth_provider::{BlockExecutionWriter, ProviderFactory, StageCheckpointReader};
use reth_stages::{
sets::DefaultStages,
@@ -35,7 +34,7 @@ use reth_stages::{
};
use reth_tasks::TaskExecutor;
use std::{
net::{Ipv4Addr, SocketAddr, SocketAddrV4},
net::{SocketAddr, SocketAddrV4},
path::PathBuf,
sync::Arc,
};
@@ -63,6 +62,7 @@ pub struct Command {
/// - mainnet
/// - goerli
/// - sepolia
/// - holesky
#[arg(
long,
value_name = "CHAIN_OR_PATH",
@@ -112,7 +112,7 @@ impl Command {
let stage_conf = &config.stages;
let (tip_tx, tip_rx) = watch::channel(H256::zero());
let (tip_tx, tip_rx) = watch::channel(B256::ZERO);
let factory = reth_revm::Factory::new(self.chain.clone());
let header_mode = HeaderSyncMode::Tip(tip_rx);
@@ -166,13 +166,10 @@ impl Command {
.network
.network_config(config, self.chain.clone(), secret_key, default_peers_path)
.with_task_executor(Box::new(task_executor))
.listener_addr(SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::UNSPECIFIED,
self.network.port.unwrap_or(DEFAULT_DISCOVERY_PORT),
)))
.listener_addr(SocketAddr::V4(SocketAddrV4::new(self.network.addr, self.network.port)))
.discovery_addr(SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::UNSPECIFIED,
self.network.discovery.port.unwrap_or(DEFAULT_DISCOVERY_PORT),
self.network.discovery.addr,
self.network.discovery.port,
)))
.build(ProviderFactory::new(db, self.chain.clone()))
.start_network()
@@ -186,7 +183,7 @@ impl Command {
&self,
client: Client,
block: BlockNumber,
) -> eyre::Result<H256> {
) -> eyre::Result<B256> {
info!(target: "reth::cli", ?block, "Fetching block from the network.");
loop {
match get_single_header(&client, BlockHashOrNumber::Number(block)).await {

View File

@@ -9,7 +9,6 @@ use backon::{ConstantBuilder, Retryable};
use clap::Parser;
use reth_config::Config;
use reth_db::{init_db, DatabaseEnv};
use reth_discv4::DEFAULT_DISCOVERY_PORT;
use reth_network::NetworkHandle;
use reth_network_api::NetworkInfo;
use reth_primitives::{fs, stage::StageId, BlockHashOrNumber, ChainSpec};
@@ -21,7 +20,7 @@ use reth_provider::{
use reth_tasks::TaskExecutor;
use reth_trie::{hashed_cursor::HashedPostStateCursorFactory, updates::TrieKey, StateRoot};
use std::{
net::{Ipv4Addr, SocketAddr, SocketAddrV4},
net::{SocketAddr, SocketAddrV4},
path::PathBuf,
sync::Arc,
};
@@ -51,6 +50,7 @@ pub struct Command {
/// - mainnet
/// - goerli
/// - sepolia
/// - holesky
#[arg(
long,
value_name = "CHAIN_OR_PATH",
@@ -89,13 +89,10 @@ impl Command {
.network
.network_config(config, self.chain.clone(), secret_key, default_peers_path)
.with_task_executor(Box::new(task_executor))
.listener_addr(SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::UNSPECIFIED,
self.network.port.unwrap_or(DEFAULT_DISCOVERY_PORT),
)))
.listener_addr(SocketAddr::V4(SocketAddrV4::new(self.network.addr, self.network.port)))
.discovery_addr(SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::UNSPECIFIED,
self.network.discovery.port.unwrap_or(DEFAULT_DISCOVERY_PORT),
self.network.discovery.addr,
self.network.discovery.port,
)))
.build(ProviderFactory::new(db, self.chain.clone()))
.start_network()

View File

@@ -10,7 +10,6 @@ use clap::Parser;
use reth_beacon_consensus::BeaconConsensus;
use reth_config::Config;
use reth_db::{cursor::DbCursorRO, init_db, tables, transaction::DbTx, DatabaseEnv};
use reth_discv4::DEFAULT_DISCOVERY_PORT;
use reth_interfaces::{consensus::Consensus, p2p::full_block::FullBlockClient};
use reth_network::NetworkHandle;
use reth_network_api::NetworkInfo;
@@ -29,7 +28,7 @@ use reth_stages::{
};
use reth_tasks::TaskExecutor;
use std::{
net::{Ipv4Addr, SocketAddr, SocketAddrV4},
net::{SocketAddr, SocketAddrV4},
path::PathBuf,
sync::Arc,
};
@@ -56,6 +55,7 @@ pub struct Command {
/// - mainnet
/// - goerli
/// - sepolia
/// - holesky
#[arg(
long,
value_name = "CHAIN_OR_PATH",
@@ -98,13 +98,10 @@ impl Command {
.network
.network_config(config, self.chain.clone(), secret_key, default_peers_path)
.with_task_executor(Box::new(task_executor))
.listener_addr(SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::UNSPECIFIED,
self.network.port.unwrap_or(DEFAULT_DISCOVERY_PORT),
)))
.listener_addr(SocketAddr::V4(SocketAddrV4::new(self.network.addr, self.network.port)))
.discovery_addr(SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::UNSPECIFIED,
self.network.discovery.port.unwrap_or(DEFAULT_DISCOVERY_PORT),
self.network.discovery.addr,
self.network.discovery.port,
)))
.build(ProviderFactory::new(db, self.chain.clone()))
.start_network()

View File

@@ -6,7 +6,9 @@ use reth_db::{
transaction::{DbTx, DbTxMut},
};
use reth_interfaces::{db::DatabaseError, RethError};
use reth_primitives::{stage::StageId, Account, Bytecode, ChainSpec, StorageEntry, H256, U256};
use reth_primitives::{
stage::StageId, Account, Bytecode, ChainSpec, Receipts, StorageEntry, B256, U256,
};
use reth_provider::{
bundle_state::{BundleStateInit, RevertsInit},
BundleStateWithReceipts, DatabaseProviderRW, HashingWriter, HistoryWriter, OriginalValuesKnown,
@@ -26,9 +28,9 @@ pub enum InitDatabaseError {
#[error("Genesis hash in the database does not match the specified chainspec: chainspec is {chainspec_hash}, database is {database_hash}")]
GenesisHashMismatch {
/// Expected genesis hash.
chainspec_hash: H256,
chainspec_hash: B256,
/// Actual genesis hash.
database_hash: H256,
database_hash: B256,
},
/// Low-level database error.
@@ -45,7 +47,7 @@ pub enum InitDatabaseError {
pub fn init_genesis<DB: Database>(
db: Arc<DB>,
chain: Arc<ChainSpec>,
) -> Result<H256, InitDatabaseError> {
) -> Result<B256, InitDatabaseError> {
let genesis = chain.genesis();
let hash = chain.genesis_hash();
@@ -95,11 +97,11 @@ pub fn insert_genesis_state<DB: Database>(
) -> Result<(), InitDatabaseError> {
let mut state_init: BundleStateInit = HashMap::new();
let mut reverts_init = HashMap::new();
let mut contracts: HashMap<H256, Bytecode> = HashMap::new();
let mut contracts: HashMap<B256, Bytecode> = HashMap::new();
for (address, account) in &genesis.alloc {
let bytecode_hash = if let Some(code) = &account.code {
let bytecode = Bytecode::new_raw(code.0.clone());
let bytecode = Bytecode::new_raw(code.clone());
let hash = bytecode.hash_slow();
contracts.insert(hash, bytecode);
Some(hash)
@@ -145,7 +147,7 @@ pub fn insert_genesis_state<DB: Database>(
state_init,
all_reverts_init,
contracts.into_iter().collect(),
vec![],
Receipts::new(),
0,
);
@@ -287,9 +289,9 @@ mod tests {
#[test]
fn init_genesis_history() {
let address_with_balance = Address::from_low_u64_be(1);
let address_with_storage = Address::from_low_u64_be(2);
let storage_key = H256::from_low_u64_be(1);
let address_with_balance = Address::with_last_byte(1);
let address_with_storage = Address::with_last_byte(2);
let storage_key = B256::with_last_byte(1);
let chain_spec = Arc::new(ChainSpec {
chain: Chain::Id(1),
genesis: Genesis {
@@ -301,7 +303,7 @@ mod tests {
(
address_with_storage,
GenesisAccount {
storage: Some(HashMap::from([(storage_key, H256::random())])),
storage: Some(HashMap::from([(storage_key, B256::random())])),
..Default::default()
},
),
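A quick check of the H160/H256 to Address/B256 helper swap above, as a sketch (assuming alloy-primitives 0.4 from the workspace dependencies):

```rust
// `with_last_byte(1)` replaces `from_low_u64_be(1)`: for values below 256,
// both set only the final byte and leave the rest zero.
use alloy_primitives::{Address, B256};

fn main() {
    let address = Address::with_last_byte(1);
    assert_eq!(address.as_slice()[19], 1);
    assert!(address.as_slice()[..19].iter().all(|&b| b == 0));

    let storage_key = B256::with_last_byte(1);
    assert_eq!(storage_key.as_slice()[31], 1);
}
```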

View File

@@ -19,7 +19,7 @@
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxzy/reth/issues/"
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![warn(missing_docs, unreachable_pub, rustdoc::all)]
#![deny(unused_must_use, rust_2018_idioms)]

View File

@@ -34,18 +34,18 @@ struct NodeState {
eta: Eta,
/// The current checkpoint of the executing stage.
current_checkpoint: StageCheckpoint,
/// The latest canonical block added in the consensus engine.
latest_canonical_engine_block: Option<BlockNumber>,
/// The latest block reached by either pipeline or consensus engine.
latest_block: Option<BlockNumber>,
}
impl NodeState {
fn new(network: Option<NetworkHandle>, latest_block_number: Option<BlockNumber>) -> Self {
fn new(network: Option<NetworkHandle>, latest_block: Option<BlockNumber>) -> Self {
Self {
network,
current_stage: None,
eta: Eta::default(),
current_checkpoint: StageCheckpoint::new(0),
latest_canonical_engine_block: latest_block_number,
latest_block,
}
}
@@ -79,6 +79,9 @@ impl NodeState {
result: ExecOutput { checkpoint, done },
} => {
self.current_checkpoint = checkpoint;
if stage_id.is_finish() {
self.latest_block = Some(checkpoint.block_number);
}
self.eta.update(self.current_checkpoint);
info!(
@@ -124,11 +127,11 @@ impl NodeState {
);
}
BeaconConsensusEngineEvent::CanonicalBlockAdded(block) => {
self.latest_canonical_engine_block = Some(block.number);
info!(number=block.number, hash=?block.hash, "Block added to canonical chain");
}
BeaconConsensusEngineEvent::CanonicalChainCommitted(head, elapsed) => {
self.latest_block = Some(head.number);
info!(number=head.number, hash=?head.hash, ?elapsed, "Canonical chain committed");
}
BeaconConsensusEngineEvent::ForkBlockAdded(block) => {
@@ -138,18 +141,22 @@ impl NodeState {
}
fn handle_consensus_layer_health_event(&self, event: ConsensusLayerHealthEvent) {
match event {
ConsensusLayerHealthEvent::NeverSeen => {
warn!("Post-merge network, but never seen beacon client. Please launch one to follow the chain!")
}
ConsensusLayerHealthEvent::HasNotBeenSeenForAWhile(period) => {
warn!(?period, "Post-merge network, but no beacon client seen for a while. Please launch one to follow the chain!")
}
ConsensusLayerHealthEvent::NeverReceivedUpdates => {
warn!("Beacon client online, but never received consensus updates. Please ensure your beacon client is operational to follow the chain!")
}
ConsensusLayerHealthEvent::HaveNotReceivedUpdatesForAWhile(period) => {
warn!(?period, "Beacon client online, but no consensus updates received for a while. Please fix your beacon client to follow the chain!")
// If the pipeline is running, it's fine not to receive any messages from the CL,
// so we only need to report on CL health while the pipeline is idle.
if self.current_stage.is_none() {
match event {
ConsensusLayerHealthEvent::NeverSeen => {
warn!("Post-merge network, but never seen beacon client. Please launch one to follow the chain!")
}
ConsensusLayerHealthEvent::HasNotBeenSeenForAWhile(period) => {
warn!(?period, "Post-merge network, but no beacon client seen for a while. Please launch one to follow the chain!")
}
ConsensusLayerHealthEvent::NeverReceivedUpdates => {
warn!("Beacon client online, but never received consensus updates. Please ensure your beacon client is operational to follow the chain!")
}
ConsensusLayerHealthEvent::HaveNotReceivedUpdatesForAWhile(period) => {
warn!(?period, "Beacon client online, but no consensus updates received for a while. Please fix your beacon client to follow the chain!")
}
}
}
}
@@ -266,7 +273,7 @@ where
info!(
target: "reth::cli",
connected_peers = this.state.num_connected_peers(),
latest_block = this.state.latest_canonical_engine_block.unwrap_or(this.state.current_checkpoint.block_number),
latest_block = this.state.latest_block.unwrap_or(this.state.current_checkpoint.block_number),
"Status"
);
}

View File

@@ -34,7 +34,6 @@ use reth_blockchain_tree::{
};
use reth_config::{config::PruneConfig, Config};
use reth_db::{database::Database, init_db, DatabaseEnv};
use reth_discv4::DEFAULT_DISCOVERY_PORT;
use reth_downloaders::{
bodies::bodies::BodiesDownloaderBuilder,
headers::reverse_headers::ReverseHeadersDownloaderBuilder,
@@ -54,7 +53,7 @@ use reth_primitives::{
constants::eip4844::{LoadKzgSettingsError, MAINNET_KZG_TRUSTED_SETUP},
kzg::KzgSettings,
stage::StageId,
BlockHashOrNumber, BlockNumber, ChainSpec, DisplayHardforks, Head, SealedHeader, H256,
BlockHashOrNumber, BlockNumber, ChainSpec, DisplayHardforks, Head, SealedHeader, B256,
};
use reth_provider::{
providers::BlockchainProvider, BlockHashReader, BlockReader, CanonStateSubscriptions,
@@ -78,7 +77,7 @@ use reth_transaction_pool::{
};
use secp256k1::SecretKey;
use std::{
net::{Ipv4Addr, SocketAddr, SocketAddrV4},
net::{SocketAddr, SocketAddrV4},
path::PathBuf,
sync::Arc,
};
@@ -113,6 +112,7 @@ pub struct NodeCommand<Ext: RethCliExt = ()> {
/// - mainnet
/// - goerli
/// - sepolia
/// - holesky
/// - dev
#[arg(
long,
@@ -450,6 +450,8 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
None
};
let (highest_snapshots_tx, highest_snapshots_rx) = watch::channel(None);
let mut hooks = EngineHooks::new();
let pruner_events = if let Some(prune_config) = prune_config {
@@ -460,6 +462,7 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
prune_config.block_interval,
prune_config.parts,
self.chain.prune_batch_sizes,
highest_snapshots_rx,
);
let events = pruner.events();
hooks.add(PruneHook::new(pruner, Box::new(ctx.task_executor.clone())));
@@ -468,6 +471,13 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
Either::Right(stream::empty())
};
let _snapshotter = reth_snapshot::Snapshotter::new(
db,
self.chain.clone(),
self.chain.snapshot_block_interval,
highest_snapshots_tx,
);
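The `highest_snapshots` pair wired above is a plain tokio watch channel: the snapshotter publishes the highest snapshotted block, the pruner observes it. A minimal standalone sketch (tokio 1.x; the block number is illustrative):

```rust
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (highest_snapshots_tx, mut highest_snapshots_rx) = watch::channel(None::<u64>);

    // Snapshotter side: announce progress.
    highest_snapshots_tx.send(Some(1_000_000)).unwrap();

    // Pruner side: wait for (or observe) the change.
    highest_snapshots_rx.changed().await.unwrap();
    assert_eq!(*highest_snapshots_rx.borrow(), Some(1_000_000));
}
```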
// Configure the consensus engine
let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel(
client,
@@ -705,7 +715,7 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
&self,
db: DB,
client: Client,
tip: H256,
tip: B256,
) -> RethResult<u64>
where
DB: Database,
@@ -766,20 +776,14 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
.with_task_executor(Box::new(executor))
.set_head(head)
.listener_addr(SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::UNSPECIFIED,
self.network.addr,
// set discovery port based on instance number
match self.network.port {
Some(port) => port + self.instance - 1,
None => DEFAULT_DISCOVERY_PORT + self.instance - 1,
},
self.network.port + self.instance - 1,
)))
.discovery_addr(SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::UNSPECIFIED,
self.network.addr,
// set discovery port based on instance number
match self.network.port {
Some(port) => port + self.instance - 1,
None => DEFAULT_DISCOVERY_PORT + self.instance - 1,
},
self.network.port + self.instance - 1,
)))
.build(ProviderFactory::new(db, self.chain.clone()))
}
@@ -811,7 +815,7 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
builder = builder.with_max_block(max_block)
}
let (tip_tx, tip_rx) = watch::channel(H256::zero());
let (tip_tx, tip_rx) = watch::channel(B256::ZERO);
use reth_revm_inspectors::stack::InspectorStackConfig;
let factory = reth_revm::Factory::new(self.chain.clone());
@@ -943,8 +947,12 @@ async fn run_network_until_shutdown<C>(
#[cfg(test)]
mod tests {
use super::*;
use reth_discv4::DEFAULT_DISCOVERY_PORT;
use reth_primitives::DEV;
use std::{net::IpAddr, path::Path};
use std::{
net::{IpAddr, Ipv4Addr},
path::Path,
};
#[test]
fn parse_help_node_command() {
@@ -960,10 +968,31 @@ mod tests {
}
}
#[test]
fn parse_discovery_addr() {
let cmd =
NodeCommand::<()>::try_parse_from(["reth", "--discovery.addr", "127.0.0.1"]).unwrap();
assert_eq!(cmd.network.discovery.addr, Ipv4Addr::LOCALHOST);
}
#[test]
fn parse_addr() {
let cmd = NodeCommand::<()>::try_parse_from([
"reth",
"--discovery.addr",
"127.0.0.1",
"--addr",
"127.0.0.1",
])
.unwrap();
assert_eq!(cmd.network.discovery.addr, Ipv4Addr::LOCALHOST);
assert_eq!(cmd.network.addr, Ipv4Addr::LOCALHOST);
}
#[test]
fn parse_discovery_port() {
let cmd = NodeCommand::<()>::try_parse_from(["reth", "--discovery.port", "300"]).unwrap();
assert_eq!(cmd.network.discovery.port, Some(300));
assert_eq!(cmd.network.discovery.port, 300);
}
#[test]
@@ -971,8 +1000,8 @@ mod tests {
let cmd =
NodeCommand::<()>::try_parse_from(["reth", "--discovery.port", "300", "--port", "99"])
.unwrap();
assert_eq!(cmd.network.discovery.port, Some(300));
assert_eq!(cmd.network.port, Some(99));
assert_eq!(cmd.network.discovery.port, 300);
assert_eq!(cmd.network.port, 99);
}
#[test]
@@ -1041,32 +1070,32 @@ mod tests {
fn parse_instance() {
let mut cmd = NodeCommand::<()>::parse_from(["reth"]);
cmd.adjust_instance_ports();
cmd.network.port = Some(DEFAULT_DISCOVERY_PORT + cmd.instance - 1);
cmd.network.port = DEFAULT_DISCOVERY_PORT + cmd.instance - 1;
// check rpc port numbers
assert_eq!(cmd.rpc.auth_port, 8551);
assert_eq!(cmd.rpc.http_port, 8545);
assert_eq!(cmd.rpc.ws_port, 8546);
// check network listening port number
assert_eq!(cmd.network.port.unwrap(), 30303);
assert_eq!(cmd.network.port, 30303);
let mut cmd = NodeCommand::<()>::parse_from(["reth", "--instance", "2"]);
cmd.adjust_instance_ports();
cmd.network.port = Some(DEFAULT_DISCOVERY_PORT + cmd.instance - 1);
cmd.network.port = DEFAULT_DISCOVERY_PORT + cmd.instance - 1;
// check rpc port numbers
assert_eq!(cmd.rpc.auth_port, 8651);
assert_eq!(cmd.rpc.http_port, 8544);
assert_eq!(cmd.rpc.ws_port, 8548);
// check network listening port number
assert_eq!(cmd.network.port.unwrap(), 30304);
assert_eq!(cmd.network.port, 30304);
let mut cmd = NodeCommand::<()>::parse_from(["reth", "--instance", "3"]);
cmd.adjust_instance_ports();
cmd.network.port = Some(DEFAULT_DISCOVERY_PORT + cmd.instance - 1);
cmd.network.port = DEFAULT_DISCOVERY_PORT + cmd.instance - 1;
// check rpc port numbers
assert_eq!(cmd.rpc.auth_port, 8751);
assert_eq!(cmd.rpc.http_port, 8543);
assert_eq!(cmd.rpc.ws_port, 8550);
// check network listening port number
assert_eq!(cmd.network.port.unwrap(), 30305);
assert_eq!(cmd.network.port, 30305);
}
}

View File

@@ -33,6 +33,7 @@ pub struct Command {
/// - mainnet
/// - goerli
/// - sepolia
/// - holesky
#[arg(
long,
value_name = "CHAIN_OR_PATH",

View File

@@ -37,6 +37,7 @@ pub struct Command {
/// - mainnet
/// - goerli
/// - sepolia
/// - holesky
#[arg(
long,
value_name = "CHAIN_OR_PATH",

View File

@@ -32,6 +32,7 @@ pub struct Command {
/// - mainnet
/// - goerli
/// - sepolia
/// - holesky
#[arg(
long,
value_name = "CHAIN_OR_PATH",

View File

@@ -46,6 +46,7 @@ pub struct Command {
/// - mainnet
/// - goerli
/// - sepolia
/// - holesky
#[arg(
long,
value_name = "CHAIN_OR_PATH",

View File

@@ -50,6 +50,7 @@ pub struct Command {
/// - mainnet
/// - goerli
/// - sepolia
/// - holesky
#[arg(
long,
value_name = "CHAIN_OR_PATH",

View File

@@ -31,6 +31,7 @@ pub struct Command {
/// - mainnet
/// - goerli
/// - sepolia
/// - holesky
#[arg(
long,
value_name = "CHAIN_OR_PATH",

View File

@@ -6,7 +6,7 @@ use reth_consensus_common::validation::validate_block_standalone;
use reth_db::{
cursor::DbCursorRO,
database::Database,
table::{Table, TableRow},
table::{Decode, Decompress, Table, TableRow},
transaction::{DbTx, DbTxMut},
DatabaseError, RawTable, TableRawRow,
};
@@ -128,16 +128,22 @@ impl<'a, DB: Database> DbTool<'a, DB> {
let map_filter = |row: Result<TableRawRow<T>, _>| {
if let Ok((k, v)) = row {
let (key, value) = (k.into_key(), v.into_value());
let result = || {
if filter.only_count {
return None
}
Some((k.key().unwrap(), v.value().unwrap()))
Some((
<T as Table>::Key::decode(&key).unwrap(),
<T as Table>::Value::decompress(&value).unwrap(),
))
};
match &*bmb {
Some(searcher) => {
if searcher.find_first_in(v.raw_value()).is_some() ||
searcher.find_first_in(k.raw_key()).is_some()
if searcher.find_first_in(&value).is_some() ||
searcher.find_first_in(&key).is_some()
{
hits += 1;
return result()

View File

@@ -52,6 +52,7 @@ Options:
- mainnet
- goerli
- sepolia
- holesky
[default: mainnet]

View File

@@ -23,6 +23,7 @@ Options:
- mainnet
- goerli
- sepolia
- holesky
[default: mainnet]

View File

@@ -39,6 +39,7 @@ Options:
- mainnet
- goerli
- sepolia
- holesky
[default: mainnet]

View File

@@ -34,6 +34,7 @@ Options:
- mainnet
- goerli
- sepolia
- holesky
[default: mainnet]

View File

@@ -31,6 +31,7 @@ Options:
- mainnet
- goerli
- sepolia
- holesky
[default: mainnet]

View File

@@ -28,6 +28,7 @@ Options:
- mainnet
- goerli
- sepolia
- holesky
[default: mainnet]

View File

@@ -31,6 +31,7 @@ Options:
- mainnet
- goerli
- sepolia
- holesky
- dev
[default: mainnet]
@@ -196,7 +197,7 @@ RPC:
--rpc-max-connections <COUNT>
Maximum number of RPC server connections
[default: 100]
[default: 500]
--rpc-max-tracing-requests <COUNT>
Maximum number of concurrent tracing requests

View File

@@ -25,6 +25,7 @@ Options:
- mainnet
- goerli
- sepolia
- holesky
[default: mainnet]

View File

@@ -21,6 +21,7 @@ Options:
- mainnet
- goerli
- sepolia
- holesky
[default: mainnet]

View File

@@ -24,6 +24,7 @@ Options:
- mainnet
- goerli
- sepolia
- holesky
[default: mainnet]

View File

@@ -21,6 +21,7 @@ Options:
- mainnet
- goerli
- sepolia
- holesky
[default: mainnet]

View File

@@ -2,15 +2,15 @@
For those who need a private testnet to validate functionality or scale with Reth.
## Using Docker locally
This guide uses [Kurtosis' eth2-package](https://github.com/kurtosis-tech/eth2-package) and assumes you have Kurtosis and Docker installed and have Docker already running on your machine.
This guide uses [Kurtosis' ethereum-package](https://github.com/kurtosis-tech/ethereum-package) and assumes you have Kurtosis and Docker installed and have Docker already running on your machine.
* Go [here](https://docs.kurtosis.com/install/) to install Kurtosis
* Go [here](https://docs.docker.com/get-docker/) to install Docker
The [`eth2-package`](https://github.com/kurtosis-tech/eth2-package) is a [package](https://docs.kurtosis.com/concepts-reference/packages) for a general purpose Ethereum testnet definition used for instantiating private testnets at any scale over Docker or Kubernetes, locally or in the cloud. This guide will go through how to spin up a local private testnet with Reth various CL clients locally. Specifically, you will instantiate a 2-node network over Docker with Reth/Lighthouse and Reth/Teku client combinations.
The [`ethereum-package`](https://github.com/kurtosis-tech/ethereum-package) is a [package](https://docs.kurtosis.com/concepts-reference/packages) for a general-purpose Ethereum testnet definition used for instantiating private testnets at any scale over Docker or Kubernetes, locally or in the cloud. This guide will go through how to spin up a local private testnet with Reth and various CL clients. Specifically, you will instantiate a 2-node network over Docker with Reth/Lighthouse and Reth/Teku client combinations.
To see all possible configurations and flags you can use, including metrics and observability tools (e.g. Grafana, Prometheus, etc), go [here](https://github.com/kurtosis-tech/eth2-package#configuration).
To see all possible configurations and flags you can use, including metrics and observability tools (e.g. Grafana, Prometheus, etc), go [here](https://github.com/kurtosis-tech/ethereum-package#configuration).
Genesis data will be generated using this [genesis-generator](https://github.com/ethpandaops/ethereum-genesis-generator) to be used to bootstrap the EL and CL clients for each node. The end result will be a private testnet with nodes deployed as Docker containers in an ephemeral, isolated environment on your machine called an [enclave](https://docs.kurtosis.com/concepts-reference/enclaves/). Read more about how the `eth2-package` works by going [here](https://github.com/kurtosis-tech/eth2-package/).
Genesis data will be generated using this [genesis-generator](https://github.com/ethpandaops/ethereum-genesis-generator) and used to bootstrap the EL and CL clients for each node. The end result will be a private testnet with nodes deployed as Docker containers in an ephemeral, isolated environment on your machine called an [enclave](https://docs.kurtosis.com/concepts-reference/enclaves/). Read more about how the `ethereum-package` works by going [here](https://github.com/kurtosis-tech/ethereum-package/).
### Step 1: Define the parameters and shape of your private network
First, in your home directory, create a file with the name `network_params.json` with the following contents:
@@ -39,7 +39,7 @@ First, in your home directory, create a file with the name `network_params.json`
Next, run the following command from your command line:
```bash
kurtosis run github.com/kurtosis-tech/eth2-package "$(cat ~/network_params.json)"
kurtosis run github.com/kurtosis-tech/ethereum-package "$(cat ~/network_params.json)"
```
Kurtosis will spin up an [enclave](https://docs.kurtosis.com/concepts-reference/enclaves) (i.e an ephemeral, isolated environment) and begin to configure and instantiate the nodes in your network. In the end, Kurtosis will print the services running in your enclave that form your private testnet alongside all the container ports and files that were generated & used to start up the private testnet. Here is a sample output:
```console
@@ -96,11 +96,11 @@ Great! You now have a private network with 2 full Ethereum nodes on your local m
Kurtosis packages are portable and reproducible, meaning they will work the same way over Docker or Kubernetes, locally or on remote infrastructure. For use cases that require a larger scale, Kurtosis can be deployed on Kubernetes by following these docs [here](https://docs.kurtosis.com/k8s/).
## Running the network with additional services
The [`eth2-package`](https://github.com/kurtosis-tech/eth2-package) comes with many optional flags and arguments you can enable for your private network. Some include:
The [`ethereum-package`](https://github.com/kurtosis-tech/ethereum-package) comes with many optional flags and arguments you can enable for your private network. Some include:
- A Grafana + Prometheus instance
- A transaction spammer called [`tx-fuzz`](https://github.com/MariusVanDerWijden/tx-fuzz)
- [A network metrics collector](https://github.com/dapplion/beacon-metrics-gazer)
- Flashbot's `mev-boost` implementation of PBS (to test/simulate MEV workflows)
### Questions?
Please reach out to the [Kurtosis discord](https://discord.com/invite/6Jjp9c89z9) should you have any questions about how to use the `eth2-package` for your private testnet needs. Thanks!
Please reach out to the [Kurtosis discord](https://discord.com/invite/6Jjp9c89z9) should you have any questions about how to use the `ethereum-package` for your private testnet needs. Thanks!

View File

@@ -215,13 +215,13 @@ impl BlockBuffer {
#[cfg(test)]
mod tests {
use reth_interfaces::test_utils::generators;
use std::collections::HashMap;
use reth_interfaces::test_utils::generators::{random_block, Rng};
use reth_primitives::{BlockHash, BlockNumHash, SealedBlockWithSenders};
use crate::BlockBuffer;
use reth_interfaces::test_utils::{
generators,
generators::{random_block, Rng},
};
use reth_primitives::{BlockHash, BlockNumHash, SealedBlockWithSenders};
use std::collections::HashMap;
fn create_block<R: Rng>(rng: &mut R, number: u64, parent: BlockHash) -> SealedBlockWithSenders {
let block = random_block(rng, number, Some(parent), None, None);
@@ -231,7 +231,8 @@ mod tests {
#[test]
fn simple_insertion() {
let mut rng = generators::rng();
let block1 = create_block(&mut rng, 10, BlockHash::random());
let parent = rng.gen();
let block1 = create_block(&mut rng, 10, parent);
let mut buffer = BlockBuffer::new(3);
buffer.insert_block(block1.clone());
@@ -244,11 +245,12 @@ mod tests {
fn take_all_chain_of_childrens() {
let mut rng = generators::rng();
let main_parent = BlockNumHash::new(9, BlockHash::random());
let main_parent = BlockNumHash::new(9, rng.gen());
let block1 = create_block(&mut rng, 10, main_parent.hash);
let block2 = create_block(&mut rng, 11, block1.hash);
let block3 = create_block(&mut rng, 12, block2.hash);
let block4 = create_block(&mut rng, 14, BlockHash::random());
let parent4 = rng.gen();
let block4 = create_block(&mut rng, 14, parent4);
let mut buffer = BlockBuffer::new(5);
@@ -273,7 +275,7 @@ mod tests {
fn take_all_multi_level_childrens() {
let mut rng = generators::rng();
let main_parent = BlockNumHash::new(9, BlockHash::random());
let main_parent = BlockNumHash::new(9, rng.gen());
let block1 = create_block(&mut rng, 10, main_parent.hash);
let block2 = create_block(&mut rng, 11, block1.hash);
let block3 = create_block(&mut rng, 11, block1.hash);
@@ -307,7 +309,7 @@ mod tests {
fn take_self_with_childs() {
let mut rng = generators::rng();
let main_parent = BlockNumHash::new(9, BlockHash::random());
let main_parent = BlockNumHash::new(9, rng.gen());
let block1 = create_block(&mut rng, 10, main_parent.hash);
let block2 = create_block(&mut rng, 11, block1.hash);
let block3 = create_block(&mut rng, 11, block1.hash);
@@ -341,11 +343,12 @@ mod tests {
fn clean_chain_of_children() {
let mut rng = generators::rng();
let main_parent = BlockNumHash::new(9, BlockHash::random());
let main_parent = BlockNumHash::new(9, rng.gen());
let block1 = create_block(&mut rng, 10, main_parent.hash);
let block2 = create_block(&mut rng, 11, block1.hash);
let block3 = create_block(&mut rng, 12, block2.hash);
let block4 = create_block(&mut rng, 14, BlockHash::random());
let parent4 = rng.gen();
let block4 = create_block(&mut rng, 14, parent4);
let mut buffer = BlockBuffer::new(5);
@@ -363,7 +366,7 @@ mod tests {
fn clean_all_multi_level_childrens() {
let mut rng = generators::rng();
let main_parent = BlockNumHash::new(9, BlockHash::random());
let main_parent = BlockNumHash::new(9, rng.gen());
let block1 = create_block(&mut rng, 10, main_parent.hash);
let block2 = create_block(&mut rng, 11, block1.hash);
let block3 = create_block(&mut rng, 11, block1.hash);
@@ -385,14 +388,17 @@ mod tests {
fn clean_multi_chains() {
let mut rng = generators::rng();
let main_parent = BlockNumHash::new(9, BlockHash::random());
let main_parent = BlockNumHash::new(9, rng.gen());
let block1 = create_block(&mut rng, 10, main_parent.hash);
let block1a = create_block(&mut rng, 10, main_parent.hash);
let block2 = create_block(&mut rng, 11, block1.hash);
let block2a = create_block(&mut rng, 11, block1.hash);
let random_block1 = create_block(&mut rng, 10, BlockHash::random());
let random_block2 = create_block(&mut rng, 11, BlockHash::random());
let random_block3 = create_block(&mut rng, 12, BlockHash::random());
let random_parent1 = rng.gen();
let random_block1 = create_block(&mut rng, 10, random_parent1);
let random_parent2 = rng.gen();
let random_block2 = create_block(&mut rng, 11, random_parent2);
let random_parent3 = rng.gen();
let random_block3 = create_block(&mut rng, 12, random_parent3);
let mut buffer = BlockBuffer::new(10);
@@ -436,11 +442,12 @@ mod tests {
fn evict_with_gap() {
let mut rng = generators::rng();
let main_parent = BlockNumHash::new(9, BlockHash::random());
let main_parent = BlockNumHash::new(9, rng.gen());
let block1 = create_block(&mut rng, 10, main_parent.hash);
let block2 = create_block(&mut rng, 11, block1.hash);
let block3 = create_block(&mut rng, 12, block2.hash);
let block4 = create_block(&mut rng, 13, BlockHash::random());
let parent4 = rng.gen();
let block4 = create_block(&mut rng, 13, parent4);
let mut buffer = BlockBuffer::new(3);
@@ -472,11 +479,12 @@ mod tests {
fn simple_eviction() {
let mut rng = generators::rng();
let main_parent = BlockNumHash::new(9, BlockHash::random());
let main_parent = BlockNumHash::new(9, rng.gen());
let block1 = create_block(&mut rng, 10, main_parent.hash);
let block2 = create_block(&mut rng, 11, block1.hash);
let block3 = create_block(&mut rng, 12, block2.hash);
let block4 = create_block(&mut rng, 13, BlockHash::random());
let parent4 = rng.gen();
let block4 = create_block(&mut rng, 13, parent4);
let mut buffer = BlockBuffer::new(3);
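The common thread in these test changes is determinism: every parent hash now comes from the single seeded generator instead of `BlockHash::random()`. A minimal sketch of why that matters (assuming rand 0.8's `StdRng`; reth's `generators::rng()` plays the same role):

```rust
use rand::{rngs::StdRng, Rng, SeedableRng};

fn main() {
    // Two generators with the same seed yield identical "random" hashes,
    // so a failing test run can be replayed exactly.
    let mut a = StdRng::seed_from_u64(42);
    let mut b = StdRng::seed_from_u64(42);
    let hash_a: [u8; 32] = a.gen();
    let hash_b: [u8; 32] = b.gen();
    assert_eq!(hash_a, hash_b);
}
```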

View File

@@ -946,7 +946,7 @@ impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTree<DB, C, EF>
}
let Some(chain_id) = self.block_indices.get_blocks_chain_id(block_hash) else {
warn!(target: "blockchain_tree", ?block_hash, "Block hash not found in block indices");
debug!(target: "blockchain_tree", ?block_hash, "Block hash not found in block indices");
return Err(CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain {
block_hash: *block_hash,
})
@@ -1178,7 +1178,7 @@ mod tests {
use reth_db::{test_utils::create_test_rw_db, transaction::DbTxMut, DatabaseEnv};
use reth_interfaces::test_utils::TestConsensus;
use reth_primitives::{
proofs::EMPTY_ROOT, stage::StageCheckpoint, ChainSpecBuilder, H256, MAINNET,
proofs::EMPTY_ROOT, stage::StageCheckpoint, ChainSpecBuilder, B256, MAINNET,
};
use reth_provider::{
test_utils::{blocks::BlockChainTestData, TestExecutorFactory},
@@ -1218,7 +1218,7 @@ mod tests {
for i in 0..10 {
provider
.tx_ref()
.put::<tables::CanonicalHeaders>(i, H256([100 + i as u8; 32]))
.put::<tables::CanonicalHeaders>(i, B256::new([100 + i as u8; 32]))
.unwrap();
}
provider
@@ -1321,10 +1321,10 @@ mod tests {
BlockchainTree::new(externals, sender, config, None).expect("failed to create tree");
// genesis block 10 is already canonical
tree.make_canonical(&H256::zero()).unwrap();
tree.make_canonical(&B256::ZERO).unwrap();
// make sure is_block_hash_canonical returns true for genesis block
tree.is_block_hash_canonical(&H256::zero()).unwrap();
tree.is_block_hash_canonical(&B256::ZERO).unwrap();
// make genesis block 10 as finalized
tree.finalize_block(10);
@@ -1356,7 +1356,7 @@ mod tests {
);
// check if random block is known
let old_block = BlockNumHash::new(1, H256([32; 32]));
let old_block = BlockNumHash::new(1, B256::new([32; 32]));
let err = BlockchainTreeError::PendingBlockIsFinalized { last_finalized: 10 };
assert_eq!(tree.is_block_known(old_block).unwrap_err().as_tree_error(), Some(err));
@@ -1424,10 +1424,10 @@ mod tests {
/**** INSERT SIDE BLOCKS *** */
let mut block1a = block1.clone();
let block1a_hash = H256([0x33; 32]);
let block1a_hash = B256::new([0x33; 32]);
block1a.hash = block1a_hash;
let mut block2a = block2.clone();
let block2a_hash = H256([0x34; 32]);
let block2a_hash = B256::new([0x34; 32]);
block2a.hash = block2a_hash;
// reinsert two blocks that point to canonical chain
@@ -1627,8 +1627,8 @@ mod tests {
// insert unconnected block2b
let mut block2b = block2a.clone();
block2b.hash = H256([0x99; 32]);
block2b.parent_hash = H256([0x88; 32]);
block2b.hash = B256::new([0x99; 32]);
block2b.parent_hash = B256::new([0x88; 32]);
assert_eq!(
tree.insert_block(block2b.clone()).unwrap(),

View File

@@ -13,7 +13,7 @@
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxzy/reth/issues/"
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)]
#![deny(unused_must_use, rust_2018_idioms)]
@@ -48,3 +48,6 @@ mod canonical_chain;
pub mod metrics;
pub use block_buffer::BlockBuffer;
/// Implementation of Tree traits that does nothing.
pub mod noop;

View File

@@ -0,0 +1,133 @@
use reth_interfaces::{
blockchain_tree::{
error::{BlockchainTreeError, InsertBlockError},
BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, InsertPayloadOk,
},
RethResult,
};
use reth_primitives::{
BlockHash, BlockNumHash, BlockNumber, Receipt, SealedBlock, SealedBlockWithSenders,
SealedHeader,
};
use reth_provider::{
BlockchainTreePendingStateProvider, BundleStateDataProvider, CanonStateNotificationSender,
CanonStateNotifications, CanonStateSubscriptions,
};
use std::collections::{BTreeMap, HashSet};
/// A BlockchainTree that does nothing.
///
/// Caution: this is only intended for testing purposes, or for wiring components together.
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct NoopBlockchainTree {}
impl BlockchainTreeEngine for NoopBlockchainTree {
fn buffer_block(&self, _block: SealedBlockWithSenders) -> Result<(), InsertBlockError> {
Ok(())
}
fn insert_block(
&self,
block: SealedBlockWithSenders,
) -> Result<InsertPayloadOk, InsertBlockError> {
Err(InsertBlockError::tree_error(
BlockchainTreeError::BlockHashNotFoundInChain { block_hash: block.hash },
block.block,
))
}
fn finalize_block(&self, _finalized_block: BlockNumber) {}
fn connect_buffered_blocks_to_canonical_hashes_and_finalize(
&self,
_last_finalized_block: BlockNumber,
) -> RethResult<()> {
Ok(())
}
fn connect_buffered_blocks_to_canonical_hashes(&self) -> RethResult<()> {
Ok(())
}
fn make_canonical(&self, block_hash: &BlockHash) -> RethResult<CanonicalOutcome> {
Err(BlockchainTreeError::BlockHashNotFoundInChain { block_hash: *block_hash }.into())
}
fn unwind(&self, _unwind_to: BlockNumber) -> RethResult<()> {
Ok(())
}
}
impl BlockchainTreeViewer for NoopBlockchainTree {
fn blocks(&self) -> BTreeMap<BlockNumber, HashSet<BlockHash>> {
Default::default()
}
fn header_by_hash(&self, _hash: BlockHash) -> Option<SealedHeader> {
None
}
fn block_by_hash(&self, _hash: BlockHash) -> Option<SealedBlock> {
None
}
fn buffered_block_by_hash(&self, _block_hash: BlockHash) -> Option<SealedBlock> {
None
}
fn buffered_header_by_hash(&self, _block_hash: BlockHash) -> Option<SealedHeader> {
None
}
fn canonical_blocks(&self) -> BTreeMap<BlockNumber, BlockHash> {
Default::default()
}
fn find_canonical_ancestor(&self, _parent_hash: BlockHash) -> Option<BlockHash> {
None
}
fn is_canonical(&self, block_hash: BlockHash) -> RethResult<bool> {
Err(BlockchainTreeError::BlockHashNotFoundInChain { block_hash }.into())
}
fn lowest_buffered_ancestor(&self, _hash: BlockHash) -> Option<SealedBlockWithSenders> {
None
}
fn canonical_tip(&self) -> BlockNumHash {
Default::default()
}
fn pending_blocks(&self) -> (BlockNumber, Vec<BlockHash>) {
(0, vec![])
}
fn pending_block_num_hash(&self) -> Option<BlockNumHash> {
None
}
fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec<Receipt>)> {
None
}
fn receipts_by_block_hash(&self, _block_hash: BlockHash) -> Option<Vec<Receipt>> {
None
}
}
impl BlockchainTreePendingStateProvider for NoopBlockchainTree {
fn find_pending_state_provider(
&self,
_block_hash: BlockHash,
) -> Option<Box<dyn BundleStateDataProvider>> {
None
}
}
impl CanonStateSubscriptions for NoopBlockchainTree {
fn subscribe_to_canonical_state(&self) -> CanonStateNotifications {
CanonStateNotificationSender::new(1).subscribe()
}
}
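As a rough illustration of the wiring use case, a test-style sketch (the `block: SealedBlockWithSenders` value is assumed and not part of this change):

// The noop tree accepts buffered blocks but tracks no chains, so any
// attempt to canonicalize a hash fails with BlockHashNotFoundInChain.
let tree = NoopBlockchainTree::default();
assert!(tree.buffer_block(block).is_ok());
assert!(tree.make_canonical(&BlockHash::default()).is_err());
assert!(tree.canonical_blocks().is_empty());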


@@ -149,6 +149,11 @@ impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTreeViewer
None
}
fn is_canonical(&self, hash: BlockHash) -> RethResult<bool> {
trace!(target: "blockchain_tree", ?hash, "Checking if block is canonical");
self.tree.read().is_block_hash_canonical(&hash)
}
fn lowest_buffered_ancestor(&self, hash: BlockHash) -> Option<SealedBlockWithSenders> {
trace!(target: "blockchain_tree", ?hash, "Returning lowest buffered ancestor");
self.tree.read().lowest_buffered_ancestor(&hash).cloned()
@@ -159,11 +164,6 @@ impl<DB: Database, C: Consensus, EF: ExecutorFactory> BlockchainTreeViewer
self.tree.read().block_indices().canonical_tip()
}
fn is_canonical(&self, hash: BlockHash) -> RethResult<bool> {
trace!(target: "blockchain_tree", ?hash, "Checking if block is canonical");
self.tree.read().is_block_hash_canonical(&hash)
}
fn pending_blocks(&self) -> (BlockNumber, Vec<BlockHash>) {
trace!(target: "blockchain_tree", "Returning all pending blocks");
self.tree.read().block_indices().pending_blocks()


@@ -3,7 +3,7 @@
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxzy/reth/issues/"
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)]
#![deny(unused_must_use, rust_2018_idioms)]


@@ -7,7 +7,7 @@ use reth_interfaces::p2p::{
priority::Priority,
};
use reth_primitives::{
BlockBody, BlockHashOrNumber, Header, HeadersDirection, PeerId, WithPeerId, H256,
BlockBody, BlockHashOrNumber, Header, HeadersDirection, PeerId, WithPeerId, B256,
};
use std::fmt::Debug;
use tracing::{trace, warn};
@@ -67,7 +67,7 @@ impl AutoSealClient {
headers
}
async fn fetch_bodies(&self, hashes: Vec<H256>) -> Vec<BlockBody> {
async fn fetch_bodies(&self, hashes: Vec<B256>) -> Vec<BlockBody> {
trace!(target: "consensus::auto", ?hashes, "received bodies request");
let storage = self.storage.read().await;
let mut bodies = Vec::new();
@@ -106,7 +106,7 @@ impl BodiesClient for AutoSealClient {
fn get_block_bodies_with_priority(
&self,
hashes: Vec<H256>,
hashes: Vec<B256>,
_priority: Priority,
) -> Self::Output {
let this = self.clone();


@@ -10,7 +10,7 @@
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxzy/reth/issues/"
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)]
#![deny(unused_must_use, rust_2018_idioms)]
@@ -24,7 +24,7 @@ use reth_interfaces::{
use reth_primitives::{
constants::{EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT},
proofs, Address, Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, Bloom, ChainSpec,
Header, ReceiptWithBloom, SealedBlock, SealedHeader, TransactionSigned, EMPTY_OMMER_ROOT, H256,
Header, ReceiptWithBloom, SealedBlock, SealedHeader, TransactionSigned, B256, EMPTY_OMMER_ROOT,
U256,
};
use reth_provider::{
@@ -207,7 +207,7 @@ pub(crate) struct StorageInner {
/// Tracks best block
pub(crate) best_block: u64,
/// Tracks hash of best block
pub(crate) best_hash: H256,
pub(crate) best_hash: B256,
/// The total difficulty of the chain until this block
pub(crate) total_difficulty: U256,
}
@@ -340,7 +340,7 @@ impl StorageInner {
.map(|r| (*r).clone().expect("receipts have not been pruned").into())
.collect::<Vec<ReceiptWithBloom>>();
header.logs_bloom =
receipts_with_bloom.iter().fold(Bloom::zero(), |bloom, r| bloom | r.bloom);
receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom);
proofs::calculate_receipt_root(&receipts_with_bloom)
};


@@ -19,6 +19,7 @@ reth-rpc-types.workspace = true
reth-tasks.workspace = true
reth-payload-builder.workspace = true
reth-prune = { path = "../../prune" }
reth-snapshot = { path = "../../snapshot" }
reth-rpc-types-compat.workspace = true
# async
tokio = { workspace = true, features = ["sync"] }


@@ -1,4 +1,4 @@
use reth_primitives::H256;
use reth_primitives::B256;
use reth_rpc_types::engine::{ForkchoiceState, PayloadStatusEnum};
/// The struct that keeps track of the received forkchoice state and their status.
@@ -65,13 +65,13 @@ impl ForkchoiceStateTracker {
/// Returns the last valid head hash.
#[allow(unused)]
pub(crate) fn last_valid_head(&self) -> Option<H256> {
pub(crate) fn last_valid_head(&self) -> Option<B256> {
self.last_valid.as_ref().map(|s| s.head_block_hash)
}
/// Returns the head hash of the latest received FCU to which we need to sync.
#[allow(unused)]
pub(crate) fn sync_target(&self) -> Option<H256> {
pub(crate) fn sync_target(&self) -> Option<B256> {
self.last_syncing.as_ref().map(|s| s.head_block_hash)
}
@@ -141,14 +141,14 @@ impl From<PayloadStatusEnum> for ForkchoiceStatus {
/// A helper type to represent the hashes of a [ForkchoiceState]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) enum ForkchoiceStateHash {
Head(H256),
Safe(H256),
Finalized(H256),
Head(B256),
Safe(B256),
Finalized(B256),
}
impl ForkchoiceStateHash {
/// Tries to find a matching hash in the given [ForkchoiceState].
pub(crate) fn find(state: &ForkchoiceState, hash: H256) -> Option<Self> {
pub(crate) fn find(state: &ForkchoiceState, hash: B256) -> Option<Self> {
if state.head_block_hash == hash {
Some(ForkchoiceStateHash::Head(hash))
} else if state.safe_block_hash == hash {
@@ -166,8 +166,8 @@ impl ForkchoiceStateHash {
}
}
impl AsRef<H256> for ForkchoiceStateHash {
fn as_ref(&self) -> &H256 {
impl AsRef<B256> for ForkchoiceStateHash {
fn as_ref(&self) -> &B256 {
match self {
ForkchoiceStateHash::Head(h) => h,
ForkchoiceStateHash::Safe(h) => h,
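To make the matching order concrete, a crate-internal, test-style sketch (assumes three distinct non-zero hashes `head`, `safe`, and `finalized` are in scope):

let state = ForkchoiceState {
    head_block_hash: head,
    safe_block_hash: safe,
    finalized_block_hash: finalized,
};
// `find` checks the head field first, then safe, then finalized.
assert_eq!(ForkchoiceStateHash::find(&state, head), Some(ForkchoiceStateHash::Head(head)));
assert_eq!(ForkchoiceStateHash::find(&state, safe), Some(ForkchoiceStateHash::Safe(safe)));
// A hash matching none of the three fields yields None.
assert_eq!(ForkchoiceStateHash::find(&state, B256::ZERO), None);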


@@ -54,7 +54,7 @@ impl EngineHooksController {
) -> Poll<Result<PolledHook, EngineHookError>> {
let Some(mut hook) = self.running_hook_with_db_write.take() else { return Poll::Pending };
match hook.poll(cx, args) {
match hook.poll(cx, args)? {
Poll::Ready((event, action)) => {
let result = PolledHook { event, action, db_access_level: hook.db_access_level() };
@@ -109,7 +109,7 @@ impl EngineHooksController {
return Poll::Pending
}
if let Poll::Ready((event, action)) = hook.poll(cx, args) {
if let Poll::Ready((event, action)) = hook.poll(cx, args)? {
let result = PolledHook { event, action, db_access_level: hook.db_access_level() };
debug!(


@@ -1,4 +1,4 @@
use reth_interfaces::RethError;
use reth_interfaces::{RethError, RethResult};
use reth_primitives::BlockNumber;
use std::{
fmt,
@@ -11,6 +11,9 @@ pub(crate) use controller::{EngineHooksController, PolledHook};
mod prune;
pub use prune::PruneHook;
mod snapshot;
pub use snapshot::SnapshotHook;
/// Collection of [engine hooks][`EngineHook`].
#[derive(Default)]
pub struct EngineHooks {
@@ -47,7 +50,7 @@ pub trait EngineHook: Send + Sync + 'static {
&mut self,
cx: &mut Context<'_>,
ctx: EngineContext,
) -> Poll<(EngineHookEvent, Option<EngineHookAction>)>;
) -> Poll<RethResult<(EngineHookEvent, Option<EngineHookAction>)>>;
/// Returns [db access level][`EngineHookDBAccessLevel`] the hook needs.
fn db_access_level(&self) -> EngineHookDBAccessLevel;
@@ -58,6 +61,8 @@ pub trait EngineHook: Send + Sync + 'static {
pub struct EngineContext {
/// Tip block number.
pub tip_block_number: BlockNumber,
/// Finalized block number, if known.
pub finalized_block_number: Option<BlockNumber>,
}
/// An event emitted when [hook][`EngineHook`] is polled.
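For context, a minimal sketch of what the new fallible contract asks of an implementor. This hypothetical no-op hook covers only the methods visible in this diff and assumes the usual imports (std::task::{Context, Poll}) are in scope:

struct NoopHook;

impl EngineHook for NoopHook {
    fn name(&self) -> &'static str {
        "Noop"
    }

    fn poll(
        &mut self,
        _cx: &mut Context<'_>,
        _ctx: EngineContext,
    ) -> Poll<RethResult<(EngineHookEvent, Option<EngineHookAction>)>> {
        // Hooks can now surface errors to the engine via RethResult instead
        // of having to smuggle them through EngineHookEvent.
        Poll::Ready(Ok((EngineHookEvent::Finished(Ok(())), None)))
    }

    fn db_access_level(&self) -> EngineHookDBAccessLevel {
        EngineHookDBAccessLevel::ReadOnly
    }
}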


@@ -9,7 +9,7 @@ use crate::{
use futures::FutureExt;
use metrics::Counter;
use reth_db::database::Database;
use reth_interfaces::RethError;
use reth_interfaces::{RethError, RethResult};
use reth_primitives::BlockNumber;
use reth_prune::{Pruner, PrunerError, PrunerWithResult};
use reth_tasks::TaskSpawner;
@@ -55,7 +55,7 @@ impl<DB: Database + 'static> PruneHook<DB> {
fn poll_pruner(
&mut self,
cx: &mut Context<'_>,
) -> Poll<(EngineHookEvent, Option<EngineHookAction>)> {
) -> Poll<RethResult<(EngineHookEvent, Option<EngineHookAction>)>> {
let result = match self.pruner_state {
PrunerState::Idle(_) => return Poll::Pending,
PrunerState::Running(ref mut fut) => {
@@ -69,14 +69,7 @@ impl<DB: Database + 'static> PruneHook<DB> {
match result {
Ok(_) => EngineHookEvent::Finished(Ok(())),
Err(err) => EngineHookEvent::Finished(Err(match err {
PrunerError::PrunePart(_) | PrunerError::InconsistentData(_) => {
EngineHookError::Internal(Box::new(err))
}
PrunerError::Interface(err) => err.into(),
PrunerError::Database(err) => RethError::Database(err).into(),
PrunerError::Provider(err) => RethError::Provider(err).into(),
})),
Err(err) => EngineHookEvent::Finished(Err(err.into())),
}
}
Err(_) => {
@@ -85,14 +78,15 @@ impl<DB: Database + 'static> PruneHook<DB> {
}
};
Poll::Ready((event, None))
Poll::Ready(Ok((event, None)))
}
/// This will try to spawn the pruner if it is idle:
/// 1. Check if pruning is needed through [Pruner::is_pruning_needed].
/// 2a. If pruning is needed, pass tip block number to the [Pruner::run] and spawn it in a
/// 2.
/// 1. If pruning is needed, pass tip block number to the [Pruner::run] and spawn it in a
/// separate task. Set pruner state to [PrunerState::Running].
/// 2b. If pruning is not needed, set pruner state back to [PrunerState::Idle].
/// 2. If pruning is not needed, set pruner state back to [PrunerState::Idle].
///
/// If pruner is already running, do nothing.
fn try_spawn_pruner(
@@ -136,11 +130,11 @@ impl<DB: Database + 'static> EngineHook for PruneHook<DB> {
&mut self,
cx: &mut Context<'_>,
ctx: EngineContext,
) -> Poll<(EngineHookEvent, Option<EngineHookAction>)> {
) -> Poll<RethResult<(EngineHookEvent, Option<EngineHookAction>)>> {
// Try to spawn a pruner
match self.try_spawn_pruner(ctx.tip_block_number) {
Some((EngineHookEvent::NotReady, _)) => return Poll::Pending,
Some((event, action)) => return Poll::Ready((event, action)),
Some((event, action)) => return Poll::Ready(Ok((event, action))),
None => (),
}
@@ -176,3 +170,16 @@ struct Metrics {
/// The number of times the pruner was run.
runs: Counter,
}
impl From<PrunerError> for EngineHookError {
fn from(err: PrunerError) -> Self {
match err {
PrunerError::PrunePart(_) | PrunerError::InconsistentData(_) => {
EngineHookError::Internal(Box::new(err))
}
PrunerError::Interface(err) => err.into(),
PrunerError::Database(err) => RethError::Database(err).into(),
PrunerError::Provider(err) => RethError::Provider(err).into(),
}
}
}


@@ -0,0 +1,161 @@
//! Snapshot hook for the engine implementation.
use crate::{
engine::hooks::{
EngineContext, EngineHook, EngineHookAction, EngineHookError, EngineHookEvent,
},
hooks::EngineHookDBAccessLevel,
};
use futures::FutureExt;
use reth_db::database::Database;
use reth_interfaces::{RethError, RethResult};
use reth_primitives::BlockNumber;
use reth_snapshot::{Snapshotter, SnapshotterError, SnapshotterWithResult};
use reth_tasks::TaskSpawner;
use std::task::{ready, Context, Poll};
use tokio::sync::oneshot;
/// Manages snapshotting under the control of the engine.
///
/// This type controls the [Snapshotter].
#[derive(Debug)]
pub struct SnapshotHook<DB> {
/// The current state of the snapshotter.
state: SnapshotterState<DB>,
/// The type that can spawn the snapshotter task.
task_spawner: Box<dyn TaskSpawner>,
}
impl<DB: Database + 'static> SnapshotHook<DB> {
/// Create a new instance
pub fn new(snapshotter: Snapshotter<DB>, task_spawner: Box<dyn TaskSpawner>) -> Self {
Self { state: SnapshotterState::Idle(Some(snapshotter)), task_spawner }
}
/// Advances the snapshotter state.
///
/// This checks for the result in the channel, or returns pending if the snapshotter is idle.
fn poll_snapshotter(
&mut self,
cx: &mut Context<'_>,
) -> Poll<RethResult<(EngineHookEvent, Option<EngineHookAction>)>> {
let result = match self.state {
SnapshotterState::Idle(_) => return Poll::Pending,
SnapshotterState::Running(ref mut fut) => {
ready!(fut.poll_unpin(cx))
}
};
let event = match result {
Ok((snapshotter, result)) => {
self.state = SnapshotterState::Idle(Some(snapshotter));
match result {
Ok(_) => EngineHookEvent::Finished(Ok(())),
Err(err) => EngineHookEvent::Finished(Err(err.into())),
}
}
Err(_) => {
// failed to receive the snapshotter
EngineHookEvent::Finished(Err(EngineHookError::ChannelClosed))
}
};
Poll::Ready(Ok((event, None)))
}
/// This will try to spawn the snapshotter if it is idle:
/// 1. Check if snapshotting is needed through [Snapshotter::get_snapshot_targets] and then
/// [SnapshotTargets::any](reth_snapshot::SnapshotTargets::any).
/// 2.
/// 1. If snapshotting is needed, pass snapshot request to the [Snapshotter::run] and spawn
/// it in a separate task. Set snapshotter state to [SnapshotterState::Running].
/// 2. If snapshotting is not needed, set snapshotter state back to
/// [SnapshotterState::Idle].
///
/// If snapshotter is already running, do nothing.
fn try_spawn_snapshotter(
&mut self,
finalized_block_number: BlockNumber,
) -> RethResult<Option<(EngineHookEvent, Option<EngineHookAction>)>> {
Ok(match &mut self.state {
SnapshotterState::Idle(snapshotter) => {
let Some(mut snapshotter) = snapshotter.take() else { return Ok(None) };
let targets = snapshotter.get_snapshot_targets(finalized_block_number)?;
// Check if the snapshotting of any parts has been requested.
if targets.any() {
let (tx, rx) = oneshot::channel();
self.task_spawner.spawn_critical_blocking(
"snapshotter task",
Box::pin(async move {
let result = snapshotter.run(targets);
let _ = tx.send((snapshotter, result));
}),
);
self.state = SnapshotterState::Running(rx);
Some((EngineHookEvent::Started, None))
} else {
self.state = SnapshotterState::Idle(Some(snapshotter));
Some((EngineHookEvent::NotReady, None))
}
}
SnapshotterState::Running(_) => None,
})
}
}
impl<DB: Database + 'static> EngineHook for SnapshotHook<DB> {
fn name(&self) -> &'static str {
"Snapshot"
}
fn poll(
&mut self,
cx: &mut Context<'_>,
ctx: EngineContext,
) -> Poll<RethResult<(EngineHookEvent, Option<EngineHookAction>)>> {
let Some(finalized_block_number) = ctx.finalized_block_number else {
return Poll::Ready(Ok((EngineHookEvent::NotReady, None)))
};
// Try to spawn a snapshotter
match self.try_spawn_snapshotter(finalized_block_number)? {
Some((EngineHookEvent::NotReady, _)) => return Poll::Pending,
Some((event, action)) => return Poll::Ready(Ok((event, action))),
None => (),
}
// Poll snapshotter and check its status
self.poll_snapshotter(cx)
}
fn db_access_level(&self) -> EngineHookDBAccessLevel {
EngineHookDBAccessLevel::ReadOnly
}
}
/// The possible snapshotter states within the sync controller.
///
/// [SnapshotterState::Idle] means that the snapshotter is currently idle.
/// [SnapshotterState::Running] means that the snapshotter is currently running.
#[derive(Debug)]
enum SnapshotterState<DB> {
/// Snapshotter is idle.
Idle(Option<Snapshotter<DB>>),
/// Snapshotter is running and waiting for a response
Running(oneshot::Receiver<SnapshotterWithResult<DB>>),
}
impl From<SnapshotterError> for EngineHookError {
fn from(err: SnapshotterError) -> Self {
match err {
SnapshotterError::InconsistentData(_) => EngineHookError::Internal(Box::new(err)),
SnapshotterError::Interface(err) => err.into(),
SnapshotterError::Database(err) => RethError::Database(err).into(),
SnapshotterError::Provider(err) => RethError::Provider(err).into(),
}
}
}
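A sketch of how the hook is meant to be installed; the `add` registration method on EngineHooks and the `snapshotter`/`task_spawner` values are assumptions, not shown in this diff:

let mut hooks = EngineHooks::new();
hooks.add(SnapshotHook::new(snapshotter, Box::new(task_spawner)));
// The engine polls this hook only once a finalized block number is known
// (EngineContext::finalized_block_number) and reports NotReady otherwise.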


@@ -2,7 +2,7 @@ use reth_metrics::{
metrics::{Counter, Gauge},
Metrics,
};
use reth_primitives::{Header, SealedHeader, H256};
use reth_primitives::{Header, SealedHeader, B256};
use schnellru::{ByLength, LruMap};
use std::sync::Arc;
use tracing::warn;
@@ -16,7 +16,7 @@ const INVALID_HEADER_HIT_EVICTION_THRESHOLD: u8 = 128;
/// Keeps track of invalid headers.
pub(crate) struct InvalidHeaderCache {
/// This maps a header hash to a reference to its invalid ancestor.
headers: LruMap<H256, HeaderEntry>,
headers: LruMap<B256, HeaderEntry>,
/// Metrics for the cache.
metrics: InvalidHeaderCacheMetrics,
}
@@ -26,7 +26,7 @@ impl InvalidHeaderCache {
Self { headers: LruMap::new(ByLength::new(max_length)), metrics: Default::default() }
}
fn insert_entry(&mut self, hash: H256, header: Arc<Header>) {
fn insert_entry(&mut self, hash: B256, header: Arc<Header>) {
self.headers.insert(hash, HeaderEntry { header, hit_count: 0 });
}
@@ -34,7 +34,7 @@ impl InvalidHeaderCache {
///
/// If this is called, the hit count for the entry is incremented.
/// If the hit count exceeds the threshold, the entry is evicted and `None` is returned.
pub(crate) fn get(&mut self, hash: &H256) -> Option<Arc<Header>> {
pub(crate) fn get(&mut self, hash: &B256) -> Option<Arc<Header>> {
{
let entry = self.headers.get(hash)?;
entry.hit_count += 1;
@@ -51,7 +51,7 @@ impl InvalidHeaderCache {
/// Inserts an invalid block into the cache, with a given invalid ancestor.
pub(crate) fn insert_with_invalid_ancestor(
&mut self,
header_hash: H256,
header_hash: B256,
invalid_ancestor: Arc<Header>,
) {
if self.get(&header_hash).is_none() {
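A crate-internal sketch of the eviction behavior described above (`child_hash: B256` and `invalid_header: Header` are assumed values):

let mut cache = InvalidHeaderCache::new(10);
cache.insert_with_invalid_ancestor(child_hash, Arc::new(invalid_header));
// Every lookup bumps the hit count; once it exceeds
// INVALID_HEADER_HIT_EVICTION_THRESHOLD (128), the entry is dropped.
for _ in 0..INVALID_HEADER_HIT_EVICTION_THRESHOLD {
    assert!(cache.get(&child_hash).is_some());
}
assert!(cache.get(&child_hash).is_none()); // evicted on the 129th hit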


@@ -23,7 +23,7 @@ use reth_interfaces::{
use reth_payload_builder::{PayloadBuilderAttributes, PayloadBuilderHandle};
use reth_primitives::{
constants::EPOCH_SLOTS, listener::EventListeners, stage::StageId, BlockNumHash, BlockNumber,
ChainSpec, Head, Header, SealedBlock, SealedHeader, H256, U256,
ChainSpec, Head, Header, SealedBlock, SealedHeader, B256, U256,
};
use reth_provider::{
BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError,
@@ -230,7 +230,7 @@ where
max_block: Option<BlockNumber>,
run_pipeline_continuously: bool,
payload_builder: PayloadBuilderHandle,
target: Option<H256>,
target: Option<B256>,
pipeline_run_threshold: u64,
hooks: EngineHooks,
) -> RethResult<(Self, BeaconConsensusEngineHandle)> {
@@ -274,7 +274,7 @@ where
max_block: Option<BlockNumber>,
run_pipeline_continuously: bool,
payload_builder: PayloadBuilderHandle,
target: Option<H256>,
target: Option<B256>,
pipeline_run_threshold: u64,
to_engine: UnboundedSender<BeaconEngineMessage>,
rx: UnboundedReceiver<BeaconEngineMessage>,
@@ -328,7 +328,7 @@ where
/// # Returns
///
/// A target block hash if the pipeline is inconsistent, otherwise `None`.
fn check_pipeline_consistency(&self) -> RethResult<Option<H256>> {
fn check_pipeline_consistency(&self) -> RethResult<Option<B256>> {
// If no target was provided, check if the stages are congruent - check if the
// checkpoint of the last stage matches the checkpoint of the first.
let first_stage_checkpoint = self
@@ -388,7 +388,7 @@ where
canonical_tip_num: u64,
target_block_number: u64,
downloaded_block: Option<BlockNumHash>,
) -> Option<H256> {
) -> Option<B256> {
let sync_target_state = self.forkchoice_state_tracker.sync_target_state();
// check if the distance exceeds the threshold for pipeline sync
@@ -465,12 +465,12 @@ where
/// the above conditions.
fn latest_valid_hash_for_invalid_payload(
&self,
parent_hash: H256,
parent_hash: B256,
insert_err: Option<&InsertBlockErrorKind>,
) -> Option<H256> {
) -> Option<B256> {
// check pre merge block error
if insert_err.map(|err| err.is_block_pre_merge()).unwrap_or_default() {
return Some(H256::zero())
return Some(B256::ZERO)
}
// If this is sent from new payload then the parent hash could be in a side chain, and is
@@ -485,7 +485,7 @@ where
// we need to check if the parent block is the last POW block, if so then the payload is
// the first POS. The engine API spec mandates a zero hash to be returned: <https://github.com/ethereum/execution-apis/blob/6709c2a795b707202e93c4f2867fa0bf2640a84f/src/engine/paris.md#engine_newpayloadv1>
if parent_header.difficulty != U256::ZERO {
return Some(H256::zero())
return Some(B256::ZERO)
}
// parent is canonical POS block
@@ -496,12 +496,12 @@ where
/// Prepares the invalid payload response for the given hash, checking the
/// database for the parent hash and populating the payload status with the latest valid hash
/// according to the engine api spec.
fn prepare_invalid_response(&self, mut parent_hash: H256) -> PayloadStatus {
fn prepare_invalid_response(&self, mut parent_hash: B256) -> PayloadStatus {
// Edge case: the `latestValid` field is the zero hash if the parent block is the terminal
// PoW block, which we need to identify by looking at the parent's block difficulty
if let Ok(Some(parent)) = self.blockchain.header_by_hash_or_number(parent_hash.into()) {
if parent.difficulty != U256::ZERO {
parent_hash = H256::zero();
parent_hash = B256::ZERO;
}
}
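This edge case follows the Paris engine API: an invalid payload descending from the terminal proof-of-work block must report a zero `latestValidHash`. A hedged, test-style sketch (the hashes are hypothetical, and the PoS branch is inferred from the doc comment above):

// Parent is the terminal PoW block (difficulty != 0): zero hash.
let status = engine.prepare_invalid_response(pow_parent_hash);
assert_eq!(status.latest_valid_hash, Some(B256::ZERO));

// Parent is a canonical PoS block: its hash is reported as latest valid.
let status = engine.prepare_invalid_response(pos_parent_hash);
assert_eq!(status.latest_valid_hash, Some(pos_parent_hash));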
@@ -518,8 +518,8 @@ where
/// be invalid.
fn check_invalid_ancestor_with_head(
&mut self,
check: H256,
head: H256,
check: B256,
head: B256,
) -> Option<PayloadStatus> {
// check if the check hash was previously marked as invalid
let header = self.invalid_headers.get(&check)?;
@@ -535,7 +535,7 @@ where
/// Checks if the given `head` points to an invalid header, which requires a specific response
/// to a forkchoice update.
fn check_invalid_ancestor(&mut self, head: H256) -> Option<PayloadStatus> {
fn check_invalid_ancestor(&mut self, head: B256) -> Option<PayloadStatus> {
let parent_hash = {
// check if the head was previously marked as invalid
let header = self.invalid_headers.get(&head)?;
@@ -879,7 +879,7 @@ where
///
/// Returns an error if the block is not found.
#[inline]
fn update_safe_block(&self, safe_block_hash: H256) -> RethResult<()> {
fn update_safe_block(&self, safe_block_hash: B256) -> RethResult<()> {
if !safe_block_hash.is_zero() {
if self.blockchain.safe_block_hash()? == Some(safe_block_hash) {
// nothing to update
@@ -899,7 +899,7 @@ where
///
/// Returns an error if the block is not found.
#[inline]
fn update_finalized_block(&self, finalized_block_hash: H256) -> RethResult<()> {
fn update_finalized_block(&self, finalized_block_hash: B256) -> RethResult<()> {
if !finalized_block_hash.is_zero() {
if self.blockchain.finalized_block_hash()? == Some(finalized_block_hash) {
// nothing to update
@@ -949,7 +949,7 @@ where
return PayloadStatus::from_status(PayloadStatusEnum::Invalid {
validation_error: error.to_string(),
})
.with_latest_valid_hash(H256::zero())
.with_latest_valid_hash(B256::ZERO)
}
RethError::BlockchainTree(BlockchainTreeError::BlockHashNotFoundInChain { .. }) => {
// This just means we couldn't find the block when attempting to make it canonical,
@@ -1008,7 +1008,7 @@ where
///
/// Returns the parent hash of the block itself if the block is buffered and has no other
/// buffered ancestors.
fn lowest_buffered_ancestor_or(&self, hash: H256) -> H256 {
fn lowest_buffered_ancestor_or(&self, hash: B256) -> B256 {
self.blockchain
.lowest_buffered_ancestor(hash)
.map(|block| block.parent_hash)
@@ -1030,7 +1030,7 @@ where
// client software MUST respond with -38003: `Invalid payload attributes` and MUST NOT
// begin a payload build process. In such an event, the forkchoiceState update MUST NOT
// be rolled back.
if attrs.timestamp <= head.timestamp.into() {
if attrs.timestamp.to::<u64>() <= head.timestamp {
return OnForkChoiceUpdated::invalid_payload_attributes()
}
@@ -1079,7 +1079,7 @@ where
payload: ExecutionPayload,
cancun_fields: Option<CancunPayloadFields>,
) -> Result<PayloadStatus, BeaconOnNewPayloadError> {
let block = match self.ensure_well_formed_payload(payload, cancun_fields)? {
let block = match self.ensure_well_formed_payload(payload, cancun_fields) {
Ok(block) => block,
Err(status) => return Ok(status),
};
@@ -1148,7 +1148,7 @@ where
&self,
payload: ExecutionPayload,
cancun_fields: Option<CancunPayloadFields>,
) -> Result<Result<SealedBlock, PayloadStatus>, BeaconOnNewPayloadError> {
) -> Result<SealedBlock, PayloadStatus> {
let parent_hash = payload.parent_hash();
let block_hash = payload.block_hash();
@@ -1158,15 +1158,13 @@ where
) {
Ok(block) => {
// make sure there are no blob transactions in the payload if it is pre-cancun
// we perform this check before validating the block hash because INVALID_PARAMS
// must be returned over an INVALID response.
if !self.chain_spec().is_cancun_active_at_timestamp(block.timestamp) &&
block.has_blob_transactions()
{
return Err(BeaconOnNewPayloadError::PreCancunBlockWithBlobTransactions)
Err(PayloadError::PreCancunBlockWithBlobTransactions)
} else {
validate_block_hash(block_hash, block)
}
validate_block_hash(block_hash, block)
}
Err(error) => Err(error),
};
@@ -1185,7 +1183,7 @@ where
}
let status = PayloadStatusEnum::from(error);
return Ok(Err(PayloadStatus::new(status, latest_valid_hash)))
return Err(PayloadStatus::new(status, latest_valid_hash))
}
};
@@ -1196,13 +1194,9 @@ where
.flatten()
.collect::<Vec<_>>();
if let Err(status) =
self.validate_versioned_hashes(parent_hash, block_versioned_hashes, cancun_fields)
{
return Ok(Err(status))
}
self.validate_versioned_hashes(parent_hash, block_versioned_hashes, cancun_fields)?;
Ok(Ok(block))
Ok(block)
}
/// Returns the currently configured [ChainSpec].
@@ -1219,8 +1213,8 @@ where
/// <https://github.com/ethereum/execution-apis/blob/fe8e13c288c592ec154ce25c534e26cb7ce0530d/src/engine/cancun.md#specification>
fn validate_versioned_hashes(
&self,
parent_hash: H256,
block_versioned_hashes: Vec<&H256>,
parent_hash: B256,
block_versioned_hashes: Vec<&B256>,
cancun_fields: Option<CancunPayloadFields>,
) -> Result<(), PayloadStatus> {
// This validates the following engine API rule:
@@ -1371,7 +1365,7 @@ where
///
/// If the given block is missing from the database, this will return `false`. Otherwise, `true`
/// is returned: the database contains the hash and the tree was updated.
fn update_tree_on_finished_pipeline(&mut self, block_hash: H256) -> RethResult<bool> {
fn update_tree_on_finished_pipeline(&mut self, block_hash: B256) -> RethResult<bool> {
let synced_to_finalized = match self.blockchain.block_number(block_hash)? {
Some(number) => {
// Attempt to restore the tree.
@@ -1749,6 +1743,10 @@ where
self.sync_state_updater.update_sync_state(SyncState::Syncing)
}
EngineHookEvent::Finished(_) => {
// Hook with read-write access to the database has finished running, so engine
// can process new FCU/payload messages from CL again. It's safe to
// return `false` on `eth_syncing` request.
self.sync_state_updater.update_sync_state(SyncState::Idle);
// If the hook had read-write access to the database, it means that the engine
// may have accumulated some buffered blocks.
if let Err(error) =
@@ -1792,45 +1790,60 @@ where
// Control loop that advances the state
'main: loop {
// Poll a running hook with db write access first, as we will not be able to process
// any engine messages until it's finished.
if let Poll::Ready(result) = this.hooks.poll_running_hook_with_db_write(
cx,
EngineContext { tip_block_number: this.blockchain.canonical_tip().number },
)? {
this.on_hook_result(result)?;
}
// Poll a running hook with db write access (if any) and CL messages first, draining
// both and then proceeding to polling other parts such as SyncController and hooks.
loop {
// Poll a running hook with db write access first, as we will not be able to process
// any engine messages until it's finished.
if let Poll::Ready(result) = this.hooks.poll_running_hook_with_db_write(
cx,
EngineContext {
tip_block_number: this.blockchain.canonical_tip().number,
finalized_block_number: this.blockchain.finalized_block_number()?,
},
)? {
this.on_hook_result(result)?;
continue
}
// Process all incoming messages from the CL, these can affect the state of the
// SyncController, hence they are polled first, and they're also time sensitive, hence
// they're always drained first.
while let Poll::Ready(Some(msg)) = this.engine_message_rx.poll_next_unpin(cx) {
match msg {
BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => {
match this.on_forkchoice_updated(state, payload_attrs, tx) {
OnForkchoiceUpdateOutcome::Processed => {}
OnForkchoiceUpdateOutcome::ReachedMaxBlock => {
// reached the max block, we can terminate the future
return Poll::Ready(Ok(()))
}
OnForkchoiceUpdateOutcome::Fatal(err) => {
// fatal error, we can terminate the future
return Poll::Ready(Err(RethError::Execution(err).into()))
// Process one incoming message from the CL. We don't drain the messages right away,
// because we want to interleave a poll of the running hook between them.
//
// These messages can affect the state of the SyncController and they're also time
// sensitive, hence they are polled first.
if let Poll::Ready(Some(msg)) = this.engine_message_rx.poll_next_unpin(cx) {
match msg {
BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => {
match this.on_forkchoice_updated(state, payload_attrs, tx) {
OnForkchoiceUpdateOutcome::Processed => {}
OnForkchoiceUpdateOutcome::ReachedMaxBlock => {
// reached the max block, we can terminate the future
return Poll::Ready(Ok(()))
}
OnForkchoiceUpdateOutcome::Fatal(err) => {
// fatal error, we can terminate the future
return Poll::Ready(Err(RethError::Execution(err).into()))
}
}
}
BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => {
this.metrics.new_payload_messages.increment(1);
let res = this.on_new_payload(payload, cancun_fields);
let _ = tx.send(res);
}
BeaconEngineMessage::TransitionConfigurationExchanged => {
this.blockchain.on_transition_configuration_exchanged();
}
BeaconEngineMessage::EventListener(tx) => {
this.listeners.push_listener(tx);
}
}
BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => {
this.metrics.new_payload_messages.increment(1);
let res = this.on_new_payload(payload, cancun_fields);
let _ = tx.send(res);
}
BeaconEngineMessage::TransitionConfigurationExchanged => {
this.blockchain.on_transition_configuration_exchanged();
}
BeaconEngineMessage::EventListener(tx) => {
this.listeners.push_listener(tx);
}
continue
}
// Both running hook with db write access and engine messages are pending,
// proceed to other polls
break
}
// process sync events if any
@@ -1856,7 +1869,10 @@ where
if !this.forkchoice_state_tracker.is_latest_invalid() {
if let Poll::Ready(result) = this.hooks.poll_next_hook(
cx,
EngineContext { tip_block_number: this.blockchain.canonical_tip().number },
EngineContext {
tip_block_number: this.blockchain.canonical_tip().number,
finalized_block_number: this.blockchain.finalized_block_number()?,
},
this.sync.is_pipeline_active(),
)? {
this.on_hook_result(result)?;
@@ -1893,7 +1909,8 @@ mod tests {
BeaconForkChoiceUpdateError,
};
use assert_matches::assert_matches;
use reth_primitives::{stage::StageCheckpoint, ChainSpec, ChainSpecBuilder, H256, MAINNET};
use reth_interfaces::test_utils::generators::{self, Rng};
use reth_primitives::{stage::StageCheckpoint, ChainSpec, ChainSpecBuilder, B256, MAINNET};
use reth_provider::{BlockWriter, ProviderFactory};
use reth_rpc_types::engine::{ForkchoiceState, ForkchoiceUpdated, PayloadStatus};
use reth_rpc_types_compat::engine::payload::try_block_to_payload_v1;
@@ -1904,6 +1921,7 @@ mod tests {
// Pipeline error is propagated.
#[tokio::test]
async fn pipeline_error_is_propagated() {
let mut rng = generators::rng();
let chain_spec = Arc::new(
ChainSpecBuilder::default()
.chain(MAINNET.chain)
@@ -1922,7 +1940,7 @@ mod tests {
let _ = env
.send_forkchoice_updated(ForkchoiceState {
head_block_hash: H256::random(),
head_block_hash: rng.gen(),
..Default::default()
})
.await;
@@ -1935,6 +1953,7 @@ mod tests {
// Test that the consensus engine is idle until first forkchoice updated is received.
#[tokio::test]
async fn is_idle_until_forkchoice_is_set() {
let mut rng = generators::rng();
let chain_spec = Arc::new(
ChainSpecBuilder::default()
.chain(MAINNET.chain)
@@ -1963,7 +1982,7 @@ mod tests {
// consensus engine is still idle because pruning is running
let _ = env
.send_forkchoice_updated(ForkchoiceState {
head_block_hash: H256::random(),
head_block_hash: rng.gen(),
..Default::default()
})
.await;
@@ -1983,7 +2002,7 @@ mod tests {
Err(TryRecvError::Empty) => {
let _ = env
.send_forkchoice_updated(ForkchoiceState {
head_block_hash: H256::random(),
head_block_hash: rng.gen(),
..Default::default()
})
.await;
@@ -1998,6 +2017,7 @@ mod tests {
// for the second time.
#[tokio::test]
async fn runs_pipeline_again_if_tree_not_restored() {
let mut rng = generators::rng();
let chain_spec = Arc::new(
ChainSpecBuilder::default()
.chain(MAINNET.chain)
@@ -2019,7 +2039,7 @@ mod tests {
let _ = env
.send_forkchoice_updated(ForkchoiceState {
head_block_hash: H256::random(),
head_block_hash: rng.gen(),
..Default::default()
})
.await;
@@ -2032,6 +2052,7 @@ mod tests {
#[tokio::test]
async fn terminates_upon_reaching_max_block() {
let mut rng = generators::rng();
let max_block = 1000;
let chain_spec = Arc::new(
ChainSpecBuilder::default()
@@ -2054,7 +2075,7 @@ mod tests {
let _ = env
.send_forkchoice_updated(ForkchoiceState {
head_block_hash: H256::random(),
head_block_hash: rng.gen(),
..Default::default()
})
.await;
@@ -2077,8 +2098,9 @@ mod tests {
mod fork_choice_updated {
use super::*;
use reth_db::{tables, transaction::DbTxMut};
use reth_interfaces::test_utils::{generators, generators::random_block};
use reth_interfaces::test_utils::generators::random_block;
use reth_rpc_types::engine::ForkchoiceUpdateError;
#[tokio::test]
async fn empty_head() {
let chain_spec = Arc::new(
@@ -2235,7 +2257,7 @@ mod tests {
let res = env
.send_forkchoice_updated(ForkchoiceState {
head_block_hash: H256::random(),
head_block_hash: rng.gen(),
finalized_block_hash: block1.hash,
..Default::default()
})
@@ -2295,7 +2317,7 @@ mod tests {
assert_matches!(res, Ok(result) => {
let ForkchoiceUpdated { payload_status, .. } = result;
assert_matches!(payload_status.status, PayloadStatusEnum::Invalid { .. });
assert_eq!(payload_status.latest_valid_hash, Some(H256::zero()));
assert_eq!(payload_status.latest_valid_hash, Some(B256::ZERO));
});
}
@@ -2335,7 +2357,7 @@ mod tests {
validation_error: BlockValidationError::BlockPreMerge { hash: block1.hash }
.to_string(),
})
.with_latest_valid_hash(H256::zero());
.with_latest_valid_hash(B256::ZERO);
assert_matches!(res, Ok(result) => assert_eq!(result, expected_result));
}
}
@@ -2537,7 +2559,8 @@ mod tests {
assert_matches!(res, Ok(ForkchoiceUpdated { payload_status, .. }) => assert_eq!(payload_status, expected_result));
// Send new payload
let block = random_block(&mut rng, 2, Some(H256::random()), None, Some(0));
let parent = rng.gen();
let block = random_block(&mut rng, 2, Some(parent), None, Some(0));
let res = env.send_new_payload(try_block_to_payload_v1(block), None).await;
let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Syncing);
assert_matches!(res, Ok(result) => assert_eq!(result, expected_result));
@@ -2596,7 +2619,7 @@ mod tests {
validation_error: BlockValidationError::BlockPreMerge { hash: block1.hash }
.to_string(),
})
.with_latest_valid_hash(H256::zero());
.with_latest_valid_hash(B256::ZERO);
assert_matches!(res, Ok(ForkchoiceUpdated { payload_status, .. }) => assert_eq!(payload_status, expected_result));
// Send new payload
@@ -2609,7 +2632,7 @@ mod tests {
validation_error: BlockValidationError::BlockPreMerge { hash: block2.hash }
.to_string(),
})
.with_latest_valid_hash(H256::zero());
.with_latest_valid_hash(B256::ZERO);
assert_eq!(result, expected_result);
assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty));


@@ -8,7 +8,7 @@ use reth_interfaces::p2p::{
full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient},
headers::client::HeadersClient,
};
use reth_primitives::{BlockNumber, ChainSpec, SealedBlock, H256};
use reth_primitives::{BlockNumber, ChainSpec, SealedBlock, B256};
use reth_stages::{ControlFlow, Pipeline, PipelineError, PipelineWithResult};
use reth_tasks::TaskSpawner;
use std::{
@@ -40,7 +40,7 @@ where
/// The pipeline is used for large ranges.
pipeline_state: PipelineState<DB>,
/// Pending target block for the pipeline to sync
pending_pipeline_target: Option<H256>,
pending_pipeline_target: Option<B256>,
/// In-flight full block requests in progress.
inflight_full_block_requests: Vec<FetchFullBlockFuture<Client>>,
/// In-flight full block _range_ requests in progress.
@@ -109,7 +109,7 @@ where
}
/// Cancels the full block request with the given hash.
pub(crate) fn cancel_full_block_request(&mut self, hash: H256) {
pub(crate) fn cancel_full_block_request(&mut self, hash: B256) {
self.inflight_full_block_requests.retain(|req| *req.hash() != hash);
self.update_block_download_metrics();
}
@@ -136,7 +136,7 @@ where
}
/// Returns true if there's already a request for the given hash.
pub(crate) fn is_inflight_request(&self, hash: H256) -> bool {
pub(crate) fn is_inflight_request(&self, hash: B256) -> bool {
self.inflight_full_block_requests.iter().any(|req| *req.hash() == hash)
}
@@ -144,7 +144,7 @@ where
///
/// If the `count` is 1, this will use the `download_full_block` method instead, because it
/// downloads headers and bodies for the block concurrently.
pub(crate) fn download_block_range(&mut self, hash: H256, count: u64) {
pub(crate) fn download_block_range(&mut self, hash: B256, count: u64) {
if count == 1 {
self.download_full_block(hash);
} else {
@@ -167,7 +167,7 @@ where
///
/// Returns `true` if the request was started, `false` if there's already a request for the
/// given hash.
pub(crate) fn download_full_block(&mut self, hash: H256) -> bool {
pub(crate) fn download_full_block(&mut self, hash: B256) -> bool {
if self.is_inflight_request(hash) {
return false
}
@@ -185,7 +185,7 @@ where
}
/// Sets a new target to sync the pipeline to.
pub(crate) fn set_pipeline_sync_target(&mut self, target: H256) {
pub(crate) fn set_pipeline_sync_target(&mut self, target: B256) {
self.pending_pipeline_target = Some(target);
}
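Taken together, the crate-internal download API reads roughly as follows (`sync` is an assumed EngineSyncController value and the counts are illustrative):

// A single-block gap fetches header and body concurrently.
sync.download_block_range(tip_hash, 1); // delegates to download_full_block
// Small gaps are fetched as a contiguous range of full blocks.
sync.download_block_range(tip_hash, 64);
// Gaps beyond the pipeline threshold are handed to the pipeline instead.
sync.set_pipeline_sync_target(tip_hash);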
@@ -349,7 +349,7 @@ pub(crate) enum EngineSyncEvent {
/// Pipeline started syncing
///
/// This is none if the pipeline is triggered without a specific target.
PipelineStarted(Option<H256>),
PipelineStarted(Option<B256>),
/// Pipeline finished
///
/// If this is returned, the pipeline is idle.
@@ -457,7 +457,7 @@ mod tests {
executor_factory.extend(self.executor_results);
// Setup pipeline
let (tip_tx, _tip_rx) = watch::channel(H256::default());
let (tip_tx, _tip_rx) = watch::channel(B256::default());
let mut pipeline = Pipeline::builder()
.add_stages(TestStages::new(self.pipeline_exec_outputs, Default::default()))
.with_tip_sender(tip_tx);


@@ -19,7 +19,7 @@ use reth_interfaces::{
test_utils::{NoopFullBlockClient, TestConsensus},
};
use reth_payload_builder::test_utils::spawn_test_payload_service;
use reth_primitives::{BlockNumber, ChainSpec, PruneBatchSizes, PruneModes, H256, U256};
use reth_primitives::{BlockNumber, ChainSpec, PruneBatchSizes, PruneModes, B256, U256};
use reth_provider::{
providers::BlockchainProvider, test_utils::TestExecutorFactory, BlockExecutor,
BundleStateWithReceipts, ExecutorFactory, ProviderFactory, PrunableBlockExecutor,
@@ -55,14 +55,14 @@ pub struct TestEnv<DB> {
pub db: DB,
// Keep the tip receiver around, so it's not dropped.
#[allow(dead_code)]
tip_rx: watch::Receiver<H256>,
tip_rx: watch::Receiver<B256>,
engine_handle: BeaconConsensusEngineHandle,
}
impl<DB> TestEnv<DB> {
fn new(
db: DB,
tip_rx: watch::Receiver<H256>,
tip_rx: watch::Receiver<B256>,
engine_handle: BeaconConsensusEngineHandle,
) -> Self {
Self { db, tip_rx, engine_handle }
@@ -468,7 +468,7 @@ where
};
// Setup pipeline
let (tip_tx, tip_rx) = watch::channel(H256::default());
let (tip_tx, tip_rx) = watch::channel(B256::default());
let mut pipeline = match self.base_config.pipeline_config {
TestPipelineConfig::Test(outputs) => Pipeline::builder()
.add_stages(TestStages::new(outputs, Default::default()))
@@ -521,6 +521,7 @@ where
5,
PruneModes::none(),
PruneBatchSizes::default(),
watch::channel(None).1,
);
let mut hooks = EngineHooks::new();


@@ -3,7 +3,7 @@
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxzy/reth/issues/"
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)]
#![deny(unused_must_use, rust_2018_idioms)]


@@ -3,7 +3,7 @@
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxzy/reth/issues/"
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)]
#![deny(unused_must_use, rust_2018_idioms)]


@@ -230,21 +230,6 @@ pub fn validate_block_standalone(
expected: *header_withdrawals_root,
})
}
// Validate that withdrawal index is monotonically increasing within a block.
if let Some(first) = withdrawals.first() {
let mut prev_index = first.index;
for withdrawal in withdrawals.iter().skip(1) {
let expected = prev_index + 1;
if expected != withdrawal.index {
return Err(ConsensusError::WithdrawalIndexInvalid {
got: withdrawal.index,
expected,
})
}
prev_index = withdrawal.index;
}
}
}
// EIP-4844: Shard Blob Transactions
@@ -350,7 +335,6 @@ pub fn validate_header_regarding_parent(
/// Checks:
/// If we already know the block.
/// If parent is known
/// If withdrawals are valid
///
/// Returns parent block header
pub fn validate_block_regarding_chain<PROV: HeaderProvider + WithdrawalsProvider>(
@@ -369,33 +353,6 @@ pub fn validate_block_regarding_chain<PROV: HeaderProvider + WithdrawalsProvider
.header(&block.parent_hash)?
.ok_or(ConsensusError::ParentUnknown { hash: block.parent_hash })?;
// Check if withdrawals are valid.
if let Some(withdrawals) = &block.withdrawals {
if !withdrawals.is_empty() {
let latest_withdrawal = provider.latest_withdrawal()?;
match latest_withdrawal {
Some(withdrawal) => {
if withdrawal.index + 1 != withdrawals.first().unwrap().index {
return Err(ConsensusError::WithdrawalIndexInvalid {
got: withdrawals.first().unwrap().index,
expected: withdrawal.index + 1,
}
.into())
}
}
None => {
if withdrawals.first().unwrap().index != 0 {
return Err(ConsensusError::WithdrawalIndexInvalid {
got: withdrawals.first().unwrap().index,
expected: 0,
}
.into())
}
}
}
}
}
// Return parent header.
Ok(parent.seal(block.parent_hash))
}
@@ -501,13 +458,15 @@ pub fn validate_4844_header_standalone(header: &SealedHeader) -> Result<(), Cons
#[cfg(test)]
mod tests {
use super::*;
use assert_matches::assert_matches;
use mockall::mock;
use reth_interfaces::{RethError::Consensus, RethResult};
use reth_interfaces::{
test_utils::generators::{self, Rng},
RethResult,
};
use reth_primitives::{
constants::eip4844::DATA_GAS_PER_BLOB, hex_literal::hex, proofs, Account, Address,
BlockBody, BlockHash, BlockHashOrNumber, Bytes, ChainSpecBuilder, Header, Signature,
TransactionKind, TransactionSigned, Withdrawal, H256, MAINNET, U256,
TransactionKind, TransactionSigned, Withdrawal, MAINNET, U256,
};
use std::ops::RangeBounds;
@@ -625,11 +584,12 @@ mod tests {
let signature = Signature { odd_y_parity: true, r: U256::default(), s: U256::default() };
let tx = TransactionSigned::from_transaction_and_signature(request, signature);
let signer = Address::zero();
let signer = Address::ZERO;
TransactionSignedEcRecovered::from_signed_transaction(tx, signer)
}
fn mock_blob_tx(nonce: u64, num_blobs: usize) -> TransactionSigned {
let mut rng = generators::rng();
let request = Transaction::Eip4844(TxEip4844 {
chain_id: 1u64,
nonce,
@@ -641,7 +601,7 @@ mod tests {
value: 3,
input: Bytes::from(vec![1, 2]),
access_list: Default::default(),
blob_versioned_hashes: vec![H256::random(); num_blobs],
blob_versioned_hashes: std::iter::repeat_with(|| rng.gen()).take(num_blobs).collect(),
});
let signature = Signature { odd_y_parity: true, r: U256::default(), s: U256::default() };
@@ -795,43 +755,14 @@ mod tests {
let block = create_block_with_withdrawals(&[5, 6, 7, 8, 9]);
assert_eq!(validate_block_standalone(&block, &chain_spec), Ok(()));
// Invalid withdrawal index
let block = create_block_with_withdrawals(&[100, 102]);
assert_matches!(
validate_block_standalone(&block, &chain_spec),
Err(ConsensusError::WithdrawalIndexInvalid { .. })
);
let block = create_block_with_withdrawals(&[5, 6, 7, 9]);
assert_matches!(
validate_block_standalone(&block, &chain_spec),
Err(ConsensusError::WithdrawalIndexInvalid { .. })
);
let (_, parent) = mock_block();
let mut provider = Provider::new(Some(parent.clone()));
// Withdrawal index should be 0 if there are no withdrawals in the chain
let block = create_block_with_withdrawals(&[1, 2, 3]);
provider.withdrawals_provider.expect_latest_withdrawal().return_const(Ok(None));
assert_matches!(
validate_block_regarding_chain(&block, &provider),
Err(Consensus(ConsensusError::WithdrawalIndexInvalid { got: 1, expected: 0 }))
);
let provider = Provider::new(Some(parent.clone()));
let block = create_block_with_withdrawals(&[0, 1, 2]);
let res = validate_block_regarding_chain(&block, &provider);
assert!(res.is_ok());
// Withdrawal index should be the last withdrawal index + 1
let mut provider = Provider::new(Some(parent));
let block = create_block_with_withdrawals(&[4, 5, 6]);
provider
.withdrawals_provider
.expect_latest_withdrawal()
.return_const(Ok(Some(Withdrawal { index: 2, ..Default::default() })));
assert_matches!(
validate_block_regarding_chain(&block, &provider),
Err(Consensus(ConsensusError::WithdrawalIndexInvalid { got: 4, expected: 3 }))
);
let block = create_block_with_withdrawals(&[3, 4, 5]);
provider
.withdrawals_provider


@@ -9,6 +9,7 @@ repository.workspace = true
[dependencies]
reth-codecs = { path = "../storage/codecs" }
reth-nippy-jar = { path = "../storage/nippy-jar" }
reth-primitives.workspace = true
reth-rpc-types.workspace = true
reth-network-api.workspace = true
@@ -45,7 +46,6 @@ reth-db = { workspace = true, features = ["test-utils"] }
tokio = { workspace = true, features = ["full"] }
tokio-stream = { workspace = true, features = ["sync"] }
arbitrary = { workspace = true, features = ["derive"] }
hex-literal.workspace = true
secp256k1 = { workspace = true, features = ["alloc", "recovery", "rand"] }
[features]


@@ -1,6 +1,6 @@
use async_trait::async_trait;
use reth_primitives::{
BlockHash, BlockNumber, Header, InvalidTransactionError, SealedBlock, SealedHeader, H256, U256,
BlockHash, BlockNumber, Header, InvalidTransactionError, SealedBlock, SealedHeader, B256, U256,
};
use std::fmt::Debug;
@@ -94,18 +94,18 @@ pub enum ConsensusError {
#[error("Block ommer hash ({got:?}) is different from expected: ({expected:?})")]
BodyOmmersHashDiff {
/// The actual ommer hash.
got: H256,
got: B256,
/// The expected ommer hash.
expected: H256,
expected: B256,
},
/// Error when the state root in the block is different from the expected state root.
#[error("Block state root ({got:?}) is different from expected: ({expected:?})")]
BodyStateRootDiff {
/// The actual state root.
got: H256,
got: B256,
/// The expected state root.
expected: H256,
expected: B256,
},
/// Error when the transaction root in the block is different from the expected transaction
@@ -113,9 +113,9 @@ pub enum ConsensusError {
#[error("Block transaction root ({got:?}) is different from expected ({expected:?})")]
BodyTransactionRootDiff {
/// The actual transaction root.
got: H256,
got: B256,
/// The expected transaction root.
expected: H256,
expected: B256,
},
/// Error when the withdrawals root in the block is different from the expected withdrawals
@@ -123,9 +123,9 @@ pub enum ConsensusError {
#[error("Block withdrawals root ({got:?}) is different from expected ({expected:?})")]
BodyWithdrawalsRootDiff {
/// The actual withdrawals root.
got: H256,
got: B256,
/// The expected withdrawals root.
expected: H256,
expected: B256,
},
/// Error when a block with a specific hash and number is already known.
@@ -161,9 +161,9 @@ pub enum ConsensusError {
)]
ParentHashMismatch {
/// The expected parent hash.
expected_parent_hash: H256,
expected_parent_hash: B256,
/// The actual parent hash.
got_parent_hash: H256,
got_parent_hash: B256,
},
/// Error when the block timestamp is in the past compared to the parent timestamp.
@@ -246,15 +246,6 @@ pub enum ConsensusError {
#[error("Unexpected withdrawals root")]
WithdrawalsRootUnexpected,
/// Error when the withdrawal index is invalid.
#[error("Withdrawal index #{got} is invalid. Expected: #{expected}.")]
WithdrawalIndexInvalid {
/// The actual withdrawal index.
got: u64,
/// The expected withdrawal index.
expected: u64,
},
/// Error when withdrawals are missing.
#[error("Missing withdrawals")]
BodyWithdrawalsMissing,


@@ -29,3 +29,9 @@ pub enum RethError {
#[error("{0}")]
Custom(String),
}
impl From<reth_nippy_jar::NippyJarError> for RethError {
fn from(err: reth_nippy_jar::NippyJarError) -> Self {
RethError::Custom(err.to_string())
}
}
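With this impl, `?` lifts snapshot-file errors straight into RethError; a minimal sketch:

fn demo(res: Result<(), reth_nippy_jar::NippyJarError>) -> Result<(), RethError> {
    // NippyJarError is converted through the From impl above (RethError::Custom).
    res?;
    Ok(())
}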


@@ -1,4 +1,4 @@
use reth_primitives::{BlockNumHash, Bloom, PrunePartError, H256};
use reth_primitives::{BlockNumHash, Bloom, PrunePartError, B256};
use thiserror::Error;
/// Transaction validation errors
@@ -9,7 +9,7 @@ pub enum BlockValidationError {
#[error("EVM reported invalid transaction ({hash:?}): {message}")]
EVM {
/// The hash of the transaction
hash: H256,
hash: B256,
/// Error message
message: String,
},
@@ -23,9 +23,9 @@ pub enum BlockValidationError {
#[error("Receipt root {got:?} is different than expected {expected:?}.")]
ReceiptRootDiff {
/// The actual receipt root
got: H256,
got: B256,
/// The expected receipt root
expected: H256,
expected: B256,
},
/// Error when header bloom filter doesn't match expected value
#[error("Header bloom filter {got:?} is different than expected {expected:?}.")]
@@ -57,10 +57,10 @@ pub enum BlockValidationError {
#[error("Block {hash:?} is pre merge")]
BlockPreMerge {
/// The hash of the block
hash: H256,
hash: B256,
},
#[error("Missing total difficulty for block {hash:?}")]
MissingTotalDifficulty { hash: H256 },
MissingTotalDifficulty { hash: B256 },
/// Error for EIP-4788 when parent beacon block root is missing
#[error("EIP-4788 Parent beacon block root missing for active Cancun block")]
MissingParentBeaconBlockRoot,


@@ -7,7 +7,7 @@
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxzy/reth/issues/"
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)]
#![deny(unused_must_use, rust_2018_idioms)]


@@ -5,7 +5,7 @@ use std::{
use crate::p2p::{download::DownloadClient, error::PeerRequestResult, priority::Priority};
use futures::{Future, FutureExt};
use reth_primitives::{BlockBody, H256};
use reth_primitives::{BlockBody, B256};
/// The bodies future type
pub type BodiesFut = Pin<Box<dyn Future<Output = PeerRequestResult<Vec<BlockBody>>> + Send + Sync>>;
@@ -17,23 +17,23 @@ pub trait BodiesClient: DownloadClient {
type Output: Future<Output = PeerRequestResult<Vec<BlockBody>>> + Sync + Send + Unpin;
/// Fetches the block body for the requested block.
fn get_block_bodies(&self, hashes: Vec<H256>) -> Self::Output {
fn get_block_bodies(&self, hashes: Vec<B256>) -> Self::Output {
self.get_block_bodies_with_priority(hashes, Priority::Normal)
}
/// Fetches the block body for the requested block with priority
fn get_block_bodies_with_priority(&self, hashes: Vec<H256>, priority: Priority)
fn get_block_bodies_with_priority(&self, hashes: Vec<B256>, priority: Priority)
-> Self::Output;
/// Fetches a single block body for the requested hash.
fn get_block_body(&self, hash: H256) -> SingleBodyRequest<Self::Output> {
fn get_block_body(&self, hash: B256) -> SingleBodyRequest<Self::Output> {
self.get_block_body_with_priority(hash, Priority::Normal)
}
/// Fetches a single block body for the requested hash with priority
fn get_block_body_with_priority(
&self,
hash: H256,
hash: B256,
priority: Priority,
) -> SingleBodyRequest<Self::Output> {
let fut = self.get_block_bodies_with_priority(vec![hash], priority);
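Both convenience methods funnel into get_block_bodies_with_priority; a usage sketch inside an async context (`client` is an assumed BodiesClient implementor and `hash: B256`):

// Batch fetch at normal priority (what get_block_bodies expands to).
let bodies = client.get_block_bodies_with_priority(vec![hash], Priority::Normal).await;
// Single-hash convenience wrapper around the same request path.
let body = client.get_block_body(hash).await;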


@@ -5,7 +5,7 @@ use crate::p2p::{
priority::Priority,
};
use futures::future::Either;
use reth_primitives::H256;
use reth_primitives::B256;
/// A downloader that combines two different downloaders/client implementations that have the same
/// associated types.
@@ -45,7 +45,7 @@ where
fn get_block_bodies_with_priority(
&self,
hashes: Vec<H256>,
hashes: Vec<B256>,
priority: Priority,
) -> Self::Output {
match self {


@@ -1,7 +1,7 @@
use super::headers::client::HeadersRequest;
use crate::{consensus, db};
use reth_network_api::ReputationChangeKind;
use reth_primitives::{BlockHashOrNumber, BlockNumber, Header, WithPeerId, H256};
use reth_primitives::{BlockHashOrNumber, BlockNumber, Header, WithPeerId, B256};
use std::ops::RangeInclusive;
use thiserror::Error;
use tokio::sync::{mpsc, oneshot};
@@ -122,7 +122,7 @@ pub enum DownloadError {
#[error("Failed to validate header {hash}. Details: {error}.")]
HeaderValidation {
/// Hash of header failing validation
hash: H256,
hash: B256,
/// The details of validation failure
#[source]
error: consensus::ConsensusError,
@@ -131,9 +131,9 @@ pub enum DownloadError {
#[error("Received invalid tip: {received:?}. Expected {expected:?}.")]
InvalidTip {
/// The hash of the received tip
received: H256,
received: B256,
/// The hash of the expected tip
expected: H256,
expected: B256,
},
/// Received a tip with an invalid tip number
#[error("Received invalid tip number: {received:?}. Expected {expected:?}.")]
@@ -164,7 +164,7 @@ pub enum DownloadError {
#[error("Failed to validate body for header {hash}. Details: {error}.")]
BodyValidation {
/// Hash of header failing validation
hash: H256,
hash: B256,
/// The details of validation failure
#[source]
error: consensus::ConsensusError,


@@ -9,7 +9,7 @@ use crate::{
};
use futures::Stream;
use reth_primitives::{
BlockBody, Header, HeadersDirection, SealedBlock, SealedHeader, WithPeerId, H256,
BlockBody, Header, HeadersDirection, SealedBlock, SealedHeader, WithPeerId, B256,
};
use std::{
cmp::Reverse,
@@ -52,7 +52,7 @@ where
///
/// Caution: This does not validate the body (transactions) response, but guarantees that the
/// [SealedHeader] matches the requested hash.
pub fn get_full_block(&self, hash: H256) -> FetchFullBlockFuture<Client> {
pub fn get_full_block(&self, hash: B256) -> FetchFullBlockFuture<Client> {
let client = self.client.clone();
FetchFullBlockFuture {
hash,
@@ -77,7 +77,7 @@ where
/// The returned future yields bodies in falling order, i.e. with descending block numbers.
pub fn get_full_block_range(
&self,
hash: H256,
hash: B256,
count: u64,
) -> FetchFullBlockRangeFuture<Client> {
let client = self.client.clone();
@@ -117,7 +117,7 @@ where
Client: BodiesClient + HeadersClient,
{
client: Client,
hash: H256,
hash: B256,
request: FullBlockRequest<Client>,
header: Option<SealedHeader>,
body: Option<BodyResponse>,
@@ -128,7 +128,7 @@ where
Client: BodiesClient + HeadersClient,
{
/// Returns the hash of the block being requested.
pub fn hash(&self) -> &H256 {
pub fn hash(&self) -> &B256 {
&self.hash
}
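
A usage sketch for the two entry points above, assuming the surrounding type is `FullBlockClient<Client>` and that the returned futures resolve to sealed blocks:

    async fn fetch_tip<Client>(client: &FullBlockClient<Client>, tip: B256)
    where
        Client: BodiesClient + HeadersClient + Clone + Unpin + 'static,
    {
        // Resolves once both header and body for `tip` have arrived.
        let block = client.get_full_block(tip).await;
        // Yields the 64 blocks ending at `tip`, in descending block order.
        let range = client.get_full_block_range(tip, 64).await;
        debug_assert_eq!(range.first().map(|b| b.hash()), Some(block.hash()));
    }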
@@ -364,7 +364,7 @@ where
/// The consensus instance used to validate the blocks.
consensus: Arc<dyn Consensus>,
/// The block hash to start fetching from (inclusive).
start_hash: H256,
start_hash: B256,
/// How many blocks to fetch: `len([start_hash, ..]) == count`
count: u64,
/// Requests for headers and bodies that are in progress.
@@ -382,7 +382,7 @@ where
Client: BodiesClient + HeadersClient,
{
/// Returns the block hashes for the given range, if they are available.
pub fn range_block_hashes(&self) -> Option<Vec<H256>> {
pub fn range_block_hashes(&self) -> Option<Vec<B256>> {
self.headers.as_ref().map(|h| h.iter().map(|h| h.hash()).collect::<Vec<_>>())
}
@@ -409,7 +409,7 @@ where
/// Returns the remaining hashes for the bodies request, based on the headers that still exist
/// in the `root_map`.
fn remaining_bodies_hashes(&self) -> Vec<H256> {
fn remaining_bodies_hashes(&self) -> Vec<B256> {
self.pending_headers.iter().map(|h| h.hash()).collect::<Vec<_>>()
}
@@ -521,7 +521,7 @@ where
}
/// Returns the start hash for the request
pub fn start_hash(&self) -> H256 {
pub fn start_hash(&self) -> B256 {
self.start_hash
}
@@ -605,8 +605,8 @@ where
// future, and one which is a bodies range future.
//
// The headers range future should yield the bodies range future.
// The bodies range future should not have an Option<Vec<H256>>, it should
// have a populated Vec<H256> from the successful headers range future.
// The bodies range future should not have an Option<Vec<B256>>, it should
// have a populated Vec<B256> from the successful headers range future.
//
// This is optimal because we can not send a bodies request without
// first completing the headers request. This way we can get rid of the

View File

@@ -4,7 +4,7 @@ use crate::{
p2p::error::{DownloadError, DownloadResult},
};
use futures::Stream;
use reth_primitives::{BlockHashOrNumber, SealedHeader, H256};
use reth_primitives::{BlockHashOrNumber, SealedHeader, B256};
/// A downloader capable of fetching and yielding block headers.
///
@@ -42,7 +42,7 @@ pub enum SyncTarget {
/// Sync _inclusively_ to the given block hash.
///
/// This target specifies the upper end of the sync gap `(head...tip]`
Tip(H256),
Tip(B256),
/// This represents a gap of missing headers bounded by the given header `h` in the form of
/// `(head,..h),h+1,h+2...`
///
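
For illustration, deriving the inclusive tip target from a forkchoice head (the helper name is ours):

    fn tip_target(head_block_hash: B256) -> SyncTarget {
        // Headers are synced across the gap `(local_head...tip]`, tip included.
        SyncTarget::Tip(head_block_hash)
    }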

View File

@@ -1,4 +1,4 @@
use reth_primitives::{Address, BlockHash, BlockHashOrNumber, BlockNumber, TxNumber, H256};
use reth_primitives::{Address, BlockHash, BlockHashOrNumber, BlockNumber, TxNumber, B256};
/// Bundled errors variants thrown by various providers.
#[allow(missing_docs)]
@@ -21,7 +21,7 @@ pub enum ProviderError {
/// The account address
address: Address,
/// The storage key
storage_key: H256,
storage_key: B256,
},
/// The block number was found for the given address, but the changeset was not found.
#[error("Account {address:?} ChangeSet for block #{block_number} does not exist")]
@@ -60,10 +60,10 @@ pub enum ProviderError {
CacheServiceUnavailable,
/// Thrown when we failed to look up a block for the pending state
#[error("Unknown block hash: {0:}")]
UnknownBlockHash(H256),
UnknownBlockHash(B256),
/// Thrown when we were unable to find a state for a block hash
#[error("No State found for block hash: {0:}")]
StateForHashNotFound(H256),
StateForHashNotFound(B256),
/// Unable to compute state root on top of historical block
#[error("Unable to compute state root on top of historical block")]
StateRootNotAvailableForHistoricalBlock,
@@ -74,9 +74,9 @@ pub enum ProviderError {
#[error("Merkle trie root mismatch at #{block_number} ({block_hash:?}). Got: {got:?}. Expected: {expected:?}")]
StateRootMismatch {
/// Expected root
expected: H256,
expected: B256,
/// Calculated root
got: H256,
got: B256,
/// Block number
block_number: BlockNumber,
/// Block hash
@@ -86,9 +86,9 @@ pub enum ProviderError {
#[error("Unwind merkle trie root mismatch at #{block_number} ({block_hash:?}). Got: {got:?}. Expected: {expected:?}")]
UnwindStateRootMismatch {
/// Expected root
expected: H256,
expected: B256,
/// Calculated root
got: H256,
got: B256,
/// Target block number
block_number: BlockNumber,
/// Block hash
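
A sketch of how the mismatch variants are raised, using exactly the fields shown above (the helper name is ours):

    use reth_primitives::{BlockNumber, B256};

    fn ensure_state_root(
        expected: B256,
        got: B256,
        block_number: BlockNumber,
        block_hash: B256,
    ) -> Result<(), ProviderError> {
        if got != expected {
            return Err(ProviderError::StateRootMismatch { expected, got, block_number, block_hash })
        }
        Ok(())
    }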

View File

@@ -6,7 +6,7 @@ use crate::p2p::{
};
use async_trait::async_trait;
use futures::{future, Future, FutureExt};
use reth_primitives::{BlockBody, WithPeerId, H256};
use reth_primitives::{BlockBody, WithPeerId, B256};
use std::{
fmt::{Debug, Formatter},
pin::Pin,
@@ -37,13 +37,13 @@ impl<F: Sync + Send> DownloadClient for TestBodiesClient<F> {
impl<F> BodiesClient for TestBodiesClient<F>
where
F: Fn(Vec<H256>) -> PeerRequestResult<Vec<BlockBody>> + Send + Sync,
F: Fn(Vec<B256>) -> PeerRequestResult<Vec<BlockBody>> + Send + Sync,
{
type Output = BodiesFut;
fn get_block_bodies_with_priority(
&self,
hashes: Vec<H256>,
hashes: Vec<B256>,
_priority: Priority,
) -> Self::Output {
let (tx, rx) = oneshot::channel();

View File

@@ -8,7 +8,7 @@ use crate::p2p::{
use parking_lot::Mutex;
use reth_primitives::{
BlockBody, BlockHashOrNumber, BlockNumHash, Header, HeadersDirection, PeerId, SealedBlock,
SealedHeader, WithPeerId, H256,
SealedHeader, WithPeerId, B256,
};
use std::{collections::HashMap, sync::Arc};
@@ -30,7 +30,7 @@ impl BodiesClient for NoopFullBlockClient {
fn get_block_bodies_with_priority(
&self,
_hashes: Vec<H256>,
_hashes: Vec<B256>,
_priority: Priority,
) -> Self::Output {
futures::future::ready(Ok(WithPeerId::new(PeerId::random(), vec![])))
@@ -55,8 +55,8 @@ impl HeadersClient for NoopFullBlockClient {
/// This full block client can be [Clone]d and shared between multiple tasks.
#[derive(Clone, Debug)]
pub struct TestFullBlockClient {
headers: Arc<Mutex<HashMap<H256, Header>>>,
bodies: Arc<Mutex<HashMap<H256, BlockBody>>>,
headers: Arc<Mutex<HashMap<B256, Header>>>,
bodies: Arc<Mutex<HashMap<B256, BlockBody>>>,
// soft response limit, max number of bodies to respond with
soft_limit: usize,
}
@@ -147,7 +147,7 @@ impl BodiesClient for TestFullBlockClient {
fn get_block_bodies_with_priority(
&self,
hashes: Vec<H256>,
hashes: Vec<B256>,
_priority: Priority,
) -> Self::Output {
let bodies = self.bodies.lock();

View File

@@ -5,7 +5,7 @@ use rand::{
use reth_primitives::{
proofs, sign_message, Account, Address, BlockNumber, Bytes, Header, Log, Receipt, SealedBlock,
SealedHeader, Signature, StorageEntry, Transaction, TransactionKind, TransactionSigned,
TxLegacy, H160, H256, U256,
TxLegacy, B256, U256,
};
use secp256k1::{KeyPair, Message as SecpMessage, Secp256k1, SecretKey, SECP256K1};
use std::{
@@ -40,7 +40,7 @@ pub fn rng() -> StdRng {
pub fn random_header_range<R: Rng>(
rng: &mut R,
range: std::ops::Range<u64>,
head: H256,
head: B256,
) -> Vec<SealedHeader> {
let mut headers = Vec::with_capacity(range.end.saturating_sub(range.start) as usize);
for idx in range {
@@ -56,7 +56,7 @@ pub fn random_header_range<R: Rng>(
/// Generate a random [SealedHeader].
///
/// The header is assumed to not be correct if validated.
pub fn random_header<R: Rng>(rng: &mut R, number: u64, parent: Option<H256>) -> SealedHeader {
pub fn random_header<R: Rng>(rng: &mut R, number: u64, parent: Option<B256>) -> SealedHeader {
let header = reth_primitives::Header {
number,
nonce: rng.gen(),
@@ -79,7 +79,7 @@ pub fn random_tx<R: Rng>(rng: &mut R) -> Transaction {
nonce: rng.gen::<u16>().into(),
gas_price: rng.gen::<u16>().into(),
gas_limit: rng.gen::<u16>().into(),
to: TransactionKind::Call(Address::random()),
to: TransactionKind::Call(rng.gen()),
value: rng.gen::<u16>().into(),
input: Bytes::default(),
})
@@ -100,7 +100,7 @@ pub fn random_signed_tx<R: Rng>(rng: &mut R) -> TransactionSigned {
/// Signs the [Transaction] with the given key pair.
pub fn sign_tx_with_key_pair(key_pair: KeyPair, tx: Transaction) -> TransactionSigned {
let signature =
sign_message(H256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash()).unwrap();
sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash()).unwrap();
TransactionSigned::from_transaction_and_signature(tx, signature)
}
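
A usage sketch combining the generators above (assumes `TransactionSigned::recover_signer`, which is not part of this diff):

    use secp256k1::{KeyPair, Secp256k1};

    fn sign_random_tx() {
        let mut rng = rand::thread_rng();
        let key_pair = KeyPair::new(&Secp256k1::new(), &mut rng);
        let tx = random_tx(&mut rng);
        let signed = sign_tx_with_key_pair(key_pair, tx);
        // The signature commits to `tx.signature_hash()`, so the signer
        // should be recoverable from the signed transaction.
        assert!(signed.recover_signer().is_some());
    }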
@@ -127,7 +127,7 @@ pub fn generate_keys<R: Rng>(rng: &mut R, count: usize) -> Vec<KeyPair> {
pub fn random_block<R: Rng>(
rng: &mut R,
number: u64,
parent: Option<H256>,
parent: Option<B256>,
tx_count: Option<u8>,
ommers_count: Option<u8>,
) -> SealedBlock {
@@ -173,7 +173,7 @@ pub fn random_block<R: Rng>(
pub fn random_block_range<R: Rng>(
rng: &mut R,
block_numbers: RangeInclusive<BlockNumber>,
head: H256,
head: B256,
tx_count: Range<u8>,
) -> Vec<SealedBlock> {
let mut blocks =
@@ -237,7 +237,7 @@ where
prev_from.balance = prev_from.balance.wrapping_sub(transfer);
// deposit in receiving account and update storage
let (prev_to, storage): &mut (Account, BTreeMap<H256, U256>) = state.get_mut(&to).unwrap();
let (prev_to, storage): &mut (Account, BTreeMap<B256, U256>) = state.get_mut(&to).unwrap();
let mut old_entries: Vec<_> = new_entries
.into_iter()
@@ -303,7 +303,12 @@ pub fn random_account_change<R: Rng>(
/// Generate a random storage change.
pub fn random_storage_entry<R: Rng>(rng: &mut R, key_range: std::ops::Range<u64>) -> StorageEntry {
let key = H256::from_low_u64_be(key_range.sample_single(rng));
let key = B256::new({
let n = key_range.sample_single(rng);
let mut m = [0u8; 32];
m[24..32].copy_from_slice(&n.to_be_bytes());
m
});
let value = U256::from(rng.gen::<u64>());
StorageEntry { key, value }
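
The replacement above inlines what `H256::from_low_u64_be` used to do; factored into a helper for clarity (the name is ours):

    fn b256_from_low_u64_be(n: u64) -> B256 {
        // Big-endian: the u64 occupies the last 8 of the 32 bytes.
        let mut bytes = [0u8; 32];
        bytes[24..32].copy_from_slice(&n.to_be_bytes());
        B256::new(bytes)
    }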
@@ -313,7 +318,7 @@ pub fn random_storage_entry<R: Rng>(rng: &mut R, key_range: std::ops::Range<u64>
pub fn random_eoa_account<R: Rng>(rng: &mut R) -> (Address, Account) {
let nonce: u64 = rng.gen();
let balance = U256::from(rng.gen::<u32>());
let addr = H160::from(rng.gen::<u64>());
let addr = rng.gen();
(addr, Account { nonce, balance, bytecode_hash: None })
}
@@ -338,7 +343,7 @@ pub fn random_contract_account_range<R: Rng>(
let mut accounts = Vec::with_capacity(acc_range.end.saturating_sub(acc_range.start) as usize);
for _ in acc_range {
let (address, eoa_account) = random_eoa_account(rng);
let account = Account { bytecode_hash: Some(H256::random()), ..eoa_account };
let account = Account { bytecode_hash: Some(rng.gen()), ..eoa_account };
accounts.push((address, account))
}
accounts
@@ -366,25 +371,23 @@ pub fn random_receipt<R: Rng>(
/// Generate random log
pub fn random_log<R: Rng>(rng: &mut R, address: Option<Address>, topics_count: Option<u8>) -> Log {
let data_byte_count = rng.gen::<u8>();
let topics_count = topics_count.unwrap_or_else(|| rng.gen::<u8>());
let data_byte_count = rng.gen::<u8>() as usize;
let topics_count = topics_count.unwrap_or_else(|| rng.gen()) as usize;
Log {
address: address.unwrap_or_else(|| rng.gen()),
topics: (0..topics_count).map(|_| rng.gen()).collect(),
data: Bytes::from((0..data_byte_count).map(|_| rng.gen::<u8>()).collect::<Vec<_>>()),
topics: std::iter::repeat_with(|| rng.gen()).take(topics_count).collect(),
data: std::iter::repeat_with(|| rng.gen()).take(data_byte_count).collect::<Vec<_>>().into(),
}
}
#[cfg(test)]
mod test {
use std::str::FromStr;
use super::*;
use hex_literal::hex;
use reth_primitives::{
keccak256, public_key_to_address, AccessList, Address, TransactionKind, TxEip1559,
hex, keccak256, public_key_to_address, AccessList, Address, TransactionKind, TxEip1559,
};
use secp256k1::KeyPair;
use std::str::FromStr;
#[test]
fn test_sign_message() {
@@ -407,7 +410,7 @@ mod test {
let key_pair = KeyPair::new(&secp, &mut rand::thread_rng());
let signature =
sign_message(H256::from_slice(&key_pair.secret_bytes()[..]), signature_hash)
sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), signature_hash)
.unwrap();
let signed = TransactionSigned::from_transaction_and_signature(tx.clone(), signature);
@@ -440,12 +443,12 @@ mod test {
let hash = transaction.signature_hash();
let expected =
H256::from_str("daf5a779ae972f972197303d7b574746c7ef83eadac0f2791ad23db92e4c8e53")
B256::from_str("daf5a779ae972f972197303d7b574746c7ef83eadac0f2791ad23db92e4c8e53")
.unwrap();
assert_eq!(expected, hash);
let secret =
H256::from_str("4646464646464646464646464646464646464646464646464646464646464646")
B256::from_str("4646464646464646464646464646464646464646464646464646464646464646")
.unwrap();
let signature = sign_message(secret, hash).unwrap();

View File

@@ -16,7 +16,7 @@ use futures::{future, Future, FutureExt, Stream, StreamExt};
use reth_eth_wire::BlockHeaders;
use reth_primitives::{
BlockHash, BlockNumber, Head, Header, HeadersDirection, PeerId, SealedBlock, SealedHeader,
WithPeerId, H256, U256,
WithPeerId, B256, U256,
};
use reth_rpc_types::engine::ForkchoiceState;
use std::{

View File

@@ -3,7 +3,7 @@
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxzy/reth/issues/"
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)]
#![deny(unused_must_use, rust_2018_idioms)]

View File

@@ -8,7 +8,7 @@
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxzy/reth/issues/"
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)]
#![deny(unused_must_use, rust_2018_idioms)]

View File

@@ -3,7 +3,7 @@
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxzy/reth/issues/"
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)]
#![deny(unused_must_use, rust_2018_idioms)]

View File

@@ -13,15 +13,15 @@ Ethereum network discovery
[dependencies]
# reth
reth-primitives.workspace = true
reth-rlp.workspace = true
reth-rlp-derive = { path = "../../rlp/rlp-derive" }
reth-net-common = { path = "../common" }
reth-net-nat = { path = "../nat" }
# ethereum
alloy-rlp = { workspace = true, features = ["derive"] }
discv5.workspace = true
secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery", "serde"] }
enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] }
rlp = "0.5" # needed for enr
# async/futures
tokio = { workspace = true, features = ["io-util", "net", "time"] }
@@ -31,7 +31,6 @@ tokio-stream.workspace = true
tracing.workspace = true
thiserror.workspace = true
parking_lot.workspace = true
hex = "0.4"
rand = { workspace = true, optional = true }
generic-array = "0.14"
serde = { workspace = true, optional = true }

View File

@@ -3,13 +3,13 @@
//! The basis of this file has been taken from the discv5 codebase:
//! <https://github.com/sigp/discv5>
use alloy_rlp::Encodable;
use reth_net_common::ban_list::BanList;
use reth_net_nat::{NatResolver, ResolveNatInterval};
use reth_primitives::{
bytes::{Bytes, BytesMut},
NodeRecord,
};
use reth_rlp::Encodable;
use std::{
collections::{HashMap, HashSet},
time::Duration,

View File

@@ -7,7 +7,7 @@ use tokio::sync::{mpsc::error::SendError, oneshot::error::RecvError};
#[allow(missing_docs)]
pub enum DecodePacketError {
#[error("Failed to rlp decode: {0:?}")]
Rlp(#[from] reth_rlp::DecodeError),
Rlp(#[from] alloy_rlp::Error),
#[error("Received packet len too short.")]
PacketTooShort,
#[error("Hash of the header not equals to the hash of the data.")]

View File

@@ -19,7 +19,7 @@
#![doc(
html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
issue_tracker_base_url = "https://github.com/paradigmxzy/reth/issues/"
issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
)]
#![warn(missing_debug_implementations, missing_docs, rustdoc::all)]
#![deny(unused_must_use, rust_2018_idioms, unreachable_pub, unused_crate_dependencies)]
@@ -29,6 +29,7 @@ use crate::{
error::{DecodePacketError, Discv4Error},
proto::{FindNode, Message, Neighbours, Packet, Ping, Pong},
};
use alloy_rlp::{RlpDecodable, RlpEncodable};
use discv5::{
kbucket,
kbucket::{
@@ -42,9 +43,8 @@ use parking_lot::Mutex;
use proto::{EnrRequest, EnrResponse, EnrWrapper};
use reth_primitives::{
bytes::{Bytes, BytesMut},
ForkId, PeerId, H256,
hex, ForkId, PeerId, B256,
};
use reth_rlp::{RlpDecodable, RlpEncodable};
use secp256k1::SecretKey;
use std::{
cell::RefCell,
@@ -88,6 +88,11 @@ use reth_net_nat::ResolveNatInterval;
/// reexport to get public ip.
pub use reth_net_nat::{external_ip, NatResolver};
/// The default address for discv4 via UDP
///
/// Note: the default TCP address is the same.
pub const DEFAULT_DISCOVERY_ADDR: Ipv4Addr = Ipv4Addr::UNSPECIFIED;
/// The default port for discv4 via UDP
///
/// Note: the default TCP port is the same.
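
For illustration, the default UDP socket these constants imply (the port constant's name is assumed, since the hunk is truncated here):

    use std::net::{SocketAddr, SocketAddrV4};

    fn default_discovery_socket() -> SocketAddr {
        // 0.0.0.0 on the default port; discv4 advertises the same for TCP.
        SocketAddr::V4(SocketAddrV4::new(DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT))
    }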
@@ -352,7 +357,7 @@ impl Discv4 {
/// Sets the pair in the EIP-868 [`Enr`] of the node.
///
/// If the key already exists, this will update it.
pub fn set_eip868_rlp(&self, key: Vec<u8>, value: impl reth_rlp::Encodable) {
pub fn set_eip868_rlp(&self, key: Vec<u8>, value: impl alloy_rlp::Encodable) {
let mut buf = BytesMut::new();
value.encode(&mut buf);
self.set_eip868_rlp_pair(key, buf.freeze())
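
A usage sketch, assuming `EnrForkIdEntry` (converted from `ForkId` later in this diff) implements `alloy_rlp::Encodable`:

    use reth_primitives::ForkId;

    fn advertise_fork_id(discv4: &Discv4, fork_id: ForkId) {
        // Store the fork id under the conventional "eth" ENR key.
        discv4.set_eip868_rlp(b"eth".to_vec(), EnrForkIdEntry::from(fork_id));
    }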
@@ -929,7 +934,7 @@ impl Discv4Service {
}
/// Encodes the packet, sends it and returns the hash.
pub(crate) fn send_packet(&mut self, msg: Message, to: SocketAddr) -> H256 {
pub(crate) fn send_packet(&mut self, msg: Message, to: SocketAddr) -> B256 {
let (payload, hash) = msg.encode(&self.secret_key);
trace!(target : "discv4", r#type=?msg.msg_type(), ?to, ?hash, "sending packet");
let _ = self.egress.try_send((payload, to)).map_err(|err| {
@@ -943,7 +948,7 @@ impl Discv4Service {
}
/// Message handler for an incoming `Ping`
fn on_ping(&mut self, ping: Ping, remote_addr: SocketAddr, remote_id: PeerId, hash: H256) {
fn on_ping(&mut self, ping: Ping, remote_addr: SocketAddr, remote_id: PeerId, hash: B256) {
if self.is_expired(ping.expire) {
// ping's expiration timestamp is in the past
return
@@ -1067,7 +1072,7 @@ impl Discv4Service {
/// Sends a ping message to the node's UDP address.
///
/// Returns the echo hash of the ping message.
pub(crate) fn send_ping(&mut self, node: NodeRecord, reason: PingReason) -> H256 {
pub(crate) fn send_ping(&mut self, node: NodeRecord, reason: PingReason) -> B256 {
let remote_addr = node.udp_addr();
let id = node.id;
let ping = Ping {
@@ -1200,7 +1205,7 @@ impl Discv4Service {
msg: EnrRequest,
remote_addr: SocketAddr,
id: PeerId,
request_hash: H256,
request_hash: B256,
) {
if !self.config.enable_eip868 || self.is_expired(msg.expire) {
return
@@ -1720,7 +1725,7 @@ struct PingRequest {
// Node to which the request was sent.
node: NodeRecord,
// Hash sent in the Ping request
echo_hash: H256,
echo_hash: B256,
/// Why this ping was sent.
reason: PingReason,
}
@@ -1929,7 +1934,7 @@ struct EnrRequestState {
// Timestamp when the request was sent.
sent_at: Instant,
// Hash sent in the Ping request
echo_hash: H256,
echo_hash: B256,
}
/// Stored node info.
@@ -2057,9 +2062,9 @@ impl From<ForkId> for EnrForkIdEntry {
mod tests {
use super::*;
use crate::test_utils::{create_discv4, create_discv4_with_config, rng_endpoint, rng_record};
use alloy_rlp::{Decodable, Encodable};
use rand::{thread_rng, Rng};
use reth_primitives::{hex_literal::hex, mainnet_nodes, ForkHash};
use reth_rlp::{Decodable, Encodable};
use reth_primitives::{hex, mainnet_nodes, ForkHash};
use std::{future::poll_fn, net::Ipv4Addr};
#[tokio::test]
@@ -2191,8 +2196,8 @@ mod tests {
enr_sq: Some(rng.gen()),
};
let id = PeerId::random();
service.on_ping(ping, addr, id, H256::random());
let id = PeerId::random_with(&mut rng);
service.on_ping(ping, addr, id, rng.gen());
let key = kad_key(id);
match service.kbuckets.entry(&key) {
@@ -2223,8 +2228,8 @@ mod tests {
enr_sq: Some(rng.gen()),
};
let id = PeerId::random();
service.on_ping(ping, addr, id, H256::random());
let id = PeerId::random_with(&mut rng);
service.on_ping(ping, addr, id, rng.gen());
let key = kad_key(id);
match service.kbuckets.entry(&key) {

View File

@@ -13,8 +13,8 @@ impl From<PeerId> for NodeKey {
impl From<NodeKey> for discv5::Key<NodeKey> {
fn from(value: NodeKey) -> Self {
let hash = keccak256(value.0.as_bytes());
let hash = *GenericArray::from_slice(hash.as_bytes());
let hash = keccak256(value.0.as_slice());
let hash = *GenericArray::from_slice(hash.as_slice());
discv5::Key::new_raw(value, hash)
}
}

View File

@@ -3,15 +3,14 @@
#![allow(missing_docs)]
use crate::{error::DecodePacketError, EnrForkIdEntry, PeerId, MAX_PACKET_SIZE, MIN_PACKET_SIZE};
use alloy_rlp::{
length_of_length, Decodable, Encodable, Error as RlpError, Header, RlpDecodable, RlpEncodable,
};
use enr::{Enr, EnrKey};
use reth_primitives::{
bytes::{Buf, BufMut, Bytes, BytesMut},
keccak256,
rpc_utils::rlp,
ForkId, NodeRecord, H256,
keccak256, ForkId, NodeRecord, B256,
};
use reth_rlp::{length_of_length, Decodable, DecodeError, Encodable, Header};
use reth_rlp_derive::{RlpDecodable, RlpEncodable};
use secp256k1::{
ecdsa::{RecoverableSignature, RecoveryId},
SecretKey, SECP256K1,
@@ -78,13 +77,13 @@ impl Message {
///
/// The datagram is `header || payload`
/// where header is `hash || signature || packet-type`
pub fn encode(&self, secret_key: &SecretKey) -> (Bytes, H256) {
pub fn encode(&self, secret_key: &SecretKey) -> (Bytes, B256) {
// allocate max packet size
let mut datagram = BytesMut::with_capacity(MAX_PACKET_SIZE);
// since signature has fixed len, we can split and fill the datagram buffer at fixed
// positions, this way we can encode the message directly in the datagram buffer
let mut sig_bytes = datagram.split_off(H256::len_bytes());
let mut sig_bytes = datagram.split_off(B256::len_bytes());
let mut payload = sig_bytes.split_off(secp256k1::constants::COMPACT_SIGNATURE_SIZE + 1);
match self {
@@ -126,7 +125,7 @@ impl Message {
sig_bytes.unsplit(payload);
let hash = keccak256(&sig_bytes);
datagram.extend_from_slice(hash.as_bytes());
datagram.extend_from_slice(hash.as_slice());
datagram.unsplit(sig_bytes);
(datagram.freeze(), hash)
@@ -146,7 +145,7 @@ impl Message {
// signature = sign(packet-type || packet-data)
let header_hash = keccak256(&packet[32..]);
let data_hash = H256::from_slice(&packet[..32]);
let data_hash = B256::from_slice(&packet[..32]);
if data_hash != header_hash {
return Err(DecodePacketError::HashMismatch)
}
@@ -156,7 +155,7 @@ impl Message {
let recoverable_sig = RecoverableSignature::from_compact(signature, recovery_id)?;
// recover the public key
let msg = secp256k1::Message::from_slice(keccak256(&packet[97..]).as_bytes())?;
let msg = secp256k1::Message::from_slice(keccak256(&packet[97..]).as_slice())?;
let pk = SECP256K1.recover_ecdsa(&msg, &recoverable_sig)?;
let node_id = PeerId::from_slice(&pk.serialize_uncompressed()[1..]);
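
The datagram offsets implied by the slicing above, collected in one place (the constant names are ours):

    // discv4 packet layout, per `Message::encode`/`decode`:
    //   [0..32)   hash = keccak256(signature || type || data)
    //   [32..96)  64-byte compact recoverable signature
    //   [96]      recovery id
    //   [97..)    packet-type byte followed by RLP packet-data
    const HASH_SIZE: usize = 32; // B256::len_bytes()
    const SIG_SIZE: usize = 64 + 1; // compact signature + recovery id
    const PAYLOAD_OFFSET: usize = HASH_SIZE + SIG_SIZE; // = 97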
@@ -182,7 +181,7 @@ impl Message {
pub struct Packet {
pub msg: Message,
pub node_id: PeerId,
pub hash: H256,
pub hash: B256,
}
/// Represents the `from`, `to` fields in the packets
@@ -258,24 +257,24 @@ where
}
impl<K: EnrKey> Decodable for EnrWrapper<K> {
fn decode(buf: &mut &[u8]) -> Result<Self, DecodeError> {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let enr = <Enr<K> as rlp::Decodable>::decode(&rlp::Rlp::new(buf))
.map_err(|e| match e {
rlp::DecoderError::RlpIsTooShort => DecodeError::InputTooShort,
rlp::DecoderError::RlpInvalidLength => DecodeError::Overflow,
rlp::DecoderError::RlpExpectedToBeList => DecodeError::UnexpectedString,
rlp::DecoderError::RlpExpectedToBeData => DecodeError::UnexpectedList,
rlp::DecoderError::RlpIsTooShort => RlpError::InputTooShort,
rlp::DecoderError::RlpInvalidLength => RlpError::Overflow,
rlp::DecoderError::RlpExpectedToBeList => RlpError::UnexpectedString,
rlp::DecoderError::RlpExpectedToBeData => RlpError::UnexpectedList,
rlp::DecoderError::RlpDataLenWithZeroPrefix |
rlp::DecoderError::RlpListLenWithZeroPrefix => DecodeError::LeadingZero,
rlp::DecoderError::RlpInvalidIndirection => DecodeError::NonCanonicalSize,
rlp::DecoderError::RlpListLenWithZeroPrefix => RlpError::LeadingZero,
rlp::DecoderError::RlpInvalidIndirection => RlpError::NonCanonicalSize,
rlp::DecoderError::RlpIncorrectListLen => {
DecodeError::Custom("incorrect list length when decoding rlp")
RlpError::Custom("incorrect list length when decoding rlp")
}
rlp::DecoderError::RlpIsTooBig => DecodeError::Custom("rlp is too big"),
rlp::DecoderError::RlpIsTooBig => RlpError::Custom("rlp is too big"),
rlp::DecoderError::RlpInconsistentLengthAndData => {
DecodeError::Custom("inconsistent length and data when decoding rlp")
RlpError::Custom("inconsistent length and data when decoding rlp")
}
rlp::DecoderError::Custom(s) => DecodeError::Custom(s),
rlp::DecoderError::Custom(s) => RlpError::Custom(s),
})
.map(EnrWrapper::new);
if enr.is_ok() {
@@ -296,7 +295,7 @@ pub struct EnrRequest {
/// A [ENRResponse packet](https://github.com/ethereum/devp2p/blob/master/discv4.md#enrresponse-packet-0x06).
#[derive(Clone, Debug, Eq, PartialEq, RlpEncodable)]
pub struct EnrResponse {
pub request_hash: H256,
pub request_hash: B256,
pub enr: EnrWrapper<SecretKey>,
}
@@ -313,22 +312,22 @@ impl EnrResponse {
}
impl Decodable for EnrResponse {
fn decode(buf: &mut &[u8]) -> Result<Self, DecodeError> {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let b = &mut &**buf;
let rlp_head = Header::decode(b)?;
if !rlp_head.list {
return Err(DecodeError::UnexpectedString)
return Err(RlpError::UnexpectedString)
}
// let started_len = b.len();
let this = Self {
request_hash: reth_rlp::Decodable::decode(b)?,
request_hash: alloy_rlp::Decodable::decode(b)?,
enr: EnrWrapper::<SecretKey>::decode(b)?,
};
// TODO: `Decodable` can be derived once we have native reth_rlp decoding for ENR: <https://github.com/paradigmxyz/reth/issues/482>
// TODO: `Decodable` can be derived once we have native alloy_rlp decoding for ENR: <https://github.com/paradigmxyz/reth/issues/482>
// Skipping the size check here is fine since the `buf` is the UDP datagram
// let consumed = started_len - b.len();
// if consumed != rlp_head.payload_length {
// return Err(reth_rlp::DecodeError::ListLengthMismatch {
// return Err(alloy_rlp::Error::ListLengthMismatch {
// expected: rlp_head.payload_length,
// got: consumed,
// })
@@ -388,11 +387,11 @@ impl Encodable for Ping {
}
impl Decodable for Ping {
fn decode(buf: &mut &[u8]) -> Result<Self, DecodeError> {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let b = &mut &**buf;
let rlp_head = Header::decode(b)?;
if !rlp_head.list {
return Err(DecodeError::UnexpectedString)
return Err(RlpError::UnexpectedString)
}
let started_len = b.len();
let _version = u32::decode(b)?;
@@ -410,7 +409,7 @@ impl Decodable for Ping {
let consumed = started_len - b.len();
if consumed > rlp_head.payload_length {
return Err(DecodeError::ListLengthMismatch {
return Err(RlpError::ListLengthMismatch {
expected: rlp_head.payload_length,
got: consumed,
})
@@ -426,7 +425,7 @@ impl Decodable for Ping {
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Pong {
pub to: NodeEndpoint,
pub echo: H256,
pub echo: B256,
pub expire: u64,
/// Optional enr_seq for <https://eips.ethereum.org/EIPS/eip-868>
pub enr_sq: Option<u64>,
@@ -437,7 +436,7 @@ impl Encodable for Pong {
#[derive(RlpEncodable)]
struct PongMessageEIP868<'a> {
to: &'a NodeEndpoint,
echo: &'a H256,
echo: &'a B256,
expire: u64,
enr_seq: u64,
}
@@ -445,7 +444,7 @@ impl Encodable for Pong {
#[derive(RlpEncodable)]
struct PongMessage<'a> {
to: &'a NodeEndpoint,
echo: &'a H256,
echo: &'a B256,
expire: u64,
}
@@ -459,11 +458,11 @@ impl Encodable for Pong {
}
impl Decodable for Pong {
fn decode(buf: &mut &[u8]) -> Result<Self, DecodeError> {
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
let b = &mut &**buf;
let rlp_head = Header::decode(b)?;
if !rlp_head.list {
return Err(DecodeError::UnexpectedString)
return Err(RlpError::UnexpectedString)
}
let started_len = b.len();
let mut this = Self {
@@ -480,7 +479,7 @@ impl Decodable for Pong {
let consumed = started_len - b.len();
if consumed > rlp_head.payload_length {
return Err(DecodeError::ListLengthMismatch {
return Err(RlpError::ListLengthMismatch {
expected: rlp_head.payload_length,
got: consumed,
})
@@ -502,7 +501,7 @@ mod tests {
};
use enr::{EnrBuilder, EnrPublicKey};
use rand::{thread_rng, Rng, RngCore};
use reth_primitives::{hex_literal::hex, ForkHash};
use reth_primitives::{hex, ForkHash};
#[test]
fn test_endpoint_ipv_v4() {
@@ -594,7 +593,7 @@ mod tests {
rng.fill_bytes(&mut ip);
let msg = Pong {
to: rng_endpoint(&mut rng),
echo: H256::random(),
echo: rng.gen(),
expire: rng.gen(),
enr_sq: None,
};
@@ -615,7 +614,7 @@ mod tests {
rng.fill_bytes(&mut ip);
let msg = Pong {
to: rng_endpoint(&mut rng),
echo: H256::random(),
echo: rng.gen(),
expire: rng.gen(),
enr_sq: Some(rng.gen()),
};
@@ -719,11 +718,12 @@ mod tests {
#[test]
fn encode_decode_enr_msg() {
use self::EnrWrapper;
use alloy_rlp::Decodable;
use enr::secp256k1::SecretKey;
use reth_rlp::Decodable;
use std::net::Ipv4Addr;
let key = SecretKey::new(&mut rand::rngs::OsRng);
let mut rng = rand::rngs::OsRng;
let key = SecretKey::new(&mut rng);
let ip = Ipv4Addr::new(127, 0, 0, 1);
let tcp = 3000;
@@ -740,7 +740,7 @@ mod tests {
EnrWrapper::new(builder.build(&key).unwrap())
};
let enr_respone = EnrResponse { request_hash: H256::random(), enr };
let enr_respone = EnrResponse { request_hash: rng.gen(), enr };
let mut buf = Vec::new();
enr_respone.encode(&mut buf);
@@ -757,8 +757,8 @@ mod tests {
#[test]
fn encode_known_rlp_enr() {
use self::EnrWrapper;
use alloy_rlp::Decodable;
use enr::{secp256k1::SecretKey, EnrPublicKey};
use reth_rlp::Decodable;
use std::net::Ipv4Addr;
let valid_record =

View File

@@ -8,7 +8,7 @@ use crate::{
IngressReceiver, PeerId, SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS,
};
use rand::{thread_rng, Rng, RngCore};
use reth_primitives::{hex_literal::hex, ForkHash, ForkId, NodeRecord, H256};
use reth_primitives::{hex, ForkHash, ForkId, NodeRecord, B256};
use secp256k1::{SecretKey, SECP256K1};
use std::{
collections::{HashMap, HashSet},
@@ -113,7 +113,7 @@ impl MockDiscovery {
}
/// Encodes the packet, sends it and returns the hash.
fn send_packet(&mut self, msg: Message, to: SocketAddr) -> H256 {
fn send_packet(&mut self, msg: Message, to: SocketAddr) -> B256 {
let (payload, hash) = msg.encode(&self.secret_key);
let _ = self.egress.try_send((payload, to));
hash
@@ -236,21 +236,21 @@ pub fn rng_endpoint(rng: &mut impl Rng) -> NodeEndpoint {
pub fn rng_record(rng: &mut impl RngCore) -> NodeRecord {
let NodeEndpoint { address, udp_port, tcp_port } = rng_endpoint(rng);
NodeRecord { address, tcp_port, udp_port, id: PeerId::random() }
NodeRecord { address, tcp_port, udp_port, id: rng.gen() }
}
pub fn rng_ipv6_record(rng: &mut impl RngCore) -> NodeRecord {
let mut ip = [0u8; 16];
rng.fill_bytes(&mut ip);
let address = IpAddr::V6(ip.into());
NodeRecord { address, tcp_port: rng.gen(), udp_port: rng.gen(), id: PeerId::random() }
NodeRecord { address, tcp_port: rng.gen(), udp_port: rng.gen(), id: rng.gen() }
}
pub fn rng_ipv4_record(rng: &mut impl RngCore) -> NodeRecord {
let mut ip = [0u8; 4];
rng.fill_bytes(&mut ip);
let address = IpAddr::V4(ip.into());
NodeRecord { address, tcp_port: rng.gen(), udp_port: rng.gen(), id: PeerId::random() }
NodeRecord { address, tcp_port: rng.gen(), udp_port: rng.gen(), id: rng.gen() }
}
pub fn rng_message(rng: &mut impl RngCore) -> Message {
@@ -263,11 +263,11 @@ pub fn rng_message(rng: &mut impl RngCore) -> Message {
}),
2 => Message::Pong(Pong {
to: rng_endpoint(rng),
echo: H256::random(),
echo: rng.gen(),
expire: rng.gen(),
enr_sq: None,
}),
3 => Message::FindNode(FindNode { id: PeerId::random(), expire: rng.gen() }),
3 => Message::FindNode(FindNode { id: rng.gen(), expire: rng.gen() }),
4 => {
let num: usize = rng.gen_range(1..=SAFE_MAX_DATAGRAM_NEIGHBOUR_RECORDS);
Message::Neighbours(Neighbours {

View File

@@ -12,9 +12,9 @@ description = "Support for EIP-1459 Node Discovery via DNS"
# reth
reth-primitives.workspace = true
reth-net-common = { path = "../common" }
reth-rlp.workspace = true
# ethereum
alloy-rlp.workspace = true
secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery", "serde"] }
enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] }

Some files were not shown because too many files have changed in this diff.