chore: bump rust to edition 2024 (#18692)
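This change moves the workspace from edition 2021 to 2024 (keeping rust-version = "1.88"), bumps `bindgen` from 0.70 to 0.71, and mechanically migrates the code to the new edition: environment mutation is wrapped in `unsafe`, foreign blocks become `unsafe extern`, unsafe operations inside `unsafe fn`s get explicit `unsafe { .. }` blocks, nested `if`/`if let` towers collapse into let-chains, and opaque return types gain precise-capturing `use<..>` bounds. The `rust.rust_2024_incompatible_pat` lint is also enabled workspace-wide.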
Cargo.lock (generated): 22 changed lines
@@ -1590,6 +1590,24 @@ dependencies = [
  "syn 2.0.106",
 ]
 
+[[package]]
+name = "bindgen"
+version = "0.71.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3"
+dependencies = [
+ "bitflags 2.9.4",
+ "cexpr",
+ "clang-sys",
+ "itertools 0.13.0",
+ "proc-macro2",
+ "quote",
+ "regex",
+ "rustc-hash 2.1.1",
+ "shlex",
+ "syn 2.0.106",
+]
+
 [[package]]
 name = "bit-set"
 version = "0.8.0"

@@ -5353,7 +5371,7 @@ version = "0.14.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e78a09b56be5adbcad5aa1197371688dc6bb249a26da3bca2011ee2fb987ebfb"
 dependencies = [
- "bindgen",
+ "bindgen 0.70.1",
  "errno",
  "libc",
 ]

@@ -8652,7 +8670,7 @@ dependencies = [
 name = "reth-mdbx-sys"
 version = "1.8.1"
 dependencies = [
- "bindgen",
+ "bindgen 0.71.1",
  "cc",
 ]
@@ -1,6 +1,6 @@
 [workspace.package]
 version = "1.8.1"
-edition = "2021"
+edition = "2024"
 rust-version = "1.88"
 license = "MIT OR Apache-2.0"
 homepage = "https://paradigmxyz.github.io/reth"
@@ -188,6 +188,7 @@ rust.missing_docs = "warn"
 rust.rust_2018_idioms = { level = "deny", priority = -1 }
 rust.unreachable_pub = "warn"
 rust.unused_must_use = "deny"
+rust.rust_2024_incompatible_pat = "warn"
 rustdoc.all = "warn"
 # rust.unnameable-types = "warn"
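The new `rust.rust_2024_incompatible_pat` lint flags match-ergonomics patterns whose meaning would otherwise shift across the edition boundary. A minimal, illustrative sketch of the kind of pattern involved (not taken from this diff); in editions up to 2021, `mut` in a pattern silently reset the default binding mode, while edition 2024 rejects the pattern outright:

```rust
fn main() {
    let pair = &(1i32, 2i32);

    // Match ergonomics: `a` is bound as `&i32` in every edition.
    let (a, _) = pair;
    assert_eq!(*a, 1);

    // Editions <= 2021 accepted `let (mut b, _) = pair;`, silently resetting
    // the binding mode so `b` became an owned i32. Edition 2024 rejects that
    // spelling; dereferencing explicitly keeps the intent unambiguous.
    let (mut b, _) = *pair;
    b += 1;
    assert_eq!(b, 2);
}
```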
@@ -667,7 +668,7 @@ snmalloc-rs = { version = "0.3.7", features = ["build_cc"] }
 aes = "0.8.1"
 ahash = "0.8"
 anyhow = "1.0"
-bindgen = { version = "0.70", default-features = false }
+bindgen = { version = "0.71", default-features = false }
 block-padding = "0.3.2"
 cc = "=1.2.15"
 cipher = "0.4.3"
@@ -26,7 +26,9 @@ use reth_cli_runner::CliRunner;
 fn main() {
     // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided.
     if std::env::var_os("RUST_BACKTRACE").is_none() {
-        std::env::set_var("RUST_BACKTRACE", "1");
+        unsafe {
+            std::env::set_var("RUST_BACKTRACE", "1");
+        }
     }
 
     // Run until either exit or sigint or sigterm
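Edition 2024 makes `std::env::set_var` (and `remove_var`) an `unsafe fn`, since mutating the process environment can race with concurrent `getenv` calls on many platforms. The call here is sound only because it runs at the top of `main`, before any threads exist; the `unsafe` block makes that assumption explicit. A minimal sketch of the same pattern:

```rust
fn main() {
    if std::env::var_os("RUST_BACKTRACE").is_none() {
        // SAFETY: no other threads have been spawned yet, so nothing can be
        // reading the environment concurrently.
        unsafe { std::env::set_var("RUST_BACKTRACE", "1") };
    }
}
```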
@@ -141,10 +141,10 @@ impl<R: Read> ProgressReader<R> {
 impl<R: Read> Read for ProgressReader<R> {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
         let bytes = self.reader.read(buf)?;
-        if bytes > 0 {
-            if let Err(e) = self.progress.update(bytes as u64) {
-                return Err(io::Error::other(e));
-            }
+        if bytes > 0 &&
+            let Err(e) = self.progress.update(bytes as u64)
+        {
+            return Err(io::Error::other(e));
         }
         Ok(bytes)
     }
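Most of the churn in this commit is this exact shape: edition 2024 stabilizes let-chains, so a nested `if cond { if let Pat = expr { .. } }` flattens into a single `if cond && let Pat = expr { .. }`. The chain evaluates left to right and short-circuits, and each `let` binding is visible to the rest of the chain and the body. A standalone sketch (edition 2024, illustrative names):

```rust
fn report(bytes: usize, update: Result<(), String>) -> Option<String> {
    // One boolean condition and one refutable `let`, joined by `&&`; `e` is
    // only in scope when both parts of the chain succeed.
    if bytes > 0 &&
        let Err(e) = update
    {
        return Some(format!("progress update failed after {bytes} bytes: {e}"));
    }
    None
}

fn main() {
    assert!(report(4, Err("disk full".into())).is_some());
    assert!(report(0, Err("disk full".into())).is_none());
}
```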
@@ -192,7 +192,7 @@ pub fn build_import_pipeline_impl<N, C, E>(
     static_file_producer: StaticFileProducer<ProviderFactory<N>>,
     disable_exec: bool,
     evm_config: E,
-) -> eyre::Result<(Pipeline<N>, impl futures::Stream<Item = NodeEvent<N::Primitives>>)>
+) -> eyre::Result<(Pipeline<N>, impl futures::Stream<Item = NodeEvent<N::Primitives>> + use<N, C, E>)>
 where
     N: ProviderNodeTypes,
     C: FullConsensus<N::Primitives, Error = reth_consensus::ConsensusError> + 'static,
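The `+ use<N, C, E>` bound is precise capturing. In edition 2024, return-position `impl Trait` captures all in-scope generic parameters and lifetimes by default; `use<..>` states exactly which ones the opaque type may hold, so callers keep their borrow-checking guarantees. A small sketch of the mechanism with illustrative names (on current stable, the capture list must name every in-scope type parameter, and the point of the syntax is to omit lifetimes):

```rust
// The returned iterator stores a `T` but must not capture the lifetime of
// `label`; `use<T>` omits the anonymous lifetime, which edition 2024 would
// otherwise capture implicitly.
fn repeat_value<T: Clone>(value: T, label: &str) -> impl Iterator<Item = T> + use<T> {
    println!("building iterator for {label}");
    std::iter::repeat_n(value, 3)
}

fn main() {
    let it = {
        let label = String::from("temp");
        repeat_value(7u32, &label) // the borrow of `label` ends here
    };
    assert_eq!(it.sum::<u32>(), 21);
}
```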
@@ -7,7 +7,7 @@ use std::{
     fmt, mem, ptr,
 };
 
-extern "C" {
+unsafe extern "C" {
     fn backtrace_symbols_fd(buffer: *const *mut libc::c_void, size: libc::c_int, fd: libc::c_int);
 }
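Edition 2024 requires `unsafe extern` on foreign blocks: the declarations themselves are a promise that the signatures match the foreign definitions. Inside an `unsafe extern` block, individual items may also be marked `safe` when they are callable without preconditions. A minimal sketch against two libc functions (illustrative):

```rust
unsafe extern "C" {
    // abs() is sound for any input, so it can be declared `safe`
    // and called without an unsafe block.
    safe fn abs(input: i32) -> i32;
    // strlen() requires a valid NUL-terminated pointer, so it stays unsafe.
    fn strlen(s: *const core::ffi::c_char) -> usize;
}

fn main() {
    assert_eq!(abs(-3), 3);
    let msg = c"hello";
    // SAFETY: `msg` is a valid, NUL-terminated C string literal.
    assert_eq!(unsafe { strlen(msg.as_ptr()) }, 5);
}
```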
@@ -96,10 +96,11 @@ where
         }
 
         // Connect last node with the first if there are more than two
-        if idx + 1 == num_nodes && num_nodes > 2 {
-            if let Some(first_node) = nodes.first_mut() {
-                node.connect(first_node).await;
-            }
+        if idx + 1 == num_nodes &&
+            num_nodes > 2 &&
+            let Some(first_node) = nodes.first_mut()
+        {
+            node.connect(first_node).await;
         }
 
         nodes.push(node);

@@ -207,10 +208,11 @@ where
         }
 
         // Connect last node with the first if there are more than two
-        if idx + 1 == num_nodes && num_nodes > 2 {
-            if let Some(first_node) = nodes.first_mut() {
-                node.connect(first_node).await;
-            }
+        if idx + 1 == num_nodes &&
+            num_nodes > 2 &&
+            let Some(first_node) = nodes.first_mut()
+        {
+            node.connect(first_node).await;
         }
     }
@@ -150,14 +150,13 @@ where
         loop {
             tokio::time::sleep(std::time::Duration::from_millis(20)).await;
 
-            if !check && wait_finish_checkpoint {
-                if let Some(checkpoint) =
-                    self.inner.provider.get_stage_checkpoint(StageId::Finish)?
-                {
-                    if checkpoint.block_number >= number {
-                        check = true
-                    }
-                }
+            if !check &&
+                wait_finish_checkpoint &&
+                let Some(checkpoint) =
+                    self.inner.provider.get_stage_checkpoint(StageId::Finish)? &&
+                checkpoint.block_number >= number
+            {
+                check = true
             }
 
             if check {

@@ -178,10 +177,10 @@ where
     pub async fn wait_unwind(&self, number: BlockNumber) -> eyre::Result<()> {
         loop {
             tokio::time::sleep(std::time::Duration::from_millis(10)).await;
-            if let Some(checkpoint) = self.inner.provider.get_stage_checkpoint(StageId::Headers)? {
-                if checkpoint.block_number == number {
-                    break
-                }
+            if let Some(checkpoint) = self.inner.provider.get_stage_checkpoint(StageId::Headers)? &&
+                checkpoint.block_number == number
+            {
+                break
             }
         }
         Ok(())

@@ -207,14 +206,13 @@ where
             // wait for the block to commit
             tokio::time::sleep(std::time::Duration::from_millis(20)).await;
             if let Some(latest_block) =
-                self.inner.provider.block_by_number_or_tag(BlockNumberOrTag::Latest)?
+                self.inner.provider.block_by_number_or_tag(BlockNumberOrTag::Latest)? &&
+                latest_block.header().number() == block_number
             {
-                if latest_block.header().number() == block_number {
-                    // make sure the block hash we submitted via FCU engine api is the new latest
-                    // block using an RPC call
-                    assert_eq!(latest_block.header().hash_slow(), block_hash);
-                    break
-                }
+                // make sure the block hash we submitted via FCU engine api is the new latest
+                // block using an RPC call
+                assert_eq!(latest_block.header().hash_slow(), block_hash);
+                break
             }
         }
         Ok(())
@@ -174,16 +174,13 @@ where
         ];
 
         // if we're on a fork, validate it now that it's canonical
-        if let Ok(active_state) = env.active_node_state() {
-            if let Some(fork_base) = active_state.current_fork_base {
-                debug!(
-                    "MakeCanonical: Adding fork validation from base block {}",
-                    fork_base
-                );
-                actions.push(Box::new(ValidateFork::new(fork_base)));
-                // clear the fork base since we're now canonical
-                env.active_node_state_mut()?.current_fork_base = None;
-            }
+        if let Ok(active_state) = env.active_node_state() &&
+            let Some(fork_base) = active_state.current_fork_base
+        {
+            debug!("MakeCanonical: Adding fork validation from base block {}", fork_base);
+            actions.push(Box::new(ValidateFork::new(fork_base)));
+            // clear the fork base since we're now canonical
+            env.active_node_state_mut()?.current_fork_base = None;
         }
 
         let mut sequence = Sequence::new(actions);

@@ -195,15 +195,15 @@ where
             .copied()
             .ok_or_else(|| eyre::eyre!("Block tag '{}' not found in registry", self.tag))?;
 
-        if let Some(expected_node) = self.expected_node_idx {
-            if node_idx != expected_node {
-                return Err(eyre::eyre!(
-                    "Block tag '{}' came from node {} but expected node {}",
-                    self.tag,
-                    node_idx,
-                    expected_node
-                ));
-            }
+        if let Some(expected_node) = self.expected_node_idx &&
+            node_idx != expected_node
+        {
+            return Err(eyre::eyre!(
+                "Block tag '{}' came from node {} but expected node {}",
+                self.tag,
+                node_idx,
+                expected_node
+            ));
         }
 
         debug!(
@@ -220,7 +220,7 @@ where
         let is_dev = self.is_dev;
         let node_count = self.network.node_count;
 
-        let attributes_generator = self.create_attributes_generator::<N>();
+        let attributes_generator = Self::create_static_attributes_generator::<N>();
 
         let result = setup_engine_with_connection::<N>(
             node_count,

@@ -299,10 +299,11 @@ where
         .await
     }
 
-    /// Create the attributes generator function
-    fn create_attributes_generator<N>(
-        &self,
-    ) -> impl Fn(u64) -> <<N as NodeTypes>::Payload as PayloadTypes>::PayloadBuilderAttributes + Copy
+    /// Create a static attributes generator that doesn't capture any instance data
+    fn create_static_attributes_generator<N>(
+    ) -> impl Fn(u64) -> <<N as NodeTypes>::Payload as PayloadTypes>::PayloadBuilderAttributes
+           + Copy
+           + use<N, I>
     where
        N: NodeBuilderHelper<Payload = I>,
        LocalPayloadAttributesBuilder<N::ChainSpec>: PayloadAttributesBuilder<
@@ -89,11 +89,11 @@ async fn test_apply_with_import() -> Result<()> {
         )
         .await;
 
-        if let Ok(Some(block)) = block_result {
-            if block.header.number == 10 {
-                debug!("Pipeline finished, block 10 is fully available");
-                break;
-            }
+        if let Ok(Some(block)) = block_result &&
+            block.header.number == 10
+        {
+            debug!("Pipeline finished, block 10 is fully available");
+            break;
         }
 
         if start.elapsed() > std::time::Duration::from_secs(10) {
@@ -664,7 +664,7 @@ mod tests {
 
     unsafe impl GlobalAlloc for TrackingAllocator {
         unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-            let ret = self.inner.alloc(layout);
+            let ret = unsafe { self.inner.alloc(layout) };
             if !ret.is_null() {
                 self.allocated.fetch_add(layout.size(), Ordering::SeqCst);
                 self.total_allocated.fetch_add(layout.size(), Ordering::SeqCst);

@@ -674,7 +674,7 @@ mod tests {
 
         unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
             self.allocated.fetch_sub(layout.size(), Ordering::SeqCst);
-            self.inner.dealloc(ptr, layout)
+            unsafe { self.inner.dealloc(ptr, layout) }
         }
     }
 }
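These two edits reflect `unsafe_op_in_unsafe_fn` becoming the edition 2024 default: the body of an `unsafe fn` is no longer one big implicit unsafe block, so each unsafe operation must be wrapped individually, which keeps the audit surface explicit. A minimal sketch of the rule:

```rust
/// # Safety
/// `ptr` must be non-null, aligned, and valid for reads.
unsafe fn double_read(ptr: *const u32) -> u32 {
    // Edition 2024: even inside an `unsafe fn`, raw-pointer reads need their
    // own `unsafe` block.
    let a = unsafe { ptr.read() };
    let b = unsafe { ptr.read() };
    a + b
}

fn main() {
    let x = 21u32;
    // SAFETY: `&x` is valid and aligned for the duration of the call.
    assert_eq!(unsafe { double_read(&x) }, 42);
}
```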
@@ -1818,10 +1818,10 @@ where
     fn prepare_invalid_response(&mut self, mut parent_hash: B256) -> ProviderResult<PayloadStatus> {
         // Edge case: the `latestValid` field is the zero hash if the parent block is the terminal
         // PoW block, which we need to identify by looking at the parent's block difficulty
-        if let Some(parent) = self.sealed_header_by_hash(parent_hash)? {
-            if !parent.difficulty().is_zero() {
-                parent_hash = B256::ZERO;
-            }
+        if let Some(parent) = self.sealed_header_by_hash(parent_hash)? &&
+            !parent.difficulty().is_zero()
+        {
+            parent_hash = B256::ZERO;
         }
 
         let valid_parent_hash = self.latest_valid_hash_for_invalid_payload(parent_hash)?;
@@ -2038,62 +2038,65 @@ where
         let sync_target_state = self.state.forkchoice_state_tracker.sync_target_state();
 
         // check if the downloaded block is the tracked finalized block
-        let mut exceeds_backfill_threshold = if let Some(buffered_finalized) = sync_target_state
-            .as_ref()
-            .and_then(|state| self.state.buffer.block(&state.finalized_block_hash))
-        {
-            // if we have buffered the finalized block, we should check how far
-            // we're off
-            self.exceeds_backfill_run_threshold(canonical_tip_num, buffered_finalized.number())
-        } else {
-            // check if the distance exceeds the threshold for backfill sync
-            self.exceeds_backfill_run_threshold(canonical_tip_num, target_block_number)
-        };
-
-        // If this is invoked after we downloaded a block we can check if this block is the
-        // finalized block
-        if let (Some(downloaded_block), Some(ref state)) = (downloaded_block, sync_target_state) {
-            if downloaded_block.hash == state.finalized_block_hash {
-                // we downloaded the finalized block and can now check how far we're off
-                exceeds_backfill_threshold =
-                    self.exceeds_backfill_run_threshold(canonical_tip_num, downloaded_block.number);
-            }
-        }
+        let exceeds_backfill_threshold =
+            match (downloaded_block.as_ref(), sync_target_state.as_ref()) {
+                // if we downloaded the finalized block we can now check how far we're off
+                (Some(downloaded_block), Some(state))
+                    if downloaded_block.hash == state.finalized_block_hash =>
+                {
+                    self.exceeds_backfill_run_threshold(canonical_tip_num, downloaded_block.number)
+                }
+                _ => match sync_target_state
+                    .as_ref()
+                    .and_then(|state| self.state.buffer.block(&state.finalized_block_hash))
+                {
+                    Some(buffered_finalized) => {
+                        // if we have buffered the finalized block, we should check how far we're
+                        // off
+                        self.exceeds_backfill_run_threshold(
+                            canonical_tip_num,
+                            buffered_finalized.number(),
+                        )
+                    }
+                    None => {
+                        // check if the distance exceeds the threshold for backfill sync
+                        self.exceeds_backfill_run_threshold(canonical_tip_num, target_block_number)
+                    }
+                },
+            };
 
         // if the number of missing blocks is greater than the max, trigger backfill
-        if exceeds_backfill_threshold {
-            if let Some(state) = sync_target_state {
-                // if we have already canonicalized the finalized block, we should skip backfill
-                match self.provider.header_by_hash_or_number(state.finalized_block_hash.into()) {
-                    Err(err) => {
-                        warn!(target: "engine::tree", %err, "Failed to get finalized block header");
-                    }
-                    Ok(None) => {
-                        // ensure the finalized block is known (not the zero hash)
-                        if !state.finalized_block_hash.is_zero() {
-                            // we don't have the block yet and the distance exceeds the allowed
-                            // threshold
-                            return Some(state.finalized_block_hash)
-                        }
+        if exceeds_backfill_threshold && let Some(state) = sync_target_state {
+            // if we have already canonicalized the finalized block, we should skip backfill
+            match self.provider.header_by_hash_or_number(state.finalized_block_hash.into()) {
+                Err(err) => {
+                    warn!(target: "engine::tree", %err, "Failed to get finalized block header");
+                }
+                Ok(None) => {
+                    // ensure the finalized block is known (not the zero hash)
+                    if !state.finalized_block_hash.is_zero() {
+                        // we don't have the block yet and the distance exceeds the allowed
+                        // threshold
+                        return Some(state.finalized_block_hash)
+                    }
 
-                        // OPTIMISTIC SYNCING
-                        //
-                        // It can happen when the node is doing an
-                        // optimistic sync, where the CL has no knowledge of the finalized hash,
-                        // but is expecting the EL to sync as high
-                        // as possible before finalizing.
-                        //
-                        // This usually doesn't happen on ETH mainnet since CLs use the more
-                        // secure checkpoint syncing.
-                        //
-                        // However, optimism chains will do this. The risk of a reorg is however
-                        // low.
-                        debug!(target: "engine::tree", hash=?state.head_block_hash, "Setting head hash as an optimistic backfill target.");
-                        return Some(state.head_block_hash)
-                    }
-                    Ok(Some(_)) => {
-                        // we're fully synced to the finalized block
-                    }
+                    // OPTIMISTIC SYNCING
+                    //
+                    // It can happen when the node is doing an
+                    // optimistic sync, where the CL has no knowledge of the finalized hash,
+                    // but is expecting the EL to sync as high
+                    // as possible before finalizing.
+                    //
+                    // This usually doesn't happen on ETH mainnet since CLs use the more
+                    // secure checkpoint syncing.
+                    //
+                    // However, optimism chains will do this. The risk of a reorg is however
+                    // low.
+                    debug!(target: "engine::tree", hash=?state.head_block_hash, "Setting head hash as an optimistic backfill target.");
+                    return Some(state.head_block_hash)
+                }
+                Ok(Some(_)) => {
+                    // we're fully synced to the finalized block
                 }
             }
         }
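This hunk goes beyond the mechanical rewrites: a `let mut` flag that was computed and then conditionally overwritten becomes a single `match` on the pair of `Option`s with a guard, so the threshold is decided exactly once and the binding stays immutable. A standalone sketch of the pattern (all names illustrative):

```rust
#[derive(Clone, Copy)]
struct Block { hash: u64, number: u64 }

fn distance_exceeded(tip: u64, candidate: u64) -> bool {
    candidate.saturating_sub(tip) > 32
}

// One `match` over the pair of Options replaces "compute a default, then
// maybe overwrite it": the guard arm wins when the downloaded block is the
// finalized one, and the fallback arm handles everything else.
fn exceeds_threshold(tip: u64, downloaded: Option<Block>, finalized_hash: Option<u64>) -> bool {
    match (downloaded, finalized_hash) {
        (Some(block), Some(hash)) if block.hash == hash => {
            distance_exceeded(tip, block.number)
        }
        _ => distance_exceeded(tip, tip + 1), // fallback target (illustrative)
    }
}

fn main() {
    let b = Block { hash: 7, number: 100 };
    assert!(exceeds_threshold(10, Some(b), Some(7)));
    assert!(!exceeds_threshold(10, Some(b), Some(8)));
}
```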
@@ -140,10 +140,10 @@ where
     fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
         let mut this = self.project();
         let next = ready!(this.stream.poll_next_unpin(cx));
-        if let Some(msg) = &next {
-            if let Err(error) = this.store.on_message(msg, SystemTime::now()) {
-                error!(target: "engine::stream::store", ?msg, %error, "Error handling Engine API message");
-            }
+        if let Some(msg) = &next &&
+            let Err(error) = this.store.on_message(msg, SystemTime::now())
+        {
+            error!(target: "engine::stream::store", ?msg, %error, "Error handling Engine API message");
         }
         Poll::Ready(next)
     }
@@ -106,12 +106,11 @@ impl<Http: HttpClient + Clone> EraClient<Http> {
 
         if let Ok(mut dir) = fs::read_dir(&self.folder).await {
             while let Ok(Some(entry)) = dir.next_entry().await {
-                if let Some(name) = entry.file_name().to_str() {
-                    if let Some(number) = self.file_name_to_number(name) {
-                        if max.is_none() || matches!(max, Some(max) if number > max) {
-                            max.replace(number + 1);
-                        }
-                    }
+                if let Some(name) = entry.file_name().to_str() &&
+                    let Some(number) = self.file_name_to_number(name) &&
+                    (max.is_none() || matches!(max, Some(max) if number > max))
+                {
+                    max.replace(number + 1);
                 }
             }
         }

@@ -125,14 +124,13 @@ impl<Http: HttpClient + Clone> EraClient<Http> {
 
         if let Ok(mut dir) = fs::read_dir(&self.folder).await {
             while let Ok(Some(entry)) = dir.next_entry().await {
-                if let Some(name) = entry.file_name().to_str() {
-                    if let Some(number) = self.file_name_to_number(name) {
-                        if number < index || number >= last {
-                            eprintln!("Deleting file {}", entry.path().display());
-                            eprintln!("{number} < {index} || {number} >= {last}");
-                            reth_fs_util::remove_file(entry.path())?;
-                        }
-                    }
+                if let Some(name) = entry.file_name().to_str() &&
+                    let Some(number) = self.file_name_to_number(name) &&
+                    (number < index || number >= last)
+                {
+                    eprintln!("Deleting file {}", entry.path().display());
+                    eprintln!("{number} < {index} || {number} >= {last}");
+                    reth_fs_util::remove_file(entry.path())?;
                 }
             }
         }

@@ -208,12 +206,12 @@ impl<Http: HttpClient + Clone> EraClient<Http> {
         let mut writer = io::BufWriter::new(file);
 
         while let Some(line) = lines.next_line().await? {
-            if let Some(j) = line.find(".era1") {
-                if let Some(i) = line[..j].rfind(|c: char| !c.is_alphanumeric() && c != '-') {
-                    let era = &line[i + 1..j + 5];
-                    writer.write_all(era.as_bytes()).await?;
-                    writer.write_all(b"\n").await?;
-                }
+            if let Some(j) = line.find(".era1") &&
+                let Some(i) = line[..j].rfind(|c: char| !c.is_alphanumeric() && c != '-')
+            {
+                let era = &line[i + 1..j + 5];
+                writer.write_all(era.as_bytes()).await?;
+                writer.write_all(b"\n").await?;
             }
         }
         writer.flush().await?;
@@ -17,16 +17,16 @@ pub fn read_dir(
     (|| {
         let path = entry?.path();
 
-        if path.extension() == Some("era1".as_ref()) {
-            if let Some(last) = path.components().next_back() {
-                let str = last.as_os_str().to_string_lossy().to_string();
-                let parts = str.split('-').collect::<Vec<_>>();
+        if path.extension() == Some("era1".as_ref()) &&
+            let Some(last) = path.components().next_back()
+        {
+            let str = last.as_os_str().to_string_lossy().to_string();
+            let parts = str.split('-').collect::<Vec<_>>();
 
-                if parts.len() == 3 {
-                    let number = usize::from_str(parts[1])?;
+            if parts.len() == 3 {
+                let number = usize::from_str(parts[1])?;
 
-                    return Ok(Some((number, path.into_boxed_path())));
-                }
+                return Ok(Some((number, path.into_boxed_path())));
             }
         }
         if path.file_name() == Some("checksums.txt".as_ref()) {
@@ -262,47 +262,47 @@ impl<Http: HttpClient + Clone + Send + Sync + 'static + Unpin> Stream for Starti
             self.fetch_file_list();
         }
 
-        if self.state == State::FetchFileList {
-            if let Poll::Ready(result) = self.fetch_file_list.poll_unpin(cx) {
-                match result {
-                    Ok(_) => self.delete_outside_range(),
-                    Err(e) => {
-                        self.fetch_file_list();
+        if self.state == State::FetchFileList &&
+            let Poll::Ready(result) = self.fetch_file_list.poll_unpin(cx)
+        {
+            match result {
+                Ok(_) => self.delete_outside_range(),
+                Err(e) => {
+                    self.fetch_file_list();
 
-                        return Poll::Ready(Some(Box::pin(async move { Err(e) })));
-                    }
+                    return Poll::Ready(Some(Box::pin(async move { Err(e) })));
                 }
             }
         }
 
-        if self.state == State::DeleteOutsideRange {
-            if let Poll::Ready(result) = self.delete_outside_range.poll_unpin(cx) {
-                match result {
-                    Ok(_) => self.recover_index(),
-                    Err(e) => {
-                        self.delete_outside_range();
+        if self.state == State::DeleteOutsideRange &&
+            let Poll::Ready(result) = self.delete_outside_range.poll_unpin(cx)
+        {
+            match result {
+                Ok(_) => self.recover_index(),
+                Err(e) => {
+                    self.delete_outside_range();
 
-                        return Poll::Ready(Some(Box::pin(async move { Err(e) })));
-                    }
+                    return Poll::Ready(Some(Box::pin(async move { Err(e) })));
                 }
             }
         }
 
-        if self.state == State::RecoverIndex {
-            if let Poll::Ready(last) = self.recover_index.poll_unpin(cx) {
-                self.last = last;
-                self.count_files();
-            }
+        if self.state == State::RecoverIndex &&
+            let Poll::Ready(last) = self.recover_index.poll_unpin(cx)
+        {
+            self.last = last;
+            self.count_files();
         }
 
-        if self.state == State::CountFiles {
-            if let Poll::Ready(downloaded) = self.files_count.poll_unpin(cx) {
-                let max_missing = self
-                    .max_files
-                    .saturating_sub(downloaded + self.downloading)
-                    .max(self.last.unwrap_or_default().saturating_sub(self.index));
-                self.state = State::Missing(max_missing);
-            }
+        if self.state == State::CountFiles &&
+            let Poll::Ready(downloaded) = self.files_count.poll_unpin(cx)
+        {
+            let max_missing = self
+                .max_files
+                .saturating_sub(downloaded + self.downloading)
+                .max(self.last.unwrap_or_default().saturating_sub(self.index));
+            self.state = State::Missing(max_missing);
         }
 
         if let State::Missing(max_missing) = self.state {

@@ -316,18 +316,16 @@ impl<Http: HttpClient + Clone + Send + Sync + 'static + Unpin> Stream for Starti
             }
         }
 
-        if let State::NextUrl(max_missing) = self.state {
-            if let Poll::Ready(url) = self.next_url.poll_unpin(cx) {
-                self.state = State::Missing(max_missing - 1);
+        if let State::NextUrl(max_missing) = self.state &&
+            let Poll::Ready(url) = self.next_url.poll_unpin(cx)
+        {
+            self.state = State::Missing(max_missing - 1);
 
-                return Poll::Ready(url.transpose().map(|url| -> DownloadFuture {
-                    let mut client = self.client.clone();
+            return Poll::Ready(url.transpose().map(|url| -> DownloadFuture {
+                let mut client = self.client.clone();
 
-                    Box::pin(
-                        async move { client.download_to_file(url?).await.map(EraRemoteMeta::new) },
-                    )
-                }));
-            }
+                Box::pin(async move { client.download_to_file(url?).await.map(EraRemoteMeta::new) })
+            }));
         }
 
         Poll::Pending
@@ -302,10 +302,10 @@ where
             if number <= last_header_number {
                 continue;
             }
-            if let Some(target) = target {
-                if number > target {
-                    break;
-                }
+            if let Some(target) = target &&
+                number > target
+            {
+                break;
             }
 
             let hash = header.hash_slow();

@@ -351,19 +351,18 @@ where
         // Database cursor for hash to number index
         let mut cursor_header_numbers =
             provider.tx_ref().cursor_write::<RawTable<tables::HeaderNumbers>>()?;
-        let mut first_sync = false;
 
         // If we only have the genesis block hash, then we are at first sync, and we can remove it,
         // add it to the collector and use tx.append on all hashes.
-        if provider.tx_ref().entries::<RawTable<tables::HeaderNumbers>>()? == 1 {
-            if let Some((hash, block_number)) = cursor_header_numbers.last()? {
-                if block_number.value()? == 0 {
-                    hash_collector.insert(hash.key()?, 0)?;
-                    cursor_header_numbers.delete_current()?;
-                    first_sync = true;
-                }
-            }
-        }
+        let first_sync = if provider.tx_ref().entries::<RawTable<tables::HeaderNumbers>>()? == 1 &&
+            let Some((hash, block_number)) = cursor_header_numbers.last()? &&
+            block_number.value()? == 0
+        {
+            hash_collector.insert(hash.key()?, 0)?;
+            cursor_header_numbers.delete_current()?;
+            true
+        } else {
+            false
+        };
 
         let interval = (total_headers / 10).max(8192);
@@ -37,17 +37,19 @@ where
         // operation as hashing that is required for state root got calculated in every
         // transaction This was replaced with is_success flag.
         // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658
-        if chain_spec.is_byzantium_active_at_block(block.header().number()) {
-            if let Err(error) =
-                verify_receipts(block.header().receipts_root(), block.header().logs_bloom(), receipts)
-            {
-                let receipts = receipts
-                    .iter()
-                    .map(|r| Bytes::from(r.with_bloom_ref().encoded_2718()))
-                    .collect::<Vec<_>>();
-                tracing::debug!(%error, ?receipts, "receipts verification failed");
-                return Err(error)
-            }
+        if chain_spec.is_byzantium_active_at_block(block.header().number()) &&
+            let Err(error) = verify_receipts(
+                block.header().receipts_root(),
+                block.header().logs_bloom(),
+                receipts,
+            )
+        {
+            let receipts = receipts
+                .iter()
+                .map(|r| Bytes::from(r.with_bloom_ref().encoded_2718()))
+                .collect::<Vec<_>>();
+            tracing::debug!(%error, ?receipts, "receipts verification failed");
+            return Err(error)
         }
 
         // Validate that the header requests hash matches the calculated requests hash
@@ -501,11 +501,11 @@ where
                 .next_notification_id
                 .checked_sub(this.min_id)
                 .expect("exex expected notification ID outside the manager's range");
-            if let Some(notification) = this.buffer.get(notification_index) {
-                if let Poll::Ready(Err(err)) = exex.send(cx, notification) {
-                    // The channel was closed, which is irrecoverable for the manager
-                    return Poll::Ready(Err(err.into()))
-                }
+            if let Some(notification) = this.buffer.get(notification_index) &&
+                let Poll::Ready(Err(err)) = exex.send(cx, notification)
+            {
+                // The channel was closed, which is irrecoverable for the manager
+                return Poll::Ready(Err(err.into()))
             }
             min_id = min_id.min(exex.next_notification_id);
             this.exex_handles.push(exex);
@@ -59,11 +59,11 @@ impl BanList {
     pub fn evict_peers(&mut self, now: Instant) -> Vec<PeerId> {
         let mut evicted = Vec::new();
         self.banned_peers.retain(|peer, until| {
-            if let Some(until) = until {
-                if now > *until {
-                    evicted.push(*peer);
-                    return false
-                }
+            if let Some(until) = until &&
+                now > *until
+            {
+                evicted.push(*peer);
+                return false
             }
             true
         });

@@ -74,11 +74,11 @@ impl BanList {
     pub fn evict_ips(&mut self, now: Instant) -> Vec<IpAddr> {
         let mut evicted = Vec::new();
         self.banned_ips.retain(|peer, until| {
-            if let Some(until) = until {
-                if now > *until {
-                    evicted.push(*peer);
-                    return false
-                }
+            if let Some(until) = until &&
+                now > *until
+            {
+                evicted.push(*peer);
+                return false
             }
             true
         });
@@ -627,10 +627,10 @@ impl Discv4Service {
 
     /// Sets the external Ip to the configured external IP if [`NatResolver::ExternalIp`].
     fn resolve_external_ip(&mut self) {
-        if let Some(r) = &self.resolve_external_ip_interval {
-            if let Some(external_ip) = r.resolver().as_external_ip() {
-                self.set_external_ip_addr(external_ip);
-            }
+        if let Some(r) = &self.resolve_external_ip_interval &&
+            let Some(external_ip) = r.resolver().as_external_ip()
+        {
+            self.set_external_ip_addr(external_ip);
         }
     }

@@ -904,10 +904,10 @@ impl Discv4Service {
 
     /// Check if the peer has an active bond.
     fn has_bond(&self, remote_id: PeerId, remote_ip: IpAddr) -> bool {
-        if let Some(timestamp) = self.received_pongs.last_pong(remote_id, remote_ip) {
-            if timestamp.elapsed() < self.config.bond_expiration {
-                return true
-            }
+        if let Some(timestamp) = self.received_pongs.last_pong(remote_id, remote_ip) &&
+            timestamp.elapsed() < self.config.bond_expiration
+        {
+            return true
         }
         false
     }
@@ -3048,12 +3048,11 @@ mod tests {
         loop {
             tokio::select! {
                 Some(update) = updates.next() => {
-                    if let DiscoveryUpdate::Added(record) = update {
-                        if record.id == peerid_1 {
-                            bootnode_appeared = true;
-                            break;
-                        }
+                    if let DiscoveryUpdate::Added(record) = update
+                        && record.id == peerid_1 {
+                        bootnode_appeared = true;
+                        break;
                     }
                 }
                 _ = &mut timeout => break,
             }
@@ -152,10 +152,10 @@ impl ConfigBuilder {
     /// Adds a comma-separated list of enodes, serialized unsigned node records, to boot nodes.
     pub fn add_serialized_unsigned_boot_nodes(mut self, enodes: &[&str]) -> Self {
         for node in enodes {
-            if let Ok(node) = node.parse() {
-                if let Ok(node) = BootNode::from_unsigned(node) {
-                    self.bootstrap_nodes.insert(node);
-                }
+            if let Ok(node) = node.parse() &&
+                let Ok(node) = BootNode::from_unsigned(node)
+            {
+                self.bootstrap_nodes.insert(node);
             }
         }
@@ -411,14 +411,14 @@ pub fn discv5_sockets_wrt_rlpx_addr(
             let discv5_socket_ipv6 =
                 discv5_addr_ipv6.map(|ip| SocketAddrV6::new(ip, discv5_port_ipv6, 0, 0));
 
-            if let Some(discv5_addr) = discv5_addr_ipv4 {
-                if discv5_addr != rlpx_addr {
-                    debug!(target: "net::discv5",
-                        %discv5_addr,
-                        %rlpx_addr,
-                        "Overwriting discv5 IPv4 address with RLPx IPv4 address, limited to one advertised IP address per IP version"
-                    );
-                }
+            if let Some(discv5_addr) = discv5_addr_ipv4 &&
+                discv5_addr != rlpx_addr
+            {
+                debug!(target: "net::discv5",
+                    %discv5_addr,
+                    %rlpx_addr,
+                    "Overwriting discv5 IPv4 address with RLPx IPv4 address, limited to one advertised IP address per IP version"
+                );
             }
 
             // overwrite discv5 ipv4 addr with RLPx address. this is since there is no

@@ -430,14 +430,14 @@ pub fn discv5_sockets_wrt_rlpx_addr(
             let discv5_socket_ipv4 =
                 discv5_addr_ipv4.map(|ip| SocketAddrV4::new(ip, discv5_port_ipv4));
 
-            if let Some(discv5_addr) = discv5_addr_ipv6 {
-                if discv5_addr != rlpx_addr {
-                    debug!(target: "net::discv5",
-                        %discv5_addr,
-                        %rlpx_addr,
-                        "Overwriting discv5 IPv6 address with RLPx IPv6 address, limited to one advertised IP address per IP version"
-                    );
-                }
+            if let Some(discv5_addr) = discv5_addr_ipv6 &&
+                discv5_addr != rlpx_addr
+            {
+                debug!(target: "net::discv5",
+                    %discv5_addr,
+                    %rlpx_addr,
+                    "Overwriting discv5 IPv6 address with RLPx IPv6 address, limited to one advertised IP address per IP version"
+                );
             }
 
             // overwrite discv5 ipv6 addr with RLPx address. this is since there is no
@@ -80,12 +80,12 @@ impl<R: Resolver, K: EnrKeyUnambiguous> QueryPool<R, K> {
 
         // queue in new queries if we have capacity
         'queries: while self.active_queries.len() < self.rate_limit.limit() as usize {
-            if self.rate_limit.poll_ready(cx).is_ready() {
-                if let Some(query) = self.queued_queries.pop_front() {
-                    self.rate_limit.tick();
-                    self.active_queries.push(query);
-                    continue 'queries
-                }
+            if self.rate_limit.poll_ready(cx).is_ready() &&
+                let Some(query) = self.queued_queries.pop_front()
+            {
+                self.rate_limit.tick();
+                self.active_queries.push(query);
+                continue 'queries
             }
             break
         }
@@ -172,19 +172,16 @@ where
     ///
     /// Returns `None` if no more requests are required.
     fn next_request(&mut self) -> Option<HeadersRequest> {
-        if let Some(local_head) = self.local_block_number() {
-            if self.next_request_block_number > local_head {
-                let request = calc_next_request(
-                    local_head,
-                    self.next_request_block_number,
-                    self.request_limit,
-                );
-                // need to shift the tracked request block number based on the number of requested
-                // headers so follow-up requests will use that as start.
-                self.next_request_block_number -= request.limit;
+        if let Some(local_head) = self.local_block_number() &&
+            self.next_request_block_number > local_head
+        {
+            let request =
+                calc_next_request(local_head, self.next_request_block_number, self.request_limit);
+            // need to shift the tracked request block number based on the number of requested
+            // headers so follow-up requests will use that as start.
+            self.next_request_block_number -= request.limit;
 
-                return Some(request)
-            }
+            return Some(request)
         }
 
         None
@@ -179,18 +179,18 @@ where
         }
 
         // Ensure peer's total difficulty is reasonable
-        if let StatusMessage::Legacy(s) = their_status_message {
-            if s.total_difficulty.bit_len() > 160 {
-                unauth
-                    .disconnect(DisconnectReason::ProtocolBreach)
-                    .await
-                    .map_err(EthStreamError::from)?;
-                return Err(EthHandshakeError::TotalDifficultyBitLenTooLarge {
-                    got: s.total_difficulty.bit_len(),
-                    maximum: 160,
-                }
-                .into());
-            }
+        if let StatusMessage::Legacy(s) = their_status_message &&
+            s.total_difficulty.bit_len() > 160
+        {
+            unauth
+                .disconnect(DisconnectReason::ProtocolBreach)
+                .await
+                .map_err(EthStreamError::from)?;
+            return Err(EthHandshakeError::TotalDifficultyBitLenTooLarge {
+                got: s.total_difficulty.bit_len(),
+                maximum: 160,
+            }
+            .into());
         }
 
         // Fork validation
@@ -656,13 +656,11 @@ impl<N: NetworkPrimitives> NetworkConfigBuilder<N> {
 
         // If default DNS config is used then we add the known dns network to bootstrap from
         if let Some(dns_networks) =
-            dns_discovery_config.as_mut().and_then(|c| c.bootstrap_dns_networks.as_mut())
+            dns_discovery_config.as_mut().and_then(|c| c.bootstrap_dns_networks.as_mut()) &&
+            dns_networks.is_empty() &&
+            let Some(link) = chain_spec.chain().public_dns_network_protocol()
         {
-            if dns_networks.is_empty() {
-                if let Some(link) = chain_spec.chain().public_dns_network_protocol() {
-                    dns_networks.insert(link.parse().expect("is valid DNS link entry"));
-                }
-            }
+            dns_networks.insert(link.parse().expect("is valid DNS link entry"));
         }
 
         NetworkConfig {
@@ -267,12 +267,11 @@ impl Discovery {
         while let Some(Poll::Ready(Some(update))) =
             self.discv5_updates.as_mut().map(|updates| updates.poll_next_unpin(cx))
         {
-            if let Some(discv5) = self.discv5.as_mut() {
-                if let Some(DiscoveredPeer { node_record, fork_id }) =
-                    discv5.on_discv5_update(update)
-                {
-                    self.on_node_record_update(node_record, fork_id);
-                }
+            if let Some(discv5) = self.discv5.as_mut() &&
+                let Some(DiscoveredPeer { node_record, fork_id }) =
+                    discv5.on_discv5_update(update)
+            {
+                self.on_node_record_update(node_record, fork_id);
             }
         }
@@ -116,12 +116,12 @@ impl<N: NetworkPrimitives> StateFetcher<N> {
     ///
     /// Returns `true` if this a newer block
     pub(crate) fn update_peer_block(&mut self, peer_id: &PeerId, hash: B256, number: u64) -> bool {
-        if let Some(peer) = self.peers.get_mut(peer_id) {
-            if number > peer.best_number {
-                peer.best_hash = hash;
-                peer.best_number = number;
-                return true
-            }
+        if let Some(peer) = self.peers.get_mut(peer_id) &&
+            number > peer.best_number
+        {
+            peer.best_hash = hash;
+            peer.best_number = number;
+            return true
        }
        false
    }
@@ -382,14 +382,15 @@ impl PeersManager {
 
     /// Bans the peer temporarily with the configured ban timeout
     fn ban_peer(&mut self, peer_id: PeerId) {
-        let mut ban_duration = self.ban_duration;
-        if let Some(peer) = self.peers.get(&peer_id) {
-            if peer.is_trusted() || peer.is_static() {
-                // For misbehaving trusted or static peers, we provide a bit more leeway when
-                // penalizing them.
-                ban_duration = self.backoff_durations.low / 2;
-            }
-        }
+        let ban_duration = if let Some(peer) = self.peers.get(&peer_id) &&
+            (peer.is_trusted() || peer.is_static())
+        {
+            // For misbehaving trusted or static peers, we provide a bit more leeway when
+            // penalizing them.
+            self.backoff_durations.low / 2
+        } else {
+            self.ban_duration
+        };
 
         self.ban_list.ban_peer_until(peer_id, std::time::Instant::now() + ban_duration);
         self.queued_actions.push_back(PeerAction::BanPeer { peer_id });
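A related pattern appears here and in the headers-stage `first_sync` change: because an `if` with a let-chain is still an expression, a mutable default-then-overwrite becomes a direct initializer with an `else` arm, leaving the binding immutable. A standalone sketch (edition 2024, illustrative names):

```rust
use std::time::Duration;

struct Peer { trusted: bool }

fn ban_duration_for(peer: Option<&Peer>, default: Duration, lenient: Duration) -> Duration {
    // The let-chain `if` is used as an expression: both arms produce the
    // value, so no mutable default is needed.
    if let Some(peer) = peer &&
        peer.trusted
    {
        lenient
    } else {
        default
    }
}

fn main() {
    let d = ban_duration_for(Some(&Peer { trusted: true }),
                             Duration::from_secs(60), Duration::from_secs(15));
    assert_eq!(d, Duration::from_secs(15));
}
```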
@@ -748,11 +748,11 @@ impl<N: NetworkPrimitives> Future for ActiveSession<N> {
 
         while this.internal_request_timeout_interval.poll_tick(cx).is_ready() {
             // check for timed out requests
-            if this.check_timed_out_requests(Instant::now()) {
-                if let Poll::Ready(Ok(_)) = this.to_session_manager.poll_reserve(cx) {
-                    let msg = ActiveSessionMessage::ProtocolBreach { peer_id: this.remote_peer_id };
-                    this.pending_message_to_session = Some(msg);
-                }
+            if this.check_timed_out_requests(Instant::now()) &&
+                let Poll::Ready(Ok(_)) = this.to_session_manager.poll_reserve(cx)
+            {
+                let msg = ActiveSessionMessage::ProtocolBreach { peer_id: this.remote_peer_id };
+                this.pending_message_to_session = Some(msg);
             }
         }
@@ -80,10 +80,10 @@ impl SessionCounter {
     }
 
     const fn ensure(current: u32, limit: Option<u32>) -> Result<(), ExceedsSessionLimit> {
-        if let Some(limit) = limit {
-            if current >= limit {
-                return Err(ExceedsSessionLimit(limit))
-            }
+        if let Some(limit) = limit &&
+            current >= limit
+        {
+            return Err(ExceedsSessionLimit(limit))
         }
         Ok(())
     }
@@ -697,12 +697,11 @@ impl<Pool: TransactionPool, N: NetworkPrimitives, PBundle: TransactionPolicies>
             }
         };
 
-        if is_eth68_message {
-            if let Some((actual_ty_byte, _)) = *metadata_ref_mut {
-                if let Ok(parsed_tx_type) = TxType::try_from(actual_ty_byte) {
-                    tx_types_counter.increase_by_tx_type(parsed_tx_type);
-                }
-            }
+        if is_eth68_message &&
+            let Some((actual_ty_byte, _)) = *metadata_ref_mut &&
+            let Ok(parsed_tx_type) = TxType::try_from(actual_ty_byte)
+        {
+            tx_types_counter.increase_by_tx_type(parsed_tx_type);
         }
 
         let decision = self
@@ -280,18 +280,18 @@ where
     Client: BlockClient,
 {
     fn poll(&mut self, cx: &mut Context<'_>) -> Poll<ResponseResult<Client::Header, Client::Body>> {
-        if let Some(fut) = Pin::new(&mut self.header).as_pin_mut() {
-            if let Poll::Ready(res) = fut.poll(cx) {
-                self.header = None;
-                return Poll::Ready(ResponseResult::Header(res))
-            }
+        if let Some(fut) = Pin::new(&mut self.header).as_pin_mut() &&
+            let Poll::Ready(res) = fut.poll(cx)
+        {
+            self.header = None;
+            return Poll::Ready(ResponseResult::Header(res))
         }
 
-        if let Some(fut) = Pin::new(&mut self.body).as_pin_mut() {
-            if let Poll::Ready(res) = fut.poll(cx) {
-                self.body = None;
-                return Poll::Ready(ResponseResult::Body(res))
-            }
+        if let Some(fut) = Pin::new(&mut self.body).as_pin_mut() &&
+            let Poll::Ready(res) = fut.poll(cx)
+        {
+            self.body = None;
+            return Poll::Ready(ResponseResult::Body(res))
         }
 
         Poll::Pending

@@ -621,18 +621,18 @@ where
         &mut self,
         cx: &mut Context<'_>,
     ) -> Poll<RangeResponseResult<Client::Header, Client::Body>> {
-        if let Some(fut) = Pin::new(&mut self.headers).as_pin_mut() {
-            if let Poll::Ready(res) = fut.poll(cx) {
-                self.headers = None;
-                return Poll::Ready(RangeResponseResult::Header(res))
-            }
+        if let Some(fut) = Pin::new(&mut self.headers).as_pin_mut() &&
+            let Poll::Ready(res) = fut.poll(cx)
+        {
+            self.headers = None;
+            return Poll::Ready(RangeResponseResult::Header(res))
         }
 
-        if let Some(fut) = Pin::new(&mut self.bodies).as_pin_mut() {
-            if let Poll::Ready(res) = fut.poll(cx) {
-                self.bodies = None;
-                return Poll::Ready(RangeResponseResult::Body(res))
-            }
+        if let Some(fut) = Pin::new(&mut self.bodies).as_pin_mut() &&
+            let Poll::Ready(res) = fut.poll(cx)
+        {
+            self.bodies = None;
+            return Poll::Ready(RangeResponseResult::Body(res))
         }
 
         Poll::Pending
@@ -63,11 +63,11 @@ impl NodeRecord {
     /// See also [`std::net::Ipv6Addr::to_ipv4_mapped`]
     pub fn convert_ipv4_mapped(&mut self) -> bool {
         // convert IPv4 mapped IPv6 address
-        if let IpAddr::V6(v6) = self.address {
-            if let Some(v4) = v6.to_ipv4_mapped() {
-                self.address = v4.into();
-                return true
-            }
+        if let IpAddr::V6(v6) = self.address &&
+            let Some(v4) = v6.to_ipv4_mapped()
+        {
+            self.address = v4.into();
+            return true
         }
         false
     }
@@ -956,23 +956,24 @@ where
     where
         T: FullNodeTypes<Provider: StaticFileProviderFactory>,
     {
-        if self.node_config().pruning.bodies_pre_merge {
-            if let Some(merge_block) =
-                self.chain_spec().ethereum_fork_activation(EthereumHardfork::Paris).block_number()
-            {
-                // Ensure we only expire transactions after we synced past the merge block.
-                let Some(latest) = self.blockchain_db().latest_header()? else { return Ok(()) };
-                if latest.number() > merge_block {
-                    let provider = self.blockchain_db().static_file_provider();
-                    if provider
-                        .get_lowest_transaction_static_file_block()
-                        .is_some_and(|lowest| lowest < merge_block)
-                    {
-                        info!(target: "reth::cli", merge_block, "Expiring pre-merge transactions");
-                        provider.delete_transactions_below(merge_block)?;
-                    } else {
-                        debug!(target: "reth::cli", merge_block, "No pre-merge transactions to expire");
-                    }
-                }
-            }
-        }
+        if self.node_config().pruning.bodies_pre_merge &&
+            let Some(merge_block) = self
+                .chain_spec()
+                .ethereum_fork_activation(EthereumHardfork::Paris)
+                .block_number()
+        {
+            // Ensure we only expire transactions after we synced past the merge block.
+            let Some(latest) = self.blockchain_db().latest_header()? else { return Ok(()) };
+            if latest.number() > merge_block {
+                let provider = self.blockchain_db().static_file_provider();
+                if provider
+                    .get_lowest_transaction_static_file_block()
+                    .is_some_and(|lowest| lowest < merge_block)
+                {
+                    info!(target: "reth::cli", merge_block, "Expiring pre-merge transactions");
+                    provider.delete_transactions_below(merge_block)?;
+                } else {
+                    debug!(target: "reth::cli", merge_block, "No pre-merge transactions to expire");
+                }
+            }
+        }
@@ -181,14 +181,14 @@ where
         let response =
             timeout(READ_TIMEOUT, conn.read_json()).await.map_err(|_| EthStatsError::Timeout)??;
 
-        if let Some(ack) = response.get("emit") {
-            if ack.get(0) == Some(&Value::String("ready".to_string())) {
-                info!(
-                    target: "ethstats",
-                    "Login successful to EthStats server as node_id {}", self.credentials.node_id
-                );
-                return Ok(());
-            }
+        if let Some(ack) = response.get("emit") &&
+            ack.get(0) == Some(&Value::String("ready".to_string()))
+        {
+            info!(
+                target: "ethstats",
+                "Login successful to EthStats server as node_id {}", self.credentials.node_id
+            );
+            return Ok(());
         }
 
         debug!(target: "ethstats", "Login failed: Unauthorized or unexpected login response");

@@ -595,10 +595,10 @@ where
         tokio::spawn(async move {
             loop {
                 let head = canonical_stream.next().await;
-                if let Some(head) = head {
-                    if head_tx.send(head).await.is_err() {
-                        break;
-                    }
+                if let Some(head) = head &&
+                    head_tx.send(head).await.is_err()
+                {
+                    break;
                 }
             }

@@ -681,10 +681,10 @@ where
     /// Attempts to close the connection cleanly and logs any errors
     /// that occur during the process.
     async fn disconnect(&self) {
-        if let Some(conn) = self.conn.write().await.take() {
-            if let Err(e) = conn.close().await {
-                debug!(target: "ethstats", "Error closing connection: {}", e);
-            }
+        if let Some(conn) = self.conn.write().await.take() &&
+            let Err(e) = conn.close().await
+        {
+            debug!(target: "ethstats", "Error closing connection: {}", e);
         }
     }
@@ -733,16 +733,13 @@ mod tests {
 
         // Handle ping
         while let Some(Ok(msg)) = ws_stream.next().await {
-            if let Message::Text(text) = msg {
-                if text.contains("node-ping") {
-                    let pong = json!({
-                        "emit": ["node-pong", {"id": "test-node"}]
-                    });
-                    ws_stream
-                        .send(Message::Text(Utf8Bytes::from(pong.to_string())))
-                        .await
-                        .unwrap();
-                }
+            if let Message::Text(text) = msg &&
+                text.contains("node-ping")
+            {
+                let pong = json!({
+                    "emit": ["node-pong", {"id": "test-node"}]
+                });
+                ws_stream.send(Message::Text(Utf8Bytes::from(pong.to_string()))).await.unwrap();
             }
         }
     });
@@ -13,7 +13,9 @@ fn main() {

     // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided.
     if std::env::var_os("RUST_BACKTRACE").is_none() {
-        std::env::set_var("RUST_BACKTRACE", "1");
+        unsafe {
+            std::env::set_var("RUST_BACKTRACE", "1");
+        }
     }

     if let Err(err) =
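This hunk is not a let-chain: edition 2024 marks `std::env::set_var` and `std::env::remove_var` as `unsafe`, because mutating the process environment can race with concurrent reads on POSIX. A sketch of the resulting call shape, assuming it runs before any other threads are spawned:

fn enable_backtraces() {
    if std::env::var_os("RUST_BACKTRACE").is_none() {
        // SAFETY: called at startup, before any threads that might read the
        // environment exist, so the mutation cannot race.
        unsafe {
            std::env::set_var("RUST_BACKTRACE", "1");
        }
    }
}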
@@ -459,33 +459,33 @@ impl OpGenesisInfo {
                 .unwrap_or_default(),
             ..Default::default()
         };
-        if let Some(optimism_base_fee_info) = &info.optimism_chain_info.base_fee_info {
-            if let (Some(elasticity), Some(denominator)) = (
-                optimism_base_fee_info.eip1559_elasticity,
-                optimism_base_fee_info.eip1559_denominator,
-            ) {
-                let base_fee_params = if let Some(canyon_denominator) =
-                    optimism_base_fee_info.eip1559_denominator_canyon
-                {
-                    BaseFeeParamsKind::Variable(
-                        vec![
-                            (
-                                EthereumHardfork::London.boxed(),
-                                BaseFeeParams::new(denominator as u128, elasticity as u128),
-                            ),
-                            (
-                                OpHardfork::Canyon.boxed(),
-                                BaseFeeParams::new(canyon_denominator as u128, elasticity as u128),
-                            ),
-                        ]
-                        .into(),
-                    )
-                } else {
-                    BaseFeeParams::new(denominator as u128, elasticity as u128).into()
-                };
-
-                info.base_fee_params = base_fee_params;
-            }
-        }
+        if let Some(optimism_base_fee_info) = &info.optimism_chain_info.base_fee_info &&
+            let (Some(elasticity), Some(denominator)) = (
+                optimism_base_fee_info.eip1559_elasticity,
+                optimism_base_fee_info.eip1559_denominator,
+            )
+        {
+            let base_fee_params = if let Some(canyon_denominator) =
+                optimism_base_fee_info.eip1559_denominator_canyon
+            {
+                BaseFeeParamsKind::Variable(
+                    vec![
+                        (
+                            EthereumHardfork::London.boxed(),
+                            BaseFeeParams::new(denominator as u128, elasticity as u128),
+                        ),
+                        (
+                            OpHardfork::Canyon.boxed(),
+                            BaseFeeParams::new(canyon_denominator as u128, elasticity as u128),
+                        ),
+                    ]
+                    .into(),
+                )
+            } else {
+                BaseFeeParams::new(denominator as u128, elasticity as u128).into()
+            };

+            info.base_fee_params = base_fee_params;
+        }

         info
@@ -498,19 +498,18 @@ pub fn make_op_genesis_header(genesis: &Genesis, hardforks: &ChainHardforks) ->

     // If Isthmus is active, overwrite the withdrawals root with the storage root of predeploy
     // `L2ToL1MessagePasser.sol`
-    if hardforks.fork(OpHardfork::Isthmus).active_at_timestamp(header.timestamp) {
-        if let Some(predeploy) = genesis.alloc.get(&ADDRESS_L2_TO_L1_MESSAGE_PASSER) {
-            if let Some(storage) = &predeploy.storage {
-                header.withdrawals_root =
-                    Some(storage_root_unhashed(storage.iter().filter_map(|(k, v)| {
-                        if v.is_zero() {
-                            None
-                        } else {
-                            Some((*k, (*v).into()))
-                        }
-                    })));
-            }
-        }
+    if hardforks.fork(OpHardfork::Isthmus).active_at_timestamp(header.timestamp) &&
+        let Some(predeploy) = genesis.alloc.get(&ADDRESS_L2_TO_L1_MESSAGE_PASSER) &&
+        let Some(storage) = &predeploy.storage
+    {
+        header.withdrawals_root =
+            Some(storage_root_unhashed(storage.iter().filter_map(|(k, v)| {
+                if v.is_zero() {
+                    None
+                } else {
+                    Some((*k, (*v).into()))
+                }
+            })));
     }

     header
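Let-chains also allow several `let` patterns and plain boolean tests in one condition, as the Isthmus hunk above does with three links. A condensed sketch of the same shape under illustrative names:

use std::collections::HashMap;

fn lookup_balance(active: bool, accounts: &HashMap<String, Option<u64>>) -> Option<u64> {
    // One chain replaces three nested blocks; the body runs only when the
    // flag is set and both patterns match.
    if active &&
        let Some(entry) = accounts.get("predeploy") &&
        let Some(balance) = entry
    {
        return Some(*balance);
    }
    None
}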
@@ -141,11 +141,10 @@ where

         // Ensure that receipts hasn't been initialized apart from `init_genesis`.
         if let Some(num_receipts) =
-            static_file_provider.get_highest_static_file_tx(StaticFileSegment::Receipts)
+            static_file_provider.get_highest_static_file_tx(StaticFileSegment::Receipts) &&
+            num_receipts > 0
         {
-            if num_receipts > 0 {
-                eyre::bail!("Expected no receipts in storage, but found {num_receipts}.");
-            }
+            eyre::bail!("Expected no receipts in storage, but found {num_receipts}.");
         }
         match static_file_provider.get_highest_static_file_block(StaticFileSegment::Receipts) {
             Some(receipts_block) => {
@@ -303,7 +303,7 @@ mod tests {

         // Verify deposit transaction
         let deposit_tx = match &deposit_decoded.transaction {
-            OpTypedTransaction::Legacy(ref tx) => tx,
+            OpTypedTransaction::Legacy(tx) => tx,
             _ => panic!("Expected legacy transaction for NFT deposit"),
         };

@@ -345,7 +345,7 @@ mod tests {
         assert!(system_decoded.is_legacy());

         let system_tx = match &system_decoded.transaction {
-            OpTypedTransaction::Legacy(ref tx) => tx,
+            OpTypedTransaction::Legacy(tx) => tx,
             _ => panic!("Expected Legacy transaction"),
         };

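The two test hunks above drop `ref` from match arms rather than chaining conditions. When the scrutinee is already a reference, match ergonomics bind by reference automatically, and edition 2024's stricter pattern rules (surfaced by the `rust_2024_incompatible_pat` lint enabled in the workspace manifest) flag the now-redundant `ref`. A sketch with stand-in types:

enum Tx {
    Legacy(u64),
    Eip1559(u64),
}

fn nonce(tx: &Tx) -> &u64 {
    match tx {
        // Edition 2021 style was `Tx::Legacy(ref tx) => tx`; matching on
        // `&Tx` already puts the arm in by-reference binding mode.
        Tx::Legacy(tx) => tx,
        Tx::Eip1559(tx) => tx,
    }
}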
@@ -93,21 +93,21 @@ pub fn validate_block_post_execution<R: DepositReceipt>(
     // operation as hashing that is required for state root got calculated in every
     // transaction This was replaced with is_success flag.
     // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658
-    if chain_spec.is_byzantium_active_at_block(header.number()) {
-        if let Err(error) = verify_receipts_optimism(
+    if chain_spec.is_byzantium_active_at_block(header.number()) &&
+        let Err(error) = verify_receipts_optimism(
             header.receipts_root(),
             header.logs_bloom(),
             receipts,
             chain_spec,
             header.timestamp(),
-        ) {
-            let receipts = receipts
-                .iter()
-                .map(|r| Bytes::from(r.with_bloom_ref().encoded_2718()))
-                .collect::<Vec<_>>();
-            tracing::debug!(%error, ?receipts, "receipts verification failed");
-            return Err(error)
-        }
+        )
+    {
+        let receipts = receipts
+            .iter()
+            .map(|r| Bytes::from(r.with_bloom_ref().encoded_2718()))
+            .collect::<Vec<_>>();
+        tracing::debug!(%error, ?receipts, "receipts verification failed");
+        return Err(error)
     }

     // Check if gas used matches the value set in header.
@@ -107,7 +107,12 @@ where
     /// Returns `None` if the flashblock have no `base` or the base is not a child block of latest.
     fn build_args(
         &mut self,
-    ) -> Option<BuildArgs<impl IntoIterator<Item = WithEncoded<Recovered<N::SignedTx>>>>> {
+    ) -> Option<
+        BuildArgs<
+            impl IntoIterator<Item = WithEncoded<Recovered<N::SignedTx>>>
+                + use<N, S, EvmConfig, Provider>,
+        >,
+    > {
         let Some(base) = self.blocks.payload_base() else {
             trace!(
                 flashblock_number = ?self.blocks.block_number(),
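The return-type change above is about captures, not let-chains: under edition 2024, return-position `impl Trait` captures all in-scope generic parameters by default, and a `+ use<...>` bound restores an explicit capture list. A small sketch of opting a parameter out (an illustrative function, not the builder's real API):

use std::fmt::Debug;

fn tagged<T, U: Debug>(items: Vec<T>, tag: U) -> impl Iterator<Item = T> + use<T> {
    // `U` never appears in the returned iterator, so leaving it out of
    // `use<..>` keeps the opaque type independent of the tag parameter.
    println!("building iterator tagged {tag:?}");
    items.into_iter()
}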
@@ -119,11 +124,11 @@ where
         };

         // attempt an initial consecutive check
-        if let Some(latest) = self.builder.provider().latest_header().ok().flatten() {
-            if latest.hash() != base.parent_hash {
-                trace!(flashblock_parent=?base.parent_hash, flashblock_number=base.block_number, local_latest=?latest.num_hash(), "Skipping non consecutive build attempt");
-                return None;
-            }
-        }
+        if let Some(latest) = self.builder.provider().latest_header().ok().flatten() &&
+            latest.hash() != base.parent_hash
+        {
+            trace!(flashblock_parent=?base.parent_hash, flashblock_number=base.block_number, local_latest=?latest.num_hash(), "Skipping non consecutive build attempt");
+            return None;
+        }

         Some(BuildArgs {
@@ -244,16 +249,15 @@ where
             let fut = this.canon_receiver.recv();
             pin!(fut);
             fut.poll_unpin(cx)
-        } {
-            if let Some(current) = this.on_new_tip(state) {
-                trace!(
-                    parent_hash = %current.block().parent_hash(),
-                    block_number = current.block().number(),
-                    "Clearing current flashblock on new canonical block"
-                );
-
-                return Poll::Ready(Some(Ok(None)))
-            }
+        } && let Some(current) = this.on_new_tip(state)
+        {
+            trace!(
+                parent_hash = %current.block().parent_hash(),
+                block_number = current.block().number(),
+                "Clearing current flashblock on new canonical block"
+            );
+
+            return Poll::Ready(Some(Ok(None)))
         }

         if !this.rebuild && this.current.is_some() {
@@ -690,11 +690,11 @@ where

             // We skip invalid cross chain txs, they would be removed on the next block update in
             // the maintenance job
-            if let Some(interop) = interop {
-                if !is_valid_interop(interop, self.config.attributes.timestamp()) {
-                    best_txs.mark_invalid(tx.signer(), tx.nonce());
-                    continue
-                }
-            }
+            if let Some(interop) = interop &&
+                !is_valid_interop(interop, self.config.attributes.timestamp())
+            {
+                best_txs.mark_invalid(tx.signer(), tx.nonce());
+                continue
+            }
             // check if the job was cancelled, if so we can exit early
             if self.cancel.is_cancelled() {
@@ -108,11 +108,10 @@ where
                     if let Some(notification) = canonical_notification {
                         let chain = notification.committed();
                         for block in chain.blocks_iter() {
-                            if block.body().contains_transaction(&hash) {
-                                if let Some(receipt) = this.transaction_receipt(hash).await? {
-                                    return Ok(receipt);
-                                }
-                            }
+                            if block.body().contains_transaction(&hash)
+                                && let Some(receipt) = this.transaction_receipt(hash).await? {
+                                return Ok(receipt);
+                            }
                         }
                     } else {
                         // Canonical stream ended
@@ -130,11 +129,10 @@ where
                 // Check flashblocks for faster confirmation (Optimism-specific)
                 if let Ok(Some(pending_block)) = this.pending_flashblock() {
                     let block_and_receipts = pending_block.into_block_and_receipts();
-                    if block_and_receipts.block.body().contains_transaction(&hash) {
-                        if let Some(receipt) = this.transaction_receipt(hash).await? {
-                            return Ok(receipt);
-                        }
-                    }
+                    if block_and_receipts.block.body().contains_transaction(&hash)
+                        && let Some(receipt) = this.transaction_receipt(hash).await? {
+                        return Ok(receipt);
+                    }
                 }
             }
         }
@@ -587,15 +587,15 @@ where
         let this = self.get_mut();

         // check if there is a better payload before returning the best payload
-        if let Some(fut) = Pin::new(&mut this.maybe_better).as_pin_mut() {
-            if let Poll::Ready(res) = fut.poll(cx) {
-                this.maybe_better = None;
-                if let Ok(Some(payload)) = res.map(|out| out.into_payload())
-                    .inspect_err(|err| warn!(target: "payload_builder", %err, "failed to resolve pending payload"))
-                {
-                    debug!(target: "payload_builder", "resolving better payload");
-                    return Poll::Ready(Ok(payload))
-                }
-            }
+        if let Some(fut) = Pin::new(&mut this.maybe_better).as_pin_mut() &&
+            let Poll::Ready(res) = fut.poll(cx)
+        {
+            this.maybe_better = None;
+            if let Ok(Some(payload)) = res.map(|out| out.into_payload()).inspect_err(
+                |err| warn!(target: "payload_builder", %err, "failed to resolve pending payload"),
+            ) {
+                debug!(target: "payload_builder", "resolving better payload");
+                return Poll::Ready(Ok(payload))
+            }
         }

@@ -604,20 +604,20 @@ where
             return Poll::Ready(Ok(best))
         }

-        if let Some(fut) = Pin::new(&mut this.empty_payload).as_pin_mut() {
-            if let Poll::Ready(res) = fut.poll(cx) {
-                this.empty_payload = None;
-                return match res {
-                    Ok(res) => {
-                        if let Err(err) = &res {
-                            warn!(target: "payload_builder", %err, "failed to resolve empty payload");
-                        } else {
-                            debug!(target: "payload_builder", "resolving empty payload");
-                        }
-                        Poll::Ready(res)
-                    }
-                    Err(err) => Poll::Ready(Err(err.into())),
-                }
-            }
-        }
+        if let Some(fut) = Pin::new(&mut this.empty_payload).as_pin_mut() &&
+            let Poll::Ready(res) = fut.poll(cx)
+        {
+            this.empty_payload = None;
+            return match res {
+                Ok(res) => {
+                    if let Err(err) = &res {
+                        warn!(target: "payload_builder", %err, "failed to resolve empty payload");
+                    } else {
+                        debug!(target: "payload_builder", "resolving empty payload");
+                    }

+                    Poll::Ready(res)
+                }
+                Err(err) => Poll::Ready(Err(err.into())),
+            }
+        }

@@ -305,10 +305,10 @@ where
     ) -> Option<PayloadFuture<T::BuiltPayload>> {
         debug!(target: "payload_builder", %id, "resolving payload job");

-        if let Some((cached, _, payload)) = &*self.cached_payload_rx.borrow() {
-            if *cached == id {
-                return Some(Box::pin(core::future::ready(Ok(payload.clone()))));
-            }
-        }
+        if let Some((cached, _, payload)) = &*self.cached_payload_rx.borrow() &&
+            *cached == id
+        {
+            return Some(Box::pin(core::future::ready(Ok(payload.clone()))));
+        }

         let job = self.payload_jobs.iter().position(|(_, job_id)| *job_id == id)?;
@@ -356,10 +356,10 @@ where
 {
     /// Returns the payload timestamp for the given payload.
     fn payload_timestamp(&self, id: PayloadId) -> Option<Result<u64, PayloadBuilderError>> {
-        if let Some((cached_id, timestamp, _)) = *self.cached_payload_rx.borrow() {
-            if cached_id == id {
-                return Some(Ok(timestamp));
-            }
-        }
+        if let Some((cached_id, timestamp, _)) = *self.cached_payload_rx.borrow() &&
+            cached_id == id
+        {
+            return Some(Ok(timestamp));
+        }

         let timestamp = self
@@ -48,18 +48,17 @@ where
         // data. If the TransactionLookup checkpoint is lagging behind (which can happen e.g. when
         // pre-merge history is dropped and then later tx lookup pruning is enabled) then we can
         // only prune from the tx checkpoint and onwards.
-        if let Some(txs_checkpoint) = provider.get_prune_checkpoint(PruneSegment::Transactions)? {
-            if input
-                .previous_checkpoint
-                .is_none_or(|checkpoint| checkpoint.block_number < txs_checkpoint.block_number)
-            {
-                input.previous_checkpoint = Some(txs_checkpoint);
-                debug!(
-                    target: "pruner",
-                    transactions_checkpoint = ?input.previous_checkpoint,
-                    "No TransactionLookup checkpoint found, using Transactions checkpoint as fallback"
-                );
-            }
-        }
+        if let Some(txs_checkpoint) = provider.get_prune_checkpoint(PruneSegment::Transactions)? &&
+            input
+                .previous_checkpoint
+                .is_none_or(|checkpoint| checkpoint.block_number < txs_checkpoint.block_number)
+        {
+            input.previous_checkpoint = Some(txs_checkpoint);
+            debug!(
+                target: "pruner",
+                transactions_checkpoint = ?input.previous_checkpoint,
+                "No TransactionLookup checkpoint found, using Transactions checkpoint as fallback"
+            );
+        }

         let (start, end) = match input.get_next_tx_num_range(provider)? {
@@ -96,12 +96,11 @@ impl ReceiptsLogPruneConfig {
         let mut lowest = None;

         for mode in self.values() {
-            if mode.is_distance() {
-                if let Some((block, _)) =
-                    mode.prune_target_block(tip, PruneSegment::ContractLogs, PrunePurpose::User)?
-                {
-                    lowest = Some(lowest.unwrap_or(u64::MAX).min(block));
-                }
-            }
+            if mode.is_distance() &&
+                let Some((block, _)) =
+                    mode.prune_target_block(tip, PruneSegment::ContractLogs, PrunePurpose::User)?
+            {
+                lowest = Some(lowest.unwrap_or(u64::MAX).min(block));
+            }
         }

@@ -120,19 +120,15 @@ where
         let mut executed = self.pending_state.executed_block(&ancestor_hash);

         // If it's not present, attempt to lookup invalid block.
-        if executed.is_none() {
-            if let Some(invalid) =
-                self.pending_state.invalid_recovered_block(&ancestor_hash)
-            {
-                trace!(target: "reth::ress_provider", %block_hash, %ancestor_hash, "Using invalid ancestor block for witness construction");
-                executed = Some(ExecutedBlockWithTrieUpdates {
-                    block: ExecutedBlock {
-                        recovered_block: invalid,
-                        ..Default::default()
-                    },
-                    trie: ExecutedTrieUpdates::empty(),
-                });
-            }
-        }
+        if executed.is_none() &&
+            let Some(invalid) =
+                self.pending_state.invalid_recovered_block(&ancestor_hash)
+        {
+            trace!(target: "reth::ress_provider", %block_hash, %ancestor_hash, "Using invalid ancestor block for witness construction");
+            executed = Some(ExecutedBlockWithTrieUpdates {
+                block: ExecutedBlock { recovered_block: invalid, ..Default::default() },
+                trie: ExecutedTrieUpdates::empty(),
+            });
+        }

         let Some(executed) = executed else {
@@ -144,11 +144,11 @@ where
         {
             // set permissions only on unix
             use std::os::unix::fs::PermissionsExt;
-            if let Some(perms_str) = &self.cfg.ipc_socket_permissions {
-                if let Ok(mode) = u32::from_str_radix(&perms_str.replace("0o", ""), 8) {
-                    let perms = std::fs::Permissions::from_mode(mode);
-                    let _ = std::fs::set_permissions(&self.endpoint, perms);
-                }
-            }
+            if let Some(perms_str) = &self.cfg.ipc_socket_permissions &&
+                let Ok(mode) = u32::from_str_radix(&perms_str.replace("0o", ""), 8)
+            {
+                let perms = std::fs::Permissions::from_mode(mode);
+                let _ = std::fs::set_permissions(&self.endpoint, perms);
+            }
         }
         listener
@@ -572,11 +572,10 @@ where

         // > Client software MUST NOT return trailing null values if the request extends past the current latest known block.
         // truncate the end if it's greater than the last block
-        if let Ok(best_block) = inner.provider.best_block_number() {
-            if end > best_block {
-                end = best_block;
-            }
-        }
+        if let Ok(best_block) = inner.provider.best_block_number()
+            && end > best_block {
+            end = best_block;
+        }

         for num in start..=end {
             let block_result = inner.provider.block(BlockHashOrNumber::Number(num));
@@ -195,16 +195,14 @@ pub trait EthBlocks:
         }

         if let Some(block_hash) =
-            self.provider().block_hash_for_id(block_id).map_err(Self::Error::from_eth_err)?
-        {
-            if let Some((block, receipts)) = self
-                .cache()
-                .get_block_and_receipts(block_hash)
-                .await
-                .map_err(Self::Error::from_eth_err)?
-            {
-                return Ok(Some((block, receipts)));
-            }
+            self.provider().block_hash_for_id(block_id).map_err(Self::Error::from_eth_err)? &&
+            let Some((block, receipts)) = self
+                .cache()
+                .get_block_and_receipts(block_hash)
+                .await
+                .map_err(Self::Error::from_eth_err)?
+        {
+            return Ok(Some((block, receipts)));
         }

         Ok(None)
@@ -122,14 +122,11 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA

             if let Some(block_overrides) = block_overrides {
                 // ensure we don't allow uncapped gas limit per block
-                if let Some(gas_limit_override) = block_overrides.gas_limit {
-                    if gas_limit_override > evm_env.block_env.gas_limit &&
-                        gas_limit_override > this.call_gas_limit()
-                    {
-                        return Err(
-                            EthApiError::other(EthSimulateError::GasLimitReached).into()
-                        )
-                    }
-                }
+                if let Some(gas_limit_override) = block_overrides.gas_limit &&
+                    gas_limit_override > evm_env.block_env.gas_limit &&
+                    gas_limit_override > this.call_gas_limit()
+                {
+                    return Err(EthApiError::other(EthSimulateError::GasLimitReached).into())
+                }
                 apply_block_overrides(block_overrides, &mut db, &mut evm_env.block_env);
             }
@@ -115,19 +115,19 @@ where

         let mut config = EthConfig { current, next: None, last: None };

-        if let Some(last_fork_idx) = current_fork_idx.checked_sub(1) {
-            if let Some(last_fork_timestamp) = fork_timestamps.get(last_fork_idx).copied() {
-                let fake_header = {
-                    let mut header = latest.clone();
-                    header.timestamp = last_fork_timestamp;
-                    header
-                };
-                let last_precompiles = evm_to_precompiles_map(
-                    self.evm_config.evm_for_block(EmptyDB::default(), &fake_header),
-                );
-
-                config.last = self.build_fork_config_at(last_fork_timestamp, last_precompiles);
-            }
-        }
+        if let Some(last_fork_idx) = current_fork_idx.checked_sub(1) &&
+            let Some(last_fork_timestamp) = fork_timestamps.get(last_fork_idx).copied()
+        {
+            let fake_header = {
+                let mut header = latest.clone();
+                header.timestamp = last_fork_timestamp;
+                header
+            };
+            let last_precompiles = evm_to_precompiles_map(
+                self.evm_config.evm_for_block(EmptyDB::default(), &fake_header),
+            );

+            config.last = self.build_fork_config_at(last_fork_timestamp, last_precompiles);
+        }

         if let Some(next_fork_timestamp) = fork_timestamps.get(current_fork_idx + 1).copied() {
@@ -88,14 +88,14 @@ pub trait EstimateCall: Call {
         let mut tx_env = self.create_txn_env(&evm_env, request, &mut db)?;

         // Check if this is a basic transfer (no input data to account with no code)
-        let mut is_basic_transfer = false;
-        if tx_env.input().is_empty() {
-            if let TxKind::Call(to) = tx_env.kind() {
-                if let Ok(code) = db.db.account_code(&to) {
-                    is_basic_transfer = code.map(|code| code.is_empty()).unwrap_or(true);
-                }
-            }
-        }
+        let is_basic_transfer = if tx_env.input().is_empty() &&
+            let TxKind::Call(to) = tx_env.kind() &&
+            let Ok(code) = db.db.account_code(&to)
+        {
+            code.map(|code| code.is_empty()).unwrap_or(true)
+        } else {
+            false
+        };

         // Check funds of the sender (only useful to check if transaction gas price is more than 0).
         //
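Here the rewrite also removes a mutable flag: because an `if` with a let-chain is an expression, its result can initialize an immutable binding directly. A sketch of the same shape with stand-in types:

fn is_basic_transfer(input: &[u8], callee_code: Option<&[u8]>) -> bool {
    // Previously: `let mut basic = false;` plus nested `if`s assigning into
    // it. The chain collapses that into a single expression.
    if input.is_empty() &&
        let Some(code) = callee_code
    {
        code.is_empty()
    } else {
        false
    }
}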
@@ -123,10 +123,10 @@ pub trait EstimateCall: Call {
             min_tx_env.set_gas_limit(MIN_TRANSACTION_GAS);

             // Reuse the same EVM instance
-            if let Ok(res) = evm.transact(min_tx_env).map_err(Self::Error::from_evm_err) {
-                if res.result.is_success() {
-                    return Ok(U256::from(MIN_TRANSACTION_GAS))
-                }
-            }
+            if let Ok(res) = evm.transact(min_tx_env).map_err(Self::Error::from_evm_err) &&
+                res.result.is_success()
+            {
+                return Ok(U256::from(MIN_TRANSACTION_GAS))
+            }
         }

@@ -109,10 +109,10 @@ pub trait EthFees:
         // need to validate that they are monotonically
         // increasing and 0 <= p <= 100
         // Note: The types used ensure that the percentiles are never < 0
-        if let Some(percentiles) = &reward_percentiles {
-            if percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.) {
-                return Err(EthApiError::InvalidRewardPercentiles.into())
-            }
-        }
+        if let Some(percentiles) = &reward_percentiles &&
+            percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.)
+        {
+            return Err(EthApiError::InvalidRewardPercentiles.into())
+        }

         // Fetch the headers and ensure we got all of them
@@ -72,22 +72,21 @@ pub trait LoadPendingBlock:
             >,
             Self::Error,
         > {
-        if let Some(block) = self.provider().pending_block().map_err(Self::Error::from_eth_err)? {
-            if let Some(receipts) = self
-                .provider()
-                .receipts_by_block(block.hash().into())
-                .map_err(Self::Error::from_eth_err)?
-            {
-                // Note: for the PENDING block we assume it is past the known merge block and
-                // thus this will not fail when looking up the total
-                // difficulty value for the blockenv.
-                let evm_env = self.evm_config().evm_env(block.header());
-
-                return Ok(PendingBlockEnv::new(
-                    evm_env,
-                    PendingBlockEnvOrigin::ActualPending(Arc::new(block), Arc::new(receipts)),
-                ));
-            }
-        }
+        if let Some(block) = self.provider().pending_block().map_err(Self::Error::from_eth_err)? &&
+            let Some(receipts) = self
+                .provider()
+                .receipts_by_block(block.hash().into())
+                .map_err(Self::Error::from_eth_err)?
+        {
+            // Note: for the PENDING block we assume it is past the known merge block and
+            // thus this will not fail when looking up the total
+            // difficulty value for the blockenv.
+            let evm_env = self.evm_config().evm_env(block.header());

+            return Ok(PendingBlockEnv::new(
+                evm_env,
+                PendingBlockEnvOrigin::ActualPending(Arc::new(block), Arc::new(receipts)),
+            ));
+        }

         // no pending block from the CL yet, so we use the latest block and modify the env
@@ -309,21 +308,21 @@ pub trait LoadPendingBlock:

             // There's only limited amount of blob space available per block, so we need to
             // check if the EIP-4844 can still fit in the block
-            if let Some(tx_blob_gas) = tx.blob_gas_used() {
-                if sum_blob_gas_used + tx_blob_gas > blob_params.max_blob_gas_per_block() {
-                    // we can't fit this _blob_ transaction into the block, so we mark it as
-                    // invalid, which removes its dependent transactions from
-                    // the iterator. This is similar to the gas limit condition
-                    // for regular transactions above.
-                    best_txs.mark_invalid(
-                        &pool_tx,
-                        InvalidPoolTransactionError::ExceedsGasLimit(
-                            tx_blob_gas,
-                            blob_params.max_blob_gas_per_block(),
-                        ),
-                    );
-                    continue
-                }
-            }
+            if let Some(tx_blob_gas) = tx.blob_gas_used() &&
+                sum_blob_gas_used + tx_blob_gas > blob_params.max_blob_gas_per_block()
+            {
+                // we can't fit this _blob_ transaction into the block, so we mark it as
+                // invalid, which removes its dependent transactions from
+                // the iterator. This is similar to the gas limit condition
+                // for regular transactions above.
+                best_txs.mark_invalid(
+                    &pool_tx,
+                    InvalidPoolTransactionError::ExceedsGasLimit(
+                        tx_blob_gas,
+                        blob_params.max_blob_gas_per_block(),
+                    ),
+                );
+                continue
+            }

             let gas_used = match builder.execute_transaction(tx.clone()) {
@@ -221,10 +221,10 @@ pub trait LoadState:
         Self: SpawnBlocking,
     {
         async move {
-            if at.is_pending() {
-                if let Ok(Some(state)) = self.local_pending_state().await {
-                    return Ok(state)
-                }
-            }
+            if at.is_pending() &&
+                let Ok(Some(state)) = self.local_pending_state().await
+            {
+                return Ok(state)
+            }

             self.provider().state_by_block_id(at).map_err(Self::Error::from_eth_err)
@@ -97,10 +97,10 @@ pub trait EthTransactions: LoadTransaction<Provider: BlockReaderIdExt> {
             while let Some(notification) = stream.next().await {
                 let chain = notification.committed();
                 for block in chain.blocks_iter() {
-                    if block.body().contains_transaction(&hash) {
-                        if let Some(receipt) = this.transaction_receipt(hash).await? {
-                            return Ok(receipt);
-                        }
-                    }
+                    if block.body().contains_transaction(&hash) &&
+                        let Some(receipt) = this.transaction_receipt(hash).await?
+                    {
+                        return Ok(receipt);
+                    }
                 }
             }
@@ -299,13 +299,12 @@ pub trait EthTransactions: LoadTransaction<Provider: BlockReaderIdExt> {
     {
         async move {
             // Check the pool first
-            if include_pending {
-                if let Some(tx) =
-                    RpcNodeCore::pool(self).get_transaction_by_sender_and_nonce(sender, nonce)
-                {
-                    let transaction = tx.transaction.clone_into_consensus();
-                    return Ok(Some(self.tx_resp_builder().fill_pending(transaction)?));
-                }
-            }
+            if include_pending &&
+                let Some(tx) =
+                    RpcNodeCore::pool(self).get_transaction_by_sender_and_nonce(sender, nonce)
+            {
+                let transaction = tx.transaction.clone_into_consensus();
+                return Ok(Some(self.tx_resp_builder().fill_pending(transaction)?));
+            }

             // Check if the sender is a contract
@@ -375,10 +374,10 @@ pub trait EthTransactions: LoadTransaction<Provider: BlockReaderIdExt> {
         Self: LoadBlock,
     {
         async move {
-            if let Some(block) = self.recovered_block(block_id).await? {
-                if let Some(tx) = block.body().transactions().get(index) {
-                    return Ok(Some(tx.encoded_2718().into()))
-                }
-            }
+            if let Some(block) = self.recovered_block(block_id).await? &&
+                let Some(tx) = block.body().transactions().get(index)
+            {
+                return Ok(Some(tx.encoded_2718().into()))
+            }

             Ok(None)
@@ -100,11 +100,11 @@ where
     {
         let size = value.size();

-        if self.cache.limiter().is_over_the_limit(self.cache.len() + 1) {
-            if let Some((_, evicted)) = self.cache.pop_oldest() {
-                // update tracked memory with the evicted value
-                self.memory_usage = self.memory_usage.saturating_sub(evicted.size());
-            }
-        }
+        if self.cache.limiter().is_over_the_limit(self.cache.len() + 1) &&
+            let Some((_, evicted)) = self.cache.pop_oldest()
+        {
+            // update tracked memory with the evicted value
+            self.memory_usage = self.memory_usage.saturating_sub(evicted.size());
+        }

         if self.cache.insert(key, value) {
@@ -234,13 +234,13 @@ pub async fn fee_history_cache_new_blocks_task<St, Provider, N>(
     let mut fetch_missing_block = Fuse::terminated();

     loop {
-        if fetch_missing_block.is_terminated() {
-            if let Some(block_number) = missing_blocks.pop_front() {
-                trace!(target: "rpc::fee", ?block_number, "Fetching missing block for fee history cache");
-                if let Ok(Some(hash)) = provider.block_hash(block_number) {
-                    // fetch missing block
-                    fetch_missing_block = cache.get_block_and_receipts(hash).boxed().fuse();
-                }
-            }
+        if fetch_missing_block.is_terminated() &&
+            let Some(block_number) = missing_blocks.pop_front()
+        {
+            trace!(target: "rpc::fee", ?block_number, "Fetching missing block for fee history cache");
+            if let Ok(Some(hash)) = provider.block_hash(block_number) {
+                // fetch missing block
+                fetch_missing_block = cache.get_block_and_receipts(hash).boxed().fuse();
+            }
         }

@@ -204,10 +204,10 @@ where
         };

         // constrain to the max price
-        if let Some(max_price) = self.oracle_config.max_price {
-            if price > max_price {
-                price = max_price;
-            }
-        }
+        if let Some(max_price) = self.oracle_config.max_price &&
+            price > max_price
+        {
+            price = max_price;
+        }

         inner.last_price = GasPriceOracleResult { block_hash: header.hash(), price };
@@ -254,10 +254,10 @@ where
         };

         // ignore transactions with a tip under the configured threshold
-        if let Some(ignore_under) = self.ignore_price {
-            if effective_tip < Some(ignore_under) {
-                continue
-            }
-        }
+        if let Some(ignore_under) = self.ignore_price &&
+            effective_tip < Some(ignore_under)
+        {
+            continue
+        }

         // check if the sender was the coinbase, if so, ignore
@@ -338,10 +338,10 @@ where
         }

         // constrain to the max price
-        if let Some(max_price) = self.oracle_config.max_price {
-            if suggestion > max_price {
-                suggestion = max_price;
-            }
-        }
+        if let Some(max_price) = self.oracle_config.max_price &&
+            suggestion > max_price
+        {
+            suggestion = max_price;
+        }

         inner.last_price = GasPriceOracleResult { block_hash: header.hash(), price: suggestion };
@@ -501,11 +501,11 @@ where
             .transpose()?
             .flatten();

-        if let Some(f) = from {
-            if f > info.best_number {
-                // start block higher than local head, can return empty
-                return Ok(Vec::new());
-            }
-        }
+        if let Some(f) = from &&
+            f > info.best_number
+        {
+            // start block higher than local head, can return empty
+            return Ok(Vec::new());
+        }

         let (from_block_number, to_block_number) =
@@ -658,22 +658,23 @@ where
                 // size check but only if range is multiple blocks, so we always return all
                 // logs of a single block
                 let is_multi_block_range = from_block != to_block;
-                if let Some(max_logs_per_response) = limits.max_logs_per_response {
-                    if is_multi_block_range && all_logs.len() > max_logs_per_response {
-                        debug!(
-                            target: "rpc::eth::filter",
-                            logs_found = all_logs.len(),
-                            max_logs_per_response,
-                            from_block,
-                            to_block = num_hash.number.saturating_sub(1),
-                            "Query exceeded max logs per response limit"
-                        );
-                        return Err(EthFilterError::QueryExceedsMaxResults {
-                            max_logs: max_logs_per_response,
-                            from_block,
-                            to_block: num_hash.number.saturating_sub(1),
-                        });
-                    }
+                if let Some(max_logs_per_response) = limits.max_logs_per_response &&
+                    is_multi_block_range &&
+                    all_logs.len() > max_logs_per_response
+                {
+                    debug!(
+                        target: "rpc::eth::filter",
+                        logs_found = all_logs.len(),
+                        max_logs_per_response,
+                        from_block,
+                        to_block = num_hash.number.saturating_sub(1),
+                        "Query exceeded max logs per response limit"
+                    );
+                    return Err(EthFilterError::QueryExceedsMaxResults {
+                        max_logs: max_logs_per_response,
+                        from_block,
+                        to_block: num_hash.number.saturating_sub(1),
+                    });
                 }

@@ -490,14 +490,14 @@ where
         let mut maybe_traces =
             maybe_traces.map(|traces| traces.into_iter().flatten().collect::<Vec<_>>());

-        if let (Some(block), Some(traces)) = (maybe_block, maybe_traces.as_mut()) {
-            if let Some(base_block_reward) = self.calculate_base_block_reward(block.header())? {
-                traces.extend(self.extract_reward_traces(
-                    block.header(),
-                    block.body().ommers(),
-                    base_block_reward,
-                ));
-            }
-        }
+        if let (Some(block), Some(traces)) = (maybe_block, maybe_traces.as_mut()) &&
+            let Some(base_block_reward) = self.calculate_base_block_reward(block.header())?
+        {
+            traces.extend(self.extract_reward_traces(
+                block.header(),
+                block.body().ommers(),
+                base_block_reward,
+            ));
+        }

         Ok(maybe_traces)
@@ -143,10 +143,10 @@ where
             if self.disallow.contains(sender) {
                 return Err(ValidationApiError::Blacklist(*sender))
             }
-            if let Some(to) = tx.to() {
-                if self.disallow.contains(&to) {
-                    return Err(ValidationApiError::Blacklist(to))
-                }
-            }
+            if let Some(to) = tx.to() &&
+                self.disallow.contains(&to)
+            {
+                return Err(ValidationApiError::Blacklist(to))
+            }
         }
     }
@@ -334,10 +334,10 @@ where
             return Err(ValidationApiError::ProposerPayment)
         }

-        if let Some(block_base_fee) = block.header().base_fee_per_gas() {
-            if tx.effective_tip_per_gas(block_base_fee).unwrap_or_default() != 0 {
-                return Err(ValidationApiError::ProposerPayment)
-            }
-        }
+        if let Some(block_base_fee) = block.header().base_fee_per_gas() &&
+            tx.effective_tip_per_gas(block_base_fee).unwrap_or_default() != 0
+        {
+            return Err(ValidationApiError::ProposerPayment)
+        }

         Ok(())
@@ -73,16 +73,15 @@ impl<Provider> StageSetBuilder<Provider> {

     fn upsert_stage_state(&mut self, stage: Box<dyn Stage<Provider>>, added_at_index: usize) {
         let stage_id = stage.id();
-        if self.stages.insert(stage.id(), StageEntry { stage, enabled: true }).is_some() {
-            if let Some(to_remove) = self
-                .order
-                .iter()
-                .enumerate()
-                .find(|(i, id)| *i != added_at_index && **id == stage_id)
-                .map(|(i, _)| i)
-            {
-                self.order.remove(to_remove);
-            }
-        }
+        if self.stages.insert(stage.id(), StageEntry { stage, enabled: true }).is_some() &&
+            let Some(to_remove) = self
+                .order
+                .iter()
+                .enumerate()
+                .find(|(i, id)| *i != added_at_index && **id == stage_id)
+                .map(|(i, _)| i)
+        {
+            self.order.remove(to_remove);
+        }
     }

@@ -264,10 +263,10 @@ impl<Provider> StageSetBuilder<Provider> {
     pub fn build(mut self) -> Vec<Box<dyn Stage<Provider>>> {
         let mut stages = Vec::new();
         for id in &self.order {
-            if let Some(entry) = self.stages.remove(id) {
-                if entry.enabled {
-                    stages.push(entry.stage);
-                }
-            }
+            if let Some(entry) = self.stages.remove(id) &&
+                entry.enabled
+            {
+                stages.push(entry.stage);
+            }
         }
         stages
@@ -702,11 +702,10 @@ mod tests {

                 // Validate sequentiality only after prev progress,
                 // since the data before is mocked and can contain gaps
-                if number > prev_progress {
-                    if let Some(prev_key) = prev_number {
-                        assert_eq!(prev_key + 1, number, "Body entries must be sequential");
-                    }
-                }
+                if number > prev_progress
+                    && let Some(prev_key) = prev_number {
+                    assert_eq!(prev_key + 1, number, "Body entries must be sequential");
+                }

                 // Validate that the current entry is below or equals to the highest allowed block
                 assert!(
@@ -150,18 +150,17 @@ where
             return Poll::Ready(Ok(()));
         }

-        if self.stream.is_none() {
-            if let Some(source) = self.source.clone() {
-                self.stream.replace(source.create(input)?);
-            }
-        }
-        if let Some(stream) = &mut self.stream {
-            if let Some(next) = ready!(stream.poll_next_unpin(cx))
-                .transpose()
-                .map_err(|e| StageError::Fatal(e.into()))?
-            {
-                self.item.replace(next);
-            }
-        }
+        if self.stream.is_none() &&
+            let Some(source) = self.source.clone()
+        {
+            self.stream.replace(source.create(input)?);
+        }
+        if let Some(stream) = &mut self.stream &&
+            let Some(next) = ready!(stream.poll_next_unpin(cx))
+                .transpose()
+                .map_err(|e| StageError::Fatal(e.into()))?
+        {
+            self.item.replace(next);
+        }

         Poll::Ready(Ok(()))
@@ -546,11 +545,10 @@ mod tests {

                 // Validate sequentiality only after prev progress,
                 // since the data before is mocked and can contain gaps
-                if number > prev_progress {
-                    if let Some(prev_key) = prev_number {
-                        assert_eq!(prev_key + 1, number, "Body entries must be sequential");
-                    }
-                }
+                if number > prev_progress
+                    && let Some(prev_key) = prev_number {
+                    assert_eq!(prev_key + 1, number, "Body entries must be sequential");
+                }

                 // Validate that the current entry is below or equals to the highest allowed block
                 assert!(
@@ -145,19 +145,18 @@ where

         let mut cursor_header_numbers =
             provider.tx_ref().cursor_write::<RawTable<tables::HeaderNumbers>>()?;
-        let mut first_sync = false;

         // If we only have the genesis block hash, then we are at first sync, and we can remove it,
         // add it to the collector and use tx.append on all hashes.
-        if provider.tx_ref().entries::<RawTable<tables::HeaderNumbers>>()? == 1 {
-            if let Some((hash, block_number)) = cursor_header_numbers.last()? {
-                if block_number.value()? == 0 {
-                    self.hash_collector.insert(hash.key()?, 0)?;
-                    cursor_header_numbers.delete_current()?;
-                    first_sync = true;
-                }
-            }
-        }
+        let first_sync = if provider.tx_ref().entries::<RawTable<tables::HeaderNumbers>>()? == 1 &&
+            let Some((hash, block_number)) = cursor_header_numbers.last()? &&
+            block_number.value()? == 0
+        {
+            self.hash_collector.insert(hash.key()?, 0)?;
+            cursor_header_numbers.delete_current()?;
+            true
+        } else {
+            false
+        };

         // Since ETL sorts all entries by hashes, we are either appending (first sync) or inserting
         // in order (further syncs).
@@ -67,23 +67,22 @@ where
                 )
             })
             .transpose()?
-            .flatten()
+            .flatten() &&
+            target_prunable_block > input.checkpoint().block_number
         {
-            if target_prunable_block > input.checkpoint().block_number {
-                input.checkpoint = Some(StageCheckpoint::new(target_prunable_block));
+            input.checkpoint = Some(StageCheckpoint::new(target_prunable_block));

-                // Save prune checkpoint only if we don't have one already.
-                // Otherwise, pruner may skip the unpruned range of blocks.
-                if provider.get_prune_checkpoint(PruneSegment::AccountHistory)?.is_none() {
-                    provider.save_prune_checkpoint(
-                        PruneSegment::AccountHistory,
-                        PruneCheckpoint {
-                            block_number: Some(target_prunable_block),
-                            tx_number: None,
-                            prune_mode,
-                        },
-                    )?;
-                }
-            }
+            // Save prune checkpoint only if we don't have one already.
+            // Otherwise, pruner may skip the unpruned range of blocks.
+            if provider.get_prune_checkpoint(PruneSegment::AccountHistory)?.is_none() {
+                provider.save_prune_checkpoint(
+                    PruneSegment::AccountHistory,
+                    PruneCheckpoint {
+                        block_number: Some(target_prunable_block),
+                        tx_number: None,
+                        prune_mode,
+                    },
+                )?;
+            }
         }

@@ -70,23 +70,22 @@ where
                 )
             })
             .transpose()?
-            .flatten()
+            .flatten() &&
+            target_prunable_block > input.checkpoint().block_number
         {
-            if target_prunable_block > input.checkpoint().block_number {
-                input.checkpoint = Some(StageCheckpoint::new(target_prunable_block));
+            input.checkpoint = Some(StageCheckpoint::new(target_prunable_block));

-                // Save prune checkpoint only if we don't have one already.
-                // Otherwise, pruner may skip the unpruned range of blocks.
-                if provider.get_prune_checkpoint(PruneSegment::StorageHistory)?.is_none() {
-                    provider.save_prune_checkpoint(
-                        PruneSegment::StorageHistory,
-                        PruneCheckpoint {
-                            block_number: Some(target_prunable_block),
-                            tx_number: None,
-                            prune_mode,
-                        },
-                    )?;
-                }
-            }
+            // Save prune checkpoint only if we don't have one already.
+            // Otherwise, pruner may skip the unpruned range of blocks.
+            if provider.get_prune_checkpoint(PruneSegment::StorageHistory)?.is_none() {
+                provider.save_prune_checkpoint(
+                    PruneSegment::StorageHistory,
+                    PruneCheckpoint {
+                        block_number: Some(target_prunable_block),
+                        tx_number: None,
+                        prune_mode,
+                    },
+                )?;
+            }
         }

@@ -88,28 +88,27 @@ where
                 )
             })
             .transpose()?
-            .flatten()
+            .flatten() &&
+            target_prunable_block > input.checkpoint().block_number
         {
-            if target_prunable_block > input.checkpoint().block_number {
-                input.checkpoint = Some(StageCheckpoint::new(target_prunable_block));
+            input.checkpoint = Some(StageCheckpoint::new(target_prunable_block));

-                // Save prune checkpoint only if we don't have one already.
-                // Otherwise, pruner may skip the unpruned range of blocks.
-                if provider.get_prune_checkpoint(PruneSegment::TransactionLookup)?.is_none() {
-                    let target_prunable_tx_number = provider
-                        .block_body_indices(target_prunable_block)?
-                        .ok_or(ProviderError::BlockBodyIndicesNotFound(target_prunable_block))?
-                        .last_tx_num();
-
-                    provider.save_prune_checkpoint(
-                        PruneSegment::TransactionLookup,
-                        PruneCheckpoint {
-                            block_number: Some(target_prunable_block),
-                            tx_number: Some(target_prunable_tx_number),
-                            prune_mode,
-                        },
-                    )?;
-                }
-            }
+            // Save prune checkpoint only if we don't have one already.
+            // Otherwise, pruner may skip the unpruned range of blocks.
+            if provider.get_prune_checkpoint(PruneSegment::TransactionLookup)?.is_none() {
+                let target_prunable_tx_number = provider
+                    .block_body_indices(target_prunable_block)?
+                    .ok_or(ProviderError::BlockBodyIndicesNotFound(target_prunable_block))?
+                    .last_tx_num();

+                provider.save_prune_checkpoint(
+                    PruneSegment::TransactionLookup,
+                    PruneCheckpoint {
+                        block_number: Some(target_prunable_block),
+                        tx_number: Some(target_prunable_tx_number),
+                        prune_mode,
+                    },
+                )?;
+            }
         }
         if input.target_reached() {
@@ -213,10 +212,10 @@ where
             // Delete all transactions that belong to this block
             for tx_id in body.tx_num_range() {
                 // First delete the transaction and hash to id mapping
-                if let Some(transaction) = static_file_provider.transaction_by_id(tx_id)? {
-                    if tx_hash_number_cursor.seek_exact(transaction.trie_hash())?.is_some() {
-                        tx_hash_number_cursor.delete_current()?;
-                    }
-                }
+                if let Some(transaction) = static_file_provider.transaction_by_id(tx_id)? &&
+                    tx_hash_number_cursor.seek_exact(transaction.trie_hash())?.is_some()
+                {
+                    tx_hash_number_cursor.delete_current()?;
+                }
             }
         }
@@ -538,11 +537,10 @@ mod tests {
             })
             .transpose()
            .expect("prune target block for transaction lookup")
-            .flatten()
+            .flatten() &&
+            target_prunable_block > input.checkpoint().block_number
         {
-            if target_prunable_block > input.checkpoint().block_number {
-                input.checkpoint = Some(StageCheckpoint::new(target_prunable_block));
-            }
+            input.checkpoint = Some(StageCheckpoint::new(target_prunable_block));
         }
         let start_block = input.next_block();
         let end_block = output.checkpoint.block_number;
@@ -156,12 +156,11 @@ where

             // If it's not the first sync, there might an existing shard already, so we need to
             // merge it with the one coming from the collector
-            if !append_only {
-                if let Some((_, last_database_shard)) =
-                    write_cursor.seek_exact(sharded_key_factory(current_partial, u64::MAX))?
-                {
-                    current_list.extend(last_database_shard.iter());
-                }
-            }
+            if !append_only &&
+                let Some((_, last_database_shard)) =
+                    write_cursor.seek_exact(sharded_key_factory(current_partial, u64::MAX))?
+            {
+                current_list.extend(last_database_shard.iter());
+            }
         }

@@ -265,10 +264,10 @@ where
     // To be extra safe, we make sure that the last tx num matches the last block from its indices.
     // If not, get it.
     loop {
-        if let Some(indices) = provider.block_body_indices(last_block)? {
-            if indices.last_tx_num() <= last_tx_num {
-                break
-            }
-        }
+        if let Some(indices) = provider.block_body_indices(last_block)? &&
+            indices.last_tx_num() <= last_tx_num
+        {
+            break
+        }
         if last_block == 0 {
             break
@@ -23,11 +23,11 @@ pub fn maybe_generate_tests(
     let mut iter = args.into_iter().peekable();

     // we check if there's a crate argument which is used from inside the codecs crate directly
-    if let Some(arg) = iter.peek() {
-        if arg.to_string() == "crate" {
-            is_crate = true;
-            iter.next();
-        }
-    }
+    if let Some(arg) = iter.peek() &&
+        arg.to_string() == "crate"
+    {
+        is_crate = true;
+        iter.next();
+    }

     for arg in iter {
@@ -171,28 +171,14 @@ fn load_field_from_segments(
///
/// If so, we use another impl to code/decode its data.
fn should_use_alt_impl(ftype: &str, segment: &syn::PathSegment) -> bool {
-    if ftype == "Vec" || ftype == "Option" {
-        if let syn::PathArguments::AngleBracketed(ref args) = segment.arguments {
-            if let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = args.args.last() {
-                if let (Some(path), 1) =
-                    (arg_path.path.segments.first(), arg_path.path.segments.len())
-                {
-                    if [
-                        "B256",
-                        "Address",
-                        "Address",
-                        "Bloom",
-                        "TxHash",
-                        "BlockHash",
-                        "CompactPlaceholder",
-                    ]
-                    .contains(&path.ident.to_string().as_str())
-                    {
-                        return true
-                    }
-                }
-            }
-        }
-    }
+    if (ftype == "Vec" || ftype == "Option") &&
+        let syn::PathArguments::AngleBracketed(ref args) = segment.arguments &&
+        let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = args.args.last() &&
+        let (Some(path), 1) = (arg_path.path.segments.first(), arg_path.path.segments.len()) &&
+        ["B256", "Address", "Address", "Bloom", "TxHash", "BlockHash", "CompactPlaceholder"]
+        .contains(&path.ident.to_string().as_str())
+    {
+        return true
    }
    false
}
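
Hunks like `should_use_alt_impl` above chain several pattern matches at once. Each successful `let` link brings its bindings into scope for every link to its right, which is what lets a four-deep pyramid flatten into one condition without intermediate variables. A reduced sketch of that shape, with invented types:

    struct Inner {
        n: u64,
    }

    struct Outer {
        inner: Option<Inner>,
    }

    fn matches_one(outer: Option<&Outer>) -> bool {
        // `o`, bound by the first link, feeds the second pattern; `i` feeds
        // the final boolean check.
        if let Some(o) = outer &&
            let Some(i) = o.inner.as_ref() &&
            i.n == 1
        {
            return true
        }
        false
    }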
@@ -69,8 +69,8 @@ pub fn derive_zstd(input: TokenStream) -> TokenStream {
    let mut decompressor = None;

    for attr in &input.attrs {
-        if attr.path().is_ident("reth_zstd") {
-            if let Err(err) = attr.parse_nested_meta(|meta| {
+        if attr.path().is_ident("reth_zstd") &&
+            let Err(err) = attr.parse_nested_meta(|meta| {
                if meta.path.is_ident("compressor") {
                    let value = meta.value()?;
                    let path: syn::Path = value.parse()?;
@@ -83,9 +83,9 @@ pub fn derive_zstd(input: TokenStream) -> TokenStream {
                    return Err(meta.error("unsupported attribute"))
                }
                Ok(())
-            }) {
-                return err.to_compile_error().into()
-            }
+            })
+            {
+                return err.to_compile_error().into()
            }
        }

@@ -44,17 +44,18 @@ impl StorageLock {
    #[cfg(any(test, not(feature = "disable-lock")))]
    fn try_acquire_file_lock(path: &Path) -> Result<Self, StorageLockError> {
        let file_path = path.join(LOCKFILE_NAME);
-        if let Some(process_lock) = ProcessUID::parse(&file_path)? {
-            if process_lock.pid != (process::id() as usize) && process_lock.is_active() {
-                reth_tracing::tracing::error!(
-                    target: "reth::db::lockfile",
-                    path = ?file_path,
-                    pid = process_lock.pid,
-                    start_time = process_lock.start_time,
-                    "Storage lock already taken."
-                );
-                return Err(StorageLockError::Taken(process_lock.pid))
-            }
+        if let Some(process_lock) = ProcessUID::parse(&file_path)? &&
+            process_lock.pid != (process::id() as usize) &&
+            process_lock.is_active()
+        {
+            reth_tracing::tracing::error!(
+                target: "reth::db::lockfile",
+                path = ?file_path,
+                pid = process_lock.pid,
+                start_time = process_lock.start_time,
+                "Storage lock already taken."
+            );
+            return Err(StorageLockError::Taken(process_lock.pid))
        }

        Ok(Self(Arc::new(StorageLockInner::new(file_path)?)))
@@ -141,15 +142,15 @@ impl ProcessUID {

    /// Parses [`Self`] from a file.
    fn parse(path: &Path) -> Result<Option<Self>, StorageLockError> {
-        if path.exists() {
-            if let Ok(contents) = reth_fs_util::read_to_string(path) {
-                let mut lines = contents.lines();
-                if let (Some(Ok(pid)), Some(Ok(start_time))) = (
-                    lines.next().map(str::trim).map(str::parse),
-                    lines.next().map(str::trim).map(str::parse),
-                ) {
-                    return Ok(Some(Self { pid, start_time }));
-                }
-            }
-        }
+        if path.exists() &&
+            let Ok(contents) = reth_fs_util::read_to_string(path)
+        {
+            let mut lines = contents.lines();
+            if let (Some(Ok(pid)), Some(Ok(start_time))) = (
+                lines.next().map(str::trim).map(str::parse),
+                lines.next().map(str::trim).map(str::parse),
+            ) {
+                return Ok(Some(Self { pid, start_time }));
+            }
+        }
        Ok(None)
@@ -33,25 +33,22 @@ pub fn iter_static_files(path: &Path) -> Result<SortedStaticFiles, NippyJarError
        .map_err(|err| NippyJarError::Custom(err.to_string()))?
        .filter_map(Result::ok);
    for entry in entries {
-        if entry.metadata().is_ok_and(|metadata| metadata.is_file()) {
-            if let Some((segment, _)) =
+        if entry.metadata().is_ok_and(|metadata| metadata.is_file()) &&
+            let Some((segment, _)) =
                StaticFileSegment::parse_filename(&entry.file_name().to_string_lossy())
        {
            let jar = NippyJar::<SegmentHeader>::load(&entry.path())?;

-            let (block_range, tx_range) = (
-                jar.user_header().block_range().copied(),
-                jar.user_header().tx_range().copied(),
-            );
+            let (block_range, tx_range) =
+                (jar.user_header().block_range().copied(), jar.user_header().tx_range().copied());

            if let Some(block_range) = block_range {
                match static_files.entry(segment) {
                    Entry::Occupied(mut entry) => {
                        entry.get_mut().push((block_range, tx_range));
                    }
                    Entry::Vacant(entry) => {
                        entry.insert(vec![(block_range, tx_range)]);
                    }
-                }
                }
            }
        }
@@ -17,7 +17,7 @@ pub trait TableObject: Sized {
        _: *const ffi::MDBX_txn,
        data_val: ffi::MDBX_val,
    ) -> Result<Self, Error> {
-        let s = slice::from_raw_parts(data_val.iov_base as *const u8, data_val.iov_len);
+        let s = unsafe { slice::from_raw_parts(data_val.iov_base as *const u8, data_val.iov_len) };
        Self::decode(s)
    }
}
@@ -32,7 +32,7 @@ impl TableObject for Cow<'_, [u8]> {
        _txn: *const ffi::MDBX_txn,
        data_val: ffi::MDBX_val,
    ) -> Result<Self, Error> {
-        let s = slice::from_raw_parts(data_val.iov_base as *const u8, data_val.iov_len);
+        let s = unsafe { slice::from_raw_parts(data_val.iov_base as *const u8, data_val.iov_len) };

        #[cfg(feature = "return-borrowed")]
        {
@@ -476,7 +476,7 @@ impl Transaction<RW> {
    /// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi
    /// BEFORE calling this function.
    pub unsafe fn drop_db(&self, db: Database) -> Result<()> {
-        mdbx_result(self.txn_execute(|txn| ffi::mdbx_drop(txn, db.dbi(), true))?)?;
+        mdbx_result(self.txn_execute(|txn| unsafe { ffi::mdbx_drop(txn, db.dbi(), true) })?)?;

        Ok(())
    }
@@ -489,7 +489,7 @@ impl Transaction<RO> {
    /// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi
    /// BEFORE calling this function.
    pub unsafe fn close_db(&self, db: Database) -> Result<()> {
-        mdbx_result(ffi::mdbx_dbi_close(self.env().env_ptr(), db.dbi()))?;
+        mdbx_result(unsafe { ffi::mdbx_dbi_close(self.env().env_ptr(), db.dbi()) })?;

        Ok(())
    }
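
The `unsafe { ... }` wrappers in the four libmdbx hunks above are the other edition-driven change in this diff: Rust 2024 enables the `unsafe_op_in_unsafe_fn` lint by default, so the body of an `unsafe fn` no longer acts as one implicit unsafe block. A minimal sketch:

    // Edition 2024: the caller-facing contract (`unsafe fn`) and the unsafe
    // operations inside the body are acknowledged separately.
    unsafe fn first_byte(ptr: *const u8) -> u8 {
        // The raw-pointer dereference needs its own `unsafe` block; a bare
        // `*ptr` here trips `unsafe_op_in_unsafe_fn`.
        unsafe { *ptr }
    }

    fn main() {
        let data = [7u8, 8, 9];
        // Call sites still need their own unsafe block, as before.
        let b = unsafe { first_byte(data.as_ptr()) };
        assert_eq!(b, 7);
    }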
@@ -309,10 +309,10 @@ impl<H: NippyJarHeader> NippyJar<H> {
            return Err(NippyJarError::ColumnLenMismatch(self.columns, columns.len()))
        }

-        if let Some(compression) = &self.compressor {
-            if !compression.is_ready() {
-                return Err(NippyJarError::CompressorNotReady)
-            }
+        if let Some(compression) = &self.compressor &&
+            !compression.is_ready()
+        {
+            return Err(NippyJarError::CompressorNotReady)
        }

        Ok(())
@@ -404,10 +404,10 @@ impl<H: NippyJarHeader> NippyJarWriter<H> {

        // Appends new offsets to disk
        for offset in self.offsets.drain(..) {
-            if let Some(last_offset_ondisk) = last_offset_ondisk.take() {
-                if last_offset_ondisk == offset {
-                    continue
-                }
+            if let Some(last_offset_ondisk) = last_offset_ondisk.take() &&
+                last_offset_ondisk == offset
+            {
+                continue
            }
            self.offsets_file.write_all(&offset.to_le_bytes())?;
        }
@@ -594,10 +594,10 @@ impl<N: ProviderNodeTypes> StateProviderFactory for BlockchainProvider<N> {
    }

    fn pending_state_by_hash(&self, block_hash: B256) -> ProviderResult<Option<StateProviderBox>> {
-        if let Some(pending) = self.canonical_in_memory_state.pending_state() {
-            if pending.hash() == block_hash {
-                return Ok(Some(Box::new(self.block_state_provider(&pending)?)));
-            }
+        if let Some(pending) = self.canonical_in_memory_state.pending_state() &&
+            pending.hash() == block_hash
+        {
+            return Ok(Some(Box::new(self.block_state_provider(&pending)?)));
        }
        Ok(None)
    }
@@ -965,26 +965,26 @@ mod tests {
    ) {
        let hook_provider = provider.clone();
        provider.database.db_ref().set_post_transaction_hook(Box::new(move || {
-            if let Some(state) = hook_provider.canonical_in_memory_state.head_state() {
-                if state.anchor().number + 1 == block_number {
-                    let mut lowest_memory_block =
-                        state.parent_state_chain().last().expect("qed").block();
-                    let num_hash = lowest_memory_block.recovered_block().num_hash();
+            if let Some(state) = hook_provider.canonical_in_memory_state.head_state() &&
+                state.anchor().number + 1 == block_number
+            {
+                let mut lowest_memory_block =
+                    state.parent_state_chain().last().expect("qed").block();
+                let num_hash = lowest_memory_block.recovered_block().num_hash();

                let mut execution_output = (*lowest_memory_block.execution_output).clone();
                execution_output.first_block = lowest_memory_block.recovered_block().number;
                lowest_memory_block.execution_output = Arc::new(execution_output);

                // Push to disk
                let provider_rw = hook_provider.database_provider_rw().unwrap();
                UnifiedStorageWriter::from(&provider_rw, &hook_provider.static_file_provider())
                    .save_blocks(vec![lowest_memory_block])
                    .unwrap();
                UnifiedStorageWriter::commit(provider_rw).unwrap();

                // Remove from memory
                hook_provider.canonical_in_memory_state.remove_persisted_blocks(num_hash);
-                }
            }
        }));
    }
@@ -536,10 +536,10 @@ impl<N: ProviderNodeTypes> ConsistentProvider<N> {

        // If the transaction number is less than the first in-memory transaction number, make a
        // database lookup
-        if let HashOrNumber::Number(id) = id {
-            if id < in_memory_tx_num {
-                return fetch_from_db(provider)
-            }
+        if let HashOrNumber::Number(id) = id &&
+            id < in_memory_tx_num
+        {
+            return fetch_from_db(provider)
        }

        // Iterate from the lowest block to the highest
@@ -816,14 +816,14 @@ impl<N: ProviderNodeTypes> BlockReader for ConsistentProvider<N> {
        hash: B256,
        source: BlockSource,
    ) -> ProviderResult<Option<Self::Block>> {
-        if matches!(source, BlockSource::Canonical | BlockSource::Any) {
-            if let Some(block) = self.get_in_memory_or_storage_by_block(
+        if matches!(source, BlockSource::Canonical | BlockSource::Any) &&
+            let Some(block) = self.get_in_memory_or_storage_by_block(
                hash.into(),
                |db_provider| db_provider.find_block_by_hash(hash, BlockSource::Canonical),
                |block_state| Ok(Some(block_state.block_ref().recovered_block().clone_block())),
-            )? {
-                return Ok(Some(block))
-            }
+            )?
+        {
+            return Ok(Some(block))
        }

        if matches!(source, BlockSource::Pending | BlockSource::Any) {
@@ -1133,14 +1133,14 @@ impl<N: ProviderNodeTypes> ReceiptProviderIdExt for ConsistentProvider<N> {
        match block {
            BlockId::Hash(rpc_block_hash) => {
                let mut receipts = self.receipts_by_block(rpc_block_hash.block_hash.into())?;
-                if receipts.is_none() && !rpc_block_hash.require_canonical.unwrap_or(false) {
-                    if let Some(state) = self
+                if receipts.is_none() &&
+                    !rpc_block_hash.require_canonical.unwrap_or(false) &&
+                    let Some(state) = self
                        .head_block
                        .as_ref()
                        .and_then(|b| b.block_on_chain(rpc_block_hash.block_hash.into()))
                    {
                        receipts = Some(state.executed_block_receipts());
-                    }
                }
                Ok(receipts)
            }
@@ -67,10 +67,10 @@ where
        //
        // To ensure this doesn't happen, we just have to make sure that we fetch from the same
        // data source that we used during initialization. In this case, that is static files
-        if let Some((hash, number)) = self.tip {
-            if provider_ro.sealed_header(number)?.is_none_or(|header| header.hash() != hash) {
-                return Err(ConsistentViewError::Reorged { block: hash }.into())
-            }
+        if let Some((hash, number)) = self.tip &&
+            provider_ro.sealed_header(number)?.is_none_or(|header| header.hash() != hash)
+        {
+            return Err(ConsistentViewError::Reorged { block: hash }.into())
        }

        Ok(provider_ro)
@@ -1020,12 +1020,12 @@ impl<TX: DbTx + 'static, N: NodeTypesForProvider> HeaderProvider for DatabasePro
    }

    fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult<Option<U256>> {
-        if self.chain_spec.is_paris_active_at_block(number) {
-            if let Some(td) = self.chain_spec.final_paris_total_difficulty() {
-                // if this block is higher than the final paris(merge) block, return the final paris
-                // difficulty
-                return Ok(Some(td))
-            }
+        if self.chain_spec.is_paris_active_at_block(number) &&
+            let Some(td) = self.chain_spec.final_paris_total_difficulty()
+        {
+            // if this block is higher than the final paris(merge) block, return the final paris
+            // difficulty
+            return Ok(Some(td))
        }

        self.static_file_provider.get_with_static_file_or_database(
@@ -1180,25 +1180,25 @@ impl<TX: DbTx + 'static, N: NodeTypesForProvider> BlockReader for DatabaseProvid
    /// If the header is found, but the transactions either do not exist, or are not indexed, this
    /// will return None.
    fn block(&self, id: BlockHashOrNumber) -> ProviderResult<Option<Self::Block>> {
-        if let Some(number) = self.convert_hash_or_number(id)? {
-            if let Some(header) = self.header_by_number(number)? {
-                // If the body indices are not found, this means that the transactions either do not
-                // exist in the database yet, or they do exit but are not indexed.
-                // If they exist but are not indexed, we don't have enough
-                // information to return the block anyways, so we return `None`.
-                let Some(transactions) = self.transactions_by_block(number.into())? else {
-                    return Ok(None)
-                };
+        if let Some(number) = self.convert_hash_or_number(id)? &&
+            let Some(header) = self.header_by_number(number)?
+        {
+            // If the body indices are not found, this means that the transactions either do not
+            // exist in the database yet, or they do exit but are not indexed.
+            // If they exist but are not indexed, we don't have enough
+            // information to return the block anyways, so we return `None`.
+            let Some(transactions) = self.transactions_by_block(number.into())? else {
+                return Ok(None)
+            };

            let body = self
                .storage
                .reader()
                .read_block_bodies(self, vec![(&header, transactions)])?
                .pop()
                .ok_or(ProviderError::InvalidStorageOutput)?;

            return Ok(Some(Self::Block::new(header, body)))
-            }
        }

        Ok(None)
@@ -1416,34 +1416,31 @@ impl<TX: DbTx + 'static, N: NodeTypesForProvider> TransactionsProvider for Datab
        tx_hash: TxHash,
    ) -> ProviderResult<Option<(Self::Transaction, TransactionMeta)>> {
        let mut transaction_cursor = self.tx.cursor_read::<tables::TransactionBlocks>()?;
-        if let Some(transaction_id) = self.transaction_id(tx_hash)? {
-            if let Some(transaction) = self.transaction_by_id_unhashed(transaction_id)? {
-                if let Some(block_number) =
-                    transaction_cursor.seek(transaction_id).map(|b| b.map(|(_, bn)| bn))?
-                {
-                    if let Some(sealed_header) = self.sealed_header(block_number)? {
+        if let Some(transaction_id) = self.transaction_id(tx_hash)? &&
+            let Some(transaction) = self.transaction_by_id_unhashed(transaction_id)? &&
+            let Some(block_number) =
+                transaction_cursor.seek(transaction_id).map(|b| b.map(|(_, bn)| bn))? &&
+            let Some(sealed_header) = self.sealed_header(block_number)?
+        {
            let (header, block_hash) = sealed_header.split();
            if let Some(block_body) = self.block_body_indices(block_number)? {
                // the index of the tx in the block is the offset:
                // len([start..tx_id])
                // NOTE: `transaction_id` is always `>=` the block's first
                // index
                let index = transaction_id - block_body.first_tx_num();

                let meta = TransactionMeta {
                    tx_hash,
                    index,
                    block_hash,
                    block_number,
                    base_fee: header.base_fee_per_gas(),
                    excess_blob_gas: header.excess_blob_gas(),
                    timestamp: header.timestamp(),
                };

                return Ok(Some((transaction, meta)))
-            }
-            }
-            }
            }
        }

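
The hunk above is the longest chain in the diff: four fallible lookups in one condition. Two properties make that rewrite safe. Links evaluate left to right and short-circuit, so a later scrutinee such as `self.sealed_header(block_number)?` only runs once every earlier link has matched, and `?` inside any link still propagates errors out of the enclosing function. A self-contained sketch of both behaviors (names and types invented):

    use std::collections::HashMap;
    use std::num::ParseIntError;

    // Stand-in for a fallible lookup such as a database read.
    fn load(map: &HashMap<u32, &str>, key: u32) -> Result<Option<i64>, ParseIntError> {
        map.get(&key).map(|raw| raw.parse::<i64>()).transpose()
    }

    fn sum_if_even(map: &HashMap<u32, &str>, key: u32) -> Result<Option<i64>, ParseIntError> {
        // The second `load` runs only if the first link matched, and a parse
        // error inside either call returns from `sum_if_even` via `?`.
        if let Some(first) = load(map, key)? &&
            let Some(second) = load(map, key + 1)? &&
            (first + second) % 2 == 0
        {
            return Ok(Some(first + second))
        }
        Ok(None)
    }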
@@ -1461,14 +1458,14 @@ impl<TX: DbTx + 'static, N: NodeTypesForProvider> TransactionsProvider for Datab
    ) -> ProviderResult<Option<Vec<Self::Transaction>>> {
        let mut tx_cursor = self.tx.cursor_read::<tables::Transactions<Self::Transaction>>()?;

-        if let Some(block_number) = self.convert_hash_or_number(id)? {
-            if let Some(body) = self.block_body_indices(block_number)? {
-                let tx_range = body.tx_num_range();
-                return if tx_range.is_empty() {
-                    Ok(Some(Vec::new()))
-                } else {
-                    Ok(Some(self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)?))
-                }
-            }
-        }
+        if let Some(block_number) = self.convert_hash_or_number(id)? &&
+            let Some(body) = self.block_body_indices(block_number)?
+        {
+            let tx_range = body.tx_num_range();
+            return if tx_range.is_empty() {
+                Ok(Some(Vec::new()))
+            } else {
+                Ok(Some(self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)?))
+            }
+        }
        Ok(None)
@@ -1543,14 +1540,14 @@ impl<TX: DbTx + 'static, N: NodeTypesForProvider> ReceiptProvider for DatabasePr
        &self,
        block: BlockHashOrNumber,
    ) -> ProviderResult<Option<Vec<Self::Receipt>>> {
-        if let Some(number) = self.convert_hash_or_number(block)? {
-            if let Some(body) = self.block_body_indices(number)? {
-                let tx_range = body.tx_num_range();
-                return if tx_range.is_empty() {
-                    Ok(Some(Vec::new()))
-                } else {
-                    self.receipts_by_tx_range(tx_range).map(Some)
-                }
-            }
-        }
+        if let Some(number) = self.convert_hash_or_number(block)? &&
+            let Some(body) = self.block_body_indices(number)?
+        {
+            let tx_range = body.tx_num_range();
+            return if tx_range.is_empty() {
+                Ok(Some(Vec::new()))
+            } else {
+                self.receipts_by_tx_range(tx_range).map(Some)
+            }
+        }
        Ok(None)
@@ -2000,10 +1997,10 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter

        for entry in storage {
            tracing::trace!(?address, ?entry.key, "Updating plain state storage");
-            if let Some(db_entry) = storages_cursor.seek_by_key_subkey(address, entry.key)? {
-                if db_entry.key == entry.key {
-                    storages_cursor.delete_current()?;
-                }
+            if let Some(db_entry) = storages_cursor.seek_by_key_subkey(address, entry.key)? &&
+                db_entry.key == entry.key
+            {
+                storages_cursor.delete_current()?;
            }

            if !entry.value.is_zero() {
@@ -2038,11 +2035,10 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
        for (hashed_slot, value) in storage.storage_slots_sorted() {
            let entry = StorageEntry { key: hashed_slot, value };
            if let Some(db_entry) =
-                hashed_storage_cursor.seek_by_key_subkey(*hashed_address, entry.key)?
+                hashed_storage_cursor.seek_by_key_subkey(*hashed_address, entry.key)? &&
+                db_entry.key == entry.key
            {
-                if db_entry.key == entry.key {
-                    hashed_storage_cursor.delete_current()?;
-                }
+                hashed_storage_cursor.delete_current()?;
            }

            if !entry.value.is_zero() {
@@ -158,10 +158,10 @@ impl<Provider: DBProvider + BlockHashReader> StateProvider
        storage_key: StorageKey,
    ) -> ProviderResult<Option<StorageValue>> {
        let mut cursor = self.tx().cursor_dup_read::<tables::PlainStorageState>()?;
-        if let Some(entry) = cursor.seek_by_key_subkey(account, storage_key)? {
-            if entry.key == storage_key {
-                return Ok(Some(entry.value))
-            }
+        if let Some(entry) = cursor.seek_by_key_subkey(account, storage_key)? &&
+            entry.key == storage_key
+        {
+            return Ok(Some(entry.value))
        }
        Ok(None)
    }
@@ -314,10 +314,10 @@ impl<N: NodePrimitives<SignedTx: Decompress + SignedTransaction, Receipt: Decomp
    }

    fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult<Option<Self::Receipt>> {
-        if let Some(tx_static_file) = &self.auxiliary_jar {
-            if let Some(num) = tx_static_file.transaction_id(hash)? {
-                return self.receipt(num)
-            }
+        if let Some(tx_static_file) = &self.auxiliary_jar &&
+            let Some(num) = tx_static_file.transaction_id(hash)?
+        {
+            return self.receipt(num)
        }
        Ok(None)
    }
@@ -950,12 +950,11 @@ impl<N: NodePrimitives> StaticFileProvider<N> {
            }
        }

-        if let Some((db_last_entry, _)) = db_cursor.last()? {
-            if highest_static_file_entry
+        if let Some((db_last_entry, _)) = db_cursor.last()? &&
+            highest_static_file_entry
                .is_none_or(|highest_entry| db_last_entry > highest_entry)
        {
            return Ok(None)
-            }
        }

@@ -1281,16 +1280,15 @@ impl<N: NodePrimitives> StaticFileProvider<N> {
            self.get_highest_static_file_block(segment)
        } else {
            self.get_highest_static_file_tx(segment)
-        } {
-            if block_or_tx_range.start <= static_file_upper_bound {
+        } && block_or_tx_range.start <= static_file_upper_bound
+        {
            let end = block_or_tx_range.end.min(static_file_upper_bound + 1);
            data.extend(fetch_from_static_file(
                self,
                block_or_tx_range.start..end,
                &mut predicate,
            )?);
            block_or_tx_range.start = end;
-            }
        }

        if block_or_tx_range.end > block_or_tx_range.start {
@@ -120,11 +120,10 @@ where
        }

        // Write withdrawals if any
-        if let Some(withdrawals) = body.withdrawals {
-            if !withdrawals.is_empty() {
-                withdrawals_cursor
-                    .append(block_number, &StoredBlockWithdrawals { withdrawals })?;
-            }
+        if let Some(withdrawals) = body.withdrawals &&
+            !withdrawals.is_empty()
+        {
+            withdrawals_cursor.append(block_number, &StoredBlockWithdrawals { withdrawals })?;
        }
    }

@@ -118,10 +118,10 @@ impl ReusableDecompressor {
            // source.
            if !reserved_upper_bound {
                reserved_upper_bound = true;
-                if let Some(upper_bound) = Decompressor::upper_bound(src) {
-                    if let Some(additional) = upper_bound.checked_sub(self.buf.capacity()) {
-                        break 'b additional
-                    }
+                if let Some(upper_bound) = Decompressor::upper_bound(src) &&
+                    let Some(additional) = upper_bound.checked_sub(self.buf.capacity())
+                {
+                    break 'b additional
                }
            }

@@ -229,21 +229,19 @@ pub async fn maintain_transaction_pool<N, Client, P, St, Tasks>(

                // check if we have a new finalized block
                if let Some(finalized) =
-                    last_finalized_block.update(client.finalized_block_number().ok().flatten())
-                {
-                    if let BlobStoreUpdates::Finalized(blobs) =
+                    last_finalized_block.update(client.finalized_block_number().ok().flatten()) &&
+                    let BlobStoreUpdates::Finalized(blobs) =
                        blob_store_tracker.on_finalized_block(finalized)
                {
                    metrics.inc_deleted_tracked_blobs(blobs.len());
                    // remove all finalized blobs from the blob store
                    pool.delete_blobs(blobs);
                    // and also do periodic cleanup
                    let pool = pool.clone();
                    task_spawner.spawn_blocking(Box::pin(async move {
                        debug!(target: "txpool", finalized_block = %finalized, "cleaning up blob store");
                        pool.cleanup_blobs();
                    }));
-                }
                }

                // outcomes of the futures we are waiting on
@@ -127,12 +127,12 @@ impl<T: TransactionOrdering> BestTransactions<T> {
        loop {
            match self.new_transaction_receiver.as_mut()?.try_recv() {
                Ok(tx) => {
-                    if let Some(last_priority) = &self.last_priority {
-                        if &tx.priority > last_priority {
-                            // we skip transactions if we already yielded a transaction with lower
-                            // priority
-                            return None
-                        }
+                    if let Some(last_priority) = &self.last_priority &&
+                        &tx.priority > last_priority
+                    {
+                        // we skip transactions if we already yielded a transaction with lower
+                        // priority
+                        return None
                    }
                    return Some(tx)
                }
@@ -612,10 +612,10 @@ where
            // A newly added transaction may be immediately discarded, so we need to
            // adjust the result here
            for res in &mut added {
-                if let Ok(AddedTransactionOutcome { hash, .. }) = res {
-                    if discarded_hashes.contains(hash) {
-                        *res = Err(PoolError::new(*hash, PoolErrorKind::DiscardedOnInsert))
-                    }
+                if let Ok(AddedTransactionOutcome { hash, .. }) = res &&
+                    discarded_hashes.contains(hash)
+                {
+                    *res = Err(PoolError::new(*hash, PoolErrorKind::DiscardedOnInsert))
                }
            }
        }
@@ -329,13 +329,13 @@ impl<T: TransactionOrdering> PendingPool<T> {
        &mut self,
        id: &TransactionId,
    ) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
-        if let Some(lowest) = self.independent_transactions.get(&id.sender) {
-            if lowest.transaction.nonce() == id.nonce {
-                self.independent_transactions.remove(&id.sender);
-                // mark the next as independent if it exists
-                if let Some(unlocked) = self.get(&id.descendant()) {
-                    self.independent_transactions.insert(id.sender, unlocked.clone());
-                }
-            }
-        }
+        if let Some(lowest) = self.independent_transactions.get(&id.sender) &&
+            lowest.transaction.nonce() == id.nonce
+        {
+            self.independent_transactions.remove(&id.sender);
+            // mark the next as independent if it exists
+            if let Some(unlocked) = self.get(&id.descendant()) {
+                self.independent_transactions.insert(id.sender, unlocked.clone());
+            }
+        }

@@ -954,11 +954,11 @@ impl<T: TransactionOrdering> TxPool<T> {
            Destination::Pool(move_to) => {
                debug_assert_ne!(&move_to, &current, "destination must be different");
                let moved = self.move_transaction(current, move_to, &id);
-                if matches!(move_to, SubPool::Pending) {
-                    if let Some(tx) = moved {
-                        trace!(target: "txpool", hash=%tx.transaction.hash(), "Promoted transaction to pending");
-                        outcome.promoted.push(tx);
-                    }
+                if matches!(move_to, SubPool::Pending) &&
+                    let Some(tx) = moved
+                {
+                    trace!(target: "txpool", hash=%tx.transaction.hash(), "Promoted transaction to pending");
+                    outcome.promoted.push(tx);
                }
            }
        }
@@ -1856,18 +1856,18 @@ impl<T: PoolTransaction> AllTransactions<T> {
        // overdraft
        let id = new_blob_tx.transaction_id;
        let mut descendants = self.descendant_txs_inclusive(&id).peekable();
-        if let Some((maybe_replacement, _)) = descendants.peek() {
-            if **maybe_replacement == new_blob_tx.transaction_id {
-                // replacement transaction
-                descendants.next();
+        if let Some((maybe_replacement, _)) = descendants.peek() &&
+            **maybe_replacement == new_blob_tx.transaction_id
+        {
+            // replacement transaction
+            descendants.next();

            // check if any of descendant blob transactions should be shifted into overdraft
            for (_, tx) in descendants {
                cumulative_cost += tx.transaction.cost();
                if tx.transaction.is_eip4844() && cumulative_cost > on_chain_balance {
                    // the transaction would shift
                    return Err(InsertErr::Overdraft { transaction: Arc::new(new_blob_tx) })
-                }
                }
            }
        }
@@ -54,7 +54,31 @@ pub fn mock_tx_pool() -> MockTxPool {

/// Sets the value for the field
macro_rules! set_value {
-    ($this:ident => $field:ident) => {
+    // For mutable references
+    (&mut $this:expr => $field:ident) => {{
+        let new_value = $field;
+        match $this {
+            MockTransaction::Legacy { $field, .. } => {
+                *$field = new_value;
+            }
+            MockTransaction::Eip1559 { $field, .. } => {
+                *$field = new_value;
+            }
+            MockTransaction::Eip4844 { $field, .. } => {
+                *$field = new_value;
+            }
+            MockTransaction::Eip2930 { $field, .. } => {
+                *$field = new_value;
+            }
+            MockTransaction::Eip7702 { $field, .. } => {
+                *$field = new_value;
+            }
+        }
+        // Ensure the tx cost is always correct after each mutation.
+        $this.update_cost();
+    }};
+    // For owned values
+    ($this:expr => $field:ident) => {{
        let new_value = $field;
        match $this {
            MockTransaction::Legacy { ref mut $field, .. } |
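
This is the one hunk that goes beyond flattening conditionals. `set_value!` previously captured the receiver as `$this:ident` and mutated fields through explicit `ref mut` bindings; under the 2024 edition's stricter match ergonomics, an explicit `ref mut` is rejected when the scrutinee is already being matched through a reference. The new arm therefore accepts `&mut $this:expr`, lets the default binding modes turn each field binding into a `&mut`, and assigns through an explicit dereference. A reduced sketch of the idea (the enum and macro are invented for illustration; this is not the reth macro itself):

    enum Tx {
        Legacy { nonce: u64 },
        Eip1559 { nonce: u64 },
    }

    macro_rules! set_nonce {
        // Arm for a mutable reference: matching `&mut Tx` against plain
        // struct patterns binds `nonce` as `&mut u64`, so the assignment
        // writes through `*nonce` with no `ref mut` in the pattern.
        (&mut $this:expr => $value:expr) => {{
            match $this {
                Tx::Legacy { nonce, .. } | Tx::Eip1559 { nonce, .. } => *nonce = $value,
            }
        }};
    }

    fn bump(tx: &mut Tx) {
        set_nonce!(&mut tx => 7);
    }

    fn main() {
        let mut a = Tx::Legacy { nonce: 0 };
        let mut b = Tx::Eip1559 { nonce: 0 };
        bump(&mut a);
        bump(&mut b);
    }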
@@ -67,7 +91,7 @@ macro_rules! set_value {
        }
        // Ensure the tx cost is always correct after each mutation.
        $this.update_cost();
-    };
+    }};
}

/// Gets the value for the field
@@ -89,7 +113,7 @@ macro_rules! make_setters_getters {
    paste! {$(
        /// Sets the value of the specified field.
        pub fn [<set_ $name>](&mut self, $name: $t) -> &mut Self {
-            set_value!(self => $name);
+            set_value!(&mut self => $name);
            self
        }

@@ -344,10 +344,10 @@ where
        }

        // Check whether the init code size has been exceeded.
-        if self.fork_tracker.is_shanghai_activated() {
-            if let Err(err) = transaction.ensure_max_init_code_size(MAX_INIT_CODE_BYTE_SIZE) {
-                return Err(TransactionValidationOutcome::Invalid(transaction, err))
-            }
+        if self.fork_tracker.is_shanghai_activated() &&
+            let Err(err) = transaction.ensure_max_init_code_size(MAX_INIT_CODE_BYTE_SIZE)
+        {
+            return Err(TransactionValidationOutcome::Invalid(transaction, err))
        }

        // Checks for gas limit
@@ -364,16 +364,16 @@ where
        }

        // Check individual transaction gas limit if configured
-        if let Some(max_tx_gas_limit) = self.max_tx_gas_limit {
-            if transaction_gas_limit > max_tx_gas_limit {
-                return Err(TransactionValidationOutcome::Invalid(
-                    transaction,
-                    InvalidPoolTransactionError::MaxTxGasLimitExceeded(
-                        transaction_gas_limit,
-                        max_tx_gas_limit,
-                    ),
-                ))
-            }
+        if let Some(max_tx_gas_limit) = self.max_tx_gas_limit &&
+            transaction_gas_limit > max_tx_gas_limit
+        {
+            return Err(TransactionValidationOutcome::Invalid(
+                transaction,
+                InvalidPoolTransactionError::MaxTxGasLimitExceeded(
+                    transaction_gas_limit,
+                    max_tx_gas_limit,
+                ),
+            ))
        }

        // Ensure max_priority_fee_per_gas (if EIP1559) is less than max_fee_per_gas if any.
@@ -427,13 +427,13 @@ where
        }

        // Checks for chainid
-        if let Some(chain_id) = transaction.chain_id() {
-            if chain_id != self.chain_id() {
-                return Err(TransactionValidationOutcome::Invalid(
-                    transaction,
-                    InvalidTransactionError::ChainIdMismatch.into(),
-                ))
-            }
+        if let Some(chain_id) = transaction.chain_id() &&
+            chain_id != self.chain_id()
+        {
+            return Err(TransactionValidationOutcome::Invalid(
+                transaction,
+                InvalidTransactionError::ChainIdMismatch.into(),
+            ))
        }

        if transaction.is_eip7702() {
Some files were not shown because too many files have changed in this diff.