chore: bump rust to edition 2024 (#18692)

Author: YK
Date: 2025-09-25 20:18:51 +08:00
Committed by: GitHub
Parent: 9a26947db6
Commit: a047a055ab

113 changed files with 1235 additions and 1251 deletions

Cargo.lock (generated, 22 lines changed)

@@ -1590,6 +1590,24 @@ dependencies = [
  "syn 2.0.106",
 ]
 
+[[package]]
+name = "bindgen"
+version = "0.71.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3"
+dependencies = [
+ "bitflags 2.9.4",
+ "cexpr",
+ "clang-sys",
+ "itertools 0.13.0",
+ "proc-macro2",
+ "quote",
+ "regex",
+ "rustc-hash 2.1.1",
+ "shlex",
+ "syn 2.0.106",
+]
+
 [[package]]
 name = "bit-set"
 version = "0.8.0"
@@ -5353,7 +5371,7 @@ version = "0.14.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e78a09b56be5adbcad5aa1197371688dc6bb249a26da3bca2011ee2fb987ebfb"
 dependencies = [
- "bindgen",
+ "bindgen 0.70.1",
  "errno",
  "libc",
 ]
@@ -8652,7 +8670,7 @@ dependencies = [
 name = "reth-mdbx-sys"
 version = "1.8.1"
 dependencies = [
- "bindgen",
+ "bindgen 0.71.1",
  "cc",
 ]

---- next file: Cargo.toml (workspace manifest) ----

@@ -1,6 +1,6 @@
 [workspace.package]
 version = "1.8.1"
-edition = "2021"
+edition = "2024"
 rust-version = "1.88"
 license = "MIT OR Apache-2.0"
 homepage = "https://paradigmxyz.github.io/reth"
@@ -188,6 +188,7 @@ rust.missing_docs = "warn"
 rust.rust_2018_idioms = { level = "deny", priority = -1 }
 rust.unreachable_pub = "warn"
 rust.unused_must_use = "deny"
+rust.rust_2024_incompatible_pat = "warn"
 rustdoc.all = "warn"
 # rust.unnameable-types = "warn"
@@ -667,7 +668,7 @@ snmalloc-rs = { version = "0.3.7", features = ["build_cc"] }
 aes = "0.8.1"
 ahash = "0.8"
 anyhow = "1.0"
-bindgen = { version = "0.70", default-features = false }
+bindgen = { version = "0.71", default-features = false }
 block-padding = "0.3.2"
 cc = "=1.2.15"
 cipher = "0.4.3"

---- next file ----

@@ -26,8 +26,10 @@ use reth_cli_runner::CliRunner;
 fn main() {
     // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided.
     if std::env::var_os("RUST_BACKTRACE").is_none() {
-        std::env::set_var("RUST_BACKTRACE", "1");
+        unsafe {
+            std::env::set_var("RUST_BACKTRACE", "1");
+        }
     }
 
     // Run until either exit or sigint or sigterm
     let runner = CliRunner::try_default_runtime().unwrap();
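
Edition-2024 note: `std::env::set_var` (and `remove_var`) became `unsafe fn`s in Rust 2024 because mutating the process environment is not thread-safe, which is what forces the new `unsafe` block in this hunk. A minimal standalone sketch of the required call shape (not code from this PR):

    fn main() {
        if std::env::var_os("RUST_BACKTRACE").is_none() {
            // SAFETY: performed at the top of main, before any threads spawn.
            unsafe { std::env::set_var("RUST_BACKTRACE", "1") };
        }
    }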

---- next file ----

@@ -141,11 +141,11 @@ impl<R: Read> ProgressReader<R> {
 impl<R: Read> Read for ProgressReader<R> {
     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
         let bytes = self.reader.read(buf)?;
-        if bytes > 0 {
-            if let Err(e) = self.progress.update(bytes as u64) {
-                return Err(io::Error::other(e));
-            }
+        if bytes > 0 &&
+            let Err(e) = self.progress.update(bytes as u64)
+        {
+            return Err(io::Error::other(e));
         }
         Ok(bytes)
     }
 }
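
This is the first of many hunks that collapse nested `if`/`if let` blocks into a let-chain: conditions joined by `&&` may now mix boolean expressions and `let` bindings, a feature stabilized for edition 2024 (Rust 1.88). A minimal standalone sketch of the idiom (not code from this PR):

    fn first_even(xs: &[i32]) -> Option<i32> {
        // One condition chains a pattern match and a boolean guard, replacing
        // the pre-2024 nested `if let Some(..) { if cond { .. } }` shape.
        if let Some(&x) = xs.first() && x % 2 == 0 {
            return Some(x);
        }
        None
    }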

---- next file ----

@@ -192,7 +192,7 @@ pub fn build_import_pipeline_impl<N, C, E>(
     static_file_producer: StaticFileProducer<ProviderFactory<N>>,
     disable_exec: bool,
     evm_config: E,
-) -> eyre::Result<(Pipeline<N>, impl futures::Stream<Item = NodeEvent<N::Primitives>>)>
+) -> eyre::Result<(Pipeline<N>, impl futures::Stream<Item = NodeEvent<N::Primitives>> + use<N, C, E>)>
 where
     N: ProviderNodeTypes,
     C: FullConsensus<N::Primitives, Error = reth_consensus::ConsensusError> + 'static,
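
The added `+ use<N, C, E>` is precise capturing: under edition 2024, return-position `impl Trait` captures every in-scope generic parameter and lifetime by default, and a `use<..>` bound replaces that default with an explicit list, presumably here so the opaque stream type does not capture borrows it never holds. A minimal standalone sketch with a hypothetical function (not from this PR):

    // Without `+ use<>`, the 2024 default would capture `'a`, tying the
    // returned iterator to the borrow of `names`; `use<>` captures nothing.
    fn indices<'a>(names: &'a [String]) -> impl Iterator<Item = usize> + use<> {
        0..names.len()
    }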

---- next file ----

@@ -7,7 +7,7 @@ use std::{
     fmt, mem, ptr,
 };
 
-extern "C" {
+unsafe extern "C" {
     fn backtrace_symbols_fd(buffer: *const *mut libc::c_void, size: libc::c_int, fd: libc::c_int);
 }
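
Edition 2024 requires foreign blocks to be declared `unsafe extern`, so the declaration site, not just each call site, acknowledges that the signatures are unchecked; items inside may additionally be marked `safe` or `unsafe`. A minimal standalone sketch (libc binding chosen for illustration):

    unsafe extern "C" {
        // Still unsafe to call: the caller must pass a valid, NUL-terminated pointer.
        fn strlen(s: *const core::ffi::c_char) -> usize;
    }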

---- next file ----

@@ -96,11 +96,12 @@ where
         }
 
         // Connect last node with the first if there are more than two
-        if idx + 1 == num_nodes && num_nodes > 2 {
-            if let Some(first_node) = nodes.first_mut() {
-                node.connect(first_node).await;
-            }
+        if idx + 1 == num_nodes &&
+            num_nodes > 2 &&
+            let Some(first_node) = nodes.first_mut()
+        {
+            node.connect(first_node).await;
         }
 
         nodes.push(node);
     }
@@ -207,12 +208,13 @@
             }
 
             // Connect last node with the first if there are more than two
-            if idx + 1 == num_nodes && num_nodes > 2 {
-                if let Some(first_node) = nodes.first_mut() {
-                    node.connect(first_node).await;
-                }
+            if idx + 1 == num_nodes &&
+                num_nodes > 2 &&
+                let Some(first_node) = nodes.first_mut()
+            {
+                node.connect(first_node).await;
             }
 
             nodes.push(node);
         }

---- next file ----

@@ -150,15 +150,14 @@
         loop {
            tokio::time::sleep(std::time::Duration::from_millis(20)).await;
 
-            if !check && wait_finish_checkpoint {
-                if let Some(checkpoint) =
-                    self.inner.provider.get_stage_checkpoint(StageId::Finish)?
-                {
-                    if checkpoint.block_number >= number {
-                        check = true
-                    }
-                }
+            if !check &&
+                wait_finish_checkpoint &&
+                let Some(checkpoint) =
+                    self.inner.provider.get_stage_checkpoint(StageId::Finish)? &&
+                checkpoint.block_number >= number
+            {
+                check = true
             }
 
             if check {
                 if let Some(latest_header) = self.inner.provider.header_by_number(number)? {
@@ -178,12 +177,12 @@
     pub async fn wait_unwind(&self, number: BlockNumber) -> eyre::Result<()> {
         loop {
             tokio::time::sleep(std::time::Duration::from_millis(10)).await;
-            if let Some(checkpoint) = self.inner.provider.get_stage_checkpoint(StageId::Headers)? {
-                if checkpoint.block_number == number {
-                    break
-                }
+            if let Some(checkpoint) = self.inner.provider.get_stage_checkpoint(StageId::Headers)? &&
+                checkpoint.block_number == number
+            {
+                break
             }
         }
         Ok(())
     }
@@ -207,16 +206,15 @@
             // wait for the block to commit
             tokio::time::sleep(std::time::Duration::from_millis(20)).await;
             if let Some(latest_block) =
-                self.inner.provider.block_by_number_or_tag(BlockNumberOrTag::Latest)?
+                self.inner.provider.block_by_number_or_tag(BlockNumberOrTag::Latest)? &&
+                latest_block.header().number() == block_number
             {
-                if latest_block.header().number() == block_number {
-                    // make sure the block hash we submitted via FCU engine api is the new latest
-                    // block using an RPC call
-                    assert_eq!(latest_block.header().hash_slow(), block_hash);
-                    break
-                }
+                // make sure the block hash we submitted via FCU engine api is the new latest
+                // block using an RPC call
+                assert_eq!(latest_block.header().hash_slow(), block_hash);
+                break
             }
         }
         Ok(())
     }

---- next file ----

@@ -174,17 +174,14 @@
         ];
 
         // if we're on a fork, validate it now that it's canonical
-        if let Ok(active_state) = env.active_node_state() {
-            if let Some(fork_base) = active_state.current_fork_base {
-                debug!(
-                    "MakeCanonical: Adding fork validation from base block {}",
-                    fork_base
-                );
-                actions.push(Box::new(ValidateFork::new(fork_base)));
-                // clear the fork base since we're now canonical
-                env.active_node_state_mut()?.current_fork_base = None;
-            }
-        }
+        if let Ok(active_state) = env.active_node_state() &&
+            let Some(fork_base) = active_state.current_fork_base
+        {
+            debug!("MakeCanonical: Adding fork validation from base block {}", fork_base);
+            actions.push(Box::new(ValidateFork::new(fork_base)));
+            // clear the fork base since we're now canonical
+            env.active_node_state_mut()?.current_fork_base = None;
+        }
 
         let mut sequence = Sequence::new(actions);
         sequence.execute(env).await

---- next file ----

@@ -195,8 +195,9 @@
             .copied()
             .ok_or_else(|| eyre::eyre!("Block tag '{}' not found in registry", self.tag))?;
 
-        if let Some(expected_node) = self.expected_node_idx {
-            if node_idx != expected_node {
+        if let Some(expected_node) = self.expected_node_idx &&
+            node_idx != expected_node
+        {
             return Err(eyre::eyre!(
                 "Block tag '{}' came from node {} but expected node {}",
                 self.tag,
@@ -204,7 +205,6 @@
                 expected_node
             ));
         }
-        }
 
         debug!(
             "Validated block tag '{}': block {} (hash: {}) from node {}",

---- next file ----

@@ -220,7 +220,7 @@
         let is_dev = self.is_dev;
         let node_count = self.network.node_count;
 
-        let attributes_generator = self.create_attributes_generator::<N>();
+        let attributes_generator = Self::create_static_attributes_generator::<N>();
 
         let result = setup_engine_with_connection::<N>(
             node_count,
@@ -299,10 +299,11 @@
         .await
     }
 
-    /// Create the attributes generator function
-    fn create_attributes_generator<N>(
-        &self,
-    ) -> impl Fn(u64) -> <<N as NodeTypes>::Payload as PayloadTypes>::PayloadBuilderAttributes + Copy
+    /// Create a static attributes generator that doesn't capture any instance data
+    fn create_static_attributes_generator<N>(
+    ) -> impl Fn(u64) -> <<N as NodeTypes>::Payload as PayloadTypes>::PayloadBuilderAttributes
+        + Copy
+        + use<N, I>
     where
         N: NodeBuilderHelper<Payload = I>,
         LocalPayloadAttributesBuilder<N::ChainSpec>: PayloadAttributesBuilder<

---- next file ----

@@ -89,12 +89,12 @@ async fn test_apply_with_import() -> Result<()> {
         )
         .await;
 
-        if let Ok(Some(block)) = block_result {
-            if block.header.number == 10 {
-                debug!("Pipeline finished, block 10 is fully available");
-                break;
-            }
+        if let Ok(Some(block)) = block_result &&
+            block.header.number == 10
+        {
+            debug!("Pipeline finished, block 10 is fully available");
+            break;
         }
 
         if start.elapsed() > std::time::Duration::from_secs(10) {
             return Err(eyre::eyre!("Timeout waiting for pipeline to finish"));

---- next file ----

@@ -664,7 +664,7 @@ mod tests {
     unsafe impl GlobalAlloc for TrackingAllocator {
         unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-            let ret = self.inner.alloc(layout);
+            let ret = unsafe { self.inner.alloc(layout) };
             if !ret.is_null() {
                 self.allocated.fetch_add(layout.size(), Ordering::SeqCst);
                 self.total_allocated.fetch_add(layout.size(), Ordering::SeqCst);
@@ -674,7 +674,7 @@ mod tests {
         unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
             self.allocated.fetch_sub(layout.size(), Ordering::SeqCst);
-            self.inner.dealloc(ptr, layout)
+            unsafe { self.inner.dealloc(ptr, layout) }
         }
     }
 }
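
These two hunks reflect that edition 2024 enables the `unsafe_op_in_unsafe_fn` lint by default: the body of an `unsafe fn` is no longer one implicit `unsafe` block, so each unsafe operation needs its own explicit scope. A minimal standalone sketch (not code from this PR):

    unsafe fn read_first(ptr: *const u8) -> u8 {
        // SAFETY: the caller guarantees `ptr` is valid for reads.
        unsafe { *ptr }
    }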

---- next file ----

@@ -1818,11 +1818,11 @@ where
     fn prepare_invalid_response(&mut self, mut parent_hash: B256) -> ProviderResult<PayloadStatus> {
         // Edge case: the `latestValid` field is the zero hash if the parent block is the terminal
         // PoW block, which we need to identify by looking at the parent's block difficulty
-        if let Some(parent) = self.sealed_header_by_hash(parent_hash)? {
-            if !parent.difficulty().is_zero() {
-                parent_hash = B256::ZERO;
-            }
+        if let Some(parent) = self.sealed_header_by_hash(parent_hash)? &&
+            !parent.difficulty().is_zero()
+        {
+            parent_hash = B256::ZERO;
         }
 
         let valid_parent_hash = self.latest_valid_hash_for_invalid_payload(parent_hash)?;
         Ok(PayloadStatus::from_status(PayloadStatusEnum::Invalid {
@@ -2038,31 +2038,35 @@
         let sync_target_state = self.state.forkchoice_state_tracker.sync_target_state();
 
         // check if the downloaded block is the tracked finalized block
-        let mut exceeds_backfill_threshold = if let Some(buffered_finalized) = sync_target_state
-            .as_ref()
-            .and_then(|state| self.state.buffer.block(&state.finalized_block_hash))
-        {
-            // if we have buffered the finalized block, we should check how far
-            // we're off
-            self.exceeds_backfill_run_threshold(canonical_tip_num, buffered_finalized.number())
-        } else {
-            // check if the distance exceeds the threshold for backfill sync
-            self.exceeds_backfill_run_threshold(canonical_tip_num, target_block_number)
-        };
-
-        // If this is invoked after we downloaded a block we can check if this block is the
-        // finalized block
-        if let (Some(downloaded_block), Some(ref state)) = (downloaded_block, sync_target_state) {
-            if downloaded_block.hash == state.finalized_block_hash {
-                // we downloaded the finalized block and can now check how far we're off
-                exceeds_backfill_threshold =
-                    self.exceeds_backfill_run_threshold(canonical_tip_num, downloaded_block.number);
-            }
-        }
+        let exceeds_backfill_threshold =
+            match (downloaded_block.as_ref(), sync_target_state.as_ref()) {
+                // if we downloaded the finalized block we can now check how far we're off
+                (Some(downloaded_block), Some(state))
+                    if downloaded_block.hash == state.finalized_block_hash =>
+                {
+                    self.exceeds_backfill_run_threshold(canonical_tip_num, downloaded_block.number)
+                }
+                _ => match sync_target_state
+                    .as_ref()
+                    .and_then(|state| self.state.buffer.block(&state.finalized_block_hash))
+                {
+                    Some(buffered_finalized) => {
+                        // if we have buffered the finalized block, we should check how far we're
+                        // off
+                        self.exceeds_backfill_run_threshold(
+                            canonical_tip_num,
+                            buffered_finalized.number(),
+                        )
+                    }
+                    None => {
+                        // check if the distance exceeds the threshold for backfill sync
+                        self.exceeds_backfill_run_threshold(canonical_tip_num, target_block_number)
+                    }
+                },
+            };
 
         // if the number of missing blocks is greater than the max, trigger backfill
-        if exceeds_backfill_threshold {
-            if let Some(state) = sync_target_state {
+        if exceeds_backfill_threshold && let Some(state) = sync_target_state {
             // if we have already canonicalized the finalized block, we should skip backfill
             match self.provider.header_by_hash_or_number(state.finalized_block_hash.into()) {
                 Err(err) => {
@@ -2096,7 +2100,6 @@
                 }
             }
         }
-        }
 
         None
     }

---- next file ----

@@ -140,11 +140,11 @@ where
     fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
         let mut this = self.project();
         let next = ready!(this.stream.poll_next_unpin(cx));
-        if let Some(msg) = &next {
-            if let Err(error) = this.store.on_message(msg, SystemTime::now()) {
-                error!(target: "engine::stream::store", ?msg, %error, "Error handling Engine API message");
-            }
+        if let Some(msg) = &next &&
+            let Err(error) = this.store.on_message(msg, SystemTime::now())
+        {
+            error!(target: "engine::stream::store", ?msg, %error, "Error handling Engine API message");
         }
         Poll::Ready(next)
     }
 }

---- next file ----

@@ -106,15 +106,14 @@ impl<Http: HttpClient + Clone> EraClient<Http> {
         if let Ok(mut dir) = fs::read_dir(&self.folder).await {
             while let Ok(Some(entry)) = dir.next_entry().await {
-                if let Some(name) = entry.file_name().to_str() {
-                    if let Some(number) = self.file_name_to_number(name) {
-                        if max.is_none() || matches!(max, Some(max) if number > max) {
-                            max.replace(number + 1);
-                        }
-                    }
+                if let Some(name) = entry.file_name().to_str() &&
+                    let Some(number) = self.file_name_to_number(name) &&
+                    (max.is_none() || matches!(max, Some(max) if number > max))
+                {
+                    max.replace(number + 1);
                 }
             }
         }
 
         max
     }
@@ -125,17 +124,16 @@ impl<Http: HttpClient + Clone> EraClient<Http> {
         if let Ok(mut dir) = fs::read_dir(&self.folder).await {
             while let Ok(Some(entry)) = dir.next_entry().await {
-                if let Some(name) = entry.file_name().to_str() {
-                    if let Some(number) = self.file_name_to_number(name) {
-                        if number < index || number >= last {
-                            eprintln!("Deleting file {}", entry.path().display());
-                            eprintln!("{number} < {index} || {number} >= {last}");
-                            reth_fs_util::remove_file(entry.path())?;
-                        }
-                    }
+                if let Some(name) = entry.file_name().to_str() &&
+                    let Some(number) = self.file_name_to_number(name) &&
+                    (number < index || number >= last)
+                {
+                    eprintln!("Deleting file {}", entry.path().display());
+                    eprintln!("{number} < {index} || {number} >= {last}");
+                    reth_fs_util::remove_file(entry.path())?;
                 }
             }
         }
 
         Ok(())
     }
@@ -208,14 +206,14 @@ impl<Http: HttpClient + Clone> EraClient<Http> {
         let mut writer = io::BufWriter::new(file);
 
         while let Some(line) = lines.next_line().await? {
-            if let Some(j) = line.find(".era1") {
-                if let Some(i) = line[..j].rfind(|c: char| !c.is_alphanumeric() && c != '-') {
-                    let era = &line[i + 1..j + 5];
-                    writer.write_all(era.as_bytes()).await?;
-                    writer.write_all(b"\n").await?;
-                }
+            if let Some(j) = line.find(".era1") &&
+                let Some(i) = line[..j].rfind(|c: char| !c.is_alphanumeric() && c != '-')
+            {
+                let era = &line[i + 1..j + 5];
+                writer.write_all(era.as_bytes()).await?;
+                writer.write_all(b"\n").await?;
             }
         }
         writer.flush().await?;
 
         Ok(())

---- next file ----

@@ -17,8 +17,9 @@ pub fn read_dir(
     (|| {
         let path = entry?.path();
 
-        if path.extension() == Some("era1".as_ref()) {
-            if let Some(last) = path.components().next_back() {
+        if path.extension() == Some("era1".as_ref()) &&
+            let Some(last) = path.components().next_back()
+        {
             let str = last.as_os_str().to_string_lossy().to_string();
             let parts = str.split('-').collect::<Vec<_>>();
@@ -28,7 +29,6 @@ pub fn read_dir(
                 return Ok(Some((number, path.into_boxed_path())));
             }
         }
-        }
 
         if path.file_name() == Some("checksums.txt".as_ref()) {
             let file = fs::open(path)?;
             let reader = io::BufReader::new(file);

---- next file ----

@@ -262,8 +262,9 @@ impl<Http: HttpClient + Clone + Send + Sync + 'static + Unpin> Stream for Starti
             self.fetch_file_list();
         }
 
-        if self.state == State::FetchFileList {
-            if let Poll::Ready(result) = self.fetch_file_list.poll_unpin(cx) {
+        if self.state == State::FetchFileList &&
+            let Poll::Ready(result) = self.fetch_file_list.poll_unpin(cx)
+        {
             match result {
                 Ok(_) => self.delete_outside_range(),
                 Err(e) => {
@@ -273,10 +274,10 @@
                 }
            }
         }
-        }
 
-        if self.state == State::DeleteOutsideRange {
-            if let Poll::Ready(result) = self.delete_outside_range.poll_unpin(cx) {
+        if self.state == State::DeleteOutsideRange &&
+            let Poll::Ready(result) = self.delete_outside_range.poll_unpin(cx)
+        {
             match result {
                 Ok(_) => self.recover_index(),
                 Err(e) => {
@@ -286,24 +287,23 @@
                 }
            }
         }
-        }
 
-        if self.state == State::RecoverIndex {
-            if let Poll::Ready(last) = self.recover_index.poll_unpin(cx) {
+        if self.state == State::RecoverIndex &&
+            let Poll::Ready(last) = self.recover_index.poll_unpin(cx)
+        {
             self.last = last;
             self.count_files();
         }
-        }
 
-        if self.state == State::CountFiles {
-            if let Poll::Ready(downloaded) = self.files_count.poll_unpin(cx) {
+        if self.state == State::CountFiles &&
+            let Poll::Ready(downloaded) = self.files_count.poll_unpin(cx)
+        {
             let max_missing = self
                 .max_files
                 .saturating_sub(downloaded + self.downloading)
                 .max(self.last.unwrap_or_default().saturating_sub(self.index));
             self.state = State::Missing(max_missing);
         }
-        }
 
         if let State::Missing(max_missing) = self.state {
             if max_missing > 0 {
@@ -316,19 +316,17 @@
             }
         }
 
-        if let State::NextUrl(max_missing) = self.state {
-            if let Poll::Ready(url) = self.next_url.poll_unpin(cx) {
+        if let State::NextUrl(max_missing) = self.state &&
+            let Poll::Ready(url) = self.next_url.poll_unpin(cx)
+        {
             self.state = State::Missing(max_missing - 1);
             return Poll::Ready(url.transpose().map(|url| -> DownloadFuture {
                 let mut client = self.client.clone();
-                Box::pin(
-                    async move { client.download_to_file(url?).await.map(EraRemoteMeta::new) },
-                )
+                Box::pin(async move { client.download_to_file(url?).await.map(EraRemoteMeta::new) })
             }));
         }
-        }
 
         Poll::Pending
     }

---- next file ----

@@ -302,11 +302,11 @@ where
             if number <= last_header_number {
                 continue;
             }
-            if let Some(target) = target {
-                if number > target {
-                    break;
-                }
+            if let Some(target) = target &&
+                number > target
+            {
+                break;
             }
 
             let hash = header.hash_slow();
             last_header_number = number;
@@ -351,19 +351,18 @@
         // Database cursor for hash to number index
         let mut cursor_header_numbers =
             provider.tx_ref().cursor_write::<RawTable<tables::HeaderNumbers>>()?;
-        let mut first_sync = false;
 
         // If we only have the genesis block hash, then we are at first sync, and we can remove it,
         // add it to the collector and use tx.append on all hashes.
-        if provider.tx_ref().entries::<RawTable<tables::HeaderNumbers>>()? == 1 {
-            if let Some((hash, block_number)) = cursor_header_numbers.last()? {
-                if block_number.value()? == 0 {
-                    hash_collector.insert(hash.key()?, 0)?;
-                    cursor_header_numbers.delete_current()?;
-                    first_sync = true;
-                }
-            }
-        }
+        let first_sync = if provider.tx_ref().entries::<RawTable<tables::HeaderNumbers>>()? == 1 &&
+            let Some((hash, block_number)) = cursor_header_numbers.last()? &&
+            block_number.value()? == 0
+        {
+            hash_collector.insert(hash.key()?, 0)?;
+            cursor_header_numbers.delete_current()?;
+            true
+        } else {
+            false
+        };
 
         let interval = (total_headers / 10).max(8192);

---- next file ----

@@ -37,9 +37,12 @@ where
         // operation as hashing that is required for state root got calculated in every
         // transaction This was replaced with is_success flag.
        // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658
-        if chain_spec.is_byzantium_active_at_block(block.header().number()) {
-            if let Err(error) =
-                verify_receipts(block.header().receipts_root(), block.header().logs_bloom(), receipts)
+        if chain_spec.is_byzantium_active_at_block(block.header().number()) &&
+            let Err(error) = verify_receipts(
+                block.header().receipts_root(),
+                block.header().logs_bloom(),
+                receipts,
+            )
         {
             let receipts = receipts
                 .iter()
@@ -48,7 +51,6 @@
             tracing::debug!(%error, ?receipts, "receipts verification failed");
             return Err(error)
         }
-        }
 
         // Validate that the header requests hash matches the calculated requests hash
         if chain_spec.is_prague_active_at_timestamp(block.header().timestamp()) {

---- next file ----

@@ -501,12 +501,12 @@ where
                 .next_notification_id
                 .checked_sub(this.min_id)
                 .expect("exex expected notification ID outside the manager's range");
 
-            if let Some(notification) = this.buffer.get(notification_index) {
-                if let Poll::Ready(Err(err)) = exex.send(cx, notification) {
-                    // The channel was closed, which is irrecoverable for the manager
-                    return Poll::Ready(Err(err.into()))
-                }
+            if let Some(notification) = this.buffer.get(notification_index) &&
+                let Poll::Ready(Err(err)) = exex.send(cx, notification)
+            {
+                // The channel was closed, which is irrecoverable for the manager
+                return Poll::Ready(Err(err.into()))
             }
 
             min_id = min_id.min(exex.next_notification_id);
             this.exex_handles.push(exex);
         }

---- next file ----

@@ -59,12 +59,12 @@ impl BanList {
     pub fn evict_peers(&mut self, now: Instant) -> Vec<PeerId> {
         let mut evicted = Vec::new();
         self.banned_peers.retain(|peer, until| {
-            if let Some(until) = until {
-                if now > *until {
-                    evicted.push(*peer);
-                    return false
-                }
+            if let Some(until) = until &&
+                now > *until
+            {
+                evicted.push(*peer);
+                return false
             }
             true
         });
         evicted
@@ -74,12 +74,12 @@
     pub fn evict_ips(&mut self, now: Instant) -> Vec<IpAddr> {
         let mut evicted = Vec::new();
         self.banned_ips.retain(|peer, until| {
-            if let Some(until) = until {
-                if now > *until {
-                    evicted.push(*peer);
-                    return false
-                }
+            if let Some(until) = until &&
+                now > *until
+            {
+                evicted.push(*peer);
+                return false
             }
             true
         });
         evicted

---- next file ----

@@ -627,12 +627,12 @@ impl Discv4Service {
     /// Sets the external Ip to the configured external IP if [`NatResolver::ExternalIp`].
     fn resolve_external_ip(&mut self) {
-        if let Some(r) = &self.resolve_external_ip_interval {
-            if let Some(external_ip) = r.resolver().as_external_ip() {
-                self.set_external_ip_addr(external_ip);
-            }
+        if let Some(r) = &self.resolve_external_ip_interval &&
+            let Some(external_ip) = r.resolver().as_external_ip()
+        {
+            self.set_external_ip_addr(external_ip);
         }
     }
 
     /// Sets the given ip address as the node's external IP in the node record announced in
     /// discovery
@@ -904,11 +904,11 @@ impl Discv4Service {
     /// Check if the peer has an active bond.
     fn has_bond(&self, remote_id: PeerId, remote_ip: IpAddr) -> bool {
-        if let Some(timestamp) = self.received_pongs.last_pong(remote_id, remote_ip) {
-            if timestamp.elapsed() < self.config.bond_expiration {
-                return true
-            }
+        if let Some(timestamp) = self.received_pongs.last_pong(remote_id, remote_ip) &&
+            timestamp.elapsed() < self.config.bond_expiration
+        {
+            return true
         }
         false
     }
@@ -3048,13 +3048,12 @@ mod tests {
         loop {
             tokio::select! {
                 Some(update) = updates.next() => {
-                    if let DiscoveryUpdate::Added(record) = update {
-                        if record.id == peerid_1 {
-                            bootnode_appeared = true;
-                            break;
-                        }
+                    if let DiscoveryUpdate::Added(record) = update
+                        && record.id == peerid_1 {
+                        bootnode_appeared = true;
+                        break;
                     }
                 }
                 _ = &mut timeout => break,
             }
         }

---- next file ----

@@ -152,12 +152,12 @@ impl ConfigBuilder {
     /// Adds a comma-separated list of enodes, serialized unsigned node records, to boot nodes.
     pub fn add_serialized_unsigned_boot_nodes(mut self, enodes: &[&str]) -> Self {
         for node in enodes {
-            if let Ok(node) = node.parse() {
-                if let Ok(node) = BootNode::from_unsigned(node) {
-                    self.bootstrap_nodes.insert(node);
-                }
+            if let Ok(node) = node.parse() &&
+                let Ok(node) = BootNode::from_unsigned(node)
+            {
+                self.bootstrap_nodes.insert(node);
             }
         }
 
         self
     }
@@ -411,15 +411,15 @@ pub fn discv5_sockets_wrt_rlpx_addr(
             let discv5_socket_ipv6 =
                 discv5_addr_ipv6.map(|ip| SocketAddrV6::new(ip, discv5_port_ipv6, 0, 0));
 
-            if let Some(discv5_addr) = discv5_addr_ipv4 {
-                if discv5_addr != rlpx_addr {
-                    debug!(target: "net::discv5",
-                        %discv5_addr,
-                        %rlpx_addr,
-                        "Overwriting discv5 IPv4 address with RLPx IPv4 address, limited to one advertised IP address per IP version"
-                    );
-                }
+            if let Some(discv5_addr) = discv5_addr_ipv4 &&
+                discv5_addr != rlpx_addr
+            {
+                debug!(target: "net::discv5",
+                    %discv5_addr,
+                    %rlpx_addr,
+                    "Overwriting discv5 IPv4 address with RLPx IPv4 address, limited to one advertised IP address per IP version"
+                );
             }
 
             // overwrite discv5 ipv4 addr with RLPx address. this is since there is no
             // spec'd way to advertise a different address for rlpx and discovery in the
@@ -430,15 +430,15 @@ pub fn discv5_sockets_wrt_rlpx_addr(
             let discv5_socket_ipv4 =
                 discv5_addr_ipv4.map(|ip| SocketAddrV4::new(ip, discv5_port_ipv4));
 
-            if let Some(discv5_addr) = discv5_addr_ipv6 {
-                if discv5_addr != rlpx_addr {
-                    debug!(target: "net::discv5",
-                        %discv5_addr,
-                        %rlpx_addr,
-                        "Overwriting discv5 IPv6 address with RLPx IPv6 address, limited to one advertised IP address per IP version"
-                    );
-                }
+            if let Some(discv5_addr) = discv5_addr_ipv6 &&
+                discv5_addr != rlpx_addr
+            {
+                debug!(target: "net::discv5",
+                    %discv5_addr,
+                    %rlpx_addr,
+                    "Overwriting discv5 IPv6 address with RLPx IPv6 address, limited to one advertised IP address per IP version"
+                );
             }
 
             // overwrite discv5 ipv6 addr with RLPx address. this is since there is no
             // spec'd way to advertise a different address for rlpx and discovery in the

---- next file ----

@@ -80,13 +80,13 @@ impl<R: Resolver, K: EnrKeyUnambiguous> QueryPool<R, K> {
         // queue in new queries if we have capacity
         'queries: while self.active_queries.len() < self.rate_limit.limit() as usize {
-            if self.rate_limit.poll_ready(cx).is_ready() {
-                if let Some(query) = self.queued_queries.pop_front() {
-                    self.rate_limit.tick();
-                    self.active_queries.push(query);
-                    continue 'queries
-                }
+            if self.rate_limit.poll_ready(cx).is_ready() &&
+                let Some(query) = self.queued_queries.pop_front()
+            {
+                self.rate_limit.tick();
+                self.active_queries.push(query);
+                continue 'queries
             }
             break
         }

---- next file ----

@@ -172,20 +172,17 @@ where
     ///
     /// Returns `None` if no more requests are required.
     fn next_request(&mut self) -> Option<HeadersRequest> {
-        if let Some(local_head) = self.local_block_number() {
-            if self.next_request_block_number > local_head {
-                let request = calc_next_request(
-                    local_head,
-                    self.next_request_block_number,
-                    self.request_limit,
-                );
-                // need to shift the tracked request block number based on the number of requested
-                // headers so follow-up requests will use that as start.
-                self.next_request_block_number -= request.limit;
-                return Some(request)
-            }
-        }
+        if let Some(local_head) = self.local_block_number() &&
+            self.next_request_block_number > local_head
+        {
+            let request =
+                calc_next_request(local_head, self.next_request_block_number, self.request_limit);
+            // need to shift the tracked request block number based on the number of requested
+            // headers so follow-up requests will use that as start.
+            self.next_request_block_number -= request.limit;
+            return Some(request)
+        }
         None
     }

---- next file ----

@@ -179,8 +179,9 @@ where
         }
 
         // Ensure peer's total difficulty is reasonable
-        if let StatusMessage::Legacy(s) = their_status_message {
-            if s.total_difficulty.bit_len() > 160 {
+        if let StatusMessage::Legacy(s) = their_status_message &&
+            s.total_difficulty.bit_len() > 160
+        {
             unauth
                 .disconnect(DisconnectReason::ProtocolBreach)
                 .await
@@ -191,7 +192,6 @@
             }
             .into());
         }
-        }
 
         // Fork validation
         if let Err(err) = fork_filter

---- next file ----

@@ -656,14 +656,12 @@ impl<N: NetworkPrimitives> NetworkConfigBuilder<N> {
         // If default DNS config is used then we add the known dns network to bootstrap from
         if let Some(dns_networks) =
-            dns_discovery_config.as_mut().and_then(|c| c.bootstrap_dns_networks.as_mut())
+            dns_discovery_config.as_mut().and_then(|c| c.bootstrap_dns_networks.as_mut()) &&
+            dns_networks.is_empty() &&
+            let Some(link) = chain_spec.chain().public_dns_network_protocol()
         {
-            if dns_networks.is_empty() {
-                if let Some(link) = chain_spec.chain().public_dns_network_protocol() {
-                    dns_networks.insert(link.parse().expect("is valid DNS link entry"));
-                }
-            }
+            dns_networks.insert(link.parse().expect("is valid DNS link entry"));
         }
 
         NetworkConfig {
             client,

---- next file ----

@@ -267,14 +267,13 @@ impl Discovery {
         while let Some(Poll::Ready(Some(update))) =
             self.discv5_updates.as_mut().map(|updates| updates.poll_next_unpin(cx))
         {
-            if let Some(discv5) = self.discv5.as_mut() {
-                if let Some(DiscoveredPeer { node_record, fork_id }) =
-                    discv5.on_discv5_update(update)
-                {
-                    self.on_node_record_update(node_record, fork_id);
-                }
+            if let Some(discv5) = self.discv5.as_mut() &&
+                let Some(DiscoveredPeer { node_record, fork_id }) =
+                    discv5.on_discv5_update(update)
+            {
+                self.on_node_record_update(node_record, fork_id);
             }
         }
 
         // drain the dns update stream
         while let Some(Poll::Ready(Some(update))) =

---- next file ----

@@ -116,13 +116,13 @@ impl<N: NetworkPrimitives> StateFetcher<N> {
     ///
     /// Returns `true` if this a newer block
     pub(crate) fn update_peer_block(&mut self, peer_id: &PeerId, hash: B256, number: u64) -> bool {
-        if let Some(peer) = self.peers.get_mut(peer_id) {
-            if number > peer.best_number {
-                peer.best_hash = hash;
-                peer.best_number = number;
-                return true
-            }
+        if let Some(peer) = self.peers.get_mut(peer_id) &&
+            number > peer.best_number
+        {
+            peer.best_hash = hash;
+            peer.best_number = number;
+            return true
         }
         false
     }

---- next file ----

@@ -382,14 +382,15 @@
     /// Bans the peer temporarily with the configured ban timeout
     fn ban_peer(&mut self, peer_id: PeerId) {
-        let mut ban_duration = self.ban_duration;
-        if let Some(peer) = self.peers.get(&peer_id) {
-            if peer.is_trusted() || peer.is_static() {
-                // For misbehaving trusted or static peers, we provide a bit more leeway when
-                // penalizing them.
-                ban_duration = self.backoff_durations.low / 2;
-            }
-        }
+        let ban_duration = if let Some(peer) = self.peers.get(&peer_id) &&
+            (peer.is_trusted() || peer.is_static())
+        {
+            // For misbehaving trusted or static peers, we provide a bit more leeway when
+            // penalizing them.
+            self.backoff_durations.low / 2
+        } else {
+            self.ban_duration
+        };
 
         self.ban_list.ban_peer_until(peer_id, std::time::Instant::now() + ban_duration);
         self.queued_actions.push_back(PeerAction::BanPeer { peer_id });

---- next file ----

@@ -748,13 +748,13 @@ impl<N: NetworkPrimitives> Future for ActiveSession<N> {
         while this.internal_request_timeout_interval.poll_tick(cx).is_ready() {
             // check for timed out requests
-            if this.check_timed_out_requests(Instant::now()) {
-                if let Poll::Ready(Ok(_)) = this.to_session_manager.poll_reserve(cx) {
-                    let msg = ActiveSessionMessage::ProtocolBreach { peer_id: this.remote_peer_id };
-                    this.pending_message_to_session = Some(msg);
-                }
+            if this.check_timed_out_requests(Instant::now()) &&
+                let Poll::Ready(Ok(_)) = this.to_session_manager.poll_reserve(cx)
+            {
+                let msg = ActiveSessionMessage::ProtocolBreach { peer_id: this.remote_peer_id };
+                this.pending_message_to_session = Some(msg);
             }
         }
 
         this.shrink_to_fit();

---- next file ----

@@ -80,11 +80,11 @@ impl SessionCounter {
     }
 
     const fn ensure(current: u32, limit: Option<u32>) -> Result<(), ExceedsSessionLimit> {
-        if let Some(limit) = limit {
-            if current >= limit {
-                return Err(ExceedsSessionLimit(limit))
-            }
+        if let Some(limit) = limit &&
+            current >= limit
+        {
+            return Err(ExceedsSessionLimit(limit))
         }
         Ok(())
     }
 }

---- next file ----

@@ -697,13 +697,12 @@ impl<Pool: TransactionPool, N: NetworkPrimitives, PBundle: TransactionPolicies>
                 }
             };
 
-            if is_eth68_message {
-                if let Some((actual_ty_byte, _)) = *metadata_ref_mut {
-                    if let Ok(parsed_tx_type) = TxType::try_from(actual_ty_byte) {
-                        tx_types_counter.increase_by_tx_type(parsed_tx_type);
-                    }
-                }
-            }
+            if is_eth68_message &&
+                let Some((actual_ty_byte, _)) = *metadata_ref_mut &&
+                let Ok(parsed_tx_type) = TxType::try_from(actual_ty_byte)
+            {
+                tx_types_counter.increase_by_tx_type(parsed_tx_type);
+            }
 
             let decision = self
                 .policies

---- next file ----

@@ -280,19 +280,19 @@ where
     Client: BlockClient,
 {
     fn poll(&mut self, cx: &mut Context<'_>) -> Poll<ResponseResult<Client::Header, Client::Body>> {
-        if let Some(fut) = Pin::new(&mut self.header).as_pin_mut() {
-            if let Poll::Ready(res) = fut.poll(cx) {
-                self.header = None;
-                return Poll::Ready(ResponseResult::Header(res))
-            }
+        if let Some(fut) = Pin::new(&mut self.header).as_pin_mut() &&
+            let Poll::Ready(res) = fut.poll(cx)
+        {
+            self.header = None;
+            return Poll::Ready(ResponseResult::Header(res))
         }
 
-        if let Some(fut) = Pin::new(&mut self.body).as_pin_mut() {
-            if let Poll::Ready(res) = fut.poll(cx) {
-                self.body = None;
-                return Poll::Ready(ResponseResult::Body(res))
-            }
+        if let Some(fut) = Pin::new(&mut self.body).as_pin_mut() &&
+            let Poll::Ready(res) = fut.poll(cx)
+        {
+            self.body = None;
+            return Poll::Ready(ResponseResult::Body(res))
         }
 
         Poll::Pending
     }
@@ -621,19 +621,19 @@ where
         &mut self,
         cx: &mut Context<'_>,
     ) -> Poll<RangeResponseResult<Client::Header, Client::Body>> {
-        if let Some(fut) = Pin::new(&mut self.headers).as_pin_mut() {
-            if let Poll::Ready(res) = fut.poll(cx) {
-                self.headers = None;
-                return Poll::Ready(RangeResponseResult::Header(res))
-            }
+        if let Some(fut) = Pin::new(&mut self.headers).as_pin_mut() &&
+            let Poll::Ready(res) = fut.poll(cx)
+        {
+            self.headers = None;
+            return Poll::Ready(RangeResponseResult::Header(res))
        }
 
-        if let Some(fut) = Pin::new(&mut self.bodies).as_pin_mut() {
-            if let Poll::Ready(res) = fut.poll(cx) {
-                self.bodies = None;
-                return Poll::Ready(RangeResponseResult::Body(res))
-            }
+        if let Some(fut) = Pin::new(&mut self.bodies).as_pin_mut() &&
+            let Poll::Ready(res) = fut.poll(cx)
+        {
+            self.bodies = None;
+            return Poll::Ready(RangeResponseResult::Body(res))
         }
 
         Poll::Pending
     }

---- next file ----

@@ -63,12 +63,12 @@ impl NodeRecord {
     /// See also [`std::net::Ipv6Addr::to_ipv4_mapped`]
     pub fn convert_ipv4_mapped(&mut self) -> bool {
         // convert IPv4 mapped IPv6 address
-        if let IpAddr::V6(v6) = self.address {
-            if let Some(v4) = v6.to_ipv4_mapped() {
-                self.address = v4.into();
-                return true
-            }
+        if let IpAddr::V6(v6) = self.address &&
+            let Some(v4) = v6.to_ipv4_mapped()
+        {
+            self.address = v4.into();
+            return true
         }
         false
     }

---- next file ----

@@ -956,9 +956,11 @@
     where
         T: FullNodeTypes<Provider: StaticFileProviderFactory>,
     {
-        if self.node_config().pruning.bodies_pre_merge {
-            if let Some(merge_block) =
-                self.chain_spec().ethereum_fork_activation(EthereumHardfork::Paris).block_number()
+        if self.node_config().pruning.bodies_pre_merge &&
+            let Some(merge_block) = self
+                .chain_spec()
+                .ethereum_fork_activation(EthereumHardfork::Paris)
+                .block_number()
         {
             // Ensure we only expire transactions after we synced past the merge block.
             let Some(latest) = self.blockchain_db().latest_header()? else { return Ok(()) };
@@ -975,7 +977,6 @@
                 }
             }
         }
-        }
 
         Ok(())
     }

---- next file ----

@@ -181,15 +181,15 @@ where
         let response =
             timeout(READ_TIMEOUT, conn.read_json()).await.map_err(|_| EthStatsError::Timeout)??;
 
-        if let Some(ack) = response.get("emit") {
-            if ack.get(0) == Some(&Value::String("ready".to_string())) {
-                info!(
-                    target: "ethstats",
-                    "Login successful to EthStats server as node_id {}", self.credentials.node_id
-                );
-                return Ok(());
-            }
+        if let Some(ack) = response.get("emit") &&
+            ack.get(0) == Some(&Value::String("ready".to_string()))
+        {
+            info!(
+                target: "ethstats",
+                "Login successful to EthStats server as node_id {}", self.credentials.node_id
+            );
+            return Ok(());
         }
 
         debug!(target: "ethstats", "Login failed: Unauthorized or unexpected login response");
         Err(EthStatsError::AuthError("Unauthorized or unexpected login response".into()))
@@ -595,12 +595,12 @@ where
         tokio::spawn(async move {
             loop {
                 let head = canonical_stream.next().await;
-                if let Some(head) = head {
-                    if head_tx.send(head).await.is_err() {
-                        break;
-                    }
+                if let Some(head) = head &&
+                    head_tx.send(head).await.is_err()
+                {
+                    break;
                 }
             }
             let _ = shutdown_tx.send(()).await;
         })
@@ -681,12 +681,12 @@ where
     /// Attempts to close the connection cleanly and logs any errors
     /// that occur during the process.
     async fn disconnect(&self) {
-        if let Some(conn) = self.conn.write().await.take() {
-            if let Err(e) = conn.close().await {
-                debug!(target: "ethstats", "Error closing connection: {}", e);
-            }
+        if let Some(conn) = self.conn.write().await.take() &&
+            let Err(e) = conn.close().await
+        {
+            debug!(target: "ethstats", "Error closing connection: {}", e);
         }
     }
 
     /// Test helper to check connection status
     #[cfg(test)]
@@ -733,16 +733,13 @@ mod tests {
             // Handle ping
             while let Some(Ok(msg)) = ws_stream.next().await {
-                if let Message::Text(text) = msg {
-                    if text.contains("node-ping") {
-                        let pong = json!({
-                            "emit": ["node-pong", {"id": "test-node"}]
-                        });
-                        ws_stream
-                            .send(Message::Text(Utf8Bytes::from(pong.to_string())))
-                            .await
-                            .unwrap();
-                    }
+                if let Message::Text(text) = msg &&
+                    text.contains("node-ping")
+                {
+                    let pong = json!({
+                        "emit": ["node-pong", {"id": "test-node"}]
+                    });
+                    ws_stream.send(Message::Text(Utf8Bytes::from(pong.to_string()))).await.unwrap();
                 }
             }
         });

---- next file ----

@@ -13,8 +13,10 @@ fn main() {
     // Enable backtraces unless a RUST_BACKTRACE value has already been explicitly provided.
     if std::env::var_os("RUST_BACKTRACE").is_none() {
-        std::env::set_var("RUST_BACKTRACE", "1");
+        unsafe {
+            std::env::set_var("RUST_BACKTRACE", "1");
+        }
     }
 
     if let Err(err) =
         Cli::<OpChainSpecParser, RollupArgs>::parse().run(async move |builder, rollup_args| {

---- next file ----

@@ -459,11 +459,12 @@ impl OpGenesisInfo {
                 .unwrap_or_default(),
             ..Default::default()
         };
-        if let Some(optimism_base_fee_info) = &info.optimism_chain_info.base_fee_info {
-            if let (Some(elasticity), Some(denominator)) = (
-                optimism_base_fee_info.eip1559_elasticity,
-                optimism_base_fee_info.eip1559_denominator,
-            ) {
+        if let Some(optimism_base_fee_info) = &info.optimism_chain_info.base_fee_info &&
+            let (Some(elasticity), Some(denominator)) = (
+                optimism_base_fee_info.eip1559_elasticity,
+                optimism_base_fee_info.eip1559_denominator,
+            )
+        {
             let base_fee_params = if let Some(canyon_denominator) =
                 optimism_base_fee_info.eip1559_denominator_canyon
             {
@@ -486,7 +487,6 @@
             info.base_fee_params = base_fee_params;
         }
-        }
 
         info
     }
@@ -498,9 +498,10 @@ pub fn make_op_genesis_header(genesis: &Genesis, hardforks: &ChainHardforks) ->
     // If Isthmus is active, overwrite the withdrawals root with the storage root of predeploy
     // `L2ToL1MessagePasser.sol`
-    if hardforks.fork(OpHardfork::Isthmus).active_at_timestamp(header.timestamp) {
-        if let Some(predeploy) = genesis.alloc.get(&ADDRESS_L2_TO_L1_MESSAGE_PASSER) {
-            if let Some(storage) = &predeploy.storage {
+    if hardforks.fork(OpHardfork::Isthmus).active_at_timestamp(header.timestamp) &&
+        let Some(predeploy) = genesis.alloc.get(&ADDRESS_L2_TO_L1_MESSAGE_PASSER) &&
+        let Some(storage) = &predeploy.storage
+    {
         header.withdrawals_root =
             Some(storage_root_unhashed(storage.iter().filter_map(|(k, v)| {
                 if v.is_zero() {
@@ -510,8 +511,6 @@ pub fn make_op_genesis_header(genesis: &Genesis, hardforks: &ChainHardforks) ->
                 }
             })));
     }
-        }
-    }
 
     header
 }

---- next file ----

@@ -141,12 +141,11 @@ where
     // Ensure that receipts hasn't been initialized apart from `init_genesis`.
     if let Some(num_receipts) =
-        static_file_provider.get_highest_static_file_tx(StaticFileSegment::Receipts)
+        static_file_provider.get_highest_static_file_tx(StaticFileSegment::Receipts) &&
+        num_receipts > 0
     {
-        if num_receipts > 0 {
-            eyre::bail!("Expected no receipts in storage, but found {num_receipts}.");
-        }
+        eyre::bail!("Expected no receipts in storage, but found {num_receipts}.");
     }
     match static_file_provider.get_highest_static_file_block(StaticFileSegment::Receipts) {
         Some(receipts_block) => {
             if receipts_block > 0 {

---- next file ----

@@ -303,7 +303,7 @@ mod tests {
         // Verify deposit transaction
         let deposit_tx = match &deposit_decoded.transaction {
-            OpTypedTransaction::Legacy(ref tx) => tx,
+            OpTypedTransaction::Legacy(tx) => tx,
             _ => panic!("Expected legacy transaction for NFT deposit"),
         };
@@ -345,7 +345,7 @@ mod tests {
         assert!(system_decoded.is_legacy());
         let system_tx = match &system_decoded.transaction {
-            OpTypedTransaction::Legacy(ref tx) => tx,
+            OpTypedTransaction::Legacy(tx) => tx,
            _ => panic!("Expected Legacy transaction"),
         };

---- next file ----

@@ -93,14 +93,15 @@ pub fn validate_block_post_execution<R: DepositReceipt>(
     // operation as hashing that is required for state root got calculated in every
     // transaction This was replaced with is_success flag.
     // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658
-    if chain_spec.is_byzantium_active_at_block(header.number()) {
-        if let Err(error) = verify_receipts_optimism(
-            header.receipts_root(),
-            header.logs_bloom(),
-            receipts,
-            chain_spec,
-            header.timestamp(),
-        ) {
+    if chain_spec.is_byzantium_active_at_block(header.number()) &&
+        let Err(error) = verify_receipts_optimism(
+            header.receipts_root(),
+            header.logs_bloom(),
+            receipts,
+            chain_spec,
+            header.timestamp(),
+        )
+    {
         let receipts = receipts
             .iter()
             .map(|r| Bytes::from(r.with_bloom_ref().encoded_2718()))
@@ -108,7 +109,6 @@ pub fn validate_block_post_execution<R: DepositReceipt>(
         tracing::debug!(%error, ?receipts, "receipts verification failed");
         return Err(error)
     }
-    }
 
     // Check if gas used matches the value set in header.
     let cumulative_gas_used =

---- next file ----

@@ -107,7 +107,12 @@ where
     /// Returns `None` if the flashblock have no `base` or the base is not a child block of latest.
     fn build_args(
         &mut self,
-    ) -> Option<BuildArgs<impl IntoIterator<Item = WithEncoded<Recovered<N::SignedTx>>>>> {
+    ) -> Option<
+        BuildArgs<
+            impl IntoIterator<Item = WithEncoded<Recovered<N::SignedTx>>>
+                + use<N, S, EvmConfig, Provider>,
+        >,
+    > {
         let Some(base) = self.blocks.payload_base() else {
             trace!(
                 flashblock_number = ?self.blocks.block_number(),
@@ -119,12 +124,12 @@
         };
 
         // attempt an initial consecutive check
-        if let Some(latest) = self.builder.provider().latest_header().ok().flatten() {
-            if latest.hash() != base.parent_hash {
-                trace!(flashblock_parent=?base.parent_hash, flashblock_number=base.block_number, local_latest=?latest.num_hash(), "Skipping non consecutive build attempt");
-                return None;
-            }
+        if let Some(latest) = self.builder.provider().latest_header().ok().flatten() &&
+            latest.hash() != base.parent_hash
+        {
+            trace!(flashblock_parent=?base.parent_hash, flashblock_number=base.block_number, local_latest=?latest.num_hash(), "Skipping non consecutive build attempt");
+            return None;
         }
 
         Some(BuildArgs {
             base,
@@ -244,8 +249,8 @@ where
             let fut = this.canon_receiver.recv();
             pin!(fut);
             fut.poll_unpin(cx)
-        } {
-            if let Some(current) = this.on_new_tip(state) {
+        } && let Some(current) = this.on_new_tip(state)
+        {
             trace!(
                 parent_hash = %current.block().parent_hash(),
                 block_number = current.block().number(),
@@ -254,7 +259,6 @@ where
             return Poll::Ready(Some(Ok(None)))
         }
-        }
 
         if !this.rebuild && this.current.is_some() {
             return Poll::Pending

---- next file ----

@@ -690,12 +690,12 @@ where
             // We skip invalid cross chain txs, they would be removed on the next block update in
             // the maintenance job
-            if let Some(interop) = interop {
-                if !is_valid_interop(interop, self.config.attributes.timestamp()) {
-                    best_txs.mark_invalid(tx.signer(), tx.nonce());
-                    continue
-                }
+            if let Some(interop) = interop &&
+                !is_valid_interop(interop, self.config.attributes.timestamp())
+            {
+                best_txs.mark_invalid(tx.signer(), tx.nonce());
+                continue
             }
 
             // check if the job was cancelled, if so we can exit early
             if self.cancel.is_cancelled() {
                 return Ok(Some(()))

---- next file ----

@@ -108,12 +108,11 @@ where
                 if let Some(notification) = canonical_notification {
                     let chain = notification.committed();
                     for block in chain.blocks_iter() {
-                        if block.body().contains_transaction(&hash) {
-                            if let Some(receipt) = this.transaction_receipt(hash).await? {
-                                return Ok(receipt);
-                            }
+                        if block.body().contains_transaction(&hash)
+                            && let Some(receipt) = this.transaction_receipt(hash).await? {
+                            return Ok(receipt);
                         }
                     }
                 } else {
                     // Canonical stream ended
                     break;
@@ -130,15 +129,14 @@ where
                 // Check flashblocks for faster confirmation (Optimism-specific)
                 if let Ok(Some(pending_block)) = this.pending_flashblock() {
                     let block_and_receipts = pending_block.into_block_and_receipts();
-                    if block_and_receipts.block.body().contains_transaction(&hash) {
-                        if let Some(receipt) = this.transaction_receipt(hash).await? {
-                            return Ok(receipt);
-                        }
+                    if block_and_receipts.block.body().contains_transaction(&hash)
+                        && let Some(receipt) = this.transaction_receipt(hash).await? {
+                        return Ok(receipt);
                     }
                 }
             }
         }
-        }
 
         Err(Self::Error::from_eth_err(EthApiError::TransactionConfirmationTimeout {
             hash,
             duration: timeout_duration,

View File

@@ -587,25 +587,26 @@ where
 let this = self.get_mut();
 // check if there is a better payload before returning the best payload
-if let Some(fut) = Pin::new(&mut this.maybe_better).as_pin_mut() {
-    if let Poll::Ready(res) = fut.poll(cx) {
-        this.maybe_better = None;
-        if let Ok(Some(payload)) = res.map(|out| out.into_payload())
-            .inspect_err(|err| warn!(target: "payload_builder", %err, "failed to resolve pending payload"))
+if let Some(fut) = Pin::new(&mut this.maybe_better).as_pin_mut() &&
+    let Poll::Ready(res) = fut.poll(cx)
 {
+    this.maybe_better = None;
+    if let Ok(Some(payload)) = res.map(|out| out.into_payload()).inspect_err(
+        |err| warn!(target: "payload_builder", %err, "failed to resolve pending payload"),
+    ) {
         debug!(target: "payload_builder", "resolving better payload");
         return Poll::Ready(Ok(payload))
     }
 }
-}
 if let Some(best) = this.best_payload.take() {
     debug!(target: "payload_builder", "resolving best payload");
     return Poll::Ready(Ok(best))
 }
-if let Some(fut) = Pin::new(&mut this.empty_payload).as_pin_mut() {
-    if let Poll::Ready(res) = fut.poll(cx) {
+if let Some(fut) = Pin::new(&mut this.empty_payload).as_pin_mut() &&
+    let Poll::Ready(res) = fut.poll(cx)
+{
     this.empty_payload = None;
     return match res {
         Ok(res) => {
@@ -619,7 +620,6 @@ where
         Err(err) => Poll::Ready(Err(err.into())),
     }
 }
-}
 if this.is_empty() {
     return Poll::Ready(Err(PayloadBuilderError::MissingPayload))

View File

@@ -305,11 +305,11 @@ where
 ) -> Option<PayloadFuture<T::BuiltPayload>> {
     debug!(target: "payload_builder", %id, "resolving payload job");
-    if let Some((cached, _, payload)) = &*self.cached_payload_rx.borrow() {
-        if *cached == id {
+    if let Some((cached, _, payload)) = &*self.cached_payload_rx.borrow() &&
+        *cached == id
+    {
         return Some(Box::pin(core::future::ready(Ok(payload.clone()))));
     }
-    }
     let job = self.payload_jobs.iter().position(|(_, job_id)| *job_id == id)?;
     let (fut, keep_alive) = self.payload_jobs[job].0.resolve_kind(kind);
@@ -356,11 +356,11 @@ where
 {
     /// Returns the payload timestamp for the given payload.
     fn payload_timestamp(&self, id: PayloadId) -> Option<Result<u64, PayloadBuilderError>> {
-        if let Some((cached_id, timestamp, _)) = *self.cached_payload_rx.borrow() {
-            if cached_id == id {
+        if let Some((cached_id, timestamp, _)) = *self.cached_payload_rx.borrow() &&
+            cached_id == id
+        {
             return Some(Ok(timestamp));
         }
-        }
         let timestamp = self
             .payload_jobs

View File

@@ -48,8 +48,8 @@ where
 // data. If the TransactionLookup checkpoint is lagging behind (which can happen e.g. when
 // pre-merge history is dropped and then later tx lookup pruning is enabled) then we can
 // only prune from the tx checkpoint and onwards.
-if let Some(txs_checkpoint) = provider.get_prune_checkpoint(PruneSegment::Transactions)? {
-    if input
+if let Some(txs_checkpoint) = provider.get_prune_checkpoint(PruneSegment::Transactions)? &&
+    input
         .previous_checkpoint
         .is_none_or(|checkpoint| checkpoint.block_number < txs_checkpoint.block_number)
 {
@@ -60,7 +60,6 @@ where
     "No TransactionLookup checkpoint found, using Transactions checkpoint as fallback"
     );
 }
-}
 let (start, end) = match input.get_next_tx_num_range(provider)? {
     Some(range) => range,

View File

@@ -96,14 +96,13 @@ impl ReceiptsLogPruneConfig {
 let mut lowest = None;
 for mode in self.values() {
-    if mode.is_distance() {
-        if let Some((block, _)) =
+    if mode.is_distance() &&
+        let Some((block, _)) =
             mode.prune_target_block(tip, PruneSegment::ContractLogs, PrunePurpose::User)?
     {
         lowest = Some(lowest.unwrap_or(u64::MAX).min(block));
     }
 }
-}
 Ok(lowest.map(|lowest| lowest.max(pruned_block)))
 }

View File

@@ -120,20 +120,16 @@ where
 let mut executed = self.pending_state.executed_block(&ancestor_hash);
 // If it's not present, attempt to lookup invalid block.
-if executed.is_none() {
-    if let Some(invalid) =
+if executed.is_none() &&
+    let Some(invalid) =
         self.pending_state.invalid_recovered_block(&ancestor_hash)
 {
     trace!(target: "reth::ress_provider", %block_hash, %ancestor_hash, "Using invalid ancestor block for witness construction");
     executed = Some(ExecutedBlockWithTrieUpdates {
-        block: ExecutedBlock {
-            recovered_block: invalid,
-            ..Default::default()
-        },
+        block: ExecutedBlock { recovered_block: invalid, ..Default::default() },
         trie: ExecutedTrieUpdates::empty(),
     });
 }
-}
 let Some(executed) = executed else {
     return Err(ProviderError::StateForHashNotFound(ancestor_hash))

View File

@@ -144,13 +144,13 @@ where
 {
     // set permissions only on unix
     use std::os::unix::fs::PermissionsExt;
-    if let Some(perms_str) = &self.cfg.ipc_socket_permissions {
-        if let Ok(mode) = u32::from_str_radix(&perms_str.replace("0o", ""), 8) {
+    if let Some(perms_str) = &self.cfg.ipc_socket_permissions &&
+        let Ok(mode) = u32::from_str_radix(&perms_str.replace("0o", ""), 8)
+    {
         let perms = std::fs::Permissions::from_mode(mode);
         let _ = std::fs::set_permissions(&self.endpoint, perms);
     }
 }
-}
 listener
 }
 Err(err) => {
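For reference, the permission handling above parses an octal literal such as "0o660" by stripping the prefix and radix-8 parsing. A small Unix-only sketch (the "0o660" input is hypothetical):

    use std::os::unix::fs::PermissionsExt;

    fn main() {
        let perms_str = "0o660";
        if let Ok(mode) = u32::from_str_radix(&perms_str.replace("0o", ""), 8) {
            let perms = std::fs::Permissions::from_mode(mode);
            assert_eq!(perms.mode() & 0o777, 0o660);
        }
    }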

View File

@@ -572,11 +572,10 @@ where
 // > Client software MUST NOT return trailing null values if the request extends past the current latest known block.
 // truncate the end if it's greater than the last block
-if let Ok(best_block) = inner.provider.best_block_number() {
-    if end > best_block {
+if let Ok(best_block) = inner.provider.best_block_number()
+    && end > best_block {
     end = best_block;
 }
-}
 for num in start..=end {
     let block_result = inner.provider.block(BlockHashOrNumber::Number(num));

View File

@@ -195,9 +195,8 @@ pub trait EthBlocks:
 }
 if let Some(block_hash) =
-    self.provider().block_hash_for_id(block_id).map_err(Self::Error::from_eth_err)?
-{
-    if let Some((block, receipts)) = self
+    self.provider().block_hash_for_id(block_id).map_err(Self::Error::from_eth_err)? &&
+    let Some((block, receipts)) = self
         .cache()
         .get_block_and_receipts(block_hash)
         .await
@@ -205,7 +204,6 @@ pub trait EthBlocks:
 {
     return Ok(Some((block, receipts)));
 }
-}
 Ok(None)
 }

View File

@@ -122,14 +122,11 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA
 if let Some(block_overrides) = block_overrides {
     // ensure we don't allow uncapped gas limit per block
-    if let Some(gas_limit_override) = block_overrides.gas_limit {
-        if gas_limit_override > evm_env.block_env.gas_limit &&
+    if let Some(gas_limit_override) = block_overrides.gas_limit &&
+        gas_limit_override > evm_env.block_env.gas_limit &&
         gas_limit_override > this.call_gas_limit()
     {
-        return Err(
-            EthApiError::other(EthSimulateError::GasLimitReached).into()
-        )
-    }
+        return Err(EthApiError::other(EthSimulateError::GasLimitReached).into())
     }
     apply_block_overrides(block_overrides, &mut db, &mut evm_env.block_env);
 }

View File

@@ -115,8 +115,9 @@ where
 let mut config = EthConfig { current, next: None, last: None };
-if let Some(last_fork_idx) = current_fork_idx.checked_sub(1) {
-    if let Some(last_fork_timestamp) = fork_timestamps.get(last_fork_idx).copied() {
+if let Some(last_fork_idx) = current_fork_idx.checked_sub(1) &&
+    let Some(last_fork_timestamp) = fork_timestamps.get(last_fork_idx).copied()
+{
     let fake_header = {
         let mut header = latest.clone();
         header.timestamp = last_fork_timestamp;
@@ -128,7 +129,6 @@ where
     config.last = self.build_fork_config_at(last_fork_timestamp, last_precompiles);
 }
-}
 if let Some(next_fork_timestamp) = fork_timestamps.get(current_fork_idx + 1).copied() {
     let fake_header = {
View File

@@ -88,14 +88,14 @@ pub trait EstimateCall: Call {
 let mut tx_env = self.create_txn_env(&evm_env, request, &mut db)?;
 // Check if this is a basic transfer (no input data to account with no code)
-let mut is_basic_transfer = false;
-if tx_env.input().is_empty() {
-    if let TxKind::Call(to) = tx_env.kind() {
-        if let Ok(code) = db.db.account_code(&to) {
-            is_basic_transfer = code.map(|code| code.is_empty()).unwrap_or(true);
-        }
-    }
-}
+let is_basic_transfer = if tx_env.input().is_empty() &&
+    let TxKind::Call(to) = tx_env.kind() &&
+    let Ok(code) = db.db.account_code(&to)
+{
+    code.map(|code| code.is_empty()).unwrap_or(true)
+} else {
+    false
+};
 // Check funds of the sender (only useful to check if transaction gas price is more than 0).
 //
@@ -123,12 +123,12 @@ pub trait EstimateCall: Call {
 min_tx_env.set_gas_limit(MIN_TRANSACTION_GAS);
 // Reuse the same EVM instance
-if let Ok(res) = evm.transact(min_tx_env).map_err(Self::Error::from_evm_err) {
-    if res.result.is_success() {
+if let Ok(res) = evm.transact(min_tx_env).map_err(Self::Error::from_evm_err) &&
+    res.result.is_success()
+{
     return Ok(U256::from(MIN_TRANSACTION_GAS))
 }
 }
-}
 trace!(target: "rpc::eth::estimate", ?tx_env, gas_limit = tx_env.gas_limit(), is_basic_transfer, "Starting gas estimation");

View File

@@ -109,11 +109,11 @@ pub trait EthFees:
 // need to validate that they are monotonically
 // increasing and 0 <= p <= 100
 // Note: The types used ensure that the percentiles are never < 0
-if let Some(percentiles) = &reward_percentiles {
-    if percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.) {
+if let Some(percentiles) = &reward_percentiles &&
+    percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.)
+{
     return Err(EthApiError::InvalidRewardPercentiles.into())
 }
-}
 // Fetch the headers and ensure we got all of them
 //
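The percentile validation relies on `windows(2)` to compare each adjacent pair exactly once; a quick sketch of that check in isolation:

    fn main() {
        let invalid = |p: &[f64]| p.windows(2).any(|w| w[0] > w[1] || w[0] > 100.);
        assert!(!invalid(&[10.0, 50.0, 90.0])); // monotonically increasing
        assert!(invalid(&[10.0, 5.0, 90.0]));   // 10 > 5: not monotonic
    }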

View File

@@ -72,8 +72,8 @@ pub trait LoadPendingBlock:
 >,
 Self::Error,
 > {
-    if let Some(block) = self.provider().pending_block().map_err(Self::Error::from_eth_err)? {
-        if let Some(receipts) = self
+    if let Some(block) = self.provider().pending_block().map_err(Self::Error::from_eth_err)? &&
+        let Some(receipts) = self
             .provider()
             .receipts_by_block(block.hash().into())
             .map_err(Self::Error::from_eth_err)?
@@ -88,7 +88,6 @@ pub trait LoadPendingBlock:
         PendingBlockEnvOrigin::ActualPending(Arc::new(block), Arc::new(receipts)),
         ));
     }
-    }
     // no pending block from the CL yet, so we use the latest block and modify the env
     // values that we can
@@ -309,8 +308,9 @@ pub trait LoadPendingBlock:
 // There's only limited amount of blob space available per block, so we need to
 // check if the EIP-4844 can still fit in the block
-if let Some(tx_blob_gas) = tx.blob_gas_used() {
-    if sum_blob_gas_used + tx_blob_gas > blob_params.max_blob_gas_per_block() {
+if let Some(tx_blob_gas) = tx.blob_gas_used() &&
+    sum_blob_gas_used + tx_blob_gas > blob_params.max_blob_gas_per_block()
+{
     // we can't fit this _blob_ transaction into the block, so we mark it as
     // invalid, which removes its dependent transactions from
     // the iterator. This is similar to the gas limit condition
@@ -324,7 +324,6 @@ pub trait LoadPendingBlock:
     );
     continue
 }
-}
 let gas_used = match builder.execute_transaction(tx.clone()) {
     Ok(gas_used) => gas_used,

View File

@@ -221,11 +221,11 @@ pub trait LoadState:
 Self: SpawnBlocking,
 {
     async move {
-        if at.is_pending() {
-            if let Ok(Some(state)) = self.local_pending_state().await {
+        if at.is_pending() &&
+            let Ok(Some(state)) = self.local_pending_state().await
+        {
             return Ok(state)
         }
-        }
         self.provider().state_by_block_id(at).map_err(Self::Error::from_eth_err)
     }

View File

@@ -97,13 +97,13 @@ pub trait EthTransactions: LoadTransaction<Provider: BlockReaderIdExt> {
 while let Some(notification) = stream.next().await {
     let chain = notification.committed();
     for block in chain.blocks_iter() {
-        if block.body().contains_transaction(&hash) {
-            if let Some(receipt) = this.transaction_receipt(hash).await? {
+        if block.body().contains_transaction(&hash) &&
+            let Some(receipt) = this.transaction_receipt(hash).await?
+        {
             return Ok(receipt);
         }
     }
 }
-}
 Err(Self::Error::from_eth_err(TransactionConfirmationTimeout {
     hash,
     duration: timeout_duration,
@@ -299,14 +299,13 @@ pub trait EthTransactions: LoadTransaction<Provider: BlockReaderIdExt> {
 {
     async move {
         // Check the pool first
-        if include_pending {
-            if let Some(tx) =
+        if include_pending &&
+            let Some(tx) =
                 RpcNodeCore::pool(self).get_transaction_by_sender_and_nonce(sender, nonce)
         {
             let transaction = tx.transaction.clone_into_consensus();
             return Ok(Some(self.tx_resp_builder().fill_pending(transaction)?));
         }
-        }
         // Check if the sender is a contract
         if !self.get_code(sender, None).await?.is_empty() {
@@ -375,11 +374,11 @@ pub trait EthTransactions: LoadTransaction<Provider: BlockReaderIdExt> {
 Self: LoadBlock,
 {
     async move {
-        if let Some(block) = self.recovered_block(block_id).await? {
-            if let Some(tx) = block.body().transactions().get(index) {
+        if let Some(block) = self.recovered_block(block_id).await? &&
+            let Some(tx) = block.body().transactions().get(index)
+        {
             return Ok(Some(tx.encoded_2718().into()))
         }
-        }
         Ok(None)
     }

View File

@@ -100,12 +100,12 @@ where
 {
     let size = value.size();
-    if self.cache.limiter().is_over_the_limit(self.cache.len() + 1) {
-        if let Some((_, evicted)) = self.cache.pop_oldest() {
+    if self.cache.limiter().is_over_the_limit(self.cache.len() + 1) &&
+        let Some((_, evicted)) = self.cache.pop_oldest()
+    {
         // update tracked memory with the evicted value
         self.memory_usage = self.memory_usage.saturating_sub(evicted.size());
     }
-    }
     if self.cache.insert(key, value) {
         self.memory_usage = self.memory_usage.saturating_add(size);
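The eviction above pairs the capacity check and the pop in one chain, then keeps the byte counter honest with saturating arithmetic. A toy model of the same bookkeeping (`VecDeque` standing in for the real LRU type, which is not shown here):

    use std::collections::VecDeque;

    fn main() {
        let mut cache: VecDeque<Vec<u8>> = VecDeque::new();
        let mut memory_usage = 0usize;
        let limit = 2;
        for value in [vec![0u8; 8], vec![0u8; 16], vec![0u8; 32]] {
            // Evict before inserting, mirroring the chain above.
            if cache.len() + 1 > limit && let Some(evicted) = cache.pop_front() {
                memory_usage = memory_usage.saturating_sub(evicted.len());
            }
            memory_usage = memory_usage.saturating_add(value.len());
            cache.push_back(value);
        }
        assert_eq!(memory_usage, 16 + 32);
    }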

View File

@@ -234,15 +234,15 @@ pub async fn fee_history_cache_new_blocks_task<St, Provider, N>(
 let mut fetch_missing_block = Fuse::terminated();
 loop {
-    if fetch_missing_block.is_terminated() {
-        if let Some(block_number) = missing_blocks.pop_front() {
+    if fetch_missing_block.is_terminated() &&
+        let Some(block_number) = missing_blocks.pop_front()
+    {
         trace!(target: "rpc::fee", ?block_number, "Fetching missing block for fee history cache");
         if let Ok(Some(hash)) = provider.block_hash(block_number) {
             // fetch missing block
             fetch_missing_block = cache.get_block_and_receipts(hash).boxed().fuse();
         }
     }
-    }
     let chain_spec = provider.chain_spec();

View File

@@ -204,11 +204,11 @@ where
 };
 // constrain to the max price
-if let Some(max_price) = self.oracle_config.max_price {
-    if price > max_price {
+if let Some(max_price) = self.oracle_config.max_price &&
+    price > max_price
+{
     price = max_price;
 }
-}
 inner.last_price = GasPriceOracleResult { block_hash: header.hash(), price };
@@ -254,11 +254,11 @@ where
 };
 // ignore transactions with a tip under the configured threshold
-if let Some(ignore_under) = self.ignore_price {
-    if effective_tip < Some(ignore_under) {
+if let Some(ignore_under) = self.ignore_price &&
+    effective_tip < Some(ignore_under)
+{
     continue
 }
-}
 // check if the sender was the coinbase, if so, ignore
 if tx.signer() == block.beneficiary() {
@@ -338,11 +338,11 @@ where
 }
 // constrain to the max price
-if let Some(max_price) = self.oracle_config.max_price {
-    if suggestion > max_price {
+if let Some(max_price) = self.oracle_config.max_price &&
+    suggestion > max_price
+{
     suggestion = max_price;
 }
-}
 inner.last_price = GasPriceOracleResult { block_hash: header.hash(), price: suggestion };

View File

@@ -501,12 +501,12 @@ where
 .transpose()?
 .flatten();
-if let Some(f) = from {
-    if f > info.best_number {
+if let Some(f) = from &&
+    f > info.best_number
+{
     // start block higher than local head, can return empty
     return Ok(Vec::new());
 }
-}
 let (from_block_number, to_block_number) =
     logs_utils::get_filter_block_range(from, to, start_block, info);
@@ -658,8 +658,10 @@ where
 // size check but only if range is multiple blocks, so we always return all
 // logs of a single block
 let is_multi_block_range = from_block != to_block;
-if let Some(max_logs_per_response) = limits.max_logs_per_response {
-    if is_multi_block_range && all_logs.len() > max_logs_per_response {
+if let Some(max_logs_per_response) = limits.max_logs_per_response &&
+    is_multi_block_range &&
+    all_logs.len() > max_logs_per_response
+{
     debug!(
         target: "rpc::eth::filter",
         logs_found = all_logs.len(),
@@ -675,7 +677,6 @@ where
     });
 }
 }
-}
 Ok(all_logs)
 }

View File

@@ -490,15 +490,15 @@ where
 let mut maybe_traces =
     maybe_traces.map(|traces| traces.into_iter().flatten().collect::<Vec<_>>());
-if let (Some(block), Some(traces)) = (maybe_block, maybe_traces.as_mut()) {
-    if let Some(base_block_reward) = self.calculate_base_block_reward(block.header())? {
+if let (Some(block), Some(traces)) = (maybe_block, maybe_traces.as_mut()) &&
+    let Some(base_block_reward) = self.calculate_base_block_reward(block.header())?
+{
     traces.extend(self.extract_reward_traces(
         block.header(),
         block.body().ommers(),
         base_block_reward,
     ));
 }
-}
 Ok(maybe_traces)
 }

View File

@@ -143,13 +143,13 @@ where
 if self.disallow.contains(sender) {
     return Err(ValidationApiError::Blacklist(*sender))
 }
-if let Some(to) = tx.to() {
-    if self.disallow.contains(&to) {
+if let Some(to) = tx.to() &&
+    self.disallow.contains(&to)
+{
     return Err(ValidationApiError::Blacklist(to))
 }
 }
 }
-}
 let latest_header =
     self.provider.latest_header()?.ok_or_else(|| ValidationApiError::MissingLatestBlock)?;
@@ -334,11 +334,11 @@ where
     return Err(ValidationApiError::ProposerPayment)
 }
-if let Some(block_base_fee) = block.header().base_fee_per_gas() {
-    if tx.effective_tip_per_gas(block_base_fee).unwrap_or_default() != 0 {
+if let Some(block_base_fee) = block.header().base_fee_per_gas() &&
+    tx.effective_tip_per_gas(block_base_fee).unwrap_or_default() != 0
+{
     return Err(ValidationApiError::ProposerPayment)
 }
-}
 Ok(())
 }

View File

@@ -73,8 +73,8 @@ impl<Provider> StageSetBuilder<Provider> {
 fn upsert_stage_state(&mut self, stage: Box<dyn Stage<Provider>>, added_at_index: usize) {
     let stage_id = stage.id();
-    if self.stages.insert(stage.id(), StageEntry { stage, enabled: true }).is_some() {
-        if let Some(to_remove) = self
+    if self.stages.insert(stage.id(), StageEntry { stage, enabled: true }).is_some() &&
+        let Some(to_remove) = self
             .order
             .iter()
             .enumerate()
@@ -84,7 +84,6 @@ impl<Provider> StageSetBuilder<Provider> {
         self.order.remove(to_remove);
     }
 }
-}
 /// Overrides the given [`Stage`], if it is in this set.
 ///
@@ -264,12 +263,12 @@ impl<Provider> StageSetBuilder<Provider> {
 pub fn build(mut self) -> Vec<Box<dyn Stage<Provider>>> {
     let mut stages = Vec::new();
     for id in &self.order {
-        if let Some(entry) = self.stages.remove(id) {
-            if entry.enabled {
+        if let Some(entry) = self.stages.remove(id) &&
+            entry.enabled
+        {
             stages.push(entry.stage);
         }
     }
-    }
     stages
 }
 }

View File

@@ -702,11 +702,10 @@ mod tests {
 // Validate sequentiality only after prev progress,
 // since the data before is mocked and can contain gaps
-if number > prev_progress {
-    if let Some(prev_key) = prev_number {
+if number > prev_progress
+    && let Some(prev_key) = prev_number {
     assert_eq!(prev_key + 1, number, "Body entries must be sequential");
 }
-}
 // Validate that the current entry is below or equals to the highest allowed block
 assert!(

View File

@@ -150,19 +150,18 @@ where
 return Poll::Ready(Ok(()));
 }
-if self.stream.is_none() {
-    if let Some(source) = self.source.clone() {
+if self.stream.is_none() &&
+    let Some(source) = self.source.clone()
+{
     self.stream.replace(source.create(input)?);
 }
-}
-if let Some(stream) = &mut self.stream {
-    if let Some(next) = ready!(stream.poll_next_unpin(cx))
+if let Some(stream) = &mut self.stream &&
+    let Some(next) = ready!(stream.poll_next_unpin(cx))
         .transpose()
         .map_err(|e| StageError::Fatal(e.into()))?
 {
     self.item.replace(next);
 }
-}
 Poll::Ready(Ok(()))
 }
@@ -546,11 +545,10 @@ mod tests {
 // Validate sequentiality only after prev progress,
 // since the data before is mocked and can contain gaps
-if number > prev_progress {
-    if let Some(prev_key) = prev_number {
+if number > prev_progress
+    && let Some(prev_key) = prev_number {
     assert_eq!(prev_key + 1, number, "Body entries must be sequential");
 }
-}
 // Validate that the current entry is below or equals to the highest allowed block
 assert!(

View File

@@ -145,19 +145,18 @@ where
 let mut cursor_header_numbers =
     provider.tx_ref().cursor_write::<RawTable<tables::HeaderNumbers>>()?;
-let mut first_sync = false;
 // If we only have the genesis block hash, then we are at first sync, and we can remove it,
 // add it to the collector and use tx.append on all hashes.
-if provider.tx_ref().entries::<RawTable<tables::HeaderNumbers>>()? == 1 {
-    if let Some((hash, block_number)) = cursor_header_numbers.last()? {
-        if block_number.value()? == 0 {
+let first_sync = if provider.tx_ref().entries::<RawTable<tables::HeaderNumbers>>()? == 1 &&
+    let Some((hash, block_number)) = cursor_header_numbers.last()? &&
+    block_number.value()? == 0
+{
     self.hash_collector.insert(hash.key()?, 0)?;
     cursor_header_numbers.delete_current()?;
-    first_sync = true;
-}
-}
-}
+    true
+} else {
+    false
+};
 // Since ETL sorts all entries by hashes, we are either appending (first sync) or inserting
 // in order (further syncs).

View File

@@ -67,9 +67,9 @@ where
 )
 })
 .transpose()?
-.flatten()
+.flatten() &&
+    target_prunable_block > input.checkpoint().block_number
 {
-    if target_prunable_block > input.checkpoint().block_number {
     input.checkpoint = Some(StageCheckpoint::new(target_prunable_block));
     // Save prune checkpoint only if we don't have one already.
@@ -85,7 +85,6 @@ where
     )?;
 }
 }
-}
 if input.target_reached() {
     return Ok(ExecOutput::done(input.checkpoint()))

View File

@@ -70,9 +70,9 @@ where
 )
 })
 .transpose()?
-.flatten()
+.flatten() &&
+    target_prunable_block > input.checkpoint().block_number
 {
-    if target_prunable_block > input.checkpoint().block_number {
     input.checkpoint = Some(StageCheckpoint::new(target_prunable_block));
     // Save prune checkpoint only if we don't have one already.
@@ -88,7 +88,6 @@ where
     )?;
 }
 }
-}
 if input.target_reached() {
     return Ok(ExecOutput::done(input.checkpoint()))

View File

@@ -88,9 +88,9 @@ where
 )
 })
 .transpose()?
-.flatten()
+.flatten() &&
+    target_prunable_block > input.checkpoint().block_number
 {
-    if target_prunable_block > input.checkpoint().block_number {
     input.checkpoint = Some(StageCheckpoint::new(target_prunable_block));
     // Save prune checkpoint only if we don't have one already.
@@ -111,7 +111,6 @@ where
     )?;
 }
 }
-}
 if input.target_reached() {
     return Ok(ExecOutput::done(input.checkpoint()));
 }
@@ -213,13 +212,13 @@ where
 // Delete all transactions that belong to this block
 for tx_id in body.tx_num_range() {
     // First delete the transaction and hash to id mapping
-    if let Some(transaction) = static_file_provider.transaction_by_id(tx_id)? {
-        if tx_hash_number_cursor.seek_exact(transaction.trie_hash())?.is_some() {
+    if let Some(transaction) = static_file_provider.transaction_by_id(tx_id)? &&
+        tx_hash_number_cursor.seek_exact(transaction.trie_hash())?.is_some()
+    {
         tx_hash_number_cursor.delete_current()?;
     }
 }
 }
-}
 Ok(UnwindOutput {
     checkpoint: StageCheckpoint::new(unwind_to)
@@ -538,12 +537,11 @@ mod tests {
 })
 .transpose()
 .expect("prune target block for transaction lookup")
-.flatten()
+.flatten() &&
+    target_prunable_block > input.checkpoint().block_number
 {
-    if target_prunable_block > input.checkpoint().block_number {
     input.checkpoint = Some(StageCheckpoint::new(target_prunable_block));
 }
-}
 let start_block = input.next_block();
 let end_block = output.checkpoint.block_number;

View File

@@ -156,14 +156,13 @@ where
 // If it's not the first sync, there might an existing shard already, so we need to
 // merge it with the one coming from the collector
-if !append_only {
-    if let Some((_, last_database_shard)) =
+if !append_only &&
+    let Some((_, last_database_shard)) =
         write_cursor.seek_exact(sharded_key_factory(current_partial, u64::MAX))?
 {
     current_list.extend(last_database_shard.iter());
 }
 }
-}
 current_list.extend(new_list.iter());
 load_indices(
@@ -265,11 +264,11 @@ where
 // To be extra safe, we make sure that the last tx num matches the last block from its indices.
 // If not, get it.
 loop {
-    if let Some(indices) = provider.block_body_indices(last_block)? {
-        if indices.last_tx_num() <= last_tx_num {
+    if let Some(indices) = provider.block_body_indices(last_block)? &&
+        indices.last_tx_num() <= last_tx_num
+    {
         break
     }
-    }
     if last_block == 0 {
         break
     }

View File

@@ -23,12 +23,12 @@ pub fn maybe_generate_tests(
 let mut iter = args.into_iter().peekable();
 // we check if there's a crate argument which is used from inside the codecs crate directly
-if let Some(arg) = iter.peek() {
-    if arg.to_string() == "crate" {
+if let Some(arg) = iter.peek() &&
+    arg.to_string() == "crate"
+{
     is_crate = true;
     iter.next();
 }
-}
 for arg in iter {
     if arg.to_string() == "compact" {

View File

@@ -171,29 +171,15 @@ fn load_field_from_segments(
 ///
 /// If so, we use another impl to code/decode its data.
 fn should_use_alt_impl(ftype: &str, segment: &syn::PathSegment) -> bool {
-    if ftype == "Vec" || ftype == "Option" {
-        if let syn::PathArguments::AngleBracketed(ref args) = segment.arguments {
-            if let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = args.args.last() {
-                if let (Some(path), 1) =
-                    (arg_path.path.segments.first(), arg_path.path.segments.len())
-                {
-                    if [
-                        "B256",
-                        "Address",
-                        "Address",
-                        "Bloom",
-                        "TxHash",
-                        "BlockHash",
-                        "CompactPlaceholder",
-                    ]
+    if (ftype == "Vec" || ftype == "Option") &&
+        let syn::PathArguments::AngleBracketed(ref args) = segment.arguments &&
+        let Some(syn::GenericArgument::Type(syn::Type::Path(arg_path))) = args.args.last() &&
+        let (Some(path), 1) = (arg_path.path.segments.first(), arg_path.path.segments.len()) &&
+        ["B256", "Address", "Address", "Bloom", "TxHash", "BlockHash", "CompactPlaceholder"]
         .contains(&path.ident.to_string().as_str())
     {
         return true
     }
-    }
-    }
-    }
-    }
     false
 }

View File

@@ -69,8 +69,8 @@ pub fn derive_zstd(input: TokenStream) -> TokenStream {
 let mut decompressor = None;
 for attr in &input.attrs {
-    if attr.path().is_ident("reth_zstd") {
-        if let Err(err) = attr.parse_nested_meta(|meta| {
+    if attr.path().is_ident("reth_zstd") &&
+        let Err(err) = attr.parse_nested_meta(|meta| {
             if meta.path.is_ident("compressor") {
                 let value = meta.value()?;
                 let path: syn::Path = value.parse()?;
@@ -83,11 +83,11 @@ pub fn derive_zstd(input: TokenStream) -> TokenStream {
                 return Err(meta.error("unsupported attribute"))
             }
             Ok(())
-        }) {
+        })
+    {
         return err.to_compile_error().into()
     }
 }
-}
 let (Some(compressor), Some(decompressor)) = (compressor, decompressor) else {
     return quote! {

View File

@@ -44,8 +44,10 @@ impl StorageLock {
 #[cfg(any(test, not(feature = "disable-lock")))]
 fn try_acquire_file_lock(path: &Path) -> Result<Self, StorageLockError> {
     let file_path = path.join(LOCKFILE_NAME);
-    if let Some(process_lock) = ProcessUID::parse(&file_path)? {
-        if process_lock.pid != (process::id() as usize) && process_lock.is_active() {
+    if let Some(process_lock) = ProcessUID::parse(&file_path)? &&
+        process_lock.pid != (process::id() as usize) &&
+        process_lock.is_active()
+    {
         reth_tracing::tracing::error!(
             target: "reth::db::lockfile",
             path = ?file_path,
@@ -55,7 +57,6 @@ impl StorageLock {
         );
         return Err(StorageLockError::Taken(process_lock.pid))
     }
-    }
     Ok(Self(Arc::new(StorageLockInner::new(file_path)?)))
 }
@@ -141,8 +142,9 @@ impl ProcessUID {
 /// Parses [`Self`] from a file.
 fn parse(path: &Path) -> Result<Option<Self>, StorageLockError> {
-    if path.exists() {
-        if let Ok(contents) = reth_fs_util::read_to_string(path) {
+    if path.exists() &&
+        let Ok(contents) = reth_fs_util::read_to_string(path)
+    {
         let mut lines = contents.lines();
         if let (Some(Ok(pid)), Some(Ok(start_time))) = (
             lines.next().map(str::trim).map(str::parse),
@@ -151,7 +153,6 @@ impl ProcessUID {
         return Ok(Some(Self { pid, start_time }));
     }
 }
-}
 Ok(None)
 }
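The lockfile format read here is two lines, pid then process start time. A standalone sketch of the same tuple-pattern parse (file I/O replaced by in-memory strings):

    fn parse(contents: &str) -> Option<(usize, u64)> {
        let mut lines = contents.lines();
        if let (Some(Ok(pid)), Some(Ok(start_time))) = (
            lines.next().map(str::trim).map(str::parse),
            lines.next().map(str::trim).map(str::parse),
        ) {
            return Some((pid, start_time));
        }
        None
    }

    fn main() {
        assert_eq!(parse("1234\n42\n"), Some((1234, 42)));
        assert_eq!(parse("garbage"), None);
    }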

View File

@@ -33,16 +33,14 @@ pub fn iter_static_files(path: &Path) -> Result<SortedStaticFiles, NippyJarError
 .map_err(|err| NippyJarError::Custom(err.to_string()))?
 .filter_map(Result::ok);
 for entry in entries {
-    if entry.metadata().is_ok_and(|metadata| metadata.is_file()) {
-        if let Some((segment, _)) =
+    if entry.metadata().is_ok_and(|metadata| metadata.is_file()) &&
+        let Some((segment, _)) =
             StaticFileSegment::parse_filename(&entry.file_name().to_string_lossy())
     {
         let jar = NippyJar::<SegmentHeader>::load(&entry.path())?;
-        let (block_range, tx_range) = (
-            jar.user_header().block_range().copied(),
-            jar.user_header().tx_range().copied(),
-        );
+        let (block_range, tx_range) =
+            (jar.user_header().block_range().copied(), jar.user_header().tx_range().copied());
         if let Some(block_range) = block_range {
             match static_files.entry(segment) {
@@ -56,7 +54,6 @@ pub fn iter_static_files(path: &Path) -> Result<SortedStaticFiles, NippyJarError
             }
         }
     }
-    }
 for range_list in static_files.values_mut() {
     // Sort by block end range.

View File

@@ -17,7 +17,7 @@ pub trait TableObject: Sized {
 _: *const ffi::MDBX_txn,
 data_val: ffi::MDBX_val,
 ) -> Result<Self, Error> {
-    let s = slice::from_raw_parts(data_val.iov_base as *const u8, data_val.iov_len);
+    let s = unsafe { slice::from_raw_parts(data_val.iov_base as *const u8, data_val.iov_len) };
     Self::decode(s)
 }
 }
@@ -32,7 +32,7 @@ impl TableObject for Cow<'_, [u8]> {
 _txn: *const ffi::MDBX_txn,
 data_val: ffi::MDBX_val,
 ) -> Result<Self, Error> {
-    let s = slice::from_raw_parts(data_val.iov_base as *const u8, data_val.iov_len);
+    let s = unsafe { slice::from_raw_parts(data_val.iov_base as *const u8, data_val.iov_len) };
     #[cfg(feature = "return-borrowed")]
     {

View File

@@ -476,7 +476,7 @@ impl Transaction<RW> {
 /// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi
 /// BEFORE calling this function.
 pub unsafe fn drop_db(&self, db: Database) -> Result<()> {
-    mdbx_result(self.txn_execute(|txn| ffi::mdbx_drop(txn, db.dbi(), true))?)?;
+    mdbx_result(self.txn_execute(|txn| unsafe { ffi::mdbx_drop(txn, db.dbi(), true) })?)?;
     Ok(())
 }
@@ -489,7 +489,7 @@ impl Transaction<RO> {
 /// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi
 /// BEFORE calling this function.
 pub unsafe fn close_db(&self, db: Database) -> Result<()> {
-    mdbx_result(ffi::mdbx_dbi_close(self.env().env_ptr(), db.dbi()))?;
+    mdbx_result(unsafe { ffi::mdbx_dbi_close(self.env().env_ptr(), db.dbi()) })?;
     Ok(())
 }
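These `unsafe { ... }` wrappers are the other edition-2024 change in this commit: `unsafe_op_in_unsafe_fn` now lints by default, so the body of an `unsafe fn` no longer implicitly permits unsafe operations. A minimal sketch of the rule (hypothetical function, not the mdbx bindings):

    // Under edition 2024, this fn's body is checked like safe code.
    unsafe fn first_byte(ptr: *const u8, len: usize) -> Option<u8> {
        // The raw-parts call is an unsafe operation and needs its own block.
        let s = unsafe { std::slice::from_raw_parts(ptr, len) };
        s.first().copied()
    }

    fn main() {
        let data = [1u8, 2, 3];
        // Calling an unsafe fn still requires an unsafe block at the call site.
        let b = unsafe { first_byte(data.as_ptr(), data.len()) };
        assert_eq!(b, Some(1));
    }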

View File

@@ -309,11 +309,11 @@ impl<H: NippyJarHeader> NippyJar<H> {
 return Err(NippyJarError::ColumnLenMismatch(self.columns, columns.len()))
 }
-if let Some(compression) = &self.compressor {
-    if !compression.is_ready() {
+if let Some(compression) = &self.compressor &&
+    !compression.is_ready()
+{
     return Err(NippyJarError::CompressorNotReady)
 }
-}
 Ok(())
 }

View File

@@ -404,11 +404,11 @@ impl<H: NippyJarHeader> NippyJarWriter<H> {
 // Appends new offsets to disk
 for offset in self.offsets.drain(..) {
-    if let Some(last_offset_ondisk) = last_offset_ondisk.take() {
-        if last_offset_ondisk == offset {
+    if let Some(last_offset_ondisk) = last_offset_ondisk.take() &&
+        last_offset_ondisk == offset
+    {
         continue
     }
-    }
     self.offsets_file.write_all(&offset.to_le_bytes())?;
 }
 self.offsets_file.flush()?;

View File

@@ -594,11 +594,11 @@ impl<N: ProviderNodeTypes> StateProviderFactory for BlockchainProvider<N> {
 }
 fn pending_state_by_hash(&self, block_hash: B256) -> ProviderResult<Option<StateProviderBox>> {
-    if let Some(pending) = self.canonical_in_memory_state.pending_state() {
-        if pending.hash() == block_hash {
+    if let Some(pending) = self.canonical_in_memory_state.pending_state() &&
+        pending.hash() == block_hash
+    {
         return Ok(Some(Box::new(self.block_state_provider(&pending)?)));
     }
-    }
     Ok(None)
 }
@@ -965,8 +965,9 @@ mod tests {
 ) {
     let hook_provider = provider.clone();
     provider.database.db_ref().set_post_transaction_hook(Box::new(move || {
-        if let Some(state) = hook_provider.canonical_in_memory_state.head_state() {
-            if state.anchor().number + 1 == block_number {
+        if let Some(state) = hook_provider.canonical_in_memory_state.head_state() &&
+            state.anchor().number + 1 == block_number
+        {
             let mut lowest_memory_block =
                 state.parent_state_chain().last().expect("qed").block();
             let num_hash = lowest_memory_block.recovered_block().num_hash();
@@ -985,7 +986,6 @@ mod tests {
             // Remove from memory
             hook_provider.canonical_in_memory_state.remove_persisted_blocks(num_hash);
         }
-        }
     }));
 }

View File

@@ -536,11 +536,11 @@ impl<N: ProviderNodeTypes> ConsistentProvider<N> {
 // If the transaction number is less than the first in-memory transaction number, make a
 // database lookup
-if let HashOrNumber::Number(id) = id {
-    if id < in_memory_tx_num {
+if let HashOrNumber::Number(id) = id &&
+    id < in_memory_tx_num
+{
     return fetch_from_db(provider)
 }
-}
 // Iterate from the lowest block to the highest
 for block_state in in_mem_chain.iter().rev() {
@@ -816,15 +816,15 @@ impl<N: ProviderNodeTypes> BlockReader for ConsistentProvider<N> {
 hash: B256,
 source: BlockSource,
 ) -> ProviderResult<Option<Self::Block>> {
-    if matches!(source, BlockSource::Canonical | BlockSource::Any) {
-        if let Some(block) = self.get_in_memory_or_storage_by_block(
+    if matches!(source, BlockSource::Canonical | BlockSource::Any) &&
+        let Some(block) = self.get_in_memory_or_storage_by_block(
             hash.into(),
             |db_provider| db_provider.find_block_by_hash(hash, BlockSource::Canonical),
             |block_state| Ok(Some(block_state.block_ref().recovered_block().clone_block())),
-        )? {
+        )?
+    {
         return Ok(Some(block))
     }
-    }
     if matches!(source, BlockSource::Pending | BlockSource::Any) {
         return Ok(self
@@ -1133,15 +1133,15 @@ impl<N: ProviderNodeTypes> ReceiptProviderIdExt for ConsistentProvider<N> {
 match block {
     BlockId::Hash(rpc_block_hash) => {
         let mut receipts = self.receipts_by_block(rpc_block_hash.block_hash.into())?;
-        if receipts.is_none() && !rpc_block_hash.require_canonical.unwrap_or(false) {
-            if let Some(state) = self
+        if receipts.is_none() &&
+            !rpc_block_hash.require_canonical.unwrap_or(false) &&
+            let Some(state) = self
                 .head_block
                 .as_ref()
                 .and_then(|b| b.block_on_chain(rpc_block_hash.block_hash.into()))
         {
             receipts = Some(state.executed_block_receipts());
         }
-        }
         Ok(receipts)
     }
     BlockId::Number(num_tag) => match num_tag {

View File

@@ -67,11 +67,11 @@ where
 //
 // To ensure this doesn't happen, we just have to make sure that we fetch from the same
 // data source that we used during initialization. In this case, that is static files
-if let Some((hash, number)) = self.tip {
-    if provider_ro.sealed_header(number)?.is_none_or(|header| header.hash() != hash) {
+if let Some((hash, number)) = self.tip &&
+    provider_ro.sealed_header(number)?.is_none_or(|header| header.hash() != hash)
+{
     return Err(ConsistentViewError::Reorged { block: hash }.into())
 }
-}
 Ok(provider_ro)
 }

View File

@@ -1020,13 +1020,13 @@ impl<TX: DbTx + 'static, N: NodeTypesForProvider> HeaderProvider for DatabasePro
 }
 fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult<Option<U256>> {
-    if self.chain_spec.is_paris_active_at_block(number) {
-        if let Some(td) = self.chain_spec.final_paris_total_difficulty() {
+    if self.chain_spec.is_paris_active_at_block(number) &&
+        let Some(td) = self.chain_spec.final_paris_total_difficulty()
+    {
         // if this block is higher than the final paris(merge) block, return the final paris
         // difficulty
         return Ok(Some(td))
     }
-    }
     self.static_file_provider.get_with_static_file_or_database(
         StaticFileSegment::Headers,
@@ -1180,8 +1180,9 @@ impl<TX: DbTx + 'static, N: NodeTypesForProvider> BlockReader for DatabaseProvid
 /// If the header is found, but the transactions either do not exist, or are not indexed, this
 /// will return None.
 fn block(&self, id: BlockHashOrNumber) -> ProviderResult<Option<Self::Block>> {
-    if let Some(number) = self.convert_hash_or_number(id)? {
-        if let Some(header) = self.header_by_number(number)? {
+    if let Some(number) = self.convert_hash_or_number(id)? &&
+        let Some(header) = self.header_by_number(number)?
+    {
         // If the body indices are not found, this means that the transactions either do not
         // exist in the database yet, or they do exit but are not indexed.
         // If they exist but are not indexed, we don't have enough
@@ -1199,7 +1200,6 @@ impl<TX: DbTx + 'static, N: NodeTypesForProvider> TransactionsProvider for Datab
         return Ok(Some(Self::Block::new(header, body)))
     }
-    }
     Ok(None)
 }
@@ -1416,12 +1416,12 @@ impl<TX: DbTx + 'static, N: NodeTypesForProvider> TransactionsProvider for Datab
 tx_hash: TxHash,
 ) -> ProviderResult<Option<(Self::Transaction, TransactionMeta)>> {
     let mut transaction_cursor = self.tx.cursor_read::<tables::TransactionBlocks>()?;
-    if let Some(transaction_id) = self.transaction_id(tx_hash)? {
-        if let Some(transaction) = self.transaction_by_id_unhashed(transaction_id)? {
-            if let Some(block_number) =
-                transaction_cursor.seek(transaction_id).map(|b| b.map(|(_, bn)| bn))?
+    if let Some(transaction_id) = self.transaction_id(tx_hash)? &&
+        let Some(transaction) = self.transaction_by_id_unhashed(transaction_id)? &&
+        let Some(block_number) =
+            transaction_cursor.seek(transaction_id).map(|b| b.map(|(_, bn)| bn))? &&
+        let Some(sealed_header) = self.sealed_header(block_number)?
     {
-        if let Some(sealed_header) = self.sealed_header(block_number)? {
         let (header, block_hash) = sealed_header.split();
         if let Some(block_body) = self.block_body_indices(block_number)? {
             // the index of the tx in the block is the offset:
@@ -1443,9 +1443,6 @@
             return Ok(Some((transaction, meta)))
         }
     }
-    }
-    }
-    }
     Ok(None)
 }
@@ -1461,8 +1458,9 @@
 ) -> ProviderResult<Option<Vec<Self::Transaction>>> {
     let mut tx_cursor = self.tx.cursor_read::<tables::Transactions<Self::Transaction>>()?;
-    if let Some(block_number) = self.convert_hash_or_number(id)? {
-        if let Some(body) = self.block_body_indices(block_number)? {
+    if let Some(block_number) = self.convert_hash_or_number(id)? &&
+        let Some(body) = self.block_body_indices(block_number)?
+    {
         let tx_range = body.tx_num_range();
         return if tx_range.is_empty() {
             Ok(Some(Vec::new()))
@@ -1470,7 +1468,6 @@
             Ok(Some(self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)?))
         }
     }
-    }
     Ok(None)
 }
@@ -1543,8 +1540,9 @@ impl<TX: DbTx + 'static, N: NodeTypesForProvider> ReceiptProvider for DatabasePr
 &self,
 block: BlockHashOrNumber,
 ) -> ProviderResult<Option<Vec<Self::Receipt>>> {
-    if let Some(number) = self.convert_hash_or_number(block)? {
-        if let Some(body) = self.block_body_indices(number)? {
+    if let Some(number) = self.convert_hash_or_number(block)? &&
+        let Some(body) = self.block_body_indices(number)?
+    {
         let tx_range = body.tx_num_range();
         return if tx_range.is_empty() {
             Ok(Some(Vec::new()))
@@ -1552,7 +1550,6 @@
             self.receipts_by_tx_range(tx_range).map(Some)
         }
     }
-    }
     Ok(None)
 }
@@ -2000,11 +1997,11 @@ impl<TX: DbTxMut + DbTx + 'static, N: NodeTypesForProvider> StateWriter
 for entry in storage {
     tracing::trace!(?address, ?entry.key, "Updating plain state storage");
-    if let Some(db_entry) = storages_cursor.seek_by_key_subkey(address, entry.key)? {
-        if db_entry.key == entry.key {
+    if let Some(db_entry) = storages_cursor.seek_by_key_subkey(address, entry.key)? &&
+        db_entry.key == entry.key
+    {
         storages_cursor.delete_current()?;
     }
-    }
     if !entry.value.is_zero() {
         storages_cursor.upsert(address, &entry)?;
@@ -2038,12 +2035,11 @@
 for (hashed_slot, value) in storage.storage_slots_sorted() {
     let entry = StorageEntry { key: hashed_slot, value };
     if let Some(db_entry) =
-        hashed_storage_cursor.seek_by_key_subkey(*hashed_address, entry.key)?
+        hashed_storage_cursor.seek_by_key_subkey(*hashed_address, entry.key)? &&
+        db_entry.key == entry.key
     {
-        if db_entry.key == entry.key {
         hashed_storage_cursor.delete_current()?;
     }
-    }
     if !entry.value.is_zero() {
         hashed_storage_cursor.upsert(*hashed_address, &entry)?;

View File

@@ -158,11 +158,11 @@ impl<Provider: DBProvider + BlockHashReader> StateProvider
 storage_key: StorageKey,
 ) -> ProviderResult<Option<StorageValue>> {
     let mut cursor = self.tx().cursor_dup_read::<tables::PlainStorageState>()?;
-    if let Some(entry) = cursor.seek_by_key_subkey(account, storage_key)? {
-        if entry.key == storage_key {
+    if let Some(entry) = cursor.seek_by_key_subkey(account, storage_key)? &&
+        entry.key == storage_key
+    {
         return Ok(Some(entry.value))
     }
-    }
     Ok(None)
 }
 }

View File

@@ -314,11 +314,11 @@ impl<N: NodePrimitives<SignedTx: Decompress + SignedTransaction, Receipt: Decomp
     }
     fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult<Option<Self::Receipt>> {
-        if let Some(tx_static_file) = &self.auxiliary_jar {
-            if let Some(num) = tx_static_file.transaction_id(hash)? {
+        if let Some(tx_static_file) = &self.auxiliary_jar &&
+            let Some(num) = tx_static_file.transaction_id(hash)?
+        {
             return self.receipt(num)
         }
-        }
         Ok(None)
     }

View File

@@ -950,14 +950,13 @@ impl<N: NodePrimitives> StaticFileProvider<N> {
             }
         }
-        if let Some((db_last_entry, _)) = db_cursor.last()? {
-            if highest_static_file_entry
+        if let Some((db_last_entry, _)) = db_cursor.last()? &&
+            highest_static_file_entry
                 .is_none_or(|highest_entry| db_last_entry > highest_entry)
             {
                 return Ok(None)
             }
         }
-        }
         let highest_static_file_entry = highest_static_file_entry.unwrap_or_default();
         let highest_static_file_block = highest_static_file_block.unwrap_or_default();
@@ -1281,8 +1280,8 @@ impl<N: NodePrimitives> StaticFileProvider<N> {
                 self.get_highest_static_file_block(segment)
             } else {
                 self.get_highest_static_file_tx(segment)
-            } {
-                if block_or_tx_range.start <= static_file_upper_bound {
+            } && block_or_tx_range.start <= static_file_upper_bound
+            {
                 let end = block_or_tx_range.end.min(static_file_upper_bound + 1);
                 data.extend(fetch_from_static_file(
                     self,
@@ -1291,7 +1290,6 @@ impl<N: NodePrimitives> StaticFileProvider<N> {
                 )?);
                 block_or_tx_range.start = end;
             }
-            }
             if block_or_tx_range.end > block_or_tx_range.start {
                 data.extend(fetch_from_database(block_or_tx_range, predicate)?)
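The second hunk here is the one unusual chain in the diff: the scrutinee of the `if let` is itself an `if`/`else` expression, and the range check is chained directly after its closing brace. A simplified sketch of that shape (the bounds and range handling below are stand-ins for the `StaticFileProvider` fields):

fn covered_prefix(
    block_based: bool,
    highest_block: Option<u64>,
    highest_tx: Option<u64>,
    range: &mut std::ops::Range<u64>,
) -> Option<std::ops::Range<u64>> {
    if let Some(upper_bound) = if block_based { highest_block } else { highest_tx } &&
        range.start <= upper_bound
    {
        // Serve as much of the range as the static files cover, then
        // advance the start so the database serves the remainder.
        let end = range.end.min(upper_bound + 1);
        let served = range.start..end;
        range.start = end;
        return Some(served)
    }
    None
}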

View File

@@ -120,11 +120,10 @@ where
             }
             // Write withdrawals if any
-            if let Some(withdrawals) = body.withdrawals {
-                if !withdrawals.is_empty() {
-                    withdrawals_cursor
-                        .append(block_number, &StoredBlockWithdrawals { withdrawals })?;
-                }
+            if let Some(withdrawals) = body.withdrawals &&
+                !withdrawals.is_empty()
+            {
+                withdrawals_cursor.append(block_number, &StoredBlockWithdrawals { withdrawals })?;
             }
         }

View File

@@ -118,12 +118,12 @@ impl ReusableDecompressor {
             // source.
             if !reserved_upper_bound {
                 reserved_upper_bound = true;
-                if let Some(upper_bound) = Decompressor::upper_bound(src) {
-                    if let Some(additional) = upper_bound.checked_sub(self.buf.capacity()) {
+                if let Some(upper_bound) = Decompressor::upper_bound(src) &&
+                    let Some(additional) = upper_bound.checked_sub(self.buf.capacity())
+                {
                     break 'b additional
                 }
             }
-            }
             // Otherwise, double the capacity of the buffer.
             // This should normally not be reached as the upper bound should be enough.
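Besides the chain itself, this hunk sits inside a labeled block, so the success path exits with a value via `break 'b`. A compact sketch of that combination (names are stand-ins for the decompressor's fields):

fn additional_capacity(upper_bound_for_src: Option<usize>, current_capacity: usize) -> usize {
    'b: {
        if let Some(upper_bound) = upper_bound_for_src &&
            let Some(additional) = upper_bound.checked_sub(current_capacity)
        {
            // Grow exactly to the reported upper bound.
            break 'b additional
        }
        // Fallback mirroring the "double the capacity" path above.
        current_capacity
    }
}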

View File

@@ -229,9 +229,8 @@ pub async fn maintain_transaction_pool<N, Client, P, St, Tasks>(
         // check if we have a new finalized block
         if let Some(finalized) =
-            last_finalized_block.update(client.finalized_block_number().ok().flatten())
-        {
-            if let BlobStoreUpdates::Finalized(blobs) =
+            last_finalized_block.update(client.finalized_block_number().ok().flatten()) &&
+            let BlobStoreUpdates::Finalized(blobs) =
                 blob_store_tracker.on_finalized_block(finalized)
             {
                 metrics.inc_deleted_tracked_blobs(blobs.len());
@@ -244,7 +243,6 @@ pub async fn maintain_transaction_pool<N, Client, P, St, Tasks>(
                 pool.cleanup_blobs();
             }));
         }
-        }
         // outcomes of the futures we are waiting on
         let mut event = None;
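Here both links of the chain are `let` bindings, the second destructuring an enum variant; the old version needed an `if let` nested inside another. A sketch with simplified stand-ins for the blob-store types:

enum BlobStoreUpdates {
    None,
    Finalized(Vec<u64>),
}

fn finalized_blob_count(
    new_finalized: Option<u64>,
    on_finalized_block: impl FnOnce(u64) -> BlobStoreUpdates,
) -> usize {
    // The second `let` only runs, and only destructures, if the first matched.
    if let Some(finalized) = new_finalized &&
        let BlobStoreUpdates::Finalized(blobs) = on_finalized_block(finalized)
    {
        return blobs.len()
    }
    0
}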

View File

@@ -127,13 +127,13 @@ impl<T: TransactionOrdering> BestTransactions<T> {
         loop {
             match self.new_transaction_receiver.as_mut()?.try_recv() {
                 Ok(tx) => {
-                    if let Some(last_priority) = &self.last_priority {
-                        if &tx.priority > last_priority {
+                    if let Some(last_priority) = &self.last_priority &&
+                        &tx.priority > last_priority
+                    {
                         // we skip transactions if we already yielded a transaction with lower
                         // priority
                         return None
                     }
-                    }
                     return Some(tx)
                 }
                 // note TryRecvError::Lagged can be returned here, which is an error that attempts

View File

@@ -612,13 +612,13 @@ where
             // A newly added transaction may be immediately discarded, so we need to
            // adjust the result here
            for res in &mut added {
-                if let Ok(AddedTransactionOutcome { hash, .. }) = res {
-                    if discarded_hashes.contains(hash) {
+                if let Ok(AddedTransactionOutcome { hash, .. }) = res &&
+                    discarded_hashes.contains(hash)
+                {
                    *res = Err(PoolError::new(*hash, PoolErrorKind::DiscardedOnInsert))
                }
            }
        }
-        }
        added
    }
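This variant runs the chain over `&mut` items so the body can rewrite the matched element in place. A stand-in sketch (the real code wraps a `PoolError` rather than a bare hash):

use std::collections::HashSet;

struct Outcome {
    hash: u64,
}

fn mark_discarded(added: &mut [Result<Outcome, u64>], discarded_hashes: &HashSet<u64>) {
    for res in added.iter_mut() {
        // `hash` borrows out of `*res`; the borrow ends once the replacement
        // value is built, so the in-place overwrite is accepted.
        if let Ok(Outcome { hash }) = res &&
            discarded_hashes.contains(hash)
        {
            *res = Err(*hash)
        }
    }
}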

View File

@@ -329,15 +329,15 @@ impl<T: TransactionOrdering> PendingPool<T> {
         &mut self,
         id: &TransactionId,
     ) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
-        if let Some(lowest) = self.independent_transactions.get(&id.sender) {
-            if lowest.transaction.nonce() == id.nonce {
+        if let Some(lowest) = self.independent_transactions.get(&id.sender) &&
+            lowest.transaction.nonce() == id.nonce
+        {
             self.independent_transactions.remove(&id.sender);
             // mark the next as independent if it exists
             if let Some(unlocked) = self.get(&id.descendant()) {
                 self.independent_transactions.insert(id.sender, unlocked.clone());
             }
         }
-        }
         let tx = self.by_id.remove(id)?;
         self.size_of -= tx.transaction.size();

View File

@@ -954,15 +954,15 @@ impl<T: TransactionOrdering> TxPool<T> {
                 Destination::Pool(move_to) => {
                     debug_assert_ne!(&move_to, &current, "destination must be different");
                     let moved = self.move_transaction(current, move_to, &id);
-                    if matches!(move_to, SubPool::Pending) {
-                        if let Some(tx) = moved {
+                    if matches!(move_to, SubPool::Pending) &&
+                        let Some(tx) = moved
+                    {
                         trace!(target: "txpool", hash=%tx.transaction.hash(), "Promoted transaction to pending");
                         outcome.promoted.push(tx);
                     }
                 }
             }
         }
-        }
         outcome
     }
@@ -1856,8 +1856,9 @@ impl<T: PoolTransaction> AllTransactions<T> {
         // overdraft
         let id = new_blob_tx.transaction_id;
         let mut descendants = self.descendant_txs_inclusive(&id).peekable();
-        if let Some((maybe_replacement, _)) = descendants.peek() {
-            if **maybe_replacement == new_blob_tx.transaction_id {
+        if let Some((maybe_replacement, _)) = descendants.peek() &&
+            **maybe_replacement == new_blob_tx.transaction_id
+        {
             // replacement transaction
             descendants.next();
@@ -1870,7 +1871,6 @@ impl<T: PoolTransaction> AllTransactions<T> {
                 }
             }
         }
-        }
         } else if new_blob_tx.cost() > &on_chain_balance {
             // the transaction would go into overdraft
             return Err(InsertErr::Overdraft { transaction: Arc::new(new_blob_tx) })

View File

@@ -54,7 +54,31 @@ pub fn mock_tx_pool() -> MockTxPool {
 /// Sets the value for the field
 macro_rules! set_value {
-    ($this:ident => $field:ident) => {
+    // For mutable references
+    (&mut $this:expr => $field:ident) => {{
+        let new_value = $field;
+        match $this {
+            MockTransaction::Legacy { $field, .. } => {
+                *$field = new_value;
+            }
+            MockTransaction::Eip1559 { $field, .. } => {
+                *$field = new_value;
+            }
+            MockTransaction::Eip4844 { $field, .. } => {
+                *$field = new_value;
+            }
+            MockTransaction::Eip2930 { $field, .. } => {
+                *$field = new_value;
+            }
+            MockTransaction::Eip7702 { $field, .. } => {
+                *$field = new_value;
+            }
+        }
+        // Ensure the tx cost is always correct after each mutation.
+        $this.update_cost();
+    }};
+    // For owned values
+    ($this:expr => $field:ident) => {{
         let new_value = $field;
         match $this {
             MockTransaction::Legacy { ref mut $field, .. } |
@@ -67,7 +91,7 @@ macro_rules! set_value {
         }
         // Ensure the tx cost is always correct after each mutation.
         $this.update_cost();
-    };
+    }};
 }
 /// Gets the value for the field
@@ -89,7 +113,7 @@ macro_rules! make_setters_getters {
     paste! {$(
         /// Sets the value of the specified field.
         pub fn [<set_ $name>](&mut self, $name: $t) -> &mut Self {
-            set_value!(self => $name);
+            set_value!(&mut self => $name);
             self
         }
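Unlike the let-chain hunks, this macro change is driven by edition 2024's stricter pattern rules: when the scrutinee is already a reference (in the generated setters, `self` is `&mut Self`), the default binding mode is no longer by-value, and an explicit `ref mut` in the pattern becomes an error. The new `&mut $this:expr` arm therefore matches through the reference with plain bindings, which pick up `&mut` implicitly, and writes through `*$field`. A minimal sketch of the before/after styles on a stand-in struct:

struct Counter {
    hits: u64,
}

fn set_hits(counter: &mut Counter, hits: u64) {
    match counter {
        // Edition 2021 tolerated `Counter { ref mut hits, .. }` here even though
        // `counter` is already a `&mut` place; edition 2024 rejects the explicit
        // `ref mut`, so bind by name and write through the implied `&mut u64`.
        Counter { hits: hits_field } => *hits_field = hits,
    }
}

The owned-value arm keeps `ref mut` because there the scrutinee is an owned place, where the default binding mode is still by-value.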

View File

@@ -344,11 +344,11 @@ where
         }
         // Check whether the init code size has been exceeded.
-        if self.fork_tracker.is_shanghai_activated() {
-            if let Err(err) = transaction.ensure_max_init_code_size(MAX_INIT_CODE_BYTE_SIZE) {
+        if self.fork_tracker.is_shanghai_activated() &&
+            let Err(err) = transaction.ensure_max_init_code_size(MAX_INIT_CODE_BYTE_SIZE)
+        {
             return Err(TransactionValidationOutcome::Invalid(transaction, err))
         }
-        }
         // Checks for gas limit
         let transaction_gas_limit = transaction.gas_limit();
@@ -364,8 +364,9 @@ where
         }
         // Check individual transaction gas limit if configured
-        if let Some(max_tx_gas_limit) = self.max_tx_gas_limit {
-            if transaction_gas_limit > max_tx_gas_limit {
+        if let Some(max_tx_gas_limit) = self.max_tx_gas_limit &&
+            transaction_gas_limit > max_tx_gas_limit
+        {
             return Err(TransactionValidationOutcome::Invalid(
                 transaction,
                 InvalidPoolTransactionError::MaxTxGasLimitExceeded(
@@ -374,7 +375,6 @@ where
                 ),
             ))
         }
-        }
         // Ensure max_priority_fee_per_gas (if EIP1559) is less than max_fee_per_gas if any.
         if transaction.max_priority_fee_per_gas() > Some(transaction.max_fee_per_gas()) {
@@ -427,14 +427,14 @@ where
         }
         // Checks for chainid
-        if let Some(chain_id) = transaction.chain_id() {
-            if chain_id != self.chain_id() {
+        if let Some(chain_id) = transaction.chain_id() &&
+            chain_id != self.chain_id()
+        {
             return Err(TransactionValidationOutcome::Invalid(
                 transaction,
                 InvalidTransactionError::ChainIdMismatch.into(),
            ))
        }
-        }
        if transaction.is_eip7702() {
            // Prague fork is required for 7702 txs
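The validator hunks show the remaining chain flavor: a plain boolean test before the `let`, with `let Err(..)` capturing the failure to report. A stand-in sketch (the size check and constant below stand in for `ensure_max_init_code_size` and `MAX_INIT_CODE_BYTE_SIZE`):

const MAX_INIT_CODE_BYTE_SIZE: usize = 2 * 24576;

fn ensure_max_init_code_size(len: usize, max: usize) -> Result<(), String> {
    if len > max {
        Err(format!("init code of {len} bytes exceeds max of {max}"))
    } else {
        Ok(())
    }
}

fn validate_init_code(shanghai_activated: bool, init_code_len: usize) -> Result<(), String> {
    // The boolean gate short-circuits, so the fallible check only runs
    // post-Shanghai, mirroring the first hunk above.
    if shanghai_activated &&
        let Err(err) = ensure_max_init_code_size(init_code_len, MAX_INIT_CODE_BYTE_SIZE)
    {
        return Err(err)
    }
    Ok(())
}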

Some files were not shown because too many files have changed in this diff.