chore: fix clippy (#4032)

Author: Alexey Shekhirin
Date: 2023-08-02 14:02:50 +01:00
Committed by: GitHub
Parent: b46101afb5
Commit: cb0dedc8a9

9 changed files with 19 additions and 19 deletions


@@ -331,7 +331,7 @@ impl<Ext: RethCliExt> Command<Ext> {
             let mut pipeline = self
                 .build_networked_pipeline(
-                    &mut config,
+                    &config,
                     client.clone(),
                     Arc::clone(&consensus),
                     db.clone(),
@@ -351,7 +351,7 @@ impl<Ext: RethCliExt> Command<Ext> {
         } else {
             let pipeline = self
                 .build_networked_pipeline(
-                    &mut config,
+                    &config,
                     network_client.clone(),
                     Arc::clone(&consensus),
                     db.clone(),
@@ -480,7 +480,7 @@ impl<Ext: RethCliExt> Command<Ext> {
     #[allow(clippy::too_many_arguments)]
     async fn build_networked_pipeline<DB, Client>(
         &self,
-        config: &mut Config,
+        config: &Config,
         client: Client,
         consensus: Arc<dyn Consensus>,
         db: DB,
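The pipeline builder above only reads the configuration, so its parameter is relaxed from `&mut Config` to `&Config` and both call sites follow suit; clippy flags mutable references that are never actually used for mutation (for example via a lint such as `needless_pass_by_ref_mut`). A minimal, self-contained sketch of the pattern, using a hypothetical `Settings` type rather than reth's actual `Config`:

    // Hypothetical `Settings` stands in for reth's `Config`.
    struct Settings {
        max_peers: usize,
    }

    // Taking `&mut Settings` here would compile, but nothing is mutated,
    // so a shared reference is all the function needs.
    fn peer_budget(settings: &Settings) -> usize {
        settings.max_peers / 2
    }

    fn main() {
        let settings = Settings { max_peers: 50 };
        // With `&Settings`, callers no longer need a mutable binding or borrow.
        println!("budget: {}", peer_budget(&settings));
    }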


@@ -13,7 +13,7 @@ use std::{path::PathBuf, sync::Arc};
 use tracing::info;
 pub(crate) async fn dump_execution_stage<DB: Database>(
-    db_tool: &mut DbTool<'_, DB>,
+    db_tool: &DbTool<'_, DB>,
     from: u64,
     to: u64,
     output_db: &PathBuf,
@@ -90,7 +90,7 @@ fn import_tables_with_range<DB: Database>(
 /// PlainAccountState safely. There might be some state dependency from an address
 /// which hasn't been changed in the given range.
 async fn unwind_and_copy<DB: Database>(
-    db_tool: &mut DbTool<'_, DB>,
+    db_tool: &DbTool<'_, DB>,
     from: u64,
     tip_block_number: u64,
     output_db: &DatabaseEnv,


@@ -9,7 +9,7 @@ use std::{path::PathBuf, sync::Arc};
 use tracing::info;
 pub(crate) async fn dump_hashing_account_stage<DB: Database>(
-    db_tool: &mut DbTool<'_, DB>,
+    db_tool: &DbTool<'_, DB>,
     from: BlockNumber,
     to: BlockNumber,
     output_db: &PathBuf,
@@ -33,7 +33,7 @@ pub(crate) async fn dump_hashing_account_stage<DB: Database>(
 /// Dry-run an unwind to FROM block and copy the necessary table data to the new database.
 async fn unwind_and_copy<DB: Database>(
-    db_tool: &mut DbTool<'_, DB>,
+    db_tool: &DbTool<'_, DB>,
     from: u64,
     tip_block_number: u64,
     output_db: &DatabaseEnv,


@@ -9,7 +9,7 @@ use std::{path::PathBuf, sync::Arc};
 use tracing::info;
 pub(crate) async fn dump_hashing_storage_stage<DB: Database>(
-    db_tool: &mut DbTool<'_, DB>,
+    db_tool: &DbTool<'_, DB>,
     from: u64,
     to: u64,
     output_db: &PathBuf,
@@ -28,7 +28,7 @@ pub(crate) async fn dump_hashing_storage_stage<DB: Database>(
 /// Dry-run an unwind to FROM block and copy the necessary table data to the new database.
 async fn unwind_and_copy<DB: Database>(
-    db_tool: &mut DbTool<'_, DB>,
+    db_tool: &DbTool<'_, DB>,
     from: u64,
     tip_block_number: u64,
     output_db: &DatabaseEnv,


@@ -15,7 +15,7 @@ use std::{path::PathBuf, sync::Arc};
 use tracing::info;
 pub(crate) async fn dump_merkle_stage<DB: Database>(
-    db_tool: &mut DbTool<'_, DB>,
+    db_tool: &DbTool<'_, DB>,
     from: BlockNumber,
     to: BlockNumber,
     output_db: &PathBuf,
@@ -42,7 +42,7 @@ pub(crate) async fn dump_merkle_stage<DB: Database>(
 /// Dry-run an unwind to FROM block and copy the necessary table data to the new database.
 async fn unwind_and_copy<DB: Database>(
-    db_tool: &mut DbTool<'_, DB>,
+    db_tool: &DbTool<'_, DB>,
     range: (u64, u64),
     tip_block_number: u64,
     output_db: &DatabaseEnv,


@@ -104,20 +104,20 @@ impl Command {
         let db = Arc::new(init_db(db_path, self.db.log_level)?);
         info!(target: "reth::cli", "Database opened");
-        let mut tool = DbTool::new(&db, self.chain.clone())?;
+        let tool = DbTool::new(&db, self.chain.clone())?;
         match &self.command {
             Stages::Execution(StageCommand { output_db, from, to, dry_run, .. }) => {
-                dump_execution_stage(&mut tool, *from, *to, output_db, *dry_run).await?
+                dump_execution_stage(&tool, *from, *to, output_db, *dry_run).await?
             }
             Stages::StorageHashing(StageCommand { output_db, from, to, dry_run, .. }) => {
-                dump_hashing_storage_stage(&mut tool, *from, *to, output_db, *dry_run).await?
+                dump_hashing_storage_stage(&tool, *from, *to, output_db, *dry_run).await?
            }
             Stages::AccountHashing(StageCommand { output_db, from, to, dry_run, .. }) => {
-                dump_hashing_account_stage(&mut tool, *from, *to, output_db, *dry_run).await?
+                dump_hashing_account_stage(&tool, *from, *to, output_db, *dry_run).await?
             }
             Stages::Merkle(StageCommand { output_db, from, to, dry_run, .. }) => {
-                dump_merkle_stage(&mut tool, *from, *to, output_db, *dry_run).await?
+                dump_merkle_stage(&tool, *from, *to, output_db, *dry_run).await?
             }
         }
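With the dump helpers now borrowing `DbTool` immutably, the command handler can drop both the `mut` on the binding and the `&mut` at every call site; the same shared reference can be handed to whichever arm of the `match` runs. A rough sketch of that shape, with hypothetical `Stage` and `Tool` types standing in for reth's `Stages` and `DbTool`:

    // Hypothetical types standing in for reth's `Stages` and `DbTool`.
    enum Stage {
        Execution,
        Merkle,
    }

    struct Tool;

    fn dump_execution(_tool: &Tool) {
        println!("execution stage dumped");
    }

    fn dump_merkle(_tool: &Tool) {
        println!("merkle stage dumped");
    }

    fn main() {
        let tool = Tool; // no `mut` needed: every helper takes `&Tool`
        for command in [Stage::Execution, Stage::Merkle] {
            match command {
                Stage::Execution => dump_execution(&tool),
                Stage::Merkle => dump_merkle(&tool),
            }
        }
    }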


@@ -487,7 +487,7 @@ impl CallTraceNode {
         post_value: bool,
     ) {
         let addr = self.trace.address;
-        let acc_state = account_states.entry(addr).or_insert_with(AccountState::default);
+        let acc_state = account_states.entry(addr).or_default();
         for change in self.trace.steps.iter().filter_map(|s| s.storage_change) {
             let StorageChange { key, value, had_value } = change;
             let storage_map = acc_state.storage.get_or_insert_with(BTreeMap::new);
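`Entry::or_default()` is shorthand for `or_insert_with` with the value type's `Default` constructor, which is why clippy prefers it for the `AccountState` entry above (the exact lint name depends on the clippy version). A small standalone example of the equivalence, using a plain `HashMap<String, u64>`:

    use std::collections::HashMap;

    fn main() {
        let mut counts: HashMap<String, u64> = HashMap::new();

        // Verbose form: spells out the default constructor.
        *counts.entry("a".to_string()).or_insert_with(u64::default) += 1;

        // Equivalent shorthand, which is what clippy suggests.
        *counts.entry("a".to_string()).or_default() += 1;

        assert_eq!(counts["a"], 2);
    }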


@@ -62,7 +62,6 @@ where
     }
     fn try_balance_changes_in_block(&self, block_id: BlockId) -> EthResult<HashMap<Address, U256>> {
-        let block_id = block_id;
         let Some(block_number) = self.provider().block_number_for_id(block_id)? else {
             return Err(EthApiError::UnknownBlockNumber)
         };
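The deleted `let block_id = block_id;` rebinds a value to its own name, which is a no-op; this is the pattern reported by clippy's `redundant_locals` lint (the same lint that is explicitly allowed in the last file below). A tiny illustration:

    fn double_nonzero(id: u64) -> Option<u64> {
        // A line like `let id = id;` here would be flagged by
        // clippy::redundant_locals and can simply be deleted.
        if id == 0 {
            None
        } else {
            Some(id * 2)
        }
    }

    fn main() {
        assert_eq!(double_nonzero(21), Some(42));
    }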


@@ -346,7 +346,7 @@ where
 }
 ///////////////////////////////////////////////////////////////////////////////////////////////////
-//// Environment Builder
+// Environment Builder
 ///////////////////////////////////////////////////////////////////////////////////////////////////
 #[derive(Clone, Debug, PartialEq, Eq)]
@@ -512,6 +512,7 @@ where
             match rx.recv() {
                 Ok(msg) => match msg {
                     TxnManagerMessage::Begin { parent, flags, sender } => {
+                        #[allow(clippy::redundant_locals)]
                         let e = e;
                         let mut txn: *mut ffi::MDBX_txn = ptr::null_mut();
                         sender
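Here the self-rebinding `let e = e;` is kept, presumably intentionally, and the lint is silenced only for that statement with `#[allow(clippy::redundant_locals)]`. A minimal sketch of scoping an allow attribute to a single statement rather than a whole module:

    fn main() {
        let value = 1;

        // The attribute applies only to the statement that follows it.
        #[allow(clippy::redundant_locals)]
        let value = value; // kept on purpose; clippy would otherwise flag it

        println!("{value}");
    }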