chore: make clippy happy (#15895)

Co-authored-by: Federico Gimenez <federico.gimenez@gmail.com>
Author: Matthias Seitz
Committed via GitHub: 2025-04-24 10:06:06 +02:00
parent 94723cacb3
commit ddba222c08
41 changed files with 91 additions and 104 deletions
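
Nearly every hunk below is the same mechanical fix for clippy's `uninlined_format_args` lint: variables passed as positional `format!`/`println!`/`panic!` arguments are captured inline in the format string instead. A minimal sketch of the before/after (not code from this commit):

```rust
fn main() {
    let addr = "127.0.0.1:8545";
    // Before: positional argument — clippy::uninlined_format_args warns here.
    println!("http://{}", addr);
    // After: the identifier is captured directly in the format string.
    println!("http://{addr}");
}
```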

View File

@@ -144,10 +144,10 @@ impl Command {
         );
         if let Some(rpc_url) = self.rpc_url {
-            cmd += &format!(" --rpc-url {}", rpc_url);
+            cmd += &format!(" --rpc-url {rpc_url}");
         }
         if let Some(secret) = &jwt_secret {
-            cmd += &format!(" --jwt-secret {}", secret);
+            cmd += &format!(" --jwt-secret {secret}");
         }
         println!("{cmd}");

View File

@@ -343,8 +343,8 @@ impl Command {
             // add rows containing checksums to the table
             let mut row = Row::new();
             row.add_cell(Cell::new(db_table));
-            row.add_cell(Cell::new(format!("{:x}", checksum)));
-            row.add_cell(Cell::new(format!("{:?}", elapsed)));
+            row.add_cell(Cell::new(format!("{checksum:x}")));
+            row.add_cell(Cell::new(format!("{elapsed:?}")));
             table.add_row(row);
         }
@@ -360,7 +360,7 @@ impl Command {
         let mut row = Row::new();
         row.add_cell(Cell::new("Total elapsed"));
         row.add_cell(Cell::new(""));
-        row.add_cell(Cell::new(format!("{:?}", total_elapsed)));
+        row.add_cell(Cell::new(format!("{total_elapsed:?}")));
         table.add_row(row);
         Ok(table)

View File

@@ -34,7 +34,7 @@ impl Command {
             let (_, their_hello) =
                 UnauthedP2PStream::new(ecies_stream).handshake(hello).await?;
-            println!("{:#?}", their_hello);
+            println!("{their_hello:#?}");
         }
     }
     Ok(())

View File

@@ -178,7 +178,7 @@ pub fn read_vectors_with(read: &[fn() -> eyre::Result<()>]) -> Result<()> {
     if let Some(err_list) = errors {
         for error in err_list {
-            eprintln!("{:?}", error);
+            eprintln!("{error:?}");
         }
         return Err(eyre::eyre!(
             "If there are missing types, make sure to run `reth test-vectors compact --write` first.\n
@@ -271,7 +271,7 @@ where
         let (reconstructed, _) = T::from_compact(&compact_bytes, len_or_identifier);
         reconstructed.to_compact(&mut buffer);
-        assert_eq!(buffer, compact_bytes, "mismatch {}", type_name);
+        assert_eq!(buffer, compact_bytes, "mismatch {type_name}");
     }
     println!("");

View File

@@ -123,13 +123,13 @@ pub fn install() {
         let mut alt_stack: libc::stack_t = mem::zeroed();
         alt_stack.ss_sp = alloc(Layout::from_size_align(alt_stack_size, 1).unwrap()).cast();
         alt_stack.ss_size = alt_stack_size;
-        libc::sigaltstack(&alt_stack, ptr::null_mut());
+        libc::sigaltstack(&raw const alt_stack, ptr::null_mut());
         let mut sa: libc::sigaction = mem::zeroed();
         sa.sa_sigaction = print_stack_trace as libc::sighandler_t;
         sa.sa_flags = libc::SA_NODEFER | libc::SA_RESETHAND | libc::SA_ONSTACK;
-        libc::sigemptyset(&mut sa.sa_mask);
-        libc::sigaction(libc::SIGSEGV, &sa, ptr::null_mut());
+        libc::sigemptyset(&raw mut sa.sa_mask);
+        libc::sigaction(libc::SIGSEGV, &raw const sa, ptr::null_mut());
     }
 }
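
The `libc` hunk above switches to the `&raw const` / `&raw mut` operators (stable since Rust 1.82), which create a raw pointer directly from a place without materializing an intermediate reference — the idiom clippy's `borrow_as_ptr` lint points toward. A standalone sketch, not tied to the signal-handler code:

```rust
fn main() {
    let mut counter = 0u64;
    // Before: `let p = &mut counter as *mut u64;` — clippy::borrow_as_ptr warns.
    // After: take the raw pointer directly from the place expression.
    let p: *mut u64 = &raw mut counter;
    unsafe { *p += 1 };
    assert_eq!(counter, 1);
}
```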

View File

@@ -293,7 +293,7 @@ where
     /// Returns the RPC URL.
     pub fn rpc_url(&self) -> Url {
         let addr = self.inner.rpc_server_handle().http_local_addr().unwrap();
-        format!("http://{}", addr).parse().unwrap()
+        format!("http://{addr}").parse().unwrap()
     }
     /// Returns an RPC client.

View File

@@ -652,7 +652,7 @@ mod tests {
     #[test]
     fn measure_storage_cache_overhead() {
         let (base_overhead, cache) = measure_allocation(|| AccountStorageCache::new(1000));
-        println!("Base AccountStorageCache overhead: {} bytes", base_overhead);
+        println!("Base AccountStorageCache overhead: {base_overhead} bytes");
         let mut rng = rand::rng();
         let key = StorageKey::random();
@@ -660,7 +660,7 @@ mod tests {
         let (first_slot, _) = measure_allocation(|| {
             cache.insert_storage(key, Some(value));
         });
-        println!("First slot insertion overhead: {} bytes", first_slot);
+        println!("First slot insertion overhead: {first_slot} bytes");
         const TOTAL_SLOTS: usize = 10_000;
         let (test_slots, _) = measure_allocation(|| {

View File

@@ -3328,7 +3328,7 @@ mod tests {
                assert_eq!(state, fcu_state);
                assert_eq!(status, fcu_status.into());
            }
-           _ => panic!("Unexpected event: {:#?}", event),
+           _ => panic!("Unexpected event: {event:#?}"),
        }
    }
@@ -3377,7 +3377,7 @@ mod tests {
            ) => {
                assert_eq!(header.hash(), hash);
            }
-           _ => panic!("Unexpected event: {:#?}", event),
+           _ => panic!("Unexpected event: {event:#?}"),
        }
    }
@@ -3407,7 +3407,7 @@ mod tests {
            ) => {
                assert_eq!(executed.recovered_block.hash(), expected_hash);
            }
-           _ => panic!("Unexpected event: {:#?}", event),
+           _ => panic!("Unexpected event: {event:#?}"),
        }
    }
@@ -3420,7 +3420,7 @@ mod tests {
            )) => {
                assert_eq!(executed.recovered_block.hash(), expected_hash);
            }
-           _ => panic!("Unexpected event: {:#?}", event),
+           _ => panic!("Unexpected event: {event:#?}"),
        }
    }
@@ -3432,7 +3432,7 @@ mod tests {
            )) => {
                assert_eq!(block.hash(), expected_hash);
            }
-           _ => panic!("Unexpected event: {:#?}", event),
+           _ => panic!("Unexpected event: {event:#?}"),
        }
    }
@@ -3523,7 +3523,7 @@ mod tests {
            FromEngine::DownloadedBlocks(blocks) => {
                assert_eq!(blocks.len(), tree_config.max_execute_block_batch_size());
            }
-           _ => panic!("unexpected message: {:#?}", msg),
+           _ => panic!("unexpected message: {msg:#?}"),
        }
    }
@@ -4213,7 +4213,7 @@ mod tests {
                let expected_block_set = HashSet::from_iter([missing_block.hash()]);
                assert_eq!(actual_block_set, expected_block_set);
            }
-           _ => panic!("Unexpected event: {:#?}", event),
+           _ => panic!("Unexpected event: {event:#?}"),
        }
    }
@@ -4305,7 +4305,7 @@ mod tests {
            EngineApiEvent::Download(DownloadRequest::BlockSet(hash_set)) => {
                assert_eq!(hash_set, HashSet::from_iter([main_chain_last_hash]));
            }
-           _ => panic!("Unexpected event: {:#?}", event),
+           _ => panic!("Unexpected event: {event:#?}"),
        }
        test_harness
@@ -4325,7 +4325,7 @@ mod tests {
                );
                assert_eq!(initial_hash, main_chain.last().unwrap().parent_hash);
            }
-           _ => panic!("Unexpected event: {:#?}", event),
+           _ => panic!("Unexpected event: {event:#?}"),
        }
    }
@@ -4368,7 +4368,7 @@ mod tests {
            EngineApiEvent::Download(DownloadRequest::BlockSet(hash_set)) => {
                assert_eq!(hash_set, HashSet::from_iter([main_chain_backfill_target_hash]));
            }
-           _ => panic!("Unexpected event: {:#?}", event),
+           _ => panic!("Unexpected event: {event:#?}"),
        }
        // send message to tell the engine the requested block was downloaded
@@ -4387,7 +4387,7 @@ mod tests {
            )) => {
                assert_eq!(target_hash, main_chain_backfill_target_hash);
            }
-           _ => panic!("Unexpected event: {:#?}", event),
+           _ => panic!("Unexpected event: {event:#?}"),
        }
        // persist blocks of main chain, same as the backfill operation would do
@@ -4413,7 +4413,7 @@ mod tests {
            EngineApiEvent::Download(DownloadRequest::BlockSet(target_hash)) => {
                assert_eq!(target_hash, HashSet::from_iter([main_chain_last_hash]));
            }
-           _ => panic!("Unexpected event: {:#?}", event),
+           _ => panic!("Unexpected event: {event:#?}"),
        }
        // tell engine main chain tip downloaded
@@ -4432,7 +4432,7 @@ mod tests {
                );
                assert_eq!(initial_hash, main_chain_last.parent_hash);
            }
-           _ => panic!("Unexpected event: {:#?}", event),
+           _ => panic!("Unexpected event: {event:#?}"),
        }
        let remaining: Vec<_> = main_chain

View File

@@ -58,12 +58,11 @@ impl<T: Decodable> SnappyRlpCodec<T> {
         let mut decoder = FrameDecoder::new(compressed_data);
         let mut decompressed = Vec::new();
         Read::read_to_end(&mut decoder, &mut decompressed).map_err(|e| {
-            E2sError::SnappyDecompression(format!("Failed to decompress data: {}", e))
+            E2sError::SnappyDecompression(format!("Failed to decompress data: {e}"))
         })?;
         let mut slice = decompressed.as_slice();
-        T::decode(&mut slice)
-            .map_err(|e| E2sError::Rlp(format!("Failed to decode RLP data: {}", e)))
+        T::decode(&mut slice).map_err(|e| E2sError::Rlp(format!("Failed to decode RLP data: {e}")))
     }
 }
@@ -78,11 +77,11 @@ impl<T: Encodable> SnappyRlpCodec<T> {
             let mut encoder = FrameEncoder::new(&mut compressed);
             Write::write_all(&mut encoder, &rlp_data).map_err(|e| {
-                E2sError::SnappyCompression(format!("Failed to compress data: {}", e))
+                E2sError::SnappyCompression(format!("Failed to compress data: {e}"))
             })?;
             encoder.flush().map_err(|e| {
-                E2sError::SnappyCompression(format!("Failed to flush encoder: {}", e))
+                E2sError::SnappyCompression(format!("Failed to flush encoder: {e}"))
             })?;
         }
@@ -116,11 +115,11 @@ impl CompressedHeader {
             let mut encoder = FrameEncoder::new(&mut compressed);
             Write::write_all(&mut encoder, rlp_data).map_err(|e| {
-                E2sError::SnappyCompression(format!("Failed to compress header: {}", e))
+                E2sError::SnappyCompression(format!("Failed to compress header: {e}"))
             })?;
             encoder.flush().map_err(|e| {
-                E2sError::SnappyCompression(format!("Failed to flush encoder: {}", e))
+                E2sError::SnappyCompression(format!("Failed to flush encoder: {e}"))
             })?;
         }
         Ok(Self { data: compressed })
@@ -131,7 +130,7 @@ impl CompressedHeader {
         let mut decoder = FrameDecoder::new(self.data.as_slice());
         let mut decompressed = Vec::new();
         Read::read_to_end(&mut decoder, &mut decompressed).map_err(|e| {
-            E2sError::SnappyDecompression(format!("Failed to decompress header: {}", e))
+            E2sError::SnappyDecompression(format!("Failed to decompress header: {e}"))
         })?;
         Ok(decompressed)
@@ -197,11 +196,11 @@ impl CompressedBody {
             let mut encoder = FrameEncoder::new(&mut compressed);
             Write::write_all(&mut encoder, rlp_data).map_err(|e| {
-                E2sError::SnappyCompression(format!("Failed to compress header: {}", e))
+                E2sError::SnappyCompression(format!("Failed to compress header: {e}"))
             })?;
             encoder.flush().map_err(|e| {
-                E2sError::SnappyCompression(format!("Failed to flush encoder: {}", e))
+                E2sError::SnappyCompression(format!("Failed to flush encoder: {e}"))
             })?;
         }
         Ok(Self { data: compressed })
@@ -212,7 +211,7 @@ impl CompressedBody {
         let mut decoder = FrameDecoder::new(self.data.as_slice());
         let mut decompressed = Vec::new();
         Read::read_to_end(&mut decoder, &mut decompressed).map_err(|e| {
-            E2sError::SnappyDecompression(format!("Failed to decompress body: {}", e))
+            E2sError::SnappyDecompression(format!("Failed to decompress body: {e}"))
         })?;
         Ok(decompressed)
@@ -275,11 +274,11 @@ impl CompressedReceipts {
             let mut encoder = FrameEncoder::new(&mut compressed);
             Write::write_all(&mut encoder, rlp_data).map_err(|e| {
-                E2sError::SnappyCompression(format!("Failed to compress header: {}", e))
+                E2sError::SnappyCompression(format!("Failed to compress header: {e}"))
             })?;
             encoder.flush().map_err(|e| {
-                E2sError::SnappyCompression(format!("Failed to flush encoder: {}", e))
+                E2sError::SnappyCompression(format!("Failed to flush encoder: {e}"))
             })?;
         }
         Ok(Self { data: compressed })
@@ -289,7 +288,7 @@ impl CompressedReceipts {
         let mut decoder = FrameDecoder::new(self.data.as_slice());
         let mut decompressed = Vec::new();
         Read::read_to_end(&mut decoder, &mut decompressed).map_err(|e| {
-            E2sError::SnappyDecompression(format!("Failed to decompress receipts: {}", e))
+            E2sError::SnappyDecompression(format!("Failed to decompress receipts: {e}"))
         })?;
         Ok(decompressed)
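
Every hunk in this file touches the same snappy-framed RLP codec and only inlines the error captures. A self-contained sketch of the compress/decompress round trip using the `snap` crate, with a stand-in error type rather than the real `E2sError`:

```rust
use std::io::{Read, Write};

use snap::{read::FrameDecoder, write::FrameEncoder};

// Hypothetical stand-in for E2sError, for illustration only.
#[derive(Debug)]
enum CodecError {
    Compression(String),
    Decompression(String),
}

fn compress(data: &[u8]) -> Result<Vec<u8>, CodecError> {
    let mut compressed = Vec::new();
    {
        let mut encoder = FrameEncoder::new(&mut compressed);
        encoder
            .write_all(data)
            .map_err(|e| CodecError::Compression(format!("Failed to compress data: {e}")))?;
        encoder
            .flush()
            .map_err(|e| CodecError::Compression(format!("Failed to flush encoder: {e}")))?;
    }
    Ok(compressed)
}

fn decompress(data: &[u8]) -> Result<Vec<u8>, CodecError> {
    let mut decoder = FrameDecoder::new(data);
    let mut decompressed = Vec::new();
    decoder
        .read_to_end(&mut decompressed)
        .map_err(|e| CodecError::Decompression(format!("Failed to decompress data: {e}")))?;
    Ok(decompressed)
}

fn main() -> Result<(), CodecError> {
    let round_trip = decompress(&compress(b"snappy-framed payload")?)?;
    assert_eq!(round_trip, b"snappy-framed payload");
    Ok(())
}
```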

View File

@@ -43,8 +43,7 @@ impl core::fmt::Display for DisplayFork {
                 // All networks that have merged are finalized.
                 write!(
                     f,
-                    "{:32} @{} (network is known to be merged)",
-                    name_with_eip, total_difficulty,
+                    "{name_with_eip:32} @{total_difficulty} (network is known to be merged)",
                 )?;
             }
             ForkCondition::Never => unreachable!(),

View File

@@ -316,7 +316,7 @@ mod tests {
             reth.command.chain_spec().map(|c| c.chain.to_string()).unwrap_or(String::new());
         reth.logs.log_file_directory = reth.logs.log_file_directory.join(chain.clone());
         let log_dir = reth.logs.log_file_directory;
-        let end = format!("reth/logs/{}", chain);
+        let end = format!("reth/logs/{chain}");
         assert!(log_dir.as_ref().ends_with(end), "{log_dir:?}");
     }
 }
@@ -332,7 +332,7 @@ mod tests {
         }
         let log_dir = reth.logs.log_file_directory;
         let end = format!("reth/logs/{}", SUPPORTED_CHAINS[0]);
-        println!("{:?}", log_dir);
+        println!("{log_dir:?}");
         assert!(log_dir.as_ref().ends_with(end), "{log_dir:?}");
     }
@@ -346,7 +346,7 @@ mod tests {
         }
         let log_dir = reth.logs.log_file_directory;
         let end = "reth/logs".to_string();
-        println!("{:?}", log_dir);
+        println!("{log_dir:?}");
         assert!(log_dir.as_ref().ends_with(end), "{log_dir:?}");
     }

View File

@@ -55,7 +55,7 @@ async fn e2e_test_send_transactions() -> eyre::Result<()> {
     let seed: [u8; 32] = rand::rng().random();
     let mut rng = StdRng::from_seed(seed);
-    println!("Seed: {:?}", seed);
+    println!("Seed: {seed:?}");
     let chain_spec = Arc::new(
         ChainSpecBuilder::default()
@@ -91,7 +91,7 @@ async fn test_long_reorg() -> eyre::Result<()> {
     let seed: [u8; 32] = rand::rng().random();
     let mut rng = StdRng::from_seed(seed);
-    println!("Seed: {:?}", seed);
+    println!("Seed: {seed:?}");
     let chain_spec = Arc::new(
         ChainSpecBuilder::default()
@@ -141,7 +141,7 @@ async fn test_reorg_through_backfill() -> eyre::Result<()> {
     let seed: [u8; 32] = rand::rng().random();
     let mut rng = StdRng::from_seed(seed);
-    println!("Seed: {:?}", seed);
+    println!("Seed: {seed:?}");
     let chain_spec = Arc::new(
         ChainSpecBuilder::default()

View File

@@ -35,7 +35,7 @@ async fn test_fee_history() -> eyre::Result<()> {
     let seed: [u8; 32] = rand::rng().random();
     let mut rng = StdRng::from_seed(seed);
-    println!("Seed: {:?}", seed);
+    println!("Seed: {seed:?}");
     let chain_spec = Arc::new(
         ChainSpecBuilder::default()

View File

@@ -1143,7 +1143,7 @@ mod tests {
             assert_eq!(received_notification, notification);
         }
         Poll::Pending => panic!("Notification send is pending"),
-        Poll::Ready(Err(e)) => panic!("Failed to send notification: {:?}", e),
+        Poll::Ready(Err(e)) => panic!("Failed to send notification: {e:?}"),
     }
     // Ensure the notification ID was incremented

View File

@@ -43,7 +43,7 @@ pub fn benchmark_fetch_pending_hashes(group: &mut BenchmarkGroup<'_, WallTime>,
         buffer_hash_to_tx_fetcher(&mut tx_fetcher, hash, peer, 0, None);
     }
-    let group_id = format!("fetch pending hashes, peers num: {}", peers_num);
+    let group_id = format!("fetch pending hashes, peers num: {peers_num}");
     group.bench_function(group_id, |b| {
         b.iter(|| {

View File

@@ -145,7 +145,7 @@ impl FromStr for TransactionPropagationKind {
         match s {
             "All" | "all" => Ok(Self::All),
             "Trusted" | "trusted" => Ok(Self::Trusted),
-            _ => Err(format!("Invalid transaction propagation policy: {}", s)),
+            _ => Err(format!("Invalid transaction propagation policy: {s}")),
         }
     }
 }

View File

@@ -30,7 +30,7 @@ fn main() -> Result<(), Box<dyn Error>> {
     // if on a tag: v0.2.0-beta.3
     let not_on_tag = env::var("VERGEN_GIT_DESCRIBE")?.ends_with(&format!("-g{sha_short}"));
     let version_suffix = if is_dirty || not_on_tag { "-dev" } else { "" };
-    println!("cargo:rustc-env=RETH_VERSION_SUFFIX={}", version_suffix);
+    println!("cargo:rustc-env=RETH_VERSION_SUFFIX={version_suffix}");
     // Set short SHA
     println!("cargo:rustc-env=VERGEN_GIT_SHA_SHORT={}", &sha[..8]);

View File

@@ -136,7 +136,7 @@ impl FromStr for ByteSize {
             "MB" => 1024 * 1024,
             "GB" => 1024 * 1024 * 1024,
             "TB" => 1024 * 1024 * 1024 * 1024,
-            _ => return Err(format!("Invalid unit: {}. Use B, KB, MB, GB, or TB.", unit)),
+            _ => return Err(format!("Invalid unit: {unit}. Use B, KB, MB, GB, or TB.")),
         };
         Ok(Self(num * multiplier))
@@ -162,7 +162,7 @@ impl fmt::Display for ByteSize {
             (self.0 as f64, "B")
         };
-        write!(f, "{:.2}{}", size, unit)
+        write!(f, "{size:.2}{unit}")
     }
 }
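
Format-spec options travel with the inlined capture unchanged, as the `{checksum:x}`, `{name_with_eip:32}`, and `{size:.2}` hunks in this commit show. A quick sketch:

```rust
fn main() {
    let checksum = 0xdead_beef_u32;
    let size = 1536.0 / 1024.0;
    let unit = "KB";
    assert_eq!(format!("{checksum:x}"), "deadbeef");
    assert_eq!(format!("{size:.2}{unit}"), "1.50KB");
}
```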

View File

@@ -80,7 +80,7 @@ impl fmt::Debug for Hooks {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         let hooks_len = self.inner.len();
         f.debug_struct("Hooks")
-            .field("inner", &format_args!("Arc<Vec<Box<dyn Hook>>>, len: {}", hooks_len))
+            .field("inner", &format_args!("Arc<Vec<Box<dyn Hook>>>, len: {hooks_len}"))
             .finish()
     }
 }

View File

@@ -244,7 +244,7 @@ mod tests {
         MetricServer::new(config).serve().await.unwrap();
         // Send request to the metrics endpoint
-        let url = format!("http://{}", listen_addr);
+        let url = format!("http://{listen_addr}");
         let response = Client::new().get(&url).send().await.unwrap();
         assert!(response.status().is_success());

View File

@@ -32,7 +32,7 @@ impl SuperChainRegistryManager {
     /// Get the path to a genesis file for the given network (`mainnet`, `base`).
     pub fn genesis_path(&self, network_type: &str, network: &str) -> PathBuf {
-        self.base_path.join(network_type).join(format!("{}.json.zst", network))
+        self.base_path.join(network_type).join(format!("{network}.json.zst"))
     }
     /// Read file from the given path

View File

@@ -41,10 +41,10 @@ pub(crate) fn read_superchain_genesis(
         .map_err(SuperchainConfigError::CorruptDataError)?;
     // Read and decompress the genesis file.
     let compressed_genesis_file =
-        read_file(&archive, &format!("genesis/{}/{}.json.zz", environment, name))?;
+        read_file(&archive, &format!("genesis/{environment}/{name}.json.zz"))?;
     let genesis_file =
         decompress_to_vec_zlib_with_limit(&compressed_genesis_file, MAX_GENESIS_SIZE)
-            .map_err(|e| SuperchainConfigError::DecompressError(format!("{}", e)))?;
+            .map_err(|e| SuperchainConfigError::DecompressError(format!("{e}")))?;
     // Load the genesis file.
     let mut genesis: Genesis = serde_json::from_slice(&genesis_file)?;
@@ -65,7 +65,7 @@ fn read_superchain_metadata(
     environment: &str,
     archive: &TarArchiveRef<'_>,
 ) -> Result<ChainMetadata, SuperchainConfigError> {
-    let config_file = read_file(archive, &format!("configs/{}/{}.json", environment, name))?;
+    let config_file = read_file(archive, &format!("configs/{environment}/{name}.json"))?;
     let config_content = String::from_utf8(config_file)?;
     let chain_config: ChainMetadata = serde_json::from_str(&config_content)?;
     Ok(chain_config)

View File

@@ -38,8 +38,7 @@ mod tests {
     for &chain in OpChainSpecParser::SUPPORTED_CHAINS {
         assert!(
             <OpChainSpecParser as ChainSpecParser>::parse(chain).is_ok(),
-            "Failed to parse {}",
-            chain
+            "Failed to parse {chain}"
         );
     }
 }

View File

@@ -28,8 +28,8 @@ where
 {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
-            Self::Left(l) => write!(f, "Left: {}", l),
-            Self::Right(r) => write!(f, "Right: {}", r),
+            Self::Left(l) => write!(f, "Left: {l}"),
+            Self::Right(r) => write!(f, "Right: {r}"),
         }
     }
 }

View File

@@ -795,9 +795,9 @@ pub fn dummy_name() -> String {
     use rand::Rng;
     let num: u64 = rand::rng().random();
     if cfg!(windows) {
-        format!(r"\\.\pipe\my-pipe-{}", num)
+        format!(r"\\.\pipe\my-pipe-{num}")
     } else {
-        format!(r"/tmp/my-uds-{}", num)
+        format!(r"/tmp/my-uds-{num}")
     }
 }

View File

@@ -83,11 +83,8 @@ async fn worker_fetch(
         WorkerRequest::Download { chunk_index, start, end } => {
             data_file.seek(tokio::io::SeekFrom::Start(start as u64)).await?;
-            let mut response = client
-                .get(&url)
-                .header(RANGE, format!("bytes={}-{}", start, end))
-                .send()
-                .await?;
+            let mut response =
+                client.get(&url).header(RANGE, format!("bytes={start}-{end}")).send().await?;
             let mut written_bytes = 0;
             while let Some(chunk) = response.chunk().await? {

View File

@@ -183,7 +183,7 @@ impl S3Stage {
             if let Err(err) = fetch(
                 filename,
                 &static_file_directory,
-                &format!("{}/{filename}", url),
+                &format!("{url}/{filename}"),
                 max_concurrent_requests,
                 Some(*file_hash),
             )

View File

@@ -314,7 +314,7 @@ where
     let storage_transitions = alloc
         .filter_map(|(addr, account)| account.storage.as_ref().map(|storage| (addr, storage)))
-        .flat_map(|(addr, storage)| storage.iter().map(|(key, _)| ((*addr, *key), [block])));
+        .flat_map(|(addr, storage)| storage.keys().map(|key| ((*addr, *key), [block])));
     provider.insert_storage_history_index(storage_transitions)?;
     trace!(target: "reth::cli", "Inserted storage history");
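
This hunk is clippy's `iter_kv_map` lint rather than a format fix: when only the keys of a map are used, iterate them directly with `.keys()` instead of mapping the values away. A generic sketch with a std `BTreeMap` (names are illustrative):

```rust
use std::collections::BTreeMap;

fn main() {
    let storage: BTreeMap<u64, u64> = BTreeMap::from([(1, 10), (2, 20)]);
    // Before: clippy::iter_kv_map — the value half of each entry is discarded.
    let slots_old: Vec<u64> = storage.iter().map(|(key, _)| *key).collect();
    // After: iterate the keys directly.
    let slots_new: Vec<u64> = storage.keys().copied().collect();
    assert_eq!(slots_old, slots_new);
}
```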

View File

@@ -30,7 +30,7 @@ where
         format!("{}/../../../testdata/micro/db/{}.json", env!("CARGO_MANIFEST_DIR"), T::NAME);
     let list: Vec<TableRow<T>> = serde_json::from_reader(std::io::BufReader::new(
         std::fs::File::open(&path)
-            .unwrap_or_else(|_| panic!("Test vectors not found. They can be generated from the workspace by calling `cargo run --bin reth --features dev -- test-vectors tables`: {:?}", path))
+            .unwrap_or_else(|_| panic!("Test vectors not found. They can be generated from the workspace by calling `cargo run --bin reth --features dev -- test-vectors tables`: {path:?}"))
     ))
     .unwrap();

View File

@@ -85,11 +85,11 @@ fn bench_get_seq_raw(c: &mut Criterion) {
     c.bench_function("bench_get_seq_raw", |b| {
         b.iter(|| unsafe {
             txn.txn_execute(|txn| {
-                mdbx_cursor_open(txn, dbi, &mut cursor);
+                mdbx_cursor_open(txn, dbi, &raw mut cursor);
                 let mut i = 0;
                 let mut count = 0u32;
-                while mdbx_cursor_get(cursor, &mut key, &mut data, MDBX_NEXT) == 0 {
+                while mdbx_cursor_get(cursor, &raw mut key, &raw mut data, MDBX_NEXT) == 0 {
                     i += key.iov_len + data.iov_len;
                     count += 1;
                 }

View File

@@ -49,7 +49,7 @@ fn bench_get_rand_raw(c: &mut Criterion) {
                 key_val.iov_len = key.len();
                 key_val.iov_base = key.as_bytes().as_ptr().cast_mut().cast();
-                mdbx_get(txn, dbi, &key_val, &mut data_val);
+                mdbx_get(txn, dbi, &raw const key_val, &raw mut data_val);
                 i += key_val.iov_len;
             }
@@ -98,7 +98,7 @@ fn bench_put_rand_raw(c: &mut Criterion) {
         b.iter(|| unsafe {
             let mut txn: *mut MDBX_txn = ptr::null_mut();
             env.with_raw_env_ptr(|env| {
-                mdbx_txn_begin_ex(env, ptr::null_mut(), 0, &mut txn, ptr::null_mut());
+                mdbx_txn_begin_ex(env, ptr::null_mut(), 0, &raw mut txn, ptr::null_mut());
                 let mut i = 0;
                 for (key, data) in &items {
@@ -107,7 +107,7 @@ fn bench_put_rand_raw(c: &mut Criterion) {
                     data_val.iov_len = data.len();
                     data_val.iov_base = data.as_bytes().as_ptr().cast_mut().cast();
-                    i += mdbx_put(txn, dbi, &key_val, &mut data_val, 0);
+                    i += mdbx_put(txn, dbi, &raw const key_val, &raw mut data_val, 0);
                 }
                 assert_eq!(0, i);
                 mdbx_txn_abort(txn);

View File

@@ -7,6 +7,7 @@
 #![cfg_attr(not(test), warn(unused_crate_dependencies))]
 #![allow(missing_docs, clippy::needless_pass_by_ref_mut)]
 #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
+#![allow(clippy::borrow_as_ptr)]
 pub extern crate reth_mdbx_sys as ffi;

View File

@@ -426,6 +426,7 @@ impl Transaction<RW> {
     /// Returns a buffer which can be used to write a value into the item at the
     /// given key and with the given length. The buffer must be completely
     /// filled by the caller.
+    #[allow(clippy::mut_from_ref)]
     pub fn reserve(
         &self,
         db: &Database,
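
`clippy::mut_from_ref` flags signatures that return a `&mut` derived from a shared reference, which is usually a soundness smell; `reserve` is a legitimate exception because the buffer it hands out lives in the memory map rather than in `self`, so the lint is silenced rather than "fixed". A hypothetical sketch of the shape (not the real mdbx API):

```rust
// Stand-in type: the buffer lives behind a raw pointer (e.g. handed out by
// a C library), outside what the borrow checker tracks through `&self`.
struct Txn {
    buf: *mut u8,
    len: usize,
}

impl Txn {
    // Returning `&mut` from `&self` trips clippy::mut_from_ref, hence the allow.
    #[allow(clippy::mut_from_ref)]
    fn reserve(&self, len: usize) -> &mut [u8] {
        assert!(len <= self.len);
        // SAFETY: illustrative only; a real implementation must guarantee
        // exclusive access to this region for the returned lifetime.
        unsafe { std::slice::from_raw_parts_mut(self.buf, len) }
    }
}

fn main() {
    let mut backing = [0u8; 16];
    let txn = Txn { buf: backing.as_mut_ptr(), len: backing.len() };
    txn.reserve(4).copy_from_slice(b"reth");
    assert_eq!(&backing[..4], b"reth");
}
```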

View File

@@ -592,10 +592,7 @@ impl<P> RevealedSparseTrie<P> {
             current.extend_from_slice_unchecked(key);
             assert!(
                 path.starts_with(&current),
-                "path: {:?}, current: {:?}, key: {:?}",
-                path,
-                current,
-                key
+                "path: {path:?}, current: {current:?}, key: {key:?}",
             );
         }
@@ -607,11 +604,7 @@ impl<P> RevealedSparseTrie<P> {
             let nibble = path[current.len()];
             debug_assert!(
                 state_mask.is_bit_set(nibble),
-                "current: {:?}, path: {:?}, nibble: {:?}, state_mask: {:?}",
-                current,
-                path,
-                nibble,
-                state_mask
+                "current: {current:?}, path: {path:?}, nibble: {nibble:?}, state_mask: {state_mask:?}",
             );
             // If the branch node has a child that is a leaf node that we're removing,
@@ -2244,8 +2237,7 @@ mod tests {
             };
             assert!(
                 equals,
-                "path: {:?}\nproof node: {:?}\nsparse node: {:?}",
-                proof_node_path, proof_node, sparse_node
+                "path: {proof_node_path:?}\nproof node: {proof_node:?}\nsparse node: {sparse_node:?}"
             );
         }
     }

View File

@@ -53,11 +53,11 @@ fn main() {
     match result {
         Ok(blob_transaction) => {
             // Handle successful transaction
-            println!("Processed BlobTransaction: {:?}", blob_transaction);
+            println!("Processed BlobTransaction: {blob_transaction:?}");
         }
         Err(e) => {
             // Handle errors specifically
-            eprintln!("Failed to process transaction: {:?}", e);
+            eprintln!("Failed to process transaction: {e:?}");
         }
     }
 }

View File

@@ -394,8 +394,7 @@ mod tests {
         // Assert that at least one outcome matches our criteria
         assert!(
             outcomes.iter().any(assert_fn),
-            "No outcome matched the expected criteria. Outcomes: {:?}",
-            outcomes
+            "No outcome matched the expected criteria. Outcomes: {outcomes:?}"
         );
     }
 }

View File

@@ -238,7 +238,7 @@ pub fn apply_withdrawals_contract_call(
         Ok(res) => res.state,
         Err(e) => {
             return Err(BlockExecutionError::Internal(InternalBlockExecutionError::Other(
-                format!("withdrawal contract system call revert: {}", e).into(),
+                format!("withdrawal contract system call revert: {e}").into(),
             )))
         }
     };

View File

@@ -54,7 +54,7 @@ async fn main() -> eyre::Result<()> {
         // print network events
         let mut events = handle.event_listener();
         while let Some(event) = events.next().await {
-            println!("Received event: {:?}", event);
+            println!("Received event: {event:?}");
         }
     });
@@ -121,7 +121,7 @@ async fn run_peer(handle: NetworkHandle) -> eyre::Result<()> {
     let client: FetchClient = peer.fetch_client().await?;
     let header = client.get_header(BlockHashOrNumber::Number(0)).await.unwrap();
-    println!("Got header: {:?}", header);
+    println!("Got header: {header:?}");
     // send a (bogus) hashes message
     let hashes = NewPooledTransactionHashes68 {

View File

@@ -61,7 +61,7 @@ async fn main() -> eyre::Result<()> {
     let mut txs = pool.pending_transactions_listener_for(TransactionListenerKind::All);
     while let Some(tx) = txs.recv().await {
-        println!("Received new transaction: {:?}", tx);
+        println!("Received new transaction: {tx:?}");
     }
     Ok(())

View File

@@ -39,7 +39,7 @@ async fn main() -> eyre::Result<()> {
     // interact with the network
     let mut events = handle.event_listener();
     while let Some(event) = events.next().await {
-        println!("Received event: {:?}", event);
+        println!("Received event: {event:?}");
    }
     Ok(())

View File

@@ -153,7 +153,7 @@ mod tests {
         let sink = match pending.accept().await {
             Ok(sink) => sink,
             Err(err) => {
-                eprintln!("failed to accept subscription: {}", err);
+                eprintln!("failed to accept subscription: {err}");
                 return;
             }
         };
@@ -175,7 +175,7 @@ mod tests {
     #[tokio::test(flavor = "multi_thread")]
     async fn test_call_transaction_count_http() {
         let server_addr = start_server().await;
-        let uri = format!("http://{}", server_addr);
+        let uri = format!("http://{server_addr}");
         let client = HttpClientBuilder::default().build(&uri).unwrap();
         let count = TxpoolExtApiClient::transaction_count(&client).await.unwrap();
         assert_eq!(count, 0);
@@ -184,7 +184,7 @@ mod tests {
     #[tokio::test(flavor = "multi_thread")]
     async fn test_subscribe_transaction_count_ws() {
         let server_addr = start_server().await;
-        let ws_url = format!("ws://{}", server_addr);
+        let ws_url = format!("ws://{server_addr}");
         let client = WsClientBuilder::default().build(&ws_url).await.unwrap();
         let mut sub = TxpoolExtApiClient::subscribe_transaction_count(&client, None)