fix: clippy warning manual_is_multiple_of (#17853)

Signed-off-by: Jack Drogon <jack.xsuperman@gmail.com>
Jack Drogon
2025-08-13 21:47:58 +08:00
committed by GitHub
parent f30016019d
commit f1da87e3e6
11 changed files with 26 additions and 22 deletions
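
The clippy lint `manual_is_multiple_of` flags handwritten `x % n == 0` checks on unsigned integers and suggests the std method `is_multiple_of`, stable on the unsigned integer types since Rust 1.87. A minimal before/after sketch of the mechanical rewrite applied in every hunk below, using an illustrative counter and threshold rather than reth code:

// Sketch only: `index` and the 100_000 threshold are placeholders for the
// values each call site actually uses.
fn main() {
    let index: u64 = 200_000;

    // Before: manual modulo check, flagged by clippy as `manual_is_multiple_of`.
    if index % 100_000 == 0 {
        println!("Hashed {index} entries.");
    }

    // After: the same check, stated as intent.
    if index.is_multiple_of(100_000) {
        println!("Hashed {index} entries.");
    }
}

The only semantic difference between the two forms is a zero divisor: `x % 0` panics, while `x.is_multiple_of(0)` is true only for `x == 0`. The divisors touched in this commit are literals, named constants, or intervals clamped with `.max(1)`, so the rewrite is not expected to change behavior.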

View File

@@ -111,7 +111,7 @@ impl<N: ProviderNodeTypes> TableViewer<(u64, Duration)> for ChecksumViewer<'_, N
for (index, entry) in walker.enumerate() {
let (k, v): (RawKey<T::Key>, RawValue<T::Value>) = entry?;
-if index % 100_000 == 0 {
+if index.is_multiple_of(100_000) {
info!("Hashed {index} entries.");
}

View File

@@ -209,7 +209,7 @@ pub fn validate_4844_header_standalone<H: BlockHeader>(
return Err(ConsensusError::ParentBeaconBlockRootMissing)
}
-if blob_gas_used % DATA_GAS_PER_BLOB != 0 {
+if !blob_gas_used.is_multiple_of(DATA_GAS_PER_BLOB) {
return Err(ConsensusError::BlobGasUsedNotMultipleOfBlobGasPerBlob {
blob_gas_used,
blob_gas_per_blob: DATA_GAS_PER_BLOB,

View File

@@ -370,7 +370,7 @@ where
for (index, hash_to_number) in hash_collector.iter()?.enumerate() {
let (hash, number) = hash_to_number?;
-if index != 0 && index % interval == 0 {
+if index != 0 && index.is_multiple_of(interval) {
info!(target: "era::history::import", progress = %format!("{:.2}%", (index as f64 / total_headers as f64) * 100.0), "Writing headers hash index");
}

View File

@@ -180,7 +180,7 @@ where
});
// Flush to ETL when channels length reaches MAXIMUM_CHANNELS
-if !channels.is_empty() && channels.len() % MAXIMUM_CHANNELS == 0 {
+if !channels.is_empty() && channels.len().is_multiple_of(MAXIMUM_CHANNELS) {
collect(&mut channels, &mut collector)?;
}
}
@@ -193,7 +193,7 @@ where
let total_hashes = collector.len();
let interval = (total_hashes / 10).max(1);
for (index, item) in collector.iter()?.enumerate() {
-if index > 0 && index % interval == 0 {
+if index > 0 && index.is_multiple_of(interval) {
info!(
target: "sync::stages::hashing_account",
progress = %format!("{:.2}%", (index as f64 / total_hashes as f64) * 100.0),

View File

@@ -110,7 +110,7 @@ where
});
// Flush to ETL when channels length reaches MAXIMUM_CHANNELS
-if !channels.is_empty() && channels.len() % MAXIMUM_CHANNELS == 0 {
+if !channels.is_empty() && channels.len().is_multiple_of(MAXIMUM_CHANNELS) {
collect(&mut channels, &mut collector)?;
}
}
@@ -121,7 +121,7 @@ where
let interval = (total_hashes / 10).max(1);
let mut cursor = tx.cursor_dup_write::<tables::HashedStorages>()?;
for (index, item) in collector.iter()?.enumerate() {
-if index > 0 && index % interval == 0 {
+if index > 0 && index.is_multiple_of(interval) {
info!(
target: "sync::stages::hashing_storage",
progress = %format!("{:.2}%", (index as f64 / total_hashes as f64) * 100.0),

View File

@@ -119,7 +119,7 @@ where
for (index, header) in self.header_collector.iter()?.enumerate() {
let (_, header_buf) = header?;
-if index > 0 && index % interval == 0 && total_headers > 100 {
+if index > 0 && index.is_multiple_of(interval) && total_headers > 100 {
info!(target: "sync::stages::headers", progress = %format!("{:.2}%", (index as f64 / total_headers as f64) * 100.0), "Writing headers");
}
@@ -164,7 +164,7 @@ where
for (index, hash_to_number) in self.hash_collector.iter()?.enumerate() {
let (hash, number) = hash_to_number?;
-if index > 0 && index % interval == 0 && total_headers > 100 {
+if index > 0 && index.is_multiple_of(interval) && total_headers > 100 {
info!(target: "sync::stages::headers", progress = %format!("{:.2}%", (index as f64 / total_headers as f64) * 100.0), "Writing headers hash index");
}

View File

@@ -154,7 +154,7 @@ where
let interval = (total_hashes / 10).max(1);
for (index, hash_to_number) in hash_collector.iter()?.enumerate() {
let (hash, number) = hash_to_number?;
-if index > 0 && index % interval == 0 {
+if index > 0 && index.is_multiple_of(interval) {
info!(
target: "sync::stages::transaction_lookup",
?append_only,

View File

@@ -77,7 +77,7 @@ where
let (block_number, key) = partial_key_factory(entry?);
cache.entry(key).or_default().push(block_number);
-if idx > 0 && idx % interval == 0 && total_changesets > 1000 {
+if idx > 0 && idx.is_multiple_of(interval) && total_changesets > 1000 {
info!(target: "sync::stages::index_history", progress = %format!("{:.4}%", (idx as f64 / total_changesets as f64) * 100.0), "Collecting indices");
}
@@ -131,7 +131,7 @@ where
let sharded_key = decode_key(k)?;
let new_list = BlockNumberList::decompress_owned(v)?;
-if index > 0 && index % interval == 0 && total_entries > 10 {
+if index > 0 && index.is_multiple_of(interval) && total_entries > 10 {
info!(target: "sync::stages::index_history", progress = %format!("{:.2}%", (index as f64 / total_entries as f64) * 100.0), "Writing indices");
}

View File

@@ -484,7 +484,8 @@ fn parse_accounts(
let GenesisAccountWithAddress { genesis_account, address } = serde_json::from_str(&line)?;
collector.insert(address, genesis_account)?;
-if !collector.is_empty() && collector.len() % AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP == 0
+if !collector.is_empty() &&
+    collector.len().is_multiple_of(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP)
{
info!(target: "reth::cli",
parsed_new_accounts=collector.len(),
@@ -523,7 +524,7 @@ where
accounts.push((address, account));
-if (index > 0 && index % AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP == 0) ||
+if (index > 0 && index.is_multiple_of(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP)) ||
index == accounts_len - 1
{
total_inserted_accounts += accounts.len();
@@ -588,7 +589,7 @@ where
intermediate_state = Some(*state);
-if total_flushed_updates % SOFT_LIMIT_COUNT_FLUSHED_UPDATES == 0 {
+if total_flushed_updates.is_multiple_of(SOFT_LIMIT_COUNT_FLUSHED_UPDATES) {
info!(target: "reth::cli",
total_flushed_updates,
"Flushing trie updates"

View File

@@ -366,7 +366,7 @@ fn block4(
for idx in address_range {
let address = Address::with_last_byte(idx);
// increase balance for every even account and destroy every odd
-bundle_state_builder = if idx % 2 == 0 {
+bundle_state_builder = if idx.is_multiple_of(2) {
bundle_state_builder
.state_present_account_info(
address,
@@ -462,7 +462,7 @@ fn block5(
.map(|slot| (U256::from(slot), (U256::from(slot), U256::from(slot * 4))))
.collect(),
);
-bundle_state_builder = if idx % 2 == 0 {
+bundle_state_builder = if idx.is_multiple_of(2) {
bundle_state_builder
.revert_account_info(
number,

View File

@@ -100,14 +100,14 @@ fn account_cursor_correct_order() {
let db = create_test_rw_db();
db.update(|tx| {
-for (key, account) in accounts.iter().filter(|x| x.0[31] % 2 == 0) {
+for (key, account) in accounts.iter().filter(|x| x.0[31].is_multiple_of(2)) {
tx.put::<tables::HashedAccounts>(*key, *account).unwrap();
}
})
.unwrap();
let mut hashed_post_state = HashedPostState::default();
-for (hashed_address, account) in accounts.iter().filter(|x| x.0[31] % 2 != 0) {
+for (hashed_address, account) in accounts.iter().filter(|x| !x.0[31].is_multiple_of(2)) {
hashed_post_state.accounts.insert(*hashed_address, Some(*account));
}
@@ -127,14 +127,14 @@ fn removed_accounts_are_discarded() {
let db = create_test_rw_db();
db.update(|tx| {
-for (key, account) in accounts.iter().filter(|x| x.0[31] % 2 == 0) {
+for (key, account) in accounts.iter().filter(|x| x.0[31].is_multiple_of(2)) {
tx.put::<tables::HashedAccounts>(*key, *account).unwrap();
}
})
.unwrap();
let mut hashed_post_state = HashedPostState::default();
-for (hashed_address, account) in accounts.iter().filter(|x| x.0[31] % 2 != 0) {
+for (hashed_address, account) in accounts.iter().filter(|x| !x.0[31].is_multiple_of(2)) {
hashed_post_state.accounts.insert(
*hashed_address,
if removed_keys.contains(hashed_address) { None } else { Some(*account) },
@@ -338,7 +338,10 @@ fn zero_value_storage_entries_are_discarded() {
(0..10).map(|key| (B256::with_last_byte(key), U256::from(key))).collect::<BTreeMap<_, _>>(); // every even number is changed to zero value
let post_state_storage = (0..10)
.map(|key| {
-(B256::with_last_byte(key), if key % 2 == 0 { U256::ZERO } else { U256::from(key) })
+(
+    B256::with_last_byte(key),
+    if key.is_multiple_of(2) { U256::ZERO } else { U256::from(key) },
+)
})
.collect::<BTreeMap<_, _>>();