chore(lint): Remove redundant lint attributes (#15531)

Emilia Hane
2025-04-09 12:11:02 +02:00
committed by GitHub
parent 9239188093
commit 73b9294072
26 changed files with 9 additions and 37 deletions
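
For context: Rust's `#[expect(...)]` attribute (stable since 1.81) suppresses a lint exactly like `#[allow(...)]`, but additionally emits an `unfulfilled_lint_expectations` warning when the suppressed lint never actually fires, so stale suppressions surface on their own. That is the pattern the diffs below appear to follow: suppressions that no longer do anything are deleted outright, and the ones still needed are switched from `#[allow]` to `#[expect]`. A minimal sketch of the difference, with made-up function names rather than code from this repository:

// `allow` silently suppresses the lint even when it would never fire,
// so the attribute can quietly outlive its purpose.
#[allow(dead_code)]
fn unused_helper() {}

// `expect` (Rust 1.81+) also suppresses the lint, but the compiler raises
// `unfulfilled_lint_expectations` if the lint does not fire, signalling that
// the attribute has become redundant and can be removed.
#[expect(dead_code)]
fn used_helper() {}

fn main() {
    // `used_helper` is called, so `dead_code` never triggers for it and the
    // `#[expect(dead_code)]` above is reported as an unfulfilled expectation.
    used_helper();
}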

View File

@@ -25,7 +25,7 @@ impl Default for WorkloadExecutor {
impl WorkloadExecutor {
/// Creates a new executor with the given number of threads for cpu bound work (rayon).
-#[allow(unused)]
+#[expect(unused)]
pub(super) fn with_num_cpu_threads(cpu_threads: usize) -> Self {
Self {
inner: WorkloadExecutorInner::new(
@@ -50,7 +50,7 @@ impl WorkloadExecutor {
}
/// Returns access to the rayon pool
-#[allow(unused)]
+#[expect(unused)]
pub(super) fn rayon_pool(&self) -> &Arc<rayon::ThreadPool> {
&self.inner.rayon_pool
}

View File

@@ -617,7 +617,7 @@ impl From<TransactionSigned> for Signed<Transaction> {
#[cfg(any(test, feature = "arbitrary"))]
impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned {
fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
-#[allow(unused_mut)]
+#[expect(unused_mut)]
let mut transaction = Transaction::arbitrary(u)?;
let secp = secp256k1::Secp256k1::new();

View File

@@ -461,7 +461,6 @@ impl<B: Block> IntoIterator for ChainBlocks<'_, B> {
type IntoIter = alloc::collections::btree_map::IntoIter<BlockNumber, RecoveredBlock<B>>;
fn into_iter(self) -> Self::IntoIter {
-#[allow(clippy::unnecessary_to_owned)]
self.blocks.into_owned().into_iter()
}
}

View File

@@ -445,7 +445,7 @@ where
/// A generic block executor that uses a [`BlockExecutor`] to
/// execute blocks.
-#[allow(missing_debug_implementations, dead_code)]
+#[expect(missing_debug_implementations)]
pub struct BasicBlockExecutor<F, DB> {
/// Block execution strategy.
pub(crate) strategy_factory: F,

View File

@@ -774,7 +774,6 @@ mod test {
// <https://github.com/sigp/discv5/blob/master/src/kbucket/key.rs#L89-L101>
#[expect(unreachable_pub)]
#[expect(unused)]
-#[allow(clippy::assign_op_pattern)]
mod sigp {
use alloy_primitives::U256;
use enr::{

View File

@@ -51,9 +51,7 @@ macro_rules! poll_nested_stream_with_budget {
loop {
match $poll_stream {
Poll::Ready(Some(item)) => {
-#[allow(unused_mut)]
-let mut f = $on_ready_some;
-f(item);
+$on_ready_some(item);
budget -= 1;
if budget == 0 {

View File

@@ -169,7 +169,6 @@ impl<N: NetworkPrimitives> ActiveSession<N> {
macro_rules! on_response {
($resp:ident, $item:ident) => {{
let RequestPair { request_id, message } = $resp;
-#[allow(clippy::collapsible_match)]
if let Some(req) = self.inflight_requests.remove(&request_id) {
match req.request {
RequestState::Waiting(PeerRequest::$item { response, .. }) => {

View File

@@ -244,7 +244,7 @@ impl ValidateTx68 for EthMessageFilter {
fn max_encoded_tx_length(&self, ty: TxType) -> Option<usize> {
// the biggest transaction so far is a blob transaction, which is currently max 2^17,
// encoded length, nonetheless, the blob tx may become bigger in the future.
-#[allow(unreachable_patterns, clippy::match_same_arms)]
+#[expect(clippy::match_same_arms)]
match ty {
TxType::Legacy | TxType::Eip2930 | TxType::Eip1559 => Some(MAX_MESSAGE_SIZE),
TxType::Eip4844 => None,

View File

@@ -125,7 +125,6 @@ pub enum AnyNode {
impl AnyNode {
/// Returns the peer id of the node.
-#[allow(clippy::missing_const_for_fn)]
pub fn peer_id(&self) -> PeerId {
match self {
Self::NodeRecord(record) => record.id,
@@ -136,7 +135,6 @@ impl AnyNode {
}
/// Returns the full node record if available.
-#[allow(clippy::missing_const_for_fn)]
pub fn node_record(&self) -> Option<NodeRecord> {
match self {
Self::NodeRecord(record) => Some(*record),

View File

@@ -647,7 +647,6 @@ impl reth_codecs::Compact for OpTransactionSigned {
#[cfg(any(test, feature = "arbitrary"))]
impl<'a> arbitrary::Arbitrary<'a> for OpTransactionSigned {
fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
-#[allow(unused_mut)]
let mut transaction = OpTypedTransaction::arbitrary(u)?;
let secp = secp256k1::Secp256k1::new();
@@ -737,7 +736,6 @@ pub mod serde_bincode_compat {
/// Bincode-compatible [`super::OpTypedTransaction`] serde implementation.
#[derive(Debug, Serialize, Deserialize)]
-#[allow(missing_docs)]
enum OpTypedTransaction<'a> {
Legacy(TxLegacy<'a>),
Eip2930(TxEip2930<'a>),

View File

@@ -276,7 +276,6 @@ impl<N: OpNodeCore> fmt::Debug for OpEthApi<N> {
}
/// Container type `OpEthApi`
-#[allow(missing_debug_implementations)]
struct OpEthApiInner<N: OpNodeCore> {
/// Gateway to node's core components.
eth_api: EthApiNodeBackend<N>,

View File

@@ -60,7 +60,6 @@ pub mod secp256k1 {
}
#[cfg(any(test, feature = "secp256k1"))]
-#[allow(unused, unreachable_pub)]
mod impl_secp256k1 {
use super::*;
pub(crate) use ::secp256k1::Error;

View File

@@ -21,7 +21,6 @@ where
/// Returns a new instance with the additional handlers for the `eth` namespace.
///
/// This will spawn all necessary tasks for the additional handlers.
-#[allow(clippy::too_many_arguments)]
pub fn bootstrap<Tasks>(config: EthConfig, executor: Tasks, eth_api: EthApi) -> Self
where
Tasks: TaskSpawner + Clone + 'static,

View File

@@ -176,7 +176,6 @@ where
N: NodePrimitives,
{
/// Create a new instance of the builder
-#[allow(clippy::too_many_arguments)]
pub const fn new(
provider: Provider,
pool: Pool,

View File

@@ -58,7 +58,6 @@ impl RpcRequestMetrics {
}
/// Creates a new instance of the metrics layer for Ws.
-#[allow(unused)]
pub(crate) fn ipc(module: &RpcModule<()>) -> Self {
Self::new(module, RpcTransport::Ipc)
}
@@ -182,7 +181,6 @@ impl<F: Future<Output = MethodResponse>> Future for MeteredRequestFuture<F> {
pub(crate) enum RpcTransport {
Http,
WebSocket,
-#[allow(unused)]
Ipc,
}

View File

@@ -29,7 +29,6 @@ pub use error::*;
pub use reth_rpc_api::EngineApiServer;
#[cfg(test)]
-#[allow(unused_imports)]
mod tests {
// silence unused import warning
use alloy_rlp as _;

View File

@@ -102,7 +102,6 @@ pub trait EthBlocks: LoadBlock {
/// Helper function for `eth_getBlockReceipts`.
///
/// Returns all transaction receipts in block, or `None` if block wasn't found.
-#[allow(clippy::type_complexity)]
fn block_receipts(
&self,
block_id: BlockId,
@@ -111,7 +110,6 @@ pub trait EthBlocks: LoadBlock {
Self: LoadReceipt;
/// Helper method that loads a block and all its receipts.
-#[allow(clippy::type_complexity)]
fn load_block_and_receipts(
&self,
block_id: BlockId,

View File

@@ -65,7 +65,6 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA
/// The transactions are packed into individual blocks. Overrides can be provided.
///
/// See also: <https://github.com/ethereum/go-ethereum/pull/27720>
-#[allow(clippy::type_complexity)]
fn simulate_v1(
&self,
payload: SimulatePayload,

View File

@@ -21,7 +21,6 @@ pub trait DbCursorRO<T: Table> {
fn seek(&mut self, key: T::Key) -> PairResult<T>;
/// Position the cursor at the next KV pair, returning it.
-#[allow(clippy::should_implement_trait)]
fn next(&mut self) -> PairResult<T>;
/// Position the cursor at the previous KV pair, returning it.

View File

@@ -22,7 +22,6 @@ macro_rules! impl_fuzzer_with_input {
#[allow(unused_imports)]
use super::inputs::*;
#[allow(unused_imports)]
use crate::models::*;
/// Encodes and decodes table types returning its encoded size and the decoded object.

View File

@@ -914,7 +914,6 @@ pub(crate) mod read_transactions {
}
/// Converts a [`HandleSlowReadersCallback`] to the actual FFI function pointer.
-#[allow(clippy::missing_transmute_annotations)]
fn convert_hsr_fn(callback: Option<HandleSlowReadersCallback>) -> ffi::MDBX_hsr_func {
unsafe { std::mem::transmute(callback) }
}

View File

@@ -53,7 +53,6 @@ impl TxnManager {
/// - [`TxnManagerMessage::Commit`] commits a transaction with [`ffi::mdbx_txn_commit_ex`]
fn start_message_listener(&self, env: EnvPtr, rx: Receiver<TxnManagerMessage>) {
let task = move || {
-#[allow(clippy::redundant_locals)]
let env = env;
loop {
match rx.recv() {

View File

@@ -856,7 +856,7 @@ mod tests {
(database_blocks.to_vec(), in_memory_blocks.to_vec())
}
-#[allow(clippy::type_complexity, clippy::too_many_arguments)]
+#[expect(clippy::type_complexity)]
fn provider_with_chain_spec_and_random_blocks(
rng: &mut impl Rng,
chain_spec: Arc<ChainSpec>,
@@ -2223,7 +2223,6 @@ mod tests {
);
// Test range that spans database and in-memory
-#[allow(unused_assignments)]
{
// This block will be persisted to disk and removed from memory AFTER the first database query. This ensures that we query the in-memory state before the database, avoiding any race condition.
persist_block_after_db_tx_creation(provider.clone(), in_memory_blocks[0].number);

View File

@@ -168,10 +168,9 @@ impl<N: NodePrimitives> StaticFileProvider<N> {
// appending/truncating rows
for segment in event.paths {
// Ensure it's a file with the .conf extension
-#[allow(clippy::nonminimal_bool)]
-if !segment
+if segment
.extension()
-.is_some_and(|s| s.to_str() == Some(CONFIG_FILE_EXTENSION))
+.is_none_or(|s| s.to_str() != Some(CONFIG_FILE_EXTENSION))
{
continue
}
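
The boolean rewrite above works because `Option::is_none_or` (stable since Rust 1.82) expresses the negation of an `is_some_and` check directly, inverting the predicate instead of the whole expression, so the outer `!` that the `clippy::nonminimal_bool` suppression was covering disappears. A small sketch of the equivalence, using a hard-coded "conf" string and a made-up file name in place of the crate's `CONFIG_FILE_EXTENSION` constant and real segment paths:

use std::path::Path;

fn main() {
    let segment = Path::new("headers.conf");

    // Old shape: negate `is_some_and`, the pattern the removed
    // `clippy::nonminimal_bool` allow was covering.
    let skip_old = !segment
        .extension()
        .is_some_and(|s| s.to_str() == Some("conf"));

    // New shape: `is_none_or` states the same condition without the outer `!`.
    let skip_new = segment
        .extension()
        .is_none_or(|s| s.to_str() != Some("conf"));

    // The two forms agree for every input; here the extension matches, so neither skips.
    assert_eq!(skip_old, skip_new);
    assert!(!skip_new);
}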
@@ -653,7 +652,6 @@ impl<N: NodePrimitives> StaticFileProvider<N> {
///
/// WARNING: No static file writer should be held before calling this function, otherwise it
/// will deadlock.
-#[allow(clippy::while_let_loop)]
pub fn check_consistency<Provider>(
&self,
provider: &Provider,

View File

@@ -2255,7 +2255,6 @@ mod tests {
assert_eq!(sparse, sparse_old);
}
-#[allow(clippy::type_complexity)]
#[test]
fn sparse_trie_fuzz() {
// Having only the first 3 nibbles set, we narrow down the range of keys

View File

@@ -12,7 +12,6 @@ macro_rules! general_state_test {
};
}
-#[allow(missing_docs)]
mod general_state_tests {
use super::*;