feat: make logs per response configurable (#2559)

Author: Matthias Seitz
Committed by: GitHub
Date: 2023-05-04 21:27:31 +02:00
Parent: 6a79b16737
Commit: acbbd6788c
4 changed files with 33 additions and 12 deletions

File 1 of 4

@@ -1,6 +1,7 @@
 use crate::{
     constants,
     error::{RpcError, ServerKind},
+    eth::DEFAULT_MAX_LOGS_IN_RESPONSE,
 };
 use hyper::header::AUTHORIZATION;
 pub use jsonrpsee::server::ServerBuilder;
@@ -49,7 +50,7 @@ where
     // spawn a new cache task
     let eth_cache = EthStateCache::spawn_with(client.clone(), Default::default(), executor);
     let eth_api = EthApi::new(client.clone(), pool.clone(), network, eth_cache.clone());
-    let eth_filter = EthFilter::new(client, pool, eth_cache.clone());
+    let eth_filter = EthFilter::new(client, pool, eth_cache.clone(), DEFAULT_MAX_LOGS_IN_RESPONSE);
     launch_with_eth_api(eth_api, eth_filter, engine_api, socket_addr, secret).await
 }

File 2 of 4

@@ -4,6 +4,9 @@ use reth_rpc::{
 };
 use serde::{Deserialize, Serialize};

+/// The default maximum of logs in a single response.
+pub(crate) const DEFAULT_MAX_LOGS_IN_RESPONSE: usize = 2_000;
+
 /// All handlers for the `eth` namespace
 #[derive(Debug, Clone)]
 pub struct EthHandlers<Client, Pool, Network, Events> {
@@ -22,13 +25,18 @@ pub struct EthHandlers<Client, Pool, Network, Events> {
 pub struct EthConfig {
     /// Settings for the caching layer
     pub cache: EthStateCacheConfig,
     /// The maximum number of tracing calls that can be executed concurrently.
     pub max_tracing_requests: usize,
+    /// Maximum number of logs that can be returned in a single response in `eth_getLogs` calls.
+    pub max_logs_per_response: usize,
 }

 impl Default for EthConfig {
     fn default() -> Self {
-        Self { cache: EthStateCacheConfig::default(), max_tracing_requests: 10 }
+        Self {
+            cache: EthStateCacheConfig::default(),
+            max_tracing_requests: 10,
+            max_logs_per_response: DEFAULT_MAX_LOGS_IN_RESPONSE,
+        }
     }
 }
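For illustration only, a minimal sketch of how a caller could override the new limit when building an `EthConfig`; the `5_000` value is hypothetical, and the remaining fields fall back to the defaults shown above:

    // hypothetical override of the new field; the other fields keep their defaults
    let eth_config = EthConfig { max_logs_per_response: 5_000, ..Default::default() };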

File 3 of 4

@@ -798,7 +798,12 @@ where
             self.network.clone(),
             cache.clone(),
         );
-        let filter = EthFilter::new(self.client.clone(), self.pool.clone(), cache.clone());
+        let filter = EthFilter::new(
+            self.client.clone(),
+            self.pool.clone(),
+            cache.clone(),
+            self.config.eth.max_logs_per_response,
+        );
         let pubsub = EthPubSub::new(
             self.client.clone(),
File 4 of 4

@@ -21,9 +21,6 @@ use std::{collections::HashMap, sync::Arc, time::Instant};
 use tokio::sync::Mutex;
 use tracing::trace;

-/// The default maximum of logs in a single response.
-const DEFAULT_MAX_LOGS_IN_RESPONSE: usize = 2_000;
-
 /// `Eth` filter RPC implementation.
 #[derive(Debug, Clone)]
 pub struct EthFilter<Client, Pool> {
@@ -33,13 +30,23 @@ pub struct EthFilter<Client, Pool> {
 impl<Client, Pool> EthFilter<Client, Pool> {
     /// Creates a new, shareable instance.
-    pub fn new(client: Client, pool: Pool, eth_cache: EthStateCache) -> Self {
+    ///
+    /// This uses the given pool to get notified about new transactions, the client to interact
+    /// with the blockchain, the cache to fetch cacheable data, like the logs, and
+    /// `max_logs_per_response` to limit the number of logs returned in a single `eth_getLogs`
+    /// response.
+    pub fn new(
+        client: Client,
+        pool: Pool,
+        eth_cache: EthStateCache,
+        max_logs_per_response: usize,
+    ) -> Self {
         let inner = EthFilterInner {
             client,
             active_filters: Default::default(),
             pool,
             id_provider: Arc::new(EthSubscriptionIdProvider::default()),
-            max_logs_in_response: DEFAULT_MAX_LOGS_IN_RESPONSE,
+            max_logs_per_response,
             eth_cache,
         };
         Self { inner: Arc::new(inner) }
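For context, a call site under the new signature mirrors the builder change above; a minimal sketch, where `client`, `pool`, and `eth_cache` stand in for whatever concrete client, pool, and cache handles the caller already has, and the limit comes from the config:

    // placeholder variables; only the fourth argument is new in this commit
    let filter = EthFilter::new(client, pool, eth_cache, eth_config.max_logs_per_response);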
@@ -188,7 +195,7 @@ struct EthFilterInner<Client, Pool> {
     /// Provides ids to identify filters
     id_provider: Arc<dyn IdProvider>,
     /// Maximum number of logs that can be returned in a response
-    max_logs_in_response: usize,
+    max_logs_per_response: usize,
     /// The async cache frontend for eth related data
     eth_cache: EthStateCache,
 }
@@ -303,9 +310,9 @@
         );

        // size check
-        if all_logs.len() > self.max_logs_in_response {
+        if all_logs.len() > self.max_logs_per_response {
             return Err(FilterError::QueryExceedsMaxResults(
                 self.max_logs_in_response,
-                self.max_logs_in_response,
+                self.max_logs_per_response,
             ))
         }
     }