remove all () from the ends of log targets: foo::bar() => foo::bar

jkds
2025-12-30 12:07:14 +01:00
parent 44a9de17f1
commit cc571d4851
25 changed files with 292 additions and 292 deletions
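The change is purely mechanical: every `target: "..."` string passed to the logging macros (trace!/debug!/info!/warn!/error!) loses its trailing `()`. In each hunk below, the old call site is shown first, followed by the rewritten one. As a minimal sketch of the rule only (a hypothetical helper, not part of this commit; the actual edits were applied directly to the 25 files), the transformation amounts to:

    // Hypothetical illustration of the renaming rule, not code from this commit.
    // Strip a trailing "()" from a log-target string,
    // e.g. "event_graph::dag_prune()" -> "event_graph::dag_prune".
    fn strip_call_parens(target: &str) -> &str {
        target.strip_suffix("()").unwrap_or(target)
    }

    fn main() {
        assert_eq!(
            strip_call_parens("event_graph::dag_prune()"),
            "event_graph::dag_prune"
        );
        // Targets without parentheses are left untouched.
        assert_eq!(strip_call_parens("net::hosts::store"), "net::hosts::store");
    }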

View File

@@ -99,7 +99,7 @@ impl LocalEventGraph {
// If not, we can prune the DAG and insert this new genesis event.
if !dag.contains_key(current_genesis.id().as_bytes())? {
info!(
target: "event_graph::new()",
target: "event_graph::new",
"[EVENTGRAPH] DAG does not contain current genesis, pruning existing data",
);
self_.dag_prune(current_genesis).await?;
@@ -116,7 +116,7 @@ impl LocalEventGraph {
prune_task.clone().start(
self_.clone().dag_prune_task(days_rotation),
|_| async move {
info!(target: "event_graph::_handle_stop()", "[EVENTGRAPH] Prune task stopped, flushing sled")
info!(target: "event_graph::_handle_stop", "[EVENTGRAPH] Prune task stopped, flushing sled")
},
Error::DetachedTaskStopped,
ex.clone(),
@@ -127,7 +127,7 @@ impl LocalEventGraph {
}
async fn dag_prune(&self, genesis_event: Event) -> Result<()> {
debug!(target: "event_graph::dag_prune()", "Pruning DAG...");
debug!(target: "event_graph::dag_prune", "Pruning DAG...");
// Acquire exclusive locks to unreferenced_tips, broadcasted_ids and
// current_genesis while this operation is happening. We do this to
@@ -145,7 +145,7 @@ impl LocalEventGraph {
}
batch.insert(genesis_event.id().as_bytes(), serialize_async(&genesis_event).await);
debug!(target: "event_graph::dag_prune()", "Applying batch...");
debug!(target: "event_graph::dag_prune", "Applying batch...");
if let Err(e) = self.dag.apply_batch(batch) {
panic!("Failed pruning DAG, sled apply_batch error: {}", e);
}
@@ -159,7 +159,7 @@ impl LocalEventGraph {
drop(broadcasted_ids);
drop(current_genesis);
debug!(target: "event_graph::dag_prune()", "DAG pruned successfully");
debug!(target: "event_graph::dag_prune", "DAG pruned successfully");
Ok(())
}
@@ -169,7 +169,7 @@ impl LocalEventGraph {
// parameter. By pruning, we should deterministically replace the
// genesis event (can use a deterministic timestamp) and drop everything
// in the DAG, leaving just the new genesis event.
debug!(target: "event_graph::dag_prune_task()", "Spawned background DAG pruning task");
debug!(target: "event_graph::dag_prune_task", "Spawned background DAG pruning task");
loop {
// Find the next rotation timestamp:
@@ -186,9 +186,9 @@ impl LocalEventGraph {
// Sleep until it's time to rotate.
let s = millis_until_next_rotation(next_rotation);
debug!(target: "event_graph::dag_prune_task()", "Sleeping {}s until next DAG prune", s);
debug!(target: "event_graph::dag_prune_task", "Sleeping {}s until next DAG prune", s);
msleep(s).await;
debug!(target: "event_graph::dag_prune_task()", "Rotation period reached");
debug!(target: "event_graph::dag_prune_task", "Rotation period reached");
// Trigger DAG prune
self.dag_prune(current_genesis).await?;
@@ -254,7 +254,7 @@ impl LocalEventGraph {
for event in events {
let event_id = event.id();
debug!(
target: "event_graph::dag_insert()",
target: "event_graph::dag_insert",
"Inserting event {} into the DAG", event_id,
);
@@ -262,7 +262,7 @@ impl LocalEventGraph {
.validate(&self.dag, genesis_timestamp, self.days_rotation, Some(&overlay))
.await?
{
error!(target: "event_graph::dag_insert()", "Event {} is invalid!", event_id);
error!(target: "event_graph::dag_insert", "Event {} is invalid!", event_id);
return Err(Error::EventIsInvalid)
}
@@ -291,13 +291,13 @@ impl LocalEventGraph {
// Update the unreferenced DAG tips set
debug!(
target: "event_graph::dag_insert()",
target: "event_graph::dag_insert",
"Event {} parents {:#?}", event_id, event.parents,
);
for parent_id in event.parents.iter() {
if parent_id != &NULL_ID {
debug!(
target: "event_graph::dag_insert()",
target: "event_graph::dag_insert",
"Removing {} from unreferenced_tips", parent_id,
);
@@ -317,7 +317,7 @@ impl LocalEventGraph {
}
unreferenced_tips.retain(|_, tips| !tips.is_empty());
debug!(
target: "event_graph::dag_insert()",
target: "event_graph::dag_insert",
"Adding {} to unreferenced tips", event_id,
);

View File

@@ -312,7 +312,7 @@ impl<H: DhtHandler> Dht<H> {
self.handler().await.add_value(key, value).await;
let nodes = self.lookup_nodes(key).await;
info!(target: "dht::announce()", "[DHT] Announcing {} to {} nodes", H::key_to_string(key), nodes.len());
info!(target: "dht::announce", "[DHT] Announcing {} to {} nodes", H::key_to_string(key), nodes.len());
for node in nodes {
if let Ok((channel, _)) = self.get_channel(&node).await {
@@ -334,7 +334,7 @@ impl<H: DhtHandler> Dht<H> {
self.set_bootstrapped(true).await;
info!(target: "dht::bootstrap()", "[DHT] Bootstrapping");
info!(target: "dht::bootstrap", "[DHT] Bootstrapping");
self.event_publisher.notify(DhtEvent::BootstrapStarted).await;
let _nodes = self.lookup_nodes(&self_node.id()).await;
@@ -349,7 +349,7 @@ impl<H: DhtHandler> Dht<H> {
// TODO: Optimize this
async fn on_new_node(&self, node: &H::Node, channel: ChannelPtr) {
info!(target: "dht::on_new_node()", "[DHT] Found new node {}", H::key_to_string(&node.id()));
info!(target: "dht::on_new_node", "[DHT] Found new node {}", H::key_to_string(&node.id()));
// If this is the first node we know about then bootstrap
if !self.is_bootstrapped().await {
@@ -377,7 +377,7 @@ impl<H: DhtHandler> Dht<H> {
pub async fn update_node(&self, node: &H::Node, channel: ChannelPtr) {
self.p2p.session_direct().inc_channel_usage(&channel, 1).await;
if let Err(e) = self.add_node_tx.send((node.clone(), channel.clone())).await {
warn!(target: "dht::update_node()", "[DHT] Cannot add node {}: {e}", H::key_to_string(&node.id()))
warn!(target: "dht::update_node", "[DHT] Cannot add node {}: {e}", H::key_to_string(&node.id()))
}
}
@@ -479,11 +479,11 @@ impl<H: DhtHandler> Dht<H> {
let handler = self.handler().await;
let res = match &lookup_type {
DhtLookupType::Nodes => {
info!(target: "dht::lookup()", "[DHT] [LOOKUP] Querying node {} for nodes lookup of key {}", H::key_to_string(&node.id()), H::key_to_string(key));
info!(target: "dht::lookup", "[DHT] [LOOKUP] Querying node {} for nodes lookup of key {}", H::key_to_string(&node.id()), H::key_to_string(key));
handler.find_nodes(channel.clone(), key).await.map(DhtLookupReply::Nodes)
}
DhtLookupType::Value => {
info!(target: "dht::lookup()", "[DHT] [LOOKUP] Querying node {} for value lookup of key {}", H::key_to_string(&node.id()), H::key_to_string(key));
info!(target: "dht::lookup", "[DHT] [LOOKUP] Querying node {} for value lookup of key {}", H::key_to_string(&node.id()), H::key_to_string(key));
handler.find_value(channel.clone(), key).await
}
};
@@ -529,7 +529,7 @@ impl<H: DhtHandler> Dht<H> {
// Process lookup responses
while let Some((queried_node, res)) = futures.next().await {
if let Err(e) = res {
warn!(target: "dht::lookup()", "[DHT] [LOOKUP] Error in lookup: {e}");
warn!(target: "dht::lookup", "[DHT] [LOOKUP] Error in lookup: {e}");
// Spawn next `alpha` futures if there are no more futures but
// we still have nodes to visit
@@ -548,7 +548,7 @@ impl<H: DhtHandler> Dht<H> {
// Send the value we found to the publisher
if let Some(value) = value {
info!(target: "dht::lookup()", "[DHT] [LOOKUP] Found value for {} from {}", H::key_to_string(&key), H::key_to_string(&queried_node.id()));
info!(target: "dht::lookup", "[DHT] [LOOKUP] Found value for {} from {}", H::key_to_string(&key), H::key_to_string(&queried_node.id()));
values.push(value.clone());
self.event_publisher.notify(DhtEvent::ValueFound { key, value }).await;
}
@@ -556,7 +556,7 @@ impl<H: DhtHandler> Dht<H> {
// Update nodes_to_visit
if let Some(mut nodes) = nodes {
if !nodes.is_empty() {
info!(target: "dht::lookup()", "[DHT] [LOOKUP] Found {} nodes from {}", nodes.len(), H::key_to_string(&queried_node.id()));
info!(target: "dht::lookup", "[DHT] [LOOKUP] Found {} nodes from {}", nodes.len(), H::key_to_string(&queried_node.id()));
self.event_publisher
.notify(DhtEvent::NodesFound { key, nodes: nodes.clone() })
@@ -597,7 +597,7 @@ impl<H: DhtHandler> Dht<H> {
spawn_futures(&mut nodes_to_visit, &mut futures).await;
}
info!(target: "dht::lookup()", "[DHT] [LOOKUP] Lookup for {} completed", H::key_to_string(&key));
info!(target: "dht::lookup", "[DHT] [LOOKUP] Lookup for {} completed", H::key_to_string(&key));
let nodes: Vec<_> = result.into_iter().take(k).collect();
(nodes, values)
@@ -605,7 +605,7 @@ impl<H: DhtHandler> Dht<H> {
/// Find `k` nodes closest to a key
pub async fn lookup_nodes(&self, key: &blake3::Hash) -> Vec<H::Node> {
info!(target: "dht::lookup_nodes()", "[DHT] [LOOKUP] Starting node lookup for key {}", H::key_to_string(key));
info!(target: "dht::lookup_nodes", "[DHT] [LOOKUP] Starting node lookup for key {}", H::key_to_string(key));
self.event_publisher.notify(DhtEvent::NodesLookupStarted { key: *key }).await;
@@ -620,7 +620,7 @@ impl<H: DhtHandler> Dht<H> {
/// Find value for `key`
pub async fn lookup_value(&self, key: &blake3::Hash) -> (Vec<H::Node>, Vec<H::Value>) {
info!(target: "dht::lookup_value()", "[DHT] [LOOKUP] Starting value lookup for key {}", H::key_to_string(key));
info!(target: "dht::lookup_value", "[DHT] [LOOKUP] Starting value lookup for key {}", H::key_to_string(key));
self.event_publisher.notify(DhtEvent::ValueLookupStarted { key: *key }).await;

View File

@@ -96,7 +96,7 @@ pub async fn channel_task<H: DhtHandler>(handler: Arc<H>) -> Result<()> {
let ping_res = dht.ping(channel.clone()).await;
if let Err(e) = ping_res {
warn!(target: "dht::channel_task()", "Error while pinging manual connection (requesting node id) {}: {e}", channel.display_address());
warn!(target: "dht::channel_task", "Error while pinging manual connection (requesting node id) {}: {e}", channel.display_address());
continue;
}
}
@@ -157,7 +157,7 @@ pub async fn dht_refinery_task<H: DhtHandler>(handler: Arc<H>) -> Result<()> {
let last_seen = UNIX_EPOCH.elapsed().unwrap().as_secs();
if let Err(e) = hosts.whitelist_host(url, last_seen).await {
error!(target: "dht::tasks::whitelist_refinery_task()", "Could not send {url} to the whitelist: {e}");
error!(target: "dht::tasks::whitelist_refinery_task", "Could not send {url} to the whitelist: {e}");
}
break
}
@@ -282,7 +282,7 @@ pub async fn disconnect_inbounds_task<H: DhtHandler>(handler: Arc<H>) -> Result<
}
// Now we can stop it and remove it from the channel cache
info!(target: "dht::disconnect_inbounds_task()", "Closing expired inbound channel [{}]", channel.display_address());
info!(target: "dht::disconnect_inbounds_task", "Closing expired inbound channel [{}]", channel.display_address());
channel.stop().await;
channel_cache.remove(&channel.info.id);
}

View File

@@ -164,7 +164,7 @@ impl EventGraph {
// If not, we can prune the DAG and insert this new genesis event.
if !dag.contains_key(current_genesis.id().as_bytes())? {
info!(
target: "event_graph::new()",
target: "event_graph::new",
"[EVENTGRAPH] DAG does not contain current genesis, pruning existing data",
);
self_.dag_prune(current_genesis).await?;
@@ -183,7 +183,7 @@ impl EventGraph {
|res| async move {
match res {
Ok(()) | Err(Error::DetachedTaskStopped) => { /* Do nothing */ }
Err(e) => error!(target: "event_graph::_handle_stop()", "[EVENTGRAPH] Failed stopping prune task: {e}")
Err(e) => error!(target: "event_graph::_handle_stop", "[EVENTGRAPH] Failed stopping prune task: {e}")
}
},
Error::DetachedTaskStopped,
@@ -219,7 +219,7 @@ impl EventGraph {
let channels = self.p2p.hosts().peers();
let mut communicated_peers = channels.len();
info!(
target: "event_graph::dag_sync()",
target: "event_graph::dag_sync",
"[EVENTGRAPH] Syncing DAG from {communicated_peers} peers..."
);
@@ -235,7 +235,7 @@ impl EventGraph {
Ok(v) => v,
Err(e) => {
error!(
target: "event_graph::dag_sync()",
target: "event_graph::dag_sync",
"[EVENTGRAPH] Sync: Couldn't subscribe TipReq for peer {url}, skipping ({e})"
);
communicated_peers -= 1;
@@ -245,7 +245,7 @@ impl EventGraph {
if let Err(e) = channel.send(&TipReq {}).await {
error!(
target: "event_graph::dag_sync()",
target: "event_graph::dag_sync",
"[EVENTGRAPH] Sync: Couldn't contact peer {url}, skipping ({e})"
);
communicated_peers -= 1;
@@ -262,7 +262,7 @@ impl EventGraph {
let Ok(peer_tips) = tip_rep_sub.receive_with_timeout(outbound_connect_timeout).await
else {
error!(
target: "event_graph::dag_sync()",
target: "event_graph::dag_sync",
"[EVENTGRAPH] Sync: Peer {url} didn't reply with tips in time, skipping"
);
communicated_peers -= 1;
@@ -286,7 +286,7 @@ impl EventGraph {
// After we've communicated all the peers, let's see what happened.
if tips.is_empty() {
error!(
target: "event_graph::dag_sync()",
target: "event_graph::dag_sync",
"[EVENTGRAPH] Sync: Could not find any DAG tips",
);
return Err(Error::DagSyncFailed)
@@ -316,11 +316,11 @@ impl EventGraph {
if missing_parents.is_empty() {
*self.synced.write().await = true;
info!(target: "event_graph::dag_sync()", "[EVENTGRAPH] DAG synced successfully!");
info!(target: "event_graph::dag_sync", "[EVENTGRAPH] DAG synced successfully!");
return Ok(())
}
info!(target: "event_graph::dag_sync()", "[EVENTGRAPH] Fetching events");
info!(target: "event_graph::dag_sync", "[EVENTGRAPH] Fetching events");
let mut received_events: BTreeMap<u64, Vec<Event>> = BTreeMap::new();
let mut received_events_hashes = HashSet::new();
@@ -331,7 +331,7 @@ impl EventGraph {
let url = channel.display_address();
debug!(
target: "event_graph::dag_sync()",
target: "event_graph::dag_sync",
"Requesting {missing_parents:?} from {url}..."
);
@@ -339,7 +339,7 @@ impl EventGraph {
Ok(v) => v,
Err(e) => {
error!(
target: "event_graph::dag_sync()",
target: "event_graph::dag_sync",
"[EVENTGRAPH] Sync: Couldn't subscribe EventRep for peer {url}, skipping ({e})"
);
continue
@@ -349,7 +349,7 @@ impl EventGraph {
let request_missing_events = missing_parents.clone().into_iter().collect();
if let Err(e) = channel.send(&EventReq(request_missing_events)).await {
error!(
target: "event_graph::dag_sync()",
target: "event_graph::dag_sync",
"[EVENTGRAPH] Sync: Failed communicating EventReq({missing_parents:?}) to {url}: {e}"
);
continue
@@ -365,7 +365,7 @@ impl EventGraph {
let Ok(parent) = ev_rep_sub.receive_with_timeout(outbound_connect_timeout).await
else {
error!(
target: "event_graph::dag_sync()",
target: "event_graph::dag_sync",
"[EVENTGRAPH] Sync: Timeout waiting for parents {missing_parents:?} from {url}"
);
continue
@@ -377,7 +377,7 @@ impl EventGraph {
let parent_id = parent.id();
if !missing_parents.contains(&parent_id) {
error!(
target: "event_graph::dag_sync()",
target: "event_graph::dag_sync",
"[EVENTGRAPH] Sync: Peer {url} replied with a wrong event: {}",
parent.id()
);
@@ -385,7 +385,7 @@ impl EventGraph {
}
debug!(
target: "event_graph::dag_sync()",
target: "event_graph::dag_sync",
"Got correct parent event {parent_id}"
);
@@ -411,7 +411,7 @@ impl EventGraph {
!self.dag.contains_key(upper_parent.as_bytes()).unwrap()
{
debug!(
target: "event_graph::dag_sync()",
target: "event_graph::dag_sync",
"Found upper missing parent event {upper_parent}"
);
missing_parents.insert(*upper_parent);
@@ -424,7 +424,7 @@ impl EventGraph {
if !found_event {
error!(
target: "event_graph::dag_sync()",
target: "event_graph::dag_sync",
"[EVENTGRAPH] Sync: Failed to get all events",
);
return Err(Error::DagSyncFailed)
@@ -443,13 +443,13 @@ impl EventGraph {
*self.synced.write().await = true;
info!(target: "event_graph::dag_sync()", "[EVENTGRAPH] DAG synced successfully!");
info!(target: "event_graph::dag_sync", "[EVENTGRAPH] DAG synced successfully!");
Ok(())
}
/// Atomically prune the DAG and insert the given event as genesis.
async fn dag_prune(&self, genesis_event: Event) -> Result<()> {
debug!(target: "event_graph::dag_prune()", "Pruning DAG...");
debug!(target: "event_graph::dag_prune", "Pruning DAG...");
// Acquire exclusive locks to unreferenced_tips, broadcasted_ids and
// current_genesis while this operation is happening. We do this to
@@ -467,7 +467,7 @@ impl EventGraph {
}
batch.insert(genesis_event.id().as_bytes(), serialize_async(&genesis_event).await);
debug!(target: "event_graph::dag_prune()", "Applying batch...");
debug!(target: "event_graph::dag_prune", "Applying batch...");
if let Err(e) = self.dag.apply_batch(batch) {
panic!("Failed pruning DAG, sled apply_batch error: {e}");
}
@@ -481,7 +481,7 @@ impl EventGraph {
drop(broadcasted_ids);
drop(current_genesis);
debug!(target: "event_graph::dag_prune()", "DAG pruned successfully");
debug!(target: "event_graph::dag_prune", "DAG pruned successfully");
Ok(())
}
@@ -491,7 +491,7 @@ impl EventGraph {
// parameter. By pruning, we should deterministically replace the
// genesis event (can use a deterministic timestamp) and drop everything
// in the DAG, leaving just the new genesis event.
debug!(target: "event_graph::dag_prune_task()", "Spawned background DAG pruning task");
debug!(target: "event_graph::dag_prune_task", "Spawned background DAG pruning task");
loop {
// Find the next rotation timestamp:
@@ -508,9 +508,9 @@ impl EventGraph {
// Sleep until it's time to rotate.
let s = millis_until_next_rotation(next_rotation);
debug!(target: "event_graph::dag_prune_task()", "Sleeping {s}ms until next DAG prune");
debug!(target: "event_graph::dag_prune_task", "Sleeping {s}ms until next DAG prune");
msleep(s).await;
debug!(target: "event_graph::dag_prune_task()", "Rotation period reached");
debug!(target: "event_graph::dag_prune_task", "Rotation period reached");
// Trigger DAG prune
self.dag_prune(current_genesis).await?;
@@ -551,7 +551,7 @@ impl EventGraph {
for event in events {
let event_id = event.id();
debug!(
target: "event_graph::dag_insert()",
target: "event_graph::dag_insert",
"Inserting event {event_id} into the DAG"
);
@@ -559,7 +559,7 @@ impl EventGraph {
.validate(&self.dag, genesis_timestamp, self.days_rotation, Some(&overlay))
.await?
{
error!(target: "event_graph::dag_insert()", "Event {event_id} is invalid!");
error!(target: "event_graph::dag_insert", "Event {event_id} is invalid!");
return Err(Error::EventIsInvalid)
}
@@ -591,13 +591,13 @@ impl EventGraph {
// Update the unreferenced DAG tips set
debug!(
target: "event_graph::dag_insert()",
target: "event_graph::dag_insert",
"Event {event_id} parents {:#?}", event.parents,
);
for parent_id in event.parents.iter() {
if parent_id != &NULL_ID {
debug!(
target: "event_graph::dag_insert()",
target: "event_graph::dag_insert",
"Removing {parent_id} from unreferenced_tips"
);
@@ -617,7 +617,7 @@ impl EventGraph {
}
unreferenced_tips.retain(|_, tips| !tips.is_empty());
debug!(
target: "event_graph::dag_insert()",
target: "event_graph::dag_insert",
"Adding {event_id} to unreferenced tips"
);
@@ -834,7 +834,7 @@ impl EventGraph {
tips: BTreeMap<u64, HashSet<blake3::Hash>>,
) -> Result<Vec<Event>> {
debug!(
target: "event_graph::fetch_successors_of()",
target: "event_graph::fetch_successors_of",
"fetching successors of {tips:?}"
);

View File

@@ -74,7 +74,7 @@ impl MovingWindow {
fn clean(&mut self) {
while let Some(ts) = self.times.front() {
let Ok(elapsed) = ts.elapsed() else {
debug!(target: "event_graph::protocol::MovingWindow::clean()", "Timestamp [{ts}] is in future. Removing...");
debug!(target: "event_graph::protocol::MovingWindow::clean", "Timestamp [{ts}] is in future. Removing...");
let _ = self.times.pop_front();
continue
};
@@ -201,7 +201,7 @@ impl ProtocolEventGraph {
let malicious_count = self.malicious_count.fetch_add(1, SeqCst);
if malicious_count + 1 == MALICIOUS_THRESHOLD {
error!(
target: "event_graph::protocol::handle_event_put()",
target: "event_graph::protocol::handle_event_put",
"[EVENTGRAPH] Peer {} reached malicious threshold. Dropping connection.",
self.channel.display_address(),
);
@@ -210,7 +210,7 @@ impl ProtocolEventGraph {
}
warn!(
target: "event_graph::protocol::handle_event_put()",
target: "event_graph::protocol::handle_event_put",
"[EVENTGRAPH] Peer {} sent us a malicious event", self.channel.display_address(),
);
@@ -230,7 +230,7 @@ impl ProtocolEventGraph {
Err(_) => continue,
};
trace!(
target: "event_graph::protocol::handle_event_put()",
target: "event_graph::protocol::handle_event_put",
"Got EventPut: {} [{}]", event.id(), self.channel.display_address(),
);
@@ -247,7 +247,7 @@ impl ProtocolEventGraph {
let event_id = event.id();
if self.event_graph.dag.contains_key(event_id.as_bytes()).unwrap() {
debug!(
target: "event_graph::protocol::handle_event_put()",
target: "event_graph::protocol::handle_event_put",
"Event {event_id} is already known"
);
continue
@@ -277,7 +277,7 @@ impl ProtocolEventGraph {
let genesis_timestamp = self.event_graph.current_genesis.read().await.timestamp;
if event.timestamp < genesis_timestamp {
debug!(
target: "event_graph::protocol::handle_event_put()",
target: "event_graph::protocol::handle_event_put",
"Event {} is older than genesis. Event timestamp: `{}`. Genesis timestamp: `{genesis_timestamp}`",
event.id(), event.timestamp
);
@@ -294,7 +294,7 @@ impl ProtocolEventGraph {
// At this point, this is a new event to us. Let's see if we
// have all of its parents.
debug!(
target: "event_graph::protocol::handle_event_put()",
target: "event_graph::protocol::handle_event_put",
"Event {event_id} is new"
);
@@ -324,14 +324,14 @@ impl ProtocolEventGraph {
let mut received_events_hashes = HashSet::new();
debug!(
target: "event_graph::protocol::handle_event_put()",
target: "event_graph::protocol::handle_event_put",
"Event has {} missing parents. Requesting...", missing_parents.len(),
);
while !missing_parents.is_empty() {
// for parent_id in missing_parents.clone().iter() {
debug!(
target: "event_graph::protocol::handle_event_put()",
target: "event_graph::protocol::handle_event_put",
"Requesting {missing_parents:?}..."
);
@@ -351,7 +351,7 @@ impl ProtocolEventGraph {
self.ev_rep_sub.receive_with_timeout(outbound_connect_timeout).await
else {
error!(
target: "event_graph::protocol::handle_event_put()",
target: "event_graph::protocol::handle_event_put",
"[EVENTGRAPH] Timeout while waiting for parents {missing_parents:?} from {}",
self.channel.display_address(),
);
@@ -365,7 +365,7 @@ impl ProtocolEventGraph {
let parent_id = parent.id();
if !missing_parents.contains(&parent_id) {
error!(
target: "event_graph::protocol::handle_event_put()",
target: "event_graph::protocol::handle_event_put",
"[EVENTGRAPH] Peer {} replied with a wrong event: {}",
self.channel.display_address(), parent.id(),
);
@@ -374,7 +374,7 @@ impl ProtocolEventGraph {
}
debug!(
target: "event_graph::protocol::handle_event_put()",
target: "event_graph::protocol::handle_event_put",
"Got correct parent event {}", parent.id(),
);
@@ -403,7 +403,7 @@ impl ProtocolEventGraph {
.unwrap()
{
debug!(
target: "event_graph::protocol::handle_event_put()",
target: "event_graph::protocol::handle_event_put",
"Found upper missing parent event {upper_parent}"
);
missing_parents.insert(*upper_parent);
@@ -430,7 +430,7 @@ impl ProtocolEventGraph {
// perform a full validation and add the actual event to
// the DAG.
debug!(
target: "event_graph::protocol::handle_event_put()",
target: "event_graph::protocol::handle_event_put",
"Got all parents necessary for insertion",
);
if self.event_graph.dag_insert(slice::from_ref(&event)).await.is_err() {
@@ -451,14 +451,14 @@ impl ProtocolEventGraph {
Err(_) => continue,
};
trace!(
target: "event_graph::protocol::handle_event_req()",
target: "event_graph::protocol::handle_event_req",
"Got EventReq: {event_ids:?} [{}]", self.channel.display_address(),
);
// Check if node has finished syncing its DAG
if !*self.event_graph.synced.read().await {
debug!(
target: "event_graph::protocol::handle_event_req()",
target: "event_graph::protocol::handle_event_req",
"DAG is still syncing, skipping..."
);
continue
@@ -481,7 +481,7 @@ impl ProtocolEventGraph {
let malicious_count = self.malicious_count.fetch_add(1, SeqCst);
if malicious_count + 1 == MALICIOUS_THRESHOLD {
error!(
target: "event_graph::protocol::handle_event_req()",
target: "event_graph::protocol::handle_event_req",
"[EVENTGRAPH] Peer {} reached malicious threshold. Dropping connection.",
self.channel.display_address(),
);
@@ -490,7 +490,7 @@ impl ProtocolEventGraph {
}
warn!(
target: "event_graph::protocol::handle_event_req()",
target: "event_graph::protocol::handle_event_req",
"[EVENTGRAPH] Peer {} requested an unexpected event {event_id:?}",
self.channel.display_address()
);
@@ -500,7 +500,7 @@ impl ProtocolEventGraph {
// At this point we should have it in our DAG.
// This code panics if this is not the case.
debug!(
target: "event_graph::protocol::handle_event_req()",
target: "event_graph::protocol::handle_event_req",
"Fetching event {event_id:?} from DAG"
);
events.push(self.event_graph.dag_get(event_id).await.unwrap().unwrap());
@@ -515,7 +515,7 @@ impl ProtocolEventGraph {
for event in events.iter() {
if event.timestamp < genesis_timestamp {
error!(
target: "event_graph::protocol::handle_event_req()",
target: "event_graph::protocol::handle_event_req",
"Requested event by peer {} is older than previous rotation period. It should have been pruned.
Event timestamp: `{}`. Genesis timestamp: `{genesis_timestamp}`",
event.id(), event.timestamp
@@ -547,14 +547,14 @@ impl ProtocolEventGraph {
loop {
self.tip_req_sub.receive().await?;
trace!(
target: "event_graph::protocol::handle_tip_req()",
target: "event_graph::protocol::handle_tip_req",
"Got TipReq [{}]", self.channel.display_address(),
);
// Check if node has finished syncing its DAG
if !*self.event_graph.synced.read().await {
debug!(
target: "event_graph::protocol::handle_tip_req()",
target: "event_graph::protocol::handle_tip_req",
"DAG is still syncing, skipping..."
);
continue

View File

@@ -141,7 +141,7 @@ impl Geode {
/// This works for both file metadata and directory metadata.
/// Returns (chunk hashes, [(file path, file size)]).
async fn read_metadata(path: &PathBuf) -> Result<(Vec<blake3::Hash>, Vec<(PathBuf, u64)>)> {
debug!(target: "geode::read_dir_metadata()", "Reading chunks from {path:?} (dir)");
debug!(target: "geode::read_dir_metadata", "Reading chunks from {path:?} (dir)");
let mut chunk_hashes = vec![];
let mut files = vec![];
@@ -199,7 +199,7 @@ impl Geode {
/// Perform garbage collection over the filesystem hierarchy.
/// Returns a set representing deleted files.
pub async fn garbage_collect(&self) -> Result<HashSet<blake3::Hash>> {
info!(target: "geode::garbage_collect()", "[Geode] Performing garbage collection");
info!(target: "geode::garbage_collect", "[Geode] Performing garbage collection");
// We track corrupt files here.
let mut deleted_files = HashSet::new();
@@ -234,7 +234,7 @@ impl Geode {
if Self::read_metadata(&path).await.is_err() {
if let Err(e) = fs::remove_file(path).await {
warn!(
target: "geode::garbage_collect()",
target: "geode::garbage_collect",
"[Geode] Garbage collect failed to remove corrupted metadata: {e}"
);
}
@@ -244,7 +244,7 @@ impl Geode {
}
}
info!(target: "geode::garbage_collect()", "[Geode] Garbage collection finished");
info!(target: "geode::garbage_collect", "[Geode] Garbage collection finished");
Ok(deleted_files)
}
@@ -307,7 +307,7 @@ impl Geode {
chunk_hashes: &[blake3::Hash],
relative_files: &[(PathBuf, u64)],
) -> Result<()> {
info!(target: "geode::insert_metadata()", "[Geode] Inserting metadata");
info!(target: "geode::insert_metadata", "[Geode] Inserting metadata");
// Verify the metadata
if !self.verify_metadata(hash, chunk_hashes, relative_files) {
@@ -342,7 +342,7 @@ impl Geode {
chunked: &mut ChunkedStorage,
stream: impl AsRef<[u8]>,
) -> Result<(blake3::Hash, usize)> {
info!(target: "geode::write_chunk()", "[Geode] Writing single chunk");
info!(target: "geode::write_chunk", "[Geode] Writing single chunk");
let mut cursor = Cursor::new(&stream);
let mut chunk = vec![0u8; MAX_CHUNK_SIZE];
@@ -394,7 +394,7 @@ impl Geode {
/// the read failed in any way (could also be the file does not exist).
pub async fn get(&self, hash: &blake3::Hash, path: &Path) -> Result<ChunkedStorage> {
let hash_str = hash_to_string(hash);
info!(target: "geode::get()", "[Geode] Getting chunks for {hash_str}...");
info!(target: "geode::get", "[Geode] Getting chunks for {hash_str}...");
// Try to read the file or dir metadata. If it's corrupt, return an error signalling
// that garbage collection needs to run.
@@ -453,7 +453,7 @@ impl Geode {
chunked: &mut ChunkedStorage,
chunk_hash: &blake3::Hash,
) -> Result<Vec<u8>> {
info!(target: "geode::get_chunk()", "[Geode] Getting chunk {}", hash_to_string(chunk_hash));
info!(target: "geode::get_chunk", "[Geode] Getting chunk {}", hash_to_string(chunk_hash));
// Get the chunk index in the file from the chunk hash
let chunk_index = match chunked.iter().position(|(h, _)| *h == *chunk_hash) {
@@ -488,7 +488,7 @@ impl Geode {
chunk_hashes: &[blake3::Hash],
files: &[(PathBuf, u64)],
) -> bool {
info!(target: "geode::verify_metadata()", "[Geode] Verifying metadata for {}", hash_to_string(hash));
info!(target: "geode::verify_metadata", "[Geode] Verifying metadata for {}", hash_to_string(hash));
let mut hasher = blake3::Hasher::new();
self.hash_chunks_metadata(&mut hasher, chunk_hashes);
self.hash_files_metadata(&mut hasher, files);
@@ -497,7 +497,7 @@ impl Geode {
/// Verifies that the chunk hash matches the content.
pub fn verify_chunk(&self, chunk_hash: &blake3::Hash, chunk_slice: &[u8]) -> bool {
info!(target: "geode::verify_chunk()", "[Geode] Verifying chunk {}", hash_to_string(chunk_hash));
info!(target: "geode::verify_chunk", "[Geode] Verifying chunk {}", hash_to_string(chunk_hash));
blake3::hash(chunk_slice) == *chunk_hash
}
}

View File

@@ -135,7 +135,7 @@ impl Acceptor {
// These channels are the channels spawned below on listener.next().is_ok().
// After the notification, we reset the condvar and retry this loop to see
// if we can accept more connections, and if not - we'll be back here.
warn!(target: "net::acceptor::run_accept_loop()", "Reached incoming conn limit, waiting...");
warn!(target: "net::acceptor::run_accept_loop", "Reached incoming conn limit, waiting...");
cv.wait().await;
cv.reset();
continue
@@ -148,7 +148,7 @@ impl Acceptor {
if hosts.container.contains(HostColor::Black as usize, &url) ||
hosts.block_all_ports(&url)
{
warn!(target: "net::acceptor::run_accept_loop()", "Peer {url} is blacklisted");
warn!(target: "net::acceptor::run_accept_loop", "Peer {url} is blacklisted");
continue
}
@@ -183,28 +183,28 @@ impl Acceptor {
libc::EAGAIN | libc::ECONNABORTED | libc::EPROTO | libc::EINTR => continue,
libc::ECONNRESET => {
warn!(
target: "net::acceptor::run_accept_loop()",
target: "net::acceptor::run_accept_loop",
"[P2P] Connection reset by peer in accept_loop"
);
continue
}
libc::ETIMEDOUT => {
warn!(
target: "net::acceptor::run_accept_loop()",
target: "net::acceptor::run_accept_loop",
"[P2P] Connection timed out in accept_loop"
);
continue
}
libc::EPIPE => {
warn!(
target: "net::acceptor::run_accept_loop()",
target: "net::acceptor::run_accept_loop",
"[P2P] Broken pipe in accept_loop"
);
continue
}
x => {
warn!(
target: "net::acceptor::run_accept_loop()",
target: "net::acceptor::run_accept_loop",
"[P2P] Unhandled OS Error: {e} {x}"
);
continue
@@ -219,7 +219,7 @@ impl Acceptor {
if let Some(inner) = std::error::Error::source(&e) {
if let Some(inner) = inner.downcast_ref::<futures_rustls::rustls::Error>() {
warn!(
target: "net::acceptor::run_accept_loop()",
target: "net::acceptor::run_accept_loop",
"[P2P] rustls listener error: {inner:?}"
);
continue
@@ -227,7 +227,7 @@ impl Acceptor {
}
warn!(
target: "net::acceptor::run_accept_loop()",
target: "net::acceptor::run_accept_loop",
"[P2P] Unhandled ErrorKind::Other error: {e:?}"
);
continue
@@ -236,7 +236,7 @@ impl Acceptor {
// Errors we didn't handle above:
Err(e) => {
warn!(
target: "net::acceptor::run_accept_loop()",
target: "net::acceptor::run_accept_loop",
"[P2P] Unhandled listener.next() error: {e}"
);
continue

View File

@@ -160,7 +160,7 @@ impl Channel {
/// Starts the channel. Runs a receive loop to start receiving messages
/// or handles a network failure.
pub fn start(self: Arc<Self>, executor: Arc<Executor<'_>>) {
debug!(target: "net::channel::start()", "START {self:?}");
debug!(target: "net::channel::start", "START {self:?}");
let self_ = self.clone();
self.receive_task.clone().start(
@@ -170,21 +170,21 @@ impl Channel {
executor,
);
debug!(target: "net::channel::start()", "END {self:?}");
debug!(target: "net::channel::start", "END {self:?}");
}
/// Stops the channel.
/// Notifies all publishers that the channel has been closed in `handle_stop()`.
pub async fn stop(&self) {
debug!(target: "net::channel::stop()", "START {self:?}");
debug!(target: "net::channel::stop", "START {self:?}");
self.receive_task.stop().await;
debug!(target: "net::channel::stop()", "END {self:?}");
debug!(target: "net::channel::stop", "END {self:?}");
}
/// Creates a subscription to a stopped signal.
/// If the channel is stopped then this will return a ChannelStopped error.
pub async fn subscribe_stop(&self) -> Result<Subscription<Error>> {
debug!(target: "net::channel::subscribe_stop()", "START {self:?}");
debug!(target: "net::channel::subscribe_stop", "START {self:?}");
if self.is_stopped() {
return Err(Error::ChannelStopped)
@@ -192,7 +192,7 @@ impl Channel {
let sub = self.stop_publisher.clone().subscribe().await;
debug!(target: "net::channel::subscribe_stop()", "END {self:?}");
debug!(target: "net::channel::subscribe_stop", "END {self:?}");
Ok(sub)
}
@@ -228,7 +228,7 @@ impl Channel {
metering_config: &MeteringConfiguration,
) -> Result<()> {
debug!(
target: "net::channel::send()", "[START] command={} {self:?}",
target: "net::channel::send", "[START] command={} {self:?}",
message.command,
);
@@ -251,7 +251,7 @@ impl Channel {
if let Some(sleep_time) = sleep_time {
let sleep_time = 2 * sleep_time;
debug!(
target: "net::channel::send()",
target: "net::channel::send",
"[P2P] Channel rate limit is active, sleeping before sending for: {sleep_time} (ms)"
);
msleep(sleep_time).await;
@@ -266,7 +266,7 @@ impl Channel {
if let Err(e) = self.send_message(message).await {
if self.session.upgrade().unwrap().type_id() & (SESSION_ALL & !SESSION_REFINE) != 0 {
error!(
target: "net::channel::send()", "[P2P] Channel send error for [{self:?}]: {e}"
target: "net::channel::send", "[P2P] Channel send error for [{self:?}]: {e}"
);
}
self.stop().await;
@@ -274,7 +274,7 @@ impl Channel {
}
debug!(
target: "net::channel::send()", "[END] command={} {self:?}",
target: "net::channel::send", "[END] command={} {self:?}",
message.command
);
@@ -295,23 +295,23 @@ impl Channel {
time: NanoTimestamp::current_time(),
});
trace!(target: "net::channel::send_message()", "Sending magic...");
trace!(target: "net::channel::send_message", "Sending magic...");
let magic_bytes = self.p2p().settings().read().await.magic_bytes.0;
written += magic_bytes.encode_async(stream).await?;
trace!(target: "net::channel::send_message()", "Sent magic");
trace!(target: "net::channel::send_message", "Sent magic");
trace!(target: "net::channel::send_message()", "Sending command...");
trace!(target: "net::channel::send_message", "Sending command...");
written += message.command.encode_async(stream).await?;
trace!(target: "net::channel::send_message()", "Sent command: {}", message.command);
trace!(target: "net::channel::send_message", "Sent command: {}", message.command);
trace!(target: "net::channel::send_message()", "Sending payload...");
trace!(target: "net::channel::send_message", "Sending payload...");
// First extract the length of the payload as a VarInt and write it to the stream.
written += VarInt(message.payload.len() as u64).encode_async(stream).await?;
// Then write the encoded payload itself to the stream.
stream.write_all(&message.payload).await?;
written += message.payload.len();
trace!(target: "net::channel::send_message()", "Sent payload {} bytes, total bytes {written}",
trace!(target: "net::channel::send_message", "Sent payload {} bytes, total bytes {written}",
message.payload.len());
stream.flush().await?;
@@ -331,10 +331,10 @@ impl Channel {
// Messages should have a 4 byte header of magic digits.
// This is used for network debugging.
let mut magic = [0u8; 4];
trace!(target: "net::channel::read_command()", "Reading magic...");
trace!(target: "net::channel::read_command", "Reading magic...");
stream.read_exact(&mut magic).await?;
trace!(target: "net::channel::read_command()", "Read magic {magic:?}");
trace!(target: "net::channel::read_command", "Read magic {magic:?}");
let magic_bytes = self.p2p().settings().read().await.magic_bytes.0;
if magic != magic_bytes {
error!(target: "net::channel::read_command", "Error: Magic bytes mismatch");
@@ -372,14 +372,14 @@ impl Channel {
/// Subscribe to a message on the message subsystem.
pub async fn subscribe_msg<M: message::Message>(&self) -> Result<MessageSubscription<M>> {
debug!(
target: "net::channel::subscribe_msg()", "[START] command={} {self:?}",
target: "net::channel::subscribe_msg", "[START] command={} {self:?}",
M::NAME
);
let sub = self.message_subsystem.subscribe::<M>().await;
debug!(
target: "net::channel::subscribe_msg()", "[END] command={} {self:?}",
target: "net::channel::subscribe_msg", "[END] command={} {self:?}",
M::NAME
);
@@ -389,7 +389,7 @@ impl Channel {
/// Handle network errors. Panic if error passes silently, otherwise
/// broadcast the error.
async fn handle_stop(self: Arc<Self>, result: Result<()>) {
debug!(target: "net::channel::handle_stop()", "[START] {self:?}");
debug!(target: "net::channel::handle_stop", "[START] {self:?}");
self.stopped.store(true, SeqCst);
@@ -402,12 +402,12 @@ impl Channel {
}
}
debug!(target: "net::channel::handle_stop()", "[END] {self:?}");
debug!(target: "net::channel::handle_stop", "[END] {self:?}");
}
/// Run the receive loop. Start receiving messages or handle network failure.
async fn main_receive_loop(self: Arc<Self>) -> Result<()> {
debug!(target: "net::channel::main_receive_loop()", "[START] {self:?}");
debug!(target: "net::channel::main_receive_loop", "[START] {self:?}");
// Acquire reader lock
let reader = &mut *self.reader.lock().await;
@@ -419,7 +419,7 @@ impl Channel {
Err(err) => {
if Self::is_eof_error(&err) {
verbose!(
target: "net::channel::main_receive_loop()",
target: "net::channel::main_receive_loop",
"[P2P] Channel {} disconnected",
self.display_address()
);
@@ -433,14 +433,14 @@ impl Channel {
0
{
error!(
target: "net::channel::main_receive_loop()",
target: "net::channel::main_receive_loop",
"[P2P] Read error on channel {}: {err}",
self.display_address()
);
}
debug!(
target: "net::channel::main_receive_loop()",
target: "net::channel::main_receive_loop",
"Stopping channel {self:?}"
);
return Err(Error::ChannelStopped)
@@ -477,7 +477,7 @@ impl Channel {
// messages it does not have dispatchers for.
if self.session.upgrade().unwrap().type_id() != SESSION_REFINE {
warn!(
target: "net::channel::main_receive_loop()",
target: "net::channel::main_receive_loop",
"MissingDispatcher|MessageInvalid|MeteringLimitExceeded for command={command}, channel={self:?}"
);
@@ -495,8 +495,8 @@ impl Channel {
/// Ban a malicious peer and stop the channel.
pub async fn ban(&self) {
debug!(target: "net::channel::ban()", "START {self:?}");
debug!(target: "net::channel::ban()", "Peer: {:?}", self.display_address());
debug!(target: "net::channel::ban", "START {self:?}");
debug!(target: "net::channel::ban", "Peer: {:?}", self.display_address());
// Just store the hostname if this is an inbound session.
// This will block all ports from this peer by setting
@@ -536,17 +536,17 @@ impl Channel {
};
let last_seen = UNIX_EPOCH.elapsed().unwrap().as_secs();
verbose!(target: "net::channel::ban()", "Blacklisting peer={peer}");
verbose!(target: "net::channel::ban", "Blacklisting peer={peer}");
match self.p2p().hosts().move_host(&peer, last_seen, HostColor::Black).await {
Ok(()) => {
verbose!(target: "net::channel::ban()", "Peer={peer} blacklisted successfully");
verbose!(target: "net::channel::ban", "Peer={peer} blacklisted successfully");
}
Err(e) => {
warn!(target: "net::channel::ban()", "Could not blacklisted peer={peer}, err={e}");
warn!(target: "net::channel::ban", "Could not blacklisted peer={peer}, err={e}");
}
}
self.stop().await;
debug!(target: "net::channel::ban()", "STOP {self:?}");
debug!(target: "net::channel::ban", "STOP {self:?}");
}
/// Returns the relevant socket address for this connection. If this is

View File

@@ -360,45 +360,45 @@ impl HostContainer {
/// Append host to a hostlist. Called when initalizing the hostlist in load_hosts().
fn store(&self, color: usize, addr: Url, last_seen: u64) {
trace!(target: "net::hosts::store()", "[START] list={:?}",
trace!(target: "net::hosts::store", "[START] list={:?}",
HostColor::try_from(color).unwrap());
let mut list = self.hostlists[color].write().unwrap();
list.push((addr.clone(), last_seen));
debug!(target: "net::hosts::store()", "Added [{addr}] to {:?} list",
debug!(target: "net::hosts::store", "Added [{addr}] to {:?} list",
HostColor::try_from(color).unwrap());
trace!(target: "net::hosts::store()", "[END] list={:?}",
trace!(target: "net::hosts::store", "[END] list={:?}",
HostColor::try_from(color).unwrap());
}
/// Stores an address on a hostlist or updates its last_seen field if
/// we already have the address.
fn store_or_update(&self, color: HostColor, addr: Url, last_seen: u64) {
trace!(target: "net::hosts::store_or_update()", "[START]");
trace!(target: "net::hosts::store_or_update", "[START]");
let color_code = color.clone() as usize;
let mut list = self.hostlists[color_code].write().unwrap();
if let Some(entry) = list.iter_mut().find(|(u, _)| *u == addr) {
entry.1 = last_seen;
debug!(target: "net::hosts::store_or_update()", "Updated [{addr}] entry on {:?} list",
debug!(target: "net::hosts::store_or_update", "Updated [{addr}] entry on {:?} list",
color.clone());
} else {
list.push((addr.clone(), last_seen));
debug!(target: "net::hosts::store_or_update()", "Added [{addr}] to {color:?} list");
debug!(target: "net::hosts::store_or_update", "Added [{addr}] to {color:?} list");
}
trace!(target: "net::hosts::store_or_update()", "[STOP]");
trace!(target: "net::hosts::store_or_update", "[STOP]");
}
/// Update the last_seen field of a peer on a hostlist.
pub fn update_last_seen(&self, color: usize, addr: Url, last_seen: u64) {
trace!(target: "net::hosts::update_last_seen()", "[START] list={:?}",
trace!(target: "net::hosts::update_last_seen", "[START] list={:?}",
HostColor::try_from(color).unwrap());
let mut list = self.hostlists[color].write().unwrap();
if let Some(entry) = list.iter_mut().find(|(u, _)| *u == addr) {
entry.1 = last_seen;
}
trace!(target: "net::hosts::update_last_seen()", "[END] list={:?}",
trace!(target: "net::hosts::update_last_seen", "[END] list={:?}",
HostColor::try_from(color).unwrap());
}
@@ -421,7 +421,7 @@ impl HostContainer {
schemes: &[String],
limit: Option<usize>,
) -> Vec<(Url, u64)> {
trace!(target: "net::hosts::fetch_with_schemes()", "[START] {:?}",
trace!(target: "net::hosts::fetch_with_schemes", "[START] {:?}",
HostColor::try_from(color).unwrap());
let list = self.hostlists[color].read().unwrap();
@@ -441,7 +441,7 @@ impl HostContainer {
ret.push((addr.clone(), *last_seen));
limit -= 1;
if limit == 0 {
debug!(target: "net::hosts::fetch_with_schemes()",
debug!(target: "net::hosts::fetch_with_schemes",
"Found matching addr on list={:?}, returning {} addresses",
HostColor::try_from(color).unwrap(), ret.len());
return ret
@@ -450,7 +450,7 @@ impl HostContainer {
}
if ret.is_empty() {
debug!(target: "net::hosts::fetch_with_schemes()",
debug!(target: "net::hosts::fetch_with_schemes",
"No matching schemes found on list={:?}!", HostColor::try_from(color).unwrap())
}
@@ -466,7 +466,7 @@ impl HostContainer {
schemes: &[String],
limit: Option<usize>,
) -> Vec<(Url, u64)> {
trace!(target: "net::hosts::fetch_with_schemes()", "[START] {:?}",
trace!(target: "net::hosts::fetch_with_schemes", "[START] {:?}",
HostColor::try_from(color).unwrap());
let list = self.hostlists[color].read().unwrap();
@@ -492,7 +492,7 @@ impl HostContainer {
}
if ret.is_empty() {
debug!(target: "net::hosts::fetch_excluding_schemes()", "No such schemes found!");
debug!(target: "net::hosts::fetch_excluding_schemes", "No such schemes found!");
}
ret
@@ -506,7 +506,7 @@ impl HostContainer {
schemes: &[String],
) -> Option<((Url, u64), usize)> {
// Retrieve all peers corresponding to that transport schemes
trace!(target: "net::hosts::fetch_random_with_schemes()", "[START] {color:?}");
trace!(target: "net::hosts::fetch_random_with_schemes", "[START] {color:?}");
let list = self.fetch_with_schemes(color as usize, schemes, None);
if list.is_empty() {
@@ -520,7 +520,7 @@ impl HostContainer {
/// Get up to n random peers. Schemes are not taken into account.
pub(in crate::net) fn fetch_n_random(&self, color: HostColor, n: u32) -> Vec<(Url, u64)> {
trace!(target: "net::hosts::fetch_n_random()", "[START] {color:?}");
trace!(target: "net::hosts::fetch_n_random", "[START] {color:?}");
let n = n as usize;
if n == 0 {
return vec![]
@@ -534,7 +534,7 @@ impl HostContainer {
}
if hosts.is_empty() {
debug!(target: "net::hosts::fetch_n_random()", "No entries found!");
debug!(target: "net::hosts::fetch_n_random", "No entries found!");
return hosts
}
@@ -550,7 +550,7 @@ impl HostContainer {
schemes: &[String],
n: u32,
) -> Vec<(Url, u64)> {
trace!(target: "net::hosts::fetch_n_random_with_schemes()", "[START] {color:?}");
trace!(target: "net::hosts::fetch_n_random_with_schemes", "[START] {color:?}");
let index = color as usize;
let n = n as usize;
if n == 0 {
@@ -560,7 +560,7 @@ impl HostContainer {
// Retrieve all peers corresponding to that transport schemes
let hosts = self.fetch_with_schemes(index, schemes, None);
if hosts.is_empty() {
debug!(target: "net::hosts::fetch_n_random_with_schemes()",
debug!(target: "net::hosts::fetch_n_random_with_schemes",
"No such schemes found!");
return hosts
}
@@ -578,7 +578,7 @@ impl HostContainer {
schemes: &[String],
n: u32,
) -> Vec<(Url, u64)> {
trace!(target: "net::hosts::fetch_excluding_schemes()", "[START] {color:?}");
trace!(target: "net::hosts::fetch_excluding_schemes", "[START] {color:?}");
let index = color as usize;
let n = n as usize;
if n == 0 {
@@ -588,7 +588,7 @@ impl HostContainer {
let hosts = self.fetch_excluding_schemes(index, schemes, None);
if hosts.is_empty() {
debug!(target: "net::hosts::fetch_n_random_excluding_schemes()",
debug!(target: "net::hosts::fetch_n_random_excluding_schemes",
"No such schemes found!");
return hosts
}
@@ -603,7 +603,7 @@ impl HostContainer {
let color_code = color.clone() as usize;
let mut list = self.hostlists[color_code].write().unwrap();
if let Some(position) = list.iter().position(|(u, _)| u == addr) {
debug!(target: "net::hosts::remove_if_exists()", "Removing addr={addr} list={color:?}");
debug!(target: "net::hosts::remove_if_exists", "Removing addr={addr} list={color:?}");
list.remove(position);
}
}
@@ -663,7 +663,7 @@ impl HostContainer {
let last_entry = list.pop().unwrap();
debug!(
target: "net::hosts::resize()",
target: "net::hosts::resize",
"{color:?}list reached max size. Removed {last_entry:?}"
);
}
@@ -687,7 +687,7 @@ impl HostContainer {
// our system clock is behind or if other nodes are
// misreporting the last_seen field.
if now < last_seen {
debug!(target: "net::hosts::refresh()",
debug!(target: "net::hosts::refresh",
"last_seen [{now}] is newer than current system time [{last_seen}]. Skipping");
continue
}
@@ -697,7 +697,7 @@ impl HostContainer {
}
for item in old_items {
debug!(target: "net::hosts::refresh()", "Removing {item:?}");
debug!(target: "net::hosts::refresh", "Removing {item:?}");
self.remove_if_exists(color.clone(), &item);
}
}
@@ -716,7 +716,7 @@ impl HostContainer {
let contents = load_file(&path);
if let Err(e) = contents {
warn!(target: "net::hosts::load_hosts()", "Failed retrieving saved hosts: {e}");
warn!(target: "net::hosts::load_hosts", "Failed retrieving saved hosts: {e}");
return Ok(())
}
@@ -726,7 +726,7 @@ impl HostContainer {
let url = match Url::parse(data[1]) {
Ok(u) => u,
Err(e) => {
debug!(target: "net::hosts::load_hosts()", "Skipping malformed URL {e}");
debug!(target: "net::hosts::load_hosts", "Skipping malformed URL {e}");
continue
}
};
@@ -734,7 +734,7 @@ impl HostContainer {
let last_seen = match data[2].parse::<u64>() {
Ok(t) => t,
Err(e) => {
debug!(target: "net::hosts::load_hosts()", "Skipping malformed last seen {e}");
debug!(target: "net::hosts::load_hosts", "Skipping malformed last seen {e}");
continue
}
};
@@ -764,7 +764,7 @@ impl HostContainer {
self.refresh(HostColor::Dark, day);
}
_ => {
debug!(target: "net::hosts::load_hosts()", "Malformed list name...");
debug!(target: "net::hosts::load_hosts", "Malformed list name...");
}
}
}
@@ -791,9 +791,9 @@ impl HostContainer {
}
if !tsv.is_empty() {
verbose!(target: "net::hosts::save_hosts()", "Saving hosts to: {path:?}");
verbose!(target: "net::hosts::save_hosts", "Saving hosts to: {path:?}");
if let Err(e) = save_file(&path, &tsv) {
error!(target: "net::hosts::save_hosts()", "Failed saving hosts: {e}");
error!(target: "net::hosts::save_hosts", "Failed saving hosts: {e}");
}
}
@@ -922,7 +922,7 @@ impl Hosts {
/// Safely insert into the HostContainer. Filters the addresses first before storing and
/// notifies the publisher. Must be called when first receiving greylist addresses.
pub(in crate::net) async fn insert(&self, color: HostColor, addrs: &[(Url, u64)]) {
trace!(target: "net::hosts:insert()", "[START]");
trace!(target: "net::hosts:insert", "[START]");
// First filter these address to ensure this peer doesn't exist in our black, gold or
// whitelist and apply transport filtering. If we don't support this transport,
@@ -931,13 +931,13 @@ impl Hosts {
let mut addrs_len = 0;
if filtered_addrs.is_empty() {
debug!(target: "net::hosts::insert()", "Filtered out all addresses");
debug!(target: "net::hosts::insert", "Filtered out all addresses");
}
// Then ensure we aren't currently trying to add this peer to the hostlist.
for (i, (addr, last_seen)) in filtered_addrs.iter().enumerate() {
if let Err(e) = self.try_register(addr.clone(), HostState::Insert) {
debug!(target: "net::hosts::insert()", "Cannot insert addr={}, err={e}",
debug!(target: "net::hosts::insert", "Cannot insert addr={}, err={e}",
addr.clone());
continue
@@ -950,12 +950,12 @@ impl Hosts {
self.container.resize(color.clone());
if let Err(e) = self.unregister(addr) {
warn!(target: "net::hosts::insert()", "Error while unregistering addr={addr}, err={e}");
warn!(target: "net::hosts::insert", "Error while unregistering addr={addr}, err={e}");
}
}
self.store_publisher.notify(addrs_len).await;
trace!(target: "net::hosts:insert()", "[END]");
trace!(target: "net::hosts:insert", "[END]");
}
/// Check whether a peer is available to be refined currently. Returns true
@@ -973,7 +973,7 @@ impl Hosts {
) -> Result<HostState> {
let mut registry = self.registry.lock().unwrap();
trace!(target: "net::hosts::try_update_registry()", "Try register addr={addr}, state={}",
trace!(target: "net::hosts::try_update_registry", "Try register addr={addr}, state={}",
&new_state);
if registry.contains_key(&addr) {
@@ -993,12 +993,12 @@ impl Hosts {
registry.insert(addr.clone(), state.clone());
}
trace!(target: "net::hosts::try_update_registry()", "Returning result {result:?}");
trace!(target: "net::hosts::try_update_registry", "Returning result {result:?}");
result
} else {
// We don't know this peer. We can safely update the state.
debug!(target: "net::hosts::try_update_registry()", "Inserting addr={addr}, state={}",
debug!(target: "net::hosts::try_update_registry", "Inserting addr={addr}, state={}",
&new_state);
registry.insert(addr.clone(), new_state.clone());
@@ -1010,7 +1010,7 @@ impl Hosts {
// Loop through hosts selected by Outbound Session and see if any of them are
// free to connect to.
pub(in crate::net) async fn check_addrs(&self, hosts: Vec<(Url, u64)>) -> Option<(Url, u64)> {
trace!(target: "net::hosts::check_addrs()", "[START]");
trace!(target: "net::hosts::check_addrs", "[START]");
let seeds = self.settings.read().await.seeds.clone();
let external_addrs = self.external_addrs().await;
@@ -1043,7 +1043,7 @@ impl Hosts {
continue
}
debug!(target: "net::hosts::check_addrs()", "Found valid host {host}");
debug!(target: "net::hosts::check_addrs", "Found valid host {host}");
return Some((host.clone(), last_seen))
}
@@ -1054,7 +1054,7 @@ impl Hosts {
pub(in crate::net) fn unregister(&self, addr: &Url) -> Result<()> {
let age = UNIX_EPOCH.elapsed().unwrap().as_secs();
self.try_register(addr.clone(), HostState::Free(age))?;
debug!(target: "net::hosts::unregister()", "Unregistered: {}", &addr);
debug!(target: "net::hosts::unregister", "Unregistered: {}", &addr);
Ok(())
}
@@ -1433,7 +1433,7 @@ impl Hosts {
/// Downgrade host to Greylist, remove from Gold or White list.
pub async fn greylist_host(&self, addr: &Url, last_seen: u64) -> Result<()> {
debug!(target: "net::hosts:greylist_host()", "Downgrading addr={addr}");
debug!(target: "net::hosts:greylist_host", "Downgrading addr={addr}");
self.move_host(addr, last_seen, HostColor::Grey).await?;
// Free up this addr for future operations.
@@ -1441,7 +1441,7 @@ impl Hosts {
}
pub async fn whitelist_host(&self, addr: &Url, last_seen: u64) -> Result<()> {
debug!(target: "net::hosts:whitelist_host()", "Upgrading addr={addr}");
debug!(target: "net::hosts:whitelist_host", "Upgrading addr={addr}");
self.move_host(addr, last_seen, HostColor::White).await?;
// Free up this addr for future operations.
@@ -1467,12 +1467,12 @@ impl Hosts {
last_seen: u64,
destination: HostColor,
) -> Result<()> {
debug!(target: "net::hosts::move_host()", "Trying to move addr={addr} destination={destination:?}");
debug!(target: "net::hosts::move_host", "Trying to move addr={addr} destination={destination:?}");
// If we cannot register this address as move, this will simply return here.
self.try_register(addr.clone(), HostState::Move)?;
debug!(target: "net::hosts::move_host()", "Moving addr={} destination={destination:?}",
debug!(target: "net::hosts::move_host", "Moving addr={} destination={destination:?}",
addr.clone());
match destination {

View File

@@ -103,7 +103,7 @@ impl<M: Message> MessageDispatcher<M> {
let msg_result_type = if message.is_ok() { "Ok" } else { "Err" };
debug!(
target: "net::message_publisher::_trigger_all()", "START msg={msg_result_type}({}), subs={}",
target: "net::message_publisher::_trigger_all", "START msg={msg_result_type}({}), subs={}",
M::NAME, subs.len()
);
@@ -142,7 +142,7 @@ impl<M: Message> MessageDispatcher<M> {
}
debug!(
target: "net::message_publisher::_trigger_all()", "END msg={msg_result_type}({}), subs={}",
target: "net::message_publisher::_trigger_all", "END msg={msg_result_type}({}), subs={}",
M::NAME, subs.len(),
);
}
@@ -251,7 +251,7 @@ impl<M: Message> MessageDispatcherInterface for MessageDispatcher<M> {
Ok(int) => int.0,
Err(err) => {
error!(
target: "net::message_publisher::trigger()",
target: "net::message_publisher::trigger",
"Unable to decode VarInt. Dropping...: {err}"
);
return Err(Error::MessageInvalid)
@@ -261,7 +261,7 @@ impl<M: Message> MessageDispatcherInterface for MessageDispatcher<M> {
// Check the message length does not exceed set limit
if M::MAX_BYTES > 0 && length > M::MAX_BYTES {
error!(
target: "net::message_publisher::trigger()",
target: "net::message_publisher::trigger",
"Message length ({length}) exceeds configured limit ({}). Dropping...",
M::MAX_BYTES
);
@@ -274,7 +274,7 @@ impl<M: Message> MessageDispatcherInterface for MessageDispatcher<M> {
Ok(payload) => Ok(Arc::new(payload)),
Err(err) => {
error!(
target: "net::message_publisher::trigger()",
target: "net::message_publisher::trigger",
"Unable to decode data. Dropping...: {err}"
);
return Err(Error::MessageInvalid)

View File

@@ -97,7 +97,7 @@ impl MeteringQueue {
// This is an edge case where system reports a future timestamp
// therefore elapsed computation fails.
let Ok(elapsed) = ts.elapsed() else {
debug!(target: "net::metering::MeteringQueue::clean()", "Timestamp [{ts}] is in future. Removing...");
debug!(target: "net::metering::MeteringQueue::clean", "Timestamp [{ts}] is in future. Removing...");
let _ = self.queue.pop_front();
continue
};

View File

@@ -159,12 +159,12 @@ impl P2p {
/// Reseed the P2P network.
pub async fn seed(self: Arc<Self>) {
debug!(target: "net::p2p::seed()", "P2P::seed() [BEGIN]");
debug!(target: "net::p2p::seed", "P2P::seed() [BEGIN]");
// Activate the seed session.
self.session_seedsync().notify().await;
debug!(target: "net::p2p::seed()", "P2P::seed() [END]");
debug!(target: "net::p2p::seed", "P2P::seed() [END]");
}
/// Stop the running P2P subsystem
@@ -199,7 +199,7 @@ impl P2p {
/// Broadcast a message concurrently to all given peers.
pub async fn broadcast_to<M: Message>(&self, message: &M, channel_list: &[ChannelPtr]) {
if channel_list.is_empty() {
warn!(target: "net::p2p::broadcast()", "[P2P] No connected channels found for broadcast");
warn!(target: "net::p2p::broadcast", "[P2P] No connected channels found for broadcast");
return
}
@@ -313,7 +313,7 @@ async fn broadcast_serialized_to<M: Message>(
.send_serialized(&message, &M::METERING_SCORE, &M::METERING_CONFIGURATION)
.map_err(|e| {
error!(
target: "net::p2p::broadcast()",
target: "net::p2p::broadcast",
"[P2P] Broadcasting message to {} failed: {e}",
channel.display_address()
);

View File

@@ -104,19 +104,19 @@ impl ProtocolAddress {
/// received addresses to the greylist.
async fn handle_receive_addrs(self: Arc<Self>) -> Result<()> {
debug!(
target: "net::protocol_address::handle_receive_addrs()",
target: "net::protocol_address::handle_receive_addrs",
"[START] address={}", self.channel.display_address(),
);
loop {
let addrs_msg = self.addrs_sub.receive().await?;
debug!(
target: "net::protocol_address::handle_receive_addrs()",
target: "net::protocol_address::handle_receive_addrs",
"Received {} addrs from {}", addrs_msg.addrs.len(), self.channel.display_address(),
);
debug!(
target: "net::protocol_address::handle_receive_addrs()",
target: "net::protocol_address::handle_receive_addrs",
"Appending to greylist...",
);
@@ -129,7 +129,7 @@ impl ProtocolAddress {
/// with an address message.
async fn handle_receive_get_addrs(self: Arc<Self>) -> Result<()> {
debug!(
target: "net::protocol_address::handle_receive_get_addrs()",
target: "net::protocol_address::handle_receive_get_addrs",
"[START] address={}", self.channel.display_address(),
);
@@ -137,7 +137,7 @@ impl ProtocolAddress {
let get_addrs_msg = self.get_addrs_sub.receive().await?;
debug!(
target: "net::protocol_address::handle_receive_get_addrs()",
target: "net::protocol_address::handle_receive_get_addrs",
"Received GetAddrs({}) message from {}", get_addrs_msg.max, self.channel.display_address(),
);
@@ -150,7 +150,7 @@ impl ProtocolAddress {
.collect();
// First we grab address with the requested transports from the gold list
debug!(target: "net::protocol_address::handle_receive_get_addrs()",
debug!(target: "net::protocol_address::handle_receive_get_addrs",
"Fetching gold entries with schemes");
let mut addrs = self.hosts.container.fetch_n_random_with_schemes(
HostColor::Gold,
@@ -159,7 +159,7 @@ impl ProtocolAddress {
);
// Then we grab address with the requested transports from the whitelist
debug!(target: "net::protocol_address::handle_receive_get_addrs()",
debug!(target: "net::protocol_address::handle_receive_get_addrs",
"Fetching whitelist entries with schemes");
addrs.append(&mut self.hosts.container.fetch_n_random_with_schemes(
HostColor::White,
@@ -171,7 +171,7 @@ impl ProtocolAddress {
// to fill a 2 * max length vector.
// Then we grab address without the requested transports from the gold list
debug!(target: "net::protocol_address::handle_receive_get_addrs()",
debug!(target: "net::protocol_address::handle_receive_get_addrs",
"Fetching gold entries without schemes");
let remain = 2 * get_addrs_msg.max - addrs.len() as u32;
addrs.append(&mut self.hosts.container.fetch_n_random_excluding_schemes(
@@ -181,7 +181,7 @@ impl ProtocolAddress {
));
// Then we grab address without the requested transports from the white list
debug!(target: "net::protocol_address::handle_receive_get_addrs()",
debug!(target: "net::protocol_address::handle_receive_get_addrs",
"Fetching white entries without schemes");
let remain = 2 * get_addrs_msg.max - addrs.len() as u32;
addrs.append(&mut self.hosts.container.fetch_n_random_excluding_schemes(
@@ -197,7 +197,7 @@ impl ProtocolAddress {
so that they propagate on the network even if they're not
popular transports. */
debug!(target: "net::protocol_address::handle_receive_get_addrs()",
debug!(target: "net::protocol_address::handle_receive_get_addrs",
"Fetching dark entries");
let remain = 2 * get_addrs_msg.max - addrs.len() as u32;
addrs.append(&mut self.hosts.container.fetch_n_random(HostColor::Dark, remain));
@@ -206,7 +206,7 @@ impl ProtocolAddress {
addrs.retain(|addr| TRANSPORT_COMBOS.contains(&addr.0.scheme()));
debug!(
target: "net::protocol_address::handle_receive_get_addrs()",
target: "net::protocol_address::handle_receive_get_addrs",
"Sending {} addresses to {}", addrs.len(), self.channel.display_address(),
);
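The staged fetches above fill a reply of at most 2 * max entries: gold and white entries with the requested schemes first, then gold/white without them, then dark entries, each stage capped by the remaining budget. A sketch of that budgeting with plain vectors (the fetch helper here is an illustrative stand-in, not the actual Hosts container API):

// Illustrative stand-in for one hostlist fetch stage shown above.
fn fetch_stage(pool: &[&str], limit: u32) -> Vec<String> {
    pool.iter().take(limit as usize).map(|s| s.to_string()).collect()
}

fn main() {
    let max = 3u32;
    let mut addrs = Vec::new();

    // Stage 1: preferred entries (gold/white with the requested schemes).
    addrs.extend(fetch_stage(&["tcp://a", "tcp://b"], 2 * max));

    // Later stages only get whatever budget remains, so the reply never
    // exceeds 2 * max entries.
    let remain = 2 * max - addrs.len() as u32;
    addrs.extend(fetch_stage(&["tor://c", "tor://d", "tor://e", "tor://f"], remain));

    assert!(addrs.len() as u32 <= 2 * max);
    println!("{addrs:?}");
}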
@@ -273,7 +273,7 @@ impl ProtocolBase for ProtocolAddress {
/// get-address msg.
async fn start(self: Arc<Self>, ex: Arc<Executor<'_>>) -> Result<()> {
debug!(
target: "net::protocol_address::start()",
target: "net::protocol_address::start",
"START => address={}", self.channel.display_address(),
);
@@ -300,7 +300,7 @@ impl ProtocolBase for ProtocolAddress {
self.channel.send(&get_addrs).await?;
debug!(
target: "net::protocol_address::start()",
target: "net::protocol_address::start",
"END => address={}", self.channel.display_address(),
);

View File

@@ -79,7 +79,7 @@ impl ProtocolPing {
/// waits for the pong reply and ensures the nonce is the same.
async fn run_ping_pong(self: Arc<Self>) -> Result<()> {
debug!(
target: "net::protocol_ping::run_ping_pong()",
target: "net::protocol_ping::run_ping_pong",
"START => address={}", self.channel.display_address(),
);
@@ -117,7 +117,7 @@ impl ProtocolPing {
// Pong timeout. We didn't receive any message back
// so close the connection.
warn!(
target: "net::protocol_ping::run_ping_pong()",
target: "net::protocol_ping::run_ping_pong",
"[P2P] Ping-Pong protocol timed out for {}", self.channel.display_address(),
);
self.channel.stop().await;
@@ -127,7 +127,7 @@ impl ProtocolPing {
if pong_msg.nonce != nonce {
error!(
target: "net::protocol_ping::run_ping_pong()",
target: "net::protocol_ping::run_ping_pong",
"[P2P] Wrong nonce in pingpong, disconnecting {}",
self.channel.display_address(),
);
@@ -136,7 +136,7 @@ impl ProtocolPing {
}
debug!(
target: "net::protocol_ping::run_ping_pong()",
target: "net::protocol_ping::run_ping_pong",
"Received Pong from {}: {:?}",
self.channel.display_address(),
timer.elapsed(),
@@ -151,7 +151,7 @@ impl ProtocolPing {
/// Copies ping's nonce into the pong reply.
async fn reply_to_ping(self: Arc<Self>) -> Result<()> {
debug!(
target: "net::protocol_ping::reply_to_ping()",
target: "net::protocol_ping::reply_to_ping",
"START => address={}", self.channel.display_address(),
);
@@ -159,7 +159,7 @@ impl ProtocolPing {
// Wait for ping, reply with pong that has a matching nonce.
let ping = self.ping_sub.receive().await?;
debug!(
target: "net::protocol_ping::reply_to_ping()",
target: "net::protocol_ping::reply_to_ping",
"Received Ping from {}", self.channel.display_address(),
);
@@ -168,7 +168,7 @@ impl ProtocolPing {
self.channel.send(&pong).await?;
debug!(
target: "net::protocol_ping::reply_to_ping()",
target: "net::protocol_ping::reply_to_ping",
"Sent Pong reply to {}", self.channel.display_address(),
);
}
@@ -185,11 +185,11 @@ impl ProtocolBase for ProtocolPing {
/// protocol task manager, then queues the reply. Sends out a ping and
/// waits for pong reply. Waits for ping and replies with a pong.
async fn start(self: Arc<Self>, ex: Arc<Executor<'_>>) -> Result<()> {
debug!(target: "net::protocol_ping::start()", "START => address={}", self.channel.display_address());
debug!(target: "net::protocol_ping::start", "START => address={}", self.channel.display_address());
self.jobsman.clone().start(ex.clone());
self.jobsman.clone().spawn(self.clone().run_ping_pong(), ex.clone()).await;
self.jobsman.clone().spawn(self.clone().reply_to_ping(), ex).await;
debug!(target: "net::protocol_ping::start()", "END => address={}", self.channel.display_address());
debug!(target: "net::protocol_ping::start", "END => address={}", self.channel.display_address());
Ok(())
}
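The two tasks above form a simple liveness check: run_ping_pong sends a random nonce and expects it echoed back (or times out and closes the channel), while reply_to_ping copies the incoming nonce into its pong. A minimal sketch of the nonce round-trip; the message shapes and nonce width are assumptions, only the echo rule is taken from the diff:

// Hypothetical message shapes for illustration.
struct Ping { nonce: u16 }
struct Pong { nonce: u16 }

// The responder copies the ping's nonce into its pong.
fn reply(ping: &Ping) -> Pong {
    Pong { nonce: ping.nonce }
}

// The initiator disconnects if the echoed nonce does not match.
fn pong_is_valid(sent: &Ping, received: &Pong) -> bool {
    sent.nonce == received.nonce
}

fn main() {
    let ping = Ping { nonce: 42 };
    let pong = reply(&ping);
    assert!(pong_is_valid(&ping, &pong));
}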

View File

@@ -103,7 +103,7 @@ impl ProtocolBase for ProtocolSeed {
/// to the seed server. Sends a get-address message and receives an
/// address message.
async fn start(self: Arc<Self>, _ex: Arc<Executor<'_>>) -> Result<()> {
debug!(target: "net::protocol_seed::start()", "START => address={}", self.channel.display_address());
debug!(target: "net::protocol_seed::start", "START => address={}", self.channel.display_address());
// Send own address to the seed server
self.send_my_addrs().await?;
@@ -125,19 +125,19 @@ impl ProtocolBase for ProtocolSeed {
// Receive addresses
let addrs_msg = self.addr_sub.receive().await?;
debug!(
target: "net::protocol_seed::start()",
target: "net::protocol_seed::start",
"Received {} addrs from {}", addrs_msg.addrs.len(), self.channel.display_address(),
);
if !addrs_msg.addrs.is_empty() {
debug!(
target: "net::protocol_seed::start()",
target: "net::protocol_seed::start",
"Appending to greylist...",
);
self.hosts.insert(HostColor::Grey, &addrs_msg.addrs).await;
}
debug!(target: "net::protocol_seed::start()", "END => address={}", self.channel.display_address());
debug!(target: "net::protocol_seed::start", "END => address={}", self.channel.display_address());
Ok(())
}

View File

@@ -68,7 +68,7 @@ impl ProtocolVersion {
/// info and wait for version ack. Wait for version info and send
/// version ack.
pub async fn run(self: Arc<Self>, executor: Arc<Executor<'_>>) -> Result<()> {
debug!(target: "net::protocol_version::run()", "START => address={}", self.channel.display_address());
debug!(target: "net::protocol_version::run", "START => address={}", self.channel.display_address());
let channel_handshake_timeout =
self.settings.read().await.channel_handshake_timeout(self.channel.address().scheme());
@@ -83,14 +83,14 @@ impl ProtocolVersion {
// time out.
match select(version, timeout).await {
Either::Left((Ok(_), _)) => {
debug!(target: "net::protocol_version::run()", "END => address={}",
debug!(target: "net::protocol_version::run", "END => address={}",
self.channel.display_address());
Ok(())
}
Either::Left((Err(e), _)) => {
error!(
target: "net::protocol_version::run()",
target: "net::protocol_version::run",
"[P2P] Version Exchange failed [{}]: {e}",
self.channel.display_address()
);
@@ -101,7 +101,7 @@ impl ProtocolVersion {
Either::Right((_, _)) => {
error!(
target: "net::protocol_version::run()",
target: "net::protocol_version::run",
"[P2P] Version Exchange timed out [{}]",
self.channel.display_address(),
);
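The match arms above race the version exchange against a timer: Either::Left carries the exchange's result, while Either::Right means the timer fired first and the handshake is abandoned. A sketch of the same pattern, assuming the futures crate for select/Either and smol for the timer (the bodies are placeholders):

use std::time::Duration;
use futures::future::{select, Either};

// Races a stubbed version exchange against a timeout, mirroring the
// match arms in the diff above.
async fn run_with_timeout() -> Result<(), &'static str> {
    let version = Box::pin(async {
        // ... exchange_versions() would run here ...
        Ok::<(), &'static str>(())
    });
    let timeout = Box::pin(async {
        smol::Timer::after(Duration::from_secs(10)).await;
    });

    match select(version, timeout).await {
        Either::Left((Ok(()), _)) => Ok(()),                  // finished in time
        Either::Left((Err(e), _)) => Err(e),                  // exchange failed
        Either::Right((_, _)) => Err("version exchange timed out"),
    }
}

fn main() {
    assert!(smol::block_on(run_with_timeout()).is_ok());
}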
@@ -115,7 +115,7 @@ impl ProtocolVersion {
/// Send and receive version information
async fn exchange_versions(self: Arc<Self>, executor: Arc<Executor<'_>>) -> Result<()> {
debug!(
target: "net::protocol_version::exchange_versions()",
target: "net::protocol_version::exchange_versions",
"START => address={}", self.channel.display_address(),
);
@@ -125,7 +125,7 @@ impl ProtocolVersion {
let rets = join_all(vec![send, recv]).await;
if let Err(e) = &rets[0] {
error!(
target: "net::protocol_version::exchange_versions()",
target: "net::protocol_version::exchange_versions",
"send_version() failed: {e}"
);
return Err(e.clone())
@@ -133,14 +133,14 @@ impl ProtocolVersion {
if let Err(e) = &rets[1] {
error!(
target: "net::protocol_version::exchange_versions()",
target: "net::protocol_version::exchange_versions",
"recv_version() failed: {e}"
);
return Err(e.clone())
}
debug!(
target: "net::protocol_version::exchange_versions()",
target: "net::protocol_version::exchange_versions",
"END => address={}", self.channel.display_address(),
);
Ok(())
@@ -150,7 +150,7 @@ impl ProtocolVersion {
/// Ensures that the app version is the same.
async fn send_version(self: Arc<Self>) -> Result<()> {
debug!(
target: "net::protocol_version::send_version()",
target: "net::protocol_version::send_version",
"START => address={}", self.channel.display_address(),
);
@@ -182,7 +182,7 @@ impl ProtocolVersion {
// Validate peer received version against our version.
debug!(
target: "net::protocol_version::send_version()",
target: "net::protocol_version::send_version",
"App version: {app_version}, Recv version: {}",
verack_msg.app_version,
);
@@ -193,7 +193,7 @@ impl ProtocolVersion {
app_name != verack_msg.app_name
{
error!(
target: "net::protocol_version::send_version()",
target: "net::protocol_version::send_version",
"[P2P] Version mismatch from {}. Disconnecting...",
self.channel.display_address(),
);
@@ -211,7 +211,7 @@ impl ProtocolVersion {
// Versions are compatible
debug!(
target: "net::protocol_version::send_version()",
target: "net::protocol_version::send_version",
"END => address={}", self.channel.display_address(),
);
Ok(())
@@ -221,7 +221,7 @@ impl ProtocolVersion {
/// with app version attached.
async fn recv_version(self: Arc<Self>) -> Result<()> {
debug!(
target: "net::protocol_version::recv_version()",
target: "net::protocol_version::recv_version",
"START => address={}", self.channel.display_address(),
);
@@ -243,7 +243,7 @@ impl ProtocolVersion {
self.channel.send(&verack).await?;
debug!(
target: "net::protocol_version::recv_version()",
target: "net::protocol_version::recv_version",
"END => address={}", self.channel.display_address(),
);
Ok(())

View File

@@ -244,7 +244,7 @@ impl DirectSession {
match res {
Ok(()) | Err(Error::DetachedTaskStopped) => { /* Do nothing */ }
Err(e) => {
error!(target: "net::direct_session::get_channel_with_retries()", "{e}")
error!(target: "net::direct_session::get_channel_with_retries", "{e}")
}
}
},
@@ -537,7 +537,7 @@ impl PeerDiscovery {
if current_attempt >= 4 {
verbose!(
target: "net::direct_session::peer_discovery()",
target: "net::direct_session::peer_discovery",
"[P2P] [PEER DISCOVERY] Sleeping and trying again. Attempt {current_attempt}"
);
@@ -579,7 +579,7 @@ impl PeerDiscovery {
// Broadcast the GetAddrs message to all active peers.
// If we have no active peers, we will perform a SeedSyncSession instead.
verbose!(
target: "net::direct_session::peer_discovery()",
target: "net::direct_session::peer_discovery",
"[P2P] [PEER DISCOVERY] Asking peers for new peers to connect to...");
dnetev!(self, DirectPeerDiscovery, {
@@ -604,7 +604,7 @@ impl PeerDiscovery {
match result {
Ok(addrs_len) => {
verbose!(
target: "net::direct_session::peer_discovery()",
target: "net::direct_session::peer_discovery",
"[P2P] [PEER DISCOVERY] Discovered {addrs_len} peers"
);
// Found some addrs, reset `current_attempt`
@@ -614,7 +614,7 @@ impl PeerDiscovery {
}
Err(_) => {
warn!(
target: "net::direct_session::peer_discovery()",
target: "net::direct_session::peer_discovery",
"[P2P] [PEER DISCOVERY] Waiting for addrs timed out."
);
// Just do seed next time
@@ -629,7 +629,7 @@ impl PeerDiscovery {
store_sub.unsubscribe().await;
} else if !seeds.is_empty() {
verbose!(
target: "net::direct_session::peer_discovery()",
target: "net::direct_session::peer_discovery",
"[P2P] [PEER DISCOVERY] Asking seeds for new peers to connect to...");
dnetev!(self, DirectPeerDiscovery, {

View File

@@ -194,14 +194,14 @@ impl InboundSession {
stop_sub.receive().await;
debug!(
target: "net::inbound_session::setup_channel()",
target: "net::inbound_session::setup_channel",
"Received stop_sub, channel removed from P2P",
);
}
}
Err(e) => {
warn!(
target: "net::inbound_session::setup_channel()",
target: "net::inbound_session::setup_channel",
"Channel setup failed! Err={e}"
);
}

View File

@@ -69,14 +69,14 @@ pub async fn remove_sub_on_stop(
type_id: SessionBitFlag,
stop_sub: Subscription<Error>,
) {
debug!(target: "net::session::remove_sub_on_stop()", "[START]");
debug!(target: "net::session::remove_sub_on_stop", "[START]");
let hosts = p2p.hosts();
let addr = channel.address();
stop_sub.receive().await;
debug!(
target: "net::session::remove_sub_on_stop()",
target: "net::session::remove_sub_on_stop",
"Received stop event. Removing channel {}",
channel.display_address()
);
@@ -84,7 +84,7 @@ pub async fn remove_sub_on_stop(
// Downgrade to greylist if this is an outbound session.
if type_id & (SESSION_OUTBOUND | SESSION_DIRECT) != 0 {
debug!(
target: "net::session::remove_sub_on_stop()",
target: "net::session::remove_sub_on_stop",
"Downgrading {}",
channel.display_address()
);
@@ -95,12 +95,12 @@ pub async fn remove_sub_on_stop(
match hosts.fetch_last_seen(addr) {
Some(last_seen) => {
if let Err(e) = hosts.move_host(addr, last_seen, HostColor::Grey).await {
error!(target: "net::session::remove_sub_on_stop()",
error!(target: "net::session::remove_sub_on_stop",
"Failed to move host {} to Greylist! Err={e}", channel.display_address());
}
}
None => {
error!(target: "net::session::remove_sub_on_stop()",
error!(target: "net::session::remove_sub_on_stop",
"Failed to fetch last seen for {}", channel.display_address());
}
}
@@ -112,7 +112,7 @@ pub async fn remove_sub_on_stop(
// happens in the refinery directly.
if type_id & SESSION_REFINE == 0 {
if let Err(e) = hosts.unregister(channel.address()) {
error!(target: "net::session::remove_sub_on_stop()", "Error while unregistering addr={}, err={e}", channel.display_address());
error!(target: "net::session::remove_sub_on_stop", "Error while unregistering addr={}, err={e}", channel.display_address());
}
}
@@ -131,7 +131,7 @@ pub async fn remove_sub_on_stop(
if !p2p.is_connected() {
hosts.disconnect_publisher.notify(Error::NetworkNotConnected).await;
}
debug!(target: "net::session::remove_sub_on_stop()", "[END]");
debug!(target: "net::session::remove_sub_on_stop", "[END]");
}
/// Session trait. Defines methods that are used across sessions.
@@ -152,7 +152,7 @@ pub trait Session: Sync {
channel: ChannelPtr,
executor: Arc<Executor<'_>>,
) -> Result<()> {
trace!(target: "net::session::register_channel()", "[START]");
trace!(target: "net::session::register_channel", "[START]");
// Protocols should all be initialized but not started.
// We do this so that the protocols can begin receiving and buffering
@@ -165,7 +165,7 @@ pub trait Session: Sync {
// Perform the handshake protocol
let protocol_version = ProtocolVersion::new(channel.clone(), p2p.settings().clone()).await;
debug!(
target: "net::session::register_channel()",
target: "net::session::register_channel",
"Performing handshake protocols {}", channel.clone().display_address(),
);
@@ -178,11 +178,11 @@ pub trait Session: Sync {
// Wait for handshake to finish.
match handshake_task.await {
Ok(()) => {
debug!(target: "net::session::register_channel()",
debug!(target: "net::session::register_channel",
"Handshake successful {}", channel.clone().display_address());
}
Err(e) => {
debug!(target: "net::session::register_channel()",
debug!(target: "net::session::register_channel",
"Handshake error {e} {}", channel.clone().display_address());
return Err(e)
@@ -190,8 +190,8 @@ pub trait Session: Sync {
}
// Now the channel is ready
debug!(target: "net::session::register_channel()", "Session handshake complete");
debug!(target: "net::session::register_channel()", "Activating remaining protocols");
debug!(target: "net::session::register_channel", "Session handshake complete");
debug!(target: "net::session::register_channel", "Activating remaining protocols");
// Now start all the protocols. They are responsible for managing their own
// lifetimes and correctly self-destructing when the channel ends.
@@ -199,7 +199,7 @@ pub trait Session: Sync {
protocol.start(executor.clone()).await?;
}
trace!(target: "net::session::register_channel()", "[END]");
trace!(target: "net::session::register_channel", "[END]");
Ok(())
}
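The comments above spell out the ordering: every protocol is initialized first (so incoming messages are buffered), the version handshake runs, and only then are the protocols started. A compact sketch of that ordering with an illustrative Protocol trait; the real ProtocolBase is async and richer, so treat the names and signatures here as assumptions:

// Illustrative trait, not the crate's actual ProtocolBase.
trait Protocol {
    fn init(&self);   // subscribe so messages are buffered, do not process yet
    fn start(&self);  // begin processing buffered and new messages
}

fn register_channel(
    protocols: &[Box<dyn Protocol>],
    handshake: impl FnOnce() -> Result<(), String>,
) -> Result<(), String> {
    // 1. Initialize everything before the handshake so nothing is lost.
    for p in protocols {
        p.init();
    }
    // 2. Perform the version handshake; bail out on failure.
    handshake()?;
    // 3. Only now activate the remaining protocols.
    for p in protocols {
        p.start();
    }
    Ok(())
}

struct Noop;
impl Protocol for Noop {
    fn init(&self) {}
    fn start(&self) {}
}

fn main() {
    let protos: Vec<Box<dyn Protocol>> = vec![Box::new(Noop)];
    assert!(register_channel(&protos, || Ok(())).is_ok());
}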
@@ -222,7 +222,7 @@ pub trait Session: Sync {
// Upgrade to goldlist if this is a outbound session.
if self.type_id() & (SESSION_OUTBOUND | SESSION_DIRECT) != 0 {
debug!(
target: "net::session::perform_handshake_protocols()",
target: "net::session::perform_handshake_protocols",
"Upgrading {}", channel.display_address(),
);

View File

@@ -250,7 +250,7 @@ impl Slot {
loop {
// Activate the slot
debug!(
target: "net::outbound_session::try_connect()",
target: "net::outbound_session::try_connect",
"[P2P] Finding a host to connect to for outbound slot #{}",
self.slot,
);
@@ -275,11 +275,11 @@ impl Slot {
}
let addr = if let Some(addr) = self.fetch_addrs().await {
debug!(target: "net::outbound_session::run()", "Fetched addr={}, slot #{}", addr.0,
debug!(target: "net::outbound_session::run", "Fetched addr={}, slot #{}", addr.0,
self.slot);
addr
} else {
debug!(target: "net::outbound_session::run()", "No address found! Activating peer discovery...");
debug!(target: "net::outbound_session::run", "No address found! Activating peer discovery...");
dnetev!(self, OutboundSlotSleeping, {
slot: self.slot,
});
@@ -298,7 +298,7 @@ impl Slot {
let slot = self.slot;
verbose!(
target: "net::outbound_session::try_connect()",
target: "net::outbound_session::try_connect",
"[P2P] Connecting outbound slot #{slot} [{host}]"
);
@@ -311,7 +311,7 @@ impl Slot {
Ok(connect_info) => connect_info,
Err(err) => {
debug!(
target: "net::outbound_session::try_connect()",
target: "net::outbound_session::try_connect",
"[P2P] Outbound slot #{slot} connection failed: {err}"
);
@@ -330,7 +330,7 @@ impl Slot {
let stop_sub = channel.subscribe_stop().await?;
verbose!(
target: "net::outbound_session::try_connect()",
target: "net::outbound_session::try_connect",
"[P2P] Outbound slot #{slot} connected [{}]",
channel.display_address()
);
@@ -358,7 +358,7 @@ impl Slot {
self.channel_id.store(0, Ordering::Relaxed);
warn!(
target: "net::outbound_session::try_connect()",
target: "net::outbound_session::try_connect",
"[P2P] Suspending addr=[{}] slot #{slot}",
channel.display_address()
);
@@ -406,7 +406,7 @@ impl Slot {
Err(err) => {
verbose!(
target: "net::outbound_session::try_connect()",
target: "net::outbound_session::try_connect",
"[P2P] Unable to connect outbound slot #{} {err}",
self.slot
);
@@ -423,7 +423,7 @@ impl Slot {
// Mark its state as Suspend, which sends it to the Refinery for processing.
if let Err(e) = self.p2p().hosts().try_register(addr.clone(), HostState::Suspend) {
warn!(target: "net::outbound_session::try_connect()", "Error while suspending addr={addr}: {e}");
warn!(target: "net::outbound_session::try_connect", "Error while suspending addr={addr}: {e}");
}
// Notify that channel processing failed
@@ -551,7 +551,7 @@ impl PeerDiscoveryBase for PeerDiscovery {
if current_attempt >= 4 {
verbose!(
target: "net::outbound_session::peer_discovery()",
target: "net::outbound_session::peer_discovery",
"[P2P] [PEER DISCOVERY] Sleeping and trying again. Attempt {current_attempt}"
);
@@ -571,7 +571,7 @@ impl PeerDiscoveryBase for PeerDiscovery {
// Broadcast the GetAddrs message to all active peers.
// If we have no active peers, we will perform a SeedSyncSession instead.
verbose!(
target: "net::outbound_session::peer_discovery()",
target: "net::outbound_session::peer_discovery",
"[P2P] [PEER DISCOVERY] Asking peers for new peers to connect to...");
dnetev!(self, OutboundPeerDiscovery, {
@@ -598,13 +598,13 @@ impl PeerDiscoveryBase for PeerDiscovery {
match result {
Ok(addrs_len) => {
verbose!(
target: "net::outbound_session::peer_discovery()",
target: "net::outbound_session::peer_discovery",
"[P2P] [PEER DISCOVERY] Discovered {addrs_len} peers"
);
}
Err(_) => {
warn!(
target: "net::outbound_session::peer_discovery()",
target: "net::outbound_session::peer_discovery",
"[P2P] [PEER DISCOVERY] Waiting for addrs timed out."
);
// Just do seed next time
@@ -619,7 +619,7 @@ impl PeerDiscoveryBase for PeerDiscovery {
store_sub.unsubscribe().await;
} else if !seeds.is_empty() {
verbose!(
target: "net::outbound_session::peer_discovery()",
target: "net::outbound_session::peer_discovery",
"[P2P] [PEER DISCOVERY] Asking seeds for new peers to connect to...");
dnetev!(self, OutboundPeerDiscovery, {

View File

@@ -102,10 +102,10 @@ impl RefineSession {
if let Some(ref hostlist) = self.p2p().settings().read().await.hostlist {
match self.p2p().hosts().container.save_all(hostlist) {
Ok(()) => {
debug!(target: "net::refine_session::stop()", "Save hosts successful!");
debug!(target: "net::refine_session::stop", "Save hosts successful!");
}
Err(e) => {
warn!(target: "net::refine_session::stop()", "Error saving hosts {e}");
warn!(target: "net::refine_session::stop", "Error saving hosts {e}");
}
}
}
@@ -118,19 +118,19 @@ impl RefineSession {
let self_ = Arc::downgrade(&self);
let connector = Connector::new(self.p2p().settings(), self_);
debug!(target: "net::refinery::handshake_node()", "Attempting to connect to {addr}");
debug!(target: "net::refinery::handshake_node", "Attempting to connect to {addr}");
match connector.connect(&addr).await {
Ok((url, channel)) => {
debug!(target: "net::refinery::handshake_node()", "Successfully created a channel with {url}");
debug!(target: "net::refinery::handshake_node", "Successfully created a channel with {url}");
// First initialize the version protocol and its Version, Verack subscriptions.
let proto_ver = ProtocolVersion::new(channel.clone(), p2p.settings()).await;
debug!(target: "net::refinery::handshake_node()", "Performing handshake protocols with {url}");
debug!(target: "net::refinery::handshake_node", "Performing handshake protocols with {url}");
// Then run the version exchange, store the channel and subscribe to a stop signal.
let handshake =
self.perform_handshake_protocols(proto_ver, channel.clone(), p2p.executor());
debug!(target: "net::refinery::handshake_node()", "Starting channel {url}");
debug!(target: "net::refinery::handshake_node", "Starting channel {url}");
channel.clone().start(p2p.executor());
// Ensure the channel gets stopped by adding a timeout to the handshake. Otherwise if
@@ -143,27 +143,27 @@ impl RefineSession {
let result = match select(handshake, timeout).await {
Either::Left((Ok(_), _)) => {
debug!(target: "net::refinery::handshake_node()", "Handshake success!");
debug!(target: "net::refinery::handshake_node", "Handshake success!");
true
}
Either::Left((Err(e), _)) => {
debug!(target: "net::refinery::handshake_node()", "Handshake error={e}");
debug!(target: "net::refinery::handshake_node", "Handshake error={e}");
false
}
Either::Right((_, _)) => {
debug!(target: "net::refinery::handshake_node()", "Handshake timed out");
debug!(target: "net::refinery::handshake_node", "Handshake timed out");
false
}
};
debug!(target: "net::refinery::handshake_node()", "Stopping channel {url}");
debug!(target: "net::refinery::handshake_node", "Stopping channel {url}");
channel.stop().await;
result
}
Err(e) => {
debug!(target: "net::refinery::handshake_node()", "Failed to connect ({e})");
debug!(target: "net::refinery::handshake_node", "Failed to connect ({e})");
false
}
}

View File

@@ -234,7 +234,7 @@ impl Slot {
// Seed process complete
if hosts.container.is_empty(HostColor::Grey) {
warn!(target: "net::session::seedsync_session()",
warn!(target: "net::session::seedsync_session",
"[P2P] Greylist empty after seeding");
}

View File

@@ -107,7 +107,7 @@ impl RpcClient {
req_recv: channel::Receiver<(JsonRequest, bool)>,
req_skip_recv: channel::Receiver<()>,
) -> Result<()> {
debug!(target: "rpc::client::reqrep_loop()", "Starting reqrep loop");
debug!(target: "rpc::client::reqrep_loop", "Starting reqrep loop");
let (reader, mut writer) = smol::io::split(stream);
let mut reader = BufReader::new(reader);
@@ -358,7 +358,7 @@ impl RpcChadClient {
rep_send: channel::Sender<JsonResult>,
req_recv: channel::Receiver<JsonRequest>,
) -> Result<()> {
debug!(target: "rpc::chad_client::reqrep_loop()", "Starting reqrep loop");
debug!(target: "rpc::chad_client::reqrep_loop", "Starting reqrep loop");
let (reader, mut writer) = smol::io::split(stream);
let mut reader = BufReader::new(reader);

View File

@@ -263,7 +263,7 @@ pub async fn accept<'a, T: 'a>(
if let Some(conn_limit) = conn_limit {
if rh.clone().active_connections().await >= conn_limit {
debug!(
target: "rpc::server::accept()",
target: "rpc::server::accept",
"Connection limit reached, refusing new conn"
);
return Err(Error::RpcConnectionsExhausted)
@@ -288,7 +288,7 @@ pub async fn accept<'a, T: 'a>(
Ok(v) => v,
Err(e) => {
warn!(
target: "rpc::server::accept()",
target: "rpc::server::accept",
"[RPC SERVER] Failed parsing string from read buffer: {e}"
);
return Err(e.into())
@@ -300,7 +300,7 @@ pub async fn accept<'a, T: 'a>(
Ok(v) => v,
Err(e) => {
warn!(
target: "rpc::server::accept()",
target: "rpc::server::accept",
"[RPC SERVER] Failed parsing JSON string: {e}"
);
return Err(e.into())
@@ -312,7 +312,7 @@ pub async fn accept<'a, T: 'a>(
Ok(v) => v,
Err(e) => {
warn!(
target: "rpc::server::accept()",
target: "rpc::server::accept",
"[RPC SERVER] Failed casting JSON to a JsonRequest: {e}"
);
return Err(e.into())
@@ -403,28 +403,28 @@ async fn run_accept_loop<'a, T: 'a>(
libc::EAGAIN | libc::ECONNABORTED | libc::EPROTO | libc::EINTR => continue,
libc::ECONNRESET => {
warn!(
target: "rpc::server::run_accept_loop()",
target: "rpc::server::run_accept_loop",
"[RPC] Connection reset by peer in accept_loop"
);
continue
}
libc::ETIMEDOUT => {
warn!(
target: "rpc::server::run_accept_loop()",
target: "rpc::server::run_accept_loop",
"[RPC] Connection timed out in accept_loop"
);
continue
}
libc::EPIPE => {
warn!(
target: "rpc::server::run_accept_loop()",
target: "rpc::server::run_accept_loop",
"[RPC] Broken pipe in accept_loop"
);
continue
}
x => {
warn!(
target: "rpc::server::run_accept_loop()",
target: "rpc::server::run_accept_loop",
"[RPC] Unhandled OS Error: {e} {x}"
);
continue
@@ -439,7 +439,7 @@ async fn run_accept_loop<'a, T: 'a>(
if let Some(inner) = std::error::Error::source(&e) {
if let Some(inner) = inner.downcast_ref::<futures_rustls::rustls::Error>() {
warn!(
target: "rpc::server::run_accept_loop()",
target: "rpc::server::run_accept_loop",
"[RPC] rustls listener error: {inner:?}"
);
continue
@@ -447,7 +447,7 @@ async fn run_accept_loop<'a, T: 'a>(
}
warn!(
target: "rpc::server::run_accept_loop()",
target: "rpc::server::run_accept_loop",
"[RPC] Unhandled ErrorKind::Other error: {e:?}"
);
continue
@@ -456,7 +456,7 @@ async fn run_accept_loop<'a, T: 'a>(
// Errors we didn't handle above:
Err(e) => {
warn!(
target: "rpc::server::run_accept_loop()",
target: "rpc::server::run_accept_loop",
"[RPC] Unhandled listener.next() error: {e}"
);
continue
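The arms above classify raw OS error codes from the accept loop: transient conditions (EAGAIN, ECONNABORTED, EPROTO, EINTR, ECONNRESET, ETIMEDOUT, EPIPE) are logged and skipped rather than aborting the listener. A small sketch of that classification using std::io::Error::raw_os_error and the libc constants (the helper itself is illustrative):

/// Returns true for errors the accept loop should log and skip,
/// mirroring the match arms in the diff above.
fn is_transient(e: &std::io::Error) -> bool {
    match e.raw_os_error() {
        Some(code) => matches!(
            code,
            libc::EAGAIN
                | libc::ECONNABORTED
                | libc::EPROTO
                | libc::EINTR
                | libc::ECONNRESET
                | libc::ETIMEDOUT
                | libc::EPIPE
        ),
        None => false,
    }
}

fn main() {
    let reset = std::io::Error::from_raw_os_error(libc::ECONNRESET);
    assert!(is_transient(&reset));
}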

View File

@@ -102,7 +102,7 @@ pub(crate) fn get_object_bytes(mut ctx: FunctionEnvMut<Env>, ptr: WasmPtr<u8>, i
acl_allow(env, &[ContractSection::Deploy, ContractSection::Metadata, ContractSection::Exec])
{
error!(
target: "runtime::util::get_object_bytes()",
target: "runtime::util::get_object_bytes",
"[WASM] [{cid}] get_object_bytes(): Called in unauthorized section: {e}"
);
return darkfi_sdk::error::CALLER_ACCESS_DENIED
@@ -163,7 +163,7 @@ pub(crate) fn get_object_size(mut ctx: FunctionEnvMut<Env>, idx: u32) -> i64 {
acl_allow(env, &[ContractSection::Deploy, ContractSection::Metadata, ContractSection::Exec])
{
error!(
target: "runtime::util::get_object_size()",
target: "runtime::util::get_object_size",
"[WASM] [{cid}] get_object_size(): Called in unauthorized section: {e}"
);
return darkfi_sdk::error::CALLER_ACCESS_DENIED