From d5df21cbbc7dd76e20476f95a25a6f28f99e4a32 Mon Sep 17 00:00:00 2001 From: dasman Date: Wed, 25 Dec 2024 03:20:28 +0300 Subject: [PATCH] event_graph: remove already fixed TODOs and debug log msgs --- src/event_graph/event.rs | 2 -- src/event_graph/mod.rs | 2 -- src/event_graph/proto.rs | 1 - src/event_graph/tests.rs | 22 +--------------------- src/event_graph/util.rs | 3 +-- 5 files changed, 2 insertions(+), 28 deletions(-) diff --git a/src/event_graph/event.rs b/src/event_graph/event.rs index 739936651..cfbdbea05 100644 --- a/src/event_graph/event.rs +++ b/src/event_graph/event.rs @@ -19,7 +19,6 @@ use std::{collections::HashSet, time::UNIX_EPOCH}; use darkfi_serial::{async_trait, deserialize_async, Encodable, SerialDecodable, SerialEncodable}; -use log::info; use sled_overlay::{sled, SledTreeOverlay}; use crate::Result; @@ -100,7 +99,6 @@ impl Event { ) -> Result { // Let's not bother with empty events if self.content.is_empty() { - info!("content is emtpy"); return Ok(false) } diff --git a/src/event_graph/mod.rs b/src/event_graph/mod.rs index ff8cb7149..57880de3c 100644 --- a/src/event_graph/mod.rs +++ b/src/event_graph/mod.rs @@ -160,7 +160,6 @@ impl DAGStore { /// This is called if prune_task activates. pub async fn add_dag(&mut self, dag_name: &str, genesis_event: &Event) { debug!("add_dag::dags: {}", self.dags.len()); - // TODO: sort dags by timestamp and drop the oldest if self.dags.len() > DAGS_MAX_NUMBER.try_into().unwrap() { while self.dags.len() >= DAGS_MAX_NUMBER.try_into().unwrap() { debug!("[EVENTGRAPH] dropping oldest dag"); @@ -908,7 +907,6 @@ impl EventGraph { vec_tips } - // TODO: Fix fetching all events from all dags and then order and retrun them /// Perform a topological sort of the DAG. 
pub async fn order_events(&self) -> Vec { let mut ordered_events = VecDeque::new(); diff --git a/src/event_graph/proto.rs b/src/event_graph/proto.rs index 87cdf3a7d..8f9b37728 100644 --- a/src/event_graph/proto.rs +++ b/src/event_graph/proto.rs @@ -448,7 +448,6 @@ impl ProtocolEventGraph { target: "event_graph::protocol::handle_event_req()", "Fetching event {:?} from DAG", event_id, ); - // TODO: search for the event among all the dags events.push(self.event_graph.fetch_event(event_id).await.unwrap().unwrap()); } diff --git a/src/event_graph/tests.rs b/src/event_graph/tests.rs index 185aa2db9..d7f06f2a4 100644 --- a/src/event_graph/tests.rs +++ b/src/event_graph/tests.rs @@ -161,7 +161,6 @@ async fn assert_dags(eg_instances: &[Arc], expected_len: usize, rng: for (i, eg) in eg_instances.iter().enumerate() { let current_genesis = eg.current_genesis.read().await; let dag_name = current_genesis.id().to_string(); - info!("dag_name: {}", dag_name); let dag = eg.dag_store.read().await.get_dag(&dag_name); let unreferenced_tips = eg.dag_store.read().await.find_unreferenced_tips(&dag).await; let node_last_layer_tips = unreferenced_tips.last_key_value().unwrap().1.clone(); @@ -225,7 +224,6 @@ async fn eventgraph_propagation_real(ex: Arc>) { // ========================================= // 1. Assert that everyone's DAG is the same // ========================================= - info!("11111111111"); assert_dags(&eg_instances, 1, &mut rng).await; // ========================================== @@ -238,20 +236,9 @@ async fn eventgraph_propagation_real(ex: Arc>) { assert!(event.parents.contains(&genesis_event_id)); // The node adds it to their DAG, on layer 1. 
let event_id = random_node.dag_insert(&[event.clone()], &dag_name).await.unwrap()[0]; - info!("event id: {}", event_id); let store = random_node.dag_store.read().await; - let (d, tips_layers) = store.dags.get(&current_genesis.id()).unwrap(); - for key in d.iter().keys() { - let x = key.unwrap(); - let id = blake3::Hash::from_bytes((&x as &[u8]).try_into().unwrap()); - info!("id: {}", id); - } + let (_, tips_layers) = store.dags.get(&current_genesis.id()).unwrap(); - for (_, i) in tips_layers.iter() { - for j in i.iter() { - info!("j: {}", j); - } - } // Since genesis was referenced, its layer (0) have been removed assert_eq!(tips_layers.len(), 1); assert!(tips_layers.last_key_value().unwrap().1.get(&event_id).is_some()); @@ -265,7 +252,6 @@ async fn eventgraph_propagation_real(ex: Arc>) { // ==================================================== // 3. Assert that everyone has the new event in the DAG // ==================================================== - info!("33333333"); assert_dags(&eg_instances, 2, &mut rng).await; // ============================================================== @@ -304,7 +290,6 @@ async fn eventgraph_propagation_real(ex: Arc>) { // ========================================== // 5. Assert that everyone has all the events // ========================================== - info!("555555555"); assert_dags(&eg_instances, 5, &mut rng).await; // =========================================== @@ -364,7 +349,6 @@ async fn eventgraph_propagation_real(ex: Arc>) { // 7. Assert that everyone has all the events // ========================================== // 5 events from 2. and 4. + 9 events from 6. = 14 - info!("77777777"); assert_dags(&eg_instances, 14, &mut rng).await; // ============================================================ @@ -403,7 +387,6 @@ async fn eventgraph_propagation_real(ex: Arc>) { // 9. Assert the new synced DAG has the same contents as others // ============================================================ // 5 events from 2. and 4.
+ 9 events from 6. = 14 - info!("9999999999"); assert_dags(&eg_instances, 14, &mut rng).await; // Stop the P2P network @@ -429,7 +412,6 @@ async fn eventgraph_chaotic_propagation_real(ex: Arc>) { // ========================================= // 1. Assert that everyone's DAG is the same // ========================================= - info!("another 111111111111"); assert_dags(&eg_instances, 1, &mut rng).await; // =========================================== @@ -448,7 +430,6 @@ async fn eventgraph_chaotic_propagation_real(ex: Arc>) { // ========================================== // 3. Assert that everyone has all the events // ========================================== - info!("another 333333333"); assert_dags(&eg_instances, n_events + 1, &mut rng).await; // ============================================================ @@ -486,7 +467,6 @@ async fn eventgraph_chaotic_propagation_real(ex: Arc>) { // ============================================================ // 5. Assert the new synced DAG has the same contents as others // ============================================================ - info!("another 555555555"); assert_dags(&eg_instances, n_events + 1, &mut rng).await; // Stop the P2P network diff --git a/src/event_graph/util.rs b/src/event_graph/util.rs index 1a06e7c61..0b03f54f9 100644 --- a/src/event_graph/util.rs +++ b/src/event_graph/util.rs @@ -25,7 +25,7 @@ use std::{ }; use darkfi_serial::{deserialize, deserialize_async, serialize}; -use log::{error, info}; +use log::error; use sled_overlay::sled; use tinyjson::JsonValue; @@ -125,7 +125,6 @@ pub fn generate_genesis(days_rotation: u64) -> Event { // Calculate the timestamp of the most recent event INITIAL_GENESIS + (rotations_since_genesis * days_rotation * DAY as u64) }; - info!("generate ts: {}", timestamp); Event { timestamp, content: GENESIS_CONTENTS.to_vec(),