From 4bf43ec521280509a9f317fed3d8421f5cfd6492 Mon Sep 17 00:00:00 2001 From: lunar-mining Date: Tue, 16 Jan 2024 13:49:37 +0100 Subject: [PATCH] net: downgrade whitelist to greylist on stop This commit forces all whitelisted peers through the greylist each time we start a node, thus ensuring that whitelisted peers are in fact reachable. Otherwise, our whitelist risks getting polluted with unreachable whitelisted peers. While the greylist has a refinery process to deal with this, the whitelist has no such process. --- src/net/hosts/refinery.rs | 3 +++ src/net/hosts/store.rs | 38 +++++++++++++++++++++++++++++--------- 2 files changed, 32 insertions(+), 9 deletions(-) diff --git a/src/net/hosts/refinery.rs b/src/net/hosts/refinery.rs index 3ac1639b3..6f7d1b55f 100644 --- a/src/net/hosts/refinery.rs +++ b/src/net/hosts/refinery.rs @@ -67,6 +67,9 @@ impl GreylistRefinery { } pub async fn stop(self: Arc) { + // First downgrade whitelist entries to the greylist, so they are saved as greylist entries. + self.p2p().hosts().whitelist_downgrade().await; + match self.p2p().hosts().save_hosts().await { Ok(()) => { debug!(target: "net::refinery::stop()", "Save hosts successful!"); diff --git a/src/net/hosts/store.rs b/src/net/hosts/store.rs index 740f2790a..418f7437c 100644 --- a/src/net/hosts/store.rs +++ b/src/net/hosts/store.rs @@ -696,20 +696,28 @@ impl Hosts { anchorlist.iter().find(|(url, _)| url == addr).map(|(url, time)| (url.clone(), *time)) } + /// Return all known greylisted hosts + pub async fn greylist_fetch_all(&self) -> Vec<(Url, u64)> { + self.greylist.read().await.iter().cloned().collect() + } + /// Return all known whitelisted hosts pub async fn whitelist_fetch_all(&self) -> Vec<(Url, u64)> { self.whitelist.read().await.iter().cloned().collect() } - /// Return all known hosts - pub async fn hostlist_fetch_all(&self) -> HashMap> { + /// Return all known anchorlisted hosts + pub async fn anchorlist_fetch_all(&self) -> Vec<(Url, u64)> { + self.anchorlist.read().await.iter().cloned().collect() + } + + /// 
Return anchorlist and greylist hosts. Called on stop(). + pub async fn hostlist_fetch_safe(&self) -> HashMap> { let mut hostlist = HashMap::new(); hostlist.insert( "anchorlist".to_string(), self.anchorlist.read().await.iter().cloned().collect(), ); - hostlist - .insert("whitelist".to_string(), self.whitelist.read().await.iter().cloned().collect()); hostlist .insert("greylist".to_string(), self.greylist.read().await.iter().cloned().collect()); hostlist @@ -962,6 +970,18 @@ impl Hosts { ret } + /// Downgrade all whitelist entries to the greylist. Called on GreylistRefinery::stop(). + pub async fn whitelist_downgrade(&self) { + let mut greylist = self.greylist.write().await; + + for (url, last_seen) in self.whitelist.read().await.iter() { + if !self.greylist_contains(&url).await { + greylist.push((url.clone(), *last_seen)); + } + } + + } + /// Load the hostlist from a file. pub async fn load_hosts(&self) -> Result<()> { let path = expand_path(&self.settings.hostlist)?; @@ -1003,9 +1023,6 @@ impl Hosts { "greylist" => { self.greylist_store(url, last_seen).await; } - "whitelist" => { - self.whitelist_store(url, last_seen).await; - } "anchorlist" => { self.anchorlist_store(url, last_seen).await; } @@ -1018,13 +1035,16 @@ impl Hosts { Ok(()) } - /// Save the hostlist to a file. + /// Save the greylist and anchorlist to a file. + /// Note: we do not save the whitelist here as doing so would make the whitelist + /// effectively static. Instead, we first downgrade whitelist entries to the greylist before + /// storing them on disk. This forces all whitelist entries through the refinery. pub async fn save_hosts(&self) -> Result<()> { let path = expand_path(&self.settings.hostlist)?; let mut tsv = String::new(); - for (name, list) in self.hostlist_fetch_all().await { + for (name, list) in self.hostlist_fetch_safe().await { for (url, last_seen) in list { tsv.push_str(&format!("{}\t{}\t{}\n", name, url, last_seen)); }