net: downgrade whitelist to greylist on stop

This commit downgrades all whitelisted peers to the greylist when a node
stops. On the next start they must pass through the greylist refinery
again, ensuring that whitelisted peers are in fact reachable.

Otherwise, our whitelist risks getting polluted with unreachable
whitelisted peers. While the greylist has a refinery process to deal
with this, the whitelist has no such process.
This commit is contained in:
lunar-mining
2024-01-16 13:49:37 +01:00
parent c0e23dca86
commit 4bf43ec521
2 changed files with 32 additions and 9 deletions

View File

@@ -67,6 +67,9 @@ impl GreylistRefinery {
}
pub async fn stop(self: Arc<Self>) {
// First save whitelist entries on the greylist.
self.p2p().hosts().whitelist_downgrade().await;
match self.p2p().hosts().save_hosts().await {
Ok(()) => {
debug!(target: "net::refinery::stop()", "Save hosts successful!");

View File

@@ -696,20 +696,28 @@ impl Hosts {
anchorlist.iter().find(|(url, _)| url == addr).map(|(url, time)| (url.clone(), *time))
}
/// Return a snapshot of all known greylisted hosts.
pub async fn greylist_fetch_all(&self) -> Vec<(Url, u64)> {
    let guard = self.greylist.read().await;
    let mut entries = Vec::with_capacity(guard.len());
    entries.extend(guard.iter().cloned());
    entries
}
/// Return a snapshot of all known whitelisted hosts.
pub async fn whitelist_fetch_all(&self) -> Vec<(Url, u64)> {
    let guard = self.whitelist.read().await;
    let mut entries = Vec::with_capacity(guard.len());
    entries.extend(guard.iter().cloned());
    entries
}
/// Return all known hosts
pub async fn hostlist_fetch_all(&self) -> HashMap<String, Vec<(Url, u64)>> {
/// Return a snapshot of all known anchorlisted hosts.
pub async fn anchorlist_fetch_all(&self) -> Vec<(Url, u64)> {
    let guard = self.anchorlist.read().await;
    let mut entries = Vec::with_capacity(guard.len());
    entries.extend(guard.iter().cloned());
    entries
}
/// Return anchorlist and greylist hosts. Called on stop().
pub async fn hostlist_fetch_safe(&self) -> HashMap<String, Vec<(Url, u64)>> {
let mut hostlist = HashMap::new();
hostlist.insert(
"anchorlist".to_string(),
self.anchorlist.read().await.iter().cloned().collect(),
);
hostlist
.insert("whitelist".to_string(), self.whitelist.read().await.iter().cloned().collect());
hostlist
.insert("greylist".to_string(), self.greylist.read().await.iter().cloned().collect());
hostlist
@@ -962,6 +970,18 @@ impl Hosts {
ret
}
/// Downgrade all whitelist entries to the greylist. Called on GreylistRefinery::stop().
///
/// Entries already present on the greylist are skipped so we do not create
/// duplicates; otherwise the (url, last_seen) pair is appended unchanged.
pub async fn whitelist_downgrade(&self) {
    let whitelist = self.whitelist.read().await;
    let mut greylist = self.greylist.write().await;
    for (url, last_seen) in whitelist.iter() {
        // Membership must be checked through the write guard we already
        // hold. Calling greylist_contains() here would try to acquire the
        // greylist read lock while this task holds the write lock; async
        // RwLocks are not reentrant, so that self-deadlocks on stop().
        if !greylist.iter().any(|(u, _)| u == url) {
            greylist.push((url.clone(), *last_seen));
        }
    }
}
/// Load the hostlist from a file.
pub async fn load_hosts(&self) -> Result<()> {
let path = expand_path(&self.settings.hostlist)?;
@@ -1003,9 +1023,6 @@ impl Hosts {
"greylist" => {
self.greylist_store(url, last_seen).await;
}
"whitelist" => {
self.whitelist_store(url, last_seen).await;
}
"anchorlist" => {
self.anchorlist_store(url, last_seen).await;
}
@@ -1018,13 +1035,16 @@ impl Hosts {
Ok(())
}
/// Save the hostlist to a file.
/// Save the greylist and anchorlist to a file.
/// Note: we do not save the whitelist here as doing so would make the whitelist
/// effectively static. Instead, we first downgrade whitelist entries to the greylist before
/// storing them on disk. This forces all whitelist entries through the refinery.
pub async fn save_hosts(&self) -> Result<()> {
let path = expand_path(&self.settings.hostlist)?;
let mut tsv = String::new();
for (name, list) in self.hostlist_fetch_all().await {
for (name, list) in self.hostlist_fetch_safe().await {
for (url, last_seen) in list {
tsv.push_str(&format!("{}\t{}\t{}\n", name, url, last_seen));
}