updated theoretical waku latency/bandwidth computations

This commit is contained in:
ufarooqstatus
2025-09-29 23:16:50 +02:00
parent a459439e3c
commit df5bf92c28
3 changed files with 55 additions and 19 deletions

View File

@@ -36,6 +36,9 @@ avg_shards_per_node = 3 # average number of shards a given node is part of
# latency
average_delay_per_hop = 0.1 # s
+ # peer bandwidth
+ average_peer_bandwidth = 30 # Mbps
# TODO: load case for status control messages (note: this also introduces messages by currently online, but not active users.)
# TODO: spread in the latency distribution (the highest 10%ish of latencies might be too high)
@@ -78,4 +81,5 @@ a37 = "- A37. Size of messages large enough to trigger IDONTWANT (static): " + s
# Assumption strings (delay)
a41 = "- A41. Delay is calculated based on an upper bound of the expected distance."
a42 = "- A42. Average delay per hop (static): " + str(average_delay_per_hop) + "s."
a42 = "- A42. Average delay per hop (static): " + str(average_delay_per_hop) + "s."
a43 = "- A43. Average peer bandwidth (static): " + str(average_peer_bandwidth) + "Mbps."

View File

@@ -33,6 +33,7 @@ from assumptions import (
a35,
a36,
a37,
+ a43,
)
from assumptions import (
@@ -51,6 +52,7 @@ from assumptions import (
big_message_size,
small_message_size,
idontwant_too_late,
+ average_peer_bandwidth,
)
from utils import load_color_fmt, magnitude_fmt, get_header, sizeof_fmt
@@ -96,7 +98,7 @@ def latency_str(latency_users_fn, n_users, degree):
latency,
"For "
+ magnitude_fmt(n_users)
+ " the average latency is "
+ " the maximum latency is "
+ ("%.3f" % latency_users_fn(n_users, degree))
+ " s",
)
@@ -232,10 +234,10 @@ class Case4(Case):
messages_sent_per_hour * (n_users * (average_node_degree - 1) + 1)
) # see case 3
messages_load = message_size * messages_received_per_hour
- num_ihave = messages_sent_per_hour * n_users * d_lazy * mcache_gossip
+ num_ihave = messages_sent_per_hour * n_users * d_lazy * mcache_gossip  # batched messages? n * heartbeat_count * (d-1)_batches * batch size?
ihave_load = num_ihave * gossip_message_size
gossip_response_load = (
- num_ihave * message_size  # receive load only, IWANT load not included
+ num_ihave * message_size  # computing receive load only, IWANT load not included
) * avg_ratio_gossip_replys # reply load contains both an IWANT (from requester to sender), and the actual wanted message (from sender to requester)
gossip_total = ihave_load + gossip_response_load
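As a quick cross-check of the regenerated Case 4 output further below, here is a minimal standalone sketch (the helper name case4_receive_load_mb is hypothetical; constants are taken from the printed assumptions: ~2KB messages shown as 2.05KB, 0.05KB gossip messages, degree 6, 5 messages per user per hour, a 1% IWANT follow-up ratio, and d_lazy = 6, mcache_gossip = 3 assumed as the usual gossipsub defaults):

message_size = 0.002           # MB, ~2KB (printed as 2.05KB, A01)
gossip_message_size = 0.00005  # MB, 0.05KB (A32)
average_node_degree = 6        # A03
messages_sent_per_hour = 5     # A02
d_lazy = 6                     # assumed gossipsub default
mcache_gossip = 3              # assumed gossipsub default
avg_ratio_gossip_replys = 0.01 # A33

def case4_receive_load_mb(n_users):
    # per-node receive load: mesh messages + IHAVEs + gossip replies
    messages_received = messages_sent_per_hour * (n_users * (average_node_degree - 1) + 1)
    messages_load = message_size * messages_received
    num_ihave = messages_sent_per_hour * n_users * d_lazy * mcache_gossip
    ihave_load = num_ihave * gossip_message_size
    gossip_response_load = num_ihave * message_size * avg_ratio_gossip_replys
    return messages_load + ihave_load + gossip_response_load

print(case4_receive_load_mb(100))     # ~5.6 MB/hour
print(case4_receive_load_mb(10_000))  # ~563.0 MB/hour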
@@ -377,7 +379,15 @@ class LatencyCase1(Case):
legend: str = "Latency case 1. topology: 6-regular graph. No gossip"
def load(self, n_users, degree):
- return avg_node_distance_upper_bound(n_users, degree) * average_delay_per_hop
+ # ceil(log_d(n)) gives a closer approximation of the longest path involved
+ longest_path = math.ceil(avg_node_distance_upper_bound(n_users, degree))
+ data_per_hour = n_users * messages_sent_per_hour * message_size
+ # on average, every peer makes d/2 transmissions for each message
+ data_rate = (data_per_hour * (average_node_degree/2) * 8) / 3600  # Mbps
+ tx_time = longest_path * (data_rate / average_peer_bandwidth)  # sec
+ propagation_time = longest_path * average_delay_per_hop  # sec
+ return propagation_time + tx_time
@property
def header(self) -> str:
@@ -399,4 +409,4 @@ class LatencyCase1(Case):
@property
def assumptions(self):
- return [a3, a41, a42]
+ return [a3, a41, a42, a43]
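The new latency model can be sanity-checked with a self-contained sketch (the helper name latency_s is hypothetical; avg_node_distance_upper_bound(n, d) is taken as log_d(n), which matches the previously printed 0.257/0.514/0.771 figures; remaining constants come from the printed assumptions):

import math

average_node_degree = 6      # A03
messages_sent_per_hour = 5   # A02
message_size = 0.002         # MB, ~2KB (A01)
average_delay_per_hop = 0.1  # s (A42)
average_peer_bandwidth = 30  # Mbps (A43)

def latency_s(n_users, degree=average_node_degree):
    # longest path approximated by ceil(log_d(n))
    longest_path = math.ceil(math.log(n_users, degree))
    data_per_hour = n_users * messages_sent_per_hour * message_size   # MB/hour
    # every peer makes d/2 transmissions per message on average
    data_rate = data_per_hour * (average_node_degree / 2) * 8 / 3600  # Mbps
    tx_time = longest_path * (data_rate / average_peer_bandwidth)     # s
    return longest_path * average_delay_per_hop + tx_time

print(latency_s(100))        # ~0.301
print(latency_s(10_000))     # ~0.733
print(latency_s(1_000_000))  # ~18.578

Since data_rate grows linearly with n_users while the bandwidth budget stays fixed, tx_time comes to dominate at large network sizes, which is what drives the regenerated figures in the output file below.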

View File

@@ -148,7 +148,7 @@ Assumptions/Simplifications:
- A04. Messages outside of Waku Relay are not considered, e.g. store messages.
- A05. Messages are only sent once along an edge. (requires delays before sending)
- A07. Single shard (i.e. single pubsub mesh)
- - A21. Gossip is not considered.
+ - A31. Gossip is not considered.
For 100 users, receiving bandwidth is 3.0MB/hour
For 10k users, receiving bandwidth is 300.0MB/hour
@@ -164,7 +164,7 @@ Assumptions/Simplifications:
- A04. Messages outside of Waku Relay are not considered, e.g. store messages.
- A06. Messages are sent to all d-1 neighbours as soon as receiving a message (current operation)
- A07. Single shard (i.e. single pubsub mesh)
- - A21. Gossip is not considered.
+ - A31. Gossip is not considered.
For 100 users, receiving bandwidth is 5.0MB/hour
For 10k users, receiving bandwidth is 500.0MB/hour
@@ -183,8 +183,29 @@ Assumptions/Simplifications:
- A32. Gossip message size (IHAVE/IWANT) (static):0.05KB
- A33. Ratio of IHAVEs followed-up by an IWANT (incl. the actual requested message):0.01
- For 100 users, receiving bandwidth is 8.2MB/hour
- For 10k users, receiving bandwidth is 817.2MB/hour
+ For 100 users, receiving bandwidth is 5.6MB/hour
+ For 10k users, receiving bandwidth is 563.0MB/hour
+ ------------------------------------------------------------
+ Load case 5 (received load per node with IDONTWANT messages, excl. gossip)
+ Assumptions/Simplifications:
+ - A01. Message size (static): 2.05KB
+ - A02. Messages sent per node per hour (static) (assuming no spam; but also no rate limiting.): 5
+ - A03. The network topology is a d-regular graph of degree (static): 6
+ - A04. Messages outside of Waku Relay are not considered, e.g. store messages.
+ - A06. Messages are sent to all d-1 neighbours as soon as receiving a message (current operation)
+ - A07. Single shard (i.e. single pubsub mesh)
+ - A16. There exists at most one peer edge between any two nodes.
+ - A17. The peer network is connected.
+ - A34. Gossip message size for IDONTWANT (static): 0.05KB
+ - A35. Ratio of messages that are big enough to trigger a IDONTWANT response: 0.2
+ - A36. Ratio of big messages that are avoided due to IDONTWANT: 1.67
+ - A37. Size of messages large enough to trigger IDONTWANT (static): 6.14KB
+ For 100 users, receiving bandwidth is 4.0MB/hour
+ For 10k users, receiving bandwidth is 400.4MB/hour
------------------------------------------------------------
@@ -206,9 +227,9 @@ Assumptions/Simplifications:
- A32. Gossip message size (IHAVE/IWANT) (static):0.05KB
- A33. Ratio of IHAVEs followed-up by an IWANT (incl. the actual requested message):0.01
- For 100 users, receiving bandwidth is 8.2MB/hour
- For 10k users, receiving bandwidth is 817.3MB/hour
- For 1m users, receiving bandwidth is 2451.8MB/hour
+ For 100 users, receiving bandwidth is 5.7MB/hour
+ For 10k users, receiving bandwidth is 563.0MB/hour
+ For 1m users, receiving bandwidth is 1689.0MB/hour
------------------------------------------------------------
@@ -235,9 +256,9 @@ Assumptions/Simplifications:
- A32. Gossip message size (IHAVE/IWANT) (static):0.05KB
- A33. Ratio of IHAVEs followed-up by an IWANT (incl. the actual requested message):0.01
- For 100 users, receiving bandwidth is 16.3MB/hour
- For 10k users, receiving bandwidth is 1634.5MB/hour
- For 1m users, receiving bandwidth is 3269.0MB/hour
+ For 100 users, receiving bandwidth is 11.3MB/hour
+ For 10k users, receiving bandwidth is 1126.0MB/hour
+ For 1m users, receiving bandwidth is 2252.0MB/hour
------------------------------------------------------------
@@ -268,10 +289,11 @@ Assumptions/Simplifications:
- A03. The network topology is a d-regular graph of degree (static): 6
- A41. Delay is calculated based on an upper bound of the expected distance.
- A42. Average delay per hop (static): 0.1s.
+ - A43. Average peer bandwidth (static): 30Mbps.
- For 100 the average latency is 0.257 s
- For 10k the average latency is 0.514 s (max with sharding)
- For 1m the average latency is 0.771 s (even in a single shard)
+ For 100 the maximum latency is 0.301 s
+ For 10k the maximum latency is 0.733 s (max with sharding)
+ For 1m the maximum latency is 18.578 s (even in a single shard)
------------------------------------------------------------
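The 1m-user figure is dominated by the new transmission-time term rather than by propagation delay: under the printed assumptions, the aggregate relay rate in a single shard is roughly 10^6 users × 5 msg/h × 2KB × 3 transmissions × 8 bits / 3600 s ≈ 67 Mbps, more than double the assumed 30 Mbps peer budget, giving tx_time ≈ 8 hops × 67/30 ≈ 17.8 s against only 0.8 s of propagation delay.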