Merge pull request #129 from vacp2p/Alberto/discv5

Discv5
This commit is contained in:
Alberto Soutullo
2023-07-30 13:24:30 +02:00
committed by GitHub
48 changed files with 344 additions and 213 deletions

9
.gitignore vendored
View File

@@ -18,19 +18,16 @@ data/
enclave.dump/
wls-module/requirements.txt
wls-module/__pycache__
config/network_topology_auto/
config/config.json
config/topology_generated/*
gennet-module/topology/
gennet-module/network_data/
gennet-module/traits
kurtosis_log.txt
config/network_topology/
config/topology_generated/
config/waku_config_files/
kurtosisrun_log.txt
summary.json
wakurtosis_logs/
wakurtosis_logs/*
log_trace_test
*/__pycache__/*
# monitoring
docker-ps.out

View File

@@ -1,6 +1,5 @@
Wakurtosis
=====================
Starting version for Waku network simulations (https://github.com/waku-org/pm/issues/2)
More info about Kurtosis: https://docs.kurtosis.com/
@@ -9,7 +8,7 @@ More info about Kurtosis: https://docs.kurtosis.com/
#### Before using this repository note that:
- **You are using Kurtosis version 0.70.2**. This is important, as they are working on it and changes can be huge depending on different versions. You can find all Kurtosis versions [here](https://github.com/kurtosis-tech/kurtosis-cli-release-artifacts/releases).
- **You are using Kurtosis version 0.77.0**. This is important, as they are working on it and changes can be huge depending on different versions. You can find all Kurtosis versions [here](https://github.com/kurtosis-tech/kurtosis-cli-release-artifacts/releases).
- The topology files that will be used by default are defined in `config/topology_generated/`. This topology is created with the [gennet](gennet-module/Readme.md) module.
- Kurtosis can set up services in a parallel manner, defined in the `config.json` file (see below).
- Only `kurtosis` and `docker` are needed to run this.
@@ -18,11 +17,13 @@ More info about Kurtosis: https://docs.kurtosis.com/
From the root of the repo run:
`sh ./run.sh`
`./build.sh`
This will load the default configuration file **./config/config.json**. You can also specify a different .json config file and its location with:
`./run.sh <measurement_infra> [enclave_name] [config_file]`
`sh ./run.sh ./config/config.json`
There are 4 different measurements: `cadvisor`, `dstats`, `host-proc`, `container-proc`. The other parameters are optional.
By default, the enclave name is `wakurtosis` and the config file is in `config/config.json`.
#### JSON main configuration file options
@@ -30,34 +31,11 @@ These are arguments that can be modified:
- _prng_seed_: int. Seed to reproduce results.
- _enclave_name_: string. Default: **wakurtosis**. Defines the name of the Kurtosis enclave being created.
- _topology_file_: string. Default: **waku_test_topology_small.json**. It defines the network topology that will be created.
- _topology_path_: string. Topology information that will be read.
- _jobs_: int. Defines how many services will be instantiated at the same time.
- _interconnect_nodes_: It allows to skip the interconnection phase of the topology.
- _interconnection_batch_: int. If nodes are being connected by a given topology, this tells kurtosis how many connections will try to set up in the same node at a time. Used to avoid timeouts if a node has a lot of connections.
- [WLS](wls-module/README.md) module configuration
- [Gennet](gennet-module/Readme.md) module configuration
- [WLS](wls-module/README.md) module configuration
#### What will happen
Kurtosis will automatically add one node as container inside the enclave. The way that nodes are interconnected is given by the topology.
The configuration of each node is given by the configuration file.
Once all nodes are ready, prometheus and grafana will be set up and connected to all nodes.
Once all nodes have been interconnected the simulation starts and will inject traffic into the network following the parameters specified in the configuration file.
#### Check Prometheus+Grafana+Logs
- Simulation log:
'kurtosis service logs wakurtosis $(kurtosis enclave inspect <enclave-name> | grep wls- | awk '{print $1}')'
- Grafana server:
To display the IP address and Port of the Grafana server on your local machine run:
'kurtosis enclave inspect <enclave-name> | grep grafana- | awk '{print $6}''
Remember that by default <enclave-name> is 'wakurtosis'.
Please, any improvements/bugs that you see, create an issue, and we will work on it.

View File

@@ -29,4 +29,4 @@ ENV PYTHONPATH "${PYTHONPATH}:src"
# Set the entrypoint
# `docker run -it analysis /bin/sh` vs `docker run -it --entrypoint /bin/sh analysis` ?
ENTRYPOINT ["python"]
ENTRYPOINT ["python"]

View File

@@ -8,7 +8,6 @@ from src import log_parser
from src import analysis
from src import analysis_logger
from src import cproc
from src import hproc
from src import cadvisor
from src import plotting
from src import plotting_configurations

0
clean_repo.sh → bash-utils/clean_repo.sh Normal file → Executable file
View File

View File

@@ -5,13 +5,16 @@ rm -f ./kurtosisrun_log.txt
rm -f /tmp/hostproc-signal.fifo
rm -rf ./wakurtosis_logs ./config/topology_generated ./monitoring/host-proc/stats ./monitoring/dstats/stats monitoring/container-proc/cproc_metrics.json
docker stop gennet cadvisor dstats host-proc analysis > /dev/null 2>&1
docker rm gennet cadvisor dstats host-proc analysis > /dev/null 2>&1
docker stop gennet cadvisor bootstrap_node dstats host-proc analysis > /dev/null 2>&1
docker rm gennet cadvisor bootstrap_node dstats host-proc analysis > /dev/null 2>&1
kurtosis --cli-log-level "error" enclave rm -f $enclave_name > /dev/null 2>&1
docker stop $(docker ps -qa) > /dev/null 2>&1
docker rm $(docker ps -qa) > /dev/null 2>&1
toml_file="config/traits/discv5.toml"
sed -i "s/^discv5-bootstrap-node=\".*\"$/discv5-bootstrap-node=""/" "$toml_file"
#cleanup any host waku processes
#sudo killall -15 wakunode waku

6
prepare_env.sh → bash-utils/prepare_env.sh Normal file → Executable file
View File

@@ -5,11 +5,13 @@ kurtosis engine stop
ulimit -n $(ulimit -n -H)
ulimit -u $(ulimit -u -H)
sudo sysctl -w net.ipv4.neigh.default.gc_thresh3=4096
sudo sysctl -w net.ipv4.neigh.default.gc_thresh1=16384
sudo sysctl -w net.ipv4.neigh.default.gc_thresh2=28672
sudo sysctl -w net.ipv4.neigh.default.gc_thresh3=32768
sudo sysctl fs.inotify.max_user_instances=1048576
sudo sysctl -w vm.max_map_count=262144
sudo docker container rm -f $(docker container ls -aq)
sudo docker volume rm -f $(docker volume ls -q)
kurtosis engine start
kurtosis engine start

View File

@@ -0,0 +1,17 @@
#!/bin/sh
IP=$(ip a | grep "inet " | grep -Fv 127.0.0.1 | sed 's/.*inet \([^/]*\).*/\1/')
exec /usr/bin/wakunode\
--relay=true\
--rpc-admin=true\
--keep-alive=true\
--max-connections=150\
--dns-discovery=true\
--discv5-discovery=true\
--discv5-enr-auto-update=True\
--log-level=INFO\
--rpc-address=0.0.0.0\
--metrics-server=True\
--metrics-server-address=0.0.0.0\
--nat=extip:${IP}

5
bash-utils/run_waku_node.sh Executable file
View File

@@ -0,0 +1,5 @@
#!/bin/sh
IP=$(ip a | grep "inet " | grep -Fv 127.0.0.1 | sed 's/.*inet \([^/]*\).*/\1/')
exec /usr/bin/wakunode --config-file="$1" --ports-shift="$2" --nat=extip:"${IP}" --log-level=TRACE

View File

@@ -6,15 +6,15 @@ apt-get install -y jq
# Install the suitable kurtosis-cli
required_version=0.70.2
installed_version=`kurtosis version | grep -v WARN`
installed_version=`kurtosis version`
if [ "$installed_version" = "$required_version" ]; then
echo "Kurtosis version is up to date : $installed_version"
else
else
echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
sudo apt update
sudo apt-mark unhold kurtosis-cli
sudo apt install kurtosis-cli=$required_version
sudo apt install kurtosis-cli="$required_version"
sudo apt-mark hold kurtosis-cli
sudo rm /etc/apt/sources.list.d/kurtosis.list
fi
@@ -42,7 +42,7 @@ cd wls-module
docker build -t wls:0.0.1 .
cd ..
cd ./monitoring/container-proc
cd monitoring/container-proc
sh ./build.sh
cd ..

View File

@@ -6,17 +6,18 @@
"enclave_name": "wakurtosis",
"topology_path": "./config/topology_generated/",
"jobs": 4,
"interconnect_nodes": false,
"interconnection_batch": 10
},
"gennet": {
"num_nodes": 25,
"fanout": 13,
"num_nodes": 9,
"fanout": 3,
"num_topics": 1,
"num_partitions": 1,
"num_subnets": 1,
"container_size": "1",
"node_type_distribution": {
"nwaku:relay:rpc:metrics": 100,
"nwaku:relay:rpc:metrics:discv5": 100,
"gowaku:rln:dnsdisc:dns": 0
},
"network_type": "newmanwattsstrogatz",
@@ -27,8 +28,8 @@
"debug_level": "DEBUG",
"simulation_time": 60,
"message_rate": 10,
"min_packet_size": 1024,
"max_packet_size": 10240,
"min_packet_size": 2,
"max_packet_size": 1024,
"inter_msg_type": "poisson",
"dist_type": "gaussian",
"emitters_fraction": 1.0
@@ -42,18 +43,6 @@
"metrics_filename": "./cproc_metrics.json"
},
"plotting": {
"hproc": {
"out_prefix" : "output",
"aggregate" : "False",
"to_plot" : {
"Network": "True",
"ColPanel" : ["CPUPerc", "MemUse", "NetRecv", "NetSent"],
"ValueCluster" : [],
"DegColPanel" : ["CPUPerc", "MemUse", "NetRecv", "NetSent"],
"SettlingTime" : "True",
"Compare" : "True"
}
},
"by_node": [
"container_cpu_load_average_10s",
"container_memory_usage_bytes",

5
config/run_waku_node.sh Normal file
View File

@@ -0,0 +1,5 @@
#!/bin/sh
IP=$(ip a | grep "inet " | grep -Fv 127.0.0.1 | sed 's/.*inet \([^/]*\).*/\1/')
exec /usr/bin/wakunode --config-file="$1" --ports-shift="$2" --nat=extip:"${IP}" --log-level=TRACE

View File

@@ -2,4 +2,5 @@ topics="test asd"
rpc-admin=true
keep-alive=true
metrics-server=true
nodekey="80fb8f9b71c808bc186bc8c6639a12446e667d031e8ac4896efad603e21728b2"
nodekey="80fb8f9b71c808bc186bc8c6639a12446e667d031e8ac4896efad603e21728b2"
rpc-address="0.0.0.0"

View File

@@ -2,4 +2,5 @@ topics="test"
rpc-admin=true
keep-alive=true
metrics-server=true
nodekey="80fb8f9b71c808bc186bc8c6639a12446e667d031e8ac4896efad603e21728b2"
nodekey="80fb8f9b71c808bc186bc8c6639a12446e667d031e8ac4896efad603e21728b2"
rpc-address="0.0.0.0"

View File

@@ -2,4 +2,5 @@ topics="test"
rpc-admin=true
keep-alive=true
metrics-server=true
nodekey="80fb8f9b71c808bc186bc8c6639a12446e667d031e8ac4896efad603e21728b3"
nodekey="80fb8f9b71c808bc186bc8c6639a12446e667d031e8ac4896efad603e21728b3"
rpc-address="0.0.0.0"

View File

@@ -2,4 +2,5 @@ topics="test"
rpc-admin=true
keep-alive=true
metrics-server=true
nodekey="80fb8f9b71c808bc186bc8c6639a12446e667d031e8ac4896efad603e21728b3"
nodekey="80fb8f9b71c808bc186bc8c6639a12446e667d031e8ac4896efad603e21728b3"
rpc-address="0.0.0.0"

View File

@@ -2,4 +2,5 @@ topics="test"
rpc-admin=true
keep-alive=true
metrics-server=true
nodekey="80fb8f9b71c808bc186bc8c6639a12446e667d031e8ac4896efad603e21728b4"
nodekey="80fb8f9b71c808bc186bc8c6639a12446e667d031e8ac4896efad603e21728b4"
rpc-address="0.0.0.0"

View File

@@ -2,4 +2,5 @@ topics="test"
rpc-admin=true
keep-alive=true
metrics-server=true
nodekey="80fb8f9b71c808bc186bc8c6639a12446e667d031e8ac4896efad603e21728b5"
nodekey="80fb8f9b71c808bc186bc8c6639a12446e667d031e8ac4896efad603e21728b5"
rpc-address="0.0.0.0"

View File

@@ -10,7 +10,7 @@
# nodes it communicates with. This option allows to
# enable/disable this functionality [=false].
discv5-discovery=false
discv5-discovery=true
#discv5-udp-port=
#discv5-bootstrap-node=
#discv5-enr-auto-update=
discv5-bootstrap-node=""
discv5-enr-auto-update=true

View File

@@ -7,6 +7,6 @@
# ValidIpAddress.init("1.0.0.1")]].
metrics-server=true
#metrics-server-address=
metrics-server-address="0.0.0.0"
metrics-server-port=8008
#metrics-logging=

View File

@@ -41,7 +41,7 @@ tcp-port=60000
keep-alive=false
max-connections=50
# NOTE: Port-shift will be handled automatically by Kurtosis if using multinode approach.
ports-shift=0
#nat=any
#ext-multiaddr=

View File

@@ -8,7 +8,7 @@
# [=false].
rpc=true
rpc-address="127.0.0.1"
rpc-address="0.0.0.0"
rpc-port=8545
rpc-admin=true
rpc-private=false

View File

@@ -3,7 +3,7 @@
rm -f traits
rm -rf network_data
cp -r ../config/traits .
cp -r ../config/traits .
docker build -t gennet .
rm -rf traits
ln -s ../config/traits .
ln -s ../config/traits .

View File

@@ -383,10 +383,10 @@ def generate_and_write_files(ctx: typer, G):
for container, nodes in container2nodes.items():
json_dump[CONTAINERS_JSON][container] = nodes
i, traits_dir = 0, ctx.params["traits_dir"]
i, traits_dir = 0, ctx.params["traits_dir"]
for node in G.nodes:
# write the per node toml for the i^ith node of appropriate type
traits_list, i = traits_distribution[i].split(":"), i+1
traits_list, i = traits_distribution[i].split(":"), i + 1
node_type = nodeType(traits_list[0])
write_toml(ctx.params["output_dir"], node, generate_toml(traits_dir, topics, traits_list))
json_dump[NODES_JSON][node] = {}
@@ -395,13 +395,14 @@ def generate_and_write_files(ctx: typer, G):
json_dump[NODES_JSON][node]["static_nodes"].append(edge[1])
json_dump[NODES_JSON][node][SUBNET_PREFIX] = node2subnet[node]
json_dump[NODES_JSON][node]["image"] = nodeTypeToDocker.get(node_type)
# the per node tomls will continue for now as they include topics
# the per node tomls will continue for now as they include topics
json_dump[NODES_JSON][node]["node_config"] = f"{node}.toml"
# logs ought to continue as they need to be unique
# logs ought to continue as they need to be unique
json_dump[NODES_JSON][node]["node_log"] = f"{node}.log"
port_shift, cid = node2container[node]
json_dump[NODES_JSON][node]["port_shift"] = port_shift
json_dump[NODES_JSON][node]["container_id"] = cid
write_json(ctx.params["output_dir"], json_dump) # network wide json

View File

@@ -24,14 +24,16 @@ def run(plan, args):
network_topology = json.decode(network_topology)
# Set up nodes
nodes.instantiate_services(plan, network_topology, False)
nodes.instantiate_services(plan, network_topology, True, False)
# Set up prometheus + grafana
prometheus_service = prometheus.set_up_prometheus(plan, network_topology)
grafana_service = grafana.set_up_grafana(plan, prometheus_service)
nodes.interconnect_nodes(plan, network_topology, interconnection_batch)
if kurtosis_config[vars.INTERCONNECT_NODES]:
nodes.interconnect_nodes(plan, network_topology, interconnection_batch)
# Setup WLS & Start the Simulation
wls_service = wls.init(plan, network_topology, config_file)
wls_service = wls.init(plan, network_topology, config_file, prometheus_service)

View File

@@ -1,5 +1,4 @@
#!/bin/sh
image_id=$(docker images -q container-proc)
echo $image_id
docker image rm -f $image_id
docker build --rm --no-cache --progress=plain -t container-proc .
docker build -t container-proc .

111
run.sh
View File

@@ -14,36 +14,19 @@ wakurtosis_config_file=${3:-"config.json"}
dir=$(pwd)
loglevel="error"
echo "- Metrics Infra: " $metrics_infra
echo "- Enclave name: " $enclave_name
echo "- Configuration file: " $wakurtosis_config_file
echo "- Metrics Infra: " "$metrics_infra"
echo "- Enclave name: " "$enclave_name"
echo "- Configuration file: " "$wakurtosis_config_file"
# cleanup previous runs
echo -e "\Cleaning up previous runs"
sh ./cleanup.sh $enclave_name
echo -e "\Done cleaning up previous runs"
# Cleanup previous runs
echo "Cleaning up previous runs"
sh ./bash-utils/cleanup.sh $enclave_name
echo "Done cleaning up previous runs"
# make sure the prometheus and grafana configs are readable
chmod a+r monitoring/prometheus.yml monitoring/configuration/config/grafana.ini ./monitoring/configuration/config/provisioning/dashboards/dashboard.yaml
##################### END
##################### GENNET
echo -e "\nRunning network generation"
docker run --name cgennet -v ${dir}/config/:/config:ro gennet --config-file /config/${wakurtosis_config_file} --traits-dir /config/traits
err=$?
if [ $err != 0 ]; then
echo "Gennet failed with error code $err"
exit
fi
# copy the network generated TODO: remove this extra copy
docker cp cgennet:/gennet/network_data ${dir}/config/topology_generated
docker rm cgennet > /dev/null 2>&1
##################### END
kurtosis_run="kurtosis_run.log"
kurtosis_inspect="kurtosis_inspect.log"
@@ -51,20 +34,22 @@ usr=`id -u`
grp=`id -g`
stats_dir=stats
signal_fifo=/tmp/hostproc-signal.fifo # do not create fifo under ./stats, or inside the repo
##################### MONITORING MODULE PROLOGUES
if [ "$metrics_infra" = "cadvisor" ]; then #CADVISOR
# prepare the enclave
##################### PREPARING ENCLAVE
# prepare the enclave
echo "Preparing the enclave..."
kurtosis --cli-log-level $loglevel enclave add --name ${enclave_name}
enclave_prefix=$(kurtosis --cli-log-level $loglevel enclave inspect --full-uuids $enclave_name | grep UUID: | awk '{print $2}')
echo "Enclave network: "$enclave_prefix
# get the last IP of the enclave
subnet="$(docker network inspect $enclave_prefix | jq -r '.[].IPAM.Config[0].Subnet')"
echo "Enclave subnetwork: $subnet"
last_ip="$(ipcalc $subnet | grep HostMax | awk '{print $2}')"
echo "cAdvisor IP: $last_ip"
#####################
##################### MONITORING MODULE PROLOGUES
if [ "$metrics_infra" = "cadvisor" ]; then #CADVISOR
echo "cAdvisor IP: $last_ip"
# set up the cadvisor
docker run --volume=/:/rootfs:ro --volume=/var/run:/var/run:rw --volume=/var/lib/docker/:/var/lib/docker:ro --volume=/dev/disk/:/dev/disk:ro --volume=/sys:/sys:ro --volume=/etc/machine-id:/etc/machine-id:ro --publish=8080:8080 --detach=true --name=cadvisor --privileged --device=/dev/kmsg --network $enclave_prefix --ip=$last_ip gcr.io/cadvisor/cadvisor:v0.47.0
elif [ "$metrics_infra" = "dstats" ]; then # HOST-PROC
@@ -84,19 +69,68 @@ fi
##################### END
##################### BOOTSTRAP NODE
echo "Setting up bootstrap node"
# Get bootstrap IP in enclave
IFS='.' ip_parts="$last_ip"
part1=$(echo "$ip_parts" | cut -d '.' -f 1)
part2=$(echo "$ip_parts" | cut -d '.' -f 2)
part3=$(echo "$ip_parts" | cut -d '.' -f 3)
part4=$(echo "$ip_parts" | cut -d '.' -f 4)
previous_part=$((part4 - 1))
bootstrap_ip="$part1.$part2.$part3.$previous_part"
IFS=' '
echo "Bootstrap node IP: $bootstrap_ip"
docker run --name bootstrap_node -p 127.0.0.1:60000:60000 -p 127.0.0.1:8008:8008 -p 127.0.0.1:9000:9000 -p 127.0.0.1:8545:8545 -v "$(pwd)/bash-utils/run_bootstrap_node.sh:/opt/runnode.sh:Z" --detach=true --network $enclave_prefix --ip="$bootstrap_ip" --entrypoint sh statusteam/nim-waku:nwaku-trace3 -c "/opt/runnode.sh" >/dev/null 2>&1
RETRIES_TRAFFIC=${RETRIES_TRAFFIC:=10}
while [ -z "${NODE_ENR}" ] && [ ${RETRIES_TRAFFIC} -ge 0 ]; do
NODE_ENR=$(wget -O - --post-data='{"jsonrpc":"2.0","method":"get_waku_v2_debug_v1_info","params":[],"id":1}' --header='Content-Type:application/json' http://localhost:8545/ 2> /dev/null | sed 's/.*"enrUri":"\([^"]*\)".*/\1/');
echo "Trying to get Bootstrap ENR, but node still not ready, retrying (retries left: ${RETRIES_TRAFFIC})"
sleep 1
RETRIES_TRAFFIC=$(( $RETRIES_TRAFFIC - 1 ))
done
echo "Bootstrap ENR: ${NODE_ENR}"
# Specify the path to your TOML file
echo "Injecting ENR in Discv5 toml"
toml_file="config/traits/discv5.toml"
sed -i "s|discv5-bootstrap-node=\(.*\)|discv5-bootstrap-node=[\"${NODE_ENR}\"]|" $toml_file
##################### END
##################### GENNET
# Run Gennet docker container
echo "Running network generation"
docker run --name cgennet -v ${dir}/config/:/config:ro gennet --config-file /config/"${wakurtosis_config_file}" --traits-dir /config/traits
err=$?
if [ $err != 0 ]; then
echo "Gennet failed with error code $err"
exit
fi
# Copy the network generated TODO: remove this extra copy
docker cp cgennet:/gennet/network_data "${dir}"/config/topology_generated
docker rm cgennet > /dev/null 2>&1
##################### END
##################### KURTOSIS RUN
# Create the new enclave and run the simulation
jobs=$(cat config/${wakurtosis_config_file} | jq -r ".kurtosis.jobs")
echo -e "\nSetting up the enclave: $enclave_name"
jobs=$(cat "${dir}"/config/"${wakurtosis_config_file}" | jq -r ".kurtosis.jobs")
echo "\nSetting up the enclave: $enclave_name"
kurtosis_cmd="kurtosis --cli-log-level \"$loglevel\" run --full-uuids --enclave ${enclave_name} . '{\"wakurtosis_config_file\" : \"config/${wakurtosis_config_file}\"}' --parallelism ${jobs} > $kurtosis_run 2>&1"
START=$(date +%s)
eval $kurtosis_cmd
eval "$kurtosis_cmd"
END1=$(date +%s)
DIFF1=$(( $END1 - $START ))
echo -e "Enclave $enclave_name is up and running: took $DIFF1 secs to setup"
echo "Enclave $enclave_name is up and running: took $DIFF1 secs to setup"
sed -n '/Starlark code successfully run. No output was returned./,$p' $kurtosis_run > $kurtosis_inspect
# Extract the WLS service name
@@ -147,7 +181,7 @@ fi
#grafana_host=$(kurtosis enclave inspect $enclave_name | grep "\<grafana\>" | awk '{print $6}')
grafana_host=$(grep "\<grafana\>" $kurtosis_inspect | awk '{print $6}')
echo -e "\n--> Statistics in Grafana server at http://$grafana_host/ <--"
echo "\n--> Statistics in Grafana server at http://$grafana_host/ <--"
echo "Output of kurtosis run command written in $kurtosis_run"
##################### END
@@ -155,9 +189,9 @@ echo "Output of kurtosis run command written in $kurtosis_run"
##################### WAIT FOR THE WLS TO FINISH
# Wait for the container to halt; this will block
echo -e "Waiting for simulation to finish ..."
echo "Waiting for simulation to finish ..."
status_code="$(docker container wait $wls_cid)"
echo -e "Simulation ended with code $status_code Results in ./${enclave_name}_logs"
echo "Simulation ended with code $status_code Results in ./${enclave_name}_logs"
END2=$(date +%s)
DIFF2=$(( $END2 - $END1 ))
echo "Simulation took $DIFF1 + $DIFF2 = $(( $END2 - $START)) secs"
@@ -185,7 +219,7 @@ elif [ "$metrics_infra" = "host-proc" ]; then
echo "Copying the host-proc data"
cp -r ./monitoring/host-proc/stats ${enclave_name}_logs/host-proc-data
elif [ "$metrics_infra" = "container-proc" ]; then
echo -e "Waiting monitoring to finish ..."
echo "Waiting monitoring to finish ..."
wait $monitor_pid
echo "Copying the container-proc measurements"
cp ./monitoring/container-proc/cproc_metrics.json "./${enclave_name}_logs/cproc_metrics.json" > /dev/null 2>&1
@@ -199,11 +233,12 @@ echo "- Configuration file: $wakurtosis_config_file" >> ./${enclave_name}_logs/
# Copy simulation results
docker cp "$wls_cid:/wls/network_topology/network_data.json" "./${enclave_name}_logs"
docker cp "$wls_cid:/wls/messages.json" "./${enclave_name}_logs"
docker cp "$wls_cid:/wls/prometheus_data.json" "./${enclave_name}_logs"
# Run analysis
if jq -e ."plotting" >/dev/null 2>&1 "./config/${wakurtosis_config_file}"; then
if [ "$metrics_infra" = "dstats" ]; then
docker run --name "dstats" --network "host" -v "$(pwd)/wakurtosis_logs:/simulation_data/" --add-host=host.docker.internal:host-gateway analysis src/hproc.py dstats /simulation_data/ --config-file /simulation_data/config/config.json >/dev/null 2>&1
docker run --name "dstats" --network "host" -v "$(pwd)/wakurtosis_logs:/simulation_data/" --add-host=host.docker.internal:host-gateway analysis src/hproc.py dstats /simulation_data/ --config-file /simulation_data/config/config.json >/dev/null 2>&1
docker cp dstats:/analysis/plots/ wakurtosis_logs/dstats-plots
cd wakurtosis_logs
ln -s dstats-plots/output-dstats-compare.pdf analysis.pdf

View File

@@ -5,7 +5,7 @@ vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star")
files = import_module(vars.FILE_HELPERS_MODULE)
dispatchers = import_module(vars.DISPATCHERS_MODULE)
def instantiate_services(plan, network_topology, testing):
def instantiate_services(plan, network_topology, discovery, testing):
"""
As we will need to access for the service information later, we are adding Starlark info into
the network topology.:
@@ -29,6 +29,8 @@ def instantiate_services(plan, network_topology, testing):
"""
all_services_configuration = {}
run_artifact = plan.upload_files(src=vars.RUN_SCRIPT_FILE)
for service_id, nodes_in_service in network_topology[vars.GENNET_ALL_CONTAINERS_KEY].items():
image = network_topology[vars.GENNET_NODES_KEY][nodes_in_service[0]][vars.GENNET_IMAGE_KEY]
service_builder = dispatchers.service_builder_dispatcher[image]
@@ -37,20 +39,20 @@ def instantiate_services(plan, network_topology, testing):
config_file_names = [network_topology[vars.GENNET_NODES_KEY][node][vars.GENNET_CONFIG_KEY]
for node in nodes_in_service]
config_files_artifact_ids = [
toml_files_artifact_ids = [
files.get_toml_configuration_artifact(plan, config_file_name, service_name, testing)
for config_file_name, service_name
in zip(config_file_names, nodes_in_service)
]
service_builder(nodes_in_service, all_services_configuration, config_file_names,
config_files_artifact_ids, service_id, network_topology)
toml_files_artifact_ids, run_artifact, service_id, network_topology, discovery)
all_services_information = plan.add_services(
configs=all_services_configuration
)
_add_service_info_to_topology(plan, all_services_information, network_topology)
_add_service_info_to_topology(plan, all_services_information, network_topology, discovery)
def interconnect_nodes(plan, topology_information, interconnection_batch):
@@ -68,13 +70,13 @@ def interconnect_nodes(plan, topology_information, interconnection_batch):
for peer in peers[i:i + interconnection_batch]]
connect_node_to_peers(plan, nodes_in_topology[node_id][vars.GENNET_NODE_CONTAINER_KEY],
node_id, vars.RPC_PORT_ID, peer_ids)
node_id, vars.WAKU_RPC_PORT_ID, peer_ids)
def _add_service_info_to_topology(plan, all_services_information, network_topology):
def _add_service_info_to_topology(plan, all_services_information, network_topology, discovery):
for node_id, node_info in network_topology[vars.GENNET_NODES_KEY].items():
node_rpc_port_id = vars.RPC_PORT_ID + vars.ID_STR_SEPARATOR + node_id
node_rpc_port_id = vars.WAKU_RPC_PORT_ID + vars.ID_STR_SEPARATOR + node_id
image = network_topology[vars.GENNET_NODES_KEY][node_id][vars.GENNET_IMAGE_KEY]
peer_id_getter = dispatchers.service_info_dispatcher[image]
@@ -87,7 +89,7 @@ def _add_service_info_to_topology(plan, all_services_information, network_topolo
all_services_information[node_info[vars.GENNET_NODE_CONTAINER_KEY]].ip_address
ports_adder = dispatchers.ports_dispatcher[node_info[vars.GENNET_IMAGE_KEY]]
ports_adder(network_topology, all_services_information, node_info, node_id)
ports_adder(network_topology, all_services_information, node_info, node_id, discovery)
for container_id, container_info in network_topology[vars.GENNET_ALL_CONTAINERS_KEY].items():
nodes = container_info

View File

@@ -11,7 +11,7 @@ def test_instantiate_services(plan):
vars.DEFAULT_TOPOLOGY_FILE_DEFAULT_ARGUMENT_VALUE)
topology = json.decode(topology)
node_builders.instantiate_services(plan, topology, True)
node_builders.instantiate_services(plan, topology, False, True)
for node_info in topology["nodes"].values():
plan.assert(value="peer_id", assertion="IN", target_value=node_info.keys())

View File

@@ -6,8 +6,9 @@ waku_builder = import_module(vars.WAKU_BUILDER_MODULE)
def prepare_gowaku_service(gowakunode_names, all_services, config_files, artifact_ids, service_id,
network_topology):
prepared_ports = waku_builder.prepare_waku_ports_in_service(gowakunode_names, network_topology)
network_topology, discovery):
prepared_ports = waku_builder.prepare_waku_ports_in_service(gowakunode_names, network_topology,
discovery)
prepared_files = waku_builder.prepare_waku_config_files_in_service(gowakunode_names, artifact_ids)
prepared_cmd = _prepare_gowaku_cmd_in_service(gowakunode_names, config_files, network_topology)

View File

@@ -33,7 +33,7 @@ def _prepare_nomos_cmd_in_service(nomos_names, config_files):
def _prepare_nomos_ports_in_service(node_names):
prepared_ports = {}
for i in range(len(node_names)):
prepared_ports[vars.RPC_PORT_ID + vars.ID_STR_SEPARATOR + node_names[i]] = \
prepared_ports[vars.WAKU_RPC_PORT_ID + vars.ID_STR_SEPARATOR + node_names[i]] = \
PortSpec(number=vars.NOMOS_RPC_PORT_NUMBER + i,
transport_protocol=vars.NOMOS_RPC_PORT_PROTOCOL)
@@ -58,7 +58,7 @@ def _prepare_nomos_config_files_in_service(node_names, artifact_ids):
def add_nomos_ports_info_to_topology(network_topology, all_services_information, node_info, node_id):
nomos_rpc_port_id = vars.RPC_PORT_ID + vars.ID_STR_SEPARATOR + node_id
nomos_rpc_port_id = vars.WAKU_RPC_PORT_ID + vars.ID_STR_SEPARATOR + node_id
libp2p_port_id = vars.NOMOS_LIBP2P_PORT_ID + vars.ID_STR_SEPARATOR + node_id
prometheus_port_id = vars.PROMETHEUS_PORT_ID + vars.ID_STR_SEPARATOR + node_id

View File

@@ -3,12 +3,15 @@ vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star")
# Module Imports
waku_builder = import_module(vars.WAKU_BUILDER_MODULE)
files = import_module(vars.FILE_HELPERS_MODULE)
def prepare_nwaku_service(nwakunode_names, all_services, config_files, artifact_ids, service_id,
network_topology):
prepared_ports = waku_builder.prepare_waku_ports_in_service(nwakunode_names, network_topology)
prepared_files = waku_builder.prepare_waku_config_files_in_service(nwakunode_names, artifact_ids)
def prepare_nwaku_service(nwakunode_names, all_services, config_files, artifact_ids, run_artifact_id,
service_id, network_topology, discovery):
prepared_ports = waku_builder.prepare_waku_ports_in_service(nwakunode_names, network_topology,
discovery)
prepared_files = waku_builder.prepare_waku_config_files_in_service(nwakunode_names,
artifact_ids, run_artifact_id)
prepared_cmd = _prepare_nwaku_cmd_in_service(nwakunode_names, config_files, network_topology)
add_service_config = ServiceConfig(
@@ -24,13 +27,14 @@ def prepare_nwaku_service(nwakunode_names, all_services, config_files, artifact_
def _prepare_nwaku_cmd_in_service(nwakunode_names, config_files, network_topology):
prepared_cmd = ""
for i in range(len(nwakunode_names)):
prepared_cmd += vars.NWAKU_ENTRYPOINT + " "
prepared_cmd += vars.WAKUNODE_CONFIGURATION_FILE_FLAG + \
vars.CONTAINER_NODE_CONFIG_FILE_LOCATION + \
nwakunode_names[i] + "/" + config_files[i] + " "
prepared_cmd += vars.WAKUNODE_PORT_SHIFT_FLAG + \
str(network_topology[vars.GENNET_NODES_KEY][nwakunode_names[i]][vars.GENNET_PORT_SHIFT_KEY])
prepared_cmd += vars.CONTAINER_NODE_SCRIPT_RUN_LOCATION + vars.NWAKU_SCRIPT_ENTRYPOINT + " "
prepared_cmd += vars.CONTAINER_NODE_CONFIG_FILE_LOCATION + nwakunode_names[i] + "/" + config_files[i] + " "
prepared_cmd += str(network_topology[vars.GENNET_NODES_KEY][nwakunode_names[i]][vars.GENNET_PORT_SHIFT_KEY])
if i != len(nwakunode_names) - 1:
prepared_cmd += " & "

View File

@@ -13,7 +13,7 @@ def test_prepare_gowaku_service(plan):
gowaku_builder.prepare_gowaku_service(["test1", "test2"], test_dict,
["test1.toml", "test2.toml"],
["a1", "a2"],
"id_1", topology)
"id_1", topology, False)
# hasattr doesn't work in dicts?
plan.assert(value=str(test_dict.get("id_1")),
@@ -23,7 +23,7 @@ def test_prepare_gowaku_service(plan):
assertion="==", target_value=vars.GOWAKU_IMAGE)
for node in ["test1", "test2"]:
plan.assert(value=str(test_dict["id_1"].ports[vars.RPC_PORT_ID+vars.ID_STR_SEPARATOR+node].number),
plan.assert(value=str(test_dict["id_1"].ports[vars.WAKU_RPC_PORT_ID+vars.ID_STR_SEPARATOR+node].number),
assertion="==", target_value = str(vars.WAKU_RPC_PORT_NUMBER +
topology["nodes"][node][vars.GENNET_PORT_SHIFT_KEY]))
plan.assert(value=str(test_dict["id_1"].ports[vars.PROMETHEUS_PORT_ID+vars.ID_STR_SEPARATOR+node].number),

View File

@@ -11,8 +11,8 @@ def test_prepare_nwaku_service(plan):
nwaku_builder.prepare_nwaku_service(["test1", "test2"], test_dict,
["test1.toml", "test2.toml"],
["a1", "a2"],
"id_1", topology)
["a1", "a2"], "run",
"id_1", topology, False)
# hasattr doesn't work in dicts?
plan.assert(value=str(test_dict.get("id_1")),
@@ -22,7 +22,7 @@ def test_prepare_nwaku_service(plan):
assertion="==", target_value=vars.NWAKU_IMAGE)
for node in ["test1", "test2"]:
plan.assert(value=str(test_dict["id_1"].ports[vars.RPC_PORT_ID+vars.ID_STR_SEPARATOR+node].number),
plan.assert(value=str(test_dict["id_1"].ports[vars.WAKU_RPC_PORT_ID+vars.ID_STR_SEPARATOR+node].number),
assertion="==", target_value = str(vars.WAKU_RPC_PORT_NUMBER +
topology["nodes"][node][vars.GENNET_PORT_SHIFT_KEY]))
plan.assert(value=str(test_dict["id_1"].ports[vars.PROMETHEUS_PORT_ID+vars.ID_STR_SEPARATOR+node].number),
@@ -36,6 +36,10 @@ def test_prepare_nwaku_service(plan):
plan.assert(value=test_dict["id_1"].files[vars.CONTAINER_NODE_CONFIG_FILE_LOCATION+node],
assertion="==", target_value=file)
for node, file in zip(["test1", "test2"], ["run", "run"]):
plan.assert (value=test_dict["id_1"].files[vars.CONTAINER_NODE_SCRIPT_RUN_LOCATION],
assertion="==", target_value=file)
for i in range(len(test_dict["id_1"].entrypoint)):
plan.assert(value=test_dict["id_1"].entrypoint[i], assertion="==",
target_value=vars.GENERAL_ENTRYPOINT[i])
@@ -48,10 +52,9 @@ def test__prepare_nwaku_cmd_in_service(plan):
plan.assert(value=result[0],
assertion="==",
target_value=vars.NWAKU_ENTRYPOINT+" "+vars.WAKUNODE_CONFIGURATION_FILE_FLAG+
vars.CONTAINER_NODE_CONFIG_FILE_LOCATION+"a"+"/"+"c"+" "+
vars.WAKUNODE_PORT_SHIFT_FLAG+"0"+" & "+
vars.NWAKU_ENTRYPOINT+" "+vars.WAKUNODE_CONFIGURATION_FILE_FLAG+
vars.CONTAINER_NODE_CONFIG_FILE_LOCATION+"b"+"/"+"d"+" "+
vars.WAKUNODE_PORT_SHIFT_FLAG+"1"
target_value=vars.CONTAINER_NODE_SCRIPT_RUN_LOCATION+
vars.NWAKU_SCRIPT_ENTRYPOINT+" "+vars.CONTAINER_NODE_CONFIG_FILE_LOCATION
+"a"+"/"+"c"+" "+"0"+" & "+vars.CONTAINER_NODE_SCRIPT_RUN_LOCATION+
vars.NWAKU_SCRIPT_ENTRYPOINT+" "+vars.CONTAINER_NODE_CONFIG_FILE_LOCATION+
"b"+"/"+"d"+" "+"1"
)

View File

@@ -8,10 +8,10 @@ waku_builders = import_module(vars.WAKU_BUILDER_MODULE)
def test_prepare_waku_ports_in_service(plan):
topology = {"nodes":{"test1": {vars.GENNET_PORT_SHIFT_KEY : 0},
"test2": {vars.GENNET_PORT_SHIFT_KEY : 1}}}
ports = waku_builders.prepare_waku_ports_in_service(["test1", "test2"], topology)
ports = waku_builders.prepare_waku_ports_in_service(["test1", "test2"], topology, False)
for node_name in ["test1", "test2"]:
plan.assert(value=str(ports[vars.RPC_PORT_ID+vars.ID_STR_SEPARATOR+node_name].number),
plan.assert(value=str(ports[vars.WAKU_RPC_PORT_ID+vars.ID_STR_SEPARATOR+node_name].number),
assertion="==", target_value = str(vars.WAKU_RPC_PORT_NUMBER +
topology["nodes"][node_name][vars.GENNET_PORT_SHIFT_KEY]))
plan.assert(value=str(ports[vars.PROMETHEUS_PORT_ID+vars.ID_STR_SEPARATOR+node_name].number),
@@ -24,26 +24,33 @@ def test_prepare_waku_ports_in_service(plan):
def test_prepare_waku_config_files_in_service(plan):
names = ["test1", "test2"]
artifact_ids = ["a1", "a2"]
run_artifact_id = ["run", "run"]
files = waku_builders.prepare_waku_config_files_in_service(names, artifact_ids)
files = waku_builders.prepare_waku_config_files_in_service(names, artifact_ids, "run")
for name, artif_id in zip(names, artifact_ids):
for name, artif_id, run_id in zip(names, artifact_ids, run_artifact_id):
plan.assert(value=files[vars.CONTAINER_NODE_CONFIG_FILE_LOCATION+name],
assertion="==", target_value=artif_id)
plan.assert (value=files[vars.CONTAINER_NODE_SCRIPT_RUN_LOCATION],
assertion="==", target_value=run_id)
def test_add_waku_ports_info_to_topology(plan):
network_topology = {"nodes": {"test1": {}, "test2": {}}}
service_struct_1 = struct(ports={vars.RPC_PORT_ID+"-test1": PortSpec(number=1),
vars.WAKU_LIBP2P_PORT_ID+"-test1": PortSpec(number=2),
vars.PROMETHEUS_PORT_ID+"-test1": PortSpec(number=3)})
service_struct_1 = struct(ports={vars.WAKU_RPC_PORT_ID+"-test1": PortSpec(number=1,
transport_protocol="TCP"),
vars.WAKU_LIBP2P_PORT_ID+"-test1": PortSpec(number=2,
transport_protocol="TCP"),
vars.PROMETHEUS_PORT_ID+"-test1": PortSpec(number=3,
transport_protocol="TCP")})
node_info1 = {vars.GENNET_NODE_CONTAINER_KEY: "cid1"}
services = {"cid1": service_struct_1}
waku_builders.add_waku_ports_info_to_topology(network_topology, services, node_info1, "test1")
waku_builders.add_waku_ports_info_to_topology(network_topology, services, node_info1, "test1",
False)
plan.assert(value=str(network_topology["nodes"]["test1"]["ports"][vars.RPC_PORT_ID+"-test1"][0]),
plan.assert(value=str(network_topology["nodes"]["test1"]["ports"][vars.WAKU_RPC_PORT_ID+"-test1"][0]),
assertion="==", target_value=str(1))
plan.assert(value=str(network_topology["nodes"]["test1"]["ports"][vars.WAKU_LIBP2P_PORT_ID+"-test1"][0]),
assertion="==", target_value=str(2))

View File

@@ -2,44 +2,62 @@
vars = import_module("github.com/logos-co/wakurtosis/src/system_variables.star")
def prepare_waku_ports_in_service(node_names, network_topology):
def prepare_waku_ports_in_service(node_names, network_topology, discovery):
prepared_ports = {}
for node_name in node_names:
node_info = network_topology[vars.GENNET_NODES_KEY][node_name]
prepared_ports[vars.RPC_PORT_ID + vars.ID_STR_SEPARATOR + node_name] = \
PortSpec(number=vars.WAKU_RPC_PORT_NUMBER + node_info[vars.GENNET_PORT_SHIFT_KEY],
transport_protocol=vars.WAKU_RPC_PORT_PROTOCOL)
prepared_ports[vars.PROMETHEUS_PORT_ID + vars.ID_STR_SEPARATOR + node_name] = \
PortSpec(number=vars.PROMETHEUS_PORT_NUMBER + node_info[vars.GENNET_PORT_SHIFT_KEY],
transport_protocol=vars.PROMETHEUS_PORT_PROTOCOL)
prepared_ports[vars.WAKU_LIBP2P_PORT_ID + vars.ID_STR_SEPARATOR + node_name] = \
PortSpec(number=vars.WAKU_LIBP2P_PORT + node_info[vars.GENNET_PORT_SHIFT_KEY],
transport_protocol=vars.WAKU_LIBP2P_PORT_PROTOCOL)
port_shift = network_topology[vars.GENNET_NODES_KEY][node_name][vars.GENNET_PORT_SHIFT_KEY]
prepare_single_node_waku_ports(prepared_ports, node_name, port_shift, discovery)
return prepared_ports
def prepare_waku_config_files_in_service(node_names, artifact_ids):
def prepare_single_node_waku_ports(prepared_ports, node_name, port_shift, discovery):
    # Registers the PortSpecs for a single waku node into `prepared_ports`
    # (mutated in place; nothing is returned). Keys are built as
    # "<port-id><separator><node_name>", and every base port number is offset
    # by the node's `port_shift` so several nodes can coexist in one service.
    prepared_ports[vars.WAKU_RPC_PORT_ID + vars.ID_STR_SEPARATOR + node_name] = \
        PortSpec(number=vars.WAKU_RPC_PORT_NUMBER + port_shift,
                 transport_protocol=vars.WAKU_RPC_PORT_PROTOCOL)
    prepared_ports[vars.PROMETHEUS_PORT_ID + vars.ID_STR_SEPARATOR + node_name] = \
        PortSpec(number=vars.PROMETHEUS_PORT_NUMBER + port_shift,
                 transport_protocol=vars.PROMETHEUS_PORT_PROTOCOL)
    prepared_ports[vars.WAKU_LIBP2P_PORT_ID + vars.ID_STR_SEPARATOR + node_name] = \
        PortSpec(number=vars.WAKU_LIBP2P_PORT + port_shift,
                 transport_protocol=vars.WAKU_LIBP2P_PORT_PROTOCOL)
    # The discv5 port (UDP per vars.WAKU_DISCV5_PORT_PROTOCOL) is only
    # exposed when discovery is enabled for this run.
    if discovery:
        prepared_ports[vars.WAKU_DISCV5_PORT_ID + vars.ID_STR_SEPARATOR + node_name] = \
            PortSpec(number=vars.WAKU_DISCV5_PORT_NUMBER + port_shift,
                     transport_protocol=vars.WAKU_DISCV5_PORT_PROTOCOL)
def prepare_waku_config_files_in_service(node_names, toml_artifact_ids, run_artifact_id):
prepared_files = {}
for i in range(len(node_names)):
prepared_files[vars.CONTAINER_NODE_CONFIG_FILE_LOCATION + node_names[i]] = artifact_ids[i]
prepared_files[vars.CONTAINER_NODE_CONFIG_FILE_LOCATION + node_names[i]] = toml_artifact_ids[i]
prepared_files[vars.CONTAINER_NODE_SCRIPT_RUN_LOCATION] = run_artifact_id
return prepared_files
def add_waku_ports_info_to_topology(network_topology, all_services_information, node_info, node_id):
waku_rpc_port_id = vars.RPC_PORT_ID + vars.ID_STR_SEPARATOR + node_id
def add_waku_ports_info_to_topology(network_topology, all_services_information, node_info, node_id,
discovery):
waku_rpc_port_id = vars.WAKU_RPC_PORT_ID + vars.ID_STR_SEPARATOR + node_id
libp2p_port_id = vars.WAKU_LIBP2P_PORT_ID + vars.ID_STR_SEPARATOR + node_id
prometheus_port_id = vars.PROMETHEUS_PORT_ID + vars.ID_STR_SEPARATOR + node_id
network_topology[vars.GENNET_NODES_KEY][node_id][vars.PORTS_KEY] = {}
_add_waku_port(network_topology, all_services_information, node_id, node_info, waku_rpc_port_id)
_add_waku_port(network_topology, all_services_information, node_id, node_info, libp2p_port_id)
_add_waku_port(network_topology, all_services_information, node_id, node_info, prometheus_port_id)
_add_waku_port(network_topology, all_services_information, node_id, node_info,
prometheus_port_id)
if discovery:
discv5_port_id = vars.WAKU_DISCV5_PORT_ID + vars.ID_STR_SEPARATOR + node_id
_add_waku_port(network_topology, all_services_information, node_id, node_info,
discv5_port_id)
def _add_waku_port(network_topology, all_services_information, node_id, node_info, port_id):
@@ -47,4 +65,4 @@ def _add_waku_port(network_topology, all_services_information, node_id, node_inf
(all_services_information[node_info[vars.GENNET_NODE_CONTAINER_KEY]].ports[
port_id].number,
all_services_information[node_info[vars.GENNET_NODE_CONTAINER_KEY]].ports[
port_id].transport_protocol)
port_id].transport_protocol)

View File

@@ -59,6 +59,6 @@ def interconnect_nomos_nodes(plan, topology_information, interconnection_batch):
for peer in peers[i:i + interconnection_batch]]
connect_nomos_to_peers(plan, nodes_in_topology[node_id][vars.GENNET_NODE_CONTAINER_KEY],
node_id, vars.RPC_PORT_ID, peer_ids)
node_id, vars.WAKU_RPC_PORT_ID, peer_ids)

View File

@@ -5,29 +5,35 @@ GOWAKU_IMAGE = "gowaku"
# If changing this, you'll likely need to change it as well in gennet
ID_STR_SEPARATOR = "-"
RPC_PORT_ID = "rpc"
NODE_CONFIG_FILE_LOCATION = "github.com/logos-co/wakurtosis/config/topology_generated/"
RUN_SCRIPT_FILE = "github.com/logos-co/wakurtosis/bash-utils/run_waku_node.sh"
CONFIG_FILE_LOCATION = "github.com/logos-co/wakurtosis/config/"
CONTAINER_NODE_CONFIG_FILE_LOCATION = "/node/configuration_file/"
CONTAINER_NODE_SCRIPT_RUN_LOCATION = "/opt/"
GENERAL_ENTRYPOINT = ["/bin/sh", "-c"]
CONFIG_FILE_STARLARK_PARAMETER = "config_file"
# Config file keys
KURTOSIS_KEY = "kurtosis"
WLS_KEY = "wls"
INTERCONNECT_NODES = "interconnect_nodes"
INTERCONNECTION_BATCH_KEY = "interconnection_batch"
# Waku Configuration
WAKU_RPC_PORT_ID = "rpc"
WAKU_RPC_PORT_PROTOCOL = "TCP"
WAKU_RPC_PORT_NUMBER = 8545
WAKU_LIBP2P_PORT_ID = "libp2p"
WAKU_LIBP2P_PORT_PROTOCOL = "TCP"
WAKU_LIBP2P_PORT = 60000
WAKU_DISCV5_PORT_ID = "discv5"
WAKU_DISCV5_PORT_NUMBER = 9000
WAKU_DISCV5_PORT_PROTOCOL = "UDP"
WAKUNODE_CONFIGURATION_FILE_FLAG = "--config-file="
WAKUNODE_PORT_SHIFT_FLAG = "--ports-shift="
NWAKU_ENTRYPOINT = "/usr/bin/wakunode --rpc-address=0.0.0.0 --metrics-server-address=0.0.0.0 --log-level=TRACE"
# NWAKU_ENTRYPOINT = "/usr/bin/wakunode --rpc-address=0.0.0.0 --metrics-server-address=0.0.0.0 --log-level=TRACE"
NWAKU_SCRIPT_ENTRYPOINT = "run_waku_node.sh"
GOWAKU_ENTRYPOINT = "/usr/bin/waku --rpc-address=0.0.0.0 --metrics-server-address=0.0.0.0"
NOMOS_ENTRYPOINT = "/usr/bin/nomos-node"
NOMOS_PORT_SHIFT_FLAG = "--ports-shift="
@@ -76,6 +82,10 @@ CONTAINER_DATASOURCES_GRAFANA = "/etc/grafana/provisioning/datasources/"
CONTAINER_DATASOURCES_FILE_NAME_GRAFANA = "datasources.yaml"
# Gennet topology Keys
GENNET_KEY = "gennet"
GENNET_IMAGE = "gennet"
GENNET_SERVICE_NAME = "gennet"
GENNET_CONFIG_ARTIFACT_NAME = "gennet-config"
GENNET_NODES_KEY = "nodes"
GENNET_PORT_SHIFT_KEY = "port_shift"
GENNET_ALL_CONTAINERS_KEY = "containers"
@@ -148,9 +158,3 @@ DEFAULT_TOPOLOGY_FILE = "network_data.json"
TEST_FILES_LOCATION = "github.com/logos-co/wakurtosis/config/test_files/"
DEFAULT_TOPOLOGY_FILE_DEFAULT_ARGUMENT_VALUE = "test_network_data.json"
DEFAULT_CONFIG_FILE = "github.com/logos-co/wakurtosis/config/config.json"
# Default Simulation Parameters
SIMULATION_TIME = 300
MESSAGE_RATE = 25
MIN_PACKET_SIZE = 1
MAX_PACKET_SIZE = 1024

View File

@@ -17,7 +17,7 @@ def test_waku_methods(plan):
topology = read_file(src=topology_for_test_file)
topology = json.decode(topology)
node_builders.instantiate_services(plan, topology, True)
node_builders.instantiate_services(plan, topology, False, True)
expected_ids = {
"nwaku_0_2": "16Uiu2HAm7ZPmRY3ECVz7fAJQdxEDrBw3ToneYgUryKDJPtz25R2n",
"nwaku_1_2": "16Uiu2HAmV7KPdL24S9Lztu6orfWuHypA9F6NUR4GkBDvWg8U4B5Z"
@@ -43,13 +43,13 @@ def test_send_json_rpc(plan, test_node, test_node_info):
service_id = test_node_info[vars.GENNET_NODE_CONTAINER_KEY]
# Automatically waits for 200
call_protocols.send_json_rpc(plan, service_id, vars.RPC_PORT_ID+vars.ID_STR_SEPARATOR+test_node,
call_protocols.send_json_rpc(plan, service_id, vars.WAKU_RPC_PORT_ID+vars.ID_STR_SEPARATOR+test_node,
vars.POST_RELAY_MESSAGE_METHOD, params)
def test_get_wakunode_peer_id(plan, test_node, test_node_info, expected_ids):
service_id = test_node_info[vars.GENNET_NODE_CONTAINER_KEY]
peer_id = waku.get_wakunode_peer_id(plan, service_id, vars.RPC_PORT_ID+vars.ID_STR_SEPARATOR+test_node)
peer_id = waku.get_wakunode_peer_id(plan, service_id, vars.WAKU_RPC_PORT_ID+vars.ID_STR_SEPARATOR+test_node)
plan.print("Peer ID for " + test_node + ": " + peer_id)
plan.assert(value=peer_id, assertion="==", target_value=expected_ids[test_node])

View File

@@ -51,7 +51,7 @@ def make_service_wait(plan, service_name, time):
def get_waku_peers(plan, waku_service_container, node_name):
extract = {"peers": '.result | length'}
port_name = vars.RPC_PORT_ID + vars.ID_STR_SEPARATOR + node_name
port_name = vars.WAKU_RPC_PORT_ID + vars.ID_STR_SEPARATOR + node_name
response = call_protocols.send_json_rpc(plan, waku_service_container, port_name,
vars.GET_PEERS_METHOD, "", extract)
@@ -73,6 +73,6 @@ def interconnect_waku_nodes(plan, topology_information, interconnection_batch):
for peer in peers[i:i + interconnection_batch]]
connect_wakunode_to_peers(plan, nodes_in_topology[node_id][vars.GENNET_NODE_CONTAINER_KEY],
node_id, vars.RPC_PORT_ID, peer_ids)
node_id, vars.WAKU_RPC_PORT_ID, peer_ids)

View File

@@ -33,7 +33,7 @@ def create_new_topology_information(plan, network_topology, network_artifact_nam
return artifact_id
def create_cmd(config_file):
def create_cmd(config_file, prometheus_service):
cmd = []
config_file_name = config_file.split("/")[-1]
@@ -41,10 +41,14 @@ def create_cmd(config_file):
cmd.append(vars.WLS_CONFIG_PATH + config_file_name)
cmd.append(vars.WLS_TOPOLOGY_FILE_FLAG)
cmd.append(vars.WLS_TOPOLOGY_PATH + vars.CONTAINER_TOPOLOGY_FILE_NAME_WLS)
cmd.append("--prometheus-ip")
cmd.append(prometheus_service.ip_address)
cmd.append("--prometheus-port")
cmd.append(str(prometheus_service.ports[vars.PROMETHEUS_PORT_ID].number))
return cmd
def init(plan, network_topology, config_file):
def init(plan, network_topology, config_file, prometheus_service):
# Generate simulation config
config_artifact = upload_config(plan, config_file, vars.WLS_CONFIG_ARTIFACT_NAME)
@@ -58,7 +62,7 @@ def init(plan, network_topology, config_file):
wls_topology = create_new_topology_information(plan, network_topology,
vars.WLS_TOPOLOGY_ARTIFACT_NAME)
wls_cmd = create_cmd(config_file)
wls_cmd = create_cmd(config_file, prometheus_service)
add_service_config = ServiceConfig(
image=vars.WLS_IMAGE,

View File

@@ -13,7 +13,6 @@ nwaku_builder_test = import_module(vars.TEST_NWAKU_BUILDER_MODULE)
def run(plan, args):
args_parser_test.test_load_config_args_default(plan)
@@ -35,9 +34,8 @@ def run(plan, args):
waku_builder_test.test_prepare_waku_config_files_in_service(plan)
waku_builder_test.test_add_waku_ports_info_to_topology(plan)
gowaku_builder_test.test_prepare_gowaku_service(plan)
gowaku_builder_test.test__prepare_gowaku_cmd_in_service(plan)
# gowaku_builder_test.test_prepare_gowaku_service(plan)
# gowaku_builder_test.test__prepare_gowaku_cmd_in_service(plan)
nwaku_builder_test.test_prepare_nwaku_service(plan)
nwaku_builder_test.test__prepare_nwaku_cmd_in_service(plan)

View File

@@ -2,4 +2,4 @@
image_id=$(docker images -q wls:0.0.1)
echo $image_id
docker image rm -f $image_id
docker image build --progress=plain -t wls:0.0.1 ./
docker image build -t wls:0.0.1 ./

View File

@@ -24,3 +24,4 @@ tqdm==4.64.1
typer==0.7.0
urllib3==1.26.13
aiohttp==3.8.4
prometheus-api-client==0.5.3

View File

@@ -0,0 +1,35 @@
# Python Imports
import json
from prometheus_api_client import PrometheusConnect
def connect_to_prometheus(ip, port):
    """Build a PrometheusConnect client pointed at ``http://<ip>:<port>``.

    Returns the client instance on success, or ``None`` when the client
    cannot be created (the error is printed rather than raised).
    """
    print(f"Connecting to {ip}:{port}")
    endpoint = f"http://{ip}:{port}"
    try:
        return PrometheusConnect(endpoint, disable_ssl=True)
    except Exception as err:
        print("Cannot connect to Prometheus Service")
        print(err)
        return None
def dump_prometheus(config, prometheus_ip, prometheus_port, start_time, finish_time):
    """Query Prometheus for the configured per-node metrics and dump the raw
    samples to ``/wls/prometheus_data.json``.

    :param config: parsed simulation config; reads ``config["plotting"]["by_node"]``
        for the list of metric names to export.
    :param prometheus_ip: IP address of the Prometheus service.
    :param prometheus_port: port of the Prometheus service.
    :param start_time: start of the time range to query (as accepted by
        ``custom_query_range``).
    :param finish_time: end of the time range to query.
    """
    to_query = config["plotting"]["by_node"]
    # Regex alternation matching any of the configured metric names.
    to_query = "|".join(to_query)
    print(to_query)

    prometheus_connection = connect_to_prometheus(prometheus_ip, prometheus_port)
    # connect_to_prometheus returns None on failure; bail out instead of
    # crashing with AttributeError on custom_query_range below.
    if prometheus_connection is None:
        print("Skipping Prometheus dump: connection unavailable")
        return

    query = f"{{__name__=~\"{to_query}\"}}"
    print(query)
    # Pull every matching series over the injection window at 1-second steps.
    metrics = prometheus_connection.custom_query_range(query, start_time=start_time,
                                                       end_time=finish_time, step="1s")

    with open("/wls/prometheus_data.json", "w") as out_file:
        json.dump(metrics, out_file)

View File

@@ -2,18 +2,19 @@
import argparse
import hashlib
import random
import os
import sys
import time
import tomllib
import asyncio
import os
from datetime import datetime
# Project Imports
from src.utils import wls_logger
from src.utils import waku_messaging
from src.utils import payloads
from src.utils import files
from src.utils import prometheus
""" Globals """
G_DEFAULT_CONFIG_FILE = 'config.json'
@@ -27,6 +28,10 @@ def parse_cli(args):
default=G_DEFAULT_CONFIG_FILE)
parser.add_argument("-t", "--topology_file", type=str, help="Topology file",
default=G_DEFAULT_TOPOLOGY_FILE)
parser.add_argument("-pi", "--prometheus-ip", type=str, help="Prometheus Port",
default=None)
parser.add_argument("-pp", "--prometheus-port", type=str, help="Prometheus Port",
default=None)
parsed_args = parser.parse_args(args)
@@ -167,9 +172,11 @@ async def main():
config_file = args.config_file
topology_file = args.topology_file
prometheus_ip = args.prometheus_ip
prometheus_port = args.prometheus_port
config = files.load_config_file(config_file)
# Set loglevel from config
wls_config = config['wls']
@@ -191,14 +198,23 @@ async def main():
t1 = time.time()
wls_logger.G_LOGGER.info(f'Got the signal to start: took {t1-t0} secs')
injection_start_time = datetime.now()
msgs_dict = await start_traffic_injection_async(wls_config, random_emitters)
injection_finish_time = datetime.now()
files.save_messages_to_json(msgs_dict)
# Delete the signal file just in case
if os.path.exists('/wls/start.signal'):
os.remove('/wls/start.signal')
if prometheus_port is not None:
prometheus.dump_prometheus(config, prometheus_ip, prometheus_port, injection_start_time,
injection_finish_time)
if __name__ == "__main__":
asyncio.run(main())