Add files for local codex

This commit is contained in:
Alberto Soutullo
2025-11-13 13:54:07 +01:00
parent aa5422c404
commit 77a8bb054c
8 changed files with 466 additions and 0 deletions

View File

@@ -0,0 +1,43 @@
## Install docker-desktop
Make sure Docker Desktop is installed on your machine with the Kubernetes feature enabled.
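A quick sanity check (assuming the default context name that Docker Desktop creates):

```bash
kubectl config use-context docker-desktop
kubectl get nodes
```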
## Build controlbox
Controlbox is a pod (single container) that runs inside the cluster and executes the Python scripts that
control the scenario. The Docker image needs to be built with the ssh keys of the person who will
connect to the controlbox pod.
To do this, go to the `controlbox` folder and:
- Substitute `<your_github_handle>` with the handle of the user.
- These keys will later be used to ssh into the pod.
- Build the image (see the sketch below).
- Make sure to use that image in `deployment/codex/controlbox_codex.yaml`.
There is a bit more information in the `controlbox/README.md` file.
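A minimal sketch of the build step (the tag is illustrative; it just has to match the image you reference in
`deployment/codex/controlbox_codex.yaml`):

```bash
cd controlbox
# after substituting <your_github_handle> as described above
docker build -t <your_image> .
```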
## Deploy everything
`deploy_codex_local.sh` will deploy:
- Anvil, SNT contracts, services, and everything that may be needed for more complex scenarios
- Controlbox pod
- K8s services
- Nwaku bootstrap node
- Nwaku store node
- Status-codex nodes (without being initialized)
  - You can control how many nodes are deployed via the `replicas` field in `status-backend-relay-codex.yaml`
Note that if you want to repeat the deployment, you just need to run `cleanup_local.sh` and `deploy_codex_local.sh`
again (see the example below). If the cluster still has anvil and controlbox, comment out the corresponding lines
in `deploy_codex_local.sh`; those deployments are only needed on a fresh cluster.
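For example, a redeploy cycle on a cluster that already has anvil and controlbox:

```bash
./cleanup_local.sh
./deploy_codex_local.sh   # with the run-once block commented out
```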
## Upload code to controlbox and execute
Run `./prepare_and_launch.sh`.
Editing and rerunning is much faster and easier if you create an ssh-based venv interpreter from your IDE.
PyCharm Professional supports this, and it can likely be done with VS Code as well. The only thing you need to do is
enable the port forward, and the IDE should let you transfer the files to the controlbox and create the venv.
The slow-but-safe approach is described here just in case (see the sketch below).
There are other tools that also make this process faster and easier (K9s to check logs, do port-forwarding, etc.),
but again, I didn't want to introduce too many new dependencies.
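A hedged sketch of the manual port-forward plus ssh flow (the local port is arbitrary and the login user is an
assumption; use whichever user the image bakes your key in for):

```bash
# Forward local port 2222 to the controlbox pod's sshd (port 22)
kubectl port-forward -n status-go-test deployment/controlbox 2222:22
# In another terminal:
ssh -p 2222 root@localhost
```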

View File

@@ -0,0 +1,7 @@
kubectl delete -f ../service/status-service-bootstrap.yaml
kubectl delete -f status-service-node-codex.yaml
kubectl delete -f status-backend-relay-codex.yaml
kubectl delete -f ../../base-manifests/anvil-statefulset.yaml
kubectl delete -f controlbox_codex.yaml

View File

@@ -0,0 +1,80 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: controlbox-sa
  namespace: status-go-test
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: statefulset-viewer
  namespace: status-go-test
rules:
  - apiGroups: ["apps"]
    resources: ["statefulsets"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: controlbox-statefulset-viewer
  namespace: status-go-test
subjects:
  - kind: ServiceAccount
    name: controlbox-sa
    namespace: status-go-test
roleRef:
  kind: Role
  name: statefulset-viewer
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: controlbox
  namespace: status-go-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: controlbox
  template:
    metadata:
      labels:
        app: controlbox
    spec:
      dnsConfig:
        searches:
          - status-service-node.status-go-test.svc.cluster.local
          - status-service-bootstrap.status-go-test.svc.cluster.local
          - status-backend-relay-codex.status-go-test.svc.cluster.local
      serviceAccountName: controlbox-sa
      containers:
        - name: controlbox
          image: soutullostatus/controlbox-status:v1.0.0
          # image: <your_image>
          imagePullPolicy: Always
          ports:
            - containerPort: 22
          command: ["/bin/bash", "-c"]
          args:
            - |
              apt-get update && apt-get install -y curl && \
              curl -LO "https://dl.k8s.io/release/stable.txt" && \
              curl -LO "https://dl.k8s.io/release/$(cat stable.txt)/bin/linux/amd64/kubectl" && \
              chmod +x kubectl && \
              mv kubectl /usr/local/bin/ && \
              /usr/sbin/sshd -D
---
apiVersion: v1
kind: Service
metadata:
  name: controlbox-service
  namespace: status-go-test
spec:
  clusterIP: None
  selector:
    app: controlbox
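To verify that the Role above grants what the in-pod kubectl needs, a quick check from outside the cluster:

```bash
kubectl auth can-i list statefulsets -n status-go-test \
  --as=system:serviceaccount:status-go-test:controlbox-sa
```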

View File

@@ -0,0 +1,17 @@
#!/bin/bash
##### RUN-ONCE BLOCK: only needed on a fresh cluster; comment out when redeploying #####
../../base-manifests/deploy_local.sh
kubectl apply -f controlbox_codex.yaml
kubectl apply -f status-backend-relay-codex-services.yaml
##### END RUN-ONCE BLOCK #####
kubectl apply -f ../service/status-service-bootstrap.yaml
kubectl rollout status --watch --timeout=30000s statefulset/status-service-bootstrap -n status-go-test
kubectl apply -f status-service-node-codex.yaml
sleep 10
kubectl rollout status --watch --timeout=30000s statefulset/status-service-node-codex -n status-go-test
kubectl apply -f status-backend-relay-codex.yaml
kubectl rollout status --watch --timeout=30000s statefulset/status-backend-relay-codex -n status-go-test
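Once the rollouts finish, a quick check that everything is Running:

```bash
kubectl get pods -n status-go-test
```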

View File

@@ -0,0 +1,42 @@
#!/usr/bin/env bash
set -e
namespace="status-go-test"
if ! kubectl get namespace "$namespace" >/dev/null 2>&1; then
echo "Namespace '$namespace' does not exist. Creating it..."
kubectl create namespace "$namespace"
else
echo "Namespace '$namespace' already exists."
fi
./deploy_codex_local.sh
# Wait for the controlbox deployment to be ready before exec'ing into it
kubectl rollout status --watch --timeout=300s deployment/controlbox -n "$namespace"
# Get the controlbox pod name via its label (set in controlbox_codex.yaml)
controlbox_name=$(kubectl get pods -n "$namespace" -l app=controlbox -o jsonpath='{.items[0].metadata.name}')
echo "Using control box: $controlbox_name"
# Create the target directory inside the pod
kubectl exec -n "$namespace" "$controlbox_name" -- mkdir -p /home/code/
# Copy your local 'src' directory into the pod
kubectl cp ../../src/ "$namespace/$controlbox_name:/home/code/"
kubectl cp ../../main.py "$namespace/$controlbox_name:/home/code/"
kubectl cp ../../requirements.txt "$namespace/$controlbox_name:/home/code/"
# Create a Python venv (installs python3-venv if missing)
kubectl exec -n "$namespace" "$controlbox_name" -- bash -c "
python3 -m venv /home/venv &&
source /home/venv/bin/activate &&
pip install --upgrade pip &&
pip install -r /home/code/requirements.txt
"
# Run the script
kubectl exec -n "$namespace" "$controlbox_name" -- bash -c "
source /home/venv/bin/activate &&
cd /home/code &&
python main.py
"

View File

@@ -0,0 +1,54 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: config-init-sa
  namespace: status-go-test
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  # ClusterRoles are cluster-scoped, so no namespace here
  name: pod-reader
rules:
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: pod-reader-binding
subjects:
  - kind: ServiceAccount
    name: config-init-sa
    namespace: status-go-test
roleRef:
  kind: ClusterRole
  name: pod-reader
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
  name: status-backend-relay-codex
  namespace: status-go-test
spec:
  # Headless service to get per-pod DNS names
  clusterIP: None
  selector:
    app: status-backend-relay-codex
---
apiVersion: v1
kind: Service
metadata:
  name: status-backend-relay-codex-2
  namespace: status-go-test
spec:
  # Headless service to get per-pod DNS names
  clusterIP: None
  selector:
    app: status-backend-relay-codex-2
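With the headless services above, each relay pod gets a stable per-pod DNS name following the usual StatefulSet
convention; for example, from inside the cluster:

```bash
# pod 0 of the relay StatefulSet, resolved through the headless service
nslookup status-backend-relay-codex-0.status-backend-relay-codex.status-go-test.svc.cluster.local
```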

View File

@@ -0,0 +1,88 @@
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: status-backend-relay-codex
  namespace: status-go-test
spec:
  replicas: 3
  podManagementPolicy: Parallel
  serviceName: status-backend-relay-codex
  selector:
    matchLabels:
      app: status-backend-relay-codex
  template:
    metadata:
      labels:
        app: status-backend-relay-codex
    spec:
      serviceAccountName: controlbox-sa
      dnsConfig:
        searches:
          - status-backend-relay-codex.status-go-test.svc.cluster.local
          - status-service-bootstrap.status-go-test.svc.cluster.local
          - status-service-node.status-go-test.svc.cluster.local
      initContainers:
        - name: getenr
          image: soutullostatus/getenr:v1.1.0
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: enr-data
              mountPath: /etc/enr
          command: ["/bin/sh", "-c"]
          args:
            - |
              /app/getenr.sh 3 status-service-bootstrap.status-go-test /etc/enr/BOOT_ENRS && \
              /app/getenr.sh 3 status-service-node.status-go-test /etc/enr/STORE_ENRS
        - name: config-init
          image: soutullostatus/status-init:v1.2.0
          imagePullPolicy: IfNotPresent
          command:
            - /bin/bash
            - -c
            - |
              set -a
              source /etc/enr/BOOT_ENRS
              set +a
              source /etc/enr/STORE_ENRS
              python /init_container.py
          volumeMounts:
            - name: enr-data
              mountPath: /etc/enr
            - name: relay-config
              mountPath: /static/configs
      containers:
        - name: status-backend
          image: soutullostatus/statusgo-codex-066488ea2:latest
          imagePullPolicy: IfNotPresent
          args: ["status-backend", "-address", "0.0.0.0:3333"]
          ports:
            - containerPort: 3333
              name: http
            - containerPort: 30303
              name: waku
          volumeMounts:
            - mountPath: "/static/configs"
              name: relay-config
          livenessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - |
                  curl -X GET http://localhost:3333/health || exit 1
            initialDelaySeconds: 5
            periodSeconds: 5
            timeoutSeconds: 2
            failureThreshold: 120
        - name: status-subscriber
          image: soutullostatus/status-subscriber:v1.0.0
          imagePullPolicy: IfNotPresent
          env:
            - name: WEBSOCKET_URL
              value: "ws://localhost:3333/signals"
      volumes:
        - name: relay-config
          emptyDir: {}
        - name: enr-data
          emptyDir: {}
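To change how many relay nodes run without editing the manifest, scaling the StatefulSet directly also works (a
hedged alternative to editing the `replicas` field):

```bash
kubectl scale statefulset/status-backend-relay-codex -n status-go-test --replicas=5
```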

View File

@@ -0,0 +1,135 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: status-service-node-codex
  namespace: status-go-test
spec:
  replicas: 1
  podManagementPolicy: "Parallel"
  serviceName: status-service-node
  selector:
    matchLabels:
      app: status-service-node
  template:
    metadata:
      labels:
        app: status-service-node
    spec:
      #dnsConfig:
      #  searches:
      #    - status-service-node.status-go-test.svc.cluster.local
      volumes:
        - name: enr-data
          emptyDir: {}
        - name: postgres-data
          emptyDir: {}
      initContainers:
        - name: grabenr
          image: soutullostatus/getenr:v1.1.0
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: enr-data
              mountPath: /etc/enr
          command:
            - /app/getenr.sh
          args:
            - "3"
            - "status-service-bootstrap.status-go-test"
      containers:
        - name: postgres
          image: postgres:15.1-alpine
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: postgres-data
              mountPath: /var/lib/postgresql/data
          env:
            - name: POSTGRES_DB
              value: wakumessages
            - name: POSTGRES_USER
              value: wakuuser
            - name: POSTGRES_PASSWORD
              value: wakupassword
          ports:
            - containerPort: 5432
          readinessProbe:
            exec:
              command:
                - sh
                - -c
                - |
                  pg_isready -U wakuuser -d wakumessages
            initialDelaySeconds: 5
            periodSeconds: 2
            timeoutSeconds: 5
        - name: waku
          image: soutullostatus/nwaku-jq-curl:v0.36.0-rc.0
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8645
            - containerPort: 8008
          volumeMounts:
            - name: enr-data
              mountPath: /etc/enr
          readinessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - "curl -sf http://127.0.0.1:8008/health | grep -q 'OK'"
            successThreshold: 5
            initialDelaySeconds: 5
            periodSeconds: 1
            failureThreshold: 2
            timeoutSeconds: 5
          resources:
            requests:
              memory: "64Mi"
              cpu: "150m"
            limits:
              memory: "600Mi"
              cpu: "400m"
          env:
            - name: IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: POSTGRES_URL
              value: "postgres://wakuuser:wakupassword@127.0.0.1:5432/wakumessages"
          command:
            - sh
            - -c
            - |
              echo "Waiting for Postgres on 127.0.0.1:5432..."
              until curl --silent --output /dev/null --connect-timeout 2 telnet://127.0.0.1:5432; do
                echo "Still waiting for Postgres..."
                sleep 2
              done
              echo "Postgres is ready, launching wakunode"
              . /etc/enr/ENR
              echo ENRs are $ENR1 $ENR2 $ENR3
              /usr/bin/wakunode \
                --rest=true --rest-admin=true --rest-address=0.0.0.0 \
                --relay=true \
                --lightpush=true \
                --filter=true \
                --store=true \
                --store-message-db-url=${POSTGRES_URL} \
                --store-message-retention-policy="time:90" \
                --max-connections=800 \
                --discv5-discovery=true \
                --discv5-enr-auto-update=True \
                --discv5-bootstrap-node=$ENR1 --discv5-bootstrap-node=$ENR2 --discv5-bootstrap-node=$ENR3 \
                --log-level=DEBUG \
                --metrics-server=True --metrics-server-address=0.0.0.0 \
                --nat=extip:$IP \
                --cluster-id=16 --shard=32 --shard=64
---
apiVersion: v1
kind: Service
metadata:
  name: status-service-node
  namespace: status-go-test
spec:
  clusterIP: None
  selector:
    app: status-service-node
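To poke the store node's health endpoint from your machine (the same endpoint the readiness probe hits; the pod
name follows the StatefulSet ordinal convention):

```bash
kubectl port-forward -n status-go-test pod/status-service-node-codex-0 8008:8008
# in another terminal
curl http://localhost:8008/health
```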