feat: harness (#703)

* feat: harness

* delete tests.rs build artifact

* fix binary path

* seconds -> milliseconds

* update lock

* add empty tests module

* rustfmt

* ToString -> Display

* output tests module into build artifacts

* clippy

* rustfmt
This commit is contained in:
sinu.eth
2025-06-06 13:34:32 -07:00
committed by GitHub
parent 79c230f2fa
commit 0933d711d2
98 changed files with 3529 additions and 4479 deletions

View File

@@ -119,7 +119,7 @@ jobs:
- name: Run tests
run: |
cd crates/wasm-test-runner
cd crates/harness
./run.sh
- name: Run build

1519
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,10 +1,5 @@
[workspace]
members = [
"crates/benches/binary",
"crates/benches/browser/core",
"crates/benches/browser/native",
"crates/benches/browser/wasm",
"crates/benches/library",
"crates/common",
"crates/components/deap",
"crates/components/cipher",
@@ -21,7 +16,6 @@ members = [
"crates/prover",
"crates/server-fixture/certs",
"crates/server-fixture/server",
"crates/tests-integration",
"crates/tls/backend",
"crates/tls/client",
"crates/tls/client-async",
@@ -30,7 +24,9 @@ members = [
"crates/tls/server-fixture",
"crates/verifier",
"crates/wasm",
"crates/wasm-test-runner",
"crates/harness/core",
"crates/harness/executor",
"crates/harness/runner",
]
resolver = "2"
@@ -56,9 +52,6 @@ notary-common = { path = "crates/notary/common" }
notary-server = { path = "crates/notary/server" }
tls-server-fixture = { path = "crates/tls/server-fixture" }
tlsn-cipher = { path = "crates/components/cipher" }
tlsn-benches-browser-core = { path = "crates/benches/browser/core" }
tlsn-benches-browser-native = { path = "crates/benches/browser/native" }
tlsn-benches-library = { path = "crates/benches/library" }
tlsn-common = { path = "crates/common" }
tlsn-core = { path = "crates/core" }
tlsn-data-fixtures = { path = "crates/data-fixtures" }
@@ -75,6 +68,10 @@ tlsn-tls-client = { path = "crates/tls/client" }
tlsn-tls-client-async = { path = "crates/tls/client-async" }
tlsn-tls-core = { path = "crates/tls/core" }
tlsn-utils = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
tlsn-harness-core = { path = "crates/harness/core" }
tlsn-harness-executor = { path = "crates/harness/executor" }
tlsn-harness-runner = { path = "crates/harness/runner" }
tlsn-wasm = { path = "crates/wasm" }
tlsn-verifier = { path = "crates/verifier" }
mpz-circuits = { git = "https://github.com/privacy-scaling-explorations/mpz", tag = "v0.1.0-alpha.3" }
@@ -106,9 +103,13 @@ axum = { version = "0.8" }
bcs = { version = "0.1" }
bincode = { version = "1.3" }
blake3 = { version = "1.5" }
bon = { version = "3.6" }
bytes = { version = "1.4" }
cfg-if = { version = "1" }
chromiumoxide = { version = "0.7" }
chrono = { version = "0.4" }
cipher = { version = "0.4" }
clap = { version = "4.5" }
criterion = { version = "0.5" }
ctr = { version = "0.9" }
derive_builder = { version = "0.12" }
@@ -127,7 +128,10 @@ http = { version = "1.1" }
http-body-util = { version = "0.1" }
hyper = { version = "1.1" }
hyper-util = { version = "0.1" }
ipnet = { version = "2.11" }
inventory = { version = "0.3" }
itybity = { version = "0.2" }
js-sys = { version = "0.3" }
k256 = { version = "0.13" }
log = { version = "0.4" }
once_cell = { version = "1.19" }
@@ -135,6 +139,7 @@ opaque-debug = { version = "0.3" }
p256 = { version = "0.13" }
pkcs8 = { version = "0.10" }
pin-project-lite = { version = "0.2" }
pollster = { version = "0.4" }
rand = { version = "0.9" }
rand_chacha = { version = "0.9" }
rand_core = { version = "0.9" }
@@ -155,12 +160,21 @@ thiserror = { version = "1.0" }
tokio = { version = "1.38" }
tokio-rustls = { version = "0.24" }
tokio-util = { version = "0.7" }
toml = { version = "0.8" }
tower = { version = "0.5" }
tower-http = { version = "0.5" }
tower-service = { version = "0.3" }
tower-util = { version = "0.3.1" }
tracing = { version = "0.1" }
tracing-subscriber = { version = "0.3" }
uuid = { version = "1.4" }
wasm-bindgen = { version = "0.2" }
wasm-bindgen-futures = { version = "0.4" }
web-spawn = { version = "0.2" }
web-time = { version = "0.2" }
webpki = { version = "0.22" }
webpki-roots = { version = "0.26" }
ws_stream_tungstenite = { version = "0.14" }
# Use the patched ws_stream_wasm to fix the issue https://github.com/najamelan/ws_stream_wasm/issues/12#issuecomment-1711902958
ws_stream_wasm = { git = "https://github.com/tlsnotary/ws_stream_wasm", rev = "2ed12aad9f0236e5321f577672f309920b2aef51" }
zeroize = { version = "1.8" }

View File

@@ -1,74 +0,0 @@
[package]
edition = "2021"
name = "tlsn-benches"
publish = false
version = "0.0.0"
[lints]
workspace = true
[features]
default = []
# Enables benchmarks in the browser.
browser-bench = ["tlsn-benches-browser-native"]
[dependencies]
mpz-common = { workspace = true }
mpz-core = { workspace = true }
mpz-garble = { workspace = true }
mpz-ot = { workspace = true, features = ["ideal"] }
tlsn-benches-library = { workspace = true }
tlsn-benches-browser-native = { workspace = true, optional = true }
tlsn-common = { workspace = true }
tlsn-core = { workspace = true }
tlsn-hmac-sha256 = { workspace = true }
tlsn-prover = { workspace = true }
tlsn-server-fixture = { workspace = true }
tlsn-server-fixture-certs = { workspace = true }
tlsn-tls-core = { workspace = true }
tlsn-verifier = { workspace = true }
anyhow = { workspace = true }
async-trait = { workspace = true }
charming = { version = "0.3.1", features = ["ssr"] }
csv = "1.3.0"
dhat = { version = "0.3.3" }
env_logger = { version = "0.6.0", default-features = false }
futures = { workspace = true }
serde = { workspace = true }
tokio = { workspace = true, features = [
"rt",
"rt-multi-thread",
"macros",
"net",
"io-std",
"fs",
] }
tokio-util = { workspace = true }
toml = "0.8.11"
tracing-subscriber = { workspace = true, features = ["env-filter"] }
rand = { workspace = true }
[[bin]]
name = "bench"
path = "bin/bench.rs"
[[bin]]
name = "prover"
path = "bin/prover.rs"
[[bin]]
name = "prover-memory"
path = "bin/prover_memory.rs"
[[bin]]
name = "verifier"
path = "bin/verifier.rs"
[[bin]]
name = "verifier-memory"
path = "bin/verifier_memory.rs"
[[bin]]
name = "plot"
path = "bin/plot.rs"

View File

@@ -1,53 +0,0 @@
# TLSNotary bench utilities
This crate provides utilities for benchmarking protocol performance under various network conditions and usage patterns.
As the protocol is mostly IO bound, it's important to track how it performs in low bandwidth and/or high latency environments. To do this we set up temporary network namespaces and add virtual ethernet interfaces which we can control using the linux `tc` (Traffic Control) utility.
## Configuration
See the `bench.toml` file for benchmark configurations.
## Preliminaries
To run the benchmarks you will need `iproute2` installed, eg:
```sh
sudo apt-get install iproute2 -y
```
## Running benches
Running the benches requires root privileges because they will set up virtual interfaces. The script is designed to fully clean up when the benches are done, but run them at your own risk.
#### Native benches
Make sure you're in the `crates/benches/` directory, build the binaries, and then run the script:
```sh
cd binary
cargo build --release --locked
sudo ./bench.sh
```
#### Browser benches
(Note, we recommend running browser benches inside a docker container (see docker.md) to avoid
facing incompatibility issues observed in the latest versions of Chrome.)
With a Chrome browser installed on your system, make sure you're in the `crates/benches/`
directory, build the wasm module, build the binaries, and then run the script:
```sh
cd browser/wasm
wasm-pack build --release --locked --target web
cd ../../binary
cargo build --release --locked --features browser-bench
sudo ./bench.sh
```
## Metrics
After you run the benches you will see a `metrics.csv` file in the working directory. It will be owned by `root`, so you probably want to run
```sh
sudo chown $USER metrics.csv
```

View File

@@ -1,13 +0,0 @@
#! /bin/bash
# Check if we are running as root.
if [ "$EUID" -ne 0 ]; then
echo "This script must be run as root"
exit
fi
# Run the benchmark binary.
../../../target/release/bench
# Plot the results.
../../../target/release/plot metrics.csv

View File

@@ -1,45 +0,0 @@
[[benches]]
name = "latency"
upload = 250
upload-delay = [10, 25, 50]
download = 250
download-delay = [10, 25, 50]
upload-size = 1024
download-size = 4096
defer-decryption = true
memory-profile = false
[[benches]]
name = "download_bandwidth"
upload = 250
upload-delay = 25
download = [10, 25, 50, 100, 250]
download-delay = 25
upload-size = 1024
download-size = 4096
defer-decryption = true
memory-profile = false
[[benches]]
name = "upload_bandwidth"
upload = [10, 25, 50, 100, 250]
upload-delay = 25
download = 250
download-delay = 25
upload-size = 1024
download-size = 4096
defer-decryption = [false, true]
memory-profile = false
[[benches]]
name = "download_volume"
upload = 250
upload-delay = 25
download = 250
download-delay = 25
upload-size = 1024
# It was observed that setting download-size > 30K causes browser errors that need to
# be investigated.
download-size = [1024, 4096, 16384]
defer-decryption = true
memory-profile = true

View File

@@ -1,56 +0,0 @@
FROM rust AS builder
WORKDIR /usr/src/tlsn
COPY . .
ARG BENCH_TYPE=native
RUN \
rustup update; \
if [ "$BENCH_TYPE" = "browser" ]; then \
# ring's build script needs clang.
apt update && apt install -y clang; \
rustup install nightly; \
rustup component add rust-src --toolchain nightly; \
cargo install wasm-pack; \
cd crates/benches/browser/wasm; \
wasm-pack build --release --locked --target web; \
cd ../../binary; \
cargo build --release --features browser-bench --locked; \
else \
cd crates/benches/binary; \
cargo build --release --locked; \
fi
FROM debian:latest
ARG BENCH_TYPE=native
RUN apt update && apt upgrade -y && apt install -y --no-install-recommends \
iproute2 \
sudo
RUN \
if [ "$BENCH_TYPE" = "browser" ]; then \
# Using Chromium since Chrome for Linux is not available on ARM.
apt install -y chromium; \
fi
RUN apt clean && rm -rf /var/lib/apt/lists/*
COPY --from=builder \
["/usr/src/tlsn/target/release/bench", \
"/usr/src/tlsn/target/release/prover", \
"/usr/src/tlsn/target/release/prover-memory", \
"/usr/src/tlsn/target/release/verifier", \
"/usr/src/tlsn/target/release/verifier-memory", \
"/usr/src/tlsn/target/release/plot", \
"/usr/local/bin/"]
ENV PROVER_PATH="/usr/local/bin/prover"
ENV VERIFIER_PATH="/usr/local/bin/verifier"
ENV PROVER_MEMORY_PATH="/usr/local/bin/prover-memory"
ENV VERIFIER_MEMORY_PATH="/usr/local/bin/verifier-memory"
VOLUME [ "/benches" ]
WORKDIR "/benches"
CMD ["/bin/bash", "-c", "bench && bench --memory-profiling && plot /benches/metrics.csv && cat /benches/metrics.csv"]

View File

@@ -1,2 +0,0 @@
# exclude any /target folders
**/target*

View File

@@ -1,62 +0,0 @@
use std::{env, process::Command, thread, time::Duration};
use tlsn_benches::{clean_up, set_up};
fn main() {
let args: Vec<String> = env::args().collect();
let is_memory_profiling = args.contains(&"--memory-profiling".to_string());
let (prover_path, verifier_path) = if is_memory_profiling {
(
std::env::var("PROVER_MEMORY_PATH")
.unwrap_or_else(|_| "../../../target/release/prover-memory".to_string()),
std::env::var("VERIFIER_MEMORY_PATH")
.unwrap_or_else(|_| "../../../target/release/verifier-memory".to_string()),
)
} else {
(
std::env::var("PROVER_PATH")
.unwrap_or_else(|_| "../../../target/release/prover".to_string()),
std::env::var("VERIFIER_PATH")
.unwrap_or_else(|_| "../../../target/release/verifier".to_string()),
)
};
if let Err(e) = set_up() {
println!("Error setting up: {}", e);
clean_up();
}
// Run prover and verifier binaries in parallel.
let Ok(mut verifier) = Command::new("ip")
.arg("netns")
.arg("exec")
.arg("verifier-ns")
.arg(verifier_path)
.spawn()
else {
println!("Failed to start verifier");
return clean_up();
};
// Allow the verifier some time to start listening before the prover attempts to
// connect.
thread::sleep(Duration::from_secs(1));
let Ok(mut prover) = Command::new("ip")
.arg("netns")
.arg("exec")
.arg("prover-ns")
.arg(prover_path)
.spawn()
else {
println!("Failed to start prover");
return clean_up();
};
// Wait for both to finish.
_ = prover.wait();
_ = verifier.wait();
clean_up();
}

View File

@@ -1,248 +0,0 @@
use tlsn_benches::metrics::Metrics;
use charming::{
component::{
Axis, DataView, Feature, Legend, Restore, SaveAsImage, Title, Toolbox, ToolboxDataZoom,
},
element::{NameLocation, Orient, Tooltip, Trigger},
series::{Line, Scatter},
theme::Theme,
Chart, HtmlRenderer,
};
use csv::Reader;
const THEME: Theme = Theme::Default;
fn main() -> Result<(), Box<dyn std::error::Error>> {
let csv_file = std::env::args()
.nth(1)
.expect("Usage: plot <path_to_csv_file>");
let mut rdr = Reader::from_path(csv_file)?;
// Prepare data for plotting.
let all_data: Vec<Metrics> = rdr
.deserialize::<Metrics>()
.collect::<Result<Vec<_>, _>>()?; // Attempt to collect all results, return an error if any fail.
let _chart = runtime_vs_latency(&all_data)?;
let _chart = runtime_vs_bandwidth(&all_data)?;
// Memory profiling is not compatible with browser benches.
if cfg!(not(feature = "browser-bench")) {
let _chart = download_size_vs_memory(&all_data)?;
}
Ok(())
}
fn download_size_vs_memory(all_data: &[Metrics]) -> Result<Chart, Box<dyn std::error::Error>> {
const TITLE: &str = "Download Size vs Memory";
let prover_kind: String = all_data
.first()
.map(|s| s.kind.clone().into())
.unwrap_or_default();
let data: Vec<Vec<f32>> = all_data
.iter()
.filter(|record| record.name == "download_volume" && record.heap_max_bytes.is_some())
.map(|record| {
vec![
record.download_size as f32,
record.heap_max_bytes.unwrap() as f32 / 1024.0 / 1024.0,
]
})
.collect();
// https://github.com/yuankunzhang/charming
let chart = Chart::new()
.title(
Title::new()
.text(TITLE)
.subtext(format!("{} Prover", prover_kind)),
)
.tooltip(Tooltip::new().trigger(Trigger::Axis))
.legend(Legend::new().orient(Orient::Vertical))
.toolbox(
Toolbox::new().show(true).feature(
Feature::new()
.save_as_image(SaveAsImage::new())
.restore(Restore::new())
.data_zoom(ToolboxDataZoom::new().y_axis_index("none"))
.data_view(DataView::new().read_only(false)),
),
)
.x_axis(
Axis::new()
.scale(true)
.name("Download Size (bytes)")
.name_gap(30)
.name_location(NameLocation::Center),
)
.y_axis(
Axis::new()
.scale(true)
.name("Heap Memory (Mbytes)")
.name_gap(40)
.name_location(NameLocation::Middle),
)
.series(
Scatter::new()
.name("Allocated Heap Memory")
.symbol_size(10)
.data(data),
);
// Save the chart as HTML file.
HtmlRenderer::new(TITLE, 1000, 800)
.theme(THEME)
.save(&chart, "download_size_vs_memory.html")
.unwrap();
Ok(chart)
}
fn runtime_vs_latency(all_data: &[Metrics]) -> Result<Chart, Box<dyn std::error::Error>> {
const TITLE: &str = "Runtime vs Latency";
let prover_kind: String = all_data
.first()
.map(|s| s.kind.clone().into())
.unwrap_or_default();
let data: Vec<Vec<f32>> = all_data
.iter()
.filter(|record| record.name == "latency")
.map(|record| {
let total_delay = record.upload_delay + record.download_delay; // Calculate the sum of upload and download delays.
vec![total_delay as f32, record.runtime as f32]
})
.collect();
// https://github.com/yuankunzhang/charming
let chart = Chart::new()
.title(
Title::new()
.text(TITLE)
.subtext(format!("{} Prover", prover_kind)),
)
.tooltip(Tooltip::new().trigger(Trigger::Axis))
.legend(Legend::new().orient(Orient::Vertical))
.toolbox(
Toolbox::new().show(true).feature(
Feature::new()
.save_as_image(SaveAsImage::new())
.restore(Restore::new())
.data_zoom(ToolboxDataZoom::new().y_axis_index("none"))
.data_view(DataView::new().read_only(false)),
),
)
.x_axis(
Axis::new()
.scale(true)
.name("Upload + Download Latency (ms)")
.name_location(NameLocation::Center),
)
.y_axis(
Axis::new()
.scale(true)
.name("Runtime (s)")
.name_location(NameLocation::Middle),
)
.series(
Scatter::new()
.name("Combined Latency")
.symbol_size(10)
.data(data),
);
// Save the chart as HTML file.
HtmlRenderer::new(TITLE, 1000, 800)
.theme(THEME)
.save(&chart, "runtime_vs_latency.html")
.unwrap();
Ok(chart)
}
fn runtime_vs_bandwidth(all_data: &[Metrics]) -> Result<Chart, Box<dyn std::error::Error>> {
const TITLE: &str = "Runtime vs Bandwidth";
let prover_kind: String = all_data
.first()
.map(|s| s.kind.clone().into())
.unwrap_or_default();
let download_data: Vec<Vec<f32>> = all_data
.iter()
.filter(|record| record.name == "download_bandwidth")
.map(|record| vec![record.download as f32, record.runtime as f32])
.collect();
let upload_deferred_data: Vec<Vec<f32>> = all_data
.iter()
.filter(|record| record.name == "upload_bandwidth" && record.defer_decryption)
.map(|record| vec![record.upload as f32, record.runtime as f32])
.collect();
let upload_non_deferred_data: Vec<Vec<f32>> = all_data
.iter()
.filter(|record| record.name == "upload_bandwidth" && !record.defer_decryption)
.map(|record| vec![record.upload as f32, record.runtime as f32])
.collect();
// https://github.com/yuankunzhang/charming
let chart = Chart::new()
.title(
Title::new()
.text(TITLE)
.subtext(format!("{} Prover", prover_kind)),
)
.tooltip(Tooltip::new().trigger(Trigger::Axis))
.legend(Legend::new().orient(Orient::Vertical))
.toolbox(
Toolbox::new().show(true).feature(
Feature::new()
.save_as_image(SaveAsImage::new())
.restore(Restore::new())
.data_zoom(ToolboxDataZoom::new().y_axis_index("none"))
.data_view(DataView::new().read_only(false)),
),
)
.x_axis(
Axis::new()
.scale(true)
.name("Bandwidth (Mbps)")
.name_location(NameLocation::Center),
)
.y_axis(
Axis::new()
.scale(true)
.name("Runtime (s)")
.name_location(NameLocation::Middle),
)
.series(
Line::new()
.name("Download bandwidth")
.symbol_size(10)
.data(download_data),
)
.series(
Line::new()
.name("Upload bandwidth (deferred decryption)")
.symbol_size(10)
.data(upload_deferred_data),
)
.series(
Line::new()
.name("Upload bandwidth")
.symbol_size(10)
.data(upload_non_deferred_data),
);
// Save the chart as HTML file.
HtmlRenderer::new(TITLE, 1000, 800)
.theme(THEME)
.save(&chart, "runtime_vs_bandwidth.html")
.unwrap();
Ok(chart)
}

View File

@@ -1,8 +0,0 @@
//! A Prover without memory profiling.
use tlsn_benches::prover_main::prover_main;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
prover_main(false).await
}

View File

@@ -1,15 +0,0 @@
//! A Prover with memory profiling.
use tlsn_benches::prover_main::prover_main;
#[global_allocator]
static ALLOC: dhat::Alloc = dhat::Alloc;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
if cfg!(feature = "browser-bench") {
// Memory profiling is not compatible with browser benches.
return Ok(());
}
prover_main(true).await
}

View File

@@ -1,8 +0,0 @@
//! A Verifier without memory profiling.
use tlsn_benches::verifier_main::verifier_main;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
verifier_main(false).await
}

View File

@@ -1,15 +0,0 @@
//! A Verifier with memory profiling.
use tlsn_benches::verifier_main::verifier_main;
#[global_allocator]
static ALLOC: dhat::Alloc = dhat::Alloc;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
if cfg!(feature = "browser-bench") {
// Memory profiling is not compatible with browser benches.
return Ok(());
}
verifier_main(true).await
}

View File

@@ -1,13 +0,0 @@
# Run the TLSN benches with Docker
In the root folder of this repository, run:
```
# Change to BENCH_TYPE=browser if you want benchmarks to run in the browser.
docker build -t tlsn-bench . -f ./crates/benches/binary/benches.Dockerfile --build-arg BENCH_TYPE=native
```
Next run the benches with:
```
docker run -it --privileged -v ./crates/benches/binary:/benches tlsn-bench
```
The `--privileged` parameter is required because this test bench needs permission to create networks with certain parameters

View File

@@ -1,123 +0,0 @@
use serde::{Deserialize, Serialize};
#[derive(Deserialize)]
#[serde(untagged)]
pub enum Field<T> {
Single(T),
Multiple(Vec<T>),
}
#[derive(Deserialize)]
pub struct Config {
pub benches: Vec<Bench>,
}
#[derive(Deserialize)]
pub struct Bench {
pub name: String,
pub upload: Field<usize>,
#[serde(rename = "upload-delay")]
pub upload_delay: Field<usize>,
pub download: Field<usize>,
#[serde(rename = "download-delay")]
pub download_delay: Field<usize>,
#[serde(rename = "upload-size")]
pub upload_size: Field<usize>,
#[serde(rename = "download-size")]
pub download_size: Field<usize>,
#[serde(rename = "defer-decryption")]
pub defer_decryption: Field<bool>,
#[serde(rename = "memory-profile")]
pub memory_profile: Field<bool>,
}
impl Bench {
/// Flattens the config into a list of instances
pub fn flatten(self) -> Vec<BenchInstance> {
let mut instances = vec![];
let upload = match self.upload {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
let upload_delay = match self.upload_delay {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
let download = match self.download {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
let download_latency = match self.download_delay {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
let upload_size = match self.upload_size {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
let download_size = match self.download_size {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
let defer_decryption = match self.defer_decryption {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
let memory_profile = match self.memory_profile {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
for u in upload {
for ul in &upload_delay {
for d in &download {
for dl in &download_latency {
for us in &upload_size {
for ds in &download_size {
for dd in &defer_decryption {
for mp in &memory_profile {
instances.push(BenchInstance {
name: self.name.clone(),
upload: u,
upload_delay: *ul,
download: *d,
download_delay: *dl,
upload_size: *us,
download_size: *ds,
defer_decryption: *dd,
memory_profile: *mp,
});
}
}
}
}
}
}
}
}
instances
}
}
#[derive(Debug, Clone, Serialize)]
pub struct BenchInstance {
pub name: String,
pub upload: usize,
pub upload_delay: usize,
pub download: usize,
pub download_delay: usize,
pub upload_size: usize,
pub download_size: usize,
pub defer_decryption: bool,
/// Whether this instance should be used for memory profiling.
pub memory_profile: bool,
}

View File

@@ -1,272 +0,0 @@
pub mod config;
pub mod metrics;
pub mod prover;
pub mod prover_main;
pub mod verifier_main;
use std::{
io,
process::{Command, Stdio},
};
pub const PROVER_NAMESPACE: &str = "prover-ns";
pub const PROVER_INTERFACE: &str = "prover-veth";
pub const PROVER_SUBNET: &str = "10.10.1.0/24";
pub const VERIFIER_NAMESPACE: &str = "verifier-ns";
pub const VERIFIER_INTERFACE: &str = "verifier-veth";
pub const VERIFIER_SUBNET: &str = "10.10.1.1/24";
pub fn set_up() -> io::Result<()> {
// Create network namespaces
create_network_namespace(PROVER_NAMESPACE)?;
create_network_namespace(VERIFIER_NAMESPACE)?;
// Create veth pair and attach to namespaces
create_veth_pair(
PROVER_NAMESPACE,
PROVER_INTERFACE,
VERIFIER_NAMESPACE,
VERIFIER_INTERFACE,
)?;
// Set devices up
set_device_up(PROVER_NAMESPACE, PROVER_INTERFACE)?;
set_device_up(VERIFIER_NAMESPACE, VERIFIER_INTERFACE)?;
// Bring up the loopback interface.
set_device_up(PROVER_NAMESPACE, "lo")?;
set_device_up(VERIFIER_NAMESPACE, "lo")?;
// Assign IPs
assign_ip_to_interface(PROVER_NAMESPACE, PROVER_INTERFACE, PROVER_SUBNET)?;
assign_ip_to_interface(VERIFIER_NAMESPACE, VERIFIER_INTERFACE, VERIFIER_SUBNET)?;
// Set default routes
set_default_route(
PROVER_NAMESPACE,
PROVER_INTERFACE,
PROVER_SUBNET.split('/').next().unwrap(),
)?;
set_default_route(
VERIFIER_NAMESPACE,
VERIFIER_INTERFACE,
VERIFIER_SUBNET.split('/').next().unwrap(),
)?;
Ok(())
}
pub fn clean_up() {
// Delete interface pair
if let Err(e) = Command::new("ip")
.args([
"netns",
"exec",
PROVER_NAMESPACE,
"ip",
"link",
"delete",
PROVER_INTERFACE,
])
.status()
{
println!("Error deleting interface {}: {}", PROVER_INTERFACE, e);
}
// Delete namespaces
if let Err(e) = Command::new("ip")
.args(["netns", "del", PROVER_NAMESPACE])
.status()
{
println!("Error deleting namespace {}: {}", PROVER_NAMESPACE, e);
}
if let Err(e) = Command::new("ip")
.args(["netns", "del", VERIFIER_NAMESPACE])
.status()
{
println!("Error deleting namespace {}: {}", VERIFIER_NAMESPACE, e);
}
}
/// Sets the interface parameters.
///
/// Must be run in the correct namespace.
///
/// # Arguments
///
/// * `egress` - The egress bandwidth in mbps.
/// * `burst` - The burst in mbps.
/// * `delay` - The delay in ms.
pub fn set_interface(interface: &str, egress: usize, burst: usize, delay: usize) -> io::Result<()> {
// Clear rules
let output = Command::new("tc")
.arg("qdisc")
.arg("del")
.arg("dev")
.arg(interface)
.arg("root")
.stdout(Stdio::piped())
.output()?;
if output.stderr == "Error: Cannot delete qdisc with handle of zero.\n".as_bytes() {
// This error is informative, do not log it to stderr.
} else if !output.status.success() {
return Err(io::Error::other("Failed to clear rules"));
}
// Egress
Command::new("tc")
.arg("qdisc")
.arg("add")
.arg("dev")
.arg(interface)
.arg("root")
.arg("handle")
.arg("1:")
.arg("tbf")
.arg("rate")
.arg(format!("{}mbit", egress))
.arg("burst")
.arg(format!("{}mbit", burst))
.arg("latency")
.arg("60s")
.status()?;
// Delay
Command::new("tc")
.arg("qdisc")
.arg("add")
.arg("dev")
.arg(interface)
.arg("parent")
.arg("1:1")
.arg("handle")
.arg("10:")
.arg("netem")
.arg("delay")
.arg(format!("{}ms", delay))
.status()?;
Ok(())
}
/// Create a network namespace with the given name if it does not already exist.
fn create_network_namespace(name: &str) -> io::Result<()> {
// Check if namespace already exists
if Command::new("ip")
.args(["netns", "list"])
.output()?
.stdout
.windows(name.len())
.any(|ns| ns == name.as_bytes())
{
println!("Namespace {} already exists", name);
return Ok(());
} else {
println!("Creating namespace {}", name);
Command::new("ip").args(["netns", "add", name]).status()?;
}
Ok(())
}
fn create_veth_pair(
left_namespace: &str,
left_interface: &str,
right_namespace: &str,
right_interface: &str,
) -> io::Result<()> {
// Check if interfaces are already present in namespaces
if is_interface_present_in_namespace(left_namespace, left_interface)?
|| is_interface_present_in_namespace(right_namespace, right_interface)?
{
println!("Virtual interface already exists.");
return Ok(());
}
// Create veth pair
Command::new("ip")
.args([
"link",
"add",
left_interface,
"type",
"veth",
"peer",
"name",
right_interface,
])
.status()?;
println!(
"Created veth pair {} and {}",
left_interface, right_interface
);
// Attach veth pair to namespaces
attach_interface_to_namespace(left_namespace, left_interface)?;
attach_interface_to_namespace(right_namespace, right_interface)?;
Ok(())
}
fn attach_interface_to_namespace(namespace: &str, interface: &str) -> io::Result<()> {
Command::new("ip")
.args(["link", "set", interface, "netns", namespace])
.status()?;
println!("Attached {} to namespace {}", interface, namespace);
Ok(())
}
fn set_default_route(namespace: &str, interface: &str, ip: &str) -> io::Result<()> {
Command::new("ip")
.args([
"netns", "exec", namespace, "ip", "route", "add", "default", "via", ip, "dev",
interface,
])
.status()?;
println!(
"Set default route for namespace {} ip {} to {}",
namespace, ip, interface
);
Ok(())
}
fn is_interface_present_in_namespace(
namespace: &str,
interface: &str,
) -> Result<bool, std::io::Error> {
Ok(Command::new("ip")
.args([
"netns", "exec", namespace, "ip", "link", "list", "dev", interface,
])
.output()?
.stdout
.windows(interface.len())
.any(|ns| ns == interface.as_bytes()))
}
fn set_device_up(namespace: &str, interface: &str) -> io::Result<()> {
Command::new("ip")
.args([
"netns", "exec", namespace, "ip", "link", "set", interface, "up",
])
.status()?;
Ok(())
}
fn assign_ip_to_interface(namespace: &str, interface: &str, ip: &str) -> io::Result<()> {
Command::new("ip")
.args([
"netns", "exec", namespace, "ip", "addr", "add", ip, "dev", interface,
])
.status()?;
Ok(())
}

View File

@@ -1,31 +0,0 @@
use serde::{Deserialize, Serialize};
use tlsn_benches_library::ProverKind;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Metrics {
pub name: String,
/// The kind of the prover, either native or browser.
pub kind: ProverKind,
/// Upload bandwidth in Mbps.
pub upload: usize,
/// Upload latency in ms.
pub upload_delay: usize,
/// Download bandwidth in Mbps.
pub download: usize,
/// Download latency in ms.
pub download_delay: usize,
/// Total bytes sent to the server.
pub upload_size: usize,
/// Total bytes received from the server.
pub download_size: usize,
/// Whether deferred decryption was used.
pub defer_decryption: bool,
/// The total runtime of the benchmark in seconds.
pub runtime: u64,
/// The total amount of data uploaded to the verifier in bytes.
pub uploaded: u64,
/// The total amount of data downloaded from the verifier in bytes.
pub downloaded: u64,
/// The peak heap memory usage in bytes.
pub heap_max_bytes: Option<usize>,
}

View File

@@ -1,57 +0,0 @@
use std::time::Instant;
use tlsn_benches_library::{run_prover, AsyncIo, ProverKind, ProverTrait};
use async_trait::async_trait;
pub struct NativeProver {
upload_size: usize,
download_size: usize,
defer_decryption: bool,
io: Option<Box<dyn AsyncIo>>,
client_conn: Option<Box<dyn AsyncIo>>,
}
#[async_trait]
impl ProverTrait for NativeProver {
async fn setup(
upload_size: usize,
download_size: usize,
defer_decryption: bool,
io: Box<dyn AsyncIo>,
client_conn: Box<dyn AsyncIo>,
) -> anyhow::Result<Self>
where
Self: Sized,
{
Ok(Self {
upload_size,
download_size,
defer_decryption,
io: Some(io),
client_conn: Some(client_conn),
})
}
async fn run(&mut self) -> anyhow::Result<u64> {
let io = std::mem::take(&mut self.io).unwrap();
let client_conn = std::mem::take(&mut self.client_conn).unwrap();
let start_time = Instant::now();
run_prover(
self.upload_size,
self.download_size,
self.defer_decryption,
io,
client_conn,
)
.await?;
Ok(Instant::now().duration_since(start_time).as_secs())
}
fn kind(&self) -> ProverKind {
ProverKind::Native
}
}

View File

@@ -1,171 +0,0 @@
//! Contains the actual main() function of the prover binary. It is moved here
//! in order to enable cargo to build two prover binaries - with and without
//! memory profiling.
use std::{
fs::metadata,
io::Write,
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
};
use crate::{
config::{BenchInstance, Config},
metrics::Metrics,
set_interface, PROVER_INTERFACE,
};
use anyhow::Context;
use tlsn_benches_library::{AsyncIo, ProverTrait};
use tlsn_server_fixture::bind;
use csv::WriterBuilder;
use tokio_util::{
compat::TokioAsyncReadCompatExt,
io::{InspectReader, InspectWriter},
};
use tracing_subscriber::{fmt::format::FmtSpan, EnvFilter};
#[cfg(not(feature = "browser-bench"))]
use crate::prover::NativeProver as BenchProver;
#[cfg(feature = "browser-bench")]
use tlsn_benches_browser_native::BrowserProver as BenchProver;
/// Entry point shared by both prover binaries (with and without memory
/// profiling).
///
/// Reads the bench config (path from the `CFG` env var, defaulting to
/// `bench.toml`), connects to the verifier over TCP for each bench instance,
/// and appends the collected metrics to `metrics.csv`.
pub async fn prover_main(is_memory_profiling: bool) -> anyhow::Result<()> {
    let config_path = std::env::var("CFG").unwrap_or_else(|_| "bench.toml".to_string());
    let config: Config = toml::from_str(
        &std::fs::read_to_string(config_path).context("failed to read config file")?,
    )
    .context("failed to parse config")?;

    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
        .init();

    // Verifier endpoint defaults to 10.10.1.1:8000, overridable via env vars.
    let ip = std::env::var("VERIFIER_IP").unwrap_or_else(|_| "10.10.1.1".to_string());
    let port: u16 = std::env::var("VERIFIER_PORT")
        .map(|port| port.parse().expect("port is valid u16"))
        .unwrap_or(8000);
    let verifier_host = (ip.as_str(), port);

    let mut file = std::fs::OpenOptions::new()
        .create(true)
        .append(true)
        .open("metrics.csv")
        .context("failed to open metrics file")?;

    {
        let mut metric_wrt = WriterBuilder::new()
            // If file is not empty, assume that the CSV header is already present in the file.
            .has_headers(metadata("metrics.csv")?.len() == 0)
            .from_writer(&mut file);

        for bench in config.benches {
            let instances = bench.flatten();
            for instance in instances {
                // In memory-profiling mode only instances explicitly marked
                // with `memory_profile` are executed.
                if is_memory_profiling && !instance.memory_profile {
                    continue;
                }
                println!("{:?}", &instance);
                let io = tokio::net::TcpStream::connect(verifier_host)
                    .await
                    .context("failed to open tcp connection")?;
                // Flush after every instance so partial results survive a
                // crash of a later instance.
                metric_wrt.serialize(
                    run_instance(instance, io, is_memory_profiling)
                        .await
                        .context("failed to run instance")?,
                )?;
                metric_wrt.flush()?;
            }
        }
    }
    file.flush()?;

    Ok(())
}
/// Runs a single bench instance as the prover and returns its metrics.
///
/// `io` is the connection to the verifier; it is wrapped so the raw bytes
/// sent and received over it are counted for the metrics.
async fn run_instance(
    instance: BenchInstance,
    io: impl AsyncIo,
    is_memory_profiling: bool,
) -> anyhow::Result<Metrics> {
    // Byte counters updated by the inspecting reader/writer wrappers below.
    let uploaded = Arc::new(AtomicU64::new(0));
    let downloaded = Arc::new(AtomicU64::new(0));
    let io = InspectWriter::new(
        InspectReader::new(io, {
            let downloaded = downloaded.clone();
            move |data| {
                downloaded.fetch_add(data.len() as u64, Ordering::Relaxed);
            }
        }),
        {
            let uploaded = uploaded.clone();
            move |data| {
                uploaded.fetch_add(data.len() as u64, Ordering::Relaxed);
            }
        },
    );

    let BenchInstance {
        name,
        upload,
        upload_delay,
        download,
        download_delay,
        upload_size,
        download_size,
        defer_decryption,
        memory_profile,
    } = instance.clone();

    // Shape the prover's network interface with the instance's upload
    // bandwidth and delay parameters.
    set_interface(PROVER_INTERFACE, upload, 1, upload_delay)?;

    let _profiler = if is_memory_profiling {
        assert!(memory_profile, "Instance doesn't have `memory_profile` set");
        // Build a testing profiler as it won't output to stderr.
        Some(dhat::Profiler::builder().testing().build())
    } else {
        None
    };

    // In-process TLS server fixture attached over an in-memory duplex pipe.
    let (client_conn, server_conn) = tokio::io::duplex(1 << 16);
    tokio::spawn(bind(server_conn.compat()));

    let mut prover = BenchProver::setup(
        upload_size,
        download_size,
        defer_decryption,
        Box::new(io),
        Box::new(client_conn),
    )
    .await?;

    let runtime = prover.run().await?;

    let heap_max_bytes = if is_memory_profiling {
        Some(dhat::HeapStats::get().max_bytes)
    } else {
        None
    };

    Ok(Metrics {
        name,
        kind: prover.kind(),
        upload,
        upload_delay,
        download,
        download_delay,
        upload_size,
        download_size,
        defer_decryption,
        runtime,
        uploaded: uploaded.load(Ordering::SeqCst),
        downloaded: downloaded.load(Ordering::SeqCst),
        heap_max_bytes,
    })
}

View File

@@ -1,128 +0,0 @@
//! Contains the actual main() function of the verifier binary. It is moved here
//! in order to enable cargo to build two verifier binaries - with and without
//! memory profiling.
use crate::{
config::{BenchInstance, Config},
set_interface, VERIFIER_INTERFACE,
};
use tls_core::verify::WebPkiVerifier;
use tlsn_common::config::ProtocolConfigValidator;
use tlsn_core::{CryptoProvider, VerifyConfig};
use tlsn_server_fixture_certs::CA_CERT_DER;
use tlsn_verifier::{Verifier, VerifierConfig};
use anyhow::Context;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::TokioAsyncReadCompatExt;
use tracing_subscriber::{fmt::format::FmtSpan, EnvFilter};
/// Entry point shared by both verifier binaries (with and without memory
/// profiling).
///
/// Reads the same bench config as the prover and accepts one inbound TCP
/// connection per bench instance, in config order.
pub async fn verifier_main(is_memory_profiling: bool) -> anyhow::Result<()> {
    let config_path = std::env::var("CFG").unwrap_or_else(|_| "bench.toml".to_string());
    let config: Config = toml::from_str(
        &std::fs::read_to_string(config_path).context("failed to read config file")?,
    )
    .context("failed to parse config")?;

    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
        .init();

    // Listen address defaults to 10.10.1.1:8000, overridable via env vars.
    let ip = std::env::var("VERIFIER_IP").unwrap_or_else(|_| "10.10.1.1".to_string());
    let port: u16 = std::env::var("VERIFIER_PORT")
        .map(|port| port.parse().expect("port is valid u16"))
        .unwrap_or(8000);
    let host = (ip.as_str(), port);

    let listener = tokio::net::TcpListener::bind(host)
        .await
        .context("failed to bind to port")?;

    for bench in config.benches {
        for instance in bench.flatten() {
            // Must mirror the prover's skip logic so the two sides stay in
            // lockstep over the sequence of connections.
            if is_memory_profiling && !instance.memory_profile {
                continue;
            }
            let (io, _) = listener
                .accept()
                .await
                .context("failed to accept connection")?;
            run_instance(instance, io, is_memory_profiling)
                .await
                .context("failed to run instance")?;
        }
    }

    Ok(())
}
/// Runs the verifier side of a single bench instance over `io`.
async fn run_instance<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
    instance: BenchInstance,
    io: S,
    is_memory_profiling: bool,
) -> anyhow::Result<()> {
    let BenchInstance {
        download,
        download_delay,
        upload_size,
        download_size,
        memory_profile,
        ..
    } = instance;

    // Shape the verifier's network interface with the instance's download
    // bandwidth and delay parameters.
    set_interface(VERIFIER_INTERFACE, download, 1, download_delay)?;

    let _profiler = if is_memory_profiling {
        assert!(memory_profile, "Instance doesn't have `memory_profile` set");
        // Build a testing profiler as it won't output to stderr.
        Some(dhat::Profiler::builder().testing().build())
    } else {
        None
    };

    let provider = CryptoProvider {
        cert: cert_verifier(),
        ..Default::default()
    };

    // 256 bytes of headroom over the payload sizes (matches the prover's
    // protocol config).
    let config_validator = ProtocolConfigValidator::builder()
        .max_sent_data(upload_size + 256)
        .max_recv_data(download_size + 256)
        .build()
        .unwrap();

    let verifier = Verifier::new(
        VerifierConfig::builder()
            .protocol_config_validator(config_validator)
            .crypto_provider(provider)
            .build()?,
    );

    verifier
        .verify(io.compat(), &VerifyConfig::default())
        .await?;

    println!("verifier done");

    if is_memory_profiling {
        // XXX: we may want to profile the Verifier's memory usage at a future
        // point.
        // println!(
        //     "verifier peak heap memory usage: {}",
        //     dhat::HeapStats::get().max_bytes
        // );
    }

    Ok(())
}
/// Builds a WebPKI certificate verifier that trusts only the server
/// fixture's CA certificate.
fn cert_verifier() -> WebPkiVerifier {
    let ca_cert = tls_core::key::Certificate(CA_CERT_DER.to_vec());
    let mut roots = tls_core::anchors::RootCertStore::empty();
    roots.add(&ca_cert).unwrap();
    WebPkiVerifier::new(roots, None)
}

View File

@@ -1,16 +0,0 @@
[package]
edition = "2021"
name = "tlsn-benches-browser-core"
publish = false
version = "0.0.0"
[lints]
workspace = true
[dependencies]
tlsn-benches-library = { workspace = true }
serio = { workspace = true }
serde = { workspace = true }
tokio-util= { workspace = true, features = ["compat", "io-util"] }

View File

@@ -1,68 +0,0 @@
//! Contains core types shared by the native and the wasm components.
use std::{
io::Error,
pin::Pin,
task::{Context, Poll},
};
use tlsn_benches_library::AsyncIo;
use serio::{
codec::{Bincode, Framed},
Sink, Stream,
};
use tokio_util::codec::LengthDelimitedCodec;
pub mod msg;
/// A sink/stream for serializable types with a framed transport.
pub struct FramedIo {
    // Bincode (de)serialization layered over length-delimited framing of the
    // underlying async I/O.
    inner:
        serio::Framed<tokio_util::codec::Framed<Box<dyn AsyncIo>, LengthDelimitedCodec>, Bincode>,
}
impl FramedIo {
    /// Creates a new `FramedIo` from the given async `io`.
    #[allow(clippy::default_constructed_unit_structs)]
    pub fn new(io: Box<dyn AsyncIo>) -> Self {
        // Length-delimited framing first, then Bincode serialization on top.
        let io = LengthDelimitedCodec::builder().new_framed(io);
        Self {
            inner: Framed::new(io, Bincode::default()),
        }
    }
}
impl Sink for FramedIo {
    type Error = Error;

    // All methods delegate straight to the inner framed transport.
    fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Pin::new(&mut self.inner).poll_ready(cx)
    }

    fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Pin::new(&mut self.inner).poll_close(cx)
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Pin::new(&mut self.inner).poll_flush(cx)
    }

    fn start_send<Item: serio::Serialize>(
        mut self: Pin<&mut Self>,
        item: Item,
    ) -> std::result::Result<(), Self::Error> {
        Pin::new(&mut self.inner).start_send(item)
    }
}
impl Stream for FramedIo {
    type Error = Error;

    // Delegates deserialization to the inner framed transport.
    fn poll_next<Item: serio::Deserialize>(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Item, Error>>> {
        Pin::new(&mut self.inner).poll_next(cx)
    }
}

View File

@@ -1,17 +0,0 @@
//! Messages exchanged by the native and the wasm components of the browser
//! prover.
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, PartialEq)]
/// The config sent to the wasm component.
pub struct Config {
    // Number of bytes the prover sends to the server.
    pub upload_size: usize,
    // Number of bytes the prover requests from the server.
    pub download_size: usize,
    // Whether decryption is deferred from the start of the connection.
    pub defer_decryption: bool,
}

#[derive(Serialize, Deserialize, PartialEq)]
/// Sent by the wasm component when proving process is finished. Contains total
/// runtime in seconds.
pub struct Runtime(pub u64);

View File

@@ -1,25 +0,0 @@
[package]
edition = "2021"
name = "tlsn-benches-browser-native"
publish = false
version = "0.0.0"
[lints]
workspace = true
[dependencies]
tlsn-benches-browser-core = { workspace = true }
tlsn-benches-library = { workspace = true }
serio = { workspace = true }
websocket-relay = { workspace = true }
anyhow = { workspace = true }
async-trait = { workspace = true }
chromiumoxide = { version = "0.6.0" , features = ["tokio-runtime"] }
futures = { workspace = true }
rust-embed = "8.5.0"
tokio = { workspace = true, features = ["rt", "io-std"] }
tracing = { workspace = true }
warp = "0.3.7"
warp-embed = "0.5.0"

View File

@@ -1,336 +0,0 @@
//! Contains the native component of the browser prover.
//!
//! Conceptually the browser prover consists of the native and the wasm
//! components. The native component is responsible for starting the browser,
//! loading the wasm component and driving it.
use std::{env, net::IpAddr, time::Duration};
use anyhow::{anyhow, Context, Result};
use async_trait::async_trait;
use chromiumoxide::{
cdp::{
browser_protocol::log::{EventEntryAdded, LogEntryLevel},
js_protocol::runtime::EventExceptionThrown,
},
Browser, BrowserConfig, Page,
};
use futures::{Future, FutureExt, StreamExt};
use rust_embed::RustEmbed;
use serio::{stream::IoStreamExt, SinkExt as _};
use tokio::{io, io::AsyncWriteExt, net::TcpListener, task::JoinHandle};
use tracing::{debug, error, info};
use warp::Filter;
use tlsn_benches_browser_core::{
msg::{Config, Runtime},
FramedIo,
};
use tlsn_benches_library::{AsyncIo, ProverKind, ProverTrait};
/// The IP on which the wasm component is served.
pub static DEFAULT_WASM_IP: &str = "127.0.0.1";
/// The IP of the websocket relay.
pub static DEFAULT_WS_IP: &str = "127.0.0.1";
/// The port on which the wasm component is served.
pub static DEFAULT_WASM_PORT: u16 = 9001;
/// The port of the websocket relay.
pub static DEFAULT_WS_PORT: u16 = 9002;
/// The port for the wasm component to communicate with the TLS server.
pub static DEFAULT_WASM_TO_SERVER_PORT: u16 = 9003;
/// The port for the wasm component to communicate with the verifier.
pub static DEFAULT_WASM_TO_VERIFIER_PORT: u16 = 9004;
/// The port for the wasm component to communicate with the native component.
pub static DEFAULT_WASM_TO_NATIVE_PORT: u16 = 9005;
// The `pkg` dir will be embedded into the binary at compile-time.
#[derive(RustEmbed)]
#[folder = "../wasm/pkg"]
struct Data;
/// The native component of the prover which runs in the browser.
///
/// All contained resources are torn down once the run completes.
pub struct BrowserProver {
    /// Io for communication with the wasm component.
    wasm_io: FramedIo,
    /// The browser spawned by the prover.
    browser: Browser,
    /// A handle to the http server.
    http_server: JoinHandle<()>,
    /// Handles to the relays.
    relays: Vec<JoinHandle<Result<(), anyhow::Error>>>,
}
#[async_trait]
impl ProverTrait for BrowserProver {
    async fn setup(
        upload_size: usize,
        download_size: usize,
        defer_decryption: bool,
        verifier_io: Box<dyn AsyncIo>,
        server_io: Box<dyn AsyncIo>,
    ) -> anyhow::Result<Self>
    where
        Self: Sized,
    {
        // All IPs/ports are overridable via env vars and fall back to the
        // DEFAULT_* constants above.
        let wasm_port: u16 = env::var("WASM_PORT")
            .map(|port| port.parse().expect("port should be valid integer"))
            .unwrap_or(DEFAULT_WASM_PORT);
        let ws_port: u16 = env::var("WS_PORT")
            .map(|port| port.parse().expect("port should be valid integer"))
            .unwrap_or(DEFAULT_WS_PORT);
        let wasm_to_server_port: u16 = env::var("WASM_TO_SERVER_PORT")
            .map(|port| port.parse().expect("port should be valid integer"))
            .unwrap_or(DEFAULT_WASM_TO_SERVER_PORT);
        let wasm_to_verifier_port: u16 = env::var("WASM_TO_VERIFIER_PORT")
            .map(|port| port.parse().expect("port should be valid integer"))
            .unwrap_or(DEFAULT_WASM_TO_VERIFIER_PORT);
        let wasm_to_native_port: u16 = env::var("WASM_TO_NATIVE_PORT")
            .map(|port| port.parse().expect("port should be valid integer"))
            .unwrap_or(DEFAULT_WASM_TO_NATIVE_PORT);
        let wasm_ip: IpAddr = env::var("WASM_IP")
            .map(|addr| addr.parse().expect("should be valid IP address"))
            .unwrap_or(IpAddr::V4(DEFAULT_WASM_IP.parse().unwrap()));
        let ws_ip: IpAddr = env::var("WS_IP")
            .map(|addr| addr.parse().expect("should be valid IP address"))
            .unwrap_or(IpAddr::V4(DEFAULT_WS_IP.parse().unwrap()));

        let mut relays = Vec::with_capacity(4);

        relays.push(spawn_websocket_relay(ws_ip, ws_port).await?);

        // Create a framed connection to the wasm component.
        let (wasm_left, wasm_right) = tokio::io::duplex(1 << 16);
        relays.push(spawn_port_relay(wasm_to_native_port, Box::new(wasm_right)).await?);
        let mut wasm_io = FramedIo::new(Box::new(wasm_left));

        // Serve the embedded wasm bundle for the browser to load.
        let http_server = spawn_http_server(wasm_ip, wasm_port)?;

        // Relay data from the wasm component to the server.
        relays.push(spawn_port_relay(wasm_to_server_port, server_io).await?);
        // Relay data from the wasm component to the verifier.
        relays.push(spawn_port_relay(wasm_to_verifier_port, verifier_io).await?);

        info!("spawning browser");

        // Note that the browser must be spawned only when the WebSocket relay is
        // running.
        let browser = spawn_browser(
            wasm_ip,
            ws_ip,
            wasm_port,
            ws_port,
            wasm_to_server_port,
            wasm_to_verifier_port,
            wasm_to_native_port,
        )
        .await?;

        // Without this sleep, it was observed that `wasm_io.send(Config)`
        // msg does not reach the browser component.
        tokio::time::sleep(Duration::from_secs(2)).await;

        info!("sending config to the browser component");
        wasm_io
            .send(Config {
                upload_size,
                download_size,
                defer_decryption,
            })
            .await?;

        Ok(Self {
            wasm_io,
            browser,
            http_server,
            relays,
        })
    }

    async fn run(&mut self) -> anyhow::Result<u64> {
        // Blocks until the wasm component reports its total runtime, then
        // tears down the browser and supporting tasks.
        let runtime: Runtime = self.wasm_io.expect_next().await.unwrap();
        _ = self.clean_up().await?;
        Ok(runtime.0)
    }

    fn kind(&self) -> ProverKind {
        ProverKind::Browser
    }
}
impl BrowserProver {
    /// Tears down everything spawned in `setup`: the HTTP server, the data
    /// relays and the browser itself.
    async fn clean_up(&mut self) -> anyhow::Result<()> {
        // Kill the http server.
        self.http_server.abort();

        // Kill all relays. A plain loop is used instead of a lazy
        // `map(..).collect()`, which exists only to force side effects.
        for task in &self.relays {
            task.abort();
        }

        // Close the browser.
        self.browser.close().await?;
        self.browser.wait().await?;

        Ok(())
    }
}
/// Spawns a WebSocket relay listening on `ip:port`, bridging WebSocket
/// connections from the browser to plain TCP endpoints.
pub async fn spawn_websocket_relay(
    ip: IpAddr,
    port: u16,
) -> anyhow::Result<JoinHandle<Result<(), anyhow::Error>>> {
    let listener = TcpListener::bind((ip, port)).await?;
    Ok(tokio::spawn(websocket_relay::run(listener)))
}
/// Binds to the given localhost `port`, accepts a connection and relays data
/// between the connection and the `channel`.
///
/// Returns a handle to the spawned relay task. Accept failures are reported
/// through the task's `Result` instead of panicking the task.
pub async fn spawn_port_relay(
    port: u16,
    channel: Box<dyn AsyncIo>,
) -> anyhow::Result<JoinHandle<Result<(), anyhow::Error>>> {
    let listener = tokio::net::TcpListener::bind(("127.0.0.1", port))
        .await
        .context("failed to bind to port")?;

    let handle = tokio::spawn(async move {
        // Propagate accept errors via the task result rather than
        // `unwrap()`ing, which would panic inside the spawned task.
        let (tcp, _) = listener
            .accept()
            .await
            .context("failed to accept a connection")?;

        relay_data(Box::new(tcp), channel).await
    });

    Ok(handle)
}
/// Bidirectionally relays bytes between two I/O endpoints.
///
/// Each direction copies until EOF and then shuts down the corresponding
/// write half; both directions run concurrently and the first error aborts
/// the relay.
pub async fn relay_data(left: Box<dyn AsyncIo>, right: Box<dyn AsyncIo>) -> Result<()> {
    let (mut left_rx, mut left_tx) = io::split(left);
    let (mut right_rx, mut right_tx) = io::split(right);

    let forward = async {
        io::copy(&mut left_rx, &mut right_tx).await?;
        right_tx.shutdown().await
    };
    let backward = async {
        io::copy(&mut right_rx, &mut left_tx).await?;
        left_tx.shutdown().await
    };

    tokio::try_join!(forward, backward)?;
    Ok(())
}
/// Spawns the browser and starts the wasm component.
///
/// Loads `index.html` from the local HTTP server, then calls into the
/// page's `window.benchWorker` to initialize and start the bench.
async fn spawn_browser(
    wasm_ip: IpAddr,
    ws_ip: IpAddr,
    wasm_port: u16,
    ws_port: u16,
    wasm_to_server_port: u16,
    wasm_to_verifier_port: u16,
    wasm_to_native_port: u16,
) -> anyhow::Result<Browser> {
    // Chrome requires --no-sandbox when running as root.
    let config = BrowserConfig::builder()
        .no_sandbox()
        .incognito()
        .build()
        .map_err(|s| anyhow!(s))?;

    debug!("launching chromedriver");

    let (browser, mut handler) = Browser::launch(config).await?;

    debug!("chromedriver started");

    // Drive the browser's event handler for the lifetime of the browser.
    tokio::spawn(async move {
        while let Some(res) = handler.next().await {
            res.unwrap();
        }
    });

    let page = browser
        .new_page(&format!("http://{}:{}/index.html", wasm_ip, wasm_port))
        .await?;

    // Forward browser console logs and exceptions into tracing.
    tokio::spawn(register_listeners(&page).await?);

    page.wait_for_navigation().await?;

    // Note that `format!` needs double {{ }} in order to escape them.
    let _ = page
        .evaluate_function(&format!(
            r#"
            async function() {{
                await window.benchWorker.init();
                // Do not `await` run() or else it will block the browser.
                window.benchWorker.run("{}", {}, {}, {}, {});
            }}
            "#,
            ws_ip, ws_port, wasm_to_server_port, wasm_to_verifier_port, wasm_to_native_port
        ))
        .await?;

    Ok(browser)
}
/// Serves the embedded wasm bundle over HTTP on `ip:port`.
///
/// The COOP/COEP headers are added to every response to enable cross-origin
/// isolation in the browser.
pub fn spawn_http_server(ip: IpAddr, port: u16) -> anyhow::Result<JoinHandle<()>> {
    let handle = tokio::spawn(async move {
        // Serve embedded files with additional headers.
        let data_serve = warp_embed::embed(&Data);
        let data_serve_with_headers = data_serve
            .map(|reply| {
                warp::reply::with_header(reply, "Cross-Origin-Opener-Policy", "same-origin")
            })
            .map(|reply| {
                warp::reply::with_header(reply, "Cross-Origin-Embedder-Policy", "require-corp")
            });
        warp::serve(data_serve_with_headers).run((ip, port)).await;
    });
    Ok(handle)
}
/// Registers listeners that forward browser console log entries and uncaught
/// exceptions into the native tracing output.
///
/// The returned future drives both listeners and must be spawned.
async fn register_listeners(page: &Page) -> Result<impl Future<Output = ()>> {
    let mut logs = page.event_listener::<EventEntryAdded>().await?.fuse();
    let mut exceptions = page.event_listener::<EventExceptionThrown>().await?.fuse();

    Ok(futures::future::join(
        async move {
            while let Some(event) = logs.next().await {
                let entry = &event.entry;
                match entry.level {
                    LogEntryLevel::Error => {
                        error!("{:?}", entry);
                    }
                    _ => {
                        debug!("{:?}: {}", entry.timestamp, entry.text);
                    }
                }
            }
        },
        async move {
            while let Some(event) = exceptions.next().await {
                error!("{:?}", event);
            }
        },
    )
    .map(|_| ()))
}

View File

@@ -1,30 +0,0 @@
[package]
edition = "2021"
name = "tlsn-benches-browser-wasm"
publish = false
version = "0.0.0"
[lints]
workspace = true
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
tlsn-benches-browser-core = { workspace = true }
tlsn-benches-library = { workspace = true }
tlsn-wasm = { path = "../../../wasm" }
serio = { workspace = true }
anyhow = { workspace = true }
rayon = { workspace = true }
tracing = { workspace = true }
wasm-bindgen = { version = "0.2" }
wasm-bindgen-futures = { version = "0.4" }
web-spawn = { workspace = true, features = ["no-bundler"] }
web-time = { workspace = true }
# Use the patched ws_stream_wasm to fix the issue https://github.com/najamelan/ws_stream_wasm/issues/12#issuecomment-1711902958
ws_stream_wasm = { version = "0.7.4", git = "https://github.com/tlsnotary/ws_stream_wasm", rev = "2ed12aad9f0236e5321f577672f309920b2aef51", features = [
"tokio_io",
] }

View File

@@ -1,7 +0,0 @@
<!DOCTYPE html>
<head>
</head>
<body>
<script src="index.js" type="module"></script>
</body>
</html>

View File

@@ -1,5 +0,0 @@
// Wrap the module worker with Comlink and expose its proxy on `window` so
// the native component can drive the bench worker from page scripts.
import * as Comlink from "./comlink.mjs";

const benchWorker = Comlink.wrap(new Worker("worker.js", { type: "module" }));
window.benchWorker = benchWorker;

View File

@@ -1,41 +0,0 @@
import * as Comlink from "./comlink.mjs";
import init_wasm, * as wasm from './tlsn_benches_browser_wasm.js';
/**
 * Worker driving the wasm bench component. Exposed to the page via Comlink.
 */
class BenchWorker {
    /** Loads the wasm module and initializes the bench environment. */
    async init() {
        try {
            await init_wasm();
            // Using Error level since excessive logging may interfere with the
            // benchmark results.
            await wasm.initialize_bench({ level: "Error" }, navigator.hardwareConcurrency);
        } catch (e) {
            console.error(e);
            throw e;
        }
    }
    /**
     * Runs the bench. The arguments identify the websocket relay and the
     * ports through which the wasm component reaches its peers.
     */
    async run(
        ws_ip,
        ws_port,
        wasm_to_server_port,
        wasm_to_verifier_port,
        wasm_to_native_port
    ) {
        try {
            await wasm.wasm_main(
                ws_ip,
                ws_port,
                wasm_to_server_port,
                wasm_to_verifier_port,
                wasm_to_native_port);
        } catch (e) {
            console.error(e);
            throw e;
        }
    }
}

// Expose a singleton worker instance to the page.
const worker = new BenchWorker();
Comlink.expose(worker);

View File

@@ -1,4 +0,0 @@
[toolchain]
channel = "nightly"
components = ["rust-src"]
targets = ["wasm32-unknown-unknown"]

View File

@@ -1,115 +0,0 @@
#![cfg(target_arch = "wasm32")]
//! Contains the wasm component of the browser prover.
//!
//! Conceptually the browser prover consists of the native and the wasm
//! components.
use anyhow::Result;
use serio::{stream::IoStreamExt, SinkExt as _};
use tracing::info;
use wasm_bindgen::prelude::*;
use web_time::Instant;
use ws_stream_wasm::WsMeta;
use tlsn_benches_browser_core::{
msg::{Config, Runtime},
FramedIo,
};
use tlsn_benches_library::run_prover;
use tlsn_wasm::LoggingConfig;
/// Wasm-bindgen entry point for the browser prover; see `main` for the
/// actual logic.
#[wasm_bindgen]
pub async fn wasm_main(
    ws_ip: String,
    ws_port: u16,
    wasm_to_server_port: u16,
    wasm_to_verifier_port: u16,
    wasm_to_native_port: u16,
) -> Result<(), JsError> {
    // Wrapping main() since wasm_bindgen doesn't support anyhow.
    main(
        ws_ip,
        ws_port,
        wasm_to_server_port,
        wasm_to_verifier_port,
        wasm_to_native_port,
    )
    .await
    .map_err(|err| JsError::new(&err.to_string()))
}
/// Connects to the server, the verifier and the native component through the
/// websocket relay, receives the bench config, runs the prover and reports
/// the total runtime back to the native component.
pub async fn main(
    ws_ip: String,
    ws_port: u16,
    wasm_to_server_port: u16,
    wasm_to_verifier_port: u16,
    wasm_to_native_port: u16,
) -> Result<()> {
    info!("starting main");

    // Connect to the server.
    let (_, server_io_ws) = WsMeta::connect(
        &format!(
            "ws://{}:{}/tcp?addr=localhost%3A{}",
            ws_ip, ws_port, wasm_to_server_port
        ),
        None,
    )
    .await?;
    let server_io = server_io_ws.into_io();

    // Connect to the verifier.
    let (_, verifier_io_ws) = WsMeta::connect(
        &format!(
            "ws://{}:{}/tcp?addr=localhost%3A{}",
            ws_ip, ws_port, wasm_to_verifier_port
        ),
        None,
    )
    .await?;
    let verifier_io = verifier_io_ws.into_io();

    // Connect to the native component of the browser prover.
    let (_, native_io_ws) = WsMeta::connect(
        &format!(
            "ws://{}:{}/tcp?addr=localhost%3A{}",
            ws_ip, ws_port, wasm_to_native_port
        ),
        None,
    )
    .await?;
    let mut native_io = FramedIo::new(Box::new(native_io_ws.into_io()));

    info!("expecting config from the native component");
    let cfg: Config = native_io.expect_next().await?;

    let start_time = Instant::now();

    info!("running the prover");
    run_prover(
        cfg.upload_size,
        cfg.download_size,
        cfg.defer_decryption,
        Box::new(verifier_io),
        Box::new(server_io),
    )
    .await?;

    // Report the total runtime, in seconds, back to the native component.
    native_io
        .send(Runtime(start_time.elapsed().as_secs()))
        .await?;

    Ok(())
}
/// Initializes the module.
///
/// Both arguments are forwarded to `tlsn_wasm::initialize`.
#[wasm_bindgen]
pub async fn initialize_bench(
    logging_config: Option<LoggingConfig>,
    thread_count: usize,
) -> Result<(), JsValue> {
    tlsn_wasm::initialize(logging_config, thread_count).await
}

View File

@@ -1,22 +0,0 @@
[package]
edition = "2021"
name = "tlsn-benches-library"
publish = false
version = "0.0.0"
[lints]
workspace = true
[dependencies]
tlsn-common = { workspace = true }
tlsn-core = { workspace = true }
tlsn-prover = { workspace = true }
tlsn-server-fixture-certs = { workspace = true }
tlsn-tls-core = { workspace = true }
anyhow = "1.0"
async-trait = "0.1.81"
futures = { version = "0.3", features = ["compat"] }
serde = { workspace = true }
tokio = {version = "1", default-features = false, features = ["rt", "macros"]}
tokio-util= {version = "0.7", features = ["compat", "io"]}

View File

@@ -1,137 +0,0 @@
use tls_core::{anchors::RootCertStore, verify::WebPkiVerifier};
use tlsn_common::config::ProtocolConfig;
use tlsn_core::{CryptoProvider, ProveConfig};
use tlsn_prover::{Prover, ProverConfig};
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
use anyhow::Context;
use async_trait::async_trait;
use futures::{future::try_join, AsyncReadExt as _, AsyncWriteExt as _, TryFutureExt};
use serde::{Deserialize, Serialize};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::TokioAsyncReadCompatExt;
/// Marker trait for async byte streams that can be used as boxed trait
/// objects across threads.
pub trait AsyncIo: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static {}

// Blanket impl: anything satisfying the bounds is an `AsyncIo`.
impl<T> AsyncIo for T where T: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static {}
#[async_trait]
pub trait ProverTrait {
    /// Sets up the prover preparing it to be run. Returns a prover ready to be
    /// run.
    ///
    /// `verifier_io` is the connection to the verifier; `server_io` is the
    /// connection to the TLS server.
    async fn setup(
        upload_size: usize,
        download_size: usize,
        defer_decryption: bool,
        verifier_io: Box<dyn AsyncIo>,
        server_io: Box<dyn AsyncIo>,
    ) -> anyhow::Result<Self>
    where
        Self: Sized;

    /// Runs the prover. Returns the total run time in seconds.
    async fn run(&mut self) -> anyhow::Result<u64>;

    /// Returns the kind of the prover.
    fn kind(&self) -> ProverKind;
}
#[derive(Debug, Clone, Serialize, Deserialize)]
/// The kind of a prover.
///
/// Recorded in the bench metrics to distinguish native from browser runs.
pub enum ProverKind {
    /// The prover compiled into a native binary.
    Native,
    /// The prover compiled into a wasm binary.
    Browser,
}
/// Converts the prover kind into its display name.
impl From<ProverKind> for String {
    fn from(value: ProverKind) -> Self {
        let name = match value {
            ProverKind::Native => "Native",
            ProverKind::Browser => "Browser",
        };
        name.to_string()
    }
}
/// Runs a full prover session: establishes an MPC-TLS connection over
/// `client_conn`, sends `upload_size` bytes and fetches `download_size`
/// bytes from the server fixture, then reveals and proves the entire
/// transcript to the verifier over `io`.
pub async fn run_prover(
    upload_size: usize,
    download_size: usize,
    defer_decryption: bool,
    io: Box<dyn AsyncIo>,
    client_conn: Box<dyn AsyncIo>,
) -> anyhow::Result<()> {
    let provider = CryptoProvider {
        cert: WebPkiVerifier::new(root_store(), None),
        ..Default::default()
    };

    // 256 bytes of headroom over the payload sizes; the online receive limit
    // is only configured when decryption is not deferred.
    let mut protocol_config = ProtocolConfig::builder();
    if defer_decryption {
        protocol_config
            .max_sent_data(upload_size + 256)
            .max_recv_data(download_size + 256)
    } else {
        protocol_config
            .max_sent_data(upload_size + 256)
            .max_recv_data(download_size + 256)
            .max_recv_data_online(download_size + 256)
    };
    let protocol_config = protocol_config
        .defer_decryption_from_start(defer_decryption)
        .build()
        .unwrap();

    let prover = Prover::new(
        ProverConfig::builder()
            .server_name(SERVER_DOMAIN)
            .protocol_config(protocol_config)
            .crypto_provider(provider)
            .build()
            .context("invalid prover config")?,
    )
    .setup(io.compat())
    .await?;

    let (mut mpc_tls_connection, prover_fut) = prover.connect(client_conn.compat()).await?;

    // The upload payload is carried in a request header; the response size is
    // requested from the fixture's /bytes endpoint.
    let tls_fut = async move {
        let request = format!(
            "GET /bytes?size={} HTTP/1.1\r\nConnection: close\r\nData: {}\r\n\r\n",
            download_size,
            String::from_utf8(vec![0x42u8; upload_size]).unwrap(),
        );

        mpc_tls_connection.write_all(request.as_bytes()).await?;
        mpc_tls_connection.close().await?;

        let mut response = vec![];
        mpc_tls_connection.read_to_end(&mut response).await?;

        dbg!(response.len());

        Ok::<(), anyhow::Error>(())
    };

    let (mut prover, _) = try_join(prover_fut.map_err(anyhow::Error::from), tls_fut).await?;

    // Reveal the full sent and received transcript, then prove it.
    let (sent_len, recv_len) = prover.transcript().len();
    let mut builder = ProveConfig::builder(prover.transcript());

    builder.reveal_sent(&(0..sent_len)).unwrap();
    builder.reveal_recv(&(0..recv_len)).unwrap();

    let config = builder.build().unwrap();

    prover.prove(&config).await?;
    prover.close().await?;

    Ok(())
}
/// Builds a root certificate store containing only the server fixture's CA.
fn root_store() -> RootCertStore {
    let mut root_store = RootCertStore::empty();
    root_store
        .add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
        .unwrap();
    root_store
}

View File

@@ -1,2 +1,4 @@
*.svg
*.html
bin/

50
crates/harness/README.md Normal file
View File

@@ -0,0 +1,50 @@
# TLSNotary Harness
A harness for testing and benchmarking the TLSNotary protocol with both native and browser support.
## Getting started
First build the harness.
```sh
./build.sh
```
With the harness built, run the following to see the available commands and options in the harness CLI.
```sh
./bin/runner --help
```
## Network setup
Running the harness requires root privileges to be able to set up a virtual network. Before running tests or benchmarks, first
set up the network.
```sh
sudo ./bin/runner setup
```
This network can be torn down simply by running:
```sh
sudo ./bin/runner clean
```
## Tests
See the CLI manual for available testing options.
To add new tests, one can register a test in the [plugin directory](executor/test_plugins).
See existing tests for an example of how to do so.
## Benches
See the CLI manual for available benchmarking options.
To add or modify benchmarks, see the [`bench.toml`](bench.toml) file.
## Browser
The harness supports running tests and benches in the browser by setting the `--target browser` flag in the cli.

51
crates/harness/bench.toml Normal file
View File

@@ -0,0 +1,51 @@
#### Latency ####
[[group]]
name = "latency"
bandwidth = 1000
[[bench]]
group = "latency"
latency = 10
[[bench]]
group = "latency"
latency = 25
[[bench]]
group = "latency"
latency = 50
[[bench]]
group = "latency"
latency = 100
[[bench]]
group = "latency"
latency = 200
#### Bandwidth ####
[[group]]
name = "bandwidth"
latency = 25
[[bench]]
group = "bandwidth"
bandwidth = 10
[[bench]]
group = "bandwidth"
bandwidth = 50
[[bench]]
group = "bandwidth"
bandwidth = 100
[[bench]]
group = "bandwidth"
bandwidth = 250
[[bench]]
group = "bandwidth"
bandwidth = 1000

View File

@@ -0,0 +1,10 @@
#!/bin/sh
# Abort on the first failing command so a failed cd or build never results in
# copying a stale binary into bin/.
set -e

# Ensure the script runs in the folder that contains this script
cd "$(dirname "$0")"

cargo build --package tlsn-harness-runner

mkdir -p bin
cp ../../target/debug/tlsn-harness-runner bin/runner

15
crates/harness/build.sh Executable file
View File

@@ -0,0 +1,15 @@
#!/bin/sh
# Abort on the first failing command so a failed cd or build never results in
# copying stale binaries into bin/.
set -e

# Ensure the script runs in the folder that contains this script
cd "$(dirname "$0")"

cargo build --release --package tlsn-harness-runner --package tlsn-harness-executor --package tlsn-server-fixture

mkdir -p bin
cp ../../target/release/tlsn-harness-runner bin/runner
cp ../../target/release/tlsn-harness-executor-native bin/executor-native
cp ../../target/release/tlsn-server-fixture bin/server-fixture
cp ../../target/release/tlsn-harness-wasm-server bin/wasm-server

./build.wasm.sh

7
crates/harness/build.wasm.sh Executable file
View File

@@ -0,0 +1,7 @@
#!/bin/sh
# Abort on failure so a broken wasm build is reported instead of ignored.
set -e

# Ensure the script runs in the folder that contains this script
cd "$(dirname "$0")"

rustup run nightly \
    wasm-pack build executor --target web --no-pack --out-dir=../static/generated -- -Zbuild-std=panic_abort,std

View File

@@ -0,0 +1,15 @@
[package]
name = "tlsn-harness-core"
version = "0.1.0"
edition = "2024"
publish = false
[lib]
name = "harness_core"
[dependencies]
bon = { workspace = true }
serde = { workspace = true, features = ["derive"] }
thiserror = { workspace = true }
enum-try-as-inner = { workspace = true }
ipnet = { workspace = true, features = ["serde"] }

View File

@@ -0,0 +1,255 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
// Fallback values used by `BenchItem::into_bench` for fields left unset in
// the bench config.
pub const DEFAULT_PROTOCOL_LATENCY: usize = 50;
pub const DEFAULT_APP_LATENCY: usize = 50;
pub const DEFAULT_BANDWIDTH: usize = 1000;
pub const DEFAULT_UPLOAD_SIZE: usize = 1024;
pub const DEFAULT_DOWNLOAD_SIZE: usize = 4096;
pub const DEFAULT_DEFER_DECRYPTION: bool = true;
pub const DEFAULT_MEMORY_PROFILE: bool = false;

/// Bench configuration used for the warm-up run: minimal latency, otherwise
/// default-sized payloads.
pub const WARM_UP_BENCH: Bench = Bench {
    group: None,
    name: None,
    protocol_latency: 1,
    app_latency: 1,
    bandwidth: 1000,
    upload_size: 1024,
    download_size: 4096,
    defer_decryption: true,
    memory_profile: false,
};
/// Top-level structure of the parsed bench TOML file: the `[[group]]` and
/// `[[bench]]` tables.
#[derive(Deserialize)]
pub struct BenchItems {
    pub group: Vec<BenchGroupItem>,
    pub bench: Vec<BenchItem>,
}
impl BenchItems {
    /// Expands the parsed bench items into concrete `Bench` runs.
    ///
    /// Group defaults are applied to every item that references a group, each
    /// bench is repeated for its sample count (`samples` is the fallback, or
    /// applies to all benches when `samples_override` is set), and the result
    /// is sorted by group and name.
    ///
    /// # Panics
    ///
    /// Panics if a bench references a group that is not defined.
    pub fn to_benches(&self, samples: usize, samples_override: bool) -> Vec<Bench> {
        let group: HashMap<String, BenchGroupItem> = HashMap::from_iter(
            self.group
                .iter()
                .cloned()
                .map(|group| (group.name.clone(), group)),
        );

        let mut benches = Vec::new();
        for mut bench in self.bench.clone() {
            if let Some(group_name) = &bench.group {
                // `expect` does not interpolate `{group_name}`, so panic
                // explicitly to include the offending name in the message.
                let group = group
                    .get(group_name)
                    .unwrap_or_else(|| panic!("bench group should be defined: {group_name}"));
                bench.apply_group(group);
            }

            // Per-bench sample counts apply unless globally overridden.
            let count = if samples_override {
                samples
            } else {
                bench.samples.unwrap_or(samples)
            };

            for _ in 0..count {
                benches.push(bench.into_bench());
            }
        }

        benches.sort_by_key(|bench| (bench.group.clone(), bench.name.clone()));

        benches
    }
}
/// A `[[group]]` entry in the bench TOML: named defaults shared by all
/// benches referencing the group.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchGroupItem {
    pub name: String,
    pub samples: Option<usize>,
    // Applied to the bench's `protocol_latency`.
    pub latency: Option<usize>,
    pub bandwidth: Option<usize>,
    #[serde(rename = "upload-size")]
    pub upload_size: Option<usize>,
    #[serde(rename = "download-size")]
    pub download_size: Option<usize>,
    #[serde(rename = "defer-decryption")]
    pub defer_decryption: Option<bool>,
    #[serde(rename = "memory-profile")]
    pub memory_profile: Option<bool>,
}
/// A `[[bench]]` entry in the bench TOML. Unset fields fall back to the
/// referenced group's values and then to the crate-wide defaults.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchItem {
    pub group: Option<String>,
    pub name: Option<String>,
    pub samples: Option<usize>,
    pub protocol_latency: Option<usize>,
    pub app_latency: Option<usize>,
    pub bandwidth: Option<usize>,
    #[serde(rename = "upload-size")]
    pub upload_size: Option<usize>,
    #[serde(rename = "download-size")]
    pub download_size: Option<usize>,
    #[serde(rename = "defer-decryption")]
    pub defer_decryption: Option<bool>,
    #[serde(rename = "memory-profile")]
    pub memory_profile: Option<bool>,
}
impl BenchItem {
    /// Fills any unset fields from the group's settings; fields the bench
    /// already specifies are kept as-is.
    pub fn apply_group(&mut self, group: &BenchGroupItem) {
        self.samples = self.samples.or(group.samples);
        self.protocol_latency = self.protocol_latency.or(group.latency);
        self.bandwidth = self.bandwidth.or(group.bandwidth);
        self.upload_size = self.upload_size.or(group.upload_size);
        self.download_size = self.download_size.or(group.download_size);
        self.defer_decryption = self.defer_decryption.or(group.defer_decryption);
        self.memory_profile = self.memory_profile.or(group.memory_profile);
    }

    /// Builds a concrete `Bench`, substituting the global default for any
    /// field still unset.
    pub fn into_bench(&self) -> Bench {
        Bench {
            group: self.group.clone(),
            name: self.name.clone(),
            protocol_latency: self.protocol_latency.unwrap_or(DEFAULT_PROTOCOL_LATENCY),
            app_latency: self.app_latency.unwrap_or(DEFAULT_APP_LATENCY),
            bandwidth: self.bandwidth.unwrap_or(DEFAULT_BANDWIDTH),
            upload_size: self.upload_size.unwrap_or(DEFAULT_UPLOAD_SIZE),
            download_size: self.download_size.unwrap_or(DEFAULT_DOWNLOAD_SIZE),
            defer_decryption: self.defer_decryption.unwrap_or(DEFAULT_DEFER_DECRYPTION),
            memory_profile: self.memory_profile.unwrap_or(DEFAULT_MEMORY_PROFILE),
        }
    }
}
/// Fully-resolved configuration for a single benchmark run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Bench {
    /// Group this bench belongs to, if any.
    pub group: Option<String>,
    /// Bench name, if any.
    pub name: Option<String>,
    /// Latency of the protocol link.
    pub protocol_latency: usize,
    /// Latency of the app-server link.
    pub app_latency: usize,
    /// Bandwidth of the link.
    pub bandwidth: usize,
    /// Bytes uploaded to the app server.
    #[serde(rename = "upload-size")]
    pub upload_size: usize,
    /// Bytes downloaded from the app server.
    #[serde(rename = "download-size")]
    pub download_size: usize,
    /// Whether decryption is deferred until after the connection.
    #[serde(rename = "defer-decryption")]
    pub defer_decryption: bool,
    /// Whether a memory profile is collected.
    #[serde(rename = "memory-profile")]
    pub memory_profile: bool,
}
/// Output of a benchmark run, by role.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum BenchOutput {
    /// Prover-side result with collected metrics.
    Prover { metrics: ProverMetrics },
    /// Verifier-side result (no metrics collected).
    Verifier,
}

/// Metrics collected on the prover side of a benchmark.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProverMetrics {
    /// Time taken to preprocess the connection in milliseconds.
    pub time_preprocess: u64,
    /// TLS connection online time in milliseconds.
    pub time_online: u64,
    /// Total runtime of the benchmark in milliseconds.
    pub time_total: u64,
    /// Total amount of data uploaded to the verifier in bytes during
    /// preprocessing.
    pub uploaded_preprocess: u64,
    /// Total amount of data downloaded from the verifier in bytes during
    /// preprocessing.
    pub downloaded_preprocess: u64,
    /// Total amount of data uploaded to the verifier in bytes during online
    /// phase.
    pub uploaded_online: u64,
    /// Total amount of data downloaded from the verifier in bytes during online
    /// phase.
    pub downloaded_online: u64,
    /// Total amount of data uploaded to the verifier in bytes.
    pub uploaded_total: u64,
    /// Total amount of data downloaded from the verifier in bytes.
    pub downloaded_total: u64,
    /// Peak heap memory usage in bytes.
    pub heap_max_bytes: Option<usize>,
}
/// A single flattened benchmark record: the configuration that was run
/// joined with the prover's measured metrics, suitable for serialization
/// into result files.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Measurement {
    /// Group of the bench, if any.
    pub group: Option<String>,
    /// Name of the bench, if any.
    pub name: Option<String>,
    /// Protocol-link latency that was configured.
    pub latency: usize,
    /// Bandwidth that was configured.
    pub bandwidth: usize,
    /// Configured upload size in bytes.
    pub upload_size: usize,
    /// Configured download size in bytes.
    pub download_size: usize,
    /// Whether decryption was deferred.
    pub defer_decryption: bool,
    /// Time taken to preprocess the connection in milliseconds.
    pub time_preprocess: u64,
    /// TLS connection online time in milliseconds.
    pub time_online: u64,
    /// Total runtime of the benchmark in milliseconds.
    pub time_total: u64,
    /// Total amount of data uploaded to the verifier in bytes during
    /// preprocessing.
    pub uploaded_preprocess: u64,
    /// Total amount of data downloaded from the verifier in bytes during
    /// preprocessing.
    pub downloaded_preprocess: u64,
    /// Total amount of data uploaded to the verifier in bytes during online
    /// phase.
    pub uploaded_online: u64,
    /// Total amount of data downloaded from the verifier in bytes during online
    /// phase.
    pub downloaded_online: u64,
    /// Total amount of data uploaded to the verifier in bytes.
    pub uploaded_total: u64,
    /// Total amount of data downloaded from the verifier in bytes.
    pub downloaded_total: u64,
    /// Peak heap memory usage in bytes.
    pub heap_max_bytes: Option<usize>,
}
impl Measurement {
pub fn new(config: Bench, metrics: ProverMetrics) -> Self {
Self {
group: config.group,
name: config.name,
latency: config.protocol_latency,
bandwidth: config.bandwidth,
upload_size: config.upload_size,
download_size: config.download_size,
defer_decryption: config.defer_decryption,
time_preprocess: metrics.time_preprocess,
time_online: metrics.time_online,
time_total: metrics.time_total,
uploaded_preprocess: metrics.uploaded_preprocess,
downloaded_preprocess: metrics.downloaded_preprocess,
uploaded_online: metrics.uploaded_online,
downloaded_online: metrics.downloaded_online,
uploaded_total: metrics.uploaded_total,
downloaded_total: metrics.downloaded_total,
heap_max_bytes: metrics.heap_max_bytes,
}
}
}

View File

@@ -0,0 +1,78 @@
pub mod bench;
pub mod network;
pub mod rpc;
pub mod test;
use std::fmt::{self, Display};
use serde::{Deserialize, Serialize};
use crate::network::NetworkConfig;
// Network shaping defaults used when running tests.
// NOTE(review): units are not stated in this file — presumably mbps for
// bandwidth and ms for delay; confirm against the runner's shaping setup.
pub const TEST_PROTO_BANDWIDTH: usize = 1000;
pub const TEST_PROTO_DELAY: usize = 10;
pub const TEST_APP_BANDWIDTH: usize = 1000;
pub const TEST_APP_DELAY: usize = 10;

/// Role an executor plays in the protocol.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum Role {
    Prover,
    Verifier,
}

/// Identifier distinguishing the two executor instances.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum Id {
    Zero,
    One,
}

/// Which side of the protocol connection an executor takes when
/// establishing I/O.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum IoMode {
    Client,
    Server,
}
impl TryFrom<&str> for IoMode {
    type Error = &'static str;

    /// Parses an io mode, case-insensitively.
    fn try_from(value: &str) -> Result<Self, Self::Error> {
        let lowered = value.to_lowercase();
        if lowered == "client" {
            Ok(IoMode::Client)
        } else if lowered == "server" {
            Ok(IoMode::Server)
        } else {
            Err("Invalid io mode")
        }
    }
}

impl Display for IoMode {
    /// Renders the mode as its lowercase wire form.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let label = match self {
            IoMode::Client => "client",
            IoMode::Server => "server",
        };
        write!(f, "{label}")
    }
}
/// Configuration for an executor instance: which instance it is, which
/// side of the protocol connection it takes, and the network layout.
#[derive(Debug, Clone, bon::Builder, Serialize, Deserialize)]
pub struct ExecutorConfig {
    // Which of the two executor instances this is.
    id: Id,
    // Whether this executor dials or accepts the protocol connection.
    io_mode: IoMode,
    // Addresses of all harness components.
    network_config: NetworkConfig,
}

impl ExecutorConfig {
    /// Returns the id.
    pub fn id(&self) -> &Id {
        &self.id
    }

    /// Returns the io mode.
    pub fn io_mode(&self) -> &IoMode {
        &self.io_mode
    }

    /// Returns the network config.
    pub fn network(&self) -> &NetworkConfig {
        &self.network_config
    }
}

View File

@@ -0,0 +1,60 @@
use std::net::Ipv4Addr;
use ipnet::Ipv4Net;
use serde::{Deserialize, Serialize};
// Well-known ports for each harness service. Several share port 8000;
// NOTE(review): this appears safe because each service binds a distinct
// address/namespace — confirm against the runner's netns setup.
pub const PORT_PROTO: u16 = 8000;
pub const PORT_APP_SERVER: u16 = 8000;
pub const PORT_PROXY: u16 = 8000;
pub const PORT_WASM_SERVER: u16 = 8080;
pub const PORT_RPC: u16 = 8000;
pub const PORT_BROWSER: u16 = 8001;

// Network namespace names.
pub const NS_0: &str = "tlsn-ns0";
pub const NS_1: &str = "tlsn-ns1";
pub const NS_APP: &str = "tlsn-nsapp";

// Bridge and veth interface names.
pub const BRIDGE: &str = "tlsn-br";
pub const VETH_PROTO_0: &str = "tlsn-vethp0";
pub const VETH_PROTO_1: &str = "tlsn-vethp1";
pub const VETH_RPC_0: &str = "tlsn-vethr0";
pub const VETH_RPC_1: &str = "tlsn-vethr1";
pub const VETH_PROTO_PROXY: &str = "tlsn-vethppx";
pub const VETH_APP_PROXY: &str = "tlsn-vethapx";
pub const VETH_APP: &str = "tlsn-vethapp";
pub const VETH_APP_0: &str = "tlsn-vethapp0";
pub const VETH_APP_1: &str = "tlsn-vethapp1";

/// Address layout for all harness components, allocated from one subnet.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NetworkConfig {
    /// Subnet all addresses are drawn from.
    pub subnet: Ipv4Net,
    /// Wasm static-file server (bound to localhost).
    pub wasm: (Ipv4Addr, u16),
    /// Host-side address.
    pub host: Ipv4Addr,
    /// Websocket proxy for the protocol connection.
    pub proto_proxy: (Ipv4Addr, u16),
    /// Websocket proxy for the app-server connection.
    pub app_proxy: (Ipv4Addr, u16),
    /// RPC endpoint of executor zero.
    pub rpc_0: (Ipv4Addr, u16),
    /// RPC endpoint of executor one.
    pub rpc_1: (Ipv4Addr, u16),
    /// Protocol endpoint of executor zero.
    pub proto_0: (Ipv4Addr, u16),
    /// Protocol endpoint of executor one.
    pub proto_1: (Ipv4Addr, u16),
    /// App (fixture) server endpoint.
    pub app: (Ipv4Addr, u16),
    /// App-side address of executor zero.
    pub app_0: Ipv4Addr,
    /// App-side address of executor one.
    pub app_1: Ipv4Addr,
}
impl NetworkConfig {
    /// Allocates addresses for every harness component from `subnet`.
    ///
    /// Addresses are taken from the subnet's host iterator in a fixed
    /// order; the wasm server is always bound to localhost.
    ///
    /// # Panics
    ///
    /// Panics with a descriptive message if the subnet does not contain
    /// enough host addresses (ten are required).
    pub fn new(subnet: Ipv4Net) -> Self {
        let mut hosts = subnet.hosts();
        // Previously a bare `unwrap()`: a too-small subnet panicked with no
        // indication of which allocation failed.
        let mut next = |what: &str| -> Ipv4Addr {
            hosts
                .next()
                .unwrap_or_else(|| panic!("subnet {subnet} has no free address for {what}"))
        };

        Self {
            subnet,
            host: next("host"),
            wasm: (Ipv4Addr::new(127, 0, 0, 1), PORT_WASM_SERVER),
            proto_proxy: (next("proto_proxy"), PORT_PROXY),
            app_proxy: (next("app_proxy"), PORT_PROXY),
            rpc_0: (next("rpc_0"), PORT_RPC),
            rpc_1: (next("rpc_1"), PORT_RPC),
            proto_0: (next("proto_0"), PORT_PROTO),
            proto_1: (next("proto_1"), PORT_PROTO),
            app: (next("app"), PORT_APP_SERVER),
            app_0: next("app_0"),
            app_1: next("app_1"),
        }
    }
}

View File

@@ -0,0 +1,69 @@
use enum_try_as_inner::EnumTryAsInner;
use serde::{Deserialize, Serialize};
use crate::{
Role,
bench::{Bench, BenchOutput},
test::TestOutput,
};
/// Convenience alias defaulting the error type to [`RpcError`].
pub type Result<T, E = RpcError> = std::result::Result<T, E>;

/// Command sent from the runner to an executor.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Cmd {
    /// List the names of all registered tests.
    GetTests,
    /// Run one test.
    Test(TestCmd),
    /// Run one benchmark.
    Bench(BenchCmd),
}

/// Output of a processed command.
#[derive(Debug, Clone, EnumTryAsInner, Serialize, Deserialize)]
pub enum CmdOutput {
    /// No output.
    Empty,
    /// Registered test names.
    GetTests(Vec<String>),
    /// Result of a test run.
    Test(TestOutput),
    /// Result of a benchmark run.
    Bench(BenchOutput),
    /// The command failed, optionally with a reason.
    Fail { reason: Option<String> },
}

/// Command to run the named test in the given role.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TestCmd {
    pub name: String,
    pub role: Role,
}

/// Command to run a benchmark with the given config in the given role.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchCmd {
    pub config: Bench,
    pub role: Role,
}

/// Error returned over the RPC channel.
#[derive(Debug, thiserror::Error, Serialize, Deserialize)]
#[error("rpc error: {reason}")]
pub struct RpcError {
    reason: String,
}
impl RpcError {
    /// Creates an error from anything stringifiable.
    pub fn new(reason: impl ToString) -> Self {
        let reason = reason.to_string();
        Self { reason }
    }

    /// The reason for the error.
    pub fn reason(&self) -> &str {
        self.reason.as_str()
    }
}
impl From<CmdOutputError> for RpcError {
    /// Maps a variant-mismatch error into an `RpcError` describing the
    /// expected vs. actual command output.
    fn from(value: CmdOutputError) -> Self {
        let reason = format!(
            "unexpected command output: expected {}, got {}",
            value.expected(),
            value.actual()
        );
        RpcError { reason }
    }
}

View File

@@ -0,0 +1,20 @@
use serde::{Deserialize, Serialize};
/// Outcome of a single test run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum TestStatus {
    /// The test completed successfully.
    Passed,
    /// The test failed, optionally with a reason (e.g. a panic message).
    Failed { reason: Option<String> },
    /// The test did not complete within the timeout.
    TimedOut,
}

impl TestStatus {
    /// Returns `true` if the test passed.
    pub fn is_passed(&self) -> bool {
        matches!(self, TestStatus::Passed)
    }
}

/// Output of a test command.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TestOutput {
    /// Final status of the test run.
    pub status: TestStatus,
}

View File

@@ -1,9 +1,3 @@
[build]
target = "wasm32-unknown-unknown"
[unstable]
build-std = ["panic_abort", "std"]
[target.wasm32-unknown-unknown]
rustflags = [
"-C",

View File

@@ -0,0 +1,49 @@
# Harness executor: runs registered tests and benchmarks, both natively
# and compiled to wasm for in-browser execution.
[package]
name = "tlsn-harness-executor"
version = "0.1.0"
edition = "2024"
publish = false

# cdylib for the wasm build, rlib so the native binary can link it.
[lib]
name = "harness_executor"
crate-type = ["cdylib", "rlib"]

[dependencies]
tlsn-harness-core = { workspace = true }
tlsn-prover = { workspace = true }
tlsn-common = { workspace = true }
tlsn-core = { workspace = true }
tlsn-verifier = { workspace = true }
tlsn-tls-core = { workspace = true }
tlsn-server-fixture-certs = { workspace = true }

inventory = { workspace = true }
pin-project-lite = { workspace = true }
futures = { workspace = true }
serio = { workspace = true }
serde_json = { workspace = true }
anyhow = { workspace = true }
web-time = { workspace = true }
http-body-util = { workspace = true }
hyper = { workspace = true, features = ["client", "http1"] }

# Native-only: tokio runtime and codec utilities.
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
tokio = { workspace = true, features = ["full"] }
tokio-util = { workspace = true, features = ["compat"] }

# Wasm-only: browser bindings and websocket transport.
[target.'cfg(target_arch = "wasm32")'.dependencies]
wasm-bindgen = { workspace = true }
tlsn-wasm = { workspace = true }
js-sys = { workspace = true }
wasm-bindgen-futures = { workspace = true }
web-spawn = { workspace = true, features = ["no-bundler"] }
ws_stream_wasm = { workspace = true }
gloo-utils = { version = "0.2", features = ["serde"] }
getrandom = { version = "0.2", features = ["js"] }
getrandom_03 = { package = "getrandom", version = "0.3", features = [
    "wasm_js",
] }

# Native entry point serving the runner's RPC connection.
[[bin]]
name = "tlsn-harness-executor-native"
path = "src/bin/native.rs"

View File

@@ -0,0 +1,31 @@
use std::{env, fs};
/// Build script: generates `tests.rs` in `OUT_DIR`, declaring one module
/// per `.rs` file found in the crate's `test_plugins` directory so the
/// test plugins get compiled into the executor.
fn main() {
    let manifest_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
    let out_dir = env::var("OUT_DIR").unwrap();
    let plugin_dir = format!("{manifest_dir}/test_plugins");
    let out_path = format!("{out_dir}/tests.rs");

    // Re-run the build script when plugins are added, removed or changed.
    println!("cargo:rerun-if-changed={plugin_dir}");

    // Collect plugin file names first so the generated output is
    // deterministic (`read_dir` order is platform-dependent).
    let mut file_names: Vec<String> = fs::read_dir(&plugin_dir)
        .unwrap_or_else(|e| panic!("failed to read plugin dir {plugin_dir}: {e}"))
        .map(|entry| entry.unwrap())
        // Skip directories and non-Rust files. The previous
        // `extension().unwrap()` panicked on extensionless files such as
        // a README.
        .filter(|entry| {
            entry.file_type().unwrap().is_file()
                && entry.path().extension().map_or(false, |ext| ext == "rs")
        })
        .map(|entry| entry.file_name().to_string_lossy().into_owned())
        .collect();
    file_names.sort();

    // Emit an import for each plugin, pointing at its absolute path.
    let mut content = String::new();
    for file_name in file_names {
        let module_name = file_name.strip_suffix(".rs").unwrap();
        content.push_str(&format!(
            "#[path = \"{plugin_dir}/{file_name}\"]\nmod {module_name};\n",
        ));
    }

    fs::write(out_path, content).expect("Unable to write to file");
}

View File

@@ -0,0 +1,11 @@
//! Benchmark implementations: metered I/O plus the prover and verifier
//! sides of a bench run.

mod io;
mod prover;
mod verifier;

/// Number of bytes to pad the receive configuration with due to HTTP structure
/// overhead.
const RECV_PADDING: usize = 256;

pub(crate) use io::Meter;
pub(crate) use prover::bench_prover;
pub(crate) use verifier::bench_verifier;

View File

@@ -0,0 +1,78 @@
use std::{
io::Result,
pin::Pin,
sync::{
Arc,
atomic::{AtomicU64, Ordering},
},
task::{Context, Poll},
};
use futures::{AsyncRead, AsyncWrite};
use pin_project_lite::pin_project;
pin_project! {
    // Byte-counting wrapper around an I/O object: tallies bytes written
    // and read into shared atomic counters that can be sampled while the
    // wrapper is in use elsewhere.
    pub(crate) struct Meter<Io> {
        sent: Arc<AtomicU64>,
        recv: Arc<AtomicU64>,
        #[pin] io: Io,
    }
}
impl<Io> Meter<Io> {
    /// Wraps `io`, starting both byte counters at zero.
    pub(crate) fn new(io: Io) -> Self {
        let sent = Arc::new(AtomicU64::new(0));
        let recv = Arc::new(AtomicU64::new(0));
        Self { sent, recv, io }
    }

    /// Handle to the running count of bytes written.
    pub(crate) fn sent(&self) -> Arc<AtomicU64> {
        Arc::clone(&self.sent)
    }

    /// Handle to the running count of bytes read.
    pub(crate) fn recv(&self) -> Arc<AtomicU64> {
        Arc::clone(&self.recv)
    }
}
impl<Io> AsyncWrite for Meter<Io>
where
    Io: AsyncWrite,
{
    fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll<Result<usize>> {
        let this = self.project();
        let poll = this.io.poll_write(cx, buf);
        // Count only bytes the inner writer actually accepted.
        if let Poll::Ready(Ok(n)) = &poll {
            this.sent.fetch_add(*n as u64, Ordering::Relaxed);
        }
        poll
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
        self.project().io.poll_flush(cx)
    }

    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<()>> {
        self.project().io.poll_close(cx)
    }
}

impl<Io> AsyncRead for Meter<Io>
where
    Io: AsyncRead,
{
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut [u8],
    ) -> Poll<Result<usize>> {
        let this = self.project();
        let poll = this.io.poll_read(cx, buf);
        // Count only bytes the inner reader actually produced.
        if let Poll::Ready(Ok(n)) = &poll {
            this.recv.fetch_add(*n as u64, Ordering::Relaxed);
        }
        poll
    }
}

View File

@@ -0,0 +1,116 @@
use std::sync::atomic::Ordering;
use anyhow::Result;
use futures::{AsyncReadExt, AsyncWriteExt, TryFutureExt};
use harness_core::bench::{Bench, ProverMetrics};
use tls_core::verify::WebPkiVerifier;
use tlsn_common::config::ProtocolConfig;
use tlsn_core::{CryptoProvider, ProveConfig};
use tlsn_prover::{Prover, ProverConfig};
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
use crate::{
IoProvider,
bench::{Meter, RECV_PADDING},
};
/// Runs the prover side of a benchmark, returning the collected metrics.
///
/// Opens a metered connection to the verifier, sets up the prover,
/// performs a single HTTP exchange with the app server sized according to
/// `config`, then proves the full transcript.
pub async fn bench_prover(provider: &IoProvider, config: &Bench) -> Result<ProverMetrics> {
    // Wrap the verifier connection so upload/download bytes can be measured.
    let verifier_io = Meter::new(provider.provide_proto_io().await?);
    let sent = verifier_io.sent();
    let recv = verifier_io.recv();

    let mut builder = ProtocolConfig::builder();
    builder.max_sent_data(config.upload_size);
    builder.defer_decryption_from_start(config.defer_decryption);
    if !config.defer_decryption {
        // Online decryption needs the receive budget available during the
        // connection itself.
        builder.max_recv_data_online(config.download_size + RECV_PADDING);
    }
    builder.max_recv_data(config.download_size + RECV_PADDING);

    let protocol_config = builder.build()?;

    // Trust only the fixture CA so the fixture server's certificate verifies.
    let mut root_store = tls_core::anchors::RootCertStore::empty();
    root_store
        .add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
        .unwrap();
    let crypto_provider = CryptoProvider {
        cert: WebPkiVerifier::new(root_store, None),
        ..Default::default()
    };

    let prover = Prover::new(
        ProverConfig::builder()
            .protocol_config(protocol_config)
            .server_name(SERVER_DOMAIN)
            .crypto_provider(crypto_provider)
            .build()?,
    );

    let time_start = web_time::Instant::now();

    // Preprocessing phase: protocol setup with the verifier.
    let prover = prover.setup(verifier_io).await?;

    let time_preprocess = time_start.elapsed().as_millis();
    let time_start_online = web_time::Instant::now();
    // Snapshot counters so the online phase can be reported as a delta.
    let uploaded_preprocess = sent.load(Ordering::Relaxed);
    let downloaded_preprocess = recv.load(Ordering::Relaxed);

    let (mut conn, prover_fut) = prover.connect(provider.provide_server_io().await?).await?;

    // Drive the HTTP exchange and the prover connection concurrently.
    let (_, mut prover) = futures::try_join!(
        async {
            // The `Data` header pads the request up to `upload_size` bytes.
            let request = format!(
                "GET /bytes?size={} HTTP/1.1\r\nConnection: close\r\nData: {}\r\n\r\n",
                config.download_size,
                // Subtract the 68 bytes already present in the request template.
                String::from_utf8(vec![0x42u8; config.upload_size.saturating_sub(68)])?,
            );

            conn.write_all(request.as_bytes()).await?;
            conn.close().await?;

            let mut response = Vec::new();
            conn.read_to_end(&mut response).await?;

            Ok(())
        },
        prover_fut.map_err(anyhow::Error::from)
    )?;

    let time_online = time_start_online.elapsed().as_millis();
    let uploaded_online = sent.load(Ordering::Relaxed) - uploaded_preprocess;
    let downloaded_online = recv.load(Ordering::Relaxed) - downloaded_preprocess;

    let (sent_len, recv_len) = prover.transcript().len();

    // Reveal the server identity and the entire transcript.
    let mut builder = ProveConfig::builder(prover.transcript());

    builder
        .server_identity()
        .reveal_sent(&(0..sent_len))?
        .reveal_recv(&(0..recv_len))?;

    let config = builder.build()?;

    prover.prove(&config).await?;
    prover.close().await?;

    let time_total = time_start.elapsed().as_millis();

    Ok(ProverMetrics {
        time_preprocess: time_preprocess as u64,
        time_online: time_online as u64,
        time_total: time_total as u64,
        uploaded_preprocess,
        downloaded_preprocess,
        uploaded_online,
        downloaded_online,
        uploaded_total: sent.load(Ordering::Relaxed),
        downloaded_total: recv.load(Ordering::Relaxed),
        heap_max_bytes: None,
    })
}

View File

@@ -0,0 +1,43 @@
use anyhow::Result;
use harness_core::bench::Bench;
use tls_core::verify::WebPkiVerifier;
use tlsn_common::config::ProtocolConfigValidator;
use tlsn_core::{CryptoProvider, VerifyConfig};
use tlsn_server_fixture_certs::CA_CERT_DER;
use tlsn_verifier::{Verifier, VerifierConfig};
use crate::{IoProvider, bench::RECV_PADDING};
/// Runs the verifier side of a benchmark.
///
/// Mirrors the prover's protocol limits, accepts the prover's connection,
/// verifies the session, and closes cleanly.
pub async fn bench_verifier(provider: &IoProvider, config: &Bench) -> Result<()> {
    let mut builder = ProtocolConfigValidator::builder();

    // Limits must admit what the prover configured, including the HTTP
    // overhead padding on the receive side.
    builder
        .max_sent_data(config.upload_size)
        .max_recv_data(config.download_size + RECV_PADDING);

    let protocol_config = builder.build()?;

    // Trust only the fixture CA, matching the prover's configuration.
    let mut root_store = tls_core::anchors::RootCertStore::empty();
    root_store
        .add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
        .unwrap();
    let crypto_provider = CryptoProvider {
        cert: WebPkiVerifier::new(root_store, None),
        ..Default::default()
    };

    let verifier = Verifier::new(
        VerifierConfig::builder()
            .protocol_config_validator(protocol_config)
            .crypto_provider(crypto_provider)
            .build()?,
    );

    let verifier = verifier.setup(provider.provide_proto_io().await?).await?;
    let mut verifier = verifier.run().await?;

    verifier.verify(&VerifyConfig::default()).await?;

    verifier.close().await?;

    Ok(())
}

View File

@@ -0,0 +1,29 @@
use harness_core::{ExecutorConfig, Id};
use harness_executor::Executor;
use serio::{Framed, SinkExt, StreamExt, codec::Bincode};
use tokio::net::TcpListener;
use tokio_util::codec::LengthDelimitedCodec;
#[tokio::main(flavor = "multi_thread")]
async fn main() -> anyhow::Result<()> {
    // The runner passes the executor configuration as JSON in `CONFIG`.
    let config = std::env::var("CONFIG").expect("CONFIG env var must be set");
    let config: ExecutorConfig = serde_json::from_str(&config)?;

    // Each executor id listens on its own RPC address.
    let rpc_addr = match config.id() {
        Id::Zero => config.network().rpc_0,
        Id::One => config.network().rpc_1,
    };

    // Serve a single runner connection, processing length-delimited,
    // bincode-encoded commands until the stream ends.
    let listener = TcpListener::bind(rpc_addr).await?;
    let (stream, _) = listener.accept().await?;
    let mut io = Framed::new(LengthDelimitedCodec::builder().new_framed(stream), Bincode);

    let executor = Executor::new(config);

    while let Some(cmd) = io.next().await.transpose()? {
        io.send(executor.process(cmd).await).await?;
    }

    Ok(())
}

View File

@@ -0,0 +1,93 @@
use core::slice;
use futures::{AsyncRead, AsyncWrite};
use std::{
pin::Pin,
task::{Context, Poll},
};
use pin_project_lite::pin_project;
/// Marker trait for async duplex byte streams usable by the harness,
/// blanket-implemented for every qualifying type.
pub trait Io: AsyncRead + AsyncWrite + Send + Unpin + 'static {}

impl<T> Io for T where T: AsyncRead + AsyncWrite + Send + Unpin + 'static {}
pin_project! {
    /// Adapter implementing hyper's I/O traits on top of a `futures`
    /// I/O object.
    #[derive(Debug)]
    pub(crate) struct FuturesIo<T> {
        #[pin]
        inner: T,
    }
}

impl<T> FuturesIo<T> {
    /// Create a new `FuturesIo` wrapping the given I/O object.
    ///
    /// # Safety
    ///
    /// This wrapper is only safe to use if the inner I/O object does not under
    /// any circumstance read from the buffer passed to `poll_read` in the
    /// `futures::AsyncRead` implementation.
    ///
    /// NOTE(review): despite the contract above this constructor is not an
    /// `unsafe fn`; callers are trusted to uphold the invariant.
    pub(crate) fn new(inner: T) -> Self {
        Self { inner }
    }
}
// hyper's write trait is satisfied by delegating each method directly to
// the `futures::AsyncWrite` implementation of the inner object.
impl<T> hyper::rt::Write for FuturesIo<T>
where
    T: futures::AsyncWrite + Unpin,
{
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, std::io::Error>> {
        self.project().inner.poll_write(cx, buf)
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
        self.project().inner.poll_flush(cx)
    }

    // hyper's `shutdown` maps to `futures`' `close`.
    fn poll_shutdown(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        self.project().inner.poll_close(cx)
    }

    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[std::io::IoSlice<'_>],
    ) -> Poll<Result<usize, std::io::Error>> {
        self.project().inner.poll_write_vectored(cx, bufs)
    }
}
// Adapted from https://github.com/hyperium/hyper-util/blob/99b77a5a6f75f24bc0bcb4ca74b5f26a07b19c80/src/rt/tokio.rs
// Bridges hyper's uninitialized read-buffer cursor to `futures`'
// plain `&mut [u8]` read interface.
impl<T> hyper::rt::Read for FuturesIo<T>
where
    T: futures::AsyncRead + Unpin,
{
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        mut buf: hyper::rt::ReadBufCursor<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        // Safety: buf_slice should only be written to, so it's safe to convert `&mut
        // [MaybeUninit<u8>]` to `&mut [u8]`.
        let buf_slice = unsafe {
            slice::from_raw_parts_mut(buf.as_mut().as_mut_ptr() as *mut u8, buf.as_mut().len())
        };

        let n = match futures::AsyncRead::poll_read(self.project().inner, cx, buf_slice) {
            Poll::Ready(Ok(n)) => n,
            // Pending or error: forward as-is.
            other => return other.map_ok(|_| ()),
        };

        // Safety: the inner reader initialized and wrote exactly `n` bytes
        // into the cursor's buffer.
        unsafe {
            buf.advance(n);
        }
        Poll::Ready(Ok(()))
    }
}

View File

@@ -0,0 +1,76 @@
mod bench;
mod io;
mod provider;
pub(crate) mod spawn;
pub mod test;
#[cfg(target_arch = "wasm32")]
mod wasm;
// Include the tests.rs file generated by the build script.
include!(concat!(env!("OUT_DIR"), "/tests.rs"));
pub use provider::IoProvider;
pub use spawn::spawn;
use harness_core::{
ExecutorConfig, Role,
bench::BenchOutput,
rpc::{BenchCmd, Cmd, CmdOutput, Result, RpcError, TestCmd},
test::{TestOutput, TestStatus},
};
use crate::bench::{bench_prover, bench_verifier};
/// Command executor: dispatches runner commands to registered tests and
/// to the benchmark implementations.
pub struct Executor {
    // Identity, io mode and network layout for this instance.
    config: ExecutorConfig,
}

impl Executor {
    /// Creates an executor with the given configuration.
    pub fn new(config: ExecutorConfig) -> Self {
        Self { config }
    }

    /// Processes a single runner command and returns its output.
    pub async fn process(&self, cmd: Cmd) -> Result<CmdOutput> {
        match cmd {
            // List all tests registered via the `test!` macro.
            Cmd::GetTests => Ok(CmdOutput::GetTests(test::collect_tests())),
            Cmd::Test(TestCmd { name, role }) => {
                let test = test::get_test(&name).ok_or(RpcError::new("test not found"))?;
                let provider =
                    IoProvider::new(*self.config.io_mode(), self.config.network().clone());
                // Run the side of the test matching our role. A test
                // failure surfaces as a panic, not as an `Err` here.
                let f = match role {
                    Role::Prover => test.prover,
                    Role::Verifier => test.verifier,
                };
                f(&provider).await;
                Ok(CmdOutput::Test(TestOutput {
                    status: TestStatus::Passed,
                }))
            }
            Cmd::Bench(BenchCmd { config, role }) => {
                let provider =
                    IoProvider::new(*self.config.io_mode(), self.config.network().clone());
                match role {
                    Role::Prover => {
                        let metrics = bench_prover(&provider, &config)
                            .await
                            .map_err(|e| RpcError::new(format!("prover bench failed: {}", e)))?;
                        Ok(CmdOutput::Bench(BenchOutput::Prover { metrics }))
                    }
                    Role::Verifier => {
                        bench_verifier(&provider, &config)
                            .await
                            .map_err(|e| RpcError::new(format!("verifier bench failed: {}", e)))?;
                        Ok(CmdOutput::Bench(BenchOutput::Verifier))
                    }
                }
            }
        }
    }
}

View File

@@ -0,0 +1,115 @@
#![allow(unused)]
use std::net::Ipv4Addr;
use harness_core::{IoMode, network::NetworkConfig};
// Connection-retry policy when dialing a peer that may not be up yet.
const MAX_RETRIES: usize = 50;
const RETRY_DELAY_MS: usize = 50;

/// Provides connections to the app server and to the protocol peer,
/// with a platform-specific implementation (TCP natively, websocket
/// proxies on wasm).
pub struct IoProvider {
    // Whether this side dials or accepts the protocol connection.
    mode: IoMode,
    // Addresses of all harness components.
    config: NetworkConfig,
}

impl IoProvider {
    /// Creates a new provider.
    pub(crate) fn new(mode: IoMode, network_config: NetworkConfig) -> Self {
        Self {
            mode,
            config: network_config,
        }
    }
}
#[cfg(not(target_arch = "wasm32"))]
mod native {
    use super::{IoProvider, MAX_RETRIES, RETRY_DELAY_MS};
    use crate::io::Io;
    use anyhow::Result;
    use harness_core::IoMode;
    use std::{io::ErrorKind, time::Duration};
    use tokio::net::{TcpListener, TcpStream};
    use tokio_util::compat::TokioAsyncReadCompatExt;

    impl IoProvider {
        /// Provides a connection to the server.
        pub async fn provide_server_io(&self) -> Result<impl Io> {
            TcpStream::connect(self.config.app)
                .await
                .map(|io| io.compat())
                .map_err(anyhow::Error::from)
        }

        /// Provides a connection to the peer.
        ///
        /// In client mode, dials the peer's protocol address, retrying on
        /// connection-refused while the peer starts up. In server mode,
        /// binds the protocol address and accepts a single connection.
        /// Nagle's algorithm is disabled in both modes.
        pub async fn provide_proto_io(&self) -> Result<impl Io> {
            match self.mode {
                IoMode::Client => {
                    // It might take a bit for the peer to start up, so we retry a few times.
                    let mut retries = 0;
                    loop {
                        match TcpStream::connect(self.config.proto_1)
                            .await
                            .inspect(|io| io.set_nodelay(true).unwrap())
                            .map(|io| io.compat())
                        {
                            Ok(io) => return Ok(io),
                            Err(e) if e.kind() == ErrorKind::ConnectionRefused => {
                                tokio::time::sleep(Duration::from_millis(RETRY_DELAY_MS as u64))
                                    .await;
                                retries += 1;
                                if retries > MAX_RETRIES {
                                    return Err(e.into());
                                }
                            }
                            // Any other error is immediately fatal.
                            Err(e) => return Err(e.into()),
                        }
                    }
                }
                IoMode::Server => {
                    let listener = TcpListener::bind(self.config.proto_1).await?;
                    let (io, _) = listener.accept().await?;
                    io.set_nodelay(true).unwrap();
                    Ok(io.compat())
                }
            }
        }
    }
}
#[cfg(target_arch = "wasm32")]
mod wasm {
    use super::IoProvider;
    use crate::io::Io;
    use anyhow::Result;

    impl IoProvider {
        /// Provides a connection to the server.
        ///
        /// The browser cannot open raw TCP sockets, so the connection is
        /// tunneled through a websocket-to-TCP proxy; `%3A` is the
        /// url-encoded `:` in the target address.
        pub async fn provide_server_io(&self) -> Result<impl Io> {
            let url = format!(
                "ws://{}:{}/tcp?addr={}%3A{}",
                &self.config.app_proxy.0,
                self.config.app_proxy.1,
                &self.config.app.0,
                self.config.app.1,
            );
            let (_, io) = ws_stream_wasm::WsMeta::connect(url, None).await?;
            Ok(io.into_io())
        }

        /// Provides a connection to the verifier.
        ///
        /// Tunnels to the peer's protocol address through the protocol
        /// proxy, like `provide_server_io`.
        pub async fn provide_proto_io(&self) -> Result<impl Io> {
            let url = format!(
                "ws://{}:{}/tcp?addr={}%3A{}",
                &self.config.proto_proxy.0,
                self.config.proto_proxy.1,
                &self.config.proto_1.0,
                self.config.proto_1.1,
            );
            let (_, io) = ws_stream_wasm::WsMeta::connect(url, None).await?;
            Ok(io.into_io())
        }
    }
}

View File

@@ -0,0 +1,28 @@
use anyhow::Context;
/// Spawns a future on the platform's executor and returns a future that
/// resolves to its output.
///
/// On wasm the task runs on the local spawner; on native it runs on the
/// tokio runtime. In both cases the result is relayed over a oneshot
/// channel, so if the task is dropped or panics (dropping the sender) the
/// returned future resolves to an error instead of propagating the panic.
pub fn spawn<F>(f: F) -> impl Future<Output = anyhow::Result<F::Output>> + Send
where
    F: Future + Send + 'static,
    F::Output: Send,
{
    #[cfg(target_arch = "wasm32")]
    {
        let (sender, receiver) = futures::channel::oneshot::channel();
        wasm_bindgen_futures::spawn_local(async move {
            _ = sender.send(f.await);
        });
        async move { receiver.await.context("future result was dropped") }
    }

    #[cfg(not(target_arch = "wasm32"))]
    {
        let (sender, receiver) = tokio::sync::oneshot::channel();
        tokio::spawn(async move {
            _ = sender.send(f.await);
        });
        async move { receiver.await.context("future result was dropped") }
    }
}

View File

@@ -0,0 +1,39 @@
use std::{future::Future, pin::Pin};
use crate::IoProvider;
/// Default timeout applied to a single test.
// NOTE(review): units are not stated here — presumably seconds; confirm
// against the runner.
pub const DEFAULT_TEST_TIMEOUT: u64 = 300;

/// Signature of a registered test entry point: borrows the harness I/O
/// provider and returns a boxed future that resolves when the test is done.
pub type Fn = for<'a> fn(&'a IoProvider) -> Pin<Box<dyn Future<Output = ()> + 'a>>;
/// Looks up a registered test by name.
pub(crate) fn get_test(name: &str) -> Option<&'static Test> {
    for test in inventory::iter::<Test> {
        if test.name == name {
            return Some(test);
        }
    }
    None
}

/// Returns the names of all registered tests.
pub(crate) fn collect_tests() -> Vec<String> {
    let mut names = Vec::new();
    for test in inventory::iter::<Test> {
        names.push(test.name.to_string());
    }
    names
}
/// A test registered with the harness via the `test!` macro: a name plus
/// the prover- and verifier-side entry points.
pub struct Test {
    /// Unique name used to select the test.
    pub name: &'static str,
    /// Prover-side entry point.
    pub prover: Fn,
    /// Verifier-side entry point.
    pub verifier: Fn,
}

// Gather all `Test` values submitted across the crate at link time.
inventory::collect!(Test);
/// Registers a harness test.
///
/// Takes a test name plus the prover and verifier async functions (each
/// `async fn(&IoProvider)`); the functions are boxed and submitted to the
/// `inventory` registry so the executor can discover them at runtime.
#[macro_export]
macro_rules! test {
    ($name:literal, $prover:ident, $verifier:ident) => {
        inventory::submit!($crate::test::Test {
            name: $name,
            prover: move |io| Box::pin($prover(io)) as _,
            verifier: move |io| Box::pin($verifier(io)) as _,
        });
    };
}

View File

@@ -0,0 +1,107 @@
use std::{cell::RefCell, panic::PanicHookInfo};
use gloo_utils::format::JsValueSerdeExt;
use js_sys::Function;
use wasm_bindgen::prelude::*;
use harness_core::{
ExecutorConfig,
rpc::{Cmd, CmdOutput, RpcError},
test::{TestOutput, TestStatus},
};
use crate::Executor;
pub use tlsn_wasm::*;
unsafe extern "C" {
    // Provided by the wasm toolchain: runs static constructors.
    fn __wasm_call_ctors();
}

#[wasm_bindgen(start)]
pub fn main() {
    // Run C-style static constructors once at module instantiation.
    unsafe { __wasm_call_ctors() };
}

thread_local! {
    // Most recently registered JS panic callback; invoked from the panic
    // hook so the host can observe wasm-side panics.
    static PANIC_CB: RefCell<Option<Function>> = RefCell::new(None);
}
/// Wasm-bindgen wrapper exposing the executor to JavaScript.
#[wasm_bindgen]
pub struct WasmExecutor(Executor);

#[wasm_bindgen]
impl WasmExecutor {
    /// Builds an executor from a JS-provided `ExecutorConfig` value.
    #[wasm_bindgen(constructor)]
    pub fn new(config: JsValue) -> Self {
        let config: ExecutorConfig = config.into_serde().unwrap();
        Self(Executor::new(config))
    }

    /// Processes one command, reporting any wasm panic through
    /// `panic_callback`.
    ///
    /// Panics cannot unwind across the wasm boundary, so a panic hook is
    /// installed that converts the panic info into a `CmdOutput` and
    /// hands it to the callback instead.
    pub async fn call(
        &mut self,
        cmd: JsValue,
        panic_callback: &Function,
    ) -> Result<JsValue, JsError> {
        let cmd: Cmd = cmd.into_serde()?;

        // Remember the latest callback for use inside the panic hook.
        PANIC_CB.with(|callback| {
            *callback.borrow_mut() = Some(panic_callback.clone());
        });

        // Builds the output reported on panic: a failed test status for
        // test commands, a generic failure for everything else.
        let panic_msg = {
            let cmd = cmd.clone();
            move |info: &PanicHookInfo<'_>| {
                // Panic payloads are commonly `&str` or `String`; anything
                // else is reported without a message.
                let payload = if let Some(s) = info.payload().downcast_ref::<&str>() {
                    Some(s.to_string())
                } else if let Some(s) = info.payload().downcast_ref::<String>() {
                    Some(s.clone())
                } else {
                    None
                };

                let reason = match (info.location(), payload) {
                    (Some(location), Some(payload)) => Some(format!(
                        "\nwasm executor panicked at {}:{}:{}:\n{}",
                        location.file(),
                        location.line(),
                        location.column(),
                        payload
                    )),
                    (Some(location), None) => Some(format!(
                        "\nwasm executor panicked at {}:{}:{}",
                        location.file(),
                        location.line(),
                        location.column()
                    )),
                    (None, Some(payload)) => Some(payload),
                    _ => None,
                };

                let output: Result<CmdOutput, RpcError> = match cmd {
                    Cmd::Test(_) => Ok(CmdOutput::Test(TestOutput {
                        status: TestStatus::Failed { reason },
                    })),
                    _ => Ok(CmdOutput::Fail { reason }),
                };

                output
            }
        };

        // Route panics to the registered JS callback as a serialized
        // command output.
        std::panic::set_hook(Box::new(move |info| {
            PANIC_CB.with(|callback| {
                if let Some(callback) = callback.borrow().as_ref() {
                    let _ = callback.call1(
                        &JsValue::NULL,
                        &JsValue::from_serde(&panic_msg(info)).unwrap(),
                    );
                }
            });
        }));

        let output = self.0.process(cmd).await;

        Ok(JsValue::from_serde(&output)?)
    }
}

View File

@@ -1,75 +1,33 @@
use tls_core::{anchors::RootCertStore, verify::WebPkiVerifier};
use tlsn_common::config::{ProtocolConfig, ProtocolConfigValidator};
use tlsn_core::{
hash::HashAlgId,
transcript::{Idx, TranscriptCommitConfig, TranscriptCommitment, TranscriptCommitmentKind},
CryptoProvider, ProveConfig, VerifierOutput, VerifyConfig,
hash::HashAlgId,
transcript::{TranscriptCommitConfig, TranscriptCommitment, TranscriptCommitmentKind},
};
use tlsn_prover::{Prover, ProverConfig};
use tlsn_server_fixture::bind;
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
use tlsn_verifier::{Verifier, VerifierConfig};
use http_body_util::{BodyExt as _, Empty};
use hyper::{body::Bytes, Request, StatusCode};
use hyper_util::rt::TokioIo;
use hyper::{Request, StatusCode, body::Bytes};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
use tracing::instrument;
use crate::{IoProvider, io::FuturesIo, spawn};
// Maximum number of bytes that can be sent from prover to server
const MAX_SENT_DATA: usize = 1 << 11;
// Maximum number of bytes that can be received by prover from server
const MAX_RECV_DATA: usize = 1 << 11;
#[tokio::test]
#[ignore]
async fn verify() {
tracing_subscriber::fmt::init();
let (socket_0, socket_1) = tokio::io::duplex(1 << 23);
let (
_,
VerifierOutput {
server_name,
transcript,
transcript_commitments,
},
) = tokio::join!(prover(socket_0), verifier(socket_1));
let server_name = server_name.unwrap();
let transcript = transcript.unwrap();
assert_eq!(
transcript.sent_authed(),
&Idx::new(0..transcript.len_sent() - 1)
);
assert_eq!(
transcript.received_authed(),
&Idx::new(2..transcript.len_received())
);
assert_eq!(server_name.as_str(), SERVER_DOMAIN);
assert!(transcript_commitments
.iter()
.any(|commitment| matches!(commitment, TranscriptCommitment::Hash { .. })));
println!("{:?}", transcript_commitments);
}
#[instrument(skip(notary_socket))]
async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(notary_socket: T) {
let (client_socket, server_socket) = tokio::io::duplex(1 << 16);
let server_task = tokio::spawn(bind(server_socket.compat()));
crate::test!("basic", prover, verifier);
async fn prover(provider: &IoProvider) {
let mut root_store = RootCertStore::empty();
root_store
.add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
.unwrap();
let provider = CryptoProvider {
let crypto_provider = CryptoProvider {
cert: WebPkiVerifier::new(root_store, None),
..Default::default()
};
@@ -85,24 +43,27 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(notary_socke
.build()
.unwrap(),
)
.crypto_provider(provider)
.crypto_provider(crypto_provider)
.build()
.unwrap(),
)
.setup(notary_socket.compat())
.setup(provider.provide_proto_io().await.unwrap())
.await
.unwrap();
let (tls_connection, prover_fut) = prover.connect(client_socket.compat()).await.unwrap();
let (tls_connection, prover_fut) = prover
.connect(provider.provide_server_io().await.unwrap())
.await
.unwrap();
let prover_task = tokio::spawn(prover_fut);
let prover_task = spawn(prover_fut);
let (mut request_sender, connection) =
hyper::client::conn::http1::handshake(TokioIo::new(tls_connection.compat()))
hyper::client::conn::http1::handshake(FuturesIo::new(tls_connection))
.await
.unwrap();
tokio::spawn(connection);
_ = spawn(connection);
let request = Request::builder()
.uri(format!(
@@ -122,8 +83,6 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(notary_socke
let _ = response.into_body().collect().await.unwrap().to_bytes();
let _ = server_task.await.unwrap();
let mut prover = prover_task.await.unwrap().unwrap();
let (sent_len, recv_len) = prover.transcript().len();
@@ -155,16 +114,13 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(notary_socke
prover.close().await.unwrap();
}
#[instrument(skip(socket))]
async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
socket: T,
) -> VerifierOutput {
async fn verifier(provider: &IoProvider) {
let mut root_store = RootCertStore::empty();
root_store
.add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
.unwrap();
let provider = CryptoProvider {
let crypto_provider = CryptoProvider {
cert: WebPkiVerifier::new(root_store, None),
..Default::default()
};
@@ -177,14 +133,28 @@ async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
.build()
.unwrap(),
)
.crypto_provider(provider)
.crypto_provider(crypto_provider)
.build()
.unwrap();
let verifier = Verifier::new(config);
verifier
.verify(socket.compat(), &VerifyConfig::default())
let VerifierOutput {
server_name,
transcript_commitments,
..
} = verifier
.verify(
provider.provide_proto_io().await.unwrap(),
&VerifyConfig::default(),
)
.await
.unwrap()
.unwrap();
assert_eq!(server_name.unwrap().as_str(), SERVER_DOMAIN);
assert!(
transcript_commitments
.iter()
.any(|commitment| matches!(commitment, TranscriptCommitment::Hash { .. }))
);
}

View File

@@ -0,0 +1,39 @@
[package]
name = "tlsn-harness-runner"
version = "0.1.0"
edition = "2024"
publish = false
[lib]
name = "harness_runner"
[dependencies]
tlsn-harness-core = { workspace = true }
tlsn-server-fixture = { workspace = true }
anyhow = { workspace = true }
axum = { workspace = true }
chromiumoxide = { workspace = true }
clap = { workspace = true, features = ["derive", "env"] }
csv = { version = "1.3" }
duct = { version = "1" }
futures = { workspace = true }
ipnet = { workspace = true }
serio = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true, features = ["full"] }
tokio-util = { workspace = true, features = ["compat", "codec"] }
toml = { workspace = true }
tower = { workspace = true }
tower-http = { workspace = true, features = ["set-header", "fs"] }
tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
websocket-relay = { workspace = true }
[[bin]]
name = "tlsn-harness-runner"
path = "src/bin/runner.rs"
[[bin]]
name = "tlsn-harness-wasm-server"
path = "src/bin/wasm.rs"

View File

@@ -0,0 +1,4 @@
// Binary entry point for the harness runner.
//
// Uses a multi-threaded Tokio runtime and delegates all logic to the
// `harness_runner` library crate.
#[tokio::main(flavor = "multi_thread")]
async fn main() -> anyhow::Result<()> {
    harness_runner::main().await
}

View File

@@ -0,0 +1,6 @@
use anyhow::Result;

// Binary entry point for the WASM static-file server.
//
// Delegates to the `wasm_server` module of the `harness_runner` crate.
#[tokio::main]
async fn main() -> Result<()> {
    harness_runner::wasm_server::main().await
}

View File

@@ -0,0 +1,76 @@
use std::path::PathBuf;
use clap::{Parser, Subcommand, ValueEnum};
use ipnet::Ipv4Net;
use crate::Target;
// Top-level CLI for the harness runner.
//
// NOTE(review): `///` doc comments on clap items become user-visible help
// text, so reviewer notes here use `//` to leave help output unchanged.
#[derive(Parser)]
#[command(version, about, name = "tlsn-harness-runner", long_about = None)]
pub struct Cli {
    #[command(subcommand)]
    pub command: Command,
    /// Target platform.
    #[arg(long, default_value = "native")]
    pub target: Target,
    /// Subnet to assign harness network interfaces.
    #[arg(long, default_value = "10.250.0.0/24", env = "SUBNET")]
    pub subnet: Ipv4Net,
}
/// Harness runner subcommands.
// NOTE: `///` comments on variants and fields are rendered by clap as help
// text. They are capitalized consistently (previously the variant help was
// lowercase while field help was capitalized).
#[derive(Subcommand)]
pub enum Command {
    /// Runs tests.
    Test {
        /// Name prefix filter.
        #[arg(long)]
        name: Option<String>,
        /// List tests.
        #[arg(long, exclusive = true)]
        list: bool,
    },
    /// Runs benchmarks.
    Bench {
        /// Configuration path.
        #[arg(short, long, default_value = "bench.toml")]
        config: PathBuf,
        /// Output file path.
        #[arg(short, long, default_value = "metrics.csv")]
        output: PathBuf,
        /// Number of samples to measure per benchmark. This is overridden by
        /// the number of samples specified in the configuration
        /// file unless `samples_override` is set.
        #[arg(short, long, default_value = "10")]
        samples: usize,
        /// Override the number of samples specified in the configuration file.
        #[arg(long)]
        samples_override: bool,
        /// Skip warmup.
        #[arg(long)]
        skip_warmup: bool,
    },
    /// Serves runner utilities such as the application server fixture, WASM
    /// server and WS proxy.
    Serve {},
    /// Sets up the harness network.
    Setup {},
    /// Cleans up the harness network.
    Clean {},
    /// Prints the harness network configuration.
    Info {},
    /// Sets the connection configuration.
    SetNetwork {
        /// The route to set.
        route: Route,
        /// The bandwidth to set, in Mbps.
        bandwidth: usize,
        /// The latency to set, in milliseconds.
        latency: usize,
    },
}
// Network route selector for `Command::SetNetwork`.
// NOTE: `//` comments are used so clap's generated help is unchanged.
#[derive(Debug, Clone, Copy, ValueEnum)]
pub enum Route {
    // The prover<->verifier protocol links.
    Protocol,
    // The application (server fixture) links.
    App,
}

View File

@@ -0,0 +1,2 @@
/// Latency between the server and the prover.
// NOTE(review): presumably in milliseconds, matching the other latency knobs
// in this crate — TODO confirm at the point of use.
pub const SERVER_LATENCY: usize = 50;

View File

@@ -0,0 +1,268 @@
use std::time::Duration;
use anyhow::{Context, Result, anyhow};
use chromiumoxide::{
Browser,
cdp::browser_protocol::{
network::{EnableParams, SetCacheDisabledParams},
page::ReloadParams,
},
};
use futures::StreamExt;
use harness_core::{
ExecutorConfig, Id,
bench::BenchOutput,
network::PORT_BROWSER,
rpc::{BenchCmd, TestCmd},
test::{TestOutput, TestStatus},
};
use crate::{Target, network::Namespace, rpc::Rpc};
/// Drives a single harness executor (native binary or browser page) inside a
/// network namespace and issues RPC commands to it.
pub struct Executor {
    // Network namespace the executor process runs in.
    ns: Namespace,
    // Configuration serialized and handed to the executor on startup.
    config: ExecutorConfig,
    // Whether the executor runs as a native process or in a browser.
    target: Target,
    // Current lifecycle state (init -> started -> stopped).
    state: State,
}
// Lifecycle state of the executor.
#[allow(clippy::large_enum_variant)]
enum State {
    // Not yet started.
    Init,
    // Running: OS process handle, RPC channel, and (browser target only) the
    // controlling browser instance.
    Started {
        process: duct::Handle,
        rpc: Rpc,
        browser: Option<Browser>,
    },
    // Shut down, or state moved out for shutdown.
    Stopped,
}

impl State {
    // Moves the current state out, leaving `Stopped` in its place.
    fn take(&mut self) -> Self {
        std::mem::replace(self, State::Stopped)
    }
}
impl Executor {
    /// Creates a new executor handle.
    ///
    /// Nothing is launched until [`Executor::start`] is called.
    pub fn new(ns: Namespace, config: ExecutorConfig, target: Target) -> Self {
        Self {
            ns,
            config,
            target,
            state: State::Init,
        }
    }

    /// Starts the executor and connects to it over RPC.
    ///
    /// For the native target this spawns the `executor-native` binary inside
    /// the namespace; for the browser target it launches headless Chrome and
    /// initializes the WASM executor in a page.
    pub async fn start(&mut self) -> Result<()> {
        match self.target {
            Target::Native => {
                // The executor binary is expected alongside the runner binary.
                let current_path = std::env::current_exe()
                    .context("failed to resolve current executable path")?;
                let executor_path = current_path
                    .parent()
                    .context("executable path has no parent directory")?
                    .join("executor-native");

                let rpc_addr = match self.config.id() {
                    Id::Zero => self.config.network().rpc_0,
                    Id::One => self.config.network().rpc_1,
                };

                // The configuration is passed through the CONFIG env var.
                let process = duct::cmd!(
                    "sudo",
                    "ip",
                    "netns",
                    "exec",
                    self.ns.name(),
                    "env",
                    format!("CONFIG={}", serde_json::to_string(&self.config)?),
                    executor_path
                )
                .stdout_capture()
                .stderr_capture()
                .unchecked()
                .start()?;

                let rpc = Rpc::new_native(rpc_addr).await?;

                self.state = State::Started {
                    process,
                    rpc,
                    browser: None,
                };
            }
            Target::Browser => {
                let chrome_path = chromiumoxide::detection::default_executable(Default::default())
                    .map_err(|_| anyhow!("failed to detect chrome path"))?;

                let rpc_addr = match self.config.id() {
                    Id::Zero => self.config.network().rpc_0,
                    Id::One => self.config.network().rpc_1,
                };
                let (wasm_addr, wasm_port) = self.config.network().wasm;

                // Create a temporary directory for the browser profile.
                let tmp = duct::cmd!("mktemp", "-d").read()?;
                let tmp = tmp.trim();

                // Browser output is deliberately left uncaptured so Chrome
                // logs surface when debugging.
                let process = duct::cmd!(
                    "sudo",
                    "ip",
                    "netns",
                    "exec",
                    self.ns.name(),
                    chrome_path,
                    format!("--remote-debugging-port={PORT_BROWSER}"),
                    "--headless",
                    "--disable-gpu",
                    "--disable-cache",
                    "--disable-application-cache",
                    "--no-sandbox",
                    format!("--user-data-dir={tmp}"),
                    // NOTE(review): hard-codes the default subnet's host
                    // address; derive it from the network config if the
                    // subnet is made configurable end-to-end.
                    "--allowed-ips=10.250.0.1",
                )
                .start()?;

                // Give the browser time to start.
                tokio::time::sleep(Duration::from_millis(250)).await;

                let (browser, mut handler) =
                    Browser::connect(format!("http://{}:{}", rpc_addr.0, PORT_BROWSER)).await?;

                // Drain CDP events in the background, surfacing errors.
                tokio::spawn(async move {
                    while let Some(res) = handler.next().await {
                        if let Err(e) = res {
                            eprintln!("chromium error: {:?}", e);
                        }
                    }
                });

                let page = browser
                    .new_page(&format!("http://{}:{}/index.html", wasm_addr, wasm_port))
                    .await?;

                // Disable caching and force-reload so a stale WASM bundle is
                // never used.
                page.execute(EnableParams::builder().build()).await?;
                page.execute(SetCacheDisabledParams {
                    cache_disabled: true,
                })
                .await?;
                page.execute(ReloadParams::builder().ignore_cache(true).build())
                    .await?;
                page.wait_for_navigation().await?;
                page.bring_to_front().await?;

                // Initialize the in-page executor with our configuration.
                page.evaluate(format!(
                    r#"
                        (async () => {{
                            const config = JSON.parse('{config}');

                            console.log("initializing executor", config);

                            await window.executor.init(config);

                            console.log("executor initialized");

                            return;
                        }})();
                    "#,
                    config = serde_json::to_string(&self.config)?
                ))
                .await?;

                let rpc = Rpc::new_browser(page);

                self.state = State::Started {
                    process,
                    rpc,
                    browser: Some(browser),
                };
            }
        }

        Ok(())
    }

    /// Returns the names of all tests the executor provides.
    pub async fn get_tests(&mut self) -> Result<Vec<String>> {
        let State::Started { rpc, .. } = &mut self.state else {
            return Err(anyhow!("executor not started"));
        };

        rpc.get_tests().await?.map_err(From::from)
    }

    /// Runs a single test.
    ///
    /// If the test crashes the native executor process, the process's stderr
    /// is reported as the failure reason and the executor is restarted.
    pub async fn test(&mut self, test: TestCmd) -> Result<TestOutput> {
        let State::Started { process, rpc, .. } = &mut self.state else {
            return Err(anyhow!("executor not started"));
        };

        match rpc.test(test).await {
            Ok(res) => res.map_err(From::from),
            // Test could cause the native executor process to panic.
            Err(e) if self.target == Target::Native => {
                // Wait a moment to give the process time to exit.
                tokio::time::sleep(Duration::from_millis(100)).await;
                if let Some(output) = process.try_wait()? {
                    let res = if output.status.success() {
                        Err(e).context("executor process closed with success exit code even though RPC call returned an error")
                    } else {
                        Ok(TestOutput {
                            status: TestStatus::Failed {
                                reason: Some(String::from_utf8_lossy(&output.stderr).to_string()),
                            },
                        })
                    };

                    // Restart the executor so subsequent tests can run.
                    self.start().await?;

                    return res;
                }

                Err(e.into())
            }
            Err(e) => Err(e.into()),
        }
    }

    /// Runs a single benchmark.
    pub async fn bench(&mut self, bench: BenchCmd) -> Result<BenchOutput> {
        let State::Started { rpc, .. } = &mut self.state else {
            return Err(anyhow!("executor not started"));
        };

        rpc.bench(bench).await?.map_err(From::from)
    }

    /// Shuts down the executor.
    ///
    /// The state is taken out synchronously so the returned future does not
    /// borrow `self`.
    pub fn shutdown(&mut self) -> impl Future<Output = Result<()>> {
        let state = self.state.take();
        async move {
            let State::Started {
                process, browser, ..
            } = state
            else {
                return Ok(());
            };

            if let Some(mut browser) = browser {
                browser.close().await?;
            };

            // Killing/waiting on the process blocks; run it off the runtime.
            tokio::task::spawn_blocking(move || {
                _ = process.kill();
                _ = process.wait();
            })
            .await?;

            Ok(())
        }
    }
}
impl Drop for Executor {
    /// Best-effort kill of the executor process if it is still running.
    fn drop(&mut self) {
        if let State::Started { process, .. } = &mut self.state {
            _ = process.kill();
        }
    }
}

View File

@@ -0,0 +1,291 @@
pub mod cli;
mod executor;
mod network;
pub(crate) mod rpc;
mod server_fixture;
pub mod wasm_server;
mod ws_proxy;
use std::time::Duration;
use anyhow::{Context, Result};
use clap::Parser;
use csv::WriterBuilder;
use harness_core::{
ExecutorConfig, Id, IoMode, Role, TEST_APP_BANDWIDTH, TEST_APP_DELAY, TEST_PROTO_BANDWIDTH,
TEST_PROTO_DELAY,
bench::{BenchItems, BenchOutput, Measurement, WARM_UP_BENCH},
network::NetworkConfig,
rpc::{BenchCmd, TestCmd},
test::TestStatus,
};
use cli::{Cli, Command};
use executor::Executor;
use server_fixture::ServerFixture;
use crate::{cli::Route, network::Network, wasm_server::WasmServer, ws_proxy::WsProxy};
// Target platform an executor runs on.
// NOTE: `//` comments are used so clap's generated help is unchanged.
#[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum)]
pub enum Target {
    // Native executor binary.
    Native,
    // Executor running inside a headless browser, served by the WASM server.
    Browser,
}
impl Default for Target {
fn default() -> Self {
Self::Native
}
}
/// Owns the harness network, shared services and the two executors.
struct Runner {
    network: Network,
    // Application server fixture the prover connects to.
    server_fixture: ServerFixture,
    // Serves the WASM executor bundle (used by the browser target).
    wasm_server: WasmServer,
    // WebSocket proxy on the protocol route — presumably used by the browser
    // target, which cannot open raw TCP; confirm against the executor.
    proto_proxy: WsProxy,
    // WebSocket proxy on the application route.
    app_proxy: WsProxy,
    // Prover-side executor (namespace 0).
    exec_p: Executor,
    // Verifier-side executor (namespace 1).
    exec_v: Executor,
    // Whether the shared services have been started.
    started: bool,
}
impl Runner {
fn new(cli: &Cli) -> Result<Self> {
let Cli { target, subnet, .. } = cli;
let current_path = std::env::current_exe().unwrap();
let fixture_path = current_path.parent().unwrap().join("server-fixture");
let network_config = NetworkConfig::new(*subnet);
let network = Network::new(network_config.clone())?;
let server_fixture =
ServerFixture::new(fixture_path, network.ns_app().clone(), network_config.app);
let wasm_server = WasmServer::new(
network.ns_0().clone(),
current_path.parent().unwrap().join("wasm-server"),
network_config.wasm,
);
let proto_proxy = WsProxy::new(network_config.proto_proxy);
let app_proxy = WsProxy::new(network_config.app_proxy);
let exec_p = Executor::new(
network.ns_0().clone(),
ExecutorConfig::builder()
.id(Id::Zero)
.io_mode(IoMode::Client)
.network_config(network_config.clone())
.build(),
*target,
);
let exec_v = Executor::new(
network.ns_1().clone(),
ExecutorConfig::builder()
.id(Id::One)
.io_mode(IoMode::Server)
.network_config(network_config.clone())
.build(),
Target::Native,
);
Ok(Self {
network,
server_fixture,
wasm_server,
proto_proxy,
app_proxy,
exec_p,
exec_v,
started: false,
})
}
async fn start_services(&mut self) -> Result<()> {
if self.started {
return Ok(());
}
self.server_fixture.start()?;
self.wasm_server.start()?;
self.proto_proxy.start().await?;
self.app_proxy.start().await?;
self.started = true;
Ok(())
}
}
/// Runner entry point: parses the CLI and dispatches to the requested command.
pub async fn main() -> Result<()> {
    let cli = Cli::parse();

    let mut runner = Runner::new(&cli)?;

    // Exit code is deferred so executors are shut down before exiting.
    let mut exit_code = 0;
    match cli.command {
        Command::Test { list, .. } if list => {
            // Only list test names; the verifier executor is not needed.
            runner.start_services().await?;
            runner.exec_p.start().await?;
            let tests = runner.exec_p.get_tests().await?;
            for test in tests {
                println!("{test}");
            }
        }
        Command::Test { name, .. } => {
            runner.start_services().await?;
            runner.exec_p.start().await?;
            runner.exec_v.start().await?;

            let tests = runner.exec_p.get_tests().await?;

            // Filter tests by name if provided
            let tests = if let Some(name) = name {
                tests
                    .into_iter()
                    .filter(|t| t == &name || t.starts_with(&name))
                    .collect()
            } else {
                tests
            };

            // Apply the standard test network configuration.
            runner
                .network
                .set_proto_config(TEST_PROTO_BANDWIDTH, TEST_PROTO_DELAY)?;
            runner
                .network
                .set_app_config(TEST_APP_BANDWIDTH, TEST_APP_DELAY)?;

            let mut success = 0;
            let mut failed = 0;
            let mut failed_tests = Vec::new();
            for name in tests {
                // Run the prover and verifier sides concurrently.
                let (output_p, output_v) = tokio::try_join!(
                    runner.exec_p.test(TestCmd {
                        name: name.clone(),
                        role: Role::Prover,
                    }),
                    runner.exec_v.test(TestCmd {
                        name: name.clone(),
                        role: Role::Verifier,
                    })
                )?;

                if output_p.status.is_passed() && output_v.status.is_passed() {
                    success += 1;
                    println!("{name}: passed");
                } else {
                    failed += 1;
                    failed_tests.push(name.clone());
                    eprintln!("{name}: failed");
                    if let TestStatus::Failed { reason } = output_p.status {
                        eprintln!("{name} prover failed.");
                        if let Some(reason) = reason {
                            eprintln!("reason: {reason}");
                        }
                    }
                    if let TestStatus::Failed { reason } = output_v.status {
                        eprintln!("{name} verifier failed.");
                        if let Some(reason) = reason {
                            eprintln!("reason: {reason}");
                        }
                    }
                }
            }

            println!("summary: {success} passed, {failed} failed");

            if failed > 0 {
                exit_code = 1;
                println!("failed: {}", failed_tests.join(", "));
            }
        }
        Command::Bench {
            config,
            output,
            samples,
            samples_override,
            skip_warmup,
        } => {
            let items: BenchItems = toml::from_str(&std::fs::read_to_string(config)?)?;

            let output_file = std::fs::File::create(output)?;
            let mut writer = WriterBuilder::new().from_writer(output_file);

            let mut benches = Vec::new();
            if !skip_warmup {
                // Run a few unmeasured warm-up benches first.
                benches.extend(vec![WARM_UP_BENCH; 3]);
            }
            benches.extend(items.to_benches(samples, samples_override));

            runner.start_services().await?;
            runner.exec_p.start().await?;
            runner.exec_v.start().await?;

            for config in benches {
                // Each interface applies half the requested latency
                // (div_ceil rounds up so the total never falls short).
                runner
                    .network
                    .set_proto_config(config.bandwidth, config.protocol_latency.div_ceil(2))?;
                runner
                    .network
                    .set_app_config(config.bandwidth, config.app_latency.div_ceil(2))?;

                // Wait for the network to stabilize
                tokio::time::sleep(Duration::from_millis(100)).await;

                let (output, _) = tokio::try_join!(
                    runner.exec_p.bench(BenchCmd {
                        config: config.clone(),
                        role: Role::Prover,
                    }),
                    runner.exec_v.bench(BenchCmd {
                        config: config.clone(),
                        role: Role::Verifier,
                    })
                )?;

                let BenchOutput::Prover { metrics } = output else {
                    panic!("expected prover output");
                };

                // Write each measurement to the CSV as soon as it is taken.
                let measurement = Measurement::new(config, metrics);
                writer.serialize(measurement)?;
                writer.flush()?;
            }
        }
        Command::Serve {} => {
            // Keep the services running until interrupted.
            runner.start_services().await?;
            tokio::signal::ctrl_c().await?;
        }
        Command::Setup {} => {
            runner.network.create()?;
            println!("network created");
            runner.network.print_network();
        }
        Command::Clean {} => {
            runner.network.delete()?;
            println!("network deleted");
        }
        Command::Info {} => {
            runner.network.print_network();
        }
        Command::SetNetwork {
            route,
            bandwidth,
            latency: delay,
        } => match route {
            Route::Protocol => runner
                .network
                .set_proto_config(bandwidth, delay.div_ceil(2))?,
            Route::App => runner
                .network
                .set_app_config(bandwidth, delay.div_ceil(2))?,
        },
    }

    runner.exec_p.shutdown().await?;
    runner.exec_v.shutdown().await?;

    if exit_code != 0 {
        std::process::exit(exit_code);
    }

    Ok(())
}

View File

@@ -0,0 +1,707 @@
use std::net::Ipv4Addr;
use anyhow::Result;
use harness_core::network::*;
/// The harness network: three namespaces (two executors and the app server)
/// connected through a host bridge by veth pairs.
pub struct Network {
    config: NetworkConfig,
    // Namespace for executor 0 (prover side).
    ns_0: Namespace,
    // Namespace for executor 1 (verifier side).
    ns_1: Namespace,
    // Namespace for the application server fixture.
    ns_app: Namespace,
    // Host bridge the peer end of every veth pair is enslaved to.
    bridge: Bridge,
    // RPC links into each executor namespace (never shaped).
    veth_rpc_0: VethPair,
    veth_rpc_1: VethPair,
    // Protocol links, shaped by `set_proto_config`.
    veth_proto_0: VethPair,
    veth_proto_1: VethPair,
    // Host-side interfaces for the protocol and app WS proxies.
    veth_proto_proxy: VethPair,
    veth_app_proxy: VethPair,
    // App server interface (shaped) and the executors' routes to it.
    veth_app: VethPair,
    veth_app_0: VethPair,
    veth_app_1: VethPair,
}
impl Network {
    /// Builds the network description: assigns namespaces and addresses to
    /// every interface. Nothing is created in the OS until [`Network::create`].
    pub fn new(config: NetworkConfig) -> Result<Self> {
        let ns_0 = Namespace::new(NS_0);
        let ns_1 = Namespace::new(NS_1);
        let ns_app = Namespace::new(NS_APP);
        let mut bridge = Bridge::new(BRIDGE);
        let mut veth_rpc_0 = VethPair::new(VETH_RPC_0);
        let mut veth_rpc_1 = VethPair::new(VETH_RPC_1);
        let mut veth_proto_0 = VethPair::new(VETH_PROTO_0);
        let mut veth_proto_1 = VethPair::new(VETH_PROTO_1);
        let mut veth_proto_proxy = VethPair::new(VETH_PROTO_PROXY);
        let mut veth_app_proxy = VethPair::new(VETH_APP_PROXY);
        let mut veth_app = VethPair::new(VETH_APP);
        let mut veth_app_0 = VethPair::new(VETH_APP_0);
        let mut veth_app_1 = VethPair::new(VETH_APP_1);

        // Move the `.0` end of each pair into its namespace; the `.1` end
        // stays in the host namespace and is enslaved to the bridge.
        veth_rpc_0.0.set_namespace(&ns_0);
        veth_proto_0.0.set_namespace(&ns_0);
        veth_rpc_1.0.set_namespace(&ns_1);
        veth_proto_1.0.set_namespace(&ns_1);
        veth_app.0.set_namespace(&ns_app);
        veth_app_0.0.set_namespace(&ns_0);
        veth_app_1.0.set_namespace(&ns_1);

        // Assign addresses.
        let prefix_len = config.subnet.prefix_len();
        bridge.set_addr(config.host, prefix_len);
        veth_rpc_0.0.set_addr(config.rpc_0.0, prefix_len);
        veth_rpc_1.0.set_addr(config.rpc_1.0, prefix_len);
        veth_proto_0.0.set_addr(config.proto_0.0, prefix_len);
        veth_proto_1.0.set_addr(config.proto_1.0, prefix_len);
        veth_proto_proxy
            .0
            .set_addr(config.proto_proxy.0, prefix_len);
        veth_app_proxy.0.set_addr(config.app_proxy.0, prefix_len);
        veth_app.0.set_addr(config.app.0, prefix_len);

        Ok(Self {
            config,
            ns_0,
            ns_1,
            ns_app,
            bridge,
            veth_rpc_0,
            veth_rpc_1,
            veth_proto_0,
            veth_proto_1,
            veth_proto_proxy,
            veth_app_proxy,
            veth_app,
            veth_app_0,
            veth_app_1,
        })
    }

    /// Creates the network.
    pub fn create(&mut self) -> Result<()> {
        self.ns_0.create()?;
        self.ns_1.create()?;
        self.ns_app.create()?;
        self.bridge.create()?;
        self.veth_rpc_0.create()?;
        self.veth_rpc_1.create()?;
        self.veth_proto_0.create()?;
        self.veth_proto_1.create()?;
        self.veth_proto_proxy.create()?;
        self.veth_app_proxy.create()?;
        self.veth_app.create()?;
        self.veth_app_0.create()?;
        self.veth_app_1.create()?;

        // Enslave ends of the veth pairs to the bridge.
        self.bridge.add_interface(&self.veth_rpc_0.1)?;
        self.bridge.add_interface(&self.veth_rpc_1.1)?;
        self.bridge.add_interface(&self.veth_proto_0.1)?;
        self.bridge.add_interface(&self.veth_proto_1.1)?;
        self.bridge.add_interface(&self.veth_proto_proxy.1)?;
        self.bridge.add_interface(&self.veth_app_proxy.1)?;
        self.bridge.add_interface(&self.veth_app.1)?;
        self.bridge.add_interface(&self.veth_app_0.1)?;
        self.bridge.add_interface(&self.veth_app_1.1)?;

        // Bring up interfaces.
        self.bridge.up()?;
        self.veth_rpc_0.0.up()?;
        self.veth_rpc_0.1.up()?;
        self.veth_proto_0.0.up()?;
        self.veth_proto_0.1.up()?;
        self.veth_rpc_1.0.up()?;
        self.veth_rpc_1.1.up()?;
        self.veth_proto_1.0.up()?;
        self.veth_proto_1.1.up()?;
        self.veth_proto_proxy.0.up()?;
        self.veth_proto_proxy.1.up()?;
        self.veth_app_proxy.0.up()?;
        self.veth_app_proxy.1.up()?;
        self.veth_app.0.up()?;
        self.veth_app.1.up()?;
        self.veth_app_0.0.up()?;
        self.veth_app_0.1.up()?;
        self.veth_app_1.0.up()?;
        self.veth_app_1.1.up()?;

        // Bring up loopback in namespace 0: the DNAT rule below forwards the
        // browser debugging port to 127.0.0.1 inside this namespace.
        duct::cmd!(
            "sudo",
            "ip",
            "netns",
            "exec",
            &self.ns_0.name,
            "ip",
            "link",
            "set",
            "lo",
            "up"
        )
        .run()?;

        // Default routes via the RPC interfaces, plus specific routes so
        // protocol/app/proxy traffic traverses the shaped interfaces.
        ip_route(&self.ns_0, "default", &self.veth_rpc_0.0.name)?;
        ip_route(&self.ns_1, "default", &self.veth_rpc_1.0.name)?;
        ip_route(&self.ns_app, "default", &self.veth_app.0.name)?;
        ip_route(&self.ns_0, self.config.proto_1.0, &self.veth_proto_0.0.name)?;
        ip_route(&self.ns_1, self.config.proto_0.0, &self.veth_proto_1.0.name)?;
        ip_route(&self.ns_0, self.config.app.0, &self.veth_app_0.0.name)?;
        ip_route(&self.ns_1, self.config.app.0, &self.veth_app_1.0.name)?;
        ip_route(
            &self.ns_0,
            self.config.proto_proxy.0,
            &self.veth_proto_0.0.name,
        )?;
        ip_route(
            &self.ns_1,
            self.config.proto_proxy.0,
            &self.veth_proto_1.0.name,
        )?;
        ip_route(&self.ns_0, self.config.app_proxy.0, &self.veth_app_0.0.name)?;
        ip_route(&self.ns_1, self.config.app_proxy.0, &self.veth_app_1.0.name)?;

        // Forward the browser remote-debugging port into namespace 0 so the
        // runner can reach Chrome's CDP endpoint from the host.
        ip_forward(
            &self.ns_0,
            (self.config.rpc_0.0, PORT_BROWSER),
            ("127.0.0.1", PORT_BROWSER),
        )?;

        Ok(())
    }

    /// Returns namespace 0.
    pub fn ns_0(&self) -> &Namespace {
        &self.ns_0
    }

    /// Returns namespace 1.
    pub fn ns_1(&self) -> &Namespace {
        &self.ns_1
    }

    /// Returns namespace app.
    pub fn ns_app(&self) -> &Namespace {
        &self.ns_app
    }

    /// Prints the addresses and ports of every network component.
    pub fn print_network(&self) {
        println!("host: {}", self.config.host);
        println!(
            "protocol proxy: {}:{}",
            self.config.proto_proxy.0, self.config.proto_proxy.1
        );
        println!(
            "app proxy: {}:{}",
            self.config.app_proxy.0, self.config.app_proxy.1
        );
        println!(
            "executor 0 rpc: {}:{}",
            self.config.rpc_0.0, self.config.rpc_0.1
        );
        println!(
            "executor 1 rpc: {}:{}",
            self.config.rpc_1.0, self.config.rpc_1.1
        );
        println!(
            "protocol 0: {}:{}",
            self.config.proto_0.0, self.config.proto_0.1
        );
        println!(
            "protocol 1: {}:{}",
            self.config.proto_1.0, self.config.proto_1.1
        );
        println!("app: {}:{}", self.config.app.0, self.config.app.1);
    }

    /// Sets the configuration of the protocol interfaces.
    pub fn set_proto_config(&self, bandwidth: usize, delay: usize) -> Result<()> {
        self.veth_proto_0.0.set_egress(bandwidth, delay)?;
        self.veth_proto_1.0.set_egress(bandwidth, delay)?;
        Ok(())
    }

    /// Sets the configuration of the app interfaces.
    pub fn set_app_config(&self, bandwidth: usize, delay: usize) -> Result<()> {
        self.veth_app.0.set_egress(bandwidth, delay)?;
        self.veth_app_0.0.set_egress(bandwidth, delay)?;
        self.veth_app_1.0.set_egress(bandwidth, delay)?;
        Ok(())
    }

    /// Deletes the network.
    // NOTE(review): the rpc/proto veth pairs are not deleted explicitly —
    // presumably they are torn down along with their namespaces, since
    // deleting either end of a veth pair removes both; confirm.
    pub fn delete(&self) -> Result<()> {
        self.ns_0.delete()?;
        self.ns_1.delete()?;
        self.ns_app.delete()?;
        self.bridge.delete()?;
        self.veth_proto_proxy.delete()?;
        self.veth_app_proxy.delete()?;
        self.veth_app_0.delete()?;
        self.veth_app_1.delete()?;
        self.veth_app.delete()?;
        Ok(())
    }
}
// Runs a command in the namespace if it exists, otherwise runs it in the
// current namespace.
//
// Expands to a `duct` expression; the optional namespace is given after `=>`
// and the first literal must be "sudo".
macro_rules! ns_cmd {
    ("sudo", $cmd:expr, $($args:expr),* => $namespace:expr) => {
        if let Some(namespace) = $namespace.as_ref() {
            duct::cmd!(
                "sudo",
                "ip",
                "netns",
                "exec",
                namespace.name(),
                $cmd, $($args),*)
        } else {
            duct::cmd!("sudo", $cmd, $($args),*)
        }
    };
}
/// A named Linux network namespace.
#[derive(Debug, Clone)]
pub struct Namespace {
    name: String,
}
impl Namespace {
    /// Creates a new namespace handle.
    ///
    /// The namespace is not created in the OS until `create` is called.
    pub fn new(name: &str) -> Self {
        Self {
            name: name.to_string(),
        }
    }

    /// Returns the name of the namespace.
    pub fn name(&self) -> &str {
        &self.name
    }

    /// Creates the namespace, recreating it if it already exists.
    fn create(&self) -> Result<()> {
        let output = duct::cmd!("sudo", "ip", "netns", "add", &self.name)
            .stderr_capture()
            .unchecked()
            .run()?;

        if !output.status.success() {
            let stderr = String::from_utf8_lossy(&output.stderr);
            // If the namespace already exists, delete and recreate it so we
            // always start from a clean slate.
            if stderr.contains("File exists") {
                duct::cmd!("sudo", "ip", "netns", "delete", &self.name).run()?;
                return self.create();
            }

            // Surface any other failure instead of silently ignoring it, so
            // network setup errors are not discovered later.
            return Err(anyhow::anyhow!(
                "Failed to create namespace {}: {}",
                self.name,
                stderr
            ));
        }

        Ok(())
    }

    /// Deletes the namespace, ignoring the error if it does not exist.
    fn delete(&self) -> Result<()> {
        let output = duct::cmd!("sudo", "ip", "netns", "delete", &self.name)
            .stderr_capture()
            .unchecked()
            .run()?;

        if !output.status.success() {
            let stderr = String::from_utf8_lossy(&output.stderr);
            // Ignore error if the namespace doesn't exist.
            if !stderr.contains("No such file or directory") {
                return Err(anyhow::anyhow!(
                    "Failed to delete namespace {}: {}",
                    self.name,
                    stderr
                ));
            }
        }

        Ok(())
    }
}
// A veth pair; the two endpoint names are the base name suffixed with "-0"
// and "-1".
struct VethPair(Veth, Veth);

impl VethPair {
    fn new(name: &str) -> Self {
        Self(
            Veth::new(&format!("{}-0", name)),
            Veth::new(&format!("{}-1", name)),
        )
    }

    // Creates the pair (recreating it if it already exists), then applies
    // each end's namespace and address settings.
    fn create(&self) -> Result<()> {
        let output = duct::cmd!(
            "sudo",
            "ip",
            "link",
            "add",
            &self.0.name,
            "type",
            "veth",
            "peer",
            "name",
            &self.1.name
        )
        .stderr_capture()
        .unchecked()
        .run()?;

        // Delete it if it already exists.
        if !output.status.success() {
            let stderr = String::from_utf8_lossy(&output.stderr);
            if stderr.contains("File exists") {
                self.delete()?;
                return self.create();
            } else {
                return Err(anyhow::anyhow!(
                    "Failed to create veth pair {}: {}",
                    self.0.name,
                    stderr
                ));
            }
        }

        self.0.create()?;
        self.1.create()?;

        Ok(())
    }

    // Deletes both ends (deleting one end of a veth pair removes the peer as
    // well, but each delete is best-effort).
    fn delete(&self) -> Result<()> {
        self.0.delete()?;
        self.1.delete()?;
        Ok(())
    }
}
// One end of a veth pair, with an optional namespace and address that are
// applied when `create` is called.
struct Veth {
    name: String,
    ns: Option<Namespace>,
    addr: Option<(Ipv4Addr, u8)>,
}
impl Veth {
    /// Creates a new endpoint handle with no namespace or address assigned.
    fn new(name: &str) -> Self {
        Self {
            name: name.to_string(),
            ns: None,
            addr: None,
        }
    }

    /// Applies the configured namespace and address to the interface.
    ///
    /// The link itself is created by `VethPair::create`; this only moves it
    /// into its namespace and assigns its address.
    fn create(&self) -> Result<()> {
        // Set namespace.
        if let Some(ns) = &self.ns {
            duct::cmd!("sudo", "ip", "link", "set", &self.name, "netns", ns.name()).run()?;
        }

        // Set address.
        if let Some((addr, prefix_length)) = &self.addr {
            ns_cmd!(
                "sudo",
                "ip",
                "addr",
                "add",
                format!("{}/{}", addr, prefix_length),
                "dev",
                &self.name
                => self.ns
            )
            .run()?;
        }

        Ok(())
    }

    /// Sets the namespace of the veth interface.
    fn set_namespace(&mut self, ns: &Namespace) {
        self.ns = Some(ns.clone());
    }

    /// Sets the address of the veth interface.
    fn set_addr(&mut self, addr: Ipv4Addr, prefix_length: u8) {
        self.addr = Some((addr, prefix_length));
    }

    /// Brings the veth interface up.
    fn up(&self) -> Result<()> {
        ns_cmd!(
            "sudo",
            "ip",
            "link",
            "set",
            &self.name,
            "up"
            => self.ns
        )
        .run()?;
        Ok(())
    }

    /// Sets the egress bandwidth and delay of the veth interface.
    ///
    /// # Arguments
    ///
    /// * `bandwidth` - Egress bandwidth in Mbps.
    /// * `delay` - Egress delay in ms.
    fn set_egress(&self, bandwidth: usize, delay: usize) -> Result<()> {
        // Remove existing rules.
        ns_cmd!(
            "sudo",
            "tc",
            "qdisc",
            "del",
            "dev",
            &self.name, "root"
            => self.ns
        )
        .stderr_capture()
        .unchecked()
        .run()?;

        if bandwidth > 0 {
            // Set burst to bandwidth delay product in kbit.
            let burst = bandwidth * (2 * delay.min(10));
            // Generous tbf queue latency so packets are queued rather than
            // dropped under load.
            ns_cmd!(
                "sudo",
                "tc",
                "qdisc",
                "add",
                "dev",
                &self.name,
                "root",
                "handle",
                "1:",
                "tbf",
                "rate",
                format!("{bandwidth}mbit"),
                "burst",
                format!("{burst}kbit"),
                "latency",
                "60s"
                => self.ns
            )
            .run()?;
        }

        if delay > 0 {
            if bandwidth > 0 {
                // Chain netem under the tbf qdisc installed above.
                ns_cmd!(
                    "sudo",
                    "tc",
                    "qdisc",
                    "add",
                    "dev",
                    &self.name,
                    "parent",
                    "1:1",
                    "handle",
                    "10:",
                    "netem",
                    "delay",
                    format!("{delay}ms")
                    => self.ns
                )
                .run()?;
            } else {
                // No rate limit: netem is the root qdisc.
                ns_cmd!(
                    "sudo",
                    "tc",
                    "qdisc",
                    "add",
                    "dev",
                    &self.name,
                    "root",
                    "handle",
                    "1:",
                    "netem",
                    "delay",
                    format!("{delay}ms")
                    => self.ns
                )
                .run()?;
            }
        }

        Ok(())
    }

    /// Deletes the interface (best effort; errors are ignored).
    fn delete(&self) -> Result<()> {
        ns_cmd!(
            "sudo",
            "ip",
            "link",
            "delete",
            &self.name
            => self.ns
        )
        .stderr_capture()
        .unchecked()
        .run()?;
        Ok(())
    }
}
// A Linux bridge in the host namespace, with an optional address applied on
// `create`.
struct Bridge {
    name: String,
    addr: Option<(Ipv4Addr, u8)>,
}
impl Bridge {
    /// Creates a new bridge in a namespace.
    pub fn new(name: &str) -> Self {
        Self {
            name: name.to_string(),
            addr: None,
        }
    }

    /// Creates the bridge in the host namespace.
    ///
    /// If a bridge with the same name already exists it is deleted and
    /// recreated. Call `set_addr` before this for the address to be applied.
    pub fn create(&self) -> Result<()> {
        let output = duct::cmd!("sudo", "ip", "link", "add", &self.name, "type", "bridge")
            .stderr_capture()
            .unchecked()
            .run()?;

        if !output.status.success() {
            let stderr = String::from_utf8_lossy(&output.stderr);
            // Delete the bridge if it already exists.
            if stderr.contains("File exists") {
                duct::cmd!("sudo", "ip", "link", "delete", &self.name).run()?;
                // Recreate the bridge.
                return self.create();
            }

            return Err(anyhow::anyhow!(
                "Failed to create bridge {}: {}",
                self.name,
                stderr
            ));
        }

        if let Some((addr, prefix_length)) = &self.addr {
            duct::cmd!(
                "sudo",
                "ip",
                "addr",
                "add",
                format!("{}/{}", addr, prefix_length),
                "dev",
                &self.name
            )
            .run()?;
        }

        Ok(())
    }

    /// Deletes the bridge.
    pub fn delete(&self) -> Result<()> {
        duct::cmd!("sudo", "ip", "link", "delete", &self.name)
            .stderr_capture()
            .unchecked()
            .run()?;
        Ok(())
    }

    /// Adds an interface to the bridge.
    pub fn add_interface(&self, interface: &Veth) -> Result<()> {
        duct::cmd!(
            "sudo",
            "ip",
            "link",
            "set",
            &interface.name,
            "master",
            &self.name
        )
        .run()?;
        Ok(())
    }

    /// Sets the address of the bridge.
    pub fn set_addr(&mut self, addr: Ipv4Addr, prefix_length: u8) {
        self.addr = Some((addr, prefix_length));
    }

    /// Brings the bridge up.
    pub fn up(&self) -> Result<()> {
        duct::cmd!("sudo", "ip", "link", "set", &self.name, "up").run()?;
        Ok(())
    }
}
// Adds a route to `dest` via device `dev` inside namespace `ns`.
fn ip_route(ns: &Namespace, dest: impl ToString, dev: &str) -> Result<()> {
    duct::cmd!(
        "sudo",
        "ip",
        "netns",
        "exec",
        ns.name(),
        "ip",
        "route",
        "add",
        dest.to_string(),
        "dev",
        dev
    )
    .run()?;
    Ok(())
}
// Forwards TCP connections arriving in `ns` at `remote` to `local` via a
// DNAT rule. `route_localnet` is enabled so localhost destinations work.
fn ip_forward(
    ns: &Namespace,
    remote: (impl ToString, u16),
    local: (impl ToString, u16),
) -> Result<()> {
    // Allow routing to 127.0.0.0/8 destinations within the namespace.
    duct::cmd!(
        "sudo",
        "ip",
        "netns",
        "exec",
        ns.name(),
        "sysctl",
        "-w",
        "net.ipv4.conf.all.route_localnet=1"
    )
    .run()?;

    // Rewrite the destination of matching inbound TCP packets.
    duct::cmd!(
        "sudo",
        "ip",
        "netns",
        "exec",
        ns.name(),
        "iptables",
        "-t",
        "nat",
        "-A",
        "PREROUTING",
        "-p",
        "tcp",
        "-d",
        remote.0.to_string(),
        "--dport",
        remote.1.to_string(),
        "-j",
        "DNAT",
        "--to-destination",
        format!("{}:{}", local.0.to_string(), local.1)
    )
    .run()?;
    Ok(())
}

View File

@@ -0,0 +1,120 @@
use std::{
io::{self, ErrorKind},
net::Ipv4Addr,
time::Duration,
};
use chromiumoxide::Page;
use serio::{SinkExt, stream::IoStreamExt};
use tokio::net::TcpStream;
use harness_core::{
bench::BenchOutput,
rpc::{BenchCmd, Cmd, CmdOutput, Result, RpcError, TestCmd},
test::TestOutput,
};
// Length-delimited bincode framing over a TCP stream, used for native RPC.
type Framed = serio::Framed<
    tokio_util::codec::Framed<TcpStream, tokio_util::codec::LengthDelimitedCodec>,
    serio::codec::Bincode,
>;

// Delay between connection attempts, in milliseconds.
const RETRY_DELAY: usize = 50;
// Maximum number of connection retries before giving up.
const MAX_RETRIES: usize = 10;
// RPC client to an executor, over TCP (native) or the page's JS bridge
// (browser).
pub(crate) struct Rpc(Inner);

enum Inner {
    // Framed bincode RPC over a TCP stream.
    Native { io: Framed },
    // Commands evaluated as JS against `window.executor` in the page.
    Browser { page: Page },
}
impl Rpc {
    /// Connects to a native executor at `addr`, retrying while the connection
    /// is refused (the executor process may still be starting up).
    pub async fn new_native(addr: (Ipv4Addr, u16)) -> io::Result<Self> {
        let mut retries = 0;
        let stream = loop {
            match TcpStream::connect(addr).await {
                Ok(stream) => break stream,
                Err(e) if e.kind() == ErrorKind::ConnectionRefused => {
                    tokio::time::sleep(Duration::from_millis(RETRY_DELAY as u64)).await;
                    retries += 1;
                    if retries > MAX_RETRIES {
                        return Err(e);
                    }
                }
                Err(e) => return Err(e),
            }
        };

        let io = serio::Framed::new(
            tokio_util::codec::LengthDelimitedCodec::builder().new_framed(stream),
            serio::codec::Bincode,
        );

        Ok(Self(Inner::Native { io }))
    }

    /// Wraps a browser page hosting the executor.
    pub fn new_browser(page: Page) -> Self {
        Self(Inner::Browser { page })
    }

    /// Requests the list of available test names.
    ///
    /// The outer `io::Result` is a transport error; the inner `Result` is an
    /// executor-side RPC error.
    pub async fn get_tests(&mut self) -> io::Result<Result<Vec<String>>> {
        let output = match &mut self.0 {
            Inner::Native { io } => {
                io.send(Cmd::GetTests).await?;
                io.expect_next::<Result<CmdOutput>>().await?
            }
            Inner::Browser { page } => browser_cmd(page, Cmd::GetTests).await?,
        };

        Ok(match output {
            Ok(output) => output.try_into_get_tests().map_err(RpcError::from),
            Err(e) => Err(e),
        })
    }

    /// Runs a test on the executor.
    pub async fn test(&mut self, test: TestCmd) -> io::Result<Result<TestOutput>> {
        let output = match &mut self.0 {
            Inner::Native { io } => {
                io.send(Cmd::Test(test)).await?;
                io.expect_next::<Result<CmdOutput>>().await?
            }
            Inner::Browser { page } => browser_cmd(page, Cmd::Test(test)).await?,
        };

        Ok(match output {
            Ok(output) => output.try_into_test().map_err(RpcError::from),
            Err(e) => Err(e),
        })
    }

    /// Runs a benchmark on the executor.
    pub async fn bench(&mut self, bench: BenchCmd) -> io::Result<Result<BenchOutput>> {
        let output = match &mut self.0 {
            Inner::Native { io } => {
                io.send(Cmd::Bench(bench)).await?;
                io.expect_next::<Result<CmdOutput>>().await?
            }
            Inner::Browser { page } => browser_cmd(page, Cmd::Bench(bench)).await?,
        };

        Ok(match output {
            Ok(output) => output.try_into_bench().map_err(RpcError::from),
            Err(e) => Err(e),
        })
    }
}
/// Executes `cmd` in the browser by calling the executor exposed on
/// `window` and deserializing the value it returns.
async fn browser_cmd(page: &Page, cmd: Cmd) -> io::Result<Result<CmdOutput>> {
    // The serialized command is embedded directly as a JS expression: JSON
    // is valid JS syntax, whereas routing it through a single-quoted string
    // and `JSON.parse` would break whenever the payload contained a quote
    // or a backslash escape.
    page.evaluate(format!(
        r#"
        (async () => {{
            return await window.executor.call({cmd});
        }})();
    "#,
        cmd = serde_json::to_string(&cmd).unwrap()
    ))
    .await
    .map_err(io::Error::other)?
    .into_value()
    .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
}

View File

@@ -0,0 +1,63 @@
use std::{net::Ipv4Addr, path::PathBuf};
use anyhow::Result;
use crate::network::Namespace;
/// A TLS server fixture process run inside a network namespace.
pub struct ServerFixture {
    // Path to the fixture binary.
    path: PathBuf,
    // Network namespace the fixture is executed in.
    namespace: Namespace,
    // Address (ip, port) the fixture listens on.
    addr: (Ipv4Addr, u16),
    // Handle to the spawned process; `None` until `start` succeeds.
    handle: Option<duct::Handle>,
}
impl ServerFixture {
/// Spawns a new server fixture.
pub fn new(path: PathBuf, namespace: Namespace, addr: (Ipv4Addr, u16)) -> Self {
Self {
path,
namespace,
addr,
handle: None,
}
}
/// Starts the server fixture.
pub fn start(&mut self) -> Result<()> {
if self.handle.is_some() {
return Ok(());
}
let handle = duct::cmd!(
"sudo",
"ip",
"netns",
"exec",
self.namespace.name(),
"env",
format!("ADDR={}", self.addr.0),
format!("PORT={}", self.addr.1),
&self.path
)
.stderr_capture()
.stdout_capture()
.start()?;
self.handle = Some(handle);
Ok(())
}
/// Shuts down the server fixture.
pub fn shutdown(&self) {
self.handle.as_ref().inspect(|handle| {
_ = handle.kill();
});
}
}
impl Drop for ServerFixture {
    fn drop(&mut self) {
        // Best-effort: kill the child process when the fixture is dropped.
        self.shutdown();
    }
}

View File

@@ -0,0 +1,92 @@
use std::{env, net::Ipv4Addr, path::PathBuf};
use anyhow::Result;
use axum::{
Router,
http::{HeaderName, HeaderValue},
};
use tokio::net::TcpListener;
use tower::ServiceBuilder;
use tower_http::{services::ServeDir, set_header::SetResponseHeaderLayer};
use crate::network::Namespace;
/// A static-file server process serving the wasm build artifacts, run
/// inside a network namespace.
pub struct WasmServer {
    // Network namespace the server is executed in.
    namespace: Namespace,
    // Path to the server binary.
    path: PathBuf,
    // Address (ip, port) the server listens on.
    addr: (Ipv4Addr, u16),
    // Handle to the spawned process; `None` until `start` succeeds.
    handle: Option<duct::Handle>,
}
impl WasmServer {
pub fn new(namespace: Namespace, path: PathBuf, addr: (Ipv4Addr, u16)) -> Self {
Self {
namespace,
path,
addr,
handle: None,
}
}
/// Spawns a new wasm server.
pub fn start(&mut self) -> Result<()> {
let handle = duct::cmd!(
"sudo",
"ip",
"netns",
"exec",
&self.namespace.name(),
"env",
format!("ADDR={}", self.addr.0),
format!("PORT={}", self.addr.1),
&self.path,
)
.stderr_capture()
.stdout_capture()
.start()?;
self.handle = Some(handle);
Ok(())
}
/// Shuts down the wasm server.
pub fn shutdown(&self) {
self.handle.as_ref().inspect(|handle| {
_ = handle.kill();
});
}
}
impl Drop for WasmServer {
    fn drop(&mut self) {
        // Best-effort: kill the child process when the server is dropped.
        self.shutdown();
    }
}
/// Entrypoint for the wasm static-file server.
///
/// Reads `ADDR` and `PORT` from the environment and serves the `static`
/// directory, attaching the COOP/COEP response headers required for
/// cross-origin isolation.
pub async fn main() -> Result<()> {
    let addr = env::var("ADDR")?;
    let port: u16 = env::var("PORT")?.parse()?;

    let coep = SetResponseHeaderLayer::if_not_present(
        HeaderName::from_static("cross-origin-embedder-policy"),
        HeaderValue::from_static("require-corp"),
    );
    let coop = SetResponseHeaderLayer::if_not_present(
        HeaderName::from_static("cross-origin-opener-policy"),
        HeaderValue::from_static("same-origin"),
    );

    let service = ServiceBuilder::new()
        .layer(coep)
        .layer(coop)
        .service(ServeDir::new("static"));

    // Route every request to the static-file service.
    let app = Router::new().fallback_service(service);

    let listener = TcpListener::bind((addr, port)).await?;
    axum::serve(listener, app).await?;

    Ok(())
}

View File

@@ -0,0 +1,39 @@
use std::net::Ipv4Addr;
use anyhow::Result;
use tokio::net::TcpListener;
/// A WebSocket-to-TCP relay proxy running as an in-process tokio task.
pub struct WsProxy {
    // Address (ip, port) the proxy listens on.
    addr: (Ipv4Addr, u16),
    // Handle to the relay task; `None` until `start` succeeds.
    handle: Option<tokio::task::JoinHandle<()>>,
}
impl WsProxy {
/// Spawns a new ws proxy.
pub fn new(addr: (Ipv4Addr, u16)) -> Self {
Self { addr, handle: None }
}
/// Starts the ws proxy.
pub async fn start(&mut self) -> Result<()> {
let listener = TcpListener::bind(self.addr).await?;
let handle = tokio::spawn(async move {
websocket_relay::run(listener).await.unwrap();
});
self.handle = Some(handle);
Ok(())
}
/// Shuts down the ws proxy.
pub fn shutdown(&self) {
self.handle.as_ref().inspect(|handle| handle.abort());
}
}
impl Drop for WsProxy {
    fn drop(&mut self) {
        // Abort the relay task when the proxy is dropped.
        self.shutdown();
    }
}

View File

@@ -0,0 +1,37 @@
import * as Comlink from "./comlink.mjs";
import initWasm, * as wasm from "./generated/harness_executor.js";
// Wraps the wasm harness executor so it can be driven over Comlink from the
// main page.
class Executor {
    // Instance of `wasm.WasmExecutor`; set by `init`.
    executor;

    // Loads the wasm module, initializes its thread pool, and constructs
    // the executor with `config`. Errors are logged and rethrown so the
    // caller (over Comlink) sees the failure.
    async init(config) {
        try {
            console.log("loading wasm");
            await initWasm();
            console.log("wasm loaded");
            console.log("initializing wasm");
            // Size the wasm thread pool to the available hardware threads.
            await wasm.initialize({ thread_count: navigator.hardwareConcurrency });
            console.log("wasm initialized");
            console.log("initializing executor");
            this.executor = new wasm.WasmExecutor(config);
            console.log("executor initialized");
        } catch (e) {
            console.error(e);
            throw e;
        }
    }

    // Forwards `cmd` to the wasm executor. The executor is given a callback
    // it can invoke on panic; racing the call against that callback ensures
    // a wasm panic resolves this promise instead of hanging it forever.
    async call(cmd) {
        let panicCallback;
        const panicPromise = new Promise((resolve, _) => {
            panicCallback = resolve;
        });

        const callPromise = this.executor.call(cmd, panicCallback);

        return await Promise.race([callPromise, panicPromise]);
    }
}
const executor = new Executor();
Comlink.expose(executor);

View File

Before

Width:  |  Height:  |  Size: 99 KiB

After

Width:  |  Height:  |  Size: 99 KiB

View File

@@ -0,0 +1,5 @@
import * as Comlink from "./comlink.mjs";

// Run the executor in a module worker and expose its Comlink proxy on
// `window` so the harness can drive it via `page.evaluate`.
const executor = Comlink.wrap(new Worker("executor.js", { type: "module" }));

window.executor = executor;

View File

@@ -7,10 +7,7 @@ edition = "2021"
workspace = true
[features]
tee_quote = [
"dep:mc-sgx-dcap-types",
"dep:hex",
]
tee_quote = ["dep:mc-sgx-dcap-types", "dep:hex"]
[dependencies]
notary-common = { workspace = true }
@@ -53,9 +50,9 @@ thiserror = { workspace = true }
tokio = { workspace = true, features = ["full"] }
tokio-rustls = { workspace = true }
tokio-util = { workspace = true, features = ["compat"] }
tower-http = { version = "0.5", features = ["cors"] }
tower-service = { version = "0.3" }
tower-util = { version = "0.3.1" }
tower-http = { workspace = true, features = ["cors"] }
tower-service = { workspace = true }
tower-util = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter", "json"] }
uuid = { workspace = true, features = ["v4", "fast-rng"] }

View File

@@ -1,4 +1,4 @@
# exclude Rust build artifacts
./target
./crates/wasm/pkg/
./crates/wasm-test-runner/static/generated/
./crates/harness/static/generated/

View File

@@ -25,7 +25,7 @@ tower-http = { version = "0.6.5", features = ["trace"] }
[[bin]]
name = "tlsn-server-fixture"
path = "src/main.rs"
path = "bin/main.rs"
[dev-dependencies]
axum-test = { version = "16.2.0" }

View File

@@ -8,8 +8,11 @@ use tracing::info;
#[tokio::main]
async fn main() -> io::Result<()> {
tracing_subscriber::fmt::init();
let port = env::var("PORT").unwrap_or_else(|_| DEFAULT_FIXTURE_PORT.to_string());
let listener = TcpListener::bind(&format!("0.0.0.0:{port}")).await?;
let addr = env::var("ADDR").unwrap_or_else(|_| "0.0.0.0".to_string());
let port = env::var("PORT")
.map(|port| port.parse().unwrap())
.unwrap_or_else(|_| DEFAULT_FIXTURE_PORT);
let listener = TcpListener::bind((addr, port)).await?;
info!("Starting server fixture on port {port}");
loop {

View File

@@ -1,26 +0,0 @@
[package]
name = "tests-integration"
version = "0.0.0"
edition = "2021"
publish = false
[lints]
workspace = true
[dev-dependencies]
tlsn-core = { workspace = true }
tlsn-common = { workspace = true }
tlsn-prover = { workspace = true }
tlsn-server-fixture = { workspace = true }
tlsn-server-fixture-certs = { workspace = true }
tlsn-tls-core = { workspace = true }
tlsn-verifier = { workspace = true }
futures = { workspace = true }
http-body-util = { workspace = true }
hyper = { workspace = true, features = ["client", "http1"] }
hyper-util = { workspace = true, features = ["full"] }
tokio = { workspace = true, features = ["rt", "rt-multi-thread", "macros"] }
tokio-util = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }

View File

@@ -1,141 +0,0 @@
use futures::{AsyncReadExt, AsyncWriteExt};
use tls_core::verify::WebPkiVerifier;
use tlsn_common::config::{ProtocolConfig, ProtocolConfigValidator};
use tlsn_core::{
attestation::AttestationConfig, request::RequestConfig, signing::SignatureAlgId,
transcript::TranscriptCommitConfig, CryptoProvider,
};
use tlsn_prover::{Prover, ProverConfig};
use tlsn_server_fixture::bind;
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
use tlsn_verifier::{Verifier, VerifierConfig};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::TokioAsyncReadCompatExt;
use tracing::instrument;
// Maximum number of bytes that can be sent from prover to server
const MAX_SENT_DATA: usize = 1 << 12;
// Maximum number of application records sent from prover to server
const MAX_SENT_RECORDS: usize = 4;
// Maximum number of bytes that can be received by prover from server
const MAX_RECV_DATA: usize = 1 << 14;
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
#[ignore]
async fn test_defer_decryption() {
tracing_subscriber::fmt::init();
let (socket_0, socket_1) = tokio::io::duplex(2 << 23);
tokio::join!(prover(socket_0), notary(socket_1));
}
#[instrument(skip(notary_socket))]
#[allow(deprecated)]
async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(notary_socket: T) {
let (client_socket, server_socket) = tokio::io::duplex(2 << 16);
let server_task = tokio::spawn(bind(server_socket.compat()));
let mut root_store = tls_core::anchors::RootCertStore::empty();
root_store
.add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
.unwrap();
let provider = CryptoProvider {
cert: WebPkiVerifier::new(root_store, None),
..Default::default()
};
let prover = Prover::new(
ProverConfig::builder()
.server_name(SERVER_DOMAIN)
.protocol_config(
ProtocolConfig::builder()
.max_sent_data(MAX_SENT_DATA)
.max_sent_records(MAX_SENT_RECORDS)
.max_recv_data(MAX_RECV_DATA)
.build()
.unwrap(),
)
.crypto_provider(provider)
.build()
.unwrap(),
)
.setup(notary_socket.compat())
.await
.unwrap();
let (mut tls_connection, prover_fut) = prover.connect(client_socket.compat()).await.unwrap();
let prover_task = tokio::spawn(prover_fut);
tls_connection
.write_all(b"GET / HTTP/1.1\r\nConnection: close\r\n\r\n")
.await
.unwrap();
tls_connection.close().await.unwrap();
let mut response = vec![0u8; 1024];
tls_connection.read_to_end(&mut response).await.unwrap();
let _ = server_task.await.unwrap();
let mut prover = prover_task.await.unwrap().unwrap();
let sent_tx_len = prover.transcript().sent().len();
let recv_tx_len = prover.transcript().received().len();
let mut builder = TranscriptCommitConfig::builder(prover.transcript());
// Commit to everything
builder.commit_sent(&(0..sent_tx_len)).unwrap();
builder.commit_recv(&(0..recv_tx_len)).unwrap();
let transcript_commit = builder.build().unwrap();
let mut builder = RequestConfig::builder();
builder.transcript_commit(transcript_commit);
let config = builder.build().unwrap();
prover.notarize(&config).await.unwrap();
prover.close().await.unwrap();
}
#[instrument(skip(socket))]
#[allow(deprecated)]
async fn notary<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(socket: T) {
let mut root_store = tls_core::anchors::RootCertStore::empty();
root_store
.add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
.unwrap();
let mut provider = CryptoProvider {
cert: WebPkiVerifier::new(root_store, None),
..Default::default()
};
provider.signer.set_secp256k1(&[1u8; 32]).unwrap();
let config_validator = ProtocolConfigValidator::builder()
.max_sent_data(MAX_SENT_DATA)
.max_recv_data(MAX_RECV_DATA)
.build()
.unwrap();
let verifier = Verifier::new(
VerifierConfig::builder()
.protocol_config_validator(config_validator)
.crypto_provider(provider)
.build()
.unwrap(),
);
let config = AttestationConfig::builder()
.supported_signature_algs(vec![SignatureAlgId::SECP256K1])
.build()
.unwrap();
_ = verifier.notarize(socket.compat(), &config).await.unwrap();
}

View File

@@ -1,166 +0,0 @@
use tls_core::verify::WebPkiVerifier;
use tlsn_common::config::{ProtocolConfig, ProtocolConfigValidator};
use tlsn_core::{
attestation::{AttestationConfig, Extension},
request::RequestConfig,
signing::SignatureAlgId,
transcript::TranscriptCommitConfig,
CryptoProvider,
};
use tlsn_prover::{Prover, ProverConfig};
use tlsn_server_fixture::bind;
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
use tlsn_verifier::{Verifier, VerifierConfig};
use http_body_util::{BodyExt as _, Empty};
use hyper::{body::Bytes, Request, StatusCode};
use hyper_util::rt::TokioIo;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
use tracing::instrument;
// Maximum number of bytes that can be sent from prover to server
const MAX_SENT_DATA: usize = 1 << 12;
// Maximum number of bytes that can be received by prover from server
const MAX_RECV_DATA: usize = 1 << 14;
#[tokio::test]
#[ignore]
async fn notarize() {
tracing_subscriber::fmt::init();
let (socket_0, socket_1) = tokio::io::duplex(2 << 23);
tokio::join!(prover(socket_0), notary(socket_1));
}
#[instrument(skip(notary_socket))]
#[allow(deprecated)]
async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(notary_socket: T) {
let (client_socket, server_socket) = tokio::io::duplex(2 << 16);
let server_task = tokio::spawn(bind(server_socket.compat()));
let mut root_store = tls_core::anchors::RootCertStore::empty();
root_store
.add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
.unwrap();
let provider = CryptoProvider {
cert: WebPkiVerifier::new(root_store, None),
..Default::default()
};
let protocol_config = ProtocolConfig::builder()
.max_sent_data(MAX_SENT_DATA)
.max_recv_data(MAX_RECV_DATA)
.max_recv_data_online(MAX_RECV_DATA)
.defer_decryption_from_start(false)
.build()
.unwrap();
let prover = Prover::new(
ProverConfig::builder()
.server_name(SERVER_DOMAIN)
.protocol_config(protocol_config)
.crypto_provider(provider)
.build()
.unwrap(),
)
.setup(notary_socket.compat())
.await
.unwrap();
let (tls_connection, prover_fut) = prover.connect(client_socket.compat()).await.unwrap();
let prover_task = tokio::spawn(prover_fut);
let (mut request_sender, connection) =
hyper::client::conn::http1::handshake(TokioIo::new(tls_connection.compat()))
.await
.unwrap();
tokio::spawn(connection);
let request = Request::builder()
.uri(format!("https://{}/bytes?size=16000", SERVER_DOMAIN))
.header("Host", SERVER_DOMAIN)
.header("Connection", "close")
.method("GET")
.body(Empty::<Bytes>::new())
.unwrap();
let response = request_sender.send_request(request).await.unwrap();
assert!(response.status() == StatusCode::OK);
let payload = response.into_body().collect().await.unwrap().to_bytes();
println!("{:?}", &String::from_utf8_lossy(&payload));
let _ = server_task.await.unwrap();
let mut prover = prover_task.await.unwrap().unwrap();
let sent_tx_len = prover.transcript().sent().len();
let recv_tx_len = prover.transcript().received().len();
let mut builder = TranscriptCommitConfig::builder(prover.transcript());
// Commit to everything
builder.commit_sent(&(0..sent_tx_len)).unwrap();
builder.commit_recv(&(0..recv_tx_len)).unwrap();
let transcript_commit = builder.build().unwrap();
let mut builder = RequestConfig::builder();
builder.transcript_commit(transcript_commit);
builder.extension(Extension {
id: b"foo".to_vec(),
value: b"bar".to_vec(),
});
let config = builder.build().unwrap();
let (attestation, _) = prover.notarize(&config).await.unwrap();
prover.close().await.unwrap();
assert_eq!(attestation.body.extensions().count(), 1);
}
#[instrument(skip(socket))]
#[allow(deprecated)]
async fn notary<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(socket: T) {
let mut root_store = tls_core::anchors::RootCertStore::empty();
root_store
.add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
.unwrap();
let mut provider = CryptoProvider {
cert: WebPkiVerifier::new(root_store, None),
..Default::default()
};
provider.signer.set_secp256k1(&[1u8; 32]).unwrap();
let config_validator = ProtocolConfigValidator::builder()
.max_sent_data(MAX_SENT_DATA)
.max_recv_data(MAX_RECV_DATA)
.build()
.unwrap();
let verifier = Verifier::new(
VerifierConfig::builder()
.protocol_config_validator(config_validator)
.crypto_provider(provider)
.build()
.unwrap(),
);
let config = AttestationConfig::builder()
.supported_signature_algs(vec![SignatureAlgId::SECP256K1])
.extension_validator(|_| Ok(()))
.build()
.unwrap();
_ = verifier.notarize(socket.compat(), &config).await.unwrap();
}

View File

@@ -1,31 +0,0 @@
[package]
name = "tlsn-wasm-test-runner"
version = "0.0.0"
edition = "2021"
publish = false
[lints]
workspace = true
[dependencies]
tlsn-common = { workspace = true }
tlsn-core = { workspace = true }
tlsn-prover = { workspace = true }
tlsn-server-fixture = { workspace = true }
tlsn-server-fixture-certs = { workspace = true }
tlsn-tls-core = { workspace = true }
tlsn-verifier = { workspace = true }
websocket-relay = { workspace = true }
anyhow = { workspace = true }
axum = { workspace = true }
chromiumoxide = { version = "0.6", features = ["tokio-runtime"] }
futures = { workspace = true }
serde = { workspace = true, features = ["derive"] }
tokio = { workspace = true, features = ["full"] }
tokio-util = { workspace = true, features = ["compat"] }
tower = { version = "0.4" }
tower-http = { version = "0.5", features = ["fs", "set-header"] }
tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }

View File

@@ -1,9 +0,0 @@
#!/bin/sh
# Ensure the script runs in the folder that contains this script
cd "$(dirname "$0")"
RUSTFLAGS='-C target-feature=+atomics,+bulk-memory,+mutable-globals -C link-arg=--max-memory=4294967296 --cfg getrandom_backend="wasm_js"' \
rustup run nightly \
wasm-pack build ../wasm --release --target web --no-pack --out-dir=../wasm-test-runner/static/generated -- -Zbuild-std=panic_abort,std --features test,no-bundler &&
RUST_LOG=debug cargo run --release

View File

@@ -1,91 +0,0 @@
use anyhow::{anyhow, Result};
use chromiumoxide::{
cdp::{
browser_protocol::log::{EventEntryAdded, LogEntryLevel},
js_protocol::runtime::EventExceptionThrown,
},
Browser, BrowserConfig, Page,
};
use futures::{Future, FutureExt, StreamExt};
use std::{env, time::Duration};
use tracing::{debug, error, instrument};
use crate::{TestResult, DEFAULT_SERVER_IP, DEFAULT_WASM_PORT};
#[instrument]
pub async fn run() -> Result<Vec<TestResult>> {
let config = BrowserConfig::builder()
.request_timeout(Duration::from_secs(60))
.disable_cache()
.incognito() // Run in incognito mode to avoid unexplained WS connection errors in chromiumoxide.
.build()
.map_err(|s| anyhow!(s))?;
debug!("launching chromedriver");
let (mut browser, mut handler) = Browser::launch(config).await?;
debug!("chromedriver started");
tokio::spawn(async move {
while let Some(res) = handler.next().await {
res.unwrap();
}
});
let wasm_port: u16 = env::var("WASM_PORT")
.map(|port| port.parse().expect("port should be valid integer"))
.unwrap_or(DEFAULT_WASM_PORT);
let wasm_addr: String = env::var("WASM_IP").unwrap_or_else(|_| DEFAULT_SERVER_IP.to_string());
let page = browser
.new_page(&format!("http://{}:{}/index.html", wasm_addr, wasm_port))
.await?;
tokio::spawn(register_listeners(&page).await?);
page.wait_for_navigation().await?;
let results: Vec<TestResult> = page
.evaluate(
r#"
(async () => {
await window.testWorker.init();
return await window.testWorker.run();
})();
"#,
)
.await?
.into_value()?;
browser.close().await?;
browser.wait().await?;
Ok(results)
}
async fn register_listeners(page: &Page) -> Result<impl Future<Output = ()>> {
let mut logs = page.event_listener::<EventEntryAdded>().await?.fuse();
let mut exceptions = page.event_listener::<EventExceptionThrown>().await?.fuse();
Ok(futures::future::join(
async move {
while let Some(event) = logs.next().await {
let entry = &event.entry;
match entry.level {
LogEntryLevel::Error => {
error!("{:?}", entry);
}
_ => {
debug!("{:?}: {}", entry.timestamp, entry.text);
}
}
}
},
async move {
while let Some(event) = exceptions.next().await {
error!("{:?}", event);
}
},
)
.map(|_| ()))
}

View File

@@ -1,39 +0,0 @@
use std::fmt::Display;
pub mod chrome_driver;
pub mod server_fixture;
pub mod tlsn_fixture;
pub mod wasm_server;
pub mod ws;
pub static DEFAULT_SERVER_IP: &str = "127.0.0.1";
pub static DEFAULT_WASM_PORT: u16 = 8013;
pub static DEFAULT_WS_PORT: u16 = 8080;
pub static DEFAULT_SERVER_PORT: u16 = 8083;
pub static DEFAULT_VERIFIER_PORT: u16 = 8010;
pub static DEFAULT_NOTARY_PORT: u16 = 8011;
pub static DEFAULT_PROVER_PORT: u16 = 8012;
#[derive(Debug, serde::Deserialize)]
pub struct TestResult {
pub name: String,
pub passed: bool,
#[serde(default)]
pub duration_secs: f64,
pub error: Option<String>,
}
impl Display for TestResult {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if self.passed {
write!(f, "{}: passed in {} seconds", self.name, self.duration_secs)?;
} else {
write!(f, "{}: failed", self.name)?;
if let Some(error) = &self.error {
write!(f, "\ncaused by: {}", error)?;
}
}
Ok(())
}
}

View File

@@ -1,42 +0,0 @@
use anyhow::Result;
fn init_tracing() {
use tracing_subscriber::EnvFilter;
tracing_subscriber::fmt()
.with_env_filter(EnvFilter::from_default_env())
.init();
}
#[tokio::main]
async fn main() -> Result<()> {
init_tracing();
let fut_wasm = tlsn_wasm_test_runner::wasm_server::start().await?;
let fut_proxy = tlsn_wasm_test_runner::ws::start().await?;
let fut_tlsn = tlsn_wasm_test_runner::tlsn_fixture::start().await?;
let fut_server = tlsn_wasm_test_runner::server_fixture::start().await?;
tokio::spawn(async move {
futures::future::try_join4(fut_wasm, fut_proxy, fut_tlsn, fut_server)
.await
.unwrap()
});
let results = tlsn_wasm_test_runner::chrome_driver::run().await?;
for result in &results {
println!("{}", result);
}
let passed = results.iter().filter(|r| r.passed).count();
let failed = results.iter().filter(|r| !r.passed).count();
println!("{} passed, {} failed", passed, failed);
if results.iter().any(|r| !r.passed) {
std::process::exit(1);
}
Ok(())
}

View File

@@ -1,34 +0,0 @@
use std::{env, net::IpAddr};
use tlsn_server_fixture;
use anyhow::Result;
use futures::Future;
use tokio::net::TcpListener;
use tokio_util::compat::TokioAsyncReadCompatExt;
use tracing::{info, instrument};
use crate::{DEFAULT_SERVER_IP, DEFAULT_SERVER_PORT};
#[instrument]
pub async fn start() -> Result<impl Future<Output = Result<()>>> {
let port: u16 = env::var("SERVER_PORT")
.map(|port| port.parse().expect("port should be valid integer"))
.unwrap_or(DEFAULT_SERVER_PORT);
let addr: IpAddr = env::var("SERVER_IP")
.map(|addr| addr.parse().expect("should be valid IP address"))
.unwrap_or(IpAddr::V4(DEFAULT_SERVER_IP.parse().unwrap()));
let listener = TcpListener::bind((addr, port)).await?;
info!("listening on: {}", listener.local_addr()?);
Ok(async move {
loop {
let (socket, addr) = listener.accept().await?;
info!("accepted connection from: {}", addr);
tokio::spawn(tlsn_server_fixture::bind(socket.compat()));
}
})
}

View File

@@ -1,199 +0,0 @@
use std::{env, net::IpAddr};
use anyhow::Result;
use futures::{AsyncReadExt, AsyncWriteExt, Future};
use tls_core::{anchors::RootCertStore, verify::WebPkiVerifier};
use tlsn_common::config::{ProtocolConfig, ProtocolConfigValidator};
use tlsn_core::{
attestation::AttestationConfig, signing::SignatureAlgId, CryptoProvider, ProveConfig,
VerifyConfig,
};
use tlsn_prover::{Prover, ProverConfig};
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
use tlsn_verifier::{Verifier, VerifierConfig};
use tokio::net::{TcpListener, TcpStream};
use tokio_util::compat::TokioAsyncReadCompatExt;
use tracing::{info, instrument};
use crate::{
DEFAULT_NOTARY_PORT, DEFAULT_PROVER_PORT, DEFAULT_SERVER_IP, DEFAULT_SERVER_PORT,
DEFAULT_VERIFIER_PORT,
};
#[instrument]
pub async fn start() -> Result<impl Future<Output = Result<()>>> {
let verifier_port: u16 = env::var("VERIFIER_PORT")
.map(|port| port.parse().expect("port should be valid integer"))
.unwrap_or(DEFAULT_VERIFIER_PORT);
let notary_port: u16 = env::var("NOTARY_PORT")
.map(|port| port.parse().expect("port should be valid integer"))
.unwrap_or(DEFAULT_NOTARY_PORT);
let prover_port: u16 = env::var("PROVER_PORT")
.map(|port| port.parse().expect("port should be valid integer"))
.unwrap_or(DEFAULT_PROVER_PORT);
let addr: IpAddr = env::var("TLSN_IP")
.map(|addr| addr.parse().expect("should be valid IP address"))
.unwrap_or(IpAddr::V4(DEFAULT_SERVER_IP.parse().unwrap()));
let verifier_listener = TcpListener::bind((addr, verifier_port)).await?;
let notary_listener = TcpListener::bind((addr, notary_port)).await?;
let prover_listener = TcpListener::bind((addr, prover_port)).await?;
Ok(async move {
loop {
tokio::select! {
res = verifier_listener.accept() => {
let (socket, addr) = res?;
info!("verifier accepted connection from: {}", addr);
tokio::spawn(handle_verifier(socket));
},
res = notary_listener.accept() => {
let (socket, addr) = res?;
info!("notary accepted connection from: {}", addr);
tokio::spawn(handle_notary(socket));
},
res = prover_listener.accept() => {
let (socket, addr) = res?;
info!("prover accepted connection from: {}", addr);
tokio::spawn(handle_prover(socket));
},
}
}
})
}
#[instrument(level = "debug", skip_all, err)]
async fn handle_verifier(io: TcpStream) -> Result<()> {
let mut root_store = RootCertStore::empty();
root_store
.add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
.unwrap();
let config_validator = ProtocolConfigValidator::builder()
.max_sent_data(1024)
.max_recv_data(1024)
.build()
.unwrap();
let provider = CryptoProvider {
cert: WebPkiVerifier::new(root_store, None),
..Default::default()
};
let config = VerifierConfig::builder()
.crypto_provider(provider)
.protocol_config_validator(config_validator)
.build()
.unwrap();
let verifier = Verifier::new(config);
verifier
.verify(io.compat(), &VerifyConfig::default())
.await?;
Ok(())
}
#[instrument(level = "debug", skip_all, err)]
async fn handle_notary(io: TcpStream) -> Result<()> {
let mut provider = CryptoProvider::default();
provider.signer.set_secp256k1(&[1u8; 32]).unwrap();
let config_validator = ProtocolConfigValidator::builder()
.max_sent_data(1024)
.max_recv_data(1024)
.build()
.unwrap();
let config = VerifierConfig::builder()
.protocol_config_validator(config_validator)
.crypto_provider(provider)
.build()
.unwrap();
let verifier = Verifier::new(config);
let mut builder = AttestationConfig::builder();
builder.supported_signature_algs(vec![SignatureAlgId::SECP256K1]);
let attestation_config = builder.build().unwrap();
#[allow(deprecated)]
verifier.notarize(io.compat(), &attestation_config).await?;
Ok(())
}
#[instrument(level = "debug", skip_all, err)]
async fn handle_prover(io: TcpStream) -> Result<()> {
let mut root_store = tls_core::anchors::RootCertStore::empty();
root_store
.add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
.unwrap();
let provider = CryptoProvider {
cert: WebPkiVerifier::new(root_store, None),
..Default::default()
};
let protocol_config = ProtocolConfig::builder()
.max_sent_data(1024)
.max_recv_data(1024)
.build()
.unwrap();
let prover = Prover::new(
ProverConfig::builder()
.server_name(SERVER_DOMAIN)
.protocol_config(protocol_config)
.crypto_provider(provider)
.build()
.unwrap(),
)
.setup(io.compat())
.await
.unwrap();
let port: u16 = env::var("SERVER_PORT")
.map(|port| port.parse().expect("port should be valid integer"))
.unwrap_or(DEFAULT_SERVER_PORT);
let addr: IpAddr = env::var("SERVER_IP")
.map(|addr| addr.parse().expect("should be valid IP address"))
.unwrap_or(IpAddr::V4(DEFAULT_SERVER_IP.parse().unwrap()));
let client_socket = TcpStream::connect((addr, port)).await.unwrap();
let (mut tls_connection, prover_fut) = prover.connect(client_socket.compat()).await.unwrap();
let prover_task = tokio::spawn(prover_fut);
tls_connection
.write_all(b"GET / HTTP/1.1\r\nConnection: close\r\n\r\n")
.await
.unwrap();
tls_connection.close().await.unwrap();
let mut response = vec![0u8; 1024];
tls_connection.read_to_end(&mut response).await.unwrap();
let mut prover = prover_task.await.unwrap().unwrap();
let sent_transcript_len = prover.transcript().sent().len();
let recv_transcript_len = prover.transcript().received().len();
let mut builder = ProveConfig::builder(prover.transcript());
builder.reveal_sent(&(0..sent_transcript_len - 1)).unwrap();
builder.reveal_recv(&(2..recv_transcript_len)).unwrap();
let config = builder.build().unwrap();
prover.prove(&config).await.unwrap();
prover.close().await.unwrap();
Ok(())
}

View File

@@ -1,49 +0,0 @@
use std::{env, net::IpAddr};
use anyhow::Result;
use axum::{
http::{HeaderName, HeaderValue},
Router,
};
use futures::Future;
use tokio::net::TcpListener;
use tower::ServiceBuilder;
use tower_http::{services::ServeDir, set_header::SetResponseHeaderLayer};
use tracing::{info, instrument};
use crate::{DEFAULT_SERVER_IP, DEFAULT_WASM_PORT};
#[instrument]
pub async fn start() -> Result<impl Future<Output = Result<()>>> {
let port: u16 = env::var("WASM_PORT")
.map(|port| port.parse().expect("port should be valid integer"))
.unwrap_or(DEFAULT_WASM_PORT);
let addr: IpAddr = env::var("WASM_IP")
.map(|addr| addr.parse().expect("should be valid IP address"))
.unwrap_or(IpAddr::V4(DEFAULT_SERVER_IP.parse().unwrap()));
let files = ServeDir::new("static");
let service = ServiceBuilder::new()
.layer(SetResponseHeaderLayer::if_not_present(
HeaderName::from_static("cross-origin-embedder-policy"),
HeaderValue::from_static("require-corp"),
))
.layer(SetResponseHeaderLayer::if_not_present(
HeaderName::from_static("cross-origin-opener-policy"),
HeaderValue::from_static("same-origin"),
))
.service(files);
// build our application with a single route
let app = Router::new().fallback_service(service);
let listener = TcpListener::bind((addr, port)).await?;
info!("listening on {}", listener.local_addr()?);
Ok(async move {
axum::serve(listener, app).await?;
Ok(())
})
}

View File

@@ -1,26 +0,0 @@
use std::{env, net::IpAddr};
use anyhow::{Context, Result};
use futures::Future;
use tokio::net::TcpListener;
use tracing::{info, instrument};
use crate::{DEFAULT_SERVER_IP, DEFAULT_WS_PORT};
#[instrument]
pub async fn start() -> Result<impl Future<Output = Result<()>>> {
let port: u16 = env::var("PROXY_PORT")
.map(|port| port.parse().expect("port should be valid integer"))
.unwrap_or(DEFAULT_WS_PORT);
let addr: IpAddr = env::var("PROXY_IP")
.map(|addr| addr.parse().expect("should be valid IP address"))
.unwrap_or(IpAddr::V4(DEFAULT_SERVER_IP.parse().unwrap()));
let listener = TcpListener::bind((addr, port))
.await
.context("failed to bind to address")?;
info!("listening on: {}", listener.local_addr()?);
Ok(websocket_relay::run(listener))
}

View File

@@ -1,5 +0,0 @@
import * as Comlink from "https://unpkg.com/comlink/dist/esm/comlink.mjs";
const testWorker = Comlink.wrap(new Worker("worker.js", { type: "module" }));
window.testWorker = testWorker;

View File

@@ -1,49 +0,0 @@
import * as Comlink from "https://unpkg.com/comlink/dist/esm/comlink.mjs";
import init_wasm, * as wasm from "./generated/tlsn_wasm.js";
class TestWorker {
async init() {
try {
console.log("initializing wasm");
await init_wasm();
await wasm.initialize({ level: "Debug" }, navigator.hardwareConcurrency);
} catch (e) {
console.error(e);
throw e;
}
}
async run() {
let promises = [];
for (const [name, func] of Object.entries(wasm)) {
if (name.startsWith("test_") && (typeof func === 'function')) {
promises.push((async () => {
console.log("running test", name);
const start = performance.now();
try {
await func();
} catch (error) {
return {
name: name,
passed: false,
error: error.toString(),
}
}
const duration_secs = (performance.now() - start) / 1000;
console.log(`Test ${name} passed in ${duration_secs} seconds`);
return {
name: name,
passed: true,
duration_secs,
}
})());
}
}
return Promise.all(promises);
}
}
const worker = new TestWorker();
Comlink.expose(worker);

View File

@@ -17,6 +17,3 @@ cargo clippy --all-features --all-targets -- -D warnings
# Run tests
# cargo test
# Run wasm tests
# ./crates/wasm-test-runner/run.sh