test: add browser benches (#570)

* refactor: modularize server-fixture

* Update crates/server-fixture/server/Cargo.toml

add newline

Co-authored-by: sinu.eth <65924192+sinui0@users.noreply.github.com>

* test: add browser benches

* fix deps

* ci: run ci workflow for all pull requests (#571)

* misc fixes

* fix clippy

* don't log a non-critical error to stderr

* use incognito (mitigates random hangs)

* add notes

* distinguish prover kind when plotting

---------

Co-authored-by: sinu.eth <65924192+sinui0@users.noreply.github.com>
Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>
Co-authored-by: Ubuntu <ubuntu@ip-10-35-1-164.eu-central-1.compute.internal>
dan
2024-10-14 15:52:52 +02:00
committed by GitHub
parent 66db5344ac
commit 6b845fd473
39 changed files with 1442 additions and 158 deletions

.dockerignore Normal file

@@ -0,0 +1,2 @@
/target
/.git

.github/workflows/bench-browser.yml vendored Normal file

@@ -0,0 +1,27 @@
name: Run Benchmarks In The Browser
on:
# manual trigger
workflow_dispatch:
jobs:
run-benchmarks:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Build Docker Image
run: |
docker build -t tlsn-bench . -f ./crates/benches/binary/benches.Dockerfile --build-arg BENCH_TYPE=browser
- name: Run Benchmarks
run: |
docker run --privileged -v ${{ github.workspace }}/crates/benches/binary:/benches tlsn-bench
- name: Upload runtime_vs_latency.html
uses: actions/upload-artifact@v4
with:
name: benchmark_graphs
path: |
./crates/benches/binary/runtime_vs_latency.html
./crates/benches/binary/runtime_vs_bandwidth.html
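The workflow above is manual-trigger only (`workflow_dispatch`); a sketch of starting it and fetching the uploaded `benchmark_graphs` artifact with the GitHub CLI (assumes `gh` is installed; `<run-id>` is a placeholder for the finished run):

```sh
# Trigger the browser benchmark workflow manually.
gh workflow run bench-browser.yml
# Once the run finishes, download the uploaded plots.
gh run download <run-id> --name benchmark_graphs
```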


@@ -12,16 +12,16 @@ jobs:
- name: Build Docker Image
run: |
docker build -t tlsn-bench . -f ./crates/benches/benches.Dockerfile
docker build -t tlsn-bench . -f ./crates/benches/binary/benches.Dockerfile --build-arg BENCH_TYPE=native
- name: Run Benchmarks
run: |
docker run --privileged -v ${{ github.workspace }}/crates/benches/:/benches tlsn-bench
docker run --privileged -v ${{ github.workspace }}/crates/benches/binary:/benches tlsn-bench
- name: Upload runtime_vs_latency.html
uses: actions/upload-artifact@v4
with:
name: benchmark_graphs
path: |
./crates/benches/runtime_vs_latency.html
./crates/benches/runtime_vs_bandwidth.html
./crates/benches/binary/runtime_vs_latency.html
./crates/benches/binary/runtime_vs_bandwidth.html


@@ -7,8 +7,6 @@ on:
tags:
- "[v]?[0-9]+.[0-9]+.[0-9]+*"
pull_request:
branches:
- dev
env:
CARGO_TERM_COLOR: always


@@ -1,6 +1,10 @@
[workspace]
members = [
"crates/benches",
"crates/benches/binary",
"crates/benches/browser/core",
"crates/benches/browser/native",
"crates/benches/browser/wasm",
"crates/benches/library",
"crates/common",
"crates/components/aead",
"crates/components/block-cipher",
@@ -41,6 +45,9 @@ notary-client = { path = "crates/notary/client" }
notary-server = { path = "crates/notary/server" }
tls-server-fixture = { path = "crates/tls/server-fixture" }
tlsn-aead = { path = "crates/components/aead" }
tlsn-benches-browser-core = { path = "crates/benches/browser/core" }
tlsn-benches-browser-native = { path = "crates/benches/browser/native" }
tlsn-benches-library = { path = "crates/benches/library" }
tlsn-block-cipher = { path = "crates/components/block-cipher" }
tlsn-common = { path = "crates/common" }
tlsn-core = { path = "crates/core" }


@@ -1,21 +0,0 @@
FROM rust AS builder
WORKDIR /usr/src/tlsn
COPY . .
RUN cd crates/benches && cargo build --release
FROM ubuntu:latest
RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \
iproute2 \
sudo \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder ["/usr/src/tlsn/target/release/bench", "/usr/src/tlsn/target/release/prover", "/usr/src/tlsn/target/release/verifier", "/usr/src/tlsn/target/release/plot", "/usr/local/bin/"]
ENV PROVER_PATH="/usr/local/bin/prover"
ENV VERIFIER_PATH="/usr/local/bin/verifier"
VOLUME [ "/benches" ]
WORKDIR "/benches"
CMD ["/bin/bash", "-c", "bench && plot /benches/metrics.csv && cat /benches/metrics.csv"]


@@ -2,14 +2,15 @@
edition = "2021"
name = "tlsn-benches"
publish = false
version = "0.0.0"
version = "0.1.0"
[features]
default = []
browser-bench = ["tlsn-benches-browser-native"]
[dependencies]
anyhow = { workspace = true }
charming = { version = "0.3.1", features = ["ssr"] }
csv = "1.3.0"
futures = { workspace = true }
serde = { workspace = true }
tlsn-benches-library = { workspace = true }
tlsn-benches-browser-native = { workspace = true, optional = true}
tlsn-common = { workspace = true }
tlsn-core = { workspace = true }
tlsn-prover = { workspace = true }
@@ -17,17 +18,17 @@ tlsn-server-fixture = { workspace = true }
tlsn-server-fixture-certs = { workspace = true }
tlsn-tls-core = { workspace = true }
tlsn-verifier = { workspace = true }
tokio = { workspace = true, features = [
"rt",
"rt-multi-thread",
"macros",
"net",
"io-std",
"fs",
] }
anyhow = { workspace = true }
async-trait = { workspace = true }
charming = {version = "0.3.1", features = ["ssr"]}
csv = "1.3.0"
env_logger = { version = "0.6.0", default-features = false }
serde = { workspace = true }
tokio = { workspace = true, features = ["rt", "rt-multi-thread", "macros", "net", "io-std"]}
tokio-util = { workspace = true }
toml = "0.8.11"
tracing-subscriber = { workspace = true, features = ["env-filter"] }
tracing-subscriber = {workspace = true, features = ["env-filter"]}
[[bin]]
name = "bench"
@@ -43,4 +44,4 @@ path = "bin/verifier.rs"
[[bin]]
name = "plot"
path = "bin/plot.rs"
path = "bin/plot.rs"


@@ -19,13 +19,31 @@ sudo apt-get install iproute2 -y
Running the benches requires root privileges because they will set up virtual interfaces. The script is designed to fully clean up when the benches are done, but run them at your own risk.
Make sure you're in the `crates/benches/` directory, build the binaries then run the script:
#### Native benches
Make sure you're in the `crates/benches/` directory, build the binaries, and then run the script:
```sh
cd binary
cargo build --release
sudo ./bench.sh
```
#### Browser benches
(Note: we recommend running the browser benches inside a Docker container (see docker.md) to avoid
incompatibility issues observed with the latest versions of Chrome.)
With a Chrome browser installed on your system, make sure you're in the `crates/benches/`
directory, build the wasm module, build the binaries, and then run the script:
```sh
cd browser/wasm
rustup run nightly wasm-pack build --release --target web
cd ../../binary
cargo build --release --features browser-bench
sudo ./bench.sh
```
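The browser prover's native component binds several localhost ports (9001-9005 by default) and reads environment-variable overrides for them (see `crates/benches/browser/native`). A hedged sketch, assuming the variables propagate through `sudo` and the spawned bench binaries, of overriding them if those ports are already in use:

```sh
# Override the default ports (9001-9005) used by the browser prover's native
# component; the variable names match the env::var lookups in that crate.
WASM_PORT=9101 WS_PORT=9102 WASM_TO_SERVER_PORT=9103 \
WASM_TO_VERIFIER_PORT=9104 WASM_TO_NATIVE_PORT=9105 \
sudo -E ./bench.sh
```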
## Metrics
After you run the benches you will see a `metrics.csv` file in the working directory. It will be owned by `root`, so you probably want to run


@@ -7,7 +7,7 @@ if [ "$EUID" -ne 0 ]; then
fi
# Run the benchmark binary
../../target/release/bench
../../../target/release/bench
# Plot the results
../../target/release/plot metrics.csv
../../../target/release/plot metrics.csv


@@ -35,5 +35,7 @@ upload-delay = 25
download = 250
download-delay = 25
upload-size = 1024
download-size = [1024, 4096, 16384, 65536]
# Setting download-size higher than 45000 will cause a `Maximum call stack size exceeded`
# error in the browser.
download-size = [1024, 4096, 16384, 45000]
defer-decryption = true


@@ -0,0 +1,46 @@
FROM rust AS builder
WORKDIR /usr/src/tlsn
COPY . .
ARG BENCH_TYPE=native
RUN \
if [ "$BENCH_TYPE" = "browser" ]; then \
# ring's build script needs clang.
apt update && apt install -y clang; \
rustup install nightly; \
rustup component add rust-src --toolchain nightly; \
cargo install wasm-pack; \
cd crates/benches/browser/wasm; \
rustup run nightly wasm-pack build --release --target web; \
cd ../../binary; \
cargo build --release --features browser-bench; \
else \
cd crates/benches/binary; \
cargo build --release; \
fi
FROM debian:latest
ARG BENCH_TYPE=native
RUN apt update && apt upgrade -y && apt install -y --no-install-recommends \
iproute2 \
sudo
RUN \
if [ "$BENCH_TYPE" = "browser" ]; then \
# Using Chromium since Chrome for Linux is not available on ARM.
apt install -y chromium; \
fi
RUN apt clean && rm -rf /var/lib/apt/lists/*
COPY --from=builder ["/usr/src/tlsn/target/release/bench", "/usr/src/tlsn/target/release/prover", "/usr/src/tlsn/target/release/verifier", "/usr/src/tlsn/target/release/plot", "/usr/local/bin/"]
ENV PROVER_PATH="/usr/local/bin/prover"
ENV VERIFIER_PATH="/usr/local/bin/verifier"
VOLUME [ "/benches" ]
WORKDIR "/benches"
CMD ["/bin/bash", "-c", "bench && plot /benches/metrics.csv && cat /benches/metrics.csv"]


@@ -1,19 +1,19 @@
use std::process::Command;
use std::{process::Command, thread, time::Duration};
use tlsn_benches::{clean_up, set_up};
fn main() {
let prover_path =
std::env::var("PROVER_PATH").unwrap_or_else(|_| "../../target/release/prover".to_string());
let prover_path = std::env::var("PROVER_PATH")
.unwrap_or_else(|_| "../../../target/release/prover".to_string());
let verifier_path = std::env::var("VERIFIER_PATH")
.unwrap_or_else(|_| "../../target/release/verifier".to_string());
.unwrap_or_else(|_| "../../../target/release/verifier".to_string());
if let Err(e) = set_up() {
println!("Error setting up: {}", e);
clean_up();
}
// Run prover and verifier binaries in parallel
// Run prover and verifier binaries in parallel.
let Ok(mut verifier) = Command::new("ip")
.arg("netns")
.arg("exec")
@@ -25,6 +25,10 @@ fn main() {
return clean_up();
};
// Allow the verifier some time to start listening before the prover attempts to
// connect.
thread::sleep(Duration::from_secs(1));
let Ok(mut prover) = Command::new("ip")
.arg("netns")
.arg("exec")
@@ -36,7 +40,7 @@ fn main() {
return clean_up();
};
// Wait for both to finish
// Wait for both to finish.
_ = prover.wait();
_ = verifier.wait();


@@ -1,3 +1,5 @@
use tlsn_benches::metrics::Metrics;
use charming::{
component::{
Axis, DataView, Feature, Legend, Restore, SaveAsImage, Title, Toolbox, ToolboxDataZoom,
@@ -7,7 +9,7 @@ use charming::{
theme::Theme,
Chart, HtmlRenderer,
};
use tlsn_benches::metrics::Metrics;
use csv::Reader;
const THEME: Theme = Theme::Default;
@@ -16,12 +18,12 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
.nth(1)
.expect("Usage: plot <path_to_csv_file>");
let mut rdr = csv::Reader::from_path(csv_file)?;
let mut rdr = Reader::from_path(csv_file)?;
// Prepare data for plotting
// Prepare data for plotting.
let all_data: Vec<Metrics> = rdr
.deserialize::<Metrics>()
.collect::<Result<Vec<_>, _>>()?; // Attempt to collect all results, return an error if any fail
.collect::<Result<Vec<_>, _>>()?; // Attempt to collect all results, return an error if any fail.
let _chart = runtime_vs_latency(&all_data)?;
let _chart = runtime_vs_bandwidth(&all_data)?;
@@ -32,6 +34,11 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
fn runtime_vs_latency(all_data: &[Metrics]) -> Result<Chart, Box<dyn std::error::Error>> {
const TITLE: &str = "Runtime vs Latency";
let prover_kind: String = all_data
.first()
.map(|s| s.kind.clone().into())
.unwrap_or_default();
let data: Vec<Vec<f32>> = all_data
.iter()
.filter(|record| record.name == "latency")
@@ -43,7 +50,11 @@ fn runtime_vs_latency(all_data: &[Metrics]) -> Result<Chart, Box<dyn std::error:
// https://github.com/yuankunzhang/charming
let chart = Chart::new()
.title(Title::new().text(TITLE))
.title(
Title::new()
.text(TITLE)
.subtext(format!("{} Prover", prover_kind)),
)
.tooltip(Tooltip::new().trigger(Trigger::Axis))
.legend(Legend::new().orient(Orient::Vertical))
.toolbox(


@@ -1,33 +1,33 @@
use std::{
fs::metadata,
io::Write,
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
time::Instant,
};
use anyhow::Context;
use futures::{AsyncReadExt, AsyncWriteExt};
use tls_core::verify::WebPkiVerifier;
use tlsn_benches::{
config::{BenchInstance, Config},
metrics::Metrics,
set_interface, PROVER_INTERFACE,
};
use tlsn_common::config::ProtocolConfig;
use tlsn_core::{transcript::Idx, CryptoProvider};
use tlsn_benches_library::{AsyncIo, ProverTrait};
use tlsn_server_fixture::bind;
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
use tokio::io::{AsyncRead, AsyncWrite};
use anyhow::Context;
use csv::WriterBuilder;
use tokio_util::{
compat::TokioAsyncReadCompatExt,
io::{InspectReader, InspectWriter},
};
use tlsn_prover::{Prover, ProverConfig};
use tracing_subscriber::{fmt::format::FmtSpan, EnvFilter};
#[cfg(not(feature = "browser-bench"))]
use tlsn_benches::prover::NativeProver as BenchProver;
#[cfg(feature = "browser-bench")]
use tlsn_benches_browser_native::BrowserProver as BenchProver;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let config_path = std::env::var("CFG").unwrap_or_else(|_| "bench.toml".to_string());
@@ -54,7 +54,10 @@ async fn main() -> anyhow::Result<()> {
.context("failed to open metrics file")?;
{
let mut metric_wtr = csv::Writer::from_writer(&mut file);
let mut metric_wtr = WriterBuilder::new()
// If file is not empty, assume that the CSV header is already present in the file.
.has_headers(metadata("metrics.csv")?.len() == 0)
.from_writer(&mut file);
for bench in config.benches {
let instances = bench.flatten();
for instance in instances {
@@ -78,10 +81,7 @@ async fn main() -> anyhow::Result<()> {
Ok(())
}
async fn run_instance<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
instance: BenchInstance,
io: S,
) -> anyhow::Result<Metrics> {
async fn run_instance(instance: BenchInstance, io: impl AsyncIo) -> anyhow::Result<Metrics> {
let uploaded = Arc::new(AtomicU64::new(0));
let downloaded = Arc::new(AtomicU64::new(0));
let io = InspectWriter::new(
@@ -112,69 +112,23 @@ async fn run_instance<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>
set_interface(PROVER_INTERFACE, upload, 1, upload_delay)?;
let (client_conn, server_conn) = tokio::io::duplex(2 << 16);
let (client_conn, server_conn) = tokio::io::duplex(1 << 16);
tokio::spawn(bind(server_conn.compat()));
let start_time = Instant::now();
let provider = CryptoProvider {
cert: WebPkiVerifier::new(root_store(), None),
..Default::default()
};
let protocol_config = if defer_decryption {
ProtocolConfig::builder()
.max_sent_data(upload_size + 256)
.max_recv_data(download_size + 256)
.build()
.unwrap()
} else {
ProtocolConfig::builder()
.max_sent_data(upload_size + 256)
.max_recv_data(download_size + 256)
.max_recv_data_online(download_size + 256)
.build()
.unwrap()
};
let prover = Prover::new(
ProverConfig::builder()
.server_name(SERVER_DOMAIN)
.protocol_config(protocol_config)
.defer_decryption_from_start(defer_decryption)
.crypto_provider(provider)
.build()
.context("invalid prover config")?,
let mut prover = BenchProver::setup(
upload_size,
download_size,
defer_decryption,
Box::new(io),
Box::new(client_conn),
)
.setup(io.compat())
.await?;
let (mut mpc_tls_connection, prover_fut) = prover.connect(client_conn.compat()).await.unwrap();
let prover_task = tokio::spawn(prover_fut);
let request = format!(
"GET /bytes?size={} HTTP/1.1\r\nConnection: close\r\nData: {}\r\n\r\n",
download_size,
String::from_utf8(vec![0x42u8; upload_size]).unwrap(),
);
mpc_tls_connection.write_all(request.as_bytes()).await?;
mpc_tls_connection.close().await?;
let mut response = vec![];
mpc_tls_connection.read_to_end(&mut response).await?;
let mut prover = prover_task.await??.start_prove();
let (sent_len, recv_len) = prover.transcript().len();
prover
.prove_transcript(Idx::new(0..sent_len), Idx::new(0..recv_len))
.await?;
prover.finalize().await?;
let runtime = prover.run().await?;
Ok(Metrics {
name,
kind: prover.kind(),
upload,
upload_delay,
download,
@@ -182,16 +136,8 @@ async fn run_instance<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>
upload_size,
download_size,
defer_decryption,
runtime: Instant::now().duration_since(start_time).as_secs(),
runtime,
uploaded: uploaded.load(Ordering::SeqCst),
downloaded: downloaded.load(Ordering::SeqCst),
})
}
fn root_store() -> tls_core::anchors::RootCertStore {
let mut root_store = tls_core::anchors::RootCertStore::empty();
root_store
.add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
.unwrap();
root_store
}


@@ -1,8 +1,3 @@
use anyhow::Context;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::TokioAsyncReadCompatExt;
use tracing_subscriber::{fmt::format::FmtSpan, EnvFilter};
use tls_core::verify::WebPkiVerifier;
use tlsn_benches::{
config::{BenchInstance, Config},
@@ -13,6 +8,11 @@ use tlsn_core::CryptoProvider;
use tlsn_server_fixture_certs::CA_CERT_DER;
use tlsn_verifier::{Verifier, VerifierConfig};
use anyhow::Context;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::TokioAsyncReadCompatExt;
use tracing_subscriber::{fmt::format::FmtSpan, EnvFilter};
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let config_path = std::env::var("CFG").unwrap_or_else(|_| "bench.toml".to_string());


@@ -0,0 +1,13 @@
# Run the TLSN benches with Docker
In the root folder of this repository, run:
```
# Change to BENCH_TYPE=browser if you want benchmarks to run in the browser.
docker build -t tlsn-bench . -f ./crates/benches/binary/benches.Dockerfile --build-arg BENCH_TYPE=native
```
Next run the benches with:
```
docker run -it --privileged -v ./crates/benches/binary:/benches tlsn-bench
```
The `--privileged` parameter is required because this test bench needs permission to create and configure network namespaces and virtual interfaces.
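With the volume mount above, the container writes `metrics.csv` and the HTML plots into the mounted `./crates/benches/binary` directory on the host; a sketch of inspecting the results afterwards (assumes the default CMD ran to completion):

```sh
# metrics.csv and the plots land in the mounted host directory once the
# container's default CMD (bench && plot && cat metrics.csv) finishes.
cat ./crates/benches/binary/metrics.csv
ls ./crates/benches/binary/runtime_vs_latency.html \
   ./crates/benches/binary/runtime_vs_bandwidth.html
```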


@@ -1,7 +1,11 @@
pub mod config;
pub mod metrics;
pub mod prover;
use std::{io, process::Command};
use std::{
io,
process::{Command, Stdio},
};
pub const PROVER_NAMESPACE: &str = "prover-ns";
pub const PROVER_INTERFACE: &str = "prover-veth";
@@ -27,6 +31,10 @@ pub fn set_up() -> io::Result<()> {
set_device_up(PROVER_NAMESPACE, PROVER_INTERFACE)?;
set_device_up(VERIFIER_NAMESPACE, VERIFIER_INTERFACE)?;
// Bring up the loopback interface.
set_device_up(PROVER_NAMESPACE, "lo")?;
set_device_up(VERIFIER_NAMESPACE, "lo")?;
// Assign IPs
assign_ip_to_interface(PROVER_NAMESPACE, PROVER_INTERFACE, PROVER_SUBNET)?;
assign_ip_to_interface(VERIFIER_NAMESPACE, VERIFIER_INTERFACE, VERIFIER_SUBNET)?;
@@ -90,13 +98,20 @@ pub fn clean_up() {
/// * `delay` - The delay in ms.
pub fn set_interface(interface: &str, egress: usize, burst: usize, delay: usize) -> io::Result<()> {
// Clear rules
_ = Command::new("tc")
let output = Command::new("tc")
.arg("qdisc")
.arg("del")
.arg("dev")
.arg(interface)
.arg("root")
.status();
.stdout(Stdio::piped())
.output()?;
if output.stderr == "Error: Cannot delete qdisc with handle of zero.\n".as_bytes() {
// This error just means there was no existing qdisc to delete; do not log it to stderr.
} else if !output.status.success() {
return Err(io::Error::other("Failed to clear rules"));
}
// Egress
Command::new("tc")


@@ -1,8 +1,11 @@
use serde::{Deserialize, Serialize};
use tlsn_benches_library::ProverKind;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Metrics {
pub name: String,
/// The kind of the prover, either native or browser.
pub kind: ProverKind,
/// Upload bandwidth in Mbps.
pub upload: usize,
/// Upload latency in ms.


@@ -0,0 +1,57 @@
use std::time::Instant;
use tlsn_benches_library::{run_prover, AsyncIo, ProverKind, ProverTrait};
use async_trait::async_trait;
pub struct NativeProver {
upload_size: usize,
download_size: usize,
defer_decryption: bool,
io: Option<Box<dyn AsyncIo>>,
client_conn: Option<Box<dyn AsyncIo>>,
}
#[async_trait]
impl ProverTrait for NativeProver {
async fn setup(
upload_size: usize,
download_size: usize,
defer_decryption: bool,
io: Box<dyn AsyncIo>,
client_conn: Box<dyn AsyncIo>,
) -> anyhow::Result<Self>
where
Self: Sized,
{
Ok(Self {
upload_size,
download_size,
defer_decryption,
io: Some(io),
client_conn: Some(client_conn),
})
}
async fn run(&mut self) -> anyhow::Result<u64> {
let io = std::mem::take(&mut self.io).unwrap();
let client_conn = std::mem::take(&mut self.client_conn).unwrap();
let start_time = Instant::now();
run_prover(
self.upload_size,
self.download_size,
self.defer_decryption,
io,
client_conn,
)
.await?;
Ok(Instant::now().duration_since(start_time).as_secs())
}
fn kind(&self) -> ProverKind {
ProverKind::Native
}
}


@@ -0,0 +1,13 @@
[package]
edition = "2021"
name = "tlsn-benches-browser-core"
publish = false
version = "0.1.0"
[dependencies]
tlsn-benches-library = { workspace = true }
serio = { workspace = true }
serde = { workspace = true }
tokio-util= { workspace = true, features = ["compat", "io-util"] }


@@ -0,0 +1,68 @@
//! Contains core types shared by the native and the wasm components.
use std::{
io::Error,
pin::Pin,
task::{Context, Poll},
};
use tlsn_benches_library::AsyncIo;
use serio::{
codec::{Bincode, Framed},
Sink, Stream,
};
use tokio_util::codec::LengthDelimitedCodec;
pub mod msg;
/// A sink/stream for serializable types with a framed transport.
pub struct FramedIo {
inner:
serio::Framed<tokio_util::codec::Framed<Box<dyn AsyncIo>, LengthDelimitedCodec>, Bincode>,
}
impl FramedIo {
/// Creates a new `FramedIo` from the given async `io`.
#[allow(clippy::default_constructed_unit_structs)]
pub fn new(io: Box<dyn AsyncIo>) -> Self {
let io = LengthDelimitedCodec::builder().new_framed(io);
Self {
inner: Framed::new(io, Bincode::default()),
}
}
}
impl Sink for FramedIo {
type Error = Error;
fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Pin::new(&mut self.inner).poll_ready(cx)
}
fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Pin::new(&mut self.inner).poll_close(cx)
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Pin::new(&mut self.inner).poll_flush(cx)
}
fn start_send<Item: serio::Serialize>(
mut self: Pin<&mut Self>,
item: Item,
) -> std::result::Result<(), Self::Error> {
Pin::new(&mut self.inner).start_send(item)
}
}
impl Stream for FramedIo {
type Error = Error;
fn poll_next<Item: serio::Deserialize>(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Item, Error>>> {
Pin::new(&mut self.inner).poll_next(cx)
}
}


@@ -0,0 +1,17 @@
//! Messages exchanged by the native and the wasm components of the browser
//! prover.
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, PartialEq)]
/// The config sent to the wasm component.
pub struct Config {
pub upload_size: usize,
pub download_size: usize,
pub defer_decryption: bool,
}
#[derive(Serialize, Deserialize, PartialEq)]
/// Sent by the wasm component when the proving process is finished. Contains the
/// total runtime in seconds.
pub struct Runtime(pub u64);


@@ -0,0 +1,23 @@
[package]
edition = "2021"
name = "tlsn-benches-browser-native"
publish = false
version = "0.1.0"
[dependencies]
tlsn-benches-browser-core = { workspace = true }
tlsn-benches-library = { workspace = true }
serio = { workspace = true }
websocket-relay = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6ef048c" }
anyhow = { workspace = true }
async-trait = { workspace = true }
chromiumoxide = { version = "0.6.0" , features = ["tokio-runtime"] }
futures = { workspace = true }
rust-embed = "8.5.0"
tokio = { workspace = true, features = ["rt", "io-std"] }
tokio-tungstenite = { version = "0.23", features = ["url"] }
tracing = { workspace = true }
warp = "0.3.7"
warp-embed = "0.5.0"


@@ -0,0 +1,331 @@
//! Contains the native component of the browser prover.
//!
//! Conceptually the browser prover consists of the native and the wasm
//! components. The native component is responsible for starting the browser,
//! loading the wasm component and driving it.
use std::{env, net::IpAddr};
use serio::{stream::IoStreamExt, SinkExt as _};
use tlsn_benches_browser_core::{
msg::{Config, Runtime},
FramedIo,
};
use tlsn_benches_library::{AsyncIo, ProverKind, ProverTrait};
use anyhow::{anyhow, Context, Result};
use async_trait::async_trait;
use chromiumoxide::{
cdp::{
browser_protocol::log::{EventEntryAdded, LogEntryLevel},
js_protocol::runtime::EventExceptionThrown,
},
Browser, BrowserConfig, Page,
};
use futures::{Future, FutureExt, StreamExt};
use rust_embed::RustEmbed;
use tokio::{io, io::AsyncWriteExt, net::TcpListener, task::JoinHandle};
use tracing::{debug, error, info};
use warp::Filter;
/// The IP on which the wasm component is served.
pub static DEFAULT_WASM_IP: &str = "127.0.0.1";
/// The IP of the websocket relay.
pub static DEFAULT_WS_IP: &str = "127.0.0.1";
/// The port on which the wasm component is served.
pub static DEFAULT_WASM_PORT: u16 = 9001;
/// The port of the websocket relay.
pub static DEFAULT_WS_PORT: u16 = 9002;
/// The port for the wasm component to communicate with the TLS server.
pub static DEFAULT_WASM_TO_SERVER_PORT: u16 = 9003;
/// The port for the wasm component to communicate with the verifier.
pub static DEFAULT_WASM_TO_VERIFIER_PORT: u16 = 9004;
/// The port for the wasm component to communicate with the native component.
pub static DEFAULT_WASM_TO_NATIVE_PORT: u16 = 9005;
// The `pkg` dir will be embedded into the binary at compile-time.
#[derive(RustEmbed)]
#[folder = "../wasm/pkg"]
struct Data;
/// The native component of the browser prover.
pub struct BrowserProver {
/// Io for communication with the wasm component.
wasm_io: FramedIo,
/// The browser spawned by the prover.
browser: Browser,
/// A handle to the http server.
http_server: JoinHandle<()>,
/// Handles to the relays.
relays: Vec<JoinHandle<Result<(), anyhow::Error>>>,
}
#[async_trait]
impl ProverTrait for BrowserProver {
async fn setup(
upload_size: usize,
download_size: usize,
defer_decryption: bool,
verifier_io: Box<dyn AsyncIo>,
server_io: Box<dyn AsyncIo>,
) -> anyhow::Result<Self>
where
Self: Sized,
{
let wasm_port: u16 = env::var("WASM_PORT")
.map(|port| port.parse().expect("port should be valid integer"))
.unwrap_or(DEFAULT_WASM_PORT);
let ws_port: u16 = env::var("WS_PORT")
.map(|port| port.parse().expect("port should be valid integer"))
.unwrap_or(DEFAULT_WS_PORT);
let wasm_to_server_port: u16 = env::var("WASM_TO_SERVER_PORT")
.map(|port| port.parse().expect("port should be valid integer"))
.unwrap_or(DEFAULT_WASM_TO_SERVER_PORT);
let wasm_to_verifier_port: u16 = env::var("WASM_TO_VERIFIER_PORT")
.map(|port| port.parse().expect("port should be valid integer"))
.unwrap_or(DEFAULT_WASM_TO_VERIFIER_PORT);
let wasm_to_native_port: u16 = env::var("WASM_TO_NATIVE_PORT")
.map(|port| port.parse().expect("port should be valid integer"))
.unwrap_or(DEFAULT_WASM_TO_NATIVE_PORT);
let wasm_ip: IpAddr = env::var("WASM_IP")
.map(|addr| addr.parse().expect("should be valid IP address"))
.unwrap_or(IpAddr::V4(DEFAULT_WASM_IP.parse().unwrap()));
let ws_ip: IpAddr = env::var("WS_IP")
.map(|addr| addr.parse().expect("should be valid IP address"))
.unwrap_or(IpAddr::V4(DEFAULT_WS_IP.parse().unwrap()));
let mut relays = Vec::with_capacity(4);
relays.push(spawn_websocket_relay(ws_ip, ws_port).await?);
let http_server = spawn_http_server(wasm_ip, wasm_port)?;
// Relay data from the wasm component to the server.
relays.push(spawn_port_relay(wasm_to_server_port, server_io).await?);
// Relay data from the wasm component to the verifier.
relays.push(spawn_port_relay(wasm_to_verifier_port, verifier_io).await?);
// Create a framed connection to the wasm component.
let (wasm_left, wasm_right) = tokio::io::duplex(1 << 16);
relays.push(spawn_port_relay(wasm_to_native_port, Box::new(wasm_right)).await?);
let mut wasm_io = FramedIo::new(Box::new(wasm_left));
info!("spawning browser");
// Note that the browser must be spawned only when the WebSocket relay is
// running.
let browser = spawn_browser(
wasm_ip,
ws_ip,
wasm_port,
ws_port,
wasm_to_server_port,
wasm_to_verifier_port,
wasm_to_native_port,
)
.await?;
info!("sending config to the browser component");
wasm_io
.send(Config {
upload_size,
download_size,
defer_decryption,
})
.await?;
Ok(Self {
wasm_io,
browser,
http_server,
relays,
})
}
async fn run(&mut self) -> anyhow::Result<u64> {
let runtime: Runtime = self.wasm_io.expect_next().await.unwrap();
_ = self.clean_up().await?;
Ok(runtime.0)
}
fn kind(&self) -> ProverKind {
ProverKind::Browser
}
}
impl BrowserProver {
async fn clean_up(&mut self) -> anyhow::Result<()> {
// Kill the http server.
self.http_server.abort();
// Kill all relays.
let _ = self
.relays
.iter_mut()
.map(|task| task.abort())
.collect::<Vec<_>>();
// Close the browser.
self.browser.close().await?;
self.browser.wait().await?;
Ok(())
}
}
pub async fn spawn_websocket_relay(
ip: IpAddr,
port: u16,
) -> anyhow::Result<JoinHandle<Result<(), anyhow::Error>>> {
let listener = TcpListener::bind((ip, port)).await?;
Ok(tokio::spawn(websocket_relay::run(listener)))
}
/// Binds to the given localhost `port`, accepts a connection and relays data
/// between the connection and the `channel`.
pub async fn spawn_port_relay(
port: u16,
channel: Box<dyn AsyncIo>,
) -> anyhow::Result<JoinHandle<Result<(), anyhow::Error>>> {
let listener = tokio::net::TcpListener::bind(("127.0.0.1", port))
.await
.context("failed to bind to port")?;
let handle = tokio::spawn(async move {
let (tcp, _) = listener
.accept()
.await
.context("failed to accept a connection")
.unwrap();
relay_data(Box::new(tcp), channel).await
});
Ok(handle)
}
/// Relays data between two sources.
pub async fn relay_data(left: Box<dyn AsyncIo>, right: Box<dyn AsyncIo>) -> Result<()> {
let (mut left_read, mut left_write) = io::split(left);
let (mut right_read, mut right_write) = io::split(right);
let left_to_right = async {
io::copy(&mut left_read, &mut right_write).await?;
right_write.shutdown().await
};
let right_to_left = async {
io::copy(&mut right_read, &mut left_write).await?;
left_write.shutdown().await
};
tokio::try_join!(left_to_right, right_to_left)?;
Ok(())
}
/// Spawns the browser and starts the wasm component.
async fn spawn_browser(
wasm_ip: IpAddr,
ws_ip: IpAddr,
wasm_port: u16,
ws_port: u16,
wasm_to_server_port: u16,
wasm_to_verifier_port: u16,
wasm_to_native_port: u16,
) -> anyhow::Result<Browser> {
// Chrome requires --no-sandbox when running as root.
let config = BrowserConfig::builder()
.no_sandbox()
.incognito()
.build()
.map_err(|s| anyhow!(s))?;
debug!("launching chromedriver");
let (browser, mut handler) = Browser::launch(config).await?;
debug!("chromedriver started");
tokio::spawn(async move {
while let Some(res) = handler.next().await {
res.unwrap();
}
});
let page = browser
.new_page(&format!("http://{}:{}/index.html", wasm_ip, wasm_port))
.await?;
tokio::spawn(register_listeners(&page).await?);
page.wait_for_navigation().await?;
// Note that `format!` needs double {{ }} in order to escape them.
let _ = page
.evaluate_function(&format!(
r#"
async function() {{
await window.worker.init();
// Do not `await` run() or else it will block the browser.
window.worker.run("{}", {}, {}, {}, {});
}}
"#,
ws_ip, ws_port, wasm_to_server_port, wasm_to_verifier_port, wasm_to_native_port
))
.await?;
Ok(browser)
}
pub fn spawn_http_server(ip: IpAddr, port: u16) -> anyhow::Result<JoinHandle<()>> {
let handle = tokio::spawn(async move {
// Serve embedded files with additional headers.
let data_serve = warp_embed::embed(&Data);
let data_serve_with_headers = data_serve
.map(|reply| {
warp::reply::with_header(reply, "Cross-Origin-Opener-Policy", "same-origin")
})
.map(|reply| {
warp::reply::with_header(reply, "Cross-Origin-Embedder-Policy", "require-corp")
});
warp::serve(data_serve_with_headers).run((ip, port)).await;
});
Ok(handle)
}
async fn register_listeners(page: &Page) -> Result<impl Future<Output = ()>> {
let mut logs = page.event_listener::<EventEntryAdded>().await?.fuse();
let mut exceptions = page.event_listener::<EventExceptionThrown>().await?.fuse();
Ok(futures::future::join(
async move {
while let Some(event) = logs.next().await {
let entry = &event.entry;
match entry.level {
LogEntryLevel::Error => {
error!("{:?}", entry);
}
_ => {
debug!("{:?}: {}", entry.timestamp, entry.text);
}
}
}
},
async move {
while let Some(event) = exceptions.next().await {
error!("{:?}", event);
}
},
)
.map(|_| ()))
}


@@ -0,0 +1,11 @@
[build]
target = "wasm32-unknown-unknown"
[target.wasm32-unknown-unknown]
rustflags = [
"-C",
"target-feature=+atomics,+bulk-memory,+mutable-globals",
]
[unstable]
build-std = ["panic_abort", "std"]


@@ -0,0 +1,38 @@
[package]
edition = "2021"
name = "tlsn-benches-browser-wasm"
publish = false
version = "0.1.0"
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
tlsn-benches-browser-core = { workspace = true }
tlsn-benches-library = { workspace = true }
tlsn-wasm = { path = "../../../wasm" }
serio = { workspace = true }
anyhow = { workspace = true }
console_error_panic_hook = { version = "0.1.7" }
parking_lot = { version = "0.12", features = ["nightly"] }
serde = { workspace = true, features = ["derive"] }
tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["time", "env-filter"] }
tracing-web = "0.1.2"
tsify-next = { version = "0.5", default-features = false, features = ["js"] }
wasm-bindgen = "0.2.87"
wasm-bindgen-futures = "0.4.37"
web-sys = { version = "0.3.4", features = ["console"] }
web-time = { workspace = true }
# Use the patched ws_stream_wasm to fix the issue https://github.com/najamelan/ws_stream_wasm/issues/12#issuecomment-1711902958
ws_stream_wasm = { version = "0.7.4", git = "https://github.com/tlsnotary/ws_stream_wasm", rev = "2ed12aad9f0236e5321f577672f309920b2aef51", features = ["tokio_io"]}
[target.'cfg(target_arch = "wasm32")'.dependencies]
wasm-bindgen-rayon = { version = "1", features = ["no-bundler"] }
[package.metadata.wasm-pack.profile.release]
# Note: these wasm-pack options should match those in crates/wasm/Cargo.toml
opt-level = "z"
wasm-opt = true


@@ -0,0 +1,346 @@
/**
* @license
* Copyright 2019 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
const proxyMarker = Symbol("Comlink.proxy");
const createEndpoint = Symbol("Comlink.endpoint");
const releaseProxy = Symbol("Comlink.releaseProxy");
const finalizer = Symbol("Comlink.finalizer");
const throwMarker = Symbol("Comlink.thrown");
const isObject = (val) => (typeof val === "object" && val !== null) || typeof val === "function";
/**
* Internal transfer handle to handle objects marked to proxy.
*/
const proxyTransferHandler = {
canHandle: (val) => isObject(val) && val[proxyMarker],
serialize(obj) {
const { port1, port2 } = new MessageChannel();
expose(obj, port1);
return [port2, [port2]];
},
deserialize(port) {
port.start();
return wrap(port);
},
};
/**
* Internal transfer handler to handle thrown exceptions.
*/
const throwTransferHandler = {
canHandle: (value) => isObject(value) && throwMarker in value,
serialize({ value }) {
let serialized;
if (value instanceof Error) {
serialized = {
isError: true,
value: {
message: value.message,
name: value.name,
stack: value.stack,
},
};
}
else {
serialized = { isError: false, value };
}
return [serialized, []];
},
deserialize(serialized) {
if (serialized.isError) {
throw Object.assign(new Error(serialized.value.message), serialized.value);
}
throw serialized.value;
},
};
/**
* Allows customizing the serialization of certain values.
*/
const transferHandlers = new Map([
["proxy", proxyTransferHandler],
["throw", throwTransferHandler],
]);
function isAllowedOrigin(allowedOrigins, origin) {
for (const allowedOrigin of allowedOrigins) {
if (origin === allowedOrigin || allowedOrigin === "*") {
return true;
}
if (allowedOrigin instanceof RegExp && allowedOrigin.test(origin)) {
return true;
}
}
return false;
}
function expose(obj, ep = globalThis, allowedOrigins = ["*"]) {
ep.addEventListener("message", function callback(ev) {
if (!ev || !ev.data) {
return;
}
if (!isAllowedOrigin(allowedOrigins, ev.origin)) {
console.warn(`Invalid origin '${ev.origin}' for comlink proxy`);
return;
}
const { id, type, path } = Object.assign({ path: [] }, ev.data);
const argumentList = (ev.data.argumentList || []).map(fromWireValue);
let returnValue;
try {
const parent = path.slice(0, -1).reduce((obj, prop) => obj[prop], obj);
const rawValue = path.reduce((obj, prop) => obj[prop], obj);
switch (type) {
case "GET" /* MessageType.GET */:
{
returnValue = rawValue;
}
break;
case "SET" /* MessageType.SET */:
{
parent[path.slice(-1)[0]] = fromWireValue(ev.data.value);
returnValue = true;
}
break;
case "APPLY" /* MessageType.APPLY */:
{
returnValue = rawValue.apply(parent, argumentList);
}
break;
case "CONSTRUCT" /* MessageType.CONSTRUCT */:
{
const value = new rawValue(...argumentList);
returnValue = proxy(value);
}
break;
case "ENDPOINT" /* MessageType.ENDPOINT */:
{
const { port1, port2 } = new MessageChannel();
expose(obj, port2);
returnValue = transfer(port1, [port1]);
}
break;
case "RELEASE" /* MessageType.RELEASE */:
{
returnValue = undefined;
}
break;
default:
return;
}
}
catch (value) {
returnValue = { value, [throwMarker]: 0 };
}
Promise.resolve(returnValue)
.catch((value) => {
return { value, [throwMarker]: 0 };
})
.then((returnValue) => {
const [wireValue, transferables] = toWireValue(returnValue);
ep.postMessage(Object.assign(Object.assign({}, wireValue), { id }), transferables);
if (type === "RELEASE" /* MessageType.RELEASE */) {
// detach and deactive after sending release response above.
ep.removeEventListener("message", callback);
closeEndPoint(ep);
if (finalizer in obj && typeof obj[finalizer] === "function") {
obj[finalizer]();
}
}
})
.catch((error) => {
// Send Serialization Error To Caller
const [wireValue, transferables] = toWireValue({
value: new TypeError("Unserializable return value"),
[throwMarker]: 0,
});
ep.postMessage(Object.assign(Object.assign({}, wireValue), { id }), transferables);
});
});
if (ep.start) {
ep.start();
}
}
function isMessagePort(endpoint) {
return endpoint.constructor.name === "MessagePort";
}
function closeEndPoint(endpoint) {
if (isMessagePort(endpoint))
endpoint.close();
}
function wrap(ep, target) {
return createProxy(ep, [], target);
}
function throwIfProxyReleased(isReleased) {
if (isReleased) {
throw new Error("Proxy has been released and is not useable");
}
}
function releaseEndpoint(ep) {
return requestResponseMessage(ep, {
type: "RELEASE" /* MessageType.RELEASE */,
}).then(() => {
closeEndPoint(ep);
});
}
const proxyCounter = new WeakMap();
const proxyFinalizers = "FinalizationRegistry" in globalThis &&
new FinalizationRegistry((ep) => {
const newCount = (proxyCounter.get(ep) || 0) - 1;
proxyCounter.set(ep, newCount);
if (newCount === 0) {
releaseEndpoint(ep);
}
});
function registerProxy(proxy, ep) {
const newCount = (proxyCounter.get(ep) || 0) + 1;
proxyCounter.set(ep, newCount);
if (proxyFinalizers) {
proxyFinalizers.register(proxy, ep, proxy);
}
}
function unregisterProxy(proxy) {
if (proxyFinalizers) {
proxyFinalizers.unregister(proxy);
}
}
function createProxy(ep, path = [], target = function () { }) {
let isProxyReleased = false;
const proxy = new Proxy(target, {
get(_target, prop) {
throwIfProxyReleased(isProxyReleased);
if (prop === releaseProxy) {
return () => {
unregisterProxy(proxy);
releaseEndpoint(ep);
isProxyReleased = true;
};
}
if (prop === "then") {
if (path.length === 0) {
return { then: () => proxy };
}
const r = requestResponseMessage(ep, {
type: "GET" /* MessageType.GET */,
path: path.map((p) => p.toString()),
}).then(fromWireValue);
return r.then.bind(r);
}
return createProxy(ep, [...path, prop]);
},
set(_target, prop, rawValue) {
throwIfProxyReleased(isProxyReleased);
// FIXME: ES6 Proxy Handler `set` methods are supposed to return a
// boolean. To show good will, we return true asynchronously ¯\_(ツ)_/¯
const [value, transferables] = toWireValue(rawValue);
return requestResponseMessage(ep, {
type: "SET" /* MessageType.SET */,
path: [...path, prop].map((p) => p.toString()),
value,
}, transferables).then(fromWireValue);
},
apply(_target, _thisArg, rawArgumentList) {
throwIfProxyReleased(isProxyReleased);
const last = path[path.length - 1];
if (last === createEndpoint) {
return requestResponseMessage(ep, {
type: "ENDPOINT" /* MessageType.ENDPOINT */,
}).then(fromWireValue);
}
// We just pretend that `bind()` didnt happen.
if (last === "bind") {
return createProxy(ep, path.slice(0, -1));
}
const [argumentList, transferables] = processArguments(rawArgumentList);
return requestResponseMessage(ep, {
type: "APPLY" /* MessageType.APPLY */,
path: path.map((p) => p.toString()),
argumentList,
}, transferables).then(fromWireValue);
},
construct(_target, rawArgumentList) {
throwIfProxyReleased(isProxyReleased);
const [argumentList, transferables] = processArguments(rawArgumentList);
return requestResponseMessage(ep, {
type: "CONSTRUCT" /* MessageType.CONSTRUCT */,
path: path.map((p) => p.toString()),
argumentList,
}, transferables).then(fromWireValue);
},
});
registerProxy(proxy, ep);
return proxy;
}
function myFlat(arr) {
return Array.prototype.concat.apply([], arr);
}
function processArguments(argumentList) {
const processed = argumentList.map(toWireValue);
return [processed.map((v) => v[0]), myFlat(processed.map((v) => v[1]))];
}
const transferCache = new WeakMap();
function transfer(obj, transfers) {
transferCache.set(obj, transfers);
return obj;
}
function proxy(obj) {
return Object.assign(obj, { [proxyMarker]: true });
}
function windowEndpoint(w, context = globalThis, targetOrigin = "*") {
return {
postMessage: (msg, transferables) => w.postMessage(msg, targetOrigin, transferables),
addEventListener: context.addEventListener.bind(context),
removeEventListener: context.removeEventListener.bind(context),
};
}
function toWireValue(value) {
for (const [name, handler] of transferHandlers) {
if (handler.canHandle(value)) {
const [serializedValue, transferables] = handler.serialize(value);
return [
{
type: "HANDLER" /* WireValueType.HANDLER */,
name,
value: serializedValue,
},
transferables,
];
}
}
return [
{
type: "RAW" /* WireValueType.RAW */,
value,
},
transferCache.get(value) || [],
];
}
function fromWireValue(value) {
switch (value.type) {
case "HANDLER" /* WireValueType.HANDLER */:
return transferHandlers.get(value.name).deserialize(value.value);
case "RAW" /* WireValueType.RAW */:
return value.value;
}
}
function requestResponseMessage(ep, msg, transfers) {
return new Promise((resolve) => {
const id = generateUUID();
ep.addEventListener("message", function l(ev) {
if (!ev.data || !ev.data.id || ev.data.id !== id) {
return;
}
ep.removeEventListener("message", l);
resolve(ev.data);
});
if (ep.start) {
ep.start();
}
ep.postMessage(Object.assign({ id }, msg), transfers);
});
}
function generateUUID() {
return new Array(4)
.fill(0)
.map(() => Math.floor(Math.random() * Number.MAX_SAFE_INTEGER).toString(16))
.join("-");
}
export { createEndpoint, expose, finalizer, proxy, proxyMarker, releaseProxy, transfer, transferHandlers, windowEndpoint, wrap };
//# sourceMappingURL=comlink.mjs.map


@@ -0,0 +1,7 @@
<!DOCTYPE html>
<head>
</head>
<body>
<script src="index.js" type="module"></script>
</body>
</html>


@@ -0,0 +1,7 @@
import * as Comlink from "./comlink.mjs";
async function init() {
const worker = Comlink.wrap(new Worker("worker.js", { type: "module" }));
window.worker = worker;
}
init();


@@ -0,0 +1,45 @@
import * as Comlink from "./comlink.mjs";
import init, { wasm_main, initThreadPool, init_logging } from './tlsn_benches_browser_wasm.js';
class Worker {
async init() {
try {
await init();
// Tracing may interfere with the benchmark results. We should enable it only for debugging.
// init_logging({
// level: 'Debug',
// crate_filters: undefined,
// span_events: undefined,
// });
await initThreadPool(navigator.hardwareConcurrency);
} catch (e) {
console.error(e);
throw e;
}
}
async run(
ws_ip,
ws_port,
wasm_to_server_port,
wasm_to_verifier_port,
wasm_to_native_port
) {
try {
await wasm_main(
ws_ip,
ws_port,
wasm_to_server_port,
wasm_to_verifier_port,
wasm_to_native_port);
} catch (e) {
console.error(e);
throw e;
}
}
}
const worker = new Worker();
Comlink.expose(worker);


@@ -0,0 +1,2 @@
[toolchain]
channel = "nightly"


@@ -0,0 +1,103 @@
//! Contains the wasm component of the browser prover.
//!
//! Conceptually the browser prover consists of the native and the wasm
//! components.
use serio::{stream::IoStreamExt, SinkExt as _};
use tlsn_benches_browser_core::{
msg::{Config, Runtime},
FramedIo,
};
use tlsn_benches_library::run_prover;
pub use tlsn_wasm::init_logging;
use anyhow::Result;
use tracing::info;
use wasm_bindgen::prelude::*;
#[cfg(target_arch = "wasm32")]
pub use wasm_bindgen_rayon::init_thread_pool;
use web_time::Instant;
use ws_stream_wasm::WsMeta;
#[wasm_bindgen]
pub async fn wasm_main(
ws_ip: String,
ws_port: u16,
wasm_to_server_port: u16,
wasm_to_verifier_port: u16,
wasm_to_native_port: u16,
) -> Result<(), JsError> {
// Wrapping main() since wasm_bindgen doesn't support anyhow.
main(
ws_ip,
ws_port,
wasm_to_server_port,
wasm_to_verifier_port,
wasm_to_native_port,
)
.await
.map_err(|err| JsError::new(&err.to_string()))
}
pub async fn main(
ws_ip: String,
ws_port: u16,
wasm_to_server_port: u16,
wasm_to_verifier_port: u16,
wasm_to_native_port: u16,
) -> Result<()> {
info!("starting main");
// Connect to the server.
let (_, server_io_ws) = WsMeta::connect(
&format!(
"ws://{}:{}/tcp?addr=localhost%3A{}",
ws_ip, ws_port, wasm_to_server_port
),
None,
)
.await?;
let server_io = server_io_ws.into_io();
// Connect to the verifier.
let (_, verifier_io_ws) = WsMeta::connect(
&format!(
"ws://{}:{}/tcp?addr=localhost%3A{}",
ws_ip, ws_port, wasm_to_verifier_port
),
None,
)
.await?;
let verifier_io = verifier_io_ws.into_io();
// Connect to the native component of the browser prover.
let (_, native_io_ws) = WsMeta::connect(
&format!(
"ws://{}:{}/tcp?addr=localhost%3A{}",
ws_ip, ws_port, wasm_to_native_port
),
None,
)
.await?;
let mut native_io = FramedIo::new(Box::new(native_io_ws.into_io()));
info!("expecting config from the native component");
let cfg: Config = native_io.expect_next().await?;
let start_time = Instant::now();
run_prover(
cfg.upload_size,
cfg.download_size,
cfg.defer_decryption,
Box::new(verifier_io),
Box::new(server_io),
)
.await?;
native_io
.send(Runtime(start_time.elapsed().as_secs()))
.await?;
Ok(())
}


@@ -1,12 +0,0 @@
# Run the TLSN benches with Docker
In the root folder of this repository, run:
```
docker build -t tlsn-bench . -f ./crates/benches/benches.Dockerfile
```
Next run the benches with:
```
docker run -it --privileged -v ./crates/benches/:/benches tlsn-bench
```
The `--privileged` parameter is required because this test bench needs permission to create networks with certain parameters


@@ -0,0 +1,24 @@
[package]
edition = "2021"
name = "tlsn-benches-library"
publish = false
version = "0.1.0"
[dependencies]
tlsn-common = { workspace = true }
tlsn-core = { workspace = true }
tlsn-prover = { workspace = true }
tlsn-server-fixture-certs = { workspace = true }
tlsn-tls-core = { workspace = true }
anyhow = "1.0"
async-trait = "0.1.81"
futures = { version = "0.3", features = ["compat"] }
serde = { workspace = true }
tokio = {version = "1", default-features = false, features = ["rt", "macros"]}
tokio-tungstenite-wasm = "0.3.1"
tokio-util= {version = "0.7", features = ["compat", "io"]}
toml = "0.8.11"


@@ -0,0 +1,131 @@
use tls_core::{anchors::RootCertStore, verify::WebPkiVerifier};
use tlsn_common::config::ProtocolConfig;
use tlsn_core::{transcript::Idx, CryptoProvider};
use tlsn_prover::{Prover, ProverConfig};
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
use anyhow::Context;
use async_trait::async_trait;
use futures::{future::join, AsyncReadExt as _, AsyncWriteExt as _};
use serde::{Deserialize, Serialize};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::TokioAsyncReadCompatExt;
pub trait AsyncIo: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static {}
impl<T> AsyncIo for T where T: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static {}
#[async_trait]
pub trait ProverTrait {
/// Sets up the prover, returning it ready to be run.
async fn setup(
upload_size: usize,
download_size: usize,
defer_decryption: bool,
verifier_io: Box<dyn AsyncIo>,
server_io: Box<dyn AsyncIo>,
) -> anyhow::Result<Self>
where
Self: Sized;
/// Runs the prover. Returns the total run time in seconds.
async fn run(&mut self) -> anyhow::Result<u64>;
/// Returns the kind of the prover.
fn kind(&self) -> ProverKind;
}
#[derive(Debug, Clone, Serialize, Deserialize)]
/// The kind of a prover.
pub enum ProverKind {
/// The prover compiled into a native binary.
Native,
/// The prover compiled into a wasm binary.
Browser,
}
impl From<ProverKind> for String {
fn from(value: ProverKind) -> Self {
match value {
ProverKind::Native => "Native".to_string(),
ProverKind::Browser => "Browser".to_string(),
}
}
}
pub async fn run_prover(
upload_size: usize,
download_size: usize,
defer_decryption: bool,
io: Box<dyn AsyncIo>,
client_conn: Box<dyn AsyncIo>,
) -> anyhow::Result<()> {
let provider = CryptoProvider {
cert: WebPkiVerifier::new(root_store(), None),
..Default::default()
};
let protocol_config = if defer_decryption {
ProtocolConfig::builder()
.max_sent_data(upload_size + 256)
.max_recv_data(download_size + 256)
.build()
.unwrap()
} else {
ProtocolConfig::builder()
.max_sent_data(upload_size + 256)
.max_recv_data(download_size + 256)
.max_recv_data_online(download_size + 256)
.build()
.unwrap()
};
let prover = Prover::new(
ProverConfig::builder()
.server_name(SERVER_DOMAIN)
.protocol_config(protocol_config)
.defer_decryption_from_start(defer_decryption)
.crypto_provider(provider)
.build()
.context("invalid prover config")?,
)
.setup(io.compat())
.await?;
let (mut mpc_tls_connection, prover_fut) = prover.connect(client_conn.compat()).await?;
let tls_fut = async move {
let request = format!(
"GET /bytes?size={} HTTP/1.1\r\nConnection: close\r\nData: {}\r\n\r\n",
download_size,
String::from_utf8(vec![0x42u8; upload_size]).unwrap(),
);
mpc_tls_connection.write_all(request.as_bytes()).await?;
mpc_tls_connection.close().await?;
let mut response = vec![];
mpc_tls_connection.read_to_end(&mut response).await?;
Ok::<(), anyhow::Error>(())
};
let (prover_task, _) = join(prover_fut, tls_fut).await;
let mut prover = prover_task?.start_prove();
let (sent_len, recv_len) = prover.transcript().len();
prover
.prove_transcript(Idx::new(0..sent_len), Idx::new(0..recv_len))
.await?;
prover.finalize().await?;
Ok(())
}
fn root_store() -> RootCertStore {
let mut root_store = RootCertStore::empty();
root_store
.add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
.unwrap();
root_store
}


@@ -12,6 +12,7 @@ pub mod types;
pub mod verifier;
use log::LoggingConfig;
pub use log::LoggingLevel;
use tlsn_core::{transcript::Direction, CryptoProvider};
use tracing::error;
use tracing_subscriber::{