Compare commits

..

44 Commits

Author SHA1 Message Date
sinu.eth
1897f0d1e6 refactor: remove encoding commitment (#1071)
* refactor: remove encoding commitment

* remove unused field

* clippy
2026-01-08 07:55:03 -08:00
dan
2101285f7f chore(bench): dont bench large payloads (#1074) 2026-01-05 09:55:35 +00:00
dan
98210e4059 chore(bench): added headed mode for debugging (#1073) 2026-01-05 09:38:31 +00:00
dan
9dfac850d5 chore(harness): improve UX with progress bar, separate sweep benches (#1068) 2025-12-23 13:59:14 +00:00
Hendrik Eeckhaut
b41d678829 build: update Rust to version 1.92.0 2025-12-16 09:36:11 +01:00
sinu.eth
1ebefa27d8 perf(core): fold instead of flatten (#1064) 2025-12-11 06:41:26 -08:00
dan
4fe5c1defd feat(harness): add reveal_all config (#1063) 2025-12-09 12:01:39 +00:00
dan
0e8e547300 chore: adapt for rangeset 0.4 (#1058) 2025-12-09 11:36:13 +00:00
dan
22cc88907a chore: bump mpz (#1057) 2025-12-04 10:27:43 +00:00
Hendrik Eeckhaut
cec4756e0e ci: set GITHUB_TOKEN env 2025-11-28 14:33:19 +01:00
Hendrik Eeckhaut
0919e1f2b3 clippy: allow deprecated aead::generic_array 2025-11-28 14:33:19 +01:00
Hendrik Eeckhaut
43b9f57e1f build: update Rust to version 1.91.1 2025-11-26 16:53:08 +01:00
dan
c51331d63d test: use ideal vm for testing (#1049) 2025-11-07 12:56:09 +00:00
dan
3905d9351c chore: clean up deps (#1048) 2025-11-07 10:36:41 +00:00
dan
f8a67bc8e7 feat(core): support proving keccak256 commitments (#1046) 2025-11-07 09:18:44 +00:00
dan
952a7011bf feat(cipher): use AES pre/post key schedule circuits (#1042) 2025-11-07 09:08:08 +00:00
Ram
0673818e4e chore: fix links to key exchange doc page (#1045) 2025-11-04 23:07:58 +01:00
dan
a5749d81f1 fix(attestation): verify sig during validation (#1037) 2025-10-30 07:59:57 +00:00
sinu.eth
f2e119bb66 refactor: move and rewrite configuration (#1034)
* refactor: move and rewrite configuration

* fix wasm
2025-10-27 11:47:42 -07:00
Hendrik Eeckhaut
271ac3771e Fix example (#1033)
* fix: provide encoder secret to attestation
* Add missing entry in example's README file
2025-10-24 10:33:32 +02:00
Benjamin Martinez Picech
f69dd7a239 refactor(tlsn-core): redeclaration of content type into core (#1026)
* redeclaration of content type into core

* fix compilation error

* comment removal

* Lint and format fixes

* fix wasm build

* Unknown content type

* format fix
2025-10-23 15:47:53 +02:00
sinu.eth
79f5160cae feat(tlsn): insecure mode (#1031) 2025-10-22 10:18:11 -07:00
sinu.eth
5fef2af698 fix(example): close prover (#1025) 2025-10-17 10:39:08 -07:00
sinu.eth
5b2083e211 refactor(tlsn): invert control of config validation (#1023)
* refactor(tlsn): invert control of config validation

* clippy
2025-10-17 10:19:02 -07:00
sinu.eth
d26bb02d2e chore: update to alpha.14-pre (#1022) 2025-10-15 11:11:43 -07:00
sinu
a766b64184 ci: add manual trigger to main update 2025-10-15 10:11:01 -07:00
sinu.eth
0885d40ddf chore: release v0.1.0-alpha.13 (#1019) 2025-10-15 09:38:52 -07:00
sinu.eth
610411aae4 ci: relax clippy (#1020) 2025-10-15 09:27:55 -07:00
sinu.eth
37df1baed7 feat(core): proof config builder reveal all methods (#1017)
Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>
2025-10-14 08:56:28 -07:00
dan
aeaebc5c60 chore(harness): expose debug flag in dockerfile (#1018) 2025-10-14 11:19:30 +00:00
sinu.eth
2e7e3db11d fix: fully identify signature algorithm (#1015) 2025-10-13 09:57:34 +02:00
sinu.eth
0a68837d0a fix: empty auth ranges (#1016) 2025-10-10 15:44:38 -07:00
sinu.eth
0ec2392716 chore(tlsn): add transcript auth tests (#1014)
* chore(tlsn): add transcript auth tests

* clippy
2025-10-10 14:10:17 -07:00
sinu.eth
f99fce5b5a fix(tlsn): do not implicitly reveal encoder secret (#1011) 2025-10-10 08:39:24 -07:00
sinu.eth
6b9f44e7e5 feat(tlsn): disclose encryption key (#1010)
Co-authored-by: th4s <th4s@metavoid.xyz>
2025-10-10 08:32:50 -07:00
dan
bf1cf2302a fix(harness): add harness debug feature (#1012) 2025-10-10 14:20:42 +00:00
sinu.eth
2884be17e0 feat(tlsn): partial plaintext auth (#1006)
Co-authored-by: th4s <th4s@metavoid.xyz>
2025-10-09 11:22:23 -07:00
sinu.eth
df8d79c152 fix(wasm): explicitly enable link args for wasm (#1007) 2025-10-09 08:34:11 -07:00
yuroitaki
82d509266b feat: add blake3 transcript commitment (#1000)
* Add blake3.

* Update mpz version.

---------

Co-authored-by: yuroitaki <>
2025-10-08 10:13:07 +08:00
dan
d5ad768e7c chore: improve error msg (#1003) 2025-10-03 05:43:58 +00:00
Hendrik Eeckhaut
d25fb320d4 build: update Rust to version 1.90.0 2025-09-24 09:32:56 +02:00
Hendrik Eeckhaut
0539268da7 Interactive noir example (#981)
demo for interactive zk age proof

Co-authored-by: th4s <th4s@metavoid.xyz>
2025-09-19 16:55:10 +02:00
dan
427b2896b5 allow root_store to be None (#995) 2025-09-19 15:15:04 +02:00
Hendrik Eeckhaut
89d1e594d1 privacy-scaling-explorations -> privacy-ethereum (#993) 2025-09-11 16:48:01 +02:00
147 changed files with 8157 additions and 8337 deletions

View File

@@ -18,10 +18,11 @@ env:
# We need a higher number of parallel rayon tasks than the default (which is 4) # We need a higher number of parallel rayon tasks than the default (which is 4)
# in order to prevent a deadlock, c.f. # in order to prevent a deadlock, c.f.
# - https://github.com/tlsnotary/tlsn/issues/548 # - https://github.com/tlsnotary/tlsn/issues/548
# - https://github.com/privacy-scaling-explorations/mpz/issues/178 # - https://github.com/privacy-ethereum/mpz/issues/178
# 32 seems to be big enough for the foreseeable future # 32 seems to be big enough for the foreseeable future
RAYON_NUM_THREADS: 32 RAYON_NUM_THREADS: 32
RUST_VERSION: 1.89.0 RUST_VERSION: 1.92.0
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
jobs: jobs:
clippy: clippy:
@@ -32,7 +33,7 @@ jobs:
uses: actions/checkout@v4 uses: actions/checkout@v4
- name: Install rust toolchain - name: Install rust toolchain
uses: dtolnay/rust-toolchain@stable uses: dtolnay/rust-toolchain@master
with: with:
toolchain: ${{ env.RUST_VERSION }} toolchain: ${{ env.RUST_VERSION }}
components: clippy components: clippy
@@ -41,7 +42,7 @@ jobs:
uses: Swatinem/rust-cache@v2.7.7 uses: Swatinem/rust-cache@v2.7.7
- name: Clippy - name: Clippy
run: cargo clippy --keep-going --all-features --all-targets --locked -- -D warnings run: cargo clippy --keep-going --all-features --all-targets --locked
fmt: fmt:
name: Check formatting name: Check formatting

View File

@@ -6,7 +6,7 @@ on:
tag: tag:
description: 'Tag to publish to NPM' description: 'Tag to publish to NPM'
required: true required: true
default: 'v0.1.0-alpha.13-pre' default: 'v0.1.0-alpha.14-pre'
jobs: jobs:
release: release:

View File

@@ -23,7 +23,6 @@ jobs:
- name: "rustdoc" - name: "rustdoc"
run: crates/wasm/build-docs.sh run: crates/wasm/build-docs.sh
- name: Deploy - name: Deploy
uses: peaceiris/actions-gh-pages@v3 uses: peaceiris/actions-gh-pages@v3
if: ${{ github.ref == 'refs/heads/dev' }} if: ${{ github.ref == 'refs/heads/dev' }}

View File

@@ -1,6 +1,7 @@
name: Fast-forward main branch to published release tag name: Fast-forward main branch to published release tag
on: on:
workflow_dispatch:
release: release:
types: [published] types: [published]

3113
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -66,25 +66,27 @@ tlsn-harness-runner = { path = "crates/harness/runner" }
tlsn-wasm = { path = "crates/wasm" } tlsn-wasm = { path = "crates/wasm" }
tlsn = { path = "crates/tlsn" } tlsn = { path = "crates/tlsn" }
mpz-circuits = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" } mpz-circuits = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-memory-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" } mpz-circuits-data = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-common = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" } mpz-memory-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" } mpz-common = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-vm-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" } mpz-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-garble = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" } mpz-vm-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-garble-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" } mpz-garble = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-ole = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" } mpz-garble-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-ot = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" } mpz-ole = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-share-conversion = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" } mpz-ot = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-fields = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" } mpz-share-conversion = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-zk = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" } mpz-fields = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-hash = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" } mpz-zk = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-hash = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-ideal-vm = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
rangeset = { version = "0.2" } rangeset = { version = "0.4" }
serio = { version = "0.2" } serio = { version = "0.2" }
spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" } spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6f1a934" }
uid-mux = { version = "0.2" } uid-mux = { version = "0.2" }
websocket-relay = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" } websocket-relay = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6f1a934" }
aead = { version = "0.4" } aead = { version = "0.4" }
aes = { version = "0.8" } aes = { version = "0.8" }
@@ -100,7 +102,7 @@ bytes = { version = "1.4" }
cfg-if = { version = "1" } cfg-if = { version = "1" }
chromiumoxide = { version = "0.7" } chromiumoxide = { version = "0.7" }
chrono = { version = "0.4" } chrono = { version = "0.4" }
cipher-crypto = { package = "cipher", version = "0.4" } cipher = { version = "0.4" }
clap = { version = "4.5" } clap = { version = "4.5" }
criterion = { version = "0.5" } criterion = { version = "0.5" }
ctr = { version = "0.9" } ctr = { version = "0.9" }
@@ -110,7 +112,7 @@ elliptic-curve = { version = "0.13" }
enum-try-as-inner = { version = "0.1" } enum-try-as-inner = { version = "0.1" }
env_logger = { version = "0.10" } env_logger = { version = "0.10" }
futures = { version = "0.3" } futures = { version = "0.3" }
futures-rustls = { version = "0.26" } futures-rustls = { version = "0.25" }
generic-array = { version = "0.14" } generic-array = { version = "0.14" }
ghash = { version = "0.5" } ghash = { version = "0.5" }
hex = { version = "0.4" } hex = { version = "0.4" }
@@ -123,7 +125,6 @@ inventory = { version = "0.3" }
itybity = { version = "0.2" } itybity = { version = "0.2" }
js-sys = { version = "0.3" } js-sys = { version = "0.3" }
k256 = { version = "0.13" } k256 = { version = "0.13" }
lipsum = { version = "0.9" }
log = { version = "0.4" } log = { version = "0.4" }
once_cell = { version = "1.19" } once_cell = { version = "1.19" }
opaque-debug = { version = "0.3" } opaque-debug = { version = "0.3" }

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "tlsn-attestation" name = "tlsn-attestation"
version = "0.1.0-alpha.13-pre" version = "0.1.0-alpha.14-pre"
edition = "2024" edition = "2024"
[features] [features]
@@ -9,7 +9,7 @@ fixtures = ["tlsn-core/fixtures", "dep:tlsn-data-fixtures"]
[dependencies] [dependencies]
tlsn-tls-core = { workspace = true } tlsn-tls-core = { workspace = true }
tlsn-core = { workspace = true } tlsn-core = { workspace = true, features = ["mozilla-certs"] }
tlsn-data-fixtures = { workspace = true, optional = true } tlsn-data-fixtures = { workspace = true, optional = true }
bcs = { workspace = true } bcs = { workspace = true }
@@ -23,10 +23,11 @@ thiserror = { workspace = true }
tiny-keccak = { workspace = true, features = ["keccak"] } tiny-keccak = { workspace = true, features = ["keccak"] }
[dev-dependencies] [dev-dependencies]
alloy-primitives = { version = "0.8.22", default-features = false } alloy-primitives = { version = "1.3.1", default-features = false }
alloy-signer = { version = "0.12", default-features = false } alloy-signer = { version = "1.0", default-features = false }
alloy-signer-local = { version = "0.12", default-features = false } alloy-signer-local = { version = "1.0", default-features = false }
rand06-compat = { workspace = true } rand06-compat = { workspace = true }
rangeset = { workspace = true }
rstest = { workspace = true } rstest = { workspace = true }
tlsn-core = { workspace = true, features = ["fixtures"] } tlsn-core = { workspace = true, features = ["fixtures"] }
tlsn-data-fixtures = { workspace = true } tlsn-data-fixtures = { workspace = true }

View File

@@ -243,8 +243,7 @@ mod test {
use rstest::{fixture, rstest}; use rstest::{fixture, rstest};
use tlsn_core::{ use tlsn_core::{
connection::{CertBinding, CertBindingV1_2}, connection::{CertBinding, CertBindingV1_2},
fixtures::{ConnectionFixture, encoding_provider}, fixtures::ConnectionFixture,
hash::Blake3,
transcript::Transcript, transcript::Transcript,
}; };
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON}; use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
@@ -275,13 +274,7 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length()); let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture( let RequestFixture { request, .. } = request_fixture(transcript, connection, Vec::new());
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection,
Blake3::default(),
Vec::new(),
);
let attestation_config = AttestationConfig::builder() let attestation_config = AttestationConfig::builder()
.supported_signature_algs([SignatureAlgId::SECP256R1]) .supported_signature_algs([SignatureAlgId::SECP256R1])
@@ -300,13 +293,7 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length()); let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture( let RequestFixture { request, .. } = request_fixture(transcript, connection, Vec::new());
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection,
Blake3::default(),
Vec::new(),
);
let attestation_config = AttestationConfig::builder() let attestation_config = AttestationConfig::builder()
.supported_signature_algs([SignatureAlgId::SECP256K1]) .supported_signature_algs([SignatureAlgId::SECP256K1])
@@ -326,13 +313,7 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length()); let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture( let RequestFixture { request, .. } = request_fixture(transcript, connection, Vec::new());
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection,
Blake3::default(),
Vec::new(),
);
let attestation_builder = Attestation::builder(attestation_config) let attestation_builder = Attestation::builder(attestation_config)
.accept_request(request) .accept_request(request)
@@ -353,13 +334,8 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length()); let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture( let RequestFixture { request, .. } =
transcript, request_fixture(transcript, connection.clone(), Vec::new());
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let mut attestation_builder = Attestation::builder(attestation_config) let mut attestation_builder = Attestation::builder(attestation_config)
.accept_request(request) .accept_request(request)
@@ -383,13 +359,8 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length()); let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture( let RequestFixture { request, .. } =
transcript, request_fixture(transcript, connection.clone(), Vec::new());
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let mut attestation_builder = Attestation::builder(attestation_config) let mut attestation_builder = Attestation::builder(attestation_config)
.accept_request(request) .accept_request(request)
@@ -422,9 +393,7 @@ mod test {
let RequestFixture { request, .. } = request_fixture( let RequestFixture { request, .. } = request_fixture(
transcript, transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(), connection.clone(),
Blake3::default(),
vec![Extension { vec![Extension {
id: b"foo".to_vec(), id: b"foo".to_vec(),
value: b"bar".to_vec(), value: b"bar".to_vec(),
@@ -451,9 +420,7 @@ mod test {
let RequestFixture { request, .. } = request_fixture( let RequestFixture { request, .. } = request_fixture(
transcript, transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(), connection.clone(),
Blake3::default(),
vec![Extension { vec![Extension {
id: b"foo".to_vec(), id: b"foo".to_vec(),
value: b"bar".to_vec(), value: b"bar".to_vec(),

View File

@@ -1,34 +1,29 @@
//! Attestation fixtures. //! Attestation fixtures.
use tlsn_core::{ use tlsn_core::{
connection::{CertBinding, CertBindingV1_2}, connection::{CertBinding, CertBindingV1_2},
fixtures::ConnectionFixture, fixtures::ConnectionFixture,
hash::HashAlgorithm, transcript::{Transcript, TranscriptCommitConfigBuilder, TranscriptCommitment},
transcript::{
Transcript, TranscriptCommitConfigBuilder, TranscriptCommitment,
encoding::{EncodingProvider, EncodingTree},
},
}; };
use crate::{ use crate::{
Attestation, AttestationConfig, CryptoProvider, Extension, Attestation, AttestationConfig, CryptoProvider, Extension,
request::{Request, RequestConfig}, request::{Request, RequestConfig},
signing::SignatureAlgId, signing::{
KeyAlgId, SignatureAlgId, SignatureVerifier, SignatureVerifierProvider, Signer,
SignerProvider,
},
}; };
/// A Request fixture used for testing. /// A Request fixture used for testing.
#[allow(missing_docs)] #[allow(missing_docs)]
pub struct RequestFixture { pub struct RequestFixture {
pub encoding_tree: EncodingTree,
pub request: Request, pub request: Request,
} }
/// Returns a request fixture for testing. /// Returns a request fixture for testing.
pub fn request_fixture( pub fn request_fixture(
transcript: Transcript, transcript: Transcript,
encodings_provider: impl EncodingProvider,
connection: ConnectionFixture, connection: ConnectionFixture,
encoding_hasher: impl HashAlgorithm,
extensions: Vec<Extension>, extensions: Vec<Extension>,
) -> RequestFixture { ) -> RequestFixture {
let provider = CryptoProvider::default(); let provider = CryptoProvider::default();
@@ -48,16 +43,10 @@ pub fn request_fixture(
.unwrap(); .unwrap();
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap(); let transcripts_commitment_config = transcript_commitment_builder.build().unwrap();
// Prover constructs encoding tree.
let encoding_tree = EncodingTree::new(
&encoding_hasher,
transcripts_commitment_config.iter_encoding(),
&encodings_provider,
)
.unwrap();
let mut builder = RequestConfig::builder(); let mut builder = RequestConfig::builder();
builder.transcript_commit(transcripts_commitment_config);
for extension in extensions { for extension in extensions {
builder.extension(extension); builder.extension(extension);
} }
@@ -72,10 +61,7 @@ pub fn request_fixture(
let (request, _) = request_builder.build(&provider).unwrap(); let (request, _) = request_builder.build(&provider).unwrap();
RequestFixture { RequestFixture { request }
encoding_tree,
request,
}
} }
/// Returns an attestation fixture for testing. /// Returns an attestation fixture for testing.
@@ -102,7 +88,8 @@ pub fn attestation_fixture(
let mut provider = CryptoProvider::default(); let mut provider = CryptoProvider::default();
match signature_alg { match signature_alg {
SignatureAlgId::SECP256K1 => provider.signer.set_secp256k1(&[42u8; 32]).unwrap(), SignatureAlgId::SECP256K1 => provider.signer.set_secp256k1(&[42u8; 32]).unwrap(),
SignatureAlgId::SECP256R1 => provider.signer.set_secp256r1(&[42u8; 32]).unwrap(), SignatureAlgId::SECP256K1ETH => provider.signer.set_secp256k1eth(&[43u8; 32]).unwrap(),
SignatureAlgId::SECP256R1 => provider.signer.set_secp256r1(&[44u8; 32]).unwrap(),
_ => unimplemented!(), _ => unimplemented!(),
}; };
@@ -122,3 +109,68 @@ pub fn attestation_fixture(
attestation_builder.build(&provider).unwrap() attestation_builder.build(&provider).unwrap()
} }
/// Returns a crypto provider which supports only a custom signature alg.
pub fn custom_provider_fixture() -> CryptoProvider {
const CUSTOM_SIG_ALG_ID: SignatureAlgId = SignatureAlgId::new(128);
// A dummy signer.
struct DummySigner {}
impl Signer for DummySigner {
fn alg_id(&self) -> SignatureAlgId {
CUSTOM_SIG_ALG_ID
}
fn sign(
&self,
msg: &[u8],
) -> Result<crate::signing::Signature, crate::signing::SignatureError> {
Ok(crate::signing::Signature {
alg: CUSTOM_SIG_ALG_ID,
data: msg.to_vec(),
})
}
fn verifying_key(&self) -> crate::signing::VerifyingKey {
crate::signing::VerifyingKey {
alg: KeyAlgId::new(128),
data: vec![1, 2, 3, 4],
}
}
}
// A dummy verifier.
struct DummyVerifier {}
impl SignatureVerifier for DummyVerifier {
fn alg_id(&self) -> SignatureAlgId {
CUSTOM_SIG_ALG_ID
}
fn verify(
&self,
_key: &crate::signing::VerifyingKey,
msg: &[u8],
sig: &[u8],
) -> Result<(), crate::signing::SignatureError> {
if msg == sig {
Ok(())
} else {
Err(crate::signing::SignatureError::from_str(
"invalid signature",
))
}
}
}
let mut provider = CryptoProvider::default();
let mut signer_provider = SignerProvider::default();
signer_provider.set_signer(Box::new(DummySigner {}));
provider.signer = signer_provider;
let mut verifier_provider = SignatureVerifierProvider::empty();
verifier_provider.set_verifier(Box::new(DummyVerifier {}));
provider.signature = verifier_provider;
provider
}

View File

@@ -79,8 +79,6 @@
//! //!
//! // Specify all the transcript commitments we want to make. //! // Specify all the transcript commitments we want to make.
//! builder //! builder
//! // Use BLAKE3 for encoding commitments.
//! .encoding_hash_alg(HashAlgId::BLAKE3)
//! // Commit to all sent data. //! // Commit to all sent data.
//! .commit_sent(&(0..sent_len))? //! .commit_sent(&(0..sent_len))?
//! // Commit to the first 10 bytes of sent data. //! // Commit to the first 10 bytes of sent data.
@@ -129,7 +127,7 @@
//! //!
//! ```no_run //! ```no_run
//! # use tlsn_attestation::{Attestation, CryptoProvider, Secrets, presentation::Presentation}; //! # use tlsn_attestation::{Attestation, CryptoProvider, Secrets, presentation::Presentation};
//! # use tlsn_core::transcript::{TranscriptCommitmentKind, Direction}; //! # use tlsn_core::transcript::Direction;
//! # fn main() -> Result<(), Box<dyn std::error::Error>> { //! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! # let attestation: Attestation = unimplemented!(); //! # let attestation: Attestation = unimplemented!();
//! # let secrets: Secrets = unimplemented!(); //! # let secrets: Secrets = unimplemented!();
@@ -140,8 +138,6 @@
//! let mut builder = secrets.transcript_proof_builder(); //! let mut builder = secrets.transcript_proof_builder();
//! //!
//! builder //! builder
//! // Use transcript encoding commitments.
//! .commitment_kinds(&[TranscriptCommitmentKind::Encoding])
//! // Disclose the first 10 bytes of the sent data. //! // Disclose the first 10 bytes of the sent data.
//! .reveal(&(0..10), Direction::Sent)? //! .reveal(&(0..10), Direction::Sent)?
//! // Disclose all of the received data. //! // Disclose all of the received data.
@@ -301,8 +297,6 @@ pub enum FieldKind {
ServerEphemKey = 0x02, ServerEphemKey = 0x02,
/// Server identity commitment. /// Server identity commitment.
ServerIdentityCommitment = 0x03, ServerIdentityCommitment = 0x03,
/// Encoding commitment.
EncodingCommitment = 0x04,
/// Plaintext hash commitment. /// Plaintext hash commitment.
PlaintextHash = 0x05, PlaintextHash = 0x05,
} }

View File

@@ -20,7 +20,10 @@ use serde::{Deserialize, Serialize};
use tlsn_core::hash::HashAlgId; use tlsn_core::hash::HashAlgId;
use crate::{Attestation, Extension, connection::ServerCertCommitment, signing::SignatureAlgId}; use crate::{
Attestation, CryptoProvider, Extension, connection::ServerCertCommitment,
serialize::CanonicalSerialize, signing::SignatureAlgId,
};
pub use builder::{RequestBuilder, RequestBuilderError}; pub use builder::{RequestBuilder, RequestBuilderError};
pub use config::{RequestConfig, RequestConfigBuilder, RequestConfigBuilderError}; pub use config::{RequestConfig, RequestConfigBuilder, RequestConfigBuilderError};
@@ -41,51 +44,107 @@ impl Request {
} }
/// Validates the content of the attestation against this request. /// Validates the content of the attestation against this request.
pub fn validate(&self, attestation: &Attestation) -> Result<(), InconsistentAttestation> { pub fn validate(
&self,
attestation: &Attestation,
provider: &CryptoProvider,
) -> Result<(), AttestationValidationError> {
if attestation.signature.alg != self.signature_alg { if attestation.signature.alg != self.signature_alg {
return Err(InconsistentAttestation(format!( return Err(AttestationValidationError::inconsistent(format!(
"signature algorithm: expected {:?}, got {:?}", "signature algorithm: expected {:?}, got {:?}",
self.signature_alg, attestation.signature.alg self.signature_alg, attestation.signature.alg
))); )));
} }
if attestation.header.root.alg != self.hash_alg { if attestation.header.root.alg != self.hash_alg {
return Err(InconsistentAttestation(format!( return Err(AttestationValidationError::inconsistent(format!(
"hash algorithm: expected {:?}, got {:?}", "hash algorithm: expected {:?}, got {:?}",
self.hash_alg, attestation.header.root.alg self.hash_alg, attestation.header.root.alg
))); )));
} }
if attestation.body.cert_commitment() != &self.server_cert_commitment { if attestation.body.cert_commitment() != &self.server_cert_commitment {
return Err(InconsistentAttestation( return Err(AttestationValidationError::inconsistent(
"server certificate commitment does not match".to_string(), "server certificate commitment does not match",
)); ));
} }
// TODO: improve the O(M*N) complexity of this check. // TODO: improve the O(M*N) complexity of this check.
for extension in &self.extensions { for extension in &self.extensions {
if !attestation.body.extensions().any(|e| e == extension) { if !attestation.body.extensions().any(|e| e == extension) {
return Err(InconsistentAttestation( return Err(AttestationValidationError::inconsistent(
"extension is missing from the attestation".to_string(), "extension is missing from the attestation",
)); ));
} }
} }
let verifier = provider
.signature
.get(&attestation.signature.alg)
.map_err(|_| {
AttestationValidationError::provider(format!(
"provider not configured for signature algorithm id {:?}",
attestation.signature.alg,
))
})?;
verifier
.verify(
&attestation.body.verifying_key.data,
&CanonicalSerialize::serialize(&attestation.header),
&attestation.signature.data,
)
.map_err(|_| {
AttestationValidationError::inconsistent("failed to verify the signature")
})?;
Ok(()) Ok(())
} }
} }
/// Error for [`Request::validate`]. /// Error for [`Request::validate`].
#[derive(Debug, thiserror::Error)] #[derive(Debug, thiserror::Error)]
#[error("inconsistent attestation: {0}")] #[error("attestation validation error: {kind}: {message}")]
pub struct InconsistentAttestation(String); pub struct AttestationValidationError {
kind: ErrorKind,
message: String,
}
impl AttestationValidationError {
fn inconsistent(msg: impl Into<String>) -> Self {
Self {
kind: ErrorKind::Inconsistent,
message: msg.into(),
}
}
fn provider(msg: impl Into<String>) -> Self {
Self {
kind: ErrorKind::Provider,
message: msg.into(),
}
}
}
#[derive(Debug)]
enum ErrorKind {
Inconsistent,
Provider,
}
impl std::fmt::Display for ErrorKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ErrorKind::Inconsistent => write!(f, "inconsistent"),
ErrorKind::Provider => write!(f, "provider"),
}
}
}
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use tlsn_core::{ use tlsn_core::{
connection::TranscriptLength, connection::TranscriptLength, fixtures::ConnectionFixture, hash::HashAlgId,
fixtures::{ConnectionFixture, encoding_provider},
hash::{Blake3, HashAlgId},
transcript::Transcript, transcript::Transcript,
}; };
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON}; use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
@@ -93,7 +152,8 @@ mod test {
use crate::{ use crate::{
CryptoProvider, CryptoProvider,
connection::ServerCertOpening, connection::ServerCertOpening,
fixtures::{RequestFixture, attestation_fixture, request_fixture}, fixtures::{RequestFixture, attestation_fixture, custom_provider_fixture, request_fixture},
request::{AttestationValidationError, ErrorKind},
signing::SignatureAlgId, signing::SignatureAlgId,
}; };
@@ -102,18 +162,15 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length()); let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture( let RequestFixture { request, .. } =
transcript, request_fixture(transcript, connection.clone(), Vec::new());
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let attestation = let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]); attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
assert!(request.validate(&attestation).is_ok()) let provider = CryptoProvider::default();
assert!(request.validate(&attestation, &provider).is_ok())
} }
#[test] #[test]
@@ -121,20 +178,17 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length()); let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { mut request, .. } = request_fixture( let RequestFixture { mut request, .. } =
transcript, request_fixture(transcript, connection.clone(), Vec::new());
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let attestation = let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]); attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
request.signature_alg = SignatureAlgId::SECP256R1; request.signature_alg = SignatureAlgId::SECP256R1;
let res = request.validate(&attestation); let provider = CryptoProvider::default();
let res = request.validate(&attestation, &provider);
assert!(res.is_err()); assert!(res.is_err());
} }
@@ -143,20 +197,17 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length()); let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { mut request, .. } = request_fixture( let RequestFixture { mut request, .. } =
transcript, request_fixture(transcript, connection.clone(), Vec::new());
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let attestation = let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]); attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
request.hash_alg = HashAlgId::SHA256; request.hash_alg = HashAlgId::SHA256;
let res = request.validate(&attestation); let provider = CryptoProvider::default();
let res = request.validate(&attestation, &provider);
assert!(res.is_err()) assert!(res.is_err())
} }
@@ -165,13 +216,8 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length()); let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { mut request, .. } = request_fixture( let RequestFixture { mut request, .. } =
transcript, request_fixture(transcript, connection.clone(), Vec::new());
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let attestation = let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]); attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
@@ -184,11 +230,52 @@ mod test {
}); });
let opening = ServerCertOpening::new(server_cert_data); let opening = ServerCertOpening::new(server_cert_data);
let crypto_provider = CryptoProvider::default(); let provider = CryptoProvider::default();
request.server_cert_commitment = request.server_cert_commitment =
opening.commit(crypto_provider.hash.get(&HashAlgId::BLAKE3).unwrap()); opening.commit(provider.hash.get(&HashAlgId::BLAKE3).unwrap());
let res = request.validate(&attestation); let res = request.validate(&attestation, &provider);
assert!(res.is_err()) assert!(res.is_err())
} }
#[test]
fn test_wrong_sig() {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } =
request_fixture(transcript, connection.clone(), Vec::new());
let mut attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
// Corrupt the signature.
attestation.signature.data[1] = attestation.signature.data[1].wrapping_add(1);
let provider = CryptoProvider::default();
assert!(request.validate(&attestation, &provider).is_err())
}
#[test]
fn test_wrong_provider() {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } =
request_fixture(transcript, connection.clone(), Vec::new());
let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
let provider = custom_provider_fixture();
assert!(matches!(
request.validate(&attestation, &provider),
Err(AttestationValidationError {
kind: ErrorKind::Provider,
..
})
))
}
} }

View File

@@ -49,5 +49,4 @@ impl_domain_separator!(tlsn_core::connection::ConnectionInfo);
impl_domain_separator!(tlsn_core::connection::CertBinding); impl_domain_separator!(tlsn_core::connection::CertBinding);
impl_domain_separator!(tlsn_core::transcript::TranscriptCommitment); impl_domain_separator!(tlsn_core::transcript::TranscriptCommitment);
impl_domain_separator!(tlsn_core::transcript::TranscriptSecret); impl_domain_separator!(tlsn_core::transcript::TranscriptSecret);
impl_domain_separator!(tlsn_core::transcript::encoding::EncodingCommitment);
impl_domain_separator!(tlsn_core::transcript::hash::PlaintextHash); impl_domain_separator!(tlsn_core::transcript::hash::PlaintextHash);

View File

@@ -202,6 +202,14 @@ impl SignatureVerifierProvider {
.map(|s| &**s) .map(|s| &**s)
.ok_or(UnknownSignatureAlgId(*alg)) .ok_or(UnknownSignatureAlgId(*alg))
} }
/// Returns am empty provider.
#[cfg(any(test, feature = "fixtures"))]
pub fn empty() -> Self {
Self {
verifiers: HashMap::default(),
}
}
} }
/// Signature verifier. /// Signature verifier.
@@ -229,6 +237,14 @@ impl_domain_separator!(VerifyingKey);
#[error("signature verification failed: {0}")] #[error("signature verification failed: {0}")]
pub struct SignatureError(String); pub struct SignatureError(String);
impl SignatureError {
/// Creates a new error with the given message.
#[allow(clippy::should_implement_trait)]
pub fn from_str(msg: &str) -> Self {
Self(msg.to_string())
}
}
/// A signature. /// A signature.
#[derive(Debug, Clone, Deserialize, Serialize)] #[derive(Debug, Clone, Deserialize, Serialize)]
pub struct Signature { pub struct Signature {

View File

@@ -1,3 +1,5 @@
use rand::{Rng, SeedableRng, rngs::StdRng};
use rangeset::set::RangeSet;
use tlsn_attestation::{ use tlsn_attestation::{
Attestation, AttestationConfig, CryptoProvider, Attestation, AttestationConfig, CryptoProvider,
presentation::PresentationOutput, presentation::PresentationOutput,
@@ -6,12 +8,11 @@ use tlsn_attestation::{
}; };
use tlsn_core::{ use tlsn_core::{
connection::{CertBinding, CertBindingV1_2}, connection::{CertBinding, CertBindingV1_2},
fixtures::{self, ConnectionFixture, encoder_secret}, fixtures::ConnectionFixture,
hash::Blake3, hash::{Blake3, Blinder, HashAlgId},
transcript::{ transcript::{
Direction, Transcript, TranscriptCommitConfigBuilder, TranscriptCommitment, Direction, Transcript, TranscriptCommitment, TranscriptSecret,
TranscriptSecret, hash::{PlaintextHash, PlaintextHashSecret, hash_plaintext},
encoding::{EncodingCommitment, EncodingTree},
}, },
}; };
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON}; use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
@@ -19,6 +20,7 @@ use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
/// Tests that the attestation protocol and verification work end-to-end /// Tests that the attestation protocol and verification work end-to-end
#[test] #[test]
fn test_api() { fn test_api() {
let mut rng = StdRng::seed_from_u64(0);
let mut provider = CryptoProvider::default(); let mut provider = CryptoProvider::default();
// Configure signer for Notary // Configure signer for Notary
@@ -26,8 +28,6 @@ fn test_api() {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let (sent_len, recv_len) = transcript.len(); let (sent_len, recv_len) = transcript.len();
// Plaintext encodings which the Prover obtained from GC evaluation
let encodings_provider = fixtures::encoding_provider(GET_WITH_HEADER, OK_JSON);
// At the end of the TLS connection the Prover holds the: // At the end of the TLS connection the Prover holds the:
let ConnectionFixture { let ConnectionFixture {
@@ -44,27 +44,38 @@ fn test_api() {
unreachable!() unreachable!()
}; };
// Prover specifies the ranges it wants to commit to. // Create hash commitments
let mut transcript_commitment_builder = TranscriptCommitConfigBuilder::new(&transcript); let hasher = Blake3::default();
transcript_commitment_builder let sent_blinder: Blinder = rng.random();
.commit_sent(&(0..sent_len)) let recv_blinder: Blinder = rng.random();
.unwrap()
.commit_recv(&(0..recv_len))
.unwrap();
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap(); let sent_idx = RangeSet::from(0..sent_len);
let recv_idx = RangeSet::from(0..recv_len);
// Prover constructs encoding tree. let sent_hash_commitment = PlaintextHash {
let encoding_tree = EncodingTree::new( direction: Direction::Sent,
&Blake3::default(), idx: sent_idx.clone(),
transcripts_commitment_config.iter_encoding(), hash: hash_plaintext(&hasher, transcript.sent(), &sent_blinder),
&encodings_provider, };
)
.unwrap();
let encoding_commitment = EncodingCommitment { let recv_hash_commitment = PlaintextHash {
root: encoding_tree.root(), direction: Direction::Received,
secret: encoder_secret(), idx: recv_idx.clone(),
hash: hash_plaintext(&hasher, transcript.received(), &recv_blinder),
};
let sent_hash_secret = PlaintextHashSecret {
direction: Direction::Sent,
idx: sent_idx,
alg: HashAlgId::BLAKE3,
blinder: sent_blinder,
};
let recv_hash_secret = PlaintextHashSecret {
direction: Direction::Received,
idx: recv_idx,
alg: HashAlgId::BLAKE3,
blinder: recv_blinder,
}; };
let request_config = RequestConfig::default(); let request_config = RequestConfig::default();
@@ -75,8 +86,14 @@ fn test_api() {
.handshake_data(server_cert_data) .handshake_data(server_cert_data)
.transcript(transcript) .transcript(transcript)
.transcript_commitments( .transcript_commitments(
vec![TranscriptSecret::Encoding(encoding_tree)], vec![
vec![TranscriptCommitment::Encoding(encoding_commitment.clone())], TranscriptSecret::Hash(sent_hash_secret),
TranscriptSecret::Hash(recv_hash_secret),
],
vec![
TranscriptCommitment::Hash(sent_hash_commitment.clone()),
TranscriptCommitment::Hash(recv_hash_commitment.clone()),
],
); );
let (request, secrets) = request_builder.build(&provider).unwrap(); let (request, secrets) = request_builder.build(&provider).unwrap();
@@ -96,12 +113,15 @@ fn test_api() {
.connection_info(connection_info.clone()) .connection_info(connection_info.clone())
// Server key Notary received during handshake // Server key Notary received during handshake
.server_ephemeral_key(server_ephemeral_key) .server_ephemeral_key(server_ephemeral_key)
.transcript_commitments(vec![TranscriptCommitment::Encoding(encoding_commitment)]); .transcript_commitments(vec![
TranscriptCommitment::Hash(sent_hash_commitment),
TranscriptCommitment::Hash(recv_hash_commitment),
]);
let attestation = attestation_builder.build(&provider).unwrap(); let attestation = attestation_builder.build(&provider).unwrap();
// Prover validates the attestation is consistent with its request. // Prover validates the attestation is consistent with its request.
request.validate(&attestation).unwrap(); request.validate(&attestation, &provider).unwrap();
let mut transcript_proof_builder = secrets.transcript_proof_builder(); let mut transcript_proof_builder = secrets.transcript_proof_builder();

View File

@@ -5,7 +5,7 @@ description = "This crate provides implementations of ciphers for two parties"
keywords = ["tls", "mpc", "2pc", "aes"] keywords = ["tls", "mpc", "2pc", "aes"]
categories = ["cryptography"] categories = ["cryptography"]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre" version = "0.1.0-alpha.14-pre"
edition = "2021" edition = "2021"
[lints] [lints]
@@ -15,7 +15,7 @@ workspace = true
name = "cipher" name = "cipher"
[dependencies] [dependencies]
mpz-circuits = { workspace = true } mpz-circuits = { workspace = true, features = ["aes"] }
mpz-vm-core = { workspace = true } mpz-vm-core = { workspace = true }
mpz-memory-core = { workspace = true } mpz-memory-core = { workspace = true }
@@ -24,11 +24,9 @@ thiserror = { workspace = true }
aes = { workspace = true } aes = { workspace = true }
[dev-dependencies] [dev-dependencies]
mpz-garble = { workspace = true } mpz-common = { workspace = true, features = ["test-utils"] }
mpz-common = { workspace = true } mpz-ideal-vm = { workspace = true }
mpz-ot = { workspace = true }
tokio = { version = "1", features = ["macros", "rt", "rt-multi-thread"] } tokio = { version = "1", features = ["macros", "rt", "rt-multi-thread"] }
rand = { workspace = true }
ctr = { workspace = true } ctr = { workspace = true }
cipher-crypto = { workspace = true } cipher = { workspace = true }

View File

@@ -2,7 +2,7 @@
use crate::{Cipher, CtrBlock, Keystream}; use crate::{Cipher, CtrBlock, Keystream};
use async_trait::async_trait; use async_trait::async_trait;
use mpz_circuits::circuits::AES128; use mpz_circuits::{AES128_KS, AES128_POST_KS};
use mpz_memory_core::binary::{Binary, U8}; use mpz_memory_core::binary::{Binary, U8};
use mpz_vm_core::{prelude::*, Call, Vm}; use mpz_vm_core::{prelude::*, Call, Vm};
use std::fmt::Debug; use std::fmt::Debug;
@@ -12,13 +12,35 @@ mod error;
pub use error::AesError; pub use error::AesError;
use error::ErrorKind; use error::ErrorKind;
/// AES key schedule: 11 round keys, 16 bytes each.
type KeySchedule = Array<U8, 176>;
/// Computes AES-128. /// Computes AES-128.
#[derive(Default, Debug)] #[derive(Default, Debug)]
pub struct Aes128 { pub struct Aes128 {
key: Option<Array<U8, 16>>, key: Option<Array<U8, 16>>,
key_schedule: Option<KeySchedule>,
iv: Option<Array<U8, 4>>, iv: Option<Array<U8, 4>>,
} }
impl Aes128 {
// Allocates key schedule.
//
// Expects the key to be already set.
fn alloc_key_schedule(&self, vm: &mut dyn Vm<Binary>) -> Result<KeySchedule, AesError> {
let ks: KeySchedule = vm
.call(
Call::builder(AES128_KS.clone())
.arg(self.key.expect("key is set"))
.build()
.expect("call should be valid"),
)
.map_err(|err| AesError::new(ErrorKind::Vm, err))?;
Ok(ks)
}
}
#[async_trait] #[async_trait]
impl Cipher for Aes128 { impl Cipher for Aes128 {
type Error = AesError; type Error = AesError;
@@ -45,18 +67,22 @@ impl Cipher for Aes128 {
} }
fn alloc_block( fn alloc_block(
&self, &mut self,
vm: &mut dyn Vm<Binary>, vm: &mut dyn Vm<Binary>,
input: Array<U8, 16>, input: Array<U8, 16>,
) -> Result<Self::Block, Self::Error> { ) -> Result<Self::Block, Self::Error> {
let key = self self.key
.key
.ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?; .ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?;
if self.key_schedule.is_none() {
self.key_schedule = Some(self.alloc_key_schedule(vm)?);
}
let ks = *self.key_schedule.as_ref().expect("key schedule was set");
let output = vm let output = vm
.call( .call(
Call::builder(AES128.clone()) Call::builder(AES128_POST_KS.clone())
.arg(key) .arg(ks)
.arg(input) .arg(input)
.build() .build()
.expect("call should be valid"), .expect("call should be valid"),
@@ -67,11 +93,10 @@ impl Cipher for Aes128 {
} }
fn alloc_ctr_block( fn alloc_ctr_block(
&self, &mut self,
vm: &mut dyn Vm<Binary>, vm: &mut dyn Vm<Binary>,
) -> Result<CtrBlock<Self::Nonce, Self::Counter, Self::Block>, Self::Error> { ) -> Result<CtrBlock<Self::Nonce, Self::Counter, Self::Block>, Self::Error> {
let key = self self.key
.key
.ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?; .ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?;
let iv = self let iv = self
.iv .iv
@@ -89,10 +114,15 @@ impl Cipher for Aes128 {
vm.mark_public(counter) vm.mark_public(counter)
.map_err(|err| AesError::new(ErrorKind::Vm, err))?; .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
if self.key_schedule.is_none() {
self.key_schedule = Some(self.alloc_key_schedule(vm)?);
}
let ks = *self.key_schedule.as_ref().expect("key schedule was set");
let output = vm let output = vm
.call( .call(
Call::builder(AES128.clone()) Call::builder(AES128_POST_KS.clone())
.arg(key) .arg(ks)
.arg(iv) .arg(iv)
.arg(explicit_nonce) .arg(explicit_nonce)
.arg(counter) .arg(counter)
@@ -109,12 +139,11 @@ impl Cipher for Aes128 {
} }
fn alloc_keystream( fn alloc_keystream(
&self, &mut self,
vm: &mut dyn Vm<Binary>, vm: &mut dyn Vm<Binary>,
len: usize, len: usize,
) -> Result<Keystream<Self::Nonce, Self::Counter, Self::Block>, Self::Error> { ) -> Result<Keystream<Self::Nonce, Self::Counter, Self::Block>, Self::Error> {
let key = self self.key
.key
.ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?; .ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?;
let iv = self let iv = self
.iv .iv
@@ -143,10 +172,15 @@ impl Cipher for Aes128 {
let blocks = inputs let blocks = inputs
.into_iter() .into_iter()
.map(|(explicit_nonce, counter)| { .map(|(explicit_nonce, counter)| {
if self.key_schedule.is_none() {
self.key_schedule = Some(self.alloc_key_schedule(vm)?);
}
let ks = *self.key_schedule.as_ref().expect("key schedule was set");
let output = vm let output = vm
.call( .call(
Call::builder(AES128.clone()) Call::builder(AES128_POST_KS.clone())
.arg(key) .arg(ks)
.arg(iv) .arg(iv)
.arg(explicit_nonce) .arg(explicit_nonce)
.arg(counter) .arg(counter)
@@ -172,15 +206,12 @@ mod tests {
use super::*; use super::*;
use crate::Cipher; use crate::Cipher;
use mpz_common::context::test_st_context; use mpz_common::context::test_st_context;
use mpz_garble::protocol::semihonest::{Evaluator, Garbler}; use mpz_ideal_vm::IdealVm;
use mpz_memory_core::{ use mpz_memory_core::{
binary::{Binary, U8}, binary::{Binary, U8},
correlated::Delta,
Array, MemoryExt, Vector, ViewExt, Array, MemoryExt, Vector, ViewExt,
}; };
use mpz_ot::ideal::cot::ideal_cot;
use mpz_vm_core::{Execute, Vm}; use mpz_vm_core::{Execute, Vm};
use rand::{rngs::StdRng, SeedableRng};
#[tokio::test] #[tokio::test]
async fn test_aes_ctr() { async fn test_aes_ctr() {
@@ -190,10 +221,11 @@ mod tests {
let start_counter = 3u32; let start_counter = 3u32;
let (mut ctx_a, mut ctx_b) = test_st_context(8); let (mut ctx_a, mut ctx_b) = test_st_context(8);
let (mut gen, mut ev) = mock_vm(); let mut gen = IdealVm::new();
let mut ev = IdealVm::new();
let aes_gen = setup_ctr(key, iv, &mut gen); let mut aes_gen = setup_ctr(key, iv, &mut gen);
let aes_ev = setup_ctr(key, iv, &mut ev); let mut aes_ev = setup_ctr(key, iv, &mut ev);
let msg = vec![42u8; 128]; let msg = vec![42u8; 128];
@@ -252,10 +284,11 @@ mod tests {
let input = [5_u8; 16]; let input = [5_u8; 16];
let (mut ctx_a, mut ctx_b) = test_st_context(8); let (mut ctx_a, mut ctx_b) = test_st_context(8);
let (mut gen, mut ev) = mock_vm(); let mut gen = IdealVm::new();
let mut ev = IdealVm::new();
let aes_gen = setup_block(key, &mut gen); let mut aes_gen = setup_block(key, &mut gen);
let aes_ev = setup_block(key, &mut ev); let mut aes_ev = setup_block(key, &mut ev);
let block_ref_gen: Array<U8, 16> = gen.alloc().unwrap(); let block_ref_gen: Array<U8, 16> = gen.alloc().unwrap();
gen.mark_public(block_ref_gen).unwrap(); gen.mark_public(block_ref_gen).unwrap();
@@ -294,18 +327,6 @@ mod tests {
assert_eq!(ciphertext_gen, expected); assert_eq!(ciphertext_gen, expected);
} }
fn mock_vm() -> (impl Vm<Binary>, impl Vm<Binary>) {
let mut rng = StdRng::seed_from_u64(0);
let delta = Delta::random(&mut rng);
let (cot_send, cot_recv) = ideal_cot(delta.into_inner());
let gen = Garbler::new(cot_send, [0u8; 16], delta);
let ev = Evaluator::new(cot_recv);
(gen, ev)
}
fn setup_ctr(key: [u8; 16], iv: [u8; 4], vm: &mut dyn Vm<Binary>) -> Aes128 { fn setup_ctr(key: [u8; 16], iv: [u8; 4], vm: &mut dyn Vm<Binary>) -> Aes128 {
let key_ref: Array<U8, 16> = vm.alloc().unwrap(); let key_ref: Array<U8, 16> = vm.alloc().unwrap();
vm.mark_public(key_ref).unwrap(); vm.mark_public(key_ref).unwrap();
@@ -344,8 +365,8 @@ mod tests {
start_ctr: usize, start_ctr: usize,
msg: Vec<u8>, msg: Vec<u8>,
) -> Vec<u8> { ) -> Vec<u8> {
use ::cipher::{KeyIvInit, StreamCipher, StreamCipherSeek};
use aes::Aes128; use aes::Aes128;
use cipher_crypto::{KeyIvInit, StreamCipher, StreamCipherSeek};
use ctr::Ctr32BE; use ctr::Ctr32BE;
let mut full_iv = [0u8; 16]; let mut full_iv = [0u8; 16];
@@ -365,7 +386,7 @@ mod tests {
fn aes128(key: [u8; 16], msg: [u8; 16]) -> [u8; 16] { fn aes128(key: [u8; 16], msg: [u8; 16]) -> [u8; 16] {
use ::aes::Aes128 as TestAes128; use ::aes::Aes128 as TestAes128;
use cipher_crypto::{BlockEncrypt, KeyInit}; use ::cipher::{BlockEncrypt, KeyInit};
let mut msg = msg.into(); let mut msg = msg.into();
let cipher = TestAes128::new(&key.into()); let cipher = TestAes128::new(&key.into());

View File

@@ -55,7 +55,7 @@ pub trait Cipher {
/// Allocates a single block in ECB mode. /// Allocates a single block in ECB mode.
fn alloc_block( fn alloc_block(
&self, &mut self,
vm: &mut dyn Vm<Binary>, vm: &mut dyn Vm<Binary>,
input: Self::Block, input: Self::Block,
) -> Result<Self::Block, Self::Error>; ) -> Result<Self::Block, Self::Error>;
@@ -63,7 +63,7 @@ pub trait Cipher {
/// Allocates a single block in counter mode. /// Allocates a single block in counter mode.
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
fn alloc_ctr_block( fn alloc_ctr_block(
&self, &mut self,
vm: &mut dyn Vm<Binary>, vm: &mut dyn Vm<Binary>,
) -> Result<CtrBlock<Self::Nonce, Self::Counter, Self::Block>, Self::Error>; ) -> Result<CtrBlock<Self::Nonce, Self::Counter, Self::Block>, Self::Error>;
@@ -75,7 +75,7 @@ pub trait Cipher {
/// * `len` - Length of the stream in bytes. /// * `len` - Length of the stream in bytes.
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
fn alloc_keystream( fn alloc_keystream(
&self, &mut self,
vm: &mut dyn Vm<Binary>, vm: &mut dyn Vm<Binary>,
len: usize, len: usize,
) -> Result<Keystream<Self::Nonce, Self::Counter, Self::Block>, Self::Error>; ) -> Result<Keystream<Self::Nonce, Self::Counter, Self::Block>, Self::Error>;

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "tlsn-deap" name = "tlsn-deap"
version = "0.1.0-alpha.13-pre" version = "0.1.0-alpha.14-pre"
edition = "2021" edition = "2021"
[lints] [lints]
@@ -19,11 +19,8 @@ futures = { workspace = true }
tokio = { workspace = true, features = ["sync"] } tokio = { workspace = true, features = ["sync"] }
[dev-dependencies] [dev-dependencies]
mpz-circuits = { workspace = true } mpz-circuits = { workspace = true, features = ["aes"] }
mpz-garble = { workspace = true } mpz-common = { workspace = true, features = ["test-utils"] }
mpz-ot = { workspace = true } mpz-ideal-vm = { workspace = true }
mpz-zk = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] } tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }
rand = { workspace = true }
rand06-compat = { workspace = true }

View File

@@ -15,7 +15,7 @@ use mpz_vm_core::{
memory::{binary::Binary, DecodeFuture, Memory, Repr, Slice, View}, memory::{binary::Binary, DecodeFuture, Memory, Repr, Slice, View},
Call, Callable, Execute, Vm, VmError, Call, Callable, Execute, Vm, VmError,
}; };
use rangeset::{Difference, RangeSet, UnionMut}; use rangeset::{ops::Set, set::RangeSet};
use tokio::sync::{Mutex, MutexGuard, OwnedMutexGuard}; use tokio::sync::{Mutex, MutexGuard, OwnedMutexGuard};
type Error = DeapError; type Error = DeapError;
@@ -210,10 +210,12 @@ where
} }
fn commit_raw(&mut self, slice: Slice) -> Result<(), VmError> { fn commit_raw(&mut self, slice: Slice) -> Result<(), VmError> {
let slice_range = slice.to_range();
// Follower's private inputs are not committed in the ZK VM until finalization. // Follower's private inputs are not committed in the ZK VM until finalization.
let input_minus_follower = slice.to_range().difference(&self.follower_input_ranges); let input_minus_follower = slice_range.difference(&self.follower_input_ranges);
let mut zk = self.zk.try_lock().unwrap(); let mut zk = self.zk.try_lock().unwrap();
for input in input_minus_follower.iter_ranges() { for input in input_minus_follower {
zk.commit_raw( zk.commit_raw(
self.memory_map self.memory_map
.try_get(Slice::from_range_unchecked(input))?, .try_get(Slice::from_range_unchecked(input))?,
@@ -266,7 +268,7 @@ where
mpc.mark_private_raw(slice)?; mpc.mark_private_raw(slice)?;
// Follower's private inputs will become public during finalization. // Follower's private inputs will become public during finalization.
zk.mark_public_raw(self.memory_map.try_get(slice)?)?; zk.mark_public_raw(self.memory_map.try_get(slice)?)?;
self.follower_input_ranges.union_mut(&slice.to_range()); self.follower_input_ranges.union_mut(slice.to_range());
self.follower_inputs.push(slice); self.follower_inputs.push(slice);
} }
} }
@@ -282,7 +284,7 @@ where
mpc.mark_blind_raw(slice)?; mpc.mark_blind_raw(slice)?;
// Follower's private inputs will become public during finalization. // Follower's private inputs will become public during finalization.
zk.mark_public_raw(self.memory_map.try_get(slice)?)?; zk.mark_public_raw(self.memory_map.try_get(slice)?)?;
self.follower_input_ranges.union_mut(&slice.to_range()); self.follower_input_ranges.union_mut(slice.to_range());
self.follower_inputs.push(slice); self.follower_inputs.push(slice);
} }
Role::Follower => { Role::Follower => {
@@ -382,37 +384,27 @@ enum ErrorRepr {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use mpz_circuits::circuits::AES128; use mpz_circuits::AES128;
use mpz_common::context::test_st_context; use mpz_common::context::test_st_context;
use mpz_core::Block; use mpz_ideal_vm::IdealVm;
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_ot::ideal::{cot::ideal_cot, rcot::ideal_rcot};
use mpz_vm_core::{ use mpz_vm_core::{
memory::{binary::U8, correlated::Delta, Array}, memory::{binary::U8, Array},
prelude::*, prelude::*,
}; };
use mpz_zk::{Prover, ProverConfig, Verifier, VerifierConfig};
use rand::{rngs::StdRng, SeedableRng};
use super::*; use super::*;
#[tokio::test] #[tokio::test]
async fn test_deap() { async fn test_deap() {
let mut rng = StdRng::seed_from_u64(0);
let delta_mpc = Delta::random(&mut rng);
let delta_zk = Delta::random(&mut rng);
let (mut ctx_a, mut ctx_b) = test_st_context(8); let (mut ctx_a, mut ctx_b) = test_st_context(8);
let (rcot_send, rcot_recv) = ideal_rcot(Block::ZERO, delta_zk.into_inner());
let (cot_send, cot_recv) = ideal_cot(delta_mpc.into_inner());
let gb = Garbler::new(cot_send, [0u8; 16], delta_mpc); let leader_mpc = IdealVm::new();
let ev = Evaluator::new(cot_recv); let leader_zk = IdealVm::new();
let prover = Prover::new(ProverConfig::default(), rcot_recv); let follower_mpc = IdealVm::new();
let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send); let follower_zk = IdealVm::new();
let mut leader = Deap::new(Role::Leader, gb, prover); let mut leader = Deap::new(Role::Leader, leader_mpc, leader_zk);
let mut follower = Deap::new(Role::Follower, ev, verifier); let mut follower = Deap::new(Role::Follower, follower_mpc, follower_zk);
let (ct_leader, ct_follower) = futures::join!( let (ct_leader, ct_follower) = futures::join!(
async { async {
@@ -478,21 +470,15 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn test_deap_desync_memory() { async fn test_deap_desync_memory() {
let mut rng = StdRng::seed_from_u64(0);
let delta_mpc = Delta::random(&mut rng);
let delta_zk = Delta::random(&mut rng);
let (mut ctx_a, mut ctx_b) = test_st_context(8); let (mut ctx_a, mut ctx_b) = test_st_context(8);
let (rcot_send, rcot_recv) = ideal_rcot(Block::ZERO, delta_zk.into_inner());
let (cot_send, cot_recv) = ideal_cot(delta_mpc.into_inner());
let gb = Garbler::new(cot_send, [0u8; 16], delta_mpc); let leader_mpc = IdealVm::new();
let ev = Evaluator::new(cot_recv); let leader_zk = IdealVm::new();
let prover = Prover::new(ProverConfig::default(), rcot_recv); let follower_mpc = IdealVm::new();
let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send); let follower_zk = IdealVm::new();
let mut leader = Deap::new(Role::Leader, gb, prover); let mut leader = Deap::new(Role::Leader, leader_mpc, leader_zk);
let mut follower = Deap::new(Role::Follower, ev, verifier); let mut follower = Deap::new(Role::Follower, follower_mpc, follower_zk);
// Desynchronize the memories. // Desynchronize the memories.
let _ = leader.zk().alloc_raw(1).unwrap(); let _ = leader.zk().alloc_raw(1).unwrap();
@@ -564,21 +550,15 @@ mod tests {
// detection by the follower. // detection by the follower.
#[tokio::test] #[tokio::test]
async fn test_malicious() { async fn test_malicious() {
let mut rng = StdRng::seed_from_u64(0);
let delta_mpc = Delta::random(&mut rng);
let delta_zk = Delta::random(&mut rng);
let (mut ctx_a, mut ctx_b) = test_st_context(8); let (mut ctx_a, mut ctx_b) = test_st_context(8);
let (rcot_send, rcot_recv) = ideal_rcot(Block::ZERO, delta_zk.into_inner());
let (cot_send, cot_recv) = ideal_cot(delta_mpc.into_inner());
let gb = Garbler::new(cot_send, [1u8; 16], delta_mpc); let leader_mpc = IdealVm::new();
let ev = Evaluator::new(cot_recv); let leader_zk = IdealVm::new();
let prover = Prover::new(ProverConfig::default(), rcot_recv); let follower_mpc = IdealVm::new();
let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send); let follower_zk = IdealVm::new();
let mut leader = Deap::new(Role::Leader, gb, prover); let mut leader = Deap::new(Role::Leader, leader_mpc, leader_zk);
let mut follower = Deap::new(Role::Follower, ev, verifier); let mut follower = Deap::new(Role::Follower, follower_mpc, follower_zk);
let (_, follower_res) = futures::join!( let (_, follower_res) = futures::join!(
async { async {

View File

@@ -1,7 +1,7 @@
use std::ops::Range; use std::ops::Range;
use mpz_vm_core::{memory::Slice, VmError}; use mpz_vm_core::{memory::Slice, VmError};
use rangeset::Subset; use rangeset::ops::Set;
/// A mapping between the memories of the MPC and ZK VMs. /// A mapping between the memories of the MPC and ZK VMs.
#[derive(Debug, Default)] #[derive(Debug, Default)]

View File

@@ -5,7 +5,7 @@ description = "A 2PC implementation of TLS HMAC-SHA256 PRF"
keywords = ["tls", "mpc", "2pc", "hmac", "sha256"] keywords = ["tls", "mpc", "2pc", "hmac", "sha256"]
categories = ["cryptography"] categories = ["cryptography"]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre" version = "0.1.0-alpha.14-pre"
edition = "2021" edition = "2021"
[lints] [lints]
@@ -20,14 +20,13 @@ mpz-core = { workspace = true }
mpz-circuits = { workspace = true } mpz-circuits = { workspace = true }
mpz-hash = { workspace = true } mpz-hash = { workspace = true }
sha2 = { workspace = true, features = ["compress"] }
thiserror = { workspace = true } thiserror = { workspace = true }
tracing = { workspace = true } tracing = { workspace = true }
sha2 = { workspace = true }
[dev-dependencies] [dev-dependencies]
mpz-ot = { workspace = true, features = ["ideal"] }
mpz-garble = { workspace = true }
mpz-common = { workspace = true, features = ["test-utils"] } mpz-common = { workspace = true, features = ["test-utils"] }
mpz-ideal-vm = { workspace = true }
criterion = { workspace = true, features = ["async_tokio"] } criterion = { workspace = true, features = ["async_tokio"] }
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] } tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }

View File

@@ -4,14 +4,12 @@ use criterion::{criterion_group, criterion_main, Criterion};
use hmac_sha256::{Mode, MpcPrf}; use hmac_sha256::{Mode, MpcPrf};
use mpz_common::context::test_mt_context; use mpz_common::context::test_mt_context;
use mpz_garble::protocol::semihonest::{Evaluator, Garbler}; use mpz_ideal_vm::IdealVm;
use mpz_ot::ideal::cot::ideal_cot;
use mpz_vm_core::{ use mpz_vm_core::{
memory::{binary::U8, correlated::Delta, Array}, memory::{binary::U8, Array},
prelude::*, prelude::*,
Execute, Execute,
}; };
use rand::{rngs::StdRng, SeedableRng};
#[allow(clippy::unit_arg)] #[allow(clippy::unit_arg)]
fn criterion_benchmark(c: &mut Criterion) { fn criterion_benchmark(c: &mut Criterion) {
@@ -29,8 +27,6 @@ criterion_group!(benches, criterion_benchmark);
criterion_main!(benches); criterion_main!(benches);
async fn prf(mode: Mode) { async fn prf(mode: Mode) {
let mut rng = StdRng::seed_from_u64(0);
let pms = [42u8; 32]; let pms = [42u8; 32];
let client_random = [69u8; 32]; let client_random = [69u8; 32];
let server_random: [u8; 32] = [96u8; 32]; let server_random: [u8; 32] = [96u8; 32];
@@ -39,11 +35,8 @@ async fn prf(mode: Mode) {
let mut leader_ctx = leader_exec.new_context().await.unwrap(); let mut leader_ctx = leader_exec.new_context().await.unwrap();
let mut follower_ctx = follower_exec.new_context().await.unwrap(); let mut follower_ctx = follower_exec.new_context().await.unwrap();
let delta = Delta::random(&mut rng); let mut leader_vm = IdealVm::new();
let (ot_send, ot_recv) = ideal_cot(delta.into_inner()); let mut follower_vm = IdealVm::new();
let mut leader_vm = Garbler::new(ot_send, [0u8; 16], delta);
let mut follower_vm = Evaluator::new(ot_recv);
let leader_pms: Array<U8, 32> = leader_vm.alloc().unwrap(); let leader_pms: Array<U8, 32> = leader_vm.alloc().unwrap();
leader_vm.mark_public(leader_pms).unwrap(); leader_vm.mark_public(leader_pms).unwrap();

View File

@@ -54,10 +54,11 @@ mod tests {
use crate::{ use crate::{
hmac::hmac_sha256, hmac::hmac_sha256,
sha256, state_to_bytes, sha256, state_to_bytes,
test_utils::{compute_inner_local, compute_outer_partial, mock_vm}, test_utils::{compute_inner_local, compute_outer_partial},
}; };
use mpz_common::context::test_st_context; use mpz_common::context::test_st_context;
use mpz_hash::sha256::Sha256; use mpz_hash::sha256::Sha256;
use mpz_ideal_vm::IdealVm;
use mpz_vm_core::{ use mpz_vm_core::{
memory::{ memory::{
binary::{U32, U8}, binary::{U32, U8},
@@ -83,7 +84,8 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn test_hmac_circuit() { async fn test_hmac_circuit() {
let (mut ctx_a, mut ctx_b) = test_st_context(8); let (mut ctx_a, mut ctx_b) = test_st_context(8);
let (mut leader, mut follower) = mock_vm(); let mut leader = IdealVm::new();
let mut follower = IdealVm::new();
let (inputs, references) = test_fixtures(); let (inputs, references) = test_fixtures();
for (input, &reference) in inputs.iter().zip(references.iter()) { for (input, &reference) in inputs.iter().zip(references.iter()) {

View File

@@ -72,10 +72,11 @@ fn state_to_bytes(input: [u32; 8]) -> [u8; 32] {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::{ use crate::{
test_utils::{mock_vm, prf_cf_vd, prf_keys, prf_ms, prf_sf_vd}, test_utils::{prf_cf_vd, prf_keys, prf_ms, prf_sf_vd},
Mode, MpcPrf, SessionKeys, Mode, MpcPrf, SessionKeys,
}; };
use mpz_common::context::test_st_context; use mpz_common::context::test_st_context;
use mpz_ideal_vm::IdealVm;
use mpz_vm_core::{ use mpz_vm_core::{
memory::{binary::U8, Array, MemoryExt, ViewExt}, memory::{binary::U8, Array, MemoryExt, ViewExt},
Execute, Execute,
@@ -123,7 +124,8 @@ mod tests {
// Set up vm and prf // Set up vm and prf
let (mut ctx_a, mut ctx_b) = test_st_context(128); let (mut ctx_a, mut ctx_b) = test_st_context(128);
let (mut leader, mut follower) = mock_vm(); let mut leader = IdealVm::new();
let mut follower = IdealVm::new();
let leader_pms: Array<U8, 32> = leader.alloc().unwrap(); let leader_pms: Array<U8, 32> = leader.alloc().unwrap();
leader.mark_public(leader_pms).unwrap(); leader.mark_public(leader_pms).unwrap();

View File

@@ -339,8 +339,9 @@ fn gen_merge_circ(size: usize) -> Arc<Circuit> {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::{prf::merge_outputs, test_utils::mock_vm}; use crate::prf::merge_outputs;
use mpz_common::context::test_st_context; use mpz_common::context::test_st_context;
use mpz_ideal_vm::IdealVm;
use mpz_vm_core::{ use mpz_vm_core::{
memory::{binary::U8, Array, MemoryExt, ViewExt}, memory::{binary::U8, Array, MemoryExt, ViewExt},
Execute, Execute,
@@ -349,7 +350,8 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn test_merge_outputs() { async fn test_merge_outputs() {
let (mut ctx_a, mut ctx_b) = test_st_context(8); let (mut ctx_a, mut ctx_b) = test_st_context(8);
let (mut leader, mut follower) = mock_vm(); let mut leader = IdealVm::new();
let mut follower = IdealVm::new();
let input1: [u8; 32] = std::array::from_fn(|i| i as u8); let input1: [u8; 32] = std::array::from_fn(|i| i as u8);
let input2: [u8; 32] = std::array::from_fn(|i| i as u8 + 32); let input2: [u8; 32] = std::array::from_fn(|i| i as u8 + 32);

View File

@@ -137,10 +137,11 @@ impl Prf {
mod tests { mod tests {
use crate::{ use crate::{
prf::{compute_partial, function::Prf}, prf::{compute_partial, function::Prf},
test_utils::{mock_vm, phash}, test_utils::phash,
Mode, Mode,
}; };
use mpz_common::context::test_st_context; use mpz_common::context::test_st_context;
use mpz_ideal_vm::IdealVm;
use mpz_vm_core::{ use mpz_vm_core::{
memory::{binary::U8, Array, MemoryExt, ViewExt}, memory::{binary::U8, Array, MemoryExt, ViewExt},
Execute, Execute,
@@ -166,7 +167,8 @@ mod tests {
let mut rng = ThreadRng::default(); let mut rng = ThreadRng::default();
let (mut ctx_a, mut ctx_b) = test_st_context(8); let (mut ctx_a, mut ctx_b) = test_st_context(8);
let (mut leader, mut follower) = mock_vm(); let mut leader = IdealVm::new();
let mut follower = IdealVm::new();
let key: [u8; 32] = rng.random(); let key: [u8; 32] = rng.random();
let start_seed: Vec<u8> = vec![42; 64]; let start_seed: Vec<u8> = vec![42; 64];

View File

@@ -1,25 +1,10 @@
use crate::{sha256, state_to_bytes}; use crate::{sha256, state_to_bytes};
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_ot::ideal::cot::{ideal_cot, IdealCOTReceiver, IdealCOTSender};
use mpz_vm_core::memory::correlated::Delta;
use rand::{rngs::StdRng, Rng, SeedableRng}; use rand::{rngs::StdRng, Rng, SeedableRng};
pub(crate) const SHA256_IV: [u32; 8] = [ pub(crate) const SHA256_IV: [u32; 8] = [
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19, 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
]; ];
pub(crate) fn mock_vm() -> (Garbler<IdealCOTSender>, Evaluator<IdealCOTReceiver>) {
let mut rng = StdRng::seed_from_u64(0);
let delta = Delta::random(&mut rng);
let (cot_send, cot_recv) = ideal_cot(delta.into_inner());
let gen = Garbler::new(cot_send, [0u8; 16], delta);
let ev = Evaluator::new(cot_recv);
(gen, ev)
}
pub(crate) fn prf_ms(pms: [u8; 32], client_random: [u8; 32], server_random: [u8; 32]) -> [u8; 48] { pub(crate) fn prf_ms(pms: [u8; 32], client_random: [u8; 32], server_random: [u8; 32]) -> [u8; 48] {
let mut label_start_seed = b"master secret".to_vec(); let mut label_start_seed = b"master secret".to_vec();
label_start_seed.extend_from_slice(&client_random); label_start_seed.extend_from_slice(&client_random);

View File

@@ -5,7 +5,7 @@ description = "Implementation of the 3-party key-exchange protocol"
keywords = ["tls", "mpc", "2pc", "pms", "key-exchange"] keywords = ["tls", "mpc", "2pc", "pms", "key-exchange"]
categories = ["cryptography"] categories = ["cryptography"]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre" version = "0.1.0-alpha.14-pre"
edition = "2021" edition = "2021"
[lints] [lints]
@@ -40,6 +40,7 @@ tokio = { workspace = true, features = ["sync"] }
[dev-dependencies] [dev-dependencies]
mpz-ot = { workspace = true, features = ["ideal"] } mpz-ot = { workspace = true, features = ["ideal"] }
mpz-garble = { workspace = true } mpz-garble = { workspace = true }
mpz-ideal-vm = { workspace = true }
rand_core = { workspace = true } rand_core = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] } tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }

View File

@@ -459,9 +459,7 @@ mod tests {
use mpz_common::context::test_st_context; use mpz_common::context::test_st_context;
use mpz_core::Block; use mpz_core::Block;
use mpz_fields::UniformRand; use mpz_fields::UniformRand;
use mpz_garble::protocol::semihonest::{Evaluator, Garbler}; use mpz_ideal_vm::IdealVm;
use mpz_memory_core::correlated::Delta;
use mpz_ot::ideal::cot::{ideal_cot, IdealCOTReceiver, IdealCOTSender};
use mpz_share_conversion::ideal::{ use mpz_share_conversion::ideal::{
ideal_share_convert, IdealShareConvertReceiver, IdealShareConvertSender, ideal_share_convert, IdealShareConvertReceiver, IdealShareConvertSender,
}; };
@@ -484,7 +482,8 @@ mod tests {
async fn test_key_exchange() { async fn test_key_exchange() {
let mut rng = StdRng::seed_from_u64(0).compat(); let mut rng = StdRng::seed_from_u64(0).compat();
let (mut ctx_a, mut ctx_b) = test_st_context(8); let (mut ctx_a, mut ctx_b) = test_st_context(8);
let (mut gen, mut ev) = mock_vm(); let mut gen = IdealVm::new();
let mut ev = IdealVm::new();
let leader_private_key = SecretKey::random(&mut rng); let leader_private_key = SecretKey::random(&mut rng);
let follower_private_key = SecretKey::random(&mut rng); let follower_private_key = SecretKey::random(&mut rng);
@@ -625,7 +624,8 @@ mod tests {
async fn test_malicious_key_exchange(#[case] malicious: Malicious) { async fn test_malicious_key_exchange(#[case] malicious: Malicious) {
let mut rng = StdRng::seed_from_u64(0); let mut rng = StdRng::seed_from_u64(0);
let (mut ctx_a, mut ctx_b) = test_st_context(8); let (mut ctx_a, mut ctx_b) = test_st_context(8);
let (mut gen, mut ev) = mock_vm(); let mut gen = IdealVm::new();
let mut ev = IdealVm::new();
let leader_private_key = SecretKey::random(&mut rng.compat_by_ref()); let leader_private_key = SecretKey::random(&mut rng.compat_by_ref());
let follower_private_key = SecretKey::random(&mut rng.compat_by_ref()); let follower_private_key = SecretKey::random(&mut rng.compat_by_ref());
@@ -704,7 +704,8 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn test_circuit() { async fn test_circuit() {
let (mut ctx_a, mut ctx_b) = test_st_context(8); let (mut ctx_a, mut ctx_b) = test_st_context(8);
let (gen, ev) = mock_vm(); let gen = IdealVm::new();
let ev = IdealVm::new();
let share_a0_bytes = [5_u8; 32]; let share_a0_bytes = [5_u8; 32];
let share_a1_bytes = [2_u8; 32]; let share_a1_bytes = [2_u8; 32];
@@ -834,16 +835,4 @@ mod tests {
(leader, follower) (leader, follower)
} }
fn mock_vm() -> (Garbler<IdealCOTSender>, Evaluator<IdealCOTReceiver>) {
let mut rng = StdRng::seed_from_u64(0);
let delta = Delta::random(&mut rng);
let (cot_send, cot_recv) = ideal_cot(delta.into_inner());
let gen = Garbler::new(cot_send, [0u8; 16], delta);
let ev = Evaluator::new(cot_recv);
(gen, ev)
}
} }

View File

@@ -8,7 +8,7 @@
//! with the server alone and forward all messages from and to the follower. //! with the server alone and forward all messages from and to the follower.
//! //!
//! A detailed description of this protocol can be found in our documentation //! A detailed description of this protocol can be found in our documentation
//! <https://docs.tlsnotary.org/protocol/notarization/key_exchange.html>. //! <https://tlsnotary.org/docs/mpc/key_exchange>.
#![deny(missing_docs, unreachable_pub, unused_must_use)] #![deny(missing_docs, unreachable_pub, unused_must_use)]
#![deny(clippy::all)] #![deny(clippy::all)]

View File

@@ -26,8 +26,7 @@ pub fn create_mock_key_exchange_pair() -> (MockKeyExchange, MockKeyExchange) {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use mpz_garble::protocol::semihonest::{Evaluator, Garbler}; use mpz_ideal_vm::IdealVm;
use mpz_ot::ideal::cot::{IdealCOTReceiver, IdealCOTSender};
use super::*; use super::*;
use crate::KeyExchange; use crate::KeyExchange;
@@ -40,12 +39,12 @@ mod tests {
is_key_exchange::< is_key_exchange::<
MpcKeyExchange<IdealShareConvertSender<P256>, IdealShareConvertReceiver<P256>>, MpcKeyExchange<IdealShareConvertSender<P256>, IdealShareConvertReceiver<P256>>,
Garbler<IdealCOTSender>, IdealVm,
>(leader); >(leader);
is_key_exchange::< is_key_exchange::<
MpcKeyExchange<IdealShareConvertSender<P256>, IdealShareConvertReceiver<P256>>, MpcKeyExchange<IdealShareConvertSender<P256>, IdealShareConvertReceiver<P256>>,
Evaluator<IdealCOTReceiver>, IdealVm,
>(follower); >(follower);
} }
} }

View File

@@ -4,7 +4,7 @@
//! protocol has semi-honest security. //! protocol has semi-honest security.
//! //!
//! The protocol is described in //! The protocol is described in
//! <https://docs.tlsnotary.org/protocol/notarization/key_exchange.html> //! <https://tlsnotary.org/docs/mpc/key_exchange>
use crate::{KeyExchangeError, Role}; use crate::{KeyExchangeError, Role};
use mpz_common::{Context, Flush}; use mpz_common::{Context, Flush};

View File

@@ -5,7 +5,7 @@ description = "Core types for TLSNotary"
keywords = ["tls", "mpc", "2pc", "types"] keywords = ["tls", "mpc", "2pc", "types"]
categories = ["cryptography"] categories = ["cryptography"]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre" version = "0.1.0-alpha.14-pre"
edition = "2021" edition = "2021"
[lints] [lints]
@@ -13,6 +13,7 @@ workspace = true
[features] [features]
default = [] default = []
mozilla-certs = ["dep:webpki-root-certs", "dep:webpki-roots"]
fixtures = [ fixtures = [
"dep:hex", "dep:hex",
"dep:tlsn-data-fixtures", "dep:tlsn-data-fixtures",
@@ -44,7 +45,8 @@ sha2 = { workspace = true }
thiserror = { workspace = true } thiserror = { workspace = true }
tiny-keccak = { workspace = true, features = ["keccak"] } tiny-keccak = { workspace = true, features = ["keccak"] }
web-time = { workspace = true } web-time = { workspace = true }
webpki-roots = { workspace = true } webpki-roots = { workspace = true, optional = true }
webpki-root-certs = { workspace = true, optional = true }
rustls-webpki = { workspace = true, features = ["ring"] } rustls-webpki = { workspace = true, features = ["ring"] }
rustls-pki-types = { workspace = true } rustls-pki-types = { workspace = true }
itybity = { workspace = true } itybity = { workspace = true }
@@ -57,5 +59,7 @@ generic-array = { workspace = true }
bincode = { workspace = true } bincode = { workspace = true }
hex = { workspace = true } hex = { workspace = true }
rstest = { workspace = true } rstest = { workspace = true }
tlsn-core = { workspace = true, features = ["fixtures"] }
tlsn-attestation = { workspace = true, features = ["fixtures"] }
tlsn-data-fixtures = { workspace = true } tlsn-data-fixtures = { workspace = true }
webpki-root-certs = { workspace = true } webpki-root-certs = { workspace = true }

View File

@@ -0,0 +1,7 @@
//! Configuration types.
pub mod prove;
pub mod prover;
pub mod tls;
pub mod tls_commit;
pub mod verifier;

View File

@@ -0,0 +1,189 @@
//! Proving configuration.
use rangeset::set::{RangeSet, ToRangeSet};
use serde::{Deserialize, Serialize};
use crate::transcript::{Direction, Transcript, TranscriptCommitConfig, TranscriptCommitRequest};
/// Configuration to prove information to the verifier.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProveConfig {
server_identity: bool,
reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
transcript_commit: Option<TranscriptCommitConfig>,
}
impl ProveConfig {
/// Creates a new builder.
pub fn builder(transcript: &Transcript) -> ProveConfigBuilder<'_> {
ProveConfigBuilder::new(transcript)
}
/// Returns `true` if the server identity is to be proven.
pub fn server_identity(&self) -> bool {
self.server_identity
}
/// Returns the sent and received ranges of the transcript to be revealed,
/// respectively.
pub fn reveal(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
self.reveal.as_ref()
}
/// Returns the transcript commitment configuration.
pub fn transcript_commit(&self) -> Option<&TranscriptCommitConfig> {
self.transcript_commit.as_ref()
}
/// Returns a request.
pub fn to_request(&self) -> ProveRequest {
ProveRequest {
server_identity: self.server_identity,
reveal: self.reveal.clone(),
transcript_commit: self
.transcript_commit
.clone()
.map(|config| config.to_request()),
}
}
}
/// Builder for [`ProveConfig`].
#[derive(Debug)]
pub struct ProveConfigBuilder<'a> {
transcript: &'a Transcript,
server_identity: bool,
reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
transcript_commit: Option<TranscriptCommitConfig>,
}
impl<'a> ProveConfigBuilder<'a> {
/// Creates a new builder.
pub fn new(transcript: &'a Transcript) -> Self {
Self {
transcript,
server_identity: false,
reveal: None,
transcript_commit: None,
}
}
/// Proves the server identity.
pub fn server_identity(&mut self) -> &mut Self {
self.server_identity = true;
self
}
/// Configures transcript commitments.
pub fn transcript_commit(&mut self, transcript_commit: TranscriptCommitConfig) -> &mut Self {
self.transcript_commit = Some(transcript_commit);
self
}
/// Reveals the given ranges of the transcript.
pub fn reveal(
&mut self,
direction: Direction,
ranges: &dyn ToRangeSet<usize>,
) -> Result<&mut Self, ProveConfigError> {
let idx = ranges.to_range_set();
if idx.end().unwrap_or(0) > self.transcript.len_of_direction(direction) {
return Err(ProveConfigError(ErrorRepr::IndexOutOfBounds {
direction,
actual: idx.end().unwrap_or(0),
len: self.transcript.len_of_direction(direction),
}));
}
let (sent, recv) = self.reveal.get_or_insert_default();
match direction {
Direction::Sent => sent.union_mut(&idx),
Direction::Received => recv.union_mut(&idx),
}
Ok(self)
}
/// Reveals the given ranges of the sent data transcript.
pub fn reveal_sent(
&mut self,
ranges: &dyn ToRangeSet<usize>,
) -> Result<&mut Self, ProveConfigError> {
self.reveal(Direction::Sent, ranges)
}
/// Reveals all of the sent data transcript.
pub fn reveal_sent_all(&mut self) -> Result<&mut Self, ProveConfigError> {
let len = self.transcript.len_of_direction(Direction::Sent);
let (sent, _) = self.reveal.get_or_insert_default();
sent.union_mut(&(0..len));
Ok(self)
}
/// Reveals the given ranges of the received data transcript.
pub fn reveal_recv(
&mut self,
ranges: &dyn ToRangeSet<usize>,
) -> Result<&mut Self, ProveConfigError> {
self.reveal(Direction::Received, ranges)
}
/// Reveals all of the received data transcript.
pub fn reveal_recv_all(&mut self) -> Result<&mut Self, ProveConfigError> {
let len = self.transcript.len_of_direction(Direction::Received);
let (_, recv) = self.reveal.get_or_insert_default();
recv.union_mut(&(0..len));
Ok(self)
}
/// Builds the configuration.
pub fn build(self) -> Result<ProveConfig, ProveConfigError> {
Ok(ProveConfig {
server_identity: self.server_identity,
reveal: self.reveal,
transcript_commit: self.transcript_commit,
})
}
}
/// Request to prove statements about the connection.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProveRequest {
server_identity: bool,
reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
transcript_commit: Option<TranscriptCommitRequest>,
}
impl ProveRequest {
/// Returns `true` if the server identity is to be proven.
pub fn server_identity(&self) -> bool {
self.server_identity
}
/// Returns the sent and received ranges of the transcript to be revealed,
/// respectively.
pub fn reveal(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
self.reveal.as_ref()
}
/// Returns the transcript commitment configuration.
pub fn transcript_commit(&self) -> Option<&TranscriptCommitRequest> {
self.transcript_commit.as_ref()
}
}
/// Error for [`ProveConfig`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct ProveConfigError(#[from] ErrorRepr);
#[derive(Debug, thiserror::Error)]
enum ErrorRepr {
#[error("range is out of bounds of the transcript ({direction}): {actual} > {len}")]
IndexOutOfBounds {
direction: Direction,
actual: usize,
len: usize,
},
}

View File

@@ -0,0 +1,33 @@
//! Prover configuration.
use serde::{Deserialize, Serialize};
/// Prover configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProverConfig {}
impl ProverConfig {
/// Creates a new builder.
pub fn builder() -> ProverConfigBuilder {
ProverConfigBuilder::default()
}
}
/// Builder for [`ProverConfig`].
#[derive(Debug, Default)]
pub struct ProverConfigBuilder {}
impl ProverConfigBuilder {
/// Builds the configuration.
pub fn build(self) -> Result<ProverConfig, ProverConfigError> {
Ok(ProverConfig {})
}
}
/// Error for [`ProverConfig`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct ProverConfigError(#[from] ErrorRepr);
#[derive(Debug, thiserror::Error)]
enum ErrorRepr {}

View File

@@ -0,0 +1,111 @@
//! TLS client configuration.
use serde::{Deserialize, Serialize};
use crate::{
connection::ServerName,
webpki::{CertificateDer, PrivateKeyDer, RootCertStore},
};
/// TLS client configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsClientConfig {
server_name: ServerName,
/// Root certificates.
root_store: RootCertStore,
/// Certificate chain and a matching private key for client
/// authentication.
client_auth: Option<(Vec<CertificateDer>, PrivateKeyDer)>,
}
impl TlsClientConfig {
/// Creates a new builder.
pub fn builder() -> TlsConfigBuilder {
TlsConfigBuilder::default()
}
/// Returns the server name.
pub fn server_name(&self) -> &ServerName {
&self.server_name
}
/// Returns the root certificates.
pub fn root_store(&self) -> &RootCertStore {
&self.root_store
}
/// Returns a certificate chain and a matching private key for client
/// authentication.
pub fn client_auth(&self) -> Option<&(Vec<CertificateDer>, PrivateKeyDer)> {
self.client_auth.as_ref()
}
}
/// Builder for [`TlsClientConfig`].
#[derive(Debug, Default)]
pub struct TlsConfigBuilder {
server_name: Option<ServerName>,
root_store: Option<RootCertStore>,
client_auth: Option<(Vec<CertificateDer>, PrivateKeyDer)>,
}
impl TlsConfigBuilder {
/// Sets the server name.
pub fn server_name(mut self, server_name: ServerName) -> Self {
self.server_name = Some(server_name);
self
}
/// Sets the root certificates to use for verifying the server's
/// certificate.
pub fn root_store(mut self, store: RootCertStore) -> Self {
self.root_store = Some(store);
self
}
/// Sets a DER-encoded certificate chain and a matching private key for
/// client authentication.
///
/// Often the chain will consist of a single end-entity certificate.
///
/// # Arguments
///
/// * `cert_key` - A tuple containing the certificate chain and the private
/// key.
///
/// - Each certificate in the chain must be in the X.509 format.
/// - The key must be in the ASN.1 format (either PKCS#8 or PKCS#1).
pub fn client_auth(mut self, cert_key: (Vec<CertificateDer>, PrivateKeyDer)) -> Self {
self.client_auth = Some(cert_key);
self
}
/// Builds the TLS configuration.
pub fn build(self) -> Result<TlsClientConfig, TlsConfigError> {
let server_name = self.server_name.ok_or(ErrorRepr::MissingField {
field: "server_name",
})?;
let root_store = self.root_store.ok_or(ErrorRepr::MissingField {
field: "root_store",
})?;
Ok(TlsClientConfig {
server_name,
root_store,
client_auth: self.client_auth,
})
}
}
/// TLS configuration error.
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct TlsConfigError(#[from] ErrorRepr);
#[derive(Debug, thiserror::Error)]
#[error("tls config error")]
enum ErrorRepr {
#[error("missing required field: {field}")]
MissingField { field: &'static str },
}

View File

@@ -0,0 +1,94 @@
//! TLS commitment configuration.
pub mod mpc;
use serde::{Deserialize, Serialize};
/// TLS commitment configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsCommitConfig {
protocol: TlsCommitProtocolConfig,
}
impl TlsCommitConfig {
/// Creates a new builder.
pub fn builder() -> TlsCommitConfigBuilder {
TlsCommitConfigBuilder::default()
}
/// Returns the protocol configuration.
pub fn protocol(&self) -> &TlsCommitProtocolConfig {
&self.protocol
}
/// Returns a TLS commitment request.
pub fn to_request(&self) -> TlsCommitRequest {
TlsCommitRequest {
config: self.protocol.clone(),
}
}
}
/// Builder for [`TlsCommitConfig`].
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct TlsCommitConfigBuilder {
protocol: Option<TlsCommitProtocolConfig>,
}
impl TlsCommitConfigBuilder {
/// Sets the protocol configuration.
pub fn protocol<C>(mut self, protocol: C) -> Self
where
C: Into<TlsCommitProtocolConfig>,
{
self.protocol = Some(protocol.into());
self
}
/// Builds the configuration.
pub fn build(self) -> Result<TlsCommitConfig, TlsCommitConfigError> {
let protocol = self
.protocol
.ok_or(ErrorRepr::MissingField { name: "protocol" })?;
Ok(TlsCommitConfig { protocol })
}
}
/// TLS commitment protocol configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[non_exhaustive]
pub enum TlsCommitProtocolConfig {
/// MPC-TLS configuration.
Mpc(mpc::MpcTlsConfig),
}
impl From<mpc::MpcTlsConfig> for TlsCommitProtocolConfig {
fn from(config: mpc::MpcTlsConfig) -> Self {
Self::Mpc(config)
}
}
/// TLS commitment request.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsCommitRequest {
config: TlsCommitProtocolConfig,
}
impl TlsCommitRequest {
/// Returns the protocol configuration.
pub fn protocol(&self) -> &TlsCommitProtocolConfig {
&self.config
}
}
/// Error for [`TlsCommitConfig`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct TlsCommitConfigError(#[from] ErrorRepr);
#[derive(Debug, thiserror::Error)]
enum ErrorRepr {
#[error("missing field: {name}")]
MissingField { name: &'static str },
}

View File

@@ -0,0 +1,241 @@
//! MPC-TLS commitment protocol configuration.
use serde::{Deserialize, Serialize};
// Default is 32 bytes to decrypt the TLS protocol messages.
const DEFAULT_MAX_RECV_ONLINE: usize = 32;
/// MPC-TLS commitment protocol configuration.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(try_from = "unchecked::MpcTlsConfigUnchecked")]
pub struct MpcTlsConfig {
/// Maximum number of bytes that can be sent.
max_sent_data: usize,
/// Maximum number of application data records that can be sent.
max_sent_records: Option<usize>,
/// Maximum number of bytes that can be decrypted online, i.e. while the
/// MPC-TLS connection is active.
max_recv_data_online: usize,
/// Maximum number of bytes that can be received.
max_recv_data: usize,
/// Maximum number of received application data records that can be
/// decrypted online, i.e. while the MPC-TLS connection is active.
max_recv_records_online: Option<usize>,
/// Whether the `deferred decryption` feature is toggled on from the start
/// of the MPC-TLS connection.
defer_decryption_from_start: bool,
/// Network settings.
network: NetworkSetting,
}
impl MpcTlsConfig {
/// Creates a new builder.
pub fn builder() -> MpcTlsConfigBuilder {
MpcTlsConfigBuilder::default()
}
/// Returns the maximum number of bytes that can be sent.
pub fn max_sent_data(&self) -> usize {
self.max_sent_data
}
/// Returns the maximum number of application data records that can
/// be sent.
pub fn max_sent_records(&self) -> Option<usize> {
self.max_sent_records
}
/// Returns the maximum number of bytes that can be decrypted online.
pub fn max_recv_data_online(&self) -> usize {
self.max_recv_data_online
}
/// Returns the maximum number of bytes that can be received.
pub fn max_recv_data(&self) -> usize {
self.max_recv_data
}
/// Returns the maximum number of received application data records that
/// can be decrypted online.
pub fn max_recv_records_online(&self) -> Option<usize> {
self.max_recv_records_online
}
/// Returns whether the `deferred decryption` feature is toggled on from the
/// start of the MPC-TLS connection.
pub fn defer_decryption_from_start(&self) -> bool {
self.defer_decryption_from_start
}
/// Returns the network settings.
pub fn network(&self) -> NetworkSetting {
self.network
}
}
fn validate(config: MpcTlsConfig) -> Result<MpcTlsConfig, MpcTlsConfigError> {
if config.max_recv_data_online > config.max_recv_data {
return Err(ErrorRepr::InvalidValue {
name: "max_recv_data_online",
reason: format!(
"must be <= max_recv_data ({} > {})",
config.max_recv_data_online, config.max_recv_data
),
}
.into());
}
Ok(config)
}
/// Builder for [`MpcTlsConfig`].
#[derive(Debug, Default)]
pub struct MpcTlsConfigBuilder {
max_sent_data: Option<usize>,
max_sent_records: Option<usize>,
max_recv_data_online: Option<usize>,
max_recv_data: Option<usize>,
max_recv_records_online: Option<usize>,
defer_decryption_from_start: Option<bool>,
network: Option<NetworkSetting>,
}
impl MpcTlsConfigBuilder {
/// Sets the maximum number of bytes that can be sent.
pub fn max_sent_data(mut self, max_sent_data: usize) -> Self {
self.max_sent_data = Some(max_sent_data);
self
}
/// Sets the maximum number of application data records that can be sent.
pub fn max_sent_records(mut self, max_sent_records: usize) -> Self {
self.max_sent_records = Some(max_sent_records);
self
}
/// Sets the maximum number of bytes that can be decrypted online.
pub fn max_recv_data_online(mut self, max_recv_data_online: usize) -> Self {
self.max_recv_data_online = Some(max_recv_data_online);
self
}
/// Sets the maximum number of bytes that can be received.
pub fn max_recv_data(mut self, max_recv_data: usize) -> Self {
self.max_recv_data = Some(max_recv_data);
self
}
/// Sets the maximum number of received application data records that can
/// be decrypted online.
pub fn max_recv_records_online(mut self, max_recv_records_online: usize) -> Self {
self.max_recv_records_online = Some(max_recv_records_online);
self
}
/// Sets whether the `deferred decryption` feature is toggled on from the
/// start of the MPC-TLS connection.
pub fn defer_decryption_from_start(mut self, defer_decryption_from_start: bool) -> Self {
self.defer_decryption_from_start = Some(defer_decryption_from_start);
self
}
/// Sets the network settings.
pub fn network(mut self, network: NetworkSetting) -> Self {
self.network = Some(network);
self
}
/// Builds the configuration.
pub fn build(self) -> Result<MpcTlsConfig, MpcTlsConfigError> {
let Self {
max_sent_data,
max_sent_records,
max_recv_data_online,
max_recv_data,
max_recv_records_online,
defer_decryption_from_start,
network,
} = self;
let max_sent_data = max_sent_data.ok_or(ErrorRepr::MissingField {
name: "max_sent_data",
})?;
let max_recv_data_online = max_recv_data_online.unwrap_or(DEFAULT_MAX_RECV_ONLINE);
let max_recv_data = max_recv_data.ok_or(ErrorRepr::MissingField {
name: "max_recv_data",
})?;
let defer_decryption_from_start = defer_decryption_from_start.unwrap_or(true);
let network = network.unwrap_or_default();
validate(MpcTlsConfig {
max_sent_data,
max_sent_records,
max_recv_data_online,
max_recv_data,
max_recv_records_online,
defer_decryption_from_start,
network,
})
}
}
/// Settings for the network environment.
///
/// Provides optimization options to adapt the protocol to different network
/// situations.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, Default)]
pub enum NetworkSetting {
/// Reduces network round-trips at the expense of consuming more network
/// bandwidth.
Bandwidth,
/// Reduces network bandwidth utilization at the expense of more network
/// round-trips.
#[default]
Latency,
}
/// Error for [`MpcTlsConfig`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct MpcTlsConfigError(#[from] ErrorRepr);
#[derive(Debug, thiserror::Error)]
enum ErrorRepr {
#[error("missing field: {name}")]
MissingField { name: &'static str },
#[error("invalid value for field({name}): {reason}")]
InvalidValue { name: &'static str, reason: String },
}
mod unchecked {
use super::*;
#[derive(Deserialize)]
pub(super) struct MpcTlsConfigUnchecked {
max_sent_data: usize,
max_sent_records: Option<usize>,
max_recv_data_online: usize,
max_recv_data: usize,
max_recv_records_online: Option<usize>,
defer_decryption_from_start: bool,
network: NetworkSetting,
}
impl TryFrom<MpcTlsConfigUnchecked> for MpcTlsConfig {
type Error = MpcTlsConfigError;
fn try_from(value: MpcTlsConfigUnchecked) -> Result<Self, Self::Error> {
validate(MpcTlsConfig {
max_sent_data: value.max_sent_data,
max_sent_records: value.max_sent_records,
max_recv_data_online: value.max_recv_data_online,
max_recv_data: value.max_recv_data,
max_recv_records_online: value.max_recv_records_online,
defer_decryption_from_start: value.defer_decryption_from_start,
network: value.network,
})
}
}
}

View File

@@ -0,0 +1,56 @@
//! Verifier configuration.
use serde::{Deserialize, Serialize};
use crate::webpki::RootCertStore;
/// Verifier configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VerifierConfig {
root_store: RootCertStore,
}
impl VerifierConfig {
/// Creates a new builder.
pub fn builder() -> VerifierConfigBuilder {
VerifierConfigBuilder::default()
}
/// Returns the root certificate store.
pub fn root_store(&self) -> &RootCertStore {
&self.root_store
}
}
/// Builder for [`VerifierConfig`].
#[derive(Debug, Default)]
pub struct VerifierConfigBuilder {
    root_store: Option<RootCertStore>,
}

impl VerifierConfigBuilder {
    /// Sets the root certificate store.
    pub fn root_store(self, root_store: RootCertStore) -> Self {
        Self {
            root_store: Some(root_store),
        }
    }

    /// Builds the configuration.
    ///
    /// Fails if the root certificate store was never provided.
    pub fn build(self) -> Result<VerifierConfig, VerifierConfigError> {
        match self.root_store {
            Some(root_store) => Ok(VerifierConfig { root_store }),
            None => Err(ErrorRepr::MissingField { name: "root_store" }.into()),
        }
    }
}
/// Error for [`VerifierConfig`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct VerifierConfigError(#[from] ErrorRepr);

// Private error representation behind the opaque public error type.
#[derive(Debug, thiserror::Error)]
enum ErrorRepr {
    /// A required builder field was not provided.
    #[error("missing field: {name}")]
    MissingField { name: &'static str },
}

View File

@@ -6,10 +6,7 @@ use rustls_pki_types as webpki_types;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use tls_core::msgs::{codec::Codec, enums::NamedGroup, handshake::ServerECDHParams}; use tls_core::msgs::{codec::Codec, enums::NamedGroup, handshake::ServerECDHParams};
use crate::{ use crate::webpki::{CertificateDer, ServerCertVerifier, ServerCertVerifierError};
transcript::TlsTranscript,
webpki::{CertificateDer, ServerCertVerifier, ServerCertVerifierError},
};
/// TLS version. /// TLS version.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
@@ -119,84 +116,75 @@ pub enum KeyType {
SECP256R1 = 0x0017, SECP256R1 = 0x0017,
} }
/// Signature scheme on the key exchange parameters. /// Signature algorithm used on the key exchange parameters.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")] #[serde(rename_all = "lowercase")]
#[allow(non_camel_case_types, missing_docs)] #[allow(non_camel_case_types, missing_docs)]
pub enum SignatureScheme { pub enum SignatureAlgorithm {
RSA_PKCS1_SHA1 = 0x0201, ECDSA_NISTP256_SHA256,
ECDSA_SHA1_Legacy = 0x0203, ECDSA_NISTP256_SHA384,
RSA_PKCS1_SHA256 = 0x0401, ECDSA_NISTP384_SHA256,
ECDSA_NISTP256_SHA256 = 0x0403, ECDSA_NISTP384_SHA384,
RSA_PKCS1_SHA384 = 0x0501, ED25519,
ECDSA_NISTP384_SHA384 = 0x0503, RSA_PKCS1_2048_8192_SHA256,
RSA_PKCS1_SHA512 = 0x0601, RSA_PKCS1_2048_8192_SHA384,
ECDSA_NISTP521_SHA512 = 0x0603, RSA_PKCS1_2048_8192_SHA512,
RSA_PSS_SHA256 = 0x0804, RSA_PSS_2048_8192_SHA256_LEGACY_KEY,
RSA_PSS_SHA384 = 0x0805, RSA_PSS_2048_8192_SHA384_LEGACY_KEY,
RSA_PSS_SHA512 = 0x0806, RSA_PSS_2048_8192_SHA512_LEGACY_KEY,
ED25519 = 0x0807,
} }
impl fmt::Display for SignatureScheme { impl fmt::Display for SignatureAlgorithm {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self { match self {
SignatureScheme::RSA_PKCS1_SHA1 => write!(f, "RSA_PKCS1_SHA1"), SignatureAlgorithm::ECDSA_NISTP256_SHA256 => write!(f, "ECDSA_NISTP256_SHA256"),
SignatureScheme::ECDSA_SHA1_Legacy => write!(f, "ECDSA_SHA1_Legacy"), SignatureAlgorithm::ECDSA_NISTP256_SHA384 => write!(f, "ECDSA_NISTP256_SHA384"),
SignatureScheme::RSA_PKCS1_SHA256 => write!(f, "RSA_PKCS1_SHA256"), SignatureAlgorithm::ECDSA_NISTP384_SHA256 => write!(f, "ECDSA_NISTP384_SHA256"),
SignatureScheme::ECDSA_NISTP256_SHA256 => write!(f, "ECDSA_NISTP256_SHA256"), SignatureAlgorithm::ECDSA_NISTP384_SHA384 => write!(f, "ECDSA_NISTP384_SHA384"),
SignatureScheme::RSA_PKCS1_SHA384 => write!(f, "RSA_PKCS1_SHA384"), SignatureAlgorithm::ED25519 => write!(f, "ED25519"),
SignatureScheme::ECDSA_NISTP384_SHA384 => write!(f, "ECDSA_NISTP384_SHA384"), SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256 => {
SignatureScheme::RSA_PKCS1_SHA512 => write!(f, "RSA_PKCS1_SHA512"), write!(f, "RSA_PKCS1_2048_8192_SHA256")
SignatureScheme::ECDSA_NISTP521_SHA512 => write!(f, "ECDSA_NISTP521_SHA512"), }
SignatureScheme::RSA_PSS_SHA256 => write!(f, "RSA_PSS_SHA256"), SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384 => {
SignatureScheme::RSA_PSS_SHA384 => write!(f, "RSA_PSS_SHA384"), write!(f, "RSA_PKCS1_2048_8192_SHA384")
SignatureScheme::RSA_PSS_SHA512 => write!(f, "RSA_PSS_SHA512"), }
SignatureScheme::ED25519 => write!(f, "ED25519"), SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512 => {
write!(f, "RSA_PKCS1_2048_8192_SHA512")
}
SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
write!(f, "RSA_PSS_2048_8192_SHA256_LEGACY_KEY")
}
SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
write!(f, "RSA_PSS_2048_8192_SHA384_LEGACY_KEY")
}
SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
write!(f, "RSA_PSS_2048_8192_SHA512_LEGACY_KEY")
}
} }
} }
} }
impl TryFrom<tls_core::msgs::enums::SignatureScheme> for SignatureScheme { impl From<tls_core::verify::SignatureAlgorithm> for SignatureAlgorithm {
type Error = &'static str; fn from(value: tls_core::verify::SignatureAlgorithm) -> Self {
use tls_core::verify::SignatureAlgorithm as Core;
fn try_from(value: tls_core::msgs::enums::SignatureScheme) -> Result<Self, Self::Error> {
use tls_core::msgs::enums::SignatureScheme as Core;
use SignatureScheme::*;
Ok(match value {
Core::RSA_PKCS1_SHA1 => RSA_PKCS1_SHA1,
Core::ECDSA_SHA1_Legacy => ECDSA_SHA1_Legacy,
Core::RSA_PKCS1_SHA256 => RSA_PKCS1_SHA256,
Core::ECDSA_NISTP256_SHA256 => ECDSA_NISTP256_SHA256,
Core::RSA_PKCS1_SHA384 => RSA_PKCS1_SHA384,
Core::ECDSA_NISTP384_SHA384 => ECDSA_NISTP384_SHA384,
Core::RSA_PKCS1_SHA512 => RSA_PKCS1_SHA512,
Core::ECDSA_NISTP521_SHA512 => ECDSA_NISTP521_SHA512,
Core::RSA_PSS_SHA256 => RSA_PSS_SHA256,
Core::RSA_PSS_SHA384 => RSA_PSS_SHA384,
Core::RSA_PSS_SHA512 => RSA_PSS_SHA512,
Core::ED25519 => ED25519,
_ => return Err("unsupported signature scheme"),
})
}
}
impl From<SignatureScheme> for tls_core::msgs::enums::SignatureScheme {
fn from(value: SignatureScheme) -> Self {
use tls_core::msgs::enums::SignatureScheme::*;
match value { match value {
SignatureScheme::RSA_PKCS1_SHA1 => RSA_PKCS1_SHA1, Core::ECDSA_NISTP256_SHA256 => SignatureAlgorithm::ECDSA_NISTP256_SHA256,
SignatureScheme::ECDSA_SHA1_Legacy => ECDSA_SHA1_Legacy, Core::ECDSA_NISTP256_SHA384 => SignatureAlgorithm::ECDSA_NISTP256_SHA384,
SignatureScheme::RSA_PKCS1_SHA256 => RSA_PKCS1_SHA256, Core::ECDSA_NISTP384_SHA256 => SignatureAlgorithm::ECDSA_NISTP384_SHA256,
SignatureScheme::ECDSA_NISTP256_SHA256 => ECDSA_NISTP256_SHA256, Core::ECDSA_NISTP384_SHA384 => SignatureAlgorithm::ECDSA_NISTP384_SHA384,
SignatureScheme::RSA_PKCS1_SHA384 => RSA_PKCS1_SHA384, Core::ED25519 => SignatureAlgorithm::ED25519,
SignatureScheme::ECDSA_NISTP384_SHA384 => ECDSA_NISTP384_SHA384, Core::RSA_PKCS1_2048_8192_SHA256 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256,
SignatureScheme::RSA_PKCS1_SHA512 => RSA_PKCS1_SHA512, Core::RSA_PKCS1_2048_8192_SHA384 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384,
SignatureScheme::ECDSA_NISTP521_SHA512 => ECDSA_NISTP521_SHA512, Core::RSA_PKCS1_2048_8192_SHA512 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512,
SignatureScheme::RSA_PSS_SHA256 => RSA_PSS_SHA256, Core::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
SignatureScheme::RSA_PSS_SHA384 => RSA_PSS_SHA384, SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY
SignatureScheme::RSA_PSS_SHA512 => RSA_PSS_SHA512, }
SignatureScheme::ED25519 => ED25519, Core::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY
}
Core::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY
}
} }
} }
} }
@@ -204,8 +192,8 @@ impl From<SignatureScheme> for tls_core::msgs::enums::SignatureScheme {
/// Server's signature of the key exchange parameters. /// Server's signature of the key exchange parameters.
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServerSignature { pub struct ServerSignature {
/// Signature scheme. /// Signature algorithm.
pub scheme: SignatureScheme, pub alg: SignatureAlgorithm,
/// Signature data. /// Signature data.
pub sig: Vec<u8>, pub sig: Vec<u8>,
} }
@@ -315,25 +303,6 @@ pub struct HandshakeData {
} }
impl HandshakeData { impl HandshakeData {
/// Creates a new instance.
///
/// # Arguments
///
/// * `transcript` - The TLS transcript.
pub fn new(transcript: &TlsTranscript) -> Self {
Self {
certs: transcript
.server_cert_chain()
.expect("server cert chain is present")
.to_vec(),
sig: transcript
.server_signature()
.expect("server signature is present")
.clone(),
binding: transcript.certificate_binding().clone(),
}
}
/// Verifies the handshake data. /// Verifies the handshake data.
/// ///
/// # Arguments /// # Arguments
@@ -381,20 +350,23 @@ impl HandshakeData {
message.extend_from_slice(&server_ephemeral_key.kx_params()); message.extend_from_slice(&server_ephemeral_key.kx_params());
use webpki::ring as alg; use webpki::ring as alg;
let sig_alg = match self.sig.scheme { let sig_alg = match self.sig.alg {
SignatureScheme::RSA_PKCS1_SHA256 => alg::RSA_PKCS1_2048_8192_SHA256, SignatureAlgorithm::ECDSA_NISTP256_SHA256 => alg::ECDSA_P256_SHA256,
SignatureScheme::RSA_PKCS1_SHA384 => alg::RSA_PKCS1_2048_8192_SHA384, SignatureAlgorithm::ECDSA_NISTP256_SHA384 => alg::ECDSA_P256_SHA384,
SignatureScheme::RSA_PKCS1_SHA512 => alg::RSA_PKCS1_2048_8192_SHA512, SignatureAlgorithm::ECDSA_NISTP384_SHA256 => alg::ECDSA_P384_SHA256,
SignatureScheme::RSA_PSS_SHA256 => alg::RSA_PSS_2048_8192_SHA256_LEGACY_KEY, SignatureAlgorithm::ECDSA_NISTP384_SHA384 => alg::ECDSA_P384_SHA384,
SignatureScheme::RSA_PSS_SHA384 => alg::RSA_PSS_2048_8192_SHA384_LEGACY_KEY, SignatureAlgorithm::ED25519 => alg::ED25519,
SignatureScheme::RSA_PSS_SHA512 => alg::RSA_PSS_2048_8192_SHA512_LEGACY_KEY, SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256 => alg::RSA_PKCS1_2048_8192_SHA256,
SignatureScheme::ECDSA_NISTP256_SHA256 => alg::ECDSA_P256_SHA256, SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384 => alg::RSA_PKCS1_2048_8192_SHA384,
SignatureScheme::ECDSA_NISTP384_SHA384 => alg::ECDSA_P384_SHA384, SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512 => alg::RSA_PKCS1_2048_8192_SHA512,
SignatureScheme::ED25519 => alg::ED25519, SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
scheme => { alg::RSA_PSS_2048_8192_SHA256_LEGACY_KEY
return Err(HandshakeVerificationError::UnsupportedSignatureScheme( }
scheme, SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
)) alg::RSA_PSS_2048_8192_SHA384_LEGACY_KEY
}
SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
alg::RSA_PSS_2048_8192_SHA512_LEGACY_KEY
} }
}; };
@@ -424,8 +396,6 @@ pub enum HandshakeVerificationError {
InvalidServerEphemeralKey, InvalidServerEphemeralKey,
#[error("server certificate verification failed: {0}")] #[error("server certificate verification failed: {0}")]
ServerCert(ServerCertVerifierError), ServerCert(ServerCertVerifierError),
#[error("unsupported signature scheme: {0}")]
UnsupportedSignatureScheme(SignatureScheme),
} }
#[cfg(test)] #[cfg(test)]

View File

@@ -1,11 +1,11 @@
use rangeset::RangeSet; use rangeset::set::RangeSet;
pub(crate) struct FmtRangeSet<'a>(pub &'a RangeSet<usize>); pub(crate) struct FmtRangeSet<'a>(pub &'a RangeSet<usize>);
impl<'a> std::fmt::Display for FmtRangeSet<'a> { impl<'a> std::fmt::Display for FmtRangeSet<'a> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("{")?; f.write_str("{")?;
for range in self.0.iter_ranges() { for range in self.0.iter() {
write!(f, "{}..{}", range.start, range.end)?; write!(f, "{}..{}", range.start, range.end)?;
if range.end < self.0.end().unwrap_or(0) { if range.end < self.0.end().unwrap_or(0) {
f.write_str(", ")?; f.write_str(", ")?;

View File

@@ -1,20 +1,14 @@
//! Fixtures for testing //! Fixtures for testing
mod provider;
pub mod transcript; pub mod transcript;
pub use provider::FixtureEncodingProvider;
use hex::FromHex; use hex::FromHex;
use crate::{ use crate::{
connection::{ connection::{
CertBinding, CertBindingV1_2, ConnectionInfo, DnsName, HandshakeData, KeyType, CertBinding, CertBindingV1_2, ConnectionInfo, DnsName, HandshakeData, KeyType,
ServerEphemKey, ServerName, ServerSignature, SignatureScheme, TlsVersion, TranscriptLength, ServerEphemKey, ServerName, ServerSignature, SignatureAlgorithm, TlsVersion,
}, TranscriptLength,
transcript::{
encoding::{EncoderSecret, EncodingProvider},
Transcript,
}, },
webpki::CertificateDer, webpki::CertificateDer,
}; };
@@ -47,7 +41,7 @@ impl ConnectionFixture {
CertificateDer(include_bytes!("fixtures/data/tlsnotary.org/ca.der").to_vec()), CertificateDer(include_bytes!("fixtures/data/tlsnotary.org/ca.der").to_vec()),
], ],
sig: ServerSignature { sig: ServerSignature {
scheme: SignatureScheme::RSA_PKCS1_SHA256, alg: SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256,
sig: Vec::<u8>::from_hex(include_bytes!( sig: Vec::<u8>::from_hex(include_bytes!(
"fixtures/data/tlsnotary.org/signature" "fixtures/data/tlsnotary.org/signature"
)) ))
@@ -92,7 +86,7 @@ impl ConnectionFixture {
CertificateDer(include_bytes!("fixtures/data/appliedzkp.org/ca.der").to_vec()), CertificateDer(include_bytes!("fixtures/data/appliedzkp.org/ca.der").to_vec()),
], ],
sig: ServerSignature { sig: ServerSignature {
scheme: SignatureScheme::ECDSA_NISTP256_SHA256, alg: SignatureAlgorithm::ECDSA_NISTP256_SHA256,
sig: Vec::<u8>::from_hex(include_bytes!( sig: Vec::<u8>::from_hex(include_bytes!(
"fixtures/data/appliedzkp.org/signature" "fixtures/data/appliedzkp.org/signature"
)) ))
@@ -128,27 +122,3 @@ impl ConnectionFixture {
server_ephemeral_key server_ephemeral_key
} }
} }
/// Returns an encoding provider fixture.
pub fn encoding_provider(tx: &[u8], rx: &[u8]) -> impl EncodingProvider {
let secret = encoder_secret();
FixtureEncodingProvider::new(&secret, Transcript::new(tx, rx))
}
/// Seed fixture.
const SEED: [u8; 32] = [0; 32];
/// Delta fixture.
const DELTA: [u8; 16] = [1; 16];
/// Returns an encoder secret fixture.
pub fn encoder_secret() -> EncoderSecret {
EncoderSecret::new(SEED, DELTA)
}
/// Returns a tampered encoder secret fixture.
pub fn encoder_secret_tampered_seed() -> EncoderSecret {
let mut seed = SEED;
seed[0] += 1;
EncoderSecret::new(seed, DELTA)
}

View File

@@ -1,41 +0,0 @@
use std::ops::Range;
use crate::transcript::{
encoding::{new_encoder, Encoder, EncoderSecret, EncodingProvider, EncodingProviderError},
Direction, Transcript,
};
/// An encoding provider fixture.
///
/// Pairs a transcript with an encoder so tests can obtain deterministic
/// encodings of transcript data.
pub struct FixtureEncodingProvider {
    // Trait object keeps the concrete type returned by `new_encoder` private.
    encoder: Box<dyn Encoder>,
    transcript: Transcript,
}
impl FixtureEncodingProvider {
/// Creates a new encoding provider fixture.
pub(crate) fn new(secret: &EncoderSecret, transcript: Transcript) -> Self {
Self {
encoder: Box::new(new_encoder(secret)),
transcript,
}
}
}
impl EncodingProvider for FixtureEncodingProvider {
    /// Encodes the transcript bytes in `range` for `direction` into `dest`.
    ///
    /// Returns [`EncodingProviderError`] when `range` is out of bounds of the
    /// transcript data for the requested direction.
    fn provide_encoding(
        &self,
        direction: Direction,
        range: Range<usize>,
        dest: &mut Vec<u8>,
    ) -> Result<(), EncodingProviderError> {
        // Select the side of the transcript the encoding is requested for.
        let transcript = match direction {
            Direction::Sent => &self.transcript.sent(),
            Direction::Received => &self.transcript.received(),
        };
        // `get` yields `None` for an out-of-bounds range.
        let data = transcript.get(range.clone()).ok_or(EncodingProviderError)?;
        self.encoder.encode_data(direction, range, data, dest);
        Ok(())
    }
}

View File

@@ -2,12 +2,13 @@
use aead::Payload as AeadPayload; use aead::Payload as AeadPayload;
use aes_gcm::{aead::Aead, Aes128Gcm, NewAead}; use aes_gcm::{aead::Aead, Aes128Gcm, NewAead};
#[allow(deprecated)]
use generic_array::GenericArray; use generic_array::GenericArray;
use rand::{rngs::StdRng, Rng, SeedableRng}; use rand::{rngs::StdRng, Rng, SeedableRng};
use tls_core::msgs::{ use tls_core::msgs::{
base::Payload, base::Payload,
codec::Codec, codec::Codec,
enums::{ContentType, HandshakeType, ProtocolVersion}, enums::{HandshakeType, ProtocolVersion},
handshake::{HandshakeMessagePayload, HandshakePayload}, handshake::{HandshakeMessagePayload, HandshakePayload},
message::{OpaqueMessage, PlainMessage}, message::{OpaqueMessage, PlainMessage},
}; };
@@ -15,7 +16,7 @@ use tls_core::msgs::{
use crate::{ use crate::{
connection::{TranscriptLength, VerifyData}, connection::{TranscriptLength, VerifyData},
fixtures::ConnectionFixture, fixtures::ConnectionFixture,
transcript::{Record, TlsTranscript}, transcript::{ContentType, Record, TlsTranscript},
}; };
/// The key used for encryption of the sent and received transcript. /// The key used for encryption of the sent and received transcript.
@@ -103,7 +104,7 @@ impl TranscriptGenerator {
let explicit_nonce: [u8; 8] = seq.to_be_bytes(); let explicit_nonce: [u8; 8] = seq.to_be_bytes();
let msg = PlainMessage { let msg = PlainMessage {
typ: ContentType::ApplicationData, typ: ContentType::ApplicationData.into(),
version: ProtocolVersion::TLSv1_2, version: ProtocolVersion::TLSv1_2,
payload: Payload::new(plaintext), payload: Payload::new(plaintext),
}; };
@@ -138,7 +139,7 @@ impl TranscriptGenerator {
handshake_message.encode(&mut plaintext); handshake_message.encode(&mut plaintext);
let msg = PlainMessage { let msg = PlainMessage {
typ: ContentType::Handshake, typ: ContentType::Handshake.into(),
version: ProtocolVersion::TLSv1_2, version: ProtocolVersion::TLSv1_2,
payload: Payload::new(plaintext.clone()), payload: Payload::new(plaintext.clone()),
}; };
@@ -180,6 +181,7 @@ fn aes_gcm_encrypt(
let mut nonce = [0u8; 12]; let mut nonce = [0u8; 12];
nonce[..4].copy_from_slice(&iv); nonce[..4].copy_from_slice(&iv);
nonce[4..].copy_from_slice(&explicit_nonce); nonce[4..].copy_from_slice(&explicit_nonce);
#[allow(deprecated)]
let nonce = GenericArray::from_slice(&nonce); let nonce = GenericArray::from_slice(&nonce);
let cipher = Aes128Gcm::new_from_slice(&key).unwrap(); let cipher = Aes128Gcm::new_from_slice(&key).unwrap();

View File

@@ -95,7 +95,7 @@ impl Display for HashAlgId {
} }
/// A typed hash value. /// A typed hash value.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct TypedHash { pub struct TypedHash {
/// The algorithm of the hash. /// The algorithm of the hash.
pub alg: HashAlgId, pub alg: HashAlgId,
@@ -191,6 +191,11 @@ impl Hash {
len: value.len(), len: value.len(),
} }
} }
/// Returns a byte slice of the hash value.
pub fn as_bytes(&self) -> &[u8] {
&self.value[..self.len]
}
} }
impl rs_merkle::Hash for Hash { impl rs_merkle::Hash for Hash {
@@ -291,14 +296,14 @@ mod sha2 {
fn hash(&self, data: &[u8]) -> super::Hash { fn hash(&self, data: &[u8]) -> super::Hash {
let mut hasher = ::sha2::Sha256::default(); let mut hasher = ::sha2::Sha256::default();
hasher.update(data); hasher.update(data);
super::Hash::new(hasher.finalize().as_slice()) super::Hash::new(hasher.finalize().as_ref())
} }
fn hash_prefixed(&self, prefix: &[u8], data: &[u8]) -> super::Hash { fn hash_prefixed(&self, prefix: &[u8], data: &[u8]) -> super::Hash {
let mut hasher = ::sha2::Sha256::default(); let mut hasher = ::sha2::Sha256::default();
hasher.update(prefix); hasher.update(prefix);
hasher.update(data); hasher.update(data);
super::Hash::new(hasher.finalize().as_slice()) super::Hash::new(hasher.finalize().as_ref())
} }
} }
} }

View File

@@ -12,228 +12,16 @@ pub mod merkle;
pub mod transcript; pub mod transcript;
pub mod webpki; pub mod webpki;
pub use rangeset; pub use rangeset;
pub mod config;
pub(crate) mod display; pub(crate) mod display;
use rangeset::{RangeSet, ToRangeSet, UnionMut};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::{ use crate::{
connection::{HandshakeData, ServerName}, connection::ServerName,
transcript::{ transcript::{PartialTranscript, TranscriptCommitment, TranscriptSecret},
Direction, PartialTranscript, Transcript, TranscriptCommitConfig, TranscriptCommitRequest,
TranscriptCommitment, TranscriptSecret,
},
}; };
/// Configuration to prove information to the verifier.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProveConfig {
    // Whether the server's identity is to be proven.
    server_identity: bool,
    // (sent, received) transcript ranges to reveal, if any.
    reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
    // Optional transcript commitment configuration.
    transcript_commit: Option<TranscriptCommitConfig>,
}
impl ProveConfig {
    /// Creates a new builder operating on the given transcript.
    pub fn builder(transcript: &Transcript) -> ProveConfigBuilder<'_> {
        ProveConfigBuilder::new(transcript)
    }

    /// Returns the transcript commitment configuration, if one was set.
    pub fn transcript_commit(&self) -> Option<&TranscriptCommitConfig> {
        self.transcript_commit.as_ref()
    }

    /// Returns the (sent, received) ranges of the transcript to be revealed.
    pub fn reveal(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
        self.reveal.as_ref()
    }

    /// Returns `true` if the server identity is to be proven.
    pub fn server_identity(&self) -> bool {
        self.server_identity
    }
}
/// Builder for [`ProveConfig`].
#[derive(Debug)]
pub struct ProveConfigBuilder<'a> {
    // Borrowed transcript, used to bounds-check reveal ranges.
    transcript: &'a Transcript,
    server_identity: bool,
    reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
    transcript_commit: Option<TranscriptCommitConfig>,
}
impl<'a> ProveConfigBuilder<'a> {
    /// Creates a new builder for the given transcript.
    pub fn new(transcript: &'a Transcript) -> Self {
        Self {
            transcript,
            server_identity: false,
            reveal: None,
            transcript_commit: None,
        }
    }

    /// Proves the server identity.
    pub fn server_identity(&mut self) -> &mut Self {
        self.server_identity = true;
        self
    }

    /// Configures transcript commitments.
    pub fn transcript_commit(&mut self, transcript_commit: TranscriptCommitConfig) -> &mut Self {
        self.transcript_commit = Some(transcript_commit);
        self
    }

    /// Reveals the given ranges of the transcript.
    ///
    /// # Errors
    ///
    /// Returns an error if any range extends past the end of the transcript
    /// for the given direction.
    pub fn reveal(
        &mut self,
        direction: Direction,
        ranges: &dyn ToRangeSet<usize>,
    ) -> Result<&mut Self, ProveConfigBuilderError> {
        let set = ranges.to_range_set();
        let len = self.transcript.len_of_direction(direction);
        // An empty set has no end; treat it as 0 so it is always in bounds.
        let actual = set.end().unwrap_or(0);

        if actual > len {
            return Err(ProveConfigBuilderError(
                ProveConfigBuilderErrorRepr::IndexOutOfBounds {
                    direction,
                    actual,
                    len,
                },
            ));
        }

        let (sent, recv) = self.reveal.get_or_insert_default();
        let target = match direction {
            Direction::Sent => sent,
            Direction::Received => recv,
        };
        target.union_mut(&set);

        Ok(self)
    }

    /// Reveals the given ranges of the sent data transcript.
    pub fn reveal_sent(
        &mut self,
        ranges: &dyn ToRangeSet<usize>,
    ) -> Result<&mut Self, ProveConfigBuilderError> {
        self.reveal(Direction::Sent, ranges)
    }

    /// Reveals the given ranges of the received data transcript.
    pub fn reveal_recv(
        &mut self,
        ranges: &dyn ToRangeSet<usize>,
    ) -> Result<&mut Self, ProveConfigBuilderError> {
        self.reveal(Direction::Received, ranges)
    }

    /// Reveals the full transcript range for a given direction.
    pub fn reveal_all(
        &mut self,
        direction: Direction,
    ) -> Result<&mut Self, ProveConfigBuilderError> {
        let full = 0..self.transcript.len_of_direction(direction);
        self.reveal(direction, &full)
    }

    /// Builds the configuration.
    pub fn build(self) -> Result<ProveConfig, ProveConfigBuilderError> {
        let Self {
            server_identity,
            reveal,
            transcript_commit,
            ..
        } = self;
        Ok(ProveConfig {
            server_identity,
            reveal,
            transcript_commit,
        })
    }
}
/// Error for [`ProveConfigBuilder`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct ProveConfigBuilderError(#[from] ProveConfigBuilderErrorRepr);

// Private error representation behind the opaque public error type.
#[derive(Debug, thiserror::Error)]
enum ProveConfigBuilderErrorRepr {
    /// A reveal range extended past the end of the transcript.
    #[error("range is out of bounds of the transcript ({direction}): {actual} > {len}")]
    IndexOutOfBounds {
        direction: Direction,
        actual: usize,
        len: usize,
    },
}
/// Configuration to verify information from the prover.
///
/// Currently carries no settings; it exists as an extension point.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct VerifyConfig {}

impl VerifyConfig {
    /// Creates a new builder.
    pub fn builder() -> VerifyConfigBuilder {
        VerifyConfigBuilder::default()
    }
}
/// Builder for [`VerifyConfig`].
#[derive(Debug, Default)]
pub struct VerifyConfigBuilder {}
impl VerifyConfigBuilder {
/// Creates a new builder.
pub fn new() -> Self {
Self {}
}
/// Builds the configuration.
pub fn build(self) -> Result<VerifyConfig, VerifyConfigBuilderError> {
Ok(VerifyConfig {})
}
}
/// Error for [`VerifyConfigBuilder`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct VerifyConfigBuilderError(#[from] VerifyConfigBuilderErrorRepr);

// Uninhabited today: building cannot fail, but the error type keeps the
// public signature stable if validation is added later.
#[derive(Debug, thiserror::Error)]
enum VerifyConfigBuilderErrorRepr {}
/// Request to prove statements about the connection.
#[doc(hidden)]
#[derive(Debug, Serialize, Deserialize)]
pub struct ProveRequest {
    /// Handshake data.
    pub handshake: Option<(ServerName, HandshakeData)>,
    /// Transcript data.
    pub transcript: Option<PartialTranscript>,
    /// Transcript commitment configuration.
    pub transcript_commit: Option<TranscriptCommitRequest>,
}
impl ProveRequest {
    /// Creates a new prove payload.
    ///
    /// # Arguments
    ///
    /// * `config` - The prove config, consulted for commitment settings.
    /// * `transcript` - The partial transcript.
    /// * `handshake` - The server name and handshake data.
    pub fn new(
        config: &ProveConfig,
        transcript: Option<PartialTranscript>,
        handshake: Option<(ServerName, HandshakeData)>,
    ) -> Self {
        Self {
            handshake,
            transcript,
            // Derive the commitment request from the config, when present.
            transcript_commit: config.transcript_commit().map(|c| c.to_request()),
        }
    }
}
/// Prover output. /// Prover output.
#[derive(Serialize, Deserialize)] #[derive(Serialize, Deserialize)]
pub struct ProverOutput { pub struct ProverOutput {

View File

@@ -63,11 +63,6 @@ impl MerkleProof {
Ok(()) Ok(())
} }
/// Returns the leaf count of the Merkle tree associated with the proof.
pub(crate) fn leaf_count(&self) -> usize {
self.leaf_count
}
} }
#[derive(Clone)] #[derive(Clone)]

View File

@@ -19,14 +19,17 @@
//! withheld. //! withheld.
mod commit; mod commit;
pub mod encoding;
pub mod hash; pub mod hash;
mod proof; mod proof;
mod tls; mod tls;
use std::{fmt, ops::Range}; use std::{fmt, ops::Range};
use rangeset::{Difference, IndexRanges, RangeSet, Union}; use rangeset::{
iter::RangeIterator,
ops::{Index, Set},
set::RangeSet,
};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::connection::TranscriptLength; use crate::connection::TranscriptLength;
@@ -38,8 +41,7 @@ pub use commit::{
pub use proof::{ pub use proof::{
TranscriptProof, TranscriptProofBuilder, TranscriptProofBuilderError, TranscriptProofError, TranscriptProof, TranscriptProofBuilder, TranscriptProofBuilderError, TranscriptProofError,
}; };
pub use tls::{Record, TlsTranscript}; pub use tls::{ContentType, Record, TlsTranscript};
pub use tls_core::msgs::enums::ContentType;
/// A transcript contains the plaintext of all application data communicated /// A transcript contains the plaintext of all application data communicated
/// between the Prover and the Server. /// between the Prover and the Server.
@@ -107,8 +109,14 @@ impl Transcript {
} }
Some( Some(
Subsequence::new(idx.clone(), data.index_ranges(idx)) Subsequence::new(
.expect("data is same length as index"), idx.clone(),
data.index(idx).fold(Vec::new(), |mut acc, s| {
acc.extend_from_slice(s);
acc
}),
)
.expect("data is same length as index"),
) )
} }
@@ -130,11 +138,11 @@ impl Transcript {
let mut sent = vec![0; self.sent.len()]; let mut sent = vec![0; self.sent.len()];
let mut received = vec![0; self.received.len()]; let mut received = vec![0; self.received.len()];
for range in sent_idx.iter_ranges() { for range in sent_idx.iter() {
sent[range.clone()].copy_from_slice(&self.sent[range]); sent[range.clone()].copy_from_slice(&self.sent[range]);
} }
for range in recv_idx.iter_ranges() { for range in recv_idx.iter() {
received[range.clone()].copy_from_slice(&self.received[range]); received[range.clone()].copy_from_slice(&self.received[range]);
} }
@@ -187,12 +195,20 @@ pub struct CompressedPartialTranscript {
impl From<PartialTranscript> for CompressedPartialTranscript { impl From<PartialTranscript> for CompressedPartialTranscript {
fn from(uncompressed: PartialTranscript) -> Self { fn from(uncompressed: PartialTranscript) -> Self {
Self { Self {
sent_authed: uncompressed sent_authed: uncompressed.sent.index(&uncompressed.sent_authed_idx).fold(
.sent Vec::new(),
.index_ranges(&uncompressed.sent_authed_idx), |mut acc, s| {
acc.extend_from_slice(s);
acc
},
),
received_authed: uncompressed received_authed: uncompressed
.received .received
.index_ranges(&uncompressed.received_authed_idx), .index(&uncompressed.received_authed_idx)
.fold(Vec::new(), |mut acc, s| {
acc.extend_from_slice(s);
acc
}),
sent_idx: uncompressed.sent_authed_idx, sent_idx: uncompressed.sent_authed_idx,
recv_idx: uncompressed.received_authed_idx, recv_idx: uncompressed.received_authed_idx,
sent_total: uncompressed.sent.len(), sent_total: uncompressed.sent.len(),
@@ -208,7 +224,7 @@ impl From<CompressedPartialTranscript> for PartialTranscript {
let mut offset = 0; let mut offset = 0;
for range in compressed.sent_idx.iter_ranges() { for range in compressed.sent_idx.iter() {
sent[range.clone()] sent[range.clone()]
.copy_from_slice(&compressed.sent_authed[offset..offset + range.len()]); .copy_from_slice(&compressed.sent_authed[offset..offset + range.len()]);
offset += range.len(); offset += range.len();
@@ -216,7 +232,7 @@ impl From<CompressedPartialTranscript> for PartialTranscript {
let mut offset = 0; let mut offset = 0;
for range in compressed.recv_idx.iter_ranges() { for range in compressed.recv_idx.iter() {
received[range.clone()] received[range.clone()]
.copy_from_slice(&compressed.received_authed[offset..offset + range.len()]); .copy_from_slice(&compressed.received_authed[offset..offset + range.len()]);
offset += range.len(); offset += range.len();
@@ -305,12 +321,16 @@ impl PartialTranscript {
/// Returns the index of sent data which haven't been authenticated. /// Returns the index of sent data which haven't been authenticated.
pub fn sent_unauthed(&self) -> RangeSet<usize> { pub fn sent_unauthed(&self) -> RangeSet<usize> {
(0..self.sent.len()).difference(&self.sent_authed_idx) (0..self.sent.len())
.difference(&self.sent_authed_idx)
.into_set()
} }
/// Returns the index of received data which haven't been authenticated. /// Returns the index of received data which haven't been authenticated.
pub fn received_unauthed(&self) -> RangeSet<usize> { pub fn received_unauthed(&self) -> RangeSet<usize> {
(0..self.received.len()).difference(&self.received_authed_idx) (0..self.received.len())
.difference(&self.received_authed_idx)
.into_set()
} }
/// Returns an iterator over the authenticated data in the transcript. /// Returns an iterator over the authenticated data in the transcript.
@@ -320,7 +340,7 @@ impl PartialTranscript {
Direction::Received => (&self.received, &self.received_authed_idx), Direction::Received => (&self.received, &self.received_authed_idx),
}; };
authed.iter().map(|i| data[i]) authed.iter_values().map(move |i| data[i])
} }
/// Unions the authenticated data of this transcript with another. /// Unions the authenticated data of this transcript with another.
@@ -340,24 +360,20 @@ impl PartialTranscript {
"received data are not the same length" "received data are not the same length"
); );
for range in other for range in other.sent_authed_idx.difference(&self.sent_authed_idx) {
.sent_authed_idx
.difference(&self.sent_authed_idx)
.iter_ranges()
{
self.sent[range.clone()].copy_from_slice(&other.sent[range]); self.sent[range.clone()].copy_from_slice(&other.sent[range]);
} }
for range in other for range in other
.received_authed_idx .received_authed_idx
.difference(&self.received_authed_idx) .difference(&self.received_authed_idx)
.iter_ranges()
{ {
self.received[range.clone()].copy_from_slice(&other.received[range]); self.received[range.clone()].copy_from_slice(&other.received[range]);
} }
self.sent_authed_idx = self.sent_authed_idx.union(&other.sent_authed_idx); self.sent_authed_idx.union_mut(&other.sent_authed_idx);
self.received_authed_idx = self.received_authed_idx.union(&other.received_authed_idx); self.received_authed_idx
.union_mut(&other.received_authed_idx);
} }
/// Unions an authenticated subsequence into this transcript. /// Unions an authenticated subsequence into this transcript.
@@ -369,11 +385,11 @@ impl PartialTranscript {
match direction { match direction {
Direction::Sent => { Direction::Sent => {
seq.copy_to(&mut self.sent); seq.copy_to(&mut self.sent);
self.sent_authed_idx = self.sent_authed_idx.union(&seq.idx); self.sent_authed_idx.union_mut(&seq.idx);
} }
Direction::Received => { Direction::Received => {
seq.copy_to(&mut self.received); seq.copy_to(&mut self.received);
self.received_authed_idx = self.received_authed_idx.union(&seq.idx); self.received_authed_idx.union_mut(&seq.idx);
} }
} }
} }
@@ -384,10 +400,10 @@ impl PartialTranscript {
/// ///
/// * `value` - The value to set the unauthenticated bytes to /// * `value` - The value to set the unauthenticated bytes to
pub fn set_unauthed(&mut self, value: u8) { pub fn set_unauthed(&mut self, value: u8) {
for range in self.sent_unauthed().iter_ranges() { for range in self.sent_unauthed().iter() {
self.sent[range].fill(value); self.sent[range].fill(value);
} }
for range in self.received_unauthed().iter_ranges() { for range in self.received_unauthed().iter() {
self.received[range].fill(value); self.received[range].fill(value);
} }
} }
@@ -402,13 +418,13 @@ impl PartialTranscript {
pub fn set_unauthed_range(&mut self, value: u8, direction: Direction, range: Range<usize>) { pub fn set_unauthed_range(&mut self, value: u8, direction: Direction, range: Range<usize>) {
match direction { match direction {
Direction::Sent => { Direction::Sent => {
for range in range.difference(&self.sent_authed_idx).iter_ranges() { for r in range.difference(&self.sent_authed_idx) {
self.sent[range].fill(value); self.sent[r].fill(value);
} }
} }
Direction::Received => { Direction::Received => {
for range in range.difference(&self.received_authed_idx).iter_ranges() { for r in range.difference(&self.received_authed_idx) {
self.received[range].fill(value); self.received[r].fill(value);
} }
} }
} }
@@ -486,7 +502,7 @@ impl Subsequence {
/// Panics if the subsequence ranges are out of bounds. /// Panics if the subsequence ranges are out of bounds.
pub(crate) fn copy_to(&self, dest: &mut [u8]) { pub(crate) fn copy_to(&self, dest: &mut [u8]) {
let mut offset = 0; let mut offset = 0;
for range in self.idx.iter_ranges() { for range in self.idx.iter() {
dest[range.clone()].copy_from_slice(&self.data[offset..offset + range.len()]); dest[range.clone()].copy_from_slice(&self.data[offset..offset + range.len()]);
offset += range.len(); offset += range.len();
} }
@@ -611,12 +627,7 @@ mod validation {
mut partial_transcript: CompressedPartialTranscriptUnchecked, mut partial_transcript: CompressedPartialTranscriptUnchecked,
) { ) {
// Change the total to be less than the last range's end bound. // Change the total to be less than the last range's end bound.
let end = partial_transcript let end = partial_transcript.sent_idx.iter().next_back().unwrap().end;
.sent_idx
.iter_ranges()
.next_back()
.unwrap()
.end;
partial_transcript.sent_total = end - 1; partial_transcript.sent_total = end - 1;

View File

@@ -1,34 +1,22 @@
//! Transcript commitments. //! Transcript commitments.
use std::fmt; use std::{collections::HashSet, fmt};
use rangeset::ToRangeSet; use rangeset::set::ToRangeSet;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::{ use crate::{
hash::HashAlgId, hash::HashAlgId,
transcript::{ transcript::{
encoding::{EncodingCommitment, EncodingTree},
hash::{PlaintextHash, PlaintextHashSecret}, hash::{PlaintextHash, PlaintextHashSecret},
Direction, RangeSet, Transcript, Direction, RangeSet, Transcript,
}, },
}; };
/// The maximum allowed total bytelength of committed data for a single
/// commitment kind. Used to prevent DoS during verification. (May cause the
/// verifier to hash up to a max of 1GB * 128 = 128GB of data for certain kinds
/// of encoding commitments.)
///
/// This value must not exceed bcs's MAX_SEQUENCE_LENGTH limit (which is (1 <<
/// 31) - 1 by default)
pub(crate) const MAX_TOTAL_COMMITTED_DATA: usize = 1_000_000_000;
/// Kind of transcript commitment. /// Kind of transcript commitment.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[non_exhaustive] #[non_exhaustive]
pub enum TranscriptCommitmentKind { pub enum TranscriptCommitmentKind {
/// A commitment to encodings of the transcript.
Encoding,
/// A hash commitment to plaintext in the transcript. /// A hash commitment to plaintext in the transcript.
Hash { Hash {
/// The hash algorithm used. /// The hash algorithm used.
@@ -39,7 +27,6 @@ pub enum TranscriptCommitmentKind {
impl fmt::Display for TranscriptCommitmentKind { impl fmt::Display for TranscriptCommitmentKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self { match self {
Self::Encoding => f.write_str("encoding"),
Self::Hash { alg } => write!(f, "hash ({alg})"), Self::Hash { alg } => write!(f, "hash ({alg})"),
} }
} }
@@ -49,8 +36,6 @@ impl fmt::Display for TranscriptCommitmentKind {
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
#[non_exhaustive] #[non_exhaustive]
pub enum TranscriptCommitment { pub enum TranscriptCommitment {
/// Encoding commitment.
Encoding(EncodingCommitment),
/// Plaintext hash commitment. /// Plaintext hash commitment.
Hash(PlaintextHash), Hash(PlaintextHash),
} }
@@ -59,8 +44,6 @@ pub enum TranscriptCommitment {
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
#[non_exhaustive] #[non_exhaustive]
pub enum TranscriptSecret { pub enum TranscriptSecret {
/// Encoding tree.
Encoding(EncodingTree),
/// Plaintext hash secret. /// Plaintext hash secret.
Hash(PlaintextHashSecret), Hash(PlaintextHashSecret),
} }
@@ -68,7 +51,6 @@ pub enum TranscriptSecret {
/// Configuration for transcript commitments. /// Configuration for transcript commitments.
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TranscriptCommitConfig { pub struct TranscriptCommitConfig {
encoding_hash_alg: HashAlgId,
commits: Vec<((Direction, RangeSet<usize>), TranscriptCommitmentKind)>, commits: Vec<((Direction, RangeSet<usize>), TranscriptCommitmentKind)>,
} }
@@ -78,34 +60,23 @@ impl TranscriptCommitConfig {
TranscriptCommitConfigBuilder::new(transcript) TranscriptCommitConfigBuilder::new(transcript)
} }
/// Returns the hash algorithm to use for encoding commitments. /// Returns `true` if the configuration has any hash commitments.
pub fn encoding_hash_alg(&self) -> &HashAlgId { pub fn has_hash(&self) -> bool {
&self.encoding_hash_alg self.commits
} .iter()
.any(|(_, kind)| matches!(kind, TranscriptCommitmentKind::Hash { .. }))
/// Returns an iterator over the encoding commitment indices.
pub fn iter_encoding(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>)> {
self.commits.iter().filter_map(|(idx, kind)| match kind {
TranscriptCommitmentKind::Encoding => Some(idx),
_ => None,
})
} }
/// Returns an iterator over the hash commitment indices. /// Returns an iterator over the hash commitment indices.
pub fn iter_hash(&self) -> impl Iterator<Item = (&(Direction, RangeSet<usize>), &HashAlgId)> { pub fn iter_hash(&self) -> impl Iterator<Item = (&(Direction, RangeSet<usize>), &HashAlgId)> {
self.commits.iter().filter_map(|(idx, kind)| match kind { self.commits.iter().map(|(idx, kind)| match kind {
TranscriptCommitmentKind::Hash { alg } => Some((idx, alg)), TranscriptCommitmentKind::Hash { alg } => (idx, alg),
_ => None,
}) })
} }
/// Returns a request for the transcript commitments. /// Returns a request for the transcript commitments.
pub fn to_request(&self) -> TranscriptCommitRequest { pub fn to_request(&self) -> TranscriptCommitRequest {
TranscriptCommitRequest { TranscriptCommitRequest {
encoding: self
.iter_encoding()
.map(|(dir, idx)| (*dir, idx.clone()))
.collect(),
hash: self hash: self
.iter_hash() .iter_hash()
.map(|((dir, idx), alg)| (*dir, idx.clone(), *alg)) .map(|((dir, idx), alg)| (*dir, idx.clone(), *alg))
@@ -115,15 +86,11 @@ impl TranscriptCommitConfig {
} }
/// A builder for [`TranscriptCommitConfig`]. /// A builder for [`TranscriptCommitConfig`].
///
/// The default hash algorithm is [`HashAlgId::BLAKE3`] and the default kind
/// is [`TranscriptCommitmentKind::Encoding`].
#[derive(Debug)] #[derive(Debug)]
pub struct TranscriptCommitConfigBuilder<'a> { pub struct TranscriptCommitConfigBuilder<'a> {
transcript: &'a Transcript, transcript: &'a Transcript,
encoding_hash_alg: HashAlgId,
default_kind: TranscriptCommitmentKind, default_kind: TranscriptCommitmentKind,
commits: Vec<((Direction, RangeSet<usize>), TranscriptCommitmentKind)>, commits: HashSet<((Direction, RangeSet<usize>), TranscriptCommitmentKind)>,
} }
impl<'a> TranscriptCommitConfigBuilder<'a> { impl<'a> TranscriptCommitConfigBuilder<'a> {
@@ -131,18 +98,13 @@ impl<'a> TranscriptCommitConfigBuilder<'a> {
pub fn new(transcript: &'a Transcript) -> Self { pub fn new(transcript: &'a Transcript) -> Self {
Self { Self {
transcript, transcript,
encoding_hash_alg: HashAlgId::BLAKE3, default_kind: TranscriptCommitmentKind::Hash {
default_kind: TranscriptCommitmentKind::Encoding, alg: HashAlgId::BLAKE3,
commits: Vec::default(), },
commits: HashSet::default(),
} }
} }
/// Sets the hash algorithm to use for encoding commitments.
pub fn encoding_hash_alg(&mut self, alg: HashAlgId) -> &mut Self {
self.encoding_hash_alg = alg;
self
}
/// Sets the default kind of commitment to use. /// Sets the default kind of commitment to use.
pub fn default_kind(&mut self, default_kind: TranscriptCommitmentKind) -> &mut Self { pub fn default_kind(&mut self, default_kind: TranscriptCommitmentKind) -> &mut Self {
self.default_kind = default_kind; self.default_kind = default_kind;
@@ -175,11 +137,8 @@ impl<'a> TranscriptCommitConfigBuilder<'a> {
), ),
)); ));
} }
let value = ((direction, idx), kind);
if !self.commits.contains(&value) { self.commits.insert(((direction, idx), kind));
self.commits.push(value);
}
Ok(self) Ok(self)
} }
@@ -225,7 +184,6 @@ impl<'a> TranscriptCommitConfigBuilder<'a> {
/// Builds the configuration. /// Builds the configuration.
pub fn build(self) -> Result<TranscriptCommitConfig, TranscriptCommitConfigBuilderError> { pub fn build(self) -> Result<TranscriptCommitConfig, TranscriptCommitConfigBuilderError> {
Ok(TranscriptCommitConfig { Ok(TranscriptCommitConfig {
encoding_hash_alg: self.encoding_hash_alg,
commits: Vec::from_iter(self.commits), commits: Vec::from_iter(self.commits),
}) })
} }
@@ -272,14 +230,13 @@ impl fmt::Display for TranscriptCommitConfigBuilderError {
/// Request to compute transcript commitments. /// Request to compute transcript commitments.
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TranscriptCommitRequest { pub struct TranscriptCommitRequest {
encoding: Vec<(Direction, RangeSet<usize>)>,
hash: Vec<(Direction, RangeSet<usize>, HashAlgId)>, hash: Vec<(Direction, RangeSet<usize>, HashAlgId)>,
} }
impl TranscriptCommitRequest { impl TranscriptCommitRequest {
/// Returns an iterator over the encoding commitments. /// Returns `true` if a hash commitment is requested.
pub fn iter_encoding(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>)> { pub fn has_hash(&self) -> bool {
self.encoding.iter() !self.hash.is_empty()
} }
/// Returns an iterator over the hash commitments. /// Returns an iterator over the hash commitments.

View File

@@ -1,24 +0,0 @@
//! Transcript encoding commitments and proofs.
mod encoder;
mod proof;
mod provider;
mod tree;
pub use encoder::{new_encoder, Encoder, EncoderSecret};
pub use proof::{EncodingProof, EncodingProofError};
pub use provider::{EncodingProvider, EncodingProviderError};
pub use tree::{EncodingTree, EncodingTreeError};
use serde::{Deserialize, Serialize};
use crate::hash::TypedHash;
/// Transcript encoding commitment.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct EncodingCommitment {
/// Merkle root of the encoding commitments.
pub root: TypedHash,
/// Seed used to generate the encodings.
pub secret: EncoderSecret,
}

View File

@@ -1,137 +0,0 @@
use std::ops::Range;
use crate::transcript::Direction;
use itybity::ToBits;
use rand::{RngCore, SeedableRng};
use rand_chacha::ChaCha12Rng;
use serde::{Deserialize, Serialize};
/// The size of the encoding for 1 bit, in bytes.
const BIT_ENCODING_SIZE: usize = 16;
/// The size of the encoding for 1 byte, in bytes.
///
/// A byte is 8 bits, each encoded with `BIT_ENCODING_SIZE` (16) bytes,
/// hence 8 * 16 = 128.
const BYTE_ENCODING_SIZE: usize = 128;
/// Secret used by an encoder to generate encodings.
#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub struct EncoderSecret {
    // Seed for the PRG which produces the zero-encodings.
    seed: [u8; 32],
    // XOR-delta applied to a zero-encoding to derive a bit's one-encoding
    // (see `ChaChaEncoder::encode_data`).
    delta: [u8; BIT_ENCODING_SIZE],
}
// Debug must not leak the secret material; opaque_debug prints the type name
// only.
opaque_debug::implement!(EncoderSecret);
impl EncoderSecret {
/// Creates a new secret.
///
/// # Arguments
///
/// * `seed` - The seed for the PRG.
/// * `delta` - Delta for deriving the one-encodings.
pub fn new(seed: [u8; 32], delta: [u8; 16]) -> Self {
Self { seed, delta }
}
/// Returns the seed.
pub fn seed(&self) -> &[u8; 32] {
&self.seed
}
/// Returns the delta.
pub fn delta(&self) -> &[u8; 16] {
&self.delta
}
}
/// Creates a new encoder.
///
/// Returns an opaque [`Encoder`] backed by a ChaCha12 PRG seeded from the
/// given secret.
pub fn new_encoder(secret: &EncoderSecret) -> impl Encoder {
    ChaChaEncoder::new(secret)
}
/// ChaCha12-based transcript encoder.
pub(crate) struct ChaChaEncoder {
    // PRG seed, copied from `EncoderSecret::seed`.
    seed: [u8; 32],
    // One-encoding delta, copied from `EncoderSecret::delta`.
    delta: [u8; 16],
}
impl ChaChaEncoder {
    /// Creates a new encoder from the secret.
    pub(crate) fn new(secret: &EncoderSecret) -> Self {
        let seed = *secret.seed();
        let delta = *secret.delta();
        Self { seed, delta }
    }
    /// Returns a fresh PRG positioned at the start of the given stream.
    ///
    /// Streams separate the two transcript directions (see `encode_range`,
    /// which maps Sent -> 0 and Received -> 1).
    pub(crate) fn new_prg(&self, stream_id: u64) -> ChaCha12Rng {
        let mut prg = ChaCha12Rng::from_seed(self.seed);
        prg.set_stream(stream_id);
        // Explicitly reset the word position so callers get a deterministic
        // starting point before seeking.
        prg.set_word_pos(0);
        prg
    }
}
/// A transcript encoder.
///
/// This is an internal implementation detail that should not be exposed to the
/// public API.
pub trait Encoder {
    /// Writes the zero encoding for the given range of the transcript into the
    /// destination buffer.
    ///
    /// * `direction` - Direction of the transcript data.
    /// * `range` - Byte range of the transcript to encode.
    /// * `dest` - Buffer appended to (existing contents are preserved).
    fn encode_range(&self, direction: Direction, range: Range<usize>, dest: &mut Vec<u8>);
    /// Writes the encoding for the given data into the destination buffer.
    ///
    /// * `data` - Plaintext bytes occupying `range` of the transcript.
    fn encode_data(
        &self,
        direction: Direction,
        range: Range<usize>,
        data: &[u8],
        dest: &mut Vec<u8>,
    );
}
impl Encoder for ChaChaEncoder {
    fn encode_range(&self, direction: Direction, range: Range<usize>, dest: &mut Vec<u8>) {
        // ChaCha encoder works with 32-bit words. Each encoded bit is 128 bits long.
        const WORDS_PER_BYTE: u128 = 8 * 128 / 32;
        let stream_id: u64 = match direction {
            Direction::Sent => 0,
            Direction::Received => 1,
        };
        let mut prg = self.new_prg(stream_id);
        let len = range.len() * BYTE_ENCODING_SIZE;
        let pos = dest.len();
        // Write 0s to the destination buffer.
        dest.resize(pos + len, 0);
        // Fill the destination buffer with the PRG.
        //
        // Seeking by word position makes the encoding of a byte independent of
        // which range it was requested through: byte `i` always uses the same
        // PRG output.
        prg.set_word_pos(range.start as u128 * WORDS_PER_BYTE);
        prg.fill_bytes(&mut dest[pos..pos + len]);
    }
    fn encode_data(
        &self,
        direction: Direction,
        range: Range<usize>,
        data: &[u8],
        dest: &mut Vec<u8>,
    ) {
        const ZERO: [u8; 16] = [0; BIT_ENCODING_SIZE];
        let pos = dest.len();
        // Write the zero encoding for the given range.
        self.encode_range(direction, range, dest);
        // Only operate on the bytes appended above.
        let dest = &mut dest[pos..];
        // Iterate over `data` bit by bit, least-significant bit first, to
        // match the layout produced by `encode_range`.
        for (pos, bit) in data.iter_lsb0().enumerate() {
            // Add the delta to the encoding whenever the encoded bit is 1,
            // otherwise add a zero.
            let summand = if bit { &self.delta } else { &ZERO };
            dest[pos * BIT_ENCODING_SIZE..(pos + 1) * BIT_ENCODING_SIZE]
                .iter_mut()
                .zip(summand)
                .for_each(|(a, b)| *a ^= *b);
        }
    }
}

View File

@@ -1,357 +0,0 @@
use std::{collections::HashMap, fmt};
use rangeset::{RangeSet, UnionMut};
use serde::{Deserialize, Serialize};
use crate::{
hash::{Blinder, HashProvider, HashProviderError},
merkle::{MerkleError, MerkleProof},
transcript::{
commit::MAX_TOTAL_COMMITTED_DATA,
encoding::{new_encoder, Encoder, EncodingCommitment},
Direction,
},
};
/// An opening of a leaf in the encoding tree.
#[derive(Clone, Serialize, Deserialize)]
pub(super) struct Opening {
    // Direction of the transcript data this leaf commits to.
    pub(super) direction: Direction,
    // Transcript byte ranges covered by this leaf.
    pub(super) idx: RangeSet<usize>,
    // Blinder appended to the encoding before hashing the leaf.
    pub(super) blinder: Blinder,
}
// Opaque Debug: avoid printing the blinder.
opaque_debug::implement!(Opening);
/// An encoding commitment proof.
// Deserialization is routed through `validation::EncodingProofUnchecked`,
// which bounds the Merkle tree height before accepting the proof.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(try_from = "validation::EncodingProofUnchecked")]
pub struct EncodingProof {
    /// The proof of inclusion of the commitment(s) in the Merkle tree of
    /// commitments.
    pub(super) inclusion_proof: MerkleProof,
    /// Openings keyed by leaf index in the tree.
    pub(super) openings: HashMap<usize, Opening>,
}
impl EncodingProof {
    /// Verifies the proof against the commitment.
    ///
    /// Returns the authenticated indices of the sent and received data,
    /// respectively.
    ///
    /// # Arguments
    ///
    /// * `provider` - Hash provider.
    /// * `commitment` - Encoding commitment to verify against.
    /// * `sent` - Sent data to authenticate.
    /// * `recv` - Received data to authenticate.
    pub fn verify_with_provider(
        &self,
        provider: &HashProvider,
        commitment: &EncodingCommitment,
        sent: &[u8],
        recv: &[u8],
    ) -> Result<(RangeSet<usize>, RangeSet<usize>), EncodingProofError> {
        let hasher = provider.get(&commitment.root.alg)?;
        let encoder = new_encoder(&commitment.secret);
        let Self {
            inclusion_proof,
            openings,
        } = self;
        let mut leaves = Vec::with_capacity(openings.len());
        // Scratch buffer reused across openings to avoid per-leaf allocation.
        let mut expected_leaf = Vec::default();
        let mut total_opened = 0u128;
        let mut auth_sent = RangeSet::default();
        let mut auth_recv = RangeSet::default();
        for (
            id,
            Opening {
                direction,
                idx,
                blinder,
            },
        ) in openings
        {
            // Make sure the amount of data being proved is bounded.
            total_opened += idx.len() as u128;
            if total_opened > MAX_TOTAL_COMMITTED_DATA as u128 {
                return Err(EncodingProofError::new(
                    ErrorKind::Proof,
                    "exceeded maximum allowed data",
                ));
            }
            let (data, auth) = match direction {
                Direction::Sent => (sent, &mut auth_sent),
                Direction::Received => (recv, &mut auth_recv),
            };
            // Make sure the ranges are within the bounds of the transcript.
            if idx.end().unwrap_or(0) > data.len() {
                return Err(EncodingProofError::new(
                    ErrorKind::Proof,
                    format!(
                        "index out of bounds of the transcript ({}): {} > {}",
                        direction,
                        idx.end().unwrap_or(0),
                        data.len()
                    ),
                ));
            }
            // Re-encode the purported plaintext and blind it, reconstructing
            // what the leaf must hash to if the data is authentic.
            expected_leaf.clear();
            for range in idx.iter_ranges() {
                encoder.encode_data(*direction, range.clone(), &data[range], &mut expected_leaf);
            }
            expected_leaf.extend_from_slice(blinder.as_bytes());
            // Compute the expected hash of the commitment to make sure it is
            // present in the merkle tree.
            leaves.push((*id, hasher.hash(&expected_leaf)));
            auth.union_mut(idx);
        }
        // Verify that the expected hashes are present in the merkle tree.
        //
        // This proves the Prover committed to the purported data prior to the encoder
        // seed being revealed. Ergo, if the encodings are authentic then the purported
        // data is authentic.
        inclusion_proof.verify(hasher, &commitment.root, leaves)?;
        Ok((auth_sent, auth_recv))
    }
}
/// Error for [`EncodingProof`].
#[derive(Debug, thiserror::Error)]
pub struct EncodingProofError {
    // Broad category of the failure; see `ErrorKind`.
    kind: ErrorKind,
    // Underlying cause, if any.
    source: Option<Box<dyn std::error::Error + Send + Sync>>,
}
impl EncodingProofError {
    /// Creates a new error of the given kind wrapping `source`.
    fn new<E>(kind: ErrorKind, source: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        Self {
            kind,
            source: Some(source.into()),
        }
    }
}
// Category of an `EncodingProofError`.
#[derive(Debug)]
enum ErrorKind {
    // Failure originating from the hash provider.
    Provider,
    // The proof itself is invalid (bounds, size limits, Merkle verification).
    Proof,
}
impl fmt::Display for EncodingProofError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("encoding proof error: ")?;
match self.kind {
ErrorKind::Provider => f.write_str("provider error")?,
ErrorKind::Proof => f.write_str("proof error")?,
}
if let Some(source) = &self.source {
write!(f, " caused by: {source}")?;
}
Ok(())
}
}
// Hash-provider failures map to the `Provider` kind.
impl From<HashProviderError> for EncodingProofError {
    fn from(error: HashProviderError) -> Self {
        Self::new(ErrorKind::Provider, error)
    }
}
// Merkle verification failures map to the `Proof` kind.
impl From<MerkleError> for EncodingProofError {
    fn from(error: MerkleError) -> Self {
        Self::new(ErrorKind::Proof, error)
    }
}
/// Invalid encoding proof error.
///
/// Returned when deserialization-time validation rejects a proof (see the
/// `validation` module below).
#[derive(Debug, thiserror::Error)]
#[error("invalid encoding proof: {0}")]
pub struct InvalidEncodingProof(&'static str);
// Deserialization-time validation for `EncodingProof`.
mod validation {
    use super::*;
    /// The maximum allowed height of the Merkle tree of encoding commitments.
    ///
    /// The statistical security parameter (SSP) of the encoding commitment
    /// protocol is calculated as "the number of uniformly random bits in a
    /// single bit's encoding minus `MAX_HEIGHT`".
    ///
    /// For example, a bit encoding used in garbled circuits typically has 127
    /// uniformly random bits, hence when using it in the encoding
    /// commitment protocol, the SSP is 127 - 30 = 97 bits.
    ///
    /// Leaving this validation here as a fail-safe in case we ever start
    /// using shorter encodings.
    const MAX_HEIGHT: usize = 30;
    // Mirror of `EncodingProof` that serde deserializes into before checks.
    #[derive(Debug, Deserialize)]
    pub(super) struct EncodingProofUnchecked {
        inclusion_proof: MerkleProof,
        openings: HashMap<usize, Opening>,
    }
    impl TryFrom<EncodingProofUnchecked> for EncodingProof {
        type Error = InvalidEncodingProof;
        // Rejects trees taller than MAX_HEIGHT (i.e. more than 2^MAX_HEIGHT
        // leaves); otherwise passes fields through unchanged.
        fn try_from(unchecked: EncodingProofUnchecked) -> Result<Self, Self::Error> {
            if unchecked.inclusion_proof.leaf_count() > 1 << MAX_HEIGHT {
                return Err(InvalidEncodingProof(
                    "the height of the tree exceeds the maximum allowed",
                ));
            }
            Ok(Self {
                inclusion_proof: unchecked.inclusion_proof,
                openings: unchecked.openings,
            })
        }
    }
}
#[cfg(test)]
mod test {
    use tlsn_data_fixtures::http::{request::POST_JSON, response::OK_JSON};
    use crate::{
        fixtures::{encoder_secret, encoder_secret_tampered_seed, encoding_provider},
        hash::Blake3,
        transcript::{
            encoding::{EncoderSecret, EncodingTree},
            Transcript,
        },
    };
    use super::*;
    // A transcript together with a proof/commitment pair over its full
    // contents, used as the starting point for each tamper test.
    struct EncodingFixture {
        transcript: Transcript,
        proof: EncodingProof,
        commitment: EncodingCommitment,
    }
    // Builds a fixture committing to the entire sent and received transcript,
    // using `secret` as the commitment's encoder secret.
    fn new_encoding_fixture(secret: EncoderSecret) -> EncodingFixture {
        let transcript = Transcript::new(POST_JSON, OK_JSON);
        let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
        let idx_1 = (Direction::Received, RangeSet::from(0..OK_JSON.len()));
        let provider = encoding_provider(transcript.sent(), transcript.received());
        let tree = EncodingTree::new(&Blake3::default(), [&idx_0, &idx_1], &provider).unwrap();
        let proof = tree.proof([&idx_0, &idx_1].into_iter()).unwrap();
        let commitment = EncodingCommitment {
            root: tree.root(),
            secret,
        };
        EncodingFixture {
            transcript,
            proof,
            commitment,
        }
    }
    // A commitment built with a tampered seed must fail verification.
    #[test]
    fn test_verify_encoding_proof_tampered_seed() {
        let EncodingFixture {
            transcript,
            proof,
            commitment,
        } = new_encoding_fixture(encoder_secret_tampered_seed());
        let err = proof
            .verify_with_provider(
                &HashProvider::default(),
                &commitment,
                transcript.sent(),
                transcript.received(),
            )
            .unwrap_err();
        assert!(matches!(err.kind, ErrorKind::Proof));
    }
    // Opening indices beyond the supplied transcript slices must be rejected.
    #[test]
    fn test_verify_encoding_proof_out_of_range() {
        let EncodingFixture {
            transcript,
            proof,
            commitment,
        } = new_encoding_fixture(encoder_secret());
        let sent = &transcript.sent()[transcript.sent().len() - 1..];
        let recv = &transcript.received()[transcript.received().len() - 2..];
        let err = proof
            .verify_with_provider(&HashProvider::default(), &commitment, sent, recv)
            .unwrap_err();
        assert!(matches!(err.kind, ErrorKind::Proof));
    }
    // Altering an opening's index after proof creation must fail verification.
    #[test]
    fn test_verify_encoding_proof_tampered_idx() {
        let EncodingFixture {
            transcript,
            mut proof,
            commitment,
        } = new_encoding_fixture(encoder_secret());
        let Opening { idx, .. } = proof.openings.values_mut().next().unwrap();
        *idx = RangeSet::from([0..3, 13..15]);
        let err = proof
            .verify_with_provider(
                &HashProvider::default(),
                &commitment,
                transcript.sent(),
                transcript.received(),
            )
            .unwrap_err();
        assert!(matches!(err.kind, ErrorKind::Proof));
    }
    // Altering an opening's blinder must fail verification.
    #[test]
    fn test_verify_encoding_proof_tampered_encoding_blinder() {
        let EncodingFixture {
            transcript,
            mut proof,
            commitment,
        } = new_encoding_fixture(encoder_secret());
        let Opening { blinder, .. } = proof.openings.values_mut().next().unwrap();
        *blinder = rand::random();
        let err = proof
            .verify_with_provider(
                &HashProvider::default(),
                &commitment,
                transcript.sent(),
                transcript.received(),
            )
            .unwrap_err();
        assert!(matches!(err.kind, ErrorKind::Proof));
    }
}

View File

@@ -1,19 +0,0 @@
use std::ops::Range;
use crate::transcript::Direction;
/// A provider of plaintext encodings.
pub trait EncodingProvider {
    /// Writes the encoding of the given range into the destination buffer.
    ///
    /// * `direction` - Direction of the transcript data.
    /// * `range` - Byte range of the transcript to provide encodings for.
    /// * `dest` - Buffer the encoding is appended to.
    fn provide_encoding(
        &self,
        direction: Direction,
        range: Range<usize>,
        dest: &mut Vec<u8>,
    ) -> Result<(), EncodingProviderError>;
}
/// Error for [`EncodingProvider`].
///
/// Returned when the provider has no encoding for the requested range.
#[derive(Debug, thiserror::Error)]
#[error("failed to provide encoding")]
pub struct EncodingProviderError;

View File

@@ -1,331 +0,0 @@
use std::collections::HashMap;
use bimap::BiMap;
use rangeset::{RangeSet, UnionMut};
use serde::{Deserialize, Serialize};
use crate::{
hash::{Blinder, HashAlgId, HashAlgorithm, TypedHash},
merkle::MerkleTree,
transcript::{
encoding::{
proof::{EncodingProof, Opening},
EncodingProvider,
},
Direction,
},
};
/// Encoding tree builder error.
#[derive(Debug, thiserror::Error)]
pub enum EncodingTreeError {
    /// Index is out of bounds of the transcript.
    // NOTE(review): in this module out-of-range indices typically surface as
    // `MissingEncoding` instead, because the provider fails first — see
    // `test_encoding_tree_out_of_bounds`.
    #[error("index is out of bounds of the transcript")]
    OutOfBounds {
        /// The index.
        index: RangeSet<usize>,
        /// The transcript length.
        transcript_length: usize,
    },
    /// Encoding provider is missing an encoding for an index.
    // Returned by `EncodingTree::new`.
    #[error("encoding provider is missing an encoding for an index")]
    MissingEncoding {
        /// The index which is missing.
        index: RangeSet<usize>,
    },
    /// Index is missing from the tree.
    // Returned by `EncodingTree::proof` for indices never committed.
    #[error("index is missing from the tree")]
    MissingLeaf {
        /// The index which is missing.
        index: RangeSet<usize>,
    },
}
/// A merkle tree of transcript encodings.
#[derive(Clone, Serialize, Deserialize)]
pub struct EncodingTree {
    /// Merkle tree of the commitments.
    tree: MerkleTree,
    /// Nonces used to blind the hashes.
    // blinders[i] blinds leaf i; kept parallel to the leaf order in `idxs`.
    blinders: Vec<Blinder>,
    /// Mapping between the index of a leaf and the transcript index it
    /// corresponds to.
    idxs: BiMap<usize, (Direction, RangeSet<usize>)>,
    /// Union of all transcript indices in the sent direction.
    sent_idx: RangeSet<usize>,
    /// Union of all transcript indices in the received direction.
    received_idx: RangeSet<usize>,
}
// Opaque Debug: avoid printing the blinders.
opaque_debug::implement!(EncodingTree);
impl EncodingTree {
    /// Creates a new encoding tree.
    ///
    /// # Arguments
    ///
    /// * `hasher` - The hash algorithm to use.
    /// * `idxs` - The subsequence indices to commit to.
    /// * `provider` - The encoding provider.
    pub fn new<'idx>(
        hasher: &dyn HashAlgorithm,
        idxs: impl IntoIterator<Item = &'idx (Direction, RangeSet<usize>)>,
        provider: &dyn EncodingProvider,
    ) -> Result<Self, EncodingTreeError> {
        let mut this = Self {
            tree: MerkleTree::new(hasher.id()),
            blinders: Vec::new(),
            idxs: BiMap::new(),
            sent_idx: RangeSet::default(),
            received_idx: RangeSet::default(),
        };
        let mut leaves = Vec::new();
        // Scratch buffer reused for each leaf's encoding.
        let mut encoding = Vec::new();
        for dir_idx in idxs {
            let direction = dir_idx.0;
            let idx = &dir_idx.1;
            // Ignore empty indices.
            if idx.is_empty() {
                continue;
            }
            if this.idxs.contains_right(dir_idx) {
                // The subsequence is already in the tree.
                continue;
            }
            // Fresh random blinder per leaf so leaf hashes don't reveal the
            // underlying encodings.
            let blinder: Blinder = rand::random();
            encoding.clear();
            for range in idx.iter_ranges() {
                provider
                    .provide_encoding(direction, range, &mut encoding)
                    .map_err(|_| EncodingTreeError::MissingEncoding { index: idx.clone() })?;
            }
            encoding.extend_from_slice(blinder.as_bytes());
            let leaf = hasher.hash(&encoding);
            leaves.push(leaf);
            this.blinders.push(blinder);
            // Leaf indices are assigned in insertion order.
            this.idxs.insert(this.idxs.len(), dir_idx.clone());
            match direction {
                Direction::Sent => this.sent_idx.union_mut(idx),
                Direction::Received => this.received_idx.union_mut(idx),
            }
        }
        this.tree.insert(hasher, leaves);
        Ok(this)
    }
    /// Returns the root of the tree.
    pub fn root(&self) -> TypedHash {
        self.tree.root()
    }
    /// Returns the hash algorithm of the tree.
    pub fn algorithm(&self) -> HashAlgId {
        self.tree.algorithm()
    }
    /// Generates a proof for the given indices.
    ///
    /// # Arguments
    ///
    /// * `idxs` - The transcript indices to prove.
    pub fn proof<'idx>(
        &self,
        idxs: impl Iterator<Item = &'idx (Direction, RangeSet<usize>)>,
    ) -> Result<EncodingProof, EncodingTreeError> {
        let mut openings = HashMap::new();
        for dir_idx in idxs {
            let direction = dir_idx.0;
            let idx = &dir_idx.1;
            // Look up the leaf committed for exactly this (direction, index)
            // pair; only previously committed indices can be proven.
            let leaf_idx = *self
                .idxs
                .get_by_right(dir_idx)
                .ok_or_else(|| EncodingTreeError::MissingLeaf { index: idx.clone() })?;
            let blinder = self.blinders[leaf_idx].clone();
            openings.insert(
                leaf_idx,
                Opening {
                    direction,
                    idx: idx.clone(),
                    blinder,
                },
            );
        }
        // The Merkle proof requires sorted leaf indices.
        let mut indices = openings.keys().copied().collect::<Vec<_>>();
        indices.sort();
        Ok(EncodingProof {
            inclusion_proof: self.tree.proof(&indices),
            openings,
        })
    }
    /// Returns whether the tree contains the given transcript index.
    pub fn contains(&self, idx: &(Direction, RangeSet<usize>)) -> bool {
        self.idxs.contains_right(idx)
    }
    /// Returns the union of committed indices for the given direction.
    pub(crate) fn idx(&self, direction: Direction) -> &RangeSet<usize> {
        match direction {
            Direction::Sent => &self.sent_idx,
            Direction::Received => &self.received_idx,
        }
    }
    /// Returns the committed transcript indices.
    pub(crate) fn transcript_indices(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>)> {
        self.idxs.right_values()
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
fixtures::{encoder_secret, encoding_provider},
hash::{Blake3, HashProvider},
transcript::{encoding::EncodingCommitment, Transcript},
};
use tlsn_data_fixtures::http::{request::POST_JSON, response::OK_JSON};
fn new_tree<'seq>(
transcript: &Transcript,
idxs: impl Iterator<Item = &'seq (Direction, RangeSet<usize>)>,
) -> Result<EncodingTree, EncodingTreeError> {
let provider = encoding_provider(transcript.sent(), transcript.received());
EncodingTree::new(&Blake3::default(), idxs, &provider)
}
#[test]
fn test_encoding_tree() {
let transcript = Transcript::new(POST_JSON, OK_JSON);
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
let idx_1 = (Direction::Received, RangeSet::from(0..OK_JSON.len()));
let tree = new_tree(&transcript, [&idx_0, &idx_1].into_iter()).unwrap();
assert!(tree.contains(&idx_0));
assert!(tree.contains(&idx_1));
let proof = tree.proof([&idx_0, &idx_1].into_iter()).unwrap();
let commitment = EncodingCommitment {
root: tree.root(),
secret: encoder_secret(),
};
let (auth_sent, auth_recv) = proof
.verify_with_provider(
&HashProvider::default(),
&commitment,
transcript.sent(),
transcript.received(),
)
.unwrap();
assert_eq!(auth_sent, idx_0.1);
assert_eq!(auth_recv, idx_1.1);
}
#[test]
fn test_encoding_tree_multiple_ranges() {
let transcript = Transcript::new(POST_JSON, OK_JSON);
let idx_0 = (Direction::Sent, RangeSet::from(0..1));
let idx_1 = (Direction::Sent, RangeSet::from(1..POST_JSON.len()));
let idx_2 = (Direction::Received, RangeSet::from(0..1));
let idx_3 = (Direction::Received, RangeSet::from(1..OK_JSON.len()));
let tree = new_tree(&transcript, [&idx_0, &idx_1, &idx_2, &idx_3].into_iter()).unwrap();
assert!(tree.contains(&idx_0));
assert!(tree.contains(&idx_1));
assert!(tree.contains(&idx_2));
assert!(tree.contains(&idx_3));
let proof = tree
.proof([&idx_0, &idx_1, &idx_2, &idx_3].into_iter())
.unwrap();
let commitment = EncodingCommitment {
root: tree.root(),
secret: encoder_secret(),
};
let (auth_sent, auth_recv) = proof
.verify_with_provider(
&HashProvider::default(),
&commitment,
transcript.sent(),
transcript.received(),
)
.unwrap();
let mut expected_auth_sent = RangeSet::default();
expected_auth_sent.union_mut(&idx_0.1);
expected_auth_sent.union_mut(&idx_1.1);
let mut expected_auth_recv = RangeSet::default();
expected_auth_recv.union_mut(&idx_2.1);
expected_auth_recv.union_mut(&idx_3.1);
assert_eq!(auth_sent, expected_auth_sent);
assert_eq!(auth_recv, expected_auth_recv);
}
#[test]
fn test_encoding_tree_proof_missing_leaf() {
let transcript = Transcript::new(POST_JSON, OK_JSON);
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
let idx_1 = (Direction::Received, RangeSet::from(0..4));
let idx_2 = (Direction::Received, RangeSet::from(4..OK_JSON.len()));
let tree = new_tree(&transcript, [&idx_0, &idx_1].into_iter()).unwrap();
let result = tree
.proof([&idx_0, &idx_1, &idx_2].into_iter())
.unwrap_err();
assert!(matches!(result, EncodingTreeError::MissingLeaf { .. }));
}
#[test]
fn test_encoding_tree_out_of_bounds() {
    let transcript = Transcript::new(POST_JSON, OK_JSON);

    // Ranges extending one byte past the end of each transcript direction,
    // for which the provider has no encodings.
    let oob_sent = (Direction::Sent, RangeSet::from(0..POST_JSON.len() + 1));
    let oob_recv = (Direction::Received, RangeSet::from(0..OK_JSON.len() + 1));

    let err = new_tree(&transcript, [&oob_sent].into_iter()).unwrap_err();
    assert!(matches!(err, EncodingTreeError::MissingEncoding { .. }));

    let err = new_tree(&transcript, [&oob_recv].into_iter()).unwrap_err();
    assert!(matches!(err, EncodingTreeError::MissingEncoding { .. }));
}
#[test]
fn test_encoding_tree_missing_encoding() {
    // A provider that holds no encodings at all.
    let provider = encoding_provider(&[], &[]);

    // Building a tree over any index must fail with a missing encoding.
    let err = EncodingTree::new(
        &Blake3::default(),
        [(Direction::Sent, RangeSet::from(0..8))].iter(),
        &provider,
    )
    .unwrap_err();

    assert!(matches!(err, EncodingTreeError::MissingEncoding { .. }));
}
}

View File

@@ -1,6 +1,10 @@
//! Transcript proofs. //! Transcript proofs.
use rangeset::{Cover, Difference, Subset, ToRangeSet, UnionMut}; use rangeset::{
iter::RangeIterator,
ops::{Cover, Set},
set::ToRangeSet,
};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::{collections::HashSet, fmt}; use std::{collections::HashSet, fmt};
@@ -10,7 +14,6 @@ use crate::{
hash::{HashAlgId, HashProvider}, hash::{HashAlgId, HashProvider},
transcript::{ transcript::{
commit::{TranscriptCommitment, TranscriptCommitmentKind}, commit::{TranscriptCommitment, TranscriptCommitmentKind},
encoding::{EncodingProof, EncodingProofError, EncodingTree},
hash::{hash_plaintext, PlaintextHash, PlaintextHashSecret}, hash::{hash_plaintext, PlaintextHash, PlaintextHashSecret},
Direction, PartialTranscript, RangeSet, Transcript, TranscriptSecret, Direction, PartialTranscript, RangeSet, Transcript, TranscriptSecret,
}, },
@@ -22,14 +25,18 @@ const DEFAULT_COMMITMENT_KINDS: &[TranscriptCommitmentKind] = &[
TranscriptCommitmentKind::Hash { TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256, alg: HashAlgId::SHA256,
}, },
TranscriptCommitmentKind::Encoding, TranscriptCommitmentKind::Hash {
alg: HashAlgId::BLAKE3,
},
TranscriptCommitmentKind::Hash {
alg: HashAlgId::KECCAK256,
},
]; ];
/// Proof of the contents of a transcript. /// Proof of the contents of a transcript.
#[derive(Clone, Serialize, Deserialize)] #[derive(Clone, Serialize, Deserialize)]
pub struct TranscriptProof { pub struct TranscriptProof {
transcript: PartialTranscript, transcript: PartialTranscript,
encoding_proof: Option<EncodingProof>,
hash_secrets: Vec<PlaintextHashSecret>, hash_secrets: Vec<PlaintextHashSecret>,
} }
@@ -43,26 +50,18 @@ impl TranscriptProof {
/// # Arguments /// # Arguments
/// ///
/// * `provider` - The hash provider to use for verification. /// * `provider` - The hash provider to use for verification.
/// * `attestation_body` - The attestation body to verify against. /// * `length` - The transcript length.
/// * `commitments` - The commitments to verify against.
pub fn verify_with_provider<'a>( pub fn verify_with_provider<'a>(
self, self,
provider: &HashProvider, provider: &HashProvider,
length: &TranscriptLength, length: &TranscriptLength,
commitments: impl IntoIterator<Item = &'a TranscriptCommitment>, commitments: impl IntoIterator<Item = &'a TranscriptCommitment>,
) -> Result<PartialTranscript, TranscriptProofError> { ) -> Result<PartialTranscript, TranscriptProofError> {
let mut encoding_commitment = None;
let mut hash_commitments = HashSet::new(); let mut hash_commitments = HashSet::new();
// Index commitments. // Index commitments.
for commitment in commitments { for commitment in commitments {
match commitment { match commitment {
TranscriptCommitment::Encoding(commitment) => {
if encoding_commitment.replace(commitment).is_some() {
return Err(TranscriptProofError::new(
ErrorKind::Encoding,
"multiple encoding commitments are present.",
));
}
}
TranscriptCommitment::Hash(plaintext_hash) => { TranscriptCommitment::Hash(plaintext_hash) => {
hash_commitments.insert(plaintext_hash); hash_commitments.insert(plaintext_hash);
} }
@@ -81,26 +80,6 @@ impl TranscriptProof {
let mut total_auth_sent = RangeSet::default(); let mut total_auth_sent = RangeSet::default();
let mut total_auth_recv = RangeSet::default(); let mut total_auth_recv = RangeSet::default();
// Verify encoding proof.
if let Some(proof) = self.encoding_proof {
let commitment = encoding_commitment.ok_or_else(|| {
TranscriptProofError::new(
ErrorKind::Encoding,
"contains an encoding proof but missing encoding commitment",
)
})?;
let (auth_sent, auth_recv) = proof.verify_with_provider(
provider,
commitment,
self.transcript.sent_unsafe(),
self.transcript.received_unsafe(),
)?;
total_auth_sent.union_mut(&auth_sent);
total_auth_recv.union_mut(&auth_recv);
}
let mut buffer = Vec::new(); let mut buffer = Vec::new();
for PlaintextHashSecret { for PlaintextHashSecret {
direction, direction,
@@ -129,7 +108,7 @@ impl TranscriptProof {
} }
buffer.clear(); buffer.clear();
for range in idx.iter_ranges() { for range in idx.iter() {
buffer.extend_from_slice(&plaintext[range]); buffer.extend_from_slice(&plaintext[range]);
} }
@@ -184,7 +163,6 @@ impl TranscriptProofError {
#[derive(Debug)] #[derive(Debug)]
enum ErrorKind { enum ErrorKind {
Encoding,
Hash, Hash,
Proof, Proof,
} }
@@ -194,7 +172,6 @@ impl fmt::Display for TranscriptProofError {
f.write_str("transcript proof error: ")?; f.write_str("transcript proof error: ")?;
match self.kind { match self.kind {
ErrorKind::Encoding => f.write_str("encoding error")?,
ErrorKind::Hash => f.write_str("hash error")?, ErrorKind::Hash => f.write_str("hash error")?,
ErrorKind::Proof => f.write_str("proof error")?, ErrorKind::Proof => f.write_str("proof error")?,
} }
@@ -207,12 +184,6 @@ impl fmt::Display for TranscriptProofError {
} }
} }
impl From<EncodingProofError> for TranscriptProofError {
fn from(e: EncodingProofError) -> Self {
TranscriptProofError::new(ErrorKind::Encoding, e)
}
}
/// Union of ranges to reveal. /// Union of ranges to reveal.
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]
struct QueryIdx { struct QueryIdx {
@@ -257,7 +228,6 @@ pub struct TranscriptProofBuilder<'a> {
/// Commitment kinds in order of preference for building transcript proofs. /// Commitment kinds in order of preference for building transcript proofs.
commitment_kinds: Vec<TranscriptCommitmentKind>, commitment_kinds: Vec<TranscriptCommitmentKind>,
transcript: &'a Transcript, transcript: &'a Transcript,
encoding_tree: Option<&'a EncodingTree>,
hash_secrets: Vec<&'a PlaintextHashSecret>, hash_secrets: Vec<&'a PlaintextHashSecret>,
committed_sent: RangeSet<usize>, committed_sent: RangeSet<usize>,
committed_recv: RangeSet<usize>, committed_recv: RangeSet<usize>,
@@ -273,15 +243,9 @@ impl<'a> TranscriptProofBuilder<'a> {
let mut committed_sent = RangeSet::default(); let mut committed_sent = RangeSet::default();
let mut committed_recv = RangeSet::default(); let mut committed_recv = RangeSet::default();
let mut encoding_tree = None;
let mut hash_secrets = Vec::new(); let mut hash_secrets = Vec::new();
for secret in secrets { for secret in secrets {
match secret { match secret {
TranscriptSecret::Encoding(tree) => {
committed_sent.union_mut(tree.idx(Direction::Sent));
committed_recv.union_mut(tree.idx(Direction::Received));
encoding_tree = Some(tree);
}
TranscriptSecret::Hash(hash) => { TranscriptSecret::Hash(hash) => {
match hash.direction { match hash.direction {
Direction::Sent => committed_sent.union_mut(&hash.idx), Direction::Sent => committed_sent.union_mut(&hash.idx),
@@ -295,7 +259,6 @@ impl<'a> TranscriptProofBuilder<'a> {
Self { Self {
commitment_kinds: DEFAULT_COMMITMENT_KINDS.to_vec(), commitment_kinds: DEFAULT_COMMITMENT_KINDS.to_vec(),
transcript, transcript,
encoding_tree,
hash_secrets, hash_secrets,
committed_sent, committed_sent,
committed_recv, committed_recv,
@@ -351,7 +314,7 @@ impl<'a> TranscriptProofBuilder<'a> {
if idx.is_subset(committed) { if idx.is_subset(committed) {
self.query_idx.union(&direction, &idx); self.query_idx.union(&direction, &idx);
} else { } else {
let missing = idx.difference(committed); let missing = idx.difference(committed).into_set();
return Err(TranscriptProofBuilderError::new( return Err(TranscriptProofBuilderError::new(
BuilderErrorKind::MissingCommitment, BuilderErrorKind::MissingCommitment,
format!( format!(
@@ -393,7 +356,6 @@ impl<'a> TranscriptProofBuilder<'a> {
transcript: self transcript: self
.transcript .transcript
.to_partial(self.query_idx.sent.clone(), self.query_idx.recv.clone()), .to_partial(self.query_idx.sent.clone(), self.query_idx.recv.clone()),
encoding_proof: None,
hash_secrets: Vec::new(), hash_secrets: Vec::new(),
}; };
let mut uncovered_query_idx = self.query_idx.clone(); let mut uncovered_query_idx = self.query_idx.clone();
@@ -405,46 +367,6 @@ impl<'a> TranscriptProofBuilder<'a> {
// self.commitment_kinds. // self.commitment_kinds.
if let Some(kind) = commitment_kinds_iter.next() { if let Some(kind) = commitment_kinds_iter.next() {
match kind { match kind {
TranscriptCommitmentKind::Encoding => {
let Some(encoding_tree) = self.encoding_tree else {
// Proceeds to the next preferred commitment kind if encoding tree is
// not available.
continue;
};
let (sent_dir_idxs, sent_uncovered) = uncovered_query_idx.sent.cover_by(
encoding_tree
.transcript_indices()
.filter(|(dir, _)| *dir == Direction::Sent),
|(_, idx)| idx,
);
// Uncovered ranges will be checked with ranges of the next
// preferred commitment kind.
uncovered_query_idx.sent = sent_uncovered;
let (recv_dir_idxs, recv_uncovered) = uncovered_query_idx.recv.cover_by(
encoding_tree
.transcript_indices()
.filter(|(dir, _)| *dir == Direction::Received),
|(_, idx)| idx,
);
uncovered_query_idx.recv = recv_uncovered;
let dir_idxs = sent_dir_idxs
.into_iter()
.chain(recv_dir_idxs)
.collect::<Vec<_>>();
// Skip proof generation if there are no committed ranges that can cover the
// query ranges.
if !dir_idxs.is_empty() {
transcript_proof.encoding_proof = Some(
encoding_tree
.proof(dir_idxs.into_iter())
.expect("subsequences were checked to be in tree"),
);
}
}
TranscriptCommitmentKind::Hash { alg } => { TranscriptCommitmentKind::Hash { alg } => {
let (sent_hashes, sent_uncovered) = uncovered_query_idx.sent.cover_by( let (sent_hashes, sent_uncovered) = uncovered_query_idx.sent.cover_by(
self.hash_secrets.iter().filter(|hash| { self.hash_secrets.iter().filter(|hash| {
@@ -567,45 +489,14 @@ impl fmt::Display for TranscriptProofBuilderError {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use rand::{Rng, SeedableRng}; use rand::{Rng, SeedableRng};
use rangeset::RangeSet; use rangeset::prelude::*;
use rstest::rstest; use rstest::rstest;
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON}; use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
use crate::{ use crate::hash::{Blinder, HashAlgId};
fixtures::encoding_provider,
hash::{Blake3, Blinder, HashAlgId},
transcript::TranscriptCommitConfigBuilder,
};
use super::*; use super::*;
#[rstest]
fn test_verify_missing_encoding_commitment_root() {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let idxs = vec![(Direction::Received, RangeSet::from(0..transcript.len().1))];
let encoding_tree = EncodingTree::new(
&Blake3::default(),
&idxs,
&encoding_provider(transcript.sent(), transcript.received()),
)
.unwrap();
let secrets = vec![TranscriptSecret::Encoding(encoding_tree)];
let mut builder = TranscriptProofBuilder::new(&transcript, &secrets);
builder.reveal_recv(&(0..transcript.len().1)).unwrap();
let transcript_proof = builder.build().unwrap();
let provider = HashProvider::default();
let err = transcript_proof
.verify_with_provider(&provider, &transcript.length(), &[])
.err()
.unwrap();
assert!(matches!(err.kind, ErrorKind::Encoding));
}
#[rstest] #[rstest]
fn test_reveal_range_out_of_bounds() { fn test_reveal_range_out_of_bounds() {
let transcript = Transcript::new( let transcript = Transcript::new(
@@ -625,7 +516,7 @@ mod tests {
} }
#[rstest] #[rstest]
fn test_reveal_missing_encoding_tree() { fn test_reveal_missing_commitment() {
let transcript = Transcript::new( let transcript = Transcript::new(
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
@@ -637,7 +528,10 @@ mod tests {
} }
#[rstest] #[rstest]
fn test_reveal_with_hash_commitment() { #[case::sha256(HashAlgId::SHA256)]
#[case::blake3(HashAlgId::BLAKE3)]
#[case::keccak256(HashAlgId::KECCAK256)]
fn test_reveal_with_hash_commitment(#[case] alg: HashAlgId) {
let mut rng = rand::rngs::StdRng::seed_from_u64(0); let mut rng = rand::rngs::StdRng::seed_from_u64(0);
let provider = HashProvider::default(); let provider = HashProvider::default();
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
@@ -645,7 +539,6 @@ mod tests {
let direction = Direction::Sent; let direction = Direction::Sent;
let idx = RangeSet::from(0..10); let idx = RangeSet::from(0..10);
let blinder: Blinder = rng.random(); let blinder: Blinder = rng.random();
let alg = HashAlgId::SHA256;
let hasher = provider.get(&alg).unwrap(); let hasher = provider.get(&alg).unwrap();
let commitment = PlaintextHash { let commitment = PlaintextHash {
@@ -683,7 +576,10 @@ mod tests {
} }
#[rstest] #[rstest]
fn test_reveal_with_inconsistent_hash_commitment() { #[case::sha256(HashAlgId::SHA256)]
#[case::blake3(HashAlgId::BLAKE3)]
#[case::keccak256(HashAlgId::KECCAK256)]
fn test_reveal_with_inconsistent_hash_commitment(#[case] alg: HashAlgId) {
let mut rng = rand::rngs::StdRng::seed_from_u64(0); let mut rng = rand::rngs::StdRng::seed_from_u64(0);
let provider = HashProvider::default(); let provider = HashProvider::default();
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
@@ -691,7 +587,6 @@ mod tests {
let direction = Direction::Sent; let direction = Direction::Sent;
let idx = RangeSet::from(0..10); let idx = RangeSet::from(0..10);
let blinder: Blinder = rng.random(); let blinder: Blinder = rng.random();
let alg = HashAlgId::SHA256;
let hasher = provider.get(&alg).unwrap(); let hasher = provider.get(&alg).unwrap();
let commitment = PlaintextHash { let commitment = PlaintextHash {
@@ -734,24 +629,19 @@ mod tests {
TranscriptCommitmentKind::Hash { TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256, alg: HashAlgId::SHA256,
}, },
TranscriptCommitmentKind::Encoding,
TranscriptCommitmentKind::Hash { TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256, alg: HashAlgId::SHA256,
}, },
TranscriptCommitmentKind::Hash { TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256, alg: HashAlgId::SHA256,
}, },
TranscriptCommitmentKind::Encoding,
]); ]);
assert_eq!( assert_eq!(
builder.commitment_kinds, builder.commitment_kinds,
vec![ vec![TranscriptCommitmentKind::Hash {
TranscriptCommitmentKind::Hash { alg: HashAlgId::SHA256
alg: HashAlgId::SHA256 },]
},
TranscriptCommitmentKind::Encoding
]
); );
} }
@@ -761,7 +651,7 @@ mod tests {
RangeSet::from([0..10, 12..30]), RangeSet::from([0..10, 12..30]),
true, true,
)] )]
#[case::reveal_all_rangesets_with_superset_ranges( #[case::reveal_all_rangesets_with_single_superset_range(
vec![RangeSet::from([0..1]), RangeSet::from([1..2, 8..9]), RangeSet::from([2..4, 6..8]), RangeSet::from([2..3, 6..7]), RangeSet::from([9..12])], vec![RangeSet::from([0..1]), RangeSet::from([1..2, 8..9]), RangeSet::from([2..4, 6..8]), RangeSet::from([2..3, 6..7]), RangeSet::from([9..12])],
RangeSet::from([0..4, 6..9]), RangeSet::from([0..4, 6..9]),
true, true,
@@ -792,29 +682,30 @@ mod tests {
false, false,
)] )]
#[allow(clippy::single_range_in_vec_init)] #[allow(clippy::single_range_in_vec_init)]
fn test_reveal_mutliple_rangesets_with_one_rangeset( fn test_reveal_multiple_rangesets_with_one_rangeset(
#[case] commit_recv_rangesets: Vec<RangeSet<usize>>, #[case] commit_recv_rangesets: Vec<RangeSet<usize>>,
#[case] reveal_recv_rangeset: RangeSet<usize>, #[case] reveal_recv_rangeset: RangeSet<usize>,
#[case] success: bool, #[case] success: bool,
) { ) {
use rand::{Rng, SeedableRng};
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
// Encoding commitment kind // Create hash commitments for each rangeset
let mut transcript_commitment_builder = TranscriptCommitConfigBuilder::new(&transcript); let mut secrets = Vec::new();
for rangeset in commit_recv_rangesets.iter() { for rangeset in commit_recv_rangesets.iter() {
transcript_commitment_builder.commit_recv(rangeset).unwrap(); let blinder: crate::hash::Blinder = rng.random();
let secret = PlaintextHashSecret {
direction: Direction::Received,
idx: rangeset.clone(),
alg: HashAlgId::BLAKE3,
blinder,
};
secrets.push(TranscriptSecret::Hash(secret));
} }
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap();
let encoding_tree = EncodingTree::new(
&Blake3::default(),
transcripts_commitment_config.iter_encoding(),
&encoding_provider(GET_WITH_HEADER, OK_JSON),
)
.unwrap();
let secrets = vec![TranscriptSecret::Encoding(encoding_tree)];
let mut builder = TranscriptProofBuilder::new(&transcript, &secrets); let mut builder = TranscriptProofBuilder::new(&transcript, &secrets);
if success { if success {
@@ -867,27 +758,34 @@ mod tests {
#[case] uncovered_sent_rangeset: RangeSet<usize>, #[case] uncovered_sent_rangeset: RangeSet<usize>,
#[case] uncovered_recv_rangeset: RangeSet<usize>, #[case] uncovered_recv_rangeset: RangeSet<usize>,
) { ) {
use rand::{Rng, SeedableRng};
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
// Encoding commitment kind // Create hash commitments for each rangeset
let mut transcript_commitment_builder = TranscriptCommitConfigBuilder::new(&transcript); let mut secrets = Vec::new();
for rangeset in commit_sent_rangesets.iter() { for rangeset in commit_sent_rangesets.iter() {
transcript_commitment_builder.commit_sent(rangeset).unwrap(); let blinder: crate::hash::Blinder = rng.random();
let secret = PlaintextHashSecret {
direction: Direction::Sent,
idx: rangeset.clone(),
alg: HashAlgId::BLAKE3,
blinder,
};
secrets.push(TranscriptSecret::Hash(secret));
} }
for rangeset in commit_recv_rangesets.iter() { for rangeset in commit_recv_rangesets.iter() {
transcript_commitment_builder.commit_recv(rangeset).unwrap(); let blinder: crate::hash::Blinder = rng.random();
let secret = PlaintextHashSecret {
direction: Direction::Received,
idx: rangeset.clone(),
alg: HashAlgId::BLAKE3,
blinder,
};
secrets.push(TranscriptSecret::Hash(secret));
} }
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap();
let encoding_tree = EncodingTree::new(
&Blake3::default(),
transcripts_commitment_config.iter_encoding(),
&encoding_provider(GET_WITH_HEADER, OK_JSON),
)
.unwrap();
let secrets = vec![TranscriptSecret::Encoding(encoding_tree)];
let mut builder = TranscriptProofBuilder::new(&transcript, &secrets); let mut builder = TranscriptProofBuilder::new(&transcript, &secrets);
builder.reveal_sent(&reveal_sent_rangeset).unwrap(); builder.reveal_sent(&reveal_sent_rangeset).unwrap();
builder.reveal_recv(&reveal_recv_rangeset).unwrap(); builder.reveal_recv(&reveal_recv_rangeset).unwrap();

View File

@@ -10,10 +10,53 @@ use crate::{
use tls_core::msgs::{ use tls_core::msgs::{
alert::AlertMessagePayload, alert::AlertMessagePayload,
codec::{Codec, Reader}, codec::{Codec, Reader},
enums::{AlertDescription, ContentType, ProtocolVersion}, enums::{AlertDescription, ProtocolVersion},
handshake::{HandshakeMessagePayload, HandshakePayload}, handshake::{HandshakeMessagePayload, HandshakePayload},
}; };
/// TLS record content type.
#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub enum ContentType {
/// Change cipher spec protocol.
ChangeCipherSpec,
/// Alert protocol.
Alert,
/// Handshake protocol.
Handshake,
/// Application data protocol.
ApplicationData,
/// Heartbeat protocol.
Heartbeat,
/// Unknown protocol.
Unknown(u8),
}
impl From<ContentType> for tls_core::msgs::enums::ContentType {
fn from(content_type: ContentType) -> Self {
match content_type {
ContentType::ChangeCipherSpec => tls_core::msgs::enums::ContentType::ChangeCipherSpec,
ContentType::Alert => tls_core::msgs::enums::ContentType::Alert,
ContentType::Handshake => tls_core::msgs::enums::ContentType::Handshake,
ContentType::ApplicationData => tls_core::msgs::enums::ContentType::ApplicationData,
ContentType::Heartbeat => tls_core::msgs::enums::ContentType::Heartbeat,
ContentType::Unknown(id) => tls_core::msgs::enums::ContentType::Unknown(id),
}
}
}
impl From<tls_core::msgs::enums::ContentType> for ContentType {
fn from(content_type: tls_core::msgs::enums::ContentType) -> Self {
match content_type {
tls_core::msgs::enums::ContentType::ChangeCipherSpec => ContentType::ChangeCipherSpec,
tls_core::msgs::enums::ContentType::Alert => ContentType::Alert,
tls_core::msgs::enums::ContentType::Handshake => ContentType::Handshake,
tls_core::msgs::enums::ContentType::ApplicationData => ContentType::ApplicationData,
tls_core::msgs::enums::ContentType::Heartbeat => ContentType::Heartbeat,
tls_core::msgs::enums::ContentType::Unknown(id) => ContentType::Unknown(id),
}
}
}
/// A transcript of TLS records sent and received by the prover. /// A transcript of TLS records sent and received by the prover.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct TlsTranscript { pub struct TlsTranscript {

View File

@@ -53,6 +53,21 @@ impl RootCertStore {
pub fn empty() -> Self { pub fn empty() -> Self {
Self { roots: Vec::new() } Self { roots: Vec::new() }
} }
/// Creates a root certificate store with Mozilla root certificates.
///
/// These certificates are sourced from [`webpki-root-certs`](https://docs.rs/webpki-root-certs/latest/webpki_root_certs/). It is not recommended to use these unless the
/// application binary can be recompiled and deployed on-demand in the case
/// that the root certificates need to be updated.
#[cfg(feature = "mozilla-certs")]
pub fn mozilla() -> Self {
Self {
roots: webpki_root_certs::TLS_SERVER_ROOT_CERTS
.iter()
.map(|cert| CertificateDer(cert.to_vec()))
.collect(),
}
}
} }
/// Server certificate verifier. /// Server certificate verifier.
@@ -82,8 +97,12 @@ impl ServerCertVerifier {
Ok(Self { roots }) Ok(Self { roots })
} }
/// Creates a new server certificate verifier with Mozilla root /// Creates a server certificate verifier with Mozilla root certificates.
/// certificates. ///
/// These certificates are sourced from [`webpki-root-certs`](https://docs.rs/webpki-root-certs/latest/webpki_root_certs/). It is not recommended to use these unless the
/// application binary can be recompiled and deployed on-demand in the case
/// that the root certificates need to be updated.
#[cfg(feature = "mozilla-certs")]
pub fn mozilla() -> Self { pub fn mozilla() -> Self {
Self { Self {
roots: webpki_roots::TLS_SERVER_ROOTS.to_vec(), roots: webpki_roots::TLS_SERVER_ROOTS.to_vec(),

View File

@@ -15,6 +15,7 @@ tlsn-server-fixture = { workspace = true }
tlsn-server-fixture-certs = { workspace = true } tlsn-server-fixture-certs = { workspace = true }
spansy = { workspace = true } spansy = { workspace = true }
anyhow = { workspace = true }
bincode = { workspace = true } bincode = { workspace = true }
chrono = { workspace = true } chrono = { workspace = true }
clap = { version = "4.5", features = ["derive"] } clap = { version = "4.5", features = ["derive"] }
@@ -24,6 +25,7 @@ hex = { workspace = true }
hyper = { workspace = true, features = ["client", "http1"] } hyper = { workspace = true, features = ["client", "http1"] }
hyper-util = { workspace = true, features = ["full"] } hyper-util = { workspace = true, features = ["full"] }
k256 = { workspace = true, features = ["ecdsa"] } k256 = { workspace = true, features = ["ecdsa"] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true } serde_json = { workspace = true }
tokio = { workspace = true, features = [ tokio = { workspace = true, features = [
"rt", "rt",
@@ -36,11 +38,18 @@ tokio = { workspace = true, features = [
tokio-util = { workspace = true } tokio-util = { workspace = true }
tracing = { workspace = true } tracing = { workspace = true }
tracing-subscriber = { workspace = true } tracing-subscriber = { workspace = true }
noir = { git = "https://github.com/zkmopro/noir-rs", tag = "v1.0.0-beta.8", features = [
"barretenberg",
] }
[[example]] [[example]]
name = "interactive" name = "interactive"
path = "interactive/interactive.rs" path = "interactive/interactive.rs"
[[example]]
name = "interactive_zk"
path = "interactive_zk/interactive_zk.rs"
[[example]] [[example]]
name = "attestation_prove" name = "attestation_prove"
path = "attestation/prove.rs" path = "attestation/prove.rs"

View File

@@ -4,5 +4,7 @@ This folder contains examples demonstrating how to use the TLSNotary protocol.
* [Interactive](./interactive/README.md): Interactive Prover and Verifier session without a trusted notary. * [Interactive](./interactive/README.md): Interactive Prover and Verifier session without a trusted notary.
* [Attestation](./attestation/README.md): Performing a simple notarization with a trusted notary. * [Attestation](./attestation/README.md): Performing a simple notarization with a trusted notary.
* [Interactive_zk](./interactive_zk/README.md): Interactive Prover and Verifier session demonstrating zero-knowledge age verification using Noir.
Refer to <https://tlsnotary.org/docs/quick_start> for a quick start guide to using TLSNotary with these examples. Refer to <https://tlsnotary.org/docs/quick_start> for a quick start guide to using TLSNotary with these examples.

View File

@@ -4,6 +4,7 @@
use std::env; use std::env;
use anyhow::{anyhow, Result};
use clap::Parser; use clap::Parser;
use http_body_util::Empty; use http_body_util::Empty;
use hyper::{body::Bytes, Request, StatusCode}; use hyper::{body::Bytes, Request, StatusCode};
@@ -23,12 +24,17 @@ use tlsn::{
Attestation, AttestationConfig, CryptoProvider, Secrets, Attestation, AttestationConfig, CryptoProvider, Secrets,
}, },
config::{ config::{
CertificateDer, PrivateKeyDer, ProtocolConfig, ProtocolConfigValidator, RootCertStore, prove::ProveConfig,
prover::ProverConfig,
tls::TlsClientConfig,
tls_commit::{mpc::MpcTlsConfig, TlsCommitConfig},
verifier::VerifierConfig,
}, },
connection::{ConnectionInfo, HandshakeData, ServerName, TranscriptLength}, connection::{ConnectionInfo, HandshakeData, ServerName, TranscriptLength},
prover::{state::Committed, ProveConfig, Prover, ProverConfig, ProverOutput, TlsConfig}, prover::{state::Committed, Prover, ProverOutput},
transcript::{ContentType, TranscriptCommitConfig}, transcript::{ContentType, TranscriptCommitConfig},
verifier::{Verifier, VerifierConfig, VerifierOutput, VerifyConfig}, verifier::{Verifier, VerifierOutput},
webpki::{CertificateDer, PrivateKeyDer, RootCertStore},
}; };
use tlsn_examples::ExampleType; use tlsn_examples::ExampleType;
use tlsn_formats::http::{DefaultHttpCommitter, HttpCommit, HttpTranscript}; use tlsn_formats::http::{DefaultHttpCommitter, HttpCommit, HttpTranscript};
@@ -47,7 +53,7 @@ struct Args {
} }
#[tokio::main] #[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> { async fn main() -> Result<()> {
tracing_subscriber::fmt::init(); tracing_subscriber::fmt::init();
let args = Args::parse(); let args = Args::parse();
@@ -87,64 +93,63 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
uri: &str, uri: &str,
extra_headers: Vec<(&str, &str)>, extra_headers: Vec<(&str, &str)>,
example_type: &ExampleType, example_type: &ExampleType,
) -> Result<(), Box<dyn std::error::Error>> { ) -> Result<()> {
let server_host: String = env::var("SERVER_HOST").unwrap_or("127.0.0.1".into()); let server_host: String = env::var("SERVER_HOST").unwrap_or("127.0.0.1".into());
let server_port: u16 = env::var("SERVER_PORT") let server_port: u16 = env::var("SERVER_PORT")
.map(|port| port.parse().expect("port should be valid integer")) .map(|port| port.parse().expect("port should be valid integer"))
.unwrap_or(DEFAULT_FIXTURE_PORT); .unwrap_or(DEFAULT_FIXTURE_PORT);
// Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
let mut tls_config_builder = TlsConfig::builder();
tls_config_builder
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
// (Optional) Set up TLS client authentication if required by the server.
.client_auth((
vec![CertificateDer(CLIENT_CERT_DER.to_vec())],
PrivateKeyDer(CLIENT_KEY_DER.to_vec()),
));
let tls_config = tls_config_builder.build().unwrap();
// Set up protocol configuration for prover.
let mut prover_config_builder = ProverConfig::builder();
prover_config_builder
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
.tls_config(tls_config)
.protocol_config(
ProtocolConfig::builder()
// We must configure the amount of data we expect to exchange beforehand, which will
// be preprocessed prior to the connection. Reducing these limits will improve
// performance.
.max_sent_data(tlsn_examples::MAX_SENT_DATA)
.max_recv_data(tlsn_examples::MAX_RECV_DATA)
.build()?,
);
let prover_config = prover_config_builder.build()?;
// Create a new prover and perform necessary setup. // Create a new prover and perform necessary setup.
let prover = Prover::new(prover_config).setup(socket.compat()).await?; let prover = Prover::new(ProverConfig::builder().build()?)
.commit(
TlsCommitConfig::builder()
// Select the TLS commitment protocol.
.protocol(
MpcTlsConfig::builder()
// We must configure the amount of data we expect to exchange beforehand,
// which will be preprocessed prior to the
// connection. Reducing these limits will improve
// performance.
.max_sent_data(tlsn_examples::MAX_SENT_DATA)
.max_recv_data(tlsn_examples::MAX_RECV_DATA)
.build()?,
)
.build()?,
socket.compat(),
)
.await?;
// Open a TCP connection to the server. // Open a TCP connection to the server.
let client_socket = tokio::net::TcpStream::connect((server_host, server_port)).await?; let client_socket = tokio::net::TcpStream::connect((server_host, server_port)).await?;
// Bind the prover to the server connection. // Bind the prover to the server connection.
// The returned `mpc_tls_connection` is an MPC TLS connection to the server: all let (tls_connection, prover_fut) = prover
// data written to/read from it will be encrypted/decrypted using MPC with .connect(
// the notary. TlsClientConfig::builder()
let (mpc_tls_connection, prover_fut) = prover.connect(client_socket.compat()).await?; .server_name(ServerName::Dns(SERVER_DOMAIN.try_into()?))
let mpc_tls_connection = TokioIo::new(mpc_tls_connection.compat()); // Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
// (Optional) Set up TLS client authentication if required by the server.
.client_auth((
vec![CertificateDer(CLIENT_CERT_DER.to_vec())],
PrivateKeyDer(CLIENT_KEY_DER.to_vec()),
))
.build()?,
client_socket.compat(),
)
.await?;
let tls_connection = TokioIo::new(tls_connection.compat());
// Spawn the prover task to be run concurrently in the background. // Spawn the prover task to be run concurrently in the background.
let prover_task = tokio::spawn(prover_fut); let prover_task = tokio::spawn(prover_fut);
// Attach the hyper HTTP client to the connection. // Attach the hyper HTTP client to the connection.
let (mut request_sender, connection) = let (mut request_sender, connection) =
hyper::client::conn::http1::handshake(mpc_tls_connection).await?; hyper::client::conn::http1::handshake(tls_connection).await?;
// Spawn the HTTP task to be run concurrently in the background. // Spawn the HTTP task to be run concurrently in the background.
tokio::spawn(connection); tokio::spawn(connection);
@@ -165,7 +170,7 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
} }
let request = request_builder.body(Empty::<Bytes>::new())?; let request = request_builder.body(Empty::<Bytes>::new())?;
info!("Starting an MPC TLS connection with the server"); info!("Starting connection with the server");
// Send the request to the server and wait for the response. // Send the request to the server and wait for the response.
let response = request_sender.send_request(request).await?; let response = request_sender.send_request(request).await?;
@@ -175,7 +180,7 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
assert!(response.status() == StatusCode::OK); assert!(response.status() == StatusCode::OK);
// The prover task should be done now, so we can await it. // The prover task should be done now, so we can await it.
let mut prover = prover_task.await??; let prover = prover_task.await??;
// Parse the HTTP transcript. // Parse the HTTP transcript.
let transcript = HttpTranscript::parse(prover.transcript())?; let transcript = HttpTranscript::parse(prover.transcript())?;
@@ -217,7 +222,7 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
let request_config = builder.build()?; let request_config = builder.build()?;
let (attestation, secrets) = notarize(&mut prover, &request_config, req_tx, resp_rx).await?; let (attestation, secrets) = notarize(prover, &request_config, req_tx, resp_rx).await?;
// Write the attestation to disk. // Write the attestation to disk.
let attestation_path = tlsn_examples::get_file_path(example_type, "attestation"); let attestation_path = tlsn_examples::get_file_path(example_type, "attestation");
@@ -238,11 +243,11 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
} }
async fn notarize( async fn notarize(
prover: &mut Prover<Committed>, mut prover: Prover<Committed>,
config: &RequestConfig, config: &RequestConfig,
request_tx: Sender<AttestationRequest>, request_tx: Sender<AttestationRequest>,
attestation_rx: Receiver<Attestation>, attestation_rx: Receiver<Attestation>,
) -> Result<(Attestation, Secrets), Box<dyn std::error::Error>> { ) -> Result<(Attestation, Secrets)> {
let mut builder = ProveConfig::builder(prover.transcript()); let mut builder = ProveConfig::builder(prover.transcript());
if let Some(config) = config.transcript_commit() { if let Some(config) = config.transcript_commit() {
@@ -255,7 +260,11 @@ async fn notarize(
transcript_commitments, transcript_commitments,
transcript_secrets, transcript_secrets,
.. ..
} = prover.prove(disclosure_config).await?; } = prover.prove(&disclosure_config).await?;
let transcript = prover.transcript().clone();
let tls_transcript = prover.tls_transcript().clone();
prover.close().await?;
// Build an attestation request. // Build an attestation request.
let mut builder = AttestationRequest::builder(config); let mut builder = AttestationRequest::builder(config);
@@ -263,19 +272,17 @@ async fn notarize(
builder builder
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap())) .server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
.handshake_data(HandshakeData { .handshake_data(HandshakeData {
certs: prover certs: tls_transcript
.tls_transcript()
.server_cert_chain() .server_cert_chain()
.expect("server cert chain is present") .expect("server cert chain is present")
.to_vec(), .to_vec(),
sig: prover sig: tls_transcript
.tls_transcript()
.server_signature() .server_signature()
.expect("server signature is present") .expect("server signature is present")
.clone(), .clone(),
binding: prover.tls_transcript().certificate_binding().clone(), binding: tls_transcript.certificate_binding().clone(),
}) })
.transcript(prover.transcript().clone()) .transcript(transcript)
.transcript_commitments(transcript_secrets, transcript_commitments); .transcript_commitments(transcript_secrets, transcript_commitments);
let (request, secrets) = builder.build(&CryptoProvider::default())?; let (request, secrets) = builder.build(&CryptoProvider::default())?;
@@ -283,15 +290,18 @@ async fn notarize(
// Send attestation request to notary. // Send attestation request to notary.
request_tx request_tx
.send(request.clone()) .send(request.clone())
.map_err(|_| "notary is not receiving attestation request".to_string())?; .map_err(|_| anyhow!("notary is not receiving attestation request"))?;
// Receive attestation from notary. // Receive attestation from notary.
let attestation = attestation_rx let attestation = attestation_rx
.await .await
.map_err(|err| format!("notary did not respond with attestation: {err}"))?; .map_err(|err| anyhow!("notary did not respond with attestation: {err}"))?;
// Signature verifier for the signature algorithm in the request.
let provider = CryptoProvider::default();
// Check the attestation is consistent with the Prover's view. // Check the attestation is consistent with the Prover's view.
request.validate(&attestation)?; request.validate(&attestation, &provider)?;
Ok((attestation, secrets)) Ok((attestation, secrets))
} }
@@ -300,14 +310,7 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
socket: S, socket: S,
request_rx: Receiver<AttestationRequest>, request_rx: Receiver<AttestationRequest>,
attestation_tx: Sender<Attestation>, attestation_tx: Sender<Attestation>,
) -> Result<(), Box<dyn std::error::Error>> { ) -> Result<()> {
// Set up Verifier.
let config_validator = ProtocolConfigValidator::builder()
.max_sent_data(tlsn_examples::MAX_SENT_DATA)
.max_recv_data(tlsn_examples::MAX_RECV_DATA)
.build()
.unwrap();
// Create a root certificate store with the server-fixture's self-signed // Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the // certificate. This is only required for offline testing with the
// server-fixture. // server-fixture.
@@ -315,20 +318,24 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
.root_store(RootCertStore { .root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())], roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
}) })
.protocol_config_validator(config_validator)
.build() .build()
.unwrap(); .unwrap();
let mut verifier = Verifier::new(verifier_config) let verifier = Verifier::new(verifier_config)
.setup(socket.compat()) .commit(socket.compat())
.await?
.accept()
.await? .await?
.run() .run()
.await?; .await?;
let VerifierOutput { let (
transcript_commitments, VerifierOutput {
.. transcript_commitments,
} = verifier.verify(&VerifyConfig::default()).await?; ..
},
verifier,
) = verifier.verify().await?.accept().await?;
let tls_transcript = verifier.tls_transcript().clone(); let tls_transcript = verifier.tls_transcript().clone();
@@ -390,7 +397,7 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
// Send attestation to prover. // Send attestation to prover.
attestation_tx attestation_tx
.send(attestation) .send(attestation)
.map_err(|_| "prover is not receiving attestation".to_string())?; .map_err(|_| anyhow!("prover is not receiving attestation"))?;
Ok(()) Ok(())
} }

View File

@@ -12,8 +12,8 @@ use tlsn::{
signing::VerifyingKey, signing::VerifyingKey,
CryptoProvider, CryptoProvider,
}, },
config::{CertificateDer, RootCertStore},
verifier::ServerCertVerifier, verifier::ServerCertVerifier,
webpki::{CertificateDer, RootCertStore},
}; };
use tlsn_examples::ExampleType; use tlsn_examples::ExampleType;
use tlsn_server_fixture_certs::CA_CERT_DER; use tlsn_server_fixture_certs::CA_CERT_DER;

View File

@@ -3,6 +3,7 @@ use std::{
net::{IpAddr, SocketAddr}, net::{IpAddr, SocketAddr},
}; };
use anyhow::Result;
use http_body_util::Empty; use http_body_util::Empty;
use hyper::{body::Bytes, Request, StatusCode, Uri}; use hyper::{body::Bytes, Request, StatusCode, Uri};
use hyper_util::rt::TokioIo; use hyper_util::rt::TokioIo;
@@ -11,11 +12,18 @@ use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
use tracing::instrument; use tracing::instrument;
use tlsn::{ use tlsn::{
config::{CertificateDer, ProtocolConfig, ProtocolConfigValidator, RootCertStore}, config::{
prove::ProveConfig,
prover::ProverConfig,
tls::TlsClientConfig,
tls_commit::{mpc::MpcTlsConfig, TlsCommitConfig, TlsCommitProtocolConfig},
verifier::VerifierConfig,
},
connection::ServerName, connection::ServerName,
prover::{ProveConfig, Prover, ProverConfig, TlsConfig}, prover::Prover,
transcript::PartialTranscript, transcript::PartialTranscript,
verifier::{Verifier, VerifierConfig, VerifierOutput, VerifyConfig}, verifier::{Verifier, VerifierOutput},
webpki::{CertificateDer, RootCertStore},
}; };
use tlsn_server_fixture::DEFAULT_FIXTURE_PORT; use tlsn_server_fixture::DEFAULT_FIXTURE_PORT;
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN}; use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
@@ -46,7 +54,7 @@ async fn main() {
let (prover_socket, verifier_socket) = tokio::io::duplex(1 << 23); let (prover_socket, verifier_socket) = tokio::io::duplex(1 << 23);
let prover = prover(prover_socket, &server_addr, &uri); let prover = prover(prover_socket, &server_addr, &uri);
let verifier = verifier(verifier_socket); let verifier = verifier(verifier_socket);
let (_, transcript) = tokio::join!(prover, verifier); let (_, transcript) = tokio::try_join!(prover, verifier).unwrap();
println!("Successfully verified {}", &uri); println!("Successfully verified {}", &uri);
println!( println!(
@@ -64,61 +72,57 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
verifier_socket: T, verifier_socket: T,
server_addr: &SocketAddr, server_addr: &SocketAddr,
uri: &str, uri: &str,
) { ) -> Result<()> {
let uri = uri.parse::<Uri>().unwrap(); let uri = uri.parse::<Uri>().unwrap();
assert_eq!(uri.scheme().unwrap().as_str(), "https"); assert_eq!(uri.scheme().unwrap().as_str(), "https");
let server_domain = uri.authority().unwrap().host(); let server_domain = uri.authority().unwrap().host();
// Create a root certificate store with the server-fixture's self-signed // Create a new prover and perform necessary setup.
// certificate. This is only required for offline testing with the let prover = Prover::new(ProverConfig::builder().build()?)
// server-fixture. .commit(
let mut tls_config_builder = TlsConfig::builder(); TlsCommitConfig::builder()
tls_config_builder.root_store(RootCertStore { // Select the TLS commitment protocol.
roots: vec![CertificateDer(CA_CERT_DER.to_vec())], .protocol(
}); MpcTlsConfig::builder()
let tls_config = tls_config_builder.build().unwrap(); // We must configure the amount of data we expect to exchange beforehand,
// which will be preprocessed prior to the
// connection. Reducing these limits will improve
// performance.
.max_sent_data(tlsn_examples::MAX_SENT_DATA)
.max_recv_data(tlsn_examples::MAX_RECV_DATA)
.build()?,
)
.build()?,
verifier_socket.compat(),
)
.await?;
// Set up protocol configuration for prover. // Open a TCP connection to the server.
let mut prover_config_builder = ProverConfig::builder(); let client_socket = tokio::net::TcpStream::connect(server_addr).await?;
prover_config_builder
.server_name(ServerName::Dns(server_domain.try_into().unwrap()))
.tls_config(tls_config)
.protocol_config(
ProtocolConfig::builder()
.max_sent_data(MAX_SENT_DATA)
.max_recv_data(MAX_RECV_DATA)
.build()
.unwrap(),
);
let prover_config = prover_config_builder.build().unwrap(); // Bind the prover to the server connection.
let (tls_connection, prover_fut) = prover
// Create prover and connect to verifier. .connect(
// TlsClientConfig::builder()
// Perform the setup phase with the verifier. .server_name(ServerName::Dns(SERVER_DOMAIN.try_into()?))
let prover = Prover::new(prover_config) // Create a root certificate store with the server-fixture's self-signed
.setup(verifier_socket.compat()) // certificate. This is only required for offline testing with the
.await // server-fixture.
.unwrap(); .root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
// Connect to TLS Server. })
let tls_client_socket = tokio::net::TcpStream::connect(server_addr).await.unwrap(); .build()?,
client_socket.compat(),
// Pass server connection into the prover. )
let (mpc_tls_connection, prover_fut) = .await?;
prover.connect(tls_client_socket.compat()).await.unwrap(); let tls_connection = TokioIo::new(tls_connection.compat());
// Wrap the connection in a TokioIo compatibility layer to use it with hyper.
let mpc_tls_connection = TokioIo::new(mpc_tls_connection.compat());
// Spawn the Prover to run in the background. // Spawn the Prover to run in the background.
let prover_task = tokio::spawn(prover_fut); let prover_task = tokio::spawn(prover_fut);
// MPC-TLS Handshake. // MPC-TLS Handshake.
let (mut request_sender, connection) = let (mut request_sender, connection) =
hyper::client::conn::http1::handshake(mpc_tls_connection) hyper::client::conn::http1::handshake(tls_connection).await?;
.await
.unwrap();
// Spawn the connection to run in the background. // Spawn the connection to run in the background.
tokio::spawn(connection); tokio::spawn(connection);
@@ -130,14 +134,13 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
.header("Connection", "close") .header("Connection", "close")
.header("Secret", SECRET) .header("Secret", SECRET)
.method("GET") .method("GET")
.body(Empty::<Bytes>::new()) .body(Empty::<Bytes>::new())?;
.unwrap(); let response = request_sender.send_request(request).await?;
let response = request_sender.send_request(request).await.unwrap();
assert!(response.status() == StatusCode::OK); assert!(response.status() == StatusCode::OK);
// Create proof for the Verifier. // Create proof for the Verifier.
let mut prover = prover_task.await.unwrap().unwrap(); let mut prover = prover_task.await??;
let mut builder = ProveConfig::builder(prover.transcript()); let mut builder = ProveConfig::builder(prover.transcript());
@@ -153,10 +156,8 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
.expect("the secret should be in the sent data"); .expect("the secret should be in the sent data");
// Reveal everything except for the secret. // Reveal everything except for the secret.
builder.reveal_sent(&(0..pos)).unwrap(); builder.reveal_sent(&(0..pos))?;
builder builder.reveal_sent(&(pos + SECRET.len()..prover.transcript().sent().len()))?;
.reveal_sent(&(pos + SECRET.len()..prover.transcript().sent().len()))
.unwrap();
// Find the substring "Dick". // Find the substring "Dick".
let pos = prover let pos = prover
@@ -167,28 +168,21 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
.expect("the substring 'Dick' should be in the received data"); .expect("the substring 'Dick' should be in the received data");
// Reveal everything except for the substring. // Reveal everything except for the substring.
builder.reveal_recv(&(0..pos)).unwrap(); builder.reveal_recv(&(0..pos))?;
builder builder.reveal_recv(&(pos + 4..prover.transcript().received().len()))?;
.reveal_recv(&(pos + 4..prover.transcript().received().len()))
.unwrap();
let config = builder.build().unwrap(); let config = builder.build()?;
prover.prove(config).await.unwrap(); prover.prove(&config).await?;
prover.close().await.unwrap(); prover.close().await?;
Ok(())
} }
#[instrument(skip(socket))] #[instrument(skip(socket))]
async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>( async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
socket: T, socket: T,
) -> PartialTranscript { ) -> Result<PartialTranscript> {
// Set up Verifier.
let config_validator = ProtocolConfigValidator::builder()
.max_sent_data(MAX_SENT_DATA)
.max_recv_data(MAX_RECV_DATA)
.build()
.unwrap();
// Create a root certificate store with the server-fixture's self-signed // Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the // certificate. This is only required for offline testing with the
// server-fixture. // server-fixture.
@@ -196,20 +190,56 @@ async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
.root_store(RootCertStore { .root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())], roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
}) })
.protocol_config_validator(config_validator) .build()?;
.build()
.unwrap();
let verifier = Verifier::new(verifier_config); let verifier = Verifier::new(verifier_config);
// Receive authenticated data. // Validate the proposed configuration and then run the TLS commitment protocol.
let VerifierOutput { let verifier = verifier.commit(socket.compat()).await?;
server_name,
transcript, // This is the opportunity to ensure the prover does not attempt to overload the
.. // verifier.
} = verifier let reject = if let TlsCommitProtocolConfig::Mpc(mpc_tls_config) = verifier.request().protocol()
.verify(socket.compat(), &VerifyConfig::default()) {
.await if mpc_tls_config.max_sent_data() > MAX_SENT_DATA {
.unwrap(); Some("max_sent_data is too large")
} else if mpc_tls_config.max_recv_data() > MAX_RECV_DATA {
Some("max_recv_data is too large")
} else {
None
}
} else {
Some("expecting to use MPC-TLS")
};
if reject.is_some() {
verifier.reject(reject).await?;
return Err(anyhow::anyhow!("protocol configuration rejected"));
}
// Runs the TLS commitment protocol to completion.
let verifier = verifier.accept().await?.run().await?;
// Validate the proving request and then verify.
let verifier = verifier.verify().await?;
if !verifier.request().server_identity() {
let verifier = verifier
.reject(Some("expecting to verify the server name"))
.await?;
verifier.close().await?;
return Err(anyhow::anyhow!("prover did not reveal the server name"));
}
let (
VerifierOutput {
server_name,
transcript,
..
},
verifier,
) = verifier.accept().await?;
verifier.close().await?;
let server_name = server_name.expect("prover should have revealed server name"); let server_name = server_name.expect("prover should have revealed server name");
let transcript = transcript.expect("prover should have revealed transcript data"); let transcript = transcript.expect("prover should have revealed transcript data");
@@ -232,7 +262,7 @@ async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
let ServerName::Dns(server_name) = server_name; let ServerName::Dns(server_name) = server_name;
assert_eq!(server_name.as_str(), SERVER_DOMAIN); assert_eq!(server_name.as_str(), SERVER_DOMAIN);
transcript Ok(transcript)
} }
/// Render redacted bytes as `🙈`. /// Render redacted bytes as `🙈`.

View File

@@ -0,0 +1,5 @@
!noir/target/
# Ignore everything inside noir/target
noir/target/*
# Except noir.json
!noir/target/noir.json

View File

@@ -0,0 +1,167 @@
# Interactive Zero-Knowledge Age Verification with TLSNotary
This example demonstrates **privacy-preserving age verification** using TLSNotary and zero-knowledge proofs. It allows a prover to demonstrate they are 18+ years old without revealing their actual birth date or any other personal information.
## 🔍 How It Works (simplified overview)
```mermaid
sequenceDiagram
participant S as Tax Server<br/>(fixture)
participant P as Prover
participant V as Verifier
P->>S: Request tax data (with auth token) (MPC-TLS)
S->>P: Tax data including `date_of_birth` (MPC-TLS)
P->>V: Share transcript with redactions
P->>V: Commit to blinded hash of birth date
P->>P: Generate ZK proof of age ≥ 18
P->>V: Send ZK proof
V->>V: Verify transcript & ZK proof
V->>V: ✅ Confirm: Prover is 18+ (no birth date revealed)
```
### The Process
1. **MPC-TLS Session**: The Prover fetches tax information containing their birth date, while the Verifier jointly verifies the TLS session to ensure the data comes from the authentic server.
2. **Selective Disclosure**:
* The authorization token is **redacted**: the Verifier sees the plaintext request but not the token.
* The birth date is **committed** as a blinded hash: the Verifier cannot see the date, but the Prover is cryptographically bound to it.
(Depending on the use case more data can be redacted or revealed)
3. **Zero-Knowledge Proof**: The Prover generates a ZK proof that the committed birth date corresponds to an age ≥ 18.
4. **Verification**: The Verifier checks both the TLS transcript and the ZK proof, confirming age ≥ 18 without learning the actual date of birth.
### Example Data
The tax server returns data like this:
```json
{
"tax_year": 2024,
"taxpayer": {
"idnr": "12345678901",
"first_name": "Max",
"last_name": "Mustermann",
"date_of_birth": "1985-03-12",
// ...
}
}
```
## 🔐 Zero-Knowledge Proof Details
The ZK circuit proves: **"I know a birth date that hashes to the committed value AND indicates I am 18+ years old"**
**Public Inputs:**
- ✅ Verification date
- ✅ Committed blinded hash of birth date
**Private Inputs (Hidden):**
- 🔒 Actual birth date plaintext
- 🔒 Random blinder used in hash commitment
**What the Verifier Learns:**
- ✅ The prover is 18+ years old
- ✅ The birth date is authentic (from the MPC-TLS session)
Everything else remains private.
## 🏃 Run the Example
1. **Start the test server** (from repository root):
```bash
RUST_LOG=info PORT=4000 cargo run --bin tlsn-server-fixture
```
2. **Run the age verification** (in a new terminal):
```bash
SERVER_PORT=4000 cargo run --release --example interactive_zk
```
3. **For detailed logs**:
```bash
RUST_LOG=debug,yamux=info,uid_mux=info SERVER_PORT=4000 cargo run --release --example interactive_zk
```
### Expected Output
```
Successfully verified https://test-server.io:4000/elster
Age verified in ZK: 18+ ✅
Verified sent data:
GET https://test-server.io:4000/elster HTTP/1.1
host: test-server.io
connection: close
authorization: 🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈
Verified received data:
🙈🙈🙈🙈🙈🙈🙈🙈[truncated for brevity]...🙈🙈🙈🙈🙈"tax_year":2024🙈🙈🙈🙈🙈...
```
> 💡 **Note**: In this demo, both Prover and Verifier run on the same machine. In production, they would operate on separate systems.
> 💡 **Note**: This demo assumes that the tax server serves correct data, and that only the submitter of the tax data has access to the specified page.
## 🛠 Development
### Project Structure
```
interactive_zk/
├── prover.rs             # Prover implementation
├── verifier.rs           # Verifier implementation
├── types.rs              # Shared types
├── interactive_zk.rs     # Main example runner
├── noir/                 # Zero-knowledge circuit
│   ├── src/main.nr       # Noir circuit code
│   ├── target/           # Compiled circuit artifacts
│   ├── Nargo.toml        # Noir project config
│   ├── Prover.toml       # Example input for `nargo execute`
│   └── generate_test_data.rs  # Rust script to generate Noir test data
└── README.md
```
### Noir Circuit Commands
We use [Mopro's `noir_rs`](https://zkmopro.org/docs/crates/noir-rs/) for ZK proof generation. The **circuit is pre-compiled and ready to use**. You don't need to install Noir tools to run the example. But if you want to change or test the circuit in isolation, you can use the following instructions.
Before you proceed, we recommend double-checking that your Noir tooling matches the versions used in Mopro's `noir_rs`:
```sh
# Install correct Noir and BB versions (important for compatibility!)
noirup --version 1.0.0-beta.8
bbup -v 1.0.0-nightly.20250723
```
If you don't have `noirup` and `bbup` installed yet, check [Noir's Quick Start](https://noir-lang.org/docs/getting_started/quick_start).
To compile the circuit, go to the `noir` folder and run `nargo compile`.
To check and experiment with the Noir circuit, you can use these commands:
* Execute Circuit: Compile the circuit and run it with sample data from `Prover.toml`:
```sh
nargo execute
```
* Generate Verification Key: Create the verification key needed to verify proofs
```sh
bb write_vk -b ./target/noir.json -o ./target
```
* Generate Proof: Create a zero-knowledge proof using the circuit and witness data.
```sh
bb prove --bytecode_path ./target/noir.json --witness_path ./target/noir.gz -o ./target
```
* Verify Proof: Verify that a proof is valid using the verification key.
```sh
bb verify -k ./target/vk -p ./target/proof
```
* Run the Noir tests:
```sh
nargo test --show-output
```
To create extra tests, you can use `./generate_test_data.rs` to help with generating correct blinders and hashes.
## 📚 Learn More
- [TLSNotary Documentation](https://docs.tlsnotary.org/)
- [Noir Language Guide](https://noir-lang.org/)
- [Zero-Knowledge Proofs Explained](https://ethereum.org/en/zero-knowledge-proofs/)
- [Mopro ZK Toolkit](https://zkmopro.org/)

View File

@@ -0,0 +1,60 @@
mod prover;
mod types;
mod verifier;
use anyhow::Result;
use prover::prover;
use std::{
env,
net::{IpAddr, SocketAddr},
};
use tlsn_server_fixture::DEFAULT_FIXTURE_PORT;
use tlsn_server_fixture_certs::SERVER_DOMAIN;
use verifier::verifier;
#[tokio::main]
async fn main() -> Result<()> {
tracing_subscriber::fmt::init();
let server_host: String = env::var("SERVER_HOST").unwrap_or("127.0.0.1".into());
let server_port: u16 = env::var("SERVER_PORT")
.map(|port| port.parse().expect("port should be valid integer"))
.unwrap_or(DEFAULT_FIXTURE_PORT);
// We use SERVER_DOMAIN here to make sure it matches the domain in the test
// server's certificate.
let uri = format!("https://{SERVER_DOMAIN}:{server_port}/elster");
let server_ip: IpAddr = server_host
.parse()
.map_err(|e| anyhow::anyhow!("Invalid IP address '{server_host}': {e}"))?;
let server_addr = SocketAddr::from((server_ip, server_port));
// Connect prover and verifier.
let (prover_socket, verifier_socket) = tokio::io::duplex(1 << 23);
let (prover_extra_socket, verifier_extra_socket) = tokio::io::duplex(1 << 23);
let (_, transcript) = tokio::try_join!(
prover(prover_socket, prover_extra_socket, &server_addr, &uri),
verifier(verifier_socket, verifier_extra_socket)
)?;
println!("---");
println!("Successfully verified {}", &uri);
println!("Age verified in ZK: 18+ ✅\n");
println!(
"Verified sent data:\n{}",
bytes_to_redacted_string(transcript.sent_unsafe())
);
println!(
"Verified received data:\n{}",
bytes_to_redacted_string(transcript.received_unsafe())
);
Ok(())
}
/// Render redacted bytes as `🙈`.
///
/// Redacted transcript regions are zeroed out upstream; every NUL byte is
/// replaced with a monkey emoji so redactions remain visible when printed.
/// Non-UTF-8 bytes are rendered lossily (as `U+FFFD`).
pub fn bytes_to_redacted_string(bytes: &[u8]) -> String {
    let text = String::from_utf8_lossy(bytes);
    let mut rendered = String::with_capacity(text.len());
    for ch in text.chars() {
        if ch == '\0' {
            rendered.push_str("🙈");
        } else {
            rendered.push(ch);
        }
    }
    rendered
}

View File

@@ -0,0 +1,8 @@
[package]
name = "noir"
type = "bin"
authors = [""]
[dependencies]
sha256 = { tag = "v0.1.5", git = "https://github.com/noir-lang/sha256" }
date = { tag = "v0.5.4", git = "https://github.com/madztheo/noir-date.git" }

View File

@@ -0,0 +1,8 @@
blinder = [108, 93, 120, 205, 15, 35, 159, 124, 243, 96, 22, 128, 16, 149, 219, 216]
committed_hash = [186, 158, 101, 39, 49, 48, 26, 83, 242, 96, 10, 221, 121, 174, 62, 50, 136, 132, 232, 58, 25, 32, 66, 196, 99, 85, 66, 85, 255, 1, 202, 254]
date_of_birth = "1985-03-12"
[proof_date]
day = "29"
month = "08"
year = "2025"

View File

@@ -0,0 +1,64 @@
#!/usr/bin/env -S cargo +nightly -Zscript
---
[package]
name = "generate_test_data"
version = "0.0.0"
edition = "2021"
publish = false
[dependencies]
sha2 = "0.10"
rand = "0.8"
chrono = "0.4"
---
use chrono::Datelike;
use chrono::Local;
use rand::RngCore;
use sha2::{Digest, Sha256};
/// Generate matching test inputs for the Noir age-verification circuit:
/// a fixed birth-date string, a fresh random 16-byte blinder, today's date,
/// and the SHA-256 commitment over (dob || blinder). Prints a ready-to-paste
/// Noir test snippet to stdout.
fn main() {
    // 1. Birthdate string (fixed)
    let dob_str = "1985-03-12"; // 10 bytes long

    // Public proof date: the date at which the age check is evaluated.
    let proof_date = Local::now().date_naive();
    let proof_year = proof_date.year();
    let proof_month = proof_date.month();
    let proof_day = proof_date.day();

    // 2. Generate random 16-byte blinder
    let mut blinder = [0u8; 16];
    rand::thread_rng().fill_bytes(&mut blinder);

    // 3. Build the 26-byte hash preimage: the dob string bytes FIRST, then
    //    the blinder (10 + 16 = 26). The order matters — it must match the
    //    layout the circuit hashes (dob || blinder).
    let mut preimage = Vec::with_capacity(26);
    preimage.extend_from_slice(dob_str.as_bytes());
    preimage.extend_from_slice(&blinder);

    // 4. Hash it
    let hash = Sha256::digest(&preimage);

    // Render both byte arrays as comma-separated decimal lists for Noir source.
    let blinder = blinder
        .iter()
        .map(|b| b.to_string())
        .collect::<Vec<_>>()
        .join(", ");

    let committed_hash = hash
        .iter()
        .map(|b| b.to_string())
        .collect::<Vec<_>>()
        .join(", ");

    // Emit a snippet that can be pasted directly into a Noir #[test] body.
    println!(
        "
    // Private input
    let date_of_birth = \"{dob_str}\";
    let blinder = [{blinder}];

    // Public input
    let proof_date = date::Date {{ year: {proof_year}, month: {proof_month}, day: {proof_day} }};
    let committed_hash = [{committed_hash}];

    main(proof_date, committed_hash, date_of_birth, blinder);
"
    );
}

View File

@@ -0,0 +1,82 @@
use dep::date::Date;
// Prove age >= 18 without revealing the birth date: the private
// `date_of_birth` must both (a) hash together with `blinder` to the public
// `committed_hash` and (b) imply an age of at least 18 on `proof_date`.
fn main(
    // Public inputs
    proof_date: pub date::Date, // date at which the age check is evaluated
    committed_hash: pub [u8; 32], // SHA-256 of (dob string || blinder)
    // Private inputs
    date_of_birth: str<10>, // e.g. "1985-03-12"
    blinder: [u8; 16], // Random 16-byte blinder
) {
    let is_18 = check_18(date_of_birth, proof_date);
    let correct_hash = check_hash(date_of_birth, blinder, committed_hash);
    // Both constraints must hold: the committed date is the one being
    // checked, and that date implies age >= 18.
    assert(correct_hash);
    assert(is_18);
}
// Returns true when the parsed birth date implies age >= 18 on `proof_date`.
fn check_18(date_of_birth: str<10>, proof_date: date::Date) -> bool {
    let dob = parse_birth_date(date_of_birth);
    // NOTE(review): strict `lt` means a prover whose 18th birthday falls
    // exactly on `proof_date` is rejected — confirm whether `lte` was intended.
    let is_18 = dob.add_years(18).lt(proof_date);
    println(f"Is 18? {is_18}");
    is_18
}
// Returns true when SHA-256(dob string || blinder) equals the public
// commitment, binding the prover to the birth date without revealing it.
fn check_hash(date_of_birth: str<10>, blinder: [u8; 16], committed_hash: [u8; 32]) -> bool {
    let hash_input: [u8; 26] = make_hash_input(date_of_birth, blinder);
    // Variable-length SHA-256 over the full 26-byte preimage.
    let computed_hash = sha256::sha256_var(hash_input, 26);
    let correct_hash = computed_hash == committed_hash;
    println(f"Correct hash? {correct_hash}");
    correct_hash
}
// Lay out the 26-byte hash preimage: the 10 dob bytes first, followed by
// the 16 blinder bytes. Any tooling that produces commitments must use
// this same ordering.
fn make_hash_input(dob: str<10>, blinder: [u8; 16]) -> [u8; 26] {
    let mut input: [u8; 26] = [0; 26];
    for i in 0..10 {
        input[i] = dob.as_bytes()[i];
    }
    for i in 0..16 {
        input[10 + i] = blinder[i];
    }
    input
}
// Parse a "YYYY-MM-DD" string into a Date by dropping the two '-'
// separators (indices 4 and 7) and handing the remaining 8 digits
// ("YYYYMMDD") to the date library. Assumes the input is well-formed;
// digit validity is left to `from_str_long_year`.
pub fn parse_birth_date(birth_date: str<10>) -> date::Date {
    let date: [u8; 10] = birth_date.as_bytes();
    let date_str: str<8> =
        [date[0], date[1], date[2], date[3], date[5], date[6], date[8], date[9]].as_str_unchecked();
    Date::from_str_long_year(date_str)
}
#[test]
fn test_max_is_over_18() {
    // Happy path: a 1985 birth date is over 18 on the 2025 proof date, and
    // the commitment below presumably matches this dob/blinder pair
    // (generated via generate_test_data.rs), so `main` must succeed.
    // Private input
    let date_of_birth = "1985-03-12";
    let blinder = [120, 80, 62, 10, 76, 60, 130, 98, 147, 161, 139, 126, 27, 236, 36, 56];

    // Public input
    let proof_date = date::Date { year: 2025, month: 9, day: 2 };
    let committed_hash = [
        229, 118, 202, 216, 213, 230, 125, 163, 48, 178, 118, 225, 84, 7, 140, 63, 173, 255, 163,
        208, 163, 3, 63, 204, 37, 120, 254, 246, 202, 116, 122, 145,
    ];
    main(proof_date, committed_hash, date_of_birth, blinder);
}
#[test(should_fail)]
fn test_under_18() {
    // Negative path: a 2010 birth date is under 18 on the 2025 proof date,
    // so the age assertion in `main` must fail even though the commitment
    // presumably matches this dob/blinder pair.
    // Private input
    let date_of_birth = "2010-08-01";
    let blinder = [160, 23, 57, 158, 141, 195, 155, 132, 109, 242, 48, 220, 70, 217, 229, 189];

    // Public input
    let proof_date = date::Date { year: 2025, month: 8, day: 29 };
    let committed_hash = [
        16, 132, 194, 62, 232, 90, 157, 153, 4, 231, 1, 54, 226, 3, 87, 174, 129, 177, 80, 69, 37,
        222, 209, 91, 168, 156, 9, 109, 108, 144, 168, 109,
    ];
    main(proof_date, committed_hash, date_of_birth, blinder);
}

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,390 @@
use std::net::SocketAddr;
use crate::types::received_commitments;
use super::types::ZKProofBundle;
use anyhow::Result;
use chrono::{Datelike, Local, NaiveDate};
use http_body_util::Empty;
use hyper::{body::Bytes, header, Request, StatusCode, Uri};
use hyper_util::rt::TokioIo;
use k256::sha2::{Digest, Sha256};
use noir::{
barretenberg::{
prove::prove_ultra_honk, srs::setup_srs_from_bytecode,
verify::get_ultra_honk_verification_key,
},
witness::from_vec_str_to_witness_map,
};
use serde_json::Value;
use spansy::{
http::{BodyContent, Requests, Responses},
Spanned,
};
use tls_server_fixture::{CA_CERT_DER, SERVER_DOMAIN};
use tlsn::{
config::{
prove::{ProveConfig, ProveConfigBuilder},
prover::ProverConfig,
tls::TlsClientConfig,
tls_commit::{mpc::MpcTlsConfig, TlsCommitConfig},
},
connection::ServerName,
hash::HashAlgId,
prover::Prover,
transcript::{
hash::{PlaintextHash, PlaintextHashSecret},
Direction, TranscriptCommitConfig, TranscriptCommitConfigBuilder, TranscriptCommitmentKind,
TranscriptSecret,
},
webpki::{CertificateDer, RootCertStore},
};
use tlsn_examples::{MAX_RECV_DATA, MAX_SENT_DATA};
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
use tracing::instrument;
/// Prover side of the age-verification example.
///
/// Runs MPC-TLS with the verifier over `verifier_socket`, sends a GET
/// request for `uri` to the TLS server at `server_addr`, selectively
/// reveals the transcript (hiding the bearer token and only hash-committing
/// the date of birth), then generates a Noir ZK proof and ships the proof
/// bundle to the verifier over `verifier_extra_socket`.
#[instrument(skip(verifier_socket, verifier_extra_socket))]
pub async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
verifier_socket: T,
mut verifier_extra_socket: T,
server_addr: &SocketAddr,
uri: &str,
) -> Result<()> {
let uri = uri.parse::<Uri>()?;
// The protocol only makes sense over TLS; refuse anything else.
if uri.scheme().map(|s| s.as_str()) != Some("https") {
return Err(anyhow::anyhow!("URI must use HTTPS scheme"));
}
let server_domain = uri
.authority()
.ok_or_else(|| anyhow::anyhow!("URI must have authority"))?
.host();
// Create a new prover and perform necessary setup.
let prover = Prover::new(ProverConfig::builder().build()?)
.commit(
TlsCommitConfig::builder()
// Select the TLS commitment protocol.
.protocol(
MpcTlsConfig::builder()
// We must configure the amount of data we expect to exchange beforehand,
// which will be preprocessed prior to the
// connection. Reducing these limits will improve
// performance.
.max_sent_data(MAX_SENT_DATA)
.max_recv_data(MAX_RECV_DATA)
.build()?,
)
.build()?,
verifier_socket.compat(),
)
.await?;
// Open a TCP connection to the server.
let client_socket = tokio::net::TcpStream::connect(server_addr).await?;
// Bind the prover to the server connection.
let (tls_connection, prover_fut) = prover
.connect(
TlsClientConfig::builder()
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into()?))
// Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.build()?,
client_socket.compat(),
)
.await?;
let tls_connection = TokioIo::new(tls_connection.compat());
// Spawn the Prover to run in the background.
let prover_task = tokio::spawn(prover_fut);
// MPC-TLS Handshake.
let (mut request_sender, connection) =
hyper::client::conn::http1::handshake(tls_connection).await?;
// Spawn the connection to run in the background.
tokio::spawn(connection);
// MPC-TLS: Send Request and wait for Response.
// NOTE(review): the bearer token is a fixture value; it is the secret
// later hidden from the verifier by `reveal_request`.
let request = Request::builder()
.uri(uri.clone())
.header("Host", server_domain)
.header("Connection", "close")
.header(header::AUTHORIZATION, "Bearer random_auth_token")
.method("GET")
.body(Empty::<Bytes>::new())?;
let response = request_sender.send_request(request).await?;
if response.status() != StatusCode::OK {
return Err(anyhow::anyhow!(
"MPC-TLS request failed with status {}",
response.status()
));
}
// Create proof for the Verifier.
let mut prover = prover_task.await??;
let transcript = prover.transcript().clone();
let mut prove_config_builder = ProveConfig::builder(&transcript);
// Reveal the DNS name.
prove_config_builder.server_identity();
let sent: &[u8] = transcript.sent();
let received: &[u8] = transcript.received();
let sent_len = sent.len();
let recv_len = received.len();
tracing::info!("Sent length: {}, Received length: {}", sent_len, recv_len);
// Reveal the entire HTTP request except for the authorization bearer token
reveal_request(sent, &mut prove_config_builder)?;
// Create hash commitment for the date of birth field from the response
let mut transcript_commitment_builder = TranscriptCommitConfig::builder(&transcript);
transcript_commitment_builder.default_kind(TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256,
});
reveal_received(
received,
&mut prove_config_builder,
&mut transcript_commitment_builder,
)?;
let transcripts_commitment_config = transcript_commitment_builder.build()?;
prove_config_builder.transcript_commit(transcripts_commitment_config);
let prove_config = prove_config_builder.build()?;
// MPC-TLS prove
let prover_output = prover.prove(&prove_config).await?;
prover.close().await?;
// Prove birthdate is more than 18 years ago.
// The first received-direction hash commitment/secret pair is the DOB
// commitment registered in `reveal_received`.
let received_commitments = received_commitments(&prover_output.transcript_commitments);
let received_commitment = received_commitments
.first()
.ok_or_else(|| anyhow::anyhow!("No received commitments found"))?; // committed hash (of date of birth string)
let received_secrets = received_secrets(&prover_output.transcript_secrets);
let received_secret = received_secrets
.first()
.ok_or_else(|| anyhow::anyhow!("No received secrets found"))?; // hash blinder
let proof_input = prepare_zk_proof_input(received, received_commitment, received_secret)?;
let proof_bundle = generate_zk_proof(&proof_input)?;
// Sent zk proof bundle to verifier
let serialized_proof = bincode::serialize(&proof_bundle)?;
verifier_extra_socket.write_all(&serialized_proof).await?;
// Shut the stream down so the verifier's `read_to_end` can complete.
verifier_extra_socket.shutdown().await?;
Ok(())
}
// Reveal everything from the request, except for the authorization token.
//
// The request is revealed as two ranges — everything before the bearer
// token value and everything after it — leaving only the token hidden.
fn reveal_request(request: &[u8], builder: &mut ProveConfigBuilder<'_>) -> Result<()> {
let reqs = Requests::new_from_slice(request).collect::<Result<Vec<_>, _>>()?;
let req = reqs
.first()
.ok_or_else(|| anyhow::anyhow!("No requests found"))?;
if req.request.method.as_str() != "GET" {
return Err(anyhow::anyhow!(
"Expected GET method, found {}",
req.request.method.as_str()
));
}
let authorization_header = req
.headers_with_name(header::AUTHORIZATION.as_str())
.next()
.ok_or_else(|| anyhow::anyhow!("Authorization header not found"))?;
// Start of the token value: header start + len("authorization") + 2 for
// the ": " separator. NOTE(review): assumes exactly one space after the
// colon — confirm against how the request is serialized on the wire.
let start_pos = authorization_header
.span()
.indices()
.min()
.ok_or_else(|| anyhow::anyhow!("Could not find authorization header start position"))?
+ header::AUTHORIZATION.as_str().len()
+ 2;
// End of the token value: value length is the header span minus the name
// and the ": " separator.
let end_pos =
start_pos + authorization_header.span().len() - header::AUTHORIZATION.as_str().len() - 2;
builder.reveal_sent(&(0..start_pos))?;
builder.reveal_sent(&(end_pos..request.len()))?;
Ok(())
}
// Reveals the `tax_year` field of the JSON response body and registers a
// hash commitment (not a reveal) for the `taxpayer.date_of_birth` field.
fn reveal_received(
received: &[u8],
builder: &mut ProveConfigBuilder<'_>,
transcript_commitment_builder: &mut TranscriptCommitConfigBuilder,
) -> Result<()> {
let resp = Responses::new_from_slice(received).collect::<Result<Vec<_>, _>>()?;
let response = resp
.first()
.ok_or_else(|| anyhow::anyhow!("No responses found"))?;
let body = response
.body
.as_ref()
.ok_or_else(|| anyhow::anyhow!("Response body not found"))?;
let BodyContent::Json(json) = &body.content else {
return Err(anyhow::anyhow!("Expected JSON body content"));
};
// reveal tax year
let tax_year = json
.get("tax_year")
.ok_or_else(|| anyhow::anyhow!("tax_year field not found in JSON"))?;
// Back up 11 bytes so the revealed range also covers the JSON key:
// `"tax_year":` is exactly 11 bytes. NOTE(review): assumes no whitespace
// between key and value — confirm against the fixture's serialization.
let start_pos = tax_year
.span()
.indices()
.min()
.ok_or_else(|| anyhow::anyhow!("Could not find tax_year start position"))?
- 11;
// `max()` is the last byte of the value; +1 makes the end exclusive.
let end_pos = tax_year
.span()
.indices()
.max()
.ok_or_else(|| anyhow::anyhow!("Could not find tax_year end position"))?
+ 1;
builder.reveal_recv(&(start_pos..end_pos))?;
// commit to hash of date of birth
let dob = json
.get("taxpayer.date_of_birth")
.ok_or_else(|| anyhow::anyhow!("taxpayer.date_of_birth field not found in JSON"))?;
transcript_commitment_builder.commit_recv(dob.span())?;
Ok(())
}
// Collects every hash secret from the prover output that belongs to the
// received (server-to-client) direction of the transcript.
fn received_secrets(transcript_secrets: &[TranscriptSecret]) -> Vec<&PlaintextHashSecret> {
    let mut hashes = Vec::new();
    for secret in transcript_secrets {
        if let TranscriptSecret::Hash(hash) = secret {
            if hash.direction == Direction::Received {
                hashes.push(hash);
            }
        }
    }
    hashes
}
// Inputs assembled for the Noir age-verification circuit.
#[derive(Debug)]
pub struct ZKProofInput {
// Private witness: date-of-birth bytes sliced from the received transcript.
dob: Vec<u8>,
// Public input: the date the proof is made for (prover's local "today").
proof_date: NaiveDate,
// Private witness: blinder bytes from the hash commitment secret.
blinder: Vec<u8>,
// Public input: the committed SHA-256 hash value from the transcript
// commitment.
committed_hash: Vec<u8>,
}
// Verify that the blinded, committed hash is correct
//
// Extracts the DOB bytes, blinder, and committed hash from the prover
// output, recomputes SHA-256(dob || blinder) locally and checks it against
// the committed value, then pairs everything with today's date.
fn prepare_zk_proof_input(
received: &[u8],
received_commitment: &PlaintextHash,
received_secret: &PlaintextHashSecret,
) -> Result<ZKProofInput> {
// Invariant checks: the commitment must be a SHA-256 hash over received
// data. Violations are caller bugs, hence asserts rather than errors.
assert_eq!(received_commitment.direction, Direction::Received);
assert_eq!(received_commitment.hash.alg, HashAlgId::SHA256);
let hash = &received_commitment.hash;
// NOTE(review): slicing min()..end() assumes the committed index range is
// contiguous; a fragmented range would pull in extra bytes — confirm.
let dob_start = received_commitment
.idx
.min()
.ok_or_else(|| anyhow::anyhow!("No start index for DOB"))?;
let dob_end = received_commitment
.idx
.end()
.ok_or_else(|| anyhow::anyhow!("No end index for DOB"))?;
let dob = received[dob_start..dob_end].to_vec();
let blinder = received_secret.blinder.as_bytes().to_vec();
let committed_hash = hash.value.as_bytes().to_vec();
// The proof date is the prover's local calendar date.
let proof_date = Local::now().date_naive();
assert_eq!(received_secret.direction, Direction::Received);
assert_eq!(received_secret.alg, HashAlgId::SHA256);
// Sanity check: the commitment must open to SHA-256(dob || blinder);
// otherwise proof generation would fail later with a less clear error.
let mut hasher = Sha256::new();
hasher.update(&dob);
hasher.update(&blinder);
let computed_hash = hasher.finalize();
if committed_hash != computed_hash.as_ref() as &[u8] {
return Err(anyhow::anyhow!(
"Computed hash does not match committed hash"
));
}
Ok(ZKProofInput {
dob,
proof_date,
committed_hash,
blinder,
})
}
/// Generates the UltraHonk ZK proof for the age-verification circuit.
///
/// Builds the witness from `proof_input` (in the circuit's parameter
/// order), sets up the SRS, derives the verification key, and proves.
///
/// # Errors
/// Returns an error if the embedded circuit JSON is malformed or if any
/// barretenberg step (SRS setup, VK derivation, proving) fails.
fn generate_zk_proof(proof_input: &ZKProofInput) -> Result<ZKProofBundle> {
    tracing::info!("🔒 Generating ZK proof with Noir...");
    const PROGRAM_JSON: &str = include_str!("./noir/target/noir.json");
    // 1. Load bytecode from program.json
    let json: Value = serde_json::from_str(PROGRAM_JSON)?;
    let bytecode = json["bytecode"]
        .as_str()
        .ok_or_else(|| anyhow::anyhow!("bytecode field not found in program.json"))?;
    // Witness order must match the circuit: proof date (day, month, year),
    // committed hash bytes, DOB bytes, blinder bytes.
    let mut inputs: Vec<String> = vec![
        proof_input.proof_date.day().to_string(),
        proof_input.proof_date.month().to_string(),
        proof_input.proof_date.year().to_string(),
    ];
    inputs.extend(proof_input.committed_hash.iter().map(|b| b.to_string()));
    inputs.extend(proof_input.dob.iter().map(|b| b.to_string()));
    inputs.extend(proof_input.blinder.iter().map(|b| b.to_string()));
    let proof_date = proof_input.proof_date.to_string();
    tracing::info!(
        "Public inputs : Proof date ({}) and committed hash ({})",
        proof_date,
        hex::encode(&proof_input.committed_hash)
    );
    tracing::info!(
        "Private inputs: Blinder ({}) and Date of Birth ({})",
        hex::encode(&proof_input.blinder),
        String::from_utf8_lossy(&proof_input.dob)
    );
    tracing::debug!("Witness inputs {:?}", inputs);
    let input_refs: Vec<&str> = inputs.iter().map(String::as_str).collect();
    let witness = from_vec_str_to_witness_map(input_refs).map_err(|e| anyhow::anyhow!(e))?;
    // Setup SRS
    setup_srs_from_bytecode(bytecode, None, false).map_err(|e| anyhow::anyhow!(e))?;
    // Verification key
    let vk = get_ultra_honk_verification_key(bytecode, false).map_err(|e| anyhow::anyhow!(e))?;
    // Generate proof. `witness` is not used afterwards, so move it instead
    // of cloning (the previous `witness.clone()` was a redundant copy);
    // `vk` is still needed for the bundle below, so it is cloned.
    let proof = prove_ultra_honk(bytecode, witness, vk.clone(), false)
        .map_err(|e| anyhow::anyhow!(e))?;
    tracing::info!("✅ Proof generated ({} bytes)", proof.len());
    let proof_bundle = ZKProofBundle { vk, proof };
    Ok(proof_bundle)
}

View File

@@ -0,0 +1,21 @@
use serde::{Deserialize, Serialize};
use tlsn::transcript::{hash::PlaintextHash, Direction, TranscriptCommitment};
// Proof artifacts sent from the prover to the verifier out-of-band
// (bincode-serialized over the extra socket).
#[derive(Serialize, Deserialize, Debug)]
pub struct ZKProofBundle {
// UltraHonk verification key; the verifier recomputes its own and
// compares to reject proofs for a different circuit.
pub vk: Vec<u8>,
// UltraHonk proof bytes; the public inputs are presumably serialized at
// the front (the verifier reads them from fixed offsets) — confirm.
pub proof: Vec<u8>,
}
// Collects every plaintext-hash commitment from the prover output that
// belongs to the received (server-to-client) direction of the transcript.
pub fn received_commitments(
    transcript_commitments: &[TranscriptCommitment],
) -> Vec<&PlaintextHash> {
    let mut hashes = Vec::new();
    for commitment in transcript_commitments {
        if let TranscriptCommitment::Hash(hash) = commitment {
            if hash.direction == Direction::Received {
                hashes.push(hash);
            }
        }
    }
    hashes
}

View File

@@ -0,0 +1,219 @@
use crate::types::received_commitments;
use super::types::ZKProofBundle;
use anyhow::Result;
use chrono::{Local, NaiveDate};
use noir::barretenberg::verify::{get_ultra_honk_verification_key, verify_ultra_honk};
use serde_json::Value;
use tls_server_fixture::CA_CERT_DER;
use tlsn::{
config::{tls_commit::TlsCommitProtocolConfig, verifier::VerifierConfig},
connection::ServerName,
hash::HashAlgId,
transcript::{Direction, PartialTranscript},
verifier::{Verifier, VerifierOutput},
webpki::{CertificateDer, RootCertStore},
};
use tlsn_examples::{MAX_RECV_DATA, MAX_SENT_DATA};
use tlsn_server_fixture_certs::SERVER_DOMAIN;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite};
use tokio_util::compat::TokioAsyncReadCompatExt;
use tracing::instrument;
/// Verifier side of the age-verification example.
///
/// Runs the TLS commitment protocol and transcript verification with the
/// prover over `socket`, then receives the prover's Noir ZK proof bundle
/// over `extra_socket` and checks it against the transcript commitment.
/// Returns the verified partial transcript on success.
#[instrument(skip(socket, extra_socket))]
pub async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
socket: T,
mut extra_socket: T,
) -> Result<PartialTranscript> {
let verifier = Verifier::new(
VerifierConfig::builder()
// Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.build()?,
);
// Validate the proposed configuration and then run the TLS commitment protocol.
let verifier = verifier.commit(socket.compat()).await?;
// This is the opportunity to ensure the prover does not attempt to overload the
// verifier.
let reject = if let TlsCommitProtocolConfig::Mpc(mpc_tls_config) = verifier.request().protocol()
{
if mpc_tls_config.max_sent_data() > MAX_SENT_DATA {
Some("max_sent_data is too large")
} else if mpc_tls_config.max_recv_data() > MAX_RECV_DATA {
Some("max_recv_data is too large")
} else {
None
}
} else {
Some("expecting to use MPC-TLS")
};
if reject.is_some() {
verifier.reject(reject).await?;
return Err(anyhow::anyhow!("protocol configuration rejected"));
}
// Runs the TLS commitment protocol to completion.
let verifier = verifier.accept().await?.run().await?;
// Validate the proving request and then verify.
let verifier = verifier.verify().await?;
let request = verifier.request();
// The prover must reveal the server identity and some transcript data;
// otherwise there is nothing meaningful to check below.
if !request.server_identity() || request.reveal().is_none() {
let verifier = verifier
.reject(Some(
"expecting to verify the server name and transcript data",
))
.await?;
verifier.close().await?;
return Err(anyhow::anyhow!(
"prover did not reveal the server name and transcript data"
));
}
let (
VerifierOutput {
server_name,
transcript,
transcript_commitments,
..
},
verifier,
) = verifier.accept().await?;
verifier.close().await?;
let server_name = server_name.expect("server name should be present");
let transcript = transcript.expect("transcript should be present");
// Create hash commitment for the date of birth field from the response
// NOTE(review): `sent_unsafe` presumably exposes unauthenticated bytes in
// unrevealed ranges; here it is only used for a substring check — confirm
// this is acceptable for the example.
let sent = transcript.sent_unsafe().to_vec();
let sent_data = String::from_utf8(sent.clone())
.map_err(|e| anyhow::anyhow!("Verifier expected valid UTF-8 sent data: {e}"))?;
if !sent_data.contains(SERVER_DOMAIN) {
return Err(anyhow::anyhow!(
"Verification failed: Expected host {SERVER_DOMAIN} not found in sent data"
));
}
// Check received data.
let received_commitments = received_commitments(&transcript_commitments);
let received_commitment = received_commitments
.first()
.ok_or_else(|| anyhow::anyhow!("Missing hash commitment"))?;
assert!(received_commitment.direction == Direction::Received);
assert!(received_commitment.hash.alg == HashAlgId::SHA256);
let committed_hash = &received_commitment.hash;
// Check Session info: server name.
let ServerName::Dns(server_name) = server_name;
if server_name.as_str() != SERVER_DOMAIN {
return Err(anyhow::anyhow!(
"Server name mismatch: expected {SERVER_DOMAIN}, got {}",
server_name.as_str()
));
}
// Receive ZKProof information from prover
let mut buf = Vec::new();
extra_socket.read_to_end(&mut buf).await?;
if buf.is_empty() {
return Err(anyhow::anyhow!("No ZK proof data received from prover"));
}
let msg: ZKProofBundle = bincode::deserialize(&buf)
.map_err(|e| anyhow::anyhow!("Failed to deserialize ZK proof bundle: {e}"))?;
// Verify zk proof
const PROGRAM_JSON: &str = include_str!("./noir/target/noir.json");
let json: Value = serde_json::from_str(PROGRAM_JSON)
.map_err(|e| anyhow::anyhow!("Failed to parse Noir circuit: {e}"))?;
let bytecode = json["bytecode"]
.as_str()
.ok_or_else(|| anyhow::anyhow!("Bytecode field missing in noir.json"))?;
// Recompute the verification key locally so the prover cannot swap in a
// key for a different circuit.
let vk = get_ultra_honk_verification_key(bytecode, false)
.map_err(|e| anyhow::anyhow!("Failed to get verification key: {e}"))?;
if vk != msg.vk {
return Err(anyhow::anyhow!(
"Verification key mismatch between computed and provided by prover"
));
}
let proof = msg.proof.clone();
// Validate proof has enough data.
// The proof should start with the public inputs:
// * We expect at least 3 * 32 bytes for the three date fields (day, month,
// year)
// * and 32*32 bytes for the hash
let min_bytes = (32 + 3) * 32;
if proof.len() < min_bytes {
return Err(anyhow::anyhow!(
"Proof too short: expected at least {min_bytes} bytes, got {}",
proof.len()
));
}
// Check that the proof date is correctly included in the proof
// Each public input occupies a 32-byte chunk; the meaningful bytes are
// the last four of each chunk (offsets 28..32, 60..64, 92..96).
let proof_date_day: u32 = u32::from_be_bytes(proof[28..32].try_into()?);
let proof_date_month: u32 = u32::from_be_bytes(proof[60..64].try_into()?);
let proof_date_year: i32 = i32::from_be_bytes(proof[92..96].try_into()?);
let proof_date_from_proof =
NaiveDate::from_ymd_opt(proof_date_year, proof_date_month, proof_date_day)
.ok_or_else(|| anyhow::anyhow!("Invalid proof date in proof"))?;
// Reject proofs claiming a future proof date.
let today = Local::now().date_naive();
if (today - proof_date_from_proof).num_days() < 0 {
return Err(anyhow::anyhow!(
"The proof date can only be today or in the past: provided {proof_date_from_proof}, today {today}"
));
}
// Check that the committed hash in the proof matches the hash from the
// commitment
// Each hash byte occupies its own 32-byte chunk; take the last byte of
// each of the 32 chunks following the three date chunks.
let committed_hash_in_proof: Vec<u8> = proof
.chunks(32)
.skip(3) // skip the first 3 chunks
.take(32)
.map(|chunk| *chunk.last().unwrap_or(&0))
.collect();
let expected_hash = committed_hash.value.as_bytes().to_vec();
if committed_hash_in_proof != expected_hash {
tracing::error!(
"❌ The hash in the proof does not match the committed hash in MPC-TLS: {} != {}",
hex::encode(&committed_hash_in_proof),
hex::encode(&expected_hash)
);
return Err(anyhow::anyhow!(
"Hash in proof does not match committed hash in MPC-TLS"
));
}
tracing::info!(
"✅ The hash in the proof matches the committed hash in MPC-TLS ({})",
hex::encode(&expected_hash)
);
// Finally verify the proof
let is_valid = verify_ultra_honk(msg.proof, msg.vk)
.map_err(|e| anyhow::anyhow!("ZKProof Verification failed: {e}"))?;
if !is_valid {
tracing::error!("❌ Age verification ZKProof failed to verify");
return Err(anyhow::anyhow!("Age verification ZKProof failed to verify"));
}
tracing::info!("✅ Age verification ZKProof successfully verified");
Ok(transcript)
}

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "tlsn-formats" name = "tlsn-formats"
version = "0.1.0-alpha.13-pre" version = "0.1.0-alpha.14-pre"
edition = "2021" edition = "2021"
[lints] [lints]

View File

@@ -1,51 +1,59 @@
#### Latency #### #### Default Representative Benchmarks ####
#
# This benchmark measures TLSNotary performance on three representative network scenarios.
# Each scenario is run multiple times to produce statistical metrics (median, std dev, etc.)
# rather than plots. Use this for quick performance checks and CI regression testing.
#
# Payload sizes:
# - upload-size: 1KB (typical HTTP request)
# - download-size: 2KB (typical HTTP response/API data)
#
# Network scenarios are chosen to represent real-world user conditions where
# TLSNotary is primarily bottlenecked by upload bandwidth.
#### Cable/DSL Home Internet ####
# Most common residential internet connection
# - Asymmetric: high download, limited upload (typical bottleneck)
# - Upload bandwidth: 20 Mbps (realistic cable/DSL upload speed)
# - Latency: 20ms (typical ISP latency)
[[group]] [[group]]
name = "latency" name = "cable"
bandwidth = 1000 bandwidth = 20
protocol_latency = 20
upload-size = 1024
download-size = 2048
[[bench]] [[bench]]
group = "latency" group = "cable"
protocol_latency = 10
[[bench]] #### Mobile 5G ####
group = "latency" # Modern mobile connection with good coverage
protocol_latency = 25 # - Upload bandwidth: 30 Mbps (typical 5G upload in good conditions)
# - Latency: 30ms (higher than wired due to mobile tower hops)
[[bench]]
group = "latency"
protocol_latency = 50
[[bench]]
group = "latency"
protocol_latency = 100
[[bench]]
group = "latency"
protocol_latency = 200
#### Bandwidth ####
[[group]] [[group]]
name = "bandwidth" name = "mobile_5g"
protocol_latency = 25 bandwidth = 30
protocol_latency = 30
upload-size = 1024
download-size = 2048
[[bench]] [[bench]]
group = "bandwidth" group = "mobile_5g"
bandwidth = 10
[[bench]] #### Fiber Home Internet ####
group = "bandwidth" # High-end residential connection (best case scenario)
bandwidth = 50 # - Symmetric: equal upload/download bandwidth
# - Upload bandwidth: 100 Mbps (typical fiber upload)
# - Latency: 15ms (lower latency than cable)
[[bench]] [[group]]
group = "bandwidth" name = "fiber"
bandwidth = 100 bandwidth = 100
protocol_latency = 15
upload-size = 1024
download-size = 2048
[[bench]] [[bench]]
group = "bandwidth" group = "fiber"
bandwidth = 250
[[bench]]
group = "bandwidth"
bandwidth = 1000

View File

@@ -0,0 +1,52 @@
#### Bandwidth Sweep Benchmark ####
#
# Measures how network bandwidth affects TLSNotary runtime.
# Keeps latency and payload sizes fixed while varying upload bandwidth.
#
# Fixed parameters:
# - Latency: 25ms (typical internet latency)
# - Upload: 1KB (typical request)
# - Download: 2KB (typical response)
#
# Variable: Bandwidth from 5 Mbps to 1000 Mbps
#
# Use this to plot "Bandwidth vs Runtime" and understand bandwidth sensitivity.
# Focus on upload bandwidth as TLSNotary is primarily upload-bottlenecked
[[group]]
name = "bandwidth_sweep"
protocol_latency = 25
upload-size = 1024
download-size = 2048
[[bench]]
group = "bandwidth_sweep"
bandwidth = 5
[[bench]]
group = "bandwidth_sweep"
bandwidth = 10
[[bench]]
group = "bandwidth_sweep"
bandwidth = 20
[[bench]]
group = "bandwidth_sweep"
bandwidth = 50
[[bench]]
group = "bandwidth_sweep"
bandwidth = 100
[[bench]]
group = "bandwidth_sweep"
bandwidth = 250
[[bench]]
group = "bandwidth_sweep"
bandwidth = 500
[[bench]]
group = "bandwidth_sweep"
bandwidth = 1000

View File

@@ -0,0 +1,53 @@
#### Download Size Sweep Benchmark ####
#
# Measures how download payload size affects TLSNotary runtime.
# Keeps network conditions fixed while varying the response size.
#
# Fixed parameters:
# - Bandwidth: 100 Mbps (typical good connection)
# - Latency: 25ms (typical internet latency)
# - Upload: 1KB (typical request size)
#
# Variable: Download size from 1KB to 100KB
#
# Use this to plot "Download Size vs Runtime" and understand how much data
# TLSNotary can efficiently notarize. Useful for determining optimal
# chunking strategies for large responses.
[[group]]
name = "download_sweep"
bandwidth = 100
protocol_latency = 25
upload-size = 1024
[[bench]]
group = "download_sweep"
download-size = 1024
[[bench]]
group = "download_sweep"
download-size = 2048
[[bench]]
group = "download_sweep"
download-size = 5120
[[bench]]
group = "download_sweep"
download-size = 10240
[[bench]]
group = "download_sweep"
download-size = 20480
[[bench]]
group = "download_sweep"
download-size = 30720
[[bench]]
group = "download_sweep"
download-size = 40960
[[bench]]
group = "download_sweep"
download-size = 51200

View File

@@ -0,0 +1,47 @@
#### Latency Sweep Benchmark ####
#
# Measures how network latency affects TLSNotary runtime.
# Keeps bandwidth and payload sizes fixed while varying protocol latency.
#
# Fixed parameters:
# - Bandwidth: 100 Mbps (typical good connection)
# - Upload: 1KB (typical request)
# - Download: 2KB (typical response)
#
# Variable: Protocol latency from 10ms to 200ms
#
# Use this to plot "Latency vs Runtime" and understand latency sensitivity.
[[group]]
name = "latency_sweep"
bandwidth = 100
upload-size = 1024
download-size = 2048
[[bench]]
group = "latency_sweep"
protocol_latency = 10
[[bench]]
group = "latency_sweep"
protocol_latency = 25
[[bench]]
group = "latency_sweep"
protocol_latency = 50
[[bench]]
group = "latency_sweep"
protocol_latency = 75
[[bench]]
group = "latency_sweep"
protocol_latency = 100
[[bench]]
group = "latency_sweep"
protocol_latency = 150
[[bench]]
group = "latency_sweep"
protocol_latency = 200

View File

@@ -3,7 +3,19 @@
# Ensure the script runs in the folder that contains this script # Ensure the script runs in the folder that contains this script
cd "$(dirname "$0")" cd "$(dirname "$0")"
cargo build --release --package tlsn-harness-runner --package tlsn-harness-executor --package tlsn-server-fixture --package tlsn-harness-plot RUNNER_FEATURES=""
EXECUTOR_FEATURES=""
if [ "$1" = "debug" ]; then
RUNNER_FEATURES="--features debug"
EXECUTOR_FEATURES="--no-default-features --features debug"
fi
cargo build --release \
--package tlsn-harness-runner $RUNNER_FEATURES \
--package tlsn-harness-executor $EXECUTOR_FEATURES \
--package tlsn-server-fixture \
--package tlsn-harness-plot
mkdir -p bin mkdir -p bin

View File

@@ -9,6 +9,7 @@ pub const DEFAULT_UPLOAD_SIZE: usize = 1024;
pub const DEFAULT_DOWNLOAD_SIZE: usize = 4096; pub const DEFAULT_DOWNLOAD_SIZE: usize = 4096;
pub const DEFAULT_DEFER_DECRYPTION: bool = true; pub const DEFAULT_DEFER_DECRYPTION: bool = true;
pub const DEFAULT_MEMORY_PROFILE: bool = false; pub const DEFAULT_MEMORY_PROFILE: bool = false;
pub const DEFAULT_REVEAL_ALL: bool = false;
pub const WARM_UP_BENCH: Bench = Bench { pub const WARM_UP_BENCH: Bench = Bench {
group: None, group: None,
@@ -20,6 +21,7 @@ pub const WARM_UP_BENCH: Bench = Bench {
download_size: 4096, download_size: 4096,
defer_decryption: true, defer_decryption: true,
memory_profile: false, memory_profile: false,
reveal_all: true,
}; };
#[derive(Deserialize)] #[derive(Deserialize)]
@@ -79,6 +81,8 @@ pub struct BenchGroupItem {
pub defer_decryption: Option<bool>, pub defer_decryption: Option<bool>,
#[serde(rename = "memory-profile")] #[serde(rename = "memory-profile")]
pub memory_profile: Option<bool>, pub memory_profile: Option<bool>,
#[serde(rename = "reveal-all")]
pub reveal_all: Option<bool>,
} }
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -97,6 +101,8 @@ pub struct BenchItem {
pub defer_decryption: Option<bool>, pub defer_decryption: Option<bool>,
#[serde(rename = "memory-profile")] #[serde(rename = "memory-profile")]
pub memory_profile: Option<bool>, pub memory_profile: Option<bool>,
#[serde(rename = "reveal-all")]
pub reveal_all: Option<bool>,
} }
impl BenchItem { impl BenchItem {
@@ -132,6 +138,10 @@ impl BenchItem {
if self.memory_profile.is_none() { if self.memory_profile.is_none() {
self.memory_profile = group.memory_profile; self.memory_profile = group.memory_profile;
} }
if self.reveal_all.is_none() {
self.reveal_all = group.reveal_all;
}
} }
pub fn into_bench(&self) -> Bench { pub fn into_bench(&self) -> Bench {
@@ -145,6 +155,7 @@ impl BenchItem {
download_size: self.download_size.unwrap_or(DEFAULT_DOWNLOAD_SIZE), download_size: self.download_size.unwrap_or(DEFAULT_DOWNLOAD_SIZE),
defer_decryption: self.defer_decryption.unwrap_or(DEFAULT_DEFER_DECRYPTION), defer_decryption: self.defer_decryption.unwrap_or(DEFAULT_DEFER_DECRYPTION),
memory_profile: self.memory_profile.unwrap_or(DEFAULT_MEMORY_PROFILE), memory_profile: self.memory_profile.unwrap_or(DEFAULT_MEMORY_PROFILE),
reveal_all: self.reveal_all.unwrap_or(DEFAULT_REVEAL_ALL),
} }
} }
} }
@@ -164,6 +175,8 @@ pub struct Bench {
pub defer_decryption: bool, pub defer_decryption: bool,
#[serde(rename = "memory-profile")] #[serde(rename = "memory-profile")]
pub memory_profile: bool, pub memory_profile: bool,
#[serde(rename = "reveal-all")]
pub reveal_all: bool,
} }
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]

View File

@@ -22,7 +22,10 @@ pub enum CmdOutput {
GetTests(Vec<String>), GetTests(Vec<String>),
Test(TestOutput), Test(TestOutput),
Bench(BenchOutput), Bench(BenchOutput),
Fail { reason: Option<String> }, #[cfg(target_arch = "wasm32")]
Fail {
reason: Option<String>,
},
} }
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]

View File

@@ -1,10 +1,14 @@
[target.wasm32-unknown-unknown] [target.wasm32-unknown-unknown]
rustflags = [ rustflags = [
"-C", "-Ctarget-feature=+atomics,+bulk-memory,+mutable-globals,+simd128",
"target-feature=+atomics,+bulk-memory,+mutable-globals,+simd128", "-Clink-arg=--shared-memory",
"-C",
# 4GB # 4GB
"link-arg=--max-memory=4294967296", "-Clink-arg=--max-memory=4294967296",
"-Clink-arg=--import-memory",
"-Clink-arg=--export=__wasm_init_tls",
"-Clink-arg=--export=__tls_size",
"-Clink-arg=--export=__tls_align",
"-Clink-arg=--export=__tls_base",
"--cfg", "--cfg",
'getrandom_backend="wasm_js"', 'getrandom_backend="wasm_js"',
] ]

View File

@@ -4,6 +4,12 @@ version = "0.1.0"
edition = "2024" edition = "2024"
publish = false publish = false
[features]
# Disable tracing events as a workaround for issue 959.
default = ["tracing/release_max_level_off"]
# Used to debug the executor itself.
debug = []
[lib] [lib]
name = "harness_executor" name = "harness_executor"
crate-type = ["cdylib", "rlib"] crate-type = ["cdylib", "rlib"]
@@ -28,8 +34,7 @@ tokio = { workspace = true, features = ["full"] }
tokio-util = { workspace = true, features = ["compat"] } tokio-util = { workspace = true, features = ["compat"] }
[target.'cfg(target_arch = "wasm32")'.dependencies] [target.'cfg(target_arch = "wasm32")'.dependencies]
# Disable tracing events as a workaround for issue 959. tracing = { workspace = true }
tracing = { workspace = true, features = ["release_max_level_off"] }
wasm-bindgen = { workspace = true } wasm-bindgen = { workspace = true }
tlsn-wasm = { workspace = true } tlsn-wasm = { workspace = true }
js-sys = { workspace = true } js-sys = { workspace = true }

View File

@@ -5,9 +5,15 @@ use futures::{AsyncReadExt, AsyncWriteExt, TryFutureExt};
use harness_core::bench::{Bench, ProverMetrics}; use harness_core::bench::{Bench, ProverMetrics};
use tlsn::{ use tlsn::{
config::{CertificateDer, ProtocolConfig, RootCertStore}, config::{
prove::ProveConfig,
prover::ProverConfig,
tls::TlsClientConfig,
tls_commit::{TlsCommitConfig, mpc::MpcTlsConfig},
},
connection::ServerName, connection::ServerName,
prover::{ProveConfig, Prover, ProverConfig, TlsConfig}, prover::Prover,
webpki::{CertificateDer, RootCertStore},
}; };
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN}; use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
@@ -22,41 +28,47 @@ pub async fn bench_prover(provider: &IoProvider, config: &Bench) -> Result<Prove
let sent = verifier_io.sent(); let sent = verifier_io.sent();
let recv = verifier_io.recv(); let recv = verifier_io.recv();
let mut builder = ProtocolConfig::builder(); let prover = Prover::new(ProverConfig::builder().build()?);
builder.max_sent_data(config.upload_size);
builder.defer_decryption_from_start(config.defer_decryption);
if !config.defer_decryption {
builder.max_recv_data_online(config.download_size + RECV_PADDING);
}
builder.max_recv_data(config.download_size + RECV_PADDING);
let protocol_config = builder.build()?;
let mut tls_config_builder = TlsConfig::builder();
tls_config_builder.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
});
let tls_config = tls_config_builder.build()?;
let prover = Prover::new(
ProverConfig::builder()
.tls_config(tls_config)
.protocol_config(protocol_config)
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
.build()?,
);
let time_start = web_time::Instant::now(); let time_start = web_time::Instant::now();
let prover = prover.setup(verifier_io).await?; let prover = prover
.commit(
TlsCommitConfig::builder()
.protocol({
let mut builder = MpcTlsConfig::builder()
.max_sent_data(config.upload_size)
.defer_decryption_from_start(config.defer_decryption);
if !config.defer_decryption {
builder = builder.max_recv_data_online(config.download_size + RECV_PADDING);
}
builder
.max_recv_data(config.download_size + RECV_PADDING)
.build()
}?)
.build()?,
verifier_io,
)
.await?;
let time_preprocess = time_start.elapsed().as_millis(); let time_preprocess = time_start.elapsed().as_millis();
let time_start_online = web_time::Instant::now(); let time_start_online = web_time::Instant::now();
let uploaded_preprocess = sent.load(Ordering::Relaxed); let uploaded_preprocess = sent.load(Ordering::Relaxed);
let downloaded_preprocess = recv.load(Ordering::Relaxed); let downloaded_preprocess = recv.load(Ordering::Relaxed);
let (mut conn, prover_fut) = prover.connect(provider.provide_server_io().await?).await?; let (mut conn, prover_fut) = prover
.connect(
TlsClientConfig::builder()
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into()?))
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.build()?,
provider.provide_server_io().await?,
)
.await?;
let (_, mut prover) = futures::try_join!( let (_, mut prover) = futures::try_join!(
async { async {
@@ -86,14 +98,27 @@ pub async fn bench_prover(provider: &IoProvider, config: &Bench) -> Result<Prove
let mut builder = ProveConfig::builder(prover.transcript()); let mut builder = ProveConfig::builder(prover.transcript());
// When reveal_all is false (the default), we exclude 1 byte to avoid the
// reveal-all optimization and benchmark the realistic ZK authentication path.
let reveal_sent_range = if config.reveal_all {
0..sent_len
} else {
0..sent_len.saturating_sub(1)
};
let reveal_recv_range = if config.reveal_all {
0..recv_len
} else {
0..recv_len.saturating_sub(1)
};
builder builder
.server_identity() .server_identity()
.reveal_sent(&(0..sent_len))? .reveal_sent(&reveal_sent_range)?
.reveal_recv(&(0..recv_len))?; .reveal_recv(&reveal_recv_range)?;
let config = builder.build()?; let prove_config = builder.build()?;
prover.prove(config).await?; prover.prove(&prove_config).await?;
prover.close().await?; prover.close().await?;
let time_total = time_start.elapsed().as_millis(); let time_total = time_start.elapsed().as_millis();

View File

@@ -2,33 +2,31 @@ use anyhow::Result;
use harness_core::bench::Bench; use harness_core::bench::Bench;
use tlsn::{ use tlsn::{
config::{CertificateDer, ProtocolConfigValidator, RootCertStore}, config::verifier::VerifierConfig,
verifier::{Verifier, VerifierConfig, VerifyConfig}, verifier::Verifier,
webpki::{CertificateDer, RootCertStore},
}; };
use tlsn_server_fixture_certs::CA_CERT_DER; use tlsn_server_fixture_certs::CA_CERT_DER;
use crate::{IoProvider, bench::RECV_PADDING}; use crate::IoProvider;
pub async fn bench_verifier(provider: &IoProvider, config: &Bench) -> Result<()> {
let mut builder = ProtocolConfigValidator::builder();
builder
.max_sent_data(config.upload_size)
.max_recv_data(config.download_size + RECV_PADDING);
let protocol_config = builder.build()?;
pub async fn bench_verifier(provider: &IoProvider, _config: &Bench) -> Result<()> {
let verifier = Verifier::new( let verifier = Verifier::new(
VerifierConfig::builder() VerifierConfig::builder()
.root_store(RootCertStore { .root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())], roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
}) })
.protocol_config_validator(protocol_config)
.build()?, .build()?,
); );
let verifier = verifier.setup(provider.provide_proto_io().await?).await?; let verifier = verifier
let mut verifier = verifier.run().await?; .commit(provider.provide_proto_io().await?)
verifier.verify(&VerifyConfig::default()).await?; .await?
.accept()
.await?
.run()
.await?;
let (_, verifier) = verifier.verify().await?.accept().await?;
verifier.close().await?; verifier.close().await?;
Ok(()) Ok(())

View File

@@ -1,10 +1,17 @@
use tlsn::{ use tlsn::{
config::{CertificateDer, ProtocolConfig, ProtocolConfigValidator, RootCertStore}, config::{
prove::ProveConfig,
prover::ProverConfig,
tls::TlsClientConfig,
tls_commit::{TlsCommitConfig, mpc::MpcTlsConfig},
verifier::VerifierConfig,
},
connection::ServerName, connection::ServerName,
hash::HashAlgId, hash::HashAlgId,
prover::{ProveConfig, Prover, ProverConfig, TlsConfig}, prover::Prover,
transcript::{TranscriptCommitConfig, TranscriptCommitment, TranscriptCommitmentKind}, transcript::{TranscriptCommitConfig, TranscriptCommitment, TranscriptCommitmentKind},
verifier::{Verifier, VerifierConfig, VerifierOutput, VerifyConfig}, verifier::{Verifier, VerifierOutput},
webpki::{CertificateDer, RootCertStore},
}; };
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN}; use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
@@ -21,35 +28,35 @@ const MAX_RECV_DATA: usize = 1 << 11;
crate::test!("basic", prover, verifier); crate::test!("basic", prover, verifier);
async fn prover(provider: &IoProvider) { async fn prover(provider: &IoProvider) {
let mut tls_config_builder = TlsConfig::builder(); let prover = Prover::new(ProverConfig::builder().build().unwrap())
tls_config_builder.root_store(RootCertStore { .commit(
roots: vec![CertificateDer(CA_CERT_DER.to_vec())], TlsCommitConfig::builder()
}); .protocol(
MpcTlsConfig::builder()
let tls_config = tls_config_builder.build().unwrap(); .max_sent_data(MAX_SENT_DATA)
.max_recv_data(MAX_RECV_DATA)
let server_name = ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()); .defer_decryption_from_start(true)
let prover = Prover::new( .build()
ProverConfig::builder() .unwrap(),
.server_name(server_name) )
.tls_config(tls_config) .build()
.protocol_config( .unwrap(),
ProtocolConfig::builder() provider.provide_proto_io().await.unwrap(),
.max_sent_data(MAX_SENT_DATA) )
.max_recv_data(MAX_RECV_DATA) .await
.defer_decryption_from_start(true) .unwrap();
.build()
.unwrap(),
)
.build()
.unwrap(),
)
.setup(provider.provide_proto_io().await.unwrap())
.await
.unwrap();
let (tls_connection, prover_fut) = prover let (tls_connection, prover_fut) = prover
.connect(provider.provide_server_io().await.unwrap()) .connect(
TlsClientConfig::builder()
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.build()
.unwrap(),
provider.provide_server_io().await.unwrap(),
)
.await .await
.unwrap(); .unwrap();
@@ -107,39 +114,40 @@ async fn prover(provider: &IoProvider) {
let config = builder.build().unwrap(); let config = builder.build().unwrap();
prover.prove(config).await.unwrap(); prover.prove(&config).await.unwrap();
prover.close().await.unwrap(); prover.close().await.unwrap();
} }
async fn verifier(provider: &IoProvider) { async fn verifier(provider: &IoProvider) {
let config = VerifierConfig::builder() let config = VerifierConfig::builder()
.protocol_config_validator(
ProtocolConfigValidator::builder()
.max_sent_data(MAX_SENT_DATA)
.max_recv_data(MAX_RECV_DATA)
.build()
.unwrap(),
)
.root_store(RootCertStore { .root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())], roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
}) })
.build() .build()
.unwrap(); .unwrap();
let verifier = Verifier::new(config); let verifier = Verifier::new(config)
.commit(provider.provide_proto_io().await.unwrap())
let VerifierOutput { .await
server_name, .unwrap()
transcript_commitments, .accept()
.. .await
} = verifier .unwrap()
.verify( .run()
provider.provide_proto_io().await.unwrap(),
&VerifyConfig::default(),
)
.await .await
.unwrap(); .unwrap();
let (
VerifierOutput {
server_name,
transcript_commitments,
..
},
verifier,
) = verifier.verify().await.unwrap().accept().await.unwrap();
verifier.close().await.unwrap();
let ServerName::Dns(server_name) = server_name.unwrap(); let ServerName::Dns(server_name) = server_name.unwrap();
assert_eq!(server_name.as_str(), SERVER_DOMAIN); assert_eq!(server_name.as_str(), SERVER_DOMAIN);

View File

@@ -1,6 +1,8 @@
FROM rust AS builder FROM rust AS builder
WORKDIR /usr/src/tlsn WORKDIR /usr/src/tlsn
ARG DEBUG=0
RUN \ RUN \
rustup update; \ rustup update; \
apt update && apt install -y clang; \ apt update && apt install -y clang; \
@@ -10,7 +12,12 @@ RUN \
COPY . . COPY . .
RUN \ RUN \
cd crates/harness; \ cd crates/harness; \
./build.sh; # Pass `--build-arg DEBUG=1` to `docker build` if you need to debug the harness.
if [ "$DEBUG" = "1" ]; then \
./build.sh debug; \
else \
./build.sh; \
fi
FROM debian:latest FROM debian:latest

View File

@@ -7,6 +7,10 @@ publish = false
[lib] [lib]
name = "harness_runner" name = "harness_runner"
[features]
# Used to debug the runner itself.
debug = []
[dependencies] [dependencies]
tlsn-harness-core = { workspace = true } tlsn-harness-core = { workspace = true }
tlsn-server-fixture = { workspace = true } tlsn-server-fixture = { workspace = true }
@@ -18,6 +22,7 @@ clap = { workspace = true, features = ["derive", "env"] }
csv = { version = "1.3" } csv = { version = "1.3" }
duct = { version = "1" } duct = { version = "1" }
futures = { workspace = true } futures = { workspace = true }
indicatif = { version = "0.17" }
ipnet = { workspace = true } ipnet = { workspace = true }
serio = { workspace = true } serio = { workspace = true }
serde_json = { workspace = true } serde_json = { workspace = true }

View File

@@ -16,6 +16,10 @@ pub struct Cli {
/// Subnet to assign harness network interfaces. /// Subnet to assign harness network interfaces.
#[arg(long, default_value = "10.250.0.0/24", env = "SUBNET")] #[arg(long, default_value = "10.250.0.0/24", env = "SUBNET")]
pub subnet: Ipv4Net, pub subnet: Ipv4Net,
/// Run browser in headed mode (visible window) for debugging.
/// Works with both X11 and Wayland.
#[arg(long)]
pub headed: bool,
} }
#[derive(Subcommand)] #[derive(Subcommand)]
@@ -31,10 +35,13 @@ pub enum Command {
}, },
/// runs benchmarks. /// runs benchmarks.
Bench { Bench {
/// Configuration path. /// Configuration path. Defaults to bench.toml which contains
/// representative scenarios (cable, 5G, fiber) for quick performance
/// checks. Use bench_*_sweep.toml files for parametric
/// analysis.
#[arg(short, long, default_value = "bench.toml")] #[arg(short, long, default_value = "bench.toml")]
config: PathBuf, config: PathBuf,
/// Output file path. /// Output CSV file path for detailed metrics and post-processing.
#[arg(short, long, default_value = "metrics.csv")] #[arg(short, long, default_value = "metrics.csv")]
output: PathBuf, output: PathBuf,
/// Number of samples to measure per benchmark. This is overridden by /// Number of samples to measure per benchmark. This is overridden by

View File

@@ -0,0 +1,17 @@
#![allow(unused_imports)]
pub use futures::FutureExt;
pub use tracing::{debug, error};
pub use chromiumoxide::{
Browser, Page,
cdp::{
browser_protocol::{
log::{EventEntryAdded, LogEntryLevel},
network::{EnableParams, SetCacheDisabledParams},
page::ReloadParams,
},
js_protocol::runtime::EventExceptionThrown,
},
handler::HandlerConfig,
};

View File

@@ -21,10 +21,16 @@ use harness_core::{
use crate::{Target, network::Namespace, rpc::Rpc}; use crate::{Target, network::Namespace, rpc::Rpc};
#[cfg(feature = "debug")]
use crate::debug_prelude::*;
pub struct Executor { pub struct Executor {
ns: Namespace, ns: Namespace,
config: ExecutorConfig, config: ExecutorConfig,
target: Target, target: Target,
/// Display environment variables for headed mode (X11/Wayland).
/// Empty means headless mode.
display_env: Vec<String>,
state: State, state: State,
} }
@@ -46,11 +52,17 @@ impl State {
} }
impl Executor { impl Executor {
pub fn new(ns: Namespace, config: ExecutorConfig, target: Target) -> Self { pub fn new(
ns: Namespace,
config: ExecutorConfig,
target: Target,
display_env: Vec<String>,
) -> Self {
Self { Self {
ns, ns,
config, config,
target, target,
display_env,
state: State::Init, state: State::Init,
} }
} }
@@ -66,20 +78,34 @@ impl Executor {
Id::One => self.config.network().rpc_1, Id::One => self.config.network().rpc_1,
}; };
let process = duct::cmd!( let mut args = vec![
"sudo", "ip".into(),
"ip", "netns".into(),
"netns", "exec".into(),
"exec", self.ns.name().into(),
self.ns.name(), "env".into(),
"env",
format!("CONFIG={}", serde_json::to_string(&self.config)?), format!("CONFIG={}", serde_json::to_string(&self.config)?),
executor_path ];
)
.stdout_capture() if cfg!(feature = "debug") {
.stderr_capture() let level = &std::env::var("RUST_LOG").unwrap_or("debug".to_string());
.unchecked() args.push("env".into());
.start()?; args.push(format!("RUST_LOG={}", level));
};
args.push(executor_path.to_str().expect("valid path").into());
let process = duct::cmd("sudo", args);
let process = if !cfg!(feature = "debug") {
process
.stdout_capture()
.stderr_capture()
.unchecked()
.start()?
} else {
process.unchecked().start()?
};
let rpc = Rpc::new_native(rpc_addr).await?; let rpc = Rpc::new_native(rpc_addr).await?;
@@ -103,26 +129,55 @@ impl Executor {
let tmp = duct::cmd!("mktemp", "-d").read()?; let tmp = duct::cmd!("mktemp", "-d").read()?;
let tmp = tmp.trim(); let tmp = tmp.trim();
let process = duct::cmd!( let headed = !self.display_env.is_empty();
"sudo",
"ip", // Build command args based on headed/headless mode
"netns", let mut args: Vec<String> = vec![
"exec", "ip".into(),
self.ns.name(), "netns".into(),
chrome_path, "exec".into(),
format!("--remote-debugging-port={PORT_BROWSER}"), self.ns.name().into(),
"--headless", ];
"--disable-dev-shm-usage",
"--disable-gpu", if headed {
"--disable-cache", // For headed mode: drop back to the current user and pass display env vars
"--disable-application-cache", // This allows the browser to connect to X11/Wayland while in the namespace
"--no-sandbox", let user =
std::env::var("USER").context("USER environment variable not set")?;
args.extend(["sudo".into(), "-E".into(), "-u".into(), user, "env".into()]);
args.extend(self.display_env.clone());
}
args.push(chrome_path.to_string_lossy().into());
args.push(format!("--remote-debugging-port={PORT_BROWSER}"));
if headed {
// Headed mode: no headless, add flags to suppress first-run dialogs
args.extend(["--no-first-run".into(), "--no-default-browser-check".into()]);
} else {
// Headless mode: original flags
args.extend([
"--headless".into(),
"--disable-dev-shm-usage".into(),
"--disable-gpu".into(),
"--disable-cache".into(),
"--disable-application-cache".into(),
]);
}
args.extend([
"--no-sandbox".into(),
format!("--user-data-dir={tmp}"), format!("--user-data-dir={tmp}"),
format!("--allowed-ips=10.250.0.1"), "--allowed-ips=10.250.0.1".into(),
) ]);
.stderr_capture()
.stdout_capture() let process = duct::cmd("sudo", &args);
.start()?;
let process = if !cfg!(feature = "debug") {
process.stderr_capture().stdout_capture().start()?
} else {
process.start()?
};
const TIMEOUT: usize = 10000; const TIMEOUT: usize = 10000;
const DELAY: usize = 100; const DELAY: usize = 100;
@@ -171,6 +226,38 @@ impl Executor {
.new_page(&format!("http://{wasm_addr}:{wasm_port}/index.html")) .new_page(&format!("http://{wasm_addr}:{wasm_port}/index.html"))
.await?; .await?;
#[cfg(feature = "debug")]
tokio::spawn(register_listeners(page.clone()).await?);
#[cfg(feature = "debug")]
async fn register_listeners(page: Page) -> Result<impl Future<Output = ()>> {
let mut logs = page.event_listener::<EventEntryAdded>().await?.fuse();
let mut exceptions =
page.event_listener::<EventExceptionThrown>().await?.fuse();
Ok(futures::future::join(
async move {
while let Some(event) = logs.next().await {
let entry = &event.entry;
match entry.level {
LogEntryLevel::Error => {
error!("{:?}", entry);
}
_ => {
debug!("{:?}: {}", entry.timestamp, entry.text);
}
}
}
},
async move {
while let Some(event) = exceptions.next().await {
error!("{:?}", event);
}
},
)
.map(|_| ()))
}
page.execute(EnableParams::builder().build()).await?; page.execute(EnableParams::builder().build()).await?;
page.execute(SetCacheDisabledParams { page.execute(SetCacheDisabledParams {
cache_disabled: true, cache_disabled: true,

View File

@@ -6,7 +6,10 @@ mod server_fixture;
pub mod wasm_server; pub mod wasm_server;
mod ws_proxy; mod ws_proxy;
use std::time::Duration; #[cfg(feature = "debug")]
mod debug_prelude;
use std::{collections::HashMap, time::Duration};
use anyhow::Result; use anyhow::Result;
use clap::Parser; use clap::Parser;
@@ -19,25 +22,78 @@ use harness_core::{
rpc::{BenchCmd, TestCmd}, rpc::{BenchCmd, TestCmd},
test::TestStatus, test::TestStatus,
}; };
use indicatif::{ProgressBar, ProgressStyle};
use cli::{Cli, Command}; use cli::{Cli, Command};
use executor::Executor; use executor::Executor;
use server_fixture::ServerFixture; use server_fixture::ServerFixture;
#[cfg(feature = "debug")]
use crate::debug_prelude::*;
use crate::{cli::Route, network::Network, wasm_server::WasmServer, ws_proxy::WsProxy}; use crate::{cli::Route, network::Network, wasm_server::WasmServer, ws_proxy::WsProxy};
#[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum)] /// Statistics for a benchmark configuration
pub enum Target { #[derive(Debug, Clone)]
Native, struct BenchStats {
Browser, group: Option<String>,
bandwidth: usize,
latency: usize,
upload_size: usize,
download_size: usize,
times: Vec<u64>,
} }
impl Default for Target { impl BenchStats {
fn default() -> Self { fn median(&self) -> f64 {
Self::Native let mut sorted = self.times.clone();
sorted.sort();
let len = sorted.len();
if len == 0 {
return 0.0;
}
if len.is_multiple_of(2) {
(sorted[len / 2 - 1] + sorted[len / 2]) as f64 / 2.0
} else {
sorted[len / 2] as f64
}
} }
} }
/// Print summary table of benchmark results
fn print_bench_summary(stats: &[BenchStats]) {
if stats.is_empty() {
println!("\nNo benchmark results to display (only warmup was run).");
return;
}
println!("\n{}", "=".repeat(80));
println!("TLSNotary Benchmark Results");
println!("{}", "=".repeat(80));
println!();
for stat in stats {
let group_name = stat.group.as_deref().unwrap_or("unnamed");
println!(
"{} ({} Mbps, {}ms latency, {}KB↑ {}KB↓):",
group_name,
stat.bandwidth,
stat.latency,
stat.upload_size / 1024,
stat.download_size / 1024
);
println!(" Median: {:.2}s", stat.median() / 1000.0);
println!();
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum, Default)]
pub enum Target {
#[default]
Native,
Browser,
}
struct Runner { struct Runner {
network: Network, network: Network,
server_fixture: ServerFixture, server_fixture: ServerFixture,
@@ -49,14 +105,46 @@ struct Runner {
started: bool, started: bool,
} }
/// Collects display-related environment variables for headed browser mode.
/// Works with both X11 and Wayland by collecting whichever vars are present.
fn collect_display_env_vars() -> Vec<String> {
const DISPLAY_VARS: &[&str] = &[
"DISPLAY", // X11
"XAUTHORITY", // X11 auth
"WAYLAND_DISPLAY", // Wayland
"XDG_RUNTIME_DIR", // Wayland runtime dir
];
DISPLAY_VARS
.iter()
.filter_map(|&var| {
std::env::var(var)
.ok()
.map(|val| format!("{}={}", var, val))
})
.collect()
}
impl Runner { impl Runner {
fn new(cli: &Cli) -> Result<Self> { fn new(cli: &Cli) -> Result<Self> {
let Cli { target, subnet, .. } = cli; let Cli {
target,
subnet,
headed,
..
} = cli;
let current_path = std::env::current_exe().unwrap(); let current_path = std::env::current_exe().unwrap();
let fixture_path = current_path.parent().unwrap().join("server-fixture"); let fixture_path = current_path.parent().unwrap().join("server-fixture");
let network_config = NetworkConfig::new(*subnet); let network_config = NetworkConfig::new(*subnet);
let network = Network::new(network_config.clone())?; let network = Network::new(network_config.clone())?;
// Collect display env vars once if headed mode is enabled
let display_env = if *headed {
collect_display_env_vars()
} else {
Vec::new()
};
let server_fixture = let server_fixture =
ServerFixture::new(fixture_path, network.ns_app().clone(), network_config.app); ServerFixture::new(fixture_path, network.ns_app().clone(), network_config.app);
let wasm_server = WasmServer::new( let wasm_server = WasmServer::new(
@@ -74,6 +162,7 @@ impl Runner {
.network_config(network_config.clone()) .network_config(network_config.clone())
.build(), .build(),
*target, *target,
display_env.clone(),
); );
let exec_v = Executor::new( let exec_v = Executor::new(
network.ns_1().clone(), network.ns_1().clone(),
@@ -83,6 +172,7 @@ impl Runner {
.network_config(network_config.clone()) .network_config(network_config.clone())
.build(), .build(),
Target::Native, Target::Native,
Vec::new(), // Verifier doesn't need display env
); );
Ok(Self { Ok(Self {
@@ -113,7 +203,16 @@ impl Runner {
} }
pub async fn main() -> Result<()> { pub async fn main() -> Result<()> {
#[cfg(feature = "debug")]
tracing_subscriber::fmt::init();
let cli = Cli::parse(); let cli = Cli::parse();
// Validate --headed requires --target browser
if cli.headed && cli.target != Target::Browser {
anyhow::bail!("--headed can only be used with --target browser");
}
let mut runner = Runner::new(&cli)?; let mut runner = Runner::new(&cli)?;
let mut exit_code = 0; let mut exit_code = 0;
@@ -202,6 +301,12 @@ pub async fn main() -> Result<()> {
samples_override, samples_override,
skip_warmup, skip_warmup,
} => { } => {
// Print configuration info
println!("TLSNotary Benchmark Harness");
println!("Running benchmarks from: {}", config.display());
println!("Output will be written to: {}", output.display());
println!();
let items: BenchItems = toml::from_str(&std::fs::read_to_string(config)?)?; let items: BenchItems = toml::from_str(&std::fs::read_to_string(config)?)?;
let output_file = std::fs::File::create(output)?; let output_file = std::fs::File::create(output)?;
let mut writer = WriterBuilder::new().from_writer(output_file); let mut writer = WriterBuilder::new().from_writer(output_file);
@@ -216,7 +321,34 @@ pub async fn main() -> Result<()> {
runner.exec_p.start().await?; runner.exec_p.start().await?;
runner.exec_v.start().await?; runner.exec_v.start().await?;
for config in benches { // Create progress bar
let pb = ProgressBar::new(benches.len() as u64);
pb.set_style(
ProgressStyle::default_bar()
.template("[{elapsed_precise}] {bar:40.cyan/blue} {pos}/{len} {msg}")
.expect("valid template")
.progress_chars("█▓▒░ "),
);
// Collect measurements for stats
let mut measurements_by_config: HashMap<String, Vec<u64>> = HashMap::new();
let warmup_count = if skip_warmup { 0 } else { 3 };
for (idx, config) in benches.iter().enumerate() {
let is_warmup = idx < warmup_count;
let group_name = if is_warmup {
format!("Warmup {}/{}", idx + 1, warmup_count)
} else {
config.group.as_deref().unwrap_or("unnamed").to_string()
};
pb.set_message(format!(
"{} ({} Mbps, {}ms)",
group_name, config.bandwidth, config.protocol_latency
));
runner runner
.network .network
.set_proto_config(config.bandwidth, config.protocol_latency.div_ceil(2))?; .set_proto_config(config.bandwidth, config.protocol_latency.div_ceil(2))?;
@@ -227,6 +359,9 @@ pub async fn main() -> Result<()> {
// Wait for the network to stabilize // Wait for the network to stabilize
tokio::time::sleep(Duration::from_millis(100)).await; tokio::time::sleep(Duration::from_millis(100)).await;
#[cfg(feature = "debug")]
debug!("Starting bench in group {:?}", config.group);
let (output, _) = tokio::try_join!( let (output, _) = tokio::try_join!(
runner.exec_p.bench(BenchCmd { runner.exec_p.bench(BenchCmd {
config: config.clone(), config: config.clone(),
@@ -242,11 +377,73 @@ pub async fn main() -> Result<()> {
panic!("expected prover output"); panic!("expected prover output");
}; };
let measurement = Measurement::new(config, metrics); // Collect metrics for stats (skip warmup benches)
if !is_warmup {
let config_key = format!(
"{:?}|{}|{}|{}|{}",
config.group,
config.bandwidth,
config.protocol_latency,
config.upload_size,
config.download_size
);
measurements_by_config
.entry(config_key)
.or_default()
.push(metrics.time_total);
}
let measurement = Measurement::new(config.clone(), metrics);
writer.serialize(measurement)?; writer.serialize(measurement)?;
writer.flush()?; writer.flush()?;
pb.inc(1);
} }
pb.finish_with_message("Benchmarks complete");
// Compute and print statistics
let mut all_stats: Vec<BenchStats> = Vec::new();
for (key, times) in measurements_by_config {
// Parse back the config from the key
let parts: Vec<&str> = key.split('|').collect();
if parts.len() >= 5 {
let group = if parts[0] == "None" {
None
} else {
Some(
parts[0]
.trim_start_matches("Some(\"")
.trim_end_matches("\")")
.to_string(),
)
};
let bandwidth: usize = parts[1].parse().unwrap_or(0);
let latency: usize = parts[2].parse().unwrap_or(0);
let upload_size: usize = parts[3].parse().unwrap_or(0);
let download_size: usize = parts[4].parse().unwrap_or(0);
all_stats.push(BenchStats {
group,
bandwidth,
latency,
upload_size,
download_size,
times,
});
}
}
// Sort stats by group name for consistent output
all_stats.sort_by(|a, b| {
a.group
.cmp(&b.group)
.then(a.latency.cmp(&b.latency))
.then(a.bandwidth.cmp(&b.bandwidth))
});
print_bench_summary(&all_stats);
} }
Command::Serve {} => { Command::Serve {} => {
runner.start_services().await?; runner.start_services().await?;

View File

@@ -5,7 +5,7 @@ description = "TLSNotary MPC-TLS protocol"
keywords = ["tls", "mpc", "2pc"] keywords = ["tls", "mpc", "2pc"]
categories = ["cryptography"] categories = ["cryptography"]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre" version = "0.1.0-alpha.14-pre"
edition = "2021" edition = "2021"
[lints] [lints]
@@ -33,7 +33,6 @@ mpz-ole = { workspace = true }
mpz-share-conversion = { workspace = true } mpz-share-conversion = { workspace = true }
mpz-vm-core = { workspace = true } mpz-vm-core = { workspace = true }
mpz-memory-core = { workspace = true } mpz-memory-core = { workspace = true }
mpz-circuits = { workspace = true }
ludi = { git = "https://github.com/sinui0/ludi", rev = "e511c3b", default-features = false } ludi = { git = "https://github.com/sinui0/ludi", rev = "e511c3b", default-features = false }
serio = { workspace = true } serio = { workspace = true }
@@ -57,9 +56,9 @@ pin-project-lite = { workspace = true }
web-time = { workspace = true } web-time = { workspace = true }
[dev-dependencies] [dev-dependencies]
mpz-ole = { workspace = true, features = ["test-utils"] } mpz-common = { workspace = true, features = ["test-utils"] }
mpz-ot = { workspace = true } mpz-ot = { workspace = true, features = ["ideal"] }
mpz-garble = { workspace = true } mpz-ideal-vm = { workspace = true }
cipher-crate = { package = "cipher", version = "0.4" } cipher-crate = { package = "cipher", version = "0.4" }
generic-array = { workspace = true } generic-array = { workspace = true }

View File

@@ -41,6 +41,7 @@ use tls_core::{
message::{OpaqueMessage, PlainMessage}, message::{OpaqueMessage, PlainMessage},
}, },
suites::SupportedCipherSuite, suites::SupportedCipherSuite,
verify::verify_sig_determine_alg,
}; };
use tlsn_core::{ use tlsn_core::{
connection::{CertBinding, CertBindingV1_2, ServerSignature, TlsVersion, VerifyData}, connection::{CertBinding, CertBindingV1_2, ServerSignature, TlsVersion, VerifyData},
@@ -327,12 +328,20 @@ impl MpcTlsLeader {
.map(|cert| CertificateDer(cert.0.clone())) .map(|cert| CertificateDer(cert.0.clone()))
.collect(); .collect();
let mut sig_msg = Vec::new();
sig_msg.extend_from_slice(&client_random.0);
sig_msg.extend_from_slice(&server_random.0);
sig_msg.extend_from_slice(server_kx_details.kx_params());
let server_signature_alg = verify_sig_determine_alg(
&server_cert_details.cert_chain()[0],
&sig_msg,
server_kx_details.kx_sig(),
)
.expect("only supported signature should have been accepted");
let server_signature = ServerSignature { let server_signature = ServerSignature {
scheme: server_kx_details alg: server_signature_alg.into(),
.kx_sig()
.scheme
.try_into()
.expect("only supported signature scheme should have been accepted"),
sig: server_kx_details.kx_sig().sig.0.clone(), sig: server_kx_details.kx_sig().sig.0.clone(),
}; };

View File

@@ -72,4 +72,5 @@ pub(crate) struct ServerFinishedVd {
} }
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(dead_code)]
pub(crate) struct CloseConnection; pub(crate) struct CloseConnection;

View File

@@ -487,7 +487,7 @@ impl RecordLayer {
sent_records.push(Record { sent_records.push(Record {
seq: op.seq, seq: op.seq,
typ: op.typ, typ: op.typ.into(),
plaintext: op.plaintext, plaintext: op.plaintext,
explicit_nonce: op.explicit_nonce, explicit_nonce: op.explicit_nonce,
ciphertext, ciphertext,
@@ -505,7 +505,7 @@ impl RecordLayer {
recv_records.push(Record { recv_records.push(Record {
seq: op.seq, seq: op.seq,
typ: op.typ, typ: op.typ.into(),
plaintext, plaintext,
explicit_nonce: op.explicit_nonce, explicit_nonce: op.explicit_nonce,
ciphertext: op.ciphertext, ciphertext: op.ciphertext,
@@ -578,7 +578,7 @@ impl RecordLayer {
recv_records.push(Record { recv_records.push(Record {
seq: op.seq, seq: op.seq,
typ: op.typ, typ: op.typ.into(),
plaintext, plaintext,
explicit_nonce: op.explicit_nonce, explicit_nonce: op.explicit_nonce,
ciphertext: op.ciphertext, ciphertext: op.ciphertext,

View File

@@ -456,9 +456,8 @@ mod tests {
}; };
use mpz_common::context::test_st_context; use mpz_common::context::test_st_context;
use mpz_core::Block; use mpz_core::Block;
use mpz_garble::protocol::semihonest::{Evaluator, Garbler}; use mpz_ideal_vm::IdealVm;
use mpz_memory_core::{binary::U8, correlated::Delta}; use mpz_memory_core::binary::U8;
use mpz_ot::ideal::cot::ideal_cot;
use mpz_share_conversion::ideal::ideal_share_convert; use mpz_share_conversion::ideal::ideal_share_convert;
use rand::{rngs::StdRng, SeedableRng}; use rand::{rngs::StdRng, SeedableRng};
use rstest::*; use rstest::*;
@@ -574,13 +573,8 @@ mod tests {
} }
fn create_vm(key: [u8; 16], iv: [u8; 4]) -> ((impl Vm<Binary>, Vars), (impl Vm<Binary>, Vars)) { fn create_vm(key: [u8; 16], iv: [u8; 4]) -> ((impl Vm<Binary>, Vars), (impl Vm<Binary>, Vars)) {
let mut rng = StdRng::seed_from_u64(0); let mut vm_0 = IdealVm::new();
let block = Block::random(&mut rng); let mut vm_1 = IdealVm::new();
let (sender, receiver) = ideal_cot(block);
let delta = Delta::new(block);
let mut vm_0 = Garbler::new(sender, [0u8; 16], delta);
let mut vm_1 = Evaluator::new(receiver);
let key_ref_0 = vm_0.alloc::<Array<U8, 16>>().unwrap(); let key_ref_0 = vm_0.alloc::<Array<U8, 16>>().unwrap();
vm_0.mark_public(key_ref_0).unwrap(); vm_0.mark_public(key_ref_0).unwrap();

View File

@@ -193,7 +193,7 @@ where
}; };
// Divide by block length and round up. // Divide by block length and round up.
let block_count = input.len() / 16 + (input.len() % 16 != 0) as usize; let block_count = input.len() / 16 + !input.len().is_multiple_of(16) as usize;
if block_count > MAX_POWER { if block_count > MAX_POWER {
return Err(ErrorRepr::InputLength { return Err(ErrorRepr::InputLength {
@@ -282,11 +282,11 @@ fn build_ghash_data(mut aad: Vec<u8>, mut ciphertext: Vec<u8>) -> Vec<u8> {
let len_block = ((associated_data_bitlen as u128) << 64) + (text_bitlen as u128); let len_block = ((associated_data_bitlen as u128) << 64) + (text_bitlen as u128);
// Pad data to be a multiple of 16 bytes. // Pad data to be a multiple of 16 bytes.
let aad_padded_block_count = (aad.len() / 16) + (aad.len() % 16 != 0) as usize; let aad_padded_block_count = (aad.len() / 16) + !aad.len().is_multiple_of(16) as usize;
aad.resize(aad_padded_block_count * 16, 0); aad.resize(aad_padded_block_count * 16, 0);
let ciphertext_padded_block_count = let ciphertext_padded_block_count =
(ciphertext.len() / 16) + (ciphertext.len() % 16 != 0) as usize; (ciphertext.len() / 16) + !ciphertext.len().is_multiple_of(16) as usize;
ciphertext.resize(ciphertext_padded_block_count * 16, 0); ciphertext.resize(ciphertext_padded_block_count * 16, 0);
let mut data: Vec<u8> = Vec::with_capacity(aad.len() + ciphertext.len() + 16); let mut data: Vec<u8> = Vec::with_capacity(aad.len() + ciphertext.len() + 16);

View File

@@ -4,14 +4,13 @@ use futures::{AsyncReadExt, AsyncWriteExt};
use mpc_tls::{Config, MpcTlsFollower, MpcTlsLeader}; use mpc_tls::{Config, MpcTlsFollower, MpcTlsLeader};
use mpz_common::context::test_mt_context; use mpz_common::context::test_mt_context;
use mpz_core::Block; use mpz_core::Block;
use mpz_garble::protocol::semihonest::{Evaluator, Garbler}; use mpz_ideal_vm::IdealVm;
use mpz_memory_core::correlated::Delta; use mpz_memory_core::correlated::Delta;
use mpz_ot::{ use mpz_ot::{
cot::{DerandCOTReceiver, DerandCOTSender},
ideal::rcot::ideal_rcot, ideal::rcot::ideal_rcot,
rcot::shared::{SharedRCOTReceiver, SharedRCOTSender}, rcot::shared::{SharedRCOTReceiver, SharedRCOTSender},
}; };
use rand::{rngs::StdRng, Rng, SeedableRng}; use rand::{rngs::StdRng, SeedableRng};
use rustls_pki_types::CertificateDer; use rustls_pki_types::CertificateDer;
use tls_client::RootCertStore; use tls_client::RootCertStore;
use tls_client_async::bind_client; use tls_client_async::bind_client;
@@ -23,7 +22,6 @@ use webpki::anchor_from_trusted_cert;
const CA_CERT: CertificateDer = CertificateDer::from_slice(CA_CERT_DER); const CA_CERT: CertificateDer = CertificateDer::from_slice(CA_CERT_DER);
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
#[ignore = "expensive"]
async fn mpc_tls_test() { async fn mpc_tls_test() {
tracing_subscriber::fmt::init(); tracing_subscriber::fmt::init();
@@ -139,14 +137,8 @@ fn build_pair(config: Config) -> (MpcTlsLeader, MpcTlsFollower) {
let rcot_recv_a = SharedRCOTReceiver::new(rcot_recv_a); let rcot_recv_a = SharedRCOTReceiver::new(rcot_recv_a);
let rcot_recv_b = SharedRCOTReceiver::new(rcot_recv_b); let rcot_recv_b = SharedRCOTReceiver::new(rcot_recv_b);
let mpc_a = Arc::new(Mutex::new(Garbler::new( let mpc_a = Arc::new(Mutex::new(IdealVm::new()));
DerandCOTSender::new(rcot_send_a.clone()), let mpc_b = Arc::new(Mutex::new(IdealVm::new()));
rand::rng().random(),
delta_a,
)));
let mpc_b = Arc::new(Mutex::new(Evaluator::new(DerandCOTReceiver::new(
rcot_recv_b.clone(),
))));
let leader = MpcTlsLeader::new( let leader = MpcTlsLeader::new(
config.clone(), config.clone(),

Some files were not shown because too many files have changed in this diff Show More