Compare commits

...

9 Commits
plot_py ... dev

Author SHA1 Message Date
sinu
9a3d73c5bd ci: reset main to tag 2026-01-14 09:33:23 -08:00
sinu.eth
2e24c62d9a chore: release alpha.14 (#1078) 2026-01-14 09:18:12 -08:00
sinu.eth
a08c54a224 doc(tlsn): document Session type and error (#1077) 2026-01-14 09:00:36 -08:00
sinu.eth
0656f7c742 doc(examples): rename interactive to basic and clarify (#1076) 2026-01-14 08:57:45 -08:00
sinu.eth
d30538aafb feat(tlsn): session type (#1075)
* feat(tlsn): session type

* simpify example

* fmt
2026-01-14 08:48:55 -08:00
sinu.eth
1897f0d1e6 refactor: remove encoding commitment (#1071)
* refactor: remove encoding commitment

* remove unused field

* clippy
2026-01-08 07:55:03 -08:00
dan
2101285f7f chore(bench): dont bench large payloads (#1074) 2026-01-05 09:55:35 +00:00
dan
98210e4059 chore(bench): added headed mode for debugging (#1073) 2026-01-05 09:38:31 +00:00
dan
9dfac850d5 chore(harness): improve UX with progress bar, separate sweep benches (#1068) 2025-12-23 13:59:14 +00:00
89 changed files with 2436 additions and 3230 deletions

View File

@@ -6,7 +6,7 @@ on:
tag: tag:
description: 'Tag to publish to NPM' description: 'Tag to publish to NPM'
required: true required: true
default: 'v0.1.0-alpha.14-pre' default: 'v0.1.0-alpha.14'
jobs: jobs:
release: release:

View File

@@ -1,4 +1,4 @@
name: Fast-forward main branch to published release tag name: Reset main branch to published release tag
on: on:
workflow_dispatch: workflow_dispatch:
@@ -6,7 +6,7 @@ on:
types: [published] types: [published]
jobs: jobs:
ff-main-to-release: reset-main-to-release:
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions: permissions:
contents: write contents: write
@@ -17,9 +17,9 @@ jobs:
with: with:
ref: main ref: main
- name: Fast-forward main to release tag - name: Reset main to release tag
run: | run: |
tag="${{ github.event.release.tag_name }}" tag="${{ github.event.release.tag_name }}"
git fetch origin "refs/tags/$tag:refs/tags/$tag" git fetch origin "refs/tags/$tag:refs/tags/$tag"
git merge --ff-only "refs/tags/$tag" git reset --hard "refs/tags/$tag"
git push origin main git push --force origin main

952
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -53,6 +53,7 @@ tlsn-formats = { path = "crates/formats" }
tlsn-hmac-sha256 = { path = "crates/components/hmac-sha256" } tlsn-hmac-sha256 = { path = "crates/components/hmac-sha256" }
tlsn-key-exchange = { path = "crates/components/key-exchange" } tlsn-key-exchange = { path = "crates/components/key-exchange" }
tlsn-mpc-tls = { path = "crates/mpc-tls" } tlsn-mpc-tls = { path = "crates/mpc-tls" }
tlsn-mux = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "d9facb6" }
tlsn-server-fixture = { path = "crates/server-fixture/server" } tlsn-server-fixture = { path = "crates/server-fixture/server" }
tlsn-server-fixture-certs = { path = "crates/server-fixture/certs" } tlsn-server-fixture-certs = { path = "crates/server-fixture/certs" }
tlsn-tls-backend = { path = "crates/tls/backend" } tlsn-tls-backend = { path = "crates/tls/backend" }
@@ -66,32 +67,32 @@ tlsn-harness-runner = { path = "crates/harness/runner" }
tlsn-wasm = { path = "crates/wasm" } tlsn-wasm = { path = "crates/wasm" }
tlsn = { path = "crates/tlsn" } tlsn = { path = "crates/tlsn" }
mpz-circuits = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" } mpz-circuits = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.5" }
mpz-circuits-data = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" } mpz-circuits-data = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.5" }
mpz-memory-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" } mpz-memory-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.5" }
mpz-common = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" } mpz-common = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.5" }
mpz-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" } mpz-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.5" }
mpz-vm-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" } mpz-vm-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.5" }
mpz-garble = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" } mpz-garble = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.5" }
mpz-garble-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" } mpz-garble-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.5" }
mpz-ole = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" } mpz-ole = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.5" }
mpz-ot = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" } mpz-ot = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.5" }
mpz-share-conversion = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" } mpz-share-conversion = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.5" }
mpz-fields = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" } mpz-fields = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.5" }
mpz-zk = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" } mpz-zk = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.5" }
mpz-hash = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" } mpz-hash = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.5" }
mpz-ideal-vm = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" } mpz-ideal-vm = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.5" }
rangeset = { version = "0.4" } rangeset = { version = "0.4" }
serio = { version = "0.2" } serio = { version = "0.2" }
spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6f1a934" } spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6f1a934" }
uid-mux = { version = "0.2" }
websocket-relay = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6f1a934" } websocket-relay = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6f1a934" }
aead = { version = "0.4" } aead = { version = "0.4" }
aes = { version = "0.8" } aes = { version = "0.8" }
aes-gcm = { version = "0.9" } aes-gcm = { version = "0.9" }
anyhow = { version = "1.0" } anyhow = { version = "1.0" }
async_io_stream = { version = "0.3" }
async-trait = { version = "0.1" } async-trait = { version = "0.1" }
axum = { version = "0.8" } axum = { version = "0.8" }
bcs = { version = "0.1" } bcs = { version = "0.1" }

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "tlsn-attestation" name = "tlsn-attestation"
version = "0.1.0-alpha.14-pre" version = "0.1.0-alpha.14"
edition = "2024" edition = "2024"
[features] [features]
@@ -27,6 +27,7 @@ alloy-primitives = { version = "1.3.1", default-features = false }
alloy-signer = { version = "1.0", default-features = false } alloy-signer = { version = "1.0", default-features = false }
alloy-signer-local = { version = "1.0", default-features = false } alloy-signer-local = { version = "1.0", default-features = false }
rand06-compat = { workspace = true } rand06-compat = { workspace = true }
rangeset = { workspace = true }
rstest = { workspace = true } rstest = { workspace = true }
tlsn-core = { workspace = true, features = ["fixtures"] } tlsn-core = { workspace = true, features = ["fixtures"] }
tlsn-data-fixtures = { workspace = true } tlsn-data-fixtures = { workspace = true }

View File

@@ -5,7 +5,7 @@ use rand::{Rng, rng};
use tlsn_core::{ use tlsn_core::{
connection::{ConnectionInfo, ServerEphemKey}, connection::{ConnectionInfo, ServerEphemKey},
hash::HashAlgId, hash::HashAlgId,
transcript::{TranscriptCommitment, encoding::EncoderSecret}, transcript::TranscriptCommitment,
}; };
use crate::{ use crate::{
@@ -25,7 +25,6 @@ pub struct Sign {
connection_info: Option<ConnectionInfo>, connection_info: Option<ConnectionInfo>,
server_ephemeral_key: Option<ServerEphemKey>, server_ephemeral_key: Option<ServerEphemKey>,
cert_commitment: ServerCertCommitment, cert_commitment: ServerCertCommitment,
encoder_secret: Option<EncoderSecret>,
extensions: Vec<Extension>, extensions: Vec<Extension>,
transcript_commitments: Vec<TranscriptCommitment>, transcript_commitments: Vec<TranscriptCommitment>,
} }
@@ -87,7 +86,6 @@ impl<'a> AttestationBuilder<'a, Accept> {
connection_info: None, connection_info: None,
server_ephemeral_key: None, server_ephemeral_key: None,
cert_commitment, cert_commitment,
encoder_secret: None,
transcript_commitments: Vec::new(), transcript_commitments: Vec::new(),
extensions, extensions,
}, },
@@ -108,12 +106,6 @@ impl AttestationBuilder<'_, Sign> {
self self
} }
/// Sets the secret for encoding commitments.
pub fn encoder_secret(&mut self, secret: EncoderSecret) -> &mut Self {
self.state.encoder_secret = Some(secret);
self
}
/// Adds an extension to the attestation. /// Adds an extension to the attestation.
pub fn extension(&mut self, extension: Extension) -> &mut Self { pub fn extension(&mut self, extension: Extension) -> &mut Self {
self.state.extensions.push(extension); self.state.extensions.push(extension);
@@ -137,7 +129,6 @@ impl AttestationBuilder<'_, Sign> {
connection_info, connection_info,
server_ephemeral_key, server_ephemeral_key,
cert_commitment, cert_commitment,
encoder_secret,
extensions, extensions,
transcript_commitments, transcript_commitments,
} = self.state; } = self.state;
@@ -168,7 +159,6 @@ impl AttestationBuilder<'_, Sign> {
AttestationBuilderError::new(ErrorKind::Field, "handshake data was not set") AttestationBuilderError::new(ErrorKind::Field, "handshake data was not set")
})?), })?),
cert_commitment: field_id.next(cert_commitment), cert_commitment: field_id.next(cert_commitment),
encoder_secret: encoder_secret.map(|secret| field_id.next(secret)),
extensions: extensions extensions: extensions
.into_iter() .into_iter()
.map(|extension| field_id.next(extension)) .map(|extension| field_id.next(extension))
@@ -253,8 +243,7 @@ mod test {
use rstest::{fixture, rstest}; use rstest::{fixture, rstest};
use tlsn_core::{ use tlsn_core::{
connection::{CertBinding, CertBindingV1_2}, connection::{CertBinding, CertBindingV1_2},
fixtures::{ConnectionFixture, encoding_provider}, fixtures::ConnectionFixture,
hash::Blake3,
transcript::Transcript, transcript::Transcript,
}; };
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON}; use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
@@ -285,13 +274,7 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length()); let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture( let RequestFixture { request, .. } = request_fixture(transcript, connection, Vec::new());
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection,
Blake3::default(),
Vec::new(),
);
let attestation_config = AttestationConfig::builder() let attestation_config = AttestationConfig::builder()
.supported_signature_algs([SignatureAlgId::SECP256R1]) .supported_signature_algs([SignatureAlgId::SECP256R1])
@@ -310,13 +293,7 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length()); let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture( let RequestFixture { request, .. } = request_fixture(transcript, connection, Vec::new());
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection,
Blake3::default(),
Vec::new(),
);
let attestation_config = AttestationConfig::builder() let attestation_config = AttestationConfig::builder()
.supported_signature_algs([SignatureAlgId::SECP256K1]) .supported_signature_algs([SignatureAlgId::SECP256K1])
@@ -336,13 +313,7 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length()); let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture( let RequestFixture { request, .. } = request_fixture(transcript, connection, Vec::new());
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection,
Blake3::default(),
Vec::new(),
);
let attestation_builder = Attestation::builder(attestation_config) let attestation_builder = Attestation::builder(attestation_config)
.accept_request(request) .accept_request(request)
@@ -363,13 +334,8 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length()); let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture( let RequestFixture { request, .. } =
transcript, request_fixture(transcript, connection.clone(), Vec::new());
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let mut attestation_builder = Attestation::builder(attestation_config) let mut attestation_builder = Attestation::builder(attestation_config)
.accept_request(request) .accept_request(request)
@@ -393,13 +359,8 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length()); let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture( let RequestFixture { request, .. } =
transcript, request_fixture(transcript, connection.clone(), Vec::new());
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let mut attestation_builder = Attestation::builder(attestation_config) let mut attestation_builder = Attestation::builder(attestation_config)
.accept_request(request) .accept_request(request)
@@ -432,9 +393,7 @@ mod test {
let RequestFixture { request, .. } = request_fixture( let RequestFixture { request, .. } = request_fixture(
transcript, transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(), connection.clone(),
Blake3::default(),
vec![Extension { vec![Extension {
id: b"foo".to_vec(), id: b"foo".to_vec(),
value: b"bar".to_vec(), value: b"bar".to_vec(),
@@ -461,9 +420,7 @@ mod test {
let RequestFixture { request, .. } = request_fixture( let RequestFixture { request, .. } = request_fixture(
transcript, transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(), connection.clone(),
Blake3::default(),
vec![Extension { vec![Extension {
id: b"foo".to_vec(), id: b"foo".to_vec(),
value: b"bar".to_vec(), value: b"bar".to_vec(),

View File

@@ -2,11 +2,7 @@
use tlsn_core::{ use tlsn_core::{
connection::{CertBinding, CertBindingV1_2}, connection::{CertBinding, CertBindingV1_2},
fixtures::ConnectionFixture, fixtures::ConnectionFixture,
hash::HashAlgorithm, transcript::{Transcript, TranscriptCommitConfigBuilder, TranscriptCommitment},
transcript::{
Transcript, TranscriptCommitConfigBuilder, TranscriptCommitment,
encoding::{EncodingProvider, EncodingTree},
},
}; };
use crate::{ use crate::{
@@ -21,16 +17,13 @@ use crate::{
/// A Request fixture used for testing. /// A Request fixture used for testing.
#[allow(missing_docs)] #[allow(missing_docs)]
pub struct RequestFixture { pub struct RequestFixture {
pub encoding_tree: EncodingTree,
pub request: Request, pub request: Request,
} }
/// Returns a request fixture for testing. /// Returns a request fixture for testing.
pub fn request_fixture( pub fn request_fixture(
transcript: Transcript, transcript: Transcript,
encodings_provider: impl EncodingProvider,
connection: ConnectionFixture, connection: ConnectionFixture,
encoding_hasher: impl HashAlgorithm,
extensions: Vec<Extension>, extensions: Vec<Extension>,
) -> RequestFixture { ) -> RequestFixture {
let provider = CryptoProvider::default(); let provider = CryptoProvider::default();
@@ -50,16 +43,10 @@ pub fn request_fixture(
.unwrap(); .unwrap();
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap(); let transcripts_commitment_config = transcript_commitment_builder.build().unwrap();
// Prover constructs encoding tree.
let encoding_tree = EncodingTree::new(
&encoding_hasher,
transcripts_commitment_config.iter_encoding(),
&encodings_provider,
)
.unwrap();
let mut builder = RequestConfig::builder(); let mut builder = RequestConfig::builder();
builder.transcript_commit(transcripts_commitment_config);
for extension in extensions { for extension in extensions {
builder.extension(extension); builder.extension(extension);
} }
@@ -74,10 +61,7 @@ pub fn request_fixture(
let (request, _) = request_builder.build(&provider).unwrap(); let (request, _) = request_builder.build(&provider).unwrap();
RequestFixture { RequestFixture { request }
encoding_tree,
request,
}
} }
/// Returns an attestation fixture for testing. /// Returns an attestation fixture for testing.

View File

@@ -79,8 +79,6 @@
//! //!
//! // Specify all the transcript commitments we want to make. //! // Specify all the transcript commitments we want to make.
//! builder //! builder
//! // Use BLAKE3 for encoding commitments.
//! .encoding_hash_alg(HashAlgId::BLAKE3)
//! // Commit to all sent data. //! // Commit to all sent data.
//! .commit_sent(&(0..sent_len))? //! .commit_sent(&(0..sent_len))?
//! // Commit to the first 10 bytes of sent data. //! // Commit to the first 10 bytes of sent data.
@@ -129,7 +127,7 @@
//! //!
//! ```no_run //! ```no_run
//! # use tlsn_attestation::{Attestation, CryptoProvider, Secrets, presentation::Presentation}; //! # use tlsn_attestation::{Attestation, CryptoProvider, Secrets, presentation::Presentation};
//! # use tlsn_core::transcript::{TranscriptCommitmentKind, Direction}; //! # use tlsn_core::transcript::Direction;
//! # fn main() -> Result<(), Box<dyn std::error::Error>> { //! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! # let attestation: Attestation = unimplemented!(); //! # let attestation: Attestation = unimplemented!();
//! # let secrets: Secrets = unimplemented!(); //! # let secrets: Secrets = unimplemented!();
@@ -140,8 +138,6 @@
//! let mut builder = secrets.transcript_proof_builder(); //! let mut builder = secrets.transcript_proof_builder();
//! //!
//! builder //! builder
//! // Use transcript encoding commitments.
//! .commitment_kinds(&[TranscriptCommitmentKind::Encoding])
//! // Disclose the first 10 bytes of the sent data. //! // Disclose the first 10 bytes of the sent data.
//! .reveal(&(0..10), Direction::Sent)? //! .reveal(&(0..10), Direction::Sent)?
//! // Disclose all of the received data. //! // Disclose all of the received data.
@@ -219,7 +215,7 @@ use tlsn_core::{
connection::{ConnectionInfo, ServerEphemKey}, connection::{ConnectionInfo, ServerEphemKey},
hash::{Hash, HashAlgorithm, TypedHash}, hash::{Hash, HashAlgorithm, TypedHash},
merkle::MerkleTree, merkle::MerkleTree,
transcript::{TranscriptCommitment, encoding::EncoderSecret}, transcript::TranscriptCommitment,
}; };
use crate::{ use crate::{
@@ -301,8 +297,6 @@ pub enum FieldKind {
ServerEphemKey = 0x02, ServerEphemKey = 0x02,
/// Server identity commitment. /// Server identity commitment.
ServerIdentityCommitment = 0x03, ServerIdentityCommitment = 0x03,
/// Encoding commitment.
EncodingCommitment = 0x04,
/// Plaintext hash commitment. /// Plaintext hash commitment.
PlaintextHash = 0x05, PlaintextHash = 0x05,
} }
@@ -327,7 +321,6 @@ pub struct Body {
connection_info: Field<ConnectionInfo>, connection_info: Field<ConnectionInfo>,
server_ephemeral_key: Field<ServerEphemKey>, server_ephemeral_key: Field<ServerEphemKey>,
cert_commitment: Field<ServerCertCommitment>, cert_commitment: Field<ServerCertCommitment>,
encoder_secret: Option<Field<EncoderSecret>>,
extensions: Vec<Field<Extension>>, extensions: Vec<Field<Extension>>,
transcript_commitments: Vec<Field<TranscriptCommitment>>, transcript_commitments: Vec<Field<TranscriptCommitment>>,
} }
@@ -373,7 +366,6 @@ impl Body {
connection_info: conn_info, connection_info: conn_info,
server_ephemeral_key, server_ephemeral_key,
cert_commitment, cert_commitment,
encoder_secret,
extensions, extensions,
transcript_commitments, transcript_commitments,
} = self; } = self;
@@ -391,13 +383,6 @@ impl Body {
), ),
]; ];
if let Some(encoder_secret) = encoder_secret {
fields.push((
encoder_secret.id,
hasher.hash_separated(&encoder_secret.data),
));
}
for field in extensions.iter() { for field in extensions.iter() {
fields.push((field.id, hasher.hash_separated(&field.data))); fields.push((field.id, hasher.hash_separated(&field.data)));
} }

View File

@@ -91,11 +91,6 @@ impl Presentation {
transcript.verify_with_provider( transcript.verify_with_provider(
&provider.hash, &provider.hash,
&attestation.body.connection_info().transcript_length, &attestation.body.connection_info().transcript_length,
attestation
.body
.encoder_secret
.as_ref()
.map(|field| &field.data),
attestation.body.transcript_commitments(), attestation.body.transcript_commitments(),
) )
}) })

View File

@@ -144,9 +144,7 @@ impl std::fmt::Display for ErrorKind {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use tlsn_core::{ use tlsn_core::{
connection::TranscriptLength, connection::TranscriptLength, fixtures::ConnectionFixture, hash::HashAlgId,
fixtures::{ConnectionFixture, encoding_provider},
hash::{Blake3, HashAlgId},
transcript::Transcript, transcript::Transcript,
}; };
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON}; use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
@@ -164,13 +162,8 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length()); let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture( let RequestFixture { request, .. } =
transcript, request_fixture(transcript, connection.clone(), Vec::new());
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let attestation = let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]); attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
@@ -185,13 +178,8 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length()); let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { mut request, .. } = request_fixture( let RequestFixture { mut request, .. } =
transcript, request_fixture(transcript, connection.clone(), Vec::new());
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let attestation = let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]); attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
@@ -209,13 +197,8 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length()); let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { mut request, .. } = request_fixture( let RequestFixture { mut request, .. } =
transcript, request_fixture(transcript, connection.clone(), Vec::new());
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let attestation = let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]); attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
@@ -233,13 +216,8 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length()); let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { mut request, .. } = request_fixture( let RequestFixture { mut request, .. } =
transcript, request_fixture(transcript, connection.clone(), Vec::new());
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let attestation = let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]); attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
@@ -265,13 +243,8 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length()); let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture( let RequestFixture { request, .. } =
transcript, request_fixture(transcript, connection.clone(), Vec::new());
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let mut attestation = let mut attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]); attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
@@ -289,13 +262,8 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length()); let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture( let RequestFixture { request, .. } =
transcript, request_fixture(transcript, connection.clone(), Vec::new());
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let attestation = let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]); attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);

View File

@@ -49,6 +49,4 @@ impl_domain_separator!(tlsn_core::connection::ConnectionInfo);
impl_domain_separator!(tlsn_core::connection::CertBinding); impl_domain_separator!(tlsn_core::connection::CertBinding);
impl_domain_separator!(tlsn_core::transcript::TranscriptCommitment); impl_domain_separator!(tlsn_core::transcript::TranscriptCommitment);
impl_domain_separator!(tlsn_core::transcript::TranscriptSecret); impl_domain_separator!(tlsn_core::transcript::TranscriptSecret);
impl_domain_separator!(tlsn_core::transcript::encoding::EncoderSecret);
impl_domain_separator!(tlsn_core::transcript::encoding::EncodingCommitment);
impl_domain_separator!(tlsn_core::transcript::hash::PlaintextHash); impl_domain_separator!(tlsn_core::transcript::hash::PlaintextHash);

View File

@@ -1,3 +1,5 @@
use rand::{Rng, SeedableRng, rngs::StdRng};
use rangeset::set::RangeSet;
use tlsn_attestation::{ use tlsn_attestation::{
Attestation, AttestationConfig, CryptoProvider, Attestation, AttestationConfig, CryptoProvider,
presentation::PresentationOutput, presentation::PresentationOutput,
@@ -6,12 +8,11 @@ use tlsn_attestation::{
}; };
use tlsn_core::{ use tlsn_core::{
connection::{CertBinding, CertBindingV1_2}, connection::{CertBinding, CertBindingV1_2},
fixtures::{self, ConnectionFixture, encoder_secret}, fixtures::ConnectionFixture,
hash::Blake3, hash::{Blake3, Blinder, HashAlgId},
transcript::{ transcript::{
Direction, Transcript, TranscriptCommitConfigBuilder, TranscriptCommitment, Direction, Transcript, TranscriptCommitment, TranscriptSecret,
TranscriptSecret, hash::{PlaintextHash, PlaintextHashSecret, hash_plaintext},
encoding::{EncodingCommitment, EncodingTree},
}, },
}; };
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON}; use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
@@ -19,6 +20,7 @@ use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
/// Tests that the attestation protocol and verification work end-to-end /// Tests that the attestation protocol and verification work end-to-end
#[test] #[test]
fn test_api() { fn test_api() {
let mut rng = StdRng::seed_from_u64(0);
let mut provider = CryptoProvider::default(); let mut provider = CryptoProvider::default();
// Configure signer for Notary // Configure signer for Notary
@@ -26,8 +28,6 @@ fn test_api() {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let (sent_len, recv_len) = transcript.len(); let (sent_len, recv_len) = transcript.len();
// Plaintext encodings which the Prover obtained from GC evaluation
let encodings_provider = fixtures::encoding_provider(GET_WITH_HEADER, OK_JSON);
// At the end of the TLS connection the Prover holds the: // At the end of the TLS connection the Prover holds the:
let ConnectionFixture { let ConnectionFixture {
@@ -44,26 +44,38 @@ fn test_api() {
unreachable!() unreachable!()
}; };
// Prover specifies the ranges it wants to commit to. // Create hash commitments
let mut transcript_commitment_builder = TranscriptCommitConfigBuilder::new(&transcript); let hasher = Blake3::default();
transcript_commitment_builder let sent_blinder: Blinder = rng.random();
.commit_sent(&(0..sent_len)) let recv_blinder: Blinder = rng.random();
.unwrap()
.commit_recv(&(0..recv_len))
.unwrap();
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap(); let sent_idx = RangeSet::from(0..sent_len);
let recv_idx = RangeSet::from(0..recv_len);
// Prover constructs encoding tree. let sent_hash_commitment = PlaintextHash {
let encoding_tree = EncodingTree::new( direction: Direction::Sent,
&Blake3::default(), idx: sent_idx.clone(),
transcripts_commitment_config.iter_encoding(), hash: hash_plaintext(&hasher, transcript.sent(), &sent_blinder),
&encodings_provider, };
)
.unwrap();
let encoding_commitment = EncodingCommitment { let recv_hash_commitment = PlaintextHash {
root: encoding_tree.root(), direction: Direction::Received,
idx: recv_idx.clone(),
hash: hash_plaintext(&hasher, transcript.received(), &recv_blinder),
};
let sent_hash_secret = PlaintextHashSecret {
direction: Direction::Sent,
idx: sent_idx,
alg: HashAlgId::BLAKE3,
blinder: sent_blinder,
};
let recv_hash_secret = PlaintextHashSecret {
direction: Direction::Received,
idx: recv_idx,
alg: HashAlgId::BLAKE3,
blinder: recv_blinder,
}; };
let request_config = RequestConfig::default(); let request_config = RequestConfig::default();
@@ -74,8 +86,14 @@ fn test_api() {
.handshake_data(server_cert_data) .handshake_data(server_cert_data)
.transcript(transcript) .transcript(transcript)
.transcript_commitments( .transcript_commitments(
vec![TranscriptSecret::Encoding(encoding_tree)], vec![
vec![TranscriptCommitment::Encoding(encoding_commitment.clone())], TranscriptSecret::Hash(sent_hash_secret),
TranscriptSecret::Hash(recv_hash_secret),
],
vec![
TranscriptCommitment::Hash(sent_hash_commitment.clone()),
TranscriptCommitment::Hash(recv_hash_commitment.clone()),
],
); );
let (request, secrets) = request_builder.build(&provider).unwrap(); let (request, secrets) = request_builder.build(&provider).unwrap();
@@ -95,8 +113,10 @@ fn test_api() {
.connection_info(connection_info.clone()) .connection_info(connection_info.clone())
// Server key Notary received during handshake // Server key Notary received during handshake
.server_ephemeral_key(server_ephemeral_key) .server_ephemeral_key(server_ephemeral_key)
.encoder_secret(encoder_secret()) .transcript_commitments(vec![
.transcript_commitments(vec![TranscriptCommitment::Encoding(encoding_commitment)]); TranscriptCommitment::Hash(sent_hash_commitment),
TranscriptCommitment::Hash(recv_hash_commitment),
]);
let attestation = attestation_builder.build(&provider).unwrap(); let attestation = attestation_builder.build(&provider).unwrap();

View File

@@ -5,7 +5,7 @@ description = "This crate provides implementations of ciphers for two parties"
keywords = ["tls", "mpc", "2pc", "aes"] keywords = ["tls", "mpc", "2pc", "aes"]
categories = ["cryptography"] categories = ["cryptography"]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.14-pre" version = "0.1.0-alpha.14"
edition = "2021" edition = "2021"
[lints] [lints]

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "tlsn-deap" name = "tlsn-deap"
version = "0.1.0-alpha.14-pre" version = "0.1.0-alpha.14"
edition = "2021" edition = "2021"
[lints] [lints]

View File

@@ -5,7 +5,7 @@ description = "A 2PC implementation of TLS HMAC-SHA256 PRF"
keywords = ["tls", "mpc", "2pc", "hmac", "sha256"] keywords = ["tls", "mpc", "2pc", "hmac", "sha256"]
categories = ["cryptography"] categories = ["cryptography"]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.14-pre" version = "0.1.0-alpha.14"
edition = "2021" edition = "2021"
[lints] [lints]

View File

@@ -32,8 +32,8 @@ async fn prf(mode: Mode) {
let server_random: [u8; 32] = [96u8; 32]; let server_random: [u8; 32] = [96u8; 32];
let (mut leader_exec, mut follower_exec) = test_mt_context(8); let (mut leader_exec, mut follower_exec) = test_mt_context(8);
let mut leader_ctx = leader_exec.new_context().await.unwrap(); let mut leader_ctx = leader_exec.new_context().unwrap();
let mut follower_ctx = follower_exec.new_context().await.unwrap(); let mut follower_ctx = follower_exec.new_context().unwrap();
let mut leader_vm = IdealVm::new(); let mut leader_vm = IdealVm::new();
let mut follower_vm = IdealVm::new(); let mut follower_vm = IdealVm::new();

View File

@@ -5,7 +5,7 @@ description = "Implementation of the 3-party key-exchange protocol"
keywords = ["tls", "mpc", "2pc", "pms", "key-exchange"] keywords = ["tls", "mpc", "2pc", "pms", "key-exchange"]
categories = ["cryptography"] categories = ["cryptography"]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.14-pre" version = "0.1.0-alpha.14"
edition = "2021" edition = "2021"
[lints] [lints]

View File

@@ -5,7 +5,7 @@ description = "Core types for TLSNotary"
keywords = ["tls", "mpc", "2pc", "types"] keywords = ["tls", "mpc", "2pc", "types"]
categories = ["cryptography"] categories = ["cryptography"]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.14-pre" version = "0.1.0-alpha.14"
edition = "2021" edition = "2021"
[lints] [lints]

View File

@@ -1,10 +1,7 @@
//! Fixtures for testing //! Fixtures for testing
mod provider;
pub mod transcript; pub mod transcript;
pub use provider::FixtureEncodingProvider;
use hex::FromHex; use hex::FromHex;
use crate::{ use crate::{
@@ -13,10 +10,6 @@ use crate::{
ServerEphemKey, ServerName, ServerSignature, SignatureAlgorithm, TlsVersion, ServerEphemKey, ServerName, ServerSignature, SignatureAlgorithm, TlsVersion,
TranscriptLength, TranscriptLength,
}, },
transcript::{
encoding::{EncoderSecret, EncodingProvider},
Transcript,
},
webpki::CertificateDer, webpki::CertificateDer,
}; };
@@ -129,27 +122,3 @@ impl ConnectionFixture {
server_ephemeral_key server_ephemeral_key
} }
} }
/// Returns an encoding provider fixture.
pub fn encoding_provider(tx: &[u8], rx: &[u8]) -> impl EncodingProvider {
let secret = encoder_secret();
FixtureEncodingProvider::new(&secret, Transcript::new(tx, rx))
}
/// Seed fixture.
const SEED: [u8; 32] = [0; 32];
/// Delta fixture.
const DELTA: [u8; 16] = [1; 16];
/// Returns an encoder secret fixture.
pub fn encoder_secret() -> EncoderSecret {
EncoderSecret::new(SEED, DELTA)
}
/// Returns a tampered encoder secret fixture.
pub fn encoder_secret_tampered_seed() -> EncoderSecret {
let mut seed = SEED;
seed[0] += 1;
EncoderSecret::new(seed, DELTA)
}

View File

@@ -1,41 +0,0 @@
use std::ops::Range;
use crate::transcript::{
encoding::{new_encoder, Encoder, EncoderSecret, EncodingProvider, EncodingProviderError},
Direction, Transcript,
};
/// A encoding provider fixture.
pub struct FixtureEncodingProvider {
encoder: Box<dyn Encoder>,
transcript: Transcript,
}
impl FixtureEncodingProvider {
/// Creates a new encoding provider fixture.
pub(crate) fn new(secret: &EncoderSecret, transcript: Transcript) -> Self {
Self {
encoder: Box::new(new_encoder(secret)),
transcript,
}
}
}
impl EncodingProvider for FixtureEncodingProvider {
fn provide_encoding(
&self,
direction: Direction,
range: Range<usize>,
dest: &mut Vec<u8>,
) -> Result<(), EncodingProviderError> {
let transcript = match direction {
Direction::Sent => &self.transcript.sent(),
Direction::Received => &self.transcript.received(),
};
let data = transcript.get(range.clone()).ok_or(EncodingProviderError)?;
self.encoder.encode_data(direction, range, data, dest);
Ok(())
}
}

View File

@@ -19,9 +19,7 @@ use serde::{Deserialize, Serialize};
use crate::{ use crate::{
connection::ServerName, connection::ServerName,
transcript::{ transcript::{PartialTranscript, TranscriptCommitment, TranscriptSecret},
encoding::EncoderSecret, PartialTranscript, TranscriptCommitment, TranscriptSecret,
},
}; };
/// Prover output. /// Prover output.
@@ -42,8 +40,6 @@ pub struct VerifierOutput {
pub server_name: Option<ServerName>, pub server_name: Option<ServerName>,
/// Transcript data. /// Transcript data.
pub transcript: Option<PartialTranscript>, pub transcript: Option<PartialTranscript>,
/// Encoding commitment secret.
pub encoder_secret: Option<EncoderSecret>,
/// Transcript commitments. /// Transcript commitments.
pub transcript_commitments: Vec<TranscriptCommitment>, pub transcript_commitments: Vec<TranscriptCommitment>,
} }

View File

@@ -63,11 +63,6 @@ impl MerkleProof {
Ok(()) Ok(())
} }
/// Returns the leaf count of the Merkle tree associated with the proof.
pub(crate) fn leaf_count(&self) -> usize {
self.leaf_count
}
} }
#[derive(Clone)] #[derive(Clone)]

View File

@@ -19,7 +19,6 @@
//! withheld. //! withheld.
mod commit; mod commit;
pub mod encoding;
pub mod hash; pub mod hash;
mod proof; mod proof;
mod tls; mod tls;

View File

@@ -8,27 +8,15 @@ use serde::{Deserialize, Serialize};
use crate::{ use crate::{
hash::HashAlgId, hash::HashAlgId,
transcript::{ transcript::{
encoding::{EncodingCommitment, EncodingTree},
hash::{PlaintextHash, PlaintextHashSecret}, hash::{PlaintextHash, PlaintextHashSecret},
Direction, RangeSet, Transcript, Direction, RangeSet, Transcript,
}, },
}; };
/// The maximum allowed total bytelength of committed data for a single
/// commitment kind. Used to prevent DoS during verification. (May cause the
/// verifier to hash up to a max of 1GB * 128 = 128GB of data for certain kinds
/// of encoding commitments.)
///
/// This value must not exceed bcs's MAX_SEQUENCE_LENGTH limit (which is (1 <<
/// 31) - 1 by default)
pub(crate) const MAX_TOTAL_COMMITTED_DATA: usize = 1_000_000_000;
/// Kind of transcript commitment. /// Kind of transcript commitment.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[non_exhaustive] #[non_exhaustive]
pub enum TranscriptCommitmentKind { pub enum TranscriptCommitmentKind {
/// A commitment to encodings of the transcript.
Encoding,
/// A hash commitment to plaintext in the transcript. /// A hash commitment to plaintext in the transcript.
Hash { Hash {
/// The hash algorithm used. /// The hash algorithm used.
@@ -39,7 +27,6 @@ pub enum TranscriptCommitmentKind {
impl fmt::Display for TranscriptCommitmentKind { impl fmt::Display for TranscriptCommitmentKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self { match self {
Self::Encoding => f.write_str("encoding"),
Self::Hash { alg } => write!(f, "hash ({alg})"), Self::Hash { alg } => write!(f, "hash ({alg})"),
} }
} }
@@ -49,8 +36,6 @@ impl fmt::Display for TranscriptCommitmentKind {
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
#[non_exhaustive] #[non_exhaustive]
pub enum TranscriptCommitment { pub enum TranscriptCommitment {
/// Encoding commitment.
Encoding(EncodingCommitment),
/// Plaintext hash commitment. /// Plaintext hash commitment.
Hash(PlaintextHash), Hash(PlaintextHash),
} }
@@ -59,8 +44,6 @@ pub enum TranscriptCommitment {
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
#[non_exhaustive] #[non_exhaustive]
pub enum TranscriptSecret { pub enum TranscriptSecret {
/// Encoding tree.
Encoding(EncodingTree),
/// Plaintext hash secret. /// Plaintext hash secret.
Hash(PlaintextHashSecret), Hash(PlaintextHashSecret),
} }
@@ -68,9 +51,6 @@ pub enum TranscriptSecret {
/// Configuration for transcript commitments. /// Configuration for transcript commitments.
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TranscriptCommitConfig { pub struct TranscriptCommitConfig {
encoding_hash_alg: HashAlgId,
has_encoding: bool,
has_hash: bool,
commits: Vec<((Direction, RangeSet<usize>), TranscriptCommitmentKind)>, commits: Vec<((Direction, RangeSet<usize>), TranscriptCommitmentKind)>,
} }
@@ -80,53 +60,23 @@ impl TranscriptCommitConfig {
TranscriptCommitConfigBuilder::new(transcript) TranscriptCommitConfigBuilder::new(transcript)
} }
/// Returns the hash algorithm to use for encoding commitments.
pub fn encoding_hash_alg(&self) -> &HashAlgId {
&self.encoding_hash_alg
}
/// Returns `true` if the configuration has any encoding commitments.
pub fn has_encoding(&self) -> bool {
self.has_encoding
}
/// Returns `true` if the configuration has any hash commitments. /// Returns `true` if the configuration has any hash commitments.
pub fn has_hash(&self) -> bool { pub fn has_hash(&self) -> bool {
self.has_hash self.commits
} .iter()
.any(|(_, kind)| matches!(kind, TranscriptCommitmentKind::Hash { .. }))
/// Returns an iterator over the encoding commitment indices.
pub fn iter_encoding(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>)> {
self.commits.iter().filter_map(|(idx, kind)| match kind {
TranscriptCommitmentKind::Encoding => Some(idx),
_ => None,
})
} }
/// Returns an iterator over the hash commitment indices. /// Returns an iterator over the hash commitment indices.
pub fn iter_hash(&self) -> impl Iterator<Item = (&(Direction, RangeSet<usize>), &HashAlgId)> { pub fn iter_hash(&self) -> impl Iterator<Item = (&(Direction, RangeSet<usize>), &HashAlgId)> {
self.commits.iter().filter_map(|(idx, kind)| match kind { self.commits.iter().map(|(idx, kind)| match kind {
TranscriptCommitmentKind::Hash { alg } => Some((idx, alg)), TranscriptCommitmentKind::Hash { alg } => (idx, alg),
_ => None,
}) })
} }
/// Returns a request for the transcript commitments. /// Returns a request for the transcript commitments.
pub fn to_request(&self) -> TranscriptCommitRequest { pub fn to_request(&self) -> TranscriptCommitRequest {
TranscriptCommitRequest { TranscriptCommitRequest {
encoding: self.has_encoding.then(|| {
let mut sent = RangeSet::default();
let mut recv = RangeSet::default();
for (dir, idx) in self.iter_encoding() {
match dir {
Direction::Sent => sent.union_mut(idx),
Direction::Received => recv.union_mut(idx),
}
}
(sent, recv)
}),
hash: self hash: self
.iter_hash() .iter_hash()
.map(|((dir, idx), alg)| (*dir, idx.clone(), *alg)) .map(|((dir, idx), alg)| (*dir, idx.clone(), *alg))
@@ -136,15 +86,9 @@ impl TranscriptCommitConfig {
} }
/// A builder for [`TranscriptCommitConfig`]. /// A builder for [`TranscriptCommitConfig`].
///
/// The default hash algorithm is [`HashAlgId::BLAKE3`] and the default kind
/// is [`TranscriptCommitmentKind::Encoding`].
#[derive(Debug)] #[derive(Debug)]
pub struct TranscriptCommitConfigBuilder<'a> { pub struct TranscriptCommitConfigBuilder<'a> {
transcript: &'a Transcript, transcript: &'a Transcript,
encoding_hash_alg: HashAlgId,
has_encoding: bool,
has_hash: bool,
default_kind: TranscriptCommitmentKind, default_kind: TranscriptCommitmentKind,
commits: HashSet<((Direction, RangeSet<usize>), TranscriptCommitmentKind)>, commits: HashSet<((Direction, RangeSet<usize>), TranscriptCommitmentKind)>,
} }
@@ -154,20 +98,13 @@ impl<'a> TranscriptCommitConfigBuilder<'a> {
pub fn new(transcript: &'a Transcript) -> Self { pub fn new(transcript: &'a Transcript) -> Self {
Self { Self {
transcript, transcript,
encoding_hash_alg: HashAlgId::BLAKE3, default_kind: TranscriptCommitmentKind::Hash {
has_encoding: false, alg: HashAlgId::BLAKE3,
has_hash: false, },
default_kind: TranscriptCommitmentKind::Encoding,
commits: HashSet::default(), commits: HashSet::default(),
} }
} }
/// Sets the hash algorithm to use for encoding commitments.
pub fn encoding_hash_alg(&mut self, alg: HashAlgId) -> &mut Self {
self.encoding_hash_alg = alg;
self
}
/// Sets the default kind of commitment to use. /// Sets the default kind of commitment to use.
pub fn default_kind(&mut self, default_kind: TranscriptCommitmentKind) -> &mut Self { pub fn default_kind(&mut self, default_kind: TranscriptCommitmentKind) -> &mut Self {
self.default_kind = default_kind; self.default_kind = default_kind;
@@ -201,11 +138,6 @@ impl<'a> TranscriptCommitConfigBuilder<'a> {
)); ));
} }
match kind {
TranscriptCommitmentKind::Encoding => self.has_encoding = true,
TranscriptCommitmentKind::Hash { .. } => self.has_hash = true,
}
self.commits.insert(((direction, idx), kind)); self.commits.insert(((direction, idx), kind));
Ok(self) Ok(self)
@@ -252,9 +184,6 @@ impl<'a> TranscriptCommitConfigBuilder<'a> {
/// Builds the configuration. /// Builds the configuration.
pub fn build(self) -> Result<TranscriptCommitConfig, TranscriptCommitConfigBuilderError> { pub fn build(self) -> Result<TranscriptCommitConfig, TranscriptCommitConfigBuilderError> {
Ok(TranscriptCommitConfig { Ok(TranscriptCommitConfig {
encoding_hash_alg: self.encoding_hash_alg,
has_encoding: self.has_encoding,
has_hash: self.has_hash,
commits: Vec::from_iter(self.commits), commits: Vec::from_iter(self.commits),
}) })
} }
@@ -301,16 +230,10 @@ impl fmt::Display for TranscriptCommitConfigBuilderError {
/// Request to compute transcript commitments. /// Request to compute transcript commitments.
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TranscriptCommitRequest { pub struct TranscriptCommitRequest {
encoding: Option<(RangeSet<usize>, RangeSet<usize>)>,
hash: Vec<(Direction, RangeSet<usize>, HashAlgId)>, hash: Vec<(Direction, RangeSet<usize>, HashAlgId)>,
} }
impl TranscriptCommitRequest { impl TranscriptCommitRequest {
/// Returns `true` if an encoding commitment is requested.
pub fn has_encoding(&self) -> bool {
self.encoding.is_some()
}
/// Returns `true` if a hash commitment is requested. /// Returns `true` if a hash commitment is requested.
pub fn has_hash(&self) -> bool { pub fn has_hash(&self) -> bool {
!self.hash.is_empty() !self.hash.is_empty()
@@ -320,11 +243,6 @@ impl TranscriptCommitRequest {
pub fn iter_hash(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>, HashAlgId)> { pub fn iter_hash(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>, HashAlgId)> {
self.hash.iter() self.hash.iter()
} }
/// Returns the ranges of the encoding commitments.
pub fn encoding(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
self.encoding.as_ref()
}
} }
#[cfg(test)] #[cfg(test)]

View File

@@ -1,22 +0,0 @@
//! Transcript encoding commitments and proofs.
mod encoder;
mod proof;
mod provider;
mod tree;
pub use encoder::{new_encoder, Encoder, EncoderSecret};
pub use proof::{EncodingProof, EncodingProofError};
pub use provider::{EncodingProvider, EncodingProviderError};
pub use tree::{EncodingTree, EncodingTreeError};
use serde::{Deserialize, Serialize};
use crate::hash::TypedHash;
/// Transcript encoding commitment.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct EncodingCommitment {
/// Merkle root of the encoding commitments.
pub root: TypedHash,
}

View File

@@ -1,137 +0,0 @@
use std::ops::Range;
use crate::transcript::Direction;
use itybity::ToBits;
use rand::{RngCore, SeedableRng};
use rand_chacha::ChaCha12Rng;
use serde::{Deserialize, Serialize};
/// The size of the encoding for 1 bit, in bytes.
const BIT_ENCODING_SIZE: usize = 16;
/// The size of the encoding for 1 byte, in bytes.
const BYTE_ENCODING_SIZE: usize = 128;
/// Secret used by an encoder to generate encodings.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct EncoderSecret {
seed: [u8; 32],
delta: [u8; BIT_ENCODING_SIZE],
}
opaque_debug::implement!(EncoderSecret);
impl EncoderSecret {
/// Creates a new secret.
///
/// # Arguments
///
/// * `seed` - The seed for the PRG.
/// * `delta` - Delta for deriving the one-encodings.
pub fn new(seed: [u8; 32], delta: [u8; 16]) -> Self {
Self { seed, delta }
}
/// Returns the seed.
pub fn seed(&self) -> &[u8; 32] {
&self.seed
}
/// Returns the delta.
pub fn delta(&self) -> &[u8; 16] {
&self.delta
}
}
/// Creates a new encoder.
pub fn new_encoder(secret: &EncoderSecret) -> impl Encoder {
ChaChaEncoder::new(secret)
}
pub(crate) struct ChaChaEncoder {
seed: [u8; 32],
delta: [u8; 16],
}
impl ChaChaEncoder {
pub(crate) fn new(secret: &EncoderSecret) -> Self {
let seed = *secret.seed();
let delta = *secret.delta();
Self { seed, delta }
}
pub(crate) fn new_prg(&self, stream_id: u64) -> ChaCha12Rng {
let mut prg = ChaCha12Rng::from_seed(self.seed);
prg.set_stream(stream_id);
prg.set_word_pos(0);
prg
}
}
/// A transcript encoder.
///
/// This is an internal implementation detail that should not be exposed to the
/// public API.
pub trait Encoder {
/// Writes the zero encoding for the given range of the transcript into the
/// destination buffer.
fn encode_range(&self, direction: Direction, range: Range<usize>, dest: &mut Vec<u8>);
/// Writes the encoding for the given data into the destination buffer.
fn encode_data(
&self,
direction: Direction,
range: Range<usize>,
data: &[u8],
dest: &mut Vec<u8>,
);
}
impl Encoder for ChaChaEncoder {
fn encode_range(&self, direction: Direction, range: Range<usize>, dest: &mut Vec<u8>) {
// ChaCha encoder works with 32-bit words. Each encoded bit is 128 bits long.
const WORDS_PER_BYTE: u128 = 8 * 128 / 32;
let stream_id: u64 = match direction {
Direction::Sent => 0,
Direction::Received => 1,
};
let mut prg = self.new_prg(stream_id);
let len = range.len() * BYTE_ENCODING_SIZE;
let pos = dest.len();
// Write 0s to the destination buffer.
dest.resize(pos + len, 0);
// Fill the destination buffer with the PRG.
prg.set_word_pos(range.start as u128 * WORDS_PER_BYTE);
prg.fill_bytes(&mut dest[pos..pos + len]);
}
fn encode_data(
&self,
direction: Direction,
range: Range<usize>,
data: &[u8],
dest: &mut Vec<u8>,
) {
const ZERO: [u8; 16] = [0; BIT_ENCODING_SIZE];
let pos = dest.len();
// Write the zero encoding for the given range.
self.encode_range(direction, range, dest);
let dest = &mut dest[pos..];
for (pos, bit) in data.iter_lsb0().enumerate() {
// Add the delta to the encoding whenever the encoded bit is 1,
// otherwise add a zero.
let summand = if bit { &self.delta } else { &ZERO };
dest[pos * BIT_ENCODING_SIZE..(pos + 1) * BIT_ENCODING_SIZE]
.iter_mut()
.zip(summand)
.for_each(|(a, b)| *a ^= *b);
}
}
}

View File

@@ -1,361 +0,0 @@
use std::{collections::HashMap, fmt};
use rangeset::set::RangeSet;
use serde::{Deserialize, Serialize};
use crate::{
hash::{Blinder, HashProvider, HashProviderError},
merkle::{MerkleError, MerkleProof},
transcript::{
commit::MAX_TOTAL_COMMITTED_DATA,
encoding::{new_encoder, Encoder, EncoderSecret, EncodingCommitment},
Direction,
},
};
/// An opening of a leaf in the encoding tree.
#[derive(Clone, Serialize, Deserialize)]
pub(super) struct Opening {
pub(super) direction: Direction,
pub(super) idx: RangeSet<usize>,
pub(super) blinder: Blinder,
}
opaque_debug::implement!(Opening);
/// An encoding commitment proof.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(try_from = "validation::EncodingProofUnchecked")]
pub struct EncodingProof {
/// The proof of inclusion of the commitment(s) in the Merkle tree of
/// commitments.
pub(super) inclusion_proof: MerkleProof,
pub(super) openings: HashMap<usize, Opening>,
}
impl EncodingProof {
/// Verifies the proof against the commitment.
///
/// Returns the authenticated indices of the sent and received data,
/// respectively.
///
/// # Arguments
///
/// * `provider` - Hash provider.
/// * `commitment` - Encoding commitment to verify against.
/// * `sent` - Sent data to authenticate.
/// * `recv` - Received data to authenticate.
pub fn verify_with_provider(
&self,
provider: &HashProvider,
secret: &EncoderSecret,
commitment: &EncodingCommitment,
sent: &[u8],
recv: &[u8],
) -> Result<(RangeSet<usize>, RangeSet<usize>), EncodingProofError> {
let hasher = provider.get(&commitment.root.alg)?;
let encoder = new_encoder(secret);
let Self {
inclusion_proof,
openings,
} = self;
let mut leaves = Vec::with_capacity(openings.len());
let mut expected_leaf = Vec::default();
let mut total_opened = 0u128;
let mut auth_sent = RangeSet::default();
let mut auth_recv = RangeSet::default();
for (
id,
Opening {
direction,
idx,
blinder,
},
) in openings
{
// Make sure the amount of data being proved is bounded.
total_opened += idx.len() as u128;
if total_opened > MAX_TOTAL_COMMITTED_DATA as u128 {
return Err(EncodingProofError::new(
ErrorKind::Proof,
"exceeded maximum allowed data",
))?;
}
let (data, auth) = match direction {
Direction::Sent => (sent, &mut auth_sent),
Direction::Received => (recv, &mut auth_recv),
};
// Make sure the ranges are within the bounds of the transcript.
if idx.end().unwrap_or(0) > data.len() {
return Err(EncodingProofError::new(
ErrorKind::Proof,
format!(
"index out of bounds of the transcript ({}): {} > {}",
direction,
idx.end().unwrap_or(0),
data.len()
),
));
}
expected_leaf.clear();
for range in idx.iter() {
encoder.encode_data(*direction, range.clone(), &data[range], &mut expected_leaf);
}
expected_leaf.extend_from_slice(blinder.as_bytes());
// Compute the expected hash of the commitment to make sure it is
// present in the merkle tree.
leaves.push((*id, hasher.hash(&expected_leaf)));
auth.union_mut(idx);
}
// Verify that the expected hashes are present in the merkle tree.
//
// This proves the Prover committed to the purported data prior to the encoder
// seed being revealed. Ergo, if the encodings are authentic then the purported
// data is authentic.
inclusion_proof.verify(hasher, &commitment.root, leaves)?;
Ok((auth_sent, auth_recv))
}
}
/// Error for [`EncodingProof`].
#[derive(Debug, thiserror::Error)]
pub struct EncodingProofError {
kind: ErrorKind,
source: Option<Box<dyn std::error::Error + Send + Sync>>,
}
impl EncodingProofError {
fn new<E>(kind: ErrorKind, source: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
Self {
kind,
source: Some(source.into()),
}
}
}
#[derive(Debug)]
enum ErrorKind {
Provider,
Proof,
}
impl fmt::Display for EncodingProofError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("encoding proof error: ")?;
match self.kind {
ErrorKind::Provider => f.write_str("provider error")?,
ErrorKind::Proof => f.write_str("proof error")?,
}
if let Some(source) = &self.source {
write!(f, " caused by: {source}")?;
}
Ok(())
}
}
impl From<HashProviderError> for EncodingProofError {
fn from(error: HashProviderError) -> Self {
Self::new(ErrorKind::Provider, error)
}
}
impl From<MerkleError> for EncodingProofError {
fn from(error: MerkleError) -> Self {
Self::new(ErrorKind::Proof, error)
}
}
/// Invalid encoding proof error.
#[derive(Debug, thiserror::Error)]
#[error("invalid encoding proof: {0}")]
pub struct InvalidEncodingProof(&'static str);
mod validation {
use super::*;
/// The maximum allowed height of the Merkle tree of encoding commitments.
///
/// The statistical security parameter (SSP) of the encoding commitment
/// protocol is calculated as "the number of uniformly random bits in a
/// single bit's encoding minus `MAX_HEIGHT`".
///
/// For example, a bit encoding used in garbled circuits typically has 127
/// uniformly random bits, hence when using it in the encoding
/// commitment protocol, the SSP is 127 - 30 = 97 bits.
///
/// Leaving this validation here as a fail-safe in case we ever start
/// using shorter encodings.
const MAX_HEIGHT: usize = 30;
#[derive(Debug, Deserialize)]
pub(super) struct EncodingProofUnchecked {
inclusion_proof: MerkleProof,
openings: HashMap<usize, Opening>,
}
impl TryFrom<EncodingProofUnchecked> for EncodingProof {
type Error = InvalidEncodingProof;
fn try_from(unchecked: EncodingProofUnchecked) -> Result<Self, Self::Error> {
if unchecked.inclusion_proof.leaf_count() > 1 << MAX_HEIGHT {
return Err(InvalidEncodingProof(
"the height of the tree exceeds the maximum allowed",
));
}
Ok(Self {
inclusion_proof: unchecked.inclusion_proof,
openings: unchecked.openings,
})
}
}
}
#[cfg(test)]
mod test {
use tlsn_data_fixtures::http::{request::POST_JSON, response::OK_JSON};
use crate::{
fixtures::{encoder_secret, encoder_secret_tampered_seed, encoding_provider},
hash::Blake3,
transcript::{encoding::EncodingTree, Transcript},
};
use super::*;
struct EncodingFixture {
transcript: Transcript,
proof: EncodingProof,
commitment: EncodingCommitment,
}
fn new_encoding_fixture() -> EncodingFixture {
let transcript = Transcript::new(POST_JSON, OK_JSON);
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
let idx_1 = (Direction::Received, RangeSet::from(0..OK_JSON.len()));
let provider = encoding_provider(transcript.sent(), transcript.received());
let tree = EncodingTree::new(&Blake3::default(), [&idx_0, &idx_1], &provider).unwrap();
let proof = tree.proof([&idx_0, &idx_1].into_iter()).unwrap();
let commitment = EncodingCommitment { root: tree.root() };
EncodingFixture {
transcript,
proof,
commitment,
}
}
#[test]
fn test_verify_encoding_proof_tampered_seed() {
let EncodingFixture {
transcript,
proof,
commitment,
} = new_encoding_fixture();
let err = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret_tampered_seed(),
&commitment,
transcript.sent(),
transcript.received(),
)
.unwrap_err();
assert!(matches!(err.kind, ErrorKind::Proof));
}
#[test]
fn test_verify_encoding_proof_out_of_range() {
let EncodingFixture {
transcript,
proof,
commitment,
} = new_encoding_fixture();
let sent = &transcript.sent()[transcript.sent().len() - 1..];
let recv = &transcript.received()[transcript.received().len() - 2..];
let err = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
sent,
recv,
)
.unwrap_err();
assert!(matches!(err.kind, ErrorKind::Proof));
}
#[test]
fn test_verify_encoding_proof_tampered_idx() {
let EncodingFixture {
transcript,
mut proof,
commitment,
} = new_encoding_fixture();
let Opening { idx, .. } = proof.openings.values_mut().next().unwrap();
*idx = RangeSet::from([0..3, 13..15]);
let err = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
transcript.sent(),
transcript.received(),
)
.unwrap_err();
assert!(matches!(err.kind, ErrorKind::Proof));
}
#[test]
fn test_verify_encoding_proof_tampered_encoding_blinder() {
let EncodingFixture {
transcript,
mut proof,
commitment,
} = new_encoding_fixture();
let Opening { blinder, .. } = proof.openings.values_mut().next().unwrap();
*blinder = rand::random();
let err = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
transcript.sent(),
transcript.received(),
)
.unwrap_err();
assert!(matches!(err.kind, ErrorKind::Proof));
}
}

View File

@@ -1,19 +0,0 @@
use std::ops::Range;
use crate::transcript::Direction;
/// A provider of plaintext encodings.
pub trait EncodingProvider {
/// Writes the encoding of the given range into the destination buffer.
fn provide_encoding(
&self,
direction: Direction,
range: Range<usize>,
dest: &mut Vec<u8>,
) -> Result<(), EncodingProviderError>;
}
/// Error for [`EncodingProvider`].
#[derive(Debug, thiserror::Error)]
#[error("failed to provide encoding")]
pub struct EncodingProviderError;

View File

@@ -1,327 +0,0 @@
use std::collections::HashMap;
use bimap::BiMap;
use rangeset::set::RangeSet;
use serde::{Deserialize, Serialize};
use crate::{
hash::{Blinder, HashAlgId, HashAlgorithm, TypedHash},
merkle::MerkleTree,
transcript::{
encoding::{
proof::{EncodingProof, Opening},
EncodingProvider,
},
Direction,
},
};
/// Encoding tree builder error.
#[derive(Debug, thiserror::Error)]
pub enum EncodingTreeError {
/// Index is out of bounds of the transcript.
#[error("index is out of bounds of the transcript")]
OutOfBounds {
/// The index.
index: RangeSet<usize>,
/// The transcript length.
transcript_length: usize,
},
/// Encoding provider is missing an encoding for an index.
#[error("encoding provider is missing an encoding for an index")]
MissingEncoding {
/// The index which is missing.
index: RangeSet<usize>,
},
/// Index is missing from the tree.
#[error("index is missing from the tree")]
MissingLeaf {
/// The index which is missing.
index: RangeSet<usize>,
},
}
/// A merkle tree of transcript encodings.
#[derive(Clone, Serialize, Deserialize)]
pub struct EncodingTree {
/// Merkle tree of the commitments.
tree: MerkleTree,
/// Nonces used to blind the hashes.
blinders: Vec<Blinder>,
/// Mapping between the index of a leaf and the transcript index it
/// corresponds to.
idxs: BiMap<usize, (Direction, RangeSet<usize>)>,
/// Union of all transcript indices in the sent direction.
sent_idx: RangeSet<usize>,
/// Union of all transcript indices in the received direction.
received_idx: RangeSet<usize>,
}
opaque_debug::implement!(EncodingTree);
impl EncodingTree {
    /// Creates a new encoding tree.
    ///
    /// # Arguments
    ///
    /// * `hasher` - The hash algorithm to use.
    /// * `idxs` - The subsequence indices to commit to.
    /// * `provider` - The encoding provider.
    pub fn new<'idx>(
        hasher: &dyn HashAlgorithm,
        idxs: impl IntoIterator<Item = &'idx (Direction, RangeSet<usize>)>,
        provider: &dyn EncodingProvider,
    ) -> Result<Self, EncodingTreeError> {
        let mut tree = Self {
            tree: MerkleTree::new(hasher.id()),
            blinders: Vec::new(),
            idxs: BiMap::new(),
            sent_idx: RangeSet::default(),
            received_idx: RangeSet::default(),
        };

        let mut leaves = Vec::new();
        // Scratch buffer reused across leaves to avoid reallocating per index.
        let mut buffer = Vec::new();
        for dir_idx in idxs {
            let direction = dir_idx.0;
            let idx = &dir_idx.1;

            // Skip empty indices and subsequences already committed to the tree.
            if idx.is_empty() || tree.idxs.contains_right(dir_idx) {
                continue;
            }

            buffer.clear();
            for range in idx.iter() {
                provider
                    .provide_encoding(direction, range, &mut buffer)
                    .map_err(|_| EncodingTreeError::MissingEncoding { index: idx.clone() })?;
            }

            // Blind the leaf so the committed hash does not leak the encoding.
            let blinder: Blinder = rand::random();
            buffer.extend_from_slice(blinder.as_bytes());

            // Invariant: `blinders[i]` and the bimap entry with left key `i`
            // both correspond to `leaves[i]`.
            leaves.push(hasher.hash(&buffer));
            tree.blinders.push(blinder);
            tree.idxs.insert(tree.idxs.len(), dir_idx.clone());

            match direction {
                Direction::Sent => tree.sent_idx.union_mut(idx),
                Direction::Received => tree.received_idx.union_mut(idx),
            }
        }

        tree.tree.insert(hasher, leaves);

        Ok(tree)
    }

    /// Returns the root of the tree.
    pub fn root(&self) -> TypedHash {
        self.tree.root()
    }

    /// Returns the hash algorithm of the tree.
    pub fn algorithm(&self) -> HashAlgId {
        self.tree.algorithm()
    }

    /// Generates a proof for the given indices.
    ///
    /// # Arguments
    ///
    /// * `idxs` - The transcript indices to prove.
    pub fn proof<'idx>(
        &self,
        idxs: impl Iterator<Item = &'idx (Direction, RangeSet<usize>)>,
    ) -> Result<EncodingProof, EncodingTreeError> {
        let mut openings = HashMap::new();
        for dir_idx in idxs {
            let direction = dir_idx.0;
            let idx = &dir_idx.1;

            // Look up the leaf position of this subsequence; absent means it
            // was never committed.
            let leaf_idx = *self
                .idxs
                .get_by_right(dir_idx)
                .ok_or_else(|| EncodingTreeError::MissingLeaf { index: idx.clone() })?;

            openings.insert(
                leaf_idx,
                Opening {
                    direction,
                    idx: idx.clone(),
                    blinder: self.blinders[leaf_idx].clone(),
                },
            );
        }

        // The merkle proof expects leaf indices in ascending order.
        let mut leaf_indices: Vec<_> = openings.keys().copied().collect();
        leaf_indices.sort();

        Ok(EncodingProof {
            inclusion_proof: self.tree.proof(&leaf_indices),
            openings,
        })
    }

    /// Returns whether the tree contains the given transcript index.
    pub fn contains(&self, idx: &(Direction, RangeSet<usize>)) -> bool {
        self.idxs.contains_right(idx)
    }

    /// Returns the union of committed ranges in the given direction.
    pub(crate) fn idx(&self, direction: Direction) -> &RangeSet<usize> {
        if matches!(direction, Direction::Sent) {
            &self.sent_idx
        } else {
            &self.received_idx
        }
    }

    /// Returns the committed transcript indices.
    pub(crate) fn transcript_indices(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>)> {
        self.idxs.right_values()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        fixtures::{encoder_secret, encoding_provider},
        hash::{Blake3, HashProvider},
        transcript::{encoding::EncodingCommitment, Transcript},
    };
    use tlsn_data_fixtures::http::{request::POST_JSON, response::OK_JSON};

    /// Builds an encoding tree over `transcript` committing to `idxs`.
    fn new_tree<'seq>(
        transcript: &Transcript,
        idxs: impl Iterator<Item = &'seq (Direction, RangeSet<usize>)>,
    ) -> Result<EncodingTree, EncodingTreeError> {
        let provider = encoding_provider(transcript.sent(), transcript.received());
        EncodingTree::new(&Blake3::default(), idxs, &provider)
    }

    #[test]
    fn test_encoding_tree() {
        let transcript = Transcript::new(POST_JSON, OK_JSON);

        // Commit to the full transcript in both directions.
        let sent = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
        let recv = (Direction::Received, RangeSet::from(0..OK_JSON.len()));

        let tree = new_tree(&transcript, [&sent, &recv].into_iter()).unwrap();
        assert!(tree.contains(&sent));
        assert!(tree.contains(&recv));

        let proof = tree.proof([&sent, &recv].into_iter()).unwrap();
        let commitment = EncodingCommitment { root: tree.root() };

        let (auth_sent, auth_recv) = proof
            .verify_with_provider(
                &HashProvider::default(),
                &encoder_secret(),
                &commitment,
                transcript.sent(),
                transcript.received(),
            )
            .unwrap();

        assert_eq!(auth_sent, sent.1);
        assert_eq!(auth_recv, recv.1);
    }

    #[test]
    fn test_encoding_tree_multiple_ranges() {
        let transcript = Transcript::new(POST_JSON, OK_JSON);

        // Split each direction into two adjacent commitments.
        let idxs = [
            (Direction::Sent, RangeSet::from(0..1)),
            (Direction::Sent, RangeSet::from(1..POST_JSON.len())),
            (Direction::Received, RangeSet::from(0..1)),
            (Direction::Received, RangeSet::from(1..OK_JSON.len())),
        ];

        let tree = new_tree(&transcript, idxs.iter()).unwrap();
        for idx in &idxs {
            assert!(tree.contains(idx));
        }

        let proof = tree.proof(idxs.iter()).unwrap();
        let commitment = EncodingCommitment { root: tree.root() };

        let (auth_sent, auth_recv) = proof
            .verify_with_provider(
                &HashProvider::default(),
                &encoder_secret(),
                &commitment,
                transcript.sent(),
                transcript.received(),
            )
            .unwrap();

        // Authenticated ranges are the union of the committed pieces.
        let mut expected_sent = RangeSet::default();
        expected_sent.union_mut(&idxs[0].1);
        expected_sent.union_mut(&idxs[1].1);

        let mut expected_recv = RangeSet::default();
        expected_recv.union_mut(&idxs[2].1);
        expected_recv.union_mut(&idxs[3].1);

        assert_eq!(auth_sent, expected_sent);
        assert_eq!(auth_recv, expected_recv);
    }

    #[test]
    fn test_encoding_tree_proof_missing_leaf() {
        let transcript = Transcript::new(POST_JSON, OK_JSON);

        let committed_sent = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
        let committed_recv = (Direction::Received, RangeSet::from(0..4));
        // Never committed to the tree.
        let uncommitted = (Direction::Received, RangeSet::from(4..OK_JSON.len()));

        let tree = new_tree(&transcript, [&committed_sent, &committed_recv].into_iter()).unwrap();

        let err = tree
            .proof([&committed_sent, &committed_recv, &uncommitted].into_iter())
            .unwrap_err();
        assert!(matches!(err, EncodingTreeError::MissingLeaf { .. }));
    }

    #[test]
    fn test_encoding_tree_out_of_bounds() {
        let transcript = Transcript::new(POST_JSON, OK_JSON);

        // One byte past the end of each direction's transcript.
        let sent_oob = (Direction::Sent, RangeSet::from(0..POST_JSON.len() + 1));
        let recv_oob = (Direction::Received, RangeSet::from(0..OK_JSON.len() + 1));

        for idx in [&sent_oob, &recv_oob] {
            let err = new_tree(&transcript, [idx].into_iter()).unwrap_err();
            assert!(matches!(err, EncodingTreeError::MissingEncoding { .. }));
        }
    }

    #[test]
    fn test_encoding_tree_missing_encoding() {
        // A provider over empty transcripts cannot supply any encodings.
        let provider = encoding_provider(&[], &[]);

        let err = EncodingTree::new(
            &Blake3::default(),
            [(Direction::Sent, RangeSet::from(0..8))].iter(),
            &provider,
        )
        .unwrap_err();

        assert!(matches!(err, EncodingTreeError::MissingEncoding { .. }));
    }
}

View File

@@ -14,7 +14,6 @@ use crate::{
hash::{HashAlgId, HashProvider}, hash::{HashAlgId, HashProvider},
transcript::{ transcript::{
commit::{TranscriptCommitment, TranscriptCommitmentKind}, commit::{TranscriptCommitment, TranscriptCommitmentKind},
encoding::{EncoderSecret, EncodingProof, EncodingProofError, EncodingTree},
hash::{hash_plaintext, PlaintextHash, PlaintextHashSecret}, hash::{hash_plaintext, PlaintextHash, PlaintextHashSecret},
Direction, PartialTranscript, RangeSet, Transcript, TranscriptSecret, Direction, PartialTranscript, RangeSet, Transcript, TranscriptSecret,
}, },
@@ -32,14 +31,12 @@ const DEFAULT_COMMITMENT_KINDS: &[TranscriptCommitmentKind] = &[
TranscriptCommitmentKind::Hash { TranscriptCommitmentKind::Hash {
alg: HashAlgId::KECCAK256, alg: HashAlgId::KECCAK256,
}, },
TranscriptCommitmentKind::Encoding,
]; ];
/// Proof of the contents of a transcript. /// Proof of the contents of a transcript.
#[derive(Clone, Serialize, Deserialize)] #[derive(Clone, Serialize, Deserialize)]
pub struct TranscriptProof { pub struct TranscriptProof {
transcript: PartialTranscript, transcript: PartialTranscript,
encoding_proof: Option<EncodingProof>,
hash_secrets: Vec<PlaintextHashSecret>, hash_secrets: Vec<PlaintextHashSecret>,
} }
@@ -53,27 +50,18 @@ impl TranscriptProof {
/// # Arguments /// # Arguments
/// ///
/// * `provider` - The hash provider to use for verification. /// * `provider` - The hash provider to use for verification.
/// * `attestation_body` - The attestation body to verify against. /// * `length` - The transcript length.
/// * `commitments` - The commitments to verify against.
pub fn verify_with_provider<'a>( pub fn verify_with_provider<'a>(
self, self,
provider: &HashProvider, provider: &HashProvider,
length: &TranscriptLength, length: &TranscriptLength,
encoder_secret: Option<&EncoderSecret>,
commitments: impl IntoIterator<Item = &'a TranscriptCommitment>, commitments: impl IntoIterator<Item = &'a TranscriptCommitment>,
) -> Result<PartialTranscript, TranscriptProofError> { ) -> Result<PartialTranscript, TranscriptProofError> {
let mut encoding_commitment = None;
let mut hash_commitments = HashSet::new(); let mut hash_commitments = HashSet::new();
// Index commitments. // Index commitments.
for commitment in commitments { for commitment in commitments {
match commitment { match commitment {
TranscriptCommitment::Encoding(commitment) => {
if encoding_commitment.replace(commitment).is_some() {
return Err(TranscriptProofError::new(
ErrorKind::Encoding,
"multiple encoding commitments are present.",
));
}
}
TranscriptCommitment::Hash(plaintext_hash) => { TranscriptCommitment::Hash(plaintext_hash) => {
hash_commitments.insert(plaintext_hash); hash_commitments.insert(plaintext_hash);
} }
@@ -92,34 +80,6 @@ impl TranscriptProof {
let mut total_auth_sent = RangeSet::default(); let mut total_auth_sent = RangeSet::default();
let mut total_auth_recv = RangeSet::default(); let mut total_auth_recv = RangeSet::default();
// Verify encoding proof.
if let Some(proof) = self.encoding_proof {
let secret = encoder_secret.ok_or_else(|| {
TranscriptProofError::new(
ErrorKind::Encoding,
"contains an encoding proof but missing encoder secret",
)
})?;
let commitment = encoding_commitment.ok_or_else(|| {
TranscriptProofError::new(
ErrorKind::Encoding,
"contains an encoding proof but missing encoding commitment",
)
})?;
let (auth_sent, auth_recv) = proof.verify_with_provider(
provider,
secret,
commitment,
self.transcript.sent_unsafe(),
self.transcript.received_unsafe(),
)?;
total_auth_sent.union_mut(&auth_sent);
total_auth_recv.union_mut(&auth_recv);
}
let mut buffer = Vec::new(); let mut buffer = Vec::new();
for PlaintextHashSecret { for PlaintextHashSecret {
direction, direction,
@@ -203,7 +163,6 @@ impl TranscriptProofError {
#[derive(Debug)] #[derive(Debug)]
enum ErrorKind { enum ErrorKind {
Encoding,
Hash, Hash,
Proof, Proof,
} }
@@ -213,7 +172,6 @@ impl fmt::Display for TranscriptProofError {
f.write_str("transcript proof error: ")?; f.write_str("transcript proof error: ")?;
match self.kind { match self.kind {
ErrorKind::Encoding => f.write_str("encoding error")?,
ErrorKind::Hash => f.write_str("hash error")?, ErrorKind::Hash => f.write_str("hash error")?,
ErrorKind::Proof => f.write_str("proof error")?, ErrorKind::Proof => f.write_str("proof error")?,
} }
@@ -226,12 +184,6 @@ impl fmt::Display for TranscriptProofError {
} }
} }
impl From<EncodingProofError> for TranscriptProofError {
fn from(e: EncodingProofError) -> Self {
TranscriptProofError::new(ErrorKind::Encoding, e)
}
}
/// Union of ranges to reveal. /// Union of ranges to reveal.
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]
struct QueryIdx { struct QueryIdx {
@@ -276,7 +228,6 @@ pub struct TranscriptProofBuilder<'a> {
/// Commitment kinds in order of preference for building transcript proofs. /// Commitment kinds in order of preference for building transcript proofs.
commitment_kinds: Vec<TranscriptCommitmentKind>, commitment_kinds: Vec<TranscriptCommitmentKind>,
transcript: &'a Transcript, transcript: &'a Transcript,
encoding_tree: Option<&'a EncodingTree>,
hash_secrets: Vec<&'a PlaintextHashSecret>, hash_secrets: Vec<&'a PlaintextHashSecret>,
committed_sent: RangeSet<usize>, committed_sent: RangeSet<usize>,
committed_recv: RangeSet<usize>, committed_recv: RangeSet<usize>,
@@ -292,15 +243,9 @@ impl<'a> TranscriptProofBuilder<'a> {
let mut committed_sent = RangeSet::default(); let mut committed_sent = RangeSet::default();
let mut committed_recv = RangeSet::default(); let mut committed_recv = RangeSet::default();
let mut encoding_tree = None;
let mut hash_secrets = Vec::new(); let mut hash_secrets = Vec::new();
for secret in secrets { for secret in secrets {
match secret { match secret {
TranscriptSecret::Encoding(tree) => {
committed_sent.union_mut(tree.idx(Direction::Sent));
committed_recv.union_mut(tree.idx(Direction::Received));
encoding_tree = Some(tree);
}
TranscriptSecret::Hash(hash) => { TranscriptSecret::Hash(hash) => {
match hash.direction { match hash.direction {
Direction::Sent => committed_sent.union_mut(&hash.idx), Direction::Sent => committed_sent.union_mut(&hash.idx),
@@ -314,7 +259,6 @@ impl<'a> TranscriptProofBuilder<'a> {
Self { Self {
commitment_kinds: DEFAULT_COMMITMENT_KINDS.to_vec(), commitment_kinds: DEFAULT_COMMITMENT_KINDS.to_vec(),
transcript, transcript,
encoding_tree,
hash_secrets, hash_secrets,
committed_sent, committed_sent,
committed_recv, committed_recv,
@@ -412,7 +356,6 @@ impl<'a> TranscriptProofBuilder<'a> {
transcript: self transcript: self
.transcript .transcript
.to_partial(self.query_idx.sent.clone(), self.query_idx.recv.clone()), .to_partial(self.query_idx.sent.clone(), self.query_idx.recv.clone()),
encoding_proof: None,
hash_secrets: Vec::new(), hash_secrets: Vec::new(),
}; };
let mut uncovered_query_idx = self.query_idx.clone(); let mut uncovered_query_idx = self.query_idx.clone();
@@ -424,46 +367,6 @@ impl<'a> TranscriptProofBuilder<'a> {
// self.commitment_kinds. // self.commitment_kinds.
if let Some(kind) = commitment_kinds_iter.next() { if let Some(kind) = commitment_kinds_iter.next() {
match kind { match kind {
TranscriptCommitmentKind::Encoding => {
let Some(encoding_tree) = self.encoding_tree else {
// Proceeds to the next preferred commitment kind if encoding tree is
// not available.
continue;
};
let (sent_dir_idxs, sent_uncovered) = uncovered_query_idx.sent.cover_by(
encoding_tree
.transcript_indices()
.filter(|(dir, _)| *dir == Direction::Sent),
|(_, idx)| idx,
);
// Uncovered ranges will be checked with ranges of the next
// preferred commitment kind.
uncovered_query_idx.sent = sent_uncovered;
let (recv_dir_idxs, recv_uncovered) = uncovered_query_idx.recv.cover_by(
encoding_tree
.transcript_indices()
.filter(|(dir, _)| *dir == Direction::Received),
|(_, idx)| idx,
);
uncovered_query_idx.recv = recv_uncovered;
let dir_idxs = sent_dir_idxs
.into_iter()
.chain(recv_dir_idxs)
.collect::<Vec<_>>();
// Skip proof generation if there are no committed ranges that can cover the
// query ranges.
if !dir_idxs.is_empty() {
transcript_proof.encoding_proof = Some(
encoding_tree
.proof(dir_idxs.into_iter())
.expect("subsequences were checked to be in tree"),
);
}
}
TranscriptCommitmentKind::Hash { alg } => { TranscriptCommitmentKind::Hash { alg } => {
let (sent_hashes, sent_uncovered) = uncovered_query_idx.sent.cover_by( let (sent_hashes, sent_uncovered) = uncovered_query_idx.sent.cover_by(
self.hash_secrets.iter().filter(|hash| { self.hash_secrets.iter().filter(|hash| {
@@ -590,46 +493,10 @@ mod tests {
use rstest::rstest; use rstest::rstest;
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON}; use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
use crate::{ use crate::hash::{Blinder, HashAlgId};
fixtures::{encoder_secret, encoding_provider},
hash::{Blake3, Blinder, HashAlgId},
transcript::TranscriptCommitConfigBuilder,
};
use super::*; use super::*;
#[rstest]
fn test_verify_missing_encoding_commitment_root() {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let idxs = vec![(Direction::Received, RangeSet::from(0..transcript.len().1))];
let encoding_tree = EncodingTree::new(
&Blake3::default(),
&idxs,
&encoding_provider(transcript.sent(), transcript.received()),
)
.unwrap();
let secrets = vec![TranscriptSecret::Encoding(encoding_tree)];
let mut builder = TranscriptProofBuilder::new(&transcript, &secrets);
builder.reveal_recv(&(0..transcript.len().1)).unwrap();
let transcript_proof = builder.build().unwrap();
let provider = HashProvider::default();
let err = transcript_proof
.verify_with_provider(
&provider,
&transcript.length(),
Some(&encoder_secret()),
&[],
)
.err()
.unwrap();
assert!(matches!(err.kind, ErrorKind::Encoding));
}
#[rstest] #[rstest]
fn test_reveal_range_out_of_bounds() { fn test_reveal_range_out_of_bounds() {
let transcript = Transcript::new( let transcript = Transcript::new(
@@ -649,7 +516,7 @@ mod tests {
} }
#[rstest] #[rstest]
fn test_reveal_missing_encoding_tree() { fn test_reveal_missing_commitment() {
let transcript = Transcript::new( let transcript = Transcript::new(
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
@@ -698,7 +565,6 @@ mod tests {
.verify_with_provider( .verify_with_provider(
&provider, &provider,
&transcript.length(), &transcript.length(),
None,
&[TranscriptCommitment::Hash(commitment)], &[TranscriptCommitment::Hash(commitment)],
) )
.unwrap(); .unwrap();
@@ -748,7 +614,6 @@ mod tests {
.verify_with_provider( .verify_with_provider(
&provider, &provider,
&transcript.length(), &transcript.length(),
None,
&[TranscriptCommitment::Hash(commitment)], &[TranscriptCommitment::Hash(commitment)],
) )
.unwrap_err(); .unwrap_err();
@@ -764,24 +629,19 @@ mod tests {
TranscriptCommitmentKind::Hash { TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256, alg: HashAlgId::SHA256,
}, },
TranscriptCommitmentKind::Encoding,
TranscriptCommitmentKind::Hash { TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256, alg: HashAlgId::SHA256,
}, },
TranscriptCommitmentKind::Hash { TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256, alg: HashAlgId::SHA256,
}, },
TranscriptCommitmentKind::Encoding,
]); ]);
assert_eq!( assert_eq!(
builder.commitment_kinds, builder.commitment_kinds,
vec![ vec![TranscriptCommitmentKind::Hash {
TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256 alg: HashAlgId::SHA256
}, },]
TranscriptCommitmentKind::Encoding
]
); );
} }
@@ -791,7 +651,7 @@ mod tests {
RangeSet::from([0..10, 12..30]), RangeSet::from([0..10, 12..30]),
true, true,
)] )]
#[case::reveal_all_rangesets_with_superset_ranges( #[case::reveal_all_rangesets_with_single_superset_range(
vec![RangeSet::from([0..1]), RangeSet::from([1..2, 8..9]), RangeSet::from([2..4, 6..8]), RangeSet::from([2..3, 6..7]), RangeSet::from([9..12])], vec![RangeSet::from([0..1]), RangeSet::from([1..2, 8..9]), RangeSet::from([2..4, 6..8]), RangeSet::from([2..3, 6..7]), RangeSet::from([9..12])],
RangeSet::from([0..4, 6..9]), RangeSet::from([0..4, 6..9]),
true, true,
@@ -822,29 +682,30 @@ mod tests {
false, false,
)] )]
#[allow(clippy::single_range_in_vec_init)] #[allow(clippy::single_range_in_vec_init)]
fn test_reveal_mutliple_rangesets_with_one_rangeset( fn test_reveal_multiple_rangesets_with_one_rangeset(
#[case] commit_recv_rangesets: Vec<RangeSet<usize>>, #[case] commit_recv_rangesets: Vec<RangeSet<usize>>,
#[case] reveal_recv_rangeset: RangeSet<usize>, #[case] reveal_recv_rangeset: RangeSet<usize>,
#[case] success: bool, #[case] success: bool,
) { ) {
use rand::{Rng, SeedableRng};
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
// Encoding commitment kind // Create hash commitments for each rangeset
let mut transcript_commitment_builder = TranscriptCommitConfigBuilder::new(&transcript); let mut secrets = Vec::new();
for rangeset in commit_recv_rangesets.iter() { for rangeset in commit_recv_rangesets.iter() {
transcript_commitment_builder.commit_recv(rangeset).unwrap(); let blinder: crate::hash::Blinder = rng.random();
let secret = PlaintextHashSecret {
direction: Direction::Received,
idx: rangeset.clone(),
alg: HashAlgId::BLAKE3,
blinder,
};
secrets.push(TranscriptSecret::Hash(secret));
} }
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap();
let encoding_tree = EncodingTree::new(
&Blake3::default(),
transcripts_commitment_config.iter_encoding(),
&encoding_provider(GET_WITH_HEADER, OK_JSON),
)
.unwrap();
let secrets = vec![TranscriptSecret::Encoding(encoding_tree)];
let mut builder = TranscriptProofBuilder::new(&transcript, &secrets); let mut builder = TranscriptProofBuilder::new(&transcript, &secrets);
if success { if success {
@@ -897,27 +758,34 @@ mod tests {
#[case] uncovered_sent_rangeset: RangeSet<usize>, #[case] uncovered_sent_rangeset: RangeSet<usize>,
#[case] uncovered_recv_rangeset: RangeSet<usize>, #[case] uncovered_recv_rangeset: RangeSet<usize>,
) { ) {
use rand::{Rng, SeedableRng};
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON); let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
// Encoding commitment kind // Create hash commitments for each rangeset
let mut transcript_commitment_builder = TranscriptCommitConfigBuilder::new(&transcript); let mut secrets = Vec::new();
for rangeset in commit_sent_rangesets.iter() { for rangeset in commit_sent_rangesets.iter() {
transcript_commitment_builder.commit_sent(rangeset).unwrap(); let blinder: crate::hash::Blinder = rng.random();
let secret = PlaintextHashSecret {
direction: Direction::Sent,
idx: rangeset.clone(),
alg: HashAlgId::BLAKE3,
blinder,
};
secrets.push(TranscriptSecret::Hash(secret));
} }
for rangeset in commit_recv_rangesets.iter() { for rangeset in commit_recv_rangesets.iter() {
transcript_commitment_builder.commit_recv(rangeset).unwrap(); let blinder: crate::hash::Blinder = rng.random();
let secret = PlaintextHashSecret {
direction: Direction::Received,
idx: rangeset.clone(),
alg: HashAlgId::BLAKE3,
blinder,
};
secrets.push(TranscriptSecret::Hash(secret));
} }
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap();
let encoding_tree = EncodingTree::new(
&Blake3::default(),
transcripts_commitment_config.iter_encoding(),
&encoding_provider(GET_WITH_HEADER, OK_JSON),
)
.unwrap();
let secrets = vec![TranscriptSecret::Encoding(encoding_tree)];
let mut builder = TranscriptProofBuilder::new(&transcript, &secrets); let mut builder = TranscriptProofBuilder::new(&transcript, &secrets);
builder.reveal_sent(&reveal_sent_rangeset).unwrap(); builder.reveal_sent(&reveal_sent_rangeset).unwrap();
builder.reveal_recv(&reveal_recv_rangeset).unwrap(); builder.reveal_recv(&reveal_recv_rangeset).unwrap();

View File

@@ -43,12 +43,12 @@ noir = { git = "https://github.com/zkmopro/noir-rs", tag = "v1.0.0-beta.8", feat
] } ] }
[[example]] [[example]]
name = "interactive" name = "basic"
path = "interactive/interactive.rs" path = "basic/basic.rs"
[[example]] [[example]]
name = "interactive_zk" name = "basic_zk"
path = "interactive_zk/interactive_zk.rs" path = "basic_zk/basic_zk.rs"
[[example]] [[example]]
name = "attestation_prove" name = "attestation_prove"

View File

@@ -2,9 +2,9 @@
This folder contains examples demonstrating how to use the TLSNotary protocol. This folder contains examples demonstrating how to use the TLSNotary protocol.
* [Interactive](./interactive/README.md): Interactive Prover and Verifier session without a trusted notary. * [Basic](./basic/README.md): Basic Prover and Verifier session.
* [Attestation](./attestation/README.md): Performing a simple notarization with a trusted notary. * [Attestation](./attestation/README.md): Issuing an attestation where a Verifier acts as a Notary.
* [Interactive_zk](./interactive_zk/README.md): Interactive Prover and Verifier session demonstrating zero-knowledge age verification using Noir. * [Basic_zk](./basic_zk/README.md): Basic Prover and Verifier session demonstrating zero-knowledge age verification using Noir.
Refer to <https://tlsnotary.org/docs/quick_start> for a quick start guide to using TLSNotary with these examples. Refer to <https://tlsnotary.org/docs/quick_start> for a quick start guide to using TLSNotary with these examples.

View File

@@ -1,21 +1,20 @@
# Attestation Example # Attestation Example
This example demonstrates an **attestation workflow**: notarizing data from a server with a trusted third party (Notary), then creating verifiable presentations with selective disclosure of sensitive information to a Verifier.
This example demonstrates a **TLSNotary attestation workflow**: notarizing data from a server with a trusted third party (Notary), then creating verifiable presentations with selective disclosure of sensitive information to a Verifier.
## 🔍 How It Works ## 🔍 How It Works
```mermaid ```mermaid
sequenceDiagram sequenceDiagram
participant P as Prover participant P as Prover
participant N as MPC-TLS<br/>Verifier participant N as TLS<br/>Verifier
participant S as Server<br/>Fixture participant S as Server<br/>Fixture
participant V as Attestation<br/>Verifier participant V as Attestation<br/>Verifier
Note over P,S: 1. Notarization Phase Note over P,S: 1. Notarization Phase
P->>N: Establish MPC-TLS connection P->>N: Establish TLS connection
P->>S: Request (MPC-TLS) P->>S: Request (TLS)
S->>P: Response (MPC-TLS) S->>P: Response (TLS)
N->>P: Issue signed attestation N->>P: Issue signed attestation
Note over P: 2. Presentation Phase Note over P: 2. Presentation Phase

View File

@@ -33,8 +33,9 @@ use tlsn::{
connection::{ConnectionInfo, HandshakeData, ServerName, TranscriptLength}, connection::{ConnectionInfo, HandshakeData, ServerName, TranscriptLength},
prover::{state::Committed, Prover, ProverOutput}, prover::{state::Committed, Prover, ProverOutput},
transcript::{ContentType, TranscriptCommitConfig}, transcript::{ContentType, TranscriptCommitConfig},
verifier::{Verifier, VerifierOutput}, verifier::VerifierOutput,
webpki::{CertificateDer, PrivateKeyDer, RootCertStore}, webpki::{CertificateDer, PrivateKeyDer, RootCertStore},
Session,
}; };
use tlsn_examples::ExampleType; use tlsn_examples::ExampleType;
use tlsn_formats::http::{DefaultHttpCommitter, HttpCommit, HttpTranscript}; use tlsn_formats::http::{DefaultHttpCommitter, HttpCommit, HttpTranscript};
@@ -99,8 +100,16 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
.map(|port| port.parse().expect("port should be valid integer")) .map(|port| port.parse().expect("port should be valid integer"))
.unwrap_or(DEFAULT_FIXTURE_PORT); .unwrap_or(DEFAULT_FIXTURE_PORT);
// Create a session with the notary.
let session = Session::new(socket.compat());
let (driver, mut handle) = session.split();
// Spawn the session driver to run in the background.
let driver_task = tokio::spawn(driver);
// Create a new prover and perform necessary setup. // Create a new prover and perform necessary setup.
let prover = Prover::new(ProverConfig::builder().build()?) let prover = handle
.new_prover(ProverConfig::builder().build()?)?
.commit( .commit(
TlsCommitConfig::builder() TlsCommitConfig::builder()
// Select the TLS commitment protocol. // Select the TLS commitment protocol.
@@ -115,7 +124,6 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
.build()?, .build()?,
) )
.build()?, .build()?,
socket.compat(),
) )
.await?; .await?;
@@ -224,6 +232,10 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
let (attestation, secrets) = notarize(prover, &request_config, req_tx, resp_rx).await?; let (attestation, secrets) = notarize(prover, &request_config, req_tx, resp_rx).await?;
// Close the session and wait for the driver to complete.
handle.close();
driver_task.await??;
// Write the attestation to disk. // Write the attestation to disk.
let attestation_path = tlsn_examples::get_file_path(example_type, "attestation"); let attestation_path = tlsn_examples::get_file_path(example_type, "attestation");
let secrets_path = tlsn_examples::get_file_path(example_type, "secrets"); let secrets_path = tlsn_examples::get_file_path(example_type, "secrets");
@@ -311,6 +323,13 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
request_rx: Receiver<AttestationRequest>, request_rx: Receiver<AttestationRequest>,
attestation_tx: Sender<Attestation>, attestation_tx: Sender<Attestation>,
) -> Result<()> { ) -> Result<()> {
// Create a session with the prover.
let session = Session::new(socket.compat());
let (driver, mut handle) = session.split();
// Spawn the session driver to run in the background.
let driver_task = tokio::spawn(driver);
// Create a root certificate store with the server-fixture's self-signed // Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the // certificate. This is only required for offline testing with the
// server-fixture. // server-fixture.
@@ -321,8 +340,9 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
.build() .build()
.unwrap(); .unwrap();
let verifier = Verifier::new(verifier_config) let verifier = handle
.commit(socket.compat()) .new_verifier(verifier_config)?
.commit()
.await? .await?
.accept() .accept()
.await? .await?
@@ -332,7 +352,6 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
let ( let (
VerifierOutput { VerifierOutput {
transcript_commitments, transcript_commitments,
encoder_secret,
.. ..
}, },
verifier, verifier,
@@ -393,10 +412,6 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
.server_ephemeral_key(tls_transcript.server_ephemeral_key().clone()) .server_ephemeral_key(tls_transcript.server_ephemeral_key().clone())
.transcript_commitments(transcript_commitments); .transcript_commitments(transcript_commitments);
if let Some(encoder_secret) = encoder_secret {
builder.encoder_secret(encoder_secret);
}
let attestation = builder.build(&provider)?; let attestation = builder.build(&provider)?;
// Send attestation to prover. // Send attestation to prover.
@@ -404,5 +419,9 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
.send(attestation) .send(attestation)
.map_err(|_| anyhow!("prover is not receiving attestation"))?; .map_err(|_| anyhow!("prover is not receiving attestation"))?;
// Close the session and wait for the driver to complete.
handle.close();
driver_task.await??;
Ok(()) Ok(())
} }

View File

@@ -1,18 +1,18 @@
## Simple Interactive Verifier: Verifying Data from an API in Rust ## Simple Basic Verifier: Verifying Data from an API in Rust
This example demonstrates how to use TLSNotary in a simple interactive session between a Prover and a Verifier. It involves the Verifier first verifying the MPC-TLS session and then confirming the correctness of the data. This example demonstrates how to use TLSNotary in a simple session between a Prover and a Verifier.
This example fetches data from a local test server. To start the server, run the following command from the root of this repository (not from this example's folder): This example fetches data from a local test server. To start the server, run the following command from the root of this repository (not from this example's folder):
```shell ```shell
RUST_LOG=info PORT=4000 cargo run --bin tlsn-server-fixture RUST_LOG=info PORT=4000 cargo run --bin tlsn-server-fixture
``` ```
Next, run the interactive example with: Next, run the basic example with:
```shell ```shell
SERVER_PORT=4000 cargo run --release --example interactive SERVER_PORT=4000 cargo run --release --example basic
``` ```
To view more detailed debug information, use the following command: To view more detailed debug information, use the following command:
``` ```
RUST_LOG=debug,yamux=info,uid_mux=info SERVER_PORT=4000 cargo run --release --example interactive RUST_LOG=debug,yamux=info,uid_mux=info SERVER_PORT=4000 cargo run --release --example basic
``` ```
> Note: In this example, the Prover and Verifier run on the same machine. In real-world scenarios, the Prover and Verifier would typically operate on separate machines. > Note: In this example, the Prover and Verifier run on the same machine. In real-world scenarios, the Prover and Verifier would typically operate on separate machines.

View File

@@ -20,10 +20,10 @@ use tlsn::{
verifier::VerifierConfig, verifier::VerifierConfig,
}, },
connection::ServerName, connection::ServerName,
prover::Prover,
transcript::PartialTranscript, transcript::PartialTranscript,
verifier::{Verifier, VerifierOutput}, verifier::VerifierOutput,
webpki::{CertificateDer, RootCertStore}, webpki::{CertificateDer, RootCertStore},
Session,
}; };
use tlsn_server_fixture::DEFAULT_FIXTURE_PORT; use tlsn_server_fixture::DEFAULT_FIXTURE_PORT;
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN}; use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
@@ -77,8 +77,16 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
assert_eq!(uri.scheme().unwrap().as_str(), "https"); assert_eq!(uri.scheme().unwrap().as_str(), "https");
let server_domain = uri.authority().unwrap().host(); let server_domain = uri.authority().unwrap().host();
// Create a session with the verifier.
let session = Session::new(verifier_socket.compat());
let (driver, mut handle) = session.split();
// Spawn the session driver to run in the background.
let driver_task = tokio::spawn(driver);
// Create a new prover and perform necessary setup. // Create a new prover and perform necessary setup.
let prover = Prover::new(ProverConfig::builder().build()?) let prover = handle
.new_prover(ProverConfig::builder().build()?)?
.commit( .commit(
TlsCommitConfig::builder() TlsCommitConfig::builder()
// Select the TLS commitment protocol. // Select the TLS commitment protocol.
@@ -93,7 +101,6 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
.build()?, .build()?,
) )
.build()?, .build()?,
verifier_socket.compat(),
) )
.await?; .await?;
@@ -120,14 +127,13 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
// Spawn the Prover to run in the background. // Spawn the Prover to run in the background.
let prover_task = tokio::spawn(prover_fut); let prover_task = tokio::spawn(prover_fut);
// MPC-TLS Handshake.
let (mut request_sender, connection) = let (mut request_sender, connection) =
hyper::client::conn::http1::handshake(tls_connection).await?; hyper::client::conn::http1::handshake(tls_connection).await?;
// Spawn the connection to run in the background. // Spawn the connection to run in the background.
tokio::spawn(connection); tokio::spawn(connection);
// MPC-TLS: Send Request and wait for Response. // Send Request and wait for Response.
let request = Request::builder() let request = Request::builder()
.uri(uri.clone()) .uri(uri.clone())
.header("Host", server_domain) .header("Host", server_domain)
@@ -176,6 +182,10 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
prover.prove(&config).await?; prover.prove(&config).await?;
prover.close().await?; prover.close().await?;
// Close the session and wait for the driver to complete.
handle.close();
driver_task.await??;
Ok(()) Ok(())
} }
@@ -183,6 +193,13 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>( async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
socket: T, socket: T,
) -> Result<PartialTranscript> { ) -> Result<PartialTranscript> {
// Create a session with the prover.
let session = Session::new(socket.compat());
let (driver, mut handle) = session.split();
// Spawn the session driver to run in the background.
let driver_task = tokio::spawn(driver);
// Create a root certificate store with the server-fixture's self-signed // Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the // certificate. This is only required for offline testing with the
// server-fixture. // server-fixture.
@@ -191,10 +208,10 @@ async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
roots: vec![CertificateDer(CA_CERT_DER.to_vec())], roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
}) })
.build()?; .build()?;
let verifier = Verifier::new(verifier_config); let verifier = handle.new_verifier(verifier_config)?;
// Validate the proposed configuration and then run the TLS commitment protocol. // Validate the proposed configuration and then run the TLS commitment protocol.
let verifier = verifier.commit(socket.compat()).await?; let verifier = verifier.commit().await?;
// This is the opportunity to ensure the prover does not attempt to overload the // This is the opportunity to ensure the prover does not attempt to overload the
// verifier. // verifier.
@@ -241,6 +258,10 @@ async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
verifier.close().await?; verifier.close().await?;
// Close the session and wait for the driver to complete.
handle.close();
driver_task.await??;
let server_name = server_name.expect("prover should have revealed server name"); let server_name = server_name.expect("prover should have revealed server name");
let transcript = transcript.expect("prover should have revealed transcript data"); let transcript = transcript.expect("prover should have revealed transcript data");

View File

@@ -1,4 +1,4 @@
# Interactive Zero-Knowledge Age Verification with TLSNotary # Basic Zero-Knowledge Age Verification with TLSNotary
This example demonstrates **privacy-preserving age verification** using TLSNotary and zero-knowledge proofs. It allows a prover to demonstrate they are 18+ years old without revealing their actual birth date or any other personal information. This example demonstrates **privacy-preserving age verification** using TLSNotary and zero-knowledge proofs. It allows a prover to demonstrate they are 18+ years old without revealing their actual birth date or any other personal information.
@@ -10,8 +10,8 @@ sequenceDiagram
participant P as Prover participant P as Prover
participant V as Verifier participant V as Verifier
P->>S: Request tax data (with auth token) (MPC-TLS) P->>S: Request tax data (with auth token) (TLS)
S->>P: Tax data including `date_of_birth` (MPC-TLS) S->>P: Tax data including `date_of_birth` (TLS)
P->>V: Share transcript with redactions P->>V: Share transcript with redactions
P->>V: Commit to blinded hash of birth date P->>V: Commit to blinded hash of birth date
P->>P: Generate ZK proof of age ≥ 18 P->>P: Generate ZK proof of age ≥ 18
@@ -22,7 +22,7 @@ sequenceDiagram
### The Process ### The Process
1. **MPC-TLS Session**: The Prover fetches tax information containing their birth date, while the Verifier jointly verifies the TLS session to ensure the data comes from the authentic server. 1. **TLS Commitment**: The Prover fetches tax information containing their birth date, while the Verifier jointly verifies the TLS session to ensure the data comes from the authentic server.
2. **Selective Disclosure**: 2. **Selective Disclosure**:
* The authorization token is **redacted**: the Verifier sees the plaintext request but not the token. * The authorization token is **redacted**: the Verifier sees the plaintext request but not the token.
* The birth date is **committed** as a blinded hash: the Verifier cannot see the date, but the Prover is cryptographically bound to it. * The birth date is **committed** as a blinded hash: the Verifier cannot see the date, but the Prover is cryptographically bound to it.
@@ -61,7 +61,7 @@ The ZK circuit proves: **"I know a birth date that hashes to the committed value
**What the Verifier Learns:** **What the Verifier Learns:**
- ✅ The prover is 18+ years old - ✅ The prover is 18+ years old
- ✅ The birth date is authentic (from the MPC-TLS session) - ✅ The birth date is authentic (from the TLS session)
Everything else remains private. Everything else remains private.
@@ -74,12 +74,12 @@ Everything else remains private.
2. **Run the age verification** (in a new terminal): 2. **Run the age verification** (in a new terminal):
```bash ```bash
SERVER_PORT=4000 cargo run --release --example interactive_zk SERVER_PORT=4000 cargo run --release --example basic_zk
``` ```
3. **For detailed logs**: 3. **For detailed logs**:
```bash ```bash
RUST_LOG=debug,yamux=info,uid_mux=info SERVER_PORT=4000 cargo run --release --example interactive_zk RUST_LOG=debug,yamux=info,uid_mux=info SERVER_PORT=4000 cargo run --release --example basic_zk
``` ```
### Expected Output ### Expected Output
@@ -106,11 +106,11 @@ Verified received data:
### Project Structure ### Project Structure
``` ```
interactive_zk/ basic_zk/
├── prover.rs # Prover implementation ├── prover.rs # Prover implementation
├── verifier.rs # Verifier implementation ├── verifier.rs # Verifier implementation
├── types.rs # Shared types ├── types.rs # Shared types
└── interactive_zk.rs # Main example runner └── basic_zk.rs # Main example runner
├── noir/ # Zero-knowledge circuit ├── noir/ # Zero-knowledge circuit
│ ├── src/main.n # Noir circuit code │ ├── src/main.n # Noir circuit code
│ ├── target/ # Compiled circuit artifacts │ ├── target/ # Compiled circuit artifacts

View File

@@ -31,11 +31,10 @@ async fn main() -> Result<()> {
// Connect prover and verifier. // Connect prover and verifier.
let (prover_socket, verifier_socket) = tokio::io::duplex(1 << 23); let (prover_socket, verifier_socket) = tokio::io::duplex(1 << 23);
let (prover_extra_socket, verifier_extra_socket) = tokio::io::duplex(1 << 23);
let (_, transcript) = tokio::try_join!( let (_, transcript) = tokio::try_join!(
prover(prover_socket, prover_extra_socket, &server_addr, &uri), prover(prover_socket, &server_addr, &uri),
verifier(verifier_socket, verifier_extra_socket) verifier(verifier_socket)
)?; )?;
println!("---"); println!("---");

View File

@@ -32,24 +32,24 @@ use tlsn::{
}, },
connection::ServerName, connection::ServerName,
hash::HashAlgId, hash::HashAlgId,
prover::Prover,
transcript::{ transcript::{
hash::{PlaintextHash, PlaintextHashSecret}, hash::{PlaintextHash, PlaintextHashSecret},
Direction, TranscriptCommitConfig, TranscriptCommitConfigBuilder, TranscriptCommitmentKind, Direction, TranscriptCommitConfig, TranscriptCommitConfigBuilder, TranscriptCommitmentKind,
TranscriptSecret, TranscriptSecret,
}, },
webpki::{CertificateDer, RootCertStore}, webpki::{CertificateDer, RootCertStore},
Session,
}; };
use futures::io::AsyncWriteExt as _;
use tlsn_examples::{MAX_RECV_DATA, MAX_SENT_DATA}; use tlsn_examples::{MAX_RECV_DATA, MAX_SENT_DATA};
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt}; use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
use tracing::instrument; use tracing::instrument;
#[instrument(skip(verifier_socket, verifier_extra_socket))] #[instrument(skip(verifier_socket))]
pub async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>( pub async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
verifier_socket: T, verifier_socket: T,
mut verifier_extra_socket: T,
server_addr: &SocketAddr, server_addr: &SocketAddr,
uri: &str, uri: &str,
) -> Result<()> { ) -> Result<()> {
@@ -64,8 +64,16 @@ pub async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
.ok_or_else(|| anyhow::anyhow!("URI must have authority"))? .ok_or_else(|| anyhow::anyhow!("URI must have authority"))?
.host(); .host();
// Create a session with the verifier.
let session = Session::new(verifier_socket.compat());
let (driver, mut handle) = session.split();
// Spawn the session driver to run in the background.
let driver_task = tokio::spawn(driver);
// Create a new prover and perform necessary setup. // Create a new prover and perform necessary setup.
let prover = Prover::new(ProverConfig::builder().build()?) let prover = handle
.new_prover(ProverConfig::builder().build()?)?
.commit( .commit(
TlsCommitConfig::builder() TlsCommitConfig::builder()
// Select the TLS commitment protocol. // Select the TLS commitment protocol.
@@ -80,7 +88,6 @@ pub async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
.build()?, .build()?,
) )
.build()?, .build()?,
verifier_socket.compat(),
) )
.await?; .await?;
@@ -107,14 +114,13 @@ pub async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
// Spawn the Prover to run in the background. // Spawn the Prover to run in the background.
let prover_task = tokio::spawn(prover_fut); let prover_task = tokio::spawn(prover_fut);
// MPC-TLS Handshake.
let (mut request_sender, connection) = let (mut request_sender, connection) =
hyper::client::conn::http1::handshake(tls_connection).await?; hyper::client::conn::http1::handshake(tls_connection).await?;
// Spawn the connection to run in the background. // Spawn the connection to run in the background.
tokio::spawn(connection); tokio::spawn(connection);
// MPC-TLS: Send Request and wait for Response. // Send Request and wait for Response.
let request = Request::builder() let request = Request::builder()
.uri(uri.clone()) .uri(uri.clone())
.header("Host", server_domain) .header("Host", server_domain)
@@ -127,7 +133,7 @@ pub async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
if response.status() != StatusCode::OK { if response.status() != StatusCode::OK {
return Err(anyhow::anyhow!( return Err(anyhow::anyhow!(
"MPC-TLS request failed with status {}", "request failed with status {}",
response.status() response.status()
)); ));
} }
@@ -166,10 +172,13 @@ pub async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
let prove_config = prove_config_builder.build()?; let prove_config = prove_config_builder.build()?;
// MPC-TLS prove
let prover_output = prover.prove(&prove_config).await?; let prover_output = prover.prove(&prove_config).await?;
prover.close().await?; prover.close().await?;
// Close the session and wait for the driver to complete, reclaiming the socket.
handle.close();
let mut socket = driver_task.await??;
// Prove birthdate is more than 18 years ago. // Prove birthdate is more than 18 years ago.
let received_commitments = received_commitments(&prover_output.transcript_commitments); let received_commitments = received_commitments(&prover_output.transcript_commitments);
let received_commitment = received_commitments let received_commitment = received_commitments
@@ -184,8 +193,8 @@ pub async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
// Sent zk proof bundle to verifier // Sent zk proof bundle to verifier
let serialized_proof = bincode::serialize(&proof_bundle)?; let serialized_proof = bincode::serialize(&proof_bundle)?;
verifier_extra_socket.write_all(&serialized_proof).await?; socket.write_all(&serialized_proof).await?;
verifier_extra_socket.shutdown().await?; socket.close().await?;
Ok(()) Ok(())
} }

View File

@@ -3,6 +3,7 @@ use crate::types::received_commitments;
use super::types::ZKProofBundle; use super::types::ZKProofBundle;
use anyhow::Result; use anyhow::Result;
use chrono::{Local, NaiveDate}; use chrono::{Local, NaiveDate};
use futures::io::AsyncReadExt as _;
use noir::barretenberg::verify::{get_ultra_honk_verification_key, verify_ultra_honk}; use noir::barretenberg::verify::{get_ultra_honk_verification_key, verify_ultra_honk};
use serde_json::Value; use serde_json::Value;
use tls_server_fixture::CA_CERT_DER; use tls_server_fixture::CA_CERT_DER;
@@ -11,33 +12,39 @@ use tlsn::{
connection::ServerName, connection::ServerName,
hash::HashAlgId, hash::HashAlgId,
transcript::{Direction, PartialTranscript}, transcript::{Direction, PartialTranscript},
verifier::{Verifier, VerifierOutput}, verifier::VerifierOutput,
webpki::{CertificateDer, RootCertStore}, webpki::{CertificateDer, RootCertStore},
Session,
}; };
use tlsn_examples::{MAX_RECV_DATA, MAX_SENT_DATA}; use tlsn_examples::{MAX_RECV_DATA, MAX_SENT_DATA};
use tlsn_server_fixture_certs::SERVER_DOMAIN; use tlsn_server_fixture_certs::SERVER_DOMAIN;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite}; use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::TokioAsyncReadCompatExt; use tokio_util::compat::TokioAsyncReadCompatExt;
use tracing::instrument; use tracing::instrument;
#[instrument(skip(socket, extra_socket))] #[instrument(skip(socket))]
pub async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>( pub async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
socket: T, socket: T,
mut extra_socket: T,
) -> Result<PartialTranscript> { ) -> Result<PartialTranscript> {
let verifier = Verifier::new( // Create a session with the prover.
VerifierConfig::builder() let session = Session::new(socket.compat());
let (driver, mut handle) = session.split();
// Spawn the session driver to run in the background.
let driver_task = tokio::spawn(driver);
// Create a root certificate store with the server-fixture's self-signed // Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the // certificate. This is only required for offline testing with the
// server-fixture. // server-fixture.
let verifier_config = VerifierConfig::builder()
.root_store(RootCertStore { .root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())], roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
}) })
.build()?, .build()?;
); let verifier = handle.new_verifier(verifier_config)?;
// Validate the proposed configuration and then run the TLS commitment protocol. // Validate the proposed configuration and then run the TLS commitment protocol.
let verifier = verifier.commit(socket.compat()).await?; let verifier = verifier.commit().await?;
// This is the opportunity to ensure the prover does not attempt to overload the // This is the opportunity to ensure the prover does not attempt to overload the
// verifier. // verifier.
@@ -90,6 +97,10 @@ pub async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>
verifier.close().await?; verifier.close().await?;
// Close the session and wait for the driver to complete, reclaiming the socket.
handle.close();
let mut socket = driver_task.await??;
let server_name = server_name.expect("server name should be present"); let server_name = server_name.expect("server name should be present");
let transcript = transcript.expect("transcript should be present"); let transcript = transcript.expect("transcript should be present");
@@ -126,7 +137,7 @@ pub async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>
// Receive ZKProof information from prover // Receive ZKProof information from prover
let mut buf = Vec::new(); let mut buf = Vec::new();
extra_socket.read_to_end(&mut buf).await?; socket.read_to_end(&mut buf).await?;
if buf.is_empty() { if buf.is_empty() {
return Err(anyhow::anyhow!("No ZK proof data received from prover")); return Err(anyhow::anyhow!("No ZK proof data received from prover"));
@@ -193,16 +204,16 @@ pub async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>
let expected_hash = committed_hash.value.as_bytes().to_vec(); let expected_hash = committed_hash.value.as_bytes().to_vec();
if committed_hash_in_proof != expected_hash { if committed_hash_in_proof != expected_hash {
tracing::error!( tracing::error!(
"❌ The hash in the proof does not match the committed hash in MPC-TLS: {} != {}", "❌ The hash in the proof does not match the committed hash: {} != {}",
hex::encode(&committed_hash_in_proof), hex::encode(&committed_hash_in_proof),
hex::encode(&expected_hash) hex::encode(&expected_hash)
); );
return Err(anyhow::anyhow!( return Err(anyhow::anyhow!(
"Hash in proof does not match committed hash in MPC-TLS" "Hash in proof does not match committed hash"
)); ));
} }
tracing::info!( tracing::info!(
"✅ The hash in the proof matches the committed hash in MPC-TLS ({})", "✅ The hash in the proof matches the committed hash ({})",
hex::encode(&expected_hash) hex::encode(&expected_hash)
); );

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "tlsn-formats" name = "tlsn-formats"
version = "0.1.0-alpha.14-pre" version = "0.1.0-alpha.14"
edition = "2021" edition = "2021"
[lints] [lints]

View File

@@ -1,51 +1,59 @@
#### Latency #### #### Default Representative Benchmarks ####
#
# This benchmark measures TLSNotary performance on three representative network scenarios.
# Each scenario is run multiple times to produce statistical metrics (median, std dev, etc.)
# rather than plots. Use this for quick performance checks and CI regression testing.
#
# Payload sizes:
# - upload-size: 1KB (typical HTTP request)
# - download-size: 2KB (typical HTTP response/API data)
#
# Network scenarios are chosen to represent real-world user conditions where
# TLSNotary is primarily bottlenecked by upload bandwidth.
#### Cable/DSL Home Internet ####
# Most common residential internet connection
# - Asymmetric: high download, limited upload (typical bottleneck)
# - Upload bandwidth: 20 Mbps (realistic cable/DSL upload speed)
# - Latency: 20ms (typical ISP latency)
[[group]] [[group]]
name = "latency" name = "cable"
bandwidth = 1000 bandwidth = 20
protocol_latency = 20
upload-size = 1024
download-size = 2048
[[bench]] [[bench]]
group = "latency" group = "cable"
protocol_latency = 10
[[bench]] #### Mobile 5G ####
group = "latency" # Modern mobile connection with good coverage
protocol_latency = 25 # - Upload bandwidth: 30 Mbps (typical 5G upload in good conditions)
# - Latency: 30ms (higher than wired due to mobile tower hops)
[[bench]]
group = "latency"
protocol_latency = 50
[[bench]]
group = "latency"
protocol_latency = 100
[[bench]]
group = "latency"
protocol_latency = 200
#### Bandwidth ####
[[group]] [[group]]
name = "bandwidth" name = "mobile_5g"
protocol_latency = 25 bandwidth = 30
protocol_latency = 30
upload-size = 1024
download-size = 2048
[[bench]] [[bench]]
group = "bandwidth" group = "mobile_5g"
bandwidth = 10
[[bench]] #### Fiber Home Internet ####
group = "bandwidth" # High-end residential connection (best case scenario)
bandwidth = 50 # - Symmetric: equal upload/download bandwidth
# - Upload bandwidth: 100 Mbps (typical fiber upload)
# - Latency: 15ms (lower latency than cable)
[[bench]] [[group]]
group = "bandwidth" name = "fiber"
bandwidth = 100 bandwidth = 100
protocol_latency = 15
upload-size = 1024
download-size = 2048
[[bench]] [[bench]]
group = "bandwidth" group = "fiber"
bandwidth = 250
[[bench]]
group = "bandwidth"
bandwidth = 1000

View File

@@ -0,0 +1,52 @@
#### Bandwidth Sweep Benchmark ####
#
# Measures how network bandwidth affects TLSNotary runtime.
# Keeps latency and payload sizes fixed while varying upload bandwidth.
#
# Fixed parameters:
# - Latency: 25ms (typical internet latency)
# - Upload: 1KB (typical request)
# - Download: 2KB (typical response)
#
# Variable: Bandwidth from 5 Mbps to 1000 Mbps
#
# Use this to plot "Bandwidth vs Runtime" and understand bandwidth sensitivity.
# Focus on upload bandwidth as TLSNotary is primarily upload-bottlenecked
[[group]]
name = "bandwidth_sweep"
protocol_latency = 25
upload-size = 1024
download-size = 2048
[[bench]]
group = "bandwidth_sweep"
bandwidth = 5
[[bench]]
group = "bandwidth_sweep"
bandwidth = 10
[[bench]]
group = "bandwidth_sweep"
bandwidth = 20
[[bench]]
group = "bandwidth_sweep"
bandwidth = 50
[[bench]]
group = "bandwidth_sweep"
bandwidth = 100
[[bench]]
group = "bandwidth_sweep"
bandwidth = 250
[[bench]]
group = "bandwidth_sweep"
bandwidth = 500
[[bench]]
group = "bandwidth_sweep"
bandwidth = 1000

View File

@@ -0,0 +1,53 @@
#### Download Size Sweep Benchmark ####
#
# Measures how download payload size affects TLSNotary runtime.
# Keeps network conditions fixed while varying the response size.
#
# Fixed parameters:
# - Bandwidth: 100 Mbps (typical good connection)
# - Latency: 25ms (typical internet latency)
# - Upload: 1KB (typical request size)
#
# Variable: Download size from 1KB to 100KB
#
# Use this to plot "Download Size vs Runtime" and understand how much data
# TLSNotary can efficiently notarize. Useful for determining optimal
# chunking strategies for large responses.
[[group]]
name = "download_sweep"
bandwidth = 100
protocol_latency = 25
upload-size = 1024
[[bench]]
group = "download_sweep"
download-size = 1024
[[bench]]
group = "download_sweep"
download-size = 2048
[[bench]]
group = "download_sweep"
download-size = 5120
[[bench]]
group = "download_sweep"
download-size = 10240
[[bench]]
group = "download_sweep"
download-size = 20480
[[bench]]
group = "download_sweep"
download-size = 30720
[[bench]]
group = "download_sweep"
download-size = 40960
[[bench]]
group = "download_sweep"
download-size = 51200

View File

@@ -0,0 +1,47 @@
#### Latency Sweep Benchmark ####
#
# Measures how network latency affects TLSNotary runtime.
# Keeps bandwidth and payload sizes fixed while varying protocol latency.
#
# Fixed parameters:
# - Bandwidth: 100 Mbps (typical good connection)
# - Upload: 1KB (typical request)
# - Download: 2KB (typical response)
#
# Variable: Protocol latency from 10ms to 200ms
#
# Use this to plot "Latency vs Runtime" and understand latency sensitivity.
[[group]]
name = "latency_sweep"
bandwidth = 100
upload-size = 1024
download-size = 2048
[[bench]]
group = "latency_sweep"
protocol_latency = 10
[[bench]]
group = "latency_sweep"
protocol_latency = 25
[[bench]]
group = "latency_sweep"
protocol_latency = 50
[[bench]]
group = "latency_sweep"
protocol_latency = 75
[[bench]]
group = "latency_sweep"
protocol_latency = 100
[[bench]]
group = "latency_sweep"
protocol_latency = 150
[[bench]]
group = "latency_sweep"
protocol_latency = 200

View File

@@ -5,6 +5,7 @@ use futures::{AsyncReadExt, AsyncWriteExt, TryFutureExt};
use harness_core::bench::{Bench, ProverMetrics}; use harness_core::bench::{Bench, ProverMetrics};
use tlsn::{ use tlsn::{
Session,
config::{ config::{
prove::ProveConfig, prove::ProveConfig,
prover::ProverConfig, prover::ProverConfig,
@@ -12,7 +13,6 @@ use tlsn::{
tls_commit::{TlsCommitConfig, mpc::MpcTlsConfig}, tls_commit::{TlsCommitConfig, mpc::MpcTlsConfig},
}, },
connection::ServerName, connection::ServerName,
prover::Prover,
webpki::{CertificateDer, RootCertStore}, webpki::{CertificateDer, RootCertStore},
}; };
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN}; use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
@@ -20,6 +20,7 @@ use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
use crate::{ use crate::{
IoProvider, IoProvider,
bench::{Meter, RECV_PADDING}, bench::{Meter, RECV_PADDING},
spawn,
}; };
pub async fn bench_prover(provider: &IoProvider, config: &Bench) -> Result<ProverMetrics> { pub async fn bench_prover(provider: &IoProvider, config: &Bench) -> Result<ProverMetrics> {
@@ -28,7 +29,12 @@ pub async fn bench_prover(provider: &IoProvider, config: &Bench) -> Result<Prove
let sent = verifier_io.sent(); let sent = verifier_io.sent();
let recv = verifier_io.recv(); let recv = verifier_io.recv();
let prover = Prover::new(ProverConfig::builder().build()?); let mut session = Session::new(verifier_io);
let prover = session.new_prover(ProverConfig::builder().build()?)?;
let (session, handle) = session.split();
_ = spawn(session);
let time_start = web_time::Instant::now(); let time_start = web_time::Instant::now();
@@ -49,7 +55,6 @@ pub async fn bench_prover(provider: &IoProvider, config: &Bench) -> Result<Prove
.build() .build()
}?) }?)
.build()?, .build()?,
verifier_io,
) )
.await?; .await?;
@@ -120,6 +125,7 @@ pub async fn bench_prover(provider: &IoProvider, config: &Bench) -> Result<Prove
prover.prove(&prove_config).await?; prover.prove(&prove_config).await?;
prover.close().await?; prover.close().await?;
handle.close();
let time_total = time_start.elapsed().as_millis(); let time_total = time_start.elapsed().as_millis();

View File

@@ -2,32 +2,34 @@ use anyhow::Result;
use harness_core::bench::Bench; use harness_core::bench::Bench;
use tlsn::{ use tlsn::{
Session,
config::verifier::VerifierConfig, config::verifier::VerifierConfig,
verifier::Verifier,
webpki::{CertificateDer, RootCertStore}, webpki::{CertificateDer, RootCertStore},
}; };
use tlsn_server_fixture_certs::CA_CERT_DER; use tlsn_server_fixture_certs::CA_CERT_DER;
use crate::IoProvider; use crate::{IoProvider, spawn};
pub async fn bench_verifier(provider: &IoProvider, _config: &Bench) -> Result<()> { pub async fn bench_verifier(provider: &IoProvider, _config: &Bench) -> Result<()> {
let verifier = Verifier::new( let io = provider.provide_proto_io().await?;
let mut session = Session::new(io);
let verifier = session.new_verifier(
VerifierConfig::builder() VerifierConfig::builder()
.root_store(RootCertStore { .root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())], roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
}) })
.build()?, .build()?,
); )?;
let verifier = verifier let (session, handle) = session.split();
.commit(provider.provide_proto_io().await?)
.await? _ = spawn(session);
.accept()
.await? let verifier = verifier.commit().await?.accept().await?.run().await?;
.run()
.await?;
let (_, verifier) = verifier.verify().await?.accept().await?; let (_, verifier) = verifier.verify().await?.accept().await?;
verifier.close().await?; verifier.close().await?;
handle.close();
Ok(()) Ok(())
} }

View File

@@ -1,4 +1,5 @@
use tlsn::{ use tlsn::{
Session,
config::{ config::{
prove::ProveConfig, prove::ProveConfig,
prover::ProverConfig, prover::ProverConfig,
@@ -8,9 +9,8 @@ use tlsn::{
}, },
connection::ServerName, connection::ServerName,
hash::HashAlgId, hash::HashAlgId,
prover::Prover,
transcript::{TranscriptCommitConfig, TranscriptCommitment, TranscriptCommitmentKind}, transcript::{TranscriptCommitConfig, TranscriptCommitment, TranscriptCommitmentKind},
verifier::{Verifier, VerifierOutput}, verifier::VerifierOutput,
webpki::{CertificateDer, RootCertStore}, webpki::{CertificateDer, RootCertStore},
}; };
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN}; use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
@@ -28,7 +28,17 @@ const MAX_RECV_DATA: usize = 1 << 11;
crate::test!("basic", prover, verifier); crate::test!("basic", prover, verifier);
async fn prover(provider: &IoProvider) { async fn prover(provider: &IoProvider) {
let prover = Prover::new(ProverConfig::builder().build().unwrap()) let io = provider.provide_proto_io().await.unwrap();
let mut session = Session::new(io);
let prover = session
.new_prover(ProverConfig::builder().build().unwrap())
.unwrap();
let (session, handle) = session.split();
_ = spawn(session);
let prover = prover
.commit( .commit(
TlsCommitConfig::builder() TlsCommitConfig::builder()
.protocol( .protocol(
@@ -41,7 +51,6 @@ async fn prover(provider: &IoProvider) {
) )
.build() .build()
.unwrap(), .unwrap(),
provider.provide_proto_io().await.unwrap(),
) )
.await .await
.unwrap(); .unwrap();
@@ -116,18 +125,27 @@ async fn prover(provider: &IoProvider) {
prover.prove(&config).await.unwrap(); prover.prove(&config).await.unwrap();
prover.close().await.unwrap(); prover.close().await.unwrap();
handle.close();
} }
async fn verifier(provider: &IoProvider) { async fn verifier(provider: &IoProvider) {
let io = provider.provide_proto_io().await.unwrap();
let mut session = Session::new(io);
let config = VerifierConfig::builder() let config = VerifierConfig::builder()
.root_store(RootCertStore { .root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())], roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
}) })
.build() .build()
.unwrap(); .unwrap();
let verifier = session.new_verifier(config).unwrap();
let verifier = Verifier::new(config) let (session, handle) = session.split();
.commit(provider.provide_proto_io().await.unwrap())
_ = spawn(session);
let verifier = verifier
.commit()
.await .await
.unwrap() .unwrap()
.accept() .accept()
@@ -147,6 +165,7 @@ async fn verifier(provider: &IoProvider) {
) = verifier.verify().await.unwrap().accept().await.unwrap(); ) = verifier.verify().await.unwrap().accept().await.unwrap();
verifier.close().await.unwrap(); verifier.close().await.unwrap();
handle.close();
let ServerName::Dns(server_name) = server_name.unwrap(); let ServerName::Dns(server_name) = server_name.unwrap();

View File

@@ -22,6 +22,7 @@ clap = { workspace = true, features = ["derive", "env"] }
csv = { version = "1.3" } csv = { version = "1.3" }
duct = { version = "1" } duct = { version = "1" }
futures = { workspace = true } futures = { workspace = true }
indicatif = { version = "0.17" }
ipnet = { workspace = true } ipnet = { workspace = true }
serio = { workspace = true } serio = { workspace = true }
serde_json = { workspace = true } serde_json = { workspace = true }

View File

@@ -16,6 +16,10 @@ pub struct Cli {
/// Subnet to assign harness network interfaces. /// Subnet to assign harness network interfaces.
#[arg(long, default_value = "10.250.0.0/24", env = "SUBNET")] #[arg(long, default_value = "10.250.0.0/24", env = "SUBNET")]
pub subnet: Ipv4Net, pub subnet: Ipv4Net,
/// Run browser in headed mode (visible window) for debugging.
/// Works with both X11 and Wayland.
#[arg(long)]
pub headed: bool,
} }
#[derive(Subcommand)] #[derive(Subcommand)]
@@ -31,10 +35,13 @@ pub enum Command {
}, },
/// runs benchmarks. /// runs benchmarks.
Bench { Bench {
/// Configuration path. /// Configuration path. Defaults to bench.toml which contains
/// representative scenarios (cable, 5G, fiber) for quick performance
/// checks. Use bench_*_sweep.toml files for parametric
/// analysis.
#[arg(short, long, default_value = "bench.toml")] #[arg(short, long, default_value = "bench.toml")]
config: PathBuf, config: PathBuf,
/// Output file path. /// Output CSV file path for detailed metrics and post-processing.
#[arg(short, long, default_value = "metrics.csv")] #[arg(short, long, default_value = "metrics.csv")]
output: PathBuf, output: PathBuf,
/// Number of samples to measure per benchmark. This is overridden by /// Number of samples to measure per benchmark. This is overridden by

View File

@@ -28,6 +28,9 @@ pub struct Executor {
ns: Namespace, ns: Namespace,
config: ExecutorConfig, config: ExecutorConfig,
target: Target, target: Target,
/// Display environment variables for headed mode (X11/Wayland).
/// Empty means headless mode.
display_env: Vec<String>,
state: State, state: State,
} }
@@ -49,11 +52,17 @@ impl State {
} }
impl Executor { impl Executor {
pub fn new(ns: Namespace, config: ExecutorConfig, target: Target) -> Self { pub fn new(
ns: Namespace,
config: ExecutorConfig,
target: Target,
display_env: Vec<String>,
) -> Self {
Self { Self {
ns, ns,
config, config,
target, target,
display_env,
state: State::Init, state: State::Init,
} }
} }
@@ -120,23 +129,49 @@ impl Executor {
let tmp = duct::cmd!("mktemp", "-d").read()?; let tmp = duct::cmd!("mktemp", "-d").read()?;
let tmp = tmp.trim(); let tmp = tmp.trim();
let process = duct::cmd!( let headed = !self.display_env.is_empty();
"sudo",
"ip", // Build command args based on headed/headless mode
"netns", let mut args: Vec<String> = vec![
"exec", "ip".into(),
self.ns.name(), "netns".into(),
chrome_path, "exec".into(),
format!("--remote-debugging-port={PORT_BROWSER}"), self.ns.name().into(),
"--headless", ];
"--disable-dev-shm-usage",
"--disable-gpu", if headed {
"--disable-cache", // For headed mode: drop back to the current user and pass display env vars
"--disable-application-cache", // This allows the browser to connect to X11/Wayland while in the namespace
"--no-sandbox", let user =
std::env::var("USER").context("USER environment variable not set")?;
args.extend(["sudo".into(), "-E".into(), "-u".into(), user, "env".into()]);
args.extend(self.display_env.clone());
}
args.push(chrome_path.to_string_lossy().into());
args.push(format!("--remote-debugging-port={PORT_BROWSER}"));
if headed {
// Headed mode: no headless, add flags to suppress first-run dialogs
args.extend(["--no-first-run".into(), "--no-default-browser-check".into()]);
} else {
// Headless mode: original flags
args.extend([
"--headless".into(),
"--disable-dev-shm-usage".into(),
"--disable-gpu".into(),
"--disable-cache".into(),
"--disable-application-cache".into(),
]);
}
args.extend([
"--no-sandbox".into(),
format!("--user-data-dir={tmp}"), format!("--user-data-dir={tmp}"),
format!("--allowed-ips=10.250.0.1"), "--allowed-ips=10.250.0.1".into(),
); ]);
let process = duct::cmd("sudo", &args);
let process = if !cfg!(feature = "debug") { let process = if !cfg!(feature = "debug") {
process.stderr_capture().stdout_capture().start()? process.stderr_capture().stdout_capture().start()?

View File

@@ -9,7 +9,7 @@ mod ws_proxy;
#[cfg(feature = "debug")] #[cfg(feature = "debug")]
mod debug_prelude; mod debug_prelude;
use std::time::Duration; use std::{collections::HashMap, time::Duration};
use anyhow::Result; use anyhow::Result;
use clap::Parser; use clap::Parser;
@@ -22,6 +22,7 @@ use harness_core::{
rpc::{BenchCmd, TestCmd}, rpc::{BenchCmd, TestCmd},
test::TestStatus, test::TestStatus,
}; };
use indicatif::{ProgressBar, ProgressStyle};
use cli::{Cli, Command}; use cli::{Cli, Command};
use executor::Executor; use executor::Executor;
@@ -32,6 +33,60 @@ use crate::debug_prelude::*;
use crate::{cli::Route, network::Network, wasm_server::WasmServer, ws_proxy::WsProxy}; use crate::{cli::Route, network::Network, wasm_server::WasmServer, ws_proxy::WsProxy};
/// Statistics for a benchmark configuration
#[derive(Debug, Clone)]
struct BenchStats {
group: Option<String>,
bandwidth: usize,
latency: usize,
upload_size: usize,
download_size: usize,
times: Vec<u64>,
}
impl BenchStats {
fn median(&self) -> f64 {
let mut sorted = self.times.clone();
sorted.sort();
let len = sorted.len();
if len == 0 {
return 0.0;
}
if len.is_multiple_of(2) {
(sorted[len / 2 - 1] + sorted[len / 2]) as f64 / 2.0
} else {
sorted[len / 2] as f64
}
}
}
/// Print summary table of benchmark results
fn print_bench_summary(stats: &[BenchStats]) {
if stats.is_empty() {
println!("\nNo benchmark results to display (only warmup was run).");
return;
}
println!("\n{}", "=".repeat(80));
println!("TLSNotary Benchmark Results");
println!("{}", "=".repeat(80));
println!();
for stat in stats {
let group_name = stat.group.as_deref().unwrap_or("unnamed");
println!(
"{} ({} Mbps, {}ms latency, {}KB↑ {}KB↓):",
group_name,
stat.bandwidth,
stat.latency,
stat.upload_size / 1024,
stat.download_size / 1024
);
println!(" Median: {:.2}s", stat.median() / 1000.0);
println!();
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum, Default)] #[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum, Default)]
pub enum Target { pub enum Target {
#[default] #[default]
@@ -50,14 +105,46 @@ struct Runner {
started: bool, started: bool,
} }
/// Collects display-related environment variables for headed browser mode.
/// Works with both X11 and Wayland by collecting whichever vars are present.
fn collect_display_env_vars() -> Vec<String> {
const DISPLAY_VARS: &[&str] = &[
"DISPLAY", // X11
"XAUTHORITY", // X11 auth
"WAYLAND_DISPLAY", // Wayland
"XDG_RUNTIME_DIR", // Wayland runtime dir
];
DISPLAY_VARS
.iter()
.filter_map(|&var| {
std::env::var(var)
.ok()
.map(|val| format!("{}={}", var, val))
})
.collect()
}
impl Runner { impl Runner {
fn new(cli: &Cli) -> Result<Self> { fn new(cli: &Cli) -> Result<Self> {
let Cli { target, subnet, .. } = cli; let Cli {
target,
subnet,
headed,
..
} = cli;
let current_path = std::env::current_exe().unwrap(); let current_path = std::env::current_exe().unwrap();
let fixture_path = current_path.parent().unwrap().join("server-fixture"); let fixture_path = current_path.parent().unwrap().join("server-fixture");
let network_config = NetworkConfig::new(*subnet); let network_config = NetworkConfig::new(*subnet);
let network = Network::new(network_config.clone())?; let network = Network::new(network_config.clone())?;
// Collect display env vars once if headed mode is enabled
let display_env = if *headed {
collect_display_env_vars()
} else {
Vec::new()
};
let server_fixture = let server_fixture =
ServerFixture::new(fixture_path, network.ns_app().clone(), network_config.app); ServerFixture::new(fixture_path, network.ns_app().clone(), network_config.app);
let wasm_server = WasmServer::new( let wasm_server = WasmServer::new(
@@ -75,6 +162,7 @@ impl Runner {
.network_config(network_config.clone()) .network_config(network_config.clone())
.build(), .build(),
*target, *target,
display_env.clone(),
); );
let exec_v = Executor::new( let exec_v = Executor::new(
network.ns_1().clone(), network.ns_1().clone(),
@@ -84,6 +172,7 @@ impl Runner {
.network_config(network_config.clone()) .network_config(network_config.clone())
.build(), .build(),
Target::Native, Target::Native,
Vec::new(), // Verifier doesn't need display env
); );
Ok(Self { Ok(Self {
@@ -118,6 +207,12 @@ pub async fn main() -> Result<()> {
tracing_subscriber::fmt::init(); tracing_subscriber::fmt::init();
let cli = Cli::parse(); let cli = Cli::parse();
// Validate --headed requires --target browser
if cli.headed && cli.target != Target::Browser {
anyhow::bail!("--headed can only be used with --target browser");
}
let mut runner = Runner::new(&cli)?; let mut runner = Runner::new(&cli)?;
let mut exit_code = 0; let mut exit_code = 0;
@@ -206,6 +301,12 @@ pub async fn main() -> Result<()> {
samples_override, samples_override,
skip_warmup, skip_warmup,
} => { } => {
// Print configuration info
println!("TLSNotary Benchmark Harness");
println!("Running benchmarks from: {}", config.display());
println!("Output will be written to: {}", output.display());
println!();
let items: BenchItems = toml::from_str(&std::fs::read_to_string(config)?)?; let items: BenchItems = toml::from_str(&std::fs::read_to_string(config)?)?;
let output_file = std::fs::File::create(output)?; let output_file = std::fs::File::create(output)?;
let mut writer = WriterBuilder::new().from_writer(output_file); let mut writer = WriterBuilder::new().from_writer(output_file);
@@ -220,7 +321,34 @@ pub async fn main() -> Result<()> {
runner.exec_p.start().await?; runner.exec_p.start().await?;
runner.exec_v.start().await?; runner.exec_v.start().await?;
for config in benches { // Create progress bar
let pb = ProgressBar::new(benches.len() as u64);
pb.set_style(
ProgressStyle::default_bar()
.template("[{elapsed_precise}] {bar:40.cyan/blue} {pos}/{len} {msg}")
.expect("valid template")
.progress_chars("█▓▒░ "),
);
// Collect measurements for stats
let mut measurements_by_config: HashMap<String, Vec<u64>> = HashMap::new();
let warmup_count = if skip_warmup { 0 } else { 3 };
for (idx, config) in benches.iter().enumerate() {
let is_warmup = idx < warmup_count;
let group_name = if is_warmup {
format!("Warmup {}/{}", idx + 1, warmup_count)
} else {
config.group.as_deref().unwrap_or("unnamed").to_string()
};
pb.set_message(format!(
"{} ({} Mbps, {}ms)",
group_name, config.bandwidth, config.protocol_latency
));
runner runner
.network .network
.set_proto_config(config.bandwidth, config.protocol_latency.div_ceil(2))?; .set_proto_config(config.bandwidth, config.protocol_latency.div_ceil(2))?;
@@ -249,11 +377,73 @@ pub async fn main() -> Result<()> {
panic!("expected prover output"); panic!("expected prover output");
}; };
let measurement = Measurement::new(config, metrics); // Collect metrics for stats (skip warmup benches)
if !is_warmup {
let config_key = format!(
"{:?}|{}|{}|{}|{}",
config.group,
config.bandwidth,
config.protocol_latency,
config.upload_size,
config.download_size
);
measurements_by_config
.entry(config_key)
.or_default()
.push(metrics.time_total);
}
let measurement = Measurement::new(config.clone(), metrics);
writer.serialize(measurement)?; writer.serialize(measurement)?;
writer.flush()?; writer.flush()?;
pb.inc(1);
} }
pb.finish_with_message("Benchmarks complete");
// Compute and print statistics
let mut all_stats: Vec<BenchStats> = Vec::new();
for (key, times) in measurements_by_config {
// Parse back the config from the key
let parts: Vec<&str> = key.split('|').collect();
if parts.len() >= 5 {
let group = if parts[0] == "None" {
None
} else {
Some(
parts[0]
.trim_start_matches("Some(\"")
.trim_end_matches("\")")
.to_string(),
)
};
let bandwidth: usize = parts[1].parse().unwrap_or(0);
let latency: usize = parts[2].parse().unwrap_or(0);
let upload_size: usize = parts[3].parse().unwrap_or(0);
let download_size: usize = parts[4].parse().unwrap_or(0);
all_stats.push(BenchStats {
group,
bandwidth,
latency,
upload_size,
download_size,
times,
});
}
}
// Sort stats by group name for consistent output
all_stats.sort_by(|a, b| {
a.group
.cmp(&b.group)
.then(a.latency.cmp(&b.latency))
.then(a.bandwidth.cmp(&b.bandwidth))
});
print_bench_summary(&all_stats);
} }
Command::Serve {} => { Command::Serve {} => {
runner.start_services().await?; runner.start_services().await?;

View File

@@ -5,7 +5,7 @@ description = "TLSNotary MPC-TLS protocol"
keywords = ["tls", "mpc", "2pc"] keywords = ["tls", "mpc", "2pc"]
categories = ["cryptography"] categories = ["cryptography"]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.14-pre" version = "0.1.0-alpha.14"
edition = "2021" edition = "2021"
[lints] [lints]
@@ -60,6 +60,8 @@ mpz-common = { workspace = true, features = ["test-utils"] }
mpz-ot = { workspace = true, features = ["ideal"] } mpz-ot = { workspace = true, features = ["ideal"] }
mpz-ideal-vm = { workspace = true } mpz-ideal-vm = { workspace = true }
tlsn-mux = { workspace = true }
cipher-crate = { package = "cipher", version = "0.4" } cipher-crate = { package = "cipher", version = "0.4" }
generic-array = { workspace = true } generic-array = { workspace = true }
rand_chacha = { workspace = true } rand_chacha = { workspace = true }
@@ -70,6 +72,5 @@ tlsn-tls-client-async = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] } tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }
tokio-util = { workspace = true, features = ["compat"] } tokio-util = { workspace = true, features = ["compat"] }
tracing-subscriber = { workspace = true } tracing-subscriber = { workspace = true }
uid-mux = { workspace = true, features = ["serio", "test-utils"] }
rustls-pki-types = { workspace = true } rustls-pki-types = { workspace = true }
rustls-webpki = { workspace = true } rustls-webpki = { workspace = true }

View File

@@ -123,8 +123,8 @@ fn build_pair(config: Config) -> (MpcTlsLeader, MpcTlsFollower) {
let (mut mt_a, mut mt_b) = test_mt_context(8); let (mut mt_a, mut mt_b) = test_mt_context(8);
let ctx_a = futures::executor::block_on(mt_a.new_context()).unwrap(); let ctx_a = mt_a.new_context().unwrap();
let ctx_b = futures::executor::block_on(mt_b.new_context()).unwrap(); let ctx_b = mt_b.new_context().unwrap();
let delta_a = Delta::new(Block::random(&mut rng)); let delta_a = Delta::new(Block::random(&mut rng));
let delta_b = Delta::new(Block::random(&mut rng)); let delta_b = Delta::new(Block::random(&mut rng));

View File

@@ -5,7 +5,7 @@ description = "A TLS backend trait for TLSNotary"
keywords = ["tls", "mpc", "2pc"] keywords = ["tls", "mpc", "2pc"]
categories = ["cryptography"] categories = ["cryptography"]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.14-pre" version = "0.1.0-alpha.14"
edition = "2021" edition = "2021"
[lints] [lints]

View File

@@ -5,7 +5,7 @@ description = "An async TLS client for TLSNotary"
keywords = ["tls", "mpc", "2pc", "client", "async"] keywords = ["tls", "mpc", "2pc", "client", "async"]
categories = ["cryptography"] categories = ["cryptography"]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.14-pre" version = "0.1.0-alpha.14"
edition = "2021" edition = "2021"
[lints] [lints]

View File

@@ -5,7 +5,7 @@ description = "A TLS client for TLSNotary"
keywords = ["tls", "mpc", "2pc", "client", "sync"] keywords = ["tls", "mpc", "2pc", "client", "sync"]
categories = ["cryptography"] categories = ["cryptography"]
license = "Apache-2.0 OR ISC OR MIT" license = "Apache-2.0 OR ISC OR MIT"
version = "0.1.0-alpha.14-pre" version = "0.1.0-alpha.14"
edition = "2021" edition = "2021"
autobenches = false autobenches = false

View File

@@ -5,7 +5,7 @@ description = "Cryptographic operations for the TLSNotary TLS client"
keywords = ["tls", "mpc", "2pc"] keywords = ["tls", "mpc", "2pc"]
categories = ["cryptography"] categories = ["cryptography"]
license = "Apache-2.0 OR ISC OR MIT" license = "Apache-2.0 OR ISC OR MIT"
version = "0.1.0-alpha.14-pre" version = "0.1.0-alpha.14"
edition = "2021" edition = "2021"
[lints] [lints]

View File

@@ -4,7 +4,7 @@ authors = ["TLSNotary Team"]
keywords = ["tls", "mpc", "2pc", "prover"] keywords = ["tls", "mpc", "2pc", "prover"]
categories = ["cryptography"] categories = ["cryptography"]
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.14-pre" version = "0.1.0-alpha.14"
edition = "2024" edition = "2024"
[lints] [lints]
@@ -20,6 +20,7 @@ web = ["dep:web-spawn"]
tlsn-attestation = { workspace = true } tlsn-attestation = { workspace = true }
tlsn-core = { workspace = true } tlsn-core = { workspace = true }
tlsn-deap = { workspace = true } tlsn-deap = { workspace = true }
tlsn-mux = { workspace = true }
tlsn-tls-client = { workspace = true } tlsn-tls-client = { workspace = true }
tlsn-tls-client-async = { workspace = true } tlsn-tls-client-async = { workspace = true }
tlsn-tls-core = { workspace = true } tlsn-tls-core = { workspace = true }
@@ -27,7 +28,6 @@ tlsn-mpc-tls = { workspace = true }
tlsn-cipher = { workspace = true } tlsn-cipher = { workspace = true }
serio = { workspace = true, features = ["compat"] } serio = { workspace = true, features = ["compat"] }
uid-mux = { workspace = true, features = ["serio"] }
web-spawn = { workspace = true, optional = true } web-spawn = { workspace = true, optional = true }
mpz-circuits = { workspace = true, features = ["aes"] } mpz-circuits = { workspace = true, features = ["aes"] }

View File

@@ -1,21 +0,0 @@
//! Execution context.
use mpz_common::context::Multithread;
use crate::mux::MuxControl;
/// Maximum concurrency for multi-threaded context.
pub(crate) const MAX_CONCURRENCY: usize = 8;
/// Builds a multi-threaded context with the given muxer.
pub(crate) fn build_mt_context(mux: MuxControl) -> Multithread {
let builder = Multithread::builder().mux(mux).concurrency(MAX_CONCURRENCY);
#[cfg(all(feature = "web", target_arch = "wasm32"))]
let builder = builder.spawn_handler(|f| {
let _ = web_spawn::spawn(f);
Ok(())
});
builder.build().unwrap()
}

170
crates/tlsn/src/error.rs Normal file
View File

@@ -0,0 +1,170 @@
use std::fmt::Display;
/// TLSNotary error.
///
/// Errors are categorized by kind:
///
/// - **User** ([`is_user`](Self::is_user)): e.g. rejected by the remote party.
/// - **IO** ([`is_io`](Self::is_io)): network or communication failure.
/// - **Internal** ([`is_internal`](Self::is_internal)): an unknown internal
/// error in the library.
/// - **Config** ([`is_config`](Self::is_config)): invalid configuration
/// provided by the user.
///
/// The [`msg`](Self::msg) method returns additional context if available, such
/// as a rejection message provided by a verifier.
#[derive(Debug, thiserror::Error)]
pub struct Error {
kind: ErrorKind,
msg: Option<String>,
source: Option<Box<dyn std::error::Error + Send + Sync>>,
}
impl Error {
pub(crate) fn io() -> Self {
Self {
kind: ErrorKind::Io,
msg: None,
source: None,
}
}
pub(crate) fn internal() -> Self {
Self {
kind: ErrorKind::Internal,
msg: None,
source: None,
}
}
pub(crate) fn user() -> Self {
Self {
kind: ErrorKind::User,
msg: None,
source: None,
}
}
pub(crate) fn config() -> Self {
Self {
kind: ErrorKind::Config,
msg: None,
source: None,
}
}
pub(crate) fn with_msg(mut self, msg: impl Into<String>) -> Self {
self.msg = Some(msg.into());
self
}
pub(crate) fn with_source<T>(mut self, source: T) -> Self
where
T: Into<Box<dyn std::error::Error + Send + Sync>>,
{
self.source = Some(source.into());
self
}
/// Returns `true` if the error was user created.
pub fn is_user(&self) -> bool {
self.kind.is_user()
}
/// Returns `true` if the error originated from an IO error.
pub fn is_io(&self) -> bool {
self.kind.is_io()
}
/// Returns `true` if the error originated from an internal bug.
pub fn is_internal(&self) -> bool {
self.kind.is_internal()
}
/// Returns `true` if the error originated from invalid configuration.
pub fn is_config(&self) -> bool {
self.kind.is_config()
}
/// Returns the error message if available.
pub fn msg(&self) -> Option<&str> {
self.msg.as_deref()
}
}
impl Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self.kind {
ErrorKind::User => write!(f, "user error")?,
ErrorKind::Io => write!(f, "io error")?,
ErrorKind::Internal => write!(f, "internal error")?,
ErrorKind::Config => write!(f, "config error")?,
}
if let Some(msg) = &self.msg {
write!(f, ": {msg}")?;
}
if let Some(source) = &self.source {
write!(f, " caused by: {source}")?;
}
Ok(())
}
}
impl From<std::io::Error> for Error {
fn from(e: std::io::Error) -> Self {
Self::io().with_source(e)
}
}
impl From<mpz_common::ContextError> for Error {
fn from(e: mpz_common::ContextError) -> Self {
Self::internal().with_msg("context error").with_source(e)
}
}
impl From<mpc_tls::MpcTlsError> for Error {
fn from(e: mpc_tls::MpcTlsError) -> Self {
Self::internal().with_msg("mpc-tls error").with_source(e)
}
}
impl From<tls_client_async::ConnectionError> for Error {
fn from(e: tls_client_async::ConnectionError) -> Self {
Self::io().with_msg("tls connection error").with_source(e)
}
}
impl From<tlsn_mux::ConnectionError> for Error {
fn from(e: tlsn_mux::ConnectionError) -> Self {
Self::io().with_msg("mux connection error").with_source(e)
}
}
#[derive(Debug)]
enum ErrorKind {
User,
Io,
Internal,
Config,
}
impl ErrorKind {
fn is_user(&self) -> bool {
matches!(self, ErrorKind::User)
}
fn is_io(&self) -> bool {
matches!(self, ErrorKind::Io)
}
fn is_internal(&self) -> bool {
matches!(self, ErrorKind::Internal)
}
fn is_config(&self) -> bool {
matches!(self, ErrorKind::Config)
}
}

View File

@@ -1,23 +1,66 @@
//! TLSNotary library. //! TLSNotary protocol implementation.
//!
//! This crate provides the core protocol for generating and verifying proofs
//! of TLS sessions. A prover can demonstrate to a verifier that specific data
//! was exchanged with a TLS server, without revealing the full transcript.
//!
//! # Overview
//!
//! The protocol involves two parties:
//!
//! - **Prover** ([`Prover`](prover::Prover)): connects to a TLS server and
//! generates proofs about the session.
//! - **Verifier** ([`Verifier`](verifier::Verifier)): collaborates with the
//! prover during the TLS session and verifies the resulting proofs.
//!
//! Both parties communicate through an established [`Session`].
//!
//! # Workflow
//!
//! The protocol has two main phases:
//!
//! **Commitment**: The prover and verifier collaborate to construct a TLS
//! transcript commitment from the prover's communication with a TLS server.
//! This authenticates the transcript for the verifier, without the verifier
//! learning the contents.
//!
//! **Selective Disclosure**: The prover selectively reveals portions of the
//! committed transcript to the verifier, proving statements about the data
//! exchanged with the server.
//!
//! ## Steps
//!
//! 1. Establish a communication channel between prover and verifier.
//! 2. Create a [`Session`] on each side from the channel.
//! 3. Create a [`Prover`](prover::Prover) or [`Verifier`](verifier::Verifier).
//! 4. Run the commitment phase: the prover connects to the TLS server and
//! exchanges data to obtain a commitment to the TLS transcript.
//! 5. (Optional) Perform selective disclosure: the prover provably reveals
//! selected data to the verifier.
#![deny(missing_docs, unreachable_pub, unused_must_use)] #![deny(missing_docs, unreachable_pub, unused_must_use)]
#![deny(clippy::all)] #![deny(clippy::all)]
#![forbid(unsafe_code)] #![forbid(unsafe_code)]
pub(crate) mod context; mod error;
pub(crate) mod ghash; pub(crate) mod ghash;
pub(crate) mod map; pub(crate) mod map;
pub(crate) mod mpz; pub(crate) mod mpz;
pub(crate) mod msg; pub(crate) mod msg;
pub(crate) mod mux;
pub mod prover; pub mod prover;
mod session;
pub(crate) mod tag; pub(crate) mod tag;
pub(crate) mod transcript_internal; pub(crate) mod transcript_internal;
pub mod verifier; pub mod verifier;
pub use error::Error;
pub use session::{Session, SessionDriver, SessionHandle};
pub use tlsn_attestation as attestation; pub use tlsn_attestation as attestation;
pub use tlsn_core::{config, connection, hash, transcript, webpki}; pub use tlsn_core::{config, connection, hash, transcript, webpki};
/// Result type.
pub type Result<T, E = Error> = core::result::Result<T, E>;
use std::sync::LazyLock; use std::sync::LazyLock;
use semver::Version; use semver::Version;

View File

@@ -21,20 +21,6 @@ impl<T> RangeMap<T>
where where
T: Item, T: Item,
{ {
pub(crate) fn new(map: Vec<(usize, T)>) -> Self {
let mut pos = 0;
for (idx, item) in &map {
assert!(
*idx >= pos,
"items must be sorted by index and non-overlapping"
);
pos = *idx + item.length();
}
Self { map }
}
/// Returns `true` if the map is empty. /// Returns `true` if the map is empty.
pub(crate) fn is_empty(&self) -> bool { pub(crate) fn is_empty(&self) -> bool {
self.map.is_empty() self.map.is_empty()
@@ -47,11 +33,6 @@ where
.map(|(idx, item)| *idx..*idx + item.length()) .map(|(idx, item)| *idx..*idx + item.length())
} }
/// Returns the length of the map.
pub(crate) fn len(&self) -> usize {
self.map.iter().map(|(_, item)| item.length()).sum()
}
pub(crate) fn iter(&self) -> impl Iterator<Item = (Range<usize>, &T)> { pub(crate) fn iter(&self) -> impl Iterator<Item = (Range<usize>, &T)> {
self.map self.map
.iter() .iter()

View File

@@ -6,11 +6,6 @@ use mpz_core::Block;
#[cfg(not(tlsn_insecure))] #[cfg(not(tlsn_insecure))]
use mpz_garble::protocol::semihonest::{Evaluator, Garbler}; use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_garble_core::Delta; use mpz_garble_core::Delta;
use mpz_memory_core::{
Vector,
binary::U8,
correlated::{Key, Mac},
};
#[cfg(not(tlsn_insecure))] #[cfg(not(tlsn_insecure))]
use mpz_ot::cot::{DerandCOTReceiver, DerandCOTSender}; use mpz_ot::cot::{DerandCOTReceiver, DerandCOTSender};
use mpz_ot::{ use mpz_ot::{
@@ -24,8 +19,6 @@ use tlsn_core::config::tls_commit::mpc::{MpcTlsConfig, NetworkSetting};
use tlsn_deap::Deap; use tlsn_deap::Deap;
use tokio::sync::Mutex; use tokio::sync::Mutex;
use crate::transcript_internal::commit::encoding::{KeyStore, MacStore};
#[cfg(not(tlsn_insecure))] #[cfg(not(tlsn_insecure))]
pub(crate) type ProverMpc = pub(crate) type ProverMpc =
Garbler<DerandCOTSender<SharedRCOTSender<kos::Sender<co::Receiver>, Block>>>; Garbler<DerandCOTSender<SharedRCOTSender<kos::Sender<co::Receiver>, Block>>>;
@@ -193,41 +186,3 @@ pub(crate) fn translate_keys<Mpc, Zk>(keys: &mut SessionKeys, vm: &Deap<Mpc, Zk>
.translate(keys.server_write_mac_key) .translate(keys.server_write_mac_key)
.expect("VM memory should be consistent"); .expect("VM memory should be consistent");
} }
impl<T> KeyStore for Verifier<T> {
fn delta(&self) -> &Delta {
self.delta()
}
fn get_keys(&self, data: Vector<U8>) -> Option<&[Key]> {
self.get_keys(data).ok()
}
}
impl<T> MacStore for Prover<T> {
fn get_macs(&self, data: Vector<U8>) -> Option<&[Mac]> {
self.get_macs(data).ok()
}
}
#[cfg(tlsn_insecure)]
mod insecure {
use super::*;
use mpz_ideal_vm::IdealVm;
impl KeyStore for IdealVm {
fn delta(&self) -> &Delta {
unimplemented!("encodings not supported in insecure mode")
}
fn get_keys(&self, _data: Vector<U8>) -> Option<&[Key]> {
unimplemented!("encodings not supported in insecure mode")
}
}
impl MacStore for IdealVm {
fn get_macs(&self, _data: Vector<U8>) -> Option<&[Mac]> {
unimplemented!("encodings not supported in insecure mode")
}
}
}

View File

@@ -1,3 +1,5 @@
use std::fmt;
use semver::Version; use semver::Version;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@@ -40,12 +42,14 @@ impl Response {
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
pub(crate) struct RejectionReason(Option<String>); pub(crate) struct RejectionReason(Option<String>);
impl From<RejectionReason> for crate::prover::ProverError { impl fmt::Display for RejectionReason {
fn from(value: RejectionReason) -> Self { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if let Some(msg) = value.0 { if let Some(msg) = &self.0 {
crate::prover::ProverError::config(format!("verifier rejected with reason: {msg}")) write!(f, "{msg}")
} else { } else {
crate::prover::ProverError::config("verifier rejected without providing a reason") write!(f, "no reason provided")
} }
} }
} }
impl std::error::Error for RejectionReason {}

View File

@@ -1,90 +0,0 @@
//! Multiplexer used in the TLSNotary protocol.
use std::future::IntoFuture;
use futures::{
AsyncRead, AsyncWrite, Future,
future::{FusedFuture, FutureExt},
};
use tracing::error;
use uid_mux::yamux;
use crate::Role;
/// Multiplexer supporting unique deterministic stream IDs.
pub(crate) type Mux<Io> = yamux::Yamux<Io>;
/// Multiplexer controller providing streams.
pub(crate) type MuxControl = yamux::YamuxCtrl;
/// Multiplexer future which must be polled for the muxer to make progress.
pub(crate) struct MuxFuture(
Box<dyn FusedFuture<Output = Result<(), yamux::ConnectionError>> + Send + Unpin>,
);
impl MuxFuture {
/// Returns true if the muxer is complete.
pub(crate) fn is_complete(&self) -> bool {
self.0.is_terminated()
}
/// Awaits a future, polling the muxer future concurrently.
pub(crate) async fn poll_with<F, R>(&mut self, fut: F) -> R
where
F: Future<Output = R>,
{
let mut fut = Box::pin(fut.fuse());
// Poll the future concurrently with the muxer future.
// If the muxer returns an error, continue polling the future
// until it completes.
loop {
futures::select! {
res = fut => return res,
res = &mut self.0 => if let Err(e) = res {
error!("mux error: {:?}", e);
},
}
}
}
}
impl Future for MuxFuture {
type Output = Result<(), yamux::ConnectionError>;
fn poll(
mut self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Self::Output> {
self.0.as_mut().poll_unpin(cx)
}
}
/// Attaches a multiplexer to the provided socket.
///
/// Returns the multiplexer and a controller for creating streams with a codec
/// attached.
///
/// # Arguments
///
/// * `socket` - The socket to attach the multiplexer to.
/// * `role` - The role of the party using the multiplexer.
pub(crate) fn attach_mux<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
socket: T,
role: Role,
) -> (MuxFuture, MuxControl) {
let mut mux_config = yamux::Config::default();
mux_config.set_max_num_streams(36);
let mux_role = match role {
Role::Prover => yamux::Mode::Client,
Role::Verifier => yamux::Mode::Server,
};
let mux = Mux::new(socket, mux_config, mux_role);
let ctrl = mux.control();
if let Role::Prover = role {
ctrl.alloc(32);
}
(MuxFuture(Box::new(mux.into_future().fuse())), ctrl)
}

View File

@@ -1,20 +1,17 @@
//! Prover. //! Prover.
mod error;
mod future; mod future;
mod prove; mod prove;
pub mod state; pub mod state;
pub use error::ProverError;
pub use future::ProverFuture; pub use future::ProverFuture;
use mpz_common::Context;
pub use tlsn_core::ProverOutput; pub use tlsn_core::ProverOutput;
use crate::{ use crate::{
Role, Error, Result,
context::build_mt_context,
mpz::{ProverDeps, build_prover_deps, translate_keys}, mpz::{ProverDeps, build_prover_deps, translate_keys},
msg::{ProveRequestMsg, Response, TlsCommitRequestMsg}, msg::{ProveRequestMsg, Response, TlsCommitRequestMsg},
mux::attach_mux,
tag::verify_tags, tag::verify_tags,
}; };
@@ -45,6 +42,7 @@ use tracing::{Instrument, Span, debug, info, info_span, instrument};
pub struct Prover<T: state::ProverState = state::Initialized> { pub struct Prover<T: state::ProverState = state::Initialized> {
config: ProverConfig, config: ProverConfig,
span: Span, span: Span,
ctx: Option<Context>,
state: T, state: T,
} }
@@ -53,12 +51,14 @@ impl Prover<state::Initialized> {
/// ///
/// # Arguments /// # Arguments
/// ///
/// * `ctx` - A thread context.
/// * `config` - The configuration for the prover. /// * `config` - The configuration for the prover.
pub fn new(config: ProverConfig) -> Self { pub(crate) fn new(ctx: Context, config: ProverConfig) -> Self {
let span = info_span!("prover"); let span = info_span!("prover");
Self { Self {
config, config,
span, span,
ctx: Some(ctx),
state: state::Initialized, state: state::Initialized,
} }
} }
@@ -71,34 +71,43 @@ impl Prover<state::Initialized> {
/// # Arguments /// # Arguments
/// ///
/// * `config` - The TLS commitment configuration. /// * `config` - The TLS commitment configuration.
/// * `socket` - The socket to the TLS verifier.
#[instrument(parent = &self.span, level = "debug", skip_all, err)] #[instrument(parent = &self.span, level = "debug", skip_all, err)]
pub async fn commit<S: AsyncWrite + AsyncRead + Send + Unpin + 'static>( pub async fn commit(
self, mut self,
config: TlsCommitConfig, config: TlsCommitConfig,
socket: S, ) -> Result<Prover<state::CommitAccepted>> {
) -> Result<Prover<state::CommitAccepted>, ProverError> { let mut ctx = self
let (mut mux_fut, mux_ctrl) = attach_mux(socket, Role::Prover); .ctx
let mut mt = build_mt_context(mux_ctrl.clone()); .take()
let mut ctx = mux_fut.poll_with(mt.new_context()).await?; .ok_or_else(|| Error::internal().with_msg("commitment protocol context was dropped"))?;
// Sends protocol configuration to verifier for compatibility check. // Sends protocol configuration to verifier for compatibility check.
mux_fut
.poll_with(async {
ctx.io_mut() ctx.io_mut()
.send(TlsCommitRequestMsg { .send(TlsCommitRequestMsg {
request: config.to_request(), request: config.to_request(),
version: crate::VERSION.clone(), version: crate::VERSION.clone(),
}) })
.await?; .await
.map_err(|e| {
Error::io()
.with_msg("commitment protocol failed to send request")
.with_source(e)
})?;
ctx.io_mut() ctx.io_mut()
.expect_next::<Response>() .expect_next::<Response>()
.await? .await
.map_err(|e| {
Error::io()
.with_msg("commitment protocol failed to receive response")
.with_source(e)
})?
.result .result
.map_err(ProverError::from) .map_err(|e| {
}) Error::user()
.await?; .with_msg("commitment protocol rejected by verifier")
.with_source(e)
})?;
let TlsCommitProtocolConfig::Mpc(mpc_tls_config) = config.protocol().clone() else { let TlsCommitProtocolConfig::Mpc(mpc_tls_config) = config.protocol().clone() else {
unreachable!("only MPC TLS is supported"); unreachable!("only MPC TLS is supported");
@@ -107,27 +116,30 @@ impl Prover<state::Initialized> {
let ProverDeps { vm, mut mpc_tls } = build_prover_deps(mpc_tls_config, ctx); let ProverDeps { vm, mut mpc_tls } = build_prover_deps(mpc_tls_config, ctx);
// Allocate resources for MPC-TLS in the VM. // Allocate resources for MPC-TLS in the VM.
let mut keys = mpc_tls.alloc()?; let mut keys = mpc_tls.alloc().map_err(|e| {
Error::internal()
.with_msg("commitment protocol failed to allocate mpc-tls resources")
.with_source(e)
})?;
let vm_lock = vm.try_lock().expect("VM is not locked"); let vm_lock = vm.try_lock().expect("VM is not locked");
translate_keys(&mut keys, &vm_lock); translate_keys(&mut keys, &vm_lock);
drop(vm_lock); drop(vm_lock);
debug!("setting up mpc-tls"); debug!("setting up mpc-tls");
mux_fut.poll_with(mpc_tls.preprocess()).await?; mpc_tls.preprocess().await.map_err(|e| {
Error::internal()
.with_msg("commitment protocol failed during mpc-tls preprocessing")
.with_source(e)
})?;
debug!("mpc-tls setup complete"); debug!("mpc-tls setup complete");
Ok(Prover { Ok(Prover {
config: self.config, config: self.config,
span: self.span, span: self.span,
state: state::CommitAccepted { ctx: None,
mux_ctrl, state: state::CommitAccepted { mpc_tls, keys, vm },
mux_fut,
mpc_tls,
keys,
vm,
},
}) })
} }
} }
@@ -148,14 +160,9 @@ impl Prover<state::CommitAccepted> {
self, self,
config: TlsClientConfig, config: TlsClientConfig,
socket: S, socket: S,
) -> Result<(TlsConnection, ProverFuture), ProverError> { ) -> Result<(TlsConnection, ProverFuture)> {
let state::CommitAccepted { let state::CommitAccepted {
mux_ctrl, mpc_tls, keys, vm, ..
mut mux_fut,
mpc_tls,
keys,
vm,
..
} = self.state; } = self.state;
let (mpc_ctrl, mpc_fut) = mpc_tls.run(); let (mpc_ctrl, mpc_fut) = mpc_tls.run();
@@ -173,7 +180,11 @@ impl Prover<state::CommitAccepted> {
let der = CertificateDer::from_slice(&cert.0); let der = CertificateDer::from_slice(&cert.0);
anchor_from_trusted_cert(&der) anchor_from_trusted_cert(&der)
.map(|anchor| anchor.to_owned()) .map(|anchor| anchor.to_owned())
.map_err(ProverError::config) .map_err(|e| {
Error::config()
.with_msg("failed to parse root certificate")
.with_source(e)
})
}) })
.collect::<Result<Vec<_>, _>>()?, .collect::<Result<Vec<_>, _>>()?,
}; };
@@ -190,7 +201,11 @@ impl Prover<state::CommitAccepted> {
.collect(), .collect(),
tls_client::PrivateKey(key.0.clone()), tls_client::PrivateKey(key.0.clone()),
) )
.map_err(ProverError::config)? .map_err(|e| {
Error::config()
.with_msg("failed to configure client authentication")
.with_source(e)
})?
} else { } else {
rustls_config.with_no_client_auth() rustls_config.with_no_client_auth()
}; };
@@ -200,7 +215,11 @@ impl Prover<state::CommitAccepted> {
Box::new(mpc_ctrl.clone()), Box::new(mpc_ctrl.clone()),
server_name, server_name,
) )
.map_err(ProverError::config)?; .map_err(|e| {
Error::config()
.with_msg("failed to create tls client connection")
.with_source(e)
})?;
let (conn, conn_fut) = bind_client(socket, client); let (conn, conn_fut) = bind_client(socket, client);
@@ -209,20 +228,27 @@ impl Prover<state::CommitAccepted> {
let mpc_ctrl = mpc_ctrl.clone(); let mpc_ctrl = mpc_ctrl.clone();
async move { async move {
let conn_fut = async { let conn_fut = async {
mux_fut conn_fut.await.map_err(|e| {
.poll_with(conn_fut.map_err(ProverError::from)) Error::io().with_msg("tls connection failed").with_source(e)
.await?; })?;
mpc_ctrl.stop().await.map_err(|e| {
Error::internal()
.with_msg("mpc-tls failed to stop")
.with_source(e)
})?;
mpc_ctrl.stop().await?; Ok::<_, crate::Error>(())
Ok::<_, ProverError>(())
}; };
info!("starting MPC-TLS"); info!("starting MPC-TLS");
let (_, (mut ctx, tls_transcript)) = futures::try_join!( let (_, (mut ctx, tls_transcript)) = futures::try_join!(
conn_fut, conn_fut,
mpc_fut.in_current_span().map_err(ProverError::from) mpc_fut.in_current_span().map_err(|e| {
Error::internal()
.with_msg("mpc-tls execution failed")
.with_source(e)
})
)?; )?;
info!("finished MPC-TLS"); info!("finished MPC-TLS");
@@ -233,10 +259,11 @@ impl Prover<state::CommitAccepted> {
debug!("finalizing mpc"); debug!("finalizing mpc");
// Finalize DEAP. // Finalize DEAP.
mux_fut vm.finalize(&mut ctx).await.map_err(|e| {
.poll_with(vm.finalize(&mut ctx)) Error::internal()
.await .with_msg("mpc finalization failed")
.map_err(ProverError::mpc)?; .with_source(e)
})?;
debug!("mpc finalized"); debug!("mpc finalized");
} }
@@ -256,11 +283,17 @@ impl Prover<state::CommitAccepted> {
*tls_transcript.version(), *tls_transcript.version(),
tls_transcript.recv().to_vec(), tls_transcript.recv().to_vec(),
) )
.map_err(ProverError::zk)?; .map_err(|e| {
Error::internal()
.with_msg("tag verification setup failed")
.with_source(e)
})?;
mux_fut vm.execute_all(&mut ctx).await.map_err(|e| {
.poll_with(vm.execute_all(&mut ctx).map_err(ProverError::zk)) Error::internal()
.await?; .with_msg("executing the zkVM failed during tag verification")
.with_source(e)
})?;
let transcript = tls_transcript let transcript = tls_transcript
.to_transcript() .to_transcript()
@@ -269,10 +302,8 @@ impl Prover<state::CommitAccepted> {
Ok(Prover { Ok(Prover {
config: self.config, config: self.config,
span: self.span, span: self.span,
ctx: Some(ctx),
state: state::Committed { state: state::Committed {
mux_ctrl,
mux_fut,
ctx,
vm, vm,
server_name: config.server_name().clone(), server_name: config.server_name().clone(),
keys, keys,
@@ -311,10 +342,12 @@ impl Prover<state::Committed> {
/// ///
/// * `config` - The disclosure configuration. /// * `config` - The disclosure configuration.
#[instrument(parent = &self.span, level = "info", skip_all, err)] #[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn prove(&mut self, config: &ProveConfig) -> Result<ProverOutput, ProverError> { pub async fn prove(&mut self, config: &ProveConfig) -> Result<ProverOutput> {
let ctx = self
.ctx
.as_mut()
.ok_or_else(|| Error::internal().with_msg("proving context was dropped"))?;
let state::Committed { let state::Committed {
mux_fut,
ctx,
vm, vm,
keys, keys,
server_name, server_name,
@@ -350,32 +383,34 @@ impl Prover<state::Committed> {
transcript: partial_transcript, transcript: partial_transcript,
}; };
let output = mux_fut ctx.io_mut().send(msg).await.map_err(|e| {
.poll_with(async { Error::io()
ctx.io_mut().send(msg).await.map_err(ProverError::from)?; .with_msg("failed to send prove configuration")
.with_source(e)
})?;
ctx.io_mut()
.expect_next::<Response>()
.await
.map_err(|e| {
Error::io()
.with_msg("failed to receive prove response from verifier")
.with_source(e)
})?
.result
.map_err(|e| {
Error::user()
.with_msg("proving rejected by verifier")
.with_source(e)
})?;
ctx.io_mut().expect_next::<Response>().await?.result?; let output = prove::prove(ctx, vm, keys, transcript, tls_transcript, config).await?;
prove::prove(ctx, vm, keys, transcript, tls_transcript, config).await
})
.await?;
Ok(output) Ok(output)
} }
/// Closes the connection with the verifier. /// Closes the connection with the verifier.
#[instrument(parent = &self.span, level = "info", skip_all, err)] #[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn close(self) -> Result<(), ProverError> { pub async fn close(self) -> Result<()> {
let state::Committed {
mux_ctrl, mux_fut, ..
} = self.state;
// Wait for the verifier to correctly close the connection.
if !mux_fut.is_complete() {
mux_ctrl.close();
mux_fut.await?;
}
Ok(()) Ok(())
} }
} }
@@ -398,10 +433,11 @@ impl ProverControl {
/// * The prover may need to close the connection to the server in order for /// * The prover may need to close the connection to the server in order for
/// it to close the connection on its end. If neither the prover or server /// it to close the connection on its end. If neither the prover or server
/// close the connection this will cause a deadlock. /// close the connection this will cause a deadlock.
pub async fn defer_decryption(&self) -> Result<(), ProverError> { pub async fn defer_decryption(&self) -> Result<()> {
self.mpc_ctrl self.mpc_ctrl.defer_decryption().await.map_err(|e| {
.defer_decryption() Error::internal()
.await .with_msg("failed to defer decryption")
.map_err(ProverError::from) .with_source(e)
})
} }
} }

View File

@@ -1,117 +0,0 @@
use std::{error::Error, fmt};
use mpc_tls::MpcTlsError;
use crate::transcript_internal::commit::encoding::EncodingError;
/// Error for [`Prover`](crate::prover::Prover).
///
/// Pairs a coarse private [`ErrorKind`] category with an optional boxed
/// source error; the category only drives the `Display` message.
#[derive(Debug, thiserror::Error)]
pub struct ProverError {
    // Broad failure category (io/mpc/zk/config/commit).
    kind: ErrorKind,
    // Underlying cause, if any; boxed so any `Error + Send + Sync` fits.
    source: Option<Box<dyn Error + Send + Sync + 'static>>,
}
impl ProverError {
    /// Creates an error of the given `kind` wrapping `source`.
    fn new<E>(kind: ErrorKind, source: E) -> Self
    where
        E: Into<Box<dyn Error + Send + Sync + 'static>>,
    {
        Self {
            kind,
            source: Some(source.into()),
        }
    }

    /// Wraps `source` as a `Config`-kind error.
    pub(crate) fn config<E>(source: E) -> Self
    where
        E: Into<Box<dyn Error + Send + Sync + 'static>>,
    {
        Self::new(ErrorKind::Config, source)
    }

    /// Wraps `source` as an `Mpc`-kind error.
    pub(crate) fn mpc<E>(source: E) -> Self
    where
        E: Into<Box<dyn Error + Send + Sync + 'static>>,
    {
        Self::new(ErrorKind::Mpc, source)
    }

    /// Wraps `source` as a `Zk`-kind error.
    pub(crate) fn zk<E>(source: E) -> Self
    where
        E: Into<Box<dyn Error + Send + Sync + 'static>>,
    {
        Self::new(ErrorKind::Zk, source)
    }

    /// Wraps `source` as a `Commit`-kind error.
    pub(crate) fn commit<E>(source: E) -> Self
    where
        E: Into<Box<dyn Error + Send + Sync + 'static>>,
    {
        Self::new(ErrorKind::Commit, source)
    }
}
/// Broad category of a prover failure; selects the `Display` message.
#[derive(Debug)]
enum ErrorKind {
    // I/O failure.
    Io,
    // MPC protocol failure.
    Mpc,
    // ZK protocol failure.
    Zk,
    // Invalid configuration.
    Config,
    // Commitment failure.
    Commit,
}
impl fmt::Display for ProverError {
    // Renders as "prover error: <kind>" plus " caused by: <source>" when a
    // source error is attached.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("prover error: ")?;

        match self.kind {
            ErrorKind::Io => f.write_str("io error")?,
            ErrorKind::Mpc => f.write_str("mpc error")?,
            ErrorKind::Zk => f.write_str("zk error")?,
            ErrorKind::Config => f.write_str("config error")?,
            ErrorKind::Commit => f.write_str("commit error")?,
        }

        if let Some(source) = &self.source {
            write!(f, " caused by: {source}")?;
        }

        Ok(())
    }
}
impl From<std::io::Error> for ProverError {
fn from(e: std::io::Error) -> Self {
Self::new(ErrorKind::Io, e)
}
}
impl From<tls_client_async::ConnectionError> for ProverError {
fn from(e: tls_client_async::ConnectionError) -> Self {
Self::new(ErrorKind::Io, e)
}
}
impl From<uid_mux::yamux::ConnectionError> for ProverError {
fn from(e: uid_mux::yamux::ConnectionError) -> Self {
Self::new(ErrorKind::Io, e)
}
}
impl From<mpz_common::ContextError> for ProverError {
fn from(e: mpz_common::ContextError) -> Self {
Self::new(ErrorKind::Mpc, e)
}
}
impl From<MpcTlsError> for ProverError {
fn from(e: MpcTlsError) -> Self {
Self::new(ErrorKind::Mpc, e)
}
}
impl From<EncodingError> for ProverError {
fn from(e: EncodingError) -> Self {
Self::new(ErrorKind::Commit, e)
}
}

View File

@@ -1,15 +1,17 @@
//! This module collects futures which are used by the [Prover]. //! This module collects futures which are used by the [Prover].
use super::{Prover, ProverControl, ProverError, state};
use futures::Future;
use std::pin::Pin; use std::pin::Pin;
use futures::Future;
use super::{Prover, ProverControl, state};
use crate::Result;
/// Prover future which must be polled for the TLS connection to make progress. /// Prover future which must be polled for the TLS connection to make progress.
pub struct ProverFuture { pub struct ProverFuture {
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
pub(crate) fut: Pin< pub(crate) fut:
Box<dyn Future<Output = Result<Prover<state::Committed>, ProverError>> + Send + 'static>, Pin<Box<dyn Future<Output = Result<Prover<state::Committed>>> + Send + 'static>>,
>,
pub(crate) ctrl: ProverControl, pub(crate) ctrl: ProverControl,
} }
@@ -21,7 +23,7 @@ impl ProverFuture {
} }
impl Future for ProverFuture { impl Future for ProverFuture {
type Output = Result<Prover<state::Committed>, ProverError>; type Output = Result<Prover<state::Committed>>;
fn poll( fn poll(
mut self: Pin<&mut Self>, mut self: Pin<&mut Self>,

View File

@@ -12,25 +12,18 @@ use tlsn_core::{
}; };
use crate::{ use crate::{
prover::ProverError, Error, Result,
transcript_internal::{ transcript_internal::{TranscriptRefs, auth::prove_plaintext, commit::hash::prove_hash},
TranscriptRefs,
auth::prove_plaintext,
commit::{
encoding::{self, MacStore},
hash::prove_hash,
},
},
}; };
pub(crate) async fn prove<T: Vm<Binary> + MacStore + Send + Sync>( pub(crate) async fn prove<T: Vm<Binary> + Send + Sync>(
ctx: &mut Context, ctx: &mut Context,
vm: &mut T, vm: &mut T,
keys: &SessionKeys, keys: &SessionKeys,
transcript: &Transcript, transcript: &Transcript,
tls_transcript: &TlsTranscript, tls_transcript: &TlsTranscript,
config: &ProveConfig, config: &ProveConfig,
) -> Result<ProverOutput, ProverError> { ) -> Result<ProverOutput> {
let mut output = ProverOutput { let mut output = ProverOutput {
transcript_commitments: Vec::default(), transcript_commitments: Vec::default(),
transcript_secrets: Vec::default(), transcript_secrets: Vec::default(),
@@ -45,13 +38,6 @@ pub(crate) async fn prove<T: Vm<Binary> + MacStore + Send + Sync>(
Direction::Sent => commit_sent.union_mut(idx), Direction::Sent => commit_sent.union_mut(idx),
Direction::Received => commit_recv.union_mut(idx), Direction::Received => commit_recv.union_mut(idx),
}); });
commit_config
.iter_encoding()
.for_each(|(direction, idx)| match direction {
Direction::Sent => commit_sent.union_mut(idx),
Direction::Received => commit_recv.union_mut(idx),
});
} }
let transcript_refs = TranscriptRefs { let transcript_refs = TranscriptRefs {
@@ -67,7 +53,11 @@ pub(crate) async fn prove<T: Vm<Binary> + MacStore + Send + Sync>(
&reveal_sent, &reveal_sent,
&commit_sent, &commit_sent,
) )
.map_err(ProverError::commit)?, .map_err(|e| {
Error::internal()
.with_msg("proving failed during sent plaintext commitment")
.with_source(e)
})?,
recv: prove_plaintext( recv: prove_plaintext(
vm, vm,
keys.server_write_key, keys.server_write_key,
@@ -80,7 +70,11 @@ pub(crate) async fn prove<T: Vm<Binary> + MacStore + Send + Sync>(
&reveal_recv, &reveal_recv,
&commit_recv, &commit_recv,
) )
.map_err(ProverError::commit)?, .map_err(|e| {
Error::internal()
.with_msg("proving failed during received plaintext commitment")
.with_source(e)
})?,
}; };
let hash_commitments = if let Some(commit_config) = config.transcript_commit() let hash_commitments = if let Some(commit_config) = config.transcript_commit()
@@ -94,55 +88,28 @@ pub(crate) async fn prove<T: Vm<Binary> + MacStore + Send + Sync>(
.iter_hash() .iter_hash()
.map(|((dir, idx), alg)| (*dir, idx.clone(), *alg)), .map(|((dir, idx), alg)| (*dir, idx.clone(), *alg)),
) )
.map_err(ProverError::commit)?, .map_err(|e| {
Error::internal()
.with_msg("proving failed during hash commitment setup")
.with_source(e)
})?,
) )
} else { } else {
None None
}; };
vm.execute_all(ctx).await.map_err(ProverError::zk)?; vm.execute_all(ctx).await.map_err(|e| {
Error::internal()
if let Some(commit_config) = config.transcript_commit() .with_msg("proving failed during zk execution")
&& commit_config.has_encoding() .with_source(e)
{ })?;
let mut sent_ranges = RangeSet::default();
let mut recv_ranges = RangeSet::default();
for (dir, idx) in commit_config.iter_encoding() {
match dir {
Direction::Sent => sent_ranges.union_mut(idx),
Direction::Received => recv_ranges.union_mut(idx),
}
}
let sent_map = transcript_refs
.sent
.index(&sent_ranges)
.expect("indices are valid");
let recv_map = transcript_refs
.recv
.index(&recv_ranges)
.expect("indices are valid");
let (commitment, tree) = encoding::receive(
ctx,
vm,
*commit_config.encoding_hash_alg(),
&sent_map,
&recv_map,
commit_config.iter_encoding(),
)
.await?;
output
.transcript_commitments
.push(TranscriptCommitment::Encoding(commitment));
output
.transcript_secrets
.push(TranscriptSecret::Encoding(tree));
}
if let Some((hash_fut, hash_secrets)) = hash_commitments { if let Some((hash_fut, hash_secrets)) = hash_commitments {
let hash_commitments = hash_fut.try_recv().map_err(ProverError::commit)?; let hash_commitments = hash_fut.try_recv().map_err(|e| {
Error::internal()
.with_msg("proving failed during hash commitment finalization")
.with_source(e)
})?;
for (commitment, secret) in hash_commitments.into_iter().zip(hash_secrets) { for (commitment, secret) in hash_commitments.into_iter().zip(hash_secrets) {
output output
.transcript_commitments .transcript_commitments

View File

@@ -3,7 +3,6 @@
use std::sync::Arc; use std::sync::Arc;
use mpc_tls::{MpcTlsLeader, SessionKeys}; use mpc_tls::{MpcTlsLeader, SessionKeys};
use mpz_common::Context;
use tlsn_core::{ use tlsn_core::{
connection::ServerName, connection::ServerName,
transcript::{TlsTranscript, Transcript}, transcript::{TlsTranscript, Transcript},
@@ -11,10 +10,7 @@ use tlsn_core::{
use tlsn_deap::Deap; use tlsn_deap::Deap;
use tokio::sync::Mutex; use tokio::sync::Mutex;
use crate::{ use crate::mpz::{ProverMpc, ProverZk};
mpz::{ProverMpc, ProverZk},
mux::{MuxControl, MuxFuture},
};
/// Entry state /// Entry state
pub struct Initialized; pub struct Initialized;
@@ -24,8 +20,6 @@ opaque_debug::implement!(Initialized);
/// State after the verifier has accepted the proposed TLS commitment protocol /// State after the verifier has accepted the proposed TLS commitment protocol
/// configuration and preprocessing has completed. /// configuration and preprocessing has completed.
pub struct CommitAccepted { pub struct CommitAccepted {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) mpc_tls: MpcTlsLeader, pub(crate) mpc_tls: MpcTlsLeader,
pub(crate) keys: SessionKeys, pub(crate) keys: SessionKeys,
pub(crate) vm: Arc<Mutex<Deap<ProverMpc, ProverZk>>>, pub(crate) vm: Arc<Mutex<Deap<ProverMpc, ProverZk>>>,
@@ -35,9 +29,6 @@ opaque_debug::implement!(CommitAccepted);
/// State after the TLS transcript has been committed. /// State after the TLS transcript has been committed.
pub struct Committed { pub struct Committed {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) ctx: Context,
pub(crate) vm: ProverZk, pub(crate) vm: ProverZk,
pub(crate) server_name: ServerName, pub(crate) server_name: ServerName,
pub(crate) keys: SessionKeys, pub(crate) keys: SessionKeys,

326
crates/tlsn/src/session.rs Normal file
View File

@@ -0,0 +1,326 @@
use std::{
future::Future,
pin::Pin,
sync::{
Arc, Mutex,
atomic::{AtomicBool, Ordering},
},
task::{Context, Poll, Waker},
};
use futures::{AsyncRead, AsyncWrite};
use mpz_common::{ThreadId, context::Multithread, io::Io, mux::Mux};
use tlsn_core::config::{prover::ProverConfig, verifier::VerifierConfig};
use tlsn_mux::{Connection, Handle};
use crate::{
Error, Result,
prover::{Prover, state as prover_state},
verifier::{Verifier, state as verifier_state},
};
/// Maximum concurrency for the multi-threaded context, i.e. the upper bound
/// on contexts handed out by `build_mt_context`'s builder.
const MAX_CONCURRENCY: usize = 8;
/// A TLSNotary session over a communication channel.
///
/// Wraps an async IO stream and provides multiplexing for the protocol. Use
/// [`new_prover`](Self::new_prover) or [`new_verifier`](Self::new_verifier) to
/// create protocol participants.
///
/// The session must be polled continuously (either directly or via
/// [`split`](Self::split)) to drive the underlying connection. After the
/// session closes, the underlying IO can be reclaimed with
/// [`try_take`](Self::try_take).
///
/// **Important**: The order in which provers and verifiers are created must
/// match on both sides. For example, if the prover side calls `new_prover`
/// then `new_verifier`, the verifier side must call `new_verifier` then
/// `new_prover`.
#[must_use = "session must be polled continuously to make progress, including during closing."]
pub struct Session<Io> {
    // Multiplexed connection; `None` once the IO has been taken back out.
    conn: Option<Connection<Io>>,
    // Multi-threaded context factory used to mint per-participant contexts.
    mt: Multithread,
}
impl<Io> Session<Io>
where
    Io: AsyncRead + AsyncWrite + Unpin,
{
    /// Creates a new session.
    ///
    /// Wraps `io` in a multiplexed connection and builds the multi-threaded
    /// context used to hand out per-participant contexts.
    pub fn new(io: Io) -> Self {
        let mut mux_config = tlsn_mux::Config::default();
        // NOTE(review): 36 streams presumably covers MAX_CONCURRENCY worker
        // contexts plus protocol channels — confirm against mux usage.
        mux_config.set_max_num_streams(36);
        mux_config.set_keep_alive(true);
        mux_config.set_close_sync(true);

        let conn = tlsn_mux::Connection::new(io, mux_config);
        // Assumes a handle is always obtainable from a freshly built
        // connection; panics otherwise.
        let handle = conn.handle().expect("handle should be available");

        let mt = build_mt_context(MuxHandle { handle });

        Self {
            conn: Some(conn),
            mt,
        }
    }

    /// Creates a new prover.
    pub fn new_prover(
        &mut self,
        config: ProverConfig,
    ) -> Result<Prover<prover_state::Initialized>> {
        // Each prover receives its own context carved out of the session's
        // multi-threaded context.
        let ctx = self.mt.new_context().map_err(|e| {
            Error::internal()
                .with_msg("failed to create new prover")
                .with_source(e)
        })?;

        Ok(Prover::new(ctx, config))
    }

    /// Creates a new verifier.
    pub fn new_verifier(
        &mut self,
        config: VerifierConfig,
    ) -> Result<Verifier<verifier_state::Initialized>> {
        // Same pattern as `new_prover`: one dedicated context per verifier.
        let ctx = self.mt.new_context().map_err(|e| {
            Error::internal()
                .with_msg("failed to create new verifier")
                .with_source(e)
        })?;

        Ok(Verifier::new(ctx, config))
    }

    /// Returns `true` if the session is closed.
    pub fn is_closed(&self) -> bool {
        // If the IO was already taken (`conn` is `None`), this reports
        // `false` (`unwrap_or_default`).
        self.conn
            .as_ref()
            .map(|mux| mux.is_complete())
            .unwrap_or_default()
    }

    /// Closes the session.
    ///
    /// This will cause the session to begin closing. Session must continue to
    /// be polled until completion.
    pub fn close(&mut self) {
        if let Some(conn) = self.conn.as_mut() {
            conn.close()
        }
    }

    /// Attempts to take the IO, returning an error if it is not available.
    pub fn try_take(&mut self) -> Result<Io> {
        let conn = self.conn.take().ok_or_else(|| {
            Error::io().with_msg("failed to take the session io, it was already taken")
        })?;

        match conn.try_into_io() {
            // Connection not complete yet: put it back so the session stays
            // usable and pollable, then report the failure.
            Err(conn) => {
                self.conn = Some(conn);
                Err(Error::io()
                    .with_msg("failed to take the session io, session was not completed yet"))
            }
            Ok(conn) => Ok(conn),
        }
    }

    /// Polls the session.
    ///
    /// Errors if the IO was already taken or the underlying connection fails.
    pub fn poll(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {
        self.conn
            .as_mut()
            .ok_or_else(|| {
                Error::io()
                    .with_msg("failed to poll the session connection because it has been taken")
            })?
            .poll(cx)
            .map_err(|e| {
                Error::io()
                    .with_msg("error occurred while polling the session connection")
                    .with_source(e)
            })
    }

    /// Splits the session into a driver and handle.
    ///
    /// The driver must be polled to make progress. The handle is used
    /// for creating provers/verifiers and closing the session.
    pub fn split(self) -> (SessionDriver<Io>, SessionHandle) {
        // The two halves communicate through a shared close flag and a
        // shared waker slot: the handle sets the flag and wakes the driver.
        let should_close = Arc::new(AtomicBool::new(false));
        let waker = Arc::new(Mutex::new(None::<Waker>));

        (
            SessionDriver {
                conn: self.conn,
                should_close: should_close.clone(),
                waker: waker.clone(),
            },
            SessionHandle {
                mt: self.mt,
                should_close,
                waker,
            },
        )
    }
}
impl<Io> Future for Session<Io>
where
    Io: AsyncRead + AsyncWrite + Unpin,
{
    type Output = Result<()>;

    // Delegates to the inherent `Session::poll`.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        Session::poll(&mut (*self), cx)
    }
}
/// The polling half of a split session.
///
/// Must be polled continuously to drive the session. Returns the underlying
/// IO when the session closes.
#[must_use = "driver must be polled to make progress"]
pub struct SessionDriver<Io> {
    // Multiplexed connection; `None` once the driver has completed.
    conn: Option<Connection<Io>>,
    // Set by the `SessionHandle` to request close.
    should_close: Arc<AtomicBool>,
    // Latest waker, stored so the handle can wake the driver on close().
    waker: Arc<Mutex<Option<Waker>>>,
}
impl<Io> SessionDriver<Io>
where
    Io: AsyncRead + AsyncWrite + Unpin,
{
    /// Polls the driver.
    ///
    /// Resolves with the underlying IO once the connection completes; errors
    /// if polled again afterwards or if the connection fails.
    pub fn poll(&mut self, cx: &mut Context<'_>) -> Poll<Result<Io>> {
        // Store the waker so the handle can wake us when close() is called.
        // Done first, before any early return, so a close signal arriving
        // after this poll still wakes us.
        {
            let mut waker_guard = self.waker.lock().unwrap();
            *waker_guard = Some(cx.waker().clone());
        }

        let conn = self
            .conn
            .as_mut()
            .ok_or_else(|| Error::io().with_msg("session driver already completed"))?;

        // Honor a close request from the handle (Acquire pairs with the
        // handle's Release store).
        if self.should_close.load(Ordering::Acquire) {
            conn.close();
        }

        match conn.poll(cx) {
            Poll::Ready(Ok(())) => {}
            Poll::Ready(Err(e)) => {
                return Poll::Ready(Err(Error::io()
                    .with_msg("error polling session connection")
                    .with_source(e)));
            }
            Poll::Pending => return Poll::Pending,
        }

        // Connection finished: hand the IO back to the caller.
        let conn = self.conn.take().unwrap();
        Poll::Ready(
            conn.try_into_io()
                .map_err(|_| Error::io().with_msg("failed to take session io")),
        )
    }
}
impl<Io> Future for SessionDriver<Io>
where
    Io: AsyncRead + AsyncWrite + Unpin,
{
    type Output = Result<Io>;

    // Delegates to the inherent `SessionDriver::poll`.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        SessionDriver::poll(&mut *self, cx)
    }
}
/// The control half of a split session.
///
/// Used to create provers/verifiers and control the session lifecycle.
pub struct SessionHandle {
    // Context factory carried over from the original `Session`.
    mt: Multithread,
    // Shared with the driver; set to request close.
    should_close: Arc<AtomicBool>,
    // Shared with the driver; used to wake it after setting the flag.
    waker: Arc<Mutex<Option<Waker>>>,
}
impl SessionHandle {
    /// Creates a new prover.
    pub fn new_prover(
        &mut self,
        config: ProverConfig,
    ) -> Result<Prover<prover_state::Initialized>> {
        // Mirrors `Session::new_prover`: one dedicated context per prover.
        let ctx = self.mt.new_context().map_err(|e| {
            Error::internal()
                .with_msg("failed to create new prover")
                .with_source(e)
        })?;

        Ok(Prover::new(ctx, config))
    }

    /// Creates a new verifier.
    pub fn new_verifier(
        &mut self,
        config: VerifierConfig,
    ) -> Result<Verifier<verifier_state::Initialized>> {
        // Mirrors `Session::new_verifier`.
        let ctx = self.mt.new_context().map_err(|e| {
            Error::internal()
                .with_msg("failed to create new verifier")
                .with_source(e)
        })?;

        Ok(Verifier::new(ctx, config))
    }

    /// Signals the session to close.
    ///
    /// The driver must continue to be polled until it completes.
    pub fn close(&self) {
        // Release pairs with the driver's Acquire load of the flag; the wake
        // ensures the driver is polled again to observe it.
        self.should_close.store(true, Ordering::Release);
        if let Some(waker) = self.waker.lock().unwrap().take() {
            waker.wake();
        }
    }
}
/// Multiplexer controller providing streams.
struct MuxHandle {
    // Handle to the underlying mux connection, used to open new streams.
    handle: Handle,
}

// Manual Debug that prints no fields (`finish_non_exhaustive`), keeping the
// inner handle out of debug output.
impl std::fmt::Debug for MuxHandle {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("MuxHandle").finish_non_exhaustive()
    }
}
impl Mux for MuxHandle {
    /// Opens a new multiplexed stream keyed by the thread id and wraps it in
    /// the protocol's `Io` type. Stream failures surface as `io::Error`.
    fn open(&self, id: ThreadId) -> Result<Io, std::io::Error> {
        self.handle
            .new_stream(id.as_ref())
            .map(Io::from_io)
            .map_err(std::io::Error::other)
    }
}
/// Builds a multi-threaded context with the given muxer.
///
/// The context is capped at [`MAX_CONCURRENCY`] concurrent threads. On
/// wasm/web builds a spawn handler is installed that dispatches work via
/// `web_spawn` instead of OS threads.
fn build_mt_context(mux: MuxHandle) -> Multithread {
    let builder = Multithread::builder()
        .mux(Box::new(mux) as Box<_>)
        .concurrency(MAX_CONCURRENCY);

    #[cfg(all(feature = "web", target_arch = "wasm32"))]
    let builder = builder.spawn_handler(|f| {
        let _ = web_spawn::spawn(f);
        Ok(())
    });

    // `expect` over bare `unwrap`: states the invariant if construction ever
    // fails (it should not with a valid muxer and fixed concurrency).
    builder
        .build()
        .expect("multithread context should build with a valid muxer")
}

View File

@@ -1,4 +1,3 @@
//! Plaintext commitment and proof of encryption. //! Plaintext commitment and proof of encryption.
pub(crate) mod encoding;
pub(crate) mod hash; pub(crate) mod hash;

View File

@@ -1,267 +0,0 @@
//! Encoding commitment protocol.
use std::ops::Range;
use mpz_common::Context;
use mpz_memory_core::{
Vector,
binary::U8,
correlated::{Delta, Key, Mac},
};
use rand::Rng;
use rangeset::set::RangeSet;
use serde::{Deserialize, Serialize};
use serio::{SinkExt, stream::IoStreamExt};
use tlsn_core::{
hash::{Blake3, HashAlgId, HashAlgorithm, Keccak256, Sha256},
transcript::{
Direction,
encoding::{
Encoder, EncoderSecret, EncodingCommitment, EncodingProvider, EncodingProviderError,
EncodingTree, EncodingTreeError, new_encoder,
},
},
};
use crate::{
map::{Item, RangeMap},
transcript_internal::ReferenceMap,
};
/// Bytes of encoding, per byte.
const ENCODING_SIZE: usize = 128;

/// Wire message carrying the raw encoding bytes for both directions.
#[derive(Debug, Serialize, Deserialize)]
struct Encodings {
    // Encodings for sent plaintext ranges, ENCODING_SIZE bytes per byte.
    sent: Vec<u8>,
    // Encodings for received plaintext ranges, ENCODING_SIZE bytes per byte.
    recv: Vec<u8>,
}
/// Transfers encodings for the provided plaintext ranges.
///
/// Sender side of the encoding commitment protocol: derives a fresh
/// [`EncoderSecret`], masks each range's encodings with the corresponding
/// correlated-OT keys, sends them to the peer, and receives back the
/// commitment root.
pub(crate) async fn transfer<K: KeyStore>(
    ctx: &mut Context,
    store: &K,
    sent: &ReferenceMap,
    recv: &ReferenceMap,
) -> Result<(EncoderSecret, EncodingCommitment), EncodingError> {
    // Fresh random seed combined with the store's delta.
    let secret = EncoderSecret::new(rand::rng().random(), store.delta().as_block().to_bytes());
    let encoder = new_encoder(&secret);

    // Collects the encodings for the provided plaintext ranges.
    fn collect_encodings(
        encoder: &impl Encoder,
        store: &impl KeyStore,
        direction: Direction,
        map: &ReferenceMap,
    ) -> Vec<u8> {
        let mut encodings = Vec::with_capacity(map.len() * ENCODING_SIZE);
        for (range, chunk) in map.iter() {
            let start = encodings.len();
            encoder.encode_range(direction, range, &mut encodings);

            let keys = store
                .get_keys(*chunk)
                .expect("keys are present for provided plaintext ranges");

            // XOR-mask the freshly appended encodings with the key bytes.
            encodings[start..]
                .iter_mut()
                .zip(keys.iter().flat_map(|key| key.as_block().as_bytes()))
                .for_each(|(encoding, key)| {
                    *encoding ^= *key;
                });
        }
        encodings
    }

    let encodings = Encodings {
        sent: collect_encodings(&encoder, store, Direction::Sent, sent),
        recv: collect_encodings(&encoder, store, Direction::Received, recv),
    };

    // Raise the frame limit to fit the payload on top of the existing limit.
    let frame_limit = ctx
        .io()
        .limit()
        .saturating_add(encodings.sent.len() + encodings.recv.len());

    ctx.io_mut().with_limit(frame_limit).send(encodings).await?;

    // The peer commits and replies with the Merkle root.
    let root = ctx.io_mut().expect_next().await?;

    Ok((secret, EncodingCommitment { root }))
}
/// Receives and commits to the encodings for the provided plaintext ranges.
///
/// Receiver side of the encoding commitment protocol: receives the masked
/// encodings, unmasks them with the local MACs, builds an [`EncodingTree`]
/// over `idxs` with the chosen hash algorithm, and sends the root back.
pub(crate) async fn receive<M: MacStore>(
    ctx: &mut Context,
    store: &M,
    hash_alg: HashAlgId,
    sent: &ReferenceMap,
    recv: &ReferenceMap,
    idxs: impl IntoIterator<Item = &(Direction, RangeSet<usize>)>,
) -> Result<(EncodingCommitment, EncodingTree), EncodingError> {
    // Only these three algorithms are supported; anything else is rejected.
    let hasher: &(dyn HashAlgorithm + Send + Sync) = match hash_alg {
        HashAlgId::SHA256 => &Sha256::default(),
        HashAlgId::KECCAK256 => &Keccak256::default(),
        HashAlgId::BLAKE3 => &Blake3::default(),
        alg => {
            return Err(ErrorRepr::UnsupportedHashAlgorithm(alg).into());
        }
    };

    let (sent_len, recv_len) = (sent.len(), recv.len());

    // Raise the frame limit to fit the expected payload.
    let frame_limit = ctx
        .io()
        .limit()
        .saturating_add(ENCODING_SIZE * (sent_len + recv_len));

    let encodings: Encodings = ctx.io_mut().with_limit(frame_limit).expect_next().await?;

    // Validate the payload sizes before touching the MAC store.
    if encodings.sent.len() != sent_len * ENCODING_SIZE {
        return Err(ErrorRepr::IncorrectMacCount {
            direction: Direction::Sent,
            expected: sent_len,
            got: encodings.sent.len() / ENCODING_SIZE,
        }
        .into());
    }

    if encodings.recv.len() != recv_len * ENCODING_SIZE {
        return Err(ErrorRepr::IncorrectMacCount {
            direction: Direction::Received,
            expected: recv_len,
            got: encodings.recv.len() / ENCODING_SIZE,
        }
        .into());
    }

    // Collects a map of plaintext ranges to their encodings.
    fn collect_map(
        store: &impl MacStore,
        mut encodings: Vec<u8>,
        map: &ReferenceMap,
    ) -> RangeMap<EncodingSlice> {
        let mut encoding_map = Vec::new();
        let mut pos = 0;
        for (range, chunk) in map.iter() {
            let macs = store
                .get_macs(*chunk)
                .expect("MACs are present for provided plaintext ranges");

            // Unmask in place: XOR the received bytes with the MAC bytes.
            let encoding = &mut encodings[pos..pos + range.len() * ENCODING_SIZE];
            encoding
                .iter_mut()
                .zip(macs.iter().flat_map(|mac| mac.as_bytes()))
                .for_each(|(encoding, mac)| {
                    *encoding ^= *mac;
                });
            encoding_map.push((range.start, EncodingSlice::from(&(*encoding))));
            pos += range.len() * ENCODING_SIZE;
        }
        RangeMap::new(encoding_map)
    }

    let provider = Provider {
        sent: collect_map(store, encodings.sent, sent),
        recv: collect_map(store, encodings.recv, recv),
    };

    let tree = EncodingTree::new(hasher, idxs, &provider)?;
    let root = tree.root();

    // Send the root back to the peer to complete the commitment.
    ctx.io_mut().send(root.clone()).await?;

    let commitment = EncodingCommitment { root };

    Ok((commitment, tree))
}
/// Source of correlated-OT keys used by the sender side ([`transfer`]).
pub(crate) trait KeyStore {
    /// Returns the global correlation delta.
    fn delta(&self) -> &Delta;
    /// Returns the keys for `data`, or `None` if they are not present.
    fn get_keys(&self, data: Vector<U8>) -> Option<&[Key]>;
}

/// Source of MACs used by the receiver side ([`receive`]).
pub(crate) trait MacStore {
    /// Returns the MACs for `data`, or `None` if they are not present.
    fn get_macs(&self, data: Vector<U8>) -> Option<&[Mac]>;
}
/// Encoding provider backed by the unmasked per-direction range maps.
#[derive(Debug)]
struct Provider {
    sent: RangeMap<EncodingSlice>,
    recv: RangeMap<EncodingSlice>,
}

impl EncodingProvider for Provider {
    /// Appends the encodings for `range` in `direction` to `dest`, failing
    /// if the range is not covered by the stored maps.
    fn provide_encoding(
        &self,
        direction: Direction,
        range: Range<usize>,
        dest: &mut Vec<u8>,
    ) -> Result<(), EncodingProviderError> {
        let source = match direction {
            Direction::Sent => &self.sent,
            Direction::Received => &self.recv,
        };

        source
            .get(range)
            .map(|encoding| dest.extend_from_slice(encoding))
            .ok_or(EncodingProviderError)
    }
}
/// Contiguous run of encodings stored as raw bytes
/// (`ENCODING_SIZE` bytes per plaintext byte).
#[derive(Debug)]
struct EncodingSlice(Vec<u8>);
impl From<&[u8]> for EncodingSlice {
fn from(value: &[u8]) -> Self {
Self(value.to_vec())
}
}
impl Item for EncodingSlice {
    type Slice<'a>
        = &'a [u8]
    where
        Self: 'a;

    /// Number of plaintext bytes covered by this slice.
    fn length(&self) -> usize {
        let byte_count = self.0.len();
        byte_count / ENCODING_SIZE
    }

    /// Returns the raw encoding bytes for the given plaintext byte range,
    /// or `None` if the range is out of bounds.
    fn slice<'a>(&'a self, range: Range<usize>) -> Option<Self::Slice<'a>> {
        let start = range.start * ENCODING_SIZE;
        let end = range.end * ENCODING_SIZE;
        self.0.get(start..end)
    }
}
/// Encoding protocol error.
///
/// Opaque newtype over the private [`ErrorRepr`]; `#[error(transparent)]`
/// forwards the inner display and source.
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct EncodingError(#[from] ErrorRepr);
// Internal representation for [`EncodingError`].
//
// NOTE(review): the container-level `#[error("encoding protocol error: {0}")]`
// coexists with per-variant `#[error]` attributes; thiserror normally takes
// display attributes per variant on enums — confirm the container attribute
// is accepted and not redundant.
#[derive(Debug, thiserror::Error)]
#[error("encoding protocol error: {0}")]
enum ErrorRepr {
    // Transport-level I/O failure.
    #[error("I/O error: {0}")]
    Io(std::io::Error),
    // The peer sent a number of MACs inconsistent with the plaintext length
    // for the given direction.
    #[error("incorrect MAC count for {direction}: expected {expected}, got {got}")]
    IncorrectMacCount {
        direction: Direction,
        expected: usize,
        got: usize,
    },
    // Failure while building the encoding tree.
    #[error("encoding tree error: {0}")]
    EncodingTree(EncodingTreeError),
    // The requested hash algorithm is not supported.
    #[error("unsupported hash algorithm: {0}")]
    UnsupportedHashAlgorithm(HashAlgId),
}
impl From<std::io::Error> for EncodingError {
fn from(value: std::io::Error) -> Self {
Self(ErrorRepr::Io(value))
}
}
impl From<EncodingTreeError> for EncodingError {
fn from(value: EncodingTreeError) -> Self {
Self(ErrorRepr::EncodingTree(value))
}
}

View File

@@ -1,23 +1,19 @@
//! Verifier. //! Verifier.
mod error;
pub mod state; pub mod state;
mod verify; mod verify;
use std::sync::Arc; use std::sync::Arc;
pub use error::VerifierError; use mpz_common::Context;
pub use tlsn_core::{VerifierOutput, webpki::ServerCertVerifier}; pub use tlsn_core::{VerifierOutput, webpki::ServerCertVerifier};
use crate::{ use crate::{
Role, Error, Result,
context::build_mt_context,
mpz::{VerifierDeps, build_verifier_deps, translate_keys}, mpz::{VerifierDeps, build_verifier_deps, translate_keys},
msg::{ProveRequestMsg, Response, TlsCommitRequestMsg}, msg::{ProveRequestMsg, Response, TlsCommitRequestMsg},
mux::attach_mux,
tag::verify_tags, tag::verify_tags,
}; };
use futures::{AsyncRead, AsyncWrite, TryFutureExt};
use mpz_vm_core::prelude::*; use mpz_vm_core::prelude::*;
use serio::{SinkExt, stream::IoStreamExt}; use serio::{SinkExt, stream::IoStreamExt};
use tlsn_core::{ use tlsn_core::{
@@ -45,16 +41,18 @@ pub struct SessionInfo {
pub struct Verifier<T: state::VerifierState = state::Initialized> { pub struct Verifier<T: state::VerifierState = state::Initialized> {
config: VerifierConfig, config: VerifierConfig,
span: Span, span: Span,
ctx: Option<Context>,
state: T, state: T,
} }
impl Verifier<state::Initialized> { impl Verifier<state::Initialized> {
/// Creates a new verifier. /// Creates a new verifier.
pub fn new(config: VerifierConfig) -> Self { pub(crate) fn new(ctx: Context, config: VerifierConfig) -> Self {
let span = info_span!("verifier"); let span = info_span!("verifier");
Self { Self {
config, config,
span, span,
ctx: Some(ctx),
state: state::Initialized, state: state::Initialized,
} }
} }
@@ -63,50 +61,43 @@ impl Verifier<state::Initialized> {
/// ///
/// This initiates the TLS commitment protocol, receiving the prover's /// This initiates the TLS commitment protocol, receiving the prover's
/// configuration and providing the opportunity to accept or reject it. /// configuration and providing the opportunity to accept or reject it.
///
/// # Arguments
///
/// * `socket` - The socket to the prover.
#[instrument(parent = &self.span, level = "info", skip_all, err)] #[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn commit<S: AsyncWrite + AsyncRead + Send + Unpin + 'static>( pub async fn commit(mut self) -> Result<Verifier<state::CommitStart>> {
self, let mut ctx = self
socket: S, .ctx
) -> Result<Verifier<state::CommitStart>, VerifierError> { .take()
let (mut mux_fut, mux_ctrl) = attach_mux(socket, Role::Verifier); .ok_or_else(|| Error::internal().with_msg("commitment protocol context was dropped"))?;
let mut mt = build_mt_context(mux_ctrl.clone());
let mut ctx = mux_fut.poll_with(mt.new_context()).await?;
// Receives protocol configuration from prover to perform compatibility check. // Receives protocol configuration from prover to perform compatibility check.
let TlsCommitRequestMsg { request, version } = let TlsCommitRequestMsg { request, version } =
mux_fut.poll_with(ctx.io_mut().expect_next()).await?; ctx.io_mut().expect_next().await.map_err(|e| {
Error::io()
.with_msg("commitment protocol failed to receive request")
.with_source(e)
})?;
if version != *crate::VERSION { if version != *crate::VERSION {
let msg = format!( let msg = format!(
"prover version does not match with verifier: {version} != {}", "prover version does not match with verifier: {version} != {}",
*crate::VERSION *crate::VERSION
); );
mux_fut ctx.io_mut()
.poll_with(ctx.io_mut().send(Response::err(Some(msg.clone())))) .send(Response::err(Some(msg.clone())))
.await?; .await
.map_err(|e| {
Error::io()
.with_msg("commitment protocol failed to send version mismatch response")
.with_source(e)
})?;
// Wait for the prover to correctly close the connection. return Err(Error::config().with_msg(msg));
if !mux_fut.is_complete() {
mux_ctrl.close();
mux_fut.await?;
}
return Err(VerifierError::config(msg));
} }
Ok(Verifier { Ok(Verifier {
config: self.config, config: self.config,
span: self.span, span: self.span,
state: state::CommitStart { ctx: Some(ctx),
mux_ctrl, state: state::CommitStart { request },
mux_fut,
ctx,
request,
},
}) })
} }
} }
@@ -119,15 +110,18 @@ impl Verifier<state::CommitStart> {
/// Accepts the proposed protocol configuration. /// Accepts the proposed protocol configuration.
#[instrument(parent = &self.span, level = "info", skip_all, err)] #[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn accept(self) -> Result<Verifier<state::CommitAccepted>, VerifierError> { pub async fn accept(mut self) -> Result<Verifier<state::CommitAccepted>> {
let state::CommitStart { let mut ctx = self
mux_ctrl, .ctx
mut mux_fut, .take()
mut ctx, .ok_or_else(|| Error::internal().with_msg("commitment protocol context was dropped"))?;
request, let state::CommitStart { request } = self.state;
} = self.state;
mux_fut.poll_with(ctx.io_mut().send(Response::ok())).await?; ctx.io_mut().send(Response::ok()).await.map_err(|e| {
Error::io()
.with_msg("commitment protocol failed to send acceptance")
.with_source(e)
})?;
let TlsCommitProtocolConfig::Mpc(mpc_tls_config) = request.protocol().clone() else { let TlsCommitProtocolConfig::Mpc(mpc_tls_config) = request.protocol().clone() else {
unreachable!("only MPC TLS is supported"); unreachable!("only MPC TLS is supported");
@@ -136,49 +130,46 @@ impl Verifier<state::CommitStart> {
let VerifierDeps { vm, mut mpc_tls } = build_verifier_deps(mpc_tls_config, ctx); let VerifierDeps { vm, mut mpc_tls } = build_verifier_deps(mpc_tls_config, ctx);
// Allocate resources for MPC-TLS in the VM. // Allocate resources for MPC-TLS in the VM.
let mut keys = mpc_tls.alloc()?; let mut keys = mpc_tls.alloc().map_err(|e| {
Error::internal()
.with_msg("commitment protocol failed to allocate mpc-tls resources")
.with_source(e)
})?;
let vm_lock = vm.try_lock().expect("VM is not locked"); let vm_lock = vm.try_lock().expect("VM is not locked");
translate_keys(&mut keys, &vm_lock); translate_keys(&mut keys, &vm_lock);
drop(vm_lock); drop(vm_lock);
debug!("setting up mpc-tls"); debug!("setting up mpc-tls");
mux_fut.poll_with(mpc_tls.preprocess()).await?; mpc_tls.preprocess().await.map_err(|e| {
Error::internal()
.with_msg("commitment protocol failed during mpc-tls preprocessing")
.with_source(e)
})?;
debug!("mpc-tls setup complete"); debug!("mpc-tls setup complete");
Ok(Verifier { Ok(Verifier {
config: self.config, config: self.config,
span: self.span, span: self.span,
state: state::CommitAccepted { ctx: None,
mux_ctrl, state: state::CommitAccepted { mpc_tls, keys, vm },
mux_fut,
mpc_tls,
keys,
vm,
},
}) })
} }
/// Rejects the proposed protocol configuration. /// Rejects the proposed protocol configuration.
#[instrument(parent = &self.span, level = "info", skip_all, err)] #[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn reject(self, msg: Option<&str>) -> Result<(), VerifierError> { pub async fn reject(mut self, msg: Option<&str>) -> Result<()> {
let state::CommitStart { let mut ctx = self
mux_ctrl, .ctx
mut mux_fut, .take()
mut ctx, .ok_or_else(|| Error::internal().with_msg("commitment protocol context was dropped"))?;
..
} = self.state;
mux_fut ctx.io_mut().send(Response::err(msg)).await.map_err(|e| {
.poll_with(ctx.io_mut().send(Response::err(msg))) Error::io()
.await?; .with_msg("commitment protocol failed to send rejection")
.with_source(e)
// Wait for the prover to correctly close the connection. })?;
if !mux_fut.is_complete() {
mux_ctrl.close();
mux_fut.await?;
}
Ok(()) Ok(())
} }
@@ -187,18 +178,16 @@ impl Verifier<state::CommitStart> {
impl Verifier<state::CommitAccepted> { impl Verifier<state::CommitAccepted> {
/// Runs the verifier until the TLS connection is closed. /// Runs the verifier until the TLS connection is closed.
#[instrument(parent = &self.span, level = "info", skip_all, err)] #[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn run(self) -> Result<Verifier<state::Committed>, VerifierError> { pub async fn run(self) -> Result<Verifier<state::Committed>> {
let state::CommitAccepted { let state::CommitAccepted { mpc_tls, vm, keys } = self.state;
mux_ctrl,
mut mux_fut,
mpc_tls,
vm,
keys,
} = self.state;
info!("starting MPC-TLS"); info!("starting MPC-TLS");
let (mut ctx, tls_transcript) = mux_fut.poll_with(mpc_tls.run()).await?; let (mut ctx, tls_transcript) = mpc_tls.run().await.map_err(|e| {
Error::internal()
.with_msg("mpc-tls execution failed")
.with_source(e)
})?;
info!("finished MPC-TLS"); info!("finished MPC-TLS");
@@ -207,10 +196,11 @@ impl Verifier<state::CommitAccepted> {
debug!("finalizing mpc"); debug!("finalizing mpc");
mux_fut vm.finalize(&mut ctx).await.map_err(|e| {
.poll_with(vm.finalize(&mut ctx)) Error::internal()
.await .with_msg("mpc finalization failed")
.map_err(VerifierError::mpc)?; .with_source(e)
})?;
debug!("mpc finalized"); debug!("mpc finalized");
} }
@@ -230,24 +220,32 @@ impl Verifier<state::CommitAccepted> {
*tls_transcript.version(), *tls_transcript.version(),
tls_transcript.recv().to_vec(), tls_transcript.recv().to_vec(),
) )
.map_err(VerifierError::zk)?; .map_err(|e| {
Error::internal()
.with_msg("tag verification setup failed")
.with_source(e)
})?;
mux_fut vm.execute_all(&mut ctx).await.map_err(|e| {
.poll_with(vm.execute_all(&mut ctx).map_err(VerifierError::zk)) Error::internal()
.await?; .with_msg("tag verification zk execution failed")
.with_source(e)
})?;
// Verify the tags. // Verify the tags.
// After the verification, the entire TLS trancript becomes // After the verification, the entire TLS trancript becomes
// authenticated from the verifier's perspective. // authenticated from the verifier's perspective.
tag_proof.verify().map_err(VerifierError::zk)?; tag_proof.verify().map_err(|e| {
Error::internal()
.with_msg("tag verification failed")
.with_source(e)
})?;
Ok(Verifier { Ok(Verifier {
config: self.config, config: self.config,
span: self.span, span: self.span,
ctx: Some(ctx),
state: state::Committed { state: state::Committed {
mux_ctrl,
mux_fut,
ctx,
vm, vm,
keys, keys,
tls_transcript, tls_transcript,
@@ -264,11 +262,12 @@ impl Verifier<state::Committed> {
/// Begins verification of statements from the prover. /// Begins verification of statements from the prover.
#[instrument(parent = &self.span, level = "info", skip_all, err)] #[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn verify(self) -> Result<Verifier<state::Verify>, VerifierError> { pub async fn verify(mut self) -> Result<Verifier<state::Verify>> {
let mut ctx = self
.ctx
.take()
.ok_or_else(|| Error::internal().with_msg("verification context was dropped"))?;
let state::Committed { let state::Committed {
mux_ctrl,
mut mux_fut,
mut ctx,
vm, vm,
keys, keys,
tls_transcript, tls_transcript,
@@ -278,17 +277,17 @@ impl Verifier<state::Committed> {
request, request,
handshake, handshake,
transcript, transcript,
} = mux_fut } = ctx.io_mut().expect_next().await.map_err(|e| {
.poll_with(ctx.io_mut().expect_next().map_err(VerifierError::from)) Error::io()
.await?; .with_msg("verification failed to receive prove request")
.with_source(e)
})?;
Ok(Verifier { Ok(Verifier {
config: self.config, config: self.config,
span: self.span, span: self.span,
ctx: Some(ctx),
state: state::Verify { state: state::Verify {
mux_ctrl,
mux_fut,
ctx,
vm, vm,
keys, keys,
tls_transcript, tls_transcript,
@@ -301,17 +300,7 @@ impl Verifier<state::Committed> {
/// Closes the connection with the prover. /// Closes the connection with the prover.
#[instrument(parent = &self.span, level = "info", skip_all, err)] #[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn close(self) -> Result<(), VerifierError> { pub async fn close(self) -> Result<()> {
let state::Committed {
mux_ctrl, mux_fut, ..
} = self.state;
// Wait for the prover to correctly close the connection.
if !mux_fut.is_complete() {
mux_ctrl.close();
mux_fut.await?;
}
Ok(()) Ok(())
} }
} }
@@ -323,13 +312,12 @@ impl Verifier<state::Verify> {
} }
/// Accepts the proving request. /// Accepts the proving request.
pub async fn accept( pub async fn accept(mut self) -> Result<(VerifierOutput, Verifier<state::Committed>)> {
self, let mut ctx = self
) -> Result<(VerifierOutput, Verifier<state::Committed>), VerifierError> { .ctx
.take()
.ok_or_else(|| Error::internal().with_msg("verification context was dropped"))?;
let state::Verify { let state::Verify {
mux_ctrl,
mut mux_fut,
mut ctx,
mut vm, mut vm,
keys, keys,
tls_transcript, tls_transcript,
@@ -338,13 +326,19 @@ impl Verifier<state::Verify> {
transcript, transcript,
} = self.state; } = self.state;
mux_fut.poll_with(ctx.io_mut().send(Response::ok())).await?; ctx.io_mut().send(Response::ok()).await.map_err(|e| {
Error::io()
.with_msg("verification failed to send acceptance")
.with_source(e)
})?;
let cert_verifier = let cert_verifier = ServerCertVerifier::new(self.config.root_store()).map_err(|e| {
ServerCertVerifier::new(self.config.root_store()).map_err(VerifierError::config)?; Error::config()
.with_msg("failed to create certificate verifier")
.with_source(e)
})?;
let output = mux_fut let output = verify::verify(
.poll_with(verify::verify(
&mut ctx, &mut ctx,
&mut vm, &mut vm,
&keys, &keys,
@@ -353,7 +347,7 @@ impl Verifier<state::Verify> {
request, request,
handshake, handshake,
transcript, transcript,
)) )
.await?; .await?;
Ok(( Ok((
@@ -361,10 +355,8 @@ impl Verifier<state::Verify> {
Verifier { Verifier {
config: self.config, config: self.config,
span: self.span, span: self.span,
ctx: Some(ctx),
state: state::Committed { state: state::Committed {
mux_ctrl,
mux_fut,
ctx,
vm, vm,
keys, keys,
tls_transcript, tls_transcript,
@@ -374,31 +366,29 @@ impl Verifier<state::Verify> {
} }
/// Rejects the proving request. /// Rejects the proving request.
pub async fn reject( pub async fn reject(mut self, msg: Option<&str>) -> Result<Verifier<state::Committed>> {
self, let mut ctx = self
msg: Option<&str>, .ctx
) -> Result<Verifier<state::Committed>, VerifierError> { .take()
.ok_or_else(|| Error::internal().with_msg("verification context was dropped"))?;
let state::Verify { let state::Verify {
mux_ctrl,
mut mux_fut,
mut ctx,
vm, vm,
keys, keys,
tls_transcript, tls_transcript,
.. ..
} = self.state; } = self.state;
mux_fut ctx.io_mut().send(Response::err(msg)).await.map_err(|e| {
.poll_with(ctx.io_mut().send(Response::err(msg))) Error::io()
.await?; .with_msg("verification failed to send rejection")
.with_source(e)
})?;
Ok(Verifier { Ok(Verifier {
config: self.config, config: self.config,
span: self.span, span: self.span,
ctx: Some(ctx),
state: state::Committed { state: state::Committed {
mux_ctrl,
mux_fut,
ctx,
vm, vm,
keys, keys,
tls_transcript, tls_transcript,

View File

@@ -1,113 +0,0 @@
use std::{error::Error, fmt};
use mpc_tls::MpcTlsError;
use crate::transcript_internal::commit::encoding::EncodingError;
/// Error for [`Verifier`](crate::verifier::Verifier).
#[derive(Debug, thiserror::Error)]
pub struct VerifierError {
kind: ErrorKind,
source: Option<Box<dyn Error + Send + Sync + 'static>>,
}
impl VerifierError {
fn new<E>(kind: ErrorKind, source: E) -> Self
where
E: Into<Box<dyn Error + Send + Sync + 'static>>,
{
Self {
kind,
source: Some(source.into()),
}
}
pub(crate) fn config<E>(source: E) -> Self
where
E: Into<Box<dyn Error + Send + Sync + 'static>>,
{
Self::new(ErrorKind::Config, source)
}
pub(crate) fn mpc<E>(source: E) -> Self
where
E: Into<Box<dyn Error + Send + Sync + 'static>>,
{
Self::new(ErrorKind::Mpc, source)
}
pub(crate) fn zk<E>(source: E) -> Self
where
E: Into<Box<dyn Error + Send + Sync + 'static>>,
{
Self::new(ErrorKind::Zk, source)
}
pub(crate) fn verify<E>(source: E) -> Self
where
E: Into<Box<dyn Error + Send + Sync + 'static>>,
{
Self::new(ErrorKind::Verify, source)
}
}
#[derive(Debug)]
enum ErrorKind {
Io,
Config,
Mpc,
Zk,
Commit,
Verify,
}
impl fmt::Display for VerifierError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("verifier error: ")?;
match self.kind {
ErrorKind::Io => f.write_str("io error")?,
ErrorKind::Config => f.write_str("config error")?,
ErrorKind::Mpc => f.write_str("mpc error")?,
ErrorKind::Zk => f.write_str("zk error")?,
ErrorKind::Commit => f.write_str("commit error")?,
ErrorKind::Verify => f.write_str("verification error")?,
}
if let Some(source) = &self.source {
write!(f, " caused by: {source}")?;
}
Ok(())
}
}
impl From<std::io::Error> for VerifierError {
fn from(e: std::io::Error) -> Self {
Self::new(ErrorKind::Io, e)
}
}
impl From<uid_mux::yamux::ConnectionError> for VerifierError {
fn from(e: uid_mux::yamux::ConnectionError) -> Self {
Self::new(ErrorKind::Io, e)
}
}
impl From<mpz_common::ContextError> for VerifierError {
fn from(e: mpz_common::ContextError) -> Self {
Self::new(ErrorKind::Mpc, e)
}
}
impl From<MpcTlsError> for VerifierError {
fn from(e: MpcTlsError) -> Self {
Self::new(ErrorKind::Mpc, e)
}
}
impl From<EncodingError> for VerifierError {
fn from(e: EncodingError) -> Self {
Self::new(ErrorKind::Commit, e)
}
}

View File

@@ -2,9 +2,7 @@
use std::sync::Arc; use std::sync::Arc;
use crate::mux::{MuxControl, MuxFuture};
use mpc_tls::{MpcTlsFollower, SessionKeys}; use mpc_tls::{MpcTlsFollower, SessionKeys};
use mpz_common::Context;
use tlsn_core::{ use tlsn_core::{
config::{prove::ProveRequest, tls_commit::TlsCommitRequest}, config::{prove::ProveRequest, tls_commit::TlsCommitRequest},
connection::{HandshakeData, ServerName}, connection::{HandshakeData, ServerName},
@@ -25,9 +23,6 @@ opaque_debug::implement!(Initialized);
/// State after receiving protocol configuration from the prover. /// State after receiving protocol configuration from the prover.
pub struct CommitStart { pub struct CommitStart {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) ctx: Context,
pub(crate) request: TlsCommitRequest, pub(crate) request: TlsCommitRequest,
} }
@@ -36,8 +31,6 @@ opaque_debug::implement!(CommitStart);
/// State after accepting the proposed TLS commitment protocol configuration and /// State after accepting the proposed TLS commitment protocol configuration and
/// performing preprocessing. /// performing preprocessing.
pub struct CommitAccepted { pub struct CommitAccepted {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) mpc_tls: MpcTlsFollower, pub(crate) mpc_tls: MpcTlsFollower,
pub(crate) keys: SessionKeys, pub(crate) keys: SessionKeys,
pub(crate) vm: Arc<Mutex<Deap<VerifierMpc, VerifierZk>>>, pub(crate) vm: Arc<Mutex<Deap<VerifierMpc, VerifierZk>>>,
@@ -47,9 +40,6 @@ opaque_debug::implement!(CommitAccepted);
/// State after the TLS transcript has been committed. /// State after the TLS transcript has been committed.
pub struct Committed { pub struct Committed {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) ctx: Context,
pub(crate) vm: VerifierZk, pub(crate) vm: VerifierZk,
pub(crate) keys: SessionKeys, pub(crate) keys: SessionKeys,
pub(crate) tls_transcript: TlsTranscript, pub(crate) tls_transcript: TlsTranscript,
@@ -59,9 +49,6 @@ opaque_debug::implement!(Committed);
/// State after receiving a proving request. /// State after receiving a proving request.
pub struct Verify { pub struct Verify {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) ctx: Context,
pub(crate) vm: VerifierZk, pub(crate) vm: VerifierZk,
pub(crate) keys: SessionKeys, pub(crate) keys: SessionKeys,
pub(crate) tls_transcript: TlsTranscript, pub(crate) tls_transcript: TlsTranscript,

View File

@@ -14,19 +14,12 @@ use tlsn_core::{
}; };
use crate::{ use crate::{
transcript_internal::{ Error, Result,
TranscriptRefs, transcript_internal::{TranscriptRefs, auth::verify_plaintext, commit::hash::verify_hash},
auth::verify_plaintext,
commit::{
encoding::{self, KeyStore},
hash::verify_hash,
},
},
verifier::VerifierError,
}; };
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>( pub(crate) async fn verify<T: Vm<Binary> + Send + Sync>(
ctx: &mut Context, ctx: &mut Context,
vm: &mut T, vm: &mut T,
keys: &SessionKeys, keys: &SessionKeys,
@@ -35,35 +28,33 @@ pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
request: ProveRequest, request: ProveRequest,
handshake: Option<(ServerName, HandshakeData)>, handshake: Option<(ServerName, HandshakeData)>,
transcript: Option<PartialTranscript>, transcript: Option<PartialTranscript>,
) -> Result<VerifierOutput, VerifierError> { ) -> Result<VerifierOutput> {
let ciphertext_sent = collect_ciphertext(tls_transcript.sent()); let ciphertext_sent = collect_ciphertext(tls_transcript.sent());
let ciphertext_recv = collect_ciphertext(tls_transcript.recv()); let ciphertext_recv = collect_ciphertext(tls_transcript.recv());
let transcript = if let Some((auth_sent, auth_recv)) = request.reveal() { let transcript = if let Some((auth_sent, auth_recv)) = request.reveal() {
let Some(transcript) = transcript else { let Some(transcript) = transcript else {
return Err(VerifierError::verify( return Err(Error::internal().with_msg(
"prover requested to reveal data but did not send transcript", "verification failed: prover requested to reveal data but did not send transcript",
)); ));
}; };
if transcript.len_sent() != ciphertext_sent.len() if transcript.len_sent() != ciphertext_sent.len()
|| transcript.len_received() != ciphertext_recv.len() || transcript.len_received() != ciphertext_recv.len()
{ {
return Err(VerifierError::verify( return Err(
"prover sent transcript with incorrect length", Error::internal().with_msg("verification failed: transcript length mismatch")
)); );
} }
if transcript.sent_authed() != auth_sent { if transcript.sent_authed() != auth_sent {
return Err(VerifierError::verify( return Err(Error::internal().with_msg("verification failed: sent auth data mismatch"));
"prover sent transcript with incorrect sent authed data",
));
} }
if transcript.received_authed() != auth_recv { if transcript.received_authed() != auth_recv {
return Err(VerifierError::verify( return Err(
"prover sent transcript with incorrect received authed data", Error::internal().with_msg("verification failed: received auth data mismatch")
)); );
} }
transcript transcript
@@ -79,7 +70,11 @@ pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
tls_transcript.server_ephemeral_key(), tls_transcript.server_ephemeral_key(),
&name, &name,
) )
.map_err(VerifierError::verify)?; .map_err(|e| {
Error::internal()
.with_msg("verification failed: certificate verification failed")
.with_source(e)
})?;
Some(name) Some(name)
} else { } else {
@@ -94,11 +89,6 @@ pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
Direction::Sent => commit_sent.union_mut(idx), Direction::Sent => commit_sent.union_mut(idx),
Direction::Received => commit_recv.union_mut(idx), Direction::Received => commit_recv.union_mut(idx),
}); });
if let Some((sent, recv)) = commit_config.encoding() {
commit_sent.union_mut(sent);
commit_recv.union_mut(recv);
}
} }
let (sent_refs, sent_proof) = verify_plaintext( let (sent_refs, sent_proof) = verify_plaintext(
@@ -114,7 +104,11 @@ pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
transcript.sent_authed(), transcript.sent_authed(),
&commit_sent, &commit_sent,
) )
.map_err(VerifierError::zk)?; .map_err(|e| {
Error::internal()
.with_msg("verification failed during sent plaintext verification")
.with_source(e)
})?;
let (recv_refs, recv_proof) = verify_plaintext( let (recv_refs, recv_proof) = verify_plaintext(
vm, vm,
keys.server_write_key, keys.server_write_key,
@@ -128,7 +122,11 @@ pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
transcript.received_authed(), transcript.received_authed(),
&commit_recv, &commit_recv,
) )
.map_err(VerifierError::zk)?; .map_err(|e| {
Error::internal()
.with_msg("verification failed during received plaintext verification")
.with_source(e)
})?;
let transcript_refs = TranscriptRefs { let transcript_refs = TranscriptRefs {
sent: sent_refs, sent: sent_refs,
@@ -141,36 +139,37 @@ pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
&& commit_config.has_hash() && commit_config.has_hash()
{ {
hash_commitments = Some( hash_commitments = Some(
verify_hash(vm, &transcript_refs, commit_config.iter_hash().cloned()) verify_hash(vm, &transcript_refs, commit_config.iter_hash().cloned()).map_err(|e| {
.map_err(VerifierError::verify)?, Error::internal()
.with_msg("verification failed during hash commitment setup")
.with_source(e)
})?,
); );
} }
vm.execute_all(ctx).await.map_err(VerifierError::zk)?; vm.execute_all(ctx).await.map_err(|e| {
Error::internal()
.with_msg("verification failed during zk execution")
.with_source(e)
})?;
sent_proof.verify().map_err(VerifierError::verify)?; sent_proof.verify().map_err(|e| {
recv_proof.verify().map_err(VerifierError::verify)?; Error::internal()
.with_msg("verification failed: sent plaintext proof invalid")
let mut encoder_secret = None; .with_source(e)
if let Some(commit_config) = request.transcript_commit() })?;
&& let Some((sent, recv)) = commit_config.encoding() recv_proof.verify().map_err(|e| {
{ Error::internal()
let sent_map = transcript_refs .with_msg("verification failed: received plaintext proof invalid")
.sent .with_source(e)
.index(sent) })?;
.expect("ranges were authenticated");
let recv_map = transcript_refs
.recv
.index(recv)
.expect("ranges were authenticated");
let (secret, commitment) = encoding::transfer(ctx, vm, &sent_map, &recv_map).await?;
encoder_secret = Some(secret);
transcript_commitments.push(TranscriptCommitment::Encoding(commitment));
}
if let Some(hash_commitments) = hash_commitments { if let Some(hash_commitments) = hash_commitments {
for commitment in hash_commitments.try_recv().map_err(VerifierError::verify)? { for commitment in hash_commitments.try_recv().map_err(|e| {
Error::internal()
.with_msg("verification failed during hash commitment finalization")
.with_source(e)
})? {
transcript_commitments.push(TranscriptCommitment::Hash(commitment)); transcript_commitments.push(TranscriptCommitment::Hash(commitment));
} }
} }
@@ -178,7 +177,6 @@ pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
Ok(VerifierOutput { Ok(VerifierOutput {
server_name, server_name,
transcript: request.reveal().is_some().then_some(transcript), transcript: request.reveal().is_some().then_some(transcript),
encoder_secret,
transcript_commitments, transcript_commitments,
}) })
} }

View File

@@ -1,6 +1,6 @@
use futures::{AsyncReadExt, AsyncWriteExt}; use futures::{AsyncReadExt, AsyncWriteExt};
use rangeset::set::RangeSet;
use tlsn::{ use tlsn::{
Session,
config::{ config::{
prove::ProveConfig, prove::ProveConfig,
prover::ProverConfig, prover::ProverConfig,
@@ -9,12 +9,9 @@ use tlsn::{
verifier::VerifierConfig, verifier::VerifierConfig,
}, },
connection::ServerName, connection::ServerName,
hash::{HashAlgId, HashProvider}, hash::HashAlgId,
prover::Prover, prover::Prover,
transcript::{ transcript::{Direction, Transcript, TranscriptCommitConfig, TranscriptCommitmentKind},
Direction, Transcript, TranscriptCommitConfig, TranscriptCommitment,
TranscriptCommitmentKind, TranscriptSecret,
},
verifier::{Verifier, VerifierOutput}, verifier::{Verifier, VerifierOutput},
webpki::{CertificateDer, RootCertStore}, webpki::{CertificateDer, RootCertStore},
}; };
@@ -22,9 +19,7 @@ use tlsn_core::ProverOutput;
use tlsn_server_fixture::bind; use tlsn_server_fixture::bind;
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN}; use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::TokioAsyncReadCompatExt; use tokio_util::compat::TokioAsyncReadCompatExt;
use tracing::instrument;
// Maximum number of bytes that can be sent from prover to server // Maximum number of bytes that can be sent from prover to server
const MAX_SENT_DATA: usize = 1 << 12; const MAX_SENT_DATA: usize = 1 << 12;
@@ -41,9 +36,34 @@ async fn test() {
tracing_subscriber::fmt::init(); tracing_subscriber::fmt::init();
let (socket_0, socket_1) = tokio::io::duplex(2 << 23); let (socket_0, socket_1) = tokio::io::duplex(2 << 23);
let mut session_p = Session::new(socket_0.compat());
let mut session_v = Session::new(socket_1.compat());
let ((full_transcript, prover_output), verifier_output) = let prover = session_p
tokio::join!(prover(socket_0), verifier(socket_1)); .new_prover(ProverConfig::builder().build().unwrap())
.unwrap();
let verifier = session_v
.new_verifier(
VerifierConfig::builder()
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.build()
.unwrap(),
)
.unwrap();
let (session_p_driver, session_p_handle) = session_p.split();
let (session_v_driver, session_v_handle) = session_v.split();
tokio::spawn(session_p_driver);
tokio::spawn(session_v_driver);
let ((_full_transcript, _prover_output), verifier_output) =
tokio::join!(run_prover(prover), run_verifier(verifier));
session_p_handle.close();
session_v_handle.close();
let partial_transcript = verifier_output.transcript.unwrap(); let partial_transcript = verifier_output.transcript.unwrap();
let ServerName::Dns(server_name) = verifier_output.server_name.unwrap(); let ServerName::Dns(server_name) = verifier_output.server_name.unwrap();
@@ -58,61 +78,14 @@ async fn test() {
partial_transcript.received_authed().iter().next().unwrap(), partial_transcript.received_authed().iter().next().unwrap(),
0..10 0..10
); );
let encoding_tree = prover_output
.transcript_secrets
.iter()
.find_map(|secret| {
if let TranscriptSecret::Encoding(tree) = secret {
Some(tree)
} else {
None
}
})
.unwrap();
let encoding_commitment = prover_output
.transcript_commitments
.iter()
.find_map(|commitment| {
if let TranscriptCommitment::Encoding(commitment) = commitment {
Some(commitment)
} else {
None
}
})
.unwrap();
let prove_sent = RangeSet::from(1..full_transcript.sent().len() - 1);
let prove_recv = RangeSet::from(1..full_transcript.received().len() - 1);
let idxs = [
(Direction::Sent, prove_sent.clone()),
(Direction::Received, prove_recv.clone()),
];
let proof = encoding_tree.proof(idxs.iter()).unwrap();
let (auth_sent, auth_recv) = proof
.verify_with_provider(
&HashProvider::default(),
&verifier_output.encoder_secret.unwrap(),
encoding_commitment,
full_transcript.sent(),
full_transcript.received(),
)
.unwrap();
assert_eq!(auth_sent, prove_sent);
assert_eq!(auth_recv, prove_recv);
} }
#[instrument(skip(verifier_socket))] async fn run_prover(prover: Prover) -> (Transcript, ProverOutput) {
async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
verifier_socket: T,
) -> (Transcript, ProverOutput) {
let (client_socket, server_socket) = tokio::io::duplex(2 << 16); let (client_socket, server_socket) = tokio::io::duplex(2 << 16);
let server_task = tokio::spawn(bind(server_socket.compat())); let server_task = tokio::spawn(bind(server_socket.compat()));
let prover = Prover::new(ProverConfig::builder().build().unwrap()) let prover = prover
.commit( .commit(
TlsCommitConfig::builder() TlsCommitConfig::builder()
.protocol( .protocol(
@@ -126,7 +99,6 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
) )
.build() .build()
.unwrap(), .unwrap(),
verifier_socket.compat(),
) )
.await .await
.unwrap(); .unwrap();
@@ -163,12 +135,9 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
let mut builder = TranscriptCommitConfig::builder(prover.transcript()); let mut builder = TranscriptCommitConfig::builder(prover.transcript());
for kind in [ let kind = TranscriptCommitmentKind::Hash {
TranscriptCommitmentKind::Encoding,
TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256, alg: HashAlgId::SHA256,
}, };
] {
builder builder
.commit_with_kind(&(0..sent_tx_len), Direction::Sent, kind) .commit_with_kind(&(0..sent_tx_len), Direction::Sent, kind)
.unwrap(); .unwrap();
@@ -181,7 +150,6 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
builder builder
.commit_with_kind(&(1..recv_tx_len - 1), Direction::Received, kind) .commit_with_kind(&(1..recv_tx_len - 1), Direction::Received, kind)
.unwrap(); .unwrap();
}
let transcript_commit = builder.build().unwrap(); let transcript_commit = builder.build().unwrap();
@@ -202,21 +170,9 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
(transcript, output) (transcript, output)
} }
#[instrument(skip(socket))] async fn run_verifier(verifier: Verifier) -> VerifierOutput {
async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
socket: T,
) -> VerifierOutput {
let verifier = Verifier::new(
VerifierConfig::builder()
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.build()
.unwrap(),
);
let verifier = verifier let verifier = verifier
.commit(socket.compat()) .commit()
.await .await
.unwrap() .unwrap()
.accept() .accept()

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "tlsn-wasm" name = "tlsn-wasm"
version = "0.1.0-alpha.14-pre" version = "0.1.0-alpha.14"
edition = "2021" edition = "2021"
repository = "https://github.com/tlsnotary/tlsn.git" repository = "https://github.com/tlsnotary/tlsn.git"
description = "A core WebAssembly package for TLSNotary." description = "A core WebAssembly package for TLSNotary."
@@ -26,6 +26,7 @@ tlsn-server-fixture-certs = { workspace = true }
tlsn-tls-client-async = { workspace = true } tlsn-tls-client-async = { workspace = true }
tlsn-tls-core = { workspace = true } tlsn-tls-core = { workspace = true }
async_io_stream = { workspace = true }
bincode = { workspace = true } bincode = { workspace = true }
console_error_panic_hook = { version = "0.1" } console_error_panic_hook = { version = "0.1" }
enum-try-as-inner = { workspace = true } enum-try-as-inner = { workspace = true }

View File

@@ -16,6 +16,7 @@ use tlsn::{
connection::ServerName, connection::ServerName,
prover::{state, Prover}, prover::{state, Prover},
webpki::{CertificateDer, PrivateKeyDer, RootCertStore}, webpki::{CertificateDer, PrivateKeyDer, RootCertStore},
Session, SessionHandle,
}; };
use tracing::info; use tracing::info;
use wasm_bindgen::{prelude::*, JsError}; use wasm_bindgen::{prelude::*, JsError};
@@ -32,16 +33,34 @@ pub struct JsProver {
state: State, state: State,
} }
#[derive(Debug, EnumTryAsInner)] #[derive(EnumTryAsInner)]
#[derive_err(Debug)] #[derive_err(Debug)]
enum State { enum State {
Initialized(Prover<state::Initialized>), Initialized,
CommitAccepted(Prover<state::CommitAccepted>), CommitAccepted {
Committed(Prover<state::Committed>), prover: Prover<state::CommitAccepted>,
handle: SessionHandle,
},
Committed {
prover: Prover<state::Committed>,
handle: SessionHandle,
},
Complete, Complete,
Error, Error,
} }
impl std::fmt::Debug for State {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
State::Initialized => write!(f, "Initialized"),
State::CommitAccepted { .. } => write!(f, "CommitAccepted"),
State::Committed { .. } => write!(f, "Committed"),
State::Complete => write!(f, "Complete"),
State::Error => write!(f, "Error"),
}
}
}
impl State { impl State {
fn take(&mut self) -> Self { fn take(&mut self) -> Self {
std::mem::replace(self, State::Error) std::mem::replace(self, State::Error)
@@ -54,9 +73,7 @@ impl JsProver {
pub fn new(config: ProverConfig) -> Result<JsProver> { pub fn new(config: ProverConfig) -> Result<JsProver> {
Ok(JsProver { Ok(JsProver {
config, config,
state: State::Initialized(Prover::new( state: State::Initialized,
tlsn::config::prover::ProverConfig::builder().build()?,
)),
}) })
} }
@@ -65,9 +82,11 @@ impl JsProver {
/// This performs all MPC setup prior to establishing the connection to the /// This performs all MPC setup prior to establishing the connection to the
/// application server. /// application server.
pub async fn setup(&mut self, verifier_url: &str) -> Result<()> { pub async fn setup(&mut self, verifier_url: &str) -> Result<()> {
let prover = self.state.take().try_into_initialized()?; let State::Initialized = self.state.take() else {
return Err(JsError::new("prover is not in initialized state"));
};
let config = TlsCommitConfig::builder() let tls_commit_config = TlsCommitConfig::builder()
.protocol({ .protocol({
let mut builder = MpcTlsConfig::builder() let mut builder = MpcTlsConfig::builder()
.max_sent_data(self.config.max_sent_data) .max_sent_data(self.config.max_sent_data)
@@ -99,9 +118,23 @@ impl JsProver {
info!("connected to verifier"); info!("connected to verifier");
let prover = prover.commit(config, verifier_conn.into_io()).await?; let session = Session::new(verifier_conn.into_io());
let (driver, mut handle) = session.split();
spawn_local(async move {
if let Err(e) = driver.await {
tracing::error!("session driver error: {e}");
}
});
self.state = State::CommitAccepted(prover); let prover_config = tlsn::config::prover::ProverConfig::builder().build()?;
let prover = handle.new_prover(prover_config)?;
let prover = prover
.commit(tls_commit_config)
.await
.map_err(|e| JsError::new(&e.to_string()))?;
self.state = State::CommitAccepted { prover, handle };
Ok(()) Ok(())
} }
@@ -112,7 +145,9 @@ impl JsProver {
ws_proxy_url: &str, ws_proxy_url: &str,
request: HttpRequest, request: HttpRequest,
) -> Result<HttpResponse> { ) -> Result<HttpResponse> {
let prover = self.state.take().try_into_commit_accepted()?; let State::CommitAccepted { prover, handle } = self.state.take() else {
return Err(JsError::new("prover is not in commit accepted state"));
};
let mut builder = TlsClientConfig::builder() let mut builder = TlsClientConfig::builder()
.server_name(ServerName::Dns( .server_name(ServerName::Dns(
@@ -140,7 +175,7 @@ impl JsProver {
builder = builder.client_auth((certs, key)); builder = builder.client_auth((certs, key));
} }
let config = builder.build()?; let tls_config = builder.build()?;
info!("connecting to server"); info!("connecting to server");
@@ -148,32 +183,39 @@ impl JsProver {
info!("connected to server"); info!("connected to server");
let (tls_conn, prover_fut) = prover.connect(config, server_conn.into_io()).await?; let (tls_conn, prover_fut) = prover
.connect(tls_config, server_conn.into_io())
.await
.map_err(|e| JsError::new(&e.to_string()))?;
info!("sending request"); info!("sending request");
let (response, prover) = futures::try_join!( let (response, prover) = futures::try_join!(
send_request(tls_conn, request), send_request(tls_conn, request),
prover_fut.map_err(Into::into) prover_fut.map_err(|e| JsError::new(&e.to_string()))
)?; )?;
info!("response received"); info!("response received");
self.state = State::Committed(prover); self.state = State::Committed { prover, handle };
Ok(response) Ok(response)
} }
/// Returns the transcript. /// Returns the transcript.
pub fn transcript(&self) -> Result<Transcript> { pub fn transcript(&self) -> Result<Transcript> {
let prover = self.state.try_as_committed()?; let State::Committed { prover, .. } = &self.state else {
return Err(JsError::new("prover is not in committed state"));
};
Ok(Transcript::from(prover.transcript())) Ok(Transcript::from(prover.transcript()))
} }
/// Reveals data to the verifier and finalizes the protocol. /// Reveals data to the verifier and finalizes the protocol.
pub async fn reveal(&mut self, reveal: Reveal) -> Result<()> { pub async fn reveal(&mut self, reveal: Reveal) -> Result<()> {
let mut prover = self.state.take().try_into_committed()?; let State::Committed { mut prover, handle } = self.state.take() else {
return Err(JsError::new("prover is not in committed state"));
};
info!("revealing data"); info!("revealing data");
@@ -193,8 +235,16 @@ impl JsProver {
let config = builder.build()?; let config = builder.build()?;
prover.prove(&config).await?; prover
prover.close().await?; .prove(&config)
.await
.map_err(|e| JsError::new(&e.to_string()))?;
prover
.close()
.await
.map_err(|e| JsError::new(&e.to_string()))?;
handle.close();
info!("Finalized"); info!("Finalized");

View File

@@ -9,10 +9,12 @@ use tlsn::{
transcript::ContentType, transcript::ContentType,
verifier::{state, Verifier}, verifier::{state, Verifier},
webpki::RootCertStore, webpki::RootCertStore,
Session, SessionHandle,
}; };
use tracing::info; use tracing::info;
use wasm_bindgen::prelude::*; use wasm_bindgen::prelude::*;
use ws_stream_wasm::{WsMeta, WsStream}; use wasm_bindgen_futures::spawn_local;
use ws_stream_wasm::WsMeta;
use crate::types::VerifierOutput; use crate::types::VerifierOutput;
@@ -27,15 +29,23 @@ pub struct JsVerifier {
#[derive(EnumTryAsInner)] #[derive(EnumTryAsInner)]
#[derive_err(Debug)] #[derive_err(Debug)]
enum State { enum State {
Initialized(Verifier<state::Initialized>), Initialized,
Connected((Verifier<state::Initialized>, WsStream)), Connected {
verifier: Verifier<state::Initialized>,
handle: SessionHandle,
},
Complete, Complete,
Error, Error,
} }
impl std::fmt::Debug for State { impl std::fmt::Debug for State {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "State") match self {
State::Initialized => write!(f, "Initialized"),
State::Connected { .. } => write!(f, "Connected"),
State::Complete => write!(f, "Complete"),
State::Error => write!(f, "Error"),
}
} }
} }
@@ -49,19 +59,17 @@ impl State {
impl JsVerifier { impl JsVerifier {
#[wasm_bindgen(constructor)] #[wasm_bindgen(constructor)]
pub fn new(config: VerifierConfig) -> JsVerifier { pub fn new(config: VerifierConfig) -> JsVerifier {
let tlsn_config = tlsn::config::verifier::VerifierConfig::builder()
.root_store(RootCertStore::mozilla())
.build()
.unwrap();
JsVerifier { JsVerifier {
state: State::Initialized(Verifier::new(tlsn_config)), state: State::Initialized,
config, config,
} }
} }
/// Connect to the prover. /// Connect to the prover.
pub async fn connect(&mut self, prover_url: &str) -> Result<()> { pub async fn connect(&mut self, prover_url: &str) -> Result<()> {
let verifier = self.state.take().try_into_initialized()?; let State::Initialized = self.state.take() else {
return Err(JsError::new("verifier is not in initialized state"));
};
info!("Connecting to prover"); info!("Connecting to prover");
@@ -69,40 +77,75 @@ impl JsVerifier {
info!("Connected to prover"); info!("Connected to prover");
self.state = State::Connected((verifier, prover_conn)); let session = Session::new(prover_conn.into_io());
let (driver, mut handle) = session.split();
spawn_local(async move {
if let Err(e) = driver.await {
tracing::error!("session driver error: {e}");
}
});
let verifier_config = tlsn::config::verifier::VerifierConfig::builder()
.root_store(RootCertStore::mozilla())
.build()
.map_err(|e| JsError::new(&e.to_string()))?;
let verifier = handle
.new_verifier(verifier_config)
.map_err(|e| JsError::new(&e.to_string()))?;
self.state = State::Connected { verifier, handle };
Ok(()) Ok(())
} }
/// Verifies the connection and finalizes the protocol. /// Verifies the connection and finalizes the protocol.
pub async fn verify(&mut self) -> Result<VerifierOutput> { pub async fn verify(&mut self) -> Result<VerifierOutput> {
let (verifier, prover_conn) = self.state.take().try_into_connected()?; let State::Connected { verifier, handle } = self.state.take() else {
return Err(JsError::new("verifier is not in connected state"));
};
let verifier = verifier.commit(prover_conn.into_io()).await?; let max_sent_data = self.config.max_sent_data;
let max_recv_data = self.config.max_recv_data;
let max_sent_records = self.config.max_sent_records;
let max_recv_records_online = self.config.max_recv_records_online;
let verifier = verifier
.commit()
.await
.map_err(|e| JsError::new(&e.to_string()))?;
let request = verifier.request(); let request = verifier.request();
let TlsCommitProtocolConfig::Mpc(mpc_tls_config) = request.protocol() else { let TlsCommitProtocolConfig::Mpc(mpc_tls_config) = request.protocol() else {
unimplemented!("only MPC protocol is supported"); return Err(JsError::new("only MPC protocol is supported"));
}; };
let reject = if mpc_tls_config.max_sent_data() > self.config.max_sent_data { let reject = if mpc_tls_config.max_sent_data() > max_sent_data {
Some("max_sent_data is too large") Some("max_sent_data is too large")
} else if mpc_tls_config.max_recv_data() > self.config.max_recv_data { } else if mpc_tls_config.max_recv_data() > max_recv_data {
Some("max_recv_data is too large") Some("max_recv_data is too large")
} else if mpc_tls_config.max_sent_records() > self.config.max_sent_records { } else if mpc_tls_config.max_sent_records() > max_sent_records {
Some("max_sent_records is too large") Some("max_sent_records is too large")
} else if mpc_tls_config.max_recv_records_online() > self.config.max_recv_records_online { } else if mpc_tls_config.max_recv_records_online() > max_recv_records_online {
Some("max_recv_records_online is too large") Some("max_recv_records_online is too large")
} else { } else {
None None
}; };
if reject.is_some() { if reject.is_some() {
verifier.reject(reject).await?; verifier
.reject(reject)
.await
.map_err(|e| JsError::new(&e.to_string()))?;
return Err(JsError::new("protocol configuration rejected")); return Err(JsError::new("protocol configuration rejected"));
} }
let verifier = verifier.accept().await?.run().await?; let verifier = verifier
.accept()
.await
.map_err(|e| JsError::new(&e.to_string()))?
.run()
.await
.map_err(|e| JsError::new(&e.to_string()))?;
let sent = verifier let sent = verifier
.tls_transcript() .tls_transcript()
@@ -129,8 +172,19 @@ impl JsVerifier {
}, },
}; };
let (output, verifier) = verifier.verify().await?.accept().await?; let (output, verifier) = verifier
verifier.close().await?; .verify()
.await
.map_err(|e| JsError::new(&e.to_string()))?
.accept()
.await
.map_err(|e| JsError::new(&e.to_string()))?;
verifier
.close()
.await
.map_err(|e| JsError::new(&e.to_string()))?;
handle.close();
self.state = State::Complete; self.state = State::Complete;
@@ -139,8 +193,8 @@ impl JsVerifier {
let ServerName::Dns(name) = name; let ServerName::Dns(name) = name;
name.to_string() name.to_string()
}), }),
connection_info: connection_info.into(), connection_info: crate::types::ConnectionInfo::from(connection_info),
transcript: output.transcript.map(|t| t.into()), transcript: output.transcript.map(crate::types::PartialTranscript::from),
}) })
} }
} }