Compare commits


4 Commits

Author SHA1 Message Date
Hendrik Eeckhaut b76775fc7c correction + legend placement 2025-12-23 15:11:35 +01:00
Hendrik Eeckhaut 72041d1f07 export dark svg 2025-12-23 14:47:19 +01:00
Hendrik Eeckhaut ac1df8fc75 Allow plotting multiple data runs 2025-12-23 14:31:54 +01:00
Hendrik Eeckhaut 3cb7c5c0b4 Working on benchmark plots 2025-12-23 14:07:39 +01:00
61 changed files with 4786 additions and 1460 deletions

Cargo.lock (generated, 1708 lines changed): file diff suppressed because it is too large.

View File

@@ -53,7 +53,6 @@ tlsn-formats = { path = "crates/formats" }
tlsn-hmac-sha256 = { path = "crates/components/hmac-sha256" }
tlsn-key-exchange = { path = "crates/components/key-exchange" }
tlsn-mpc-tls = { path = "crates/mpc-tls" }
tlsn-mux = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "d9facb6" }
tlsn-server-fixture = { path = "crates/server-fixture/server" }
tlsn-server-fixture-certs = { path = "crates/server-fixture/certs" }
tlsn-tls-backend = { path = "crates/tls/backend" }
@@ -67,25 +66,26 @@ tlsn-harness-runner = { path = "crates/harness/runner" }
tlsn-wasm = { path = "crates/wasm" }
tlsn = { path = "crates/tlsn" }
mpz-circuits = { git = "https://github.com/privacy-ethereum/mpz", rev = "d9baf0f" }
mpz-circuits-data = { git = "https://github.com/privacy-ethereum/mpz", rev = "d9baf0f" }
mpz-memory-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "d9baf0f" }
mpz-common = { git = "https://github.com/privacy-ethereum/mpz", rev = "d9baf0f" }
mpz-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "d9baf0f" }
mpz-vm-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "d9baf0f" }
mpz-garble = { git = "https://github.com/privacy-ethereum/mpz", rev = "d9baf0f" }
mpz-garble-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "d9baf0f" }
mpz-ole = { git = "https://github.com/privacy-ethereum/mpz", rev = "d9baf0f" }
mpz-ot = { git = "https://github.com/privacy-ethereum/mpz", rev = "d9baf0f" }
mpz-share-conversion = { git = "https://github.com/privacy-ethereum/mpz", rev = "d9baf0f" }
mpz-fields = { git = "https://github.com/privacy-ethereum/mpz", rev = "d9baf0f" }
mpz-zk = { git = "https://github.com/privacy-ethereum/mpz", rev = "d9baf0f" }
mpz-hash = { git = "https://github.com/privacy-ethereum/mpz", rev = "d9baf0f" }
mpz-ideal-vm = { git = "https://github.com/privacy-ethereum/mpz", rev = "d9baf0f" }
mpz-circuits = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-circuits-data = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-memory-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-common = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-vm-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-garble = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-garble-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-ole = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-ot = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-share-conversion = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-fields = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-zk = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-hash = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
mpz-ideal-vm = { git = "https://github.com/privacy-ethereum/mpz", rev = "9c343f8" }
rangeset = { version = "0.4" }
serio = { version = "0.2" }
spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6f1a934" }
uid-mux = { version = "0.2" }
websocket-relay = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6f1a934" }
aead = { version = "0.4" }

View File

@@ -27,7 +27,6 @@ alloy-primitives = { version = "1.3.1", default-features = false }
alloy-signer = { version = "1.0", default-features = false }
alloy-signer-local = { version = "1.0", default-features = false }
rand06-compat = { workspace = true }
rangeset = { workspace = true }
rstest = { workspace = true }
tlsn-core = { workspace = true, features = ["fixtures"] }
tlsn-data-fixtures = { workspace = true }

View File

@@ -5,7 +5,7 @@ use rand::{Rng, rng};
use tlsn_core::{
connection::{ConnectionInfo, ServerEphemKey},
hash::HashAlgId,
transcript::TranscriptCommitment,
transcript::{TranscriptCommitment, encoding::EncoderSecret},
};
use crate::{
@@ -25,6 +25,7 @@ pub struct Sign {
connection_info: Option<ConnectionInfo>,
server_ephemeral_key: Option<ServerEphemKey>,
cert_commitment: ServerCertCommitment,
encoder_secret: Option<EncoderSecret>,
extensions: Vec<Extension>,
transcript_commitments: Vec<TranscriptCommitment>,
}
@@ -86,6 +87,7 @@ impl<'a> AttestationBuilder<'a, Accept> {
connection_info: None,
server_ephemeral_key: None,
cert_commitment,
encoder_secret: None,
transcript_commitments: Vec::new(),
extensions,
},
@@ -106,6 +108,12 @@ impl AttestationBuilder<'_, Sign> {
self
}
/// Sets the secret for encoding commitments.
pub fn encoder_secret(&mut self, secret: EncoderSecret) -> &mut Self {
self.state.encoder_secret = Some(secret);
self
}
/// Adds an extension to the attestation.
pub fn extension(&mut self, extension: Extension) -> &mut Self {
self.state.extensions.push(extension);
@@ -129,6 +137,7 @@ impl AttestationBuilder<'_, Sign> {
connection_info,
server_ephemeral_key,
cert_commitment,
encoder_secret,
extensions,
transcript_commitments,
} = self.state;
@@ -159,6 +168,7 @@ impl AttestationBuilder<'_, Sign> {
AttestationBuilderError::new(ErrorKind::Field, "handshake data was not set")
})?),
cert_commitment: field_id.next(cert_commitment),
encoder_secret: encoder_secret.map(|secret| field_id.next(secret)),
extensions: extensions
.into_iter()
.map(|extension| field_id.next(extension))
@@ -243,7 +253,8 @@ mod test {
use rstest::{fixture, rstest};
use tlsn_core::{
connection::{CertBinding, CertBindingV1_2},
fixtures::ConnectionFixture,
fixtures::{ConnectionFixture, encoding_provider},
hash::Blake3,
transcript::Transcript,
};
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
@@ -274,7 +285,13 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture(transcript, connection, Vec::new());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection,
Blake3::default(),
Vec::new(),
);
let attestation_config = AttestationConfig::builder()
.supported_signature_algs([SignatureAlgId::SECP256R1])
@@ -293,7 +310,13 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture(transcript, connection, Vec::new());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection,
Blake3::default(),
Vec::new(),
);
let attestation_config = AttestationConfig::builder()
.supported_signature_algs([SignatureAlgId::SECP256K1])
@@ -313,7 +336,13 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture(transcript, connection, Vec::new());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection,
Blake3::default(),
Vec::new(),
);
let attestation_builder = Attestation::builder(attestation_config)
.accept_request(request)
@@ -334,8 +363,13 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } =
request_fixture(transcript, connection.clone(), Vec::new());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let mut attestation_builder = Attestation::builder(attestation_config)
.accept_request(request)
@@ -359,8 +393,13 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } =
request_fixture(transcript, connection.clone(), Vec::new());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let mut attestation_builder = Attestation::builder(attestation_config)
.accept_request(request)
@@ -393,7 +432,9 @@ mod test {
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
vec![Extension {
id: b"foo".to_vec(),
value: b"bar".to_vec(),
@@ -420,7 +461,9 @@ mod test {
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
vec![Extension {
id: b"foo".to_vec(),
value: b"bar".to_vec(),

View File

@@ -2,7 +2,11 @@
use tlsn_core::{
connection::{CertBinding, CertBindingV1_2},
fixtures::ConnectionFixture,
transcript::{Transcript, TranscriptCommitConfigBuilder, TranscriptCommitment},
hash::HashAlgorithm,
transcript::{
Transcript, TranscriptCommitConfigBuilder, TranscriptCommitment,
encoding::{EncodingProvider, EncodingTree},
},
};
use crate::{
@@ -17,13 +21,16 @@ use crate::{
/// A Request fixture used for testing.
#[allow(missing_docs)]
pub struct RequestFixture {
pub encoding_tree: EncodingTree,
pub request: Request,
}
/// Returns a request fixture for testing.
pub fn request_fixture(
transcript: Transcript,
encodings_provider: impl EncodingProvider,
connection: ConnectionFixture,
encoding_hasher: impl HashAlgorithm,
extensions: Vec<Extension>,
) -> RequestFixture {
let provider = CryptoProvider::default();
@@ -43,9 +50,15 @@ pub fn request_fixture(
.unwrap();
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap();
let mut builder = RequestConfig::builder();
// Prover constructs encoding tree.
let encoding_tree = EncodingTree::new(
&encoding_hasher,
transcripts_commitment_config.iter_encoding(),
&encodings_provider,
)
.unwrap();
builder.transcript_commit(transcripts_commitment_config);
let mut builder = RequestConfig::builder();
for extension in extensions {
builder.extension(extension);
@@ -61,7 +74,10 @@ pub fn request_fixture(
let (request, _) = request_builder.build(&provider).unwrap();
RequestFixture { request }
RequestFixture {
encoding_tree,
request,
}
}
/// Returns an attestation fixture for testing.

View File

@@ -79,6 +79,8 @@
//!
//! // Specify all the transcript commitments we want to make.
//! builder
//! // Use BLAKE3 for encoding commitments.
//! .encoding_hash_alg(HashAlgId::BLAKE3)
//! // Commit to all sent data.
//! .commit_sent(&(0..sent_len))?
//! // Commit to the first 10 bytes of sent data.
@@ -127,7 +129,7 @@
//!
//! ```no_run
//! # use tlsn_attestation::{Attestation, CryptoProvider, Secrets, presentation::Presentation};
//! # use tlsn_core::transcript::Direction;
//! # use tlsn_core::transcript::{TranscriptCommitmentKind, Direction};
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! # let attestation: Attestation = unimplemented!();
//! # let secrets: Secrets = unimplemented!();
@@ -138,6 +140,8 @@
//! let mut builder = secrets.transcript_proof_builder();
//!
//! builder
//! // Use transcript encoding commitments.
//! .commitment_kinds(&[TranscriptCommitmentKind::Encoding])
//! // Disclose the first 10 bytes of the sent data.
//! .reveal(&(0..10), Direction::Sent)?
//! // Disclose all of the received data.
@@ -215,7 +219,7 @@ use tlsn_core::{
connection::{ConnectionInfo, ServerEphemKey},
hash::{Hash, HashAlgorithm, TypedHash},
merkle::MerkleTree,
transcript::TranscriptCommitment,
transcript::{TranscriptCommitment, encoding::EncoderSecret},
};
use crate::{
@@ -297,6 +301,8 @@ pub enum FieldKind {
ServerEphemKey = 0x02,
/// Server identity commitment.
ServerIdentityCommitment = 0x03,
/// Encoding commitment.
EncodingCommitment = 0x04,
/// Plaintext hash commitment.
PlaintextHash = 0x05,
}
@@ -321,6 +327,7 @@ pub struct Body {
connection_info: Field<ConnectionInfo>,
server_ephemeral_key: Field<ServerEphemKey>,
cert_commitment: Field<ServerCertCommitment>,
encoder_secret: Option<Field<EncoderSecret>>,
extensions: Vec<Field<Extension>>,
transcript_commitments: Vec<Field<TranscriptCommitment>>,
}
@@ -366,6 +373,7 @@ impl Body {
connection_info: conn_info,
server_ephemeral_key,
cert_commitment,
encoder_secret,
extensions,
transcript_commitments,
} = self;
@@ -383,6 +391,13 @@ impl Body {
),
];
if let Some(encoder_secret) = encoder_secret {
fields.push((
encoder_secret.id,
hasher.hash_separated(&encoder_secret.data),
));
}
for field in extensions.iter() {
fields.push((field.id, hasher.hash_separated(&field.data)));
}

View File

@@ -91,6 +91,11 @@ impl Presentation {
transcript.verify_with_provider(
&provider.hash,
&attestation.body.connection_info().transcript_length,
attestation
.body
.encoder_secret
.as_ref()
.map(|field| &field.data),
attestation.body.transcript_commitments(),
)
})

View File

@@ -144,7 +144,9 @@ impl std::fmt::Display for ErrorKind {
#[cfg(test)]
mod test {
use tlsn_core::{
connection::TranscriptLength, fixtures::ConnectionFixture, hash::HashAlgId,
connection::TranscriptLength,
fixtures::{ConnectionFixture, encoding_provider},
hash::{Blake3, HashAlgId},
transcript::Transcript,
};
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
@@ -162,8 +164,13 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } =
request_fixture(transcript, connection.clone(), Vec::new());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
@@ -178,8 +185,13 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { mut request, .. } =
request_fixture(transcript, connection.clone(), Vec::new());
let RequestFixture { mut request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
@@ -197,8 +209,13 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { mut request, .. } =
request_fixture(transcript, connection.clone(), Vec::new());
let RequestFixture { mut request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
@@ -216,8 +233,13 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { mut request, .. } =
request_fixture(transcript, connection.clone(), Vec::new());
let RequestFixture { mut request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
@@ -243,8 +265,13 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } =
request_fixture(transcript, connection.clone(), Vec::new());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let mut attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
@@ -262,8 +289,13 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } =
request_fixture(transcript, connection.clone(), Vec::new());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);

View File

@@ -49,4 +49,6 @@ impl_domain_separator!(tlsn_core::connection::ConnectionInfo);
impl_domain_separator!(tlsn_core::connection::CertBinding);
impl_domain_separator!(tlsn_core::transcript::TranscriptCommitment);
impl_domain_separator!(tlsn_core::transcript::TranscriptSecret);
impl_domain_separator!(tlsn_core::transcript::encoding::EncoderSecret);
impl_domain_separator!(tlsn_core::transcript::encoding::EncodingCommitment);
impl_domain_separator!(tlsn_core::transcript::hash::PlaintextHash);

View File

@@ -1,5 +1,3 @@
use rand::{Rng, SeedableRng, rngs::StdRng};
use rangeset::set::RangeSet;
use tlsn_attestation::{
Attestation, AttestationConfig, CryptoProvider,
presentation::PresentationOutput,
@@ -8,11 +6,12 @@ use tlsn_attestation::{
};
use tlsn_core::{
connection::{CertBinding, CertBindingV1_2},
fixtures::ConnectionFixture,
hash::{Blake3, Blinder, HashAlgId},
fixtures::{self, ConnectionFixture, encoder_secret},
hash::Blake3,
transcript::{
Direction, Transcript, TranscriptCommitment, TranscriptSecret,
hash::{PlaintextHash, PlaintextHashSecret, hash_plaintext},
Direction, Transcript, TranscriptCommitConfigBuilder, TranscriptCommitment,
TranscriptSecret,
encoding::{EncodingCommitment, EncodingTree},
},
};
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
@@ -20,7 +19,6 @@ use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
/// Tests that the attestation protocol and verification work end-to-end
#[test]
fn test_api() {
let mut rng = StdRng::seed_from_u64(0);
let mut provider = CryptoProvider::default();
// Configure signer for Notary
@@ -28,6 +26,8 @@ fn test_api() {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let (sent_len, recv_len) = transcript.len();
// Plaintext encodings which the Prover obtained from GC evaluation
let encodings_provider = fixtures::encoding_provider(GET_WITH_HEADER, OK_JSON);
// At the end of the TLS connection the Prover holds the:
let ConnectionFixture {
@@ -44,38 +44,26 @@ fn test_api() {
unreachable!()
};
// Create hash commitments
let hasher = Blake3::default();
let sent_blinder: Blinder = rng.random();
let recv_blinder: Blinder = rng.random();
// Prover specifies the ranges it wants to commit to.
let mut transcript_commitment_builder = TranscriptCommitConfigBuilder::new(&transcript);
transcript_commitment_builder
.commit_sent(&(0..sent_len))
.unwrap()
.commit_recv(&(0..recv_len))
.unwrap();
let sent_idx = RangeSet::from(0..sent_len);
let recv_idx = RangeSet::from(0..recv_len);
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap();
let sent_hash_commitment = PlaintextHash {
direction: Direction::Sent,
idx: sent_idx.clone(),
hash: hash_plaintext(&hasher, transcript.sent(), &sent_blinder),
};
// Prover constructs encoding tree.
let encoding_tree = EncodingTree::new(
&Blake3::default(),
transcripts_commitment_config.iter_encoding(),
&encodings_provider,
)
.unwrap();
let recv_hash_commitment = PlaintextHash {
direction: Direction::Received,
idx: recv_idx.clone(),
hash: hash_plaintext(&hasher, transcript.received(), &recv_blinder),
};
let sent_hash_secret = PlaintextHashSecret {
direction: Direction::Sent,
idx: sent_idx,
alg: HashAlgId::BLAKE3,
blinder: sent_blinder,
};
let recv_hash_secret = PlaintextHashSecret {
direction: Direction::Received,
idx: recv_idx,
alg: HashAlgId::BLAKE3,
blinder: recv_blinder,
let encoding_commitment = EncodingCommitment {
root: encoding_tree.root(),
};
let request_config = RequestConfig::default();
@@ -86,14 +74,8 @@ fn test_api() {
.handshake_data(server_cert_data)
.transcript(transcript)
.transcript_commitments(
vec![
TranscriptSecret::Hash(sent_hash_secret),
TranscriptSecret::Hash(recv_hash_secret),
],
vec![
TranscriptCommitment::Hash(sent_hash_commitment.clone()),
TranscriptCommitment::Hash(recv_hash_commitment.clone()),
],
vec![TranscriptSecret::Encoding(encoding_tree)],
vec![TranscriptCommitment::Encoding(encoding_commitment.clone())],
);
let (request, secrets) = request_builder.build(&provider).unwrap();
@@ -113,10 +95,8 @@ fn test_api() {
.connection_info(connection_info.clone())
// Server key Notary received during handshake
.server_ephemeral_key(server_ephemeral_key)
.transcript_commitments(vec![
TranscriptCommitment::Hash(sent_hash_commitment),
TranscriptCommitment::Hash(recv_hash_commitment),
]);
.encoder_secret(encoder_secret())
.transcript_commitments(vec![TranscriptCommitment::Encoding(encoding_commitment)]);
let attestation = attestation_builder.build(&provider).unwrap();

View File

@@ -1,7 +1,10 @@
//! Fixtures for testing
mod provider;
pub mod transcript;
pub use provider::FixtureEncodingProvider;
use hex::FromHex;
use crate::{
@@ -10,6 +13,10 @@ use crate::{
ServerEphemKey, ServerName, ServerSignature, SignatureAlgorithm, TlsVersion,
TranscriptLength,
},
transcript::{
encoding::{EncoderSecret, EncodingProvider},
Transcript,
},
webpki::CertificateDer,
};
@@ -122,3 +129,27 @@ impl ConnectionFixture {
server_ephemeral_key
}
}
/// Returns an encoding provider fixture.
pub fn encoding_provider(tx: &[u8], rx: &[u8]) -> impl EncodingProvider {
let secret = encoder_secret();
FixtureEncodingProvider::new(&secret, Transcript::new(tx, rx))
}
/// Seed fixture.
const SEED: [u8; 32] = [0; 32];
/// Delta fixture.
const DELTA: [u8; 16] = [1; 16];
/// Returns an encoder secret fixture.
pub fn encoder_secret() -> EncoderSecret {
EncoderSecret::new(SEED, DELTA)
}
/// Returns a tampered encoder secret fixture.
pub fn encoder_secret_tampered_seed() -> EncoderSecret {
let mut seed = SEED;
seed[0] += 1;
EncoderSecret::new(seed, DELTA)
}

View File

@@ -0,0 +1,41 @@
use std::ops::Range;
use crate::transcript::{
encoding::{new_encoder, Encoder, EncoderSecret, EncodingProvider, EncodingProviderError},
Direction, Transcript,
};
/// An encoding provider fixture.
pub struct FixtureEncodingProvider {
encoder: Box<dyn Encoder>,
transcript: Transcript,
}
impl FixtureEncodingProvider {
/// Creates a new encoding provider fixture.
pub(crate) fn new(secret: &EncoderSecret, transcript: Transcript) -> Self {
Self {
encoder: Box::new(new_encoder(secret)),
transcript,
}
}
}
impl EncodingProvider for FixtureEncodingProvider {
fn provide_encoding(
&self,
direction: Direction,
range: Range<usize>,
dest: &mut Vec<u8>,
) -> Result<(), EncodingProviderError> {
let transcript = match direction {
Direction::Sent => &self.transcript.sent(),
Direction::Received => &self.transcript.received(),
};
let data = transcript.get(range.clone()).ok_or(EncodingProviderError)?;
self.encoder.encode_data(direction, range, data, dest);
Ok(())
}
}
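
A quick usage sketch for this fixture (hedged: assumes tlsn-core is built with its "fixtures" feature, as the test dependencies earlier in this diff are; the demo function and transcript bytes are illustrative, not part of the change):

use tlsn_core::fixtures::encoding_provider;
use tlsn_core::transcript::{Direction, encoding::EncodingProvider};

fn demo() {
    // Build a provider over a tiny transcript and request the sent encodings.
    let provider = encoding_provider(b"hello", b"world");
    let mut dest = Vec::new();
    provider
        .provide_encoding(Direction::Sent, 0..5, &mut dest)
        .unwrap();
    // Each plaintext byte encodes to 128 bytes: 8 bits, 16 bytes per bit.
    assert_eq!(dest.len(), 5 * 128);
}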

View File

@@ -19,7 +19,9 @@ use serde::{Deserialize, Serialize};
use crate::{
connection::ServerName,
transcript::{PartialTranscript, TranscriptCommitment, TranscriptSecret},
transcript::{
encoding::EncoderSecret, PartialTranscript, TranscriptCommitment, TranscriptSecret,
},
};
/// Prover output.
@@ -40,6 +42,8 @@ pub struct VerifierOutput {
pub server_name: Option<ServerName>,
/// Transcript data.
pub transcript: Option<PartialTranscript>,
/// Encoding commitment secret.
pub encoder_secret: Option<EncoderSecret>,
/// Transcript commitments.
pub transcript_commitments: Vec<TranscriptCommitment>,
}

View File

@@ -63,6 +63,11 @@ impl MerkleProof {
Ok(())
}
/// Returns the leaf count of the Merkle tree associated with the proof.
pub(crate) fn leaf_count(&self) -> usize {
self.leaf_count
}
}
#[derive(Clone)]

View File

@@ -19,6 +19,7 @@
//! withheld.
mod commit;
pub mod encoding;
pub mod hash;
mod proof;
mod tls;

View File

@@ -8,15 +8,27 @@ use serde::{Deserialize, Serialize};
use crate::{
hash::HashAlgId,
transcript::{
encoding::{EncodingCommitment, EncodingTree},
hash::{PlaintextHash, PlaintextHashSecret},
Direction, RangeSet, Transcript,
},
};
/// The maximum allowed total byte length of committed data for a single
/// commitment kind. Used to prevent DoS during verification. (This may cause
/// the verifier to hash up to 1 GB * 128 = 128 GB of data for certain kinds
/// of encoding commitments.)
///
/// This value must not exceed bcs's MAX_SEQUENCE_LENGTH limit, which is
/// (1 << 31) - 1 by default.
pub(crate) const MAX_TOTAL_COMMITTED_DATA: usize = 1_000_000_000;
/// Kind of transcript commitment.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[non_exhaustive]
pub enum TranscriptCommitmentKind {
/// A commitment to encodings of the transcript.
Encoding,
/// A hash commitment to plaintext in the transcript.
Hash {
/// The hash algorithm used.
@@ -27,6 +39,7 @@ pub enum TranscriptCommitmentKind {
impl fmt::Display for TranscriptCommitmentKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Encoding => f.write_str("encoding"),
Self::Hash { alg } => write!(f, "hash ({alg})"),
}
}
@@ -36,6 +49,8 @@ impl fmt::Display for TranscriptCommitmentKind {
#[derive(Debug, Clone, Serialize, Deserialize)]
#[non_exhaustive]
pub enum TranscriptCommitment {
/// Encoding commitment.
Encoding(EncodingCommitment),
/// Plaintext hash commitment.
Hash(PlaintextHash),
}
@@ -44,6 +59,8 @@ pub enum TranscriptCommitment {
#[derive(Debug, Clone, Serialize, Deserialize)]
#[non_exhaustive]
pub enum TranscriptSecret {
/// Encoding tree.
Encoding(EncodingTree),
/// Plaintext hash secret.
Hash(PlaintextHashSecret),
}
@@ -51,6 +68,9 @@ pub enum TranscriptSecret {
/// Configuration for transcript commitments.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TranscriptCommitConfig {
encoding_hash_alg: HashAlgId,
has_encoding: bool,
has_hash: bool,
commits: Vec<((Direction, RangeSet<usize>), TranscriptCommitmentKind)>,
}
@@ -60,23 +80,53 @@ impl TranscriptCommitConfig {
TranscriptCommitConfigBuilder::new(transcript)
}
/// Returns the hash algorithm to use for encoding commitments.
pub fn encoding_hash_alg(&self) -> &HashAlgId {
&self.encoding_hash_alg
}
/// Returns `true` if the configuration has any encoding commitments.
pub fn has_encoding(&self) -> bool {
self.has_encoding
}
/// Returns `true` if the configuration has any hash commitments.
pub fn has_hash(&self) -> bool {
self.commits
.iter()
.any(|(_, kind)| matches!(kind, TranscriptCommitmentKind::Hash { .. }))
self.has_hash
}
/// Returns an iterator over the encoding commitment indices.
pub fn iter_encoding(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>)> {
self.commits.iter().filter_map(|(idx, kind)| match kind {
TranscriptCommitmentKind::Encoding => Some(idx),
_ => None,
})
}
/// Returns an iterator over the hash commitment indices.
pub fn iter_hash(&self) -> impl Iterator<Item = (&(Direction, RangeSet<usize>), &HashAlgId)> {
self.commits.iter().map(|(idx, kind)| match kind {
TranscriptCommitmentKind::Hash { alg } => (idx, alg),
self.commits.iter().filter_map(|(idx, kind)| match kind {
TranscriptCommitmentKind::Hash { alg } => Some((idx, alg)),
_ => None,
})
}
/// Returns a request for the transcript commitments.
pub fn to_request(&self) -> TranscriptCommitRequest {
TranscriptCommitRequest {
encoding: self.has_encoding.then(|| {
let mut sent = RangeSet::default();
let mut recv = RangeSet::default();
for (dir, idx) in self.iter_encoding() {
match dir {
Direction::Sent => sent.union_mut(idx),
Direction::Received => recv.union_mut(idx),
}
}
(sent, recv)
}),
hash: self
.iter_hash()
.map(|((dir, idx), alg)| (*dir, idx.clone(), *alg))
@@ -86,9 +136,15 @@ impl TranscriptCommitConfig {
}
/// A builder for [`TranscriptCommitConfig`].
///
/// The default hash algorithm is [`HashAlgId::BLAKE3`] and the default kind
/// is [`TranscriptCommitmentKind::Encoding`].
#[derive(Debug)]
pub struct TranscriptCommitConfigBuilder<'a> {
transcript: &'a Transcript,
encoding_hash_alg: HashAlgId,
has_encoding: bool,
has_hash: bool,
default_kind: TranscriptCommitmentKind,
commits: HashSet<((Direction, RangeSet<usize>), TranscriptCommitmentKind)>,
}
@@ -98,13 +154,20 @@ impl<'a> TranscriptCommitConfigBuilder<'a> {
pub fn new(transcript: &'a Transcript) -> Self {
Self {
transcript,
default_kind: TranscriptCommitmentKind::Hash {
alg: HashAlgId::BLAKE3,
},
encoding_hash_alg: HashAlgId::BLAKE3,
has_encoding: false,
has_hash: false,
default_kind: TranscriptCommitmentKind::Encoding,
commits: HashSet::default(),
}
}
/// Sets the hash algorithm to use for encoding commitments.
pub fn encoding_hash_alg(&mut self, alg: HashAlgId) -> &mut Self {
self.encoding_hash_alg = alg;
self
}
/// Sets the default kind of commitment to use.
pub fn default_kind(&mut self, default_kind: TranscriptCommitmentKind) -> &mut Self {
self.default_kind = default_kind;
@@ -138,6 +201,11 @@ impl<'a> TranscriptCommitConfigBuilder<'a> {
));
}
match kind {
TranscriptCommitmentKind::Encoding => self.has_encoding = true,
TranscriptCommitmentKind::Hash { .. } => self.has_hash = true,
}
self.commits.insert(((direction, idx), kind));
Ok(self)
@@ -184,6 +252,9 @@ impl<'a> TranscriptCommitConfigBuilder<'a> {
/// Builds the configuration.
pub fn build(self) -> Result<TranscriptCommitConfig, TranscriptCommitConfigBuilderError> {
Ok(TranscriptCommitConfig {
encoding_hash_alg: self.encoding_hash_alg,
has_encoding: self.has_encoding,
has_hash: self.has_hash,
commits: Vec::from_iter(self.commits),
})
}
@@ -230,10 +301,16 @@ impl fmt::Display for TranscriptCommitConfigBuilderError {
/// Request to compute transcript commitments.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TranscriptCommitRequest {
encoding: Option<(RangeSet<usize>, RangeSet<usize>)>,
hash: Vec<(Direction, RangeSet<usize>, HashAlgId)>,
}
impl TranscriptCommitRequest {
/// Returns `true` if an encoding commitment is requested.
pub fn has_encoding(&self) -> bool {
self.encoding.is_some()
}
/// Returns `true` if a hash commitment is requested.
pub fn has_hash(&self) -> bool {
!self.hash.is_empty()
@@ -243,6 +320,11 @@ impl TranscriptCommitRequest {
pub fn iter_hash(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>, HashAlgId)> {
self.hash.iter()
}
/// Returns the ranges of the encoding commitments.
pub fn encoding(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
self.encoding.as_ref()
}
}
#[cfg(test)]

View File

@@ -0,0 +1,22 @@
//! Transcript encoding commitments and proofs.
mod encoder;
mod proof;
mod provider;
mod tree;
pub use encoder::{new_encoder, Encoder, EncoderSecret};
pub use proof::{EncodingProof, EncodingProofError};
pub use provider::{EncodingProvider, EncodingProviderError};
pub use tree::{EncodingTree, EncodingTreeError};
use serde::{Deserialize, Serialize};
use crate::hash::TypedHash;
/// Transcript encoding commitment.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct EncodingCommitment {
/// Merkle root of the encoding commitments.
pub root: TypedHash,
}
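
How these exports fit together, as a minimal sketch (hedged: this mirrors the fixtures and tests later in this diff and assumes tlsn-core's "fixtures" feature; the demo function and transcript contents are illustrative):

use rangeset::set::RangeSet;
use tlsn_core::{
    fixtures::{encoder_secret, encoding_provider},
    hash::{Blake3, HashProvider},
    transcript::{
        Direction, Transcript,
        encoding::{EncodingCommitment, EncodingTree},
    },
};

fn demo() {
    let transcript = Transcript::new(b"GET /", b"200 OK");
    let idx = (Direction::Sent, RangeSet::from(0..5));
    let provider = encoding_provider(transcript.sent(), transcript.received());
    // Commit: a Merkle tree over blinded encodings; the root is the commitment.
    let tree = EncodingTree::new(&Blake3::default(), [&idx], &provider).unwrap();
    let commitment = EncodingCommitment { root: tree.root() };
    // Prove and verify an opening of the committed range.
    let proof = tree.proof([&idx].into_iter()).unwrap();
    let (auth_sent, _) = proof
        .verify_with_provider(
            &HashProvider::default(),
            &encoder_secret(),
            &commitment,
            transcript.sent(),
            transcript.received(),
        )
        .unwrap();
    assert_eq!(auth_sent, idx.1);
}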

View File

@@ -0,0 +1,137 @@
use std::ops::Range;
use crate::transcript::Direction;
use itybity::ToBits;
use rand::{RngCore, SeedableRng};
use rand_chacha::ChaCha12Rng;
use serde::{Deserialize, Serialize};
/// The size of the encoding for 1 bit, in bytes.
const BIT_ENCODING_SIZE: usize = 16;
/// The size of the encoding for 1 byte, in bytes.
const BYTE_ENCODING_SIZE: usize = 128;
/// Secret used by an encoder to generate encodings.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct EncoderSecret {
seed: [u8; 32],
delta: [u8; BIT_ENCODING_SIZE],
}
opaque_debug::implement!(EncoderSecret);
impl EncoderSecret {
/// Creates a new secret.
///
/// # Arguments
///
/// * `seed` - The seed for the PRG.
/// * `delta` - Delta for deriving the one-encodings.
pub fn new(seed: [u8; 32], delta: [u8; 16]) -> Self {
Self { seed, delta }
}
/// Returns the seed.
pub fn seed(&self) -> &[u8; 32] {
&self.seed
}
/// Returns the delta.
pub fn delta(&self) -> &[u8; 16] {
&self.delta
}
}
/// Creates a new encoder.
pub fn new_encoder(secret: &EncoderSecret) -> impl Encoder {
ChaChaEncoder::new(secret)
}
pub(crate) struct ChaChaEncoder {
seed: [u8; 32],
delta: [u8; 16],
}
impl ChaChaEncoder {
pub(crate) fn new(secret: &EncoderSecret) -> Self {
let seed = *secret.seed();
let delta = *secret.delta();
Self { seed, delta }
}
pub(crate) fn new_prg(&self, stream_id: u64) -> ChaCha12Rng {
let mut prg = ChaCha12Rng::from_seed(self.seed);
prg.set_stream(stream_id);
prg.set_word_pos(0);
prg
}
}
/// A transcript encoder.
///
/// This is an internal implementation detail that should not be exposed to the
/// public API.
pub trait Encoder {
/// Writes the zero encoding for the given range of the transcript into the
/// destination buffer.
fn encode_range(&self, direction: Direction, range: Range<usize>, dest: &mut Vec<u8>);
/// Writes the encoding for the given data into the destination buffer.
fn encode_data(
&self,
direction: Direction,
range: Range<usize>,
data: &[u8],
dest: &mut Vec<u8>,
);
}
impl Encoder for ChaChaEncoder {
fn encode_range(&self, direction: Direction, range: Range<usize>, dest: &mut Vec<u8>) {
// ChaCha encoder works with 32-bit words. Each encoded bit is 128 bits long.
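// One plaintext byte therefore spans 8 * 128 / 32 = 32 words of the stream.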
const WORDS_PER_BYTE: u128 = 8 * 128 / 32;
let stream_id: u64 = match direction {
Direction::Sent => 0,
Direction::Received => 1,
};
let mut prg = self.new_prg(stream_id);
let len = range.len() * BYTE_ENCODING_SIZE;
let pos = dest.len();
// Write 0s to the destination buffer.
dest.resize(pos + len, 0);
// Fill the destination buffer with the PRG.
prg.set_word_pos(range.start as u128 * WORDS_PER_BYTE);
prg.fill_bytes(&mut dest[pos..pos + len]);
}
fn encode_data(
&self,
direction: Direction,
range: Range<usize>,
data: &[u8],
dest: &mut Vec<u8>,
) {
const ZERO: [u8; 16] = [0; BIT_ENCODING_SIZE];
let pos = dest.len();
// Write the zero encoding for the given range.
self.encode_range(direction, range, dest);
let dest = &mut dest[pos..];
for (pos, bit) in data.iter_lsb0().enumerate() {
// Add the delta to the encoding whenever the encoded bit is 1,
// otherwise add a zero.
let summand = if bit { &self.delta } else { &ZERO };
dest[pos * BIT_ENCODING_SIZE..(pos + 1) * BIT_ENCODING_SIZE]
.iter_mut()
.zip(summand)
.for_each(|(a, b)| *a ^= *b);
}
}
}
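
A small property sketch of the encoder (hedged: uses the items re-exported from this module as shown above; the demo function is illustrative): the encoding of real data equals the zero encoding except at one-bits, where exactly delta has been XORed in.

use tlsn_core::transcript::{
    Direction,
    encoding::{Encoder, EncoderSecret, new_encoder},
};

fn demo() {
    let secret = EncoderSecret::new([0u8; 32], [1u8; 16]);
    let encoder = new_encoder(&secret);

    let mut zero = Vec::new();
    encoder.encode_range(Direction::Sent, 0..1, &mut zero);

    let mut enc = Vec::new();
    // 0x01: only the least significant bit is set.
    encoder.encode_data(Direction::Sent, 0..1, &[0x01], &mut enc);

    // Bits are encoded LSB-first in 16-byte chunks, so only the first chunk
    // differs from the zero encoding, by exactly delta = [1; 16].
    assert_eq!(&enc[16..], &zero[16..]);
    for (e, z) in enc[..16].iter().zip(&zero[..16]) {
        assert_eq!(e ^ z, 1);
    }
}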

View File

@@ -0,0 +1,361 @@
use std::{collections::HashMap, fmt};
use rangeset::set::RangeSet;
use serde::{Deserialize, Serialize};
use crate::{
hash::{Blinder, HashProvider, HashProviderError},
merkle::{MerkleError, MerkleProof},
transcript::{
commit::MAX_TOTAL_COMMITTED_DATA,
encoding::{new_encoder, Encoder, EncoderSecret, EncodingCommitment},
Direction,
},
};
/// An opening of a leaf in the encoding tree.
#[derive(Clone, Serialize, Deserialize)]
pub(super) struct Opening {
pub(super) direction: Direction,
pub(super) idx: RangeSet<usize>,
pub(super) blinder: Blinder,
}
opaque_debug::implement!(Opening);
/// An encoding commitment proof.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(try_from = "validation::EncodingProofUnchecked")]
pub struct EncodingProof {
/// The proof of inclusion of the commitment(s) in the Merkle tree of
/// commitments.
pub(super) inclusion_proof: MerkleProof,
pub(super) openings: HashMap<usize, Opening>,
}
impl EncodingProof {
/// Verifies the proof against the commitment.
///
/// Returns the authenticated indices of the sent and received data,
/// respectively.
///
/// # Arguments
///
/// * `provider` - Hash provider.
/// * `secret` - Secret used by the encoder.
/// * `commitment` - Encoding commitment to verify against.
/// * `sent` - Sent data to authenticate.
/// * `recv` - Received data to authenticate.
pub fn verify_with_provider(
&self,
provider: &HashProvider,
secret: &EncoderSecret,
commitment: &EncodingCommitment,
sent: &[u8],
recv: &[u8],
) -> Result<(RangeSet<usize>, RangeSet<usize>), EncodingProofError> {
let hasher = provider.get(&commitment.root.alg)?;
let encoder = new_encoder(secret);
let Self {
inclusion_proof,
openings,
} = self;
let mut leaves = Vec::with_capacity(openings.len());
let mut expected_leaf = Vec::default();
let mut total_opened = 0u128;
let mut auth_sent = RangeSet::default();
let mut auth_recv = RangeSet::default();
for (
id,
Opening {
direction,
idx,
blinder,
},
) in openings
{
// Make sure the amount of data being proved is bounded.
total_opened += idx.len() as u128;
if total_opened > MAX_TOTAL_COMMITTED_DATA as u128 {
return Err(EncodingProofError::new(
ErrorKind::Proof,
"exceeded maximum allowed data",
))?;
}
let (data, auth) = match direction {
Direction::Sent => (sent, &mut auth_sent),
Direction::Received => (recv, &mut auth_recv),
};
// Make sure the ranges are within the bounds of the transcript.
if idx.end().unwrap_or(0) > data.len() {
return Err(EncodingProofError::new(
ErrorKind::Proof,
format!(
"index out of bounds of the transcript ({}): {} > {}",
direction,
idx.end().unwrap_or(0),
data.len()
),
));
}
expected_leaf.clear();
for range in idx.iter() {
encoder.encode_data(*direction, range.clone(), &data[range], &mut expected_leaf);
}
expected_leaf.extend_from_slice(blinder.as_bytes());
// Compute the expected hash of the commitment to make sure it is
// present in the merkle tree.
leaves.push((*id, hasher.hash(&expected_leaf)));
auth.union_mut(idx);
}
// Verify that the expected hashes are present in the merkle tree.
//
// This proves the Prover committed to the purported data prior to the encoder
// seed being revealed. Ergo, if the encodings are authentic then the purported
// data is authentic.
inclusion_proof.verify(hasher, &commitment.root, leaves)?;
Ok((auth_sent, auth_recv))
}
}
/// Error for [`EncodingProof`].
#[derive(Debug, thiserror::Error)]
pub struct EncodingProofError {
kind: ErrorKind,
source: Option<Box<dyn std::error::Error + Send + Sync>>,
}
impl EncodingProofError {
fn new<E>(kind: ErrorKind, source: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
Self {
kind,
source: Some(source.into()),
}
}
}
#[derive(Debug)]
enum ErrorKind {
Provider,
Proof,
}
impl fmt::Display for EncodingProofError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("encoding proof error: ")?;
match self.kind {
ErrorKind::Provider => f.write_str("provider error")?,
ErrorKind::Proof => f.write_str("proof error")?,
}
if let Some(source) = &self.source {
write!(f, " caused by: {source}")?;
}
Ok(())
}
}
impl From<HashProviderError> for EncodingProofError {
fn from(error: HashProviderError) -> Self {
Self::new(ErrorKind::Provider, error)
}
}
impl From<MerkleError> for EncodingProofError {
fn from(error: MerkleError) -> Self {
Self::new(ErrorKind::Proof, error)
}
}
/// Invalid encoding proof error.
#[derive(Debug, thiserror::Error)]
#[error("invalid encoding proof: {0}")]
pub struct InvalidEncodingProof(&'static str);
mod validation {
use super::*;
/// The maximum allowed height of the Merkle tree of encoding commitments.
///
/// The statistical security parameter (SSP) of the encoding commitment
/// protocol is calculated as "the number of uniformly random bits in a
/// single bit's encoding minus `MAX_HEIGHT`".
///
/// For example, a bit encoding used in garbled circuits typically has 127
/// uniformly random bits, hence when using it in the encoding
/// commitment protocol, the SSP is 127 - 30 = 97 bits.
///
/// Leaving this validation here as a fail-safe in case we ever start
/// using shorter encodings.
const MAX_HEIGHT: usize = 30;
#[derive(Debug, Deserialize)]
pub(super) struct EncodingProofUnchecked {
inclusion_proof: MerkleProof,
openings: HashMap<usize, Opening>,
}
impl TryFrom<EncodingProofUnchecked> for EncodingProof {
type Error = InvalidEncodingProof;
fn try_from(unchecked: EncodingProofUnchecked) -> Result<Self, Self::Error> {
if unchecked.inclusion_proof.leaf_count() > 1 << MAX_HEIGHT {
return Err(InvalidEncodingProof(
"the height of the tree exceeds the maximum allowed",
));
}
Ok(Self {
inclusion_proof: unchecked.inclusion_proof,
openings: unchecked.openings,
})
}
}
}
#[cfg(test)]
mod test {
use tlsn_data_fixtures::http::{request::POST_JSON, response::OK_JSON};
use crate::{
fixtures::{encoder_secret, encoder_secret_tampered_seed, encoding_provider},
hash::Blake3,
transcript::{encoding::EncodingTree, Transcript},
};
use super::*;
struct EncodingFixture {
transcript: Transcript,
proof: EncodingProof,
commitment: EncodingCommitment,
}
fn new_encoding_fixture() -> EncodingFixture {
let transcript = Transcript::new(POST_JSON, OK_JSON);
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
let idx_1 = (Direction::Received, RangeSet::from(0..OK_JSON.len()));
let provider = encoding_provider(transcript.sent(), transcript.received());
let tree = EncodingTree::new(&Blake3::default(), [&idx_0, &idx_1], &provider).unwrap();
let proof = tree.proof([&idx_0, &idx_1].into_iter()).unwrap();
let commitment = EncodingCommitment { root: tree.root() };
EncodingFixture {
transcript,
proof,
commitment,
}
}
#[test]
fn test_verify_encoding_proof_tampered_seed() {
let EncodingFixture {
transcript,
proof,
commitment,
} = new_encoding_fixture();
let err = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret_tampered_seed(),
&commitment,
transcript.sent(),
transcript.received(),
)
.unwrap_err();
assert!(matches!(err.kind, ErrorKind::Proof));
}
#[test]
fn test_verify_encoding_proof_out_of_range() {
let EncodingFixture {
transcript,
proof,
commitment,
} = new_encoding_fixture();
let sent = &transcript.sent()[transcript.sent().len() - 1..];
let recv = &transcript.received()[transcript.received().len() - 2..];
let err = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
sent,
recv,
)
.unwrap_err();
assert!(matches!(err.kind, ErrorKind::Proof));
}
#[test]
fn test_verify_encoding_proof_tampered_idx() {
let EncodingFixture {
transcript,
mut proof,
commitment,
} = new_encoding_fixture();
let Opening { idx, .. } = proof.openings.values_mut().next().unwrap();
*idx = RangeSet::from([0..3, 13..15]);
let err = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
transcript.sent(),
transcript.received(),
)
.unwrap_err();
assert!(matches!(err.kind, ErrorKind::Proof));
}
#[test]
fn test_verify_encoding_proof_tampered_encoding_blinder() {
let EncodingFixture {
transcript,
mut proof,
commitment,
} = new_encoding_fixture();
let Opening { blinder, .. } = proof.openings.values_mut().next().unwrap();
*blinder = rand::random();
let err = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
transcript.sent(),
transcript.received(),
)
.unwrap_err();
assert!(matches!(err.kind, ErrorKind::Proof));
}
}

View File

@@ -0,0 +1,19 @@
use std::ops::Range;
use crate::transcript::Direction;
/// A provider of plaintext encodings.
pub trait EncodingProvider {
/// Writes the encoding of the given range into the destination buffer.
fn provide_encoding(
&self,
direction: Direction,
range: Range<usize>,
dest: &mut Vec<u8>,
) -> Result<(), EncodingProviderError>;
}
/// Error for [`EncodingProvider`].
#[derive(Debug, thiserror::Error)]
#[error("failed to provide encoding")]
pub struct EncodingProviderError;
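
For illustration, a minimal implementer of the trait (hedged: EmptyProvider is hypothetical, shown only to make the required shape concrete):

use std::ops::Range;
use tlsn_core::transcript::{
    Direction,
    encoding::{EncodingProvider, EncodingProviderError},
};

/// A provider that holds no encodings; every request fails.
struct EmptyProvider;

impl EncodingProvider for EmptyProvider {
    fn provide_encoding(
        &self,
        _direction: Direction,
        _range: Range<usize>,
        _dest: &mut Vec<u8>,
    ) -> Result<(), EncodingProviderError> {
        Err(EncodingProviderError)
    }
}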

View File

@@ -0,0 +1,327 @@
use std::collections::HashMap;
use bimap::BiMap;
use rangeset::set::RangeSet;
use serde::{Deserialize, Serialize};
use crate::{
hash::{Blinder, HashAlgId, HashAlgorithm, TypedHash},
merkle::MerkleTree,
transcript::{
encoding::{
proof::{EncodingProof, Opening},
EncodingProvider,
},
Direction,
},
};
/// Encoding tree builder error.
#[derive(Debug, thiserror::Error)]
pub enum EncodingTreeError {
/// Index is out of bounds of the transcript.
#[error("index is out of bounds of the transcript")]
OutOfBounds {
/// The index.
index: RangeSet<usize>,
/// The transcript length.
transcript_length: usize,
},
/// Encoding provider is missing an encoding for an index.
#[error("encoding provider is missing an encoding for an index")]
MissingEncoding {
/// The index which is missing.
index: RangeSet<usize>,
},
/// Index is missing from the tree.
#[error("index is missing from the tree")]
MissingLeaf {
/// The index which is missing.
index: RangeSet<usize>,
},
}
/// A merkle tree of transcript encodings.
#[derive(Clone, Serialize, Deserialize)]
pub struct EncodingTree {
/// Merkle tree of the commitments.
tree: MerkleTree,
/// Nonces used to blind the hashes.
blinders: Vec<Blinder>,
/// Mapping between the index of a leaf and the transcript index it
/// corresponds to.
idxs: BiMap<usize, (Direction, RangeSet<usize>)>,
/// Union of all transcript indices in the sent direction.
sent_idx: RangeSet<usize>,
/// Union of all transcript indices in the received direction.
received_idx: RangeSet<usize>,
}
opaque_debug::implement!(EncodingTree);
impl EncodingTree {
/// Creates a new encoding tree.
///
/// # Arguments
///
/// * `hasher` - The hash algorithm to use.
/// * `idxs` - The subsequence indices to commit to.
/// * `provider` - The encoding provider.
pub fn new<'idx>(
hasher: &dyn HashAlgorithm,
idxs: impl IntoIterator<Item = &'idx (Direction, RangeSet<usize>)>,
provider: &dyn EncodingProvider,
) -> Result<Self, EncodingTreeError> {
let mut this = Self {
tree: MerkleTree::new(hasher.id()),
blinders: Vec::new(),
idxs: BiMap::new(),
sent_idx: RangeSet::default(),
received_idx: RangeSet::default(),
};
let mut leaves = Vec::new();
let mut encoding = Vec::new();
for dir_idx in idxs {
let direction = dir_idx.0;
let idx = &dir_idx.1;
// Ignore empty indices.
if idx.is_empty() {
continue;
}
if this.idxs.contains_right(dir_idx) {
// The subsequence is already in the tree.
continue;
}
let blinder: Blinder = rand::random();
encoding.clear();
for range in idx.iter() {
provider
.provide_encoding(direction, range, &mut encoding)
.map_err(|_| EncodingTreeError::MissingEncoding { index: idx.clone() })?;
}
encoding.extend_from_slice(blinder.as_bytes());
let leaf = hasher.hash(&encoding);
leaves.push(leaf);
this.blinders.push(blinder);
this.idxs.insert(this.idxs.len(), dir_idx.clone());
match direction {
Direction::Sent => this.sent_idx.union_mut(idx),
Direction::Received => this.received_idx.union_mut(idx),
}
}
this.tree.insert(hasher, leaves);
Ok(this)
}
/// Returns the root of the tree.
pub fn root(&self) -> TypedHash {
self.tree.root()
}
/// Returns the hash algorithm of the tree.
pub fn algorithm(&self) -> HashAlgId {
self.tree.algorithm()
}
/// Generates a proof for the given indices.
///
/// # Arguments
///
/// * `idxs` - The transcript indices to prove.
pub fn proof<'idx>(
&self,
idxs: impl Iterator<Item = &'idx (Direction, RangeSet<usize>)>,
) -> Result<EncodingProof, EncodingTreeError> {
let mut openings = HashMap::new();
for dir_idx in idxs {
let direction = dir_idx.0;
let idx = &dir_idx.1;
let leaf_idx = *self
.idxs
.get_by_right(dir_idx)
.ok_or_else(|| EncodingTreeError::MissingLeaf { index: idx.clone() })?;
let blinder = self.blinders[leaf_idx].clone();
openings.insert(
leaf_idx,
Opening {
direction,
idx: idx.clone(),
blinder,
},
);
}
let mut indices = openings.keys().copied().collect::<Vec<_>>();
indices.sort();
Ok(EncodingProof {
inclusion_proof: self.tree.proof(&indices),
openings,
})
}
/// Returns whether the tree contains the given transcript index.
pub fn contains(&self, idx: &(Direction, RangeSet<usize>)) -> bool {
self.idxs.contains_right(idx)
}
pub(crate) fn idx(&self, direction: Direction) -> &RangeSet<usize> {
match direction {
Direction::Sent => &self.sent_idx,
Direction::Received => &self.received_idx,
}
}
/// Returns the committed transcript indices.
pub(crate) fn transcript_indices(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>)> {
self.idxs.right_values()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
fixtures::{encoder_secret, encoding_provider},
hash::{Blake3, HashProvider},
transcript::{encoding::EncodingCommitment, Transcript},
};
use tlsn_data_fixtures::http::{request::POST_JSON, response::OK_JSON};
fn new_tree<'seq>(
transcript: &Transcript,
idxs: impl Iterator<Item = &'seq (Direction, RangeSet<usize>)>,
) -> Result<EncodingTree, EncodingTreeError> {
let provider = encoding_provider(transcript.sent(), transcript.received());
EncodingTree::new(&Blake3::default(), idxs, &provider)
}
#[test]
fn test_encoding_tree() {
let transcript = Transcript::new(POST_JSON, OK_JSON);
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
let idx_1 = (Direction::Received, RangeSet::from(0..OK_JSON.len()));
let tree = new_tree(&transcript, [&idx_0, &idx_1].into_iter()).unwrap();
assert!(tree.contains(&idx_0));
assert!(tree.contains(&idx_1));
let proof = tree.proof([&idx_0, &idx_1].into_iter()).unwrap();
let commitment = EncodingCommitment { root: tree.root() };
let (auth_sent, auth_recv) = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
transcript.sent(),
transcript.received(),
)
.unwrap();
assert_eq!(auth_sent, idx_0.1);
assert_eq!(auth_recv, idx_1.1);
}
#[test]
fn test_encoding_tree_multiple_ranges() {
let transcript = Transcript::new(POST_JSON, OK_JSON);
let idx_0 = (Direction::Sent, RangeSet::from(0..1));
let idx_1 = (Direction::Sent, RangeSet::from(1..POST_JSON.len()));
let idx_2 = (Direction::Received, RangeSet::from(0..1));
let idx_3 = (Direction::Received, RangeSet::from(1..OK_JSON.len()));
let tree = new_tree(&transcript, [&idx_0, &idx_1, &idx_2, &idx_3].into_iter()).unwrap();
assert!(tree.contains(&idx_0));
assert!(tree.contains(&idx_1));
assert!(tree.contains(&idx_2));
assert!(tree.contains(&idx_3));
let proof = tree
.proof([&idx_0, &idx_1, &idx_2, &idx_3].into_iter())
.unwrap();
let commitment = EncodingCommitment { root: tree.root() };
let (auth_sent, auth_recv) = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
transcript.sent(),
transcript.received(),
)
.unwrap();
let mut expected_auth_sent = RangeSet::default();
expected_auth_sent.union_mut(&idx_0.1);
expected_auth_sent.union_mut(&idx_1.1);
let mut expected_auth_recv = RangeSet::default();
expected_auth_recv.union_mut(&idx_2.1);
expected_auth_recv.union_mut(&idx_3.1);
assert_eq!(auth_sent, expected_auth_sent);
assert_eq!(auth_recv, expected_auth_recv);
}
#[test]
fn test_encoding_tree_proof_missing_leaf() {
let transcript = Transcript::new(POST_JSON, OK_JSON);
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
let idx_1 = (Direction::Received, RangeSet::from(0..4));
let idx_2 = (Direction::Received, RangeSet::from(4..OK_JSON.len()));
let tree = new_tree(&transcript, [&idx_0, &idx_1].into_iter()).unwrap();
let result = tree
.proof([&idx_0, &idx_1, &idx_2].into_iter())
.unwrap_err();
assert!(matches!(result, EncodingTreeError::MissingLeaf { .. }));
}
#[test]
fn test_encoding_tree_out_of_bounds() {
let transcript = Transcript::new(POST_JSON, OK_JSON);
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len() + 1));
let idx_1 = (Direction::Received, RangeSet::from(0..OK_JSON.len() + 1));
let result = new_tree(&transcript, [&idx_0].into_iter()).unwrap_err();
assert!(matches!(result, EncodingTreeError::MissingEncoding { .. }));
let result = new_tree(&transcript, [&idx_1].into_iter()).unwrap_err();
assert!(matches!(result, EncodingTreeError::MissingEncoding { .. }));
}
#[test]
fn test_encoding_tree_missing_encoding() {
let provider = encoding_provider(&[], &[]);
let result = EncodingTree::new(
&Blake3::default(),
[(Direction::Sent, RangeSet::from(0..8))].iter(),
&provider,
)
.unwrap_err();
assert!(matches!(result, EncodingTreeError::MissingEncoding { .. }));
}
}

View File

@@ -14,6 +14,7 @@ use crate::{
hash::{HashAlgId, HashProvider},
transcript::{
commit::{TranscriptCommitment, TranscriptCommitmentKind},
encoding::{EncoderSecret, EncodingProof, EncodingProofError, EncodingTree},
hash::{hash_plaintext, PlaintextHash, PlaintextHashSecret},
Direction, PartialTranscript, RangeSet, Transcript, TranscriptSecret,
},
@@ -31,12 +32,14 @@ const DEFAULT_COMMITMENT_KINDS: &[TranscriptCommitmentKind] = &[
TranscriptCommitmentKind::Hash {
alg: HashAlgId::KECCAK256,
},
TranscriptCommitmentKind::Encoding,
];
/// Proof of the contents of a transcript.
#[derive(Clone, Serialize, Deserialize)]
pub struct TranscriptProof {
transcript: PartialTranscript,
encoding_proof: Option<EncodingProof>,
hash_secrets: Vec<PlaintextHashSecret>,
}
@@ -50,18 +53,27 @@ impl TranscriptProof {
/// # Arguments
///
/// * `provider` - The hash provider to use for verification.
/// * `length` - The transcript length.
/// * `encoder_secret` - The encoder secret, required if an encoding proof is present.
/// * `commitments` - The commitments to verify against.
pub fn verify_with_provider<'a>(
self,
provider: &HashProvider,
length: &TranscriptLength,
encoder_secret: Option<&EncoderSecret>,
commitments: impl IntoIterator<Item = &'a TranscriptCommitment>,
) -> Result<PartialTranscript, TranscriptProofError> {
let mut encoding_commitment = None;
let mut hash_commitments = HashSet::new();
// Index commitments.
for commitment in commitments {
match commitment {
TranscriptCommitment::Encoding(commitment) => {
if encoding_commitment.replace(commitment).is_some() {
return Err(TranscriptProofError::new(
ErrorKind::Encoding,
"multiple encoding commitments are present.",
));
}
}
TranscriptCommitment::Hash(plaintext_hash) => {
hash_commitments.insert(plaintext_hash);
}
@@ -80,6 +92,34 @@ impl TranscriptProof {
let mut total_auth_sent = RangeSet::default();
let mut total_auth_recv = RangeSet::default();
// Verify encoding proof.
if let Some(proof) = self.encoding_proof {
let secret = encoder_secret.ok_or_else(|| {
TranscriptProofError::new(
ErrorKind::Encoding,
"contains an encoding proof but missing encoder secret",
)
})?;
let commitment = encoding_commitment.ok_or_else(|| {
TranscriptProofError::new(
ErrorKind::Encoding,
"contains an encoding proof but missing encoding commitment",
)
})?;
let (auth_sent, auth_recv) = proof.verify_with_provider(
provider,
secret,
commitment,
self.transcript.sent_unsafe(),
self.transcript.received_unsafe(),
)?;
total_auth_sent.union_mut(&auth_sent);
total_auth_recv.union_mut(&auth_recv);
}
let mut buffer = Vec::new();
for PlaintextHashSecret {
direction,
@@ -163,6 +203,7 @@ impl TranscriptProofError {
#[derive(Debug)]
enum ErrorKind {
Encoding,
Hash,
Proof,
}
@@ -172,6 +213,7 @@ impl fmt::Display for TranscriptProofError {
f.write_str("transcript proof error: ")?;
match self.kind {
ErrorKind::Encoding => f.write_str("encoding error")?,
ErrorKind::Hash => f.write_str("hash error")?,
ErrorKind::Proof => f.write_str("proof error")?,
}
@@ -184,6 +226,12 @@ impl fmt::Display for TranscriptProofError {
}
}
impl From<EncodingProofError> for TranscriptProofError {
fn from(e: EncodingProofError) -> Self {
TranscriptProofError::new(ErrorKind::Encoding, e)
}
}
/// Union of ranges to reveal.
#[derive(Clone, Debug, PartialEq)]
struct QueryIdx {
@@ -228,6 +276,7 @@ pub struct TranscriptProofBuilder<'a> {
/// Commitment kinds in order of preference for building transcript proofs.
commitment_kinds: Vec<TranscriptCommitmentKind>,
transcript: &'a Transcript,
encoding_tree: Option<&'a EncodingTree>,
hash_secrets: Vec<&'a PlaintextHashSecret>,
committed_sent: RangeSet<usize>,
committed_recv: RangeSet<usize>,
@@ -243,9 +292,15 @@ impl<'a> TranscriptProofBuilder<'a> {
let mut committed_sent = RangeSet::default();
let mut committed_recv = RangeSet::default();
let mut encoding_tree = None;
let mut hash_secrets = Vec::new();
for secret in secrets {
match secret {
TranscriptSecret::Encoding(tree) => {
committed_sent.union_mut(tree.idx(Direction::Sent));
committed_recv.union_mut(tree.idx(Direction::Received));
encoding_tree = Some(tree);
}
TranscriptSecret::Hash(hash) => {
match hash.direction {
Direction::Sent => committed_sent.union_mut(&hash.idx),
@@ -259,6 +314,7 @@ impl<'a> TranscriptProofBuilder<'a> {
Self {
commitment_kinds: DEFAULT_COMMITMENT_KINDS.to_vec(),
transcript,
encoding_tree,
hash_secrets,
committed_sent,
committed_recv,
@@ -356,6 +412,7 @@ impl<'a> TranscriptProofBuilder<'a> {
transcript: self
.transcript
.to_partial(self.query_idx.sent.clone(), self.query_idx.recv.clone()),
encoding_proof: None,
hash_secrets: Vec::new(),
};
let mut uncovered_query_idx = self.query_idx.clone();
@@ -367,6 +424,46 @@ impl<'a> TranscriptProofBuilder<'a> {
// self.commitment_kinds.
if let Some(kind) = commitment_kinds_iter.next() {
match kind {
TranscriptCommitmentKind::Encoding => {
let Some(encoding_tree) = self.encoding_tree else {
// Proceeds to the next preferred commitment kind if the encoding
// tree is not available.
continue;
};
let (sent_dir_idxs, sent_uncovered) = uncovered_query_idx.sent.cover_by(
encoding_tree
.transcript_indices()
.filter(|(dir, _)| *dir == Direction::Sent),
|(_, idx)| idx,
);
// Uncovered ranges will be checked with ranges of the next
// preferred commitment kind.
uncovered_query_idx.sent = sent_uncovered;
let (recv_dir_idxs, recv_uncovered) = uncovered_query_idx.recv.cover_by(
encoding_tree
.transcript_indices()
.filter(|(dir, _)| *dir == Direction::Received),
|(_, idx)| idx,
);
uncovered_query_idx.recv = recv_uncovered;
let dir_idxs = sent_dir_idxs
.into_iter()
.chain(recv_dir_idxs)
.collect::<Vec<_>>();
// Skip proof generation if there are no committed ranges that can cover the
// query ranges.
if !dir_idxs.is_empty() {
transcript_proof.encoding_proof = Some(
encoding_tree
.proof(dir_idxs.into_iter())
.expect("subsequences were checked to be in tree"),
);
}
}
TranscriptCommitmentKind::Hash { alg } => {
let (sent_hashes, sent_uncovered) = uncovered_query_idx.sent.cover_by(
self.hash_secrets.iter().filter(|hash| {
@@ -493,10 +590,46 @@ mod tests {
use rstest::rstest;
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
use crate::hash::{Blinder, HashAlgId};
use crate::{
fixtures::{encoder_secret, encoding_provider},
hash::{Blake3, Blinder, HashAlgId},
transcript::TranscriptCommitConfigBuilder,
};
use super::*;
#[rstest]
fn test_verify_missing_encoding_commitment_root() {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let idxs = vec![(Direction::Received, RangeSet::from(0..transcript.len().1))];
let encoding_tree = EncodingTree::new(
&Blake3::default(),
&idxs,
&encoding_provider(transcript.sent(), transcript.received()),
)
.unwrap();
let secrets = vec![TranscriptSecret::Encoding(encoding_tree)];
let mut builder = TranscriptProofBuilder::new(&transcript, &secrets);
builder.reveal_recv(&(0..transcript.len().1)).unwrap();
let transcript_proof = builder.build().unwrap();
let provider = HashProvider::default();
let err = transcript_proof
.verify_with_provider(
&provider,
&transcript.length(),
Some(&encoder_secret()),
&[],
)
.err()
.unwrap();
assert!(matches!(err.kind, ErrorKind::Encoding));
}
#[rstest]
fn test_reveal_range_out_of_bounds() {
let transcript = Transcript::new(
@@ -516,7 +649,7 @@ mod tests {
}
#[rstest]
fn test_reveal_missing_commitment() {
fn test_reveal_missing_encoding_tree() {
let transcript = Transcript::new(
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
@@ -565,6 +698,7 @@ mod tests {
.verify_with_provider(
&provider,
&transcript.length(),
None,
&[TranscriptCommitment::Hash(commitment)],
)
.unwrap();
@@ -614,6 +748,7 @@ mod tests {
.verify_with_provider(
&provider,
&transcript.length(),
None,
&[TranscriptCommitment::Hash(commitment)],
)
.unwrap_err();
@@ -629,19 +764,24 @@ mod tests {
TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256,
},
TranscriptCommitmentKind::Encoding,
TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256,
},
TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256,
},
TranscriptCommitmentKind::Encoding,
]);
assert_eq!(
builder.commitment_kinds,
vec![TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256
},]
vec![
TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256
},
TranscriptCommitmentKind::Encoding
]
);
}
@@ -651,7 +791,7 @@ mod tests {
RangeSet::from([0..10, 12..30]),
true,
)]
#[case::reveal_all_rangesets_with_single_superset_range(
#[case::reveal_all_rangesets_with_superset_ranges(
vec![RangeSet::from([0..1]), RangeSet::from([1..2, 8..9]), RangeSet::from([2..4, 6..8]), RangeSet::from([2..3, 6..7]), RangeSet::from([9..12])],
RangeSet::from([0..4, 6..9]),
true,
@@ -682,30 +822,29 @@ mod tests {
false,
)]
#[allow(clippy::single_range_in_vec_init)]
fn test_reveal_multiple_rangesets_with_one_rangeset(
fn test_reveal_multiple_rangesets_with_one_rangeset(
#[case] commit_recv_rangesets: Vec<RangeSet<usize>>,
#[case] reveal_recv_rangeset: RangeSet<usize>,
#[case] success: bool,
) {
use rand::{Rng, SeedableRng};
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
// Create hash commitments for each rangeset
let mut secrets = Vec::new();
// Encoding commitment kind
let mut transcript_commitment_builder = TranscriptCommitConfigBuilder::new(&transcript);
for rangeset in commit_recv_rangesets.iter() {
let blinder: crate::hash::Blinder = rng.random();
let secret = PlaintextHashSecret {
direction: Direction::Received,
idx: rangeset.clone(),
alg: HashAlgId::BLAKE3,
blinder,
};
secrets.push(TranscriptSecret::Hash(secret));
transcript_commitment_builder.commit_recv(rangeset).unwrap();
}
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap();
let encoding_tree = EncodingTree::new(
&Blake3::default(),
transcripts_commitment_config.iter_encoding(),
&encoding_provider(GET_WITH_HEADER, OK_JSON),
)
.unwrap();
let secrets = vec![TranscriptSecret::Encoding(encoding_tree)];
let mut builder = TranscriptProofBuilder::new(&transcript, &secrets);
if success {
@@ -758,34 +897,27 @@ mod tests {
#[case] uncovered_sent_rangeset: RangeSet<usize>,
#[case] uncovered_recv_rangeset: RangeSet<usize>,
) {
use rand::{Rng, SeedableRng};
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
// Create hash commitments for each rangeset
let mut secrets = Vec::new();
// Encoding commitment kind
let mut transcript_commitment_builder = TranscriptCommitConfigBuilder::new(&transcript);
for rangeset in commit_sent_rangesets.iter() {
let blinder: crate::hash::Blinder = rng.random();
let secret = PlaintextHashSecret {
direction: Direction::Sent,
idx: rangeset.clone(),
alg: HashAlgId::BLAKE3,
blinder,
};
secrets.push(TranscriptSecret::Hash(secret));
transcript_commitment_builder.commit_sent(rangeset).unwrap();
}
for rangeset in commit_recv_rangesets.iter() {
let blinder: crate::hash::Blinder = rng.random();
let secret = PlaintextHashSecret {
direction: Direction::Received,
idx: rangeset.clone(),
alg: HashAlgId::BLAKE3,
blinder,
};
secrets.push(TranscriptSecret::Hash(secret));
transcript_commitment_builder.commit_recv(rangeset).unwrap();
}
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap();
let encoding_tree = EncodingTree::new(
&Blake3::default(),
transcripts_commitment_config.iter_encoding(),
&encoding_provider(GET_WITH_HEADER, OK_JSON),
)
.unwrap();
let secrets = vec![TranscriptSecret::Encoding(encoding_tree)];
let mut builder = TranscriptProofBuilder::new(&transcript, &secrets);
builder.reveal_sent(&reveal_sent_rangeset).unwrap();
builder.reveal_recv(&reveal_recv_rangeset).unwrap();

View File

@@ -332,6 +332,7 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
let (
VerifierOutput {
transcript_commitments,
encoder_secret,
..
},
verifier,
@@ -392,6 +393,10 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
.server_ephemeral_key(tls_transcript.server_ephemeral_key().clone())
.transcript_commitments(transcript_commitments);
if let Some(encoder_secret) = encoder_secret {
builder.encoder_secret(encoder_secret);
}
let attestation = builder.build(&provider)?;
// Send attestation to prover.

View File

@@ -1,59 +1,51 @@
#### Default Representative Benchmarks ####
#
# This benchmark measures TLSNotary performance on three representative network scenarios.
# Each scenario is run multiple times to produce statistical metrics (median, std dev, etc.)
# rather than plots. Use this for quick performance checks and CI regression testing.
#
# Payload sizes:
# - upload-size: 1KB (typical HTTP request)
# - download-size: 2KB (typical HTTP response/API data)
#
# Network scenarios are chosen to represent real-world user conditions where
# TLSNotary is primarily bottlenecked by upload bandwidth.
#### Cable/DSL Home Internet ####
# Most common residential internet connection
# - Asymmetric: high download, limited upload (typical bottleneck)
# - Upload bandwidth: 20 Mbps (realistic cable/DSL upload speed)
# - Latency: 20ms (typical ISP latency)
#### Latency ####
[[group]]
name = "cable"
bandwidth = 20
protocol_latency = 20
upload-size = 1024
download-size = 2048
name = "latency"
bandwidth = 1000
[[bench]]
group = "cable"
#### Mobile 5G ####
# Modern mobile connection with good coverage
# - Upload bandwidth: 30 Mbps (typical 5G upload in good conditions)
# - Latency: 30ms (higher than wired due to mobile tower hops)
[[group]]
name = "mobile_5g"
bandwidth = 30
protocol_latency = 30
upload-size = 1024
download-size = 2048
group = "latency"
protocol_latency = 10
[[bench]]
group = "mobile_5g"
group = "latency"
protocol_latency = 25
#### Fiber Home Internet ####
# High-end residential connection (best case scenario)
# - Symmetric: equal upload/download bandwidth
# - Upload bandwidth: 100 Mbps (typical fiber upload)
# - Latency: 15ms (lower latency than cable)
[[bench]]
group = "latency"
protocol_latency = 50
[[bench]]
group = "latency"
protocol_latency = 100
[[bench]]
group = "latency"
protocol_latency = 200
#### Bandwidth ####
[[group]]
name = "fiber"
name = "bandwidth"
protocol_latency = 25
[[bench]]
group = "bandwidth"
bandwidth = 10
[[bench]]
group = "bandwidth"
bandwidth = 50
[[bench]]
group = "bandwidth"
bandwidth = 100
protocol_latency = 15
upload-size = 1024
download-size = 2048
[[bench]]
group = "fiber"
group = "bandwidth"
bandwidth = 250
[[bench]]
group = "bandwidth"
bandwidth = 1000

View File

@@ -1,52 +0,0 @@
#### Bandwidth Sweep Benchmark ####
#
# Measures how network bandwidth affects TLSNotary runtime.
# Keeps latency and payload sizes fixed while varying upload bandwidth.
#
# Fixed parameters:
# - Latency: 25ms (typical internet latency)
# - Upload: 1KB (typical request)
# - Download: 2KB (typical response)
#
# Variable: Bandwidth from 5 Mbps to 1000 Mbps
#
# Use this to plot "Bandwidth vs Runtime" and understand bandwidth sensitivity.
# Focus on upload bandwidth as TLSNotary is primarily upload-bottlenecked
[[group]]
name = "bandwidth_sweep"
protocol_latency = 25
upload-size = 1024
download-size = 2048
[[bench]]
group = "bandwidth_sweep"
bandwidth = 5
[[bench]]
group = "bandwidth_sweep"
bandwidth = 10
[[bench]]
group = "bandwidth_sweep"
bandwidth = 20
[[bench]]
group = "bandwidth_sweep"
bandwidth = 50
[[bench]]
group = "bandwidth_sweep"
bandwidth = 100
[[bench]]
group = "bandwidth_sweep"
bandwidth = 250
[[bench]]
group = "bandwidth_sweep"
bandwidth = 500
[[bench]]
group = "bandwidth_sweep"
bandwidth = 1000

View File

@@ -1,53 +0,0 @@
#### Download Size Sweep Benchmark ####
#
# Measures how download payload size affects TLSNotary runtime.
# Keeps network conditions fixed while varying the response size.
#
# Fixed parameters:
# - Bandwidth: 100 Mbps (typical good connection)
# - Latency: 25ms (typical internet latency)
# - Upload: 1KB (typical request size)
#
# Variable: Download size from 1KB to 100KB
#
# Use this to plot "Download Size vs Runtime" and understand how much data
# TLSNotary can efficiently notarize. Useful for determining optimal
# chunking strategies for large responses.
[[group]]
name = "download_sweep"
bandwidth = 100
protocol_latency = 25
upload-size = 1024
[[bench]]
group = "download_sweep"
download-size = 1024
[[bench]]
group = "download_sweep"
download-size = 2048
[[bench]]
group = "download_sweep"
download-size = 5120
[[bench]]
group = "download_sweep"
download-size = 10240
[[bench]]
group = "download_sweep"
download-size = 20480
[[bench]]
group = "download_sweep"
download-size = 30720
[[bench]]
group = "download_sweep"
download-size = 40960
[[bench]]
group = "download_sweep"
download-size = 51200

View File

@@ -1,47 +0,0 @@
#### Latency Sweep Benchmark ####
#
# Measures how network latency affects TLSNotary runtime.
# Keeps bandwidth and payload sizes fixed while varying protocol latency.
#
# Fixed parameters:
# - Bandwidth: 100 Mbps (typical good connection)
# - Upload: 1KB (typical request)
# - Download: 2KB (typical response)
#
# Variable: Protocol latency from 10ms to 200ms
#
# Use this to plot "Latency vs Runtime" and understand latency sensitivity.
[[group]]
name = "latency_sweep"
bandwidth = 100
upload-size = 1024
download-size = 2048
[[bench]]
group = "latency_sweep"
protocol_latency = 10
[[bench]]
group = "latency_sweep"
protocol_latency = 25
[[bench]]
group = "latency_sweep"
protocol_latency = 50
[[bench]]
group = "latency_sweep"
protocol_latency = 75
[[bench]]
group = "latency_sweep"
protocol_latency = 100
[[bench]]
group = "latency_sweep"
protocol_latency = 150
[[bench]]
group = "latency_sweep"
protocol_latency = 200

View File

@@ -7,10 +7,9 @@ publish = false
[dependencies]
tlsn-harness-core = { workspace = true }
# tlsn-server-fixture = { workspace = true }
charming = { version = "0.5.1", features = ["ssr"] }
csv = "1.3.0"
charming = { version = "0.6.0", features = ["ssr"] }
clap = { workspace = true, features = ["derive", "env"] }
itertools = "0.14.0"
polars = { version = "0.44", features = ["csv", "lazy"] }
toml = { workspace = true }

View File

@@ -0,0 +1,111 @@
# TLSNotary Benchmark Plot Tool
Generates interactive HTML and SVG plots from TLSNotary benchmark results. Supports comparing multiple benchmark runs (e.g., before/after optimization, native vs browser).
## Usage
```bash
tlsn-harness-plot <TOML> <CSV>... [OPTIONS]
```
### Arguments
- `<TOML>` - Path to Bench.toml file defining benchmark structure
- `<CSV>...` - One or more CSV files with benchmark results
### Options
- `-l, --labels <LABEL>...` - Labels for each dataset (optional)
- If omitted, datasets are labeled "Dataset 1", "Dataset 2", etc.
- Number of labels must match number of CSV files
- `--min-max-band` - Add min/max bands to plots showing variance
- `-h, --help` - Print help information
## Examples
### Single Dataset
```bash
tlsn-harness-plot bench.toml results.csv
```
Generates plots from a single benchmark run.
### Compare Two Runs
```bash
tlsn-harness-plot bench.toml before.csv after.csv \
--labels "Before Optimization" "After Optimization"
```
Overlays two datasets to compare performance improvements.
### Multiple Datasets
```bash
tlsn-harness-plot bench.toml native.csv browser.csv wasm.csv \
--labels "Native" "Browser" "WASM"
```
Compare three different runtime environments.
### With Min/Max Bands
```bash
tlsn-harness-plot bench.toml run1.csv run2.csv \
--labels "Config A" "Config B" \
--min-max-band
```
Shows variance ranges for each dataset.
## Output Files
The tool generates three files per benchmark group:
- `<output>.html` - Interactive HTML chart (zoomable, hoverable)
- `<output>.svg` - Static SVG image for documentation
- `<output>_dark.svg` - Static SVG image rendered with the dark theme
Default output filenames:
- `runtime_vs_bandwidth.{html,svg}` - When bandwidth varies within the group
- `runtime_vs_latency.{html,svg}` - When protocol latency varies within the group
- `runtime_vs_download_size.{html,svg}` - When download size varies within the group
## Plot Format
Each dataset displays:
- **Solid line** - Total runtime (preprocessing + online phase)
- **Dashed line** - Online phase only
- **Shaded area** (optional) - Min/max variance bands
Different datasets automatically use distinct colors for easy comparison.
## CSV Format
Expected columns in each CSV file:
- `group` - Benchmark group name (must match TOML)
- `bandwidth` - Network bandwidth in Kbps (for bandwidth plots)
- `latency` - Network latency in ms (for latency plots)
- `time_preprocess` - Preprocessing time in ms
- `time_online` - Online phase time in ms
- `time_total` - Total runtime in ms
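For reference, two measurement rows might look like this (values are illustrative):
```csv
group,bandwidth,latency,time_preprocess,time_online,time_total
bandwidth,10000,25,12500,3400,15900
bandwidth,50000,25,9800,2100,11900
```
Repeated rows for the same configuration are aggregated into min/mean/max statistics before plotting.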
## TOML Format
The benchmark TOML file defines groups with either:
```toml
[[group]]
name = "my_benchmark"
protocol_latency = 50 # Fixed latency for bandwidth plots
# OR
bandwidth = 10000 # Fixed bandwidth for latency plots
```
All datasets must use the same TOML file to ensure consistent benchmark structure.
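Individual benches reference a group by name and set the parameter being swept, for example:
```toml
[[bench]]
group = "my_benchmark"
bandwidth = 100
```
The tool inspects which field varies across a group's benches (bandwidth, protocol latency, or download size) and selects the x-axis and output filename accordingly.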
## Tips
- Use descriptive labels to make plots self-documenting
- Keep CSV files from the same benchmark configuration for valid comparisons
- Min/max bands are useful for showing stability but can clutter plots with many datasets
- Interactive HTML plots support zooming and hovering for detailed values

View File

@@ -1,17 +1,18 @@
use std::f32;
use charming::{
Chart, HtmlRenderer,
Chart, HtmlRenderer, ImageRenderer,
component::{Axis, Legend, Title},
element::{AreaStyle, LineStyle, NameLocation, Orient, TextStyle, Tooltip, Trigger},
element::{
AreaStyle, ItemStyle, LineStyle, LineStyleType, NameLocation, Orient, TextStyle, Tooltip,
Trigger,
},
series::Line,
theme::Theme,
};
use clap::Parser;
use harness_core::bench::{BenchItems, Measurement};
use itertools::Itertools;
const THEME: Theme = Theme::Default;
use harness_core::bench::BenchItems;
use polars::prelude::*;
#[derive(Parser, Debug)]
#[command(author, version, about)]
@@ -19,72 +20,131 @@ struct Cli {
/// Path to the Bench.toml file with benchmark spec
toml: String,
/// Path to the CSV file with benchmark results
csv: String,
/// Paths to CSV files with benchmark results (one or more)
csv: Vec<String>,
/// Prover kind: native or browser
#[arg(short, long, value_enum, default_value = "native")]
prover_kind: ProverKind,
/// Labels for each dataset (optional, defaults to "Dataset 1", "Dataset 2", etc.)
#[arg(short, long, num_args = 0..)]
labels: Vec<String>,
/// Add min/max bands to plots
#[arg(long, default_value_t = false)]
min_max_band: bool,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum)]
enum ProverKind {
Native,
Browser,
}
impl std::fmt::Display for ProverKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ProverKind::Native => write!(f, "Native"),
ProverKind::Browser => write!(f, "Browser"),
}
}
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
let cli = Cli::parse();
let mut rdr = csv::Reader::from_path(&cli.csv)?;
if cli.csv.is_empty() {
return Err("At least one CSV file must be provided".into());
}
// Generate labels if not provided
let labels: Vec<String> = if cli.labels.is_empty() {
cli.csv
.iter()
.enumerate()
.map(|(i, _)| format!("Dataset {}", i + 1))
.collect()
} else if cli.labels.len() != cli.csv.len() {
return Err(format!(
"Number of labels ({}) must match number of CSV files ({})",
cli.labels.len(),
cli.csv.len()
)
.into());
} else {
cli.labels.clone()
};
// Load all CSVs and add dataset label
let mut dfs = Vec::new();
for (csv_path, label) in cli.csv.iter().zip(labels.iter()) {
let mut df = CsvReadOptions::default()
.try_into_reader_with_file_path(Some(csv_path.clone().into()))?
.finish()?;
let label_series = Series::new("dataset_label".into(), vec![label.as_str(); df.height()]);
df.with_column(label_series)?;
dfs.push(df);
}
// Combine all dataframes
let df = dfs
.into_iter()
.reduce(|acc, df| acc.vstack(&df).unwrap())
.unwrap();
let items: BenchItems = toml::from_str(&std::fs::read_to_string(&cli.toml)?)?;
let groups = items.group;
// Prepare data for plotting.
let all_data: Vec<Measurement> = rdr
.deserialize::<Measurement>()
.collect::<Result<Vec<_>, _>>()?;
for group in groups {
if group.protocol_latency.is_some() {
let latency = group.protocol_latency.unwrap();
plot_runtime_vs(
&all_data,
cli.min_max_band,
&group.name,
|r| r.bandwidth as f32 / 1000.0, // Kbps to Mbps
"Runtime vs Bandwidth",
format!("{} ms Latency, {} mode", latency, cli.prover_kind),
"runtime_vs_bandwidth.html",
"Bandwidth (Mbps)",
)?;
// Determine which field varies in benches for this group
let benches_in_group: Vec<_> = items
.bench
.iter()
.filter(|b| b.group.as_deref() == Some(&group.name))
.collect();
if benches_in_group.is_empty() {
continue;
}
if group.bandwidth.is_some() {
let bandwidth = group.bandwidth.unwrap();
// Check which field has varying values
let bandwidth_varies = benches_in_group
.windows(2)
.any(|w| w[0].bandwidth != w[1].bandwidth);
let latency_varies = benches_in_group
.windows(2)
.any(|w| w[0].protocol_latency != w[1].protocol_latency);
let download_size_varies = benches_in_group
.windows(2)
.any(|w| w[0].download_size != w[1].download_size);
if download_size_varies {
let upload_size = group.upload_size.unwrap_or(1024);
plot_runtime_vs(
&all_data,
&df,
&labels,
cli.min_max_band,
&group.name,
|r| r.latency as f32,
"download_size",
1.0 / 1024.0, // bytes to KB
"Runtime vs Response Size",
format!("{} bytes upload size", upload_size),
"runtime_vs_download_size",
"Response Size (KB)",
true, // legend on left
)?;
} else if bandwidth_varies {
let latency = group.protocol_latency.unwrap_or(50);
plot_runtime_vs(
&df,
&labels,
cli.min_max_band,
&group.name,
"bandwidth",
1.0 / 1000.0, // Kbps to Mbps
"Runtime vs Bandwidth",
format!("{} ms Latency", latency),
"runtime_vs_bandwidth",
"Bandwidth (Mbps)",
false, // legend on right
)?;
} else if latency_varies {
let bandwidth = group.bandwidth.unwrap_or(1000);
plot_runtime_vs(
&df,
&labels,
cli.min_max_band,
&group.name,
"latency",
1.0,
"Runtime vs Latency",
format!("{} bps bandwidth, {} mode", bandwidth, cli.prover_kind),
"runtime_vs_latency.html",
format!("{} bps bandwidth", bandwidth),
"runtime_vs_latency",
"Latency (ms)",
true, // legend on left
)?;
}
}
@@ -92,83 +152,51 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
Ok(())
}
struct DataPoint {
min: f32,
mean: f32,
max: f32,
}
struct Points {
preprocess: DataPoint,
online: DataPoint,
total: DataPoint,
}
#[allow(clippy::too_many_arguments)]
fn plot_runtime_vs<Fx>(
all_data: &[Measurement],
fn plot_runtime_vs(
df: &DataFrame,
labels: &[String],
show_min_max: bool,
group: &str,
x_value: Fx,
x_col: &str,
x_scale: f32,
title: &str,
subtitle: String,
output_file: &str,
x_axis_label: &str,
) -> Result<Chart, Box<dyn std::error::Error>>
where
Fx: Fn(&Measurement) -> f32,
{
fn data_point(values: &[f32]) -> DataPoint {
let mean = values.iter().copied().sum::<f32>() / values.len() as f32;
let max = values.iter().copied().reduce(f32::max).unwrap_or_default();
let min = values.iter().copied().reduce(f32::min).unwrap_or_default();
DataPoint { min, mean, max }
}
legend_left: bool,
) -> Result<Chart, Box<dyn std::error::Error>> {
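// Aggregate per (x, dataset_label): min/mean/max of each phase, scaling the
// x column by `x_scale` and converting times from ms to seconds.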
let stats_df = df
.clone()
.lazy()
.filter(col("group").eq(lit(group)))
.with_column((col(x_col).cast(DataType::Float32) * lit(x_scale)).alias("x"))
.with_columns([
(col("time_preprocess").cast(DataType::Float32) / lit(1000.0)).alias("preprocess"),
(col("time_online").cast(DataType::Float32) / lit(1000.0)).alias("online"),
(col("time_total").cast(DataType::Float32) / lit(1000.0)).alias("total"),
])
.group_by([col("x"), col("dataset_label")])
.agg([
col("preprocess").min().alias("preprocess_min"),
col("preprocess").mean().alias("preprocess_mean"),
col("preprocess").max().alias("preprocess_max"),
col("online").min().alias("online_min"),
col("online").mean().alias("online_mean"),
col("online").max().alias("online_max"),
col("total").min().alias("total_min"),
col("total").mean().alias("total_mean"),
col("total").max().alias("total_max"),
])
.sort(["dataset_label", "x"], Default::default())
.collect()?;
let stats: Vec<(f32, Points)> = all_data
.iter()
.filter(|r| r.group.as_deref() == Some(group))
.map(|r| {
(
x_value(r),
r.time_preprocess as f32 / 1000.0, // ms to s
r.time_online as f32 / 1000.0,
r.time_total as f32 / 1000.0,
)
})
.sorted_by(|a, b| a.0.partial_cmp(&b.0).unwrap())
.chunk_by(|entry| entry.0)
.into_iter()
.map(|(x, group)| {
let group_vec: Vec<_> = group.collect();
let preprocess = data_point(
&group_vec
.iter()
.map(|(_, t, _, _)| *t)
.collect::<Vec<f32>>(),
);
let online = data_point(
&group_vec
.iter()
.map(|(_, _, t, _)| *t)
.collect::<Vec<f32>>(),
);
let total = data_point(
&group_vec
.iter()
.map(|(_, _, _, t)| *t)
.collect::<Vec<f32>>(),
);
(
x,
Points {
preprocess,
online,
total,
},
)
})
.collect();
// Build legend entries
let mut legend_data = Vec::new();
for label in labels {
legend_data.push(format!("Total Mean ({})", label));
legend_data.push(format!("Online Mean ({})", label));
}
let mut chart = Chart::new()
.title(
@@ -179,14 +207,6 @@ where
.subtext_style(TextStyle::new().font_size(16)),
)
.tooltip(Tooltip::new().trigger(Trigger::Axis))
.legend(
Legend::new()
.data(vec!["Preprocess Mean", "Online Mean", "Total Mean"])
.top("80")
.right("110")
.orient(Orient::Vertical)
.item_gap(10),
)
.x_axis(
Axis::new()
.name(x_axis_label)
@@ -205,73 +225,156 @@ where
.name_text_style(TextStyle::new().font_size(21)),
);
chart = add_mean_series(chart, &stats, "Preprocess Mean", |p| p.preprocess.mean);
chart = add_mean_series(chart, &stats, "Online Mean", |p| p.online.mean);
chart = add_mean_series(chart, &stats, "Total Mean", |p| p.total.mean);
// Add legend with conditional positioning
let legend = Legend::new()
.data(legend_data)
.top("80")
.orient(Orient::Vertical)
.item_gap(10);
if show_min_max {
chart = add_min_max_band(
chart,
&stats,
"Preprocess Min/Max",
|p| &p.preprocess,
"#ccc",
);
chart = add_min_max_band(chart, &stats, "Online Min/Max", |p| &p.online, "#ccc");
chart = add_min_max_band(chart, &stats, "Total Min/Max", |p| &p.total, "#ccc");
let legend = if legend_left {
legend.left("110")
} else {
legend.right("110")
};
chart = chart.legend(legend);
// Define colors for each dataset
let colors = vec![
"#5470c6", "#91cc75", "#fac858", "#ee6666", "#73c0de", "#3ba272", "#fc8452", "#9a60b4",
];
for (idx, label) in labels.iter().enumerate() {
let color = colors.get(idx % colors.len()).unwrap();
// Total time - solid line
chart = add_dataset_series(
&chart,
&stats_df,
label,
&format!("Total Mean ({})", label),
"total_mean",
false,
color,
)?;
// Online time - dashed line (same color as total)
chart = add_dataset_series(
&chart,
&stats_df,
label,
&format!("Online Mean ({})", label),
"online_mean",
true,
color,
)?;
if show_min_max {
chart = add_dataset_min_max_band(
&chart,
&stats_df,
label,
&format!("Total Min/Max ({})", label),
"total",
color,
)?;
}
}
// Save the chart as HTML file.
// Save the chart as HTML file (no theme)
HtmlRenderer::new(title, 1000, 800)
.theme(THEME)
.save(&chart, output_file)
.save(&chart, &format!("{}.html", output_file))
.unwrap();
// Save SVG with default theme
ImageRenderer::new(1000, 800)
.theme(Theme::Default)
.save(&chart, &format!("{}.svg", output_file))
.unwrap();
// Save SVG with dark theme
ImageRenderer::new(1000, 800)
.theme(Theme::Dark)
.save(&chart, &format!("{}_dark.svg", output_file))
.unwrap();
Ok(chart)
}
fn add_mean_series(
chart: Chart,
stats: &[(f32, Points)],
name: &str,
extract: impl Fn(&Points) -> f32,
) -> Chart {
chart.series(
Line::new()
.name(name)
.data(
stats
.iter()
.map(|(x, points)| vec![*x, extract(points)])
.collect(),
)
.symbol_size(6),
)
fn add_dataset_series(
chart: &Chart,
df: &DataFrame,
dataset_label: &str,
series_name: &str,
col_name: &str,
dashed: bool,
color: &str,
) -> Result<Chart, Box<dyn std::error::Error>> {
// Filter for specific dataset
let mask = df.column("dataset_label")?.str()?.equal(dataset_label);
let filtered = df.filter(&mask)?;
let x = filtered.column("x")?.f32()?;
let y = filtered.column(col_name)?.f32()?;
let data: Vec<Vec<f32>> = x
.into_iter()
.zip(y.into_iter())
.filter_map(|(x, y)| Some(vec![x?, y?]))
.collect();
let mut line = Line::new()
.name(series_name)
.data(data)
.symbol_size(6)
.item_style(ItemStyle::new().color(color));
let mut line_style = LineStyle::new();
if dashed {
line_style = line_style.type_(LineStyleType::Dashed);
}
line = line.line_style(line_style.color(color));
Ok(chart.clone().series(line))
}
fn add_min_max_band(
chart: Chart,
stats: &[(f32, Points)],
fn add_dataset_min_max_band(
chart: &Chart,
df: &DataFrame,
dataset_label: &str,
name: &str,
extract: impl Fn(&Points) -> &DataPoint,
col_prefix: &str,
color: &str,
) -> Chart {
chart.series(
) -> Result<Chart, Box<dyn std::error::Error>> {
// Filter for specific dataset
let mask = df.column("dataset_label")?.str()?.equal(dataset_label);
let filtered = df.filter(&mask)?;
let x = filtered.column("x")?.f32()?;
let min_col = filtered.column(&format!("{}_min", col_prefix))?.f32()?;
let max_col = filtered.column(&format!("{}_max", col_prefix))?.f32()?;
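// Trace a closed band outline: max values left-to-right, then min values
// right-to-left, so the area style fills the region between min and max.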
let max_data: Vec<Vec<f32>> = x
.into_iter()
.zip(max_col.into_iter())
.filter_map(|(x, y)| Some(vec![x?, y?]))
.collect();
let min_data: Vec<Vec<f32>> = x
.into_iter()
.zip(min_col.into_iter())
.filter_map(|(x, y)| Some(vec![x?, y?]))
.rev()
.collect();
let data: Vec<Vec<f32>> = max_data.into_iter().chain(min_data).collect();
Ok(chart.clone().series(
Line::new()
.name(name)
.data(
stats
.iter()
.map(|(x, points)| vec![*x, extract(points).max])
.chain(
stats
.iter()
.rev()
.map(|(x, points)| vec![*x, extract(points).min]),
)
.collect(),
)
.data(data)
.show_symbol(false)
.line_style(LineStyle::new().opacity(0.0))
.area_style(AreaStyle::new().opacity(0.3).color(color)),
)
))
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -22,7 +22,6 @@ clap = { workspace = true, features = ["derive", "env"] }
csv = { version = "1.3" }
duct = { version = "1" }
futures = { workspace = true }
indicatif = { version = "0.17" }
ipnet = { workspace = true }
serio = { workspace = true }
serde_json = { workspace = true }

View File

@@ -16,10 +16,6 @@ pub struct Cli {
/// Subnet to assign harness network interfaces.
#[arg(long, default_value = "10.250.0.0/24", env = "SUBNET")]
pub subnet: Ipv4Net,
/// Run browser in headed mode (visible window) for debugging.
/// Works with both X11 and Wayland.
#[arg(long)]
pub headed: bool,
}
#[derive(Subcommand)]
@@ -35,13 +31,10 @@ pub enum Command {
},
/// Runs benchmarks.
Bench {
/// Configuration path. Defaults to bench.toml which contains
/// representative scenarios (cable, 5G, fiber) for quick performance
/// checks. Use bench_*_sweep.toml files for parametric
/// analysis.
/// Configuration path.
#[arg(short, long, default_value = "bench.toml")]
config: PathBuf,
/// Output CSV file path for detailed metrics and post-processing.
/// Output file path.
#[arg(short, long, default_value = "metrics.csv")]
output: PathBuf,
/// Number of samples to measure per benchmark. This is overridden by

View File

@@ -28,9 +28,6 @@ pub struct Executor {
ns: Namespace,
config: ExecutorConfig,
target: Target,
/// Display environment variables for headed mode (X11/Wayland).
/// Empty means headless mode.
display_env: Vec<String>,
state: State,
}
@@ -52,17 +49,11 @@ impl State {
}
impl Executor {
pub fn new(
ns: Namespace,
config: ExecutorConfig,
target: Target,
display_env: Vec<String>,
) -> Self {
pub fn new(ns: Namespace, config: ExecutorConfig, target: Target) -> Self {
Self {
ns,
config,
target,
display_env,
state: State::Init,
}
}
@@ -129,49 +120,23 @@ impl Executor {
let tmp = duct::cmd!("mktemp", "-d").read()?;
let tmp = tmp.trim();
let headed = !self.display_env.is_empty();
// Build command args based on headed/headless mode
let mut args: Vec<String> = vec![
"ip".into(),
"netns".into(),
"exec".into(),
self.ns.name().into(),
];
if headed {
// For headed mode: drop back to the current user and pass display env vars
// This allows the browser to connect to X11/Wayland while in the namespace
let user =
std::env::var("USER").context("USER environment variable not set")?;
args.extend(["sudo".into(), "-E".into(), "-u".into(), user, "env".into()]);
args.extend(self.display_env.clone());
}
args.push(chrome_path.to_string_lossy().into());
args.push(format!("--remote-debugging-port={PORT_BROWSER}"));
if headed {
// Headed mode: no headless, add flags to suppress first-run dialogs
args.extend(["--no-first-run".into(), "--no-default-browser-check".into()]);
} else {
// Headless mode: original flags
args.extend([
"--headless".into(),
"--disable-dev-shm-usage".into(),
"--disable-gpu".into(),
"--disable-cache".into(),
"--disable-application-cache".into(),
]);
}
args.extend([
"--no-sandbox".into(),
let process = duct::cmd!(
"sudo",
"ip",
"netns",
"exec",
self.ns.name(),
chrome_path,
format!("--remote-debugging-port={PORT_BROWSER}"),
"--headless",
"--disable-dev-shm-usage",
"--disable-gpu",
"--disable-cache",
"--disable-application-cache",
"--no-sandbox",
format!("--user-data-dir={tmp}"),
"--allowed-ips=10.250.0.1".into(),
]);
let process = duct::cmd("sudo", &args);
format!("--allowed-ips=10.250.0.1"),
);
let process = if !cfg!(feature = "debug") {
process.stderr_capture().stdout_capture().start()?

View File

@@ -9,7 +9,7 @@ mod ws_proxy;
#[cfg(feature = "debug")]
mod debug_prelude;
use std::{collections::HashMap, time::Duration};
use std::time::Duration;
use anyhow::Result;
use clap::Parser;
@@ -22,7 +22,6 @@ use harness_core::{
rpc::{BenchCmd, TestCmd},
test::TestStatus,
};
use indicatif::{ProgressBar, ProgressStyle};
use cli::{Cli, Command};
use executor::Executor;
@@ -33,60 +32,6 @@ use crate::debug_prelude::*;
use crate::{cli::Route, network::Network, wasm_server::WasmServer, ws_proxy::WsProxy};
/// Statistics for a benchmark configuration
#[derive(Debug, Clone)]
struct BenchStats {
group: Option<String>,
bandwidth: usize,
latency: usize,
upload_size: usize,
download_size: usize,
times: Vec<u64>,
}
impl BenchStats {
fn median(&self) -> f64 {
let mut sorted = self.times.clone();
sorted.sort();
let len = sorted.len();
if len == 0 {
return 0.0;
}
if len.is_multiple_of(2) {
(sorted[len / 2 - 1] + sorted[len / 2]) as f64 / 2.0
} else {
sorted[len / 2] as f64
}
}
}
/// Print summary table of benchmark results
fn print_bench_summary(stats: &[BenchStats]) {
if stats.is_empty() {
println!("\nNo benchmark results to display (only warmup was run).");
return;
}
println!("\n{}", "=".repeat(80));
println!("TLSNotary Benchmark Results");
println!("{}", "=".repeat(80));
println!();
for stat in stats {
let group_name = stat.group.as_deref().unwrap_or("unnamed");
println!(
"{} ({} Mbps, {}ms latency, {}KB↑ {}KB↓):",
group_name,
stat.bandwidth,
stat.latency,
stat.upload_size / 1024,
stat.download_size / 1024
);
println!(" Median: {:.2}s", stat.median() / 1000.0);
println!();
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum, Default)]
pub enum Target {
#[default]
@@ -105,46 +50,14 @@ struct Runner {
started: bool,
}
/// Collects display-related environment variables for headed browser mode.
/// Works with both X11 and Wayland by collecting whichever vars are present.
fn collect_display_env_vars() -> Vec<String> {
const DISPLAY_VARS: &[&str] = &[
"DISPLAY", // X11
"XAUTHORITY", // X11 auth
"WAYLAND_DISPLAY", // Wayland
"XDG_RUNTIME_DIR", // Wayland runtime dir
];
DISPLAY_VARS
.iter()
.filter_map(|&var| {
std::env::var(var)
.ok()
.map(|val| format!("{}={}", var, val))
})
.collect()
}
impl Runner {
fn new(cli: &Cli) -> Result<Self> {
let Cli {
target,
subnet,
headed,
..
} = cli;
let Cli { target, subnet, .. } = cli;
let current_path = std::env::current_exe().unwrap();
let fixture_path = current_path.parent().unwrap().join("server-fixture");
let network_config = NetworkConfig::new(*subnet);
let network = Network::new(network_config.clone())?;
// Collect display env vars once if headed mode is enabled
let display_env = if *headed {
collect_display_env_vars()
} else {
Vec::new()
};
let server_fixture =
ServerFixture::new(fixture_path, network.ns_app().clone(), network_config.app);
let wasm_server = WasmServer::new(
@@ -162,7 +75,6 @@ impl Runner {
.network_config(network_config.clone())
.build(),
*target,
display_env.clone(),
);
let exec_v = Executor::new(
network.ns_1().clone(),
@@ -172,7 +84,6 @@ impl Runner {
.network_config(network_config.clone())
.build(),
Target::Native,
Vec::new(), // Verifier doesn't need display env
);
Ok(Self {
@@ -207,12 +118,6 @@ pub async fn main() -> Result<()> {
tracing_subscriber::fmt::init();
let cli = Cli::parse();
// Validate --headed requires --target browser
if cli.headed && cli.target != Target::Browser {
anyhow::bail!("--headed can only be used with --target browser");
}
let mut runner = Runner::new(&cli)?;
let mut exit_code = 0;
@@ -301,12 +206,6 @@ pub async fn main() -> Result<()> {
samples_override,
skip_warmup,
} => {
// Print configuration info
println!("TLSNotary Benchmark Harness");
println!("Running benchmarks from: {}", config.display());
println!("Output will be written to: {}", output.display());
println!();
let items: BenchItems = toml::from_str(&std::fs::read_to_string(config)?)?;
let output_file = std::fs::File::create(output)?;
let mut writer = WriterBuilder::new().from_writer(output_file);
@@ -321,34 +220,7 @@ pub async fn main() -> Result<()> {
runner.exec_p.start().await?;
runner.exec_v.start().await?;
// Create progress bar
let pb = ProgressBar::new(benches.len() as u64);
pb.set_style(
ProgressStyle::default_bar()
.template("[{elapsed_precise}] {bar:40.cyan/blue} {pos}/{len} {msg}")
.expect("valid template")
.progress_chars("█▓▒░ "),
);
// Collect measurements for stats
let mut measurements_by_config: HashMap<String, Vec<u64>> = HashMap::new();
let warmup_count = if skip_warmup { 0 } else { 3 };
for (idx, config) in benches.iter().enumerate() {
let is_warmup = idx < warmup_count;
let group_name = if is_warmup {
format!("Warmup {}/{}", idx + 1, warmup_count)
} else {
config.group.as_deref().unwrap_or("unnamed").to_string()
};
pb.set_message(format!(
"{} ({} Mbps, {}ms)",
group_name, config.bandwidth, config.protocol_latency
));
for config in benches {
runner
.network
.set_proto_config(config.bandwidth, config.protocol_latency.div_ceil(2))?;
@@ -377,73 +249,11 @@ pub async fn main() -> Result<()> {
panic!("expected prover output");
};
// Collect metrics for stats (skip warmup benches)
if !is_warmup {
let config_key = format!(
"{:?}|{}|{}|{}|{}",
config.group,
config.bandwidth,
config.protocol_latency,
config.upload_size,
config.download_size
);
measurements_by_config
.entry(config_key)
.or_default()
.push(metrics.time_total);
}
let measurement = Measurement::new(config.clone(), metrics);
let measurement = Measurement::new(config, metrics);
writer.serialize(measurement)?;
writer.flush()?;
pb.inc(1);
}
pb.finish_with_message("Benchmarks complete");
// Compute and print statistics
let mut all_stats: Vec<BenchStats> = Vec::new();
for (key, times) in measurements_by_config {
// Parse back the config from the key
let parts: Vec<&str> = key.split('|').collect();
if parts.len() >= 5 {
let group = if parts[0] == "None" {
None
} else {
Some(
parts[0]
.trim_start_matches("Some(\"")
.trim_end_matches("\")")
.to_string(),
)
};
let bandwidth: usize = parts[1].parse().unwrap_or(0);
let latency: usize = parts[2].parse().unwrap_or(0);
let upload_size: usize = parts[3].parse().unwrap_or(0);
let download_size: usize = parts[4].parse().unwrap_or(0);
all_stats.push(BenchStats {
group,
bandwidth,
latency,
upload_size,
download_size,
times,
});
}
}
// Sort stats by group name for consistent output
all_stats.sort_by(|a, b| {
a.group
.cmp(&b.group)
.then(a.latency.cmp(&b.latency))
.then(a.bandwidth.cmp(&b.bandwidth))
});
print_bench_summary(&all_stats);
}
Command::Serve {} => {
runner.start_services().await?;

View File

@@ -0,0 +1,25 @@
#### Bandwidth ####
[[group]]
name = "bandwidth"
protocol_latency = 25
[[bench]]
group = "bandwidth"
bandwidth = 10
[[bench]]
group = "bandwidth"
bandwidth = 50
[[bench]]
group = "bandwidth"
bandwidth = 100
[[bench]]
group = "bandwidth"
bandwidth = 250
[[bench]]
group = "bandwidth"
bandwidth = 1000

View File

@@ -0,0 +1,37 @@
[[group]]
name = "download_size"
protocol_latency = 10
bandwidth = 200
upload-size = 2048
[[bench]]
group = "download_size"
download-size = 1024
[[bench]]
group = "download_size"
download-size = 2048
[[bench]]
group = "download_size"
download-size = 4096
[[bench]]
group = "download_size"
download-size = 8192
[[bench]]
group = "download_size"
download-size = 16384
[[bench]]
group = "download_size"
download-size = 32768
[[bench]]
group = "download_size"
download-size = 65536
[[bench]]
group = "download_size"
download-size = 131072

View File

@@ -0,0 +1,25 @@
#### Latency ####
[[group]]
name = "latency"
bandwidth = 1000
[[bench]]
group = "latency"
protocol_latency = 10
[[bench]]
group = "latency"
protocol_latency = 25
[[bench]]
group = "latency"
protocol_latency = 50
[[bench]]
group = "latency"
protocol_latency = 100
[[bench]]
group = "latency"
protocol_latency = 200

View File

@@ -60,8 +60,6 @@ mpz-common = { workspace = true, features = ["test-utils"] }
mpz-ot = { workspace = true, features = ["ideal"] }
mpz-ideal-vm = { workspace = true }
tlsn-mux = { workspace = true }
cipher-crate = { package = "cipher", version = "0.4" }
generic-array = { workspace = true }
rand_chacha = { workspace = true }
@@ -72,5 +70,6 @@ tlsn-tls-client-async = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }
tokio-util = { workspace = true, features = ["compat"] }
tracing-subscriber = { workspace = true }
uid-mux = { workspace = true, features = ["serio", "test-utils"] }
rustls-pki-types = { workspace = true }
rustls-webpki = { workspace = true }

View File

@@ -123,8 +123,8 @@ fn build_pair(config: Config) -> (MpcTlsLeader, MpcTlsFollower) {
let (mut mt_a, mut mt_b) = test_mt_context(8);
let ctx_a = mt_a.new_context().unwrap();
let ctx_b = mt_b.new_context().unwrap();
let ctx_a = futures::executor::block_on(mt_a.new_context()).unwrap();
let ctx_b = futures::executor::block_on(mt_b.new_context()).unwrap();
let delta_a = Delta::new(Block::random(&mut rng));
let delta_b = Delta::new(Block::random(&mut rng));

View File

@@ -20,7 +20,6 @@ web = ["dep:web-spawn"]
tlsn-attestation = { workspace = true }
tlsn-core = { workspace = true }
tlsn-deap = { workspace = true }
tlsn-mux = { workspace = true }
tlsn-tls-client = { workspace = true }
tlsn-tls-client-async = { workspace = true }
tlsn-tls-core = { workspace = true }
@@ -28,6 +27,7 @@ tlsn-mpc-tls = { workspace = true }
tlsn-cipher = { workspace = true }
serio = { workspace = true, features = ["compat"] }
uid-mux = { workspace = true, features = ["serio"] }
web-spawn = { workspace = true, optional = true }
mpz-circuits = { workspace = true, features = ["aes"] }

View File

@@ -0,0 +1,21 @@
//! Execution context.
use mpz_common::context::Multithread;
use crate::mux::MuxControl;
/// Maximum concurrency for multi-threaded context.
pub(crate) const MAX_CONCURRENCY: usize = 8;
/// Builds a multi-threaded context with the given muxer.
pub(crate) fn build_mt_context(mux: MuxControl) -> Multithread {
let builder = Multithread::builder().mux(mux).concurrency(MAX_CONCURRENCY);
#[cfg(all(feature = "web", target_arch = "wasm32"))]
let builder = builder.spawn_handler(|f| {
let _ = web_spawn::spawn(f);
Ok(())
});
builder.build().unwrap()
}
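// Illustrative usage (sketch): the muxer future returned by `attach_mux` must
// be driven for the context to make progress, e.g.
// `let mut mt = build_mt_context(mux_ctrl.clone());`
// `let ctx = mux_fut.poll_with(mt.new_context()).await?;`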

View File

@@ -1,87 +0,0 @@
use std::fmt::Display;
/// Crate-level error.
#[derive(Debug, thiserror::Error)]
pub struct Error {
kind: ErrorKind,
msg: Option<String>,
source: Option<Box<dyn std::error::Error + Send + Sync>>,
}
impl Error {
pub(crate) fn io() -> Self {
Self {
kind: ErrorKind::Internal,
msg: None,
source: None,
}
}
pub(crate) fn internal() -> Self {
Self {
kind: ErrorKind::Internal,
msg: None,
source: None,
}
}
pub(crate) fn with_msg(mut self, msg: impl Into<String>) -> Self {
self.msg = Some(msg.into());
self
}
pub(crate) fn with_source<T>(mut self, source: T) -> Self
where
T: Into<Box<dyn std::error::Error + Send + Sync>>,
{
self.source = Some(source.into());
self
}
/// Returns `true` if the error was user created.
pub fn is_user(&self) -> bool {
todo!()
}
/// Returns `true` if the error originated from an IO error.
pub fn is_io(&self) -> bool {
self.kind.is_io()
}
/// Returns `true` if the error originated from an internal bug.
pub fn is_internal(&self) -> bool {
self.kind.is_internal()
}
/// Returns the error message if available.
pub fn msg(&self) -> Option<&str> {
todo!()
}
}
impl Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
todo!()
}
}
#[derive(Debug)]
enum ErrorKind {
User,
Io,
Internal,
}
impl ErrorKind {
fn is_user(&self) -> bool {
matches!(self, ErrorKind::User)
}
fn is_io(&self) -> bool {
matches!(self, ErrorKind::Io)
}
fn is_internal(&self) -> bool {
matches!(self, ErrorKind::Internal)
}
}

View File

@@ -4,25 +4,20 @@
#![deny(clippy::all)]
#![forbid(unsafe_code)]
mod error;
pub(crate) mod context;
pub(crate) mod ghash;
pub(crate) mod map;
pub(crate) mod mpz;
pub(crate) mod msg;
pub(crate) mod mux;
pub mod prover;
mod session;
pub(crate) mod tag;
pub(crate) mod transcript_internal;
pub mod verifier;
pub use error::Error;
pub use session::Session;
pub use tlsn_attestation as attestation;
pub use tlsn_core::{config, connection, hash, transcript, webpki};
/// Result type.
pub type Result<T, E = Error> = core::result::Result<T, E>;
use std::sync::LazyLock;
use semver::Version;

View File

@@ -21,6 +21,20 @@ impl<T> RangeMap<T>
where
T: Item,
{
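/// Creates a new map from `(index, item)` pairs.
///
/// # Panics
///
/// Panics if the pairs are not sorted by index or their ranges overlap.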
pub(crate) fn new(map: Vec<(usize, T)>) -> Self {
let mut pos = 0;
for (idx, item) in &map {
assert!(
*idx >= pos,
"items must be sorted by index and non-overlapping"
);
pos = *idx + item.length();
}
Self { map }
}
/// Returns `true` if the map is empty.
pub(crate) fn is_empty(&self) -> bool {
self.map.is_empty()
@@ -33,6 +47,11 @@ where
.map(|(idx, item)| *idx..*idx + item.length())
}
/// Returns the length of the map.
pub(crate) fn len(&self) -> usize {
self.map.iter().map(|(_, item)| item.length()).sum()
}
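/// Returns an iterator over the ranges and their items.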
pub(crate) fn iter(&self) -> impl Iterator<Item = (Range<usize>, &T)> {
self.map
.iter()

View File

@@ -6,6 +6,11 @@ use mpz_core::Block;
#[cfg(not(tlsn_insecure))]
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_garble_core::Delta;
use mpz_memory_core::{
Vector,
binary::U8,
correlated::{Key, Mac},
};
#[cfg(not(tlsn_insecure))]
use mpz_ot::cot::{DerandCOTReceiver, DerandCOTSender};
use mpz_ot::{
@@ -19,6 +24,8 @@ use tlsn_core::config::tls_commit::mpc::{MpcTlsConfig, NetworkSetting};
use tlsn_deap::Deap;
use tokio::sync::Mutex;
use crate::transcript_internal::commit::encoding::{KeyStore, MacStore};
#[cfg(not(tlsn_insecure))]
pub(crate) type ProverMpc =
Garbler<DerandCOTSender<SharedRCOTSender<kos::Sender<co::Receiver>, Block>>>;
@@ -186,3 +193,41 @@ pub(crate) fn translate_keys<Mpc, Zk>(keys: &mut SessionKeys, vm: &Deap<Mpc, Zk>
.translate(keys.server_write_mac_key)
.expect("VM memory should be consistent");
}
impl<T> KeyStore for Verifier<T> {
fn delta(&self) -> &Delta {
self.delta()
}
fn get_keys(&self, data: Vector<U8>) -> Option<&[Key]> {
self.get_keys(data).ok()
}
}
impl<T> MacStore for Prover<T> {
fn get_macs(&self, data: Vector<U8>) -> Option<&[Mac]> {
self.get_macs(data).ok()
}
}
#[cfg(tlsn_insecure)]
mod insecure {
use super::*;
use mpz_ideal_vm::IdealVm;
impl KeyStore for IdealVm {
fn delta(&self) -> &Delta {
unimplemented!("encodings not supported in insecure mode")
}
fn get_keys(&self, _data: Vector<U8>) -> Option<&[Key]> {
unimplemented!("encodings not supported in insecure mode")
}
}
impl MacStore for IdealVm {
fn get_macs(&self, _data: Vector<U8>) -> Option<&[Mac]> {
unimplemented!("encodings not supported in insecure mode")
}
}
}

crates/tlsn/src/mux.rs Normal file (90 lines added)
View File

@@ -0,0 +1,90 @@
//! Multiplexer used in the TLSNotary protocol.
use std::future::IntoFuture;
use futures::{
AsyncRead, AsyncWrite, Future,
future::{FusedFuture, FutureExt},
};
use tracing::error;
use uid_mux::yamux;
use crate::Role;
/// Multiplexer supporting unique deterministic stream IDs.
pub(crate) type Mux<Io> = yamux::Yamux<Io>;
/// Multiplexer controller providing streams.
pub(crate) type MuxControl = yamux::YamuxCtrl;
/// Multiplexer future which must be polled for the muxer to make progress.
pub(crate) struct MuxFuture(
Box<dyn FusedFuture<Output = Result<(), yamux::ConnectionError>> + Send + Unpin>,
);
impl MuxFuture {
/// Returns true if the muxer is complete.
pub(crate) fn is_complete(&self) -> bool {
self.0.is_terminated()
}
/// Awaits a future, polling the muxer future concurrently.
pub(crate) async fn poll_with<F, R>(&mut self, fut: F) -> R
where
F: Future<Output = R>,
{
let mut fut = Box::pin(fut.fuse());
// Poll the future concurrently with the muxer future.
// If the muxer returns an error, continue polling the future
// until it completes.
loop {
futures::select! {
res = fut => return res,
res = &mut self.0 => if let Err(e) = res {
error!("mux error: {:?}", e);
},
}
}
}
}
impl Future for MuxFuture {
type Output = Result<(), yamux::ConnectionError>;
fn poll(
mut self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Self::Output> {
self.0.as_mut().poll_unpin(cx)
}
}
/// Attaches a multiplexer to the provided socket.
///
/// Returns the multiplexer and a controller for creating streams with a codec
/// attached.
///
/// # Arguments
///
/// * `socket` - The socket to attach the multiplexer to.
/// * `role` - The role of the party using the multiplexer.
pub(crate) fn attach_mux<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
socket: T,
role: Role,
) -> (MuxFuture, MuxControl) {
let mut mux_config = yamux::Config::default();
mux_config.set_max_num_streams(36);
let mux_role = match role {
Role::Prover => yamux::Mode::Client,
Role::Verifier => yamux::Mode::Server,
};
let mux = Mux::new(socket, mux_config, mux_role);
let ctrl = mux.control();
if let Role::Prover = role {
ctrl.alloc(32);
}
(MuxFuture(Box::new(mux.into_future().fuse())), ctrl)
}
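// Illustrative wiring (sketch), as in `Prover::commit`:
// `let (mut mux_fut, mux_ctrl) = attach_mux(socket, Role::Prover);`
// protocol futures are then driven with `mux_fut.poll_with(...)` while the
// muxer makes progress in the background.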

View File

@@ -7,13 +7,14 @@ pub mod state;
pub use error::ProverError;
pub use future::ProverFuture;
use mpz_common::Context;
pub use tlsn_core::ProverOutput;
use crate::{
Role,
context::build_mt_context,
mpz::{ProverDeps, build_prover_deps, translate_keys},
msg::{ProveRequestMsg, Response, TlsCommitRequestMsg},
prover::error::ErrorKind,
mux::attach_mux,
tag::verify_tags,
};
@@ -44,7 +45,6 @@ use tracing::{Instrument, Span, debug, info, info_span, instrument};
pub struct Prover<T: state::ProverState = state::Initialized> {
config: ProverConfig,
span: Span,
ctx: Option<Context>,
state: T,
}
@@ -53,14 +53,12 @@ impl Prover<state::Initialized> {
///
/// # Arguments
///
/// * `ctx` - A thread context.
/// * `config` - The configuration for the prover.
pub(crate) fn new(ctx: Context, config: ProverConfig) -> Self {
pub fn new(config: ProverConfig) -> Self {
let span = info_span!("prover");
Self {
config,
span,
ctx: Some(ctx),
state: state::Initialized,
}
}
@@ -73,30 +71,35 @@ impl Prover<state::Initialized> {
/// # Arguments
///
/// * `config` - The TLS commitment configuration.
/// * `socket` - The socket to the TLS verifier.
#[instrument(parent = &self.span, level = "debug", skip_all, err)]
pub async fn commit(
mut self,
pub async fn commit<S: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
self,
config: TlsCommitConfig,
socket: S,
) -> Result<Prover<state::CommitAccepted>, ProverError> {
let mut ctx = self
.ctx
.take()
.ok_or_else(|| ProverError::new(ErrorKind::Io, "context was dropped"))?;
let (mut mux_fut, mux_ctrl) = attach_mux(socket, Role::Prover);
let mut mt = build_mt_context(mux_ctrl.clone());
let mut ctx = mux_fut.poll_with(mt.new_context()).await?;
// Sends protocol configuration to verifier for compatibility check.
ctx.io_mut()
.send(TlsCommitRequestMsg {
request: config.to_request(),
version: crate::VERSION.clone(),
mux_fut
.poll_with(async {
ctx.io_mut()
.send(TlsCommitRequestMsg {
request: config.to_request(),
version: crate::VERSION.clone(),
})
.await?;
ctx.io_mut()
.expect_next::<Response>()
.await?
.result
.map_err(ProverError::from)
})
.await?;
ctx.io_mut()
.expect_next::<Response>()
.await?
.result
.map_err(ProverError::from)?;
let TlsCommitProtocolConfig::Mpc(mpc_tls_config) = config.protocol().clone() else {
unreachable!("only MPC TLS is supported");
};
@@ -111,15 +114,20 @@ impl Prover<state::Initialized> {
debug!("setting up mpc-tls");
mpc_tls.preprocess().await?;
mux_fut.poll_with(mpc_tls.preprocess()).await?;
debug!("mpc-tls setup complete");
Ok(Prover {
config: self.config,
span: self.span,
ctx: None,
state: state::CommitAccepted { mpc_tls, keys, vm },
state: state::CommitAccepted {
mux_ctrl,
mux_fut,
mpc_tls,
keys,
vm,
},
})
}
}
@@ -142,7 +150,12 @@ impl Prover<state::CommitAccepted> {
socket: S,
) -> Result<(TlsConnection, ProverFuture), ProverError> {
let state::CommitAccepted {
mpc_tls, keys, vm, ..
mux_ctrl,
mut mux_fut,
mpc_tls,
keys,
vm,
..
} = self.state;
let (mpc_ctrl, mpc_fut) = mpc_tls.run();
@@ -196,7 +209,10 @@ impl Prover<state::CommitAccepted> {
let mpc_ctrl = mpc_ctrl.clone();
async move {
let conn_fut = async {
conn_fut.await.map_err(ProverError::from)?;
mux_fut
.poll_with(conn_fut.map_err(ProverError::from))
.await?;
mpc_ctrl.stop().await?;
Ok::<_, ProverError>(())
@@ -217,7 +233,10 @@ impl Prover<state::CommitAccepted> {
debug!("finalizing mpc");
// Finalize DEAP.
vm.finalize(&mut ctx).await.map_err(ProverError::mpc)?;
mux_fut
.poll_with(vm.finalize(&mut ctx))
.await
.map_err(ProverError::mpc)?;
debug!("mpc finalized");
}
@@ -239,7 +258,9 @@ impl Prover<state::CommitAccepted> {
)
.map_err(ProverError::zk)?;
vm.execute_all(&mut ctx).await.map_err(ProverError::zk)?;
mux_fut
.poll_with(vm.execute_all(&mut ctx).map_err(ProverError::zk))
.await?;
let transcript = tls_transcript
.to_transcript()
@@ -248,8 +269,10 @@ impl Prover<state::CommitAccepted> {
Ok(Prover {
config: self.config,
span: self.span,
ctx: Some(ctx),
state: state::Committed {
mux_ctrl,
mux_fut,
ctx,
vm,
server_name: config.server_name().clone(),
keys,
@@ -289,11 +312,9 @@ impl Prover<state::Committed> {
/// * `config` - The disclosure configuration.
#[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn prove(&mut self, config: &ProveConfig) -> Result<ProverOutput, ProverError> {
let ctx = self
.ctx
.as_mut()
.ok_or_else(|| ProverError::new(ErrorKind::Io, "context was dropped"))?;
let state::Committed {
mux_fut,
ctx,
vm,
keys,
server_name,
@@ -329,10 +350,15 @@ impl Prover<state::Committed> {
transcript: partial_transcript,
};
ctx.io_mut().send(msg).await.map_err(ProverError::from)?;
ctx.io_mut().expect_next::<Response>().await?.result?;
let output = mux_fut
.poll_with(async {
ctx.io_mut().send(msg).await.map_err(ProverError::from)?;
let output = prove::prove(ctx, vm, keys, transcript, tls_transcript, config).await?;
ctx.io_mut().expect_next::<Response>().await?.result?;
prove::prove(ctx, vm, keys, transcript, tls_transcript, config).await
})
.await?;
Ok(output)
}
@@ -340,6 +366,16 @@ impl Prover<state::Committed> {
/// Closes the connection with the verifier.
#[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn close(self) -> Result<(), ProverError> {
let state::Committed {
mux_ctrl, mux_fut, ..
} = self.state;
// Wait for the verifier to correctly close the connection.
if !mux_fut.is_complete() {
mux_ctrl.close();
mux_fut.await?;
}
Ok(())
}
}

View File

@@ -2,6 +2,8 @@ use std::{error::Error, fmt};
use mpc_tls::MpcTlsError;
use crate::transcript_internal::commit::encoding::EncodingError;
/// Error for [`Prover`](crate::prover::Prover).
#[derive(Debug, thiserror::Error)]
pub struct ProverError {
@@ -10,7 +12,7 @@ pub struct ProverError {
}
impl ProverError {
pub(crate) fn new<E>(kind: ErrorKind, source: E) -> Self
fn new<E>(kind: ErrorKind, source: E) -> Self
where
E: Into<Box<dyn Error + Send + Sync + 'static>>,
{
@@ -50,7 +52,7 @@ impl ProverError {
}
#[derive(Debug)]
pub(crate) enum ErrorKind {
enum ErrorKind {
Io,
Mpc,
Zk,
@@ -90,8 +92,8 @@ impl From<tls_client_async::ConnectionError> for ProverError {
}
}
impl From<tlsn_mux::ConnectionError> for ProverError {
fn from(e: tlsn_mux::ConnectionError) -> Self {
impl From<uid_mux::yamux::ConnectionError> for ProverError {
fn from(e: uid_mux::yamux::ConnectionError) -> Self {
Self::new(ErrorKind::Io, e)
}
}
@@ -107,3 +109,9 @@ impl From<MpcTlsError> for ProverError {
Self::new(ErrorKind::Mpc, e)
}
}
impl From<EncodingError> for ProverError {
fn from(e: EncodingError) -> Self {
Self::new(ErrorKind::Commit, e)
}
}

View File

@@ -13,10 +13,17 @@ use tlsn_core::{
use crate::{
prover::ProverError,
transcript_internal::{TranscriptRefs, auth::prove_plaintext, commit::hash::prove_hash},
transcript_internal::{
TranscriptRefs,
auth::prove_plaintext,
commit::{
encoding::{self, MacStore},
hash::prove_hash,
},
},
};
pub(crate) async fn prove<T: Vm<Binary> + Send + Sync>(
pub(crate) async fn prove<T: Vm<Binary> + MacStore + Send + Sync>(
ctx: &mut Context,
vm: &mut T,
keys: &SessionKeys,
@@ -38,6 +45,13 @@ pub(crate) async fn prove<T: Vm<Binary> + Send + Sync>(
Direction::Sent => commit_sent.union_mut(idx),
Direction::Received => commit_recv.union_mut(idx),
});
commit_config
.iter_encoding()
.for_each(|(direction, idx)| match direction {
Direction::Sent => commit_sent.union_mut(idx),
Direction::Received => commit_recv.union_mut(idx),
});
}
let transcript_refs = TranscriptRefs {
@@ -88,6 +102,45 @@ pub(crate) async fn prove<T: Vm<Binary> + Send + Sync>(
vm.execute_all(ctx).await.map_err(ProverError::zk)?;
if let Some(commit_config) = config.transcript_commit()
&& commit_config.has_encoding()
{
let mut sent_ranges = RangeSet::default();
let mut recv_ranges = RangeSet::default();
for (dir, idx) in commit_config.iter_encoding() {
match dir {
Direction::Sent => sent_ranges.union_mut(idx),
Direction::Received => recv_ranges.union_mut(idx),
}
}
let sent_map = transcript_refs
.sent
.index(&sent_ranges)
.expect("indices are valid");
let recv_map = transcript_refs
.recv
.index(&recv_ranges)
.expect("indices are valid");
let (commitment, tree) = encoding::receive(
ctx,
vm,
*commit_config.encoding_hash_alg(),
&sent_map,
&recv_map,
commit_config.iter_encoding(),
)
.await?;
output
.transcript_commitments
.push(TranscriptCommitment::Encoding(commitment));
output
.transcript_secrets
.push(TranscriptSecret::Encoding(tree));
}
if let Some((hash_fut, hash_secrets)) = hash_commitments {
let hash_commitments = hash_fut.try_recv().map_err(ProverError::commit)?;
for (commitment, secret) in hash_commitments.into_iter().zip(hash_secrets) {


@@ -3,6 +3,7 @@
use std::sync::Arc;
use mpc_tls::{MpcTlsLeader, SessionKeys};
use mpz_common::Context;
use tlsn_core::{
connection::ServerName,
transcript::{TlsTranscript, Transcript},
@@ -10,7 +11,10 @@ use tlsn_core::{
use tlsn_deap::Deap;
use tokio::sync::Mutex;
use crate::mpz::{ProverMpc, ProverZk};
use crate::{
mpz::{ProverMpc, ProverZk},
mux::{MuxControl, MuxFuture},
};
/// Entry state
pub struct Initialized;
@@ -20,6 +24,8 @@ opaque_debug::implement!(Initialized);
/// State after the verifier has accepted the proposed TLS commitment protocol
/// configuration and preprocessing has completed.
pub struct CommitAccepted {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) mpc_tls: MpcTlsLeader,
pub(crate) keys: SessionKeys,
pub(crate) vm: Arc<Mutex<Deap<ProverMpc, ProverZk>>>,
@@ -29,6 +35,9 @@ opaque_debug::implement!(CommitAccepted);
/// State after the TLS transcript has been committed.
pub struct Committed {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) ctx: Context,
pub(crate) vm: ProverZk,
pub(crate) server_name: ServerName,
pub(crate) keys: SessionKeys,


@@ -1,293 +0,0 @@
use std::{
future::Future,
pin::Pin,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
task::{Context, Poll},
};
use futures::{AsyncRead, AsyncWrite};
use mpz_common::{ThreadId, context::Multithread, io::Io, mux::Mux};
use tlsn_core::config::{prover::ProverConfig, verifier::VerifierConfig};
use tlsn_mux::{Connection, Handle};
use crate::{
Error, Result,
prover::{Prover, state as prover_state},
verifier::{Verifier, state as verifier_state},
};
/// Maximum concurrency for multi-threaded context.
const MAX_CONCURRENCY: usize = 8;
/// Session state.
#[must_use = "session must be polled continuously to make progress, including during closing."]
pub struct Session<Io> {
conn: Option<Connection<Io>>,
mt: Multithread,
}
impl<Io> Session<Io>
where
Io: AsyncRead + AsyncWrite + Unpin,
{
/// Creates a new session.
pub fn new(io: Io) -> Self {
let mut mux_config = tlsn_mux::Config::default();
mux_config.set_max_num_streams(36);
mux_config.set_keep_alive(true);
mux_config.set_close_sync(true);
let conn = tlsn_mux::Connection::new(io, mux_config);
let handle = conn.handle().expect("handle should be available");
let mt = build_mt_context(MuxHandle { handle });
Self {
conn: Some(conn),
mt,
}
}
/// Creates a new prover.
pub fn new_prover(
&mut self,
config: ProverConfig,
) -> Result<Prover<prover_state::Initialized>> {
let ctx = self.mt.new_context().map_err(|e| {
Error::internal()
.with_msg("failed to created new prover")
.with_source(e)
})?;
Ok(Prover::new(ctx, config))
}
/// Creates a new verifier.
pub fn new_verifier(
&mut self,
config: VerifierConfig,
) -> Result<Verifier<verifier_state::Initialized>> {
let ctx = self.mt.new_context().map_err(|e| {
Error::internal()
.with_msg("failed to created new verifier")
.with_source(e)
})?;
Ok(Verifier::new(ctx, config))
}
/// Returns `true` if the session is closed.
pub fn is_closed(&self) -> bool {
self.conn
.as_ref()
.map(|mux| mux.is_complete())
.unwrap_or_default()
}
/// Closes the session.
///
/// This causes the session to begin closing. The session must continue to be polled until it completes.
pub fn close(&mut self) {
if let Some(conn) = self.conn.as_mut() { conn.close(); }
}
/// Attempts to take the IO, returning an error if it is not available.
pub fn try_take(&mut self) -> Result<Io> {
let conn = self.conn.take().ok_or_else(|| {
Error::io().with_msg("failed to take the session io, it was already taken")
})?;
match conn.try_into_io() {
Err(conn) => {
self.conn = Some(conn);
Err(Error::io()
.with_msg("failed to take the session io, the session has not completed yet"))
}
Ok(conn) => Ok(conn),
}
}
/// Polls the session.
pub fn poll(&mut self, cx: &mut Context<'_>) -> Poll<Result<()>> {
self.conn
.as_mut()
.ok_or_else(|| {
Error::io()
.with_msg("failed to poll the session connection because it has been taken")
})?
.poll(cx)
.map_err(|e| {
Error::io()
.with_msg("error occurred while polling the session connection")
.with_source(e)
})
}
/// Splits the session into a driver and handle.
///
/// The driver must be polled to make progress. The handle is used
/// for creating provers/verifiers and closing the session.
pub fn split(self) -> (SessionDriver<Io>, SessionHandle) {
let should_close = Arc::new(AtomicBool::new(false));
(
SessionDriver {
conn: self.conn,
should_close: should_close.clone(),
},
SessionHandle {
mt: self.mt,
should_close,
},
)
}
}
impl<Io> Future for Session<Io>
where
Io: AsyncRead + AsyncWrite + Unpin,
{
type Output = Result<()>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Session::poll(&mut (*self), cx)
}
}
/// The polling half of a split session.
///
/// Must be polled continuously to drive the session. Returns the underlying
/// IO when the session closes.
#[must_use = "driver must be polled to make progress"]
pub struct SessionDriver<Io> {
conn: Option<Connection<Io>>,
should_close: Arc<AtomicBool>,
}
impl<Io> SessionDriver<Io>
where
Io: AsyncRead + AsyncWrite + Unpin,
{
/// Polls the driver.
pub fn poll(&mut self, cx: &mut Context<'_>) -> Poll<Result<Io>> {
let conn = self.conn.as_mut().ok_or_else(|| {
Error::io().with_msg("session driver already completed")
})?;
if self.should_close.load(Ordering::Acquire) {
conn.close();
}
match conn.poll(cx) {
Poll::Ready(Ok(())) => {}
Poll::Ready(Err(e)) => {
return Poll::Ready(Err(Error::io()
.with_msg("error polling session connection")
.with_source(e)));
}
Poll::Pending => return Poll::Pending,
}
let conn = self.conn.take().unwrap();
Poll::Ready(conn.try_into_io().map_err(|_| {
Error::io().with_msg("failed to take session io")
}))
}
}
impl<Io> Future for SessionDriver<Io>
where
Io: AsyncRead + AsyncWrite + Unpin,
{
type Output = Result<Io>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
SessionDriver::poll(&mut *self, cx)
}
}
/// The control half of a split session.
///
/// Used to create provers/verifiers and control the session lifecycle.
pub struct SessionHandle {
mt: Multithread,
should_close: Arc<AtomicBool>,
}
impl SessionHandle {
/// Creates a new prover.
pub fn new_prover(
&mut self,
config: ProverConfig,
) -> Result<Prover<prover_state::Initialized>> {
let ctx = self.mt.new_context().map_err(|e| {
Error::internal()
.with_msg("failed to create new prover")
.with_source(e)
})?;
Ok(Prover::new(ctx, config))
}
/// Creates a new verifier.
pub fn new_verifier(
&mut self,
config: VerifierConfig,
) -> Result<Verifier<verifier_state::Initialized>> {
let ctx = self.mt.new_context().map_err(|e| {
Error::internal()
.with_msg("failed to create new verifier")
.with_source(e)
})?;
Ok(Verifier::new(ctx, config))
}
/// Signals the session to close.
///
/// The driver must continue to be polled until it completes.
pub fn close(&self) {
self.should_close.store(true, Ordering::Release);
}
}
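// Usage sketch assembled from the (likewise removed) integration test at the
// bottom of this diff: spawn the driver, act through the handle, then signal
// close and let the driver resolve with the underlying IO. Bounds are assumed.
async fn run_split_session<Io>(io: Io) -> Result<Io>
where
    Io: AsyncRead + AsyncWrite + Send + Unpin + 'static,
{
    let session = Session::new(io);
    let (driver, handle) = session.split();
    let driver = tokio::spawn(driver); // must be polled continuously
    // ... create provers/verifiers via `handle` and run them ...
    handle.close(); // begins shutdown; the driver completes once closed
    driver.await.expect("driver task panicked")
}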
/// Multiplexer controller providing streams.
struct MuxHandle {
handle: Handle,
}
impl std::fmt::Debug for MuxHandle {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("MuxHandle").finish_non_exhaustive()
}
}
impl Mux for MuxHandle {
fn open(&self, id: ThreadId) -> Result<Io, std::io::Error> {
let stream = self
.handle
.new_stream(id.as_ref())
.map_err(std::io::Error::other)?;
let io = Io::from_io(stream);
Ok(io)
}
}
/// Builds a multi-threaded context with the given muxer.
fn build_mt_context(mux: MuxHandle) -> Multithread {
let builder = Multithread::builder()
.mux(Box::new(mux) as Box<_>)
.concurrency(MAX_CONCURRENCY);
#[cfg(all(feature = "web", target_arch = "wasm32"))]
let builder = builder.spawn_handler(|f| {
let _ = web_spawn::spawn(f);
Ok(())
});
builder.build().unwrap()
}


@@ -1,3 +1,4 @@
//! Plaintext commitment and proof of encryption.
pub(crate) mod encoding;
pub(crate) mod hash;


@@ -0,0 +1,267 @@
//! Encoding commitment protocol.
use std::ops::Range;
use mpz_common::Context;
use mpz_memory_core::{
Vector,
binary::U8,
correlated::{Delta, Key, Mac},
};
use rand::Rng;
use rangeset::set::RangeSet;
use serde::{Deserialize, Serialize};
use serio::{SinkExt, stream::IoStreamExt};
use tlsn_core::{
hash::{Blake3, HashAlgId, HashAlgorithm, Keccak256, Sha256},
transcript::{
Direction,
encoding::{
Encoder, EncoderSecret, EncodingCommitment, EncodingProvider, EncodingProviderError,
EncodingTree, EncodingTreeError, new_encoder,
},
},
};
use crate::{
map::{Item, RangeMap},
transcript_internal::ReferenceMap,
};
/// Number of encoding bytes per plaintext byte: 8 bits, each encoded as a 16-byte block.
const ENCODING_SIZE: usize = 128;
#[derive(Debug, Serialize, Deserialize)]
struct Encodings {
sent: Vec<u8>,
recv: Vec<u8>,
}
/// Transfers encodings for the provided plaintext ranges.
pub(crate) async fn transfer<K: KeyStore>(
ctx: &mut Context,
store: &K,
sent: &ReferenceMap,
recv: &ReferenceMap,
) -> Result<(EncoderSecret, EncodingCommitment), EncodingError> {
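// Seed a fresh encoder bound to the correlated-OT delta, so that the
// key/MAC XOR below yields valid active encodings on the prover side.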
let secret = EncoderSecret::new(rand::rng().random(), store.delta().as_block().to_bytes());
let encoder = new_encoder(&secret);
// Collects the encodings for the provided plaintext ranges.
fn collect_encodings(
encoder: &impl Encoder,
store: &impl KeyStore,
direction: Direction,
map: &ReferenceMap,
) -> Vec<u8> {
let mut encodings = Vec::with_capacity(map.len() * ENCODING_SIZE);
for (range, chunk) in map.iter() {
let start = encodings.len();
encoder.encode_range(direction, range, &mut encodings);
let keys = store
.get_keys(*chunk)
.expect("keys are present for provided plaintext ranges");
encodings[start..]
.iter_mut()
.zip(keys.iter().flat_map(|key| key.as_block().as_bytes()))
.for_each(|(encoding, key)| {
*encoding ^= *key;
});
}
encodings
}
let encodings = Encodings {
sent: collect_encodings(&encoder, store, Direction::Sent, sent),
recv: collect_encodings(&encoder, store, Direction::Received, recv),
};
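// Raise the IO frame limit so the bulk encoding payload fits in one frame.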
let frame_limit = ctx
.io()
.limit()
.saturating_add(encodings.sent.len() + encodings.recv.len());
ctx.io_mut().with_limit(frame_limit).send(encodings).await?;
let root = ctx.io_mut().expect_next().await?;
Ok((secret, EncodingCommitment { root }))
}
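// Why the XOR in `transfer`/`receive` works, with u8 stand-ins for 128-bit
// blocks (an illustration, not crate code): correlated OT gives
// Mac = Key ^ (bit * Delta), and the encoder above is seeded with Delta, so
// the prover recovers exactly the encoding of its actual bit and nothing else.
fn toy_encoding_xor() {
    let delta: u8 = 0b1010_1010; // global correlated-OT delta
    let key: u8 = 0b0101_1100; // verifier's key for one bit
    let zero_label: u8 = 0b0011_0011; // encoder output for bit = 0
    for bit in [0u8, 1] {
        let mac = key ^ (bit * delta); // held by the prover
        let transferred = zero_label ^ key; // sent by the verifier
        let active = transferred ^ mac; // recovered by the prover
        assert_eq!(active, zero_label ^ (bit * delta));
    }
}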
/// Receives and commits to the encodings for the provided plaintext ranges.
pub(crate) async fn receive<M: MacStore>(
ctx: &mut Context,
store: &M,
hash_alg: HashAlgId,
sent: &ReferenceMap,
recv: &ReferenceMap,
idxs: impl IntoIterator<Item = &(Direction, RangeSet<usize>)>,
) -> Result<(EncodingCommitment, EncodingTree), EncodingError> {
let hasher: &(dyn HashAlgorithm + Send + Sync) = match hash_alg {
HashAlgId::SHA256 => &Sha256::default(),
HashAlgId::KECCAK256 => &Keccak256::default(),
HashAlgId::BLAKE3 => &Blake3::default(),
alg => {
return Err(ErrorRepr::UnsupportedHashAlgorithm(alg).into());
}
};
let (sent_len, recv_len) = (sent.len(), recv.len());
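// Mirror the sender's raised frame limit; the expected payload size is
// known up front from the plaintext ranges.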
let frame_limit = ctx
.io()
.limit()
.saturating_add(ENCODING_SIZE * (sent_len + recv_len));
let encodings: Encodings = ctx.io_mut().with_limit(frame_limit).expect_next().await?;
if encodings.sent.len() != sent_len * ENCODING_SIZE {
return Err(ErrorRepr::IncorrectMacCount {
direction: Direction::Sent,
expected: sent_len,
got: encodings.sent.len() / ENCODING_SIZE,
}
.into());
}
if encodings.recv.len() != recv_len * ENCODING_SIZE {
return Err(ErrorRepr::IncorrectMacCount {
direction: Direction::Received,
expected: recv_len,
got: encodings.recv.len() / ENCODING_SIZE,
}
.into());
}
// Collects a map of plaintext ranges to their encodings.
fn collect_map(
store: &impl MacStore,
mut encodings: Vec<u8>,
map: &ReferenceMap,
) -> RangeMap<EncodingSlice> {
let mut encoding_map = Vec::new();
let mut pos = 0;
for (range, chunk) in map.iter() {
let macs = store
.get_macs(*chunk)
.expect("MACs are present for provided plaintext ranges");
let encoding = &mut encodings[pos..pos + range.len() * ENCODING_SIZE];
encoding
.iter_mut()
.zip(macs.iter().flat_map(|mac| mac.as_bytes()))
.for_each(|(encoding, mac)| {
*encoding ^= *mac;
});
encoding_map.push((range.start, EncodingSlice::from(&(*encoding))));
pos += range.len() * ENCODING_SIZE;
}
RangeMap::new(encoding_map)
}
let provider = Provider {
sent: collect_map(store, encodings.sent, sent),
recv: collect_map(store, encodings.recv, recv),
};
let tree = EncodingTree::new(hasher, idxs, &provider)?;
let root = tree.root();
ctx.io_mut().send(root.clone()).await?;
let commitment = EncodingCommitment { root };
Ok((commitment, tree))
}
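// Wire order between the two halves (schematic):
//
//   verifier (`transfer`)                     prover (`receive`)
//   send(Encodings { sent, recv })  ------->  expect_next::<Encodings>()
//   expect_next::<root>()           <-------  send(tree.root())
//
// The prover unmasks the encodings with its MACs, builds the Merkle tree over
// the committed ranges, and returns only the root.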
pub(crate) trait KeyStore {
fn delta(&self) -> &Delta;
fn get_keys(&self, data: Vector<U8>) -> Option<&[Key]>;
}
pub(crate) trait MacStore {
fn get_macs(&self, data: Vector<U8>) -> Option<&[Mac]>;
}
#[derive(Debug)]
struct Provider {
sent: RangeMap<EncodingSlice>,
recv: RangeMap<EncodingSlice>,
}
impl EncodingProvider for Provider {
fn provide_encoding(
&self,
direction: Direction,
range: Range<usize>,
dest: &mut Vec<u8>,
) -> Result<(), EncodingProviderError> {
let encodings = match direction {
Direction::Sent => &self.sent,
Direction::Received => &self.recv,
};
let encoding = encodings.get(range).ok_or(EncodingProviderError)?;
dest.extend_from_slice(encoding);
Ok(())
}
}
#[derive(Debug)]
struct EncodingSlice(Vec<u8>);
impl From<&[u8]> for EncodingSlice {
fn from(value: &[u8]) -> Self {
Self(value.to_vec())
}
}
impl Item for EncodingSlice {
type Slice<'a>
= &'a [u8]
where
Self: 'a;
fn length(&self) -> usize {
self.0.len() / ENCODING_SIZE
}
fn slice<'a>(&'a self, range: Range<usize>) -> Option<Self::Slice<'a>> {
self.0
.get(range.start * ENCODING_SIZE..range.end * ENCODING_SIZE)
}
}
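// `length` counts plaintext bytes while `slice` scales by ENCODING_SIZE:
// a 4-byte range owns 4 * 128 = 512 encoding bytes, and slice(1..3) returns
// encoding bytes 128..384.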
/// Encoding protocol error.
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct EncodingError(#[from] ErrorRepr);
#[derive(Debug, thiserror::Error)]
#[error("encoding protocol error: {0}")]
enum ErrorRepr {
#[error("I/O error: {0}")]
Io(std::io::Error),
#[error("incorrect MAC count for {direction}: expected {expected}, got {got}")]
IncorrectMacCount {
direction: Direction,
expected: usize,
got: usize,
},
#[error("encoding tree error: {0}")]
EncodingTree(EncodingTreeError),
#[error("unsupported hash algorithm: {0}")]
UnsupportedHashAlgorithm(HashAlgId),
}
impl From<std::io::Error> for EncodingError {
fn from(value: std::io::Error) -> Self {
Self(ErrorRepr::Io(value))
}
}
impl From<EncodingTreeError> for EncodingError {
fn from(value: EncodingTreeError) -> Self {
Self(ErrorRepr::EncodingTree(value))
}
}


@@ -7,16 +7,17 @@ mod verify;
use std::sync::Arc;
pub use error::VerifierError;
use mpz_common::Context;
pub use tlsn_core::{VerifierOutput, webpki::ServerCertVerifier};
use crate::{
Role,
context::build_mt_context,
mpz::{VerifierDeps, build_verifier_deps, translate_keys},
msg::{ProveRequestMsg, Response, TlsCommitRequestMsg},
mux::attach_mux,
tag::verify_tags,
verifier::error::ErrorKind,
};
use futures::TryFutureExt;
use futures::{AsyncRead, AsyncWrite, TryFutureExt};
use mpz_vm_core::prelude::*;
use serio::{SinkExt, stream::IoStreamExt};
use tlsn_core::{
@@ -44,18 +45,16 @@ pub struct SessionInfo {
pub struct Verifier<T: state::VerifierState = state::Initialized> {
config: VerifierConfig,
span: Span,
ctx: Option<Context>,
state: T,
}
impl Verifier<state::Initialized> {
/// Creates a new verifier.
pub(crate) fn new(ctx: Context, config: VerifierConfig) -> Self {
pub fn new(config: VerifierConfig) -> Self {
let span = info_span!("verifier");
Self {
config,
span,
ctx: Some(ctx),
state: state::Initialized,
}
}
@@ -64,22 +63,37 @@ impl Verifier<state::Initialized> {
///
/// This initiates the TLS commitment protocol, receiving the prover's
/// configuration and providing the opportunity to accept or reject it.
///
/// # Arguments
///
/// * `socket` - The socket to the prover.
#[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn commit(mut self) -> Result<Verifier<state::CommitStart>, VerifierError> {
let mut ctx = self
.ctx
.take()
.ok_or_else(|| VerifierError::new(ErrorKind::Io, "context was dropped"))?;
pub async fn commit<S: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
self,
socket: S,
) -> Result<Verifier<state::CommitStart>, VerifierError> {
let (mut mux_fut, mux_ctrl) = attach_mux(socket, Role::Verifier);
let mut mt = build_mt_context(mux_ctrl.clone());
let mut ctx = mux_fut.poll_with(mt.new_context()).await?;
// Receives the protocol configuration from the prover to perform a compatibility check.
let TlsCommitRequestMsg { request, version } = ctx.io_mut().expect_next().await?;
let TlsCommitRequestMsg { request, version } =
mux_fut.poll_with(ctx.io_mut().expect_next()).await?;
if version != *crate::VERSION {
let msg = format!(
"prover version does not match with verifier: {version} != {}",
*crate::VERSION
);
ctx.io_mut().send(Response::err(Some(msg.clone()))).await?;
mux_fut
.poll_with(ctx.io_mut().send(Response::err(Some(msg.clone()))))
.await?;
// Wait for the prover to correctly close the connection.
if !mux_fut.is_complete() {
mux_ctrl.close();
mux_fut.await?;
}
return Err(VerifierError::config(msg));
}
@@ -87,8 +101,12 @@ impl Verifier<state::Initialized> {
Ok(Verifier {
config: self.config,
span: self.span,
ctx: Some(ctx),
state: state::CommitStart { request },
state: state::CommitStart {
mux_ctrl,
mux_fut,
ctx,
request,
},
})
}
}
@@ -101,14 +119,15 @@ impl Verifier<state::CommitStart> {
/// Accepts the proposed protocol configuration.
#[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn accept(mut self) -> Result<Verifier<state::CommitAccepted>, VerifierError> {
let mut ctx = self
.ctx
.take()
.ok_or_else(|| VerifierError::new(ErrorKind::Io, "context was dropped"))?;
let state::CommitStart { request } = self.state;
pub async fn accept(self) -> Result<Verifier<state::CommitAccepted>, VerifierError> {
let state::CommitStart {
mux_ctrl,
mut mux_fut,
mut ctx,
request,
} = self.state;
ctx.io_mut().send(Response::ok()).await?;
mux_fut.poll_with(ctx.io_mut().send(Response::ok())).await?;
let TlsCommitProtocolConfig::Mpc(mpc_tls_config) = request.protocol().clone() else {
unreachable!("only MPC TLS is supported");
@@ -124,27 +143,42 @@ impl Verifier<state::CommitStart> {
debug!("setting up mpc-tls");
mpc_tls.preprocess().await?;
mux_fut.poll_with(mpc_tls.preprocess()).await?;
debug!("mpc-tls setup complete");
Ok(Verifier {
config: self.config,
span: self.span,
ctx: None,
state: state::CommitAccepted { mpc_tls, keys, vm },
state: state::CommitAccepted {
mux_ctrl,
mux_fut,
mpc_tls,
keys,
vm,
},
})
}
/// Rejects the proposed protocol configuration.
#[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn reject(mut self, msg: Option<&str>) -> Result<(), VerifierError> {
let mut ctx = self
.ctx
.take()
.ok_or_else(|| VerifierError::new(ErrorKind::Io, "context was dropped"))?;
pub async fn reject(self, msg: Option<&str>) -> Result<(), VerifierError> {
let state::CommitStart {
mux_ctrl,
mut mux_fut,
mut ctx,
..
} = self.state;
ctx.io_mut().send(Response::err(msg)).await?;
mux_fut
.poll_with(ctx.io_mut().send(Response::err(msg)))
.await?;
// Wait for the prover to correctly close the connection.
if !mux_fut.is_complete() {
mux_ctrl.close();
mux_fut.await?;
}
Ok(())
}
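// Putting the new verifier surface together (a sketch; `socket` is any
// AsyncRead + AsyncWrite transport, error handling elided; see the
// integration test at the bottom of this diff):
//
// let verifier = Verifier::new(config)
//     .commit(socket).await?   // receive the prover's TLS-commit request
//     .accept().await?         // or `.reject(Some("reason")).await?`
//     .run().await?;           // drive MPC-TLS until the TLS connection closes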
@@ -154,11 +188,17 @@ impl Verifier<state::CommitAccepted> {
/// Runs the verifier until the TLS connection is closed.
#[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn run(self) -> Result<Verifier<state::Committed>, VerifierError> {
let state::CommitAccepted { mpc_tls, vm, keys } = self.state;
let state::CommitAccepted {
mux_ctrl,
mut mux_fut,
mpc_tls,
vm,
keys,
} = self.state;
info!("starting MPC-TLS");
let (mut ctx, tls_transcript) = mpc_tls.run().await?;
let (mut ctx, tls_transcript) = mux_fut.poll_with(mpc_tls.run()).await?;
info!("finished MPC-TLS");
@@ -167,7 +207,10 @@ impl Verifier<state::CommitAccepted> {
debug!("finalizing mpc");
vm.finalize(&mut ctx).await.map_err(VerifierError::mpc)?;
mux_fut
.poll_with(vm.finalize(&mut ctx))
.await
.map_err(VerifierError::mpc)?;
debug!("mpc finalized");
}
@@ -189,7 +232,9 @@ impl Verifier<state::CommitAccepted> {
)
.map_err(VerifierError::zk)?;
vm.execute_all(&mut ctx).map_err(VerifierError::zk).await?;
mux_fut
.poll_with(vm.execute_all(&mut ctx).map_err(VerifierError::zk))
.await?;
// Verify the tags.
// After the verification, the entire TLS transcript becomes
@@ -199,8 +244,10 @@ impl Verifier<state::CommitAccepted> {
Ok(Verifier {
config: self.config,
span: self.span,
ctx: Some(ctx),
state: state::Committed {
mux_ctrl,
mux_fut,
ctx,
vm,
keys,
tls_transcript,
@@ -217,12 +264,11 @@ impl Verifier<state::Committed> {
/// Begins verification of statements from the prover.
#[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn verify(mut self) -> Result<Verifier<state::Verify>, VerifierError> {
let mut ctx = self
.ctx
.take()
.ok_or_else(|| VerifierError::new(ErrorKind::Io, "context was dropped"))?;
pub async fn verify(self) -> Result<Verifier<state::Verify>, VerifierError> {
let state::Committed {
mux_ctrl,
mut mux_fut,
mut ctx,
vm,
keys,
tls_transcript,
@@ -232,17 +278,17 @@ impl Verifier<state::Committed> {
request,
handshake,
transcript,
} = ctx
.io_mut()
.expect_next()
.map_err(VerifierError::from)
} = mux_fut
.poll_with(ctx.io_mut().expect_next().map_err(VerifierError::from))
.await?;
Ok(Verifier {
config: self.config,
span: self.span,
ctx: Some(ctx),
state: state::Verify {
mux_ctrl,
mux_fut,
ctx,
vm,
keys,
tls_transcript,
@@ -256,6 +302,16 @@ impl Verifier<state::Committed> {
/// Closes the connection with the prover.
#[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn close(self) -> Result<(), VerifierError> {
let state::Committed {
mux_ctrl, mux_fut, ..
} = self.state;
// Wait for the prover to correctly close the connection.
if !mux_fut.is_complete() {
mux_ctrl.close();
mux_fut.await?;
}
Ok(())
}
}
@@ -268,13 +324,12 @@ impl Verifier<state::Verify> {
/// Accepts the proving request.
pub async fn accept(
mut self,
self,
) -> Result<(VerifierOutput, Verifier<state::Committed>), VerifierError> {
let mut ctx = self
.ctx
.take()
.ok_or_else(|| VerifierError::new(ErrorKind::Io, "context was dropped"))?;
let state::Verify {
mux_ctrl,
mut mux_fut,
mut ctx,
mut vm,
keys,
tls_transcript,
@@ -283,30 +338,33 @@ impl Verifier<state::Verify> {
transcript,
} = self.state;
ctx.io_mut().send(Response::ok()).await?;
mux_fut.poll_with(ctx.io_mut().send(Response::ok())).await?;
let cert_verifier =
ServerCertVerifier::new(self.config.root_store()).map_err(VerifierError::config)?;
let output = verify::verify(
&mut ctx,
&mut vm,
&keys,
&cert_verifier,
&tls_transcript,
request,
handshake,
transcript,
)
.await?;
let output = mux_fut
.poll_with(verify::verify(
&mut ctx,
&mut vm,
&keys,
&cert_verifier,
&tls_transcript,
request,
handshake,
transcript,
))
.await?;
Ok((
output,
Verifier {
config: self.config,
span: self.span,
ctx: Some(ctx),
state: state::Committed {
mux_ctrl,
mux_fut,
ctx,
vm,
keys,
tls_transcript,
@@ -317,27 +375,30 @@ impl Verifier<state::Verify> {
/// Rejects the proving request.
pub async fn reject(
mut self,
self,
msg: Option<&str>,
) -> Result<Verifier<state::Committed>, VerifierError> {
let mut ctx = self
.ctx
.take()
.ok_or_else(|| VerifierError::new(ErrorKind::Io, "context was dropped"))?;
let state::Verify {
mux_ctrl,
mut mux_fut,
mut ctx,
vm,
keys,
tls_transcript,
..
} = self.state;
ctx.io_mut().send(Response::err(msg)).await?;
mux_fut
.poll_with(ctx.io_mut().send(Response::err(msg)))
.await?;
Ok(Verifier {
config: self.config,
span: self.span,
ctx: Some(ctx),
state: state::Committed {
mux_ctrl,
mux_fut,
ctx,
vm,
keys,
tls_transcript,


@@ -2,6 +2,8 @@ use std::{error::Error, fmt};
use mpc_tls::MpcTlsError;
use crate::transcript_internal::commit::encoding::EncodingError;
/// Error for [`Verifier`](crate::verifier::Verifier).
#[derive(Debug, thiserror::Error)]
pub struct VerifierError {
@@ -10,7 +12,7 @@ pub struct VerifierError {
}
impl VerifierError {
pub(crate) fn new<E>(kind: ErrorKind, source: E) -> Self
fn new<E>(kind: ErrorKind, source: E) -> Self
where
E: Into<Box<dyn Error + Send + Sync + 'static>>,
{
@@ -50,11 +52,12 @@ impl VerifierError {
}
#[derive(Debug)]
pub(crate) enum ErrorKind {
enum ErrorKind {
Io,
Config,
Mpc,
Zk,
Commit,
Verify,
}
@@ -67,6 +70,7 @@ impl fmt::Display for VerifierError {
ErrorKind::Config => f.write_str("config error")?,
ErrorKind::Mpc => f.write_str("mpc error")?,
ErrorKind::Zk => f.write_str("zk error")?,
ErrorKind::Commit => f.write_str("commit error")?,
ErrorKind::Verify => f.write_str("verification error")?,
}
@@ -84,8 +88,8 @@ impl From<std::io::Error> for VerifierError {
}
}
impl From<tlsn_mux::ConnectionError> for VerifierError {
fn from(e: tlsn_mux::ConnectionError) -> Self {
impl From<uid_mux::yamux::ConnectionError> for VerifierError {
fn from(e: uid_mux::yamux::ConnectionError) -> Self {
Self::new(ErrorKind::Io, e)
}
}
@@ -101,3 +105,9 @@ impl From<MpcTlsError> for VerifierError {
Self::new(ErrorKind::Mpc, e)
}
}
impl From<EncodingError> for VerifierError {
fn from(e: EncodingError) -> Self {
Self::new(ErrorKind::Commit, e)
}
}


@@ -2,7 +2,9 @@
use std::sync::Arc;
use crate::mux::{MuxControl, MuxFuture};
use mpc_tls::{MpcTlsFollower, SessionKeys};
use mpz_common::Context;
use tlsn_core::{
config::{prove::ProveRequest, tls_commit::TlsCommitRequest},
connection::{HandshakeData, ServerName},
@@ -23,6 +25,9 @@ opaque_debug::implement!(Initialized);
/// State after receiving protocol configuration from the prover.
pub struct CommitStart {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) ctx: Context,
pub(crate) request: TlsCommitRequest,
}
@@ -31,6 +36,8 @@ opaque_debug::implement!(CommitStart);
/// State after accepting the proposed TLS commitment protocol configuration and
/// performing preprocessing.
pub struct CommitAccepted {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) mpc_tls: MpcTlsFollower,
pub(crate) keys: SessionKeys,
pub(crate) vm: Arc<Mutex<Deap<VerifierMpc, VerifierZk>>>,
@@ -40,6 +47,9 @@ opaque_debug::implement!(CommitAccepted);
/// State after the TLS transcript has been committed.
pub struct Committed {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) ctx: Context,
pub(crate) vm: VerifierZk,
pub(crate) keys: SessionKeys,
pub(crate) tls_transcript: TlsTranscript,
@@ -49,6 +59,9 @@ opaque_debug::implement!(Committed);
/// State after receiving a proving request.
pub struct Verify {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) ctx: Context,
pub(crate) vm: VerifierZk,
pub(crate) keys: SessionKeys,
pub(crate) tls_transcript: TlsTranscript,


@@ -14,12 +14,19 @@ use tlsn_core::{
};
use crate::{
transcript_internal::{TranscriptRefs, auth::verify_plaintext, commit::hash::verify_hash},
transcript_internal::{
TranscriptRefs,
auth::verify_plaintext,
commit::{
encoding::{self, KeyStore},
hash::verify_hash,
},
},
verifier::VerifierError,
};
#[allow(clippy::too_many_arguments)]
pub(crate) async fn verify<T: Vm<Binary> + Send + Sync>(
pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
ctx: &mut Context,
vm: &mut T,
keys: &SessionKeys,
@@ -87,6 +94,11 @@ pub(crate) async fn verify<T: Vm<Binary> + Send + Sync>(
Direction::Sent => commit_sent.union_mut(idx),
Direction::Received => commit_recv.union_mut(idx),
});
if let Some((sent, recv)) = commit_config.encoding() {
commit_sent.union_mut(sent);
commit_recv.union_mut(recv);
}
}
let (sent_refs, sent_proof) = verify_plaintext(
@@ -139,6 +151,24 @@ pub(crate) async fn verify<T: Vm<Binary> + Send + Sync>(
sent_proof.verify().map_err(VerifierError::verify)?;
recv_proof.verify().map_err(VerifierError::verify)?;
let mut encoder_secret = None;
if let Some(commit_config) = request.transcript_commit()
&& let Some((sent, recv)) = commit_config.encoding()
{
let sent_map = transcript_refs
.sent
.index(sent)
.expect("ranges were authenticated");
let recv_map = transcript_refs
.recv
.index(recv)
.expect("ranges were authenticated");
let (secret, commitment) = encoding::transfer(ctx, vm, &sent_map, &recv_map).await?;
encoder_secret = Some(secret);
transcript_commitments.push(TranscriptCommitment::Encoding(commitment));
}
if let Some(hash_commitments) = hash_commitments {
for commitment in hash_commitments.try_recv().map_err(VerifierError::verify)? {
transcript_commitments.push(TranscriptCommitment::Hash(commitment));
@@ -148,6 +178,7 @@ pub(crate) async fn verify<T: Vm<Binary> + Send + Sync>(
Ok(VerifierOutput {
server_name,
transcript: request.reveal().is_some().then_some(transcript),
encoder_secret,
transcript_commitments,
})
}


@@ -1,6 +1,6 @@
use futures::{AsyncReadExt, AsyncWriteExt};
use rangeset::set::RangeSet;
use tlsn::{
Session,
config::{
prove::ProveConfig,
prover::ProverConfig,
@@ -9,9 +9,12 @@ use tlsn::{
verifier::VerifierConfig,
},
connection::ServerName,
hash::HashAlgId,
hash::{HashAlgId, HashProvider},
prover::Prover,
transcript::{Direction, Transcript, TranscriptCommitConfig, TranscriptCommitmentKind},
transcript::{
Direction, Transcript, TranscriptCommitConfig, TranscriptCommitment,
TranscriptCommitmentKind, TranscriptSecret,
},
verifier::{Verifier, VerifierOutput},
webpki::{CertificateDer, RootCertStore},
};
@@ -19,7 +22,9 @@ use tlsn_core::ProverOutput;
use tlsn_server_fixture::bind;
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::TokioAsyncReadCompatExt;
use tracing::instrument;
// Maximum number of bytes that can be sent from prover to server
const MAX_SENT_DATA: usize = 1 << 12;
@@ -36,34 +41,9 @@ async fn test() {
tracing_subscriber::fmt::init();
let (socket_0, socket_1) = tokio::io::duplex(2 << 23);
let mut session_p = Session::new(socket_0.compat());
let mut session_v = Session::new(socket_1.compat());
let prover = session_p
.new_prover(ProverConfig::builder().build().unwrap())
.unwrap();
let verifier = session_v
.new_verifier(
VerifierConfig::builder()
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.build()
.unwrap(),
)
.unwrap();
let (session_p_driver, session_p_handle) = session_p.split();
let (session_v_driver, session_v_handle) = session_v.split();
tokio::spawn(session_p_driver);
tokio::spawn(session_v_driver);
let ((_full_transcript, _prover_output), verifier_output) =
tokio::join!(run_prover(prover), run_verifier(verifier));
session_p_handle.close();
session_v_handle.close();
let ((full_transcript, prover_output), verifier_output) =
tokio::join!(prover(socket_0), verifier(socket_1));
let partial_transcript = verifier_output.transcript.unwrap();
let ServerName::Dns(server_name) = verifier_output.server_name.unwrap();
@@ -78,14 +58,61 @@ async fn test() {
partial_transcript.received_authed().iter().next().unwrap(),
0..10
);
let encoding_tree = prover_output
.transcript_secrets
.iter()
.find_map(|secret| {
if let TranscriptSecret::Encoding(tree) = secret {
Some(tree)
} else {
None
}
})
.unwrap();
let encoding_commitment = prover_output
.transcript_commitments
.iter()
.find_map(|commitment| {
if let TranscriptCommitment::Encoding(commitment) = commitment {
Some(commitment)
} else {
None
}
})
.unwrap();
let prove_sent = RangeSet::from(1..full_transcript.sent().len() - 1);
let prove_recv = RangeSet::from(1..full_transcript.received().len() - 1);
let idxs = [
(Direction::Sent, prove_sent.clone()),
(Direction::Received, prove_recv.clone()),
];
let proof = encoding_tree.proof(idxs.iter()).unwrap();
let (auth_sent, auth_recv) = proof
.verify_with_provider(
&HashProvider::default(),
&verifier_output.encoder_secret.unwrap(),
encoding_commitment,
full_transcript.sent(),
full_transcript.received(),
)
.unwrap();
assert_eq!(auth_sent, prove_sent);
assert_eq!(auth_recv, prove_recv);
}
async fn run_prover(prover: Prover) -> (Transcript, ProverOutput) {
#[instrument(skip(verifier_socket))]
async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
verifier_socket: T,
) -> (Transcript, ProverOutput) {
let (client_socket, server_socket) = tokio::io::duplex(2 << 16);
let server_task = tokio::spawn(bind(server_socket.compat()));
let prover = prover
let prover = Prover::new(ProverConfig::builder().build().unwrap())
.commit(
TlsCommitConfig::builder()
.protocol(
@@ -99,6 +126,7 @@ async fn run_prover(prover: Prover) -> (Transcript, ProverOutput) {
)
.build()
.unwrap(),
verifier_socket.compat(),
)
.await
.unwrap();
@@ -135,21 +163,25 @@ async fn run_prover(prover: Prover) -> (Transcript, ProverOutput) {
let mut builder = TranscriptCommitConfig::builder(prover.transcript());
let kind = TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256,
};
builder
.commit_with_kind(&(0..sent_tx_len), Direction::Sent, kind)
.unwrap();
builder
.commit_with_kind(&(0..recv_tx_len), Direction::Received, kind)
.unwrap();
builder
.commit_with_kind(&(1..sent_tx_len - 1), Direction::Sent, kind)
.unwrap();
builder
.commit_with_kind(&(1..recv_tx_len - 1), Direction::Received, kind)
.unwrap();
for kind in [
TranscriptCommitmentKind::Encoding,
TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256,
},
] {
builder
.commit_with_kind(&(0..sent_tx_len), Direction::Sent, kind)
.unwrap();
builder
.commit_with_kind(&(0..recv_tx_len), Direction::Received, kind)
.unwrap();
builder
.commit_with_kind(&(1..sent_tx_len - 1), Direction::Sent, kind)
.unwrap();
builder
.commit_with_kind(&(1..recv_tx_len - 1), Direction::Received, kind)
.unwrap();
}
let transcript_commit = builder.build().unwrap();
@@ -170,9 +202,21 @@ async fn run_prover(prover: Prover) -> (Transcript, ProverOutput) {
(transcript, output)
}
async fn run_verifier(verifier: Verifier) -> VerifierOutput {
#[instrument(skip(socket))]
async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
socket: T,
) -> VerifierOutput {
let verifier = Verifier::new(
VerifierConfig::builder()
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.build()
.unwrap(),
);
let verifier = verifier
.commit()
.commit(socket.compat())
.await
.unwrap()
.accept()