Compare commits

plot_py ... dev (4 commits)

Author     SHA1        Message                                                                        Date
sinu.eth   1897f0d1e6  refactor: remove encoding commitment (#1071)                                   2026-01-08 07:55:03 -08:00
                       * refactor: remove encoding commitment
                       * remove unused field
                       * clippy
dan        2101285f7f  chore(bench): dont bench large payloads (#1074)                                2026-01-05 09:55:35 +00:00
dan        98210e4059  chore(bench): added headed mode for debugging (#1073)                          2026-01-05 09:38:31 +00:00
dan        9dfac850d5  chore(harness): improve UX with progress bar, separate sweep benches (#1068)   2025-12-23 13:59:14 +00:00
48 changed files with 850 additions and 4316 deletions

Cargo.lock (generated, 1574 changes)

File diff suppressed because it is too large

View File

@@ -27,6 +27,7 @@ alloy-primitives = { version = "1.3.1", default-features = false }
alloy-signer = { version = "1.0", default-features = false }
alloy-signer-local = { version = "1.0", default-features = false }
rand06-compat = { workspace = true }
rangeset = { workspace = true }
rstest = { workspace = true }
tlsn-core = { workspace = true, features = ["fixtures"] }
tlsn-data-fixtures = { workspace = true }

View File

@@ -5,7 +5,7 @@ use rand::{Rng, rng};
use tlsn_core::{
connection::{ConnectionInfo, ServerEphemKey},
hash::HashAlgId,
transcript::{TranscriptCommitment, encoding::EncoderSecret},
transcript::TranscriptCommitment,
};
use crate::{
@@ -25,7 +25,6 @@ pub struct Sign {
connection_info: Option<ConnectionInfo>,
server_ephemeral_key: Option<ServerEphemKey>,
cert_commitment: ServerCertCommitment,
encoder_secret: Option<EncoderSecret>,
extensions: Vec<Extension>,
transcript_commitments: Vec<TranscriptCommitment>,
}
@@ -87,7 +86,6 @@ impl<'a> AttestationBuilder<'a, Accept> {
connection_info: None,
server_ephemeral_key: None,
cert_commitment,
encoder_secret: None,
transcript_commitments: Vec::new(),
extensions,
},
@@ -108,12 +106,6 @@ impl AttestationBuilder<'_, Sign> {
self
}
/// Sets the secret for encoding commitments.
pub fn encoder_secret(&mut self, secret: EncoderSecret) -> &mut Self {
self.state.encoder_secret = Some(secret);
self
}
/// Adds an extension to the attestation.
pub fn extension(&mut self, extension: Extension) -> &mut Self {
self.state.extensions.push(extension);
@@ -137,7 +129,6 @@ impl AttestationBuilder<'_, Sign> {
connection_info,
server_ephemeral_key,
cert_commitment,
encoder_secret,
extensions,
transcript_commitments,
} = self.state;
@@ -168,7 +159,6 @@ impl AttestationBuilder<'_, Sign> {
AttestationBuilderError::new(ErrorKind::Field, "handshake data was not set")
})?),
cert_commitment: field_id.next(cert_commitment),
encoder_secret: encoder_secret.map(|secret| field_id.next(secret)),
extensions: extensions
.into_iter()
.map(|extension| field_id.next(extension))
@@ -253,8 +243,7 @@ mod test {
use rstest::{fixture, rstest};
use tlsn_core::{
connection::{CertBinding, CertBindingV1_2},
fixtures::{ConnectionFixture, encoding_provider},
hash::Blake3,
fixtures::ConnectionFixture,
transcript::Transcript,
};
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
@@ -285,13 +274,7 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection,
Blake3::default(),
Vec::new(),
);
let RequestFixture { request, .. } = request_fixture(transcript, connection, Vec::new());
let attestation_config = AttestationConfig::builder()
.supported_signature_algs([SignatureAlgId::SECP256R1])
@@ -310,13 +293,7 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection,
Blake3::default(),
Vec::new(),
);
let RequestFixture { request, .. } = request_fixture(transcript, connection, Vec::new());
let attestation_config = AttestationConfig::builder()
.supported_signature_algs([SignatureAlgId::SECP256K1])
@@ -336,13 +313,7 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection,
Blake3::default(),
Vec::new(),
);
let RequestFixture { request, .. } = request_fixture(transcript, connection, Vec::new());
let attestation_builder = Attestation::builder(attestation_config)
.accept_request(request)
@@ -363,13 +334,8 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let RequestFixture { request, .. } =
request_fixture(transcript, connection.clone(), Vec::new());
let mut attestation_builder = Attestation::builder(attestation_config)
.accept_request(request)
@@ -393,13 +359,8 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let RequestFixture { request, .. } =
request_fixture(transcript, connection.clone(), Vec::new());
let mut attestation_builder = Attestation::builder(attestation_config)
.accept_request(request)
@@ -432,9 +393,7 @@ mod test {
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
vec![Extension {
id: b"foo".to_vec(),
value: b"bar".to_vec(),
@@ -461,9 +420,7 @@ mod test {
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
vec![Extension {
id: b"foo".to_vec(),
value: b"bar".to_vec(),
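
Note: with the encoder_secret field and its setter gone, the Notary-side builder chain shrinks to connection data plus transcript commitments. A condensed sketch of the trimmed call chain under the post-#1071 API; the request::Request import path and the surrounding setup are assumptions, see the integration test later in this diff for the full flow.

use tlsn_attestation::{Attestation, AttestationConfig, CryptoProvider, request::Request};
use tlsn_core::{
    connection::{ConnectionInfo, ServerEphemKey},
    transcript::TranscriptCommitment,
};

// Sketch only: the `request::Request` path is assumed, not confirmed by this diff.
fn notarize(
    config: AttestationConfig,
    request: Request,
    connection_info: ConnectionInfo,
    server_ephemeral_key: ServerEphemKey,
    commitments: Vec<TranscriptCommitment>, // hash commitments only, post-#1071
    provider: &CryptoProvider,
) -> Attestation {
    let mut builder = Attestation::builder(config).accept_request(request).unwrap();

    builder
        .connection_info(connection_info)
        .server_ephemeral_key(server_ephemeral_key)
        // .encoder_secret(..) no longer exists; nothing to install here.
        .transcript_commitments(commitments);

    builder.build(provider).unwrap()
}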

View File

@@ -2,11 +2,7 @@
use tlsn_core::{
connection::{CertBinding, CertBindingV1_2},
fixtures::ConnectionFixture,
hash::HashAlgorithm,
transcript::{
Transcript, TranscriptCommitConfigBuilder, TranscriptCommitment,
encoding::{EncodingProvider, EncodingTree},
},
transcript::{Transcript, TranscriptCommitConfigBuilder, TranscriptCommitment},
};
use crate::{
@@ -21,16 +17,13 @@ use crate::{
/// A Request fixture used for testing.
#[allow(missing_docs)]
pub struct RequestFixture {
pub encoding_tree: EncodingTree,
pub request: Request,
}
/// Returns a request fixture for testing.
pub fn request_fixture(
transcript: Transcript,
encodings_provider: impl EncodingProvider,
connection: ConnectionFixture,
encoding_hasher: impl HashAlgorithm,
extensions: Vec<Extension>,
) -> RequestFixture {
let provider = CryptoProvider::default();
@@ -50,16 +43,10 @@ pub fn request_fixture(
.unwrap();
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap();
// Prover constructs encoding tree.
let encoding_tree = EncodingTree::new(
&encoding_hasher,
transcripts_commitment_config.iter_encoding(),
&encodings_provider,
)
.unwrap();
let mut builder = RequestConfig::builder();
builder.transcript_commit(transcripts_commitment_config);
for extension in extensions {
builder.extension(extension);
}
@@ -74,10 +61,7 @@ pub fn request_fixture(
let (request, _) = request_builder.build(&provider).unwrap();
RequestFixture {
encoding_tree,
request,
}
RequestFixture { request }
}
/// Returns an attestation fixture for testing.

View File

@@ -79,8 +79,6 @@
//!
//! // Specify all the transcript commitments we want to make.
//! builder
//! // Use BLAKE3 for encoding commitments.
//! .encoding_hash_alg(HashAlgId::BLAKE3)
//! // Commit to all sent data.
//! .commit_sent(&(0..sent_len))?
//! // Commit to the first 10 bytes of sent data.
@@ -129,7 +127,7 @@
//!
//! ```no_run
//! # use tlsn_attestation::{Attestation, CryptoProvider, Secrets, presentation::Presentation};
//! # use tlsn_core::transcript::{TranscriptCommitmentKind, Direction};
//! # use tlsn_core::transcript::Direction;
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! # let attestation: Attestation = unimplemented!();
//! # let secrets: Secrets = unimplemented!();
@@ -140,8 +138,6 @@
//! let mut builder = secrets.transcript_proof_builder();
//!
//! builder
//! // Use transcript encoding commitments.
//! .commitment_kinds(&[TranscriptCommitmentKind::Encoding])
//! // Disclose the first 10 bytes of the sent data.
//! .reveal(&(0..10), Direction::Sent)?
//! // Disclose all of the received data.
@@ -219,7 +215,7 @@ use tlsn_core::{
connection::{ConnectionInfo, ServerEphemKey},
hash::{Hash, HashAlgorithm, TypedHash},
merkle::MerkleTree,
transcript::{TranscriptCommitment, encoding::EncoderSecret},
transcript::TranscriptCommitment,
};
use crate::{
@@ -301,8 +297,6 @@ pub enum FieldKind {
ServerEphemKey = 0x02,
/// Server identity commitment.
ServerIdentityCommitment = 0x03,
/// Encoding commitment.
EncodingCommitment = 0x04,
/// Plaintext hash commitment.
PlaintextHash = 0x05,
}
@@ -327,7 +321,6 @@ pub struct Body {
connection_info: Field<ConnectionInfo>,
server_ephemeral_key: Field<ServerEphemKey>,
cert_commitment: Field<ServerCertCommitment>,
encoder_secret: Option<Field<EncoderSecret>>,
extensions: Vec<Field<Extension>>,
transcript_commitments: Vec<Field<TranscriptCommitment>>,
}
@@ -373,7 +366,6 @@ impl Body {
connection_info: conn_info,
server_ephemeral_key,
cert_commitment,
encoder_secret,
extensions,
transcript_commitments,
} = self;
@@ -391,13 +383,6 @@ impl Body {
),
];
if let Some(encoder_secret) = encoder_secret {
fields.push((
encoder_secret.id,
hasher.hash_separated(&encoder_secret.data),
));
}
for field in extensions.iter() {
fields.push((field.id, hasher.hash_separated(&field.data)));
}
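
Note: taken together, the two doc-example edits above mean callers no longer pick an encoding hash algorithm when committing, nor a commitment kind when revealing. A condensed sketch of the disclosure side after the change; the ranges are illustrative, chaining as in the doc example.

use tlsn_attestation::Secrets;
use tlsn_core::transcript::Direction;

fn disclose(secrets: &Secrets) -> Result<(), Box<dyn std::error::Error>> {
    let mut builder = secrets.transcript_proof_builder();
    builder
        // Disclose the first 10 bytes of the sent data.
        .reveal(&(0..10), Direction::Sent)?
        // Disclose the first 20 bytes of the received data (illustrative range).
        .reveal_recv(&(0..20))?;
    let _transcript_proof = builder.build()?;
    Ok(())
}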

View File

@@ -91,11 +91,6 @@ impl Presentation {
transcript.verify_with_provider(
&provider.hash,
&attestation.body.connection_info().transcript_length,
attestation
.body
.encoder_secret
.as_ref()
.map(|field| &field.data),
attestation.body.transcript_commitments(),
)
})

View File

@@ -144,9 +144,7 @@ impl std::fmt::Display for ErrorKind {
#[cfg(test)]
mod test {
use tlsn_core::{
connection::TranscriptLength,
fixtures::{ConnectionFixture, encoding_provider},
hash::{Blake3, HashAlgId},
connection::TranscriptLength, fixtures::ConnectionFixture, hash::HashAlgId,
transcript::Transcript,
};
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
@@ -164,13 +162,8 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let RequestFixture { request, .. } =
request_fixture(transcript, connection.clone(), Vec::new());
let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
@@ -185,13 +178,8 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { mut request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let RequestFixture { mut request, .. } =
request_fixture(transcript, connection.clone(), Vec::new());
let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
@@ -209,13 +197,8 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { mut request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let RequestFixture { mut request, .. } =
request_fixture(transcript, connection.clone(), Vec::new());
let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
@@ -233,13 +216,8 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { mut request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let RequestFixture { mut request, .. } =
request_fixture(transcript, connection.clone(), Vec::new());
let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
@@ -265,13 +243,8 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let RequestFixture { request, .. } =
request_fixture(transcript, connection.clone(), Vec::new());
let mut attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
@@ -289,13 +262,8 @@ mod test {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let RequestFixture { request, .. } =
request_fixture(transcript, connection.clone(), Vec::new());
let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);

View File

@@ -49,6 +49,4 @@ impl_domain_separator!(tlsn_core::connection::ConnectionInfo);
impl_domain_separator!(tlsn_core::connection::CertBinding);
impl_domain_separator!(tlsn_core::transcript::TranscriptCommitment);
impl_domain_separator!(tlsn_core::transcript::TranscriptSecret);
impl_domain_separator!(tlsn_core::transcript::encoding::EncoderSecret);
impl_domain_separator!(tlsn_core::transcript::encoding::EncodingCommitment);
impl_domain_separator!(tlsn_core::transcript::hash::PlaintextHash);

View File

@@ -1,3 +1,5 @@
use rand::{Rng, SeedableRng, rngs::StdRng};
use rangeset::set::RangeSet;
use tlsn_attestation::{
Attestation, AttestationConfig, CryptoProvider,
presentation::PresentationOutput,
@@ -6,12 +8,11 @@ use tlsn_attestation::{
};
use tlsn_core::{
connection::{CertBinding, CertBindingV1_2},
fixtures::{self, ConnectionFixture, encoder_secret},
hash::Blake3,
fixtures::ConnectionFixture,
hash::{Blake3, Blinder, HashAlgId},
transcript::{
Direction, Transcript, TranscriptCommitConfigBuilder, TranscriptCommitment,
TranscriptSecret,
encoding::{EncodingCommitment, EncodingTree},
Direction, Transcript, TranscriptCommitment, TranscriptSecret,
hash::{PlaintextHash, PlaintextHashSecret, hash_plaintext},
},
};
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
@@ -19,6 +20,7 @@ use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
/// Tests that the attestation protocol and verification work end-to-end
#[test]
fn test_api() {
let mut rng = StdRng::seed_from_u64(0);
let mut provider = CryptoProvider::default();
// Configure signer for Notary
@@ -26,8 +28,6 @@ fn test_api() {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let (sent_len, recv_len) = transcript.len();
// Plaintext encodings which the Prover obtained from GC evaluation
let encodings_provider = fixtures::encoding_provider(GET_WITH_HEADER, OK_JSON);
// At the end of the TLS connection the Prover holds the:
let ConnectionFixture {
@@ -44,26 +44,38 @@ fn test_api() {
unreachable!()
};
// Prover specifies the ranges it wants to commit to.
let mut transcript_commitment_builder = TranscriptCommitConfigBuilder::new(&transcript);
transcript_commitment_builder
.commit_sent(&(0..sent_len))
.unwrap()
.commit_recv(&(0..recv_len))
.unwrap();
// Create hash commitments
let hasher = Blake3::default();
let sent_blinder: Blinder = rng.random();
let recv_blinder: Blinder = rng.random();
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap();
let sent_idx = RangeSet::from(0..sent_len);
let recv_idx = RangeSet::from(0..recv_len);
// Prover constructs encoding tree.
let encoding_tree = EncodingTree::new(
&Blake3::default(),
transcripts_commitment_config.iter_encoding(),
&encodings_provider,
)
.unwrap();
let sent_hash_commitment = PlaintextHash {
direction: Direction::Sent,
idx: sent_idx.clone(),
hash: hash_plaintext(&hasher, transcript.sent(), &sent_blinder),
};
let encoding_commitment = EncodingCommitment {
root: encoding_tree.root(),
let recv_hash_commitment = PlaintextHash {
direction: Direction::Received,
idx: recv_idx.clone(),
hash: hash_plaintext(&hasher, transcript.received(), &recv_blinder),
};
let sent_hash_secret = PlaintextHashSecret {
direction: Direction::Sent,
idx: sent_idx,
alg: HashAlgId::BLAKE3,
blinder: sent_blinder,
};
let recv_hash_secret = PlaintextHashSecret {
direction: Direction::Received,
idx: recv_idx,
alg: HashAlgId::BLAKE3,
blinder: recv_blinder,
};
let request_config = RequestConfig::default();
@@ -74,8 +86,14 @@ fn test_api() {
.handshake_data(server_cert_data)
.transcript(transcript)
.transcript_commitments(
vec![TranscriptSecret::Encoding(encoding_tree)],
vec![TranscriptCommitment::Encoding(encoding_commitment.clone())],
vec![
TranscriptSecret::Hash(sent_hash_secret),
TranscriptSecret::Hash(recv_hash_secret),
],
vec![
TranscriptCommitment::Hash(sent_hash_commitment.clone()),
TranscriptCommitment::Hash(recv_hash_commitment.clone()),
],
);
let (request, secrets) = request_builder.build(&provider).unwrap();
@@ -95,8 +113,10 @@ fn test_api() {
.connection_info(connection_info.clone())
// Server key Notary received during handshake
.server_ephemeral_key(server_ephemeral_key)
.encoder_secret(encoder_secret())
.transcript_commitments(vec![TranscriptCommitment::Encoding(encoding_commitment)]);
.transcript_commitments(vec![
TranscriptCommitment::Hash(sent_hash_commitment),
TranscriptCommitment::Hash(recv_hash_commitment),
]);
let attestation = attestation_builder.build(&provider).unwrap();
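
Note: the block above is the heart of the migration. Where the Prover previously built an EncodingTree over GC encodings, it now hashes each committed range under a fresh random blinder and keeps the blinder as the secret. A minimal self-contained sketch of that flow, using only types exercised by this test; the payloads are illustrative.

use rand::{Rng, SeedableRng, rngs::StdRng};
use rangeset::set::RangeSet;
use tlsn_core::{
    hash::{Blake3, Blinder, HashAlgId},
    transcript::{
        Direction, Transcript, TranscriptCommitment, TranscriptSecret,
        hash::{PlaintextHash, PlaintextHashSecret, hash_plaintext},
    },
};

fn main() {
    const TX: &[u8] = b"GET / HTTP/1.1\r\n\r\n";
    const RX: &[u8] = b"HTTP/1.1 200 OK\r\n\r\n";

    let mut rng = StdRng::seed_from_u64(0);
    let transcript = Transcript::new(TX, RX);
    let (sent_len, _recv_len) = transcript.len();

    // The blinder prevents brute-forcing low-entropy plaintext out of the hash.
    let blinder: Blinder = rng.random();
    let idx = RangeSet::from(0..sent_len);

    // Public half: goes into the attestation body.
    let commitment = TranscriptCommitment::Hash(PlaintextHash {
        direction: Direction::Sent,
        idx: idx.clone(),
        hash: hash_plaintext(&Blake3::default(), transcript.sent(), &blinder),
    });

    // Private half: retained by the Prover, opened later in a transcript proof.
    let secret = TranscriptSecret::Hash(PlaintextHashSecret {
        direction: Direction::Sent,
        idx,
        alg: HashAlgId::BLAKE3,
        blinder,
    });

    // Both halves feed the request builder, as in the test above:
    //   request_builder.transcript_commitments(vec![secret], vec![commitment]);
    let _ = (commitment, secret);
}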

View File

@@ -1,10 +1,7 @@
//! Fixtures for testing
mod provider;
pub mod transcript;
pub use provider::FixtureEncodingProvider;
use hex::FromHex;
use crate::{
@@ -13,10 +10,6 @@ use crate::{
ServerEphemKey, ServerName, ServerSignature, SignatureAlgorithm, TlsVersion,
TranscriptLength,
},
transcript::{
encoding::{EncoderSecret, EncodingProvider},
Transcript,
},
webpki::CertificateDer,
};
@@ -129,27 +122,3 @@ impl ConnectionFixture {
server_ephemeral_key
}
}
/// Returns an encoding provider fixture.
pub fn encoding_provider(tx: &[u8], rx: &[u8]) -> impl EncodingProvider {
let secret = encoder_secret();
FixtureEncodingProvider::new(&secret, Transcript::new(tx, rx))
}
/// Seed fixture.
const SEED: [u8; 32] = [0; 32];
/// Delta fixture.
const DELTA: [u8; 16] = [1; 16];
/// Returns an encoder secret fixture.
pub fn encoder_secret() -> EncoderSecret {
EncoderSecret::new(SEED, DELTA)
}
/// Returns a tampered encoder secret fixture.
pub fn encoder_secret_tampered_seed() -> EncoderSecret {
let mut seed = SEED;
seed[0] += 1;
EncoderSecret::new(seed, DELTA)
}

View File

@@ -1,41 +0,0 @@
use std::ops::Range;
use crate::transcript::{
encoding::{new_encoder, Encoder, EncoderSecret, EncodingProvider, EncodingProviderError},
Direction, Transcript,
};
/// A encoding provider fixture.
pub struct FixtureEncodingProvider {
encoder: Box<dyn Encoder>,
transcript: Transcript,
}
impl FixtureEncodingProvider {
/// Creates a new encoding provider fixture.
pub(crate) fn new(secret: &EncoderSecret, transcript: Transcript) -> Self {
Self {
encoder: Box::new(new_encoder(secret)),
transcript,
}
}
}
impl EncodingProvider for FixtureEncodingProvider {
fn provide_encoding(
&self,
direction: Direction,
range: Range<usize>,
dest: &mut Vec<u8>,
) -> Result<(), EncodingProviderError> {
let transcript = match direction {
Direction::Sent => &self.transcript.sent(),
Direction::Received => &self.transcript.received(),
};
let data = transcript.get(range.clone()).ok_or(EncodingProviderError)?;
self.encoder.encode_data(direction, range, data, dest);
Ok(())
}
}

View File

@@ -19,9 +19,7 @@ use serde::{Deserialize, Serialize};
use crate::{
connection::ServerName,
transcript::{
encoding::EncoderSecret, PartialTranscript, TranscriptCommitment, TranscriptSecret,
},
transcript::{PartialTranscript, TranscriptCommitment, TranscriptSecret},
};
/// Prover output.
@@ -42,8 +40,6 @@ pub struct VerifierOutput {
pub server_name: Option<ServerName>,
/// Transcript data.
pub transcript: Option<PartialTranscript>,
/// Encoding commitment secret.
pub encoder_secret: Option<EncoderSecret>,
/// Transcript commitments.
pub transcript_commitments: Vec<TranscriptCommitment>,
}

View File

@@ -63,11 +63,6 @@ impl MerkleProof {
Ok(())
}
/// Returns the leaf count of the Merkle tree associated with the proof.
pub(crate) fn leaf_count(&self) -> usize {
self.leaf_count
}
}
#[derive(Clone)]

View File

@@ -19,7 +19,6 @@
//! withheld.
mod commit;
pub mod encoding;
pub mod hash;
mod proof;
mod tls;

View File

@@ -8,27 +8,15 @@ use serde::{Deserialize, Serialize};
use crate::{
hash::HashAlgId,
transcript::{
encoding::{EncodingCommitment, EncodingTree},
hash::{PlaintextHash, PlaintextHashSecret},
Direction, RangeSet, Transcript,
},
};
/// The maximum allowed total bytelength of committed data for a single
/// commitment kind. Used to prevent DoS during verification. (May cause the
/// verifier to hash up to a max of 1GB * 128 = 128GB of data for certain kinds
/// of encoding commitments.)
///
/// This value must not exceed bcs's MAX_SEQUENCE_LENGTH limit (which is (1 <<
/// 31) - 1 by default)
pub(crate) const MAX_TOTAL_COMMITTED_DATA: usize = 1_000_000_000;
/// Kind of transcript commitment.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[non_exhaustive]
pub enum TranscriptCommitmentKind {
/// A commitment to encodings of the transcript.
Encoding,
/// A hash commitment to plaintext in the transcript.
Hash {
/// The hash algorithm used.
@@ -39,7 +27,6 @@ pub enum TranscriptCommitmentKind {
impl fmt::Display for TranscriptCommitmentKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Encoding => f.write_str("encoding"),
Self::Hash { alg } => write!(f, "hash ({alg})"),
}
}
@@ -49,8 +36,6 @@ impl fmt::Display for TranscriptCommitmentKind {
#[derive(Debug, Clone, Serialize, Deserialize)]
#[non_exhaustive]
pub enum TranscriptCommitment {
/// Encoding commitment.
Encoding(EncodingCommitment),
/// Plaintext hash commitment.
Hash(PlaintextHash),
}
@@ -59,8 +44,6 @@ pub enum TranscriptCommitment {
#[derive(Debug, Clone, Serialize, Deserialize)]
#[non_exhaustive]
pub enum TranscriptSecret {
/// Encoding tree.
Encoding(EncodingTree),
/// Plaintext hash secret.
Hash(PlaintextHashSecret),
}
@@ -68,9 +51,6 @@ pub enum TranscriptSecret {
/// Configuration for transcript commitments.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TranscriptCommitConfig {
encoding_hash_alg: HashAlgId,
has_encoding: bool,
has_hash: bool,
commits: Vec<((Direction, RangeSet<usize>), TranscriptCommitmentKind)>,
}
@@ -80,53 +60,23 @@ impl TranscriptCommitConfig {
TranscriptCommitConfigBuilder::new(transcript)
}
/// Returns the hash algorithm to use for encoding commitments.
pub fn encoding_hash_alg(&self) -> &HashAlgId {
&self.encoding_hash_alg
}
/// Returns `true` if the configuration has any encoding commitments.
pub fn has_encoding(&self) -> bool {
self.has_encoding
}
/// Returns `true` if the configuration has any hash commitments.
pub fn has_hash(&self) -> bool {
self.has_hash
}
/// Returns an iterator over the encoding commitment indices.
pub fn iter_encoding(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>)> {
self.commits.iter().filter_map(|(idx, kind)| match kind {
TranscriptCommitmentKind::Encoding => Some(idx),
_ => None,
})
self.commits
.iter()
.any(|(_, kind)| matches!(kind, TranscriptCommitmentKind::Hash { .. }))
}
/// Returns an iterator over the hash commitment indices.
pub fn iter_hash(&self) -> impl Iterator<Item = (&(Direction, RangeSet<usize>), &HashAlgId)> {
self.commits.iter().filter_map(|(idx, kind)| match kind {
TranscriptCommitmentKind::Hash { alg } => Some((idx, alg)),
_ => None,
self.commits.iter().map(|(idx, kind)| match kind {
TranscriptCommitmentKind::Hash { alg } => (idx, alg),
})
}
/// Returns a request for the transcript commitments.
pub fn to_request(&self) -> TranscriptCommitRequest {
TranscriptCommitRequest {
encoding: self.has_encoding.then(|| {
let mut sent = RangeSet::default();
let mut recv = RangeSet::default();
for (dir, idx) in self.iter_encoding() {
match dir {
Direction::Sent => sent.union_mut(idx),
Direction::Received => recv.union_mut(idx),
}
}
(sent, recv)
}),
hash: self
.iter_hash()
.map(|((dir, idx), alg)| (*dir, idx.clone(), *alg))
@@ -136,15 +86,9 @@ impl TranscriptCommitConfig {
}
/// A builder for [`TranscriptCommitConfig`].
///
/// The default hash algorithm is [`HashAlgId::BLAKE3`] and the default kind
/// is [`TranscriptCommitmentKind::Encoding`].
#[derive(Debug)]
pub struct TranscriptCommitConfigBuilder<'a> {
transcript: &'a Transcript,
encoding_hash_alg: HashAlgId,
has_encoding: bool,
has_hash: bool,
default_kind: TranscriptCommitmentKind,
commits: HashSet<((Direction, RangeSet<usize>), TranscriptCommitmentKind)>,
}
@@ -154,20 +98,13 @@ impl<'a> TranscriptCommitConfigBuilder<'a> {
pub fn new(transcript: &'a Transcript) -> Self {
Self {
transcript,
encoding_hash_alg: HashAlgId::BLAKE3,
has_encoding: false,
has_hash: false,
default_kind: TranscriptCommitmentKind::Encoding,
default_kind: TranscriptCommitmentKind::Hash {
alg: HashAlgId::BLAKE3,
},
commits: HashSet::default(),
}
}
/// Sets the hash algorithm to use for encoding commitments.
pub fn encoding_hash_alg(&mut self, alg: HashAlgId) -> &mut Self {
self.encoding_hash_alg = alg;
self
}
/// Sets the default kind of commitment to use.
pub fn default_kind(&mut self, default_kind: TranscriptCommitmentKind) -> &mut Self {
self.default_kind = default_kind;
@@ -201,11 +138,6 @@ impl<'a> TranscriptCommitConfigBuilder<'a> {
));
}
match kind {
TranscriptCommitmentKind::Encoding => self.has_encoding = true,
TranscriptCommitmentKind::Hash { .. } => self.has_hash = true,
}
self.commits.insert(((direction, idx), kind));
Ok(self)
@@ -252,9 +184,6 @@ impl<'a> TranscriptCommitConfigBuilder<'a> {
/// Builds the configuration.
pub fn build(self) -> Result<TranscriptCommitConfig, TranscriptCommitConfigBuilderError> {
Ok(TranscriptCommitConfig {
encoding_hash_alg: self.encoding_hash_alg,
has_encoding: self.has_encoding,
has_hash: self.has_hash,
commits: Vec::from_iter(self.commits),
})
}
@@ -301,16 +230,10 @@ impl fmt::Display for TranscriptCommitConfigBuilderError {
/// Request to compute transcript commitments.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TranscriptCommitRequest {
encoding: Option<(RangeSet<usize>, RangeSet<usize>)>,
hash: Vec<(Direction, RangeSet<usize>, HashAlgId)>,
}
impl TranscriptCommitRequest {
/// Returns `true` if an encoding commitment is requested.
pub fn has_encoding(&self) -> bool {
self.encoding.is_some()
}
/// Returns `true` if a hash commitment is requested.
pub fn has_hash(&self) -> bool {
!self.hash.is_empty()
@@ -320,11 +243,6 @@ impl TranscriptCommitRequest {
pub fn iter_hash(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>, HashAlgId)> {
self.hash.iter()
}
/// Returns the ranges of the encoding commitments.
pub fn encoding(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
self.encoding.as_ref()
}
}
#[cfg(test)]
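
Note: the builder's default commitment kind flips from Encoding to Hash { alg: BLAKE3 }, and the config/request types drop their encoding branches. A sketch of committing under the new defaults; lengths and payloads are illustrative.

use tlsn_core::{
    hash::HashAlgId,
    transcript::{Transcript, TranscriptCommitConfigBuilder, TranscriptCommitmentKind},
};

fn main() {
    const TX: &[u8] = b"hello";
    const RX: &[u8] = b"world";
    let transcript = Transcript::new(TX, RX);

    let mut builder = TranscriptCommitConfigBuilder::new(&transcript);
    // The default kind is now Hash { alg: BLAKE3 }; commits use it unless overridden.
    builder.commit_sent(&(0..TX.len())).unwrap();
    builder
        .default_kind(TranscriptCommitmentKind::Hash { alg: HashAlgId::SHA256 })
        .commit_recv(&(0..RX.len()))
        .unwrap();

    let config = builder.build().unwrap();
    // The wire request now carries only hash commitments.
    assert!(config.to_request().has_hash());
}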

View File

@@ -1,22 +0,0 @@
//! Transcript encoding commitments and proofs.
mod encoder;
mod proof;
mod provider;
mod tree;
pub use encoder::{new_encoder, Encoder, EncoderSecret};
pub use proof::{EncodingProof, EncodingProofError};
pub use provider::{EncodingProvider, EncodingProviderError};
pub use tree::{EncodingTree, EncodingTreeError};
use serde::{Deserialize, Serialize};
use crate::hash::TypedHash;
/// Transcript encoding commitment.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct EncodingCommitment {
/// Merkle root of the encoding commitments.
pub root: TypedHash,
}

View File

@@ -1,137 +0,0 @@
use std::ops::Range;
use crate::transcript::Direction;
use itybity::ToBits;
use rand::{RngCore, SeedableRng};
use rand_chacha::ChaCha12Rng;
use serde::{Deserialize, Serialize};
/// The size of the encoding for 1 bit, in bytes.
const BIT_ENCODING_SIZE: usize = 16;
/// The size of the encoding for 1 byte, in bytes.
const BYTE_ENCODING_SIZE: usize = 128;
/// Secret used by an encoder to generate encodings.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct EncoderSecret {
seed: [u8; 32],
delta: [u8; BIT_ENCODING_SIZE],
}
opaque_debug::implement!(EncoderSecret);
impl EncoderSecret {
/// Creates a new secret.
///
/// # Arguments
///
/// * `seed` - The seed for the PRG.
/// * `delta` - Delta for deriving the one-encodings.
pub fn new(seed: [u8; 32], delta: [u8; 16]) -> Self {
Self { seed, delta }
}
/// Returns the seed.
pub fn seed(&self) -> &[u8; 32] {
&self.seed
}
/// Returns the delta.
pub fn delta(&self) -> &[u8; 16] {
&self.delta
}
}
/// Creates a new encoder.
pub fn new_encoder(secret: &EncoderSecret) -> impl Encoder {
ChaChaEncoder::new(secret)
}
pub(crate) struct ChaChaEncoder {
seed: [u8; 32],
delta: [u8; 16],
}
impl ChaChaEncoder {
pub(crate) fn new(secret: &EncoderSecret) -> Self {
let seed = *secret.seed();
let delta = *secret.delta();
Self { seed, delta }
}
pub(crate) fn new_prg(&self, stream_id: u64) -> ChaCha12Rng {
let mut prg = ChaCha12Rng::from_seed(self.seed);
prg.set_stream(stream_id);
prg.set_word_pos(0);
prg
}
}
/// A transcript encoder.
///
/// This is an internal implementation detail that should not be exposed to the
/// public API.
pub trait Encoder {
/// Writes the zero encoding for the given range of the transcript into the
/// destination buffer.
fn encode_range(&self, direction: Direction, range: Range<usize>, dest: &mut Vec<u8>);
/// Writes the encoding for the given data into the destination buffer.
fn encode_data(
&self,
direction: Direction,
range: Range<usize>,
data: &[u8],
dest: &mut Vec<u8>,
);
}
impl Encoder for ChaChaEncoder {
fn encode_range(&self, direction: Direction, range: Range<usize>, dest: &mut Vec<u8>) {
// ChaCha encoder works with 32-bit words. Each encoded bit is 128 bits long.
const WORDS_PER_BYTE: u128 = 8 * 128 / 32;
let stream_id: u64 = match direction {
Direction::Sent => 0,
Direction::Received => 1,
};
let mut prg = self.new_prg(stream_id);
let len = range.len() * BYTE_ENCODING_SIZE;
let pos = dest.len();
// Write 0s to the destination buffer.
dest.resize(pos + len, 0);
// Fill the destination buffer with the PRG.
prg.set_word_pos(range.start as u128 * WORDS_PER_BYTE);
prg.fill_bytes(&mut dest[pos..pos + len]);
}
fn encode_data(
&self,
direction: Direction,
range: Range<usize>,
data: &[u8],
dest: &mut Vec<u8>,
) {
const ZERO: [u8; 16] = [0; BIT_ENCODING_SIZE];
let pos = dest.len();
// Write the zero encoding for the given range.
self.encode_range(direction, range, dest);
let dest = &mut dest[pos..];
for (pos, bit) in data.iter_lsb0().enumerate() {
// Add the delta to the encoding whenever the encoded bit is 1,
// otherwise add a zero.
let summand = if bit { &self.delta } else { &ZERO };
dest[pos * BIT_ENCODING_SIZE..(pos + 1) * BIT_ENCODING_SIZE]
.iter_mut()
.zip(summand)
.for_each(|(a, b)| *a ^= *b);
}
}
}
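
Note: for the record, the removed encoder's sizing explains the x128 factor cited in the old MAX_TOTAL_COMMITTED_DATA comment: each bit encodes to 16 bytes, so one plaintext byte costs 8 x 16 = 128 bytes of encoding, and a 1 GB commitment could force a verifier to hash up to 128 GB. A tiny check of that arithmetic:

// Constants as in the deleted file above.
const BIT_ENCODING_SIZE: usize = 16; // bytes per encoded bit
const BYTE_ENCODING_SIZE: usize = 8 * BIT_ENCODING_SIZE; // bytes per encoded byte

fn main() {
    assert_eq!(BYTE_ENCODING_SIZE, 128);
    // Old DoS bound: 1_000_000_000 committed bytes -> 128 GB of encodings to hash.
    let max_total_committed_data: usize = 1_000_000_000;
    assert_eq!(max_total_committed_data * BYTE_ENCODING_SIZE, 128_000_000_000);
}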

View File

@@ -1,361 +0,0 @@
use std::{collections::HashMap, fmt};
use rangeset::set::RangeSet;
use serde::{Deserialize, Serialize};
use crate::{
hash::{Blinder, HashProvider, HashProviderError},
merkle::{MerkleError, MerkleProof},
transcript::{
commit::MAX_TOTAL_COMMITTED_DATA,
encoding::{new_encoder, Encoder, EncoderSecret, EncodingCommitment},
Direction,
},
};
/// An opening of a leaf in the encoding tree.
#[derive(Clone, Serialize, Deserialize)]
pub(super) struct Opening {
pub(super) direction: Direction,
pub(super) idx: RangeSet<usize>,
pub(super) blinder: Blinder,
}
opaque_debug::implement!(Opening);
/// An encoding commitment proof.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(try_from = "validation::EncodingProofUnchecked")]
pub struct EncodingProof {
/// The proof of inclusion of the commitment(s) in the Merkle tree of
/// commitments.
pub(super) inclusion_proof: MerkleProof,
pub(super) openings: HashMap<usize, Opening>,
}
impl EncodingProof {
/// Verifies the proof against the commitment.
///
/// Returns the authenticated indices of the sent and received data,
/// respectively.
///
/// # Arguments
///
/// * `provider` - Hash provider.
/// * `commitment` - Encoding commitment to verify against.
/// * `sent` - Sent data to authenticate.
/// * `recv` - Received data to authenticate.
pub fn verify_with_provider(
&self,
provider: &HashProvider,
secret: &EncoderSecret,
commitment: &EncodingCommitment,
sent: &[u8],
recv: &[u8],
) -> Result<(RangeSet<usize>, RangeSet<usize>), EncodingProofError> {
let hasher = provider.get(&commitment.root.alg)?;
let encoder = new_encoder(secret);
let Self {
inclusion_proof,
openings,
} = self;
let mut leaves = Vec::with_capacity(openings.len());
let mut expected_leaf = Vec::default();
let mut total_opened = 0u128;
let mut auth_sent = RangeSet::default();
let mut auth_recv = RangeSet::default();
for (
id,
Opening {
direction,
idx,
blinder,
},
) in openings
{
// Make sure the amount of data being proved is bounded.
total_opened += idx.len() as u128;
if total_opened > MAX_TOTAL_COMMITTED_DATA as u128 {
return Err(EncodingProofError::new(
ErrorKind::Proof,
"exceeded maximum allowed data",
))?;
}
let (data, auth) = match direction {
Direction::Sent => (sent, &mut auth_sent),
Direction::Received => (recv, &mut auth_recv),
};
// Make sure the ranges are within the bounds of the transcript.
if idx.end().unwrap_or(0) > data.len() {
return Err(EncodingProofError::new(
ErrorKind::Proof,
format!(
"index out of bounds of the transcript ({}): {} > {}",
direction,
idx.end().unwrap_or(0),
data.len()
),
));
}
expected_leaf.clear();
for range in idx.iter() {
encoder.encode_data(*direction, range.clone(), &data[range], &mut expected_leaf);
}
expected_leaf.extend_from_slice(blinder.as_bytes());
// Compute the expected hash of the commitment to make sure it is
// present in the merkle tree.
leaves.push((*id, hasher.hash(&expected_leaf)));
auth.union_mut(idx);
}
// Verify that the expected hashes are present in the merkle tree.
//
// This proves the Prover committed to the purported data prior to the encoder
// seed being revealed. Ergo, if the encodings are authentic then the purported
// data is authentic.
inclusion_proof.verify(hasher, &commitment.root, leaves)?;
Ok((auth_sent, auth_recv))
}
}
/// Error for [`EncodingProof`].
#[derive(Debug, thiserror::Error)]
pub struct EncodingProofError {
kind: ErrorKind,
source: Option<Box<dyn std::error::Error + Send + Sync>>,
}
impl EncodingProofError {
fn new<E>(kind: ErrorKind, source: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
Self {
kind,
source: Some(source.into()),
}
}
}
#[derive(Debug)]
enum ErrorKind {
Provider,
Proof,
}
impl fmt::Display for EncodingProofError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("encoding proof error: ")?;
match self.kind {
ErrorKind::Provider => f.write_str("provider error")?,
ErrorKind::Proof => f.write_str("proof error")?,
}
if let Some(source) = &self.source {
write!(f, " caused by: {source}")?;
}
Ok(())
}
}
impl From<HashProviderError> for EncodingProofError {
fn from(error: HashProviderError) -> Self {
Self::new(ErrorKind::Provider, error)
}
}
impl From<MerkleError> for EncodingProofError {
fn from(error: MerkleError) -> Self {
Self::new(ErrorKind::Proof, error)
}
}
/// Invalid encoding proof error.
#[derive(Debug, thiserror::Error)]
#[error("invalid encoding proof: {0}")]
pub struct InvalidEncodingProof(&'static str);
mod validation {
use super::*;
/// The maximum allowed height of the Merkle tree of encoding commitments.
///
/// The statistical security parameter (SSP) of the encoding commitment
/// protocol is calculated as "the number of uniformly random bits in a
/// single bit's encoding minus `MAX_HEIGHT`".
///
/// For example, a bit encoding used in garbled circuits typically has 127
/// uniformly random bits, hence when using it in the encoding
/// commitment protocol, the SSP is 127 - 30 = 97 bits.
///
/// Leaving this validation here as a fail-safe in case we ever start
/// using shorter encodings.
const MAX_HEIGHT: usize = 30;
#[derive(Debug, Deserialize)]
pub(super) struct EncodingProofUnchecked {
inclusion_proof: MerkleProof,
openings: HashMap<usize, Opening>,
}
impl TryFrom<EncodingProofUnchecked> for EncodingProof {
type Error = InvalidEncodingProof;
fn try_from(unchecked: EncodingProofUnchecked) -> Result<Self, Self::Error> {
if unchecked.inclusion_proof.leaf_count() > 1 << MAX_HEIGHT {
return Err(InvalidEncodingProof(
"the height of the tree exceeds the maximum allowed",
));
}
Ok(Self {
inclusion_proof: unchecked.inclusion_proof,
openings: unchecked.openings,
})
}
}
}
#[cfg(test)]
mod test {
use tlsn_data_fixtures::http::{request::POST_JSON, response::OK_JSON};
use crate::{
fixtures::{encoder_secret, encoder_secret_tampered_seed, encoding_provider},
hash::Blake3,
transcript::{encoding::EncodingTree, Transcript},
};
use super::*;
struct EncodingFixture {
transcript: Transcript,
proof: EncodingProof,
commitment: EncodingCommitment,
}
fn new_encoding_fixture() -> EncodingFixture {
let transcript = Transcript::new(POST_JSON, OK_JSON);
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
let idx_1 = (Direction::Received, RangeSet::from(0..OK_JSON.len()));
let provider = encoding_provider(transcript.sent(), transcript.received());
let tree = EncodingTree::new(&Blake3::default(), [&idx_0, &idx_1], &provider).unwrap();
let proof = tree.proof([&idx_0, &idx_1].into_iter()).unwrap();
let commitment = EncodingCommitment { root: tree.root() };
EncodingFixture {
transcript,
proof,
commitment,
}
}
#[test]
fn test_verify_encoding_proof_tampered_seed() {
let EncodingFixture {
transcript,
proof,
commitment,
} = new_encoding_fixture();
let err = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret_tampered_seed(),
&commitment,
transcript.sent(),
transcript.received(),
)
.unwrap_err();
assert!(matches!(err.kind, ErrorKind::Proof));
}
#[test]
fn test_verify_encoding_proof_out_of_range() {
let EncodingFixture {
transcript,
proof,
commitment,
} = new_encoding_fixture();
let sent = &transcript.sent()[transcript.sent().len() - 1..];
let recv = &transcript.received()[transcript.received().len() - 2..];
let err = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
sent,
recv,
)
.unwrap_err();
assert!(matches!(err.kind, ErrorKind::Proof));
}
#[test]
fn test_verify_encoding_proof_tampered_idx() {
let EncodingFixture {
transcript,
mut proof,
commitment,
} = new_encoding_fixture();
let Opening { idx, .. } = proof.openings.values_mut().next().unwrap();
*idx = RangeSet::from([0..3, 13..15]);
let err = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
transcript.sent(),
transcript.received(),
)
.unwrap_err();
assert!(matches!(err.kind, ErrorKind::Proof));
}
#[test]
fn test_verify_encoding_proof_tampered_encoding_blinder() {
let EncodingFixture {
transcript,
mut proof,
commitment,
} = new_encoding_fixture();
let Opening { blinder, .. } = proof.openings.values_mut().next().unwrap();
*blinder = rand::random();
let err = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
transcript.sent(),
transcript.received(),
)
.unwrap_err();
assert!(matches!(err.kind, ErrorKind::Proof));
}
}

View File

@@ -1,19 +0,0 @@
use std::ops::Range;
use crate::transcript::Direction;
/// A provider of plaintext encodings.
pub trait EncodingProvider {
/// Writes the encoding of the given range into the destination buffer.
fn provide_encoding(
&self,
direction: Direction,
range: Range<usize>,
dest: &mut Vec<u8>,
) -> Result<(), EncodingProviderError>;
}
/// Error for [`EncodingProvider`].
#[derive(Debug, thiserror::Error)]
#[error("failed to provide encoding")]
pub struct EncodingProviderError;

View File

@@ -1,327 +0,0 @@
use std::collections::HashMap;
use bimap::BiMap;
use rangeset::set::RangeSet;
use serde::{Deserialize, Serialize};
use crate::{
hash::{Blinder, HashAlgId, HashAlgorithm, TypedHash},
merkle::MerkleTree,
transcript::{
encoding::{
proof::{EncodingProof, Opening},
EncodingProvider,
},
Direction,
},
};
/// Encoding tree builder error.
#[derive(Debug, thiserror::Error)]
pub enum EncodingTreeError {
/// Index is out of bounds of the transcript.
#[error("index is out of bounds of the transcript")]
OutOfBounds {
/// The index.
index: RangeSet<usize>,
/// The transcript length.
transcript_length: usize,
},
/// Encoding provider is missing an encoding for an index.
#[error("encoding provider is missing an encoding for an index")]
MissingEncoding {
/// The index which is missing.
index: RangeSet<usize>,
},
/// Index is missing from the tree.
#[error("index is missing from the tree")]
MissingLeaf {
/// The index which is missing.
index: RangeSet<usize>,
},
}
/// A merkle tree of transcript encodings.
#[derive(Clone, Serialize, Deserialize)]
pub struct EncodingTree {
/// Merkle tree of the commitments.
tree: MerkleTree,
/// Nonces used to blind the hashes.
blinders: Vec<Blinder>,
/// Mapping between the index of a leaf and the transcript index it
/// corresponds to.
idxs: BiMap<usize, (Direction, RangeSet<usize>)>,
/// Union of all transcript indices in the sent direction.
sent_idx: RangeSet<usize>,
/// Union of all transcript indices in the received direction.
received_idx: RangeSet<usize>,
}
opaque_debug::implement!(EncodingTree);
impl EncodingTree {
/// Creates a new encoding tree.
///
/// # Arguments
///
/// * `hasher` - The hash algorithm to use.
/// * `idxs` - The subsequence indices to commit to.
/// * `provider` - The encoding provider.
pub fn new<'idx>(
hasher: &dyn HashAlgorithm,
idxs: impl IntoIterator<Item = &'idx (Direction, RangeSet<usize>)>,
provider: &dyn EncodingProvider,
) -> Result<Self, EncodingTreeError> {
let mut this = Self {
tree: MerkleTree::new(hasher.id()),
blinders: Vec::new(),
idxs: BiMap::new(),
sent_idx: RangeSet::default(),
received_idx: RangeSet::default(),
};
let mut leaves = Vec::new();
let mut encoding = Vec::new();
for dir_idx in idxs {
let direction = dir_idx.0;
let idx = &dir_idx.1;
// Ignore empty indices.
if idx.is_empty() {
continue;
}
if this.idxs.contains_right(dir_idx) {
// The subsequence is already in the tree.
continue;
}
let blinder: Blinder = rand::random();
encoding.clear();
for range in idx.iter() {
provider
.provide_encoding(direction, range, &mut encoding)
.map_err(|_| EncodingTreeError::MissingEncoding { index: idx.clone() })?;
}
encoding.extend_from_slice(blinder.as_bytes());
let leaf = hasher.hash(&encoding);
leaves.push(leaf);
this.blinders.push(blinder);
this.idxs.insert(this.idxs.len(), dir_idx.clone());
match direction {
Direction::Sent => this.sent_idx.union_mut(idx),
Direction::Received => this.received_idx.union_mut(idx),
}
}
this.tree.insert(hasher, leaves);
Ok(this)
}
/// Returns the root of the tree.
pub fn root(&self) -> TypedHash {
self.tree.root()
}
/// Returns the hash algorithm of the tree.
pub fn algorithm(&self) -> HashAlgId {
self.tree.algorithm()
}
/// Generates a proof for the given indices.
///
/// # Arguments
///
/// * `idxs` - The transcript indices to prove.
pub fn proof<'idx>(
&self,
idxs: impl Iterator<Item = &'idx (Direction, RangeSet<usize>)>,
) -> Result<EncodingProof, EncodingTreeError> {
let mut openings = HashMap::new();
for dir_idx in idxs {
let direction = dir_idx.0;
let idx = &dir_idx.1;
let leaf_idx = *self
.idxs
.get_by_right(dir_idx)
.ok_or_else(|| EncodingTreeError::MissingLeaf { index: idx.clone() })?;
let blinder = self.blinders[leaf_idx].clone();
openings.insert(
leaf_idx,
Opening {
direction,
idx: idx.clone(),
blinder,
},
);
}
let mut indices = openings.keys().copied().collect::<Vec<_>>();
indices.sort();
Ok(EncodingProof {
inclusion_proof: self.tree.proof(&indices),
openings,
})
}
/// Returns whether the tree contains the given transcript index.
pub fn contains(&self, idx: &(Direction, RangeSet<usize>)) -> bool {
self.idxs.contains_right(idx)
}
pub(crate) fn idx(&self, direction: Direction) -> &RangeSet<usize> {
match direction {
Direction::Sent => &self.sent_idx,
Direction::Received => &self.received_idx,
}
}
/// Returns the committed transcript indices.
pub(crate) fn transcript_indices(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>)> {
self.idxs.right_values()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
fixtures::{encoder_secret, encoding_provider},
hash::{Blake3, HashProvider},
transcript::{encoding::EncodingCommitment, Transcript},
};
use tlsn_data_fixtures::http::{request::POST_JSON, response::OK_JSON};
fn new_tree<'seq>(
transcript: &Transcript,
idxs: impl Iterator<Item = &'seq (Direction, RangeSet<usize>)>,
) -> Result<EncodingTree, EncodingTreeError> {
let provider = encoding_provider(transcript.sent(), transcript.received());
EncodingTree::new(&Blake3::default(), idxs, &provider)
}
#[test]
fn test_encoding_tree() {
let transcript = Transcript::new(POST_JSON, OK_JSON);
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
let idx_1 = (Direction::Received, RangeSet::from(0..OK_JSON.len()));
let tree = new_tree(&transcript, [&idx_0, &idx_1].into_iter()).unwrap();
assert!(tree.contains(&idx_0));
assert!(tree.contains(&idx_1));
let proof = tree.proof([&idx_0, &idx_1].into_iter()).unwrap();
let commitment = EncodingCommitment { root: tree.root() };
let (auth_sent, auth_recv) = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
transcript.sent(),
transcript.received(),
)
.unwrap();
assert_eq!(auth_sent, idx_0.1);
assert_eq!(auth_recv, idx_1.1);
}
#[test]
fn test_encoding_tree_multiple_ranges() {
let transcript = Transcript::new(POST_JSON, OK_JSON);
let idx_0 = (Direction::Sent, RangeSet::from(0..1));
let idx_1 = (Direction::Sent, RangeSet::from(1..POST_JSON.len()));
let idx_2 = (Direction::Received, RangeSet::from(0..1));
let idx_3 = (Direction::Received, RangeSet::from(1..OK_JSON.len()));
let tree = new_tree(&transcript, [&idx_0, &idx_1, &idx_2, &idx_3].into_iter()).unwrap();
assert!(tree.contains(&idx_0));
assert!(tree.contains(&idx_1));
assert!(tree.contains(&idx_2));
assert!(tree.contains(&idx_3));
let proof = tree
.proof([&idx_0, &idx_1, &idx_2, &idx_3].into_iter())
.unwrap();
let commitment = EncodingCommitment { root: tree.root() };
let (auth_sent, auth_recv) = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
transcript.sent(),
transcript.received(),
)
.unwrap();
let mut expected_auth_sent = RangeSet::default();
expected_auth_sent.union_mut(&idx_0.1);
expected_auth_sent.union_mut(&idx_1.1);
let mut expected_auth_recv = RangeSet::default();
expected_auth_recv.union_mut(&idx_2.1);
expected_auth_recv.union_mut(&idx_3.1);
assert_eq!(auth_sent, expected_auth_sent);
assert_eq!(auth_recv, expected_auth_recv);
}
#[test]
fn test_encoding_tree_proof_missing_leaf() {
let transcript = Transcript::new(POST_JSON, OK_JSON);
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
let idx_1 = (Direction::Received, RangeSet::from(0..4));
let idx_2 = (Direction::Received, RangeSet::from(4..OK_JSON.len()));
let tree = new_tree(&transcript, [&idx_0, &idx_1].into_iter()).unwrap();
let result = tree
.proof([&idx_0, &idx_1, &idx_2].into_iter())
.unwrap_err();
assert!(matches!(result, EncodingTreeError::MissingLeaf { .. }));
}
#[test]
fn test_encoding_tree_out_of_bounds() {
let transcript = Transcript::new(POST_JSON, OK_JSON);
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len() + 1));
let idx_1 = (Direction::Received, RangeSet::from(0..OK_JSON.len() + 1));
let result = new_tree(&transcript, [&idx_0].into_iter()).unwrap_err();
assert!(matches!(result, EncodingTreeError::MissingEncoding { .. }));
let result = new_tree(&transcript, [&idx_1].into_iter()).unwrap_err();
assert!(matches!(result, EncodingTreeError::MissingEncoding { .. }));
}
#[test]
fn test_encoding_tree_missing_encoding() {
let provider = encoding_provider(&[], &[]);
let result = EncodingTree::new(
&Blake3::default(),
[(Direction::Sent, RangeSet::from(0..8))].iter(),
&provider,
)
.unwrap_err();
assert!(matches!(result, EncodingTreeError::MissingEncoding { .. }));
}
}

View File

@@ -14,7 +14,6 @@ use crate::{
hash::{HashAlgId, HashProvider},
transcript::{
commit::{TranscriptCommitment, TranscriptCommitmentKind},
encoding::{EncoderSecret, EncodingProof, EncodingProofError, EncodingTree},
hash::{hash_plaintext, PlaintextHash, PlaintextHashSecret},
Direction, PartialTranscript, RangeSet, Transcript, TranscriptSecret,
},
@@ -32,14 +31,12 @@ const DEFAULT_COMMITMENT_KINDS: &[TranscriptCommitmentKind] = &[
TranscriptCommitmentKind::Hash {
alg: HashAlgId::KECCAK256,
},
TranscriptCommitmentKind::Encoding,
];
/// Proof of the contents of a transcript.
#[derive(Clone, Serialize, Deserialize)]
pub struct TranscriptProof {
transcript: PartialTranscript,
encoding_proof: Option<EncodingProof>,
hash_secrets: Vec<PlaintextHashSecret>,
}
@@ -53,27 +50,18 @@ impl TranscriptProof {
/// # Arguments
///
/// * `provider` - The hash provider to use for verification.
/// * `attestation_body` - The attestation body to verify against.
/// * `length` - The transcript length.
/// * `commitments` - The commitments to verify against.
pub fn verify_with_provider<'a>(
self,
provider: &HashProvider,
length: &TranscriptLength,
encoder_secret: Option<&EncoderSecret>,
commitments: impl IntoIterator<Item = &'a TranscriptCommitment>,
) -> Result<PartialTranscript, TranscriptProofError> {
let mut encoding_commitment = None;
let mut hash_commitments = HashSet::new();
// Index commitments.
for commitment in commitments {
match commitment {
TranscriptCommitment::Encoding(commitment) => {
if encoding_commitment.replace(commitment).is_some() {
return Err(TranscriptProofError::new(
ErrorKind::Encoding,
"multiple encoding commitments are present.",
));
}
}
TranscriptCommitment::Hash(plaintext_hash) => {
hash_commitments.insert(plaintext_hash);
}
@@ -92,34 +80,6 @@ impl TranscriptProof {
let mut total_auth_sent = RangeSet::default();
let mut total_auth_recv = RangeSet::default();
// Verify encoding proof.
if let Some(proof) = self.encoding_proof {
let secret = encoder_secret.ok_or_else(|| {
TranscriptProofError::new(
ErrorKind::Encoding,
"contains an encoding proof but missing encoder secret",
)
})?;
let commitment = encoding_commitment.ok_or_else(|| {
TranscriptProofError::new(
ErrorKind::Encoding,
"contains an encoding proof but missing encoding commitment",
)
})?;
let (auth_sent, auth_recv) = proof.verify_with_provider(
provider,
secret,
commitment,
self.transcript.sent_unsafe(),
self.transcript.received_unsafe(),
)?;
total_auth_sent.union_mut(&auth_sent);
total_auth_recv.union_mut(&auth_recv);
}
let mut buffer = Vec::new();
for PlaintextHashSecret {
direction,
@@ -203,7 +163,6 @@ impl TranscriptProofError {
#[derive(Debug)]
enum ErrorKind {
Encoding,
Hash,
Proof,
}
@@ -213,7 +172,6 @@ impl fmt::Display for TranscriptProofError {
f.write_str("transcript proof error: ")?;
match self.kind {
ErrorKind::Encoding => f.write_str("encoding error")?,
ErrorKind::Hash => f.write_str("hash error")?,
ErrorKind::Proof => f.write_str("proof error")?,
}
@@ -226,12 +184,6 @@ impl fmt::Display for TranscriptProofError {
}
}
impl From<EncodingProofError> for TranscriptProofError {
fn from(e: EncodingProofError) -> Self {
TranscriptProofError::new(ErrorKind::Encoding, e)
}
}
/// Union of ranges to reveal.
#[derive(Clone, Debug, PartialEq)]
struct QueryIdx {
@@ -276,7 +228,6 @@ pub struct TranscriptProofBuilder<'a> {
/// Commitment kinds in order of preference for building transcript proofs.
commitment_kinds: Vec<TranscriptCommitmentKind>,
transcript: &'a Transcript,
encoding_tree: Option<&'a EncodingTree>,
hash_secrets: Vec<&'a PlaintextHashSecret>,
committed_sent: RangeSet<usize>,
committed_recv: RangeSet<usize>,
@@ -292,15 +243,9 @@ impl<'a> TranscriptProofBuilder<'a> {
let mut committed_sent = RangeSet::default();
let mut committed_recv = RangeSet::default();
let mut encoding_tree = None;
let mut hash_secrets = Vec::new();
for secret in secrets {
match secret {
TranscriptSecret::Encoding(tree) => {
committed_sent.union_mut(tree.idx(Direction::Sent));
committed_recv.union_mut(tree.idx(Direction::Received));
encoding_tree = Some(tree);
}
TranscriptSecret::Hash(hash) => {
match hash.direction {
Direction::Sent => committed_sent.union_mut(&hash.idx),
@@ -314,7 +259,6 @@ impl<'a> TranscriptProofBuilder<'a> {
Self {
commitment_kinds: DEFAULT_COMMITMENT_KINDS.to_vec(),
transcript,
encoding_tree,
hash_secrets,
committed_sent,
committed_recv,
@@ -412,7 +356,6 @@ impl<'a> TranscriptProofBuilder<'a> {
transcript: self
.transcript
.to_partial(self.query_idx.sent.clone(), self.query_idx.recv.clone()),
encoding_proof: None,
hash_secrets: Vec::new(),
};
let mut uncovered_query_idx = self.query_idx.clone();
@@ -424,46 +367,6 @@ impl<'a> TranscriptProofBuilder<'a> {
// self.commitment_kinds.
if let Some(kind) = commitment_kinds_iter.next() {
match kind {
TranscriptCommitmentKind::Encoding => {
let Some(encoding_tree) = self.encoding_tree else {
// Proceeds to the next preferred commitment kind if encoding tree is
// not available.
continue;
};
let (sent_dir_idxs, sent_uncovered) = uncovered_query_idx.sent.cover_by(
encoding_tree
.transcript_indices()
.filter(|(dir, _)| *dir == Direction::Sent),
|(_, idx)| idx,
);
// Uncovered ranges will be checked with ranges of the next
// preferred commitment kind.
uncovered_query_idx.sent = sent_uncovered;
let (recv_dir_idxs, recv_uncovered) = uncovered_query_idx.recv.cover_by(
encoding_tree
.transcript_indices()
.filter(|(dir, _)| *dir == Direction::Received),
|(_, idx)| idx,
);
uncovered_query_idx.recv = recv_uncovered;
let dir_idxs = sent_dir_idxs
.into_iter()
.chain(recv_dir_idxs)
.collect::<Vec<_>>();
// Skip proof generation if there are no committed ranges that can cover the
// query ranges.
if !dir_idxs.is_empty() {
transcript_proof.encoding_proof = Some(
encoding_tree
.proof(dir_idxs.into_iter())
.expect("subsequences were checked to be in tree"),
);
}
}
TranscriptCommitmentKind::Hash { alg } => {
let (sent_hashes, sent_uncovered) = uncovered_query_idx.sent.cover_by(
self.hash_secrets.iter().filter(|hash| {
@@ -590,46 +493,10 @@ mod tests {
use rstest::rstest;
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
use crate::{
fixtures::{encoder_secret, encoding_provider},
hash::{Blake3, Blinder, HashAlgId},
transcript::TranscriptCommitConfigBuilder,
};
use crate::hash::{Blinder, HashAlgId};
use super::*;
#[rstest]
fn test_verify_missing_encoding_commitment_root() {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let idxs = vec![(Direction::Received, RangeSet::from(0..transcript.len().1))];
let encoding_tree = EncodingTree::new(
&Blake3::default(),
&idxs,
&encoding_provider(transcript.sent(), transcript.received()),
)
.unwrap();
let secrets = vec![TranscriptSecret::Encoding(encoding_tree)];
let mut builder = TranscriptProofBuilder::new(&transcript, &secrets);
builder.reveal_recv(&(0..transcript.len().1)).unwrap();
let transcript_proof = builder.build().unwrap();
let provider = HashProvider::default();
let err = transcript_proof
.verify_with_provider(
&provider,
&transcript.length(),
Some(&encoder_secret()),
&[],
)
.err()
.unwrap();
assert!(matches!(err.kind, ErrorKind::Encoding));
}
#[rstest]
fn test_reveal_range_out_of_bounds() {
let transcript = Transcript::new(
@@ -649,7 +516,7 @@ mod tests {
}
#[rstest]
fn test_reveal_missing_encoding_tree() {
fn test_reveal_missing_commitment() {
let transcript = Transcript::new(
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
@@ -698,7 +565,6 @@ mod tests {
.verify_with_provider(
&provider,
&transcript.length(),
None,
&[TranscriptCommitment::Hash(commitment)],
)
.unwrap();
@@ -748,7 +614,6 @@ mod tests {
.verify_with_provider(
&provider,
&transcript.length(),
None,
&[TranscriptCommitment::Hash(commitment)],
)
.unwrap_err();
@@ -764,24 +629,19 @@ mod tests {
TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256,
},
TranscriptCommitmentKind::Encoding,
TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256,
},
TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256,
},
TranscriptCommitmentKind::Encoding,
]);
assert_eq!(
builder.commitment_kinds,
vec![
TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256
},
TranscriptCommitmentKind::Encoding
]
vec![TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256
},]
);
}
@@ -791,7 +651,7 @@ mod tests {
RangeSet::from([0..10, 12..30]),
true,
)]
#[case::reveal_all_rangesets_with_superset_ranges(
#[case::reveal_all_rangesets_with_single_superset_range(
vec![RangeSet::from([0..1]), RangeSet::from([1..2, 8..9]), RangeSet::from([2..4, 6..8]), RangeSet::from([2..3, 6..7]), RangeSet::from([9..12])],
RangeSet::from([0..4, 6..9]),
true,
@@ -822,29 +682,30 @@ mod tests {
false,
)]
#[allow(clippy::single_range_in_vec_init)]
fn test_reveal_mutliple_rangesets_with_one_rangeset(
fn test_reveal_multiple_rangesets_with_one_rangeset(
#[case] commit_recv_rangesets: Vec<RangeSet<usize>>,
#[case] reveal_recv_rangeset: RangeSet<usize>,
#[case] success: bool,
) {
use rand::{Rng, SeedableRng};
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
// Encoding commitment kind
let mut transcript_commitment_builder = TranscriptCommitConfigBuilder::new(&transcript);
// Create hash commitments for each rangeset
let mut secrets = Vec::new();
for rangeset in commit_recv_rangesets.iter() {
transcript_commitment_builder.commit_recv(rangeset).unwrap();
let blinder: crate::hash::Blinder = rng.random();
let secret = PlaintextHashSecret {
direction: Direction::Received,
idx: rangeset.clone(),
alg: HashAlgId::BLAKE3,
blinder,
};
secrets.push(TranscriptSecret::Hash(secret));
}
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap();
let encoding_tree = EncodingTree::new(
&Blake3::default(),
transcripts_commitment_config.iter_encoding(),
&encoding_provider(GET_WITH_HEADER, OK_JSON),
)
.unwrap();
let secrets = vec![TranscriptSecret::Encoding(encoding_tree)];
let mut builder = TranscriptProofBuilder::new(&transcript, &secrets);
if success {
@@ -897,27 +758,34 @@ mod tests {
#[case] uncovered_sent_rangeset: RangeSet<usize>,
#[case] uncovered_recv_rangeset: RangeSet<usize>,
) {
use rand::{Rng, SeedableRng};
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
// Encoding commitment kind
let mut transcript_commitment_builder = TranscriptCommitConfigBuilder::new(&transcript);
// Create hash commitments for each rangeset
let mut secrets = Vec::new();
for rangeset in commit_sent_rangesets.iter() {
transcript_commitment_builder.commit_sent(rangeset).unwrap();
let blinder: crate::hash::Blinder = rng.random();
let secret = PlaintextHashSecret {
direction: Direction::Sent,
idx: rangeset.clone(),
alg: HashAlgId::BLAKE3,
blinder,
};
secrets.push(TranscriptSecret::Hash(secret));
}
for rangeset in commit_recv_rangesets.iter() {
transcript_commitment_builder.commit_recv(rangeset).unwrap();
let blinder: crate::hash::Blinder = rng.random();
let secret = PlaintextHashSecret {
direction: Direction::Received,
idx: rangeset.clone(),
alg: HashAlgId::BLAKE3,
blinder,
};
secrets.push(TranscriptSecret::Hash(secret));
}
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap();
let encoding_tree = EncodingTree::new(
&Blake3::default(),
transcripts_commitment_config.iter_encoding(),
&encoding_provider(GET_WITH_HEADER, OK_JSON),
)
.unwrap();
let secrets = vec![TranscriptSecret::Encoding(encoding_tree)];
let mut builder = TranscriptProofBuilder::new(&transcript, &secrets);
builder.reveal_sent(&reveal_sent_rangeset).unwrap();
builder.reveal_recv(&reveal_recv_rangeset).unwrap();

View File

@@ -332,7 +332,6 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
let (
VerifierOutput {
transcript_commitments,
encoder_secret,
..
},
verifier,
@@ -393,10 +392,6 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
.server_ephemeral_key(tls_transcript.server_ephemeral_key().clone())
.transcript_commitments(transcript_commitments);
if let Some(encoder_secret) = encoder_secret {
builder.encoder_secret(encoder_secret);
}
let attestation = builder.build(&provider)?;
// Send attestation to prover.

View File

@@ -1,51 +1,59 @@
#### Latency ####
#### Default Representative Benchmarks ####
#
# This benchmark measures TLSNotary performance on three representative network scenarios.
# Each scenario is run multiple times to produce statistical metrics (median, std dev, etc.)
# rather than plots. Use this for quick performance checks and CI regression testing.
#
# Payload sizes:
# - upload-size: 1KB (typical HTTP request)
# - download-size: 2KB (typical HTTP response/API data)
#
# Network scenarios are chosen to represent real-world user conditions where
# TLSNotary is primarily bottlenecked by upload bandwidth.
#### Cable/DSL Home Internet ####
# Most common residential internet connection
# - Asymmetric: high download, limited upload (typical bottleneck)
# - Upload bandwidth: 20 Mbps (realistic cable/DSL upload speed)
# - Latency: 20ms (typical ISP latency)
[[group]]
name = "latency"
bandwidth = 1000
name = "cable"
bandwidth = 20
protocol_latency = 20
upload-size = 1024
download-size = 2048
[[bench]]
group = "latency"
protocol_latency = 10
group = "cable"
[[bench]]
group = "latency"
protocol_latency = 25
[[bench]]
group = "latency"
protocol_latency = 50
[[bench]]
group = "latency"
protocol_latency = 100
[[bench]]
group = "latency"
protocol_latency = 200
#### Bandwidth ####
#### Mobile 5G ####
# Modern mobile connection with good coverage
# - Upload bandwidth: 30 Mbps (typical 5G upload in good conditions)
# - Latency: 30ms (higher than wired due to mobile tower hops)
[[group]]
name = "bandwidth"
protocol_latency = 25
name = "mobile_5g"
bandwidth = 30
protocol_latency = 30
upload-size = 1024
download-size = 2048
[[bench]]
group = "bandwidth"
bandwidth = 10
group = "mobile_5g"
[[bench]]
group = "bandwidth"
bandwidth = 50
#### Fiber Home Internet ####
# High-end residential connection (best case scenario)
# - Symmetric: equal upload/download bandwidth
# - Upload bandwidth: 100 Mbps (typical fiber upload)
# - Latency: 15ms (lower latency than cable)
[[bench]]
group = "bandwidth"
[[group]]
name = "fiber"
bandwidth = 100
protocol_latency = 15
upload-size = 1024
download-size = 2048
[[bench]]
group = "bandwidth"
bandwidth = 250
[[bench]]
group = "bandwidth"
bandwidth = 1000
group = "fiber"

View File

@@ -0,0 +1,52 @@
#### Bandwidth Sweep Benchmark ####
#
# Measures how network bandwidth affects TLSNotary runtime.
# Keeps latency and payload sizes fixed while varying upload bandwidth.
#
# Fixed parameters:
# - Latency: 25ms (typical internet latency)
# - Upload: 1KB (typical request)
# - Download: 2KB (typical response)
#
# Variable: Bandwidth from 5 Mbps to 1000 Mbps
#
# Use this to plot "Bandwidth vs Runtime" and understand bandwidth sensitivity.
# Focus on upload bandwidth, as TLSNotary is primarily upload-bottlenecked.
[[group]]
name = "bandwidth_sweep"
protocol_latency = 25
upload-size = 1024
download-size = 2048
[[bench]]
group = "bandwidth_sweep"
bandwidth = 5
[[bench]]
group = "bandwidth_sweep"
bandwidth = 10
[[bench]]
group = "bandwidth_sweep"
bandwidth = 20
[[bench]]
group = "bandwidth_sweep"
bandwidth = 50
[[bench]]
group = "bandwidth_sweep"
bandwidth = 100
[[bench]]
group = "bandwidth_sweep"
bandwidth = 250
[[bench]]
group = "bandwidth_sweep"
bandwidth = 500
[[bench]]
group = "bandwidth_sweep"
bandwidth = 1000

View File

@@ -0,0 +1,53 @@
#### Download Size Sweep Benchmark ####
#
# Measures how download payload size affects TLSNotary runtime.
# Keeps network conditions fixed while varying the response size.
#
# Fixed parameters:
# - Bandwidth: 100 Mbps (typical good connection)
# - Latency: 25ms (typical internet latency)
# - Upload: 1KB (typical request size)
#
# Variable: Download size from 1KB to 50KB
#
# Use this to plot "Download Size vs Runtime" and understand how much data
# TLSNotary can efficiently notarize. Useful for determining optimal
# chunking strategies for large responses.
[[group]]
name = "download_sweep"
bandwidth = 100
protocol_latency = 25
upload-size = 1024
[[bench]]
group = "download_sweep"
download-size = 1024
[[bench]]
group = "download_sweep"
download-size = 2048
[[bench]]
group = "download_sweep"
download-size = 5120
[[bench]]
group = "download_sweep"
download-size = 10240
[[bench]]
group = "download_sweep"
download-size = 20480
[[bench]]
group = "download_sweep"
download-size = 30720
[[bench]]
group = "download_sweep"
download-size = 40960
[[bench]]
group = "download_sweep"
download-size = 51200

View File

@@ -0,0 +1,47 @@
#### Latency Sweep Benchmark ####
#
# Measures how network latency affects TLSNotary runtime.
# Keeps bandwidth and payload sizes fixed while varying protocol latency.
#
# Fixed parameters:
# - Bandwidth: 100 Mbps (typical good connection)
# - Upload: 1KB (typical request)
# - Download: 2KB (typical response)
#
# Variable: Protocol latency from 10ms to 200ms
#
# Use this to plot "Latency vs Runtime" and understand latency sensitivity.
[[group]]
name = "latency_sweep"
bandwidth = 100
upload-size = 1024
download-size = 2048
[[bench]]
group = "latency_sweep"
protocol_latency = 10
[[bench]]
group = "latency_sweep"
protocol_latency = 25
[[bench]]
group = "latency_sweep"
protocol_latency = 50
[[bench]]
group = "latency_sweep"
protocol_latency = 75
[[bench]]
group = "latency_sweep"
protocol_latency = 100
[[bench]]
group = "latency_sweep"
protocol_latency = 150
[[bench]]
group = "latency_sweep"
protocol_latency = 200

View File

@@ -7,9 +7,10 @@ publish = false
[dependencies]
tlsn-harness-core = { workspace = true }
# tlsn-server-fixture = { workspace = true }
charming = { version = "0.6.0", features = ["ssr"] }
charming = { version = "0.5.1", features = ["ssr"] }
csv = "1.3.0"
clap = { workspace = true, features = ["derive", "env"] }
polars = { version = "0.44", features = ["csv", "lazy"] }
itertools = "0.14.0"
toml = { workspace = true }

View File

@@ -1,111 +0,0 @@
# TLSNotary Benchmark Plot Tool
Generates interactive HTML and SVG plots from TLSNotary benchmark results. Supports comparing multiple benchmark runs (e.g., before/after optimization, native vs browser).
## Usage
```bash
tlsn-harness-plot <TOML> <CSV>... [OPTIONS]
```
### Arguments
- `<TOML>` - Path to Bench.toml file defining benchmark structure
- `<CSV>...` - One or more CSV files with benchmark results
### Options
- `-l, --labels <LABEL>...` - Labels for each dataset (optional)
- If omitted, datasets are labeled "Dataset 1", "Dataset 2", etc.
- Number of labels must match number of CSV files
- `--min-max-band` - Add min/max bands to plots showing variance
- `-h, --help` - Print help information
## Examples
### Single Dataset
```bash
tlsn-harness-plot bench.toml results.csv
```
Generates plots from a single benchmark run.
### Compare Two Runs
```bash
tlsn-harness-plot bench.toml before.csv after.csv \
--labels "Before Optimization" "After Optimization"
```
Overlays two datasets to compare performance improvements.
### Multiple Datasets
```bash
tlsn-harness-plot bench.toml native.csv browser.csv wasm.csv \
--labels "Native" "Browser" "WASM"
```
Compare three different runtime environments.
### With Min/Max Bands
```bash
tlsn-harness-plot bench.toml run1.csv run2.csv \
--labels "Config A" "Config B" \
--min-max-band
```
Shows variance ranges for each dataset.
## Output Files
The tool generates two files per benchmark group:
- `<output>.html` - Interactive HTML chart (zoomable, hoverable)
- `<output>.svg` - Static SVG image for documentation
Default output filenames:
- `runtime_vs_bandwidth.{html,svg}` - When `protocol_latency` is defined in group
- `runtime_vs_latency.{html,svg}` - When `bandwidth` is defined in group
## Plot Format
Each dataset displays:
- **Solid line** - Total runtime (preprocessing + online phase)
- **Dashed line** - Online phase only
- **Shaded area** (optional) - Min/max variance bands
Different datasets automatically use distinct colors for easy comparison.
## CSV Format
Expected columns in each CSV file:
- `group` - Benchmark group name (must match TOML)
- `bandwidth` - Network bandwidth in Kbps (for bandwidth plots)
- `latency` - Network latency in ms (for latency plots)
- `time_preprocess` - Preprocessing time in ms
- `time_online` - Online phase time in ms
- `time_total` - Total runtime in ms
## TOML Format
The benchmark TOML file defines groups with either:
```toml
[[group]]
name = "my_benchmark"
protocol_latency = 50 # Fixed latency for bandwidth plots
# OR
bandwidth = 10000 # Fixed bandwidth for latency plots
```
All datasets must use the same TOML file to ensure consistent benchmark structure.
## Tips
- Use descriptive labels to make plots self-documenting
- Keep CSV files from the same benchmark configuration for valid comparisons
- Min/max bands are useful for showing stability but can clutter plots with many datasets
- Interactive HTML plots support zooming and hovering for detailed values

View File

@@ -1,18 +1,17 @@
use std::f32;
use charming::{
Chart, HtmlRenderer, ImageRenderer,
Chart, HtmlRenderer,
component::{Axis, Legend, Title},
element::{
AreaStyle, ItemStyle, LineStyle, LineStyleType, NameLocation, Orient, TextStyle, Tooltip,
Trigger,
},
element::{AreaStyle, LineStyle, NameLocation, Orient, TextStyle, Tooltip, Trigger},
series::Line,
theme::Theme,
};
use clap::Parser;
use harness_core::bench::BenchItems;
use polars::prelude::*;
use harness_core::bench::{BenchItems, Measurement};
use itertools::Itertools;
const THEME: Theme = Theme::Default;
#[derive(Parser, Debug)]
#[command(author, version, about)]
@@ -20,131 +19,72 @@ struct Cli {
/// Path to the Bench.toml file with benchmark spec
toml: String,
/// Paths to CSV files with benchmark results (one or more)
csv: Vec<String>,
/// Path to the CSV file with benchmark results
csv: String,
/// Labels for each dataset (optional, defaults to "Dataset 1", "Dataset 2", etc.)
#[arg(short, long, num_args = 0..)]
labels: Vec<String>,
/// Prover kind: native or browser
#[arg(short, long, value_enum, default_value = "native")]
prover_kind: ProverKind,
/// Add min/max bands to plots
#[arg(long, default_value_t = false)]
min_max_band: bool,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum)]
enum ProverKind {
Native,
Browser,
}
impl std::fmt::Display for ProverKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ProverKind::Native => write!(f, "Native"),
ProverKind::Browser => write!(f, "Browser"),
}
}
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
let cli = Cli::parse();
if cli.csv.is_empty() {
return Err("At least one CSV file must be provided".into());
}
// Generate labels if not provided
let labels: Vec<String> = if cli.labels.is_empty() {
cli.csv
.iter()
.enumerate()
.map(|(i, _)| format!("Dataset {}", i + 1))
.collect()
} else if cli.labels.len() != cli.csv.len() {
return Err(format!(
"Number of labels ({}) must match number of CSV files ({})",
cli.labels.len(),
cli.csv.len()
)
.into());
} else {
cli.labels.clone()
};
// Load all CSVs and add dataset label
let mut dfs = Vec::new();
for (csv_path, label) in cli.csv.iter().zip(labels.iter()) {
let mut df = CsvReadOptions::default()
.try_into_reader_with_file_path(Some(csv_path.clone().into()))?
.finish()?;
let label_series = Series::new("dataset_label".into(), vec![label.as_str(); df.height()]);
df.with_column(label_series)?;
dfs.push(df);
}
// Combine all dataframes
let df = dfs
.into_iter()
.reduce(|acc, df| acc.vstack(&df).unwrap())
.unwrap();
let mut rdr = csv::Reader::from_path(&cli.csv)?;
let items: BenchItems = toml::from_str(&std::fs::read_to_string(&cli.toml)?)?;
let groups = items.group;
for group in groups {
// Determine which field varies in benches for this group
let benches_in_group: Vec<_> = items
.bench
.iter()
.filter(|b| b.group.as_deref() == Some(&group.name))
.collect();
// Prepare data for plotting.
let all_data: Vec<Measurement> = rdr
.deserialize::<Measurement>()
.collect::<Result<Vec<_>, _>>()?;
if benches_in_group.is_empty() {
continue;
for group in groups {
if group.protocol_latency.is_some() {
let latency = group.protocol_latency.unwrap();
plot_runtime_vs(
&all_data,
cli.min_max_band,
&group.name,
|r| r.bandwidth as f32 / 1000.0, // Kbps to Mbps
"Runtime vs Bandwidth",
format!("{} ms Latency, {} mode", latency, cli.prover_kind),
"runtime_vs_bandwidth.html",
"Bandwidth (Mbps)",
)?;
}
// Check which field has varying values
let bandwidth_varies = benches_in_group
.windows(2)
.any(|w| w[0].bandwidth != w[1].bandwidth);
let latency_varies = benches_in_group
.windows(2)
.any(|w| w[0].protocol_latency != w[1].protocol_latency);
let download_size_varies = benches_in_group
.windows(2)
.any(|w| w[0].download_size != w[1].download_size);
if download_size_varies {
let upload_size = group.upload_size.unwrap_or(1024);
if group.bandwidth.is_some() {
let bandwidth = group.bandwidth.unwrap();
plot_runtime_vs(
&df,
&labels,
&all_data,
cli.min_max_band,
&group.name,
"download_size",
1.0 / 1024.0, // bytes to KB
"Runtime vs Response Size",
format!("{} bytes upload size", upload_size),
"runtime_vs_download_size",
"Response Size (KB)",
true, // legend on left
)?;
} else if bandwidth_varies {
let latency = group.protocol_latency.unwrap_or(50);
plot_runtime_vs(
&df,
&labels,
cli.min_max_band,
&group.name,
"bandwidth",
1.0 / 1000.0, // Kbps to Mbps
"Runtime vs Bandwidth",
format!("{} ms Latency", latency),
"runtime_vs_bandwidth",
"Bandwidth (Mbps)",
false, // legend on right
)?;
} else if latency_varies {
let bandwidth = group.bandwidth.unwrap_or(1000);
plot_runtime_vs(
&df,
&labels,
cli.min_max_band,
&group.name,
"latency",
1.0,
|r| r.latency as f32,
"Runtime vs Latency",
format!("{} bps bandwidth", bandwidth),
"runtime_vs_latency",
format!("{} bps bandwidth, {} mode", bandwidth, cli.prover_kind),
"runtime_vs_latency.html",
"Latency (ms)",
true, // legend on left
)?;
}
}
@@ -152,52 +92,84 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
Ok(())
}
struct DataPoint {
min: f32,
mean: f32,
max: f32,
}
struct Points {
preprocess: DataPoint,
online: DataPoint,
total: DataPoint,
}
#[allow(clippy::too_many_arguments)]
fn plot_runtime_vs(
df: &DataFrame,
labels: &[String],
fn plot_runtime_vs<Fx>(
all_data: &[Measurement],
show_min_max: bool,
group: &str,
x_col: &str,
x_scale: f32,
x_value: Fx,
title: &str,
subtitle: String,
output_file: &str,
x_axis_label: &str,
legend_left: bool,
) -> Result<Chart, Box<dyn std::error::Error>> {
let stats_df = df
.clone()
.lazy()
.filter(col("group").eq(lit(group)))
.with_column((col(x_col).cast(DataType::Float32) * lit(x_scale)).alias("x"))
.with_columns([
(col("time_preprocess").cast(DataType::Float32) / lit(1000.0)).alias("preprocess"),
(col("time_online").cast(DataType::Float32) / lit(1000.0)).alias("online"),
(col("time_total").cast(DataType::Float32) / lit(1000.0)).alias("total"),
])
.group_by([col("x"), col("dataset_label")])
.agg([
col("preprocess").min().alias("preprocess_min"),
col("preprocess").mean().alias("preprocess_mean"),
col("preprocess").max().alias("preprocess_max"),
col("online").min().alias("online_min"),
col("online").mean().alias("online_mean"),
col("online").max().alias("online_max"),
col("total").min().alias("total_min"),
col("total").mean().alias("total_mean"),
col("total").max().alias("total_max"),
])
.sort(["dataset_label", "x"], Default::default())
.collect()?;
// Build legend entries
let mut legend_data = Vec::new();
for label in labels {
legend_data.push(format!("Total Mean ({})", label));
legend_data.push(format!("Online Mean ({})", label));
) -> Result<Chart, Box<dyn std::error::Error>>
where
Fx: Fn(&Measurement) -> f32,
{
fn data_point(values: &[f32]) -> DataPoint {
let mean = values.iter().copied().sum::<f32>() / values.len() as f32;
let max = values.iter().copied().reduce(f32::max).unwrap_or_default();
let min = values.iter().copied().reduce(f32::min).unwrap_or_default();
DataPoint { min, mean, max }
}
let stats: Vec<(f32, Points)> = all_data
.iter()
.filter(|r| r.group.as_deref() == Some(group))
.map(|r| {
(
x_value(r),
r.time_preprocess as f32 / 1000.0, // ms to s
r.time_online as f32 / 1000.0,
r.time_total as f32 / 1000.0,
)
})
.sorted_by(|a, b| a.0.partial_cmp(&b.0).unwrap())
.chunk_by(|entry| entry.0)
.into_iter()
.map(|(x, group)| {
let group_vec: Vec<_> = group.collect();
let preprocess = data_point(
&group_vec
.iter()
.map(|(_, t, _, _)| *t)
.collect::<Vec<f32>>(),
);
let online = data_point(
&group_vec
.iter()
.map(|(_, _, t, _)| *t)
.collect::<Vec<f32>>(),
);
let total = data_point(
&group_vec
.iter()
.map(|(_, _, _, t)| *t)
.collect::<Vec<f32>>(),
);
(
x,
Points {
preprocess,
online,
total,
},
)
})
.collect();
let mut chart = Chart::new()
.title(
Title::new()
@@ -207,6 +179,14 @@ fn plot_runtime_vs(
.subtext_style(TextStyle::new().font_size(16)),
)
.tooltip(Tooltip::new().trigger(Trigger::Axis))
.legend(
Legend::new()
.data(vec!["Preprocess Mean", "Online Mean", "Total Mean"])
.top("80")
.right("110")
.orient(Orient::Vertical)
.item_gap(10),
)
.x_axis(
Axis::new()
.name(x_axis_label)
@@ -225,156 +205,73 @@ fn plot_runtime_vs(
.name_text_style(TextStyle::new().font_size(21)),
);
// Add legend with conditional positioning
let legend = Legend::new()
.data(legend_data)
.top("80")
.orient(Orient::Vertical)
.item_gap(10);
chart = add_mean_series(chart, &stats, "Preprocess Mean", |p| p.preprocess.mean);
chart = add_mean_series(chart, &stats, "Online Mean", |p| p.online.mean);
chart = add_mean_series(chart, &stats, "Total Mean", |p| p.total.mean);
let legend = if legend_left {
legend.left("110")
} else {
legend.right("110")
};
chart = chart.legend(legend);
// Define colors for each dataset
let colors = vec![
"#5470c6", "#91cc75", "#fac858", "#ee6666", "#73c0de", "#3ba272", "#fc8452", "#9a60b4",
];
for (idx, label) in labels.iter().enumerate() {
let color = colors.get(idx % colors.len()).unwrap();
// Total time - solid line
chart = add_dataset_series(
&chart,
&stats_df,
label,
&format!("Total Mean ({})", label),
"total_mean",
false,
color,
)?;
// Online time - dashed line (same color as total)
chart = add_dataset_series(
&chart,
&stats_df,
label,
&format!("Online Mean ({})", label),
"online_mean",
true,
color,
)?;
if show_min_max {
chart = add_dataset_min_max_band(
&chart,
&stats_df,
label,
&format!("Total Min/Max ({})", label),
"total",
color,
)?;
}
if show_min_max {
chart = add_min_max_band(
chart,
&stats,
"Preprocess Min/Max",
|p| &p.preprocess,
"#ccc",
);
chart = add_min_max_band(chart, &stats, "Online Min/Max", |p| &p.online, "#ccc");
chart = add_min_max_band(chart, &stats, "Total Min/Max", |p| &p.total, "#ccc");
}
// Save the chart as HTML file (no theme)
// Save the chart as an HTML file.
HtmlRenderer::new(title, 1000, 800)
.save(&chart, &format!("{}.html", output_file))
.unwrap();
// Save SVG with default theme
ImageRenderer::new(1000, 800)
.theme(Theme::Default)
.save(&chart, &format!("{}.svg", output_file))
.unwrap();
// Save SVG with dark theme
ImageRenderer::new(1000, 800)
.theme(Theme::Dark)
.save(&chart, &format!("{}_dark.svg", output_file))
.theme(THEME)
.save(&chart, output_file)
.unwrap();
Ok(chart)
}
fn add_dataset_series(
chart: &Chart,
df: &DataFrame,
dataset_label: &str,
series_name: &str,
col_name: &str,
dashed: bool,
color: &str,
) -> Result<Chart, Box<dyn std::error::Error>> {
// Filter for specific dataset
let mask = df.column("dataset_label")?.str()?.equal(dataset_label);
let filtered = df.filter(&mask)?;
let x = filtered.column("x")?.f32()?;
let y = filtered.column(col_name)?.f32()?;
let data: Vec<Vec<f32>> = x
.into_iter()
.zip(y.into_iter())
.filter_map(|(x, y)| Some(vec![x?, y?]))
.collect();
let mut line = Line::new()
.name(series_name)
.data(data)
.symbol_size(6)
.item_style(ItemStyle::new().color(color));
let mut line_style = LineStyle::new();
if dashed {
line_style = line_style.type_(LineStyleType::Dashed);
}
line = line.line_style(line_style.color(color));
Ok(chart.clone().series(line))
}
fn add_dataset_min_max_band(
chart: &Chart,
df: &DataFrame,
dataset_label: &str,
fn add_mean_series(
chart: Chart,
stats: &[(f32, Points)],
name: &str,
col_prefix: &str,
color: &str,
) -> Result<Chart, Box<dyn std::error::Error>> {
// Filter for specific dataset
let mask = df.column("dataset_label")?.str()?.equal(dataset_label);
let filtered = df.filter(&mask)?;
let x = filtered.column("x")?.f32()?;
let min_col = filtered.column(&format!("{}_min", col_prefix))?.f32()?;
let max_col = filtered.column(&format!("{}_max", col_prefix))?.f32()?;
let max_data: Vec<Vec<f32>> = x
.into_iter()
.zip(max_col.into_iter())
.filter_map(|(x, y)| Some(vec![x?, y?]))
.collect();
let min_data: Vec<Vec<f32>> = x
.into_iter()
.zip(min_col.into_iter())
.filter_map(|(x, y)| Some(vec![x?, y?]))
.rev()
.collect();
let data: Vec<Vec<f32>> = max_data.into_iter().chain(min_data).collect();
Ok(chart.clone().series(
extract: impl Fn(&Points) -> f32,
) -> Chart {
chart.series(
Line::new()
.name(name)
.data(data)
.data(
stats
.iter()
.map(|(x, points)| vec![*x, extract(points)])
.collect(),
)
.symbol_size(6),
)
}
fn add_min_max_band(
chart: Chart,
stats: &[(f32, Points)],
name: &str,
extract: impl Fn(&Points) -> &DataPoint,
color: &str,
) -> Chart {
chart.series(
Line::new()
.name(name)
.data(
stats
.iter()
.map(|(x, points)| vec![*x, extract(points).max])
.chain(
stats
.iter()
.rev()
.map(|(x, points)| vec![*x, extract(points).min]),
)
.collect(),
)
.show_symbol(false)
.line_style(LineStyle::new().opacity(0.0))
.area_style(AreaStyle::new().opacity(0.3).color(color)),
))
)
}
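
For reference, a self-contained sketch of the sort-then-`chunk_by` aggregation pattern used in `plot_runtime_vs` above (hypothetical data; assumes itertools 0.14 as pinned in the manifest):

```rust
use itertools::Itertools;

fn main() {
    // (x, runtime) samples with repeated x values, as produced by multiple runs.
    let samples = vec![(10.0_f32, 1.0_f32), (10.0, 2.0), (25.0, 2.0)];
    let means: Vec<(f32, f32)> = samples
        .into_iter()
        // Sort by x so equal x values are adjacent...
        .sorted_by(|a, b| a.0.partial_cmp(&b.0).unwrap())
        // ...then group adjacent samples sharing the same x.
        .chunk_by(|entry| entry.0)
        .into_iter()
        .map(|(x, group)| {
            let ys: Vec<f32> = group.map(|(_, y)| y).collect();
            (x, ys.iter().sum::<f32>() / ys.len() as f32)
        })
        .collect();
    assert_eq!(means, vec![(10.0, 1.5), (25.0, 2.0)]);
}
```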

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -22,6 +22,7 @@ clap = { workspace = true, features = ["derive", "env"] }
csv = { version = "1.3" }
duct = { version = "1" }
futures = { workspace = true }
indicatif = { version = "0.17" }
ipnet = { workspace = true }
serio = { workspace = true }
serde_json = { workspace = true }

View File

@@ -16,6 +16,10 @@ pub struct Cli {
/// Subnet to assign harness network interfaces.
#[arg(long, default_value = "10.250.0.0/24", env = "SUBNET")]
pub subnet: Ipv4Net,
/// Run browser in headed mode (visible window) for debugging.
/// Works with both X11 and Wayland.
#[arg(long)]
pub headed: bool,
}
#[derive(Subcommand)]
@@ -31,10 +35,13 @@ pub enum Command {
},
/// Runs benchmarks.
Bench {
/// Configuration path.
/// Configuration path. Defaults to bench.toml, which contains
/// representative scenarios (cable, 5G, fiber) for quick
/// performance checks. Use bench_*_sweep.toml files for
/// parametric analysis.
#[arg(short, long, default_value = "bench.toml")]
config: PathBuf,
/// Output file path.
/// Output CSV file path for detailed metrics and post-processing.
#[arg(short, long, default_value = "metrics.csv")]
output: PathBuf,
/// Number of samples to measure per benchmark. This is overridden by

View File

@@ -28,6 +28,9 @@ pub struct Executor {
ns: Namespace,
config: ExecutorConfig,
target: Target,
/// Display environment variables for headed mode (X11/Wayland).
/// Empty means headless mode.
display_env: Vec<String>,
state: State,
}
@@ -49,11 +52,17 @@ impl State {
}
impl Executor {
pub fn new(ns: Namespace, config: ExecutorConfig, target: Target) -> Self {
pub fn new(
ns: Namespace,
config: ExecutorConfig,
target: Target,
display_env: Vec<String>,
) -> Self {
Self {
ns,
config,
target,
display_env,
state: State::Init,
}
}
@@ -120,23 +129,49 @@ impl Executor {
let tmp = duct::cmd!("mktemp", "-d").read()?;
let tmp = tmp.trim();
let process = duct::cmd!(
"sudo",
"ip",
"netns",
"exec",
self.ns.name(),
chrome_path,
format!("--remote-debugging-port={PORT_BROWSER}"),
"--headless",
"--disable-dev-shm-usage",
"--disable-gpu",
"--disable-cache",
"--disable-application-cache",
"--no-sandbox",
let headed = !self.display_env.is_empty();
// Build command args based on headed/headless mode
let mut args: Vec<String> = vec![
"ip".into(),
"netns".into(),
"exec".into(),
self.ns.name().into(),
];
if headed {
// For headed mode: drop back to the current user and pass display env vars
// This allows the browser to connect to X11/Wayland while in the namespace
let user =
std::env::var("USER").context("USER environment variable not set")?;
args.extend(["sudo".into(), "-E".into(), "-u".into(), user, "env".into()]);
args.extend(self.display_env.clone());
}
args.push(chrome_path.to_string_lossy().into());
args.push(format!("--remote-debugging-port={PORT_BROWSER}"));
if headed {
// Headed mode: omit --headless and add flags to suppress first-run dialogs
args.extend(["--no-first-run".into(), "--no-default-browser-check".into()]);
} else {
// Headless mode: original flags
args.extend([
"--headless".into(),
"--disable-dev-shm-usage".into(),
"--disable-gpu".into(),
"--disable-cache".into(),
"--disable-application-cache".into(),
]);
}
args.extend([
"--no-sandbox".into(),
format!("--user-data-dir={tmp}"),
format!("--allowed-ips=10.250.0.1"),
);
"--allowed-ips=10.250.0.1".into(),
]);
let process = duct::cmd("sudo", &args);
let process = if !cfg!(feature = "debug") {
process.stderr_capture().stdout_capture().start()?

View File

@@ -9,7 +9,7 @@ mod ws_proxy;
#[cfg(feature = "debug")]
mod debug_prelude;
use std::time::Duration;
use std::{collections::HashMap, time::Duration};
use anyhow::Result;
use clap::Parser;
@@ -22,6 +22,7 @@ use harness_core::{
rpc::{BenchCmd, TestCmd},
test::TestStatus,
};
use indicatif::{ProgressBar, ProgressStyle};
use cli::{Cli, Command};
use executor::Executor;
@@ -32,6 +33,60 @@ use crate::debug_prelude::*;
use crate::{cli::Route, network::Network, wasm_server::WasmServer, ws_proxy::WsProxy};
/// Statistics for a benchmark configuration.
#[derive(Debug, Clone)]
struct BenchStats {
group: Option<String>,
bandwidth: usize,
latency: usize,
upload_size: usize,
download_size: usize,
times: Vec<u64>,
}
impl BenchStats {
fn median(&self) -> f64 {
let mut sorted = self.times.clone();
sorted.sort();
let len = sorted.len();
if len == 0 {
return 0.0;
}
if len.is_multiple_of(2) {
(sorted[len / 2 - 1] + sorted[len / 2]) as f64 / 2.0
} else {
sorted[len / 2] as f64
}
}
}
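
A hypothetical unit test (not part of this diff) pinning down the median semantics above: even-length sample sets average the two middle values.

```rust
#[cfg(test)]
mod bench_stats_tests {
    use super::*;

    #[test]
    fn median_averages_middle_pair() {
        let stats = BenchStats {
            group: None,
            bandwidth: 0,
            latency: 0,
            upload_size: 0,
            download_size: 0,
            times: vec![10, 30, 20, 40],
        };
        // Sorted: [10, 20, 30, 40] -> (20 + 30) / 2
        assert_eq!(stats.median(), 25.0);
    }
}
```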
/// Prints a summary table of benchmark results.
fn print_bench_summary(stats: &[BenchStats]) {
if stats.is_empty() {
println!("\nNo benchmark results to display (only warmup was run).");
return;
}
println!("\n{}", "=".repeat(80));
println!("TLSNotary Benchmark Results");
println!("{}", "=".repeat(80));
println!();
for stat in stats {
let group_name = stat.group.as_deref().unwrap_or("unnamed");
println!(
"{} ({} Mbps, {}ms latency, {}KB↑ {}KB↓):",
group_name,
stat.bandwidth,
stat.latency,
stat.upload_size / 1024,
stat.download_size / 1024
);
println!(" Median: {:.2}s", stat.median() / 1000.0);
println!();
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum, Default)]
pub enum Target {
#[default]
@@ -50,14 +105,46 @@ struct Runner {
started: bool,
}
/// Collects display-related environment variables for headed browser mode.
/// Works with both X11 and Wayland by collecting whichever vars are present.
fn collect_display_env_vars() -> Vec<String> {
const DISPLAY_VARS: &[&str] = &[
"DISPLAY", // X11
"XAUTHORITY", // X11 auth
"WAYLAND_DISPLAY", // Wayland
"XDG_RUNTIME_DIR", // Wayland runtime dir
];
DISPLAY_VARS
.iter()
.filter_map(|&var| {
std::env::var(var)
.ok()
.map(|val| format!("{}={}", var, val))
})
.collect()
}
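
A usage sketch of the helper above (hypothetical session state):

```rust
fn main() {
    // On an X11 session this might yield ["DISPLAY=:0", "XAUTHORITY=..."];
    // on Wayland, ["WAYLAND_DISPLAY=wayland-0", "XDG_RUNTIME_DIR=..."].
    let display_env = collect_display_env_vars();
    println!("{display_env:?}");
}
```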
impl Runner {
fn new(cli: &Cli) -> Result<Self> {
let Cli { target, subnet, .. } = cli;
let Cli {
target,
subnet,
headed,
..
} = cli;
let current_path = std::env::current_exe().unwrap();
let fixture_path = current_path.parent().unwrap().join("server-fixture");
let network_config = NetworkConfig::new(*subnet);
let network = Network::new(network_config.clone())?;
// Collect display env vars once if headed mode is enabled
let display_env = if *headed {
collect_display_env_vars()
} else {
Vec::new()
};
let server_fixture =
ServerFixture::new(fixture_path, network.ns_app().clone(), network_config.app);
let wasm_server = WasmServer::new(
@@ -75,6 +162,7 @@ impl Runner {
.network_config(network_config.clone())
.build(),
*target,
display_env.clone(),
);
let exec_v = Executor::new(
network.ns_1().clone(),
@@ -84,6 +172,7 @@ impl Runner {
.network_config(network_config.clone())
.build(),
Target::Native,
Vec::new(), // Verifier doesn't need display env
);
Ok(Self {
@@ -118,6 +207,12 @@ pub async fn main() -> Result<()> {
tracing_subscriber::fmt::init();
let cli = Cli::parse();
// Validate --headed requires --target browser
if cli.headed && cli.target != Target::Browser {
anyhow::bail!("--headed can only be used with --target browser");
}
let mut runner = Runner::new(&cli)?;
let mut exit_code = 0;
@@ -206,6 +301,12 @@ pub async fn main() -> Result<()> {
samples_override,
skip_warmup,
} => {
// Print configuration info
println!("TLSNotary Benchmark Harness");
println!("Running benchmarks from: {}", config.display());
println!("Output will be written to: {}", output.display());
println!();
let items: BenchItems = toml::from_str(&std::fs::read_to_string(config)?)?;
let output_file = std::fs::File::create(output)?;
let mut writer = WriterBuilder::new().from_writer(output_file);
@@ -220,7 +321,34 @@ pub async fn main() -> Result<()> {
runner.exec_p.start().await?;
runner.exec_v.start().await?;
for config in benches {
// Create progress bar
let pb = ProgressBar::new(benches.len() as u64);
pb.set_style(
ProgressStyle::default_bar()
.template("[{elapsed_precise}] {bar:40.cyan/blue} {pos}/{len} {msg}")
.expect("valid template")
.progress_chars("█▓▒░ "),
);
// Collect measurements for stats
let mut measurements_by_config: HashMap<String, Vec<u64>> = HashMap::new();
let warmup_count = if skip_warmup { 0 } else { 3 };
for (idx, config) in benches.iter().enumerate() {
let is_warmup = idx < warmup_count;
let group_name = if is_warmup {
format!("Warmup {}/{}", idx + 1, warmup_count)
} else {
config.group.as_deref().unwrap_or("unnamed").to_string()
};
pb.set_message(format!(
"{} ({} Mbps, {}ms)",
group_name, config.bandwidth, config.protocol_latency
));
runner
.network
.set_proto_config(config.bandwidth, config.protocol_latency.div_ceil(2))?;
@@ -249,11 +377,73 @@ pub async fn main() -> Result<()> {
panic!("expected prover output");
};
let measurement = Measurement::new(config, metrics);
// Collect metrics for stats (skip warmup benches)
if !is_warmup {
let config_key = format!(
"{:?}|{}|{}|{}|{}",
config.group,
config.bandwidth,
config.protocol_latency,
config.upload_size,
config.download_size
);
measurements_by_config
.entry(config_key)
.or_default()
.push(metrics.time_total);
}
let measurement = Measurement::new(config.clone(), metrics);
writer.serialize(measurement)?;
writer.flush()?;
pb.inc(1);
}
pb.finish_with_message("Benchmarks complete");
// Compute and print statistics
let mut all_stats: Vec<BenchStats> = Vec::new();
for (key, times) in measurements_by_config {
// Parse back the config from the key
let parts: Vec<&str> = key.split('|').collect();
if parts.len() >= 5 {
let group = if parts[0] == "None" {
None
} else {
Some(
parts[0]
.trim_start_matches("Some(\"")
.trim_end_matches("\")")
.to_string(),
)
};
let bandwidth: usize = parts[1].parse().unwrap_or(0);
let latency: usize = parts[2].parse().unwrap_or(0);
let upload_size: usize = parts[3].parse().unwrap_or(0);
let download_size: usize = parts[4].parse().unwrap_or(0);
all_stats.push(BenchStats {
group,
bandwidth,
latency,
upload_size,
download_size,
times,
});
}
}
// Sort stats by group, then latency and bandwidth, for consistent output
all_stats.sort_by(|a, b| {
a.group
.cmp(&b.group)
.then(a.latency.cmp(&b.latency))
.then(a.bandwidth.cmp(&b.bandwidth))
});
print_bench_summary(&all_stats);
}
Command::Serve {} => {
runner.start_services().await?;

View File

@@ -1,25 +0,0 @@
#### Bandwidth ####
[[group]]
name = "bandwidth"
protocol_latency = 25
[[bench]]
group = "bandwidth"
bandwidth = 10
[[bench]]
group = "bandwidth"
bandwidth = 50
[[bench]]
group = "bandwidth"
bandwidth = 100
[[bench]]
group = "bandwidth"
bandwidth = 250
[[bench]]
group = "bandwidth"
bandwidth = 1000

View File

@@ -1,37 +0,0 @@
[[group]]
name = "download_size"
protocol_latency = 10
bandwidth = 200
upload-size = 2048
[[bench]]
group = "download_size"
download-size = 1024
[[bench]]
group = "download_size"
download-size = 2048
[[bench]]
group = "download_size"
download-size = 4096
[[bench]]
group = "download_size"
download-size = 8192
[[bench]]
group = "download_size"
download-size = 16384
[[bench]]
group = "download_size"
download-size = 32768
[[bench]]
group = "download_size"
download-size = 65536
[[bench]]
group = "download_size"
download-size = 131072

View File

@@ -1,25 +0,0 @@
#### Latency ####
[[group]]
name = "latency"
bandwidth = 1000
[[bench]]
group = "latency"
protocol_latency = 10
[[bench]]
group = "latency"
protocol_latency = 25
[[bench]]
group = "latency"
protocol_latency = 50
[[bench]]
group = "latency"
protocol_latency = 100
[[bench]]
group = "latency"
protocol_latency = 200

View File

@@ -21,20 +21,6 @@ impl<T> RangeMap<T>
where
T: Item,
{
pub(crate) fn new(map: Vec<(usize, T)>) -> Self {
let mut pos = 0;
for (idx, item) in &map {
assert!(
*idx >= pos,
"items must be sorted by index and non-overlapping"
);
pos = *idx + item.length();
}
Self { map }
}
/// Returns `true` if the map is empty.
pub(crate) fn is_empty(&self) -> bool {
self.map.is_empty()
@@ -47,11 +33,6 @@ where
.map(|(idx, item)| *idx..*idx + item.length())
}
/// Returns the length of the map.
pub(crate) fn len(&self) -> usize {
self.map.iter().map(|(_, item)| item.length()).sum()
}
pub(crate) fn iter(&self) -> impl Iterator<Item = (Range<usize>, &T)> {
self.map
.iter()

View File

@@ -6,11 +6,6 @@ use mpz_core::Block;
#[cfg(not(tlsn_insecure))]
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_garble_core::Delta;
use mpz_memory_core::{
Vector,
binary::U8,
correlated::{Key, Mac},
};
#[cfg(not(tlsn_insecure))]
use mpz_ot::cot::{DerandCOTReceiver, DerandCOTSender};
use mpz_ot::{
@@ -24,8 +19,6 @@ use tlsn_core::config::tls_commit::mpc::{MpcTlsConfig, NetworkSetting};
use tlsn_deap::Deap;
use tokio::sync::Mutex;
use crate::transcript_internal::commit::encoding::{KeyStore, MacStore};
#[cfg(not(tlsn_insecure))]
pub(crate) type ProverMpc =
Garbler<DerandCOTSender<SharedRCOTSender<kos::Sender<co::Receiver>, Block>>>;
@@ -193,41 +186,3 @@ pub(crate) fn translate_keys<Mpc, Zk>(keys: &mut SessionKeys, vm: &Deap<Mpc, Zk>
.translate(keys.server_write_mac_key)
.expect("VM memory should be consistent");
}
impl<T> KeyStore for Verifier<T> {
fn delta(&self) -> &Delta {
self.delta()
}
fn get_keys(&self, data: Vector<U8>) -> Option<&[Key]> {
self.get_keys(data).ok()
}
}
impl<T> MacStore for Prover<T> {
fn get_macs(&self, data: Vector<U8>) -> Option<&[Mac]> {
self.get_macs(data).ok()
}
}
#[cfg(tlsn_insecure)]
mod insecure {
use super::*;
use mpz_ideal_vm::IdealVm;
impl KeyStore for IdealVm {
fn delta(&self) -> &Delta {
unimplemented!("encodings not supported in insecure mode")
}
fn get_keys(&self, _data: Vector<U8>) -> Option<&[Key]> {
unimplemented!("encodings not supported in insecure mode")
}
}
impl MacStore for IdealVm {
fn get_macs(&self, _data: Vector<U8>) -> Option<&[Mac]> {
unimplemented!("encodings not supported in insecure mode")
}
}
}

View File

@@ -2,8 +2,6 @@ use std::{error::Error, fmt};
use mpc_tls::MpcTlsError;
use crate::transcript_internal::commit::encoding::EncodingError;
/// Error for [`Prover`](crate::prover::Prover).
#[derive(Debug, thiserror::Error)]
pub struct ProverError {
@@ -109,9 +107,3 @@ impl From<MpcTlsError> for ProverError {
Self::new(ErrorKind::Mpc, e)
}
}
impl From<EncodingError> for ProverError {
fn from(e: EncodingError) -> Self {
Self::new(ErrorKind::Commit, e)
}
}

View File

@@ -13,17 +13,10 @@ use tlsn_core::{
use crate::{
prover::ProverError,
transcript_internal::{
TranscriptRefs,
auth::prove_plaintext,
commit::{
encoding::{self, MacStore},
hash::prove_hash,
},
},
transcript_internal::{TranscriptRefs, auth::prove_plaintext, commit::hash::prove_hash},
};
pub(crate) async fn prove<T: Vm<Binary> + MacStore + Send + Sync>(
pub(crate) async fn prove<T: Vm<Binary> + Send + Sync>(
ctx: &mut Context,
vm: &mut T,
keys: &SessionKeys,
@@ -45,13 +38,6 @@ pub(crate) async fn prove<T: Vm<Binary> + MacStore + Send + Sync>(
Direction::Sent => commit_sent.union_mut(idx),
Direction::Received => commit_recv.union_mut(idx),
});
commit_config
.iter_encoding()
.for_each(|(direction, idx)| match direction {
Direction::Sent => commit_sent.union_mut(idx),
Direction::Received => commit_recv.union_mut(idx),
});
}
let transcript_refs = TranscriptRefs {
@@ -102,45 +88,6 @@ pub(crate) async fn prove<T: Vm<Binary> + MacStore + Send + Sync>(
vm.execute_all(ctx).await.map_err(ProverError::zk)?;
if let Some(commit_config) = config.transcript_commit()
&& commit_config.has_encoding()
{
let mut sent_ranges = RangeSet::default();
let mut recv_ranges = RangeSet::default();
for (dir, idx) in commit_config.iter_encoding() {
match dir {
Direction::Sent => sent_ranges.union_mut(idx),
Direction::Received => recv_ranges.union_mut(idx),
}
}
let sent_map = transcript_refs
.sent
.index(&sent_ranges)
.expect("indices are valid");
let recv_map = transcript_refs
.recv
.index(&recv_ranges)
.expect("indices are valid");
let (commitment, tree) = encoding::receive(
ctx,
vm,
*commit_config.encoding_hash_alg(),
&sent_map,
&recv_map,
commit_config.iter_encoding(),
)
.await?;
output
.transcript_commitments
.push(TranscriptCommitment::Encoding(commitment));
output
.transcript_secrets
.push(TranscriptSecret::Encoding(tree));
}
if let Some((hash_fut, hash_secrets)) = hash_commitments {
let hash_commitments = hash_fut.try_recv().map_err(ProverError::commit)?;
for (commitment, secret) in hash_commitments.into_iter().zip(hash_secrets) {

View File

@@ -1,4 +1,3 @@
//! Plaintext commitment and proof of encryption.
pub(crate) mod encoding;
pub(crate) mod hash;

View File

@@ -1,267 +0,0 @@
//! Encoding commitment protocol.
use std::ops::Range;
use mpz_common::Context;
use mpz_memory_core::{
Vector,
binary::U8,
correlated::{Delta, Key, Mac},
};
use rand::Rng;
use rangeset::set::RangeSet;
use serde::{Deserialize, Serialize};
use serio::{SinkExt, stream::IoStreamExt};
use tlsn_core::{
hash::{Blake3, HashAlgId, HashAlgorithm, Keccak256, Sha256},
transcript::{
Direction,
encoding::{
Encoder, EncoderSecret, EncodingCommitment, EncodingProvider, EncodingProviderError,
EncodingTree, EncodingTreeError, new_encoder,
},
},
};
use crate::{
map::{Item, RangeMap},
transcript_internal::ReferenceMap,
};
/// Number of encoding bytes per plaintext byte.
const ENCODING_SIZE: usize = 128;
#[derive(Debug, Serialize, Deserialize)]
struct Encodings {
sent: Vec<u8>,
recv: Vec<u8>,
}
/// Transfers encodings for the provided plaintext ranges.
pub(crate) async fn transfer<K: KeyStore>(
ctx: &mut Context,
store: &K,
sent: &ReferenceMap,
recv: &ReferenceMap,
) -> Result<(EncoderSecret, EncodingCommitment), EncodingError> {
let secret = EncoderSecret::new(rand::rng().random(), store.delta().as_block().to_bytes());
let encoder = new_encoder(&secret);
// Collects the encodings for the provided plaintext ranges.
fn collect_encodings(
encoder: &impl Encoder,
store: &impl KeyStore,
direction: Direction,
map: &ReferenceMap,
) -> Vec<u8> {
let mut encodings = Vec::with_capacity(map.len() * ENCODING_SIZE);
for (range, chunk) in map.iter() {
let start = encodings.len();
encoder.encode_range(direction, range, &mut encodings);
let keys = store
.get_keys(*chunk)
.expect("keys are present for provided plaintext ranges");
encodings[start..]
.iter_mut()
.zip(keys.iter().flat_map(|key| key.as_block().as_bytes()))
.for_each(|(encoding, key)| {
*encoding ^= *key;
});
}
encodings
}
let encodings = Encodings {
sent: collect_encodings(&encoder, store, Direction::Sent, sent),
recv: collect_encodings(&encoder, store, Direction::Received, recv),
};
let frame_limit = ctx
.io()
.limit()
.saturating_add(encodings.sent.len() + encodings.recv.len());
ctx.io_mut().with_limit(frame_limit).send(encodings).await?;
let root = ctx.io_mut().expect_next().await?;
Ok((secret, EncodingCommitment { root }))
}
/// Receives and commits to the encodings for the provided plaintext ranges.
pub(crate) async fn receive<M: MacStore>(
ctx: &mut Context,
store: &M,
hash_alg: HashAlgId,
sent: &ReferenceMap,
recv: &ReferenceMap,
idxs: impl IntoIterator<Item = &(Direction, RangeSet<usize>)>,
) -> Result<(EncodingCommitment, EncodingTree), EncodingError> {
let hasher: &(dyn HashAlgorithm + Send + Sync) = match hash_alg {
HashAlgId::SHA256 => &Sha256::default(),
HashAlgId::KECCAK256 => &Keccak256::default(),
HashAlgId::BLAKE3 => &Blake3::default(),
alg => {
return Err(ErrorRepr::UnsupportedHashAlgorithm(alg).into());
}
};
let (sent_len, recv_len) = (sent.len(), recv.len());
let frame_limit = ctx
.io()
.limit()
.saturating_add(ENCODING_SIZE * (sent_len + recv_len));
let encodings: Encodings = ctx.io_mut().with_limit(frame_limit).expect_next().await?;
if encodings.sent.len() != sent_len * ENCODING_SIZE {
return Err(ErrorRepr::IncorrectMacCount {
direction: Direction::Sent,
expected: sent_len,
got: encodings.sent.len() / ENCODING_SIZE,
}
.into());
}
if encodings.recv.len() != recv_len * ENCODING_SIZE {
return Err(ErrorRepr::IncorrectMacCount {
direction: Direction::Received,
expected: recv_len,
got: encodings.recv.len() / ENCODING_SIZE,
}
.into());
}
// Collects a map of plaintext ranges to their encodings.
fn collect_map(
store: &impl MacStore,
mut encodings: Vec<u8>,
map: &ReferenceMap,
) -> RangeMap<EncodingSlice> {
let mut encoding_map = Vec::new();
let mut pos = 0;
for (range, chunk) in map.iter() {
let macs = store
.get_macs(*chunk)
.expect("MACs are present for provided plaintext ranges");
let encoding = &mut encodings[pos..pos + range.len() * ENCODING_SIZE];
encoding
.iter_mut()
.zip(macs.iter().flat_map(|mac| mac.as_bytes()))
.for_each(|(encoding, mac)| {
*encoding ^= *mac;
});
encoding_map.push((range.start, EncodingSlice::from(&(*encoding))));
pos += range.len() * ENCODING_SIZE;
}
RangeMap::new(encoding_map)
}
let provider = Provider {
sent: collect_map(store, encodings.sent, sent),
recv: collect_map(store, encodings.recv, recv),
};
let tree = EncodingTree::new(hasher, idxs, &provider)?;
let root = tree.root();
ctx.io_mut().send(root.clone()).await?;
let commitment = EncodingCommitment { root };
Ok((commitment, tree))
}
pub(crate) trait KeyStore {
fn delta(&self) -> &Delta;
fn get_keys(&self, data: Vector<U8>) -> Option<&[Key]>;
}
pub(crate) trait MacStore {
fn get_macs(&self, data: Vector<U8>) -> Option<&[Mac]>;
}
#[derive(Debug)]
struct Provider {
sent: RangeMap<EncodingSlice>,
recv: RangeMap<EncodingSlice>,
}
impl EncodingProvider for Provider {
fn provide_encoding(
&self,
direction: Direction,
range: Range<usize>,
dest: &mut Vec<u8>,
) -> Result<(), EncodingProviderError> {
let encodings = match direction {
Direction::Sent => &self.sent,
Direction::Received => &self.recv,
};
let encoding = encodings.get(range).ok_or(EncodingProviderError)?;
dest.extend_from_slice(encoding);
Ok(())
}
}
#[derive(Debug)]
struct EncodingSlice(Vec<u8>);
impl From<&[u8]> for EncodingSlice {
fn from(value: &[u8]) -> Self {
Self(value.to_vec())
}
}
impl Item for EncodingSlice {
type Slice<'a>
= &'a [u8]
where
Self: 'a;
fn length(&self) -> usize {
self.0.len() / ENCODING_SIZE
}
fn slice<'a>(&'a self, range: Range<usize>) -> Option<Self::Slice<'a>> {
self.0
.get(range.start * ENCODING_SIZE..range.end * ENCODING_SIZE)
}
}
/// Encoding protocol error.
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct EncodingError(#[from] ErrorRepr);
#[derive(Debug, thiserror::Error)]
#[error("encoding protocol error: {0}")]
enum ErrorRepr {
#[error("I/O error: {0}")]
Io(std::io::Error),
#[error("incorrect MAC count for {direction}: expected {expected}, got {got}")]
IncorrectMacCount {
direction: Direction,
expected: usize,
got: usize,
},
#[error("encoding tree error: {0}")]
EncodingTree(EncodingTreeError),
#[error("unsupported hash algorithm: {0}")]
UnsupportedHashAlgorithm(HashAlgId),
}
impl From<std::io::Error> for EncodingError {
fn from(value: std::io::Error) -> Self {
Self(ErrorRepr::Io(value))
}
}
impl From<EncodingTreeError> for EncodingError {
fn from(value: EncodingTreeError) -> Self {
Self(ErrorRepr::EncodingTree(value))
}
}
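
For context on what is being removed: the protocol relied on the standard correlated relation Mac = Key XOR (bit · Delta), so unmasking the verifier's transfer yields an encoding that commits to the plaintext bit. A toy sketch of that relation, using u8 values in place of the real mpz `Key`/`Mac`/`Block` types (illustrative only):

```rust
fn main() {
    let delta: u8 = 0b1101_0001;
    let key: u8 = 0b0101_0110;
    let zero_encoding: u8 = 0b1010_1010; // encoder output for the zero label
    for bit in [0u8, 1u8] {
        let mac = key ^ (bit * delta); // correlated MAC held by the prover
        let masked = zero_encoding ^ key; // what the verifier transfers
        let active = masked ^ mac; // what the prover commits to
        assert_eq!(active, zero_encoding ^ (bit * delta));
    }
}
```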

View File

@@ -2,8 +2,6 @@ use std::{error::Error, fmt};
use mpc_tls::MpcTlsError;
use crate::transcript_internal::commit::encoding::EncodingError;
/// Error for [`Verifier`](crate::verifier::Verifier).
#[derive(Debug, thiserror::Error)]
pub struct VerifierError {
@@ -57,7 +55,6 @@ enum ErrorKind {
Config,
Mpc,
Zk,
Commit,
Verify,
}
@@ -70,7 +67,6 @@ impl fmt::Display for VerifierError {
ErrorKind::Config => f.write_str("config error")?,
ErrorKind::Mpc => f.write_str("mpc error")?,
ErrorKind::Zk => f.write_str("zk error")?,
ErrorKind::Commit => f.write_str("commit error")?,
ErrorKind::Verify => f.write_str("verification error")?,
}
@@ -105,9 +101,3 @@ impl From<MpcTlsError> for VerifierError {
Self::new(ErrorKind::Mpc, e)
}
}
impl From<EncodingError> for VerifierError {
fn from(e: EncodingError) -> Self {
Self::new(ErrorKind::Commit, e)
}
}

View File

@@ -14,19 +14,12 @@ use tlsn_core::{
};
use crate::{
transcript_internal::{
TranscriptRefs,
auth::verify_plaintext,
commit::{
encoding::{self, KeyStore},
hash::verify_hash,
},
},
transcript_internal::{TranscriptRefs, auth::verify_plaintext, commit::hash::verify_hash},
verifier::VerifierError,
};
#[allow(clippy::too_many_arguments)]
pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
pub(crate) async fn verify<T: Vm<Binary> + Send + Sync>(
ctx: &mut Context,
vm: &mut T,
keys: &SessionKeys,
@@ -94,11 +87,6 @@ pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
Direction::Sent => commit_sent.union_mut(idx),
Direction::Received => commit_recv.union_mut(idx),
});
if let Some((sent, recv)) = commit_config.encoding() {
commit_sent.union_mut(sent);
commit_recv.union_mut(recv);
}
}
let (sent_refs, sent_proof) = verify_plaintext(
@@ -151,24 +139,6 @@ pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
sent_proof.verify().map_err(VerifierError::verify)?;
recv_proof.verify().map_err(VerifierError::verify)?;
let mut encoder_secret = None;
if let Some(commit_config) = request.transcript_commit()
&& let Some((sent, recv)) = commit_config.encoding()
{
let sent_map = transcript_refs
.sent
.index(sent)
.expect("ranges were authenticated");
let recv_map = transcript_refs
.recv
.index(recv)
.expect("ranges were authenticated");
let (secret, commitment) = encoding::transfer(ctx, vm, &sent_map, &recv_map).await?;
encoder_secret = Some(secret);
transcript_commitments.push(TranscriptCommitment::Encoding(commitment));
}
if let Some(hash_commitments) = hash_commitments {
for commitment in hash_commitments.try_recv().map_err(VerifierError::verify)? {
transcript_commitments.push(TranscriptCommitment::Hash(commitment));
@@ -178,7 +148,6 @@ pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
Ok(VerifierOutput {
server_name,
transcript: request.reveal().is_some().then_some(transcript),
encoder_secret,
transcript_commitments,
})
}

View File

@@ -1,5 +1,4 @@
use futures::{AsyncReadExt, AsyncWriteExt};
use rangeset::set::RangeSet;
use tlsn::{
config::{
prove::ProveConfig,
@@ -9,12 +8,9 @@ use tlsn::{
verifier::VerifierConfig,
},
connection::ServerName,
hash::{HashAlgId, HashProvider},
hash::HashAlgId,
prover::Prover,
transcript::{
Direction, Transcript, TranscriptCommitConfig, TranscriptCommitment,
TranscriptCommitmentKind, TranscriptSecret,
},
transcript::{Direction, Transcript, TranscriptCommitConfig, TranscriptCommitmentKind},
verifier::{Verifier, VerifierOutput},
webpki::{CertificateDer, RootCertStore},
};
@@ -42,7 +38,7 @@ async fn test() {
let (socket_0, socket_1) = tokio::io::duplex(2 << 23);
let ((full_transcript, prover_output), verifier_output) =
let ((_full_transcript, _prover_output), verifier_output) =
tokio::join!(prover(socket_0), verifier(socket_1));
let partial_transcript = verifier_output.transcript.unwrap();
@@ -58,50 +54,6 @@ async fn test() {
partial_transcript.received_authed().iter().next().unwrap(),
0..10
);
let encoding_tree = prover_output
.transcript_secrets
.iter()
.find_map(|secret| {
if let TranscriptSecret::Encoding(tree) = secret {
Some(tree)
} else {
None
}
})
.unwrap();
let encoding_commitment = prover_output
.transcript_commitments
.iter()
.find_map(|commitment| {
if let TranscriptCommitment::Encoding(commitment) = commitment {
Some(commitment)
} else {
None
}
})
.unwrap();
let prove_sent = RangeSet::from(1..full_transcript.sent().len() - 1);
let prove_recv = RangeSet::from(1..full_transcript.received().len() - 1);
let idxs = [
(Direction::Sent, prove_sent.clone()),
(Direction::Received, prove_recv.clone()),
];
let proof = encoding_tree.proof(idxs.iter()).unwrap();
let (auth_sent, auth_recv) = proof
.verify_with_provider(
&HashProvider::default(),
&verifier_output.encoder_secret.unwrap(),
encoding_commitment,
full_transcript.sent(),
full_transcript.received(),
)
.unwrap();
assert_eq!(auth_sent, prove_sent);
assert_eq!(auth_recv, prove_recv);
}
#[instrument(skip(verifier_socket))]
@@ -163,25 +115,21 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
let mut builder = TranscriptCommitConfig::builder(prover.transcript());
for kind in [
TranscriptCommitmentKind::Encoding,
TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256,
},
] {
builder
.commit_with_kind(&(0..sent_tx_len), Direction::Sent, kind)
.unwrap();
builder
.commit_with_kind(&(0..recv_tx_len), Direction::Received, kind)
.unwrap();
builder
.commit_with_kind(&(1..sent_tx_len - 1), Direction::Sent, kind)
.unwrap();
builder
.commit_with_kind(&(1..recv_tx_len - 1), Direction::Received, kind)
.unwrap();
}
let kind = TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256,
};
builder
.commit_with_kind(&(0..sent_tx_len), Direction::Sent, kind)
.unwrap();
builder
.commit_with_kind(&(0..recv_tx_len), Direction::Received, kind)
.unwrap();
builder
.commit_with_kind(&(1..sent_tx_len - 1), Direction::Sent, kind)
.unwrap();
builder
.commit_with_kind(&(1..recv_tx_len - 1), Direction::Received, kind)
.unwrap();
let transcript_commit = builder.build().unwrap();