Compare commits

...

23 Commits

Author SHA1 Message Date
dan
0f9c04fc11 feat: added plugin-core crate 2025-10-30 09:11:12 +02:00
sinu.eth
5fef2af698 fix(example): close prover (#1025) 2025-10-17 10:39:08 -07:00
sinu.eth
5b2083e211 refactor(tlsn): invert control of config validation (#1023)
* refactor(tlsn): invert control of config validation

* clippy
2025-10-17 10:19:02 -07:00
sinu.eth
d26bb02d2e chore: update to alpha.14-pre (#1022) 2025-10-15 11:11:43 -07:00
sinu
a766b64184 ci: add manual trigger to main update 2025-10-15 10:11:01 -07:00
sinu.eth
0885d40ddf chore: release v0.1.0-alpha.13 (#1019) 2025-10-15 09:38:52 -07:00
sinu.eth
610411aae4 ci: relax clippy (#1020) 2025-10-15 09:27:55 -07:00
sinu.eth
37df1baed7 feat(core): proof config builder reveal all methods (#1017)
Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>
2025-10-14 08:56:28 -07:00
dan
aeaebc5c60 chore(harness): expose debug flag in dockerfile (#1018) 2025-10-14 11:19:30 +00:00
sinu.eth
2e7e3db11d fix: fully identify signature algorithm (#1015) 2025-10-13 09:57:34 +02:00
sinu.eth
0a68837d0a fix: empty auth ranges (#1016) 2025-10-10 15:44:38 -07:00
sinu.eth
0ec2392716 chore(tlsn): add transcript auth tests (#1014)
* chore(tlsn): add transcript auth tests

* clippy
2025-10-10 14:10:17 -07:00
sinu.eth
f99fce5b5a fix(tlsn): do not implicitly reveal encoder secret (#1011) 2025-10-10 08:39:24 -07:00
sinu.eth
6b9f44e7e5 feat(tlsn): disclose encryption key (#1010)
Co-authored-by: th4s <th4s@metavoid.xyz>
2025-10-10 08:32:50 -07:00
dan
bf1cf2302a fix(harness): add harness debug feature (#1012) 2025-10-10 14:20:42 +00:00
sinu.eth
2884be17e0 feat(tlsn): partial plaintext auth (#1006)
Co-authored-by: th4s <th4s@metavoid.xyz>
2025-10-09 11:22:23 -07:00
sinu.eth
df8d79c152 fix(wasm): explicitly enable link args for wasm (#1007) 2025-10-09 08:34:11 -07:00
yuroitaki
82d509266b feat: add blake3 transcript commitment (#1000)
* Add blake3.

* Update mpz version.

---------

Co-authored-by: yuroitaki <>
2025-10-08 10:13:07 +08:00
dan
d5ad768e7c chore: improve error msg (#1003) 2025-10-03 05:43:58 +00:00
Hendrik Eeckhaut
d25fb320d4 build: update Rust to version 1.90.0 2025-09-24 09:32:56 +02:00
Hendrik Eeckhaut
0539268da7 Interactive noir example (#981)
demo for interactive zk age proof

Co-authored-by: th4s <th4s@metavoid.xyz>
2025-09-19 16:55:10 +02:00
dan
427b2896b5 allow root_store to be None (#995) 2025-09-19 15:15:04 +02:00
Hendrik Eeckhaut
89d1e594d1 privacy-scaling-explorations -> privacy-ethereum (#993) 2025-09-11 16:48:01 +02:00
99 changed files with 6413 additions and 2558 deletions

View File

@@ -18,10 +18,10 @@ env:
# We need a higher number of parallel rayon tasks than the default (which is 4)
# in order to prevent a deadlock, c.f.
# - https://github.com/tlsnotary/tlsn/issues/548
# - https://github.com/privacy-scaling-explorations/mpz/issues/178
# - https://github.com/privacy-ethereum/mpz/issues/178
# 32 seems to be big enough for the foreseeable future
RAYON_NUM_THREADS: 32
RUST_VERSION: 1.89.0
RUST_VERSION: 1.90.0
jobs:
clippy:
@@ -32,7 +32,7 @@ jobs:
uses: actions/checkout@v4
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_VERSION }}
components: clippy
@@ -41,7 +41,7 @@ jobs:
uses: Swatinem/rust-cache@v2.7.7
- name: Clippy
run: cargo clippy --keep-going --all-features --all-targets --locked -- -D warnings
run: cargo clippy --keep-going --all-features --all-targets --locked
fmt:
name: Check formatting

View File

@@ -6,7 +6,7 @@ on:
tag:
description: 'Tag to publish to NPM'
required: true
default: 'v0.1.0-alpha.13-pre'
default: 'v0.1.0-alpha.14-pre'
jobs:
release:

View File

@@ -23,7 +23,6 @@ jobs:
- name: "rustdoc"
run: crates/wasm/build-docs.sh
- name: Deploy
uses: peaceiris/actions-gh-pages@v3
if: ${{ github.ref == 'refs/heads/dev' }}

View File

@@ -1,6 +1,7 @@
name: Fast-forward main branch to published release tag
on:
workflow_dispatch:
release:
types: [published]

2714
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -9,6 +9,7 @@ members = [
"crates/data-fixtures",
"crates/examples",
"crates/formats",
"crates/plugin-core",
"crates/server-fixture/certs",
"crates/server-fixture/server",
"crates/tls/backend",
@@ -53,6 +54,7 @@ tlsn-formats = { path = "crates/formats" }
tlsn-hmac-sha256 = { path = "crates/components/hmac-sha256" }
tlsn-key-exchange = { path = "crates/components/key-exchange" }
tlsn-mpc-tls = { path = "crates/mpc-tls" }
tlsn-plugin-core = { path = "crates/plugin-core" }
tlsn-server-fixture = { path = "crates/server-fixture/server" }
tlsn-server-fixture-certs = { path = "crates/server-fixture/certs" }
tlsn-tls-backend = { path = "crates/tls/backend" }
@@ -66,25 +68,27 @@ tlsn-harness-runner = { path = "crates/harness/runner" }
tlsn-wasm = { path = "crates/wasm" }
tlsn = { path = "crates/tlsn" }
mpz-circuits = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" }
mpz-memory-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" }
mpz-common = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" }
mpz-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" }
mpz-vm-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" }
mpz-garble = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" }
mpz-garble-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" }
mpz-ole = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" }
mpz-ot = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" }
mpz-share-conversion = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" }
mpz-fields = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" }
mpz-zk = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" }
mpz-hash = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "3d90b6c" }
mpz-circuits = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-memory-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-common = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-vm-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-garble = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-garble-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-ole = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-ot = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-share-conversion = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-fields = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-zk = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-hash = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-ideal-vm = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
rangeset = { version = "0.2" }
serio = { version = "0.2" }
spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "304b910" }
uid-mux = { version = "0.2" }
websocket-relay = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
websocket-relay = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "304b910" }
futures-plex = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "304b910" }
aead = { version = "0.4" }
aes = { version = "0.8" }
@@ -110,7 +114,7 @@ elliptic-curve = { version = "0.13" }
enum-try-as-inner = { version = "0.1" }
env_logger = { version = "0.10" }
futures = { version = "0.3" }
futures-rustls = { version = "0.26" }
futures-rustls = { version = "0.25" }
generic-array = { version = "0.14" }
ghash = { version = "0.5" }
hex = { version = "0.4" }

View File

@@ -1,6 +1,6 @@
[package]
name = "tlsn-attestation"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.14-pre"
edition = "2024"
[features]
@@ -23,9 +23,9 @@ thiserror = { workspace = true }
tiny-keccak = { workspace = true, features = ["keccak"] }
[dev-dependencies]
alloy-primitives = { version = "0.8.22", default-features = false }
alloy-signer = { version = "0.12", default-features = false }
alloy-signer-local = { version = "0.12", default-features = false }
alloy-primitives = { version = "1.3.1", default-features = false }
alloy-signer = { version = "1.0", default-features = false }
alloy-signer-local = { version = "1.0", default-features = false }
rand06-compat = { workspace = true }
rstest = { workspace = true }
tlsn-core = { workspace = true, features = ["fixtures"] }

View File

@@ -5,7 +5,7 @@ use rand::{Rng, rng};
use tlsn_core::{
connection::{ConnectionInfo, ServerEphemKey},
hash::HashAlgId,
transcript::TranscriptCommitment,
transcript::{TranscriptCommitment, encoding::EncoderSecret},
};
use crate::{
@@ -25,6 +25,7 @@ pub struct Sign {
connection_info: Option<ConnectionInfo>,
server_ephemeral_key: Option<ServerEphemKey>,
cert_commitment: ServerCertCommitment,
encoder_secret: Option<EncoderSecret>,
extensions: Vec<Extension>,
transcript_commitments: Vec<TranscriptCommitment>,
}
@@ -86,6 +87,7 @@ impl<'a> AttestationBuilder<'a, Accept> {
connection_info: None,
server_ephemeral_key: None,
cert_commitment,
encoder_secret: None,
transcript_commitments: Vec::new(),
extensions,
},
@@ -106,6 +108,12 @@ impl AttestationBuilder<'_, Sign> {
self
}
/// Sets the secret for encoding commitments.
pub fn encoder_secret(&mut self, secret: EncoderSecret) -> &mut Self {
self.state.encoder_secret = Some(secret);
self
}
/// Adds an extension to the attestation.
pub fn extension(&mut self, extension: Extension) -> &mut Self {
self.state.extensions.push(extension);
@@ -129,6 +137,7 @@ impl AttestationBuilder<'_, Sign> {
connection_info,
server_ephemeral_key,
cert_commitment,
encoder_secret,
extensions,
transcript_commitments,
} = self.state;
@@ -159,6 +168,7 @@ impl AttestationBuilder<'_, Sign> {
AttestationBuilderError::new(ErrorKind::Field, "handshake data was not set")
})?),
cert_commitment: field_id.next(cert_commitment),
encoder_secret: encoder_secret.map(|secret| field_id.next(secret)),
extensions: extensions
.into_iter()
.map(|extension| field_id.next(extension))

View File

@@ -219,7 +219,7 @@ use tlsn_core::{
connection::{ConnectionInfo, ServerEphemKey},
hash::{Hash, HashAlgorithm, TypedHash},
merkle::MerkleTree,
transcript::TranscriptCommitment,
transcript::{TranscriptCommitment, encoding::EncoderSecret},
};
use crate::{
@@ -327,6 +327,7 @@ pub struct Body {
connection_info: Field<ConnectionInfo>,
server_ephemeral_key: Field<ServerEphemKey>,
cert_commitment: Field<ServerCertCommitment>,
encoder_secret: Option<Field<EncoderSecret>>,
extensions: Vec<Field<Extension>>,
transcript_commitments: Vec<Field<TranscriptCommitment>>,
}
@@ -372,6 +373,7 @@ impl Body {
connection_info: conn_info,
server_ephemeral_key,
cert_commitment,
encoder_secret,
extensions,
transcript_commitments,
} = self;
@@ -389,6 +391,13 @@ impl Body {
),
];
if let Some(encoder_secret) = encoder_secret {
fields.push((
encoder_secret.id,
hasher.hash_separated(&encoder_secret.data),
));
}
for field in extensions.iter() {
fields.push((field.id, hasher.hash_separated(&field.data)));
}

View File

@@ -91,6 +91,11 @@ impl Presentation {
transcript.verify_with_provider(
&provider.hash,
&attestation.body.connection_info().transcript_length,
attestation
.body
.encoder_secret
.as_ref()
.map(|field| &field.data),
attestation.body.transcript_commitments(),
)
})

View File

@@ -49,5 +49,6 @@ impl_domain_separator!(tlsn_core::connection::ConnectionInfo);
impl_domain_separator!(tlsn_core::connection::CertBinding);
impl_domain_separator!(tlsn_core::transcript::TranscriptCommitment);
impl_domain_separator!(tlsn_core::transcript::TranscriptSecret);
impl_domain_separator!(tlsn_core::transcript::encoding::EncoderSecret);
impl_domain_separator!(tlsn_core::transcript::encoding::EncodingCommitment);
impl_domain_separator!(tlsn_core::transcript::hash::PlaintextHash);

View File

@@ -64,7 +64,6 @@ fn test_api() {
let encoding_commitment = EncodingCommitment {
root: encoding_tree.root(),
secret: encoder_secret(),
};
let request_config = RequestConfig::default();
@@ -96,6 +95,7 @@ fn test_api() {
.connection_info(connection_info.clone())
// Server key Notary received during handshake
.server_ephemeral_key(server_ephemeral_key)
.encoder_secret(encoder_secret())
.transcript_commitments(vec![TranscriptCommitment::Encoding(encoding_commitment)]);
let attestation = attestation_builder.build(&provider).unwrap();

View File

@@ -5,7 +5,7 @@ description = "This crate provides implementations of ciphers for two parties"
keywords = ["tls", "mpc", "2pc", "aes"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.14-pre"
edition = "2021"
[lints]

View File

@@ -1,6 +1,6 @@
[package]
name = "tlsn-deap"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.14-pre"
edition = "2021"
[lints]

View File

@@ -5,7 +5,7 @@ description = "A 2PC implementation of TLS HMAC-SHA256 PRF"
keywords = ["tls", "mpc", "2pc", "hmac", "sha256"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.14-pre"
edition = "2021"
[lints]

View File

@@ -5,7 +5,7 @@ description = "Implementation of the 3-party key-exchange protocol"
keywords = ["tls", "mpc", "2pc", "pms", "key-exchange"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.14-pre"
edition = "2021"
[lints]

View File

@@ -5,7 +5,7 @@ description = "Core types for TLSNotary"
keywords = ["tls", "mpc", "2pc", "types"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.14-pre"
edition = "2021"
[lints]

View File

@@ -116,84 +116,75 @@ pub enum KeyType {
SECP256R1 = 0x0017,
}
/// Signature scheme on the key exchange parameters.
/// Signature algorithm used on the key exchange parameters.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
#[allow(non_camel_case_types, missing_docs)]
pub enum SignatureScheme {
RSA_PKCS1_SHA1 = 0x0201,
ECDSA_SHA1_Legacy = 0x0203,
RSA_PKCS1_SHA256 = 0x0401,
ECDSA_NISTP256_SHA256 = 0x0403,
RSA_PKCS1_SHA384 = 0x0501,
ECDSA_NISTP384_SHA384 = 0x0503,
RSA_PKCS1_SHA512 = 0x0601,
ECDSA_NISTP521_SHA512 = 0x0603,
RSA_PSS_SHA256 = 0x0804,
RSA_PSS_SHA384 = 0x0805,
RSA_PSS_SHA512 = 0x0806,
ED25519 = 0x0807,
pub enum SignatureAlgorithm {
ECDSA_NISTP256_SHA256,
ECDSA_NISTP256_SHA384,
ECDSA_NISTP384_SHA256,
ECDSA_NISTP384_SHA384,
ED25519,
RSA_PKCS1_2048_8192_SHA256,
RSA_PKCS1_2048_8192_SHA384,
RSA_PKCS1_2048_8192_SHA512,
RSA_PSS_2048_8192_SHA256_LEGACY_KEY,
RSA_PSS_2048_8192_SHA384_LEGACY_KEY,
RSA_PSS_2048_8192_SHA512_LEGACY_KEY,
}
impl fmt::Display for SignatureScheme {
impl fmt::Display for SignatureAlgorithm {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
SignatureScheme::RSA_PKCS1_SHA1 => write!(f, "RSA_PKCS1_SHA1"),
SignatureScheme::ECDSA_SHA1_Legacy => write!(f, "ECDSA_SHA1_Legacy"),
SignatureScheme::RSA_PKCS1_SHA256 => write!(f, "RSA_PKCS1_SHA256"),
SignatureScheme::ECDSA_NISTP256_SHA256 => write!(f, "ECDSA_NISTP256_SHA256"),
SignatureScheme::RSA_PKCS1_SHA384 => write!(f, "RSA_PKCS1_SHA384"),
SignatureScheme::ECDSA_NISTP384_SHA384 => write!(f, "ECDSA_NISTP384_SHA384"),
SignatureScheme::RSA_PKCS1_SHA512 => write!(f, "RSA_PKCS1_SHA512"),
SignatureScheme::ECDSA_NISTP521_SHA512 => write!(f, "ECDSA_NISTP521_SHA512"),
SignatureScheme::RSA_PSS_SHA256 => write!(f, "RSA_PSS_SHA256"),
SignatureScheme::RSA_PSS_SHA384 => write!(f, "RSA_PSS_SHA384"),
SignatureScheme::RSA_PSS_SHA512 => write!(f, "RSA_PSS_SHA512"),
SignatureScheme::ED25519 => write!(f, "ED25519"),
SignatureAlgorithm::ECDSA_NISTP256_SHA256 => write!(f, "ECDSA_NISTP256_SHA256"),
SignatureAlgorithm::ECDSA_NISTP256_SHA384 => write!(f, "ECDSA_NISTP256_SHA384"),
SignatureAlgorithm::ECDSA_NISTP384_SHA256 => write!(f, "ECDSA_NISTP384_SHA256"),
SignatureAlgorithm::ECDSA_NISTP384_SHA384 => write!(f, "ECDSA_NISTP384_SHA384"),
SignatureAlgorithm::ED25519 => write!(f, "ED25519"),
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256 => {
write!(f, "RSA_PKCS1_2048_8192_SHA256")
}
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384 => {
write!(f, "RSA_PKCS1_2048_8192_SHA384")
}
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512 => {
write!(f, "RSA_PKCS1_2048_8192_SHA512")
}
SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
write!(f, "RSA_PSS_2048_8192_SHA256_LEGACY_KEY")
}
SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
write!(f, "RSA_PSS_2048_8192_SHA384_LEGACY_KEY")
}
SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
write!(f, "RSA_PSS_2048_8192_SHA512_LEGACY_KEY")
}
}
}
}
impl TryFrom<tls_core::msgs::enums::SignatureScheme> for SignatureScheme {
type Error = &'static str;
fn try_from(value: tls_core::msgs::enums::SignatureScheme) -> Result<Self, Self::Error> {
use tls_core::msgs::enums::SignatureScheme as Core;
use SignatureScheme::*;
Ok(match value {
Core::RSA_PKCS1_SHA1 => RSA_PKCS1_SHA1,
Core::ECDSA_SHA1_Legacy => ECDSA_SHA1_Legacy,
Core::RSA_PKCS1_SHA256 => RSA_PKCS1_SHA256,
Core::ECDSA_NISTP256_SHA256 => ECDSA_NISTP256_SHA256,
Core::RSA_PKCS1_SHA384 => RSA_PKCS1_SHA384,
Core::ECDSA_NISTP384_SHA384 => ECDSA_NISTP384_SHA384,
Core::RSA_PKCS1_SHA512 => RSA_PKCS1_SHA512,
Core::ECDSA_NISTP521_SHA512 => ECDSA_NISTP521_SHA512,
Core::RSA_PSS_SHA256 => RSA_PSS_SHA256,
Core::RSA_PSS_SHA384 => RSA_PSS_SHA384,
Core::RSA_PSS_SHA512 => RSA_PSS_SHA512,
Core::ED25519 => ED25519,
_ => return Err("unsupported signature scheme"),
})
}
}
impl From<SignatureScheme> for tls_core::msgs::enums::SignatureScheme {
fn from(value: SignatureScheme) -> Self {
use tls_core::msgs::enums::SignatureScheme::*;
impl From<tls_core::verify::SignatureAlgorithm> for SignatureAlgorithm {
fn from(value: tls_core::verify::SignatureAlgorithm) -> Self {
use tls_core::verify::SignatureAlgorithm as Core;
match value {
SignatureScheme::RSA_PKCS1_SHA1 => RSA_PKCS1_SHA1,
SignatureScheme::ECDSA_SHA1_Legacy => ECDSA_SHA1_Legacy,
SignatureScheme::RSA_PKCS1_SHA256 => RSA_PKCS1_SHA256,
SignatureScheme::ECDSA_NISTP256_SHA256 => ECDSA_NISTP256_SHA256,
SignatureScheme::RSA_PKCS1_SHA384 => RSA_PKCS1_SHA384,
SignatureScheme::ECDSA_NISTP384_SHA384 => ECDSA_NISTP384_SHA384,
SignatureScheme::RSA_PKCS1_SHA512 => RSA_PKCS1_SHA512,
SignatureScheme::ECDSA_NISTP521_SHA512 => ECDSA_NISTP521_SHA512,
SignatureScheme::RSA_PSS_SHA256 => RSA_PSS_SHA256,
SignatureScheme::RSA_PSS_SHA384 => RSA_PSS_SHA384,
SignatureScheme::RSA_PSS_SHA512 => RSA_PSS_SHA512,
SignatureScheme::ED25519 => ED25519,
Core::ECDSA_NISTP256_SHA256 => SignatureAlgorithm::ECDSA_NISTP256_SHA256,
Core::ECDSA_NISTP256_SHA384 => SignatureAlgorithm::ECDSA_NISTP256_SHA384,
Core::ECDSA_NISTP384_SHA256 => SignatureAlgorithm::ECDSA_NISTP384_SHA256,
Core::ECDSA_NISTP384_SHA384 => SignatureAlgorithm::ECDSA_NISTP384_SHA384,
Core::ED25519 => SignatureAlgorithm::ED25519,
Core::RSA_PKCS1_2048_8192_SHA256 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256,
Core::RSA_PKCS1_2048_8192_SHA384 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384,
Core::RSA_PKCS1_2048_8192_SHA512 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512,
Core::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY
}
Core::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY
}
Core::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY
}
}
}
}
@@ -201,8 +192,8 @@ impl From<SignatureScheme> for tls_core::msgs::enums::SignatureScheme {
/// Server's signature of the key exchange parameters.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServerSignature {
/// Signature scheme.
pub scheme: SignatureScheme,
/// Signature algorithm.
pub alg: SignatureAlgorithm,
/// Signature data.
pub sig: Vec<u8>,
}
@@ -359,20 +350,23 @@ impl HandshakeData {
message.extend_from_slice(&server_ephemeral_key.kx_params());
use webpki::ring as alg;
let sig_alg = match self.sig.scheme {
SignatureScheme::RSA_PKCS1_SHA256 => alg::RSA_PKCS1_2048_8192_SHA256,
SignatureScheme::RSA_PKCS1_SHA384 => alg::RSA_PKCS1_2048_8192_SHA384,
SignatureScheme::RSA_PKCS1_SHA512 => alg::RSA_PKCS1_2048_8192_SHA512,
SignatureScheme::RSA_PSS_SHA256 => alg::RSA_PSS_2048_8192_SHA256_LEGACY_KEY,
SignatureScheme::RSA_PSS_SHA384 => alg::RSA_PSS_2048_8192_SHA384_LEGACY_KEY,
SignatureScheme::RSA_PSS_SHA512 => alg::RSA_PSS_2048_8192_SHA512_LEGACY_KEY,
SignatureScheme::ECDSA_NISTP256_SHA256 => alg::ECDSA_P256_SHA256,
SignatureScheme::ECDSA_NISTP384_SHA384 => alg::ECDSA_P384_SHA384,
SignatureScheme::ED25519 => alg::ED25519,
scheme => {
return Err(HandshakeVerificationError::UnsupportedSignatureScheme(
scheme,
))
let sig_alg = match self.sig.alg {
SignatureAlgorithm::ECDSA_NISTP256_SHA256 => alg::ECDSA_P256_SHA256,
SignatureAlgorithm::ECDSA_NISTP256_SHA384 => alg::ECDSA_P256_SHA384,
SignatureAlgorithm::ECDSA_NISTP384_SHA256 => alg::ECDSA_P384_SHA256,
SignatureAlgorithm::ECDSA_NISTP384_SHA384 => alg::ECDSA_P384_SHA384,
SignatureAlgorithm::ED25519 => alg::ED25519,
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256 => alg::RSA_PKCS1_2048_8192_SHA256,
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384 => alg::RSA_PKCS1_2048_8192_SHA384,
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512 => alg::RSA_PKCS1_2048_8192_SHA512,
SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
alg::RSA_PSS_2048_8192_SHA256_LEGACY_KEY
}
SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
alg::RSA_PSS_2048_8192_SHA384_LEGACY_KEY
}
SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
alg::RSA_PSS_2048_8192_SHA512_LEGACY_KEY
}
};
@@ -402,8 +396,6 @@ pub enum HandshakeVerificationError {
InvalidServerEphemeralKey,
#[error("server certificate verification failed: {0}")]
ServerCert(ServerCertVerifierError),
#[error("unsupported signature scheme: {0}")]
UnsupportedSignatureScheme(SignatureScheme),
}
#[cfg(test)]

View File

@@ -10,7 +10,8 @@ use hex::FromHex;
use crate::{
connection::{
CertBinding, CertBindingV1_2, ConnectionInfo, DnsName, HandshakeData, KeyType,
ServerEphemKey, ServerName, ServerSignature, SignatureScheme, TlsVersion, TranscriptLength,
ServerEphemKey, ServerName, ServerSignature, SignatureAlgorithm, TlsVersion,
TranscriptLength,
},
transcript::{
encoding::{EncoderSecret, EncodingProvider},
@@ -47,7 +48,7 @@ impl ConnectionFixture {
CertificateDer(include_bytes!("fixtures/data/tlsnotary.org/ca.der").to_vec()),
],
sig: ServerSignature {
scheme: SignatureScheme::RSA_PKCS1_SHA256,
alg: SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256,
sig: Vec::<u8>::from_hex(include_bytes!(
"fixtures/data/tlsnotary.org/signature"
))
@@ -92,7 +93,7 @@ impl ConnectionFixture {
CertificateDer(include_bytes!("fixtures/data/appliedzkp.org/ca.der").to_vec()),
],
sig: ServerSignature {
scheme: SignatureScheme::ECDSA_NISTP256_SHA256,
alg: SignatureAlgorithm::ECDSA_NISTP256_SHA256,
sig: Vec::<u8>::from_hex(include_bytes!(
"fixtures/data/appliedzkp.org/signature"
))

View File

@@ -191,6 +191,11 @@ impl Hash {
len: value.len(),
}
}
/// Returns a byte slice of the hash value.
pub fn as_bytes(&self) -> &[u8] {
&self.value[..self.len]
}
}
impl rs_merkle::Hash for Hash {

View File

@@ -20,8 +20,8 @@ use serde::{Deserialize, Serialize};
use crate::{
connection::{HandshakeData, ServerName},
transcript::{
Direction, PartialTranscript, Transcript, TranscriptCommitConfig, TranscriptCommitRequest,
TranscriptCommitment, TranscriptSecret,
encoding::EncoderSecret, Direction, PartialTranscript, Transcript, TranscriptCommitConfig,
TranscriptCommitRequest, TranscriptCommitment, TranscriptSecret,
},
};
@@ -122,6 +122,14 @@ impl<'a> ProveConfigBuilder<'a> {
self.reveal(Direction::Sent, ranges)
}
/// Reveals all of the sent data transcript.
pub fn reveal_sent_all(&mut self) -> Result<&mut Self, ProveConfigBuilderError> {
let len = self.transcript.len_of_direction(Direction::Sent);
let (sent, _) = self.reveal.get_or_insert_default();
sent.union_mut(&(0..len));
Ok(self)
}
/// Reveals the given ranges of the received data transcript.
pub fn reveal_recv(
&mut self,
@@ -130,6 +138,14 @@ impl<'a> ProveConfigBuilder<'a> {
self.reveal(Direction::Received, ranges)
}
/// Reveals all of the received data transcript.
pub fn reveal_recv_all(&mut self) -> Result<&mut Self, ProveConfigBuilderError> {
let len = self.transcript.len_of_direction(Direction::Received);
let (_, recv) = self.reveal.get_or_insert_default();
recv.union_mut(&(0..len));
Ok(self)
}
/// Builds the configuration.
pub fn build(self) -> Result<ProveConfig, ProveConfigBuilderError> {
Ok(ProveConfig {
@@ -155,45 +171,9 @@ enum ProveConfigBuilderErrorRepr {
},
}
/// Configuration to verify information from the prover.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct VerifyConfig {}
impl VerifyConfig {
/// Creates a new builder.
pub fn builder() -> VerifyConfigBuilder {
VerifyConfigBuilder::new()
}
}
/// Builder for [`VerifyConfig`].
#[derive(Debug, Default)]
pub struct VerifyConfigBuilder {}
impl VerifyConfigBuilder {
/// Creates a new builder.
pub fn new() -> Self {
Self {}
}
/// Builds the configuration.
pub fn build(self) -> Result<VerifyConfig, VerifyConfigBuilderError> {
Ok(VerifyConfig {})
}
}
/// Error for [`VerifyConfigBuilder`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct VerifyConfigBuilderError(#[from] VerifyConfigBuilderErrorRepr);
#[derive(Debug, thiserror::Error)]
enum VerifyConfigBuilderErrorRepr {}
/// Payload sent to the verifier.
#[doc(hidden)]
/// Request to prove statements about the connection.
#[derive(Debug, Serialize, Deserialize)]
pub struct ProvePayload {
pub struct ProveRequest {
/// Handshake data.
pub handshake: Option<(ServerName, HandshakeData)>,
/// Transcript data.
@@ -220,6 +200,8 @@ pub struct VerifierOutput {
pub server_name: Option<ServerName>,
/// Transcript data.
pub transcript: Option<PartialTranscript>,
/// Encoding commitment secret.
pub encoder_secret: Option<EncoderSecret>,
/// Transcript commitments.
pub transcript_commitments: Vec<TranscriptCommitment>,
}

View File

@@ -2,7 +2,7 @@
use std::{collections::HashSet, fmt};
use rangeset::ToRangeSet;
use rangeset::{ToRangeSet, UnionMut};
use serde::{Deserialize, Serialize};
use crate::{
@@ -114,7 +114,19 @@ impl TranscriptCommitConfig {
/// Returns a request for the transcript commitments.
pub fn to_request(&self) -> TranscriptCommitRequest {
TranscriptCommitRequest {
encoding: self.has_encoding,
encoding: self.has_encoding.then(|| {
let mut sent = RangeSet::default();
let mut recv = RangeSet::default();
for (dir, idx) in self.iter_encoding() {
match dir {
Direction::Sent => sent.union_mut(idx),
Direction::Received => recv.union_mut(idx),
}
}
(sent, recv)
}),
hash: self
.iter_hash()
.map(|((dir, idx), alg)| (*dir, idx.clone(), *alg))
@@ -289,14 +301,14 @@ impl fmt::Display for TranscriptCommitConfigBuilderError {
/// Request to compute transcript commitments.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TranscriptCommitRequest {
encoding: bool,
encoding: Option<(RangeSet<usize>, RangeSet<usize>)>,
hash: Vec<(Direction, RangeSet<usize>, HashAlgId)>,
}
impl TranscriptCommitRequest {
/// Returns `true` if an encoding commitment is requested.
pub fn encoding(&self) -> bool {
self.encoding
pub fn has_encoding(&self) -> bool {
self.encoding.is_some()
}
/// Returns `true` if a hash commitment is requested.
@@ -308,6 +320,11 @@ impl TranscriptCommitRequest {
pub fn iter_hash(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>, HashAlgId)> {
self.hash.iter()
}
/// Returns the ranges of the encoding commitments.
pub fn encoding(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
self.encoding.as_ref()
}
}
#[cfg(test)]

View File

@@ -19,6 +19,4 @@ use crate::hash::TypedHash;
pub struct EncodingCommitment {
/// Merkle root of the encoding commitments.
pub root: TypedHash,
/// Seed used to generate the encodings.
pub secret: EncoderSecret,
}

View File

@@ -8,7 +8,7 @@ use crate::{
merkle::{MerkleError, MerkleProof},
transcript::{
commit::MAX_TOTAL_COMMITTED_DATA,
encoding::{new_encoder, Encoder, EncodingCommitment},
encoding::{new_encoder, Encoder, EncoderSecret, EncodingCommitment},
Direction,
},
};
@@ -48,13 +48,14 @@ impl EncodingProof {
pub fn verify_with_provider(
&self,
provider: &HashProvider,
secret: &EncoderSecret,
commitment: &EncodingCommitment,
sent: &[u8],
recv: &[u8],
) -> Result<(RangeSet<usize>, RangeSet<usize>), EncodingProofError> {
let hasher = provider.get(&commitment.root.alg)?;
let encoder = new_encoder(&commitment.secret);
let encoder = new_encoder(secret);
let Self {
inclusion_proof,
openings,
@@ -232,10 +233,7 @@ mod test {
use crate::{
fixtures::{encoder_secret, encoder_secret_tampered_seed, encoding_provider},
hash::Blake3,
transcript::{
encoding::{EncoderSecret, EncodingTree},
Transcript,
},
transcript::{encoding::EncodingTree, Transcript},
};
use super::*;
@@ -246,7 +244,7 @@ mod test {
commitment: EncodingCommitment,
}
fn new_encoding_fixture(secret: EncoderSecret) -> EncodingFixture {
fn new_encoding_fixture() -> EncodingFixture {
let transcript = Transcript::new(POST_JSON, OK_JSON);
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
@@ -257,10 +255,7 @@ mod test {
let proof = tree.proof([&idx_0, &idx_1].into_iter()).unwrap();
let commitment = EncodingCommitment {
root: tree.root(),
secret,
};
let commitment = EncodingCommitment { root: tree.root() };
EncodingFixture {
transcript,
@@ -275,11 +270,12 @@ mod test {
transcript,
proof,
commitment,
} = new_encoding_fixture(encoder_secret_tampered_seed());
} = new_encoding_fixture();
let err = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret_tampered_seed(),
&commitment,
transcript.sent(),
transcript.received(),
@@ -295,13 +291,19 @@ mod test {
transcript,
proof,
commitment,
} = new_encoding_fixture(encoder_secret());
} = new_encoding_fixture();
let sent = &transcript.sent()[transcript.sent().len() - 1..];
let recv = &transcript.received()[transcript.received().len() - 2..];
let err = proof
.verify_with_provider(&HashProvider::default(), &commitment, sent, recv)
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
sent,
recv,
)
.unwrap_err();
assert!(matches!(err.kind, ErrorKind::Proof));
@@ -313,7 +315,7 @@ mod test {
transcript,
mut proof,
commitment,
} = new_encoding_fixture(encoder_secret());
} = new_encoding_fixture();
let Opening { idx, .. } = proof.openings.values_mut().next().unwrap();
@@ -322,6 +324,7 @@ mod test {
let err = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
transcript.sent(),
transcript.received(),
@@ -337,7 +340,7 @@ mod test {
transcript,
mut proof,
commitment,
} = new_encoding_fixture(encoder_secret());
} = new_encoding_fixture();
let Opening { blinder, .. } = proof.openings.values_mut().next().unwrap();
@@ -346,6 +349,7 @@ mod test {
let err = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
transcript.sent(),
transcript.received(),

View File

@@ -222,14 +222,12 @@ mod tests {
let proof = tree.proof([&idx_0, &idx_1].into_iter()).unwrap();
let commitment = EncodingCommitment {
root: tree.root(),
secret: encoder_secret(),
};
let commitment = EncodingCommitment { root: tree.root() };
let (auth_sent, auth_recv) = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
transcript.sent(),
transcript.received(),
@@ -260,14 +258,12 @@ mod tests {
.proof([&idx_0, &idx_1, &idx_2, &idx_3].into_iter())
.unwrap();
let commitment = EncodingCommitment {
root: tree.root(),
secret: encoder_secret(),
};
let commitment = EncodingCommitment { root: tree.root() };
let (auth_sent, auth_recv) = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
transcript.sent(),
transcript.received(),

View File

@@ -10,7 +10,7 @@ use crate::{
hash::{HashAlgId, HashProvider},
transcript::{
commit::{TranscriptCommitment, TranscriptCommitmentKind},
encoding::{EncodingProof, EncodingProofError, EncodingTree},
encoding::{EncoderSecret, EncodingProof, EncodingProofError, EncodingTree},
hash::{hash_plaintext, PlaintextHash, PlaintextHashSecret},
Direction, PartialTranscript, RangeSet, Transcript, TranscriptSecret,
},
@@ -22,6 +22,9 @@ const DEFAULT_COMMITMENT_KINDS: &[TranscriptCommitmentKind] = &[
TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256,
},
TranscriptCommitmentKind::Hash {
alg: HashAlgId::BLAKE3,
},
TranscriptCommitmentKind::Encoding,
];
@@ -48,6 +51,7 @@ impl TranscriptProof {
self,
provider: &HashProvider,
length: &TranscriptLength,
encoder_secret: Option<&EncoderSecret>,
commitments: impl IntoIterator<Item = &'a TranscriptCommitment>,
) -> Result<PartialTranscript, TranscriptProofError> {
let mut encoding_commitment = None;
@@ -83,6 +87,13 @@ impl TranscriptProof {
// Verify encoding proof.
if let Some(proof) = self.encoding_proof {
let secret = encoder_secret.ok_or_else(|| {
TranscriptProofError::new(
ErrorKind::Encoding,
"contains an encoding proof but missing encoder secret",
)
})?;
let commitment = encoding_commitment.ok_or_else(|| {
TranscriptProofError::new(
ErrorKind::Encoding,
@@ -92,6 +103,7 @@ impl TranscriptProof {
let (auth_sent, auth_recv) = proof.verify_with_provider(
provider,
secret,
commitment,
self.transcript.sent_unsafe(),
self.transcript.received_unsafe(),
@@ -572,7 +584,7 @@ mod tests {
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
use crate::{
fixtures::encoding_provider,
fixtures::{encoder_secret, encoding_provider},
hash::{Blake3, Blinder, HashAlgId},
transcript::TranscriptCommitConfigBuilder,
};
@@ -599,7 +611,12 @@ mod tests {
let provider = HashProvider::default();
let err = transcript_proof
.verify_with_provider(&provider, &transcript.length(), &[])
.verify_with_provider(
&provider,
&transcript.length(),
Some(&encoder_secret()),
&[],
)
.err()
.unwrap();
@@ -637,7 +654,9 @@ mod tests {
}
#[rstest]
fn test_reveal_with_hash_commitment() {
#[case::sha256(HashAlgId::SHA256)]
#[case::blake3(HashAlgId::BLAKE3)]
fn test_reveal_with_hash_commitment(#[case] alg: HashAlgId) {
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
let provider = HashProvider::default();
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
@@ -645,7 +664,6 @@ mod tests {
let direction = Direction::Sent;
let idx = RangeSet::from(0..10);
let blinder: Blinder = rng.random();
let alg = HashAlgId::SHA256;
let hasher = provider.get(&alg).unwrap();
let commitment = PlaintextHash {
@@ -672,6 +690,7 @@ mod tests {
.verify_with_provider(
&provider,
&transcript.length(),
None,
&[TranscriptCommitment::Hash(commitment)],
)
.unwrap();
@@ -683,7 +702,9 @@ mod tests {
}
#[rstest]
fn test_reveal_with_inconsistent_hash_commitment() {
#[case::sha256(HashAlgId::SHA256)]
#[case::blake3(HashAlgId::BLAKE3)]
fn test_reveal_with_inconsistent_hash_commitment(#[case] alg: HashAlgId) {
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
let provider = HashProvider::default();
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
@@ -691,7 +712,6 @@ mod tests {
let direction = Direction::Sent;
let idx = RangeSet::from(0..10);
let blinder: Blinder = rng.random();
let alg = HashAlgId::SHA256;
let hasher = provider.get(&alg).unwrap();
let commitment = PlaintextHash {
@@ -719,6 +739,7 @@ mod tests {
.verify_with_provider(
&provider,
&transcript.length(),
None,
&[TranscriptCommitment::Hash(commitment)],
)
.unwrap_err();

View File

@@ -7,6 +7,7 @@ use crate::{
transcript::{Direction, Transcript},
webpki::CertificateDer,
};
use serde::{Deserialize, Serialize};
use tls_core::msgs::{
alert::AlertMessagePayload,
codec::{Codec, Reader},
@@ -15,7 +16,7 @@ use tls_core::msgs::{
};
/// A transcript of TLS records sent and received by the prover.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsTranscript {
time: u64,
version: TlsVersion,
@@ -291,7 +292,7 @@ impl TlsTranscript {
}
/// A TLS record.
#[derive(Clone)]
#[derive(Clone, Serialize, Deserialize)]
pub struct Record {
/// Sequence number.
pub seq: u64,

View File

@@ -3,4 +3,4 @@ Cookie: very-secret-cookie
Content-Length: 44
Content-Type: application/json
{"foo": "bar", "bazz": 123, "buzz": [1,"5"]}
{"foo": "bar", "bazz": 123, "buzz": [1,"5"]}

View File

@@ -15,6 +15,7 @@ tlsn-server-fixture = { workspace = true }
tlsn-server-fixture-certs = { workspace = true }
spansy = { workspace = true }
anyhow = { workspace = true }
bincode = { workspace = true }
chrono = { workspace = true }
clap = { version = "4.5", features = ["derive"] }
@@ -24,6 +25,7 @@ hex = { workspace = true }
hyper = { workspace = true, features = ["client", "http1"] }
hyper-util = { workspace = true, features = ["full"] }
k256 = { workspace = true, features = ["ecdsa"] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
tokio = { workspace = true, features = [
"rt",
@@ -36,11 +38,18 @@ tokio = { workspace = true, features = [
tokio-util = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
noir = { git = "https://github.com/zkmopro/noir-rs", tag = "v1.0.0-beta.8", features = [
"barretenberg",
] }
[[example]]
name = "interactive"
path = "interactive/interactive.rs"
[[example]]
name = "interactive_zk"
path = "interactive_zk/interactive_zk.rs"
[[example]]
name = "attestation_prove"
path = "attestation/prove.rs"

View File

@@ -22,13 +22,11 @@ use tlsn::{
signing::Secp256k1Signer,
Attestation, AttestationConfig, CryptoProvider, Secrets,
},
config::{
CertificateDer, PrivateKeyDer, ProtocolConfig, ProtocolConfigValidator, RootCertStore,
},
config::{CertificateDer, PrivateKeyDer, ProtocolConfig, RootCertStore},
connection::{ConnectionInfo, HandshakeData, ServerName, TranscriptLength},
prover::{state::Committed, ProveConfig, Prover, ProverConfig, ProverOutput, TlsConfig},
transcript::{ContentType, TranscriptCommitConfig},
verifier::{Verifier, VerifierConfig, VerifierOutput, VerifyConfig},
verifier::{Verifier, VerifierConfig, VerifierOutput},
};
use tlsn_examples::ExampleType;
use tlsn_formats::http::{DefaultHttpCommitter, HttpCommit, HttpTranscript};
@@ -175,7 +173,7 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
assert!(response.status() == StatusCode::OK);
// The prover task should be done now, so we can await it.
let mut prover = prover_task.await??;
let prover = prover_task.await??;
// Parse the HTTP transcript.
let transcript = HttpTranscript::parse(prover.transcript())?;
@@ -217,7 +215,7 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
let request_config = builder.build()?;
let (attestation, secrets) = notarize(&mut prover, &request_config, req_tx, resp_rx).await?;
let (attestation, secrets) = notarize(prover, &request_config, req_tx, resp_rx).await?;
// Write the attestation to disk.
let attestation_path = tlsn_examples::get_file_path(example_type, "attestation");
@@ -238,7 +236,7 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
}
async fn notarize(
prover: &mut Prover<Committed>,
mut prover: Prover<Committed>,
config: &RequestConfig,
request_tx: Sender<AttestationRequest>,
attestation_rx: Receiver<Attestation>,
@@ -257,25 +255,27 @@ async fn notarize(
..
} = prover.prove(&disclosure_config).await?;
let transcript = prover.transcript().clone();
let tls_transcript = prover.tls_transcript().clone();
prover.close().await?;
// Build an attestation request.
let mut builder = AttestationRequest::builder(config);
builder
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
.handshake_data(HandshakeData {
certs: prover
.tls_transcript()
certs: tls_transcript
.server_cert_chain()
.expect("server cert chain is present")
.to_vec(),
sig: prover
.tls_transcript()
sig: tls_transcript
.server_signature()
.expect("server signature is present")
.clone(),
binding: prover.tls_transcript().certificate_binding().clone(),
binding: tls_transcript.certificate_binding().clone(),
})
.transcript(prover.transcript().clone())
.transcript(transcript)
.transcript_commitments(transcript_secrets, transcript_commitments);
let (request, secrets) = builder.build(&CryptoProvider::default())?;
@@ -301,13 +301,6 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
request_rx: Receiver<AttestationRequest>,
attestation_tx: Sender<Attestation>,
) -> Result<(), Box<dyn std::error::Error>> {
// Set up Verifier.
let config_validator = ProtocolConfigValidator::builder()
.max_sent_data(tlsn_examples::MAX_SENT_DATA)
.max_recv_data(tlsn_examples::MAX_RECV_DATA)
.build()
.unwrap();
// Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
@@ -315,20 +308,24 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.protocol_config_validator(config_validator)
.build()
.unwrap();
let mut verifier = Verifier::new(verifier_config)
let verifier = Verifier::new(verifier_config)
.setup(socket.compat())
.await?
.accept()
.await?
.run()
.await?;
let VerifierOutput {
transcript_commitments,
..
} = verifier.verify(&VerifyConfig::default()).await?;
let (
VerifierOutput {
transcript_commitments,
..
},
verifier,
) = verifier.verify().await?.accept().await?;
let tls_transcript = verifier.tls_transcript().clone();

View File

@@ -3,6 +3,7 @@ use std::{
net::{IpAddr, SocketAddr},
};
use anyhow::Result;
use http_body_util::Empty;
use hyper::{body::Bytes, Request, StatusCode, Uri};
use hyper_util::rt::TokioIo;
@@ -11,11 +12,11 @@ use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
use tracing::instrument;
use tlsn::{
config::{CertificateDer, ProtocolConfig, ProtocolConfigValidator, RootCertStore},
config::{CertificateDer, ProtocolConfig, RootCertStore},
connection::ServerName,
prover::{ProveConfig, Prover, ProverConfig, TlsConfig},
transcript::PartialTranscript,
verifier::{Verifier, VerifierConfig, VerifierOutput, VerifyConfig},
verifier::{Verifier, VerifierConfig, VerifierOutput},
};
use tlsn_server_fixture::DEFAULT_FIXTURE_PORT;
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
@@ -46,7 +47,7 @@ async fn main() {
let (prover_socket, verifier_socket) = tokio::io::duplex(1 << 23);
let prover = prover(prover_socket, &server_addr, &uri);
let verifier = verifier(verifier_socket);
let (_, transcript) = tokio::join!(prover, verifier);
let (_, transcript) = tokio::try_join!(prover, verifier).unwrap();
println!("Successfully verified {}", &uri);
println!(
@@ -64,7 +65,7 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
verifier_socket: T,
server_addr: &SocketAddr,
uri: &str,
) {
) -> Result<()> {
let uri = uri.parse::<Uri>().unwrap();
assert_eq!(uri.scheme().unwrap().as_str(), "https");
let server_domain = uri.authority().unwrap().host();
@@ -98,15 +99,13 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
// Perform the setup phase with the verifier.
let prover = Prover::new(prover_config)
.setup(verifier_socket.compat())
.await
.unwrap();
.await?;
// Connect to TLS Server.
let tls_client_socket = tokio::net::TcpStream::connect(server_addr).await.unwrap();
let tls_client_socket = tokio::net::TcpStream::connect(server_addr).await?;
// Pass server connection into the prover.
let (mpc_tls_connection, prover_fut) =
prover.connect(tls_client_socket.compat()).await.unwrap();
let (mpc_tls_connection, prover_fut) = prover.connect(tls_client_socket.compat()).await?;
// Wrap the connection in a TokioIo compatibility layer to use it with hyper.
let mpc_tls_connection = TokioIo::new(mpc_tls_connection.compat());
@@ -116,9 +115,7 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
// MPC-TLS Handshake.
let (mut request_sender, connection) =
hyper::client::conn::http1::handshake(mpc_tls_connection)
.await
.unwrap();
hyper::client::conn::http1::handshake(mpc_tls_connection).await?;
// Spawn the connection to run in the background.
tokio::spawn(connection);
@@ -130,14 +127,13 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
.header("Connection", "close")
.header("Secret", SECRET)
.method("GET")
.body(Empty::<Bytes>::new())
.unwrap();
let response = request_sender.send_request(request).await.unwrap();
.body(Empty::<Bytes>::new())?;
let response = request_sender.send_request(request).await?;
assert!(response.status() == StatusCode::OK);
// Create proof for the Verifier.
let mut prover = prover_task.await.unwrap().unwrap();
let mut prover = prover_task.await??;
let mut builder = ProveConfig::builder(prover.transcript());
@@ -153,10 +149,8 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
.expect("the secret should be in the sent data");
// Reveal everything except for the secret.
builder.reveal_sent(&(0..pos)).unwrap();
builder
.reveal_sent(&(pos + SECRET.len()..prover.transcript().sent().len()))
.unwrap();
builder.reveal_sent(&(0..pos))?;
builder.reveal_sent(&(pos + SECRET.len()..prover.transcript().sent().len()))?;
// Find the substring "Dick".
let pos = prover
@@ -167,28 +161,21 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
.expect("the substring 'Dick' should be in the received data");
// Reveal everything except for the substring.
builder.reveal_recv(&(0..pos)).unwrap();
builder
.reveal_recv(&(pos + 4..prover.transcript().received().len()))
.unwrap();
builder.reveal_recv(&(0..pos))?;
builder.reveal_recv(&(pos + 4..prover.transcript().received().len()))?;
let config = builder.build().unwrap();
let config = builder.build()?;
prover.prove(&config).await.unwrap();
prover.close().await.unwrap();
prover.prove(&config).await?;
prover.close().await?;
Ok(())
}
#[instrument(skip(socket))]
async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
socket: T,
) -> PartialTranscript {
// Set up Verifier.
let config_validator = ProtocolConfigValidator::builder()
.max_sent_data(MAX_SENT_DATA)
.max_recv_data(MAX_RECV_DATA)
.build()
.unwrap();
) -> Result<PartialTranscript> {
// Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
@@ -196,20 +183,51 @@ async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.protocol_config_validator(config_validator)
.build()
.unwrap();
.build()?;
let verifier = Verifier::new(verifier_config);
// Receive authenticated data.
let VerifierOutput {
server_name,
transcript,
..
} = verifier
.verify(socket.compat(), &VerifyConfig::default())
.await
.unwrap();
// Validate the proposed configuration and then run the TLS commitment protocol.
let verifier = verifier.setup(socket.compat()).await?;
// This is the opportunity to ensure the prover does not attempt to overload the
// verifier.
let reject = if verifier.config().max_sent_data() > MAX_SENT_DATA {
Some("max_sent_data is too large")
} else if verifier.config().max_recv_data() > MAX_RECV_DATA {
Some("max_recv_data is too large")
} else {
None
};
if reject.is_some() {
verifier.reject(reject).await?;
return Err(anyhow::anyhow!("protocol configuration rejected"));
}
// Runs the TLS commitment protocol to completion.
let verifier = verifier.accept().await?.run().await?;
// Validate the proving request and then verify.
let verifier = verifier.verify().await?;
if verifier.request().handshake.is_none() {
let verifier = verifier
.reject(Some("expecting to verify the server name"))
.await?;
verifier.close().await?;
return Err(anyhow::anyhow!("prover did not reveal the server name"));
}
let (
VerifierOutput {
server_name,
transcript,
..
},
verifier,
) = verifier.accept().await?;
verifier.close().await?;
let server_name = server_name.expect("prover should have revealed server name");
let transcript = transcript.expect("prover should have revealed transcript data");
@@ -232,7 +250,7 @@ async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
let ServerName::Dns(server_name) = server_name;
assert_eq!(server_name.as_str(), SERVER_DOMAIN);
transcript
Ok(transcript)
}
/// Render redacted bytes as `🙈`.

View File

@@ -0,0 +1,5 @@
!noir/target/
# Ignore everything inside noir/target
noir/target/*
# Except noir.json
!noir/target/noir.json

View File

@@ -0,0 +1,167 @@
# Interactive Zero-Knowledge Age Verification with TLSNotary
This example demonstrates **privacy-preserving age verification** using TLSNotary and zero-knowledge proofs. It allows a prover to demonstrate they are 18+ years old without revealing their actual birth date or any other personal information.
## 🔍 How It Works (simplified overview)
```mermaid
sequenceDiagram
participant S as Tax Server<br/>(fixture)
participant P as Prover
participant V as Verifier
P->>S: Request tax data (with auth token) (MPC-TLS)
S->>P: Tax data including `date_of_birth` (MPC-TLS)
P->>V: Share transcript with redactions
P->>V: Commit to blinded hash of birth date
P->>P: Generate ZK proof of age ≥ 18
P->>V: Send ZK proof
V->>V: Verify transcript & ZK proof
V->>V: ✅ Confirm: Prover is 18+ (no birth date revealed)
```
### The Process
1. **MPC-TLS Session**: The Prover fetches tax information containing their birth date, while the Verifier jointly verifies the TLS session to ensure the data comes from the authentic server.
2. **Selective Disclosure**:
* The authorization token is **redacted**: the Verifier sees the plaintext request but not the token.
* The birth date is **committed** as a blinded hash: the Verifier cannot see the date, but the Prover is cryptographically bound to it.
(Depending on the use case more data can be redacted or revealed)
3. **Zero-Knowledge Proof**: The Prover generates a ZK proof that the committed birth date corresponds to an age ≥ 18.
4. **Verification**: The Verifier checks both the TLS transcript and the ZK proof, confirming age ≥ 18 without learning the actual date of birth.
### Example Data
The tax server returns data like this:
```json
{
"tax_year": 2024,
"taxpayer": {
"idnr": "12345678901",
"first_name": "Max",
"last_name": "Mustermann",
"date_of_birth": "1985-03-12",
// ...
}
}
```
## 🔐 Zero-Knowledge Proof Details
The ZK circuit proves: **"I know a birth date that hashes to the committed value AND indicates I am 18+ years old"**
**Public Inputs:**
- ✅ Verification date
- ✅ Committed blinded hash of birth date
**Private Inputs (Hidden):**
- 🔒 Actual birth date plaintext
- 🔒 Random blinder used in hash commitment
**What the Verifier Learns:**
- ✅ The prover is 18+ years old
- ✅ The birth date is authentic (from the MPC-TLS session)
Everything else remains private.
## 🏃 Run the Example
1. **Start the test server** (from repository root):
```bash
RUST_LOG=info PORT=4000 cargo run --bin tlsn-server-fixture
```
2. **Run the age verification** (in a new terminal):
```bash
SERVER_PORT=4000 cargo run --release --example interactive_zk
```
3. **For detailed logs**:
```bash
RUST_LOG=debug,yamux=info,uid_mux=info SERVER_PORT=4000 cargo run --release --example interactive_zk
```
### Expected Output
```
Successfully verified https://test-server.io:4000/elster
Age verified in ZK: 18+ ✅
Verified sent data:
GET https://test-server.io:4000/elster HTTP/1.1
host: test-server.io
connection: close
authorization: 🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈
Verified received data:
🙈🙈🙈🙈🙈🙈🙈🙈[truncated for brevity]...🙈🙈🙈🙈🙈"tax_year":2024🙈🙈🙈🙈🙈...
```
> 💡 **Note**: In this demo, both Prover and Verifier run on the same machine. In production, they would operate on separate systems.
> 💡 **Note**: This demo assumes that the tax server serves correct data, and that only the submitter of the tax data has access to the specified page.
## 🛠 Development
### Project Structure
```
interactive_zk/
├── prover.rs               # Prover implementation
├── verifier.rs             # Verifier implementation
├── types.rs                # Shared types
├── interactive_zk.rs       # Main example runner
├── noir/                   # Zero-knowledge circuit
│   ├── src/main.nr         # Noir circuit code
│   ├── target/             # Compiled circuit artifacts
│   ├── Nargo.toml          # Noir project config
│   ├── Prover.toml         # Example input for `nargo execute`
│   └── generate_test_data.rs  # Rust script to generate Noir test data
└── README.md
```
### Noir Circuit Commands
We use [Mopro's `noir_rs`](https://zkmopro.org/docs/crates/noir-rs/) for ZK proof generation. The **circuit is pre-compiled and ready to use**. You don't need to install Noir tools to run the example. But if you want to change or test the circuit in isolation, you can use the following instructions.
Before you proceed, we recommend double-checking that your Noir tooling matches the versions used in Mopro's `noir_rs`:
```sh
# Install correct Noir and BB versions (important for compatibility!)
noirup --version 1.0.0-beta.8
bbup -v 1.0.0-nightly.20250723
```
If you don't have `noirup` and `bbup` installed yet, check [Noir's Quick Start](https://noir-lang.org/docs/getting_started/quick_start).
To compile the circuit, go to the `noir` folder and run `nargo compile`.
To check and experiment with the Noir circuit, you can use these commands:
* Execute Circuit: Compile the circuit and run it with sample data from `Prover.toml`:
```sh
nargo execute
```
* Generate Verification Key: Create the verification key needed to verify proofs
```sh
bb write_vk -b ./target/noir.json -o ./target
```
* Generate Proof: Create a zero-knowledge proof using the circuit and witness data.
```sh
bb prove --bytecode_path ./target/noir.json --witness_path ./target/noir.gz -o ./target
```
* Verify Proof: Verify that a proof is valid using the verification key.
```sh
bb verify -k ./target/vk -p ./target/proof
```
* Run the Noir tests:
```sh
nargo test --show-output
```
To create extra tests, you can use `./generate_test_data.rs` to help with generating correct blinders and hashes.
## 📚 Learn More
- [TLSNotary Documentation](https://docs.tlsnotary.org/)
- [Noir Language Guide](https://noir-lang.org/)
- [Zero-Knowledge Proofs Explained](https://ethereum.org/en/zero-knowledge-proofs/)
- [Mopro ZK Toolkit](https://zkmopro.org/)

View File

@@ -0,0 +1,60 @@
mod prover;
mod types;
mod verifier;
use anyhow::Result;
use prover::prover;
use std::{
env,
net::{IpAddr, SocketAddr},
};
use tlsn_server_fixture::DEFAULT_FIXTURE_PORT;
use tlsn_server_fixture_certs::SERVER_DOMAIN;
use verifier::verifier;
/// Entry point: runs the prover and verifier concurrently against the local
/// server fixture, then prints the verified (redacted) transcript.
#[tokio::main]
async fn main() -> Result<()> {
    tracing_subscriber::fmt::init();

    // Server-fixture location, overridable via environment variables.
    let server_host: String = env::var("SERVER_HOST").unwrap_or_else(|_| "127.0.0.1".into());
    let server_port: u16 = match env::var("SERVER_PORT") {
        Ok(port) => port.parse().expect("port should be valid integer"),
        Err(_) => DEFAULT_FIXTURE_PORT,
    };

    // We use SERVER_DOMAIN here to make sure it matches the domain in the test
    // server's certificate.
    let uri = format!("https://{SERVER_DOMAIN}:{server_port}/elster");

    let server_ip = server_host
        .parse::<IpAddr>()
        .map_err(|e| anyhow::anyhow!("Invalid IP address '{server_host}': {e}"))?;
    let server_addr = SocketAddr::from((server_ip, server_port));

    // Connect prover and verifier over in-memory duplex pipes: one for the
    // TLSNotary protocol, one extra channel for the ZK proof exchange.
    let (prover_socket, verifier_socket) = tokio::io::duplex(1 << 23);
    let (prover_extra_socket, verifier_extra_socket) = tokio::io::duplex(1 << 23);

    let (_, transcript) = tokio::try_join!(
        prover(prover_socket, prover_extra_socket, &server_addr, &uri),
        verifier(verifier_socket, verifier_extra_socket)
    )?;

    println!("---");
    println!("Successfully verified {}", &uri);
    println!("Age verified in ZK: 18+ ✅\n");
    println!(
        "Verified sent data:\n{}",
        bytes_to_redacted_string(transcript.sent_unsafe())
    );
    println!(
        "Verified received data:\n{}",
        bytes_to_redacted_string(transcript.received_unsafe())
    );
    Ok(())
}
/// Render redacted bytes as `🙈`.
///
/// Bytes are first decoded as lossy UTF-8 (invalid sequences become U+FFFD),
/// then every NUL character — the redaction placeholder — is shown as `🙈`.
pub fn bytes_to_redacted_string(bytes: &[u8]) -> String {
    String::from_utf8_lossy(bytes)
        .chars()
        .map(|c| if c == '\0' { '🙈' } else { c })
        .collect()
}

View File

@@ -0,0 +1,8 @@
[package]
name = "noir"
type = "bin"
authors = [""]
[dependencies]
sha256 = { tag = "v0.1.5", git = "https://github.com/noir-lang/sha256" }
date = { tag = "v0.5.4", git = "https://github.com/madztheo/noir-date.git" }

View File

@@ -0,0 +1,8 @@
blinder = [108, 93, 120, 205, 15, 35, 159, 124, 243, 96, 22, 128, 16, 149, 219, 216]
committed_hash = [186, 158, 101, 39, 49, 48, 26, 83, 242, 96, 10, 221, 121, 174, 62, 50, 136, 132, 232, 58, 25, 32, 66, 196, 99, 85, 66, 85, 255, 1, 202, 254]
date_of_birth = "1985-03-12"
[proof_date]
day = "29"
month = "08"
year = "2025"

View File

@@ -0,0 +1,64 @@
#!/usr/bin/env -S cargo +nightly -Zscript
---
[package]
name = "generate_test_data"
version = "0.0.0"
edition = "2021"
publish = false
[dependencies]
sha2 = "0.10"
rand = "0.8"
chrono = "0.4"
---
use chrono::Datelike;
use chrono::Local;
use rand::RngCore;
use sha2::{Digest, Sha256};
/// Generates a fresh blinder + SHA-256 commitment for the fixed birth date
/// and prints a ready-to-paste Noir test body (see `noir/src/main.nr`).
fn main() {
    // 1. Birthdate string (fixed, 10 ASCII bytes — matches the circuit's str<10>)
    let dob_str = "1985-03-12"; // 10 bytes long
    // Today's date becomes the public `proof_date` input of the circuit.
    let proof_date = Local::now().date_naive();
    let proof_year = proof_date.year();
    let proof_month = proof_date.month();
    let proof_day = proof_date.day();
    // 2. Generate random 16-byte blinder
    let mut blinder = [0u8; 16];
    rand::thread_rng().fill_bytes(&mut blinder);
    // 3. Build the 26-byte preimage: dob string bytes first, then the blinder
    //    (dob || blinder) — the same layout the Noir circuit hashes.
    let mut preimage = Vec::with_capacity(26);
    preimage.extend_from_slice(dob_str.as_bytes());
    preimage.extend_from_slice(&blinder);
    // 4. Hash it to obtain the public commitment.
    let hash = Sha256::digest(&preimage);
    // Render both arrays as comma-separated decimal bytes for Noir source.
    let blinder = blinder
        .iter()
        .map(|b| b.to_string())
        .collect::<Vec<_>>()
        .join(", ");
    let committed_hash = hash
        .iter()
        .map(|b| b.to_string())
        .collect::<Vec<_>>()
        .join(", ");
    println!(
        "
// Private input
let date_of_birth = \"{dob_str}\";
let blinder = [{blinder}];
// Public input
let proof_date = date::Date {{ year: {proof_year}, month: {proof_month}, day: {proof_day} }};
let committed_hash = [{committed_hash}];
main(proof_date, committed_hash, date_of_birth, blinder);
"
    );
}

View File

@@ -0,0 +1,82 @@
use dep::date::Date;
// Proves, without revealing the birth date, that (a) the committed hash was
// computed over this exact birth date and blinder, and (b) the prover is at
// least 18 years old relative to `proof_date`.
fn main(
    // Public inputs
    proof_date: pub date::Date, // verification date, e.g. "2025-08-29"
    committed_hash: pub [u8; 32], // SHA-256 of (dob string || blinder) — see make_hash_input
    // Private inputs
    date_of_birth: str<10>, // "1985-03-12"
    blinder: [u8; 16], // Random 16-byte blinder
) {
    let is_18 = check_18(date_of_birth, proof_date);
    let correct_hash = check_hash(date_of_birth, blinder, committed_hash);
    // Both conditions must hold: the commitment binds the date, and the date
    // implies age >= 18.
    assert(correct_hash);
    assert(is_18);
}
// Returns true when `date_of_birth` + 18 years is strictly before `proof_date`.
// NOTE(review): `lt` excludes the 18th birthday itself — a prover whose 18th
// birthday falls exactly on `proof_date` would be rejected; confirm whether a
// less-than-or-equal comparison is intended.
fn check_18(date_of_birth: str<10>, proof_date: date::Date) -> bool {
    let dob = parse_birth_date(date_of_birth);
    let is_18 = dob.add_years(18).lt(proof_date);
    println(f"Is 18? {is_18}");
    is_18
}
// Recomputes SHA-256 over the 26-byte preimage (dob string || blinder) and
// compares it against the public commitment. Returns true on a match.
fn check_hash(date_of_birth: str<10>, blinder: [u8; 16], committed_hash: [u8; 32]) -> bool {
    let hash_input: [u8; 26] = make_hash_input(date_of_birth, blinder);
    let computed_hash = sha256::sha256_var(hash_input, 26);
    let correct_hash = computed_hash == committed_hash;
    println(f"Correct hash? {correct_hash}");
    correct_hash
}
// Builds the fixed 26-byte hash preimage: bytes 0..10 hold the birth-date
// string, bytes 10..26 hold the blinder. This layout must match the prover's
// commitment generation (see generate_test_data.rs).
fn make_hash_input(dob: str<10>, blinder: [u8; 16]) -> [u8; 26] {
    let mut input: [u8; 26] = [0; 26];
    for i in 0..10 {
        input[i] = dob.as_bytes()[i];
    }
    for i in 0..16 {
        input[10 + i] = blinder[i];
    }
    input
}
// Converts a "YYYY-MM-DD" string into a `date::Date` by dropping the bytes at
// positions 4 and 7 and parsing the remaining "YYYYMMDD" digits.
// NOTE(review): the separator bytes are discarded without being checked to be
// '-', so a malformed-but-10-byte input is accepted — confirm upstream data is
// always ISO-formatted.
pub fn parse_birth_date(birth_date: str<10>) -> date::Date {
    let date: [u8; 10] = birth_date.as_bytes();
    let date_str: str<8> =
        [date[0], date[1], date[2], date[3], date[5], date[6], date[8], date[9]].as_str_unchecked();
    Date::from_str_long_year(date_str)
}
// Happy path: a 1985 birth date is over 18 relative to 2025-09-02 and the
// commitment matches (fixture values produced by generate_test_data.rs).
#[test]
fn test_max_is_over_18() {
    // Private input
    let date_of_birth = "1985-03-12";
    let blinder = [120, 80, 62, 10, 76, 60, 130, 98, 147, 161, 139, 126, 27, 236, 36, 56];
    // Public input
    let proof_date = date::Date { year: 2025, month: 9, day: 2 };
    let committed_hash = [
        229, 118, 202, 216, 213, 230, 125, 163, 48, 178, 118, 225, 84, 7, 140, 63, 173, 255, 163,
        208, 163, 3, 63, 204, 37, 120, 254, 246, 202, 116, 122, 145,
    ];
    main(proof_date, committed_hash, date_of_birth, blinder);
}
// Negative path: a 2010 birth date is under 18 relative to 2025-08-29, so the
// age assertion in `main` must fail even though the commitment is valid.
#[test(should_fail)]
fn test_under_18() {
    // Private input
    let date_of_birth = "2010-08-01";
    let blinder = [160, 23, 57, 158, 141, 195, 155, 132, 109, 242, 48, 220, 70, 217, 229, 189];
    // Public input
    let proof_date = date::Date { year: 2025, month: 8, day: 29 };
    let committed_hash = [
        16, 132, 194, 62, 232, 90, 157, 153, 4, 231, 1, 54, 226, 3, 87, 174, 129, 177, 80, 69, 37,
        222, 209, 91, 168, 156, 9, 109, 108, 144, 168, 109,
    ];
    main(proof_date, committed_hash, date_of_birth, blinder);
}

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,386 @@
use std::net::SocketAddr;
use crate::types::received_commitments;
use super::types::ZKProofBundle;
use anyhow::Result;
use chrono::{Datelike, Local, NaiveDate};
use http_body_util::Empty;
use hyper::{body::Bytes, header, Request, StatusCode, Uri};
use hyper_util::rt::TokioIo;
use k256::sha2::{Digest, Sha256};
use noir::{
barretenberg::{
prove::prove_ultra_honk, srs::setup_srs_from_bytecode,
verify::get_ultra_honk_verification_key,
},
witness::from_vec_str_to_witness_map,
};
use serde_json::Value;
use spansy::{
http::{BodyContent, Requests, Responses},
Spanned,
};
use tls_server_fixture::CA_CERT_DER;
use tlsn::{
config::{CertificateDer, ProtocolConfig, RootCertStore},
connection::ServerName,
hash::HashAlgId,
prover::{ProveConfig, ProveConfigBuilder, Prover, ProverConfig, TlsConfig},
transcript::{
hash::{PlaintextHash, PlaintextHashSecret},
Direction, TranscriptCommitConfig, TranscriptCommitConfigBuilder, TranscriptCommitmentKind,
TranscriptSecret,
},
};
use tlsn_examples::MAX_RECV_DATA;
use tokio::io::AsyncWriteExt;
use tlsn_examples::MAX_SENT_DATA;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
use tracing::instrument;
/// Runs the prover side of the example.
///
/// Performs the MPC-TLS session with the TLS server, selectively reveals
/// the request/response transcript to the verifier, creates a hash
/// commitment over the date-of-birth field, and finally sends an
/// age-verification ZK proof bundle over `verifier_extra_socket`.
///
/// # Errors
///
/// Returns an error if the URI is not valid HTTPS, if any protocol phase
/// fails, or if the HTTP request does not return `200 OK`.
#[instrument(skip(verifier_socket, verifier_extra_socket))]
pub async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
    verifier_socket: T,
    mut verifier_extra_socket: T,
    server_addr: &SocketAddr,
    uri: &str,
) -> Result<()> {
    let uri = uri.parse::<Uri>()?;
    if uri.scheme().map(|s| s.as_str()) != Some("https") {
        return Err(anyhow::anyhow!("URI must use HTTPS scheme"));
    }
    let server_domain = uri
        .authority()
        .ok_or_else(|| anyhow::anyhow!("URI must have authority"))?
        .host();
    // Create a root certificate store with the server-fixture's self-signed
    // certificate. This is only required for offline testing with the
    // server-fixture.
    let mut tls_config_builder = TlsConfig::builder();
    tls_config_builder.root_store(RootCertStore {
        roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
    });
    let tls_config = tls_config_builder.build()?;
    // Set up protocol configuration for prover.
    let mut prover_config_builder = ProverConfig::builder();
    prover_config_builder
        .server_name(ServerName::Dns(server_domain.try_into()?))
        .tls_config(tls_config)
        .protocol_config(
            ProtocolConfig::builder()
                .max_sent_data(MAX_SENT_DATA)
                .max_recv_data(MAX_RECV_DATA)
                .build()?,
        );
    let prover_config = prover_config_builder.build()?;
    // Create prover and connect to verifier.
    //
    // Perform the setup phase with the verifier.
    let prover = Prover::new(prover_config)
        .setup(verifier_socket.compat())
        .await?;
    // Connect to TLS Server.
    let tls_client_socket = tokio::net::TcpStream::connect(server_addr).await?;
    // Pass server connection into the prover.
    let (mpc_tls_connection, prover_fut) = prover.connect(tls_client_socket.compat()).await?;
    // Wrap the connection in a TokioIo compatibility layer to use it with hyper.
    let mpc_tls_connection = TokioIo::new(mpc_tls_connection.compat());
    // Spawn the Prover to run in the background.
    let prover_task = tokio::spawn(prover_fut);
    // MPC-TLS Handshake.
    let (mut request_sender, connection) =
        hyper::client::conn::http1::handshake(mpc_tls_connection).await?;
    // Spawn the connection to run in the background.
    tokio::spawn(connection);
    // MPC-TLS: Send Request and wait for Response.
    let request = Request::builder()
        .uri(uri.clone())
        .header("Host", server_domain)
        .header("Connection", "close")
        // Dummy token; its value is redacted from the revealed transcript
        // by `reveal_request` below.
        .header(header::AUTHORIZATION, "Bearer random_auth_token")
        .method("GET")
        .body(Empty::<Bytes>::new())?;
    let response = request_sender.send_request(request).await?;
    if response.status() != StatusCode::OK {
        return Err(anyhow::anyhow!(
            "MPC-TLS request failed with status {}",
            response.status()
        ));
    }
    // Create proof for the Verifier.
    let mut prover = prover_task.await??;
    let transcript = prover.transcript().clone();
    let mut prove_config_builder = ProveConfig::builder(&transcript);
    // Reveal the DNS name.
    prove_config_builder.server_identity();
    let sent: &[u8] = transcript.sent();
    let received: &[u8] = transcript.received();
    let sent_len = sent.len();
    let recv_len = received.len();
    tracing::info!("Sent length: {}, Received length: {}", sent_len, recv_len);
    // Reveal the entire HTTP request except for the authorization bearer token
    reveal_request(sent, &mut prove_config_builder)?;
    // Create hash commitment for the date of birth field from the response
    let mut transcript_commitment_builder = TranscriptCommitConfig::builder(&transcript);
    transcript_commitment_builder.default_kind(TranscriptCommitmentKind::Hash {
        alg: HashAlgId::SHA256,
    });
    reveal_received(
        received,
        &mut prove_config_builder,
        &mut transcript_commitment_builder,
    )?;
    let transcripts_commitment_config = transcript_commitment_builder.build()?;
    prove_config_builder.transcript_commit(transcripts_commitment_config);
    let prove_config = prove_config_builder.build()?;
    // MPC-TLS prove
    let prover_output = prover.prove(&prove_config).await?;
    prover.close().await?;
    // Prove birthdate is more than 18 years ago.
    let received_commitments = received_commitments(&prover_output.transcript_commitments);
    let received_commitment = received_commitments
        .first()
        .ok_or_else(|| anyhow::anyhow!("No received commitments found"))?; // committed hash (of date of birth string)
    let received_secrets = received_secrets(&prover_output.transcript_secrets);
    let received_secret = received_secrets
        .first()
        .ok_or_else(|| anyhow::anyhow!("No received secrets found"))?; // hash blinder
    let proof_input = prepare_zk_proof_input(received, received_commitment, received_secret)?;
    let proof_bundle = generate_zk_proof(&proof_input)?;
    // Sent zk proof bundle to verifier
    let serialized_proof = bincode::serialize(&proof_bundle)?;
    verifier_extra_socket.write_all(&serialized_proof).await?;
    // Shut down the extra socket so the verifier's `read_to_end` completes.
    verifier_extra_socket.shutdown().await?;
    Ok(())
}
// Reveal everything from the request, except for the authorization token.
fn reveal_request(request: &[u8], builder: &mut ProveConfigBuilder<'_>) -> Result<()> {
let reqs = Requests::new_from_slice(request).collect::<Result<Vec<_>, _>>()?;
let req = reqs
.first()
.ok_or_else(|| anyhow::anyhow!("No requests found"))?;
if req.request.method.as_str() != "GET" {
return Err(anyhow::anyhow!(
"Expected GET method, found {}",
req.request.method.as_str()
));
}
let authorization_header = req
.headers_with_name(header::AUTHORIZATION.as_str())
.next()
.ok_or_else(|| anyhow::anyhow!("Authorization header not found"))?;
let start_pos = authorization_header
.span()
.indices()
.min()
.ok_or_else(|| anyhow::anyhow!("Could not find authorization header start position"))?
+ header::AUTHORIZATION.as_str().len()
+ 2;
let end_pos =
start_pos + authorization_header.span().len() - header::AUTHORIZATION.as_str().len() - 2;
builder.reveal_sent(&(0..start_pos))?;
builder.reveal_sent(&(end_pos..request.len()))?;
Ok(())
}
/// Reveals selected parts of the HTTP response and commits to others.
///
/// Reveals the `tax_year` field (including its JSON key) and creates a
/// hash commitment over the `taxpayer.date_of_birth` value so it can be
/// proven in zero knowledge without being disclosed.
///
/// # Errors
///
/// Returns an error if the response cannot be parsed, if the body is not
/// JSON, or if either expected field is missing.
fn reveal_received(
    received: &[u8],
    builder: &mut ProveConfigBuilder<'_>,
    transcript_commitment_builder: &mut TranscriptCommitConfigBuilder,
) -> Result<()> {
    let resp = Responses::new_from_slice(received).collect::<Result<Vec<_>, _>>()?;
    let response = resp
        .first()
        .ok_or_else(|| anyhow::anyhow!("No responses found"))?;
    let body = response
        .body
        .as_ref()
        .ok_or_else(|| anyhow::anyhow!("Response body not found"))?;
    let BodyContent::Json(json) = &body.content else {
        return Err(anyhow::anyhow!("Expected JSON body content"));
    };
    // Reveal the tax year together with its key: the value span starts at
    // the value itself, so step back over the preceding `"tax_year":`
    // (11 bytes) to include the key in the revealed range. `checked_sub`
    // turns a malformed transcript into an error instead of an
    // integer-underflow panic.
    let tax_year = json
        .get("tax_year")
        .ok_or_else(|| anyhow::anyhow!("tax_year field not found in JSON"))?;
    let start_pos = tax_year
        .span()
        .indices()
        .min()
        .ok_or_else(|| anyhow::anyhow!("Could not find tax_year start position"))?
        .checked_sub(11)
        .ok_or_else(|| anyhow::anyhow!("tax_year key prefix lies outside the transcript"))?;
    // `max()` is inclusive; the reveal range is exclusive at the end.
    let end_pos = tax_year
        .span()
        .indices()
        .max()
        .ok_or_else(|| anyhow::anyhow!("Could not find tax_year end position"))?
        + 1;
    builder.reveal_recv(&(start_pos..end_pos))?;
    // Commit to (but do not reveal) the hash of the date-of-birth value.
    let dob = json
        .get("taxpayer.date_of_birth")
        .ok_or_else(|| anyhow::anyhow!("taxpayer.date_of_birth field not found in JSON"))?;
    transcript_commitment_builder.commit_recv(dob.span())?;
    Ok(())
}
/// Extracts every hash secret over received data from the prover output.
fn received_secrets(transcript_secrets: &[TranscriptSecret]) -> Vec<&PlaintextHashSecret> {
    let mut secrets = Vec::new();
    for entry in transcript_secrets {
        if let TranscriptSecret::Hash(hash) = entry {
            if hash.direction == Direction::Received {
                secrets.push(hash);
            }
        }
    }
    secrets
}
/// Inputs required to generate the age-verification ZK proof.
#[derive(Debug)]
pub struct ZKProofInput {
    /// Raw date-of-birth bytes taken from the received transcript.
    dob: Vec<u8>,
    /// Date at which the proof is generated (public input).
    proof_date: NaiveDate,
    /// Blinder used in the hash commitment (private input).
    blinder: Vec<u8>,
    /// SHA-256 hash committed to during MPC-TLS (public input).
    committed_hash: Vec<u8>,
}
/// Prepares the ZK proof input and checks that the blinded, committed
/// hash matches the plaintext date of birth.
///
/// # Errors
///
/// Returns an error if the commitment or secret metadata is unexpected
/// (wrong direction or hash algorithm), if the committed index range
/// does not fit inside `received`, or if the recomputed
/// `SHA-256(dob || blinder)` does not match the committed hash.
fn prepare_zk_proof_input(
    received: &[u8],
    received_commitment: &PlaintextHash,
    received_secret: &PlaintextHashSecret,
) -> Result<ZKProofInput> {
    // Validate commitment and secret metadata before using any of it.
    // These were previously `assert_eq!` panics, but a malformed
    // commitment is a recoverable protocol error, not a bug here.
    if received_commitment.direction != Direction::Received {
        return Err(anyhow::anyhow!("Commitment direction is not Received"));
    }
    if received_commitment.hash.alg != HashAlgId::SHA256 {
        return Err(anyhow::anyhow!("Commitment hash algorithm is not SHA-256"));
    }
    if received_secret.direction != Direction::Received {
        return Err(anyhow::anyhow!("Secret direction is not Received"));
    }
    if received_secret.alg != HashAlgId::SHA256 {
        return Err(anyhow::anyhow!("Secret hash algorithm is not SHA-256"));
    }
    let hash = &received_commitment.hash;
    let dob_start = received_commitment
        .idx
        .min()
        .ok_or_else(|| anyhow::anyhow!("No start index for DOB"))?;
    let dob_end = received_commitment
        .idx
        .end()
        .ok_or_else(|| anyhow::anyhow!("No end index for DOB"))?;
    // Checked slicing: an out-of-bounds commitment range yields an error
    // instead of an index panic.
    let dob = received
        .get(dob_start..dob_end)
        .ok_or_else(|| anyhow::anyhow!("DOB range out of bounds of received data"))?
        .to_vec();
    let blinder = received_secret.blinder.as_bytes().to_vec();
    let committed_hash = hash.value.as_bytes().to_vec();
    let proof_date = Local::now().date_naive();
    // Recompute SHA-256(dob || blinder) and compare with the commitment.
    let mut hasher = Sha256::new();
    hasher.update(&dob);
    hasher.update(&blinder);
    let computed_hash = hasher.finalize();
    if committed_hash != computed_hash.as_slice() {
        return Err(anyhow::anyhow!(
            "Computed hash does not match committed hash"
        ));
    }
    Ok(ZKProofInput {
        dob,
        proof_date,
        committed_hash,
        blinder,
    })
}
/// Generates the Noir UltraHonk ZK proof for the age check.
///
/// Witness order must match the circuit: proof date (day, month, year),
/// then the committed hash bytes, the date-of-birth bytes, and finally
/// the blinder bytes.
fn generate_zk_proof(proof_input: &ZKProofInput) -> Result<ZKProofBundle> {
    tracing::info!("🔒 Generating ZK proof with Noir...");
    const PROGRAM_JSON: &str = include_str!("./noir/target/noir.json");
    // Load the circuit bytecode from the compiled program artifact.
    let json: Value = serde_json::from_str(PROGRAM_JSON)?;
    let bytecode = json["bytecode"]
        .as_str()
        .ok_or_else(|| anyhow::anyhow!("bytecode field not found in program.json"))?;
    // Assemble the witness values in circuit order.
    let date = &proof_input.proof_date;
    let mut inputs = vec![
        date.day().to_string(),
        date.month().to_string(),
        date.year().to_string(),
    ];
    for bytes in [
        &proof_input.committed_hash,
        &proof_input.dob,
        &proof_input.blinder,
    ] {
        inputs.extend(bytes.iter().map(|b| b.to_string()));
    }
    let proof_date = proof_input.proof_date.to_string();
    tracing::info!(
        "Public inputs : Proof date ({}) and committed hash ({})",
        proof_date,
        hex::encode(&proof_input.committed_hash)
    );
    tracing::info!(
        "Private inputs: Blinder ({}) and Date of Birth ({})",
        hex::encode(&proof_input.blinder),
        String::from_utf8_lossy(&proof_input.dob)
    );
    tracing::debug!("Witness inputs {:?}", inputs);
    let input_refs: Vec<&str> = inputs.iter().map(String::as_str).collect();
    let witness = from_vec_str_to_witness_map(input_refs).map_err(|e| anyhow::anyhow!(e))?;
    // Set up the structured reference string for the circuit.
    setup_srs_from_bytecode(bytecode, None, false).map_err(|e| anyhow::anyhow!(e))?;
    // Derive the verification key from the bytecode.
    let vk = get_ultra_honk_verification_key(bytecode, false).map_err(|e| anyhow::anyhow!(e))?;
    // Produce the proof itself.
    let proof = prove_ultra_honk(bytecode, witness.clone(), vk.clone(), false)
        .map_err(|e| anyhow::anyhow!(e))?;
    tracing::info!("✅ Proof generated ({} bytes)", proof.len());
    Ok(ZKProofBundle { vk, proof })
}

View File

@@ -0,0 +1,21 @@
use serde::{Deserialize, Serialize};
use tlsn::transcript::{hash::PlaintextHash, Direction, TranscriptCommitment};
/// Proof bundle sent from the prover to the verifier over the extra
/// socket.
#[derive(Serialize, Deserialize, Debug)]
pub struct ZKProofBundle {
    /// UltraHonk verification key.
    pub vk: Vec<u8>,
    /// UltraHonk proof bytes; the verifier reads the serialized public
    /// inputs from its prefix.
    pub proof: Vec<u8>,
}
/// Extracts every hash commitment over received data from the prover
/// output.
pub fn received_commitments(
    transcript_commitments: &[TranscriptCommitment],
) -> Vec<&PlaintextHash> {
    let mut commitments = Vec::new();
    for entry in transcript_commitments {
        if let TranscriptCommitment::Hash(hash) = entry {
            if hash.direction == Direction::Received {
                commitments.push(hash);
            }
        }
    }
    commitments
}

View File

@@ -0,0 +1,213 @@
use crate::types::received_commitments;
use super::types::ZKProofBundle;
use anyhow::Result;
use chrono::{Local, NaiveDate};
use noir::barretenberg::verify::{get_ultra_honk_verification_key, verify_ultra_honk};
use serde_json::Value;
use tls_server_fixture::CA_CERT_DER;
use tlsn::{
config::{CertificateDer, RootCertStore},
connection::ServerName,
hash::HashAlgId,
transcript::{Direction, PartialTranscript},
verifier::{Verifier, VerifierConfig, VerifierOutput},
};
use tlsn_examples::{MAX_RECV_DATA, MAX_SENT_DATA};
use tlsn_server_fixture_certs::SERVER_DOMAIN;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite};
use tokio_util::compat::TokioAsyncReadCompatExt;
use tracing::instrument;
/// Runs the verifier side of the example.
///
/// Verifies the MPC-TLS session, checks the revealed transcript data and
/// the hash commitment over the date of birth, then receives and
/// verifies the age ZK proof bundle from `extra_socket`.
///
/// Returns the partial transcript revealed by the prover on success.
#[instrument(skip(socket, extra_socket))]
pub async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
    socket: T,
    mut extra_socket: T,
) -> Result<PartialTranscript> {
    // Create a root certificate store with the server-fixture's self-signed
    // certificate. This is only required for offline testing with the
    // server-fixture.
    let verifier_config = VerifierConfig::builder()
        .root_store(RootCertStore {
            roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
        })
        .build()?;
    let verifier = Verifier::new(verifier_config);
    // Validate the proposed configuration and then run the TLS commitment protocol.
    let verifier = verifier.setup(socket.compat()).await?;
    // This is the opportunity to ensure the prover does not attempt to overload the
    // verifier.
    let reject = if verifier.config().max_sent_data() > MAX_SENT_DATA {
        Some("max_sent_data is too large")
    } else if verifier.config().max_recv_data() > MAX_RECV_DATA {
        Some("max_recv_data is too large")
    } else {
        None
    };
    if reject.is_some() {
        verifier.reject(reject).await?;
        return Err(anyhow::anyhow!("protocol configuration rejected"));
    }
    // Runs the TLS commitment protocol to completion.
    let verifier = verifier.accept().await?.run().await?;
    // Validate the proving request and then verify.
    let verifier = verifier.verify().await?;
    let request = verifier.request();
    if request.handshake.is_none() || request.transcript.is_none() {
        let verifier = verifier
            .reject(Some(
                "expecting to verify the server name and transcript data",
            ))
            .await?;
        verifier.close().await?;
        return Err(anyhow::anyhow!(
            "prover did not reveal the server name and transcript data"
        ));
    }
    let (
        VerifierOutput {
            server_name,
            transcript,
            transcript_commitments,
            ..
        },
        verifier,
    ) = verifier.accept().await?;
    verifier.close().await?;
    // Both are guaranteed present: the request check above rejected
    // otherwise.
    let server_name = server_name.expect("server name should be present");
    let transcript = transcript.expect("transcript should be present");
    // Create hash commitment for the date of birth field from the response
    let sent = transcript.sent_unsafe().to_vec();
    let sent_data = String::from_utf8(sent.clone())
        .map_err(|e| anyhow::anyhow!("Verifier expected valid UTF-8 sent data: {e}"))?;
    if !sent_data.contains(SERVER_DOMAIN) {
        return Err(anyhow::anyhow!(
            "Verification failed: Expected host {SERVER_DOMAIN} not found in sent data"
        ));
    }
    // Check received data.
    let received_commitments = received_commitments(&transcript_commitments);
    let received_commitment = received_commitments
        .first()
        .ok_or_else(|| anyhow::anyhow!("Missing hash commitment"))?;
    assert!(received_commitment.direction == Direction::Received);
    assert!(received_commitment.hash.alg == HashAlgId::SHA256);
    let committed_hash = &received_commitment.hash;
    // Check Session info: server name.
    let ServerName::Dns(server_name) = server_name;
    if server_name.as_str() != SERVER_DOMAIN {
        return Err(anyhow::anyhow!(
            "Server name mismatch: expected {SERVER_DOMAIN}, got {}",
            server_name.as_str()
        ));
    }
    // Receive ZKProof information from prover
    let mut buf = Vec::new();
    extra_socket.read_to_end(&mut buf).await?;
    if buf.is_empty() {
        return Err(anyhow::anyhow!("No ZK proof data received from prover"));
    }
    let msg: ZKProofBundle = bincode::deserialize(&buf)
        .map_err(|e| anyhow::anyhow!("Failed to deserialize ZK proof bundle: {e}"))?;
    // Verify zk proof
    const PROGRAM_JSON: &str = include_str!("./noir/target/noir.json");
    let json: Value = serde_json::from_str(PROGRAM_JSON)
        .map_err(|e| anyhow::anyhow!("Failed to parse Noir circuit: {e}"))?;
    let bytecode = json["bytecode"]
        .as_str()
        .ok_or_else(|| anyhow::anyhow!("Bytecode field missing in noir.json"))?;
    // Recompute the verification key locally so the prover cannot swap in
    // a key for a different circuit.
    let vk = get_ultra_honk_verification_key(bytecode, false)
        .map_err(|e| anyhow::anyhow!("Failed to get verification key: {e}"))?;
    if vk != msg.vk {
        return Err(anyhow::anyhow!(
            "Verification key mismatch between computed and provided by prover"
        ));
    }
    let proof = msg.proof.clone();
    // Validate proof has enough data.
    // The proof should start with the public inputs:
    // * We expect at least 3 * 32 bytes for the three date fields (day, month,
    //   year)
    // * and 32*32 bytes for the hash
    let min_bytes = (32 + 3) * 32;
    if proof.len() < min_bytes {
        return Err(anyhow::anyhow!(
            "Proof too short: expected at least {min_bytes} bytes, got {}",
            proof.len()
        ));
    }
    // Check that the proof date is correctly included in the proof.
    // Each public input occupies a 32-byte big-endian field; the numeric
    // value sits in the last 4 bytes of each chunk (day, month, year are
    // the first three inputs).
    let proof_date_day: u32 = u32::from_be_bytes(proof[28..32].try_into()?);
    let proof_date_month: u32 = u32::from_be_bytes(proof[60..64].try_into()?);
    let proof_date_year: i32 = i32::from_be_bytes(proof[92..96].try_into()?);
    let proof_date_from_proof =
        NaiveDate::from_ymd_opt(proof_date_year, proof_date_month, proof_date_day)
            .ok_or_else(|| anyhow::anyhow!("Invalid proof date in proof"))?;
    let today = Local::now().date_naive();
    if (today - proof_date_from_proof).num_days() < 0 {
        return Err(anyhow::anyhow!(
            "The proof date can only be today or in the past: provided {proof_date_from_proof}, today {today}"
        ));
    }
    // Check that the committed hash in the proof matches the hash from the
    // commitment. Public inputs 4..36 each carry one hash byte in the
    // last byte of their 32-byte chunk.
    let committed_hash_in_proof: Vec<u8> = proof
        .chunks(32)
        .skip(3) // skip the first 3 chunks
        .take(32)
        .map(|chunk| *chunk.last().unwrap_or(&0))
        .collect();
    let expected_hash = committed_hash.value.as_bytes().to_vec();
    if committed_hash_in_proof != expected_hash {
        tracing::error!(
            "❌ The hash in the proof does not match the committed hash in MPC-TLS: {} != {}",
            hex::encode(&committed_hash_in_proof),
            hex::encode(&expected_hash)
        );
        return Err(anyhow::anyhow!(
            "Hash in proof does not match committed hash in MPC-TLS"
        ));
    }
    tracing::info!(
        "✅ The hash in the proof matches the committed hash in MPC-TLS ({})",
        hex::encode(&expected_hash)
    );
    // Finally verify the proof
    let is_valid = verify_ultra_honk(msg.proof, msg.vk)
        .map_err(|e| anyhow::anyhow!("ZKProof Verification failed: {e}"))?;
    if !is_valid {
        tracing::error!("❌ Age verification ZKProof failed to verify");
        return Err(anyhow::anyhow!("Age verification ZKProof failed to verify"));
    }
    tracing::info!("✅ Age verification ZKProof successfully verified");
    Ok(transcript)
}

View File

@@ -1,6 +1,6 @@
[package]
name = "tlsn-formats"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.14-pre"
edition = "2021"
[lints]

View File

@@ -3,7 +3,19 @@
# Ensure the script runs in the folder that contains this script
cd "$(dirname "$0")"
cargo build --release --package tlsn-harness-runner --package tlsn-harness-executor --package tlsn-server-fixture --package tlsn-harness-plot
RUNNER_FEATURES=""
EXECUTOR_FEATURES=""
if [ "$1" = "debug" ]; then
RUNNER_FEATURES="--features debug"
EXECUTOR_FEATURES="--no-default-features --features debug"
fi
cargo build --release \
--package tlsn-harness-runner $RUNNER_FEATURES \
--package tlsn-harness-executor $EXECUTOR_FEATURES \
--package tlsn-server-fixture \
--package tlsn-harness-plot
mkdir -p bin

View File

@@ -1,10 +1,14 @@
[target.wasm32-unknown-unknown]
rustflags = [
"-C",
"target-feature=+atomics,+bulk-memory,+mutable-globals,+simd128",
"-C",
"-Ctarget-feature=+atomics,+bulk-memory,+mutable-globals,+simd128",
"-Clink-arg=--shared-memory",
# 4GB
"link-arg=--max-memory=4294967296",
"-Clink-arg=--max-memory=4294967296",
"-Clink-arg=--import-memory",
"-Clink-arg=--export=__wasm_init_tls",
"-Clink-arg=--export=__tls_size",
"-Clink-arg=--export=__tls_align",
"-Clink-arg=--export=__tls_base",
"--cfg",
'getrandom_backend="wasm_js"',
]

View File

@@ -4,6 +4,12 @@ version = "0.1.0"
edition = "2024"
publish = false
[features]
# Disable tracing events as a workaround for issue 959.
default = ["tracing/release_max_level_off"]
# Used to debug the executor itself.
debug = []
[lib]
name = "harness_executor"
crate-type = ["cdylib", "rlib"]
@@ -28,8 +34,7 @@ tokio = { workspace = true, features = ["full"] }
tokio-util = { workspace = true, features = ["compat"] }
[target.'cfg(target_arch = "wasm32")'.dependencies]
# Disable tracing events as a workaround for issue 959.
tracing = { workspace = true, features = ["release_max_level_off"] }
tracing = { workspace = true }
wasm-bindgen = { workspace = true }
tlsn-wasm = { workspace = true }
js-sys = { workspace = true }

View File

@@ -2,33 +2,30 @@ use anyhow::Result;
use harness_core::bench::Bench;
use tlsn::{
config::{CertificateDer, ProtocolConfigValidator, RootCertStore},
verifier::{Verifier, VerifierConfig, VerifyConfig},
config::{CertificateDer, RootCertStore},
verifier::{Verifier, VerifierConfig},
};
use tlsn_server_fixture_certs::CA_CERT_DER;
use crate::{IoProvider, bench::RECV_PADDING};
pub async fn bench_verifier(provider: &IoProvider, config: &Bench) -> Result<()> {
let mut builder = ProtocolConfigValidator::builder();
builder
.max_sent_data(config.upload_size)
.max_recv_data(config.download_size + RECV_PADDING);
let protocol_config = builder.build()?;
use crate::IoProvider;
pub async fn bench_verifier(provider: &IoProvider, _config: &Bench) -> Result<()> {
let verifier = Verifier::new(
VerifierConfig::builder()
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.protocol_config_validator(protocol_config)
.build()?,
);
let verifier = verifier.setup(provider.provide_proto_io().await?).await?;
let mut verifier = verifier.run().await?;
verifier.verify(&VerifyConfig::default()).await?;
let verifier = verifier
.setup(provider.provide_proto_io().await?)
.await?
.accept()
.await?
.run()
.await?;
let (_, verifier) = verifier.verify().await?.accept().await?;
verifier.close().await?;
Ok(())

View File

@@ -1,10 +1,10 @@
use tlsn::{
config::{CertificateDer, ProtocolConfig, ProtocolConfigValidator, RootCertStore},
config::{CertificateDer, ProtocolConfig, RootCertStore},
connection::ServerName,
hash::HashAlgId,
prover::{ProveConfig, Prover, ProverConfig, TlsConfig},
transcript::{TranscriptCommitConfig, TranscriptCommitment, TranscriptCommitmentKind},
verifier::{Verifier, VerifierConfig, VerifierOutput, VerifyConfig},
verifier::{Verifier, VerifierConfig, VerifierOutput},
};
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
@@ -113,33 +113,34 @@ async fn prover(provider: &IoProvider) {
async fn verifier(provider: &IoProvider) {
let config = VerifierConfig::builder()
.protocol_config_validator(
ProtocolConfigValidator::builder()
.max_sent_data(MAX_SENT_DATA)
.max_recv_data(MAX_RECV_DATA)
.build()
.unwrap(),
)
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.build()
.unwrap();
let verifier = Verifier::new(config);
let VerifierOutput {
server_name,
transcript_commitments,
..
} = verifier
.verify(
provider.provide_proto_io().await.unwrap(),
&VerifyConfig::default(),
)
let verifier = Verifier::new(config)
.setup(provider.provide_proto_io().await.unwrap())
.await
.unwrap()
.accept()
.await
.unwrap()
.run()
.await
.unwrap();
let (
VerifierOutput {
server_name,
transcript_commitments,
..
},
verifier,
) = verifier.verify().await.unwrap().accept().await.unwrap();
verifier.close().await.unwrap();
let ServerName::Dns(server_name) = server_name.unwrap();
assert_eq!(server_name.as_str(), SERVER_DOMAIN);

View File

@@ -1,6 +1,8 @@
FROM rust AS builder
WORKDIR /usr/src/tlsn
ARG DEBUG=0
RUN \
rustup update; \
apt update && apt install -y clang; \
@@ -10,7 +12,12 @@ RUN \
COPY . .
RUN \
cd crates/harness; \
./build.sh;
# Pass `--build-arg DEBUG=1` to `docker build` if you need to debug the harness.
if [ "$DEBUG" = "1" ]; then \
./build.sh debug; \
else \
./build.sh; \
fi
FROM debian:latest

View File

@@ -7,6 +7,10 @@ publish = false
[lib]
name = "harness_runner"
[features]
# Used to debug the runner itself.
debug = []
[dependencies]
tlsn-harness-core = { workspace = true }
tlsn-server-fixture = { workspace = true }

View File

@@ -0,0 +1,17 @@
#![allow(unused_imports)]
pub use futures::FutureExt;
pub use tracing::{debug, error};
pub use chromiumoxide::{
Browser, Page,
cdp::{
browser_protocol::{
log::{EventEntryAdded, LogEntryLevel},
network::{EnableParams, SetCacheDisabledParams},
page::ReloadParams,
},
js_protocol::runtime::EventExceptionThrown,
},
handler::HandlerConfig,
};

View File

@@ -21,6 +21,9 @@ use harness_core::{
use crate::{Target, network::Namespace, rpc::Rpc};
#[cfg(feature = "debug")]
use crate::debug_prelude::*;
pub struct Executor {
ns: Namespace,
config: ExecutorConfig,
@@ -66,20 +69,34 @@ impl Executor {
Id::One => self.config.network().rpc_1,
};
let process = duct::cmd!(
"sudo",
"ip",
"netns",
"exec",
self.ns.name(),
"env",
let mut args = vec![
"ip".into(),
"netns".into(),
"exec".into(),
self.ns.name().into(),
"env".into(),
format!("CONFIG={}", serde_json::to_string(&self.config)?),
executor_path
)
.stdout_capture()
.stderr_capture()
.unchecked()
.start()?;
];
if cfg!(feature = "debug") {
let level = &std::env::var("RUST_LOG").unwrap_or("debug".to_string());
args.push("env".into());
args.push(format!("RUST_LOG={}", level));
};
args.push(executor_path.to_str().expect("valid path").into());
let process = duct::cmd("sudo", args);
let process = if !cfg!(feature = "debug") {
process
.stdout_capture()
.stderr_capture()
.unchecked()
.start()?
} else {
process.unchecked().start()?
};
let rpc = Rpc::new_native(rpc_addr).await?;
@@ -119,10 +136,13 @@ impl Executor {
"--no-sandbox",
format!("--user-data-dir={tmp}"),
format!("--allowed-ips=10.250.0.1"),
)
.stderr_capture()
.stdout_capture()
.start()?;
);
let process = if !cfg!(feature = "debug") {
process.stderr_capture().stdout_capture().start()?
} else {
process.start()?
};
const TIMEOUT: usize = 10000;
const DELAY: usize = 100;
@@ -171,6 +191,38 @@ impl Executor {
.new_page(&format!("http://{wasm_addr}:{wasm_port}/index.html"))
.await?;
#[cfg(feature = "debug")]
tokio::spawn(register_listeners(page.clone()).await?);
#[cfg(feature = "debug")]
async fn register_listeners(page: Page) -> Result<impl Future<Output = ()>> {
let mut logs = page.event_listener::<EventEntryAdded>().await?.fuse();
let mut exceptions =
page.event_listener::<EventExceptionThrown>().await?.fuse();
Ok(futures::future::join(
async move {
while let Some(event) = logs.next().await {
let entry = &event.entry;
match entry.level {
LogEntryLevel::Error => {
error!("{:?}", entry);
}
_ => {
debug!("{:?}: {}", entry.timestamp, entry.text);
}
}
}
},
async move {
while let Some(event) = exceptions.next().await {
error!("{:?}", event);
}
},
)
.map(|_| ()))
}
page.execute(EnableParams::builder().build()).await?;
page.execute(SetCacheDisabledParams {
cache_disabled: true,

View File

@@ -6,6 +6,9 @@ mod server_fixture;
pub mod wasm_server;
mod ws_proxy;
#[cfg(feature = "debug")]
mod debug_prelude;
use std::time::Duration;
use anyhow::Result;
@@ -24,6 +27,9 @@ use cli::{Cli, Command};
use executor::Executor;
use server_fixture::ServerFixture;
#[cfg(feature = "debug")]
use crate::debug_prelude::*;
use crate::{cli::Route, network::Network, wasm_server::WasmServer, ws_proxy::WsProxy};
#[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum)]
@@ -113,6 +119,9 @@ impl Runner {
}
pub async fn main() -> Result<()> {
#[cfg(feature = "debug")]
tracing_subscriber::fmt::init();
let cli = Cli::parse();
let mut runner = Runner::new(&cli)?;
@@ -227,6 +236,9 @@ pub async fn main() -> Result<()> {
// Wait for the network to stabilize
tokio::time::sleep(Duration::from_millis(100)).await;
#[cfg(feature = "debug")]
debug!("Starting bench in group {:?}", config.group);
let (output, _) = tokio::try_join!(
runner.exec_p.bench(BenchCmd {
config: config.clone(),

View File

@@ -5,7 +5,7 @@ description = "TLSNotary MPC-TLS protocol"
keywords = ["tls", "mpc", "2pc"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.14-pre"
edition = "2021"
[lints]

View File

@@ -41,6 +41,7 @@ use tls_core::{
message::{OpaqueMessage, PlainMessage},
},
suites::SupportedCipherSuite,
verify::verify_sig_determine_alg,
};
use tlsn_core::{
connection::{CertBinding, CertBindingV1_2, ServerSignature, TlsVersion, VerifyData},
@@ -327,12 +328,20 @@ impl MpcTlsLeader {
.map(|cert| CertificateDer(cert.0.clone()))
.collect();
let mut sig_msg = Vec::new();
sig_msg.extend_from_slice(&client_random.0);
sig_msg.extend_from_slice(&server_random.0);
sig_msg.extend_from_slice(server_kx_details.kx_params());
let server_signature_alg = verify_sig_determine_alg(
&server_cert_details.cert_chain()[0],
&sig_msg,
server_kx_details.kx_sig(),
)
.expect("only supported signature should have been accepted");
let server_signature = ServerSignature {
scheme: server_kx_details
.kx_sig()
.scheme
.try_into()
.expect("only supported signature scheme should have been accepted"),
alg: server_signature_alg.into(),
sig: server_kx_details.kx_sig().sig.0.clone(),
};

View File

@@ -72,4 +72,5 @@ pub(crate) struct ServerFinishedVd {
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(dead_code)]
pub(crate) struct CloseConnection;

View File

@@ -193,7 +193,7 @@ where
};
// Divide by block length and round up.
let block_count = input.len() / 16 + (input.len() % 16 != 0) as usize;
let block_count = input.len() / 16 + !input.len().is_multiple_of(16) as usize;
if block_count > MAX_POWER {
return Err(ErrorRepr::InputLength {
@@ -282,11 +282,11 @@ fn build_ghash_data(mut aad: Vec<u8>, mut ciphertext: Vec<u8>) -> Vec<u8> {
let len_block = ((associated_data_bitlen as u128) << 64) + (text_bitlen as u128);
// Pad data to be a multiple of 16 bytes.
let aad_padded_block_count = (aad.len() / 16) + (aad.len() % 16 != 0) as usize;
let aad_padded_block_count = (aad.len() / 16) + !aad.len().is_multiple_of(16) as usize;
aad.resize(aad_padded_block_count * 16, 0);
let ciphertext_padded_block_count =
(ciphertext.len() / 16) + (ciphertext.len() % 16 != 0) as usize;
(ciphertext.len() / 16) + !ciphertext.len().is_multiple_of(16) as usize;
ciphertext.resize(ciphertext_padded_block_count * 16, 0);
let mut data: Vec<u8> = Vec::with_capacity(aad.len() + ciphertext.len() + 16);

View File

@@ -0,0 +1,22 @@
[package]
name = "tlsn-plugin-core"
version = "0.1.0"
edition = "2024"
[dependencies]
tlsn = { workspace = true }
tlsn-core = { workspace = true }
tlsn-formats = { workspace = true }
http-body-util = { workspace = true }
hyper = { workspace = true, features = ["client", "http1"] }
rangeset = { workspace = true }
serde = { workspace = true }
spansy = { workspace = true }
thiserror = { workspace = true }
[dev-dependencies]
tlsn-data-fixtures = { workspace = true }
[lints]
workspace = true

View File

@@ -0,0 +1,105 @@
//! Core types of the prover and verifier plugin.
use serde::{Deserialize, Serialize};
use tlsn_core::{
hash::HashAlgId,
transcript::{Direction, TranscriptCommitmentKind},
};
mod prover;
mod verifier;
pub use prover::{
Config as ProverPluginConfig, ConfigError as ProverPLuginConfigError,
Output as ProverPluginOutput,
};
pub use verifier::{
Config as VerifierPluginConfig, ConfigError as VerifierPluginConfigError,
Output as VerifierPluginOutput,
};
/// A rule for disclosing HTTP data.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DisclosureRule {
    /// The part of the HTTP message the rule applies to.
    http: HttpHandle,
    /// How that part is disclosed (revealed or committed).
    policy: DisclosurePolicy,
}
/// Handle for a part of an HTTP message.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HttpHandle {
    /// Whether this refers to the request or the response.
    typ: MessageType,
    /// Which part of the message is addressed.
    part: MessagePart,
}
/// Whether a handle refers to the HTTP request or the HTTP response.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub enum MessageType {
    /// The HTTP request (sent direction).
    Request,
    /// The HTTP response (received direction).
    Response,
}
// Maps a message type onto the transcript direction it occupies:
// requests travel in the sent direction, responses in the received one.
impl From<&MessageType> for Direction {
    fn from(message_type: &MessageType) -> Self {
        if *message_type == MessageType::Request {
            Direction::Sent
        } else {
            Direction::Received
        }
    }
}
/// Disclosure policy.
///
/// Determines whether the targeted data is revealed in plaintext to the
/// verifier or only committed to.
#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
pub enum DisclosurePolicy {
    /// Reveals data.
    Reveal,
    /// Creates a hiding commitment using the given algorithm.
    Commit(Alg),
}
/// Commitment algorithm.
#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
pub enum Alg {
    /// Encoding commitment (SHA-256 variant).
    EncodingSha256,
    /// Encoding commitment (BLAKE3 variant).
    EncodingBlake3,
    /// Encoding commitment (Keccak-256 variant).
    EncodingKeccak256,
    /// Plaintext hash commitment using SHA-256.
    Sha256,
    /// Plaintext hash commitment using BLAKE3.
    Blake3,
}
// Maps the plugin-level algorithm choice onto the core commitment kind.
impl From<&Alg> for TranscriptCommitmentKind {
    fn from(alg: &Alg) -> Self {
        match alg {
            // NOTE(review): all encoding variants collapse into the same
            // `Encoding` kind — the specific hash choice is not carried
            // over here; confirm it is applied elsewhere.
            Alg::EncodingSha256 | Alg::EncodingBlake3 | Alg::EncodingKeccak256 => {
                TranscriptCommitmentKind::Encoding
            }
            Alg::Sha256 => TranscriptCommitmentKind::Hash {
                alg: HashAlgId::SHA256,
            },
            Alg::Blake3 => TranscriptCommitmentKind::Hash {
                alg: HashAlgId::BLAKE3,
            },
        }
    }
}
/// The part of an HTTP message.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub enum MessagePart {
    /// The whole message.
    All,
    /// The start line (request line or status line).
    StartLine,
    /// A single header, selected by name.
    Header(HeaderParams),
    /// A part of the body.
    Body(BodyParams),
}
/// Parameters for an HTTP header.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct HeaderParams {
    /// Header name to select.
    pub key: String,
}
/// Parameters for a part of an HTTP body.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub enum BodyParams {
    /// Selects a JSON value by path expression.
    JsonPath(String),
    /// Selects an XML node by XPath expression.
    XPath(String),
}

View File

@@ -0,0 +1,34 @@
//! Core types of the prover plugin.
use crate::HttpHandle;
use serde::{Deserialize, Serialize};
use tlsn_core::ProverOutput;
mod config;
pub use config::{Config, ConfigError};
/// Output of the prover plugin.
#[allow(dead_code)]
pub struct Output {
    /// Raw output of the TLSNotary prover.
    output: ProverOutput,
    /// Plaintext exposed to the host.
    plaintext: Vec<(HttpHandle, Vec<u8>)>,
}
/// Params for protocol prover.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProverParams {
    /// Maximum number of bytes that may be received from the server.
    max_recv_data: usize,
    /// Maximum number of bytes that may be sent to the server.
    max_sent_data: usize,
    /// Whether to prove the server identity to the verifier.
    prove_server_identity: bool,
    /// DNS name of the TLS server.
    pub server_dns: String,
}
/// An HTTP request to be sent through the MPC-TLS connection.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HttpRequest {
    /// Request URL.
    url: String,
    /// HTTP method, e.g. `GET`.
    method: String,
    /// Optional request body bytes.
    body: Option<Vec<u8>>,
    /// Header name/value pairs added to the request.
    pub headers: Vec<(String, String)>,
}

View File

@@ -0,0 +1,463 @@
use crate::{
BodyParams, DisclosurePolicy, DisclosureRule, HttpHandle, MessagePart, MessageType,
prover::{HttpRequest, ProverParams},
};
use crate::prover::Output;
use http_body_util::Full;
use hyper::{Request as HyperRequest, body::Bytes};
use rangeset::RangeSet;
use serde::{Deserialize, Serialize};
use tlsn::{
config::ProtocolConfig,
prover::{ProverConfig, TlsConfig},
};
use tlsn_core::{
ProveConfig, ProveConfigBuilder, ProverOutput,
connection::{DnsName, ServerName},
transcript::{Transcript, TranscriptCommitConfig, TranscriptCommitConfigBuilder},
webpki::RootCertStore,
};
use tlsn_formats::{
http::{Body, Request, Requests, Response, Responses},
json::JsonValue,
spansy,
spansy::Spanned,
};
/// Prover plugin config.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    /// Parameters for the underlying TLSNotary prover.
    pub prover_params: ProverParams,
    /// HTTP request to send to the server.
    pub request: HttpRequest,
    /// Data which will be disclosed to the verifier.
    pub disclose: Vec<DisclosureRule>,
    /// Data which will be exposed in the plugin output.
    pub expose: Vec<HttpHandle>,
    /// Root certificate store used for the TLS configuration.
    pub root_store: RootCertStore,
    /// Endpoint of the verifier.
    pub verifier_endpoint: String,
    /// Proxy endpoint for connecting to the server.
    pub proxy_endpoint: Option<String>,
}
impl Config {
/// Returns the verifier endpoint.
pub fn prover_endpoint(&self) -> &String {
&self.verifier_endpoint
}
/// Builds and returns [ProverConfig].
pub fn prover_config(&self) -> Result<ProverConfig, ConfigError> {
let dns_name: DnsName = self
.prover_params
.server_dns
.clone()
.try_into()
.map_err(|_| ConfigError("prover_config error".to_string()))?;
let mut builder = TlsConfig::builder();
builder.root_store(self.root_store.clone());
let tls_config = builder.build().unwrap();
let config = ProverConfig::builder()
.server_name(ServerName::Dns(dns_name))
.tls_config(tls_config)
.protocol_config(
ProtocolConfig::builder()
.max_sent_data(self.prover_params.max_sent_data)
.max_recv_data(self.prover_params.max_recv_data)
.build()
.unwrap(),
)
.build()
.unwrap();
Ok(config)
}
/// Returns the HTTP request.
pub fn http_request(&self) -> Result<HyperRequest<Full<Bytes>>, ConfigError> {
let mut request = HyperRequest::builder()
.uri(self.request.url.clone())
.header("Host", self.prover_params.server_dns.clone());
for (k, v) in &self.request.headers {
request = request.header(k, v);
}
request = request.method(self.request.method.as_str());
let body = match &self.request.body {
Some(data) => Full::<Bytes>::from(data.clone()),
None => Full::<Bytes>::from(vec![]),
};
request
.body(body)
.map_err(|_| ConfigError("http_request error".to_string()))
}
/// Creates a [ProveConfig] for the given `transcript`.
pub fn prove_config(&self, transcript: &Transcript) -> Result<ProveConfig, ConfigError> {
let mut prove_cfg = ProveConfig::builder(transcript);
let mut commit_cfg = TranscriptCommitConfig::builder(transcript);
if self.prover_params.prove_server_identity {
prove_cfg.server_identity();
}
let reqs = Requests::new_from_slice(transcript.sent())
.collect::<Result<Vec<_>, _>>()
.map_err(|_| ConfigError("prove_config error".to_string()))?;
let resps = Responses::new_from_slice(transcript.received())
.collect::<Result<Vec<_>, _>>()
.map_err(|_| ConfigError("prove_config error".to_string()))?;
let req = reqs.first().expect("at least one request");
let resp = resps.first().expect("at least one response");
let req_rules = self
.disclose
.iter()
.filter(|h| h.http.typ == MessageType::Request);
let resp_rules = self
.disclose
.iter()
.filter(|h| h.http.typ == MessageType::Response);
disclose_req(req, req_rules, &mut commit_cfg, &mut prove_cfg);
disclose_resp(resp, resp_rules, &mut commit_cfg, &mut prove_cfg);
prove_cfg.transcript_commit(commit_cfg.build().unwrap());
Ok(prove_cfg.build().unwrap())
}
/// Returns the output of the plugin.
pub fn output(
&self,
transcript: Transcript,
prover_output: ProverOutput,
) -> Result<Output, ConfigError> {
let reqs = Requests::new_from_slice(transcript.sent())
.collect::<Result<Vec<_>, _>>()
.map_err(|_| ConfigError("output error".to_string()))?;
let resps = Responses::new_from_slice(transcript.received())
.collect::<Result<Vec<_>, _>>()
.map_err(|_| ConfigError("output error".to_string()))?;
let req = reqs.first().expect("at least one request");
let resp = resps.first().expect("at least one response");
let mut exposed = Vec::new();
// Extract the to-be-exposed data from the transcript.
for h in self.expose.iter() {
let range = if h.typ == MessageType::Request {
req_part_range(req, h)
} else {
resp_part_range(resp, h)
};
let seq = transcript
.get((&h.typ).into(), &range)
.ok_or(ConfigError("range not found in transcript".to_string()))?;
exposed.push((h.clone(), seq.data().to_vec()));
}
Ok(Output {
output: prover_output,
plaintext: exposed,
})
}
}
/// Error returned by the prover plugin [Config] methods.
#[derive(Debug, thiserror::Error)]
#[error("config error: {0}")]
pub struct ConfigError(String);
/// Processes disclosure rules for the request.
///
/// Each rule's target part is resolved to a transcript range and either
/// committed or revealed according to the rule's policy. Rules whose part
/// is not present in the request are skipped.
fn disclose_req<'a, I>(
    req: &Request,
    rules: I,
    commit_cfg: &mut TranscriptCommitConfigBuilder<'_>,
    prove_cfg: &mut ProveConfigBuilder<'_>,
) where
    I: Iterator<Item = &'a DisclosureRule>,
{
    for r in rules {
        let range = req_part_range(req, &r.http);
        if range.is_empty() {
            // Skip only this rule: a `return` here would silently drop all
            // remaining disclosure rules as well.
            // TODO: maybe return an error here when the part was not found.
            continue;
        }
        match &r.policy {
            DisclosurePolicy::Commit(alg) => {
                commit_cfg
                    .commit_with_kind(&range, (&r.http.typ).into(), alg.into())
                    .expect("range is in the transcript");
            }
            DisclosurePolicy::Reveal => {
                prove_cfg
                    .reveal_sent(&range)
                    .expect("range is in the transcript");
            }
        }
    }
}
/// Processes disclosure rules for the response.
///
/// Each rule's target part is resolved to a transcript range and either
/// committed or revealed according to the rule's policy. Rules whose part
/// is not present in the response are skipped.
fn disclose_resp<'a, I>(
    resp: &Response,
    rules: I,
    commit_cfg: &mut TranscriptCommitConfigBuilder<'_>,
    prove_cfg: &mut ProveConfigBuilder<'_>,
) where
    I: Iterator<Item = &'a DisclosureRule>,
{
    for r in rules {
        let range = resp_part_range(resp, &r.http);
        if range.is_empty() {
            // Skip only this rule: a `return` here would silently drop all
            // remaining disclosure rules as well.
            // TODO: maybe return an error here when the part was not found.
            continue;
        }
        match &r.policy {
            DisclosurePolicy::Commit(alg) => {
                commit_cfg
                    .commit_with_kind(&range, (&r.http.typ).into(), alg.into())
                    .expect("range is in the transcript");
            }
            DisclosurePolicy::Reveal => {
                prove_cfg
                    .reveal_recv(&range)
                    .expect("range is in the transcript");
            }
        }
    }
}
/// Returns the range for the given `part` of the HTTP request.
///
/// An empty range is returned when the part cannot be located.
fn req_part_range(req: &Request, part: &HttpHandle) -> RangeSet<usize> {
    match &part.part {
        MessagePart::All => {
            // Full span of the request within the transcript.
            let start = req.span().indices().min().unwrap();
            let end = req.span().indices().end().unwrap();
            (start..end).into()
        }
        MessagePart::StartLine => req.request.span().indices().clone(),
        MessagePart::Header(params) => {
            // Union of the spans of every header with the given name.
            let mut acc = RangeSet::default();
            for header in req.headers_with_name(params.key.as_str()) {
                acc = acc | header.span().indices();
            }
            acc
        }
        MessagePart::Body(params) => {
            let Some(body) = &req.body else {
                return RangeSet::default();
            };
            // Body offset from the start of an HTTP message: the body range
            // is computed relative to the body start and must be shifted.
            let offset = body
                .span()
                .indices()
                .min()
                .expect("body span cannot be empty");
            let mut range = body_params_range(body, params);
            range.shift_right(&offset);
            range
        }
    }
}
/// Returns the range for the given `part` of the HTTP response.
///
/// An empty range is returned when the part cannot be located.
fn resp_part_range(resp: &Response, part: &HttpHandle) -> RangeSet<usize> {
    match &part.part {
        MessagePart::All => {
            // Full span of the response within the transcript.
            let start = resp.span().indices().min().unwrap();
            let end = resp.span().indices().end().unwrap();
            (start..end).into()
        }
        MessagePart::StartLine => resp.status.span().indices().clone(),
        MessagePart::Header(params) => {
            // Union of the spans of every header with the given name.
            let mut acc = RangeSet::default();
            for header in resp.headers_with_name(params.key.as_str()) {
                acc = acc | header.span().indices();
            }
            acc
        }
        MessagePart::Body(params) => {
            let Some(body) = &resp.body else {
                return RangeSet::default();
            };
            // Body offset from the start of an HTTP message: the body range
            // is computed relative to the body start and must be shifted.
            let offset = body.span().indices().min().expect("body cannot be empty");
            let mut range = body_params_range(body, params);
            range.shift_right(&offset);
            range
        }
    }
}
/// Returns the byte range of the `params` in the given `body`.
///
/// The range is relative to the start of the body; an empty range is
/// returned when the body fails to parse or the path does not resolve.
fn body_params_range(body: &Body, params: &BodyParams) -> RangeSet<usize> {
    match params {
        BodyParams::JsonPath(path) => {
            // TODO: use a better approach than re-parsing the entire
            // json for each path.
            match spansy::json::parse(body.as_bytes().to_vec().into()) {
                Ok(json) => json_path_range(&json, path),
                Err(_) => RangeSet::default(),
            }
        }
        // Match the variant explicitly (instead of a `_` catch-all) so that
        // adding a new `BodyParams` variant forces a compile error here.
        BodyParams::XPath(_) => unimplemented!("only json parsing is currently supported"),
    }
}
/// Returns the byte range of the keyvalue pair corresponding to the given
/// `path` in a JSON value `source`.
///
/// If the path points to an array element, only the range of the **value**
/// of the element is returned.
///
/// Returns an empty range when `path` does not resolve to a value.
fn json_path_range(source: &JsonValue, path: &String) -> RangeSet<usize> {
    let val = match source.get(path) {
        Some(val) => val,
        None => return RangeSet::default(),
    };
    let dot = ".";
    // Last path segment, e.g. "c" for "a.b.c".
    let last = path.split(dot).last().unwrap();
    // Whether `path` is a top-level key.
    let is_top_level = last == path;
    if last.parse::<usize>().is_ok() {
        // The path points to an array element, so we only need the range of
        // the **value**.
        val.span().indices().clone()
    } else {
        // Resolve the parent object so the full key-value pair (not just
        // the value) can be located.
        let parent_val = if is_top_level {
            source
        } else {
            // Strip the trailing ".<last>" segment to get the parent path.
            source
                .get(&path[..path.len() - last.len() - dot.len()])
                .expect("path is valid")
        };
        let JsonValue::Object(parent_obj) = parent_val else {
            // `last` is non-numeric here, so the parent cannot be an array.
            unreachable!("parent value is always an object");
        };
        // We need the range of the **key-value** pair.
        let kv = parent_obj
            .elems
            .iter()
            .find(|kv| kv.value == *val)
            .expect("element exists");
        kv.without_separator()
    }
}
#[cfg(test)]
mod tests {
    use crate::HeaderParams;
    use super::*;
    use spansy::http::parse_response;
    use tlsn_data_fixtures::http::{request, response};
    use tlsn_formats::spansy::http::parse_request;

    /// Checks `req_part_range` for every `MessagePart` variant against a
    /// fixture request.
    #[test]
    fn test_req_part_range() {
        let data = request::POST_JSON;
        let req = parse_request(data).unwrap();
        let s = std::str::from_utf8(data).unwrap();
        //===============All
        let part = HttpHandle {
            part: MessagePart::All,
            typ: MessageType::Request,
        };
        let range = req_part_range(&req, &part);
        assert_eq!(range, 0..data.len());
        //===============StartLine
        let part = HttpHandle {
            part: MessagePart::StartLine,
            typ: MessageType::Request,
        };
        let range = req_part_range(&req, &part);
        // The start line range includes the trailing CRLF.
        let end = s.find("\r\n").unwrap() + 2;
        assert_eq!(range, 0..end);
        //===============Header
        let part = HttpHandle {
            part: MessagePart::Header(HeaderParams {
                key: "Content-Length".to_string(),
            }),
            typ: MessageType::Request,
        };
        let range = req_part_range(&req, &part);
        let target: &'static str = "Content-Length: 44";
        let start = s.find(target).unwrap();
        // The header range includes the trailing CRLF.
        let end = start + target.len() + 2;
        assert_eq!(range, start..end);
        //===============Body
        let part = HttpHandle {
            part: MessagePart::Body(BodyParams::JsonPath("bazz".to_string())),
            typ: MessageType::Request,
        };
        let range = req_part_range(&req, &part);
        let target: &'static str = "\"bazz\": 123";
        let start = s.find(target).unwrap();
        let end = start + target.len();
        assert_eq!(range, start..end);
    }

    /// Checks `resp_part_range` for every `MessagePart` variant against a
    /// fixture response.
    #[test]
    fn test_resp_part_range() {
        let data = response::OK_JSON;
        let resp = parse_response(data).unwrap();
        let s = std::str::from_utf8(data).unwrap();
        //===============All
        let part = HttpHandle {
            part: MessagePart::All,
            typ: MessageType::Response,
        };
        let range = resp_part_range(&resp, &part);
        assert_eq!(range, 0..data.len());
        //===============StartLine
        let part = HttpHandle {
            part: MessagePart::StartLine,
            typ: MessageType::Response,
        };
        let range = resp_part_range(&resp, &part);
        let end = s.find("\r\n").unwrap() + 2;
        assert_eq!(range, 0..end);
        //===============Header
        let part = HttpHandle {
            part: MessagePart::Header(HeaderParams {
                key: "Content-Length".to_string(),
            }),
            typ: MessageType::Response,
        };
        let range = resp_part_range(&resp, &part);
        let target: &'static str = "Content-Length: 44";
        let start = s.find(target).unwrap();
        let end = start + target.len() + 2;
        assert_eq!(range, start..end);
        //===============Body
        let part = HttpHandle {
            part: MessagePart::Body(BodyParams::JsonPath("bazz".to_string())),
            // Fixed: this is a response handle (was `MessageType::Request`,
            // a copy-paste slip; `resp_part_range` only reads `part`, so
            // the assertion outcome is unchanged).
            typ: MessageType::Response,
        };
        let range = resp_part_range(&resp, &part);
        let target: &'static str = "\"bazz\": 123";
        let start = s.find(target).unwrap();
        let end = start + target.len();
        assert_eq!(range, start..end);
    }
}

View File

@@ -0,0 +1,20 @@
//! Core types of the verifier plugin.
use tlsn_core::VerifierOutput;
mod config;
pub use config::{Config, ConfigError};
/// Output of the verifier plugin.
#[allow(dead_code)]
pub struct Output {
    /// Output produced by the underlying TLSNotary verifier.
    output: VerifierOutput,
}
/// Params for protocol verifier.
pub struct VerifierParams {
    /// Maximum number of sent bytes this verifier accepts from a prover.
    pub max_sent_data: usize,
    /// Maximum number of received bytes this verifier accepts from a prover.
    pub max_recv_data: usize,
    /// Endpoint of the prover to connect to.
    pub prover_endpoint: String,
}

View File

@@ -0,0 +1,56 @@
use crate::{
DisclosureRule,
verifier::{Output, VerifierParams},
};
use tlsn::{
config::{ProtocolConfig, RootCertStore},
verifier::VerifierConfig,
};
use tlsn_core::VerifierOutput;
/// Verifier plugin config.
#[allow(dead_code)]
pub struct Config {
    /// Parameters for the underlying TLSNotary verifier.
    pub verifier_params: VerifierParams,
    /// Data which the prover is expected to disclose.
    pub disclose: Vec<DisclosureRule>,
    /// Root certificate store used for the verifier configuration.
    pub root_store: RootCertStore,
    /// Endpoint of the prover.
    pub prover_endpoint: String,
}
impl Config {
    /// Returns the prover endpoint.
    pub fn prover_endpoint(&self) -> &String {
        &self.verifier_params.prover_endpoint
    }

    /// Builds and returns [VerifierConfig].
    pub fn verifier_config(&self) -> VerifierConfig {
        let root_store = self.root_store.clone();
        VerifierConfig::builder()
            .root_store(root_store)
            .build()
            .unwrap()
    }

    /// Validates the given protocol `config`.
    ///
    /// Succeeds only when both transcript size limits requested by the
    /// prover stay within this verifier's configured limits.
    pub fn validate_protocol_config(&self, config: &ProtocolConfig) -> Result<(), ConfigError> {
        let recv_within_limit = config.max_recv_data() <= self.verifier_params.max_recv_data;
        let sent_within_limit = config.max_sent_data() <= self.verifier_params.max_sent_data;
        if !(recv_within_limit && sent_within_limit) {
            return Err(ConfigError(
                "failed to validate protocol config".to_string(),
            ));
        }
        Ok(())
    }

    /// Returns verifier plugin output.
    pub fn output(&self, output: VerifierOutput) -> Output {
        Output { output }
    }
}
/// Error returned by the verifier plugin [Config] methods.
#[derive(Debug, thiserror::Error)]
#[error("config error: {0}")]
pub struct ConfigError(String);

View File

@@ -0,0 +1,37 @@
{
"tax_year": 2024,
"taxpayer": {
"idnr": "12345678901",
"first_name": "Max",
"last_name": "Mustermann",
"date_of_birth": "1985-03-12",
"address": {
"street": "Musterstraße 1",
"postal_code": "10115",
"city": "Berlin"
}
},
"income": {
"employment_income": 54200.00,
"other_income": 1200.00,
"capital_gains": 350.00
},
"deductions": {
"pension_insurance": 4200.00,
"health_insurance": 3600.00,
"donations": 500.00,
"work_related_expenses": 1100.00
},
"assessment": {
"taxable_income": 49200.00,
"income_tax": 9156.00,
"solidarity_surcharge": 503.58,
"total_tax": 9659.58,
"prepaid_tax": 9500.00,
"refund": 159.58
},
"submission": {
"submitted_at": "2025-03-01T14:22:30Z",
"submitted_by": "ElsterOnline-Portal"
}
}

View File

@@ -47,6 +47,7 @@ fn app(state: AppState) -> Router {
.route("/formats/json", get(json))
.route("/formats/html", get(html))
.route("/protected", get(protected_route))
.route("/elster", get(elster_route))
.layer(TraceLayer::new_for_http())
.with_state(Arc::new(Mutex::new(state)))
}
@@ -196,6 +197,12 @@ async fn protected_route(_: AuthenticatedUser) -> Result<Json<Value>, StatusCode
get_json_value(include_str!("data/protected_data.json"))
}
/// Serves the bundled Elster tax-assessment JSON fixture.
///
/// Access requires authentication, enforced by the `AuthenticatedUser`
/// extractor; the extracted user value itself is unused.
async fn elster_route(_: AuthenticatedUser) -> Result<Json<Value>, StatusCode> {
    info!("Handling /elster");
    get_json_value(include_str!("data/elster.json"))
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -5,7 +5,7 @@ description = "A TLS backend trait for TLSNotary"
keywords = ["tls", "mpc", "2pc"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.14-pre"
edition = "2021"
[lints]

View File

@@ -5,7 +5,7 @@ description = "An async TLS client for TLSNotary"
keywords = ["tls", "mpc", "2pc", "client", "async"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.14-pre"
edition = "2021"
[lints]

View File

@@ -5,7 +5,7 @@ description = "A TLS client for TLSNotary"
keywords = ["tls", "mpc", "2pc", "client", "sync"]
categories = ["cryptography"]
license = "Apache-2.0 OR ISC OR MIT"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.14-pre"
edition = "2021"
autobenches = false

View File

@@ -5,7 +5,7 @@ description = "Cryptographic operations for the TLSNotary TLS client"
keywords = ["tls", "mpc", "2pc"]
categories = ["cryptography"]
license = "Apache-2.0 OR ISC OR MIT"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.14-pre"
edition = "2021"
[lints]

View File

@@ -465,19 +465,81 @@ fn convert_scheme(scheme: SignatureScheme) -> Result<SignatureAlgorithms, Error>
}
}
/// Signature algorithm.
///
/// Mirrors the `webpki::ring` signature verification algorithms which
/// [SignatureAlgorithm::from_alg] can identify.
#[derive(Debug, Clone, Copy, PartialEq)]
#[allow(non_camel_case_types)]
pub enum SignatureAlgorithm {
    ECDSA_NISTP256_SHA256,
    ECDSA_NISTP256_SHA384,
    ECDSA_NISTP384_SHA256,
    ECDSA_NISTP384_SHA384,
    ED25519,
    RSA_PKCS1_2048_8192_SHA256,
    RSA_PKCS1_2048_8192_SHA384,
    RSA_PKCS1_2048_8192_SHA512,
    RSA_PSS_2048_8192_SHA256_LEGACY_KEY,
    RSA_PSS_2048_8192_SHA384_LEGACY_KEY,
    RSA_PSS_2048_8192_SHA512_LEGACY_KEY,
}
impl SignatureAlgorithm {
    /// Maps a webpki signature verification algorithm to the matching
    /// [SignatureAlgorithm] variant by comparing algorithm identifiers.
    ///
    /// # Panics
    ///
    /// Panics if `alg` is not one of the `webpki::ring` algorithms listed
    /// below. NOTE(review): presumably every algorithm reachable via
    /// `convert_scheme` is covered — confirm when adding new schemes.
    pub fn from_alg(alg: &dyn pki_types::SignatureVerificationAlgorithm) -> Self {
        let id = alg.signature_alg_id();
        if id == webpki::ring::ECDSA_P256_SHA256.signature_alg_id() {
            SignatureAlgorithm::ECDSA_NISTP256_SHA256
        } else if id == webpki::ring::ECDSA_P256_SHA384.signature_alg_id() {
            SignatureAlgorithm::ECDSA_NISTP256_SHA384
        } else if id == webpki::ring::ECDSA_P384_SHA256.signature_alg_id() {
            SignatureAlgorithm::ECDSA_NISTP384_SHA256
        } else if id == webpki::ring::ECDSA_P384_SHA384.signature_alg_id() {
            SignatureAlgorithm::ECDSA_NISTP384_SHA384
        } else if id == webpki::ring::ED25519.signature_alg_id() {
            SignatureAlgorithm::ED25519
        } else if id == webpki::ring::RSA_PKCS1_2048_8192_SHA256.signature_alg_id() {
            SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256
        } else if id == webpki::ring::RSA_PKCS1_2048_8192_SHA384.signature_alg_id() {
            SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384
        } else if id == webpki::ring::RSA_PKCS1_2048_8192_SHA512.signature_alg_id() {
            SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512
        } else if id == webpki::ring::RSA_PSS_2048_8192_SHA256_LEGACY_KEY.signature_alg_id() {
            SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY
        } else if id == webpki::ring::RSA_PSS_2048_8192_SHA384_LEGACY_KEY.signature_alg_id() {
            SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY
        } else if id == webpki::ring::RSA_PSS_2048_8192_SHA512_LEGACY_KEY.signature_alg_id() {
            SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY
        } else {
            unreachable!()
        }
    }
}
/// Verify the signature and return the algorithm which passed verification.
///
/// # Errors
///
/// Returns an error if the certificate cannot be parsed, if the TLS
/// signature scheme cannot be converted, or if verification fails.
pub fn verify_sig_determine_alg(
    cert: &Certificate,
    message: &[u8],
    dss: &DigitallySignedStruct,
) -> Result<SignatureAlgorithm, Error> {
    let cert = pki_types::CertificateDer::from(cert.0.as_slice());
    let cert = webpki::EndEntityCert::try_from(&cert).map_err(pki_error)?;
    // The TLS scheme maps to several candidate webpki algorithms; try each
    // until one verifies.
    verify_sig_using_any_alg(&cert, convert_scheme(dss.scheme)?, message, &dss.sig.0)
        .map_err(pki_error)
}
fn verify_sig_using_any_alg(
cert: &webpki::EndEntityCert,
algs: SignatureAlgorithms,
message: &[u8],
sig: &[u8],
) -> Result<(), webpki::Error> {
) -> Result<SignatureAlgorithm, webpki::Error> {
// TLS doesn't itself give us enough info to map to a single
// webpki::SignatureAlgorithm. Therefore, convert_algs maps to several and
// we try them all.
for alg in algs {
match cert.verify_signature(*alg, message, sig) {
Ok(_) => return Ok(SignatureAlgorithm::from_alg(*alg)),
Err(webpki::Error::UnsupportedSignatureAlgorithmForPublicKeyContext(_)) => continue,
res => return res,
Err(e) => return Err(e),
}
}

View File

@@ -4,7 +4,7 @@ authors = ["TLSNotary Team"]
keywords = ["tls", "mpc", "2pc", "prover"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.14-pre"
edition = "2024"
[lints]
@@ -31,6 +31,7 @@ web-spawn = { workspace = true, optional = true }
mpz-common = { workspace = true }
mpz-core = { workspace = true }
mpz-circuits = { workspace = true }
mpz-garble = { workspace = true }
mpz-garble-core = { workspace = true }
mpz-hash = { workspace = true }
@@ -40,6 +41,8 @@ mpz-ot = { workspace = true }
mpz-vm-core = { workspace = true }
mpz-zk = { workspace = true }
aes = { workspace = true }
ctr = { workspace = true }
derive_builder = { workspace = true }
futures = { workspace = true }
opaque-debug = { workspace = true }
@@ -57,7 +60,9 @@ rangeset = { workspace = true }
webpki-roots = { workspace = true }
[dev-dependencies]
mpz-ideal-vm = { workspace = true }
rstest = { workspace = true }
tlsn-core = { workspace = true, features = ["fixtures"] }
tlsn-server-fixture = { workspace = true }
tlsn-server-fixture-certs = { workspace = true }
tokio = { workspace = true, features = ["full"] }

View File

@@ -1,109 +0,0 @@
//! Plaintext commitment and proof of encryption.
pub(crate) mod hash;
pub(crate) mod transcript;
use mpz_core::bitvec::BitVec;
use mpz_memory_core::{
DecodeFutureTyped, Vector,
binary::{Binary, U8},
};
use mpz_vm_core::{Vm, prelude::*};
use tlsn_core::transcript::Record;
use crate::{
Role,
zk_aes_ctr::{ZkAesCtr, ZkAesCtrError},
};
/// Commits the plaintext of the provided records, returning a proof of
/// encryption.
///
/// Writes the plaintext VM reference to the provided records.
///
/// # Errors
///
/// Returns an error if encryption setup fails, if the prover is missing a
/// record's plaintext, or if a VM operation fails.
pub(crate) fn commit_records<'record>(
    vm: &mut dyn Vm<Binary>,
    aes: &mut ZkAesCtr,
    records: impl IntoIterator<Item = &'record Record>,
) -> Result<(Vec<Vector<U8>>, RecordProof), RecordProofError> {
    let mut plaintexts = Vec::new();
    let mut ciphertexts = Vec::new();
    for record in records {
        // Set up ZK AES-CTR encryption for this record, obtaining VM
        // references for the plaintext input and the ciphertext output.
        let (plaintext_ref, ciphertext_ref) = aes
            .encrypt(vm, record.explicit_nonce.clone(), record.ciphertext.len())
            .map_err(ErrorRepr::Aes)?;
        // Only the prover knows the plaintext and assigns it; on the other
        // role the reference is left unassigned.
        if let Role::Prover = aes.role() {
            let Some(plaintext) = record.plaintext.clone() else {
                return Err(ErrorRepr::MissingPlaintext.into());
            };
            vm.assign(plaintext_ref, plaintext)
                .map_err(RecordProofError::vm)?;
        }
        vm.commit(plaintext_ref).map_err(RecordProofError::vm)?;
        // Decode the ciphertext so it can later be compared against the
        // expected ciphertext in `RecordProof::verify`.
        let ciphertext = vm.decode(ciphertext_ref).map_err(RecordProofError::vm)?;
        plaintexts.push(plaintext_ref);
        ciphertexts.push((ciphertext, record.ciphertext.clone()));
    }
    Ok((plaintexts, RecordProof { ciphertexts }))
}
/// Proof of encryption.
#[derive(Debug)]
#[must_use]
#[allow(clippy::type_complexity)]
pub(crate) struct RecordProof {
    /// Pairs of (pending decode of the computed ciphertext, expected
    /// ciphertext bytes), one per committed record.
    ciphertexts: Vec<(DecodeFutureTyped<BitVec, Vec<u8>>, Vec<u8>)>,
}
impl RecordProof {
    /// Verifies the proof.
    ///
    /// Receives each decoded ciphertext from the VM and checks it against
    /// the expected ciphertext captured at commit time.
    ///
    /// # Errors
    ///
    /// Returns an error if receiving a decode fails, if a ciphertext was
    /// never decoded, or if a decoded ciphertext does not match.
    pub(crate) fn verify(self) -> Result<(), RecordProofError> {
        let Self { ciphertexts } = self;
        for (mut ciphertext, expected) in ciphertexts {
            let ciphertext = ciphertext
                .try_recv()
                .map_err(RecordProofError::vm)?
                .ok_or_else(|| ErrorRepr::NotDecoded)?;
            if ciphertext != expected {
                return Err(ErrorRepr::InvalidCiphertext.into());
            }
        }
        Ok(())
    }
}
/// Error for [`RecordProof`].
///
/// Transparent wrapper over the internal [`ErrorRepr`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub(crate) struct RecordProofError(#[from] ErrorRepr);
impl RecordProofError {
    /// Wraps an arbitrary VM error into a [`RecordProofError`].
    fn vm<E>(err: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync + 'static>>,
    {
        Self(ErrorRepr::Vm(err.into()))
    }
}
/// Internal error representation for record proofs.
#[derive(Debug, thiserror::Error)]
#[error("record proof error: {0}")]
enum ErrorRepr {
    /// An operation on the underlying VM failed.
    #[error("VM error: {0}")]
    Vm(Box<dyn std::error::Error + Send + Sync + 'static>),
    /// ZK AES-CTR encryption setup failed.
    #[error("zk aes error: {0}")]
    Aes(ZkAesCtrError),
    /// The prover's record is missing its plaintext.
    #[error("plaintext is missing")]
    MissingPlaintext,
    /// The ciphertext decode future resolved without a value.
    #[error("ciphertext was not decoded")]
    NotDecoded,
    /// The computed ciphertext differs from the expected one.
    #[error("ciphertext does not match expected")]
    InvalidCiphertext,
}

View File

@@ -1,211 +0,0 @@
use mpz_memory_core::{
MemoryExt, Vector,
binary::{Binary, U8},
};
use mpz_vm_core::{Vm, VmError};
use rangeset::{Intersection, RangeSet};
use tlsn_core::transcript::{Direction, PartialTranscript};
/// References to the application plaintext in the transcript.
#[derive(Debug, Default, Clone)]
pub(crate) struct TranscriptRefs {
    /// VM references covering the sent plaintext, in transcript order.
    sent: Vec<Vector<U8>>,
    /// VM references covering the received plaintext, in transcript order.
    recv: Vec<Vector<U8>>,
}
impl TranscriptRefs {
    /// Creates a new set of transcript references.
    pub(crate) fn new(sent: Vec<Vector<U8>>, recv: Vec<Vector<U8>>) -> Self {
        Self { sent, recv }
    }

    /// Returns the sent plaintext references.
    pub(crate) fn sent(&self) -> &[Vector<U8>] {
        &self.sent
    }

    /// Returns the received plaintext references.
    pub(crate) fn recv(&self) -> &[Vector<U8>] {
        &self.recv
    }

    /// Returns the transcript lengths as a `(sent, recv)` pair of byte
    /// counts.
    pub(crate) fn len(&self) -> (usize, usize) {
        let sent = self.sent.iter().map(|v| v.len()).sum();
        let recv = self.recv.iter().map(|v| v.len()).sum();
        (sent, recv)
    }

    /// Returns VM references for the given direction and index, otherwise
    /// `None` if the index is out of bounds.
    pub(crate) fn get(
        &self,
        direction: Direction,
        idx: &RangeSet<usize>,
    ) -> Option<Vec<Vector<U8>>> {
        if idx.is_empty() {
            return Some(Vec::new());
        }
        let refs = match direction {
            Direction::Sent => &self.sent,
            Direction::Received => &self.recv,
        };
        // Computes the transcript range for each reference.
        let mut start = 0;
        let mut slice_iter = refs.iter().map(move |slice| {
            let out = (slice, start..start + slice.len());
            start += slice.len();
            out
        });
        let mut slices = Vec::new();
        // Walk the requested ranges and the reference slices in lockstep,
        // advancing through slices as the ranges progress.
        let (mut slice, mut slice_range) = slice_iter.next()?;
        for range in idx.iter_ranges() {
            loop {
                if let Some(intersection) = slice_range.intersection(&range) {
                    // Translate the transcript-level intersection into an
                    // offset within the current slice.
                    let start = intersection.start - slice_range.start;
                    let end = intersection.end - slice_range.start;
                    slices.push(slice.get(start..end).expect("range should be in bounds"));
                }
                // Proceed to next range if the current slice extends beyond. Otherwise, proceed
                // to the next slice.
                if range.end <= slice_range.end {
                    break;
                } else {
                    (slice, slice_range) = slice_iter.next()?;
                }
            }
        }
        Some(slices)
    }
}
/// Decodes the transcript.
///
/// Requests decoding of the VM references covering the given `sent` and
/// `recv` ranges. The decode futures are intentionally dropped: only the
/// side effect of requesting the decode is needed here.
///
/// # Errors
///
/// Returns a [`VmError`] if requesting a decode fails.
pub(crate) fn decode_transcript(
    vm: &mut dyn Vm<Binary>,
    sent: &RangeSet<usize>,
    recv: &RangeSet<usize>,
    refs: &TranscriptRefs,
) -> Result<(), VmError> {
    let sent_refs = refs.get(Direction::Sent, sent).expect("index is in bounds");
    let recv_refs = refs
        .get(Direction::Received, recv)
        .expect("index is in bounds");
    for slice in sent_refs.into_iter().chain(recv_refs) {
        // Drop the future, we don't need it.
        drop(vm.decode(slice)?);
    }
    Ok(())
}
/// Verifies a partial transcript.
///
/// Compares the plaintext previously decoded in the VM for the
/// authenticated ranges against the purported bytes carried by
/// `transcript`.
///
/// # Errors
///
/// Returns [`InconsistentTranscript`] if the purported bytes differ from
/// the authenticated plaintext.
pub(crate) fn verify_transcript(
    vm: &mut dyn Vm<Binary>,
    transcript: &PartialTranscript,
    refs: &TranscriptRefs,
) -> Result<(), InconsistentTranscript> {
    let sent_refs = refs
        .get(Direction::Sent, transcript.sent_authed())
        .expect("index is in bounds")
    let recv_refs = refs
        .get(Direction::Received, transcript.received_authed())
        .expect("index is in bounds");
    // Gather the plaintext the VM actually authenticated.
    let mut authenticated_data = Vec::new();
    for data in sent_refs.into_iter().chain(recv_refs) {
        let plaintext = vm
            .get(data)
            .expect("reference is valid")
            .expect("plaintext is decoded");
        authenticated_data.extend_from_slice(&plaintext);
    }
    // Gather the claimed bytes for the same ranges in the same order
    // (all sent ranges first, then all received ranges).
    let mut purported_data = Vec::with_capacity(authenticated_data.len());
    for range in transcript.sent_authed().iter_ranges() {
        purported_data.extend_from_slice(&transcript.sent_unsafe()[range]);
    }
    for range in transcript.received_authed().iter_ranges() {
        purported_data.extend_from_slice(&transcript.received_unsafe()[range]);
    }
    if purported_data != authenticated_data {
        return Err(InconsistentTranscript {});
    }
    Ok(())
}
/// Error for [`verify_transcript`].
///
/// Returned when the purported transcript bytes do not match the
/// authenticated plaintext.
#[derive(Debug, thiserror::Error)]
#[error("inconsistent transcript")]
pub(crate) struct InconsistentTranscript {}
#[cfg(test)]
mod tests {
    use super::TranscriptRefs;
    use mpz_memory_core::{FromRaw, Slice, Vector, binary::U8};
    use rangeset::RangeSet;
    use std::ops::Range;
    use tlsn_core::transcript::Direction;

    // TRANSCRIPT_REFS:
    //
    // 48..96 -> 6 slots
    // 112..176 -> 8 slots
    // 240..288 -> 6 slots
    // 352..392 -> 5 slots
    // 440..480 -> 5 slots
    const TRANSCRIPT_REFS: &[Range<usize>] = &[48..96, 112..176, 240..288, 352..392, 440..480];
    // Requested slot indices (one slot = 8 raw units, per the arithmetic
    // above).
    const IDXS: &[Range<usize>] = &[0..4, 5..10, 14..16, 16..28];
    // 1. Take slots 0..4, 4 slots -> 48..80 (4)
    // 2. Take slots 5..10, 5 slots -> 88..96 (1) + 112..144 (4)
    // 3. Take slots 14..16, 2 slots -> 240..256 (2)
    // 4. Take slots 16..28, 12 slots -> 256..288 (4) + 352..392 (5) + 440..464 (3)
    //
    // 5. Merge slots 240..256 and 256..288 => 240..288 and get EXPECTED_REFS
    const EXPECTED_REFS: &[Range<usize>] =
        &[48..80, 88..96, 112..144, 240..288, 352..392, 440..464];

    /// Checks that `TranscriptRefs::get` maps requested slot ranges onto
    /// the correct sub-slices of the stored references.
    #[test]
    fn test_transcript_refs_get() {
        let transcript_refs: Vec<Vector<U8>> = TRANSCRIPT_REFS
            .iter()
            .cloned()
            .map(|range| Vector::from_raw(Slice::from_range_unchecked(range)))
            .collect();
        let transcript_refs = TranscriptRefs {
            sent: transcript_refs.clone(),
            recv: transcript_refs,
        };
        let vm_refs = transcript_refs
            .get(Direction::Sent, &RangeSet::from(IDXS))
            .unwrap();
        let expected_refs: Vec<Vector<U8>> = EXPECTED_REFS
            .iter()
            .cloned()
            .map(|range| Vector::from_raw(Slice::from_range_unchecked(range)))
            .collect();
        assert_eq!(
            vm_refs.len(),
            expected_refs.len(),
            "Length of actual and expected refs are not equal"
        );
        for (&expected, actual) in expected_refs.iter().zip(vm_refs) {
            assert_eq!(expected, actual);
        }
    }
}

View File

@@ -1,24 +1,17 @@
//! TLSNotary protocol config and config utilities.
use core::fmt;
use once_cell::sync::Lazy;
use semver::Version;
use serde::{Deserialize, Serialize};
use std::error::Error;
pub use tlsn_core::webpki::{CertificateDer, PrivateKeyDer, RootCertStore};
// Default is 32 bytes to decrypt the TLS protocol messages.
const DEFAULT_MAX_RECV_ONLINE: usize = 32;
// Default maximum number of TLS records to allow.
//
// This would allow for up to 50Mb upload from prover to verifier.
const DEFAULT_RECORDS_LIMIT: usize = 256;
// Current version that is running.
static VERSION: Lazy<Version> = Lazy::new(|| {
Version::parse(env!("CARGO_PKG_VERSION"))
.map_err(|err| ProtocolConfigError::new(ErrorKind::Version, err))
.unwrap()
pub(crate) static VERSION: Lazy<Version> = Lazy::new(|| {
Version::parse(env!("CARGO_PKG_VERSION")).expect("cargo pkg version should be a valid semver")
});
/// Protocol configuration to be set up initially by prover and verifier.
@@ -47,9 +40,6 @@ pub struct ProtocolConfig {
/// Network settings.
#[builder(default)]
network: NetworkSetting,
/// Version that is being run by prover/verifier.
#[builder(setter(skip), default = "VERSION.clone()")]
version: Version,
}
impl ProtocolConfigBuilder {
@@ -108,125 +98,6 @@ impl ProtocolConfig {
}
}
/// Protocol configuration validator used by checker (i.e. verifier) to perform
/// compatibility check with the peer's (i.e. the prover's) configuration.
#[derive(derive_builder::Builder, Clone, Debug, Serialize, Deserialize)]
pub struct ProtocolConfigValidator {
    /// Maximum number of bytes that can be sent.
    max_sent_data: usize,
    /// Maximum number of application data records that can be sent.
    #[builder(default = "DEFAULT_RECORDS_LIMIT")]
    max_sent_records: usize,
    /// Maximum number of bytes that can be received.
    max_recv_data: usize,
    /// Maximum number of application data records that can be received online.
    #[builder(default = "DEFAULT_RECORDS_LIMIT")]
    max_recv_records_online: usize,
    /// Version that is being run by checker.
    // Never user-settable: always populated from the crate version.
    #[builder(setter(skip), default = "VERSION.clone()")]
    version: Version,
}
impl ProtocolConfigValidator {
/// Creates a new builder for `ProtocolConfigValidator`.
pub fn builder() -> ProtocolConfigValidatorBuilder {
ProtocolConfigValidatorBuilder::default()
}
/// Returns the maximum number of bytes that can be sent.
pub fn max_sent_data(&self) -> usize {
self.max_sent_data
}
/// Returns the maximum number of application data records that can
/// be sent.
pub fn max_sent_records(&self) -> usize {
self.max_sent_records
}
/// Returns the maximum number of bytes that can be received.
pub fn max_recv_data(&self) -> usize {
self.max_recv_data
}
/// Returns the maximum number of application data records that can
/// be received online.
pub fn max_recv_records_online(&self) -> usize {
self.max_recv_records_online
}
/// Performs compatibility check of the protocol configuration between
/// prover and verifier.
pub fn validate(&self, config: &ProtocolConfig) -> Result<(), ProtocolConfigError> {
self.check_max_transcript_size(config.max_sent_data, config.max_recv_data)?;
self.check_max_records(config.max_sent_records, config.max_recv_records_online)?;
self.check_version(&config.version)?;
Ok(())
}
// Checks if both the sent and recv data are within limits.
fn check_max_transcript_size(
&self,
max_sent_data: usize,
max_recv_data: usize,
) -> Result<(), ProtocolConfigError> {
if max_sent_data > self.max_sent_data {
return Err(ProtocolConfigError::max_transcript_size(format!(
"max_sent_data {:?} is greater than the configured limit {:?}",
max_sent_data, self.max_sent_data,
)));
}
if max_recv_data > self.max_recv_data {
return Err(ProtocolConfigError::max_transcript_size(format!(
"max_recv_data {:?} is greater than the configured limit {:?}",
max_recv_data, self.max_recv_data,
)));
}
Ok(())
}
fn check_max_records(
&self,
max_sent_records: Option<usize>,
max_recv_records_online: Option<usize>,
) -> Result<(), ProtocolConfigError> {
if let Some(max_sent_records) = max_sent_records
&& max_sent_records > self.max_sent_records
{
return Err(ProtocolConfigError::max_record_count(format!(
"max_sent_records {} is greater than the configured limit {}",
max_sent_records, self.max_sent_records,
)));
}
if let Some(max_recv_records_online) = max_recv_records_online
&& max_recv_records_online > self.max_recv_records_online
{
return Err(ProtocolConfigError::max_record_count(format!(
"max_recv_records_online {} is greater than the configured limit {}",
max_recv_records_online, self.max_recv_records_online,
)));
}
Ok(())
}
// Checks if both versions are the same (might support check for different but
// compatible versions in the future).
fn check_version(&self, peer_version: &Version) -> Result<(), ProtocolConfigError> {
    if *peer_version == self.version {
        Ok(())
    } else {
        Err(ProtocolConfigError::version(format!(
            "prover's version {:?} is different from verifier's version {:?}",
            peer_version, self.version
        )))
    }
}
}
/// Settings for the network environment.
///
/// Provides optimization options to adapt the protocol to different network
@@ -246,123 +117,3 @@ impl Default for NetworkSetting {
Self::Latency
}
}
/// A ProtocolConfig error.
#[derive(thiserror::Error, Debug)]
pub struct ProtocolConfigError {
    // Failure category; rendered as the message prefix by the Display impl.
    kind: ErrorKind,
    // Underlying cause, or a plain message boxed as an error.
    #[source]
    source: Option<Box<dyn Error + Send + Sync>>,
}
impl ProtocolConfigError {
    /// Builds an error of the given kind wrapping an underlying source.
    fn new<E>(kind: ErrorKind, source: E) -> Self
    where
        E: Into<Box<dyn Error + Send + Sync>>,
    {
        Self {
            kind,
            source: Some(source.into()),
        }
    }

    /// Builds a max-transcript-size error from a message.
    fn max_transcript_size(msg: impl Into<String>) -> Self {
        Self::new(ErrorKind::MaxTranscriptSize, msg.into())
    }

    /// Builds a max-record-count error from a message.
    fn max_record_count(msg: impl Into<String>) -> Self {
        Self::new(ErrorKind::MaxRecordCount, msg.into())
    }

    /// Builds a version-mismatch error from a message.
    fn version(msg: impl Into<String>) -> Self {
        Self::new(ErrorKind::Version, msg.into())
    }
}
impl fmt::Display for ProtocolConfigError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.kind {
ErrorKind::MaxTranscriptSize => write!(f, "max transcript size exceeded")?,
ErrorKind::MaxRecordCount => write!(f, "max record count exceeded")?,
ErrorKind::Version => write!(f, "version error")?,
}
if let Some(ref source) = self.source {
write!(f, " caused by: {source}")?;
}
Ok(())
}
}
// Category of a `ProtocolConfigError`.
#[derive(Debug)]
enum ErrorKind {
    // A transcript size limit (sent or received bytes) was exceeded.
    MaxTranscriptSize,
    // A record count limit was exceeded.
    MaxRecordCount,
    // Prover and verifier protocol versions differ.
    Version,
}
#[cfg(test)]
mod test {
    use super::*;
    use rstest::{fixture, rstest};

    // Limits used by the shared validator fixture below.
    const TEST_MAX_SENT_LIMIT: usize = 1 << 12;
    const TEST_MAX_RECV_LIMIT: usize = 1 << 14;

    // Validator shared across cases (`#[once]` builds it a single time).
    #[fixture]
    #[once]
    fn config_validator() -> ProtocolConfigValidator {
        ProtocolConfigValidator::builder()
            .max_sent_data(TEST_MAX_SENT_LIMIT)
            .max_recv_data(TEST_MAX_RECV_LIMIT)
            .build()
            .unwrap()
    }

    // Peer configs at or below both limits must validate.
    #[rstest]
    #[case::same_max_sent_recv_data(TEST_MAX_SENT_LIMIT, TEST_MAX_RECV_LIMIT)]
    #[case::smaller_max_sent_data(1 << 11, TEST_MAX_RECV_LIMIT)]
    #[case::smaller_max_recv_data(TEST_MAX_SENT_LIMIT, 1 << 13)]
    #[case::smaller_max_sent_recv_data(1 << 7, 1 << 9)]
    fn test_check_success(
        config_validator: &ProtocolConfigValidator,
        #[case] max_sent_data: usize,
        #[case] max_recv_data: usize,
    ) {
        let peer_config = ProtocolConfig::builder()
            .max_sent_data(max_sent_data)
            .max_recv_data(max_recv_data)
            .build()
            .unwrap();
        assert!(config_validator.validate(&peer_config).is_ok())
    }

    // Peer configs exceeding either limit must be rejected.
    #[rstest]
    #[case::bigger_max_sent_data(1 << 13, TEST_MAX_RECV_LIMIT)]
    #[case::bigger_max_recv_data(1 << 10, 1 << 16)]
    #[case::bigger_max_sent_recv_data(1 << 14, 1 << 21)]
    fn test_check_fail(
        config_validator: &ProtocolConfigValidator,
        #[case] max_sent_data: usize,
        #[case] max_recv_data: usize,
    ) {
        let peer_config = ProtocolConfig::builder()
            .max_sent_data(max_sent_data)
            .max_recv_data(max_recv_data)
            .build()
            .unwrap();
        assert!(config_validator.validate(&peer_config).is_err())
    }
}

View File

@@ -1,249 +0,0 @@
//! Encoding commitment protocol.
use std::ops::Range;
use mpz_common::Context;
use mpz_memory_core::{
Vector,
binary::U8,
correlated::{Delta, Key, Mac},
};
use rand::Rng;
use rangeset::RangeSet;
use serde::{Deserialize, Serialize};
use serio::{SinkExt, stream::IoStreamExt};
use tlsn_core::{
hash::HashAlgorithm,
transcript::{
Direction,
encoding::{
Encoder, EncoderSecret, EncodingCommitment, EncodingProvider, EncodingProviderError,
EncodingTree, EncodingTreeError, new_encoder,
},
},
};
use crate::commit::transcript::TranscriptRefs;
/// Bytes of encoding, per byte.
const ENCODING_SIZE: usize = 128;
// XOR-masked encodings exchanged between the parties (masked with keys on
// the sending side, unmasked with MACs on the receiving side).
#[derive(Debug, Serialize, Deserialize)]
struct Encodings {
    // Masked encodings for the sent transcript bytes.
    sent: Vec<u8>,
    // Masked encodings for the received transcript bytes.
    recv: Vec<u8>,
}
/// Transfers the encodings using the provided seed and keys.
///
/// The keys must be consistent with the global delta used in the encodings.
///
/// Protocol order: send masked encodings, receive the peer's tree root,
/// then reveal the encoder secret.
pub(crate) async fn transfer<'a>(
    ctx: &mut Context,
    refs: &TranscriptRefs,
    delta: &Delta,
    f: impl Fn(Vector<U8>) -> &'a [Key],
) -> Result<EncodingCommitment, EncodingError> {
    let secret = EncoderSecret::new(rand::rng().random(), delta.as_block().to_bytes());
    let encoder = new_encoder(&secret);

    // Flatten the VM keys for each direction into contiguous byte strings.
    let sent_keys: Vec<u8> = refs
        .sent()
        .iter()
        .copied()
        .flat_map(&f)
        .flat_map(|key| key.as_block().as_bytes())
        .copied()
        .collect();
    let recv_keys: Vec<u8> = refs
        .recv()
        .iter()
        .copied()
        .flat_map(&f)
        .flat_map(|key| key.as_block().as_bytes())
        .copied()
        .collect();

    // Each transcript byte is represented by ENCODING_SIZE key bytes.
    assert_eq!(sent_keys.len() % ENCODING_SIZE, 0);
    assert_eq!(recv_keys.len() % ENCODING_SIZE, 0);

    let mut sent_encoding = Vec::with_capacity(sent_keys.len());
    let mut recv_encoding = Vec::with_capacity(recv_keys.len());
    encoder.encode_range(
        Direction::Sent,
        0..sent_keys.len() / ENCODING_SIZE,
        &mut sent_encoding,
    );
    encoder.encode_range(
        Direction::Received,
        0..recv_keys.len() / ENCODING_SIZE,
        &mut recv_encoding,
    );

    // Mask the encodings with the keys before sending.
    sent_encoding
        .iter_mut()
        .zip(sent_keys)
        .for_each(|(enc, key)| *enc ^= key);
    recv_encoding
        .iter_mut()
        .zip(recv_keys)
        .for_each(|(enc, key)| *enc ^= key);

    // Set frame limit and add some extra bytes cushion room.
    let (sent, recv) = refs.len();
    let frame_limit = ENCODING_SIZE * (sent + recv) + ctx.io().limit();

    ctx.io_mut()
        .with_limit(frame_limit)
        .send(Encodings {
            sent: sent_encoding,
            recv: recv_encoding,
        })
        .await?;

    let root = ctx.io_mut().expect_next().await?;

    ctx.io_mut().send(secret.clone()).await?;

    // `secret` is moved here rather than cloned a second time: nothing reads
    // it after this point, so the previous extra clone was redundant.
    Ok(EncodingCommitment { root, secret })
}
/// Receives the encodings using the provided MACs.
///
/// The MACs must be consistent with the global delta used in the encodings.
pub(crate) async fn receive<'a>(
    ctx: &mut Context,
    hasher: &(dyn HashAlgorithm + Send + Sync),
    refs: &TranscriptRefs,
    f: impl Fn(Vector<U8>) -> &'a [Mac],
    idxs: impl IntoIterator<Item = &(Direction, RangeSet<usize>)>,
) -> Result<(EncodingCommitment, EncodingTree), EncodingError> {
    // Set frame limit and add some extra bytes cushion room.
    let (sent, recv) = refs.len();
    let frame_limit = ENCODING_SIZE * (sent + recv) + ctx.io().limit();
    let Encodings { mut sent, mut recv } =
        ctx.io_mut().with_limit(frame_limit).expect_next().await?;
    // Flatten the VM MACs for each direction into contiguous byte strings.
    let sent_macs: Vec<u8> = refs
        .sent()
        .iter()
        .copied()
        .flat_map(&f)
        .flat_map(|mac| mac.as_bytes())
        .copied()
        .collect();
    let recv_macs: Vec<u8> = refs
        .recv()
        .iter()
        .copied()
        .flat_map(&f)
        .flat_map(|mac| mac.as_bytes())
        .copied()
        .collect();
    assert_eq!(sent_macs.len() % ENCODING_SIZE, 0);
    assert_eq!(recv_macs.len() % ENCODING_SIZE, 0);
    // The peer must have sent exactly one masked byte per MAC byte.
    if sent.len() != sent_macs.len() {
        return Err(ErrorRepr::IncorrectMacCount {
            direction: Direction::Sent,
            expected: sent_macs.len(),
            got: sent.len(),
        }
        .into());
    }
    if recv.len() != recv_macs.len() {
        return Err(ErrorRepr::IncorrectMacCount {
            direction: Direction::Received,
            expected: recv_macs.len(),
            got: recv.len(),
        }
        .into());
    }
    // Unmask the received encodings by XORing the MACs back in.
    sent.iter_mut()
        .zip(sent_macs)
        .for_each(|(enc, mac)| *enc ^= mac);
    recv.iter_mut()
        .zip(recv_macs)
        .for_each(|(enc, mac)| *enc ^= mac);
    let provider = Provider { sent, recv };
    // Build the encoding tree over the selected indices, send its root,
    // then receive the peer's encoder secret to complete the commitment.
    let tree = EncodingTree::new(hasher, idxs, &provider)?;
    let root = tree.root();
    ctx.io_mut().send(root.clone()).await?;
    let secret = ctx.io_mut().expect_next().await?;
    let commitment = EncodingCommitment { root, secret };
    Ok((commitment, tree))
}
// In-memory encoding provider backed by the unmasked encodings received
// from the peer.
#[derive(Debug)]
struct Provider {
    // Unmasked encodings for the sent transcript bytes.
    sent: Vec<u8>,
    // Unmasked encodings for the received transcript bytes.
    recv: Vec<u8>,
}
impl EncodingProvider for Provider {
    /// Appends the encodings covering `range` (transcript byte indices) for
    /// `direction` to `dest`, erroring when the range is out of bounds.
    fn provide_encoding(
        &self,
        direction: Direction,
        range: Range<usize>,
        dest: &mut Vec<u8>,
    ) -> Result<(), EncodingProviderError> {
        let encodings = match direction {
            Direction::Sent => &self.sent,
            Direction::Received => &self.recv,
        };

        // Scale transcript byte indices to encoding byte offsets.
        let span = range.start * ENCODING_SIZE..range.end * ENCODING_SIZE;
        if span.end > encodings.len() {
            return Err(EncodingProviderError);
        }

        dest.extend_from_slice(&encodings[span]);

        Ok(())
    }
}
/// Encoding protocol error.
// Opaque newtype over `ErrorRepr`; Display/Error delegate transparently.
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct EncodingError(#[from] ErrorRepr);
// Internal representation of an encoding protocol failure.
#[derive(Debug, thiserror::Error)]
#[error("encoding protocol error: {0}")]
enum ErrorRepr {
    /// Transport failure while exchanging encodings.
    #[error("I/O error: {0}")]
    Io(std::io::Error),
    /// Peer sent a different number of masked bytes than we hold MACs for.
    #[error("incorrect MAC count for {direction}: expected {expected}, got {got}")]
    IncorrectMacCount {
        direction: Direction,
        expected: usize,
        got: usize,
    },
    /// Failure while constructing the encoding tree.
    #[error("encoding tree error: {0}")]
    EncodingTree(EncodingTreeError),
}
// Route I/O failures into the opaque error type.
impl From<std::io::Error> for EncodingError {
    fn from(value: std::io::Error) -> Self {
        Self(ErrorRepr::Io(value))
    }
}

// Route encoding-tree failures into the opaque error type.
impl From<EncodingTreeError> for EncodingError {
    fn from(value: EncodingTreeError) -> Self {
        Self(ErrorRepr::EncodingTree(value))
    }
}

View File

@@ -23,11 +23,11 @@ pub(crate) fn build_ghash_data(mut aad: Vec<u8>, mut ciphertext: Vec<u8>) -> Vec
let len_block = ((associated_data_bitlen as u128) << 64) + (text_bitlen as u128);
// Pad data to be a multiple of 16 bytes.
let aad_padded_block_count = (aad.len() / 16) + (aad.len() % 16 != 0) as usize;
let aad_padded_block_count = (aad.len() / 16) + !aad.len().is_multiple_of(16) as usize;
aad.resize(aad_padded_block_count * 16, 0);
let ciphertext_padded_block_count =
(ciphertext.len() / 16) + (ciphertext.len() % 16 != 0) as usize;
(ciphertext.len() / 16) + !ciphertext.len().is_multiple_of(16) as usize;
ciphertext.resize(ciphertext_padded_block_count * 16, 0);
let mut data: Vec<u8> = Vec::with_capacity(aad.len() + ciphertext.len() + 16);

View File

@@ -4,17 +4,16 @@
#![deny(clippy::all)]
#![forbid(unsafe_code)]
pub(crate) mod commit;
pub mod config;
pub(crate) mod context;
pub(crate) mod encoding;
pub(crate) mod ghash;
pub(crate) mod map;
pub(crate) mod msg;
pub(crate) mod mux;
pub mod prover;
pub(crate) mod tag;
pub(crate) mod transcript_internal;
pub mod verifier;
pub(crate) mod zk_aes_ctr;
pub use tlsn_attestation as attestation;
pub use tlsn_core::{connection, hash, transcript};

208
crates/tlsn/src/map.rs Normal file
View File

@@ -0,0 +1,208 @@
use std::ops::Range;
use mpz_memory_core::{Vector, binary::U8};
use rangeset::RangeSet;
// Maps sorted, non-overlapping index ranges to items.
#[derive(Debug, Clone, PartialEq)]
pub(crate) struct RangeMap<T> {
    // `(start_index, item)` pairs, sorted by start index; each item covers
    // `start_index..start_index + item.length()`.
    map: Vec<(usize, T)>,
}
impl<T> Default for RangeMap<T>
where
    T: Item,
{
    /// Returns an empty map.
    fn default() -> Self {
        // An empty vec trivially satisfies the sorted/non-overlapping
        // invariant that `new` asserts.
        Self::new(Vec::new())
    }
}
impl<T> RangeMap<T>
where
    T: Item,
{
    /// Creates a new map from `(start_index, item)` pairs.
    ///
    /// # Panics
    ///
    /// Panics if the pairs are not sorted by index or overlap.
    pub(crate) fn new(map: Vec<(usize, T)>) -> Self {
        let mut pos = 0;
        for (idx, item) in &map {
            assert!(
                *idx >= pos,
                "items must be sorted by index and non-overlapping"
            );
            // `pos` tracks the first index not covered by any prior item.
            pos = *idx + item.length();
        }
        Self { map }
    }
    /// Returns `true` if the map is empty.
    pub(crate) fn is_empty(&self) -> bool {
        self.map.is_empty()
    }
    /// Returns the keys of the map.
    pub(crate) fn keys(&self) -> impl Iterator<Item = Range<usize>> {
        self.map
            .iter()
            .map(|(idx, item)| *idx..*idx + item.length())
    }
    /// Returns the length of the map.
    // Total number of indices covered, not the number of entries.
    pub(crate) fn len(&self) -> usize {
        self.map.iter().map(|(_, item)| item.length()).sum()
    }
    /// Iterates over `(covered_range, item)` pairs in index order.
    pub(crate) fn iter(&self) -> impl Iterator<Item = (Range<usize>, &T)> {
        self.map
            .iter()
            .map(|(idx, item)| (*idx..*idx + item.length(), item))
    }
    /// Returns the slice of the item covering `range`, or `None` when the
    /// range is empty or not fully contained within a single item.
    pub(crate) fn get(&self, range: Range<usize>) -> Option<T::Slice<'_>> {
        if range.start >= range.end {
            return None;
        }
        // Find the item with the greatest start index <= range.start
        let pos = match self.map.binary_search_by(|(idx, _)| idx.cmp(&range.start)) {
            Ok(i) => i,
            Err(0) => return None,
            Err(i) => i - 1,
        };
        let (base, item) = &self.map[pos];
        // `slice` returns `None` when the range extends past the item.
        item.slice(range.start - *base..range.end - *base)
    }
    /// Returns a sub-map containing exactly the ranges in `idx`, or `None`
    /// when any requested range is not fully covered by a single item.
    pub(crate) fn index(&self, idx: &RangeSet<usize>) -> Option<Self> {
        let mut map = Vec::new();
        for idx in idx.iter_ranges() {
            // Item with the greatest start index <= idx.start.
            let pos = match self.map.binary_search_by(|(base, _)| base.cmp(&idx.start)) {
                Ok(i) => i,
                Err(0) => return None,
                Err(i) => i - 1,
            };
            let (base, item) = self.map.get(pos)?;
            // The requested range must lie entirely within this item.
            if idx.start < *base || idx.end > *base + item.length() {
                return None;
            }
            let start = idx.start - *base;
            let end = start + idx.len();
            map.push((
                idx.start,
                item.slice(start..end)
                    .expect("slice length is checked")
                    .into(),
            ));
        }
        Some(Self { map })
    }
}
impl<T> FromIterator<(usize, T)> for RangeMap<T>
where
    T: Item,
{
    /// Collects `(start_index, item)` pairs into a map.
    ///
    /// # Panics
    ///
    /// Panics if the pairs are not sorted by index or overlap, matching the
    /// invariant enforced by `new`.
    fn from_iter<I: IntoIterator<Item = (usize, T)>>(items: I) -> Self {
        let mut map = Vec::new();
        // First index not yet covered by a previous item.
        let mut next_free = 0;
        for (idx, item) in items {
            assert!(
                idx >= next_free,
                "items must be sorted by index and non-overlapping"
            );
            let len = item.length();
            map.push((idx, item));
            next_free = idx + len;
        }
        Self { map }
    }
}
/// An item that can be stored in a `RangeMap`.
pub(crate) trait Item: Sized {
    /// Borrowed view of a sub-range of the item, convertible into an owned
    /// item.
    type Slice<'a>: Into<Self>
    where
        Self: 'a;

    /// Returns the number of indices the item covers.
    fn length(&self) -> usize;

    /// Returns the sub-range `range` of the item, or `None` when it is out
    /// of bounds.
    fn slice<'a>(&'a self, range: Range<usize>) -> Option<Self::Slice<'a>>;
}
// VM byte vectors are range-map items; slicing delegates to `Vector::get`.
impl Item for Vector<U8> {
    type Slice<'a> = Vector<U8>;

    fn length(&self) -> usize {
        self.len()
    }

    fn slice<'a>(&'a self, range: Range<usize>) -> Option<Self::Slice<'a>> {
        self.get(range)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // A plain `Range` acts as a test item: its length is the range span and
    // slicing offsets into it.
    impl Item for Range<usize> {
        type Slice<'a> = Range<usize>;

        fn length(&self) -> usize {
            self.end - self.start
        }

        fn slice(&self, range: Range<usize>) -> Option<Self> {
            if range.end > self.end - self.start {
                return None;
            }
            Some(range.start + self.start..range.end + self.start)
        }
    }

    // `get` resolves full items, sub-ranges, and rejects ranges spanning
    // gaps between items.
    #[test]
    fn test_range_map() {
        let map = RangeMap::from_iter([(0, 10..14), (10, 20..24), (20, 30..32)]);
        assert_eq!(map.get(0..4), Some(10..14));
        assert_eq!(map.get(10..14), Some(20..24));
        assert_eq!(map.get(20..22), Some(30..32));
        assert_eq!(map.get(0..2), Some(10..12));
        assert_eq!(map.get(11..13), Some(21..23));
        assert_eq!(map.get(0..10), None);
        assert_eq!(map.get(10..20), None);
        assert_eq!(map.get(20..30), None);
    }

    // `index` extracts sub-maps and returns `None` for uncovered ranges.
    #[test]
    fn test_range_map_index() {
        let map = RangeMap::from_iter([(0, 10..14), (10, 20..24), (20, 30..32)]);
        let idx = RangeSet::from([0..4, 10..14, 20..22]);
        assert_eq!(map.index(&idx), Some(map.clone()));
        let idx = RangeSet::from(25..30);
        assert_eq!(map.index(&idx), None);
        let idx = RangeSet::from(15..20);
        assert_eq!(map.index(&idx), None);
        let idx = RangeSet::from([1..3, 11..12, 13..14, 21..22]);
        assert_eq!(
            map.index(&idx),
            Some(RangeMap::from_iter([
                (1, 11..13),
                (11, 21..22),
                (13, 23..24),
                (21, 31..32)
            ]))
        );
    }
}

View File

@@ -1,14 +1,40 @@
//! Message types.
use semver::Version;
use serde::{Deserialize, Serialize};
use tlsn_core::connection::{HandshakeData, ServerName};
use crate::config::ProtocolConfig;
/// Message sent from Prover to Verifier to prove the server identity.
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct ServerIdentityProof {
/// Server name.
pub name: ServerName,
/// Server identity data.
pub data: HandshakeData,
pub(crate) struct SetupRequest {
pub(crate) config: ProtocolConfig,
pub(crate) version: Version,
}
// Verifier's accept/reject reply to a prover request.
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct Response {
    // `Ok(())` on acceptance, otherwise the rejection reason.
    pub(crate) result: Result<(), RejectionReason>,
}
impl Response {
    /// An accepting response.
    pub(crate) fn ok() -> Self {
        Self { result: Ok(()) }
    }

    /// A rejecting response carrying an optional reason message.
    pub(crate) fn err(msg: Option<impl Into<String>>) -> Self {
        let reason = RejectionReason(msg.map(Into::into));
        Self {
            result: Err(reason),
        }
    }
}
// Optional human-readable reason attached to a verifier rejection.
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct RejectionReason(Option<String>);
impl From<RejectionReason> for crate::prover::ProverError {
    /// Converts a verifier rejection into a prover config error, preserving
    /// the verifier's reason when one was provided.
    fn from(value: RejectionReason) -> Self {
        match value.0 {
            Some(msg) => crate::prover::ProverError::config(format!(
                "verifier rejected with reason: {msg}"
            )),
            None => {
                crate::prover::ProverError::config("verifier rejected without providing a reason")
            }
        }
    }
}

View File

@@ -3,13 +3,16 @@
mod config;
mod error;
mod future;
mod prove;
pub mod state;
pub use config::{ProverConfig, ProverConfigBuilder, TlsConfig, TlsConfigBuilder};
pub use error::ProverError;
pub use future::ProverFuture;
use rustls_pki_types::CertificateDer;
pub use tlsn_core::{ProveConfig, ProveConfigBuilder, ProveConfigBuilderError, ProverOutput};
pub use tlsn_core::{
ProveConfig, ProveConfigBuilder, ProveConfigBuilderError, ProveRequest, ProverOutput,
};
use mpz_common::Context;
use mpz_core::Block;
@@ -20,31 +23,22 @@ use webpki::anchor_from_trusted_cert;
use crate::{
Role,
commit::{
commit_records,
hash::prove_hash,
transcript::{TranscriptRefs, decode_transcript},
},
context::build_mt_context,
encoding,
msg::{Response, SetupRequest},
mux::attach_mux,
tag::verify_tags,
zk_aes_ctr::ZkAesCtr,
};
use futures::{AsyncRead, AsyncWrite, TryFutureExt};
use mpc_tls::{LeaderCtrl, MpcTlsLeader, SessionKeys};
use rand::Rng;
use serio::SinkExt;
use serio::{SinkExt, stream::IoStreamExt};
use std::sync::Arc;
use tls_client::{ClientConnection, ServerName as TlsServerName};
use tls_client_async::{TlsConnection, bind_client};
use tls_core::msgs::enums::ContentType;
use tlsn_core::{
ProvePayload,
connection::{HandshakeData, ServerName},
hash::{Blake3, HashAlgId, HashAlgorithm, Keccak256, Sha256},
transcript::{TlsTranscript, Transcript, TranscriptCommitment, TranscriptSecret},
transcript::{TlsTranscript, Transcript},
};
use tlsn_deap::Deap;
use tokio::sync::Mutex;
@@ -106,7 +100,20 @@ impl Prover<state::Initialized> {
// Sends protocol configuration to verifier for compatibility check.
mux_fut
.poll_with(ctx.io_mut().send(self.config.protocol_config().clone()))
.poll_with(async {
ctx.io_mut()
.send(SetupRequest {
config: self.config.protocol_config().clone(),
version: crate::config::VERSION.clone(),
})
.await?;
ctx.io_mut()
.expect_next::<Response>()
.await?
.result
.map_err(ProverError::from)
})
.await?;
let (vm, mut mpc_tls) = build_mpc_tls(&self.config, ctx);
@@ -115,22 +122,6 @@ impl Prover<state::Initialized> {
let mut keys = mpc_tls.alloc()?;
let vm_lock = vm.try_lock().expect("VM is not locked");
translate_keys(&mut keys, &vm_lock)?;
// Allocate for committing to plaintext.
let mut zk_aes_ctr_sent = ZkAesCtr::new(Role::Prover);
zk_aes_ctr_sent.set_key(keys.client_write_key, keys.client_write_iv);
zk_aes_ctr_sent.alloc(
&mut *vm_lock.zk(),
self.config.protocol_config().max_sent_data(),
)?;
let mut zk_aes_ctr_recv = ZkAesCtr::new(Role::Prover);
zk_aes_ctr_recv.set_key(keys.server_write_key, keys.server_write_iv);
zk_aes_ctr_recv.alloc(
&mut *vm_lock.zk(),
self.config.protocol_config().max_recv_data(),
)?;
drop(vm_lock);
debug!("setting up mpc-tls");
@@ -146,8 +137,6 @@ impl Prover<state::Initialized> {
mux_ctrl,
mux_fut,
mpc_tls,
zk_aes_ctr_sent,
zk_aes_ctr_recv,
keys,
vm,
},
@@ -173,8 +162,6 @@ impl Prover<state::Setup> {
mux_ctrl,
mut mux_fut,
mpc_tls,
mut zk_aes_ctr_sent,
mut zk_aes_ctr_recv,
keys,
vm,
..
@@ -281,28 +268,6 @@ impl Prover<state::Setup> {
)
.map_err(ProverError::zk)?;
// Prove received plaintext. Prover drops the proof output, as
// they trust themselves.
let (sent_refs, _) = commit_records(
&mut vm,
&mut zk_aes_ctr_sent,
tls_transcript
.sent()
.iter()
.filter(|record| record.typ == ContentType::ApplicationData),
)
.map_err(ProverError::zk)?;
let (recv_refs, _) = commit_records(
&mut vm,
&mut zk_aes_ctr_recv,
tls_transcript
.recv()
.iter()
.filter(|record| record.typ == ContentType::ApplicationData),
)
.map_err(ProverError::zk)?;
mux_fut
.poll_with(vm.execute_all(&mut ctx).map_err(ProverError::zk))
.await?;
@@ -310,7 +275,6 @@ impl Prover<state::Setup> {
let transcript = tls_transcript
.to_transcript()
.expect("transcript is complete");
let transcript_refs = TranscriptRefs::new(sent_refs, recv_refs);
Ok(Prover {
config: self.config,
@@ -320,9 +284,9 @@ impl Prover<state::Setup> {
mux_fut,
ctx,
vm,
keys,
tls_transcript,
transcript,
transcript_refs,
},
})
}
@@ -361,26 +325,13 @@ impl Prover<state::Committed> {
mux_fut,
ctx,
vm,
keys,
tls_transcript,
transcript,
transcript_refs,
..
} = &mut self.state;
let mut output = ProverOutput {
transcript_commitments: Vec::new(),
transcript_secrets: Vec::new(),
};
let partial_transcript = if let Some((sent, recv)) = config.reveal() {
decode_transcript(vm, sent, recv, transcript_refs).map_err(ProverError::zk)?;
Some(transcript.to_partial(sent.clone(), recv.clone()))
} else {
None
};
let payload = ProvePayload {
let request = ProveRequest {
handshake: config.server_identity().then(|| {
(
self.config.server_name().clone(),
@@ -397,81 +348,25 @@ impl Prover<state::Committed> {
},
)
}),
transcript: partial_transcript,
transcript: config
.reveal()
.map(|(sent, recv)| transcript.to_partial(sent.clone(), recv.clone())),
transcript_commit: config.transcript_commit().map(|config| config.to_request()),
};
// Send payload.
mux_fut
.poll_with(ctx.io_mut().send(payload).map_err(ProverError::from))
let output = mux_fut
.poll_with(async {
ctx.io_mut()
.send(request)
.await
.map_err(ProverError::from)?;
ctx.io_mut().expect_next::<Response>().await?.result?;
prove::prove(ctx, vm, keys, transcript, tls_transcript, config).await
})
.await?;
let mut hash_commitments = None;
if let Some(commit_config) = config.transcript_commit() {
if commit_config.has_encoding() {
let hasher: &(dyn HashAlgorithm + Send + Sync) =
match *commit_config.encoding_hash_alg() {
HashAlgId::SHA256 => &Sha256::default(),
HashAlgId::KECCAK256 => &Keccak256::default(),
HashAlgId::BLAKE3 => &Blake3::default(),
alg => {
return Err(ProverError::config(format!(
"unsupported hash algorithm for encoding commitment: {alg}"
)));
}
};
let (commitment, tree) = mux_fut
.poll_with(
encoding::receive(
ctx,
hasher,
transcript_refs,
|plaintext| vm.get_macs(plaintext).expect("reference is valid"),
commit_config.iter_encoding(),
)
.map_err(ProverError::commit),
)
.await?;
output
.transcript_commitments
.push(TranscriptCommitment::Encoding(commitment));
output
.transcript_secrets
.push(TranscriptSecret::Encoding(tree));
}
if commit_config.has_hash() {
hash_commitments = Some(
prove_hash(
vm,
transcript_refs,
commit_config
.iter_hash()
.map(|((dir, idx), alg)| (*dir, idx.clone(), *alg)),
)
.map_err(ProverError::commit)?,
);
}
}
mux_fut
.poll_with(vm.execute_all(ctx).map_err(ProverError::zk))
.await?;
if let Some((hash_fut, hash_secrets)) = hash_commitments {
let hash_commitments = hash_fut.try_recv().map_err(ProverError::commit)?;
for (commitment, secret) in hash_commitments.into_iter().zip(hash_secrets) {
output
.transcript_commitments
.push(TranscriptCommitment::Hash(commitment));
output
.transcript_secrets
.push(TranscriptSecret::Hash(secret));
}
}
Ok(output)
}

View File

@@ -2,7 +2,7 @@ use std::{error::Error, fmt};
use mpc_tls::MpcTlsError;
use crate::{encoding::EncodingError, zk_aes_ctr::ZkAesCtrError};
use crate::transcript_internal::commit::encoding::EncodingError;
/// Error for [`Prover`](crate::Prover).
#[derive(Debug, thiserror::Error)]
@@ -110,12 +110,6 @@ impl From<MpcTlsError> for ProverError {
}
}
impl From<ZkAesCtrError> for ProverError {
fn from(e: ZkAesCtrError) -> Self {
Self::new(ErrorKind::Zk, e)
}
}
impl From<EncodingError> for ProverError {
fn from(e: EncodingError) -> Self {
Self::new(ErrorKind::Commit, e)

View File

@@ -0,0 +1,156 @@
use mpc_tls::SessionKeys;
use mpz_common::Context;
use mpz_memory_core::binary::Binary;
use mpz_vm_core::Vm;
use rangeset::{RangeSet, UnionMut};
use tlsn_core::{
ProveConfig, ProverOutput,
transcript::{
ContentType, Direction, TlsTranscript, Transcript, TranscriptCommitment, TranscriptSecret,
},
};
use crate::{
prover::ProverError,
transcript_internal::{
TranscriptRefs,
auth::prove_plaintext,
commit::{
encoding::{self, MacStore},
hash::prove_hash,
},
},
};
// Runs the prover side of the disclosure protocol: authenticates the
// plaintext in ZK, then produces the requested hash and encoding
// commitments.
pub(crate) async fn prove<T: Vm<Binary> + MacStore + Send + Sync>(
    ctx: &mut Context,
    vm: &mut T,
    keys: &SessionKeys,
    transcript: &Transcript,
    tls_transcript: &TlsTranscript,
    config: &ProveConfig,
) -> Result<ProverOutput, ProverError> {
    let mut output = ProverOutput {
        transcript_commitments: Vec::default(),
        transcript_secrets: Vec::default(),
    };
    let (reveal_sent, reveal_recv) = config.reveal().cloned().unwrap_or_default();
    // Union of all ranges that need commitments (hash and encoding), split
    // per direction.
    let (mut commit_sent, mut commit_recv) = (RangeSet::default(), RangeSet::default());
    if let Some(commit_config) = config.transcript_commit() {
        commit_config
            .iter_hash()
            .for_each(|((direction, idx), _)| match direction {
                Direction::Sent => commit_sent.union_mut(idx),
                Direction::Received => commit_recv.union_mut(idx),
            });
        commit_config
            .iter_encoding()
            .for_each(|(direction, idx)| match direction {
                Direction::Sent => commit_sent.union_mut(idx),
                Direction::Received => commit_recv.union_mut(idx),
            });
    }
    // Authenticate the plaintext (only application data records) for both
    // directions, obtaining VM references for the revealed/committed ranges.
    let transcript_refs = TranscriptRefs {
        sent: prove_plaintext(
            vm,
            keys.client_write_key,
            keys.client_write_iv,
            transcript.sent(),
            tls_transcript
                .sent()
                .iter()
                .filter(|record| record.typ == ContentType::ApplicationData),
            &reveal_sent,
            &commit_sent,
        )
        .map_err(ProverError::commit)?,
        recv: prove_plaintext(
            vm,
            keys.server_write_key,
            keys.server_write_iv,
            transcript.received(),
            tls_transcript
                .recv()
                .iter()
                .filter(|record| record.typ == ContentType::ApplicationData),
            &reveal_recv,
            &commit_recv,
        )
        .map_err(ProverError::commit)?,
    };
    // Hash commitments are set up before VM execution; the results are
    // collected afterwards via the returned future.
    let hash_commitments = if let Some(commit_config) = config.transcript_commit()
        && commit_config.has_hash()
    {
        Some(
            prove_hash(
                vm,
                &transcript_refs,
                commit_config
                    .iter_hash()
                    .map(|((dir, idx), alg)| (*dir, idx.clone(), *alg)),
            )
            .map_err(ProverError::commit)?,
        )
    } else {
        None
    };
    vm.execute_all(ctx).await.map_err(ProverError::zk)?;
    if let Some(commit_config) = config.transcript_commit()
        && commit_config.has_encoding()
    {
        // Narrow the transcript refs down to just the encoding ranges.
        let mut sent_ranges = RangeSet::default();
        let mut recv_ranges = RangeSet::default();
        for (dir, idx) in commit_config.iter_encoding() {
            match dir {
                Direction::Sent => sent_ranges.union_mut(idx),
                Direction::Received => recv_ranges.union_mut(idx),
            }
        }
        let sent_map = transcript_refs
            .sent
            .index(&sent_ranges)
            .expect("indices are valid");
        let recv_map = transcript_refs
            .recv
            .index(&recv_ranges)
            .expect("indices are valid");
        let (commitment, tree) = encoding::receive(
            ctx,
            vm,
            *commit_config.encoding_hash_alg(),
            &sent_map,
            &recv_map,
            commit_config.iter_encoding(),
        )
        .await?;
        output
            .transcript_commitments
            .push(TranscriptCommitment::Encoding(commitment));
        output
            .transcript_secrets
            .push(TranscriptSecret::Encoding(tree));
    }
    // Collect the hash commitments produced during execution.
    if let Some((hash_fut, hash_secrets)) = hash_commitments {
        let hash_commitments = hash_fut.try_recv().map_err(ProverError::commit)?;
        for (commitment, secret) in hash_commitments.into_iter().zip(hash_secrets) {
            output
                .transcript_commitments
                .push(TranscriptCommitment::Hash(commitment));
            output
                .transcript_secrets
                .push(TranscriptSecret::Hash(secret));
        }
    }
    Ok(output)
}

View File

@@ -9,10 +9,8 @@ use tlsn_deap::Deap;
use tokio::sync::Mutex;
use crate::{
commit::transcript::TranscriptRefs,
mux::{MuxControl, MuxFuture},
prover::{Mpc, Zk},
zk_aes_ctr::ZkAesCtr,
};
/// Entry state
@@ -25,8 +23,6 @@ pub struct Setup {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) mpc_tls: MpcTlsLeader,
pub(crate) zk_aes_ctr_sent: ZkAesCtr,
pub(crate) zk_aes_ctr_recv: ZkAesCtr,
pub(crate) keys: SessionKeys,
pub(crate) vm: Arc<Mutex<Deap<Mpc, Zk>>>,
}
@@ -39,9 +35,9 @@ pub struct Committed {
pub(crate) mux_fut: MuxFuture,
pub(crate) ctx: Context,
pub(crate) vm: Zk,
pub(crate) keys: SessionKeys,
pub(crate) tls_transcript: TlsTranscript,
pub(crate) transcript: Transcript,
pub(crate) transcript_refs: TranscriptRefs,
}
opaque_debug::implement!(Committed);

View File

@@ -0,0 +1,16 @@
pub(crate) mod auth;
pub(crate) mod commit;
use mpz_memory_core::{Vector, binary::U8};
use crate::map::RangeMap;
/// Maps transcript ranges to VM references.
pub(crate) type ReferenceMap = RangeMap<Vector<U8>>;
/// References to the application plaintext in the transcript.
#[derive(Debug, Default, Clone)]
pub(crate) struct TranscriptRefs {
    // VM references covering (a subset of) the sent plaintext.
    pub(crate) sent: ReferenceMap,
    // VM references covering (a subset of) the received plaintext.
    pub(crate) recv: ReferenceMap,
}

View File

@@ -0,0 +1,639 @@
use std::sync::Arc;
use aes::Aes128;
use ctr::{
Ctr32BE,
cipher::{KeyIvInit, StreamCipher, StreamCipherSeek},
};
use mpz_circuits::circuits::{AES128, xor};
use mpz_core::bitvec::BitVec;
use mpz_memory_core::{
Array, DecodeFutureTyped, MemoryExt, Vector, ViewExt,
binary::{Binary, U8},
};
use mpz_vm_core::{Call, CallableExt, Vm};
use rangeset::{Difference, RangeSet, Union};
use tlsn_core::transcript::Record;
use crate::transcript_internal::ReferenceMap;
// Authenticates the prover's plaintext to the verifier.
//
// When everything is revealed the key/IV are simply decoded and the
// plaintext assigned publicly; otherwise the revealed/committed ranges are
// proven consistent with the ciphertext in ZK.
pub(crate) fn prove_plaintext<'a>(
    vm: &mut dyn Vm<Binary>,
    key: Array<U8, 16>,
    iv: Array<U8, 4>,
    plaintext: &[u8],
    records: impl IntoIterator<Item = &'a Record>,
    reveal: &RangeSet<usize>,
    commit: &RangeSet<usize>,
) -> Result<ReferenceMap, PlaintextAuthError> {
    let is_reveal_all = reveal == (0..plaintext.len());
    let alloc_ranges = if is_reveal_all {
        commit.clone()
    } else {
        // The plaintext is only partially revealed, so we need to authenticate in ZK.
        commit.union(reveal)
    };
    let plaintext_refs = alloc_plaintext(vm, &alloc_ranges)?;
    let records = RecordParams::from_iter(records).collect::<Vec<_>>();
    if is_reveal_all {
        // Full reveal: disclose key and IV, assign all plaintext publicly.
        drop(vm.decode(key).map_err(PlaintextAuthError::vm)?);
        drop(vm.decode(iv).map_err(PlaintextAuthError::vm)?);
        for (range, slice) in plaintext_refs.iter() {
            vm.mark_public(*slice).map_err(PlaintextAuthError::vm)?;
            vm.assign(*slice, plaintext[range].to_vec())
                .map_err(PlaintextAuthError::vm)?;
            vm.commit(*slice).map_err(PlaintextAuthError::vm)?;
        }
    } else {
        // Committed-but-not-revealed ranges stay private to the prover.
        let private = commit.difference(reveal);
        for (_, slice) in plaintext_refs
            .index(&private)
            .expect("all ranges are allocated")
            .iter()
        {
            vm.mark_private(*slice).map_err(PlaintextAuthError::vm)?;
        }
        for (_, slice) in plaintext_refs
            .index(reveal)
            .expect("all ranges are allocated")
            .iter()
        {
            vm.mark_public(*slice).map_err(PlaintextAuthError::vm)?;
        }
        for (range, slice) in plaintext_refs.iter() {
            vm.assign(*slice, plaintext[range].to_vec())
                .map_err(PlaintextAuthError::vm)?;
            vm.commit(*slice).map_err(PlaintextAuthError::vm)?;
        }
        // Re-encrypt in the VM and decode the ciphertext so the verifier
        // can check it against the TLS records.
        let ciphertext = alloc_ciphertext(vm, key, iv, plaintext_refs.clone(), &records)?;
        for (_, slice) in ciphertext.iter() {
            drop(vm.decode(*slice).map_err(PlaintextAuthError::vm)?);
        }
    }
    Ok(plaintext_refs)
}
// Verifier-side counterpart of `prove_plaintext`: sets up the VM views and
// returns a deferred proof that the assigned plaintext matches the TLS
// ciphertext.
#[allow(clippy::too_many_arguments)]
pub(crate) fn verify_plaintext<'a>(
    vm: &mut dyn Vm<Binary>,
    key: Array<U8, 16>,
    iv: Array<U8, 4>,
    plaintext: &'a [u8],
    ciphertext: &'a [u8],
    records: impl IntoIterator<Item = &'a Record>,
    reveal: &RangeSet<usize>,
    commit: &RangeSet<usize>,
) -> Result<(ReferenceMap, PlaintextProof<'a>), PlaintextAuthError> {
    let is_reveal_all = reveal == (0..plaintext.len());
    let alloc_ranges = if is_reveal_all {
        commit.clone()
    } else {
        // The plaintext is only partially revealed, so we need to authenticate in ZK.
        commit.union(reveal)
    };
    let plaintext_refs = alloc_plaintext(vm, &alloc_ranges)?;
    let records = RecordParams::from_iter(records).collect::<Vec<_>>();
    let plaintext_proof = if is_reveal_all {
        // Full reveal: expect the prover to disclose key and IV; the check
        // against the ciphertext happens later with the decoded key.
        let key = vm.decode(key).map_err(PlaintextAuthError::vm)?;
        let iv = vm.decode(iv).map_err(PlaintextAuthError::vm)?;
        for (range, slice) in plaintext_refs.iter() {
            vm.mark_public(*slice).map_err(PlaintextAuthError::vm)?;
            vm.assign(*slice, plaintext[range].to_vec())
                .map_err(PlaintextAuthError::vm)?;
            vm.commit(*slice).map_err(PlaintextAuthError::vm)?;
        }
        PlaintextProof(ProofInner::WithKey {
            key,
            iv,
            records,
            plaintext,
            ciphertext,
        })
    } else {
        // Committed-but-not-revealed ranges are blind to the verifier.
        let private = commit.difference(reveal);
        for (_, slice) in plaintext_refs
            .index(&private)
            .expect("all ranges are allocated")
            .iter()
        {
            vm.mark_blind(*slice).map_err(PlaintextAuthError::vm)?;
        }
        for (range, slice) in plaintext_refs
            .index(reveal)
            .expect("all ranges are allocated")
            .iter()
        {
            vm.mark_public(*slice).map_err(PlaintextAuthError::vm)?;
            vm.assign(*slice, plaintext[range].to_vec())
                .map_err(PlaintextAuthError::vm)?;
        }
        for (_, slice) in plaintext_refs.iter() {
            vm.commit(*slice).map_err(PlaintextAuthError::vm)?;
        }
        // Re-encrypt in the VM; pair each decoded ciphertext chunk with the
        // expected bytes from the TLS transcript for the deferred check.
        let ciphertext_map = alloc_ciphertext(vm, key, iv, plaintext_refs.clone(), &records)?;
        let mut ciphertexts = Vec::new();
        for (range, chunk) in ciphertext_map.iter() {
            ciphertexts.push((
                &ciphertext[range],
                vm.decode(*chunk).map_err(PlaintextAuthError::vm)?,
            ));
        }
        PlaintextProof(ProofInner::WithZk { ciphertexts })
    };
    Ok((plaintext_refs, plaintext_proof))
}
/// Allocates VM memory covering `ranges` and maps each transcript range to
/// its chunk of the allocation.
///
/// Returns an empty map when `ranges` covers no bytes.
fn alloc_plaintext(
    vm: &mut dyn Vm<Binary>,
    ranges: &RangeSet<usize>,
) -> Result<ReferenceMap, PlaintextAuthError> {
    let total = ranges.len();
    if total == 0 {
        return Ok(ReferenceMap::default());
    }

    // One contiguous allocation, sliced up per transcript range below.
    let plaintext = vm
        .alloc_vec::<U8>(total)
        .map_err(PlaintextAuthError::vm)?;

    let mut entries = Vec::new();
    let mut offset = 0;
    for range in ranges.iter_ranges() {
        let chunk = plaintext
            .get(offset..offset + range.len())
            .expect("length was checked");
        offset += range.len();
        entries.push((range.start, chunk));
    }

    Ok(ReferenceMap::from_iter(entries))
}
/// Allocates computation of the ciphertext for the given plaintext references.
///
/// XORs the plaintext with the AES-CTR keystream derived from `key`, `iv` and
/// `records`, returning a map from plaintext offsets to ciphertext chunks.
fn alloc_ciphertext<'a>(
    vm: &mut dyn Vm<Binary>,
    key: Array<U8, 16>,
    iv: Array<U8, 4>,
    plaintext: ReferenceMap,
    records: impl IntoIterator<Item = &'a RecordParams>,
) -> Result<ReferenceMap, PlaintextAuthError> {
    if plaintext.is_empty() {
        return Ok(ReferenceMap::default());
    }

    let ranges = RangeSet::from(plaintext.keys().collect::<Vec<_>>());
    let keystream = alloc_keystream(vm, key, iv, &ranges, records)?;

    // Feed the XOR circuit its inputs: all plaintext chunks first, then all
    // keystream chunks, matching the circuit's expected argument order.
    let builder = plaintext
        .iter()
        .map(|(_, slice)| *slice)
        .chain(keystream)
        .fold(Call::builder(Arc::new(xor(ranges.len() * 8))), |acc, arg| {
            acc.arg(arg)
        });
    let call = builder.build().expect("call should be valid");
    let ciphertext: Vector<U8> = vm.call(call).map_err(PlaintextAuthError::vm)?;

    // Split the flat ciphertext vector back into per-range chunks.
    let mut offset = 0;
    Ok(ReferenceMap::from_iter(ranges.iter_ranges().map(
        move |range| {
            let chunk = ciphertext
                .get(offset..offset + range.len())
                .expect("length was checked");
            offset += range.len();
            (range.start, chunk)
        },
    )))
}
/// Allocates AES-CTR keystream chunks in the VM covering the plaintext
/// `ranges`, where offsets are relative to the concatenation of all records.
///
/// Returns one `Vector<U8>` per contiguous keystream span; spans never cross
/// an AES block or record boundary. Returns `ErrorRepr::OutOfBounds` if
/// `ranges` extends past the total length of `records`.
fn alloc_keystream<'a>(
    vm: &mut dyn Vm<Binary>,
    key: Array<U8, 16>,
    iv: Array<U8, 4>,
    ranges: &RangeSet<usize>,
    records: impl IntoIterator<Item = &'a RecordParams>,
) -> Result<Vec<Vector<U8>>, PlaintextAuthError> {
    let mut keystream = Vec::new();
    // Offset of the current record within the concatenated plaintext.
    let mut pos = 0;
    let mut range_iter = ranges.iter_ranges();
    let mut current_range = range_iter.next();
    for record in records {
        // Explicit nonce is allocated lazily, only if a range touches this record.
        let mut explicit_nonce = None;
        // Cache of the most recently allocated (block number, block) pair so
        // consecutive spans within the same AES block reuse one allocation.
        let mut current_block = None;
        loop {
            // All ranges have been processed: done.
            let Some(range) = current_range.take().or_else(|| range_iter.next()) else {
                return Ok(keystream);
            };
            let record_range = pos..pos + record.len;
            // Range starts at or beyond the end of this record: put it back
            // and advance to the next record.
            if range.start >= record_range.end {
                current_range = Some(range);
                break;
            }
            // Range with record offset applied.
            let offset_range = range.start - pos..range.end - pos;
            let explicit_nonce = if let Some(explicit_nonce) = explicit_nonce {
                explicit_nonce
            } else {
                let nonce = alloc_explicit_nonce(vm, record.explicit_nonce.clone())?;
                explicit_nonce = Some(nonce);
                nonce
            };
            const BLOCK_SIZE: usize = 16;
            let block_num = offset_range.start / BLOCK_SIZE;
            let block = if let Some((current_block_num, block)) = current_block.take()
                && current_block_num == block_num
            {
                block
            } else {
                let block = alloc_block(vm, key, iv, explicit_nonce, block_num)?;
                current_block = Some((block_num, block));
                block
            };
            // Range within the block.
            let block_range_start = offset_range.start % BLOCK_SIZE;
            // Span length is clamped to both the record end and the block end.
            let len =
                (range.end.min(record_range.end) - range.start).min(BLOCK_SIZE - block_range_start);
            let block_range = block_range_start..block_range_start + len;
            keystream.push(block.get(block_range).expect("range is checked"));
            // If the range extends past the block, process the tail.
            if range.start + len < range.end {
                current_range = Some(range.start + len..range.end);
            }
        }
        pos += record.len;
    }
    // Ranges remain after all records were consumed: out of bounds.
    Err(ErrorRepr::OutOfBounds.into())
}
/// Commits the record's explicit nonce to the VM as a public value.
fn alloc_explicit_nonce(
    vm: &mut dyn Vm<Binary>,
    explicit_nonce: Vec<u8>,
) -> Result<Vector<U8>, PlaintextAuthError> {
    const EXPLICIT_NONCE_LEN: usize = 8;

    // The nonce is public input: allocate, mark, assign, then commit.
    let nonce_ref = vm
        .alloc_vec::<U8>(EXPLICIT_NONCE_LEN)
        .map_err(PlaintextAuthError::vm)?;
    vm.mark_public(nonce_ref).map_err(PlaintextAuthError::vm)?;
    vm.assign(nonce_ref, explicit_nonce)
        .map_err(PlaintextAuthError::vm)?;
    vm.commit(nonce_ref).map_err(PlaintextAuthError::vm)?;

    Ok(nonce_ref)
}
/// Allocates computation of a single AES-CTR keystream block in the VM.
///
/// `block` is the zero-based index of the keystream block within the record.
/// The counter starts at 2 for the first block, matching
/// `aes_ctr_apply_keystream`.
fn alloc_block(
    vm: &mut dyn Vm<Binary>,
    key: Array<U8, 16>,
    iv: Array<U8, 4>,
    explicit_nonce: Vector<U8>,
    block: usize,
) -> Result<Vector<U8>, PlaintextAuthError> {
    // The counter value is public: commit it directly.
    let ctr: Array<U8, 4> = vm.alloc().map_err(PlaintextAuthError::vm)?;
    vm.mark_public(ctr).map_err(PlaintextAuthError::vm)?;
    const START_CTR: u32 = 2;
    vm.assign(ctr, (START_CTR + block as u32).to_be_bytes())
        .map_err(PlaintextAuthError::vm)?;
    vm.commit(ctr).map_err(PlaintextAuthError::vm)?;
    // Keystream block = AES-128(key, iv || explicit_nonce || ctr).
    let block: Array<U8, 16> = vm
        .call(
            Call::builder(AES128.clone())
                .arg(key)
                .arg(iv)
                .arg(explicit_nonce)
                .arg(ctr)
                .build()
                .expect("call should be valid"),
        )
        .map_err(PlaintextAuthError::vm)?;
    Ok(Vector::from(block))
}
/// Per-record parameters needed to derive its AES-CTR keystream.
struct RecordParams {
    /// Explicit nonce of the record.
    explicit_nonce: Vec<u8>,
    /// Length of the record ciphertext in bytes.
    len: usize,
}

impl RecordParams {
    /// Extracts the keystream-relevant parameters from the given TLS records.
    fn from_iter<'a>(records: impl IntoIterator<Item = &'a Record>) -> impl Iterator<Item = Self> {
        records.into_iter().map(|record| Self {
            explicit_nonce: record.explicit_nonce.clone(),
            len: record.ciphertext.len(),
        })
    }
}
/// Deferred proof that the committed plaintext encrypts to the transcript
/// ciphertext. Must be verified after the VM has been executed.
#[must_use]
pub(crate) struct PlaintextProof<'a>(ProofInner<'a>);

impl<'a> PlaintextProof<'a> {
    /// Verifies the proof, consuming it.
    ///
    /// # Errors
    ///
    /// Returns an error if the decoded values are not yet available (VM was
    /// not executed) or if the plaintext does not encrypt to the expected
    /// ciphertext.
    pub(crate) fn verify(self) -> Result<(), PlaintextAuthError> {
        match self.0 {
            ProofInner::WithKey {
                mut key,
                mut iv,
                records,
                plaintext,
                ciphertext,
            } => {
                // Key and IV were decoded during VM execution; re-encrypt the
                // plaintext locally with the cleartext key and compare.
                let key = key
                    .try_recv()
                    .map_err(PlaintextAuthError::vm)?
                    .ok_or(ErrorRepr::MissingDecoding)?;
                let iv = iv
                    .try_recv()
                    .map_err(PlaintextAuthError::vm)?
                    .ok_or(ErrorRepr::MissingDecoding)?;
                verify_plaintext_with_key(key, iv, &records, plaintext, ciphertext)?;
            }
            ProofInner::WithZk { ciphertexts } => {
                // Compare each ciphertext chunk computed in the VM against the
                // expected transcript ciphertext.
                for (expected, mut actual) in ciphertexts {
                    let actual = actual
                        .try_recv()
                        .map_err(PlaintextAuthError::vm)?
                        .ok_or(PlaintextAuthError(ErrorRepr::MissingDecoding))?;
                    if actual != expected {
                        return Err(PlaintextAuthError(ErrorRepr::InvalidPlaintext));
                    }
                }
            }
        }
        Ok(())
    }
}
/// Inner representation of a [`PlaintextProof`].
enum ProofInner<'a> {
    /// The encryption key is revealed: the plaintext is re-encrypted locally
    /// and compared with the ciphertext.
    WithKey {
        key: DecodeFutureTyped<BitVec, [u8; 16]>,
        iv: DecodeFutureTyped<BitVec, [u8; 4]>,
        records: Vec<RecordParams>,
        plaintext: &'a [u8],
        ciphertext: &'a [u8],
    },
    /// The key stays secret: encryption is computed in the VM and only the
    /// resulting ciphertext chunks are compared.
    WithZk {
        // (expected, actual)
        #[allow(clippy::type_complexity)]
        ciphertexts: Vec<(&'a [u8], DecodeFutureTyped<BitVec, Vec<u8>>)>,
    },
}
/// Applies the AES-128-CTR keystream for a single record to `input` in place.
///
/// The 16-byte counter block is laid out as `iv (4) || explicit_nonce (8) ||
/// counter (4)`, and the keystream starts at counter value 2, matching
/// `alloc_block`.
fn aes_ctr_apply_keystream(key: &[u8; 16], iv: &[u8; 4], explicit_nonce: &[u8], input: &mut [u8]) {
    const START_CTR: u32 = 2;

    // Assemble the initial counter block; the trailing 4 counter bytes are
    // left zero and positioned via `try_seek` below.
    let mut counter_block = [0u8; 16];
    counter_block[..4].copy_from_slice(iv);
    counter_block[4..12].copy_from_slice(&explicit_nonce[..8]);

    let mut cipher = Ctr32BE::<Aes128>::new(key.into(), &counter_block.into());
    cipher
        .try_seek(START_CTR * 16)
        .expect("start counter is less than keystream length");
    cipher.apply_keystream(input);
}
/// Verifies that `plaintext` encrypts to `ciphertext` under AES-128-CTR with
/// the given `key`, `iv` and per-record explicit nonces.
///
/// # Errors
///
/// Returns `ErrorRepr::InvalidPlaintext` if any record does not match.
fn verify_plaintext_with_key<'a>(
    key: [u8; 16],
    iv: [u8; 4],
    records: impl IntoIterator<Item = &'a RecordParams>,
    plaintext: &[u8],
    ciphertext: &[u8],
) -> Result<(), PlaintextAuthError> {
    let mut offset = 0;
    // Scratch buffer reused across records to avoid re-allocating.
    let mut scratch = Vec::new();
    for record in records {
        let record_range = offset..offset + record.len;
        scratch.clear();
        scratch.extend_from_slice(&plaintext[record_range.clone()]);
        aes_ctr_apply_keystream(&key, &iv, &record.explicit_nonce, &mut scratch);
        if scratch[..] != ciphertext[record_range] {
            return Err(PlaintextAuthError(ErrorRepr::InvalidPlaintext));
        }
        offset += record.len;
    }
    Ok(())
}
/// Error for plaintext authentication operations.
#[derive(Debug, thiserror::Error)]
#[error("plaintext authentication error: {0}")]
pub(crate) struct PlaintextAuthError(#[from] ErrorRepr);

impl PlaintextAuthError {
    /// Wraps an arbitrary VM error.
    fn vm<E>(err: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync + 'static>>,
    {
        Self(ErrorRepr::Vm(err.into()))
    }
}
/// Inner representation of [`PlaintextAuthError`].
#[derive(Debug, thiserror::Error)]
enum ErrorRepr {
    #[error("vm error: {0}")]
    Vm(Box<dyn std::error::Error + Send + Sync + 'static>),
    #[error("plaintext out of bounds of records. This should never happen and is an internal bug.")]
    OutOfBounds,
    #[error("missing decoding")]
    MissingDecoding,
    #[error("plaintext does not match ciphertext")]
    InvalidPlaintext,
}
#[cfg(test)]
#[allow(clippy::all)]
mod tests {
    use super::*;
    use mpz_common::context::test_st_context;
    use mpz_ideal_vm::IdealVm;
    use mpz_vm_core::prelude::*;
    use rand::{Rng, SeedableRng, rngs::StdRng};
    use rstest::*;
    use std::ops::Range;

    /// Builds an ideal VM with `key` and `iv` committed as public values.
    fn build_vm(key: [u8; 16], iv: [u8; 4]) -> (IdealVm, Array<U8, 16>, Array<U8, 4>) {
        let mut vm = IdealVm::new();
        let key_ref = vm.alloc::<Array<U8, 16>>().unwrap();
        let iv_ref = vm.alloc::<Array<U8, 4>>().unwrap();
        vm.mark_public(key_ref).unwrap();
        vm.mark_public(iv_ref).unwrap();
        vm.assign(key_ref, key).unwrap();
        vm.assign(iv_ref, iv).unwrap();
        vm.commit(key_ref).unwrap();
        vm.commit(iv_ref).unwrap();
        (vm, key_ref, iv_ref)
    }

    /// Computes the reference AES-CTR keystream bytes covered by `ranges`
    /// across `records`, to compare against the VM's output.
    fn expected_aes_ctr<'a>(
        key: [u8; 16],
        iv: [u8; 4],
        records: impl IntoIterator<Item = &'a RecordParams>,
        ranges: &RangeSet<usize>,
    ) -> Vec<u8> {
        let mut keystream = Vec::new();
        let mut pos = 0;
        for record in records {
            // Keystream for the full record: encrypt a zeroed buffer.
            let mut record_keystream = vec![0u8; record.len];
            aes_ctr_apply_keystream(&key, &iv, &record.explicit_nonce, &mut record_keystream);
            for mut range in ranges.iter_ranges() {
                // Clamp the range to this record before slicing.
                range.start = range.start.max(pos);
                range.end = range.end.min(pos + record.len);
                if range.start < range.end {
                    keystream
                        .extend_from_slice(&record_keystream[range.start - pos..range.end - pos]);
                }
            }
            pos += record.len;
        }
        keystream
    }

    #[rstest]
    #[case::single_record_empty([0], [])]
    #[case::multiple_empty_records_empty([0, 0], [])]
    #[case::multiple_records_empty([128, 64], [])]
    #[case::single_block_full([16], [0..16])]
    #[case::single_block_partial([16], [2..14])]
    #[case::partial_block_full([15], [0..15])]
    #[case::out_of_bounds([16], [0..17])]
    #[case::multiple_records_full([128, 63, 33, 15, 4], [0..243])]
    #[case::multiple_records_partial([128, 63, 33, 15, 4], [1..15, 16..17, 18..19, 126..130, 224..225, 242..243])]
    #[tokio::test]
    async fn test_alloc_keystream(
        #[case] record_lens: impl IntoIterator<Item = usize>,
        #[case] ranges: impl IntoIterator<Item = Range<usize>>,
    ) {
        let mut rng = StdRng::seed_from_u64(0);
        let mut key = [0u8; 16];
        let mut iv = [0u8; 4];
        rng.fill(&mut key);
        rng.fill(&mut iv);
        // Build records with random explicit nonces of the given lengths.
        let mut total_len = 0;
        let records = record_lens
            .into_iter()
            .map(|len| {
                let mut explicit_nonce = [0u8; 8];
                rng.fill(&mut explicit_nonce);
                total_len += len;
                RecordParams {
                    explicit_nonce: explicit_nonce.to_vec(),
                    len,
                }
            })
            .collect::<Vec<_>>();
        let ranges = RangeSet::from(ranges.into_iter().collect::<Vec<_>>());
        let is_out_of_bounds = ranges.end().unwrap_or(0) > total_len;
        let (mut ctx, _) = test_st_context(1024);
        let (mut vm, key_ref, iv_ref) = build_vm(key, iv);
        // Out-of-bounds cases must error; everything else must succeed.
        let keystream = match alloc_keystream(&mut vm, key_ref, iv_ref, &ranges, &records) {
            Ok(_) if is_out_of_bounds => panic!("should be out of bounds"),
            Ok(keystream) => keystream,
            Err(PlaintextAuthError(ErrorRepr::OutOfBounds)) if is_out_of_bounds => {
                return;
            }
            Err(e) => panic!("unexpected error: {:?}", e),
        };
        vm.execute(&mut ctx).await.unwrap();
        // Flatten the VM's keystream chunks and compare with the reference.
        let keystream: Vec<u8> = keystream
            .iter()
            .flat_map(|slice| vm.get(*slice).unwrap().unwrap())
            .collect();
        assert_eq!(keystream.len(), ranges.len());
        let expected = expected_aes_ctr(key, iv, &records, &ranges);
        assert_eq!(keystream, expected);
    }

    #[rstest]
    #[case::single_record_empty([0])]
    #[case::single_record([32])]
    #[case::multiple_records([128, 63, 33, 15, 4])]
    #[case::multiple_records_with_empty([128, 63, 33, 0, 15, 4])]
    fn test_verify_plaintext_with_key(
        #[case] record_lens: impl IntoIterator<Item = usize>,
        #[values(false, true)] tamper: bool,
    ) {
        let mut rng = StdRng::seed_from_u64(0);
        let mut key = [0u8; 16];
        let mut iv = [0u8; 4];
        rng.fill(&mut key);
        rng.fill(&mut iv);
        let mut total_len = 0;
        let records = record_lens
            .into_iter()
            .map(|len| {
                let mut explicit_nonce = [0u8; 8];
                rng.fill(&mut explicit_nonce);
                total_len += len;
                RecordParams {
                    explicit_nonce: explicit_nonce.to_vec(),
                    len,
                }
            })
            .collect::<Vec<_>>();
        let mut plaintext = vec![0u8; total_len];
        rng.fill(plaintext.as_mut_slice());
        // Produce the matching ciphertext by XORing with the keystream.
        let mut ciphertext = plaintext.clone();
        expected_aes_ctr(key, iv, &records, &(0..total_len).into())
            .iter()
            .zip(ciphertext.iter_mut())
            .for_each(|(key, pt)| {
                *pt ^= *key;
            });
        // Optionally flip one plaintext bit to exercise the failure path.
        if tamper {
            plaintext.first_mut().map(|pt| *pt ^= 1);
        }
        match verify_plaintext_with_key(key, iv, &records, &plaintext, &ciphertext) {
            Ok(_) if tamper && !plaintext.is_empty() => panic!("should be invalid"),
            Err(e) if !tamper => panic!("unexpected error: {:?}", e),
            _ => {}
        }
    }
}

View File

@@ -0,0 +1,4 @@
//! Plaintext commitment and proof of encryption.
pub(crate) mod encoding;
pub(crate) mod hash;

View File

@@ -0,0 +1,283 @@
//! Encoding commitment protocol.
use std::ops::Range;
use mpz_common::Context;
use mpz_memory_core::{
Vector,
binary::U8,
correlated::{Delta, Key, Mac},
};
use rand::Rng;
use rangeset::RangeSet;
use serde::{Deserialize, Serialize};
use serio::{SinkExt, stream::IoStreamExt};
use tlsn_core::{
hash::{Blake3, HashAlgId, HashAlgorithm, Keccak256, Sha256},
transcript::{
Direction,
encoding::{
Encoder, EncoderSecret, EncodingCommitment, EncodingProvider, EncodingProviderError,
EncodingTree, EncodingTreeError, new_encoder,
},
},
};
use crate::{
map::{Item, RangeMap},
transcript_internal::ReferenceMap,
};
/// Bytes of encoding, per byte.
const ENCODING_SIZE: usize = 128;
/// Masked encodings for both transcript directions, serialized for transfer
/// between the parties.
#[derive(Debug, Serialize, Deserialize)]
struct Encodings {
    /// Encodings of the sent plaintext, `ENCODING_SIZE` bytes per byte.
    sent: Vec<u8>,
    /// Encodings of the received plaintext, `ENCODING_SIZE` bytes per byte.
    recv: Vec<u8>,
}
/// Transfers encodings for the provided plaintext ranges.
///
/// Generates a fresh [`EncoderSecret`] bound to the store's delta, encodes
/// the committed plaintext ranges, masks each encoding with the store's keys,
/// sends them to the peer, then receives the root of the peer's encoding
/// commitment tree.
pub(crate) async fn transfer<K: KeyStore>(
    ctx: &mut Context,
    store: &K,
    sent: &ReferenceMap,
    recv: &ReferenceMap,
) -> Result<(EncoderSecret, EncodingCommitment), EncodingError> {
    let secret = EncoderSecret::new(rand::rng().random(), store.delta().as_block().to_bytes());
    let encoder = new_encoder(&secret);

    // Collects the encodings for the provided plaintext ranges.
    fn collect_encodings(
        encoder: &impl Encoder,
        store: &impl KeyStore,
        direction: Direction,
        map: &ReferenceMap,
    ) -> Vec<u8> {
        let mut encodings = Vec::with_capacity(map.len() * ENCODING_SIZE);
        for (range, chunk) in map.iter() {
            let start = encodings.len();
            encoder.encode_range(direction, range, &mut encodings);
            let keys = store
                .get_keys(*chunk)
                .expect("keys are present for provided plaintext ranges");
            // XOR-mask the fresh encodings with the store's keys; the peer
            // unmasks with its corresponding MACs (see `receive`).
            encodings[start..]
                .iter_mut()
                .zip(keys.iter().flat_map(|key| key.as_block().as_bytes()))
                .for_each(|(encoding, key)| {
                    *encoding ^= *key;
                });
        }
        encodings
    }

    let encodings = Encodings {
        sent: collect_encodings(&encoder, store, Direction::Sent, sent),
        recv: collect_encodings(&encoder, store, Direction::Received, recv),
    };

    // Raise the frame limit to accommodate the encoding payload.
    let frame_limit = ctx
        .io()
        .limit()
        .saturating_add(encodings.sent.len() + encodings.recv.len());
    ctx.io_mut().with_limit(frame_limit).send(encodings).await?;

    let root = ctx.io_mut().expect_next().await?;

    Ok((secret, EncodingCommitment { root }))
}
/// Receives and commits to the encodings for the provided plaintext ranges.
///
/// Unmasks the received encodings with the store's MACs, builds an encoding
/// tree over `idxs` using `hash_alg`, and sends the tree root back to the
/// peer.
pub(crate) async fn receive<M: MacStore>(
    ctx: &mut Context,
    store: &M,
    hash_alg: HashAlgId,
    sent: &ReferenceMap,
    recv: &ReferenceMap,
    idxs: impl IntoIterator<Item = &(Direction, RangeSet<usize>)>,
) -> Result<(EncodingCommitment, EncodingTree), EncodingError> {
    // Resolve the requested hash algorithm up front.
    let hasher: &(dyn HashAlgorithm + Send + Sync) = match hash_alg {
        HashAlgId::SHA256 => &Sha256::default(),
        HashAlgId::KECCAK256 => &Keccak256::default(),
        HashAlgId::BLAKE3 => &Blake3::default(),
        alg => {
            return Err(ErrorRepr::UnsupportedHashAlgorithm(alg).into());
        }
    };

    let (sent_len, recv_len) = (sent.len(), recv.len());

    // Raise the frame limit to accommodate the expected encoding payload.
    let frame_limit = ctx
        .io()
        .limit()
        .saturating_add(ENCODING_SIZE * (sent_len + recv_len));
    let encodings: Encodings = ctx.io_mut().with_limit(frame_limit).expect_next().await?;

    // Reject payloads that do not carry exactly one encoding per plaintext byte.
    if encodings.sent.len() != sent_len * ENCODING_SIZE {
        return Err(ErrorRepr::IncorrectMacCount {
            direction: Direction::Sent,
            expected: sent_len,
            got: encodings.sent.len() / ENCODING_SIZE,
        }
        .into());
    }
    if encodings.recv.len() != recv_len * ENCODING_SIZE {
        return Err(ErrorRepr::IncorrectMacCount {
            direction: Direction::Received,
            expected: recv_len,
            got: encodings.recv.len() / ENCODING_SIZE,
        }
        .into());
    }

    // Collects a map of plaintext ranges to their encodings.
    fn collect_map(
        store: &impl MacStore,
        mut encodings: Vec<u8>,
        map: &ReferenceMap,
    ) -> RangeMap<EncodingSlice> {
        let mut encoding_map = Vec::new();
        let mut pos = 0;
        for (range, chunk) in map.iter() {
            let macs = store
                .get_macs(*chunk)
                .expect("MACs are present for provided plaintext ranges");
            let encoding = &mut encodings[pos..pos + range.len() * ENCODING_SIZE];
            // Unmask with the local MACs (peer masked with its keys in `transfer`).
            encoding
                .iter_mut()
                .zip(macs.iter().flat_map(|mac| mac.as_bytes()))
                .for_each(|(encoding, mac)| {
                    *encoding ^= *mac;
                });
            encoding_map.push((range.start, EncodingSlice::from(&(*encoding))));
            pos += range.len() * ENCODING_SIZE;
        }
        RangeMap::new(encoding_map)
    }

    let provider = Provider {
        sent: collect_map(store, encodings.sent, sent),
        recv: collect_map(store, encodings.recv, recv),
    };

    let tree = EncodingTree::new(hasher, idxs, &provider)?;
    let root = tree.root();

    ctx.io_mut().send(root.clone()).await?;

    let commitment = EncodingCommitment { root };

    Ok((commitment, tree))
}
/// Store providing the correlated keys used to mask encodings.
pub(crate) trait KeyStore {
    /// Returns the global correlation delta.
    fn delta(&self) -> &Delta;
    /// Returns the keys for the given plaintext reference, if present.
    fn get_keys(&self, data: Vector<U8>) -> Option<&[Key]>;
}

impl KeyStore for crate::verifier::Zk {
    fn delta(&self) -> &Delta {
        crate::verifier::Zk::delta(self)
    }

    fn get_keys(&self, data: Vector<U8>) -> Option<&[Key]> {
        self.get_keys(data).ok()
    }
}

/// Store providing the MACs used to unmask encodings.
pub(crate) trait MacStore {
    /// Returns the MACs for the given plaintext reference, if present.
    fn get_macs(&self, data: Vector<U8>) -> Option<&[Mac]>;
}

impl MacStore for crate::prover::Zk {
    fn get_macs(&self, data: Vector<U8>) -> Option<&[Mac]> {
        self.get_macs(data).ok()
    }
}
/// Encoding provider backed by range maps of unmasked encodings, used to
/// build the encoding tree.
#[derive(Debug)]
struct Provider {
    sent: RangeMap<EncodingSlice>,
    recv: RangeMap<EncodingSlice>,
}

impl EncodingProvider for Provider {
    fn provide_encoding(
        &self,
        direction: Direction,
        range: Range<usize>,
        dest: &mut Vec<u8>,
    ) -> Result<(), EncodingProviderError> {
        let encodings = match direction {
            Direction::Sent => &self.sent,
            Direction::Received => &self.recv,
        };
        // Fails if the requested range was not among the committed ranges.
        let encoding = encodings.get(range).ok_or(EncodingProviderError)?;
        dest.extend_from_slice(encoding);
        Ok(())
    }
}
/// Owned slice of encodings, `ENCODING_SIZE` bytes per plaintext byte.
#[derive(Debug)]
struct EncodingSlice(Vec<u8>);

impl From<&[u8]> for EncodingSlice {
    fn from(value: &[u8]) -> Self {
        Self(value.to_vec())
    }
}

impl Item for EncodingSlice {
    type Slice<'a>
        = &'a [u8]
    where
        Self: 'a;

    // Length is measured in plaintext bytes, not encoding bytes.
    fn length(&self) -> usize {
        self.0.len() / ENCODING_SIZE
    }

    // `range` is in plaintext bytes; scale it to encoding bytes.
    fn slice<'a>(&'a self, range: Range<usize>) -> Option<Self::Slice<'a>> {
        self.0
            .get(range.start * ENCODING_SIZE..range.end * ENCODING_SIZE)
    }
}
/// Encoding protocol error.
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct EncodingError(#[from] ErrorRepr);

/// Inner representation of [`EncodingError`].
#[derive(Debug, thiserror::Error)]
#[error("encoding protocol error: {0}")]
enum ErrorRepr {
    #[error("I/O error: {0}")]
    Io(std::io::Error),
    #[error("incorrect MAC count for {direction}: expected {expected}, got {got}")]
    IncorrectMacCount {
        direction: Direction,
        expected: usize,
        got: usize,
    },
    #[error("encoding tree error: {0}")]
    EncodingTree(EncodingTreeError),
    #[error("unsupported hash algorithm: {0}")]
    UnsupportedHashAlgorithm(HashAlgId),
}

impl From<std::io::Error> for EncodingError {
    fn from(value: std::io::Error) -> Self {
        Self(ErrorRepr::Io(value))
    }
}

impl From<EncodingTreeError> for EncodingError {
    fn from(value: EncodingTreeError) -> Self {
        Self(ErrorRepr::EncodingTree(value))
    }
}

View File

@@ -3,7 +3,7 @@
use std::collections::HashMap;
use mpz_core::bitvec::BitVec;
use mpz_hash::sha256::Sha256;
use mpz_hash::{blake3::Blake3, sha256::Sha256};
use mpz_memory_core::{
DecodeFutureTyped, MemoryExt, Vector,
binary::{Binary, U8},
@@ -18,7 +18,7 @@ use tlsn_core::{
},
};
use crate::{Role, commit::transcript::TranscriptRefs};
use crate::{Role, transcript_internal::TranscriptRefs};
/// Future which will resolve to the committed hash values.
#[derive(Debug)]
@@ -107,6 +107,12 @@ pub(crate) fn verify_hash(
Ok(HashCommitFuture { futs })
}
#[derive(Clone)]
enum Hasher {
Sha256(Sha256),
Blake3(Blake3),
}
/// Commit plaintext hashes of the transcript.
#[allow(clippy::type_complexity)]
fn hash_commit_inner(
@@ -135,20 +141,50 @@ fn hash_commit_inner(
let hash = match alg {
HashAlgId::SHA256 => {
let mut hasher = if let Some(hasher) = hashers.get(&alg).cloned() {
let mut hasher = if let Some(Hasher::Sha256(hasher)) = hashers.get(&alg).cloned() {
hasher
} else {
let hasher = Sha256::new_with_init(vm).map_err(HashCommitError::hasher)?;
hashers.insert(alg, hasher.clone());
hashers.insert(alg, Hasher::Sha256(hasher.clone()));
hasher
};
for plaintext in refs.get(direction, &idx).expect("plaintext refs are valid") {
hasher.update(&plaintext);
let refs = match direction {
Direction::Sent => &refs.sent,
Direction::Received => &refs.recv,
};
for range in idx.iter_ranges() {
hasher.update(&refs.get(range).expect("plaintext refs are valid"));
}
hasher.update(&blinder);
hasher.finalize(vm).map_err(HashCommitError::hasher)?
}
HashAlgId::BLAKE3 => {
let mut hasher = if let Some(Hasher::Blake3(hasher)) = hashers.get(&alg).cloned() {
hasher
} else {
let hasher = Blake3::new(vm).map_err(HashCommitError::hasher)?;
hashers.insert(alg, Hasher::Blake3(hasher.clone()));
hasher
};
let refs = match direction {
Direction::Sent => &refs.sent,
Direction::Received => &refs.recv,
};
for range in idx.iter_ranges() {
hasher
.update(vm, &refs.get(range).expect("plaintext refs are valid"))
.map_err(HashCommitError::hasher)?;
}
hasher
.update(vm, &blinder)
.map_err(HashCommitError::hasher)?;
hasher.finalize(vm).map_err(HashCommitError::hasher)?
}
alg => {
return Err(HashCommitError::unsupported_alg(alg));
}

View File

@@ -3,29 +3,21 @@
pub(crate) mod config;
mod error;
pub mod state;
mod verify;
use std::sync::Arc;
pub use config::{VerifierConfig, VerifierConfigBuilder, VerifierConfigBuilderError};
pub use error::VerifierError;
pub use tlsn_core::{
VerifierOutput, VerifyConfig, VerifyConfigBuilder, VerifyConfigBuilderError,
webpki::ServerCertVerifier,
};
pub use tlsn_core::{VerifierOutput, webpki::ServerCertVerifier};
use crate::{
Role,
commit::{
commit_records,
hash::verify_hash,
transcript::{TranscriptRefs, decode_transcript, verify_transcript},
},
config::ProtocolConfig,
context::build_mt_context,
encoding,
msg::{Response, SetupRequest},
mux::attach_mux,
tag::verify_tags,
zk_aes_ctr::ZkAesCtr,
};
use futures::{AsyncRead, AsyncWrite, TryFutureExt};
use mpc_tls::{MpcTlsFollower, SessionKeys};
@@ -34,12 +26,11 @@ use mpz_core::Block;
use mpz_garble_core::Delta;
use mpz_vm_core::prelude::*;
use mpz_zk::VerifierConfig as ZkVerifierConfig;
use serio::stream::IoStreamExt;
use tls_core::msgs::enums::ContentType;
use serio::{SinkExt, stream::IoStreamExt};
use tlsn_core::{
ProvePayload,
ProveRequest,
connection::{ConnectionInfo, ServerName},
transcript::{TlsTranscript, TranscriptCommitment},
transcript::TlsTranscript,
};
use tlsn_deap::Deap;
use tokio::sync::Mutex;
@@ -97,40 +88,70 @@ impl Verifier<state::Initialized> {
pub async fn setup<S: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
self,
socket: S,
) -> Result<Verifier<state::Setup>, VerifierError> {
) -> Result<Verifier<state::Config>, VerifierError> {
let (mut mux_fut, mux_ctrl) = attach_mux(socket, Role::Verifier);
let mut mt = build_mt_context(mux_ctrl.clone());
let mut ctx = mux_fut.poll_with(mt.new_context()).await?;
// Receives protocol configuration from prover to perform compatibility check.
let protocol_config = mux_fut
.poll_with(async {
let peer_configuration: ProtocolConfig = ctx.io_mut().expect_next().await?;
self.config
.protocol_config_validator()
.validate(&peer_configuration)?;
let SetupRequest { config, version } =
mux_fut.poll_with(ctx.io_mut().expect_next()).await?;
Ok::<_, VerifierError>(peer_configuration)
})
.await?;
if version != *crate::config::VERSION {
let msg = format!(
"prover version does not match with verifier: {version} != {}",
*crate::config::VERSION
);
mux_fut
.poll_with(ctx.io_mut().send(Response::err(Some(msg.clone()))))
.await?;
let delta = Delta::random(&mut rand::rng());
let (vm, mut mpc_tls) = build_mpc_tls(&self.config, &protocol_config, delta, ctx);
// Wait for the prover to correctly close the connection.
if !mux_fut.is_complete() {
mux_ctrl.close();
mux_fut.await?;
}
return Err(VerifierError::config(msg));
}
Ok(Verifier {
config: self.config,
span: self.span,
state: state::Config {
mux_ctrl,
mux_fut,
ctx,
config,
},
})
}
}
impl Verifier<state::Config> {
/// Returns the proposed protocol configuration.
pub fn config(&self) -> &ProtocolConfig {
&self.state.config
}
/// Accepts the proposed protocol configuration.
#[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn accept(self) -> Result<Verifier<state::Setup>, VerifierError> {
let state::Config {
mux_ctrl,
mut mux_fut,
mut ctx,
config,
} = self.state;
mux_fut.poll_with(ctx.io_mut().send(Response::ok())).await?;
let (vm, mut mpc_tls) = build_mpc_tls(&self.config, &config, ctx);
// Allocate resources for MPC-TLS in the VM.
let mut keys = mpc_tls.alloc()?;
let vm_lock = vm.try_lock().expect("VM is not locked");
translate_keys(&mut keys, &vm_lock)?;
// Allocate for committing to plaintext.
let mut zk_aes_ctr_sent = ZkAesCtr::new(Role::Verifier);
zk_aes_ctr_sent.set_key(keys.client_write_key, keys.client_write_iv);
zk_aes_ctr_sent.alloc(&mut *vm_lock.zk(), protocol_config.max_sent_data())?;
let mut zk_aes_ctr_recv = ZkAesCtr::new(Role::Verifier);
zk_aes_ctr_recv.set_key(keys.server_write_key, keys.server_write_iv);
zk_aes_ctr_recv.alloc(&mut *vm_lock.zk(), protocol_config.max_recv_data())?;
drop(vm_lock);
debug!("setting up mpc-tls");
@@ -145,37 +166,34 @@ impl Verifier<state::Initialized> {
state: state::Setup {
mux_ctrl,
mux_fut,
delta,
mpc_tls,
zk_aes_ctr_sent,
zk_aes_ctr_recv,
keys,
vm,
},
})
}
/// Runs the TLS verifier to completion, verifying the TLS session.
///
/// This is a convenience method which runs all the steps needed for
/// verification.
///
/// # Arguments
///
/// * `socket` - The socket to the prover.
/// Rejects the proposed protocol configuration.
#[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn verify<S: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
self,
socket: S,
config: &VerifyConfig,
) -> Result<VerifierOutput, VerifierError> {
let mut verifier = self.setup(socket).await?.run().await?;
pub async fn reject(self, msg: Option<&str>) -> Result<(), VerifierError> {
let state::Config {
mux_ctrl,
mut mux_fut,
mut ctx,
..
} = self.state;
let output = verifier.verify(config).await?;
mux_fut
.poll_with(ctx.io_mut().send(Response::err(msg)))
.await?;
verifier.close().await?;
// Wait for the prover to correctly close the connection.
if !mux_fut.is_complete() {
mux_ctrl.close();
mux_fut.await?;
}
Ok(output)
Ok(())
}
}
@@ -186,10 +204,7 @@ impl Verifier<state::Setup> {
let state::Setup {
mux_ctrl,
mut mux_fut,
delta,
mpc_tls,
mut zk_aes_ctr_sent,
mut zk_aes_ctr_recv,
vm,
keys,
} = self.state;
@@ -230,27 +245,6 @@ impl Verifier<state::Setup> {
)
.map_err(VerifierError::zk)?;
// Prepare for the prover to prove received plaintext.
let (sent_refs, sent_proof) = commit_records(
&mut vm,
&mut zk_aes_ctr_sent,
tls_transcript
.sent()
.iter()
.filter(|record| record.typ == ContentType::ApplicationData),
)
.map_err(VerifierError::zk)?;
let (recv_refs, recv_proof) = commit_records(
&mut vm,
&mut zk_aes_ctr_recv,
tls_transcript
.recv()
.iter()
.filter(|record| record.typ == ContentType::ApplicationData),
)
.map_err(VerifierError::zk)?;
mux_fut
.poll_with(vm.execute_all(&mut ctx).map_err(VerifierError::zk))
.await?;
@@ -260,23 +254,16 @@ impl Verifier<state::Setup> {
// authenticated from the verifier's perspective.
tag_proof.verify().map_err(VerifierError::zk)?;
// Verify the plaintext proofs.
sent_proof.verify().map_err(VerifierError::zk)?;
recv_proof.verify().map_err(VerifierError::zk)?;
let transcript_refs = TranscriptRefs::new(sent_refs, recv_refs);
Ok(Verifier {
config: self.config,
span: self.span,
state: state::Committed {
mux_ctrl,
mux_fut,
delta,
ctx,
vm,
keys,
tls_transcript,
transcript_refs,
},
})
}
@@ -288,142 +275,34 @@ impl Verifier<state::Committed> {
&self.state.tls_transcript
}
/// Verifies information from the prover.
///
/// # Arguments
///
/// * `config` - Verification configuration.
/// Begins verification of statements from the prover.
#[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn verify(
&mut self,
#[allow(unused_variables)] config: &VerifyConfig,
) -> Result<VerifierOutput, VerifierError> {
pub async fn verify(self) -> Result<Verifier<state::Verify>, VerifierError> {
let state::Committed {
mux_fut,
ctx,
delta,
mux_ctrl,
mut mux_fut,
mut ctx,
vm,
keys,
tls_transcript,
transcript_refs,
..
} = &mut self.state;
} = self.state;
let ProvePayload {
handshake,
transcript,
transcript_commit,
} = mux_fut
let request = mux_fut
.poll_with(ctx.io_mut().expect_next().map_err(VerifierError::from))
.await?;
let verifier = if let Some(root_store) = self.config.root_store() {
ServerCertVerifier::new(root_store).map_err(VerifierError::config)?
} else {
ServerCertVerifier::mozilla()
};
let server_name = if let Some((name, cert_data)) = handshake {
cert_data
.verify(
&verifier,
tls_transcript.time(),
tls_transcript.server_ephemeral_key(),
&name,
)
.map_err(VerifierError::verify)?;
Some(name)
} else {
None
};
if let Some(partial_transcript) = &transcript {
let sent_len = tls_transcript
.sent()
.iter()
.filter_map(|record| {
if let ContentType::ApplicationData = record.typ {
Some(record.ciphertext.len())
} else {
None
}
})
.sum::<usize>();
let recv_len = tls_transcript
.recv()
.iter()
.filter_map(|record| {
if let ContentType::ApplicationData = record.typ {
Some(record.ciphertext.len())
} else {
None
}
})
.sum::<usize>();
// Check ranges.
if partial_transcript.len_sent() != sent_len
|| partial_transcript.len_received() != recv_len
{
return Err(VerifierError::verify(
"prover sent transcript with incorrect length",
));
}
decode_transcript(
Ok(Verifier {
config: self.config,
span: self.span,
state: state::Verify {
mux_ctrl,
mux_fut,
ctx,
vm,
partial_transcript.sent_authed(),
partial_transcript.received_authed(),
transcript_refs,
)
.map_err(VerifierError::zk)?;
}
let mut transcript_commitments = Vec::new();
let mut hash_commitments = None;
if let Some(commit_config) = transcript_commit {
if commit_config.encoding() {
let commitment = mux_fut
.poll_with(encoding::transfer(
ctx,
transcript_refs,
delta,
|plaintext| vm.get_keys(plaintext).expect("reference is valid"),
))
.await?;
transcript_commitments.push(TranscriptCommitment::Encoding(commitment));
}
if commit_config.has_hash() {
hash_commitments = Some(
verify_hash(vm, transcript_refs, commit_config.iter_hash().cloned())
.map_err(VerifierError::verify)?,
);
}
}
mux_fut
.poll_with(vm.execute_all(ctx).map_err(VerifierError::zk))
.await?;
// Verify revealed data.
if let Some(partial_transcript) = &transcript {
verify_transcript(vm, partial_transcript, transcript_refs)
.map_err(VerifierError::verify)?;
}
if let Some(hash_commitments) = hash_commitments {
for commitment in hash_commitments.try_recv().map_err(VerifierError::verify)? {
transcript_commitments.push(TranscriptCommitment::Hash(commitment));
}
}
Ok(VerifierOutput {
server_name,
transcript,
transcript_commitments,
keys,
tls_transcript,
request,
},
})
}
@@ -444,14 +323,104 @@ impl Verifier<state::Committed> {
}
}
impl Verifier<state::Verify> {
/// Returns the proving request.
pub fn request(&self) -> &ProveRequest {
&self.state.request
}
/// Accepts the proving request.
pub async fn accept(
self,
) -> Result<(VerifierOutput, Verifier<state::Committed>), VerifierError> {
let state::Verify {
mux_ctrl,
mut mux_fut,
mut ctx,
mut vm,
keys,
tls_transcript,
request,
} = self.state;
mux_fut.poll_with(ctx.io_mut().send(Response::ok())).await?;
let cert_verifier = if let Some(root_store) = self.config.root_store() {
ServerCertVerifier::new(root_store).map_err(VerifierError::config)?
} else {
ServerCertVerifier::mozilla()
};
let output = mux_fut
.poll_with(verify::verify(
&mut ctx,
&mut vm,
&keys,
&cert_verifier,
&tls_transcript,
request,
))
.await?;
Ok((
output,
Verifier {
config: self.config,
span: self.span,
state: state::Committed {
mux_ctrl,
mux_fut,
ctx,
vm,
keys,
tls_transcript,
},
},
))
}
/// Rejects the proving request.
pub async fn reject(
self,
msg: Option<&str>,
) -> Result<Verifier<state::Committed>, VerifierError> {
let state::Verify {
mux_ctrl,
mut mux_fut,
mut ctx,
vm,
keys,
tls_transcript,
..
} = self.state;
mux_fut
.poll_with(ctx.io_mut().send(Response::err(msg)))
.await?;
Ok(Verifier {
config: self.config,
span: self.span,
state: state::Committed {
mux_ctrl,
mux_fut,
ctx,
vm,
keys,
tls_transcript,
},
})
}
}
fn build_mpc_tls(
config: &VerifierConfig,
protocol_config: &ProtocolConfig,
delta: Delta,
ctx: Context,
) -> (Arc<Mutex<Deap<Mpc, Zk>>>, MpcTlsFollower) {
let mut rng = rand::rng();
let delta = Delta::random(&mut rng);
let base_ot_send = mpz_ot::chou_orlandi::Sender::default();
let base_ot_recv = mpz_ot::chou_orlandi::Receiver::default();
let rcot_send = mpz_ot::kos::Sender::new(

View File

@@ -4,23 +4,20 @@ use mpc_tls::Config;
use serde::{Deserialize, Serialize};
use tlsn_core::webpki::RootCertStore;
use crate::config::{NetworkSetting, ProtocolConfig, ProtocolConfigValidator};
use crate::config::{NetworkSetting, ProtocolConfig};
/// Configuration for the [`Verifier`](crate::tls::Verifier).
#[allow(missing_docs)]
#[derive(derive_builder::Builder, Serialize, Deserialize)]
#[builder(pattern = "owned")]
pub struct VerifierConfig {
protocol_config_validator: ProtocolConfigValidator,
#[builder(setter(strip_option))]
#[builder(default, setter(strip_option))]
root_store: Option<RootCertStore>,
}
impl Debug for VerifierConfig {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
f.debug_struct("VerifierConfig")
.field("protocol_config_validator", &self.protocol_config_validator)
.finish_non_exhaustive()
f.debug_struct("VerifierConfig").finish_non_exhaustive()
}
}
@@ -30,11 +27,6 @@ impl VerifierConfig {
VerifierConfigBuilder::default()
}
/// Returns the protocol configuration validator.
pub fn protocol_config_validator(&self) -> &ProtocolConfigValidator {
&self.protocol_config_validator
}
/// Returns the root certificate store.
pub fn root_store(&self) -> Option<&RootCertStore> {
self.root_store.as_ref()

View File

@@ -1,7 +1,9 @@
use crate::{encoding::EncodingError, zk_aes_ctr::ZkAesCtrError};
use mpc_tls::MpcTlsError;
use std::{error::Error, fmt};
use mpc_tls::MpcTlsError;
use crate::transcript_internal::commit::encoding::EncodingError;
/// Error for [`Verifier`](crate::Verifier).
#[derive(Debug, thiserror::Error)]
pub struct VerifierError {
@@ -86,12 +88,6 @@ impl From<std::io::Error> for VerifierError {
}
}
impl From<crate::config::ProtocolConfigError> for VerifierError {
fn from(e: crate::config::ProtocolConfigError) -> Self {
Self::new(ErrorKind::Config, e)
}
}
impl From<uid_mux::yamux::ConnectionError> for VerifierError {
fn from(e: uid_mux::yamux::ConnectionError) -> Self {
Self::new(ErrorKind::Io, e)
@@ -110,12 +106,6 @@ impl From<MpcTlsError> for VerifierError {
}
}
impl From<ZkAesCtrError> for VerifierError {
fn from(e: ZkAesCtrError) -> Self {
Self::new(ErrorKind::Zk, e)
}
}
impl From<EncodingError> for VerifierError {
fn from(e: EncodingError) -> Self {
Self::new(ErrorKind::Commit, e)

View File

@@ -3,14 +3,12 @@
use std::sync::Arc;
use crate::{
commit::transcript::TranscriptRefs,
config::ProtocolConfig,
mux::{MuxControl, MuxFuture},
zk_aes_ctr::ZkAesCtr,
};
use mpc_tls::{MpcTlsFollower, SessionKeys};
use mpz_common::Context;
use mpz_memory_core::correlated::Delta;
use tlsn_core::transcript::TlsTranscript;
use tlsn_core::{ProveRequest, transcript::TlsTranscript};
use tlsn_deap::Deap;
use tokio::sync::Mutex;
@@ -24,38 +22,63 @@ pub struct Initialized;
opaque_debug::implement!(Initialized);
/// State after receiving protocol configuration from the prover.
pub struct Config {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) ctx: Context,
pub(crate) config: ProtocolConfig,
}
opaque_debug::implement!(Config);
/// State after setup has completed.
pub struct Setup {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) delta: Delta,
pub(crate) mpc_tls: MpcTlsFollower,
pub(crate) zk_aes_ctr_sent: ZkAesCtr,
pub(crate) zk_aes_ctr_recv: ZkAesCtr,
pub(crate) keys: SessionKeys,
pub(crate) vm: Arc<Mutex<Deap<Mpc, Zk>>>,
}
opaque_debug::implement!(Setup);
/// State after the TLS connection has been closed.
pub struct Committed {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) delta: Delta,
pub(crate) ctx: Context,
pub(crate) vm: Zk,
pub(crate) keys: SessionKeys,
pub(crate) tls_transcript: TlsTranscript,
pub(crate) transcript_refs: TranscriptRefs,
}
opaque_debug::implement!(Committed);
/// State after receiving a proving request.
pub struct Verify {
    pub(crate) mux_ctrl: MuxControl,
    pub(crate) mux_fut: MuxFuture,
    pub(crate) ctx: Context,
    pub(crate) vm: Zk,
    pub(crate) keys: SessionKeys,
    pub(crate) tls_transcript: TlsTranscript,
    // The prover's pending request, awaiting accept/reject.
    pub(crate) request: ProveRequest,
}

opaque_debug::implement!(Verify);
// Every protocol state participates in the sealed `VerifierState` trait.
impl VerifierState for Initialized {}
impl VerifierState for Config {}
impl VerifierState for Setup {}
impl VerifierState for Committed {}
impl VerifierState for Verify {}
mod sealed {
    // Sealing trait: prevents crates outside this one from implementing
    // the verifier state trait for their own types.
    pub trait Sealed {}

    impl Sealed for super::Initialized {}
    impl Sealed for super::Config {}
    impl Sealed for super::Setup {}
    impl Sealed for super::Committed {}
    impl Sealed for super::Verify {}
}

View File

@@ -0,0 +1,179 @@
use mpc_tls::SessionKeys;
use mpz_common::Context;
use mpz_memory_core::binary::Binary;
use mpz_vm_core::Vm;
use rangeset::{RangeSet, UnionMut};
use tlsn_core::{
ProveRequest, VerifierOutput,
transcript::{
ContentType, Direction, PartialTranscript, Record, TlsTranscript, TranscriptCommitment,
},
webpki::ServerCertVerifier,
};
use crate::{
transcript_internal::{
TranscriptRefs,
auth::verify_plaintext,
commit::{
encoding::{self, KeyStore},
hash::verify_hash,
},
},
verifier::VerifierError,
};
/// Runs the verifier side of a proving request.
///
/// Verifies the revealed plaintext against the TLS ciphertext in zero
/// knowledge, optionally verifies the server identity, and processes the
/// transcript commitments requested by the prover.
///
/// # Arguments
///
/// * `ctx` - Context for communication with the prover.
/// * `vm` - Zero-knowledge virtual machine.
/// * `keys` - TLS session key references.
/// * `cert_verifier` - Server certificate verifier.
/// * `tls_transcript` - Transcript of TLS records.
/// * `request` - The prover's request.
pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
    ctx: &mut Context,
    vm: &mut T,
    keys: &SessionKeys,
    cert_verifier: &ServerCertVerifier,
    tls_transcript: &TlsTranscript,
    request: ProveRequest,
) -> Result<VerifierOutput, VerifierError> {
    let ProveRequest {
        handshake,
        transcript,
        transcript_commit,
    } = request;

    // Concatenated application-data ciphertext, per direction.
    let ciphertext_sent = collect_ciphertext(tls_transcript.sent());
    let ciphertext_recv = collect_ciphertext(tls_transcript.recv());

    let has_reveal = transcript.is_some();
    // The prover's (partial) plaintext must match the ciphertext lengths
    // exactly; when nothing is revealed, use an empty transcript of the
    // correct lengths.
    let transcript = if let Some(transcript) = transcript {
        if transcript.len_sent() != ciphertext_sent.len()
            || transcript.len_received() != ciphertext_recv.len()
        {
            return Err(VerifierError::verify(
                "prover sent transcript with incorrect length",
            ));
        }

        transcript
    } else {
        PartialTranscript::new(ciphertext_sent.len(), ciphertext_recv.len())
    };

    // If the prover chose to prove the server identity, verify the
    // certificate data against the handshake parameters.
    let server_name = if let Some((name, cert_data)) = handshake {
        cert_data
            .verify(
                cert_verifier,
                tls_transcript.time(),
                tls_transcript.server_ephemeral_key(),
                &name,
            )
            .map_err(VerifierError::verify)?;

        Some(name)
    } else {
        None
    };

    // Collect the byte ranges, per direction, which must be authenticated
    // for the requested commitments (hash and/or encoding).
    let (mut commit_sent, mut commit_recv) = (RangeSet::default(), RangeSet::default());
    if let Some(commit_config) = transcript_commit.as_ref() {
        commit_config
            .iter_hash()
            .for_each(|(direction, idx, _)| match direction {
                Direction::Sent => commit_sent.union_mut(idx),
                Direction::Received => commit_recv.union_mut(idx),
            });

        if let Some((sent, recv)) = commit_config.encoding() {
            commit_sent.union_mut(sent);
            commit_recv.union_mut(recv);
        }
    }

    // Prepare ZK proofs that the plaintext encrypts to the observed
    // application-data ciphertext, for each direction.
    let (sent_refs, sent_proof) = verify_plaintext(
        vm,
        keys.client_write_key,
        keys.client_write_iv,
        transcript.sent_unsafe(),
        &ciphertext_sent,
        tls_transcript
            .sent()
            .iter()
            .filter(|record| record.typ == ContentType::ApplicationData),
        transcript.sent_authed(),
        &commit_sent,
    )
    .map_err(VerifierError::zk)?;

    let (recv_refs, recv_proof) = verify_plaintext(
        vm,
        keys.server_write_key,
        keys.server_write_iv,
        transcript.received_unsafe(),
        &ciphertext_recv,
        tls_transcript
            .recv()
            .iter()
            .filter(|record| record.typ == ContentType::ApplicationData),
        transcript.received_authed(),
        &commit_recv,
    )
    .map_err(VerifierError::zk)?;

    let transcript_refs = TranscriptRefs {
        sent: sent_refs,
        recv: recv_refs,
    };

    // Prepare hash commitments, if any were requested.
    let mut transcript_commitments = Vec::new();
    let mut hash_commitments = None;
    if let Some(commit_config) = transcript_commit.as_ref()
        && commit_config.has_hash()
    {
        hash_commitments = Some(
            verify_hash(vm, &transcript_refs, commit_config.iter_hash().cloned())
                .map_err(VerifierError::verify)?,
        );
    }

    // Execute all pending VM operations, then check the plaintext proofs.
    vm.execute_all(ctx).await.map_err(VerifierError::zk)?;

    sent_proof.verify().map_err(VerifierError::verify)?;
    recv_proof.verify().map_err(VerifierError::verify)?;

    // Transfer encodings for the committed ranges, if requested.
    let mut encoder_secret = None;
    if let Some(commit_config) = transcript_commit
        && let Some((sent, recv)) = commit_config.encoding()
    {
        let sent_map = transcript_refs
            .sent
            .index(sent)
            .expect("ranges were authenticated");
        let recv_map = transcript_refs
            .recv
            .index(recv)
            .expect("ranges were authenticated");

        let (secret, commitment) = encoding::transfer(ctx, vm, &sent_map, &recv_map).await?;
        encoder_secret = Some(secret);
        transcript_commitments.push(TranscriptCommitment::Encoding(commitment));
    }

    // Collect the hash commitments produced during VM execution.
    if let Some(hash_commitments) = hash_commitments {
        for commitment in hash_commitments.try_recv().map_err(VerifierError::verify)? {
            transcript_commitments.push(TranscriptCommitment::Hash(commitment));
        }
    }

    Ok(VerifierOutput {
        server_name,
        // Only return a transcript if the prover actually revealed data.
        transcript: has_reveal.then_some(transcript),
        encoder_secret,
        transcript_commitments,
    })
}
/// Concatenates the ciphertext of all application-data records into a single
/// byte vector, preserving record order.
fn collect_ciphertext<'a>(records: impl IntoIterator<Item = &'a Record>) -> Vec<u8> {
    records
        .into_iter()
        .filter(|record| record.typ == ContentType::ApplicationData)
        .flat_map(|record| record.ciphertext.iter().copied())
        .collect()
}

View File

@@ -1,214 +0,0 @@
//! Zero-knowledge AES-CTR encryption.
use cipher::{
Cipher, CipherError, Keystream,
aes::{Aes128, AesError},
};
use mpz_memory_core::{
Array, Vector,
binary::{Binary, U8},
};
use mpz_vm_core::{Vm, prelude::*};
use crate::Role;
// CTR block layout: 8-byte explicit nonce, 4-byte counter, 16-byte block.
type Nonce = Array<U8, 8>;
type Ctr = Array<U8, 4>;
type Block = Array<U8, 16>;

// First counter value used for plaintext keystream blocks.
// NOTE(review): presumably lower counter values are reserved by the record
// encryption scheme — confirm against the cipher implementation.
const START_CTR: u32 = 2;
/// ZK AES-CTR encryption.
#[derive(Debug)]
pub(crate) struct ZkAesCtr {
    // Whether this party acts as the prover or the verifier.
    role: Role,
    // Underlying AES-128 cipher used to produce the keystream.
    aes: Aes128,
    // Allocation/usage state of the preprocessed input and keystream.
    state: State,
}
impl ZkAesCtr {
    /// Creates a new ZK AES-CTR instance.
    pub(crate) fn new(role: Role) -> Self {
        Self {
            role,
            aes: Aes128::default(),
            state: State::Init,
        }
    }

    /// Returns the role.
    pub(crate) fn role(&self) -> &Role {
        &self.role
    }

    /// Allocates `len` bytes for encryption.
    ///
    /// Must be called exactly once, before [`Self::encrypt`].
    ///
    /// # Arguments
    ///
    /// * `vm` - Virtual machine.
    /// * `len` - Number of plaintext bytes to preprocess.
    pub(crate) fn alloc(
        &mut self,
        vm: &mut dyn Vm<Binary>,
        len: usize,
    ) -> Result<(), ZkAesCtrError> {
        let State::Init = self.state.take() else {
            Err(ErrorRepr::State {
                reason: "must be in init state to allocate",
            })?
        };

        // Round up to the nearest block size.
        let len = 16 * len.div_ceil(16);

        let input = vm.alloc_vec::<U8>(len).map_err(ZkAesCtrError::vm)?;
        let keystream = self.aes.alloc_keystream(vm, len)?;

        // The plaintext is known only to the prover: mark it private on the
        // prover side and blind on the verifier side.
        match self.role {
            Role::Prover => vm.mark_private(input).map_err(ZkAesCtrError::vm)?,
            Role::Verifier => vm.mark_blind(input).map_err(ZkAesCtrError::vm)?,
        }

        self.state = State::Ready { input, keystream };

        Ok(())
    }

    /// Sets the key and IV for the cipher.
    pub(crate) fn set_key(&mut self, key: Array<U8, 16>, iv: Array<U8, 4>) {
        self.aes.set_key(key);
        self.aes.set_iv(iv);
    }

    /// Proves the encryption of `len` bytes.
    ///
    /// Here we only assign certain values in the VM but the actual proving
    /// happens later when the plaintext is assigned and the VM is executed.
    ///
    /// # Arguments
    ///
    /// * `vm` - Virtual machine.
    /// * `explicit_nonce` - Explicit nonce.
    /// * `len` - Length of the plaintext in bytes.
    ///
    /// # Returns
    ///
    /// A VM reference to the plaintext and the ciphertext.
    pub(crate) fn encrypt(
        &mut self,
        vm: &mut dyn Vm<Binary>,
        explicit_nonce: Vec<u8>,
        len: usize,
    ) -> Result<(Vector<U8>, Vector<U8>), ZkAesCtrError> {
        let State::Ready { input, keystream } = &mut self.state else {
            Err(ErrorRepr::State {
                reason: "must be in ready state to encrypt",
            })?
        };

        // The explicit nonce must be exactly 8 bytes (see the `Nonce` alias).
        let explicit_nonce: [u8; 8] =
            explicit_nonce
                .try_into()
                .map_err(|explicit_nonce: Vec<_>| ErrorRepr::ExplicitNonceLength {
                    expected: 8,
                    actual: explicit_nonce.len(),
                })?;

        // Pad the requested length up to a whole number of 16-byte blocks.
        let block_count = len.div_ceil(16);
        let padded_len = block_count * 16;
        let padding_len = padded_len - len;

        if padded_len > input.len() {
            Err(ErrorRepr::InsufficientPreprocessing {
                expected: padded_len,
                actual: input.len(),
            })?
        }

        // Take the required bytes from the end of the preprocessed input.
        let mut input = input.split_off(input.len() - padded_len);
        let keystream = keystream.consume(padded_len)?;
        let mut output = keystream.apply(vm, input)?;

        // Assign counter block inputs.
        let mut ctr = START_CTR..;
        keystream.assign(vm, explicit_nonce, move || {
            ctr.next().expect("range is unbounded").to_be_bytes()
        })?;

        // Assign zeroes to the padding.
        if padding_len > 0 {
            let padding = input.split_off(input.len() - padding_len);
            // To simplify the impl, we don't mark the padding as public, that's why only
            // the prover assigns it.
            if let Role::Prover = self.role {
                vm.assign(padding, vec![0; padding_len])
                    .map_err(ZkAesCtrError::vm)?;
            }
            vm.commit(padding).map_err(ZkAesCtrError::vm)?;

            // Drop the padding blocks from the returned ciphertext.
            output.truncate(len);
        }

        Ok((input, output))
    }
}
// Internal state machine for `ZkAesCtr`.
enum State {
    // Nothing allocated yet; `alloc` must be called first.
    Init,
    // Preprocessing complete; ready to `encrypt`.
    Ready {
        // Preprocessed plaintext input reference.
        input: Vector<U8>,
        // Preprocessed keystream.
        keystream: Keystream<Nonce, Ctr, Block>,
    },
    // Left behind by `State::take` when a transition fails partway.
    Error,
}
impl State {
fn take(&mut self) -> Self {
std::mem::replace(self, State::Error)
}
}
impl std::fmt::Debug for State {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
State::Init => write!(f, "Init"),
State::Ready { .. } => write!(f, "Ready"),
State::Error => write!(f, "Error"),
}
}
}
/// Error for [`ZkAesCtr`].
// Opaque newtype: the concrete cause stays internal and is surfaced
// transparently through `Display`/`source`.
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct ZkAesCtrError(#[from] ErrorRepr);
impl ZkAesCtrError {
    /// Wraps a VM error into a `ZkAesCtrError`.
    fn vm<E>(source: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync + 'static>>,
    {
        let repr = ErrorRepr::Vm(source.into());
        Self(repr)
    }
}
// Internal error representation for `ZkAesCtrError`.
#[derive(Debug, thiserror::Error)]
#[error("zk aes error")]
enum ErrorRepr {
    // Operation called while in the wrong state (see `State`).
    #[error("invalid state: {reason}")]
    State { reason: &'static str },
    // Error from the underlying AES cipher / keystream.
    #[error("cipher error: {0}")]
    Cipher(Box<dyn std::error::Error + Send + Sync + 'static>),
    // Error from the virtual machine.
    #[error("vm error: {0}")]
    Vm(Box<dyn std::error::Error + Send + Sync + 'static>),
    // Explicit nonce was not exactly 8 bytes.
    #[error("invalid explicit nonce length: expected {expected}, got {actual}")]
    ExplicitNonceLength { expected: usize, actual: usize },
    // `encrypt` asked for more bytes than were preallocated via `alloc`.
    #[error("insufficient preprocessing: expected {expected}, got {actual}")]
    InsufficientPreprocessing { expected: usize, actual: usize },
}
impl From<AesError> for ZkAesCtrError {
    /// Converts an AES cipher error into a `ZkAesCtrError`.
    fn from(err: AesError) -> Self {
        let boxed: Box<dyn std::error::Error + Send + Sync + 'static> = Box::new(err);
        Self(ErrorRepr::Cipher(boxed))
    }
}
impl From<CipherError> for ZkAesCtrError {
    /// Converts a generic cipher error into a `ZkAesCtrError`.
    fn from(err: CipherError) -> Self {
        let boxed: Box<dyn std::error::Error + Send + Sync + 'static> = Box::new(err);
        Self(ErrorRepr::Cipher(boxed))
    }
}

View File

@@ -1,11 +1,17 @@
use futures::{AsyncReadExt, AsyncWriteExt};
use rangeset::RangeSet;
use tlsn::{
config::{CertificateDer, ProtocolConfig, ProtocolConfigValidator, RootCertStore},
config::{CertificateDer, ProtocolConfig, RootCertStore},
connection::ServerName,
hash::{HashAlgId, HashProvider},
prover::{ProveConfig, Prover, ProverConfig, TlsConfig},
transcript::{TranscriptCommitConfig, TranscriptCommitment},
verifier::{Verifier, VerifierConfig, VerifierOutput, VerifyConfig},
transcript::{
Direction, Transcript, TranscriptCommitConfig, TranscriptCommitment,
TranscriptCommitmentKind, TranscriptSecret,
},
verifier::{Verifier, VerifierConfig, VerifierOutput},
};
use tlsn_core::ProverOutput;
use tlsn_server_fixture::bind;
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
@@ -29,11 +35,80 @@ async fn test() {
let (socket_0, socket_1) = tokio::io::duplex(2 << 23);
tokio::join!(prover(socket_0), verifier(socket_1));
let ((full_transcript, prover_output), verifier_output) =
tokio::join!(prover(socket_0), verifier(socket_1));
let partial_transcript = verifier_output.transcript.unwrap();
let ServerName::Dns(server_name) = verifier_output.server_name.unwrap();
assert_eq!(server_name.as_str(), SERVER_DOMAIN);
assert!(!partial_transcript.is_complete());
assert_eq!(
partial_transcript
.sent_authed()
.iter_ranges()
.next()
.unwrap(),
0..10
);
assert_eq!(
partial_transcript
.received_authed()
.iter_ranges()
.next()
.unwrap(),
0..10
);
let encoding_tree = prover_output
.transcript_secrets
.iter()
.find_map(|secret| {
if let TranscriptSecret::Encoding(tree) = secret {
Some(tree)
} else {
None
}
})
.unwrap();
let encoding_commitment = prover_output
.transcript_commitments
.iter()
.find_map(|commitment| {
if let TranscriptCommitment::Encoding(commitment) = commitment {
Some(commitment)
} else {
None
}
})
.unwrap();
let prove_sent = RangeSet::from(1..full_transcript.sent().len() - 1);
let prove_recv = RangeSet::from(1..full_transcript.received().len() - 1);
let idxs = [
(Direction::Sent, prove_sent.clone()),
(Direction::Received, prove_recv.clone()),
];
let proof = encoding_tree.proof(idxs.iter()).unwrap();
let (auth_sent, auth_recv) = proof
.verify_with_provider(
&HashProvider::default(),
&verifier_output.encoder_secret.unwrap(),
encoding_commitment,
full_transcript.sent(),
full_transcript.received(),
)
.unwrap();
assert_eq!(auth_sent, prove_sent);
assert_eq!(auth_recv, prove_recv);
}
#[instrument(skip(verifier_socket))]
async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(verifier_socket: T) {
async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
verifier_socket: T,
) -> (Transcript, ProverOutput) {
let (client_socket, server_socket) = tokio::io::duplex(2 << 16);
let server_task = tokio::spawn(bind(server_socket.compat()));
@@ -86,9 +161,25 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(verifier_soc
let mut builder = TranscriptCommitConfig::builder(prover.transcript());
// Commit to everything
builder.commit_sent(&(0..sent_tx_len)).unwrap();
builder.commit_recv(&(0..recv_tx_len)).unwrap();
for kind in [
TranscriptCommitmentKind::Encoding,
TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256,
},
] {
builder
.commit_with_kind(&(0..sent_tx_len), Direction::Sent, kind)
.unwrap();
builder
.commit_with_kind(&(0..recv_tx_len), Direction::Received, kind)
.unwrap();
builder
.commit_with_kind(&(1..sent_tx_len - 1), Direction::Sent, kind)
.unwrap();
builder
.commit_with_kind(&(1..recv_tx_len - 1), Direction::Received, kind)
.unwrap();
}
let transcript_commit = builder.build().unwrap();
@@ -102,54 +193,39 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(verifier_soc
builder.transcript_commit(transcript_commit);
let config = builder.build().unwrap();
prover.prove(&config).await.unwrap();
let transcript = prover.transcript().clone();
let output = prover.prove(&config).await.unwrap();
prover.close().await.unwrap();
(transcript, output)
}
#[instrument(skip(socket))]
async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(socket: T) {
let config_validator = ProtocolConfigValidator::builder()
.max_sent_data(MAX_SENT_DATA)
.max_recv_data(MAX_RECV_DATA)
.build()
.unwrap();
async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
socket: T,
) -> VerifierOutput {
let verifier = Verifier::new(
VerifierConfig::builder()
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.protocol_config_validator(config_validator)
.build()
.unwrap(),
);
let VerifierOutput {
server_name,
transcript,
transcript_commitments,
} = verifier
.verify(socket.compat(), &VerifyConfig::default())
let verifier = verifier
.setup(socket.compat())
.await
.unwrap()
.accept()
.await
.unwrap()
.run()
.await
.unwrap();
let transcript = transcript.unwrap();
let (output, verifier) = verifier.verify().await.unwrap().accept().await.unwrap();
verifier.close().await.unwrap();
let ServerName::Dns(server_name) = server_name.unwrap();
assert_eq!(server_name.as_str(), SERVER_DOMAIN);
assert!(!transcript.is_complete());
assert_eq!(
transcript.sent_authed().iter_ranges().next().unwrap(),
0..10
);
assert_eq!(
transcript.received_authed().iter_ranges().next().unwrap(),
0..10
);
assert!(matches!(
transcript_commitments[0],
TranscriptCommitment::Encoding(_)
));
output
}

View File

@@ -6,11 +6,15 @@ build-std = ["panic_abort", "std"]
[target.wasm32-unknown-unknown]
rustflags = [
"-C",
"target-feature=+atomics,+bulk-memory,+mutable-globals,+simd128",
"-C",
"-Ctarget-feature=+atomics,+bulk-memory,+mutable-globals,+simd128",
"-Clink-arg=--shared-memory",
# 4GB
"link-arg=--max-memory=4294967296",
"-Clink-arg=--max-memory=4294967296",
"-Clink-arg=--import-memory",
"-Clink-arg=--export=__wasm_init_tls",
"-Clink-arg=--export=__tls_size",
"-Clink-arg=--export=__tls_align",
"-Clink-arg=--export=__tls_base",
"--cfg",
'getrandom_backend="wasm_js"',
]

View File

@@ -1,6 +1,6 @@
[package]
name = "tlsn-wasm"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.14-pre"
edition = "2021"
repository = "https://github.com/tlsnotary/tlsn.git"
description = "A core WebAssembly package for TLSNotary."

View File

@@ -1,5 +1,4 @@
use serde::Deserialize;
use tlsn::config::ProtocolConfigValidator;
use tsify_next::Tsify;
#[derive(Debug, Tsify, Deserialize)]
@@ -10,27 +9,3 @@ pub struct VerifierConfig {
pub max_sent_records: Option<usize>,
pub max_recv_records_online: Option<usize>,
}
impl From<VerifierConfig> for tlsn::verifier::VerifierConfig {
fn from(value: VerifierConfig) -> Self {
let mut builder = ProtocolConfigValidator::builder();
builder.max_sent_data(value.max_sent_data);
builder.max_recv_data(value.max_recv_data);
if let Some(value) = value.max_sent_records {
builder.max_sent_records(value);
}
if let Some(value) = value.max_recv_records_online {
builder.max_recv_records_online(value);
}
let validator = builder.build().unwrap();
tlsn::verifier::VerifierConfig::builder()
.protocol_config_validator(validator)
.build()
.unwrap()
}
}

View File

@@ -6,10 +6,7 @@ use enum_try_as_inner::EnumTryAsInner;
use tls_core::msgs::enums::ContentType;
use tlsn::{
connection::{ConnectionInfo, ServerName, TranscriptLength},
verifier::{
state::{self, Initialized},
Verifier, VerifyConfig,
},
verifier::{state, Verifier},
};
use tracing::info;
use wasm_bindgen::prelude::*;
@@ -21,6 +18,7 @@ type Result<T> = std::result::Result<T, JsError>;
#[wasm_bindgen(js_name = Verifier)]
pub struct JsVerifier {
config: VerifierConfig,
state: State,
}
@@ -49,8 +47,10 @@ impl State {
impl JsVerifier {
#[wasm_bindgen(constructor)]
pub fn new(config: VerifierConfig) -> JsVerifier {
let tlsn_config = tlsn::verifier::VerifierConfig::builder().build().unwrap();
JsVerifier {
state: State::Initialized(Verifier::new(config.into())),
state: State::Initialized(Verifier::new(tlsn_config)),
config,
}
}
@@ -73,7 +73,27 @@ impl JsVerifier {
pub async fn verify(&mut self) -> Result<VerifierOutput> {
let (verifier, prover_conn) = self.state.take().try_into_connected()?;
let mut verifier = verifier.setup(prover_conn.into_io()).await?.run().await?;
let verifier = verifier.setup(prover_conn.into_io()).await?;
let config = verifier.config();
let reject = if config.max_sent_data() > self.config.max_sent_data {
Some("max_sent_data is too large")
} else if config.max_recv_data() > self.config.max_recv_data {
Some("max_recv_data is too large")
} else if config.max_sent_records() > self.config.max_sent_records {
Some("max_sent_records is too large")
} else if config.max_recv_records_online() > self.config.max_recv_records_online {
Some("max_recv_records_online is too large")
} else {
None
};
if reject.is_some() {
verifier.reject(reject).await?;
return Err(JsError::new("protocol configuration rejected"));
}
let verifier = verifier.accept().await?.run().await?;
let sent = verifier
.tls_transcript()
@@ -100,7 +120,7 @@ impl JsVerifier {
},
};
let output = verifier.verify(&VerifyConfig::default()).await?;
let (output, verifier) = verifier.verify().await?.accept().await?;
verifier.close().await?;
self.state = State::Complete;
@@ -115,11 +135,3 @@ impl JsVerifier {
})
}
}
impl From<tlsn::verifier::Verifier<Initialized>> for JsVerifier {
fn from(value: tlsn::verifier::Verifier<Initialized>) -> Self {
Self {
state: State::Initialized(value),
}
}
}