Compare commits


1 Commit

Author: dan
SHA1: 0f9c04fc11
Message: feat: added plugin-core crate
Date: 2025-10-30 09:11:12 +02:00
90 changed files with 2349 additions and 4113 deletions

Cargo.lock (generated): 461 changed lines. File diff suppressed because it is too large.

View File

@@ -9,6 +9,7 @@ members = [
"crates/data-fixtures",
"crates/examples",
"crates/formats",
"crates/plugin-core",
"crates/server-fixture/certs",
"crates/server-fixture/server",
"crates/tls/backend",
@@ -53,6 +54,7 @@ tlsn-formats = { path = "crates/formats" }
tlsn-hmac-sha256 = { path = "crates/components/hmac-sha256" }
tlsn-key-exchange = { path = "crates/components/key-exchange" }
tlsn-mpc-tls = { path = "crates/mpc-tls" }
tlsn-plugin-core = { path = "crates/plugin-core" }
tlsn-server-fixture = { path = "crates/server-fixture/server" }
tlsn-server-fixture-certs = { path = "crates/server-fixture/certs" }
tlsn-tls-backend = { path = "crates/tls/backend" }
@@ -66,26 +68,27 @@ tlsn-harness-runner = { path = "crates/harness/runner" }
tlsn-wasm = { path = "crates/wasm" }
tlsn = { path = "crates/tlsn" }
mpz-circuits = { git = "https://github.com/privacy-ethereum/mpz", rev = "5250a78" }
mpz-memory-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "5250a78" }
mpz-common = { git = "https://github.com/privacy-ethereum/mpz", rev = "5250a78" }
mpz-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "5250a78" }
mpz-vm-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "5250a78" }
mpz-garble = { git = "https://github.com/privacy-ethereum/mpz", rev = "5250a78" }
mpz-garble-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "5250a78" }
mpz-ole = { git = "https://github.com/privacy-ethereum/mpz", rev = "5250a78" }
mpz-ot = { git = "https://github.com/privacy-ethereum/mpz", rev = "5250a78" }
mpz-share-conversion = { git = "https://github.com/privacy-ethereum/mpz", rev = "5250a78" }
mpz-fields = { git = "https://github.com/privacy-ethereum/mpz", rev = "5250a78" }
mpz-zk = { git = "https://github.com/privacy-ethereum/mpz", rev = "5250a78" }
mpz-hash = { git = "https://github.com/privacy-ethereum/mpz", rev = "5250a78" }
mpz-ideal-vm = { git = "https://github.com/privacy-ethereum/mpz", rev = "5250a78" }
mpz-circuits = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-memory-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-common = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-vm-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-garble = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-garble-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-ole = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-ot = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-share-conversion = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-fields = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-zk = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-hash = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-ideal-vm = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
rangeset = { version = "0.2" }
serio = { version = "0.2" }
spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "304b910" }
uid-mux = { version = "0.2" }
websocket-relay = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
websocket-relay = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "304b910" }
futures-plex = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "304b910" }
aead = { version = "0.4" }
aes = { version = "0.8" }

View File

@@ -9,7 +9,7 @@ fixtures = ["tlsn-core/fixtures", "dep:tlsn-data-fixtures"]
[dependencies]
tlsn-tls-core = { workspace = true }
tlsn-core = { workspace = true, features = ["mozilla-certs"] }
tlsn-core = { workspace = true }
tlsn-data-fixtures = { workspace = true, optional = true }
bcs = { workspace = true }

View File

@@ -1,4 +1,5 @@
//! Attestation fixtures.
use tlsn_core::{
connection::{CertBinding, CertBindingV1_2},
fixtures::ConnectionFixture,
@@ -12,10 +13,7 @@ use tlsn_core::{
use crate::{
Attestation, AttestationConfig, CryptoProvider, Extension,
request::{Request, RequestConfig},
signing::{
KeyAlgId, SignatureAlgId, SignatureVerifier, SignatureVerifierProvider, Signer,
SignerProvider,
},
signing::SignatureAlgId,
};
/// A Request fixture used for testing.
@@ -104,8 +102,7 @@ pub fn attestation_fixture(
let mut provider = CryptoProvider::default();
match signature_alg {
SignatureAlgId::SECP256K1 => provider.signer.set_secp256k1(&[42u8; 32]).unwrap(),
SignatureAlgId::SECP256K1ETH => provider.signer.set_secp256k1eth(&[43u8; 32]).unwrap(),
SignatureAlgId::SECP256R1 => provider.signer.set_secp256r1(&[44u8; 32]).unwrap(),
SignatureAlgId::SECP256R1 => provider.signer.set_secp256r1(&[42u8; 32]).unwrap(),
_ => unimplemented!(),
};
@@ -125,68 +122,3 @@ pub fn attestation_fixture(
attestation_builder.build(&provider).unwrap()
}
/// Returns a crypto provider which supports only a custom signature alg.
pub fn custom_provider_fixture() -> CryptoProvider {
const CUSTOM_SIG_ALG_ID: SignatureAlgId = SignatureAlgId::new(128);
// A dummy signer.
struct DummySigner {}
impl Signer for DummySigner {
fn alg_id(&self) -> SignatureAlgId {
CUSTOM_SIG_ALG_ID
}
fn sign(
&self,
msg: &[u8],
) -> Result<crate::signing::Signature, crate::signing::SignatureError> {
Ok(crate::signing::Signature {
alg: CUSTOM_SIG_ALG_ID,
data: msg.to_vec(),
})
}
fn verifying_key(&self) -> crate::signing::VerifyingKey {
crate::signing::VerifyingKey {
alg: KeyAlgId::new(128),
data: vec![1, 2, 3, 4],
}
}
}
// A dummy verifier.
struct DummyVerifier {}
impl SignatureVerifier for DummyVerifier {
fn alg_id(&self) -> SignatureAlgId {
CUSTOM_SIG_ALG_ID
}
fn verify(
&self,
_key: &crate::signing::VerifyingKey,
msg: &[u8],
sig: &[u8],
) -> Result<(), crate::signing::SignatureError> {
if msg == sig {
Ok(())
} else {
Err(crate::signing::SignatureError::from_str(
"invalid signature",
))
}
}
}
let mut provider = CryptoProvider::default();
let mut signer_provider = SignerProvider::default();
signer_provider.set_signer(Box::new(DummySigner {}));
provider.signer = signer_provider;
let mut verifier_provider = SignatureVerifierProvider::empty();
verifier_provider.set_verifier(Box::new(DummyVerifier {}));
provider.signature = verifier_provider;
provider
}

View File

@@ -20,10 +20,7 @@ use serde::{Deserialize, Serialize};
use tlsn_core::hash::HashAlgId;
use crate::{
Attestation, CryptoProvider, Extension, connection::ServerCertCommitment,
serialize::CanonicalSerialize, signing::SignatureAlgId,
};
use crate::{Attestation, Extension, connection::ServerCertCommitment, signing::SignatureAlgId};
pub use builder::{RequestBuilder, RequestBuilderError};
pub use config::{RequestConfig, RequestConfigBuilder, RequestConfigBuilderError};
@@ -44,102 +41,44 @@ impl Request {
}
/// Validates the content of the attestation against this request.
pub fn validate(
&self,
attestation: &Attestation,
provider: &CryptoProvider,
) -> Result<(), AttestationValidationError> {
pub fn validate(&self, attestation: &Attestation) -> Result<(), InconsistentAttestation> {
if attestation.signature.alg != self.signature_alg {
return Err(AttestationValidationError::inconsistent(format!(
return Err(InconsistentAttestation(format!(
"signature algorithm: expected {:?}, got {:?}",
self.signature_alg, attestation.signature.alg
)));
}
if attestation.header.root.alg != self.hash_alg {
return Err(AttestationValidationError::inconsistent(format!(
return Err(InconsistentAttestation(format!(
"hash algorithm: expected {:?}, got {:?}",
self.hash_alg, attestation.header.root.alg
)));
}
if attestation.body.cert_commitment() != &self.server_cert_commitment {
return Err(AttestationValidationError::inconsistent(
"server certificate commitment does not match",
return Err(InconsistentAttestation(
"server certificate commitment does not match".to_string(),
));
}
// TODO: improve the O(M*N) complexity of this check.
for extension in &self.extensions {
if !attestation.body.extensions().any(|e| e == extension) {
return Err(AttestationValidationError::inconsistent(
"extension is missing from the attestation",
return Err(InconsistentAttestation(
"extension is missing from the attestation".to_string(),
));
}
}
let verifier = provider
.signature
.get(&attestation.signature.alg)
.map_err(|_| {
AttestationValidationError::provider(format!(
"provider not configured for signature algorithm id {:?}",
attestation.signature.alg,
))
})?;
verifier
.verify(
&attestation.body.verifying_key.data,
&CanonicalSerialize::serialize(&attestation.header),
&attestation.signature.data,
)
.map_err(|_| {
AttestationValidationError::inconsistent("failed to verify the signature")
})?;
Ok(())
}
}
/// Error for [`Request::validate`].
#[derive(Debug, thiserror::Error)]
#[error("attestation validation error: {kind}: {message}")]
pub struct AttestationValidationError {
kind: ErrorKind,
message: String,
}
impl AttestationValidationError {
fn inconsistent(msg: impl Into<String>) -> Self {
Self {
kind: ErrorKind::Inconsistent,
message: msg.into(),
}
}
fn provider(msg: impl Into<String>) -> Self {
Self {
kind: ErrorKind::Provider,
message: msg.into(),
}
}
}
#[derive(Debug)]
enum ErrorKind {
Inconsistent,
Provider,
}
impl std::fmt::Display for ErrorKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ErrorKind::Inconsistent => write!(f, "inconsistent"),
ErrorKind::Provider => write!(f, "provider"),
}
}
}
#[error("inconsistent attestation: {0}")]
pub struct InconsistentAttestation(String);
#[cfg(test)]
mod test {
@@ -154,8 +93,7 @@ mod test {
use crate::{
CryptoProvider,
connection::ServerCertOpening,
fixtures::{RequestFixture, attestation_fixture, custom_provider_fixture, request_fixture},
request::{AttestationValidationError, ErrorKind},
fixtures::{RequestFixture, attestation_fixture, request_fixture},
signing::SignatureAlgId,
};
@@ -175,9 +113,7 @@ mod test {
let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
let provider = CryptoProvider::default();
assert!(request.validate(&attestation, &provider).is_ok())
assert!(request.validate(&attestation).is_ok())
}
#[test]
@@ -198,9 +134,7 @@ mod test {
request.signature_alg = SignatureAlgId::SECP256R1;
let provider = CryptoProvider::default();
let res = request.validate(&attestation, &provider);
let res = request.validate(&attestation);
assert!(res.is_err());
}
@@ -222,9 +156,7 @@ mod test {
request.hash_alg = HashAlgId::SHA256;
let provider = CryptoProvider::default();
let res = request.validate(&attestation, &provider);
let res = request.validate(&attestation);
assert!(res.is_err())
}
@@ -252,62 +184,11 @@ mod test {
});
let opening = ServerCertOpening::new(server_cert_data);
let provider = CryptoProvider::default();
let crypto_provider = CryptoProvider::default();
request.server_cert_commitment =
opening.commit(provider.hash.get(&HashAlgId::BLAKE3).unwrap());
opening.commit(crypto_provider.hash.get(&HashAlgId::BLAKE3).unwrap());
let res = request.validate(&attestation, &provider);
let res = request.validate(&attestation);
assert!(res.is_err())
}
#[test]
fn test_wrong_sig() {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let mut attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
// Corrupt the signature.
attestation.signature.data[1] = attestation.signature.data[1].wrapping_add(1);
let provider = CryptoProvider::default();
assert!(request.validate(&attestation, &provider).is_err())
}
#[test]
fn test_wrong_provider() {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
let provider = custom_provider_fixture();
assert!(matches!(
request.validate(&attestation, &provider),
Err(AttestationValidationError {
kind: ErrorKind::Provider,
..
})
))
}
}
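
For reviewers, a minimal sketch of the call site after this change. Import paths are deliberately omitted; only the items shown in the diff above are assumed, and `request`/`attestation` are taken to come from fixtures like the ones in the tests.

// Sketch only. `Request`, `Attestation` and `InconsistentAttestation` are the
// items from the diff above; their crate paths are not spelled out here.
fn check_consistency(
    request: &Request,
    attestation: &Attestation,
) -> Result<(), InconsistentAttestation> {
    // The CryptoProvider argument is gone and `validate` no longer verifies
    // the signature; it only checks consistency with the original request.
    request.validate(attestation)
}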

View File

@@ -202,14 +202,6 @@ impl SignatureVerifierProvider {
.map(|s| &**s)
.ok_or(UnknownSignatureAlgId(*alg))
}
/// Returns an empty provider.
#[cfg(any(test, feature = "fixtures"))]
pub fn empty() -> Self {
Self {
verifiers: HashMap::default(),
}
}
}
/// Signature verifier.
@@ -237,14 +229,6 @@ impl_domain_separator!(VerifyingKey);
#[error("signature verification failed: {0}")]
pub struct SignatureError(String);
impl SignatureError {
/// Creates a new error with the given message.
#[allow(clippy::should_implement_trait)]
pub fn from_str(msg: &str) -> Self {
Self(msg.to_string())
}
}
/// A signature.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct Signature {

View File

@@ -101,7 +101,7 @@ fn test_api() {
let attestation = attestation_builder.build(&provider).unwrap();
// Prover validates the attestation is consistent with its request.
request.validate(&attestation, &provider).unwrap();
request.validate(&attestation).unwrap();
let mut transcript_proof_builder = secrets.transcript_proof_builder();

View File

@@ -15,7 +15,7 @@ workspace = true
name = "cipher"
[dependencies]
mpz-circuits = { workspace = true, features = ["aes"] }
mpz-circuits = { workspace = true }
mpz-vm-core = { workspace = true }
mpz-memory-core = { workspace = true }
@@ -24,9 +24,11 @@ thiserror = { workspace = true }
aes = { workspace = true }
[dev-dependencies]
mpz-common = { workspace = true, features = ["test-utils"] }
mpz-ideal-vm = { workspace = true }
mpz-garble = { workspace = true }
mpz-common = { workspace = true }
mpz-ot = { workspace = true }
tokio = { version = "1", features = ["macros", "rt", "rt-multi-thread"] }
rand = { workspace = true }
ctr = { workspace = true }
cipher = { workspace = true }

View File

@@ -2,7 +2,7 @@
use crate::{Cipher, CtrBlock, Keystream};
use async_trait::async_trait;
use mpz_circuits::{AES128_KS, AES128_POST_KS};
use mpz_circuits::circuits::AES128;
use mpz_memory_core::binary::{Binary, U8};
use mpz_vm_core::{prelude::*, Call, Vm};
use std::fmt::Debug;
@@ -12,35 +12,13 @@ mod error;
pub use error::AesError;
use error::ErrorKind;
/// AES key schedule: 11 round keys, 16 bytes each.
type KeySchedule = Array<U8, 176>;
/// Computes AES-128.
#[derive(Default, Debug)]
pub struct Aes128 {
key: Option<Array<U8, 16>>,
key_schedule: Option<KeySchedule>,
iv: Option<Array<U8, 4>>,
}
impl Aes128 {
// Allocates key schedule.
//
// Expects the key to be already set.
fn alloc_key_schedule(&self, vm: &mut dyn Vm<Binary>) -> Result<KeySchedule, AesError> {
let ks: KeySchedule = vm
.call(
Call::builder(AES128_KS.clone())
.arg(self.key.expect("key is set"))
.build()
.expect("call should be valid"),
)
.map_err(|err| AesError::new(ErrorKind::Vm, err))?;
Ok(ks)
}
}
#[async_trait]
impl Cipher for Aes128 {
type Error = AesError;
@@ -67,22 +45,18 @@ impl Cipher for Aes128 {
}
fn alloc_block(
&mut self,
&self,
vm: &mut dyn Vm<Binary>,
input: Array<U8, 16>,
) -> Result<Self::Block, Self::Error> {
self.key
let key = self
.key
.ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?;
if self.key_schedule.is_none() {
self.key_schedule = Some(self.alloc_key_schedule(vm)?);
}
let ks = *self.key_schedule.as_ref().expect("key schedule was set");
let output = vm
.call(
Call::builder(AES128_POST_KS.clone())
.arg(ks)
Call::builder(AES128.clone())
.arg(key)
.arg(input)
.build()
.expect("call should be valid"),
@@ -93,10 +67,11 @@ impl Cipher for Aes128 {
}
fn alloc_ctr_block(
&mut self,
&self,
vm: &mut dyn Vm<Binary>,
) -> Result<CtrBlock<Self::Nonce, Self::Counter, Self::Block>, Self::Error> {
self.key
let key = self
.key
.ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?;
let iv = self
.iv
@@ -114,15 +89,10 @@ impl Cipher for Aes128 {
vm.mark_public(counter)
.map_err(|err| AesError::new(ErrorKind::Vm, err))?;
if self.key_schedule.is_none() {
self.key_schedule = Some(self.alloc_key_schedule(vm)?);
}
let ks = *self.key_schedule.as_ref().expect("key schedule was set");
let output = vm
.call(
Call::builder(AES128_POST_KS.clone())
.arg(ks)
Call::builder(AES128.clone())
.arg(key)
.arg(iv)
.arg(explicit_nonce)
.arg(counter)
@@ -139,11 +109,12 @@ impl Cipher for Aes128 {
}
fn alloc_keystream(
&mut self,
&self,
vm: &mut dyn Vm<Binary>,
len: usize,
) -> Result<Keystream<Self::Nonce, Self::Counter, Self::Block>, Self::Error> {
self.key
let key = self
.key
.ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?;
let iv = self
.iv
@@ -172,15 +143,10 @@ impl Cipher for Aes128 {
let blocks = inputs
.into_iter()
.map(|(explicit_nonce, counter)| {
if self.key_schedule.is_none() {
self.key_schedule = Some(self.alloc_key_schedule(vm)?);
}
let ks = *self.key_schedule.as_ref().expect("key schedule was set");
let output = vm
.call(
Call::builder(AES128_POST_KS.clone())
.arg(ks)
Call::builder(AES128.clone())
.arg(key)
.arg(iv)
.arg(explicit_nonce)
.arg(counter)
@@ -206,12 +172,15 @@ mod tests {
use super::*;
use crate::Cipher;
use mpz_common::context::test_st_context;
use mpz_ideal_vm::IdealVm;
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_memory_core::{
binary::{Binary, U8},
correlated::Delta,
Array, MemoryExt, Vector, ViewExt,
};
use mpz_ot::ideal::cot::ideal_cot;
use mpz_vm_core::{Execute, Vm};
use rand::{rngs::StdRng, SeedableRng};
#[tokio::test]
async fn test_aes_ctr() {
@@ -221,11 +190,10 @@ mod tests {
let start_counter = 3u32;
let (mut ctx_a, mut ctx_b) = test_st_context(8);
let mut gen = IdealVm::new();
let mut ev = IdealVm::new();
let (mut gen, mut ev) = mock_vm();
let mut aes_gen = setup_ctr(key, iv, &mut gen);
let mut aes_ev = setup_ctr(key, iv, &mut ev);
let aes_gen = setup_ctr(key, iv, &mut gen);
let aes_ev = setup_ctr(key, iv, &mut ev);
let msg = vec![42u8; 128];
@@ -284,11 +252,10 @@ mod tests {
let input = [5_u8; 16];
let (mut ctx_a, mut ctx_b) = test_st_context(8);
let mut gen = IdealVm::new();
let mut ev = IdealVm::new();
let (mut gen, mut ev) = mock_vm();
let mut aes_gen = setup_block(key, &mut gen);
let mut aes_ev = setup_block(key, &mut ev);
let aes_gen = setup_block(key, &mut gen);
let aes_ev = setup_block(key, &mut ev);
let block_ref_gen: Array<U8, 16> = gen.alloc().unwrap();
gen.mark_public(block_ref_gen).unwrap();
@@ -327,6 +294,18 @@ mod tests {
assert_eq!(ciphertext_gen, expected);
}
fn mock_vm() -> (impl Vm<Binary>, impl Vm<Binary>) {
let mut rng = StdRng::seed_from_u64(0);
let delta = Delta::random(&mut rng);
let (cot_send, cot_recv) = ideal_cot(delta.into_inner());
let gen = Garbler::new(cot_send, [0u8; 16], delta);
let ev = Evaluator::new(cot_recv);
(gen, ev)
}
fn setup_ctr(key: [u8; 16], iv: [u8; 4], vm: &mut dyn Vm<Binary>) -> Aes128 {
let key_ref: Array<U8, 16> = vm.alloc().unwrap();
vm.mark_public(key_ref).unwrap();

View File

@@ -55,7 +55,7 @@ pub trait Cipher {
/// Allocates a single block in ECB mode.
fn alloc_block(
&mut self,
&self,
vm: &mut dyn Vm<Binary>,
input: Self::Block,
) -> Result<Self::Block, Self::Error>;
@@ -63,7 +63,7 @@ pub trait Cipher {
/// Allocates a single block in counter mode.
#[allow(clippy::type_complexity)]
fn alloc_ctr_block(
&mut self,
&self,
vm: &mut dyn Vm<Binary>,
) -> Result<CtrBlock<Self::Nonce, Self::Counter, Self::Block>, Self::Error>;
@@ -75,7 +75,7 @@ pub trait Cipher {
/// * `len` - Length of the stream in bytes.
#[allow(clippy::type_complexity)]
fn alloc_keystream(
&mut self,
&self,
vm: &mut dyn Vm<Binary>,
len: usize,
) -> Result<Keystream<Self::Nonce, Self::Counter, Self::Block>, Self::Error>;
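
One way to read the `&mut self` to `&self` change: with the single AES128 circuit there is no cached key schedule to mutate, so allocation only reads the stored key/IV handles and only the VM needs exclusive access. A hedged sketch follows; the helper is invented for illustration, and `Cipher` is the trait from this file with its import path omitted.

use mpz_memory_core::binary::Binary;
use mpz_vm_core::Vm;

// Hypothetical helper: allocates two keystreams from the same cipher value.
// With `&self` receivers this type-checks; only the VM is borrowed mutably.
fn alloc_two_keystreams<C: Cipher>(
    cipher: &C,
    vm: &mut dyn Vm<Binary>,
    len: usize,
) -> Result<(), C::Error> {
    let _first = cipher.alloc_keystream(vm, len)?;
    let _second = cipher.alloc_keystream(vm, len)?;
    Ok(())
}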

View File

@@ -19,8 +19,11 @@ futures = { workspace = true }
tokio = { workspace = true, features = ["sync"] }
[dev-dependencies]
mpz-circuits = { workspace = true, features = ["aes"] }
mpz-common = { workspace = true, features = ["test-utils"] }
mpz-ideal-vm = { workspace = true }
mpz-circuits = { workspace = true }
mpz-garble = { workspace = true }
mpz-ot = { workspace = true }
mpz-zk = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }
rand = { workspace = true }
rand06-compat = { workspace = true }

View File

@@ -382,27 +382,37 @@ enum ErrorRepr {
#[cfg(test)]
mod tests {
use mpz_circuits::AES128;
use mpz_circuits::circuits::AES128;
use mpz_common::context::test_st_context;
use mpz_ideal_vm::IdealVm;
use mpz_core::Block;
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_ot::ideal::{cot::ideal_cot, rcot::ideal_rcot};
use mpz_vm_core::{
memory::{binary::U8, Array},
memory::{binary::U8, correlated::Delta, Array},
prelude::*,
};
use mpz_zk::{Prover, ProverConfig, Verifier, VerifierConfig};
use rand::{rngs::StdRng, SeedableRng};
use super::*;
#[tokio::test]
async fn test_deap() {
let mut rng = StdRng::seed_from_u64(0);
let delta_mpc = Delta::random(&mut rng);
let delta_zk = Delta::random(&mut rng);
let (mut ctx_a, mut ctx_b) = test_st_context(8);
let (rcot_send, rcot_recv) = ideal_rcot(Block::ZERO, delta_zk.into_inner());
let (cot_send, cot_recv) = ideal_cot(delta_mpc.into_inner());
let leader_mpc = IdealVm::new();
let leader_zk = IdealVm::new();
let follower_mpc = IdealVm::new();
let follower_zk = IdealVm::new();
let gb = Garbler::new(cot_send, [0u8; 16], delta_mpc);
let ev = Evaluator::new(cot_recv);
let prover = Prover::new(ProverConfig::default(), rcot_recv);
let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send);
let mut leader = Deap::new(Role::Leader, leader_mpc, leader_zk);
let mut follower = Deap::new(Role::Follower, follower_mpc, follower_zk);
let mut leader = Deap::new(Role::Leader, gb, prover);
let mut follower = Deap::new(Role::Follower, ev, verifier);
let (ct_leader, ct_follower) = futures::join!(
async {
@@ -468,15 +478,21 @@ mod tests {
#[tokio::test]
async fn test_deap_desync_memory() {
let mut rng = StdRng::seed_from_u64(0);
let delta_mpc = Delta::random(&mut rng);
let delta_zk = Delta::random(&mut rng);
let (mut ctx_a, mut ctx_b) = test_st_context(8);
let (rcot_send, rcot_recv) = ideal_rcot(Block::ZERO, delta_zk.into_inner());
let (cot_send, cot_recv) = ideal_cot(delta_mpc.into_inner());
let leader_mpc = IdealVm::new();
let leader_zk = IdealVm::new();
let follower_mpc = IdealVm::new();
let follower_zk = IdealVm::new();
let gb = Garbler::new(cot_send, [0u8; 16], delta_mpc);
let ev = Evaluator::new(cot_recv);
let prover = Prover::new(ProverConfig::default(), rcot_recv);
let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send);
let mut leader = Deap::new(Role::Leader, leader_mpc, leader_zk);
let mut follower = Deap::new(Role::Follower, follower_mpc, follower_zk);
let mut leader = Deap::new(Role::Leader, gb, prover);
let mut follower = Deap::new(Role::Follower, ev, verifier);
// Desynchronize the memories.
let _ = leader.zk().alloc_raw(1).unwrap();
@@ -548,15 +564,21 @@ mod tests {
// detection by the follower.
#[tokio::test]
async fn test_malicious() {
let mut rng = StdRng::seed_from_u64(0);
let delta_mpc = Delta::random(&mut rng);
let delta_zk = Delta::random(&mut rng);
let (mut ctx_a, mut ctx_b) = test_st_context(8);
let (rcot_send, rcot_recv) = ideal_rcot(Block::ZERO, delta_zk.into_inner());
let (cot_send, cot_recv) = ideal_cot(delta_mpc.into_inner());
let leader_mpc = IdealVm::new();
let leader_zk = IdealVm::new();
let follower_mpc = IdealVm::new();
let follower_zk = IdealVm::new();
let gb = Garbler::new(cot_send, [1u8; 16], delta_mpc);
let ev = Evaluator::new(cot_recv);
let prover = Prover::new(ProverConfig::default(), rcot_recv);
let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send);
let mut leader = Deap::new(Role::Leader, leader_mpc, leader_zk);
let mut follower = Deap::new(Role::Follower, follower_mpc, follower_zk);
let mut leader = Deap::new(Role::Leader, gb, prover);
let mut follower = Deap::new(Role::Follower, ev, verifier);
let (_, follower_res) = futures::join!(
async {

View File

@@ -20,13 +20,14 @@ mpz-core = { workspace = true }
mpz-circuits = { workspace = true }
mpz-hash = { workspace = true }
sha2 = { workspace = true, features = ["compress"] }
thiserror = { workspace = true }
tracing = { workspace = true }
sha2 = { workspace = true }
[dev-dependencies]
mpz-ot = { workspace = true, features = ["ideal"] }
mpz-garble = { workspace = true }
mpz-common = { workspace = true, features = ["test-utils"] }
mpz-ideal-vm = { workspace = true }
criterion = { workspace = true, features = ["async_tokio"] }
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }

View File

@@ -4,12 +4,14 @@ use criterion::{criterion_group, criterion_main, Criterion};
use hmac_sha256::{Mode, MpcPrf};
use mpz_common::context::test_mt_context;
use mpz_ideal_vm::IdealVm;
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_ot::ideal::cot::ideal_cot;
use mpz_vm_core::{
memory::{binary::U8, Array},
memory::{binary::U8, correlated::Delta, Array},
prelude::*,
Execute,
};
use rand::{rngs::StdRng, SeedableRng};
#[allow(clippy::unit_arg)]
fn criterion_benchmark(c: &mut Criterion) {
@@ -27,6 +29,8 @@ criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
async fn prf(mode: Mode) {
let mut rng = StdRng::seed_from_u64(0);
let pms = [42u8; 32];
let client_random = [69u8; 32];
let server_random: [u8; 32] = [96u8; 32];
@@ -35,8 +39,11 @@ async fn prf(mode: Mode) {
let mut leader_ctx = leader_exec.new_context().await.unwrap();
let mut follower_ctx = follower_exec.new_context().await.unwrap();
let mut leader_vm = IdealVm::new();
let mut follower_vm = IdealVm::new();
let delta = Delta::random(&mut rng);
let (ot_send, ot_recv) = ideal_cot(delta.into_inner());
let mut leader_vm = Garbler::new(ot_send, [0u8; 16], delta);
let mut follower_vm = Evaluator::new(ot_recv);
let leader_pms: Array<U8, 32> = leader_vm.alloc().unwrap();
leader_vm.mark_public(leader_pms).unwrap();

View File

@@ -54,11 +54,10 @@ mod tests {
use crate::{
hmac::hmac_sha256,
sha256, state_to_bytes,
test_utils::{compute_inner_local, compute_outer_partial},
test_utils::{compute_inner_local, compute_outer_partial, mock_vm},
};
use mpz_common::context::test_st_context;
use mpz_hash::sha256::Sha256;
use mpz_ideal_vm::IdealVm;
use mpz_vm_core::{
memory::{
binary::{U32, U8},
@@ -84,8 +83,7 @@ mod tests {
#[tokio::test]
async fn test_hmac_circuit() {
let (mut ctx_a, mut ctx_b) = test_st_context(8);
let mut leader = IdealVm::new();
let mut follower = IdealVm::new();
let (mut leader, mut follower) = mock_vm();
let (inputs, references) = test_fixtures();
for (input, &reference) in inputs.iter().zip(references.iter()) {

View File

@@ -72,11 +72,10 @@ fn state_to_bytes(input: [u32; 8]) -> [u8; 32] {
#[cfg(test)]
mod tests {
use crate::{
test_utils::{prf_cf_vd, prf_keys, prf_ms, prf_sf_vd},
test_utils::{mock_vm, prf_cf_vd, prf_keys, prf_ms, prf_sf_vd},
Mode, MpcPrf, SessionKeys,
};
use mpz_common::context::test_st_context;
use mpz_ideal_vm::IdealVm;
use mpz_vm_core::{
memory::{binary::U8, Array, MemoryExt, ViewExt},
Execute,
@@ -124,8 +123,7 @@ mod tests {
// Set up vm and prf
let (mut ctx_a, mut ctx_b) = test_st_context(128);
let mut leader = IdealVm::new();
let mut follower = IdealVm::new();
let (mut leader, mut follower) = mock_vm();
let leader_pms: Array<U8, 32> = leader.alloc().unwrap();
leader.mark_public(leader_pms).unwrap();

View File

@@ -339,9 +339,8 @@ fn gen_merge_circ(size: usize) -> Arc<Circuit> {
#[cfg(test)]
mod tests {
use crate::prf::merge_outputs;
use crate::{prf::merge_outputs, test_utils::mock_vm};
use mpz_common::context::test_st_context;
use mpz_ideal_vm::IdealVm;
use mpz_vm_core::{
memory::{binary::U8, Array, MemoryExt, ViewExt},
Execute,
@@ -350,8 +349,7 @@ mod tests {
#[tokio::test]
async fn test_merge_outputs() {
let (mut ctx_a, mut ctx_b) = test_st_context(8);
let mut leader = IdealVm::new();
let mut follower = IdealVm::new();
let (mut leader, mut follower) = mock_vm();
let input1: [u8; 32] = std::array::from_fn(|i| i as u8);
let input2: [u8; 32] = std::array::from_fn(|i| i as u8 + 32);

View File

@@ -137,11 +137,10 @@ impl Prf {
mod tests {
use crate::{
prf::{compute_partial, function::Prf},
test_utils::phash,
test_utils::{mock_vm, phash},
Mode,
};
use mpz_common::context::test_st_context;
use mpz_ideal_vm::IdealVm;
use mpz_vm_core::{
memory::{binary::U8, Array, MemoryExt, ViewExt},
Execute,
@@ -167,8 +166,7 @@ mod tests {
let mut rng = ThreadRng::default();
let (mut ctx_a, mut ctx_b) = test_st_context(8);
let mut leader = IdealVm::new();
let mut follower = IdealVm::new();
let (mut leader, mut follower) = mock_vm();
let key: [u8; 32] = rng.random();
let start_seed: Vec<u8> = vec![42; 64];

View File

@@ -1,10 +1,25 @@
use crate::{sha256, state_to_bytes};
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_ot::ideal::cot::{ideal_cot, IdealCOTReceiver, IdealCOTSender};
use mpz_vm_core::memory::correlated::Delta;
use rand::{rngs::StdRng, Rng, SeedableRng};
pub(crate) const SHA256_IV: [u32; 8] = [
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
];
pub(crate) fn mock_vm() -> (Garbler<IdealCOTSender>, Evaluator<IdealCOTReceiver>) {
let mut rng = StdRng::seed_from_u64(0);
let delta = Delta::random(&mut rng);
let (cot_send, cot_recv) = ideal_cot(delta.into_inner());
let gen = Garbler::new(cot_send, [0u8; 16], delta);
let ev = Evaluator::new(cot_recv);
(gen, ev)
}
pub(crate) fn prf_ms(pms: [u8; 32], client_random: [u8; 32], server_random: [u8; 32]) -> [u8; 48] {
let mut label_start_seed = b"master secret".to_vec();
label_start_seed.extend_from_slice(&client_random);

View File

@@ -40,7 +40,6 @@ tokio = { workspace = true, features = ["sync"] }
[dev-dependencies]
mpz-ot = { workspace = true, features = ["ideal"] }
mpz-garble = { workspace = true }
mpz-ideal-vm = { workspace = true }
rand_core = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }

View File

@@ -459,7 +459,9 @@ mod tests {
use mpz_common::context::test_st_context;
use mpz_core::Block;
use mpz_fields::UniformRand;
use mpz_ideal_vm::IdealVm;
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_memory_core::correlated::Delta;
use mpz_ot::ideal::cot::{ideal_cot, IdealCOTReceiver, IdealCOTSender};
use mpz_share_conversion::ideal::{
ideal_share_convert, IdealShareConvertReceiver, IdealShareConvertSender,
};
@@ -482,8 +484,7 @@ mod tests {
async fn test_key_exchange() {
let mut rng = StdRng::seed_from_u64(0).compat();
let (mut ctx_a, mut ctx_b) = test_st_context(8);
let mut gen = IdealVm::new();
let mut ev = IdealVm::new();
let (mut gen, mut ev) = mock_vm();
let leader_private_key = SecretKey::random(&mut rng);
let follower_private_key = SecretKey::random(&mut rng);
@@ -624,8 +625,7 @@ mod tests {
async fn test_malicious_key_exchange(#[case] malicious: Malicious) {
let mut rng = StdRng::seed_from_u64(0);
let (mut ctx_a, mut ctx_b) = test_st_context(8);
let mut gen = IdealVm::new();
let mut ev = IdealVm::new();
let (mut gen, mut ev) = mock_vm();
let leader_private_key = SecretKey::random(&mut rng.compat_by_ref());
let follower_private_key = SecretKey::random(&mut rng.compat_by_ref());
@@ -704,8 +704,7 @@ mod tests {
#[tokio::test]
async fn test_circuit() {
let (mut ctx_a, mut ctx_b) = test_st_context(8);
let gen = IdealVm::new();
let ev = IdealVm::new();
let (gen, ev) = mock_vm();
let share_a0_bytes = [5_u8; 32];
let share_a1_bytes = [2_u8; 32];
@@ -835,4 +834,16 @@ mod tests {
(leader, follower)
}
fn mock_vm() -> (Garbler<IdealCOTSender>, Evaluator<IdealCOTReceiver>) {
let mut rng = StdRng::seed_from_u64(0);
let delta = Delta::random(&mut rng);
let (cot_send, cot_recv) = ideal_cot(delta.into_inner());
let gen = Garbler::new(cot_send, [0u8; 16], delta);
let ev = Evaluator::new(cot_recv);
(gen, ev)
}
}

View File

@@ -8,7 +8,7 @@
//! with the server alone and forward all messages from and to the follower.
//!
//! A detailed description of this protocol can be found in our documentation
//! <https://tlsnotary.org/docs/mpc/key_exchange>.
//! <https://docs.tlsnotary.org/protocol/notarization/key_exchange.html>.
#![deny(missing_docs, unreachable_pub, unused_must_use)]
#![deny(clippy::all)]

View File

@@ -26,7 +26,8 @@ pub fn create_mock_key_exchange_pair() -> (MockKeyExchange, MockKeyExchange) {
#[cfg(test)]
mod tests {
use mpz_ideal_vm::IdealVm;
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_ot::ideal::cot::{IdealCOTReceiver, IdealCOTSender};
use super::*;
use crate::KeyExchange;
@@ -39,12 +40,12 @@ mod tests {
is_key_exchange::<
MpcKeyExchange<IdealShareConvertSender<P256>, IdealShareConvertReceiver<P256>>,
IdealVm,
Garbler<IdealCOTSender>,
>(leader);
is_key_exchange::<
MpcKeyExchange<IdealShareConvertSender<P256>, IdealShareConvertReceiver<P256>>,
IdealVm,
Evaluator<IdealCOTReceiver>,
>(follower);
}
}

View File

@@ -4,7 +4,7 @@
//! protocol has semi-honest security.
//!
//! The protocol is described in
//! <https://tlsnotary.org/docs/mpc/key_exchange>
//! <https://docs.tlsnotary.org/protocol/notarization/key_exchange.html>
use crate::{KeyExchangeError, Role};
use mpz_common::{Context, Flush};

View File

@@ -13,7 +13,6 @@ workspace = true
[features]
default = []
mozilla-certs = ["dep:webpki-root-certs", "dep:webpki-roots"]
fixtures = [
"dep:hex",
"dep:tlsn-data-fixtures",
@@ -27,12 +26,6 @@ tlsn-data-fixtures = { workspace = true, optional = true }
tlsn-tls-core = { workspace = true, features = ["serde"] }
tlsn-utils = { workspace = true }
rangeset = { workspace = true, features = ["serde"] }
mpz-circuits = { workspace = true }
pest = "*"
pest_derive = "*"
pest_meta = "*"
aead = { workspace = true, features = ["alloc"], optional = true }
aes-gcm = { workspace = true, optional = true }
@@ -51,8 +44,7 @@ sha2 = { workspace = true }
thiserror = { workspace = true }
tiny-keccak = { workspace = true, features = ["keccak"] }
web-time = { workspace = true }
webpki-roots = { workspace = true, optional = true }
webpki-root-certs = { workspace = true, optional = true }
webpki-roots = { workspace = true }
rustls-webpki = { workspace = true, features = ["ring"] }
rustls-pki-types = { workspace = true }
itybity = { workspace = true }

View File

@@ -1,7 +0,0 @@
//! Configuration types.
pub mod prove;
pub mod prover;
pub mod tls;
pub mod tls_commit;
pub mod verifier;

View File

@@ -1,189 +0,0 @@
//! Proving configuration.
use rangeset::{RangeSet, ToRangeSet, UnionMut};
use serde::{Deserialize, Serialize};
use crate::transcript::{Direction, Transcript, TranscriptCommitConfig, TranscriptCommitRequest};
/// Configuration to prove information to the verifier.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProveConfig {
server_identity: bool,
reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
transcript_commit: Option<TranscriptCommitConfig>,
}
impl ProveConfig {
/// Creates a new builder.
pub fn builder(transcript: &Transcript) -> ProveConfigBuilder<'_> {
ProveConfigBuilder::new(transcript)
}
/// Returns `true` if the server identity is to be proven.
pub fn server_identity(&self) -> bool {
self.server_identity
}
/// Returns the sent and received ranges of the transcript to be revealed,
/// respectively.
pub fn reveal(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
self.reveal.as_ref()
}
/// Returns the transcript commitment configuration.
pub fn transcript_commit(&self) -> Option<&TranscriptCommitConfig> {
self.transcript_commit.as_ref()
}
/// Returns a request.
pub fn to_request(&self) -> ProveRequest {
ProveRequest {
server_identity: self.server_identity,
reveal: self.reveal.clone(),
transcript_commit: self
.transcript_commit
.clone()
.map(|config| config.to_request()),
}
}
}
/// Builder for [`ProveConfig`].
#[derive(Debug)]
pub struct ProveConfigBuilder<'a> {
transcript: &'a Transcript,
server_identity: bool,
reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
transcript_commit: Option<TranscriptCommitConfig>,
}
impl<'a> ProveConfigBuilder<'a> {
/// Creates a new builder.
pub fn new(transcript: &'a Transcript) -> Self {
Self {
transcript,
server_identity: false,
reveal: None,
transcript_commit: None,
}
}
/// Proves the server identity.
pub fn server_identity(&mut self) -> &mut Self {
self.server_identity = true;
self
}
/// Configures transcript commitments.
pub fn transcript_commit(&mut self, transcript_commit: TranscriptCommitConfig) -> &mut Self {
self.transcript_commit = Some(transcript_commit);
self
}
/// Reveals the given ranges of the transcript.
pub fn reveal(
&mut self,
direction: Direction,
ranges: &dyn ToRangeSet<usize>,
) -> Result<&mut Self, ProveConfigError> {
let idx = ranges.to_range_set();
if idx.end().unwrap_or(0) > self.transcript.len_of_direction(direction) {
return Err(ProveConfigError(ErrorRepr::IndexOutOfBounds {
direction,
actual: idx.end().unwrap_or(0),
len: self.transcript.len_of_direction(direction),
}));
}
let (sent, recv) = self.reveal.get_or_insert_default();
match direction {
Direction::Sent => sent.union_mut(&idx),
Direction::Received => recv.union_mut(&idx),
}
Ok(self)
}
/// Reveals the given ranges of the sent data transcript.
pub fn reveal_sent(
&mut self,
ranges: &dyn ToRangeSet<usize>,
) -> Result<&mut Self, ProveConfigError> {
self.reveal(Direction::Sent, ranges)
}
/// Reveals all of the sent data transcript.
pub fn reveal_sent_all(&mut self) -> Result<&mut Self, ProveConfigError> {
let len = self.transcript.len_of_direction(Direction::Sent);
let (sent, _) = self.reveal.get_or_insert_default();
sent.union_mut(&(0..len));
Ok(self)
}
/// Reveals the given ranges of the received data transcript.
pub fn reveal_recv(
&mut self,
ranges: &dyn ToRangeSet<usize>,
) -> Result<&mut Self, ProveConfigError> {
self.reveal(Direction::Received, ranges)
}
/// Reveals all of the received data transcript.
pub fn reveal_recv_all(&mut self) -> Result<&mut Self, ProveConfigError> {
let len = self.transcript.len_of_direction(Direction::Received);
let (_, recv) = self.reveal.get_or_insert_default();
recv.union_mut(&(0..len));
Ok(self)
}
/// Builds the configuration.
pub fn build(self) -> Result<ProveConfig, ProveConfigError> {
Ok(ProveConfig {
server_identity: self.server_identity,
reveal: self.reveal,
transcript_commit: self.transcript_commit,
})
}
}
/// Request to prove statements about the connection.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProveRequest {
server_identity: bool,
reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
transcript_commit: Option<TranscriptCommitRequest>,
}
impl ProveRequest {
/// Returns `true` if the server identity is to be proven.
pub fn server_identity(&self) -> bool {
self.server_identity
}
/// Returns the sent and received ranges of the transcript to be revealed,
/// respectively.
pub fn reveal(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
self.reveal.as_ref()
}
/// Returns the transcript commitment configuration.
pub fn transcript_commit(&self) -> Option<&TranscriptCommitRequest> {
self.transcript_commit.as_ref()
}
}
/// Error for [`ProveConfig`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct ProveConfigError(#[from] ErrorRepr);
#[derive(Debug, thiserror::Error)]
enum ErrorRepr {
#[error("range is out of bounds of the transcript ({direction}): {actual} > {len}")]
IndexOutOfBounds {
direction: Direction,
actual: usize,
len: usize,
},
}
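
For context on what is being removed here, a minimal sketch of how this builder was driven, using only the methods shown above (the `transcript` value is assumed; import paths omitted):

// Sketch of the removed API; `Transcript` and the error type are as in the diff.
fn example_prove_config(transcript: &Transcript) -> Result<ProveConfig, ProveConfigError> {
    let mut builder = ProveConfig::builder(transcript);
    builder.server_identity();
    // Reveal everything that was sent and the first 16 received bytes.
    builder.reveal_sent_all()?;
    builder.reveal_recv(&(0..16))?;
    builder.build()
}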

View File

@@ -1,33 +0,0 @@
//! Prover configuration.
use serde::{Deserialize, Serialize};
/// Prover configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProverConfig {}
impl ProverConfig {
/// Creates a new builder.
pub fn builder() -> ProverConfigBuilder {
ProverConfigBuilder::default()
}
}
/// Builder for [`ProverConfig`].
#[derive(Debug, Default)]
pub struct ProverConfigBuilder {}
impl ProverConfigBuilder {
/// Builds the configuration.
pub fn build(self) -> Result<ProverConfig, ProverConfigError> {
Ok(ProverConfig {})
}
}
/// Error for [`ProverConfig`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct ProverConfigError(#[from] ErrorRepr);
#[derive(Debug, thiserror::Error)]
enum ErrorRepr {}

View File

@@ -1,111 +0,0 @@
//! TLS client configuration.
use serde::{Deserialize, Serialize};
use crate::{
connection::ServerName,
webpki::{CertificateDer, PrivateKeyDer, RootCertStore},
};
/// TLS client configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsClientConfig {
server_name: ServerName,
/// Root certificates.
root_store: RootCertStore,
/// Certificate chain and a matching private key for client
/// authentication.
client_auth: Option<(Vec<CertificateDer>, PrivateKeyDer)>,
}
impl TlsClientConfig {
/// Creates a new builder.
pub fn builder() -> TlsConfigBuilder {
TlsConfigBuilder::default()
}
/// Returns the server name.
pub fn server_name(&self) -> &ServerName {
&self.server_name
}
/// Returns the root certificates.
pub fn root_store(&self) -> &RootCertStore {
&self.root_store
}
/// Returns a certificate chain and a matching private key for client
/// authentication.
pub fn client_auth(&self) -> Option<&(Vec<CertificateDer>, PrivateKeyDer)> {
self.client_auth.as_ref()
}
}
/// Builder for [`TlsClientConfig`].
#[derive(Debug, Default)]
pub struct TlsConfigBuilder {
server_name: Option<ServerName>,
root_store: Option<RootCertStore>,
client_auth: Option<(Vec<CertificateDer>, PrivateKeyDer)>,
}
impl TlsConfigBuilder {
/// Sets the server name.
pub fn server_name(mut self, server_name: ServerName) -> Self {
self.server_name = Some(server_name);
self
}
/// Sets the root certificates to use for verifying the server's
/// certificate.
pub fn root_store(mut self, store: RootCertStore) -> Self {
self.root_store = Some(store);
self
}
/// Sets a DER-encoded certificate chain and a matching private key for
/// client authentication.
///
/// Often the chain will consist of a single end-entity certificate.
///
/// # Arguments
///
/// * `cert_key` - A tuple containing the certificate chain and the private
/// key.
///
/// - Each certificate in the chain must be in the X.509 format.
/// - The key must be in the ASN.1 format (either PKCS#8 or PKCS#1).
pub fn client_auth(mut self, cert_key: (Vec<CertificateDer>, PrivateKeyDer)) -> Self {
self.client_auth = Some(cert_key);
self
}
/// Builds the TLS configuration.
pub fn build(self) -> Result<TlsClientConfig, TlsConfigError> {
let server_name = self.server_name.ok_or(ErrorRepr::MissingField {
field: "server_name",
})?;
let root_store = self.root_store.ok_or(ErrorRepr::MissingField {
field: "root_store",
})?;
Ok(TlsClientConfig {
server_name,
root_store,
client_auth: self.client_auth,
})
}
}
/// TLS configuration error.
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct TlsConfigError(#[from] ErrorRepr);
#[derive(Debug, thiserror::Error)]
#[error("tls config error")]
enum ErrorRepr {
#[error("missing required field: {field}")]
MissingField { field: &'static str },
}
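
Similarly, a sketch of how the removed TLS client configuration builder was used, restricted to the methods shown above; client authentication is left commented out since the certificate and key values are not part of this diff.

// Sketch of the removed API; `ServerName` and `RootCertStore` are as in the diff.
fn example_tls_client_config(
    server_name: ServerName,
    root_store: RootCertStore,
) -> Result<TlsClientConfig, TlsConfigError> {
    TlsClientConfig::builder()
        .server_name(server_name)
        .root_store(root_store)
        // .client_auth((cert_chain, private_key)) would enable client authentication.
        .build()
}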

View File

@@ -1,94 +0,0 @@
//! TLS commitment configuration.
pub mod mpc;
use serde::{Deserialize, Serialize};
/// TLS commitment configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsCommitConfig {
protocol: TlsCommitProtocolConfig,
}
impl TlsCommitConfig {
/// Creates a new builder.
pub fn builder() -> TlsCommitConfigBuilder {
TlsCommitConfigBuilder::default()
}
/// Returns the protocol configuration.
pub fn protocol(&self) -> &TlsCommitProtocolConfig {
&self.protocol
}
/// Returns a TLS commitment request.
pub fn to_request(&self) -> TlsCommitRequest {
TlsCommitRequest {
config: self.protocol.clone(),
}
}
}
/// Builder for [`TlsCommitConfig`].
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct TlsCommitConfigBuilder {
protocol: Option<TlsCommitProtocolConfig>,
}
impl TlsCommitConfigBuilder {
/// Sets the protocol configuration.
pub fn protocol<C>(mut self, protocol: C) -> Self
where
C: Into<TlsCommitProtocolConfig>,
{
self.protocol = Some(protocol.into());
self
}
/// Builds the configuration.
pub fn build(self) -> Result<TlsCommitConfig, TlsCommitConfigError> {
let protocol = self
.protocol
.ok_or(ErrorRepr::MissingField { name: "protocol" })?;
Ok(TlsCommitConfig { protocol })
}
}
/// TLS commitment protocol configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[non_exhaustive]
pub enum TlsCommitProtocolConfig {
/// MPC-TLS configuration.
Mpc(mpc::MpcTlsConfig),
}
impl From<mpc::MpcTlsConfig> for TlsCommitProtocolConfig {
fn from(config: mpc::MpcTlsConfig) -> Self {
Self::Mpc(config)
}
}
/// TLS commitment request.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsCommitRequest {
config: TlsCommitProtocolConfig,
}
impl TlsCommitRequest {
/// Returns the protocol configuration.
pub fn protocol(&self) -> &TlsCommitProtocolConfig {
&self.config
}
}
/// Error for [`TlsCommitConfig`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct TlsCommitConfigError(#[from] ErrorRepr);
#[derive(Debug, thiserror::Error)]
enum ErrorRepr {
#[error("missing field: {name}")]
MissingField { name: &'static str },
}

View File

@@ -1,246 +0,0 @@
//! MPC-TLS commitment protocol configuration.
use serde::{Deserialize, Serialize};
// Default is 32 bytes to decrypt the TLS protocol messages.
const DEFAULT_MAX_RECV_ONLINE: usize = 32;
/// MPC-TLS commitment protocol configuration.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(try_from = "unchecked::MpcTlsConfigUnchecked")]
pub struct MpcTlsConfig {
/// Maximum number of bytes that can be sent.
max_sent_data: usize,
/// Maximum number of application data records that can be sent.
max_sent_records: Option<usize>,
/// Maximum number of bytes that can be decrypted online, i.e. while the
/// MPC-TLS connection is active.
max_recv_data_online: usize,
/// Maximum number of bytes that can be received.
max_recv_data: usize,
/// Maximum number of received application data records that can be
/// decrypted online, i.e. while the MPC-TLS connection is active.
max_recv_records_online: Option<usize>,
/// Whether the `deferred decryption` feature is toggled on from the start
/// of the MPC-TLS connection.
defer_decryption_from_start: bool,
/// Network settings.
network: NetworkSetting,
}
impl MpcTlsConfig {
/// Creates a new builder.
pub fn builder() -> MpcTlsConfigBuilder {
MpcTlsConfigBuilder::default()
}
/// Returns the maximum number of bytes that can be sent.
pub fn max_sent_data(&self) -> usize {
self.max_sent_data
}
/// Returns the maximum number of application data records that can
/// be sent.
pub fn max_sent_records(&self) -> Option<usize> {
self.max_sent_records
}
/// Returns the maximum number of bytes that can be decrypted online.
pub fn max_recv_data_online(&self) -> usize {
self.max_recv_data_online
}
/// Returns the maximum number of bytes that can be received.
pub fn max_recv_data(&self) -> usize {
self.max_recv_data
}
/// Returns the maximum number of received application data records that
/// can be decrypted online.
pub fn max_recv_records_online(&self) -> Option<usize> {
self.max_recv_records_online
}
/// Returns whether the `deferred decryption` feature is toggled on from the
/// start of the MPC-TLS connection.
pub fn defer_decryption_from_start(&self) -> bool {
self.defer_decryption_from_start
}
/// Returns the network settings.
pub fn network(&self) -> NetworkSetting {
self.network
}
}
fn validate(config: MpcTlsConfig) -> Result<MpcTlsConfig, MpcTlsConfigError> {
if config.max_recv_data_online > config.max_recv_data {
return Err(ErrorRepr::InvalidValue {
name: "max_recv_data_online",
reason: format!(
"must be <= max_recv_data ({} > {})",
config.max_recv_data_online, config.max_recv_data
),
}
.into());
}
Ok(config)
}
/// Builder for [`MpcTlsConfig`].
#[derive(Debug, Default)]
pub struct MpcTlsConfigBuilder {
max_sent_data: Option<usize>,
max_sent_records: Option<usize>,
max_recv_data_online: Option<usize>,
max_recv_data: Option<usize>,
max_recv_records_online: Option<usize>,
defer_decryption_from_start: Option<bool>,
network: Option<NetworkSetting>,
}
impl MpcTlsConfigBuilder {
/// Sets the maximum number of bytes that can be sent.
pub fn max_sent_data(mut self, max_sent_data: usize) -> Self {
self.max_sent_data = Some(max_sent_data);
self
}
/// Sets the maximum number of application data records that can be sent.
pub fn max_sent_records(mut self, max_sent_records: usize) -> Self {
self.max_sent_records = Some(max_sent_records);
self
}
/// Sets the maximum number of bytes that can be decrypted online.
pub fn max_recv_data_online(mut self, max_recv_data_online: usize) -> Self {
self.max_recv_data_online = Some(max_recv_data_online);
self
}
/// Sets the maximum number of bytes that can be received.
pub fn max_recv_data(mut self, max_recv_data: usize) -> Self {
self.max_recv_data = Some(max_recv_data);
self
}
/// Sets the maximum number of received application data records that can
/// be decrypted online.
pub fn max_recv_records_online(mut self, max_recv_records_online: usize) -> Self {
self.max_recv_records_online = Some(max_recv_records_online);
self
}
/// Sets whether the `deferred decryption` feature is toggled on from the
/// start of the MPC-TLS connection.
pub fn defer_decryption_from_start(mut self, defer_decryption_from_start: bool) -> Self {
self.defer_decryption_from_start = Some(defer_decryption_from_start);
self
}
/// Sets the network settings.
pub fn network(mut self, network: NetworkSetting) -> Self {
self.network = Some(network);
self
}
/// Builds the configuration.
pub fn build(self) -> Result<MpcTlsConfig, MpcTlsConfigError> {
let Self {
max_sent_data,
max_sent_records,
max_recv_data_online,
max_recv_data,
max_recv_records_online,
defer_decryption_from_start,
network,
} = self;
let max_sent_data = max_sent_data.ok_or(ErrorRepr::MissingField {
name: "max_sent_data",
})?;
let max_recv_data_online = max_recv_data_online.unwrap_or(DEFAULT_MAX_RECV_ONLINE);
let max_recv_data = max_recv_data.ok_or(ErrorRepr::MissingField {
name: "max_recv_data",
})?;
let defer_decryption_from_start = defer_decryption_from_start.unwrap_or(true);
let network = network.unwrap_or_default();
validate(MpcTlsConfig {
max_sent_data,
max_sent_records,
max_recv_data_online,
max_recv_data,
max_recv_records_online,
defer_decryption_from_start,
network,
})
}
}
/// Settings for the network environment.
///
/// Provides optimization options to adapt the protocol to different network
/// situations.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum NetworkSetting {
/// Reduces network round-trips at the expense of consuming more network
/// bandwidth.
Bandwidth,
/// Reduces network bandwidth utilization at the expense of more network
/// round-trips.
Latency,
}
impl Default for NetworkSetting {
fn default() -> Self {
Self::Latency
}
}
/// Error for [`MpcTlsConfig`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct MpcTlsConfigError(#[from] ErrorRepr);
#[derive(Debug, thiserror::Error)]
enum ErrorRepr {
#[error("missing field: {name}")]
MissingField { name: &'static str },
#[error("invalid value for field({name}): {reason}")]
InvalidValue { name: &'static str, reason: String },
}
mod unchecked {
use super::*;
#[derive(Deserialize)]
pub(super) struct MpcTlsConfigUnchecked {
max_sent_data: usize,
max_sent_records: Option<usize>,
max_recv_data_online: usize,
max_recv_data: usize,
max_recv_records_online: Option<usize>,
defer_decryption_from_start: bool,
network: NetworkSetting,
}
impl TryFrom<MpcTlsConfigUnchecked> for MpcTlsConfig {
type Error = MpcTlsConfigError;
fn try_from(value: MpcTlsConfigUnchecked) -> Result<Self, Self::Error> {
validate(MpcTlsConfig {
max_sent_data: value.max_sent_data,
max_sent_records: value.max_sent_records,
max_recv_data_online: value.max_recv_data_online,
max_recv_data: value.max_recv_data,
max_recv_records_online: value.max_recv_records_online,
defer_decryption_from_start: value.defer_decryption_from_start,
network: value.network,
})
}
}
}
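
A hedged sketch combining the two removed configuration types above: build the MPC-TLS settings, then wrap them in a commitment config. The numeric values are illustrative; only the required fields are set, relying on the defaults shown in the builder (32 bytes of online receive, deferred decryption on, latency-optimized network unless overridden).

// Sketch of the removed API; import paths omitted.
fn example_commit_config() -> Result<TlsCommitConfig, Box<dyn std::error::Error>> {
    let mpc = MpcTlsConfig::builder()
        .max_sent_data(4096) // required
        .max_recv_data(16384) // required
        .network(NetworkSetting::Bandwidth)
        .build()?;

    // MpcTlsConfig converts into TlsCommitProtocolConfig via the From impl above.
    let commit = TlsCommitConfig::builder().protocol(mpc).build()?;
    Ok(commit)
}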

View File

@@ -1,56 +0,0 @@
//! Verifier configuration.
use serde::{Deserialize, Serialize};
use crate::webpki::RootCertStore;
/// Verifier configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VerifierConfig {
root_store: RootCertStore,
}
impl VerifierConfig {
/// Creates a new builder.
pub fn builder() -> VerifierConfigBuilder {
VerifierConfigBuilder::default()
}
/// Returns the root certificate store.
pub fn root_store(&self) -> &RootCertStore {
&self.root_store
}
}
/// Builder for [`VerifierConfig`].
#[derive(Debug, Default)]
pub struct VerifierConfigBuilder {
root_store: Option<RootCertStore>,
}
impl VerifierConfigBuilder {
/// Sets the root certificate store.
pub fn root_store(mut self, root_store: RootCertStore) -> Self {
self.root_store = Some(root_store);
self
}
/// Builds the configuration.
pub fn build(self) -> Result<VerifierConfig, VerifierConfigError> {
let root_store = self
.root_store
.ok_or(ErrorRepr::MissingField { name: "root_store" })?;
Ok(VerifierConfig { root_store })
}
}
/// Error for [`VerifierConfig`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct VerifierConfigError(#[from] ErrorRepr);
#[derive(Debug, thiserror::Error)]
enum ErrorRepr {
#[error("missing field: {name}")]
MissingField { name: &'static str },
}

View File

@@ -7,7 +7,7 @@ use rand::{rngs::StdRng, Rng, SeedableRng};
use tls_core::msgs::{
base::Payload,
codec::Codec,
enums::{HandshakeType, ProtocolVersion},
enums::{ContentType, HandshakeType, ProtocolVersion},
handshake::{HandshakeMessagePayload, HandshakePayload},
message::{OpaqueMessage, PlainMessage},
};
@@ -15,7 +15,7 @@ use tls_core::msgs::{
use crate::{
connection::{TranscriptLength, VerifyData},
fixtures::ConnectionFixture,
transcript::{ContentType, Record, TlsTranscript},
transcript::{Record, TlsTranscript},
};
/// The key used for encryption of the sent and received transcript.
@@ -103,7 +103,7 @@ impl TranscriptGenerator {
let explicit_nonce: [u8; 8] = seq.to_be_bytes();
let msg = PlainMessage {
typ: ContentType::ApplicationData.into(),
typ: ContentType::ApplicationData,
version: ProtocolVersion::TLSv1_2,
payload: Payload::new(plaintext),
};
@@ -138,7 +138,7 @@ impl TranscriptGenerator {
handshake_message.encode(&mut plaintext);
let msg = PlainMessage {
typ: ContentType::Handshake.into(),
typ: ContentType::Handshake,
version: ProtocolVersion::TLSv1_2,
payload: Payload::new(plaintext.clone()),
};

View File

@@ -1,159 +0,0 @@
use crate::predicates::Pred;
use pest::{
iterators::{Pair, Pairs},
Parser,
};
use pest_derive::Parser;
#[derive(Parser)]
#[grammar = "expr.pest"]
struct ExprParser;
fn parse_expr(input: &str) -> Result<Pred, pest::error::Error<Rule>> {
let mut pairs = ExprParser::parse(Rule::expr, input)?;
Ok(build_expr(pairs.next().unwrap()))
}
fn build_expr(pair: Pair<Rule>) -> Pred {
match pair.as_rule() {
Rule::expr | Rule::or_expr => build_left_assoc(pair.into_inner(), Rule::and_expr, Pred::Or),
Rule::and_expr => build_left_assoc(pair.into_inner(), Rule::not_expr, Pred::And),
Rule::not_expr => {
// NOT* cmp
let mut inner = pair.into_inner(); // possibly multiple NOT then a cmp
// Count NOTs, then parse cmp
let mut not_count = 0;
let mut rest = Vec::new();
for p in inner {
match p.as_rule() {
Rule::NOT => not_count += 1,
_ => {
rest.push(p);
}
}
}
let mut node = build_cmp(rest.into_iter().next().expect("cmp missing"));
if not_count % 2 == 1 {
node = Pred::Not(Box::new(node));
}
node
}
Rule::cmp => build_cmp(pair),
Rule::primary => build_expr(pair.into_inner().next().unwrap()),
Rule::paren => build_expr(pair.into_inner().next().unwrap()),
_ => unreachable!("unexpected rule: {:?}", pair.as_rule()),
}
}
fn build_left_assoc(
mut inner: Pairs<Rule>,
unit_rule: Rule,
mk_node: impl Fn(Vec<Pred>) -> Pred,
) -> Pred {
// pattern: unit (OP unit)*
let mut nodes = Vec::new();
// First unit
if let Some(first) = inner.next() {
assert_eq!(first.as_rule(), unit_rule);
nodes.push(build_expr(first));
}
// Remaining are: OP unit pairs; we only collect the units and wrap later.
while let Some(next) = inner.next() {
// next is the operator token pair (AND/OR), skip it
// then the unit:
if let Some(unit) = inner.next() {
assert_eq!(unit.as_rule(), unit_rule);
nodes.push(build_expr(unit));
}
}
if nodes.len() == 1 {
nodes.pop().unwrap()
} else {
mk_node(nodes)
}
}
fn build_cmp(pair: Pair<Rule>) -> Pred {
// cmp: primary (cmp_op primary)?
let mut inner = pair.into_inner();
let lhs = inner.next().unwrap();
let lhs_term = parse_term(lhs);
if let Some(op_pair) = inner.next() {
let op = match op_pair.as_str() {
"==" => CmpOp::Eq,
"!=" => CmpOp::Ne,
"<" => CmpOp::Lt,
"<=" => CmpOp::Lte,
">" => CmpOp::Gt,
">=" => CmpOp::Gte,
_ => unreachable!(),
};
let rhs = parse_term(inner.next().unwrap());
// Map to your Atom constraint form (LHS must be x[idx]):
let (index, rhs_val) = match (lhs_term, rhs) {
(Term::Idx(i), Term::Const(c)) => (i, Rhs::Const(c)),
(Term::Idx(i1), Term::Idx(i2)) => (i1, Rhs::Idx(i2)),
// If you want to allow const OP idx or const OP const, handle here (flip, etc.)
other => panic!("unsupported comparison pattern: {:?}", other),
};
Pred::Atom(Atom {
index,
op,
rhs: rhs_val,
})
} else {
// A bare primary is treated as a boolean atom; you can decide policy.
// Here we treat "x[i]" as (x[i] != 0) and const as (const != 0).
match lhs_term {
Term::Idx(i) => Pred::Atom(Atom {
index: i,
op: CmpOp::Ne,
rhs: Rhs::Const(0),
}),
Term::Const(c) => {
// A bare constant is truthy iff it is non-zero.
if c != 0 {
Pred::True
} else {
Pred::False
}
}
}
}
}
#[derive(Debug, Clone, Copy)]
enum Term {
Idx(usize),
Const(u8),
}
fn parse_term(pair: Pair<Rule>) -> Term {
match pair.as_rule() {
Rule::atom => parse_term(pair.into_inner().next().unwrap()),
Rule::byte_idx => {
// "x" "[" number "]"
let mut i = pair.into_inner();
let num = i.find(|p| p.as_rule() == Rule::number).unwrap();
Term::Idx(num.as_str().parse::<usize>().unwrap())
}
Rule::byte_const => {
let n = pair.into_inner().next().unwrap(); // number
Term::Const(n.as_str().parse::<u8>().unwrap())
}
Rule::paren => parse_term(pair.into_inner().next().unwrap()),
Rule::primary => parse_term(pair.into_inner().next().unwrap()),
_ => unreachable!("term {:?}", pair.as_rule()),
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_and() {
let pred = parse_expr("x[100] < x[300] && x[200] == 2 || ! (x[5] >= 57)").unwrap();
// `pred` is a Pred::Or with an And on the left and a Not on the right,
// with Atoms inside.
}
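// Illustrative sketch (not part of the original source): evaluating a parsed
// predicate against concrete bytes with `eval_pred`, assuming the `expr.pest`
// grammar accepts the same comparison syntax as the test above.
#[test]
fn test_parse_and_eval_sketch() {
use crate::predicates::eval_pred;
let pred = parse_expr("x[0] == 104 && x[1] == 105").unwrap();
assert!(eval_pred(&pred, b"hi"));
assert!(!eval_pred(&pred, b"ho"));
}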
}

View File

@@ -1,41 +0,0 @@
// pest. The Elegant Parser
// Copyright (c) 2018 Dragoș Tiselice
//
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. All files in the project carrying such notice may not be copied,
// modified, or distributed except according to those terms.
//! A parser for JSON files.
//!
//! An example grammar for a JSON parser.
json = _{ SOI ~ value ~ eoi }
eoi = _{ !ANY }
/// Matches object, e.g.: `{ "foo": "bar" }`
object = { "{" ~ pair ~ (pair)* ~ "}" | "{" ~ "}" }
pair = { quoted_string ~ ":" ~ value ~ (",")? }
array = { "[" ~ value ~ ("," ~ value)* ~ "]" | "[" ~ "]" }
//////////////////////
/// Matches value, e.g.: `"foo"`, `42`, `true`, `null`, `[]`, `{}`.
//////////////////////
value = _{ quoted_string | number | object | array | bool | null }
quoted_string = _{ "\"" ~ string ~ "\"" }
string = @{ (!("\"" | "\\") ~ ANY)* ~ (escape ~ string)? }
escape = @{ "\\" ~ ("\"" | "\\" | "/" | "b" | "f" | "n" | "r" | "t" | unicode) }
unicode = @{ "u" ~ ASCII_HEX_DIGIT{4} }
number = @{ "-"? ~ int ~ ("." ~ ASCII_DIGIT+ ~ exp? | exp)? }
int = @{ "0" | ASCII_NONZERO_DIGIT ~ ASCII_DIGIT* }
exp = @{ ("E" | "e") ~ ("+" | "-")? ~ ASCII_DIGIT+ }
bool = { "true" | "false" }
null = { "null" }
WHITESPACE = _{ " " | "\t" | "\r" | "\n" }

View File

@@ -1,760 +0,0 @@
//!
use crate::predicates::Pred;
use pest::{
iterators::{Pair, Pairs},
Parser,
};
use pest_derive::Parser;
use pest_meta::{ast, parser as meta, parser::consume_rules, validator};
#[cfg(test)]
mod test {
use core::panic;
use std::cmp::{max, min};
use crate::{
config::prove::ProveConfig,
predicates::{eval_pred, is_unicode, Atom, CmpOp, Compiler, Rhs},
};
use super::*;
use mpz_circuits::{
evaluate,
ops::{all, any},
};
use pest_meta::ast::Expr;
use rangeset::RangeSet;
const MAX_LEN: usize = 999_999;
#[derive(Debug, Clone)]
enum Ex {
RepEx(Rep),
RepExactEx(RepExact),
SeqEx(Seq),
StrEx(Str),
ChoiceEx(Choice),
NegPredEx(NegPred),
OptEx(Opt),
// An expression which must be replaced with a copy of the rule.
NestedEx,
#[allow(non_camel_case_types)]
ASCII_NONZERO_DIGIT,
#[allow(non_camel_case_types)]
ASCII_DIGIT,
#[allow(non_camel_case_types)]
// A single Unicode character
ANY,
#[allow(non_camel_case_types)]
ASCII_HEX_DIGIT,
}
impl Ex {
fn min_len(&self) -> usize {
match self {
Ex::RepEx(e) => 0,
Ex::RepExactEx(e) => e.0 .0.min_len() * e.0 .1 as usize,
Ex::StrEx(e) => e.0.len(),
Ex::SeqEx(e) => e.0.min_len() + e.1.min_len(),
Ex::ChoiceEx(e) => min(e.0.min_len(), e.1.min_len()),
Ex::NegPredEx(e) => 0,
Ex::ASCII_NONZERO_DIGIT => 1,
Ex::ASCII_DIGIT => 1,
Ex::ANY => 1,
Ex::ASCII_HEX_DIGIT => 1,
Ex::OptEx(e) => 0,
Ex::NestedEx => 0,
_ => unimplemented!(),
}
}
fn max_len(&self) -> usize {
match self {
Ex::RepEx(e) => MAX_LEN,
Ex::RepExactEx(e) => e.0 .0.max_len() * e.0 .1 as usize,
Ex::StrEx(e) => e.0.len(),
Ex::SeqEx(e) => e.0.max_len() + e.1.max_len(),
Ex::ChoiceEx(e) => max(e.0.max_len(), e.1.max_len()),
Ex::NegPredEx(e) => 0,
Ex::ASCII_NONZERO_DIGIT => 1,
Ex::ASCII_DIGIT => 1,
Ex::ANY => 4,
Ex::ASCII_HEX_DIGIT => 1,
Ex::OptEx(e) => e.0.max_len(),
Ex::NestedEx => 0,
_ => unimplemented!(),
}
}
}
#[derive(Debug, Clone)]
struct Rep(Box<Ex>);
#[derive(Debug, Clone)]
struct RepExact((Box<Ex>, u32));
#[derive(Debug, Clone)]
struct Str(String);
#[derive(Debug, Clone)]
struct Seq(Box<Ex>, Box<Ex>);
#[derive(Debug, Clone)]
struct Choice(Box<Ex>, Box<Ex>);
#[derive(Debug, Clone)]
struct NegPred(Box<Ex>);
#[derive(Debug, Clone)]
struct Opt(Box<Ex>);
struct Rule {
name: String,
pub ex: Ex,
}
/// Builds the rules, returning the final expression.
fn build_rules(ast_rules: &[ast::Rule]) -> Ex {
let mut rules = Vec::new();
// build from the bottom up
let iter = ast_rules.iter().rev();
for r in iter {
println!("building rule with name {:?}", r.name);
let ex = build_expr(&r.expr, &rules, &r.name, false);
// TODO deal with recursive rules
rules.push(Rule {
name: r.name.clone(),
ex,
});
}
let ex = rules.last().unwrap().ex.clone();
ex
}
/// Builds an expression from a pest expression.
/// The current rule's name is passed in to detect recursion; `is_nested`
/// is threaded through to mark nested expansions of a rule.
fn build_expr(exp: &Expr, rules: &[Rule], this_name: &String, is_nested: bool) -> Ex {
match exp {
Expr::Rep(exp) => {
Ex::RepEx(Rep(Box::new(build_expr(exp, rules, this_name, is_nested))))
}
Expr::RepExact(exp, count) => Ex::RepExactEx(RepExact((
Box::new(build_expr(exp, rules, this_name, is_nested)),
*count,
))),
Expr::Str(str) => Ex::StrEx(Str(str.clone())),
Expr::NegPred(exp) => Ex::NegPredEx(NegPred(Box::new(build_expr(
exp, rules, this_name, is_nested,
)))),
Expr::Seq(a, b) => {
//
let a = build_expr(a, rules, this_name, is_nested);
Ex::SeqEx(Seq(
Box::new(a),
Box::new(build_expr(b, rules, this_name, is_nested)),
))
}
Expr::Choice(a, b) => Ex::ChoiceEx(Choice(
Box::new(build_expr(a, rules, this_name, is_nested)),
Box::new(build_expr(b, rules, this_name, is_nested)),
)),
Expr::Opt(exp) => {
Ex::OptEx(Opt(Box::new(build_expr(exp, rules, this_name, is_nested))))
}
Expr::Ident(ident) => {
let ex = match ident.as_str() {
"ASCII_NONZERO_DIGIT" => Ex::ASCII_NONZERO_DIGIT,
"ASCII_DIGIT" => Ex::ASCII_DIGIT,
"ANY" => Ex::ANY,
"ASCII_HEX_DIGIT" => Ex::ASCII_HEX_DIGIT,
_ => {
if *ident == *this_name {
return Ex::NestedEx;
}
for rule in rules {
if rule.name == *ident {
return rule.ex.clone();
}
}
panic!("couldn't find rule {:?}", ident);
}
};
ex
}
_ => unimplemented!(),
}
}
// This method must be called only when we know that there is enough
// data remaining, starting from the offset, to match the expression
// at least once.
//
// returns the predicate and the offset from which the next expression
// should be matched.
// Returns multiple predicates if the expression caused multiple branches.
// A top level expr always returns a single predicate, in which all branches
// are coalesced.
fn expr_to_pred(
exp: &Ex,
offset: usize,
data_len: usize,
is_top_level: bool,
) -> Vec<(Pred, usize)> {
// if is_top_level {
// println!("top level exps {:?}", exp);
// } else {
// println!("Non-top level exps {:?}", exp);
// }
match exp {
Ex::SeqEx(s) => {
let a = &s.0;
let b = &s.1;
if is_top_level && (offset + a.max_len() + b.max_len() < data_len) {
panic!();
}
if offset + a.min_len() + b.min_len() > data_len {
panic!();
}
// The first expression must not try to match in the
// data of the next expression
let pred1 = expr_to_pred(a, offset, data_len - b.min_len(), false);
// interlace all branches
let mut interlaced = Vec::new();
for (p1, offset) in pred1.iter() {
// if the seq expr was top-level, the 2nd expr becomes top-level
let mut pred2 = expr_to_pred(b, *offset, data_len, is_top_level);
for (p2, offset_inner) in pred2.iter() {
let pred = Pred::And(vec![p1.clone(), p2.clone()]);
interlaced.push((pred, *offset_inner));
}
}
if is_top_level {
// coalesce all branches
let preds: Vec<Pred> = interlaced.into_iter().map(|(a, _b)| a).collect();
if preds.len() == 1 {
vec![(preds[0].clone(), 0)]
} else {
vec![(Pred::Or(preds), 0)]
}
} else {
interlaced
}
}
Ex::ChoiceEx(s) => {
let a = &s.0;
let b = &s.1;
let mut skip_a = false;
let mut skip_b = false;
if is_top_level {
if offset + a.max_len() != data_len {
skip_a = true
}
if offset + b.max_len() != data_len {
skip_b = true;
}
} else {
// if not top level, we may skip an expression when it will
// overflow the data len
if offset + a.min_len() > data_len {
skip_a = true
}
if offset + b.min_len() > data_len {
skip_b = true
}
}
if skip_a && skip_b {
panic!();
}
let mut preds_a = Vec::new();
let mut preds_b = Vec::new();
if !skip_a {
preds_a = expr_to_pred(a, offset, data_len, is_top_level);
}
if !skip_b {
preds_b = expr_to_pred(b, offset, data_len, is_top_level);
}
// combine all branches
let mut combined = Vec::new();
if preds_a.is_empty() {
combined = preds_b.clone();
} else if preds_b.is_empty() {
combined = preds_a.clone();
} else {
assert!(!(preds_a.is_empty() && preds_b.is_empty()));
combined.append(&mut preds_a);
combined.append(&mut preds_b);
}
if is_top_level {
// coalesce all branches
let preds: Vec<Pred> = combined.into_iter().map(|(a, _b)| a).collect();
if preds.len() == 1 {
vec![(preds[0].clone(), 0)]
} else {
vec![(Pred::Or(preds), 0)]
}
} else {
combined
}
}
Ex::RepEx(r) => {
let e = &r.0;
if offset + e.min_len() > data_len {
if is_top_level {
panic!();
}
// zero matches
return vec![];
}
let mut interlaced = Vec::new();
let mut preds = expr_to_pred(&e, offset, data_len, false);
// for (i, (pred, depth)) in preds.iter().enumerate() {
// println!("preds[{i}] (depth {depth}):");
// println!("{pred}");
// }
// Append single matches.
interlaced.append(&mut preds.clone());
let mut was_found = true;
while was_found {
was_found = false;
for (pred_outer, offset_outer) in std::mem::take(&mut preds).into_iter() {
if offset_outer + e.min_len() > data_len {
// cannot match any more
continue;
}
let mut preds_inner = expr_to_pred(&e, offset_outer, data_len, false);
// for (i, (pred, depth)) in preds_inner.iter().enumerate() {
// println!("preds[{i}] (depth {depth}):");
// println!("{pred}");
// }
for (pred_inner, offset_inner) in preds_inner {
let pred = (
Pred::And(vec![pred_outer.clone(), pred_inner]),
offset_inner,
);
preds.push(pred);
was_found = true;
}
}
interlaced.append(&mut preds.clone());
}
// for (i, (pred, depth)) in interlaced.iter().enumerate() {
// println!("preds[{i}] (depth {depth}):");
// println!("{pred}");
// }
if is_top_level {
// drop all branches which do not match exactly at the data length
// border and coalesce the rest
let preds: Vec<Pred> = interlaced
.into_iter()
.filter(|(_a, b)| *b == data_len)
.map(|(a, _b)| a)
.collect();
if preds.is_empty() {
panic!()
}
if preds.len() == 1 {
vec![(preds[0].clone(), 0)]
} else {
// coalesce all branches
vec![(Pred::Or(preds), 0)]
}
} else {
interlaced
}
}
Ex::RepExactEx(r) => {
let e = &r.0 .0;
let count = r.0 .1;
assert!(count > 0);
if is_top_level && (offset + e.max_len() * count as usize < data_len) {
panic!();
}
let mut preds = expr_to_pred(&e, offset, data_len, false);
for i in 1..count {
for (pred_outer, offset_outer) in std::mem::take(&mut preds).into_iter() {
if offset_outer + e.min_len() > data_len {
// cannot match any more
continue;
}
let mut preds_inner = expr_to_pred(&e, offset_outer, data_len, false);
for (pred_inner, offset_inner) in preds_inner {
let pred = (
Pred::And(vec![pred_outer.clone(), pred_inner]),
offset_inner,
);
preds.push(pred);
}
}
}
if is_top_level {
// drop all branches which do not match exactly at the data length
// border and coalesce the rest
let preds: Vec<Pred> = preds
.into_iter()
.filter(|(_a, b)| *b == data_len)
.map(|(a, _b)| a)
.collect();
if preds.is_empty() {
panic!()
}
if preds.len() == 1 {
vec![(preds[0].clone(), 0)]
} else {
// coalesce all branches
vec![(Pred::Or(preds), 0)]
}
} else {
preds
}
}
Ex::NegPredEx(e) => {
assert!(offset <= data_len);
if offset == data_len {
// the internal expression cannot be matched since there is no data left,
// which means that the negative predicate matched
if is_top_level {
panic!("always true predicate doesnt make sense")
}
// TODO this is hacky.
return vec![(Pred::True, offset)];
}
let e = &e.0;
let preds = expr_to_pred(&e, offset, data_len, is_top_level);
let preds: Vec<Pred> = preds.into_iter().map(|(a, _b)| a).collect();
let len = preds.len();
// coalesce all branches; the offsets don't matter since they
// will never be used again.
let pred = if preds.len() == 1 {
Pred::Not(Box::new(preds[0].clone()))
} else {
Pred::Not(Box::new(Pred::Or(preds)))
};
if is_top_level && len == 0 {
panic!()
}
// all offsets of a negative predicate are ignored since no matching
// will be done from those offsets.
vec![(pred, offset)]
}
Ex::OptEx(e) => {
let e = &e.0;
if is_top_level {
return vec![(Pred::True, 0)];
}
// add an always-matching branch
let mut preds = vec![(Pred::True, offset)];
if e.min_len() + offset <= data_len {
// try to match only if there is enough data
let mut p = expr_to_pred(&e, offset, data_len, is_top_level);
preds.append(&mut p);
}
preds
}
Ex::StrEx(s) => {
if is_top_level && offset + s.0.len() != data_len {
panic!();
}
let mut preds = Vec::new();
for (idx, byte) in s.0.clone().into_bytes().iter().enumerate() {
let a = Atom {
index: offset + idx,
op: CmpOp::Eq,
rhs: Rhs::Const(*byte),
};
preds.push(Pred::Atom(a));
}
if preds.len() == 1 {
vec![(preds[0].clone(), offset + s.0.len())]
} else {
vec![(Pred::And(preds), offset + s.0.len())]
}
}
Ex::ASCII_NONZERO_DIGIT => {
if is_top_level && (offset + 1 != data_len) {
panic!();
}
let gte = Pred::Atom(Atom {
index: offset,
op: CmpOp::Gte,
rhs: Rhs::Const(49u8),
});
let lte = Pred::Atom(Atom {
index: offset,
op: CmpOp::Lte,
rhs: Rhs::Const(57u8),
});
vec![(Pred::And(vec![gte, lte]), offset + 1)]
}
Ex::ASCII_DIGIT => {
if is_top_level && (offset + 1 != data_len) {
panic!();
}
let gte = Pred::Atom(Atom {
index: offset,
op: CmpOp::Gte,
rhs: Rhs::Const(48u8),
});
let lte = Pred::Atom(Atom {
index: offset,
op: CmpOp::Lte,
rhs: Rhs::Const(57u8),
});
vec![(Pred::And(vec![gte, lte]), offset + 1)]
}
Ex::ANY => {
if is_top_level && (offset + 1 > data_len) {
panic!();
}
let start = offset;
let end = min(offset + 4, data_len);
let mut branches = Vec::new();
for branch_end in start + 1..end {
branches.push((is_unicode(RangeSet::from(start..branch_end)), branch_end))
}
if is_top_level {
assert!(branches.len() == 1);
}
branches
}
_ => unimplemented!(),
}
}
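// Illustrative sketch (not part of the original source): a minimal check of
// the top-level contract documented above: a string literal expression
// matched against data of exactly its own length yields a single coalesced
// predicate.
#[test]
fn test_str_top_level_sketch() {
let exp = Ex::StrEx(Str("ab".to_string()));
let preds = expr_to_pred(&exp, 0, 2, true);
assert!(preds.len() == 1);
let pred = &preds[0].0;
assert!(eval_pred(pred, b"ab"));
assert!(!eval_pred(pred, b"ax"));
}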
#[test]
fn test_json_int() {
use rand::{distr::Alphanumeric, rng, Rng};
let grammar = include_str!("json_int.pest");
// Parse the grammar file into Pairs (the grammar's own parse tree)
let pairs = meta::parse(meta::Rule::grammar_rules, grammar).expect("grammar parse error");
// Optional: validate (reports duplicate rules, unreachable rules, etc.)
validator::validate_pairs(pairs.clone()).expect("invalid grammar");
// 4) Convert the parsed pairs into the stable AST representation
let rules_ast: Vec<ast::Rule> = consume_rules(pairs).unwrap();
let exp = build_rules(&rules_ast);
// 5) Inspect the AST however you like. For a quick look, the Debug print is the
// safest (works across versions).
for rule in &rules_ast {
println!("{:#?}", rule);
}
const LENGTH: usize = 7; // Adjustable constant
let pred = expr_to_pred(&exp, 0, LENGTH, true);
assert!(pred.len() == 1);
let pred = &pred[0].0;
let circ = Compiler::new().compile(&pred);
println!("{:?} and gates", circ.and_count());
for i in 0..1000000 {
let s: String = rng()
.sample_iter(&Alphanumeric)
.take(LENGTH)
.map(char::from)
.collect();
let out = eval_pred(pred, s.as_bytes());
let is_int = s.chars().all(|c| c.is_ascii_digit()) && !s.starts_with('0');
if out != is_int {
println!("failed at index {:?} with {:?}", i, s);
}
assert_eq!(out, is_int)
}
}
#[test]
fn test_json_str() {
use rand::{distr::Alphanumeric, rng, Rng};
const LENGTH: usize = 10; // Adjustable constant
let grammar = include_str!("json_str.pest");
// Parse the grammar file into Pairs (the grammar's own parse tree)
let pairs = meta::parse(meta::Rule::grammar_rules, grammar).expect("grammar parse error");
// Optional: validate (reports duplicate rules, unreachable rules, etc.)
validator::validate_pairs(pairs.clone()).expect("invalid grammar");
// 4) Convert the parsed pairs into the stable AST representation
let rules_ast: Vec<ast::Rule> = consume_rules(pairs).unwrap();
for rule in &rules_ast {
println!("{:#?}", rule);
}
let exp = build_rules(&rules_ast);
for len in LENGTH..LENGTH + 7 {
let pred = expr_to_pred(&exp, 0, len, true);
assert!(pred.len() == 1);
let pred = &pred[0].0;
let circ = Compiler::new().compile(pred);
println!(
"JSON string length: {:?}; circuit AND gate count {:?}",
len,
circ.and_count()
);
}
}
#[test]
fn test_choice() {
let a = Expr::Ident("ASCII_NONZERO_DIGIT".to_string());
let b = Expr::Ident("ASCII_DIGIT".to_string());
let rule = ast::Rule {
name: "test".to_string(),
ty: ast::RuleType::Atomic,
expr: Expr::Choice(Box::new(a), Box::new(b)),
};
let exp = build_rules(&vec![rule]);
let pred = expr_to_pred(&exp, 0, 1, true);
assert!(pred.len() == 1);
let pred = &pred[0].0;
println!("pred is {:?}", pred);
}
#[test]
fn test_seq() {
let a = Expr::Ident("ASCII_NONZERO_DIGIT".to_string());
let b = Expr::Ident("ASCII_DIGIT".to_string());
let rule = ast::Rule {
name: "test".to_string(),
ty: ast::RuleType::Atomic,
expr: Expr::Seq(Box::new(a), Box::new(b)),
};
let exp = build_rules(&vec![rule]);
let pred = expr_to_pred(&exp, 0, 2, true);
assert!(pred.len() == 1);
let pred = &pred[0].0;
println!("pred is {:?}", pred);
}
#[test]
fn test_rep() {
let a = Expr::Ident("ASCII_NONZERO_DIGIT".to_string());
let b = Expr::Ident("ASCII_DIGIT".to_string());
let rule = ast::Rule {
name: "test".to_string(),
ty: ast::RuleType::Atomic,
expr: Expr::Rep(Box::new(a)),
};
let exp = build_rules(&vec![rule]);
let pred = expr_to_pred(&exp, 0, 3, true);
assert!(pred.len() == 1);
let pred = &pred[0].0;
println!("pred is {:?}", pred);
}
#[test]
fn test_rep_choice() {
const LENGTH: usize = 5; // Adjustable constant
let a = Expr::Ident("ASCII_NONZERO_DIGIT".to_string());
let b = Expr::Ident("ASCII_DIGIT".to_string());
// Number of predicates needed to represent the expressions.
let a_weight = 2usize;
let b_weight = 2usize;
let rep_a = Expr::Rep(Box::new(a));
let rep_b = Expr::Rep(Box::new(b));
let rule = ast::Rule {
name: "test".to_string(),
ty: ast::RuleType::Atomic,
expr: Expr::Choice(Box::new(rep_a), Box::new(rep_b)),
};
let exp = build_rules(&vec![rule]);
let pred = expr_to_pred(&exp, 0, LENGTH, true);
assert!(pred.len() == 1);
let pred = &pred[0].0;
println!("pred is {}", pred);
// Sanity check that no extra predicates are being added.
assert_eq!(pred.leaves(), a_weight * LENGTH + b_weight * LENGTH);
}
#[test]
fn test_neg_choice() {
let a = Expr::Str("4".to_string());
let b = Expr::Str("5".to_string());
let choice = Expr::Choice(Box::new(a), Box::new(b));
let neg_choice = Expr::NegPred(Box::new(choice));
let c = Expr::Str("a".to_string());
let d = Expr::Str("BC".to_string());
let choice2 = Expr::Choice(Box::new(c), Box::new(d));
let rule = ast::Rule {
name: "test".to_string(),
ty: ast::RuleType::Atomic,
expr: Expr::Seq(Box::new(neg_choice), Box::new(choice2)),
};
let exp = build_rules(&vec![rule]);
let pred = expr_to_pred(&exp, 0, 2, true);
assert!(pred.len() == 1);
let pred = &pred[0].0;
println!("pred is {:?}", pred);
assert_eq!(pred.leaves(), 4);
}
}

View File

@@ -1,3 +0,0 @@
// Copied from pest.json
int = @{ "0" | ASCII_NONZERO_DIGIT ~ ASCII_DIGIT* }

View File

@@ -1,6 +0,0 @@
// Copied from pest.json
// Replaced "string" with "X" to avoid recursion.
string = @{ (!("\"" | "\\") ~ ANY)* ~ (escape ~ "X")? }
escape = @{ "\\" ~ ("\"" | "\\" | "/" | "b" | "f" | "n" | "r" | "t" | unicode) }
unicode = @{ "u" ~ ASCII_HEX_DIGIT{4} }

View File

@@ -12,21 +12,176 @@ pub mod merkle;
pub mod transcript;
pub mod webpki;
pub use rangeset;
pub mod config;
pub(crate) mod display;
//pub mod grammar;
pub mod json;
pub mod predicates;
use rangeset::{RangeSet, ToRangeSet, UnionMut};
use serde::{Deserialize, Serialize};
use crate::{
connection::ServerName,
connection::{HandshakeData, ServerName},
transcript::{
encoding::EncoderSecret, PartialTranscript, TranscriptCommitment, TranscriptSecret,
encoding::EncoderSecret, Direction, PartialTranscript, Transcript, TranscriptCommitConfig,
TranscriptCommitRequest, TranscriptCommitment, TranscriptSecret,
},
};
/// Configuration to prove information to the verifier.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProveConfig {
server_identity: bool,
reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
transcript_commit: Option<TranscriptCommitConfig>,
}
impl ProveConfig {
/// Creates a new builder.
pub fn builder(transcript: &Transcript) -> ProveConfigBuilder<'_> {
ProveConfigBuilder::new(transcript)
}
/// Returns `true` if the server identity is to be proven.
pub fn server_identity(&self) -> bool {
self.server_identity
}
/// Returns the ranges of the transcript to be revealed.
pub fn reveal(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
self.reveal.as_ref()
}
/// Returns the transcript commitment configuration.
pub fn transcript_commit(&self) -> Option<&TranscriptCommitConfig> {
self.transcript_commit.as_ref()
}
}
/// Builder for [`ProveConfig`].
#[derive(Debug)]
pub struct ProveConfigBuilder<'a> {
transcript: &'a Transcript,
server_identity: bool,
reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
transcript_commit: Option<TranscriptCommitConfig>,
}
impl<'a> ProveConfigBuilder<'a> {
/// Creates a new builder.
pub fn new(transcript: &'a Transcript) -> Self {
Self {
transcript,
server_identity: false,
reveal: None,
transcript_commit: None,
}
}
/// Proves the server identity.
pub fn server_identity(&mut self) -> &mut Self {
self.server_identity = true;
self
}
/// Configures transcript commitments.
pub fn transcript_commit(&mut self, transcript_commit: TranscriptCommitConfig) -> &mut Self {
self.transcript_commit = Some(transcript_commit);
self
}
/// Reveals the given ranges of the transcript.
pub fn reveal(
&mut self,
direction: Direction,
ranges: &dyn ToRangeSet<usize>,
) -> Result<&mut Self, ProveConfigBuilderError> {
let idx = ranges.to_range_set();
if idx.end().unwrap_or(0) > self.transcript.len_of_direction(direction) {
return Err(ProveConfigBuilderError(
ProveConfigBuilderErrorRepr::IndexOutOfBounds {
direction,
actual: idx.end().unwrap_or(0),
len: self.transcript.len_of_direction(direction),
},
));
}
let (sent, recv) = self.reveal.get_or_insert_default();
match direction {
Direction::Sent => sent.union_mut(&idx),
Direction::Received => recv.union_mut(&idx),
}
Ok(self)
}
/// Reveals the given ranges of the sent data transcript.
pub fn reveal_sent(
&mut self,
ranges: &dyn ToRangeSet<usize>,
) -> Result<&mut Self, ProveConfigBuilderError> {
self.reveal(Direction::Sent, ranges)
}
/// Reveals all of the sent data transcript.
pub fn reveal_sent_all(&mut self) -> Result<&mut Self, ProveConfigBuilderError> {
let len = self.transcript.len_of_direction(Direction::Sent);
let (sent, _) = self.reveal.get_or_insert_default();
sent.union_mut(&(0..len));
Ok(self)
}
/// Reveals the given ranges of the received data transcript.
pub fn reveal_recv(
&mut self,
ranges: &dyn ToRangeSet<usize>,
) -> Result<&mut Self, ProveConfigBuilderError> {
self.reveal(Direction::Received, ranges)
}
/// Reveals all of the received data transcript.
pub fn reveal_recv_all(&mut self) -> Result<&mut Self, ProveConfigBuilderError> {
let len = self.transcript.len_of_direction(Direction::Received);
let (_, recv) = self.reveal.get_or_insert_default();
recv.union_mut(&(0..len));
Ok(self)
}
/// Builds the configuration.
pub fn build(self) -> Result<ProveConfig, ProveConfigBuilderError> {
Ok(ProveConfig {
server_identity: self.server_identity,
reveal: self.reveal,
transcript_commit: self.transcript_commit,
})
}
}
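// Illustrative usage sketch (an addition, not part of the original source):
// assembling a `ProveConfig`, assuming a `transcript` with at least ten sent
// bytes is in scope.
//
// let mut builder = ProveConfig::builder(&transcript);
// builder.server_identity();
// builder.reveal_sent(&(0..10))?;
// builder.reveal_recv_all()?;
// let config = builder.build()?;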
/// Error for [`ProveConfigBuilder`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct ProveConfigBuilderError(#[from] ProveConfigBuilderErrorRepr);
#[derive(Debug, thiserror::Error)]
enum ProveConfigBuilderErrorRepr {
#[error("range is out of bounds of the transcript ({direction}): {actual} > {len}")]
IndexOutOfBounds {
direction: Direction,
actual: usize,
len: usize,
},
}
/// Request to prove statements about the connection.
#[derive(Debug, Serialize, Deserialize)]
pub struct ProveRequest {
/// Handshake data.
pub handshake: Option<(ServerName, HandshakeData)>,
/// Transcript data.
pub transcript: Option<PartialTranscript>,
/// Transcript commitment configuration.
pub transcript_commit: Option<TranscriptCommitRequest>,
}
/// Prover output.
#[derive(Serialize, Deserialize)]
pub struct ProverOutput {

View File

@@ -1,790 +0,0 @@
//! Predicate and compiler.
use std::{collections::HashMap, fmt};
use mpz_circuits::{itybity::ToBits, ops, Circuit, CircuitBuilder, Feed, Node};
use rangeset::RangeSet;
/// A boolean predicate over byte values.
#[derive(Debug, Clone)]
pub(crate) enum Pred {
And(Vec<Pred>),
Or(Vec<Pred>),
Not(Box<Pred>),
Atom(Atom),
// An always-true predicate.
True,
// An always-false predicate.
False,
}
impl Pred {
/// Returns sorted unique byte indices of this predicate.
pub(crate) fn indices(&self) -> Vec<usize> {
let mut indices = self.indices_internal(self);
indices.sort_unstable();
indices.dedup();
indices
}
// Returns the number of leaves (i.e. atoms) in this predicate's AST.
pub(crate) fn leaves(&self) -> usize {
match self {
Pred::And(vec) => vec.iter().map(|p| p.leaves()).sum(),
Pred::Or(vec) => vec.iter().map(|p| p.leaves()).sum(),
Pred::Not(p) => p.leaves(),
Pred::Atom(atom) => 1,
Pred::True => 0,
Pred::False => 0,
}
}
/// Returns all byte indices of the given predicate.
fn indices_internal(&self, pred: &Pred) -> Vec<usize> {
match pred {
Pred::And(vec) => vec
.iter()
.flat_map(|p| self.indices_internal(p))
.collect::<Vec<_>>(),
Pred::Or(vec) => vec
.iter()
.flat_map(|p| self.indices_internal(p))
.collect::<Vec<_>>(),
Pred::Not(p) => self.indices_internal(p),
Pred::Atom(atom) => {
let mut indices = Vec::new();
indices.push(atom.index);
if let Rhs::Idx(idx) = atom.rhs {
indices.push(idx);
}
indices
}
Pred::True => vec![],
Pred::False => vec![],
}
}
}
impl fmt::Display for Pred {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.fmt_with_indent(f, 0)
}
}
impl Pred {
fn fmt_with_indent(&self, f: &mut fmt::Formatter<'_>, indent: usize) -> fmt::Result {
// helper to write the current indentation
fn pad(f: &mut fmt::Formatter<'_>, indent: usize) -> fmt::Result {
// 2 spaces per level; tweak as you like
write!(f, "{:indent$}", "", indent = indent * 2)
}
match self {
Pred::And(preds) => {
pad(f, indent)?;
writeln!(f, "And(")?;
for p in preds {
p.fmt_with_indent(f, indent + 1)?;
}
pad(f, indent)?;
writeln!(f, ")")
}
Pred::Or(preds) => {
pad(f, indent)?;
writeln!(f, "Or(")?;
for p in preds {
p.fmt_with_indent(f, indent + 1)?;
}
pad(f, indent)?;
writeln!(f, ")")
}
Pred::Not(p) => {
pad(f, indent)?;
writeln!(f, "Not(")?;
p.fmt_with_indent(f, indent + 1)?;
pad(f, indent)?;
writeln!(f, ")")
}
Pred::Atom(a) => {
pad(f, indent)?;
writeln!(f, "Atom({:?})", a)
}
Pred::True => {
pad(f, indent)?;
writeln!(f, "True")
}
Pred::False => {
pad(f, indent)?;
writeln!(f, "False")
}
}
}
}
/// Atomic predicate of the form:
/// x[index] (op) rhs
#[derive(Debug, Clone)]
pub struct Atom {
/// Left-hand side byte index `i` (x_i).
pub index: usize,
/// Comparison operator.
pub op: CmpOp,
/// Right-hand side operand (constant or x_j).
pub rhs: Rhs,
}
/// Comparison operator.
#[derive(Debug, Clone)]
pub(crate) enum CmpOp {
Eq, // ==
Ne, // !=
Gt, // >
Gte, // >=
Lt, // <
Lte, // <=
}
/// RHS of a comparison
#[derive(Debug, Clone)]
pub enum Rhs {
/// Byte at index
Idx(usize),
/// Literal constant.
Const(u8),
}
/// Compiles a predicate into a circuit.
pub struct Compiler {
/// A <byte index, circuit feeds> map.
map: HashMap<usize, [Node<Feed>; 8]>,
}
impl Compiler {
pub(crate) fn new() -> Self {
Self {
map: HashMap::new(),
}
}
/// Compiles the given predicate into a circuit.
pub(crate) fn compile(&mut self, pred: &Pred) -> Circuit {
let mut builder = CircuitBuilder::new();
for idx in pred.indices() {
let feeds = (0..8).map(|_| builder.add_input()).collect::<Vec<_>>();
self.map.insert(idx, feeds.try_into().unwrap());
}
let out = self.process(&mut builder, pred);
builder.add_output(out);
builder.build().unwrap()
}
// Processes a single predicate.
fn process(&mut self, builder: &mut CircuitBuilder, pred: &Pred) -> Node<Feed> {
match pred {
Pred::And(vec) => {
let out = vec
.iter()
.map(|p| self.process(builder, p))
.collect::<Vec<_>>();
ops::all(builder, &out)
}
Pred::Or(vec) => {
let out = vec
.iter()
.map(|p| self.process(builder, p))
.collect::<Vec<_>>();
ops::any(builder, &out)
}
Pred::Not(p) => {
let pred_out = self.process(builder, p);
let inv = ops::inv(builder, [pred_out]);
inv[0]
}
Pred::Atom(atom) => {
let lhs = self.map.get(&atom.index).unwrap().clone();
let rhs = match atom.rhs {
Rhs::Const(c) => const_feeds(builder, c),
Rhs::Idx(s) => self.map.get(&s).unwrap().clone(),
};
match atom.op {
CmpOp::Eq => ops::eq(builder, lhs, rhs),
CmpOp::Ne => ops::neq(builder, lhs, rhs),
CmpOp::Lt => ops::lt(builder, lhs, rhs),
CmpOp::Lte => ops::lte(builder, lhs, rhs),
CmpOp::Gt => ops::gt(builder, lhs, rhs),
CmpOp::Gte => ops::gte(builder, lhs, rhs),
}
}
Pred::True => builder.get_const_one(),
Pred::False => builder.get_const_zero(),
}
}
}
// Returns circuit feeds for the given constant u8 value.
fn const_feeds(builder: &CircuitBuilder, cnst: u8) -> [Node<Feed>; 8] {
cnst.iter_lsb0()
.map(|b| {
if b {
builder.get_const_one()
} else {
builder.get_const_zero()
}
})
.collect::<Vec<_>>()
.try_into()
.expect("u8 has 8 feeds")
}
// Evaluates the predicate on the input `data`.
pub(crate) fn eval_pred(pred: &Pred, data: &[u8]) -> bool {
match pred {
Pred::And(vec) => vec.iter().map(|p| eval_pred(p, data)).all(|b| b),
Pred::Or(vec) => vec.iter().map(|p| eval_pred(p, data)).any(|b| b),
Pred::Not(p) => !eval_pred(p, data),
Pred::Atom(atom) => {
let lhs = data[atom.index];
let rhs = match atom.rhs {
Rhs::Const(c) => c,
Rhs::Idx(s) => data[s],
};
match atom.op {
CmpOp::Eq => lhs == rhs,
CmpOp::Ne => lhs != rhs,
CmpOp::Lt => lhs < rhs,
CmpOp::Lte => lhs <= rhs,
CmpOp::Gt => lhs > rhs,
CmpOp::Gte => lhs >= rhs,
}
}
Pred::True => true,
Pred::False => false,
}
}
/// Builds a predicate that an ascii integer is contained in the ranges.
fn is_ascii_integer(range: RangeSet<usize>) -> Pred {
let mut preds = Vec::new();
for idx in range.iter() {
let lte = Pred::Atom(Atom {
index: idx,
op: CmpOp::Lte,
rhs: Rhs::Const(57u8),
});
let gte = Pred::Atom(Atom {
index: idx,
op: CmpOp::Gte,
rhs: Rhs::Const(48u8),
});
preds.push(Pred::And(vec![lte, gte]));
}
Pred::And(preds)
}
/// Builds a predicate that a valid HTTP header value is contained in the
/// ranges.
fn is_valid_http_header_value(range: RangeSet<usize>) -> Pred {
let mut preds = Vec::new();
for idx in range.iter() {
let ne = Pred::Atom(Atom {
index: idx,
op: CmpOp::Ne,
// ascii code for carriage return \r
rhs: Rhs::Const(13u8),
});
preds.push(ne);
}
Pred::And(preds)
}
/// Builds a predicate that a valid JSON string is contained in the
/// ranges.
fn is_valid_json_string(range: RangeSet<usize>) -> Pred {
assert!(
range.len_ranges() == 1,
"only a contiguous range is allowed"
);
const BACKSLASH: u8 = 92;
// check if all unicode chars are allowed
let mut preds = Vec::new();
// Find all \u escape sequences
for (i, idx) in range.iter().enumerate() {
if i == range.len() - 1 {
// if this is the last char, skip it
continue;
}
let is_backslash = Pred::Atom(Atom {
index: idx,
op: CmpOp::Eq,
rhs: Rhs::Const(BACKSLASH),
});
}
Pred::And(preds)
}
// Returns a predicate that a unicode char is contained in the range
pub(crate) fn is_unicode(range: RangeSet<usize>) -> Pred {
assert!(range.len() <= 4);
match range.len() {
1 => is_1_byte_unicode(range.max().unwrap()),
2 => is_2_byte_unicode(range),
3 => is_3_byte_unicode(range),
4 => is_4_byte_unicode(range),
_ => unimplemented!(),
}
}
fn is_1_byte_unicode(pos: usize) -> Pred {
Pred::Atom(Atom {
index: pos,
op: CmpOp::Lte,
rhs: Rhs::Const(127u8),
})
}
fn is_2_byte_unicode(range: RangeSet<usize>) -> Pred {
assert!(range.len() == 2);
let mut iter = range.iter();
// should be 110xxxxx
let first = iter.next().unwrap();
let gte = Pred::Atom(Atom {
index: first,
op: CmpOp::Gte,
rhs: Rhs::Const(0xC0),
});
let lte = Pred::Atom(Atom {
index: first,
op: CmpOp::Lte,
rhs: Rhs::Const(0xDF),
});
let second = iter.next().unwrap();
Pred::And(vec![lte, gte, is_unicode_continuation(second)])
}
fn is_3_byte_unicode(range: RangeSet<usize>) -> Pred {
assert!(range.len() == 3);
let mut iter = range.iter();
let first = iter.next().unwrap();
// should be 1110xxxx
let gte = Pred::Atom(Atom {
index: first,
op: CmpOp::Gte,
rhs: Rhs::Const(0xE0),
});
let lte = Pred::Atom(Atom {
index: first,
op: CmpOp::Lte,
rhs: Rhs::Const(0xEF),
});
let second = iter.next().unwrap();
let third = iter.next().unwrap();
Pred::And(vec![
lte,
gte,
is_unicode_continuation(second),
is_unicode_continuation(third),
])
}
fn is_4_byte_unicode(range: RangeSet<usize>) -> Pred {
assert!(range.len() == 4);
let mut iter = range.iter();
let first = iter.next().unwrap();
// should be 11110xxx
let gte = Pred::Atom(Atom {
index: first,
op: CmpOp::Gte,
rhs: Rhs::Const(0xF0),
});
let lte = Pred::Atom(Atom {
index: first,
op: CmpOp::Lte,
rhs: Rhs::Const(0xF7),
});
let second = iter.next().unwrap();
let third = iter.next().unwrap();
let fourth = iter.next().unwrap();
Pred::And(vec![
lte,
gte,
is_unicode_continuation(second),
is_unicode_continuation(third),
is_unicode_continuation(fourth),
])
}
fn is_unicode_continuation(pos: usize) -> Pred {
// should be 10xxxxxx
let gte = Pred::Atom(Atom {
index: pos,
op: CmpOp::Gte,
rhs: Rhs::Const(0x80),
});
let lte = Pred::Atom(Atom {
index: pos,
op: CmpOp::Lte,
rhs: Rhs::Const(0xBF),
});
Pred::And(vec![lte, gte])
}
fn is_ascii_hex_digit(pos: usize) -> Pred {
let gte = Pred::Atom(Atom {
index: pos,
op: CmpOp::Gte,
rhs: Rhs::Const(48u8),
});
let lte = Pred::Atom(Atom {
index: pos,
op: CmpOp::Lte,
rhs: Rhs::Const(57u8),
});
let is_digit = Pred::And(vec![lte, gte]);
let gte = Pred::Atom(Atom {
index: pos,
op: CmpOp::Gte,
rhs: Rhs::Const(65u8),
});
let lte = Pred::Atom(Atom {
index: pos,
op: CmpOp::Lte,
rhs: Rhs::Const(70u8),
});
let is_upper = Pred::And(vec![lte, gte]);
let gte = Pred::Atom(Atom {
index: pos,
op: CmpOp::Gte,
rhs: Rhs::Const(97u8),
});
let lte = Pred::Atom(Atom {
index: pos,
op: CmpOp::Lte,
rhs: Rhs::Const(102u8),
});
let is_lower = Pred::And(vec![lte, gte]);
Pred::Or(vec![is_digit, is_lower, is_upper])
}
fn is_ascii_lowercase(pos: usize) -> Pred {
let gte = Pred::Atom(Atom {
index: pos,
op: CmpOp::Gte,
rhs: Rhs::Const(97u8),
});
let lte = Pred::Atom(Atom {
index: pos,
op: CmpOp::Lte,
rhs: Rhs::Const(122u8),
});
Pred::And(vec![lte, gte])
}
#[cfg(test)]
mod test {
use super::*;
use mpz_circuits::evaluate;
use rand::rng;
#[test]
fn test_and() {
let pred = Pred::And(vec![
Pred::Atom(Atom {
index: 100,
op: CmpOp::Lt,
rhs: Rhs::Idx(300),
}),
Pred::Atom(Atom {
index: 200,
op: CmpOp::Eq,
rhs: Rhs::Const(2u8),
}),
]);
let circ = Compiler::new().compile(&pred);
let out: bool = evaluate!(circ, [1u8, 2, 3]).unwrap();
assert_eq!(out, true);
let out: bool = evaluate!(circ, [1u8, 3, 3]).unwrap();
assert_eq!(out, false);
}
#[test]
fn test_or() {
let pred = Pred::Or(vec![
Pred::Atom(Atom {
index: 100,
op: CmpOp::Lt,
rhs: Rhs::Idx(300),
}),
Pred::Atom(Atom {
index: 200,
op: CmpOp::Eq,
rhs: Rhs::Const(2u8),
}),
]);
let circ = Compiler::new().compile(&pred);
let out: bool = evaluate!(circ, [1u8, 0, 3]).unwrap();
assert_eq!(out, true);
let out: bool = evaluate!(circ, [1u8, 3, 0]).unwrap();
assert_eq!(out, false);
}
#[test]
fn test_not() {
let pred = Pred::Not(Box::new(Pred::Atom(Atom {
index: 100,
op: CmpOp::Lt,
rhs: Rhs::Idx(300),
})));
let circ = Compiler::new().compile(&pred);
let out: bool = evaluate!(circ, [5u8, 3]).unwrap();
assert_eq!(out, true);
let out: bool = evaluate!(circ, [1u8, 3]).unwrap();
assert_eq!(out, false);
}
// Tests when RHS is a const.
#[test]
fn test_rhs_const() {
let pred = Pred::Atom(Atom {
index: 100,
op: CmpOp::Lt,
rhs: Rhs::Const(22u8),
});
let circ = Compiler::new().compile(&pred);
let out: bool = evaluate!(circ, 5u8).unwrap();
assert_eq!(out, true);
let out: bool = evaluate!(circ, 23u8).unwrap();
assert_eq!(out, false);
}
// Tests when RHS is an index.
#[test]
fn test_rhs_idx() {
let pred = Pred::Atom(Atom {
index: 100,
op: CmpOp::Lt,
rhs: Rhs::Idx(200),
});
let circ = Compiler::new().compile(&pred);
let out: bool = evaluate!(circ, 5u8, 10u8).unwrap();
assert_eq!(out, true);
let out: bool = evaluate!(circ, 23u8, 5u8).unwrap();
assert_eq!(out, false);
}
// Tests when same index is used in the predicate.
#[test]
fn test_same_idx() {
let pred1 = Pred::Atom(Atom {
index: 100,
op: CmpOp::Eq,
rhs: Rhs::Idx(100),
});
let pred2 = Pred::Atom(Atom {
index: 100,
op: CmpOp::Lt,
rhs: Rhs::Idx(100),
});
let circ = Compiler::new().compile(&pred1);
let out: bool = evaluate!(circ, 5u8).unwrap();
assert_eq!(out, true);
let circ = Compiler::new().compile(&pred2);
let out: bool = evaluate!(circ, 5u8).unwrap();
assert_eq!(out, false);
}
#[test]
fn test_atom_eq() {
let pred = Pred::Atom(Atom {
index: 100,
op: CmpOp::Eq,
rhs: Rhs::Idx(300),
});
let circ = Compiler::new().compile(&pred);
let out: bool = evaluate!(circ, [5u8, 5]).unwrap();
assert_eq!(out, true);
let out: bool = evaluate!(circ, [1u8, 3]).unwrap();
assert_eq!(out, false);
}
#[test]
fn test_atom_neq() {
let pred = Pred::Atom(Atom {
index: 100,
op: CmpOp::Ne,
rhs: Rhs::Idx(300),
});
let circ = Compiler::new().compile(&pred);
let out: bool = evaluate!(circ, [5u8, 6]).unwrap();
assert_eq!(out, true);
let out: bool = evaluate!(circ, [1u8, 1]).unwrap();
assert_eq!(out, false);
}
#[test]
fn test_atom_gt() {
let pred = Pred::Atom(Atom {
index: 100,
op: CmpOp::Gt,
rhs: Rhs::Idx(300),
});
let circ = Compiler::new().compile(&pred);
let out: bool = evaluate!(circ, [7u8, 6]).unwrap();
assert_eq!(out, true);
let out: bool = evaluate!(circ, [1u8, 1]).unwrap();
assert_eq!(out, false);
}
#[test]
fn test_atom_gte() {
let pred = Pred::Atom(Atom {
index: 100,
op: CmpOp::Gte,
rhs: Rhs::Idx(300),
});
let circ = Compiler::new().compile(&pred);
let out: bool = evaluate!(circ, [7u8, 7]).unwrap();
assert_eq!(out, true);
let out: bool = evaluate!(circ, [0u8, 1]).unwrap();
assert_eq!(out, false);
}
#[test]
fn test_atom_lt() {
let pred = Pred::Atom(Atom {
index: 100,
op: CmpOp::Lt,
rhs: Rhs::Idx(300),
});
let circ = Compiler::new().compile(&pred);
let out: bool = evaluate!(circ, [2u8, 7]).unwrap();
assert_eq!(out, true);
let out: bool = evaluate!(circ, [4u8, 1]).unwrap();
assert_eq!(out, false);
}
#[test]
fn test_atom_lte() {
let pred = Pred::Atom(Atom {
index: 100,
op: CmpOp::Lte,
rhs: Rhs::Idx(300),
});
let circ = Compiler::new().compile(&pred);
let out: bool = evaluate!(circ, [2u8, 2]).unwrap();
assert_eq!(out, true);
let out: bool = evaluate!(circ, [4u8, 1]).unwrap();
assert_eq!(out, false);
}
#[test]
fn test_is_ascii_integer() {
let text = "text with integers 123456 text";
let pos = text.find("123456").unwrap();
let pred = is_ascii_integer(RangeSet::from(pos..pos + 6));
let bytes: &[u8] = text.as_bytes();
let out = eval_pred(&pred, bytes);
assert_eq!(out, true);
let out = eval_pred(&pred, &[&[0u8], bytes].concat());
assert_eq!(out, false);
}
#[test]
fn test_is_valid_http_header_value() {
let valid = "valid header value";
let invalid = "invalid header \r value";
let pred = is_valid_http_header_value(RangeSet::from(0..valid.len()));
let out: bool = eval_pred(&pred, valid.as_bytes());
assert_eq!(out, true);
let pred = is_valid_http_header_value(RangeSet::from(0..invalid.len()));
let out = eval_pred(&pred, invalid.as_bytes());
assert_eq!(out, false);
}
#[test]
fn test_is_unicode() {
use rand::{distr::Alphanumeric, rng, Rng};
let mut rng = rng();
for _ in 0..1000000 {
let mut s = String::from("HelloWorld");
let insert_pos = 5; // logical character index (after "Hello")
let byte_index = s
.char_indices()
.nth(insert_pos)
.map(|(i, _)| i)
.unwrap_or_else(|| s.len());
// Pick a random Unicode scalar value (0x0000..=0x10FFFF)
// Retry if it's in the surrogate range (U+D800..=U+DFFF)
let c = loop {
let code = rng.random_range(0x0000u32..=0x10FFFF);
if !(0xD800..=0xDFFF).contains(&code) {
if let Some(ch) = char::from_u32(code) {
break ch;
}
}
};
let mut buf = [0u8; 4]; // max UTF-8 length
let encoded = c.encode_utf8(&mut buf); // returns &str
let len = encoded.len();
s.insert_str(byte_index, &c.to_string());
let pred = is_unicode(RangeSet::from(byte_index..byte_index + len));
let out = eval_pred(&pred, s.as_bytes());
assert_eq!(out, true);
}
let bad_unicode = 255u8;
let pred = is_unicode(RangeSet::from(0..1));
let out = eval_pred(&pred, &[bad_unicode]);
assert_eq!(out, false);
}
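// Illustrative sketch (not part of the original source): a fixed worked example
// of the 3-byte UTF-8 case. The Euro sign '€' encodes as [0xE2, 0x82, 0xAC];
// its lead byte falls in 0xE0..=0xEF and both continuation bytes fall in
// 0x80..=0xBF, so the predicate over bytes 0..3 holds.
#[test]
fn test_is_unicode_three_byte() {
let bytes: &[u8] = "€".as_bytes();
let pred = is_unicode(RangeSet::from(0..3));
let out = eval_pred(&pred, bytes);
assert_eq!(out, true);
// A continuation byte in the lead position must be rejected.
let out = eval_pred(&pred, &[0x80, 0x82, 0xAC]);
assert_eq!(out, false);
}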
}

View File

@@ -38,7 +38,8 @@ pub use commit::{
pub use proof::{
TranscriptProof, TranscriptProofBuilder, TranscriptProofBuilderError, TranscriptProofError,
};
pub use tls::{ContentType, Record, TlsTranscript};
pub use tls::{Record, TlsTranscript};
pub use tls_core::msgs::enums::ContentType;
/// A transcript contains the plaintext of all application data communicated
/// between the Prover and the Server.

View File

@@ -25,9 +25,6 @@ const DEFAULT_COMMITMENT_KINDS: &[TranscriptCommitmentKind] = &[
TranscriptCommitmentKind::Hash {
alg: HashAlgId::BLAKE3,
},
TranscriptCommitmentKind::Hash {
alg: HashAlgId::KECCAK256,
},
TranscriptCommitmentKind::Encoding,
];
@@ -659,7 +656,6 @@ mod tests {
#[rstest]
#[case::sha256(HashAlgId::SHA256)]
#[case::blake3(HashAlgId::BLAKE3)]
#[case::keccak256(HashAlgId::KECCAK256)]
fn test_reveal_with_hash_commitment(#[case] alg: HashAlgId) {
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
let provider = HashProvider::default();
@@ -708,7 +704,6 @@ mod tests {
#[rstest]
#[case::sha256(HashAlgId::SHA256)]
#[case::blake3(HashAlgId::BLAKE3)]
#[case::keccak256(HashAlgId::KECCAK256)]
fn test_reveal_with_inconsistent_hash_commitment(#[case] alg: HashAlgId) {
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
let provider = HashProvider::default();

View File

@@ -7,58 +7,16 @@ use crate::{
transcript::{Direction, Transcript},
webpki::CertificateDer,
};
use serde::{Deserialize, Serialize};
use tls_core::msgs::{
alert::AlertMessagePayload,
codec::{Codec, Reader},
enums::{AlertDescription, ProtocolVersion},
enums::{AlertDescription, ContentType, ProtocolVersion},
handshake::{HandshakeMessagePayload, HandshakePayload},
};
/// TLS record content type.
#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub enum ContentType {
/// Change cipher spec protocol.
ChangeCipherSpec,
/// Alert protocol.
Alert,
/// Handshake protocol.
Handshake,
/// Application data protocol.
ApplicationData,
/// Heartbeat protocol.
Heartbeat,
/// Unknown protocol.
Unknown(u8),
}
impl From<ContentType> for tls_core::msgs::enums::ContentType {
fn from(content_type: ContentType) -> Self {
match content_type {
ContentType::ChangeCipherSpec => tls_core::msgs::enums::ContentType::ChangeCipherSpec,
ContentType::Alert => tls_core::msgs::enums::ContentType::Alert,
ContentType::Handshake => tls_core::msgs::enums::ContentType::Handshake,
ContentType::ApplicationData => tls_core::msgs::enums::ContentType::ApplicationData,
ContentType::Heartbeat => tls_core::msgs::enums::ContentType::Heartbeat,
ContentType::Unknown(id) => tls_core::msgs::enums::ContentType::Unknown(id),
}
}
}
impl From<tls_core::msgs::enums::ContentType> for ContentType {
fn from(content_type: tls_core::msgs::enums::ContentType) -> Self {
match content_type {
tls_core::msgs::enums::ContentType::ChangeCipherSpec => ContentType::ChangeCipherSpec,
tls_core::msgs::enums::ContentType::Alert => ContentType::Alert,
tls_core::msgs::enums::ContentType::Handshake => ContentType::Handshake,
tls_core::msgs::enums::ContentType::ApplicationData => ContentType::ApplicationData,
tls_core::msgs::enums::ContentType::Heartbeat => ContentType::Heartbeat,
tls_core::msgs::enums::ContentType::Unknown(id) => ContentType::Unknown(id),
}
}
}
/// A transcript of TLS records sent and received by the prover.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsTranscript {
time: u64,
version: TlsVersion,
@@ -334,7 +292,7 @@ impl TlsTranscript {
}
/// A TLS record.
#[derive(Clone)]
#[derive(Clone, Serialize, Deserialize)]
pub struct Record {
/// Sequence number.
pub seq: u64,

View File

@@ -53,21 +53,6 @@ impl RootCertStore {
pub fn empty() -> Self {
Self { roots: Vec::new() }
}
/// Creates a root certificate store with Mozilla root certificates.
///
/// These certificates are sourced from [`webpki-root-certs`](https://docs.rs/webpki-root-certs/latest/webpki_root_certs/). It is not recommended to use these unless the
/// application binary can be recompiled and deployed on-demand in the case
/// that the root certificates need to be updated.
#[cfg(feature = "mozilla-certs")]
pub fn mozilla() -> Self {
Self {
roots: webpki_root_certs::TLS_SERVER_ROOT_CERTS
.iter()
.map(|cert| CertificateDer(cert.to_vec()))
.collect(),
}
}
}
/// Server certificate verifier.
@@ -97,12 +82,8 @@ impl ServerCertVerifier {
Ok(Self { roots })
}
/// Creates a server certificate verifier with Mozilla root certificates.
///
/// These certificates are sourced from [`webpki-root-certs`](https://docs.rs/webpki-root-certs/latest/webpki_root_certs/). It is not recommended to use these unless the
/// application binary can be recompiled and deployed on-demand in the case
/// that the root certificates need to be updated.
#[cfg(feature = "mozilla-certs")]
/// Creates a new server certificate verifier with Mozilla root
/// certificates.
pub fn mozilla() -> Self {
Self {
roots: webpki_roots::TLS_SERVER_ROOTS.to_vec(),

View File

@@ -3,4 +3,4 @@ Cookie: very-secret-cookie
Content-Length: 44
Content-Type: application/json
{"foo": "bar", "bazz": 123, "buzz": [1,"5"]}
{"foo": "bar", "bazz": 123, "buzz": [1,"5"]}

View File

@@ -4,7 +4,5 @@ This folder contains examples demonstrating how to use the TLSNotary protocol.
* [Interactive](./interactive/README.md): Interactive Prover and Verifier session without a trusted notary.
* [Attestation](./attestation/README.md): Performing a simple notarization with a trusted notary.
* [Interactive_zk](./interactive_zk/README.md): Interactive Prover and Verifier session demonstrating zero-knowledge age verification using Noir.
Refer to <https://tlsnotary.org/docs/quick_start> for a quick start guide to using TLSNotary with these examples.

View File

@@ -4,7 +4,6 @@
use std::env;
use anyhow::{anyhow, Result};
use clap::Parser;
use http_body_util::Empty;
use hyper::{body::Bytes, Request, StatusCode};
@@ -23,18 +22,11 @@ use tlsn::{
signing::Secp256k1Signer,
Attestation, AttestationConfig, CryptoProvider, Secrets,
},
config::{
prove::ProveConfig,
prover::ProverConfig,
tls::TlsClientConfig,
tls_commit::{mpc::MpcTlsConfig, TlsCommitConfig},
verifier::VerifierConfig,
},
config::{CertificateDer, PrivateKeyDer, ProtocolConfig, RootCertStore},
connection::{ConnectionInfo, HandshakeData, ServerName, TranscriptLength},
prover::{state::Committed, Prover, ProverOutput},
prover::{state::Committed, ProveConfig, Prover, ProverConfig, ProverOutput, TlsConfig},
transcript::{ContentType, TranscriptCommitConfig},
verifier::{Verifier, VerifierOutput},
webpki::{CertificateDer, PrivateKeyDer, RootCertStore},
verifier::{Verifier, VerifierConfig, VerifierOutput},
};
use tlsn_examples::ExampleType;
use tlsn_formats::http::{DefaultHttpCommitter, HttpCommit, HttpTranscript};
@@ -53,7 +45,7 @@ struct Args {
}
#[tokio::main]
async fn main() -> Result<()> {
async fn main() -> Result<(), Box<dyn std::error::Error>> {
tracing_subscriber::fmt::init();
let args = Args::parse();
@@ -93,63 +85,64 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
uri: &str,
extra_headers: Vec<(&str, &str)>,
example_type: &ExampleType,
) -> Result<()> {
) -> Result<(), Box<dyn std::error::Error>> {
let server_host: String = env::var("SERVER_HOST").unwrap_or("127.0.0.1".into());
let server_port: u16 = env::var("SERVER_PORT")
.map(|port| port.parse().expect("port should be valid integer"))
.unwrap_or(DEFAULT_FIXTURE_PORT);
// Create a new prover and perform necessary setup.
let prover = Prover::new(ProverConfig::builder().build()?)
.commit(
TlsCommitConfig::builder()
// Select the TLS commitment protocol.
.protocol(
MpcTlsConfig::builder()
// We must configure the amount of data we expect to exchange beforehand,
// which will be preprocessed prior to the
// connection. Reducing these limits will improve
// performance.
.max_sent_data(tlsn_examples::MAX_SENT_DATA)
.max_recv_data(tlsn_examples::MAX_RECV_DATA)
.build()?,
)
// Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
let mut tls_config_builder = TlsConfig::builder();
tls_config_builder
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
// (Optional) Set up TLS client authentication if required by the server.
.client_auth((
vec![CertificateDer(CLIENT_CERT_DER.to_vec())],
PrivateKeyDer(CLIENT_KEY_DER.to_vec()),
));
let tls_config = tls_config_builder.build().unwrap();
// Set up protocol configuration for prover.
let mut prover_config_builder = ProverConfig::builder();
prover_config_builder
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
.tls_config(tls_config)
.protocol_config(
ProtocolConfig::builder()
// We must configure the amount of data we expect to exchange beforehand, which will
// be preprocessed prior to the connection. Reducing these limits will improve
// performance.
.max_sent_data(tlsn_examples::MAX_SENT_DATA)
.max_recv_data(tlsn_examples::MAX_RECV_DATA)
.build()?,
socket.compat(),
)
.await?;
);
let prover_config = prover_config_builder.build()?;
// Create a new prover and perform necessary setup.
let prover = Prover::new(prover_config).setup(socket.compat()).await?;
// Open a TCP connection to the server.
let client_socket = tokio::net::TcpStream::connect((server_host, server_port)).await?;
// Bind the prover to the server connection.
let (tls_connection, prover_fut) = prover
.connect(
TlsClientConfig::builder()
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into()?))
// Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
// (Optional) Set up TLS client authentication if required by the server.
.client_auth((
vec![CertificateDer(CLIENT_CERT_DER.to_vec())],
PrivateKeyDer(CLIENT_KEY_DER.to_vec()),
))
.build()?,
client_socket.compat(),
)
.await?;
let tls_connection = TokioIo::new(tls_connection.compat());
// The returned `mpc_tls_connection` is an MPC TLS connection to the server: all
// data written to/read from it will be encrypted/decrypted using MPC with
// the notary.
let (mpc_tls_connection, prover_fut) = prover.connect(client_socket.compat()).await?;
let mpc_tls_connection = TokioIo::new(mpc_tls_connection.compat());
// Spawn the prover task to be run concurrently in the background.
let prover_task = tokio::spawn(prover_fut);
// Attach the hyper HTTP client to the connection.
let (mut request_sender, connection) =
hyper::client::conn::http1::handshake(tls_connection).await?;
hyper::client::conn::http1::handshake(mpc_tls_connection).await?;
// Spawn the HTTP task to be run concurrently in the background.
tokio::spawn(connection);
@@ -170,7 +163,7 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
}
let request = request_builder.body(Empty::<Bytes>::new())?;
info!("Starting connection with the server");
info!("Starting an MPC TLS connection with the server");
// Send the request to the server and wait for the response.
let response = request_sender.send_request(request).await?;
@@ -247,7 +240,7 @@ async fn notarize(
config: &RequestConfig,
request_tx: Sender<AttestationRequest>,
attestation_rx: Receiver<Attestation>,
) -> Result<(Attestation, Secrets)> {
) -> Result<(Attestation, Secrets), Box<dyn std::error::Error>> {
let mut builder = ProveConfig::builder(prover.transcript());
if let Some(config) = config.transcript_commit() {
@@ -290,18 +283,15 @@ async fn notarize(
// Send attestation request to notary.
request_tx
.send(request.clone())
.map_err(|_| anyhow!("notary is not receiving attestation request"))?;
.map_err(|_| "notary is not receiving attestation request".to_string())?;
// Receive attestation from notary.
let attestation = attestation_rx
.await
.map_err(|err| anyhow!("notary did not respond with attestation: {err}"))?;
// Signature verifier for the signature algorithm in the request.
let provider = CryptoProvider::default();
.map_err(|err| format!("notary did not respond with attestation: {err}"))?;
// Check the attestation is consistent with the Prover's view.
request.validate(&attestation, &provider)?;
request.validate(&attestation)?;
Ok((attestation, secrets))
}
@@ -310,7 +300,7 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
socket: S,
request_rx: Receiver<AttestationRequest>,
attestation_tx: Sender<Attestation>,
) -> Result<()> {
) -> Result<(), Box<dyn std::error::Error>> {
// Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
@@ -322,7 +312,7 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
.unwrap();
let verifier = Verifier::new(verifier_config)
.commit(socket.compat())
.setup(socket.compat())
.await?
.accept()
.await?
@@ -332,7 +322,6 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
let (
VerifierOutput {
transcript_commitments,
encoder_secret,
..
},
verifier,
@@ -393,16 +382,12 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
.server_ephemeral_key(tls_transcript.server_ephemeral_key().clone())
.transcript_commitments(transcript_commitments);
if let Some(encoder_secret) = encoder_secret {
builder.encoder_secret(encoder_secret);
}
let attestation = builder.build(&provider)?;
// Send attestation to prover.
attestation_tx
.send(attestation)
.map_err(|_| anyhow!("prover is not receiving attestation"))?;
.map_err(|_| "prover is not receiving attestation".to_string())?;
Ok(())
}

View File

@@ -12,8 +12,8 @@ use tlsn::{
signing::VerifyingKey,
CryptoProvider,
},
config::{CertificateDer, RootCertStore},
verifier::ServerCertVerifier,
webpki::{CertificateDer, RootCertStore},
};
use tlsn_examples::ExampleType;
use tlsn_server_fixture_certs::CA_CERT_DER;

View File

@@ -12,18 +12,11 @@ use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
use tracing::instrument;
use tlsn::{
config::{
prove::ProveConfig,
prover::ProverConfig,
tls::TlsClientConfig,
tls_commit::{mpc::MpcTlsConfig, TlsCommitConfig, TlsCommitProtocolConfig},
verifier::VerifierConfig,
},
config::{CertificateDer, ProtocolConfig, RootCertStore},
connection::ServerName,
prover::Prover,
prover::{ProveConfig, Prover, ProverConfig, TlsConfig},
transcript::PartialTranscript,
verifier::{Verifier, VerifierOutput},
webpki::{CertificateDer, RootCertStore},
verifier::{Verifier, VerifierConfig, VerifierOutput},
};
use tlsn_server_fixture::DEFAULT_FIXTURE_PORT;
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
@@ -77,52 +70,52 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
assert_eq!(uri.scheme().unwrap().as_str(), "https");
let server_domain = uri.authority().unwrap().host();
// Create a new prover and perform necessary setup.
let prover = Prover::new(ProverConfig::builder().build()?)
.commit(
TlsCommitConfig::builder()
// Select the TLS commitment protocol.
.protocol(
MpcTlsConfig::builder()
// We must configure the amount of data we expect to exchange beforehand,
// which will be preprocessed prior to the
// connection. Reducing these limits will improve
// performance.
.max_sent_data(tlsn_examples::MAX_SENT_DATA)
.max_recv_data(tlsn_examples::MAX_RECV_DATA)
.build()?,
)
.build()?,
verifier_socket.compat(),
)
// Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
let mut tls_config_builder = TlsConfig::builder();
tls_config_builder.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
});
let tls_config = tls_config_builder.build().unwrap();
// Set up protocol configuration for prover.
let mut prover_config_builder = ProverConfig::builder();
prover_config_builder
.server_name(ServerName::Dns(server_domain.try_into().unwrap()))
.tls_config(tls_config)
.protocol_config(
ProtocolConfig::builder()
.max_sent_data(MAX_SENT_DATA)
.max_recv_data(MAX_RECV_DATA)
.build()
.unwrap(),
);
let prover_config = prover_config_builder.build().unwrap();
// Create prover and connect to verifier.
//
// Perform the setup phase with the verifier.
let prover = Prover::new(prover_config)
.setup(verifier_socket.compat())
.await?;
// Open a TCP connection to the server.
let client_socket = tokio::net::TcpStream::connect(server_addr).await?;
// Connect to TLS Server.
let tls_client_socket = tokio::net::TcpStream::connect(server_addr).await?;
// Bind the prover to the server connection.
let (tls_connection, prover_fut) = prover
.connect(
TlsClientConfig::builder()
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into()?))
// Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.build()?,
client_socket.compat(),
)
.await?;
let tls_connection = TokioIo::new(tls_connection.compat());
// Pass server connection into the prover.
let (mpc_tls_connection, prover_fut) = prover.connect(tls_client_socket.compat()).await?;
// Wrap the connection in a TokioIo compatibility layer to use it with hyper.
let mpc_tls_connection = TokioIo::new(mpc_tls_connection.compat());
// Spawn the Prover to run in the background.
let prover_task = tokio::spawn(prover_fut);
// MPC-TLS Handshake.
let (mut request_sender, connection) =
hyper::client::conn::http1::handshake(tls_connection).await?;
hyper::client::conn::http1::handshake(mpc_tls_connection).await?;
// Spawn the connection to run in the background.
tokio::spawn(connection);
@@ -194,21 +187,16 @@ async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
let verifier = Verifier::new(verifier_config);
// Validate the proposed configuration and then run the TLS commitment protocol.
let verifier = verifier.commit(socket.compat()).await?;
let verifier = verifier.setup(socket.compat()).await?;
// This is the opportunity to ensure the prover does not attempt to overload the
// verifier.
let reject = if let TlsCommitProtocolConfig::Mpc(mpc_tls_config) = verifier.request().protocol()
{
if mpc_tls_config.max_sent_data() > MAX_SENT_DATA {
Some("max_sent_data is too large")
} else if mpc_tls_config.max_recv_data() > MAX_RECV_DATA {
Some("max_recv_data is too large")
} else {
None
}
let reject = if verifier.config().max_sent_data() > MAX_SENT_DATA {
Some("max_sent_data is too large")
} else if verifier.config().max_recv_data() > MAX_RECV_DATA {
Some("max_recv_data is too large")
} else {
Some("expecting to use MPC-TLS")
None
};
if reject.is_some() {
@@ -222,7 +210,7 @@ async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
// Validate the proving request and then verify.
let verifier = verifier.verify().await?;
if !verifier.request().server_identity() {
if verifier.request().handshake.is_none() {
let verifier = verifier
.reject(Some("expecting to verify the server name"))
.await?;

View File

@@ -22,27 +22,24 @@ use spansy::{
http::{BodyContent, Requests, Responses},
Spanned,
};
use tls_server_fixture::{CA_CERT_DER, SERVER_DOMAIN};
use tls_server_fixture::CA_CERT_DER;
use tlsn::{
config::{
prove::{ProveConfig, ProveConfigBuilder},
prover::ProverConfig,
tls::TlsClientConfig,
tls_commit::{mpc::MpcTlsConfig, TlsCommitConfig},
},
config::{CertificateDer, ProtocolConfig, RootCertStore},
connection::ServerName,
hash::HashAlgId,
prover::Prover,
prover::{ProveConfig, ProveConfigBuilder, Prover, ProverConfig, TlsConfig},
transcript::{
hash::{PlaintextHash, PlaintextHashSecret},
Direction, TranscriptCommitConfig, TranscriptCommitConfigBuilder, TranscriptCommitmentKind,
TranscriptSecret,
},
webpki::{CertificateDer, RootCertStore},
};
use tlsn_examples::{MAX_RECV_DATA, MAX_SENT_DATA};
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
use tlsn_examples::MAX_RECV_DATA;
use tokio::io::AsyncWriteExt;
use tlsn_examples::MAX_SENT_DATA;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
use tracing::instrument;
@@ -64,52 +61,51 @@ pub async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
.ok_or_else(|| anyhow::anyhow!("URI must have authority"))?
.host();
// Create a new prover and perform necessary setup.
let prover = Prover::new(ProverConfig::builder().build()?)
.commit(
TlsCommitConfig::builder()
// Select the TLS commitment protocol.
.protocol(
MpcTlsConfig::builder()
// We must configure the amount of data we expect to exchange beforehand,
// which will be preprocessed prior to the
// connection. Reducing these limits will improve
// performance.
.max_sent_data(MAX_SENT_DATA)
.max_recv_data(MAX_RECV_DATA)
.build()?,
)
// Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
let mut tls_config_builder = TlsConfig::builder();
tls_config_builder.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
});
let tls_config = tls_config_builder.build()?;
// Set up protocol configuration for prover.
let mut prover_config_builder = ProverConfig::builder();
prover_config_builder
.server_name(ServerName::Dns(server_domain.try_into()?))
.tls_config(tls_config)
.protocol_config(
ProtocolConfig::builder()
.max_sent_data(MAX_SENT_DATA)
.max_recv_data(MAX_RECV_DATA)
.build()?,
verifier_socket.compat(),
)
);
let prover_config = prover_config_builder.build()?;
// Create prover and connect to verifier.
//
// Perform the setup phase with the verifier.
let prover = Prover::new(prover_config)
.setup(verifier_socket.compat())
.await?;
// Open a TCP connection to the server.
let client_socket = tokio::net::TcpStream::connect(server_addr).await?;
// Connect to TLS Server.
let tls_client_socket = tokio::net::TcpStream::connect(server_addr).await?;
// Bind the prover to the server connection.
let (tls_connection, prover_fut) = prover
.connect(
TlsClientConfig::builder()
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into()?))
// Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.build()?,
client_socket.compat(),
)
.await?;
let tls_connection = TokioIo::new(tls_connection.compat());
// Pass server connection into the prover.
let (mpc_tls_connection, prover_fut) = prover.connect(tls_client_socket.compat()).await?;
// Wrap the connection in a TokioIo compatibility layer to use it with hyper.
let mpc_tls_connection = TokioIo::new(mpc_tls_connection.compat());
// Spawn the Prover to run in the background.
let prover_task = tokio::spawn(prover_fut);
// MPC-TLS Handshake.
let (mut request_sender, connection) =
hyper::client::conn::http1::handshake(tls_connection).await?;
hyper::client::conn::http1::handshake(mpc_tls_connection).await?;
// Spawn the connection to run in the background.
tokio::spawn(connection);

View File

@@ -7,12 +7,11 @@ use noir::barretenberg::verify::{get_ultra_honk_verification_key, verify_ultra_h
use serde_json::Value;
use tls_server_fixture::CA_CERT_DER;
use tlsn::{
config::{tls_commit::TlsCommitProtocolConfig, verifier::VerifierConfig},
config::{CertificateDer, RootCertStore},
connection::ServerName,
hash::HashAlgId,
transcript::{Direction, PartialTranscript},
verifier::{Verifier, VerifierOutput},
webpki::{CertificateDer, RootCertStore},
verifier::{Verifier, VerifierConfig, VerifierOutput},
};
use tlsn_examples::{MAX_RECV_DATA, MAX_SENT_DATA};
use tlsn_server_fixture_certs::SERVER_DOMAIN;
@@ -25,33 +24,28 @@ pub async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>
socket: T,
mut extra_socket: T,
) -> Result<PartialTranscript> {
let verifier = Verifier::new(
VerifierConfig::builder()
// Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.build()?,
);
// Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
let verifier_config = VerifierConfig::builder()
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.build()?;
let verifier = Verifier::new(verifier_config);
// Validate the proposed configuration and then run the TLS commitment protocol.
let verifier = verifier.commit(socket.compat()).await?;
let verifier = verifier.setup(socket.compat()).await?;
// This is the opportunity to ensure the prover does not attempt to overload the
// verifier.
let reject = if let TlsCommitProtocolConfig::Mpc(mpc_tls_config) = verifier.request().protocol()
{
if mpc_tls_config.max_sent_data() > MAX_SENT_DATA {
Some("max_sent_data is too large")
} else if mpc_tls_config.max_recv_data() > MAX_RECV_DATA {
Some("max_recv_data is too large")
} else {
None
}
let reject = if verifier.config().max_sent_data() > MAX_SENT_DATA {
Some("max_sent_data is too large")
} else if verifier.config().max_recv_data() > MAX_RECV_DATA {
Some("max_recv_data is too large")
} else {
Some("expecting to use MPC-TLS")
None
};
if reject.is_some() {
@@ -66,7 +60,7 @@ pub async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>
let verifier = verifier.verify().await?;
let request = verifier.request();
if !request.server_identity() || request.reveal().is_none() {
if request.handshake.is_none() || request.transcript.is_none() {
let verifier = verifier
.reject(Some(
"expecting to verify the server name and transcript data",

View File

@@ -5,15 +5,9 @@ use futures::{AsyncReadExt, AsyncWriteExt, TryFutureExt};
use harness_core::bench::{Bench, ProverMetrics};
use tlsn::{
config::{
prove::ProveConfig,
prover::ProverConfig,
tls::TlsClientConfig,
tls_commit::{TlsCommitConfig, mpc::MpcTlsConfig},
},
config::{CertificateDer, ProtocolConfig, RootCertStore},
connection::ServerName,
prover::Prover,
webpki::{CertificateDer, RootCertStore},
prover::{ProveConfig, Prover, ProverConfig, TlsConfig},
};
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
@@ -28,47 +22,41 @@ pub async fn bench_prover(provider: &IoProvider, config: &Bench) -> Result<Prove
let sent = verifier_io.sent();
let recv = verifier_io.recv();
let prover = Prover::new(ProverConfig::builder().build()?);
let mut builder = ProtocolConfig::builder();
builder.max_sent_data(config.upload_size);
builder.defer_decryption_from_start(config.defer_decryption);
if !config.defer_decryption {
builder.max_recv_data_online(config.download_size + RECV_PADDING);
}
builder.max_recv_data(config.download_size + RECV_PADDING);
let protocol_config = builder.build()?;
let mut tls_config_builder = TlsConfig::builder();
tls_config_builder.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
});
let tls_config = tls_config_builder.build()?;
let prover = Prover::new(
ProverConfig::builder()
.tls_config(tls_config)
.protocol_config(protocol_config)
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
.build()?,
);
let time_start = web_time::Instant::now();
let prover = prover
.commit(
TlsCommitConfig::builder()
.protocol({
let mut builder = MpcTlsConfig::builder()
.max_sent_data(config.upload_size)
.defer_decryption_from_start(config.defer_decryption);
if !config.defer_decryption {
builder = builder.max_recv_data_online(config.download_size + RECV_PADDING);
}
builder
.max_recv_data(config.download_size + RECV_PADDING)
.build()
}?)
.build()?,
verifier_io,
)
.await?;
let prover = prover.setup(verifier_io).await?;
let time_preprocess = time_start.elapsed().as_millis();
let time_start_online = web_time::Instant::now();
let uploaded_preprocess = sent.load(Ordering::Relaxed);
let downloaded_preprocess = recv.load(Ordering::Relaxed);
let (mut conn, prover_fut) = prover
.connect(
TlsClientConfig::builder()
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into()?))
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.build()?,
provider.provide_server_io().await?,
)
.await?;
let (mut conn, prover_fut) = prover.connect(provider.provide_server_io().await?).await?;
let (_, mut prover) = futures::try_join!(
async {

View File

@@ -2,9 +2,8 @@ use anyhow::Result;
use harness_core::bench::Bench;
use tlsn::{
config::verifier::VerifierConfig,
verifier::Verifier,
webpki::{CertificateDer, RootCertStore},
config::{CertificateDer, RootCertStore},
verifier::{Verifier, VerifierConfig},
};
use tlsn_server_fixture_certs::CA_CERT_DER;
@@ -20,7 +19,7 @@ pub async fn bench_verifier(provider: &IoProvider, _config: &Bench) -> Result<()
);
let verifier = verifier
.commit(provider.provide_proto_io().await?)
.setup(provider.provide_proto_io().await?)
.await?
.accept()
.await?

View File

@@ -1,17 +1,10 @@
use tlsn::{
config::{
prove::ProveConfig,
prover::ProverConfig,
tls::TlsClientConfig,
tls_commit::{TlsCommitConfig, mpc::MpcTlsConfig},
verifier::VerifierConfig,
},
config::{CertificateDer, ProtocolConfig, RootCertStore},
connection::ServerName,
hash::HashAlgId,
prover::Prover,
prover::{ProveConfig, Prover, ProverConfig, TlsConfig},
transcript::{TranscriptCommitConfig, TranscriptCommitment, TranscriptCommitmentKind},
verifier::{Verifier, VerifierOutput},
webpki::{CertificateDer, RootCertStore},
verifier::{Verifier, VerifierConfig, VerifierOutput},
};
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
@@ -28,35 +21,35 @@ const MAX_RECV_DATA: usize = 1 << 11;
crate::test!("basic", prover, verifier);
async fn prover(provider: &IoProvider) {
let prover = Prover::new(ProverConfig::builder().build().unwrap())
.commit(
TlsCommitConfig::builder()
.protocol(
MpcTlsConfig::builder()
.max_sent_data(MAX_SENT_DATA)
.max_recv_data(MAX_RECV_DATA)
.defer_decryption_from_start(true)
.build()
.unwrap(),
)
.build()
.unwrap(),
provider.provide_proto_io().await.unwrap(),
)
.await
.unwrap();
let mut tls_config_builder = TlsConfig::builder();
tls_config_builder.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
});
let tls_config = tls_config_builder.build().unwrap();
let server_name = ServerName::Dns(SERVER_DOMAIN.try_into().unwrap());
let prover = Prover::new(
ProverConfig::builder()
.server_name(server_name)
.tls_config(tls_config)
.protocol_config(
ProtocolConfig::builder()
.max_sent_data(MAX_SENT_DATA)
.max_recv_data(MAX_RECV_DATA)
.defer_decryption_from_start(true)
.build()
.unwrap(),
)
.build()
.unwrap(),
)
.setup(provider.provide_proto_io().await.unwrap())
.await
.unwrap();
let (tls_connection, prover_fut) = prover
.connect(
TlsClientConfig::builder()
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.build()
.unwrap(),
provider.provide_server_io().await.unwrap(),
)
.connect(provider.provide_server_io().await.unwrap())
.await
.unwrap();
@@ -127,7 +120,7 @@ async fn verifier(provider: &IoProvider) {
.unwrap();
let verifier = Verifier::new(config)
.commit(provider.provide_proto_io().await.unwrap())
.setup(provider.provide_proto_io().await.unwrap())
.await
.unwrap()
.accept()

View File

@@ -33,6 +33,7 @@ mpz-ole = { workspace = true }
mpz-share-conversion = { workspace = true }
mpz-vm-core = { workspace = true }
mpz-memory-core = { workspace = true }
mpz-circuits = { workspace = true }
ludi = { git = "https://github.com/sinui0/ludi", rev = "e511c3b", default-features = false }
serio = { workspace = true }
@@ -56,9 +57,9 @@ pin-project-lite = { workspace = true }
web-time = { workspace = true }
[dev-dependencies]
mpz-common = { workspace = true, features = ["test-utils"] }
mpz-ot = { workspace = true, features = ["ideal"] }
mpz-ideal-vm = { workspace = true }
mpz-ole = { workspace = true, features = ["test-utils"] }
mpz-ot = { workspace = true }
mpz-garble = { workspace = true }
cipher-crate = { package = "cipher", version = "0.4" }
generic-array = { workspace = true }

View File

@@ -487,7 +487,7 @@ impl RecordLayer {
sent_records.push(Record {
seq: op.seq,
typ: op.typ.into(),
typ: op.typ,
plaintext: op.plaintext,
explicit_nonce: op.explicit_nonce,
ciphertext,
@@ -505,7 +505,7 @@ impl RecordLayer {
recv_records.push(Record {
seq: op.seq,
typ: op.typ.into(),
typ: op.typ,
plaintext,
explicit_nonce: op.explicit_nonce,
ciphertext: op.ciphertext,
@@ -578,7 +578,7 @@ impl RecordLayer {
recv_records.push(Record {
seq: op.seq,
typ: op.typ.into(),
typ: op.typ,
plaintext,
explicit_nonce: op.explicit_nonce,
ciphertext: op.ciphertext,

View File

@@ -456,8 +456,9 @@ mod tests {
};
use mpz_common::context::test_st_context;
use mpz_core::Block;
use mpz_ideal_vm::IdealVm;
use mpz_memory_core::binary::U8;
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_memory_core::{binary::U8, correlated::Delta};
use mpz_ot::ideal::cot::ideal_cot;
use mpz_share_conversion::ideal::ideal_share_convert;
use rand::{rngs::StdRng, SeedableRng};
use rstest::*;
@@ -573,8 +574,13 @@ mod tests {
}
fn create_vm(key: [u8; 16], iv: [u8; 4]) -> ((impl Vm<Binary>, Vars), (impl Vm<Binary>, Vars)) {
let mut vm_0 = IdealVm::new();
let mut vm_1 = IdealVm::new();
let mut rng = StdRng::seed_from_u64(0);
let block = Block::random(&mut rng);
let (sender, receiver) = ideal_cot(block);
let delta = Delta::new(block);
let mut vm_0 = Garbler::new(sender, [0u8; 16], delta);
let mut vm_1 = Evaluator::new(receiver);
let key_ref_0 = vm_0.alloc::<Array<U8, 16>>().unwrap();
vm_0.mark_public(key_ref_0).unwrap();

View File

@@ -4,13 +4,14 @@ use futures::{AsyncReadExt, AsyncWriteExt};
use mpc_tls::{Config, MpcTlsFollower, MpcTlsLeader};
use mpz_common::context::test_mt_context;
use mpz_core::Block;
use mpz_ideal_vm::IdealVm;
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_memory_core::correlated::Delta;
use mpz_ot::{
cot::{DerandCOTReceiver, DerandCOTSender},
ideal::rcot::ideal_rcot,
rcot::shared::{SharedRCOTReceiver, SharedRCOTSender},
};
use rand::{rngs::StdRng, SeedableRng};
use rand::{rngs::StdRng, Rng, SeedableRng};
use rustls_pki_types::CertificateDer;
use tls_client::RootCertStore;
use tls_client_async::bind_client;
@@ -22,6 +23,7 @@ use webpki::anchor_from_trusted_cert;
const CA_CERT: CertificateDer = CertificateDer::from_slice(CA_CERT_DER);
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
#[ignore = "expensive"]
async fn mpc_tls_test() {
tracing_subscriber::fmt::init();
@@ -137,8 +139,14 @@ fn build_pair(config: Config) -> (MpcTlsLeader, MpcTlsFollower) {
let rcot_recv_a = SharedRCOTReceiver::new(rcot_recv_a);
let rcot_recv_b = SharedRCOTReceiver::new(rcot_recv_b);
let mpc_a = Arc::new(Mutex::new(IdealVm::new()));
let mpc_b = Arc::new(Mutex::new(IdealVm::new()));
let mpc_a = Arc::new(Mutex::new(Garbler::new(
DerandCOTSender::new(rcot_send_a.clone()),
rand::rng().random(),
delta_a,
)));
let mpc_b = Arc::new(Mutex::new(Evaluator::new(DerandCOTReceiver::new(
rcot_recv_b.clone(),
))));
let leader = MpcTlsLeader::new(
config.clone(),

View File

@@ -0,0 +1,22 @@
[package]
name = "tlsn-plugin-core"
version = "0.1.0"
edition = "2024"
[dependencies]
tlsn = { workspace = true }
tlsn-core = { workspace = true }
tlsn-formats = { workspace = true }
http-body-util = { workspace = true }
hyper = { workspace = true, features = ["client", "http1"] }
rangeset = { workspace = true }
serde = { workspace = true }
spansy = { workspace = true }
thiserror = { workspace = true }
[dev-dependencies]
tlsn-data-fixtures = { workspace = true }
[lints]
workspace = true

View File

@@ -0,0 +1,105 @@
//! Core types of the prover and verifier plugins.
use serde::{Deserialize, Serialize};
use tlsn_core::{
hash::HashAlgId,
transcript::{Direction, TranscriptCommitmentKind},
};
mod prover;
mod verifier;
pub use prover::{
Config as ProverPluginConfig, ConfigError as ProverPluginConfigError,
Output as ProverPluginOutput,
};
pub use verifier::{
Config as VerifierPluginConfig, ConfigError as VerifierPluginConfigError,
Output as VerifierPluginOutput,
};
/// A rule for disclosing HTTP data.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DisclosureRule {
http: HttpHandle,
policy: DisclosurePolicy,
}
/// Handle for a part of an HTTP message.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HttpHandle {
typ: MessageType,
part: MessagePart,
}
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub enum MessageType {
Request,
Response,
}
impl From<&MessageType> for Direction {
fn from(mt: &MessageType) -> Self {
match mt {
MessageType::Request => Direction::Sent,
MessageType::Response => Direction::Received,
}
}
}
/// Disclosure policy.
#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
pub enum DisclosurePolicy {
/// Reveals data.
Reveal,
/// Creates a hiding commitment.
Commit(Alg),
}
/// Commitment algorithm.
#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
pub enum Alg {
EncodingSha256,
EncodingBlake3,
EncodingKeccak256,
Sha256,
Blake3,
}
impl From<&Alg> for TranscriptCommitmentKind {
fn from(alg: &Alg) -> Self {
match alg {
Alg::EncodingSha256 | Alg::EncodingBlake3 | Alg::EncodingKeccak256 => {
TranscriptCommitmentKind::Encoding
}
Alg::Sha256 => TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256,
},
Alg::Blake3 => TranscriptCommitmentKind::Hash {
alg: HashAlgId::BLAKE3,
},
}
}
}
/// The part of an HTTP message.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub enum MessagePart {
All,
StartLine,
Header(HeaderParams),
Body(BodyParams),
}
/// Parameters for an HTTP header.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub struct HeaderParams {
pub key: String,
}
/// Parameters for a part of an HTTP body.
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub enum BodyParams {
JsonPath(String),
XPath(String),
}
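// Illustrative sketch (not part of the diff): how the types above compose into
// disclosure rules. The struct fields are private, so this assumes
// crate-internal code (e.g. a test in this module); the JSON path and header
// name are made-up example values.
#[allow(dead_code)]
fn example_rules() -> Vec<DisclosureRule> {
    vec![
        // Reveal the "balance" field of the response JSON body to the verifier.
        DisclosureRule {
            http: HttpHandle {
                typ: MessageType::Response,
                part: MessagePart::Body(BodyParams::JsonPath("balance".to_string())),
            },
            policy: DisclosurePolicy::Reveal,
        },
        // Commit to the request's Host header with a BLAKE3 hash commitment.
        DisclosureRule {
            http: HttpHandle {
                typ: MessageType::Request,
                part: MessagePart::Header(HeaderParams { key: "Host".to_string() }),
            },
            policy: DisclosurePolicy::Commit(Alg::Blake3),
        },
    ]
}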

View File

@@ -0,0 +1,34 @@
//! Core types of the prover plugin.
use crate::HttpHandle;
use serde::{Deserialize, Serialize};
use tlsn_core::ProverOutput;
mod config;
pub use config::{Config, ConfigError};
/// Output of the prover plugin.
#[allow(dead_code)]
pub struct Output {
output: ProverOutput,
/// Plaintext exposed to the host.
plaintext: Vec<(HttpHandle, Vec<u8>)>,
}
/// Params for protocol prover.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProverParams {
max_recv_data: usize,
max_sent_data: usize,
prove_server_identity: bool,
pub server_dns: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HttpRequest {
url: String,
method: String,
body: Option<Vec<u8>>,
pub headers: Vec<(String, String)>,
}
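// Illustrative sketch (not part of the diff): a GET request description paired
// with prover parameters. Most fields are private, so this assumes
// crate-internal code; the endpoint, limits and header are made-up example
// values.
#[allow(dead_code)]
fn example_prover_input() -> (ProverParams, HttpRequest) {
    let params = ProverParams {
        max_sent_data: 1 << 12,
        max_recv_data: 1 << 14,
        prove_server_identity: true,
        server_dns: "example.com".to_string(),
    };
    let request = HttpRequest {
        url: "https://example.com/user".to_string(),
        method: "GET".to_string(),
        body: None,
        headers: vec![("Accept".to_string(), "application/json".to_string())],
    };
    (params, request)
}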

View File

@@ -0,0 +1,463 @@
use crate::{
BodyParams, DisclosurePolicy, DisclosureRule, HttpHandle, MessagePart, MessageType,
prover::{HttpRequest, ProverParams},
};
use crate::prover::Output;
use http_body_util::Full;
use hyper::{Request as HyperRequest, body::Bytes};
use rangeset::RangeSet;
use serde::{Deserialize, Serialize};
use tlsn::{
config::ProtocolConfig,
prover::{ProverConfig, TlsConfig},
};
use tlsn_core::{
ProveConfig, ProveConfigBuilder, ProverOutput,
connection::{DnsName, ServerName},
transcript::{Transcript, TranscriptCommitConfig, TranscriptCommitConfigBuilder},
webpki::RootCertStore,
};
use tlsn_formats::{
http::{Body, Request, Requests, Response, Responses},
json::JsonValue,
spansy,
spansy::Spanned,
};
/// Prover plugin config.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
pub prover_params: ProverParams,
pub request: HttpRequest,
/// Data which will be disclosed to the verifier.
pub disclose: Vec<DisclosureRule>,
/// Data which will be exposed in the plugin output.
pub expose: Vec<HttpHandle>,
pub root_store: RootCertStore,
pub verifier_endpoint: String,
/// Proxy endpoint for connecting to the server.
pub proxy_endpoint: Option<String>,
}
impl Config {
/// Returns the verifier endpoint.
pub fn verifier_endpoint(&self) -> &String {
&self.verifier_endpoint
}
/// Builds and returns [ProverConfig].
pub fn prover_config(&self) -> Result<ProverConfig, ConfigError> {
let dns_name: DnsName = self
.prover_params
.server_dns
.clone()
.try_into()
.map_err(|_| ConfigError("prover_config error".to_string()))?;
let mut builder = TlsConfig::builder();
builder.root_store(self.root_store.clone());
let tls_config = builder.build().unwrap();
let config = ProverConfig::builder()
.server_name(ServerName::Dns(dns_name))
.tls_config(tls_config)
.protocol_config(
ProtocolConfig::builder()
.max_sent_data(self.prover_params.max_sent_data)
.max_recv_data(self.prover_params.max_recv_data)
.build()
.unwrap(),
)
.build()
.unwrap();
Ok(config)
}
/// Returns the HTTP request.
pub fn http_request(&self) -> Result<HyperRequest<Full<Bytes>>, ConfigError> {
let mut request = HyperRequest::builder()
.uri(self.request.url.clone())
.header("Host", self.prover_params.server_dns.clone());
for (k, v) in &self.request.headers {
request = request.header(k, v);
}
request = request.method(self.request.method.as_str());
let body = match &self.request.body {
Some(data) => Full::<Bytes>::from(data.clone()),
None => Full::<Bytes>::from(vec![]),
};
request
.body(body)
.map_err(|_| ConfigError("http_request error".to_string()))
}
/// Creates a [ProveConfig] for the given `transcript`.
pub fn prove_config(&self, transcript: &Transcript) -> Result<ProveConfig, ConfigError> {
let mut prove_cfg = ProveConfig::builder(transcript);
let mut commit_cfg = TranscriptCommitConfig::builder(transcript);
if self.prover_params.prove_server_identity {
prove_cfg.server_identity();
}
let reqs = Requests::new_from_slice(transcript.sent())
.collect::<Result<Vec<_>, _>>()
.map_err(|_| ConfigError("prove_config error".to_string()))?;
let resps = Responses::new_from_slice(transcript.received())
.collect::<Result<Vec<_>, _>>()
.map_err(|_| ConfigError("prove_config error".to_string()))?;
let req = reqs.first().expect("at least one request");
let resp = resps.first().expect("at least one response");
let req_rules = self
.disclose
.iter()
.filter(|h| h.http.typ == MessageType::Request);
let resp_rules = self
.disclose
.iter()
.filter(|h| h.http.typ == MessageType::Response);
disclose_req(req, req_rules, &mut commit_cfg, &mut prove_cfg);
disclose_resp(resp, resp_rules, &mut commit_cfg, &mut prove_cfg);
prove_cfg.transcript_commit(commit_cfg.build().unwrap());
Ok(prove_cfg.build().unwrap())
}
/// Returns the output of the plugin.
pub fn output(
&self,
transcript: Transcript,
prover_output: ProverOutput,
) -> Result<Output, ConfigError> {
let reqs = Requests::new_from_slice(transcript.sent())
.collect::<Result<Vec<_>, _>>()
.map_err(|_| ConfigError("output error".to_string()))?;
let resps = Responses::new_from_slice(transcript.received())
.collect::<Result<Vec<_>, _>>()
.map_err(|_| ConfigError("output error".to_string()))?;
let req = reqs.first().expect("at least one request");
let resp = resps.first().expect("at least one response");
let mut exposed = Vec::new();
// Extract the to-be-exposed data from the transcript.
for h in self.expose.iter() {
let range = if h.typ == MessageType::Request {
req_part_range(req, h)
} else {
resp_part_range(resp, h)
};
let seq = transcript
.get((&h.typ).into(), &range)
.ok_or(ConfigError("range not found in transcript".to_string()))?;
exposed.push((h.clone(), seq.data().to_vec()));
}
Ok(Output {
output: prover_output,
plaintext: exposed,
})
}
}
#[derive(Debug, thiserror::Error)]
#[error("config error: {0}")]
pub struct ConfigError(String);
/// Processes disclosure rules for the request.
fn disclose_req<'a, I>(
req: &Request,
rules: I,
commit_cfg: &mut TranscriptCommitConfigBuilder<'_>,
prove_cfg: &mut ProveConfigBuilder<'_>,
) where
I: Iterator<Item = &'a DisclosureRule>,
{
for r in rules {
let range = req_part_range(req, &r.http);
if range.is_empty() {
// TODO: maybe return an error here when the part was not found.
return;
}
match &r.policy {
DisclosurePolicy::Commit(alg) => {
commit_cfg
.commit_with_kind(&range, (&r.http.typ).into(), alg.into())
.expect("range is in the transcript");
}
DisclosurePolicy::Reveal => {
prove_cfg
.reveal_sent(&range)
.expect("range is in the transcript");
}
}
}
}
/// Processes disclosure rules for the response.
fn disclose_resp<'a, I>(
resp: &Response,
rules: I,
commit_cfg: &mut TranscriptCommitConfigBuilder<'_>,
prove_cfg: &mut ProveConfigBuilder<'_>,
) where
I: Iterator<Item = &'a DisclosureRule>,
{
for r in rules {
let range = resp_part_range(resp, &r.http);
if range.is_empty() {
// TODO: maybe return an error here when the part was not found.
return;
}
match &r.policy {
DisclosurePolicy::Commit(alg) => {
commit_cfg
.commit_with_kind(&range, (&r.http.typ).into(), alg.into())
.expect("range is in the transcript");
}
DisclosurePolicy::Reveal => {
prove_cfg
.reveal_recv(&range)
.expect("range is in the transcript");
}
}
}
}
/// Returns the range for the given `part` of the HTTP request.
fn req_part_range(req: &Request, part: &HttpHandle) -> RangeSet<usize> {
match &part.part {
MessagePart::All => {
(req.span().indices().min().unwrap()..req.span().indices().end().unwrap()).into()
}
MessagePart::StartLine => req.request.span().indices().clone(),
MessagePart::Header(params) => req
.headers_with_name(params.key.as_str())
.map(|h| h.span().indices())
.fold(RangeSet::default(), |acc, r| acc | r),
MessagePart::Body(params) => match &req.body {
Some(body) => {
// Body offset from the start of an HTTP message.
let body_offset = body
.span()
.indices()
.min()
.expect("body span cannot be empty");
let mut range = body_params_range(body, params);
range.shift_right(&body_offset);
range
}
None => RangeSet::default(),
},
}
}
/// Returns the range for the given `part` of the HTTP response.
fn resp_part_range(resp: &Response, part: &HttpHandle) -> RangeSet<usize> {
match &part.part {
MessagePart::All => {
(resp.span().indices().min().unwrap()..resp.span().indices().end().unwrap()).into()
}
MessagePart::StartLine => resp.status.span().indices().clone(),
MessagePart::Header(params) => resp
.headers_with_name(params.key.as_str())
.map(|h| h.span().indices())
.fold(RangeSet::default(), |acc, r| acc | r),
MessagePart::Body(params) => match &resp.body {
Some(body) => {
// Body offset from the start of an HTTP message.
let body_offset = body.span().indices().min().expect("body cannot be empty");
let mut range = body_params_range(body, params);
range.shift_right(&body_offset);
range
}
None => RangeSet::default(),
},
}
}
/// Returns the byte range of the `params` in the given `body`.
fn body_params_range(body: &Body, params: &BodyParams) -> RangeSet<usize> {
match params {
BodyParams::JsonPath(path) => {
// TODO: use a better approach than re-parsing the entire
// json for each path.
match spansy::json::parse(body.as_bytes().to_vec().into()) {
Ok(json) => json_path_range(&json, path),
Err(_) => RangeSet::default(),
}
}
_ => unimplemented!("only json parsing is currently supported"),
}
}
/// Returns the byte range of the key-value pair corresponding to the given
/// `path` in a JSON value `source`.
///
/// If the path points to an array element, only the range of the **value**
/// of the element is returned.
fn json_path_range(source: &JsonValue, path: &String) -> RangeSet<usize> {
let val = match source.get(path) {
Some(val) => val,
None => return RangeSet::default(),
};
let dot = ".";
let last = path.split(dot).last().unwrap();
// Whether `path` is a top-level key.
let is_top_level = last == path;
if last.parse::<usize>().is_ok() {
// The path points to an array element, so we only need the range of
// the **value**.
val.span().indices().clone()
} else {
let parent_val = if is_top_level {
source
} else {
source
.get(&path[..path.len() - last.len() - dot.len()])
.expect("path is valid")
};
let JsonValue::Object(parent_obj) = parent_val else {
unreachable!("parent value is always an object");
};
// We need the range of the **key-value** pair.
let kv = parent_obj
.elems
.iter()
.find(|kv| kv.value == *val)
.expect("element exists");
kv.without_separator()
}
}
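// Illustrative sketch (not part of the diff): the intended shape of the ranges
// returned above. For a body such as `{"foo": {"bar": 1}, "baz": [10, 20]}`:
//   - path "foo.bar" resolves to the key-value pair `"bar": 1`,
//   - path "baz.1" resolves to just the value `20` (array element),
//   - a missing path yields an empty RangeSet.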
#[cfg(test)]
mod tests {
use crate::HeaderParams;
use super::*;
use spansy::http::parse_response;
use tlsn_data_fixtures::http::{request, response};
use tlsn_formats::spansy::http::parse_request;
#[test]
fn test_req_part_range() {
let data = request::POST_JSON;
let req = parse_request(data).unwrap();
let s = std::str::from_utf8(data).unwrap();
//===============All
let part = HttpHandle {
part: MessagePart::All,
typ: MessageType::Request,
};
let range = req_part_range(&req, &part);
assert_eq!(range, 0..data.len());
//===============StartLine
let part = HttpHandle {
part: MessagePart::StartLine,
typ: MessageType::Request,
};
let range = req_part_range(&req, &part);
let end = s.find("\r\n").unwrap() + 2;
assert_eq!(range, 0..end);
//===============Header
let part = HttpHandle {
part: MessagePart::Header(HeaderParams {
key: "Content-Length".to_string(),
}),
typ: MessageType::Request,
};
let range = req_part_range(&req, &part);
let target: &'static str = "Content-Length: 44";
let start = s.find(target).unwrap();
let end = start + target.len() + 2;
assert_eq!(range, start..end);
//===============Body
let part = HttpHandle {
part: MessagePart::Body(BodyParams::JsonPath("bazz".to_string())),
typ: MessageType::Request,
};
let range = req_part_range(&req, &part);
let target: &'static str = "\"bazz\": 123";
let start = s.find(target).unwrap();
let end = start + target.len();
assert_eq!(range, start..end);
}
#[test]
fn test_resp_part_range() {
let data = response::OK_JSON;
let resp = parse_response(data).unwrap();
let s = std::str::from_utf8(data).unwrap();
//===============All
let part = HttpHandle {
part: MessagePart::All,
typ: MessageType::Response,
};
let range = resp_part_range(&resp, &part);
assert_eq!(range, 0..data.len());
//===============StartLine
let part = HttpHandle {
part: MessagePart::StartLine,
typ: MessageType::Response,
};
let range = resp_part_range(&resp, &part);
let end = s.find("\r\n").unwrap() + 2;
assert_eq!(range, 0..end);
//===============Header
let part = HttpHandle {
part: MessagePart::Header(HeaderParams {
key: "Content-Length".to_string(),
}),
typ: MessageType::Response,
};
let range = resp_part_range(&resp, &part);
let target: &'static str = "Content-Length: 44";
let start = s.find(target).unwrap();
let end = start + target.len() + 2;
assert_eq!(range, start..end);
//===============Body
let part = HttpHandle {
part: MessagePart::Body(BodyParams::JsonPath("bazz".to_string())),
typ: MessageType::Response,
};
let range = resp_part_range(&resp, &part);
let target: &'static str = "\"bazz\": 123";
let start = s.find(target).unwrap();
let end = start + target.len();
assert_eq!(range, start..end);
}
}

View File

@@ -0,0 +1,20 @@
//! Core types of the verifier plugin.
use tlsn_core::VerifierOutput;
mod config;
pub use config::{Config, ConfigError};
/// Output of the verifier plugin.
#[allow(dead_code)]
pub struct Output {
output: VerifierOutput,
}
/// Params for protocol verifier.
pub struct VerifierParams {
pub max_sent_data: usize,
pub max_recv_data: usize,
pub prover_endpoint: String,
}

View File

@@ -0,0 +1,56 @@
use crate::{
DisclosureRule,
verifier::{Output, VerifierParams},
};
use tlsn::{
config::{ProtocolConfig, RootCertStore},
verifier::VerifierConfig,
};
use tlsn_core::VerifierOutput;
/// Verifier plugin config.
#[allow(dead_code)]
pub struct Config {
pub verifier_params: VerifierParams,
/// Data which the prover is expected to disclose.
pub disclose: Vec<DisclosureRule>,
pub root_store: RootCertStore,
pub prover_endpoint: String,
}
impl Config {
/// Returns the prover endpoint.
pub fn prover_endpoint(&self) -> &String {
&self.verifier_params.prover_endpoint
}
/// Builds and returns [VerifierConfig].
pub fn verifier_config(&self) -> VerifierConfig {
VerifierConfig::builder()
.root_store(self.root_store.clone())
.build()
.unwrap()
}
/// Validates the given protocol `config`.
pub fn validate_protocol_config(&self, config: &ProtocolConfig) -> Result<(), ConfigError> {
if config.max_recv_data() > self.verifier_params.max_recv_data
|| config.max_sent_data() > self.verifier_params.max_sent_data
{
Err(ConfigError(
"failed to validate protocol config".to_string(),
))
} else {
Ok(())
}
}
/// Returns verifier plugin output.
pub fn output(&self, output: VerifierOutput) -> Output {
Output { output }
}
}
#[derive(Debug, thiserror::Error)]
#[error("config error: {0}")]
pub struct ConfigError(String);
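// Illustrative sketch (not part of the diff): how a host might gate a prover's
// proposal using the config above. `plugin_config` is an already-built plugin
// Config; the byte limits are made-up example values.
#[allow(dead_code)]
fn example_validate(plugin_config: &Config) -> Result<VerifierConfig, ConfigError> {
    let proposal = ProtocolConfig::builder()
        .max_sent_data(1 << 12)
        .max_recv_data(1 << 14)
        .build()
        .expect("example limits are consistent");
    // Reject proposals exceeding the limits in VerifierParams.
    plugin_config.validate_protocol_config(&proposal)?;
    Ok(plugin_config.verifier_config())
}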

View File

@@ -12,7 +12,6 @@ workspace = true
[features]
default = ["rayon"]
mozilla-certs = ["tlsn-core/mozilla-certs"]
rayon = ["mpz-zk/rayon", "mpz-garble/rayon"]
web = ["dep:web-spawn"]
@@ -30,9 +29,9 @@ serio = { workspace = true, features = ["compat"] }
uid-mux = { workspace = true, features = ["serio"] }
web-spawn = { workspace = true, optional = true }
mpz-circuits = { workspace = true, features = ["aes"] }
mpz-common = { workspace = true }
mpz-core = { workspace = true }
mpz-circuits = { workspace = true }
mpz-garble = { workspace = true }
mpz-garble-core = { workspace = true }
mpz-hash = { workspace = true }
@@ -41,10 +40,10 @@ mpz-ole = { workspace = true }
mpz-ot = { workspace = true }
mpz-vm-core = { workspace = true }
mpz-zk = { workspace = true }
mpz-ideal-vm = { workspace = true }
aes = { workspace = true }
ctr = { workspace = true }
derive_builder = { workspace = true }
futures = { workspace = true }
opaque-debug = { workspace = true }
rand = { workspace = true }

View File

@@ -1,3 +0,0 @@
fn main() {
println!("cargo:rustc-check-cfg=cfg(tlsn_insecure)");
}

119
crates/tlsn/src/config.rs Normal file
View File

@@ -0,0 +1,119 @@
//! TLSNotary protocol config and config utilities.
use once_cell::sync::Lazy;
use semver::Version;
use serde::{Deserialize, Serialize};
pub use tlsn_core::webpki::{CertificateDer, PrivateKeyDer, RootCertStore};
// The default of 32 bytes is enough to decrypt the TLS protocol messages.
const DEFAULT_MAX_RECV_ONLINE: usize = 32;
// The crate version that is currently running.
pub(crate) static VERSION: Lazy<Version> = Lazy::new(|| {
Version::parse(env!("CARGO_PKG_VERSION")).expect("cargo pkg version should be a valid semver")
});
/// Protocol configuration to be set up initially by prover and verifier.
#[derive(derive_builder::Builder, Clone, Debug, Deserialize, Serialize)]
#[builder(build_fn(validate = "Self::validate"))]
pub struct ProtocolConfig {
/// Maximum number of bytes that can be sent.
max_sent_data: usize,
/// Maximum number of application data records that can be sent.
#[builder(setter(strip_option), default)]
max_sent_records: Option<usize>,
/// Maximum number of bytes that can be decrypted online, i.e. while the
/// MPC-TLS connection is active.
#[builder(default = "DEFAULT_MAX_RECV_ONLINE")]
max_recv_data_online: usize,
/// Maximum number of bytes that can be received.
max_recv_data: usize,
/// Maximum number of received application data records that can be
/// decrypted online, i.e. while the MPC-TLS connection is active.
#[builder(setter(strip_option), default)]
max_recv_records_online: Option<usize>,
/// Whether the `deferred decryption` feature is toggled on from the start
/// of the MPC-TLS connection.
#[builder(default = "true")]
defer_decryption_from_start: bool,
/// Network settings.
#[builder(default)]
network: NetworkSetting,
}
impl ProtocolConfigBuilder {
fn validate(&self) -> Result<(), String> {
if self.max_recv_data_online > self.max_recv_data {
return Err(
"max_recv_data_online must be smaller or equal to max_recv_data".to_string(),
);
}
Ok(())
}
}
impl ProtocolConfig {
/// Creates a new builder for `ProtocolConfig`.
pub fn builder() -> ProtocolConfigBuilder {
ProtocolConfigBuilder::default()
}
/// Returns the maximum number of bytes that can be sent.
pub fn max_sent_data(&self) -> usize {
self.max_sent_data
}
/// Returns the maximum number of application data records that can
/// be sent.
pub fn max_sent_records(&self) -> Option<usize> {
self.max_sent_records
}
/// Returns the maximum number of bytes that can be decrypted online.
pub fn max_recv_data_online(&self) -> usize {
self.max_recv_data_online
}
/// Returns the maximum number of bytes that can be received.
pub fn max_recv_data(&self) -> usize {
self.max_recv_data
}
/// Returns the maximum number of received application data records that
/// can be decrypted online.
pub fn max_recv_records_online(&self) -> Option<usize> {
self.max_recv_records_online
}
/// Returns whether the `deferred decryption` feature is toggled on from the
/// start of the MPC-TLS connection.
pub fn defer_decryption_from_start(&self) -> bool {
self.defer_decryption_from_start
}
/// Returns the network settings.
pub fn network(&self) -> NetworkSetting {
self.network
}
}
/// Settings for the network environment.
///
/// Provides optimization options to adapt the protocol to different network
/// situations.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum NetworkSetting {
/// Reduces network round-trips at the expense of consuming more network
/// bandwidth.
Bandwidth,
/// Reduces network bandwidth utilization at the expense of more network
/// round-trips.
Latency,
}
impl Default for NetworkSetting {
fn default() -> Self {
Self::Latency
}
}
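// Illustrative sketch (not part of the diff): a typical ProtocolConfig,
// mirroring how the examples in this changeset build one. The byte limits are
// made-up values; unset fields fall back to the defaults above (32-byte online
// receive limit, deferred decryption enabled, latency-optimized network).
#[allow(dead_code)]
fn example_protocol_config() -> ProtocolConfig {
    ProtocolConfig::builder()
        .max_sent_data(1 << 12)
        .max_recv_data(1 << 14)
        .build()
        .expect("the 32-byte online receive default does not exceed max_recv_data")
}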

View File

@@ -4,10 +4,10 @@
#![deny(clippy::all)]
#![forbid(unsafe_code)]
pub mod config;
pub(crate) mod context;
pub(crate) mod ghash;
pub(crate) mod map;
pub(crate) mod mpz;
pub(crate) mod msg;
pub(crate) mod mux;
pub mod prover;
@@ -16,16 +16,7 @@ pub(crate) mod transcript_internal;
pub mod verifier;
pub use tlsn_attestation as attestation;
pub use tlsn_core::{config, connection, hash, transcript, webpki};
use std::sync::LazyLock;
use semver::Version;
// Package version.
pub(crate) static VERSION: LazyLock<Version> = LazyLock::new(|| {
Version::parse(env!("CARGO_PKG_VERSION")).expect("cargo pkg version should be a valid semver")
});
pub use tlsn_core::{connection, hash, transcript};
/// The party's role in the TLSN protocol.
///

View File

@@ -1,233 +0,0 @@
use std::sync::Arc;
use mpc_tls::{MpcTlsFollower, MpcTlsLeader, SessionKeys};
use mpz_common::Context;
use mpz_core::Block;
#[cfg(not(tlsn_insecure))]
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_garble_core::Delta;
use mpz_memory_core::{
Vector,
binary::U8,
correlated::{Key, Mac},
};
#[cfg(not(tlsn_insecure))]
use mpz_ot::cot::{DerandCOTReceiver, DerandCOTSender};
use mpz_ot::{
chou_orlandi as co, ferret, kos,
rcot::shared::{SharedRCOTReceiver, SharedRCOTSender},
};
use mpz_zk::{Prover, Verifier};
#[cfg(not(tlsn_insecure))]
use rand::Rng;
use tlsn_core::config::tls_commit::mpc::{MpcTlsConfig, NetworkSetting};
use tlsn_deap::Deap;
use tokio::sync::Mutex;
use crate::transcript_internal::commit::encoding::{KeyStore, MacStore};
#[cfg(not(tlsn_insecure))]
pub(crate) type ProverMpc =
Garbler<DerandCOTSender<SharedRCOTSender<kos::Sender<co::Receiver>, Block>>>;
#[cfg(tlsn_insecure)]
pub(crate) type ProverMpc = mpz_ideal_vm::IdealVm;
#[cfg(not(tlsn_insecure))]
pub(crate) type ProverZk =
Prover<SharedRCOTReceiver<ferret::Receiver<kos::Receiver<co::Sender>>, bool, Block>>;
#[cfg(tlsn_insecure)]
pub(crate) type ProverZk = mpz_ideal_vm::IdealVm;
#[cfg(not(tlsn_insecure))]
pub(crate) type VerifierMpc =
Evaluator<DerandCOTReceiver<SharedRCOTReceiver<kos::Receiver<co::Sender>, bool, Block>>>;
#[cfg(tlsn_insecure)]
pub(crate) type VerifierMpc = mpz_ideal_vm::IdealVm;
#[cfg(not(tlsn_insecure))]
pub(crate) type VerifierZk =
Verifier<SharedRCOTSender<ferret::Sender<kos::Sender<co::Receiver>>, Block>>;
#[cfg(tlsn_insecure)]
pub(crate) type VerifierZk = mpz_ideal_vm::IdealVm;
pub(crate) struct ProverDeps {
pub(crate) vm: Arc<Mutex<Deap<ProverMpc, ProverZk>>>,
pub(crate) mpc_tls: MpcTlsLeader,
}
pub(crate) fn build_prover_deps(config: MpcTlsConfig, ctx: Context) -> ProverDeps {
let mut rng = rand::rng();
let delta = Delta::new(Block::random(&mut rng));
let base_ot_send = co::Sender::default();
let base_ot_recv = co::Receiver::default();
let rcot_send = kos::Sender::new(
kos::SenderConfig::default(),
delta.into_inner(),
base_ot_recv,
);
let rcot_recv = kos::Receiver::new(kos::ReceiverConfig::default(), base_ot_send);
let rcot_recv = ferret::Receiver::new(
ferret::FerretConfig::builder()
.lpn_type(ferret::LpnType::Regular)
.build()
.expect("ferret config is valid"),
Block::random(&mut rng),
rcot_recv,
);
let rcot_send = SharedRCOTSender::new(rcot_send);
let rcot_recv = SharedRCOTReceiver::new(rcot_recv);
#[cfg(not(tlsn_insecure))]
let mpc = ProverMpc::new(DerandCOTSender::new(rcot_send.clone()), rng.random(), delta);
#[cfg(tlsn_insecure)]
let mpc = mpz_ideal_vm::IdealVm::new();
#[cfg(not(tlsn_insecure))]
let zk = ProverZk::new(Default::default(), rcot_recv.clone());
#[cfg(tlsn_insecure)]
let zk = mpz_ideal_vm::IdealVm::new();
let vm = Arc::new(Mutex::new(Deap::new(tlsn_deap::Role::Leader, mpc, zk)));
let mpc_tls = MpcTlsLeader::new(
build_mpc_tls_config(config),
ctx,
vm.clone(),
(rcot_send.clone(), rcot_send.clone(), rcot_send),
rcot_recv,
);
ProverDeps { vm, mpc_tls }
}
pub(crate) struct VerifierDeps {
pub(crate) vm: Arc<Mutex<Deap<VerifierMpc, VerifierZk>>>,
pub(crate) mpc_tls: MpcTlsFollower,
}
pub(crate) fn build_verifier_deps(config: MpcTlsConfig, ctx: Context) -> VerifierDeps {
let mut rng = rand::rng();
let delta = Delta::random(&mut rng);
let base_ot_send = co::Sender::default();
let base_ot_recv = co::Receiver::default();
let rcot_send = kos::Sender::new(
kos::SenderConfig::default(),
delta.into_inner(),
base_ot_recv,
);
let rcot_send = ferret::Sender::new(
ferret::FerretConfig::builder()
.lpn_type(ferret::LpnType::Regular)
.build()
.expect("ferret config is valid"),
Block::random(&mut rng),
rcot_send,
);
let rcot_recv = kos::Receiver::new(kos::ReceiverConfig::default(), base_ot_send);
let rcot_send = SharedRCOTSender::new(rcot_send);
let rcot_recv = SharedRCOTReceiver::new(rcot_recv);
#[cfg(not(tlsn_insecure))]
let mpc = VerifierMpc::new(DerandCOTReceiver::new(rcot_recv.clone()));
#[cfg(tlsn_insecure)]
let mpc = mpz_ideal_vm::IdealVm::new();
#[cfg(not(tlsn_insecure))]
let zk = VerifierZk::new(Default::default(), delta, rcot_send.clone());
#[cfg(tlsn_insecure)]
let zk = mpz_ideal_vm::IdealVm::new();
let vm = Arc::new(Mutex::new(Deap::new(tlsn_deap::Role::Follower, mpc, zk)));
let mpc_tls = MpcTlsFollower::new(
build_mpc_tls_config(config),
ctx,
vm.clone(),
rcot_send,
(rcot_recv.clone(), rcot_recv.clone(), rcot_recv),
);
VerifierDeps { vm, mpc_tls }
}
fn build_mpc_tls_config(config: MpcTlsConfig) -> mpc_tls::Config {
let mut builder = mpc_tls::Config::builder();
builder
.defer_decryption(config.defer_decryption_from_start())
.max_sent(config.max_sent_data())
.max_recv_online(config.max_recv_data_online())
.max_recv(config.max_recv_data());
if let Some(max_sent_records) = config.max_sent_records() {
builder.max_sent_records(max_sent_records);
}
if let Some(max_recv_records_online) = config.max_recv_records_online() {
builder.max_recv_records_online(max_recv_records_online);
}
if let NetworkSetting::Latency = config.network() {
builder.low_bandwidth();
}
builder.build().unwrap()
}
pub(crate) fn translate_keys<Mpc, Zk>(keys: &mut SessionKeys, vm: &Deap<Mpc, Zk>) {
keys.client_write_key = vm
.translate(keys.client_write_key)
.expect("VM memory should be consistent");
keys.client_write_iv = vm
.translate(keys.client_write_iv)
.expect("VM memory should be consistent");
keys.server_write_key = vm
.translate(keys.server_write_key)
.expect("VM memory should be consistent");
keys.server_write_iv = vm
.translate(keys.server_write_iv)
.expect("VM memory should be consistent");
keys.server_write_mac_key = vm
.translate(keys.server_write_mac_key)
.expect("VM memory should be consistent");
}
impl<T> KeyStore for Verifier<T> {
fn delta(&self) -> &Delta {
self.delta()
}
fn get_keys(&self, data: Vector<U8>) -> Option<&[Key]> {
self.get_keys(data).ok()
}
}
impl<T> MacStore for Prover<T> {
fn get_macs(&self, data: Vector<U8>) -> Option<&[Mac]> {
self.get_macs(data).ok()
}
}
#[cfg(tlsn_insecure)]
mod insecure {
use super::*;
use mpz_ideal_vm::IdealVm;
impl KeyStore for IdealVm {
fn delta(&self) -> &Delta {
unimplemented!("encodings not supported in insecure mode")
}
fn get_keys(&self, _data: Vector<U8>) -> Option<&[Key]> {
unimplemented!("encodings not supported in insecure mode")
}
}
impl MacStore for IdealVm {
fn get_macs(&self, _data: Vector<U8>) -> Option<&[Mac]> {
unimplemented!("encodings not supported in insecure mode")
}
}
}

View File

@@ -1,25 +1,14 @@
use semver::Version;
use serde::{Deserialize, Serialize};
use tlsn_core::{
config::{prove::ProveRequest, tls_commit::TlsCommitRequest},
connection::{HandshakeData, ServerName},
transcript::PartialTranscript,
};
use crate::config::ProtocolConfig;
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct TlsCommitRequestMsg {
pub(crate) request: TlsCommitRequest,
pub(crate) struct SetupRequest {
pub(crate) config: ProtocolConfig,
pub(crate) version: Version,
}
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct ProveRequestMsg {
pub(crate) request: ProveRequest,
pub(crate) handshake: Option<(ServerName, HandshakeData)>,
pub(crate) transcript: Option<PartialTranscript>,
}
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct Response {
pub(crate) result: Result<(), RejectionReason>,

View File

@@ -1,45 +1,63 @@
//! Prover.
mod config;
mod error;
mod future;
mod prove;
pub mod state;
pub use config::{ProverConfig, ProverConfigBuilder, TlsConfig, TlsConfigBuilder};
pub use error::ProverError;
pub use future::ProverFuture;
pub use tlsn_core::ProverOutput;
use rustls_pki_types::CertificateDer;
pub use tlsn_core::{
ProveConfig, ProveConfigBuilder, ProveConfigBuilderError, ProveRequest, ProverOutput,
};
use mpz_common::Context;
use mpz_core::Block;
use mpz_garble_core::Delta;
use mpz_vm_core::prelude::*;
use mpz_zk::ProverConfig as ZkProverConfig;
use webpki::anchor_from_trusted_cert;
use crate::{
Role,
context::build_mt_context,
mpz::{ProverDeps, build_prover_deps, translate_keys},
msg::{ProveRequestMsg, Response, TlsCommitRequestMsg},
msg::{Response, SetupRequest},
mux::attach_mux,
tag::verify_tags,
};
use futures::{AsyncRead, AsyncWrite, TryFutureExt};
use mpc_tls::LeaderCtrl;
use mpz_vm_core::prelude::*;
use rustls_pki_types::CertificateDer;
use mpc_tls::{LeaderCtrl, MpcTlsLeader, SessionKeys};
use rand::Rng;
use serio::{SinkExt, stream::IoStreamExt};
use std::sync::Arc;
use tls_client::{ClientConnection, ServerName as TlsServerName};
use tls_client_async::{TlsConnection, bind_client};
use tlsn_core::{
config::{
prove::ProveConfig,
prover::ProverConfig,
tls::TlsClientConfig,
tls_commit::{TlsCommitConfig, TlsCommitProtocolConfig},
},
connection::{HandshakeData, ServerName},
transcript::{TlsTranscript, Transcript},
};
use webpki::anchor_from_trusted_cert;
use tlsn_deap::Deap;
use tokio::sync::Mutex;
use tracing::{Instrument, Span, debug, info, info_span, instrument};
pub(crate) type RCOTSender = mpz_ot::rcot::shared::SharedRCOTSender<
mpz_ot::kos::Sender<mpz_ot::chou_orlandi::Receiver>,
mpz_core::Block,
>;
pub(crate) type RCOTReceiver = mpz_ot::rcot::shared::SharedRCOTReceiver<
mpz_ot::ferret::Receiver<mpz_ot::kos::Receiver<mpz_ot::chou_orlandi::Sender>>,
bool,
mpz_core::Block,
>;
pub(crate) type Mpc =
mpz_garble::protocol::semihonest::Garbler<mpz_ot::cot::DerandCOTSender<RCOTSender>>;
pub(crate) type Zk = mpz_zk::Prover<RCOTReceiver>;
/// A prover instance.
#[derive(Debug)]
pub struct Prover<T: state::ProverState = state::Initialized> {
@@ -63,21 +81,19 @@ impl Prover<state::Initialized> {
}
}
/// Starts the TLS commitment protocol.
/// Sets up the prover.
///
/// This initiates the TLS commitment protocol, including performing any
/// necessary preprocessing operations.
/// This performs all MPC setup prior to establishing the connection to the
/// application server.
///
/// # Arguments
///
/// * `config` - The TLS commitment configuration.
/// * `socket` - The socket to the TLS verifier.
#[instrument(parent = &self.span, level = "debug", skip_all, err)]
pub async fn commit<S: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
pub async fn setup<S: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
self,
config: TlsCommitConfig,
socket: S,
) -> Result<Prover<state::CommitAccepted>, ProverError> {
) -> Result<Prover<state::Setup>, ProverError> {
let (mut mux_fut, mux_ctrl) = attach_mux(socket, Role::Prover);
let mut mt = build_mt_context(mux_ctrl.clone());
let mut ctx = mux_fut.poll_with(mt.new_context()).await?;
@@ -86,9 +102,9 @@ impl Prover<state::Initialized> {
mux_fut
.poll_with(async {
ctx.io_mut()
.send(TlsCommitRequestMsg {
request: config.to_request(),
version: crate::VERSION.clone(),
.send(SetupRequest {
config: self.config.protocol_config().clone(),
version: crate::config::VERSION.clone(),
})
.await?;
@@ -100,16 +116,12 @@ impl Prover<state::Initialized> {
})
.await?;
let TlsCommitProtocolConfig::Mpc(mpc_tls_config) = config.protocol().clone() else {
unreachable!("only MPC TLS is supported");
};
let ProverDeps { vm, mut mpc_tls } = build_prover_deps(mpc_tls_config, ctx);
let (vm, mut mpc_tls) = build_mpc_tls(&self.config, ctx);
// Allocate resources for MPC-TLS in the VM.
let mut keys = mpc_tls.alloc()?;
let vm_lock = vm.try_lock().expect("VM is not locked");
translate_keys(&mut keys, &vm_lock);
translate_keys(&mut keys, &vm_lock)?;
drop(vm_lock);
debug!("setting up mpc-tls");
@@ -121,7 +133,7 @@ impl Prover<state::Initialized> {
Ok(Prover {
config: self.config,
span: self.span,
state: state::CommitAccepted {
state: state::Setup {
mux_ctrl,
mux_fut,
mpc_tls,
@@ -132,24 +144,21 @@ impl Prover<state::Initialized> {
}
}
impl Prover<state::CommitAccepted> {
impl Prover<state::Setup> {
/// Connects to the server using the provided socket.
///
/// Returns a handle to the TLS connection, a future which returns the
/// prover once the connection is closed and the TLS transcript is
/// committed.
/// prover once the connection is closed.
///
/// # Arguments
///
/// * `config` - The TLS client configuration.
/// * `socket` - The socket to the server.
#[instrument(parent = &self.span, level = "debug", skip_all, err)]
pub async fn connect<S: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
self,
config: TlsClientConfig,
socket: S,
) -> Result<(TlsConnection, ProverFuture), ProverError> {
let state::CommitAccepted {
let state::Setup {
mux_ctrl,
mut mux_fut,
mpc_tls,
@@ -160,13 +169,12 @@ impl Prover<state::CommitAccepted> {
let (mpc_ctrl, mpc_fut) = mpc_tls.run();
let ServerName::Dns(server_name) = config.server_name();
let ServerName::Dns(server_name) = self.config.server_name();
let server_name =
TlsServerName::try_from(server_name.as_ref()).expect("name was validated");
let root_store = tls_client::RootCertStore {
roots: config
.root_store()
let root_store = if let Some(root_store) = self.config.tls_config().root_store() {
let roots = root_store
.roots
.iter()
.map(|cert| {
@@ -175,15 +183,20 @@ impl Prover<state::CommitAccepted> {
.map(|anchor| anchor.to_owned())
.map_err(ProverError::config)
})
.collect::<Result<Vec<_>, _>>()?,
.collect::<Result<Vec<_>, _>>()?;
tls_client::RootCertStore { roots }
} else {
tls_client::RootCertStore {
roots: webpki_roots::TLS_SERVER_ROOTS.to_vec(),
}
};
let rustls_config = tls_client::ClientConfig::builder()
let config = tls_client::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_store);
let rustls_config = if let Some((cert, key)) = config.client_auth() {
rustls_config
let config = if let Some((cert, key)) = self.config.tls_config().client_auth() {
config
.with_single_cert(
cert.iter()
.map(|cert| tls_client::Certificate(cert.0.clone()))
@@ -192,15 +205,12 @@ impl Prover<state::CommitAccepted> {
)
.map_err(ProverError::config)?
} else {
rustls_config.with_no_client_auth()
config.with_no_client_auth()
};
let client = ClientConnection::new(
Arc::new(rustls_config),
Box::new(mpc_ctrl.clone()),
server_name,
)
.map_err(ProverError::config)?;
let client =
ClientConnection::new(Arc::new(config), Box::new(mpc_ctrl.clone()), server_name)
.map_err(ProverError::config)?;
let (conn, conn_fut) = bind_client(socket, client);
@@ -274,7 +284,6 @@ impl Prover<state::CommitAccepted> {
mux_fut,
ctx,
vm,
server_name: config.server_name().clone(),
keys,
tls_transcript,
transcript,
@@ -317,42 +326,40 @@ impl Prover<state::Committed> {
ctx,
vm,
keys,
server_name,
tls_transcript,
transcript,
..
} = &mut self.state;
let handshake = config.server_identity().then(|| {
(
server_name.clone(),
HandshakeData {
certs: tls_transcript
.server_cert_chain()
.expect("server cert chain is present")
.to_vec(),
sig: tls_transcript
.server_signature()
.expect("server signature is present")
.clone(),
binding: tls_transcript.certificate_binding().clone(),
},
)
});
let partial_transcript = config
.reveal()
.map(|(sent, recv)| transcript.to_partial(sent.clone(), recv.clone()));
let msg = ProveRequestMsg {
request: config.to_request(),
handshake,
transcript: partial_transcript,
let request = ProveRequest {
handshake: config.server_identity().then(|| {
(
self.config.server_name().clone(),
HandshakeData {
certs: tls_transcript
.server_cert_chain()
.expect("server cert chain is present")
.to_vec(),
sig: tls_transcript
.server_signature()
.expect("server signature is present")
.clone(),
binding: tls_transcript.certificate_binding().clone(),
},
)
}),
transcript: config
.reveal()
.map(|(sent, recv)| transcript.to_partial(sent.clone(), recv.clone())),
transcript_commit: config.transcript_commit().map(|config| config.to_request()),
};
let output = mux_fut
.poll_with(async {
ctx.io_mut().send(msg).await.map_err(ProverError::from)?;
ctx.io_mut()
.send(request)
.await
.map_err(ProverError::from)?;
ctx.io_mut().expect_next::<Response>().await?.result?;
@@ -380,6 +387,53 @@ impl Prover<state::Committed> {
}
}
fn build_mpc_tls(config: &ProverConfig, ctx: Context) -> (Arc<Mutex<Deap<Mpc, Zk>>>, MpcTlsLeader) {
let mut rng = rand::rng();
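// Correlation (delta) for the leader's garbled circuits and its KOS OT sender.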
let delta = Delta::new(Block::random(&mut rng));
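// Chou-Orlandi base OTs extended with KOS; the receiving side is further expanded with Ferret (LPN).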
let base_ot_send = mpz_ot::chou_orlandi::Sender::default();
let base_ot_recv = mpz_ot::chou_orlandi::Receiver::default();
let rcot_send = mpz_ot::kos::Sender::new(
mpz_ot::kos::SenderConfig::default(),
delta.into_inner(),
base_ot_recv,
);
let rcot_recv =
mpz_ot::kos::Receiver::new(mpz_ot::kos::ReceiverConfig::default(), base_ot_send);
let rcot_recv = mpz_ot::ferret::Receiver::new(
mpz_ot::ferret::FerretConfig::builder()
.lpn_type(mpz_ot::ferret::LpnType::Regular)
.build()
.expect("ferret config is valid"),
Block::random(&mut rng),
rcot_recv,
);
let rcot_send = mpz_ot::rcot::shared::SharedRCOTSender::new(rcot_send);
let rcot_recv = mpz_ot::rcot::shared::SharedRCOTReceiver::new(rcot_recv);
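// DEAP leader: garbled-circuit MPC paired with a ZK prover, both driven by the shared OTs.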
let mpc = Mpc::new(
mpz_ot::cot::DerandCOTSender::new(rcot_send.clone()),
rng.random(),
delta,
);
let zk = Zk::new(ZkProverConfig::default(), rcot_recv.clone());
let vm = Arc::new(Mutex::new(Deap::new(tlsn_deap::Role::Leader, mpc, zk)));
(
vm.clone(),
MpcTlsLeader::new(
config.build_mpc_tls_config(),
ctx,
vm,
(rcot_send.clone(), rcot_send.clone(), rcot_send),
rcot_recv,
),
)
}
/// A controller for the prover.
#[derive(Clone)]
pub struct ProverControl {
@@ -405,3 +459,24 @@ impl ProverControl {
.map_err(ProverError::from)
}
}
/// Translates VM references to the ZK address space.
fn translate_keys<Mpc, Zk>(keys: &mut SessionKeys, vm: &Deap<Mpc, Zk>) -> Result<(), ProverError> {
keys.client_write_key = vm
.translate(keys.client_write_key)
.map_err(ProverError::mpc)?;
keys.client_write_iv = vm
.translate(keys.client_write_iv)
.map_err(ProverError::mpc)?;
keys.server_write_key = vm
.translate(keys.server_write_key)
.map_err(ProverError::mpc)?;
keys.server_write_iv = vm
.translate(keys.server_write_iv)
.map_err(ProverError::mpc)?;
keys.server_write_mac_key = vm
.translate(keys.server_write_mac_key)
.map_err(ProverError::mpc)?;
Ok(())
}
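
For orientation, a minimal sketch of the prover flow after this change, assuming `verifier_io` and `server_io` are already-established async transports and that `prover_fut` resolves to the committed prover (as in the integration test below); the proving phase and error handling are omitted.

```rust
use futures::{AsyncRead, AsyncWrite, AsyncWriteExt};
use tlsn::prover::{Prover, ProverConfig};

async fn run_prover<S>(config: ProverConfig, verifier_io: S, server_io: S)
where
    S: AsyncRead + AsyncWrite + Send + Unpin + 'static,
{
    // MPC setup with the verifier happens before the TLS connection is opened.
    let prover = Prover::new(config).setup(verifier_io).await.unwrap();

    // `connect` no longer takes a TLS client config: server name, root store and
    // client auth all come from the ProverConfig supplied at construction.
    let (mut tls_conn, prover_fut) = prover.connect(server_io).await.unwrap();
    let prover_task = tokio::spawn(prover_fut);

    // ... application traffic over `tls_conn` goes here ...
    tls_conn.close().await.unwrap();

    // Once the connection is closed, the task yields the prover in the Committed state.
    let _prover = prover_task.await.unwrap().unwrap();
}
```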

View File

@@ -0,0 +1,144 @@
use mpc_tls::Config;
use serde::{Deserialize, Serialize};
use tlsn_core::{
connection::ServerName,
webpki::{CertificateDer, PrivateKeyDer, RootCertStore},
};
use crate::config::{NetworkSetting, ProtocolConfig};
/// Configuration for the prover.
#[derive(Debug, Clone, derive_builder::Builder, Serialize, Deserialize)]
pub struct ProverConfig {
/// The server DNS name.
#[builder(setter(into))]
server_name: ServerName,
/// Protocol configuration to be checked with the verifier.
protocol_config: ProtocolConfig,
/// TLS configuration.
#[builder(default)]
tls_config: TlsConfig,
}
impl ProverConfig {
/// Creates a new builder for `ProverConfig`.
pub fn builder() -> ProverConfigBuilder {
ProverConfigBuilder::default()
}
/// Returns the server DNS name.
pub fn server_name(&self) -> &ServerName {
&self.server_name
}
/// Returns the protocol configuration.
pub fn protocol_config(&self) -> &ProtocolConfig {
&self.protocol_config
}
/// Returns the TLS configuration.
pub fn tls_config(&self) -> &TlsConfig {
&self.tls_config
}
pub(crate) fn build_mpc_tls_config(&self) -> Config {
let mut builder = Config::builder();
builder
.defer_decryption(self.protocol_config.defer_decryption_from_start())
.max_sent(self.protocol_config.max_sent_data())
.max_recv_online(self.protocol_config.max_recv_data_online())
.max_recv(self.protocol_config.max_recv_data());
if let Some(max_sent_records) = self.protocol_config.max_sent_records() {
builder.max_sent_records(max_sent_records);
}
if let Some(max_recv_records_online) = self.protocol_config.max_recv_records_online() {
builder.max_recv_records_online(max_recv_records_online);
}
if let NetworkSetting::Latency = self.protocol_config.network() {
builder.low_bandwidth();
}
builder.build().unwrap()
}
}
/// Configuration for the prover's TLS connection.
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct TlsConfig {
/// Root certificates.
root_store: Option<RootCertStore>,
/// Certificate chain and a matching private key for client
/// authentication.
client_auth: Option<(Vec<CertificateDer>, PrivateKeyDer)>,
}
impl TlsConfig {
/// Creates a new builder for `TlsConfig`.
pub fn builder() -> TlsConfigBuilder {
TlsConfigBuilder::default()
}
pub(crate) fn root_store(&self) -> Option<&RootCertStore> {
self.root_store.as_ref()
}
/// Returns a certificate chain and a matching private key for client
/// authentication.
pub fn client_auth(&self) -> &Option<(Vec<CertificateDer>, PrivateKeyDer)> {
&self.client_auth
}
}
/// Builder for [`TlsConfig`].
#[derive(Debug, Default)]
pub struct TlsConfigBuilder {
root_store: Option<RootCertStore>,
client_auth: Option<(Vec<CertificateDer>, PrivateKeyDer)>,
}
impl TlsConfigBuilder {
/// Sets the root certificates to use for verifying the server's
/// certificate.
pub fn root_store(&mut self, store: RootCertStore) -> &mut Self {
self.root_store = Some(store);
self
}
/// Sets a DER-encoded certificate chain and a matching private key for
/// client authentication.
///
/// Often the chain will consist of a single end-entity certificate.
///
/// # Arguments
///
/// * `cert_key` - A tuple containing the certificate chain and the private
/// key.
///
/// - Each certificate in the chain must be in the X.509 format.
/// - The key must be in the ASN.1 format (either PKCS#8 or PKCS#1).
pub fn client_auth(&mut self, cert_key: (Vec<CertificateDer>, PrivateKeyDer)) -> &mut Self {
self.client_auth = Some(cert_key);
self
}
/// Builds the TLS configuration.
pub fn build(self) -> Result<TlsConfig, TlsConfigError> {
Ok(TlsConfig {
root_store: self.root_store,
client_auth: self.client_auth,
})
}
}
/// TLS configuration error.
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct TlsConfigError(#[from] ErrorRepr);
#[derive(Debug, thiserror::Error)]
#[error("tls config error")]
enum ErrorRepr {}
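
A minimal sketch of assembling this configuration with the builders defined above; the server name and data limits are illustrative, and `tls_config` is left at its default, in which case the prover falls back to the bundled webpki roots (see the `connect` change above).

```rust
use tlsn::{config::ProtocolConfig, connection::ServerName, prover::ProverConfig};

fn example_prover_config() -> ProverConfig {
    ProverConfig::builder()
        // DNS name of the TLS server the prover will connect to.
        .server_name(ServerName::Dns("example.com".try_into().unwrap()))
        // Limits proposed to (and checked by) the verifier during setup.
        .protocol_config(
            ProtocolConfig::builder()
                .max_sent_data(1 << 12)
                .max_recv_data(1 << 14)
                .build()
                .unwrap(),
        )
        .build()
        .unwrap()
}
```

Client authentication can be attached with `TlsConfig::builder()` and `.client_auth((certs, key))`, where each certificate is DER-encoded X.509 and the key is PKCS#8 or PKCS#1, then passed to the prover via `.tls_config(...)`.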

View File

@@ -4,7 +4,7 @@ use mpc_tls::MpcTlsError;
use crate::transcript_internal::commit::encoding::EncodingError;
/// Error for [`Prover`](crate::prover::Prover).
/// Error for [`Prover`](crate::Prover).
#[derive(Debug, thiserror::Error)]
pub struct ProverError {
kind: ErrorKind,

View File

@@ -4,8 +4,7 @@ use mpz_memory_core::binary::Binary;
use mpz_vm_core::Vm;
use rangeset::{RangeSet, UnionMut};
use tlsn_core::{
ProverOutput,
config::prove::ProveConfig,
ProveConfig, ProverOutput,
transcript::{
ContentType, Direction, TlsTranscript, Transcript, TranscriptCommitment, TranscriptSecret,
},

View File

@@ -4,16 +4,13 @@ use std::sync::Arc;
use mpc_tls::{MpcTlsLeader, SessionKeys};
use mpz_common::Context;
use tlsn_core::{
connection::ServerName,
transcript::{TlsTranscript, Transcript},
};
use tlsn_core::transcript::{TlsTranscript, Transcript};
use tlsn_deap::Deap;
use tokio::sync::Mutex;
use crate::{
mpz::{ProverMpc, ProverZk},
mux::{MuxControl, MuxFuture},
prover::{Mpc, Zk},
};
/// Entry state
@@ -21,25 +18,23 @@ pub struct Initialized;
opaque_debug::implement!(Initialized);
/// State after the verifier has accepted the proposed TLS commitment protocol
/// configuration and preprocessing has completed.
pub struct CommitAccepted {
/// State after MPC setup has completed.
pub struct Setup {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) mpc_tls: MpcTlsLeader,
pub(crate) keys: SessionKeys,
pub(crate) vm: Arc<Mutex<Deap<ProverMpc, ProverZk>>>,
pub(crate) vm: Arc<Mutex<Deap<Mpc, Zk>>>,
}
opaque_debug::implement!(CommitAccepted);
opaque_debug::implement!(Setup);
/// State after the TLS transcript has been committed.
/// State after the TLS connection has been committed and closed.
pub struct Committed {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) ctx: Context,
pub(crate) vm: ProverZk,
pub(crate) server_name: ServerName,
pub(crate) vm: Zk,
pub(crate) keys: SessionKeys,
pub(crate) tls_transcript: TlsTranscript,
pub(crate) transcript: Transcript,
@@ -51,12 +46,12 @@ opaque_debug::implement!(Committed);
pub trait ProverState: sealed::Sealed {}
impl ProverState for Initialized {}
impl ProverState for CommitAccepted {}
impl ProverState for Setup {}
impl ProverState for Committed {}
mod sealed {
pub trait Sealed {}
impl Sealed for super::Initialized {}
impl Sealed for super::CommitAccepted {}
impl Sealed for super::Setup {}
impl Sealed for super::Committed {}
}

View File

@@ -112,7 +112,7 @@ impl TagProof {
.map_err(TagProofError::vm)?
.ok_or_else(|| ErrorRepr::NotDecoded)?;
let aad = make_tls12_aad(rec.seq, rec.typ.into(), vers, rec.ciphertext.len());
let aad = make_tls12_aad(rec.seq, rec.typ, vers, rec.ciphertext.len());
let ghash_tag = ghash(aad.as_ref(), &rec.ciphertext, &mac_key);

View File

@@ -5,7 +5,7 @@ use ctr::{
Ctr32BE,
cipher::{KeyIvInit, StreamCipher, StreamCipherSeek},
};
use mpz_circuits::{AES128, circuits::xor};
use mpz_circuits::circuits::{AES128, xor};
use mpz_core::bitvec::BitVec;
use mpz_memory_core::{
Array, DecodeFutureTyped, MemoryExt, Vector, ViewExt,

View File

@@ -177,10 +177,26 @@ pub(crate) trait KeyStore {
fn get_keys(&self, data: Vector<U8>) -> Option<&[Key]>;
}
impl KeyStore for crate::verifier::Zk {
fn delta(&self) -> &Delta {
crate::verifier::Zk::delta(self)
}
fn get_keys(&self, data: Vector<U8>) -> Option<&[Key]> {
self.get_keys(data).ok()
}
}
pub(crate) trait MacStore {
fn get_macs(&self, data: Vector<U8>) -> Option<&[Mac]>;
}
impl MacStore for crate::prover::Zk {
fn get_macs(&self, data: Vector<U8>) -> Option<&[Mac]> {
self.get_macs(data).ok()
}
}
#[derive(Debug)]
struct Provider {
sent: RangeMap<EncodingSlice>,

View File

@@ -3,7 +3,7 @@
use std::collections::HashMap;
use mpz_core::bitvec::BitVec;
use mpz_hash::{blake3::Blake3, keccak256::Keccak256, sha256::Sha256};
use mpz_hash::{blake3::Blake3, sha256::Sha256};
use mpz_memory_core::{
DecodeFutureTyped, MemoryExt, Vector,
binary::{Binary, U8},
@@ -111,7 +111,6 @@ pub(crate) fn verify_hash(
enum Hasher {
Sha256(Sha256),
Blake3(Blake3),
Keccak256(Keccak256),
}
/// Commit plaintext hashes of the transcript.
@@ -186,32 +185,6 @@ fn hash_commit_inner(
.map_err(HashCommitError::hasher)?;
hasher.finalize(vm).map_err(HashCommitError::hasher)?
}
HashAlgId::KECCAK256 => {
let mut hasher = if let Some(Hasher::Keccak256(hasher)) = hashers.get(&alg).cloned()
{
hasher
} else {
let hasher = Keccak256::new_with_init(vm).map_err(HashCommitError::hasher)?;
hashers.insert(alg, Hasher::Keccak256(hasher.clone()));
hasher
};
let refs = match direction {
Direction::Sent => &refs.sent,
Direction::Received => &refs.recv,
};
for range in idx.iter_ranges() {
hasher
.update(vm, &refs.get(range).expect("plaintext refs are valid"))
.map_err(HashCommitError::hasher)?;
}
hasher
.update(vm, &blinder)
.map_err(HashCommitError::hasher)?;
hasher.finalize(vm).map_err(HashCommitError::hasher)?
}
alg => {
return Err(HashCommitError::unsupported_alg(alg));
}

View File

@@ -1,37 +1,55 @@
//! Verifier.
pub(crate) mod config;
mod error;
pub mod state;
mod verify;
use std::sync::Arc;
pub use config::{VerifierConfig, VerifierConfigBuilder, VerifierConfigBuilderError};
pub use error::VerifierError;
pub use tlsn_core::{VerifierOutput, webpki::ServerCertVerifier};
use crate::{
Role,
config::ProtocolConfig,
context::build_mt_context,
mpz::{VerifierDeps, build_verifier_deps, translate_keys},
msg::{ProveRequestMsg, Response, TlsCommitRequestMsg},
msg::{Response, SetupRequest},
mux::attach_mux,
tag::verify_tags,
};
use futures::{AsyncRead, AsyncWrite, TryFutureExt};
use mpc_tls::{MpcTlsFollower, SessionKeys};
use mpz_common::Context;
use mpz_core::Block;
use mpz_garble_core::Delta;
use mpz_vm_core::prelude::*;
use mpz_zk::VerifierConfig as ZkVerifierConfig;
use serio::{SinkExt, stream::IoStreamExt};
use tlsn_core::{
config::{
prove::ProveRequest,
tls_commit::{TlsCommitProtocolConfig, TlsCommitRequest},
verifier::VerifierConfig,
},
ProveRequest,
connection::{ConnectionInfo, ServerName},
transcript::TlsTranscript,
};
use tlsn_deap::Deap;
use tokio::sync::Mutex;
use tracing::{Span, debug, info, info_span, instrument};
pub(crate) type RCOTSender = mpz_ot::rcot::shared::SharedRCOTSender<
mpz_ot::ferret::Sender<mpz_ot::kos::Sender<mpz_ot::chou_orlandi::Receiver>>,
mpz_core::Block,
>;
pub(crate) type RCOTReceiver = mpz_ot::rcot::shared::SharedRCOTReceiver<
mpz_ot::kos::Receiver<mpz_ot::chou_orlandi::Sender>,
bool,
mpz_core::Block,
>;
pub(crate) type Mpc =
mpz_garble::protocol::semihonest::Evaluator<mpz_ot::cot::DerandCOTReceiver<RCOTReceiver>>;
pub(crate) type Zk = mpz_zk::Verifier<RCOTSender>;
/// Information about the TLS session.
#[derive(Debug)]
pub struct SessionInfo {
@@ -59,31 +77,30 @@ impl Verifier<state::Initialized> {
}
}
/// Starts the TLS commitment protocol.
/// Sets up the verifier.
///
/// This initiates the TLS commitment protocol, receiving the prover's
/// configuration and providing the opportunity to accept or reject it.
/// This performs all MPC setup.
///
/// # Arguments
///
/// * `socket` - The socket to the prover.
#[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn commit<S: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
pub async fn setup<S: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
self,
socket: S,
) -> Result<Verifier<state::CommitStart>, VerifierError> {
) -> Result<Verifier<state::Config>, VerifierError> {
let (mut mux_fut, mux_ctrl) = attach_mux(socket, Role::Verifier);
let mut mt = build_mt_context(mux_ctrl.clone());
let mut ctx = mux_fut.poll_with(mt.new_context()).await?;
// Receives protocol configuration from prover to perform compatibility check.
let TlsCommitRequestMsg { request, version } =
let SetupRequest { config, version } =
mux_fut.poll_with(ctx.io_mut().expect_next()).await?;
if version != *crate::VERSION {
if version != *crate::config::VERSION {
let msg = format!(
"prover version does not match with verifier: {version} != {}",
*crate::VERSION
*crate::config::VERSION
);
mux_fut
.poll_with(ctx.io_mut().send(Response::err(Some(msg.clone()))))
@@ -101,44 +118,40 @@ impl Verifier<state::Initialized> {
Ok(Verifier {
config: self.config,
span: self.span,
state: state::CommitStart {
state: state::Config {
mux_ctrl,
mux_fut,
ctx,
request,
config,
},
})
}
}
impl Verifier<state::CommitStart> {
/// Returns the TLS commitment request.
pub fn request(&self) -> &TlsCommitRequest {
&self.state.request
impl Verifier<state::Config> {
/// Returns the proposed protocol configuration.
pub fn config(&self) -> &ProtocolConfig {
&self.state.config
}
/// Accepts the proposed protocol configuration.
#[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn accept(self) -> Result<Verifier<state::CommitAccepted>, VerifierError> {
let state::CommitStart {
pub async fn accept(self) -> Result<Verifier<state::Setup>, VerifierError> {
let state::Config {
mux_ctrl,
mut mux_fut,
mut ctx,
request,
config,
} = self.state;
mux_fut.poll_with(ctx.io_mut().send(Response::ok())).await?;
let TlsCommitProtocolConfig::Mpc(mpc_tls_config) = request.protocol().clone() else {
unreachable!("only MPC TLS is supported");
};
let VerifierDeps { vm, mut mpc_tls } = build_verifier_deps(mpc_tls_config, ctx);
let (vm, mut mpc_tls) = build_mpc_tls(&self.config, &config, ctx);
// Allocate resources for MPC-TLS in the VM.
let mut keys = mpc_tls.alloc()?;
let vm_lock = vm.try_lock().expect("VM is not locked");
translate_keys(&mut keys, &vm_lock);
translate_keys(&mut keys, &vm_lock)?;
drop(vm_lock);
debug!("setting up mpc-tls");
@@ -150,7 +163,7 @@ impl Verifier<state::CommitStart> {
Ok(Verifier {
config: self.config,
span: self.span,
state: state::CommitAccepted {
state: state::Setup {
mux_ctrl,
mux_fut,
mpc_tls,
@@ -163,7 +176,7 @@ impl Verifier<state::CommitStart> {
/// Rejects the proposed protocol configuration.
#[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn reject(self, msg: Option<&str>) -> Result<(), VerifierError> {
let state::CommitStart {
let state::Config {
mux_ctrl,
mut mux_fut,
mut ctx,
@@ -184,11 +197,11 @@ impl Verifier<state::CommitStart> {
}
}
impl Verifier<state::CommitAccepted> {
impl Verifier<state::Setup> {
/// Runs the verifier until the TLS connection is closed.
#[instrument(parent = &self.span, level = "info", skip_all, err)]
pub async fn run(self) -> Result<Verifier<state::Committed>, VerifierError> {
let state::CommitAccepted {
let state::Setup {
mux_ctrl,
mut mux_fut,
mpc_tls,
@@ -274,11 +287,7 @@ impl Verifier<state::Committed> {
tls_transcript,
} = self.state;
let ProveRequestMsg {
request,
handshake,
transcript,
} = mux_fut
let request = mux_fut
.poll_with(ctx.io_mut().expect_next().map_err(VerifierError::from))
.await?;
@@ -293,8 +302,6 @@ impl Verifier<state::Committed> {
keys,
tls_transcript,
request,
handshake,
transcript,
},
})
}
@@ -334,14 +341,15 @@ impl Verifier<state::Verify> {
keys,
tls_transcript,
request,
handshake,
transcript,
} = self.state;
mux_fut.poll_with(ctx.io_mut().send(Response::ok())).await?;
let cert_verifier =
ServerCertVerifier::new(self.config.root_store()).map_err(VerifierError::config)?;
let cert_verifier = if let Some(root_store) = self.config.root_store() {
ServerCertVerifier::new(root_store).map_err(VerifierError::config)?
} else {
ServerCertVerifier::mozilla()
};
let output = mux_fut
.poll_with(verify::verify(
@@ -351,8 +359,6 @@ impl Verifier<state::Verify> {
&cert_verifier,
&tls_transcript,
request,
handshake,
transcript,
))
.await?;
@@ -406,3 +412,74 @@ impl Verifier<state::Verify> {
})
}
}
fn build_mpc_tls(
config: &VerifierConfig,
protocol_config: &ProtocolConfig,
ctx: Context,
) -> (Arc<Mutex<Deap<Mpc, Zk>>>, MpcTlsFollower) {
let mut rng = rand::rng();
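// Correlation (delta) shared by the ZK verifier and the KOS OT sender.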
let delta = Delta::random(&mut rng);
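// Chou-Orlandi base OTs extended with KOS; the sending side is further expanded with Ferret (LPN).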
let base_ot_send = mpz_ot::chou_orlandi::Sender::default();
let base_ot_recv = mpz_ot::chou_orlandi::Receiver::default();
let rcot_send = mpz_ot::kos::Sender::new(
mpz_ot::kos::SenderConfig::default(),
delta.into_inner(),
base_ot_recv,
);
let rcot_send = mpz_ot::ferret::Sender::new(
mpz_ot::ferret::FerretConfig::builder()
.lpn_type(mpz_ot::ferret::LpnType::Regular)
.build()
.expect("ferret config is valid"),
Block::random(&mut rng),
rcot_send,
);
let rcot_recv =
mpz_ot::kos::Receiver::new(mpz_ot::kos::ReceiverConfig::default(), base_ot_send);
let rcot_send = mpz_ot::rcot::shared::SharedRCOTSender::new(rcot_send);
let rcot_recv = mpz_ot::rcot::shared::SharedRCOTReceiver::new(rcot_recv);
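// DEAP follower: evaluates the garbled circuits and acts as the ZK verifier.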
let mpc = Mpc::new(mpz_ot::cot::DerandCOTReceiver::new(rcot_recv.clone()));
let zk = Zk::new(ZkVerifierConfig::default(), delta, rcot_send.clone());
let vm = Arc::new(Mutex::new(Deap::new(tlsn_deap::Role::Follower, mpc, zk)));
(
vm.clone(),
MpcTlsFollower::new(
config.build_mpc_tls_config(protocol_config),
ctx,
vm,
rcot_send,
(rcot_recv.clone(), rcot_recv.clone(), rcot_recv),
),
)
}
/// Translates VM references to the ZK address space.
fn translate_keys<Mpc, Zk>(
keys: &mut SessionKeys,
vm: &Deap<Mpc, Zk>,
) -> Result<(), VerifierError> {
keys.client_write_key = vm
.translate(keys.client_write_key)
.map_err(VerifierError::mpc)?;
keys.client_write_iv = vm
.translate(keys.client_write_iv)
.map_err(VerifierError::mpc)?;
keys.server_write_key = vm
.translate(keys.server_write_key)
.map_err(VerifierError::mpc)?;
keys.server_write_iv = vm
.translate(keys.server_write_iv)
.map_err(VerifierError::mpc)?;
keys.server_write_mac_key = vm
.translate(keys.server_write_mac_key)
.map_err(VerifierError::mpc)?;
Ok(())
}
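
A minimal sketch of the verifier side under the new state machine (Initialized -> Config -> Setup -> Committed), assuming an established transport to the prover; the limit used in the acceptance check is illustrative and the final verification of the ProveRequest is elided.

```rust
use futures::{AsyncRead, AsyncWrite};
use tlsn::verifier::{Verifier, VerifierConfig, VerifierError};

async fn run_verifier<S>(config: VerifierConfig, prover_io: S) -> Result<(), VerifierError>
where
    S: AsyncRead + AsyncWrite + Send + Unpin + 'static,
{
    // Setup receives the prover's proposed ProtocolConfig.
    let verifier = Verifier::new(config).setup(prover_io).await?;

    // Inspect the proposal and either reject it with a reason or accept it.
    if verifier.config().max_sent_data() > (1 << 12) {
        return verifier.reject(Some("max_sent_data is too large")).await;
    }
    let verifier = verifier.accept().await?;

    // Run the MPC-TLS follower until the prover closes the TLS connection;
    // verification of the subsequent ProveRequest continues from here.
    let _verifier = verifier.run().await?;
    Ok(())
}
```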

View File

@@ -0,0 +1,57 @@
use std::fmt::{Debug, Formatter, Result};
use mpc_tls::Config;
use serde::{Deserialize, Serialize};
use tlsn_core::webpki::RootCertStore;
use crate::config::{NetworkSetting, ProtocolConfig};
/// Configuration for the [`Verifier`](crate::Verifier).
#[allow(missing_docs)]
#[derive(derive_builder::Builder, Serialize, Deserialize)]
#[builder(pattern = "owned")]
pub struct VerifierConfig {
#[builder(default, setter(strip_option))]
root_store: Option<RootCertStore>,
}
impl Debug for VerifierConfig {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
f.debug_struct("VerifierConfig").finish_non_exhaustive()
}
}
impl VerifierConfig {
/// Creates a new configuration builder.
pub fn builder() -> VerifierConfigBuilder {
VerifierConfigBuilder::default()
}
/// Returns the root certificate store.
pub fn root_store(&self) -> Option<&RootCertStore> {
self.root_store.as_ref()
}
pub(crate) fn build_mpc_tls_config(&self, protocol_config: &ProtocolConfig) -> Config {
let mut builder = Config::builder();
builder
.max_sent(protocol_config.max_sent_data())
.max_recv_online(protocol_config.max_recv_data_online())
.max_recv(protocol_config.max_recv_data());
if let Some(max_sent_records) = protocol_config.max_sent_records() {
builder.max_sent_records(max_sent_records);
}
if let Some(max_recv_records_online) = protocol_config.max_recv_records_online() {
builder.max_recv_records_online(max_recv_records_online);
}
if let NetworkSetting::Latency = protocol_config.network() {
builder.low_bandwidth();
}
builder.build().unwrap()
}
}
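
A sketch of constructing this configuration: with no root store set, the verifier falls back to `ServerCertVerifier::mozilla()` when checking the server certificate (see the `verify` path above); a custom store can be pinned with `.root_store(store)`.

```rust
use tlsn::verifier::VerifierConfig;

fn example_verifier_config() -> VerifierConfig {
    // Defaults only: no pinned roots, so the Mozilla root store is used for verification.
    VerifierConfig::builder().build().unwrap()
}
```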

View File

@@ -4,7 +4,7 @@ use mpc_tls::MpcTlsError;
use crate::transcript_internal::commit::encoding::EncodingError;
/// Error for [`Verifier`](crate::verifier::Verifier).
/// Error for [`Verifier`](crate::Verifier).
#[derive(Debug, thiserror::Error)]
pub struct VerifierError {
kind: ErrorKind,

View File

@@ -2,18 +2,17 @@
use std::sync::Arc;
use crate::mux::{MuxControl, MuxFuture};
use crate::{
config::ProtocolConfig,
mux::{MuxControl, MuxFuture},
};
use mpc_tls::{MpcTlsFollower, SessionKeys};
use mpz_common::Context;
use tlsn_core::{
config::{prove::ProveRequest, tls_commit::TlsCommitRequest},
connection::{HandshakeData, ServerName},
transcript::{PartialTranscript, TlsTranscript},
};
use tlsn_core::{ProveRequest, transcript::TlsTranscript};
use tlsn_deap::Deap;
use tokio::sync::Mutex;
use crate::mpz::{VerifierMpc, VerifierZk};
use crate::verifier::{Mpc, Zk};
/// TLS Verifier state.
pub trait VerifierState: sealed::Sealed {}
@@ -24,33 +23,32 @@ pub struct Initialized;
opaque_debug::implement!(Initialized);
/// State after receiving protocol configuration from the prover.
pub struct CommitStart {
pub struct Config {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) ctx: Context,
pub(crate) request: TlsCommitRequest,
pub(crate) config: ProtocolConfig,
}
opaque_debug::implement!(CommitStart);
opaque_debug::implement!(Config);
/// State after accepting the proposed TLS commitment protocol configuration and
/// performing preprocessing.
pub struct CommitAccepted {
/// State after setup has completed.
pub struct Setup {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) mpc_tls: MpcTlsFollower,
pub(crate) keys: SessionKeys,
pub(crate) vm: Arc<Mutex<Deap<VerifierMpc, VerifierZk>>>,
pub(crate) vm: Arc<Mutex<Deap<Mpc, Zk>>>,
}
opaque_debug::implement!(CommitAccepted);
opaque_debug::implement!(Setup);
/// State after the TLS transcript has been committed.
/// State after the TLS connection has been closed.
pub struct Committed {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) ctx: Context,
pub(crate) vm: VerifierZk,
pub(crate) vm: Zk,
pub(crate) keys: SessionKeys,
pub(crate) tls_transcript: TlsTranscript,
}
@@ -62,27 +60,25 @@ pub struct Verify {
pub(crate) mux_ctrl: MuxControl,
pub(crate) mux_fut: MuxFuture,
pub(crate) ctx: Context,
pub(crate) vm: VerifierZk,
pub(crate) vm: Zk,
pub(crate) keys: SessionKeys,
pub(crate) tls_transcript: TlsTranscript,
pub(crate) request: ProveRequest,
pub(crate) handshake: Option<(ServerName, HandshakeData)>,
pub(crate) transcript: Option<PartialTranscript>,
}
opaque_debug::implement!(Verify);
impl VerifierState for Initialized {}
impl VerifierState for CommitStart {}
impl VerifierState for CommitAccepted {}
impl VerifierState for Config {}
impl VerifierState for Setup {}
impl VerifierState for Committed {}
impl VerifierState for Verify {}
mod sealed {
pub trait Sealed {}
impl Sealed for super::Initialized {}
impl Sealed for super::CommitStart {}
impl Sealed for super::CommitAccepted {}
impl Sealed for super::Config {}
impl Sealed for super::Setup {}
impl Sealed for super::Committed {}
impl Sealed for super::Verify {}
}

View File

@@ -4,9 +4,7 @@ use mpz_memory_core::binary::Binary;
use mpz_vm_core::Vm;
use rangeset::{RangeSet, UnionMut};
use tlsn_core::{
VerifierOutput,
config::prove::ProveRequest,
connection::{HandshakeData, ServerName},
ProveRequest, VerifierOutput,
transcript::{
ContentType, Direction, PartialTranscript, Record, TlsTranscript, TranscriptCommitment,
},
@@ -25,7 +23,6 @@ use crate::{
verifier::VerifierError,
};
#[allow(clippy::too_many_arguments)]
pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
ctx: &mut Context,
vm: &mut T,
@@ -33,19 +30,18 @@ pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
cert_verifier: &ServerCertVerifier,
tls_transcript: &TlsTranscript,
request: ProveRequest,
handshake: Option<(ServerName, HandshakeData)>,
transcript: Option<PartialTranscript>,
) -> Result<VerifierOutput, VerifierError> {
let ProveRequest {
handshake,
transcript,
transcript_commit,
} = request;
let ciphertext_sent = collect_ciphertext(tls_transcript.sent());
let ciphertext_recv = collect_ciphertext(tls_transcript.recv());
let transcript = if let Some((auth_sent, auth_recv)) = request.reveal() {
let Some(transcript) = transcript else {
return Err(VerifierError::verify(
"prover requested to reveal data but did not send transcript",
));
};
let has_reveal = transcript.is_some();
let transcript = if let Some(transcript) = transcript {
if transcript.len_sent() != ciphertext_sent.len()
|| transcript.len_received() != ciphertext_recv.len()
{
@@ -54,18 +50,6 @@ pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
));
}
if transcript.sent_authed() != auth_sent {
return Err(VerifierError::verify(
"prover sent transcript with incorrect sent authed data",
));
}
if transcript.received_authed() != auth_recv {
return Err(VerifierError::verify(
"prover sent transcript with incorrect received authed data",
));
}
transcript
} else {
PartialTranscript::new(ciphertext_sent.len(), ciphertext_recv.len())
@@ -87,7 +71,7 @@ pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
};
let (mut commit_sent, mut commit_recv) = (RangeSet::default(), RangeSet::default());
if let Some(commit_config) = request.transcript_commit() {
if let Some(commit_config) = transcript_commit.as_ref() {
commit_config
.iter_hash()
.for_each(|(direction, idx, _)| match direction {
@@ -137,7 +121,7 @@ pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
let mut transcript_commitments = Vec::new();
let mut hash_commitments = None;
if let Some(commit_config) = request.transcript_commit()
if let Some(commit_config) = transcript_commit.as_ref()
&& commit_config.has_hash()
{
hash_commitments = Some(
@@ -152,7 +136,7 @@ pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
recv_proof.verify().map_err(VerifierError::verify)?;
let mut encoder_secret = None;
if let Some(commit_config) = request.transcript_commit()
if let Some(commit_config) = transcript_commit
&& let Some((sent, recv)) = commit_config.encoding()
{
let sent_map = transcript_refs
@@ -177,7 +161,7 @@ pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
Ok(VerifierOutput {
server_name,
transcript: request.reveal().is_some().then_some(transcript),
transcript: has_reveal.then_some(transcript),
encoder_secret,
transcript_commitments,
})

View File

@@ -1,22 +1,15 @@
use futures::{AsyncReadExt, AsyncWriteExt};
use rangeset::RangeSet;
use tlsn::{
config::{
prove::ProveConfig,
prover::ProverConfig,
tls::TlsClientConfig,
tls_commit::{TlsCommitConfig, mpc::MpcTlsConfig},
verifier::VerifierConfig,
},
config::{CertificateDer, ProtocolConfig, RootCertStore},
connection::ServerName,
hash::{HashAlgId, HashProvider},
prover::Prover,
prover::{ProveConfig, Prover, ProverConfig, TlsConfig},
transcript::{
Direction, Transcript, TranscriptCommitConfig, TranscriptCommitment,
TranscriptCommitmentKind, TranscriptSecret,
},
verifier::{Verifier, VerifierOutput},
webpki::{CertificateDer, RootCertStore},
verifier::{Verifier, VerifierConfig, VerifierOutput},
};
use tlsn_core::ProverOutput;
use tlsn_server_fixture::bind;
@@ -120,38 +113,35 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
let server_task = tokio::spawn(bind(server_socket.compat()));
let prover = Prover::new(ProverConfig::builder().build().unwrap())
.commit(
TlsCommitConfig::builder()
.protocol(
MpcTlsConfig::builder()
.max_sent_data(MAX_SENT_DATA)
.max_sent_records(MAX_SENT_RECORDS)
.max_recv_data(MAX_RECV_DATA)
.max_recv_records_online(MAX_RECV_RECORDS)
.build()
.unwrap(),
)
.build()
.unwrap(),
verifier_socket.compat(),
)
.await
.unwrap();
let mut tls_config_builder = TlsConfig::builder();
tls_config_builder.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
});
let (mut tls_connection, prover_fut) = prover
.connect(
TlsClientConfig::builder()
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.build()
.unwrap(),
client_socket.compat(),
)
.await
.unwrap();
let tls_config = tls_config_builder.build().unwrap();
let server_name = ServerName::Dns(SERVER_DOMAIN.try_into().unwrap());
let prover = Prover::new(
ProverConfig::builder()
.server_name(server_name)
.tls_config(tls_config)
.protocol_config(
ProtocolConfig::builder()
.max_sent_data(MAX_SENT_DATA)
.max_sent_records(MAX_SENT_RECORDS)
.max_recv_data(MAX_RECV_DATA)
.max_recv_records_online(MAX_RECV_RECORDS)
.build()
.unwrap(),
)
.build()
.unwrap(),
)
.setup(verifier_socket.compat())
.await
.unwrap();
let (mut tls_connection, prover_fut) = prover.connect(client_socket.compat()).await.unwrap();
let prover_task = tokio::spawn(prover_fut);
tls_connection
@@ -224,7 +214,7 @@ async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
);
let verifier = verifier
.commit(socket.compat())
.setup(socket.compat())
.await
.unwrap()
.accept()

View File

@@ -21,7 +21,7 @@ no-bundler = ["web-spawn/no-bundler"]
[dependencies]
tlsn-core = { workspace = true }
tlsn = { workspace = true, features = ["web", "mozilla-certs"] }
tlsn = { workspace = true, features = ["web"] }
tlsn-server-fixture-certs = { workspace = true }
tlsn-tls-client-async = { workspace = true }
tlsn-tls-core = { workspace = true }

View File

@@ -1,6 +1,11 @@
use crate::types::NetworkSetting;
use serde::Deserialize;
use tlsn::{
config::{CertificateDer, PrivateKeyDer, ProtocolConfig},
connection::ServerName,
};
use tsify_next::Tsify;
use wasm_bindgen::JsError;
#[derive(Debug, Tsify, Deserialize)]
#[tsify(from_wasm_abi)]
@@ -15,3 +20,66 @@ pub struct ProverConfig {
pub network: NetworkSetting,
pub client_auth: Option<(Vec<Vec<u8>>, Vec<u8>)>,
}
impl TryFrom<ProverConfig> for tlsn::prover::ProverConfig {
type Error = JsError;
fn try_from(value: ProverConfig) -> Result<Self, Self::Error> {
let mut builder = ProtocolConfig::builder();
builder.max_sent_data(value.max_sent_data);
builder.max_recv_data(value.max_recv_data);
if let Some(value) = value.max_recv_data_online {
builder.max_recv_data_online(value);
}
if let Some(value) = value.max_sent_records {
builder.max_sent_records(value);
}
if let Some(value) = value.max_recv_records_online {
builder.max_recv_records_online(value);
}
if let Some(value) = value.defer_decryption_from_start {
builder.defer_decryption_from_start(value);
}
builder.network(value.network.into());
let protocol_config = builder.build().unwrap();
let mut builder = tlsn::prover::TlsConfig::builder();
if let Some((certs, key)) = value.client_auth {
let certs = certs
.into_iter()
.map(|cert| {
// Try to parse as PEM-encoded, otherwise assume DER.
if let Ok(cert) = CertificateDer::from_pem_slice(&cert) {
cert
} else {
CertificateDer(cert)
}
})
.collect();
let key = PrivateKeyDer(key);
builder.client_auth((certs, key));
}
let tls_config = builder.build().unwrap();
let server_name = ServerName::Dns(
value
.server_name
.try_into()
.map_err(|_| JsError::new("invalid server name"))?,
);
let mut builder = tlsn::prover::ProverConfig::builder();
builder
.server_name(server_name)
.protocol_config(protocol_config)
.tls_config(tls_config);
Ok(builder.build().unwrap())
}
}

View File

@@ -7,16 +7,7 @@ use futures::TryFutureExt;
use http_body_util::{BodyExt, Full};
use hyper::body::Bytes;
use tls_client_async::TlsConnection;
use tlsn::{
config::{
prove::ProveConfig,
tls::TlsClientConfig,
tls_commit::{mpc::MpcTlsConfig, TlsCommitConfig},
},
connection::ServerName,
prover::{state, Prover},
webpki::{CertificateDer, PrivateKeyDer, RootCertStore},
};
use tlsn::prover::{state, ProveConfig, Prover};
use tracing::info;
use wasm_bindgen::{prelude::*, JsError};
use wasm_bindgen_futures::spawn_local;
@@ -28,7 +19,6 @@ type Result<T> = std::result::Result<T, JsError>;
#[wasm_bindgen(js_name = Prover)]
pub struct JsProver {
config: ProverConfig,
state: State,
}
@@ -36,7 +26,7 @@ pub struct JsProver {
#[derive_err(Debug)]
enum State {
Initialized(Prover<state::Initialized>),
CommitAccepted(Prover<state::CommitAccepted>),
Setup(Prover<state::Setup>),
Committed(Prover<state::Committed>),
Complete,
Error,
@@ -53,10 +43,7 @@ impl JsProver {
#[wasm_bindgen(constructor)]
pub fn new(config: ProverConfig) -> Result<JsProver> {
Ok(JsProver {
config,
state: State::Initialized(Prover::new(
tlsn::config::prover::ProverConfig::builder().build()?,
)),
state: State::Initialized(Prover::new(config.try_into()?)),
})
}
@@ -67,41 +54,15 @@ impl JsProver {
pub async fn setup(&mut self, verifier_url: &str) -> Result<()> {
let prover = self.state.take().try_into_initialized()?;
let config = TlsCommitConfig::builder()
.protocol({
let mut builder = MpcTlsConfig::builder()
.max_sent_data(self.config.max_sent_data)
.max_recv_data(self.config.max_recv_data);
if let Some(value) = self.config.max_recv_data_online {
builder = builder.max_recv_data_online(value);
}
if let Some(value) = self.config.max_sent_records {
builder = builder.max_sent_records(value);
}
if let Some(value) = self.config.max_recv_records_online {
builder = builder.max_recv_records_online(value);
}
if let Some(value) = self.config.defer_decryption_from_start {
builder = builder.defer_decryption_from_start(value);
}
builder.network(self.config.network.into()).build()
}?)
.build()?;
info!("connecting to verifier");
let (_, verifier_conn) = WsMeta::connect(verifier_url, None).await?;
info!("connected to verifier");
let prover = prover.commit(config, verifier_conn.into_io()).await?;
let prover = prover.setup(verifier_conn.into_io()).await?;
self.state = State::CommitAccepted(prover);
self.state = State::Setup(prover);
Ok(())
}
@@ -112,35 +73,7 @@ impl JsProver {
ws_proxy_url: &str,
request: HttpRequest,
) -> Result<HttpResponse> {
let prover = self.state.take().try_into_commit_accepted()?;
let mut builder = TlsClientConfig::builder()
.server_name(ServerName::Dns(
self.config
.server_name
.clone()
.try_into()
.map_err(|_| JsError::new("invalid server name"))?,
))
.root_store(RootCertStore::mozilla());
if let Some((certs, key)) = self.config.client_auth.clone() {
let certs = certs
.into_iter()
.map(|cert| {
// Try to parse as PEM-encoded, otherwise assume DER.
if let Ok(cert) = CertificateDer::from_pem_slice(&cert) {
cert
} else {
CertificateDer(cert)
}
})
.collect();
let key = PrivateKeyDer(key);
builder = builder.client_auth((certs, key));
}
let config = builder.build()?;
let prover = self.state.take().try_into_setup()?;
info!("connecting to server");
@@ -148,7 +81,7 @@ impl JsProver {
info!("connected to server");
let (tls_conn, prover_fut) = prover.connect(config, server_conn.into_io()).await?;
let (tls_conn, prover_fut) = prover.connect(server_conn.into_io()).await?;
info!("sending request");
@@ -204,6 +137,14 @@ impl JsProver {
}
}
impl From<Prover<state::Initialized>> for JsProver {
fn from(value: Prover<state::Initialized>) -> Self {
JsProver {
state: State::Initialized(value),
}
}
}
async fn send_request(conn: TlsConnection, request: HttpRequest) -> Result<HttpResponse> {
let conn = FuturesIo::new(conn);
let request = hyper::Request::<Full<Bytes>>::try_from(request)?;

View File

@@ -181,7 +181,7 @@ pub struct VerifierOutput {
pub transcript: Option<PartialTranscript>,
}
#[derive(Debug, Clone, Copy, Tsify, Deserialize)]
#[derive(Debug, Tsify, Deserialize)]
#[tsify(from_wasm_abi)]
pub enum NetworkSetting {
/// Prefers a bandwidth-heavy protocol.
@@ -190,7 +190,7 @@ pub enum NetworkSetting {
Latency,
}
impl From<NetworkSetting> for tlsn::config::tls_commit::mpc::NetworkSetting {
impl From<NetworkSetting> for tlsn::config::NetworkSetting {
fn from(value: NetworkSetting) -> Self {
match value {
NetworkSetting::Bandwidth => Self::Bandwidth,

View File

@@ -3,12 +3,10 @@ mod config;
pub use config::VerifierConfig;
use enum_try_as_inner::EnumTryAsInner;
use tls_core::msgs::enums::ContentType;
use tlsn::{
config::tls_commit::TlsCommitProtocolConfig,
connection::{ConnectionInfo, ServerName, TranscriptLength},
transcript::ContentType,
verifier::{state, Verifier},
webpki::RootCertStore,
};
use tracing::info;
use wasm_bindgen::prelude::*;
@@ -49,10 +47,7 @@ impl State {
impl JsVerifier {
#[wasm_bindgen(constructor)]
pub fn new(config: VerifierConfig) -> JsVerifier {
let tlsn_config = tlsn::config::verifier::VerifierConfig::builder()
.root_store(RootCertStore::mozilla())
.build()
.unwrap();
let tlsn_config = tlsn::verifier::VerifierConfig::builder().build().unwrap();
JsVerifier {
state: State::Initialized(Verifier::new(tlsn_config)),
config,
@@ -78,20 +73,16 @@ impl JsVerifier {
pub async fn verify(&mut self) -> Result<VerifierOutput> {
let (verifier, prover_conn) = self.state.take().try_into_connected()?;
let verifier = verifier.commit(prover_conn.into_io()).await?;
let request = verifier.request();
let verifier = verifier.setup(prover_conn.into_io()).await?;
let config = verifier.config();
let TlsCommitProtocolConfig::Mpc(mpc_tls_config) = request.protocol() else {
unimplemented!("only MPC protocol is supported");
};
let reject = if mpc_tls_config.max_sent_data() > self.config.max_sent_data {
let reject = if config.max_sent_data() > self.config.max_sent_data {
Some("max_sent_data is too large")
} else if mpc_tls_config.max_recv_data() > self.config.max_recv_data {
} else if config.max_recv_data() > self.config.max_recv_data {
Some("max_recv_data is too large")
} else if mpc_tls_config.max_sent_records() > self.config.max_sent_records {
} else if config.max_sent_records() > self.config.max_sent_records {
Some("max_sent_records is too large")
} else if mpc_tls_config.max_recv_records_online() > self.config.max_recv_records_online {
} else if config.max_recv_records_online() > self.config.max_recv_records_online {
Some("max_recv_records_online is too large")
} else {
None