Mirror of https://github.com/tlsnotary/tlsn.git, synced 2026-01-11 15:47:58 -05:00

Compare commits: refactor/r...blake3_exa (3 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 5e6c5c13aa | |
| | 4ab73bdfb5 | |
| | a8fa57f2cb | |

.github/workflows/ci.yml (vendored, 5 changed lines)
@@ -21,8 +21,7 @@ env:
# - https://github.com/privacy-ethereum/mpz/issues/178
# 32 seems to be big enough for the foreseeable future
RAYON_NUM_THREADS: 32
RUST_VERSION: 1.91.1
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
RUST_VERSION: 1.90.0

jobs:
clippy:

@@ -42,7 +41,7 @@ jobs:
uses: Swatinem/rust-cache@v2.7.7

- name: Clippy
run: cargo clippy --keep-going --all-features --all-targets --locked
run: cargo clippy --keep-going --all-features --all-targets --locked -- -D warnings

fmt:
name: Check formatting
.github/workflows/releng.yml (vendored, 2 changed lines)

@@ -6,7 +6,7 @@ on:
tag:
description: 'Tag to publish to NPM'
required: true
default: 'v0.1.0-alpha.14-pre'
default: 'v0.1.0-alpha.13-pre'

jobs:
release:

.github/workflows/updatemain.yml (vendored, 1 changed line)

@@ -1,7 +1,6 @@
name: Fast-forward main branch to published release tag

on:
workflow_dispatch:
release:
types: [published]

Cargo.lock (generated, 834 changed lines): file diff suppressed because it is too large.
Cargo.toml (31 changed lines)

@@ -13,6 +13,7 @@ members = [
"crates/server-fixture/server",
"crates/tls/backend",
"crates/tls/client",
"crates/tls/client-async",
"crates/tls/core",
"crates/mpc-tls",
"crates/tls/server-fixture",

@@ -56,6 +57,7 @@ tlsn-server-fixture = { path = "crates/server-fixture/server" }
tlsn-server-fixture-certs = { path = "crates/server-fixture/certs" }
tlsn-tls-backend = { path = "crates/tls/backend" }
tlsn-tls-client = { path = "crates/tls/client" }
tlsn-tls-client-async = { path = "crates/tls/client-async" }
tlsn-tls-core = { path = "crates/tls/core" }
tlsn-utils = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
tlsn-harness-core = { path = "crates/harness/core" }

@@ -64,23 +66,20 @@ tlsn-harness-runner = { path = "crates/harness/runner" }
tlsn-wasm = { path = "crates/wasm" }
tlsn = { path = "crates/tlsn" }

mpz-circuits = { git = "https://github.com/privacy-ethereum/mpz", rev = "bd80826" }
mpz-circuits-data = { git = "https://github.com/privacy-ethereum/mpz", rev = "bd80826" }
mpz-memory-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "bd80826" }
mpz-common = { git = "https://github.com/privacy-ethereum/mpz", rev = "bd80826" }
mpz-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "bd80826" }
mpz-vm-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "bd80826" }
mpz-garble = { git = "https://github.com/privacy-ethereum/mpz", rev = "bd80826" }
mpz-garble-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "bd80826" }
mpz-ole = { git = "https://github.com/privacy-ethereum/mpz", rev = "bd80826" }
mpz-ot = { git = "https://github.com/privacy-ethereum/mpz", rev = "bd80826" }
mpz-share-conversion = { git = "https://github.com/privacy-ethereum/mpz", rev = "bd80826" }
mpz-fields = { git = "https://github.com/privacy-ethereum/mpz", rev = "bd80826" }
mpz-zk = { git = "https://github.com/privacy-ethereum/mpz", rev = "bd80826" }
mpz-hash = { git = "https://github.com/privacy-ethereum/mpz", rev = "bd80826" }
mpz-ideal-vm = { git = "https://github.com/privacy-ethereum/mpz", rev = "bd80826" }
mpz-circuits = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-memory-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-common = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-vm-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-garble = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-garble-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-ole = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-ot = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-share-conversion = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-fields = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-zk = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-hash = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }

futures-plex = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "0b46dc0" }
rangeset = { version = "0.2" }
serio = { version = "0.2" }
spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
@@ -1,6 +1,6 @@
[package]
name = "tlsn-attestation"
version = "0.1.0-alpha.14-pre"
version = "0.1.0-alpha.13-pre"
edition = "2024"

[features]

@@ -9,7 +9,7 @@ fixtures = ["tlsn-core/fixtures", "dep:tlsn-data-fixtures"]

[dependencies]
tlsn-tls-core = { workspace = true }
tlsn-core = { workspace = true, features = ["mozilla-certs"] }
tlsn-core = { workspace = true }
tlsn-data-fixtures = { workspace = true, optional = true }

bcs = { workspace = true }
@@ -5,7 +5,7 @@ use rand::{Rng, rng};
use tlsn_core::{
connection::{ConnectionInfo, ServerEphemKey},
hash::HashAlgId,
transcript::{TranscriptCommitment, encoding::EncoderSecret},
transcript::TranscriptCommitment,
};

use crate::{

@@ -25,7 +25,6 @@ pub struct Sign {
connection_info: Option<ConnectionInfo>,
server_ephemeral_key: Option<ServerEphemKey>,
cert_commitment: ServerCertCommitment,
encoder_secret: Option<EncoderSecret>,
extensions: Vec<Extension>,
transcript_commitments: Vec<TranscriptCommitment>,
}

@@ -87,7 +86,6 @@ impl<'a> AttestationBuilder<'a, Accept> {
connection_info: None,
server_ephemeral_key: None,
cert_commitment,
encoder_secret: None,
transcript_commitments: Vec::new(),
extensions,
},

@@ -108,12 +106,6 @@ impl AttestationBuilder<'_, Sign> {
self
}

/// Sets the secret for encoding commitments.
pub fn encoder_secret(&mut self, secret: EncoderSecret) -> &mut Self {
self.state.encoder_secret = Some(secret);
self
}

/// Adds an extension to the attestation.
pub fn extension(&mut self, extension: Extension) -> &mut Self {
self.state.extensions.push(extension);

@@ -137,7 +129,6 @@ impl AttestationBuilder<'_, Sign> {
connection_info,
server_ephemeral_key,
cert_commitment,
encoder_secret,
extensions,
transcript_commitments,
} = self.state;

@@ -168,7 +159,6 @@ impl AttestationBuilder<'_, Sign> {
AttestationBuilderError::new(ErrorKind::Field, "handshake data was not set")
})?),
cert_commitment: field_id.next(cert_commitment),
encoder_secret: encoder_secret.map(|secret| field_id.next(secret)),
extensions: extensions
.into_iter()
.map(|extension| field_id.next(extension))
@@ -1,4 +1,5 @@
//! Attestation fixtures.

use tlsn_core::{
connection::{CertBinding, CertBindingV1_2},
fixtures::ConnectionFixture,

@@ -12,10 +13,7 @@ use tlsn_core::{
use crate::{
Attestation, AttestationConfig, CryptoProvider, Extension,
request::{Request, RequestConfig},
signing::{
KeyAlgId, SignatureAlgId, SignatureVerifier, SignatureVerifierProvider, Signer,
SignerProvider,
},
signing::SignatureAlgId,
};

/// A Request fixture used for testing.

@@ -104,8 +102,7 @@ pub fn attestation_fixture(
let mut provider = CryptoProvider::default();
match signature_alg {
SignatureAlgId::SECP256K1 => provider.signer.set_secp256k1(&[42u8; 32]).unwrap(),
SignatureAlgId::SECP256K1ETH => provider.signer.set_secp256k1eth(&[43u8; 32]).unwrap(),
SignatureAlgId::SECP256R1 => provider.signer.set_secp256r1(&[44u8; 32]).unwrap(),
SignatureAlgId::SECP256R1 => provider.signer.set_secp256r1(&[42u8; 32]).unwrap(),
_ => unimplemented!(),
};

@@ -125,68 +122,3 @@ pub fn attestation_fixture(

attestation_builder.build(&provider).unwrap()
}

/// Returns a crypto provider which supports only a custom signature alg.
pub fn custom_provider_fixture() -> CryptoProvider {
const CUSTOM_SIG_ALG_ID: SignatureAlgId = SignatureAlgId::new(128);

// A dummy signer.
struct DummySigner {}
impl Signer for DummySigner {
fn alg_id(&self) -> SignatureAlgId {
CUSTOM_SIG_ALG_ID
}

fn sign(
&self,
msg: &[u8],
) -> Result<crate::signing::Signature, crate::signing::SignatureError> {
Ok(crate::signing::Signature {
alg: CUSTOM_SIG_ALG_ID,
data: msg.to_vec(),
})
}

fn verifying_key(&self) -> crate::signing::VerifyingKey {
crate::signing::VerifyingKey {
alg: KeyAlgId::new(128),
data: vec![1, 2, 3, 4],
}
}
}

// A dummy verifier.
struct DummyVerifier {}
impl SignatureVerifier for DummyVerifier {
fn alg_id(&self) -> SignatureAlgId {
CUSTOM_SIG_ALG_ID
}

fn verify(
&self,
_key: &crate::signing::VerifyingKey,
msg: &[u8],
sig: &[u8],
) -> Result<(), crate::signing::SignatureError> {
if msg == sig {
Ok(())
} else {
Err(crate::signing::SignatureError::from_str(
"invalid signature",
))
}
}
}

let mut provider = CryptoProvider::default();

let mut signer_provider = SignerProvider::default();
signer_provider.set_signer(Box::new(DummySigner {}));
provider.signer = signer_provider;

let mut verifier_provider = SignatureVerifierProvider::empty();
verifier_provider.set_verifier(Box::new(DummyVerifier {}));
provider.signature = verifier_provider;

provider
}
@@ -219,7 +219,7 @@ use tlsn_core::{
connection::{ConnectionInfo, ServerEphemKey},
hash::{Hash, HashAlgorithm, TypedHash},
merkle::MerkleTree,
transcript::{TranscriptCommitment, encoding::EncoderSecret},
transcript::TranscriptCommitment,
};

use crate::{

@@ -327,7 +327,6 @@ pub struct Body {
connection_info: Field<ConnectionInfo>,
server_ephemeral_key: Field<ServerEphemKey>,
cert_commitment: Field<ServerCertCommitment>,
encoder_secret: Option<Field<EncoderSecret>>,
extensions: Vec<Field<Extension>>,
transcript_commitments: Vec<Field<TranscriptCommitment>>,
}

@@ -373,7 +372,6 @@ impl Body {
connection_info: conn_info,
server_ephemeral_key,
cert_commitment,
encoder_secret,
extensions,
transcript_commitments,
} = self;

@@ -391,13 +389,6 @@ impl Body {
),
];

if let Some(encoder_secret) = encoder_secret {
fields.push((
encoder_secret.id,
hasher.hash_separated(&encoder_secret.data),
));
}

for field in extensions.iter() {
fields.push((field.id, hasher.hash_separated(&field.data)));
}

@@ -91,11 +91,6 @@ impl Presentation {
transcript.verify_with_provider(
&provider.hash,
&attestation.body.connection_info().transcript_length,
attestation
.body
.encoder_secret
.as_ref()
.map(|field| &field.data),
attestation.body.transcript_commitments(),
)
})
@@ -20,10 +20,7 @@ use serde::{Deserialize, Serialize};

use tlsn_core::hash::HashAlgId;

use crate::{
Attestation, CryptoProvider, Extension, connection::ServerCertCommitment,
serialize::CanonicalSerialize, signing::SignatureAlgId,
};
use crate::{Attestation, Extension, connection::ServerCertCommitment, signing::SignatureAlgId};

pub use builder::{RequestBuilder, RequestBuilderError};
pub use config::{RequestConfig, RequestConfigBuilder, RequestConfigBuilderError};

@@ -44,102 +41,44 @@ impl Request {
}

/// Validates the content of the attestation against this request.
pub fn validate(
&self,
attestation: &Attestation,
provider: &CryptoProvider,
) -> Result<(), AttestationValidationError> {
pub fn validate(&self, attestation: &Attestation) -> Result<(), InconsistentAttestation> {
if attestation.signature.alg != self.signature_alg {
return Err(AttestationValidationError::inconsistent(format!(
return Err(InconsistentAttestation(format!(
"signature algorithm: expected {:?}, got {:?}",
self.signature_alg, attestation.signature.alg
)));
}

if attestation.header.root.alg != self.hash_alg {
return Err(AttestationValidationError::inconsistent(format!(
return Err(InconsistentAttestation(format!(
"hash algorithm: expected {:?}, got {:?}",
self.hash_alg, attestation.header.root.alg
)));
}

if attestation.body.cert_commitment() != &self.server_cert_commitment {
return Err(AttestationValidationError::inconsistent(
"server certificate commitment does not match",
return Err(InconsistentAttestation(
"server certificate commitment does not match".to_string(),
));
}

// TODO: improve the O(M*N) complexity of this check.
for extension in &self.extensions {
if !attestation.body.extensions().any(|e| e == extension) {
return Err(AttestationValidationError::inconsistent(
"extension is missing from the attestation",
return Err(InconsistentAttestation(
"extension is missing from the attestation".to_string(),
));
}
}

let verifier = provider
.signature
.get(&attestation.signature.alg)
.map_err(|_| {
AttestationValidationError::provider(format!(
"provider not configured for signature algorithm id {:?}",
attestation.signature.alg,
))
})?;

verifier
.verify(
&attestation.body.verifying_key.data,
&CanonicalSerialize::serialize(&attestation.header),
&attestation.signature.data,
)
.map_err(|_| {
AttestationValidationError::inconsistent("failed to verify the signature")
})?;

Ok(())
}
}

/// Error for [`Request::validate`].
#[derive(Debug, thiserror::Error)]
#[error("attestation validation error: {kind}: {message}")]
pub struct AttestationValidationError {
kind: ErrorKind,
message: String,
}

impl AttestationValidationError {
fn inconsistent(msg: impl Into<String>) -> Self {
Self {
kind: ErrorKind::Inconsistent,
message: msg.into(),
}
}

fn provider(msg: impl Into<String>) -> Self {
Self {
kind: ErrorKind::Provider,
message: msg.into(),
}
}
}

#[derive(Debug)]
enum ErrorKind {
Inconsistent,
Provider,
}

impl std::fmt::Display for ErrorKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ErrorKind::Inconsistent => write!(f, "inconsistent"),
ErrorKind::Provider => write!(f, "provider"),
}
}
}
#[error("inconsistent attestation: {0}")]
pub struct InconsistentAttestation(String);
#[cfg(test)]
mod test {

@@ -154,8 +93,7 @@ mod test {
use crate::{
CryptoProvider,
connection::ServerCertOpening,
fixtures::{RequestFixture, attestation_fixture, custom_provider_fixture, request_fixture},
request::{AttestationValidationError, ErrorKind},
fixtures::{RequestFixture, attestation_fixture, request_fixture},
signing::SignatureAlgId,
};

@@ -175,9 +113,7 @@ mod test {
let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);

let provider = CryptoProvider::default();

assert!(request.validate(&attestation, &provider).is_ok())
assert!(request.validate(&attestation).is_ok())
}

#[test]

@@ -198,9 +134,7 @@ mod test {
request.signature_alg = SignatureAlgId::SECP256R1;

let provider = CryptoProvider::default();

let res = request.validate(&attestation, &provider);
let res = request.validate(&attestation);
assert!(res.is_err());
}

@@ -222,9 +156,7 @@ mod test {
request.hash_alg = HashAlgId::SHA256;

let provider = CryptoProvider::default();

let res = request.validate(&attestation, &provider);
let res = request.validate(&attestation);
assert!(res.is_err())
}

@@ -252,62 +184,11 @@ mod test {
});
let opening = ServerCertOpening::new(server_cert_data);

let provider = CryptoProvider::default();
let crypto_provider = CryptoProvider::default();
request.server_cert_commitment =
opening.commit(provider.hash.get(&HashAlgId::BLAKE3).unwrap());
opening.commit(crypto_provider.hash.get(&HashAlgId::BLAKE3).unwrap());

let res = request.validate(&attestation, &provider);
let res = request.validate(&attestation);
assert!(res.is_err())
}

#[test]
fn test_wrong_sig() {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());

let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);

let mut attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);

// Corrupt the signature.
attestation.signature.data[1] = attestation.signature.data[1].wrapping_add(1);

let provider = CryptoProvider::default();

assert!(request.validate(&attestation, &provider).is_err())
}

#[test]
fn test_wrong_provider() {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());

let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);

let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);

let provider = custom_provider_fixture();

assert!(matches!(
request.validate(&attestation, &provider),
Err(AttestationValidationError {
kind: ErrorKind::Provider,
..
})
))
}
}
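For orientation, here is a minimal sketch of how the provider-based variant of Request::validate shown in this diff is called. It mirrors the test code above rather than defining anything new; the only assumption is the hypothetical helper name check_attestation, while the types and the validate signature are taken from the diff itself (the other side of the diff drops the provider argument and returns InconsistentAttestation instead).

```rust
use crate::{Attestation, CryptoProvider, request::Request};

/// Illustrative only: checks an attestation against the original request
/// using the provider-based `validate` variant from the diff above.
fn check_attestation(request: &Request, attestation: &Attestation) -> bool {
    // The tests above use the default provider; a custom one (as built by
    // `custom_provider_fixture`) can be substituted to exercise the
    // `ErrorKind::Provider` path.
    let provider = CryptoProvider::default();
    request.validate(attestation, &provider).is_ok()
}
```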
@@ -49,6 +49,5 @@ impl_domain_separator!(tlsn_core::connection::ConnectionInfo);
impl_domain_separator!(tlsn_core::connection::CertBinding);
impl_domain_separator!(tlsn_core::transcript::TranscriptCommitment);
impl_domain_separator!(tlsn_core::transcript::TranscriptSecret);
impl_domain_separator!(tlsn_core::transcript::encoding::EncoderSecret);
impl_domain_separator!(tlsn_core::transcript::encoding::EncodingCommitment);
impl_domain_separator!(tlsn_core::transcript::hash::PlaintextHash);
@@ -202,14 +202,6 @@ impl SignatureVerifierProvider {
.map(|s| &**s)
.ok_or(UnknownSignatureAlgId(*alg))
}

/// Returns an empty provider.
#[cfg(any(test, feature = "fixtures"))]
pub fn empty() -> Self {
Self {
verifiers: HashMap::default(),
}
}
}

/// Signature verifier.

@@ -237,14 +229,6 @@ impl_domain_separator!(VerifyingKey);
#[error("signature verification failed: {0}")]
pub struct SignatureError(String);

impl SignatureError {
/// Creates a new error with the given message.
#[allow(clippy::should_implement_trait)]
pub fn from_str(msg: &str) -> Self {
Self(msg.to_string())
}
}

/// A signature.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct Signature {
@@ -64,6 +64,7 @@ fn test_api() {

let encoding_commitment = EncodingCommitment {
root: encoding_tree.root(),
secret: encoder_secret(),
};

let request_config = RequestConfig::default();

@@ -95,13 +96,12 @@ fn test_api() {
.connection_info(connection_info.clone())
// Server key Notary received during handshake
.server_ephemeral_key(server_ephemeral_key)
.encoder_secret(encoder_secret())
.transcript_commitments(vec![TranscriptCommitment::Encoding(encoding_commitment)]);

let attestation = attestation_builder.build(&provider).unwrap();

// Prover validates the attestation is consistent with its request.
request.validate(&attestation, &provider).unwrap();
request.validate(&attestation).unwrap();

let mut transcript_proof_builder = secrets.transcript_proof_builder();
@@ -5,7 +5,7 @@ description = "This crate provides implementations of ciphers for two parties"
keywords = ["tls", "mpc", "2pc", "aes"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.14-pre"
version = "0.1.0-alpha.13-pre"
edition = "2021"

[lints]

@@ -15,7 +15,7 @@ workspace = true
name = "cipher"

[dependencies]
mpz-circuits = { workspace = true, features = ["aes"] }
mpz-circuits = { workspace = true }
mpz-vm-core = { workspace = true }
mpz-memory-core = { workspace = true }

@@ -24,9 +24,11 @@ thiserror = { workspace = true }
aes = { workspace = true }

[dev-dependencies]
mpz-common = { workspace = true, features = ["test-utils"] }
mpz-ideal-vm = { workspace = true }
mpz-garble = { workspace = true }
mpz-common = { workspace = true }
mpz-ot = { workspace = true }

tokio = { version = "1", features = ["macros", "rt", "rt-multi-thread"] }
rand = { workspace = true }
ctr = { workspace = true }
cipher = { workspace = true }
@@ -2,7 +2,7 @@

use crate::{Cipher, CtrBlock, Keystream};
use async_trait::async_trait;
use mpz_circuits::{AES128_KS, AES128_POST_KS};
use mpz_circuits::circuits::AES128;
use mpz_memory_core::binary::{Binary, U8};
use mpz_vm_core::{prelude::*, Call, Vm};
use std::fmt::Debug;

@@ -12,35 +12,13 @@ mod error;
pub use error::AesError;
use error::ErrorKind;

/// AES key schedule: 11 round keys, 16 bytes each.
type KeySchedule = Array<U8, 176>;

/// Computes AES-128.
#[derive(Default, Debug)]
pub struct Aes128 {
key: Option<Array<U8, 16>>,
key_schedule: Option<KeySchedule>,
iv: Option<Array<U8, 4>>,
}

impl Aes128 {
// Allocates key schedule.
//
// Expects the key to be already set.
fn alloc_key_schedule(&self, vm: &mut dyn Vm<Binary>) -> Result<KeySchedule, AesError> {
let ks: KeySchedule = vm
.call(
Call::builder(AES128_KS.clone())
.arg(self.key.expect("key is set"))
.build()
.expect("call should be valid"),
)
.map_err(|err| AesError::new(ErrorKind::Vm, err))?;

Ok(ks)
}
}

#[async_trait]
impl Cipher for Aes128 {
type Error = AesError;

@@ -67,22 +45,18 @@ impl Cipher for Aes128 {
}

fn alloc_block(
&mut self,
&self,
vm: &mut dyn Vm<Binary>,
input: Array<U8, 16>,
) -> Result<Self::Block, Self::Error> {
self.key
let key = self
.key
.ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?;

if self.key_schedule.is_none() {
self.key_schedule = Some(self.alloc_key_schedule(vm)?);
}
let ks = *self.key_schedule.as_ref().expect("key schedule was set");

let output = vm
.call(
Call::builder(AES128_POST_KS.clone())
.arg(ks)
Call::builder(AES128.clone())
.arg(key)
.arg(input)
.build()
.expect("call should be valid"),

@@ -93,10 +67,11 @@ impl Cipher for Aes128 {
}

fn alloc_ctr_block(
&mut self,
&self,
vm: &mut dyn Vm<Binary>,
) -> Result<CtrBlock<Self::Nonce, Self::Counter, Self::Block>, Self::Error> {
self.key
let key = self
.key
.ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?;
let iv = self
.iv

@@ -114,15 +89,10 @@ impl Cipher for Aes128 {
vm.mark_public(counter)
.map_err(|err| AesError::new(ErrorKind::Vm, err))?;

if self.key_schedule.is_none() {
self.key_schedule = Some(self.alloc_key_schedule(vm)?);
}
let ks = *self.key_schedule.as_ref().expect("key schedule was set");

let output = vm
.call(
Call::builder(AES128_POST_KS.clone())
.arg(ks)
Call::builder(AES128.clone())
.arg(key)
.arg(iv)
.arg(explicit_nonce)
.arg(counter)

@@ -139,11 +109,12 @@ impl Cipher for Aes128 {
}

fn alloc_keystream(
&mut self,
&self,
vm: &mut dyn Vm<Binary>,
len: usize,
) -> Result<Keystream<Self::Nonce, Self::Counter, Self::Block>, Self::Error> {
self.key
let key = self
.key
.ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?;
let iv = self
.iv

@@ -172,15 +143,10 @@ impl Cipher for Aes128 {
let blocks = inputs
.into_iter()
.map(|(explicit_nonce, counter)| {
if self.key_schedule.is_none() {
self.key_schedule = Some(self.alloc_key_schedule(vm)?);
}
let ks = *self.key_schedule.as_ref().expect("key schedule was set");

let output = vm
.call(
Call::builder(AES128_POST_KS.clone())
.arg(ks)
Call::builder(AES128.clone())
.arg(key)
.arg(iv)
.arg(explicit_nonce)
.arg(counter)
@@ -206,12 +172,15 @@ mod tests {
use super::*;
use crate::Cipher;
use mpz_common::context::test_st_context;
use mpz_ideal_vm::IdealVm;
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_memory_core::{
binary::{Binary, U8},
correlated::Delta,
Array, MemoryExt, Vector, ViewExt,
};
use mpz_ot::ideal::cot::ideal_cot;
use mpz_vm_core::{Execute, Vm};
use rand::{rngs::StdRng, SeedableRng};

#[tokio::test]
async fn test_aes_ctr() {

@@ -221,11 +190,10 @@ mod tests {
let start_counter = 3u32;

let (mut ctx_a, mut ctx_b) = test_st_context(8);
let mut gen = IdealVm::new();
let mut ev = IdealVm::new();
let (mut gen, mut ev) = mock_vm();

let mut aes_gen = setup_ctr(key, iv, &mut gen);
let mut aes_ev = setup_ctr(key, iv, &mut ev);
let aes_gen = setup_ctr(key, iv, &mut gen);
let aes_ev = setup_ctr(key, iv, &mut ev);

let msg = vec![42u8; 128];

@@ -284,11 +252,10 @@ mod tests {
let input = [5_u8; 16];

let (mut ctx_a, mut ctx_b) = test_st_context(8);
let mut gen = IdealVm::new();
let mut ev = IdealVm::new();
let (mut gen, mut ev) = mock_vm();

let mut aes_gen = setup_block(key, &mut gen);
let mut aes_ev = setup_block(key, &mut ev);
let aes_gen = setup_block(key, &mut gen);
let aes_ev = setup_block(key, &mut ev);

let block_ref_gen: Array<U8, 16> = gen.alloc().unwrap();
gen.mark_public(block_ref_gen).unwrap();

@@ -327,6 +294,18 @@ mod tests {
assert_eq!(ciphertext_gen, expected);
}

fn mock_vm() -> (impl Vm<Binary>, impl Vm<Binary>) {
let mut rng = StdRng::seed_from_u64(0);
let delta = Delta::random(&mut rng);

let (cot_send, cot_recv) = ideal_cot(delta.into_inner());

let gen = Garbler::new(cot_send, [0u8; 16], delta);
let ev = Evaluator::new(cot_recv);

(gen, ev)
}

fn setup_ctr(key: [u8; 16], iv: [u8; 4], vm: &mut dyn Vm<Binary>) -> Aes128 {
let key_ref: Array<U8, 16> = vm.alloc().unwrap();
vm.mark_public(key_ref).unwrap();
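The key-schedule handling is the interesting part of the Aes128 change above: one side of the diff computes the schedule lazily on first use and reuses it for every later allocation instead of re-deriving it from the key each time. The following is a simplified, dependency-free analogue of that caching pattern, not the crate's implementation; the names expand_key and Schedule are illustrative stand-ins for the AES128_KS circuit and the 176-byte KeySchedule type.

```rust
/// 11 round keys of 16 bytes each, as in the `KeySchedule` alias above.
struct Schedule([u8; 176]);

struct Aes {
    key: Option<[u8; 16]>,
    schedule: Option<Schedule>,
}

impl Aes {
    /// Returns the cached key schedule, computing it on first use.
    fn schedule(&mut self) -> Result<&Schedule, &'static str> {
        let key = self.key.ok_or("key not set")?;
        if self.schedule.is_none() {
            // Stand-in for allocating the key-schedule circuit (AES128_KS).
            self.schedule = Some(Schedule(expand_key(key)));
        }
        Ok(self.schedule.as_ref().expect("schedule was set"))
    }
}

/// Placeholder for the real AES-128 key expansion; illustrative only.
fn expand_key(_key: [u8; 16]) -> [u8; 176] {
    [0u8; 176]
}
```

The trade-off visible in the diff follows from this: the caching variant needs `&mut self` on the allocation methods, while the non-caching variant keeps them `&self` and feeds the raw key to the monolithic AES128 circuit on every call.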
@@ -55,7 +55,7 @@ pub trait Cipher {

/// Allocates a single block in ECB mode.
fn alloc_block(
&mut self,
&self,
vm: &mut dyn Vm<Binary>,
input: Self::Block,
) -> Result<Self::Block, Self::Error>;

@@ -63,7 +63,7 @@
/// Allocates a single block in counter mode.
#[allow(clippy::type_complexity)]
fn alloc_ctr_block(
&mut self,
&self,
vm: &mut dyn Vm<Binary>,
) -> Result<CtrBlock<Self::Nonce, Self::Counter, Self::Block>, Self::Error>;

@@ -75,7 +75,7 @@ pub trait Cipher {
/// * `len` - Length of the stream in bytes.
#[allow(clippy::type_complexity)]
fn alloc_keystream(
&mut self,
&self,
vm: &mut dyn Vm<Binary>,
len: usize,
) -> Result<Keystream<Self::Nonce, Self::Counter, Self::Block>, Self::Error>;
@@ -1,6 +1,6 @@
[package]
name = "tlsn-deap"
version = "0.1.0-alpha.14-pre"
version = "0.1.0-alpha.13-pre"
edition = "2021"

[lints]

@@ -19,8 +19,11 @@ futures = { workspace = true }
tokio = { workspace = true, features = ["sync"] }

[dev-dependencies]
mpz-circuits = { workspace = true, features = ["aes"] }
mpz-common = { workspace = true, features = ["test-utils"] }
mpz-ideal-vm = { workspace = true }
mpz-circuits = { workspace = true }
mpz-garble = { workspace = true }
mpz-ot = { workspace = true }
mpz-zk = { workspace = true }

tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }
rand = { workspace = true }
rand06-compat = { workspace = true }
@@ -382,27 +382,37 @@ enum ErrorRepr {

#[cfg(test)]
mod tests {
use mpz_circuits::AES128;
use mpz_circuits::circuits::AES128;
use mpz_common::context::test_st_context;
use mpz_ideal_vm::IdealVm;
use mpz_core::Block;
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_ot::ideal::{cot::ideal_cot, rcot::ideal_rcot};
use mpz_vm_core::{
memory::{binary::U8, Array},
memory::{binary::U8, correlated::Delta, Array},
prelude::*,
};
use mpz_zk::{Prover, ProverConfig, Verifier, VerifierConfig};
use rand::{rngs::StdRng, SeedableRng};

use super::*;

#[tokio::test]
async fn test_deap() {
let mut rng = StdRng::seed_from_u64(0);
let delta_mpc = Delta::random(&mut rng);
let delta_zk = Delta::random(&mut rng);

let (mut ctx_a, mut ctx_b) = test_st_context(8);
let (rcot_send, rcot_recv) = ideal_rcot(Block::ZERO, delta_zk.into_inner());
let (cot_send, cot_recv) = ideal_cot(delta_mpc.into_inner());

let leader_mpc = IdealVm::new();
let leader_zk = IdealVm::new();
let follower_mpc = IdealVm::new();
let follower_zk = IdealVm::new();
let gb = Garbler::new(cot_send, [0u8; 16], delta_mpc);
let ev = Evaluator::new(cot_recv);
let prover = Prover::new(ProverConfig::default(), rcot_recv);
let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send);

let mut leader = Deap::new(Role::Leader, leader_mpc, leader_zk);
let mut follower = Deap::new(Role::Follower, follower_mpc, follower_zk);
let mut leader = Deap::new(Role::Leader, gb, prover);
let mut follower = Deap::new(Role::Follower, ev, verifier);

let (ct_leader, ct_follower) = futures::join!(
async {

@@ -468,15 +478,21 @@ mod tests {

#[tokio::test]
async fn test_deap_desync_memory() {
let mut rng = StdRng::seed_from_u64(0);
let delta_mpc = Delta::random(&mut rng);
let delta_zk = Delta::random(&mut rng);

let (mut ctx_a, mut ctx_b) = test_st_context(8);
let (rcot_send, rcot_recv) = ideal_rcot(Block::ZERO, delta_zk.into_inner());
let (cot_send, cot_recv) = ideal_cot(delta_mpc.into_inner());

let leader_mpc = IdealVm::new();
let leader_zk = IdealVm::new();
let follower_mpc = IdealVm::new();
let follower_zk = IdealVm::new();
let gb = Garbler::new(cot_send, [0u8; 16], delta_mpc);
let ev = Evaluator::new(cot_recv);
let prover = Prover::new(ProverConfig::default(), rcot_recv);
let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send);

let mut leader = Deap::new(Role::Leader, leader_mpc, leader_zk);
let mut follower = Deap::new(Role::Follower, follower_mpc, follower_zk);
let mut leader = Deap::new(Role::Leader, gb, prover);
let mut follower = Deap::new(Role::Follower, ev, verifier);

// Desynchronize the memories.
let _ = leader.zk().alloc_raw(1).unwrap();

@@ -548,15 +564,21 @@ mod tests {
// detection by the follower.
#[tokio::test]
async fn test_malicious() {
let mut rng = StdRng::seed_from_u64(0);
let delta_mpc = Delta::random(&mut rng);
let delta_zk = Delta::random(&mut rng);

let (mut ctx_a, mut ctx_b) = test_st_context(8);
let (rcot_send, rcot_recv) = ideal_rcot(Block::ZERO, delta_zk.into_inner());
let (cot_send, cot_recv) = ideal_cot(delta_mpc.into_inner());

let leader_mpc = IdealVm::new();
let leader_zk = IdealVm::new();
let follower_mpc = IdealVm::new();
let follower_zk = IdealVm::new();
let gb = Garbler::new(cot_send, [1u8; 16], delta_mpc);
let ev = Evaluator::new(cot_recv);
let prover = Prover::new(ProverConfig::default(), rcot_recv);
let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send);

let mut leader = Deap::new(Role::Leader, leader_mpc, leader_zk);
let mut follower = Deap::new(Role::Follower, follower_mpc, follower_zk);
let mut leader = Deap::new(Role::Leader, gb, prover);
let mut follower = Deap::new(Role::Follower, ev, verifier);

let (_, follower_res) = futures::join!(
async {
@@ -5,7 +5,7 @@ description = "A 2PC implementation of TLS HMAC-SHA256 PRF"
keywords = ["tls", "mpc", "2pc", "hmac", "sha256"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.14-pre"
version = "0.1.0-alpha.13-pre"
edition = "2021"

[lints]

@@ -20,13 +20,14 @@ mpz-core = { workspace = true }
mpz-circuits = { workspace = true }
mpz-hash = { workspace = true }

sha2 = { workspace = true, features = ["compress"] }
thiserror = { workspace = true }
tracing = { workspace = true }
sha2 = { workspace = true }

[dev-dependencies]
mpz-ot = { workspace = true, features = ["ideal"] }
mpz-garble = { workspace = true }
mpz-common = { workspace = true, features = ["test-utils"] }
mpz-ideal-vm = { workspace = true }

criterion = { workspace = true, features = ["async_tokio"] }
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }
@@ -4,12 +4,14 @@ use criterion::{criterion_group, criterion_main, Criterion};

use hmac_sha256::{Mode, MpcPrf};
use mpz_common::context::test_mt_context;
use mpz_ideal_vm::IdealVm;
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_ot::ideal::cot::ideal_cot;
use mpz_vm_core::{
memory::{binary::U8, Array},
memory::{binary::U8, correlated::Delta, Array},
prelude::*,
Execute,
};
use rand::{rngs::StdRng, SeedableRng};

#[allow(clippy::unit_arg)]
fn criterion_benchmark(c: &mut Criterion) {

@@ -27,6 +29,8 @@ criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);

async fn prf(mode: Mode) {
let mut rng = StdRng::seed_from_u64(0);

let pms = [42u8; 32];
let client_random = [69u8; 32];
let server_random: [u8; 32] = [96u8; 32];

@@ -35,8 +39,11 @@ async fn prf(mode: Mode) {
let mut leader_ctx = leader_exec.new_context().await.unwrap();
let mut follower_ctx = follower_exec.new_context().await.unwrap();

let mut leader_vm = IdealVm::new();
let mut follower_vm = IdealVm::new();
let delta = Delta::random(&mut rng);
let (ot_send, ot_recv) = ideal_cot(delta.into_inner());

let mut leader_vm = Garbler::new(ot_send, [0u8; 16], delta);
let mut follower_vm = Evaluator::new(ot_recv);

let leader_pms: Array<U8, 32> = leader_vm.alloc().unwrap();
leader_vm.mark_public(leader_pms).unwrap();
@@ -54,11 +54,10 @@ mod tests {
use crate::{
hmac::hmac_sha256,
sha256, state_to_bytes,
test_utils::{compute_inner_local, compute_outer_partial},
test_utils::{compute_inner_local, compute_outer_partial, mock_vm},
};
use mpz_common::context::test_st_context;
use mpz_hash::sha256::Sha256;
use mpz_ideal_vm::IdealVm;
use mpz_vm_core::{
memory::{
binary::{U32, U8},

@@ -84,8 +83,7 @@ mod tests {
#[tokio::test]
async fn test_hmac_circuit() {
let (mut ctx_a, mut ctx_b) = test_st_context(8);
let mut leader = IdealVm::new();
let mut follower = IdealVm::new();
let (mut leader, mut follower) = mock_vm();

let (inputs, references) = test_fixtures();
for (input, &reference) in inputs.iter().zip(references.iter()) {

@@ -72,11 +72,10 @@ fn state_to_bytes(input: [u32; 8]) -> [u8; 32] {
#[cfg(test)]
mod tests {
use crate::{
test_utils::{prf_cf_vd, prf_keys, prf_ms, prf_sf_vd},
test_utils::{mock_vm, prf_cf_vd, prf_keys, prf_ms, prf_sf_vd},
Mode, MpcPrf, SessionKeys,
};
use mpz_common::context::test_st_context;
use mpz_ideal_vm::IdealVm;
use mpz_vm_core::{
memory::{binary::U8, Array, MemoryExt, ViewExt},
Execute,

@@ -124,8 +123,7 @@ mod tests {

// Set up vm and prf
let (mut ctx_a, mut ctx_b) = test_st_context(128);
let mut leader = IdealVm::new();
let mut follower = IdealVm::new();
let (mut leader, mut follower) = mock_vm();

let leader_pms: Array<U8, 32> = leader.alloc().unwrap();
leader.mark_public(leader_pms).unwrap();

@@ -339,9 +339,8 @@ fn gen_merge_circ(size: usize) -> Arc<Circuit> {

#[cfg(test)]
mod tests {
use crate::prf::merge_outputs;
use crate::{prf::merge_outputs, test_utils::mock_vm};
use mpz_common::context::test_st_context;
use mpz_ideal_vm::IdealVm;
use mpz_vm_core::{
memory::{binary::U8, Array, MemoryExt, ViewExt},
Execute,

@@ -350,8 +349,7 @@ mod tests {
#[tokio::test]
async fn test_merge_outputs() {
let (mut ctx_a, mut ctx_b) = test_st_context(8);
let mut leader = IdealVm::new();
let mut follower = IdealVm::new();
let (mut leader, mut follower) = mock_vm();

let input1: [u8; 32] = std::array::from_fn(|i| i as u8);
let input2: [u8; 32] = std::array::from_fn(|i| i as u8 + 32);

@@ -137,11 +137,10 @@ impl Prf {
mod tests {
use crate::{
prf::{compute_partial, function::Prf},
test_utils::phash,
test_utils::{mock_vm, phash},
Mode,
};
use mpz_common::context::test_st_context;
use mpz_ideal_vm::IdealVm;
use mpz_vm_core::{
memory::{binary::U8, Array, MemoryExt, ViewExt},
Execute,

@@ -167,8 +166,7 @@ mod tests {
let mut rng = ThreadRng::default();

let (mut ctx_a, mut ctx_b) = test_st_context(8);
let mut leader = IdealVm::new();
let mut follower = IdealVm::new();
let (mut leader, mut follower) = mock_vm();

let key: [u8; 32] = rng.random();
let start_seed: Vec<u8> = vec![42; 64];
@@ -1,10 +1,25 @@
use crate::{sha256, state_to_bytes};
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_ot::ideal::cot::{ideal_cot, IdealCOTReceiver, IdealCOTSender};
use mpz_vm_core::memory::correlated::Delta;
use rand::{rngs::StdRng, Rng, SeedableRng};

pub(crate) const SHA256_IV: [u32; 8] = [
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
];

pub(crate) fn mock_vm() -> (Garbler<IdealCOTSender>, Evaluator<IdealCOTReceiver>) {
let mut rng = StdRng::seed_from_u64(0);
let delta = Delta::random(&mut rng);

let (cot_send, cot_recv) = ideal_cot(delta.into_inner());

let gen = Garbler::new(cot_send, [0u8; 16], delta);
let ev = Evaluator::new(cot_recv);

(gen, ev)
}

pub(crate) fn prf_ms(pms: [u8; 32], client_random: [u8; 32], server_random: [u8; 32]) -> [u8; 48] {
let mut label_start_seed = b"master secret".to_vec();
label_start_seed.extend_from_slice(&client_random);
@@ -5,7 +5,7 @@ description = "Implementation of the 3-party key-exchange protocol"
keywords = ["tls", "mpc", "2pc", "pms", "key-exchange"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.14-pre"
version = "0.1.0-alpha.13-pre"
edition = "2021"

[lints]

@@ -40,7 +40,6 @@ tokio = { workspace = true, features = ["sync"] }
[dev-dependencies]
mpz-ot = { workspace = true, features = ["ideal"] }
mpz-garble = { workspace = true }
mpz-ideal-vm = { workspace = true }

rand_core = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }
@@ -459,7 +459,9 @@ mod tests {
use mpz_common::context::test_st_context;
use mpz_core::Block;
use mpz_fields::UniformRand;
use mpz_ideal_vm::IdealVm;
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_memory_core::correlated::Delta;
use mpz_ot::ideal::cot::{ideal_cot, IdealCOTReceiver, IdealCOTSender};
use mpz_share_conversion::ideal::{
ideal_share_convert, IdealShareConvertReceiver, IdealShareConvertSender,
};

@@ -482,8 +484,7 @@ mod tests {
async fn test_key_exchange() {
let mut rng = StdRng::seed_from_u64(0).compat();
let (mut ctx_a, mut ctx_b) = test_st_context(8);
let mut gen = IdealVm::new();
let mut ev = IdealVm::new();
let (mut gen, mut ev) = mock_vm();

let leader_private_key = SecretKey::random(&mut rng);
let follower_private_key = SecretKey::random(&mut rng);

@@ -624,8 +625,7 @@ mod tests {
async fn test_malicious_key_exchange(#[case] malicious: Malicious) {
let mut rng = StdRng::seed_from_u64(0);
let (mut ctx_a, mut ctx_b) = test_st_context(8);
let mut gen = IdealVm::new();
let mut ev = IdealVm::new();
let (mut gen, mut ev) = mock_vm();

let leader_private_key = SecretKey::random(&mut rng.compat_by_ref());
let follower_private_key = SecretKey::random(&mut rng.compat_by_ref());

@@ -704,8 +704,7 @@ mod tests {
#[tokio::test]
async fn test_circuit() {
let (mut ctx_a, mut ctx_b) = test_st_context(8);
let gen = IdealVm::new();
let ev = IdealVm::new();
let (gen, ev) = mock_vm();

let share_a0_bytes = [5_u8; 32];
let share_a1_bytes = [2_u8; 32];

@@ -835,4 +834,16 @@ mod tests {

(leader, follower)
}

fn mock_vm() -> (Garbler<IdealCOTSender>, Evaluator<IdealCOTReceiver>) {
let mut rng = StdRng::seed_from_u64(0);
let delta = Delta::random(&mut rng);

let (cot_send, cot_recv) = ideal_cot(delta.into_inner());

let gen = Garbler::new(cot_send, [0u8; 16], delta);
let ev = Evaluator::new(cot_recv);

(gen, ev)
}
}

@@ -8,7 +8,7 @@
//! with the server alone and forward all messages from and to the follower.
//!
//! A detailed description of this protocol can be found in our documentation
//! <https://tlsnotary.org/docs/mpc/key_exchange>.
//! <https://docs.tlsnotary.org/protocol/notarization/key_exchange.html>.

#![deny(missing_docs, unreachable_pub, unused_must_use)]
#![deny(clippy::all)]

@@ -26,7 +26,8 @@ pub fn create_mock_key_exchange_pair() -> (MockKeyExchange, MockKeyExchange) {

#[cfg(test)]
mod tests {
use mpz_ideal_vm::IdealVm;
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_ot::ideal::cot::{IdealCOTReceiver, IdealCOTSender};

use super::*;
use crate::KeyExchange;

@@ -39,12 +40,12 @@ mod tests {

is_key_exchange::<
MpcKeyExchange<IdealShareConvertSender<P256>, IdealShareConvertReceiver<P256>>,
IdealVm,
Garbler<IdealCOTSender>,
>(leader);

is_key_exchange::<
MpcKeyExchange<IdealShareConvertSender<P256>, IdealShareConvertReceiver<P256>>,
IdealVm,
Evaluator<IdealCOTReceiver>,
>(follower);
}
}
@@ -4,7 +4,7 @@
//! protocol has semi-honest security.
//!
//! The protocol is described in
//! <https://tlsnotary.org/docs/mpc/key_exchange>
//! <https://docs.tlsnotary.org/protocol/notarization/key_exchange.html>

use crate::{KeyExchangeError, Role};
use mpz_common::{Context, Flush};
@@ -5,7 +5,7 @@ description = "Core types for TLSNotary"
keywords = ["tls", "mpc", "2pc", "types"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.14-pre"
version = "0.1.0-alpha.13-pre"
edition = "2021"

[lints]

@@ -13,7 +13,6 @@ workspace = true

[features]
default = []
mozilla-certs = ["dep:webpki-root-certs", "dep:webpki-roots"]
fixtures = [
"dep:hex",
"dep:tlsn-data-fixtures",

@@ -45,8 +44,7 @@ sha2 = { workspace = true }
thiserror = { workspace = true }
tiny-keccak = { workspace = true, features = ["keccak"] }
web-time = { workspace = true }
webpki-roots = { workspace = true, optional = true }
webpki-root-certs = { workspace = true, optional = true }
webpki-roots = { workspace = true }
rustls-webpki = { workspace = true, features = ["ring"] }
rustls-pki-types = { workspace = true }
itybity = { workspace = true }
@@ -1,7 +0,0 @@
//! Configuration types.

pub mod prove;
pub mod prover;
pub mod tls;
pub mod tls_commit;
pub mod verifier;
@@ -1,189 +0,0 @@
//! Proving configuration.

use rangeset::{RangeSet, ToRangeSet, UnionMut};
use serde::{Deserialize, Serialize};

use crate::transcript::{Direction, Transcript, TranscriptCommitConfig, TranscriptCommitRequest};

/// Configuration to prove information to the verifier.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProveConfig {
server_identity: bool,
reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
transcript_commit: Option<TranscriptCommitConfig>,
}

impl ProveConfig {
/// Creates a new builder.
pub fn builder(transcript: &Transcript) -> ProveConfigBuilder<'_> {
ProveConfigBuilder::new(transcript)
}

/// Returns `true` if the server identity is to be proven.
pub fn server_identity(&self) -> bool {
self.server_identity
}

/// Returns the sent and received ranges of the transcript to be revealed,
/// respectively.
pub fn reveal(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
self.reveal.as_ref()
}

/// Returns the transcript commitment configuration.
pub fn transcript_commit(&self) -> Option<&TranscriptCommitConfig> {
self.transcript_commit.as_ref()
}

/// Returns a request.
pub fn to_request(&self) -> ProveRequest {
ProveRequest {
server_identity: self.server_identity,
reveal: self.reveal.clone(),
transcript_commit: self
.transcript_commit
.clone()
.map(|config| config.to_request()),
}
}
}

/// Builder for [`ProveConfig`].
#[derive(Debug)]
pub struct ProveConfigBuilder<'a> {
transcript: &'a Transcript,
server_identity: bool,
reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
transcript_commit: Option<TranscriptCommitConfig>,
}

impl<'a> ProveConfigBuilder<'a> {
/// Creates a new builder.
pub fn new(transcript: &'a Transcript) -> Self {
Self {
transcript,
server_identity: false,
reveal: None,
transcript_commit: None,
}
}

/// Proves the server identity.
pub fn server_identity(&mut self) -> &mut Self {
self.server_identity = true;
self
}

/// Configures transcript commitments.
pub fn transcript_commit(&mut self, transcript_commit: TranscriptCommitConfig) -> &mut Self {
self.transcript_commit = Some(transcript_commit);
self
}

/// Reveals the given ranges of the transcript.
pub fn reveal(
&mut self,
direction: Direction,
ranges: &dyn ToRangeSet<usize>,
) -> Result<&mut Self, ProveConfigError> {
let idx = ranges.to_range_set();

if idx.end().unwrap_or(0) > self.transcript.len_of_direction(direction) {
return Err(ProveConfigError(ErrorRepr::IndexOutOfBounds {
direction,
actual: idx.end().unwrap_or(0),
len: self.transcript.len_of_direction(direction),
}));
}

let (sent, recv) = self.reveal.get_or_insert_default();
match direction {
Direction::Sent => sent.union_mut(&idx),
Direction::Received => recv.union_mut(&idx),
}

Ok(self)
}

/// Reveals the given ranges of the sent data transcript.
pub fn reveal_sent(
&mut self,
ranges: &dyn ToRangeSet<usize>,
) -> Result<&mut Self, ProveConfigError> {
self.reveal(Direction::Sent, ranges)
}

/// Reveals all of the sent data transcript.
pub fn reveal_sent_all(&mut self) -> Result<&mut Self, ProveConfigError> {
let len = self.transcript.len_of_direction(Direction::Sent);
let (sent, _) = self.reveal.get_or_insert_default();
sent.union_mut(&(0..len));
Ok(self)
}

/// Reveals the given ranges of the received data transcript.
pub fn reveal_recv(
&mut self,
ranges: &dyn ToRangeSet<usize>,
) -> Result<&mut Self, ProveConfigError> {
self.reveal(Direction::Received, ranges)
}

/// Reveals all of the received data transcript.
pub fn reveal_recv_all(&mut self) -> Result<&mut Self, ProveConfigError> {
let len = self.transcript.len_of_direction(Direction::Received);
let (_, recv) = self.reveal.get_or_insert_default();
recv.union_mut(&(0..len));
Ok(self)
}

/// Builds the configuration.
pub fn build(self) -> Result<ProveConfig, ProveConfigError> {
Ok(ProveConfig {
server_identity: self.server_identity,
reveal: self.reveal,
transcript_commit: self.transcript_commit,
})
}
}

/// Request to prove statements about the connection.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProveRequest {
server_identity: bool,
reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
transcript_commit: Option<TranscriptCommitRequest>,
}

impl ProveRequest {
/// Returns `true` if the server identity is to be proven.
pub fn server_identity(&self) -> bool {
self.server_identity
}

/// Returns the sent and received ranges of the transcript to be revealed,
/// respectively.
pub fn reveal(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
self.reveal.as_ref()
}

/// Returns the transcript commitment configuration.
pub fn transcript_commit(&self) -> Option<&TranscriptCommitRequest> {
self.transcript_commit.as_ref()
}
}

/// Error for [`ProveConfig`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct ProveConfigError(#[from] ErrorRepr);

#[derive(Debug, thiserror::Error)]
enum ErrorRepr {
#[error("range is out of bounds of the transcript ({direction}): {actual} > {len}")]
IndexOutOfBounds {
direction: Direction,
actual: usize,
len: usize,
},
}
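For reference, a short usage sketch of the ProveConfig builder removed above, based only on the API shown in this diff; the function name example_config is hypothetical and the transcript argument is assumed to be a crate::transcript::Transcript of the session data.

```rust
// Illustrative only: configure a proof that reveals both directions of the
// transcript and proves the server identity, using the builder shown above.
fn example_config(
    transcript: &crate::transcript::Transcript,
) -> Result<ProveConfig, ProveConfigError> {
    let mut builder = ProveConfig::builder(transcript);
    builder.server_identity();
    builder.reveal_sent_all()?;
    builder.reveal_recv_all()?;
    builder.build()
}
```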
@@ -1,33 +0,0 @@
//! Prover configuration.

use serde::{Deserialize, Serialize};

/// Prover configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProverConfig {}

impl ProverConfig {
/// Creates a new builder.
pub fn builder() -> ProverConfigBuilder {
ProverConfigBuilder::default()
}
}

/// Builder for [`ProverConfig`].
#[derive(Debug, Default)]
pub struct ProverConfigBuilder {}

impl ProverConfigBuilder {
/// Builds the configuration.
pub fn build(self) -> Result<ProverConfig, ProverConfigError> {
Ok(ProverConfig {})
}
}

/// Error for [`ProverConfig`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct ProverConfigError(#[from] ErrorRepr);

#[derive(Debug, thiserror::Error)]
enum ErrorRepr {}
@@ -1,111 +0,0 @@
|
||||
//! TLS client configuration.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
connection::ServerName,
|
||||
webpki::{CertificateDer, PrivateKeyDer, RootCertStore},
|
||||
};
|
||||
|
||||
/// TLS client configuration.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct TlsClientConfig {
|
||||
server_name: ServerName,
|
||||
/// Root certificates.
|
||||
root_store: RootCertStore,
|
||||
/// Certificate chain and a matching private key for client
|
||||
/// authentication.
|
||||
client_auth: Option<(Vec<CertificateDer>, PrivateKeyDer)>,
|
||||
}
|
||||
|
||||
impl TlsClientConfig {
|
||||
/// Creates a new builder.
|
||||
pub fn builder() -> TlsConfigBuilder {
|
||||
TlsConfigBuilder::default()
|
||||
}
|
||||
|
||||
/// Returns the server name.
|
||||
pub fn server_name(&self) -> &ServerName {
|
||||
&self.server_name
|
||||
}
|
||||
|
||||
/// Returns the root certificates.
|
||||
pub fn root_store(&self) -> &RootCertStore {
|
||||
&self.root_store
|
||||
}
|
||||
|
||||
/// Returns a certificate chain and a matching private key for client
|
||||
/// authentication.
|
||||
pub fn client_auth(&self) -> Option<&(Vec<CertificateDer>, PrivateKeyDer)> {
|
||||
self.client_auth.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
/// Builder for [`TlsClientConfig`].
|
||||
#[derive(Debug, Default)]
|
||||
pub struct TlsConfigBuilder {
|
||||
server_name: Option<ServerName>,
|
||||
root_store: Option<RootCertStore>,
|
||||
client_auth: Option<(Vec<CertificateDer>, PrivateKeyDer)>,
|
||||
}
|
||||
|
||||
impl TlsConfigBuilder {
|
||||
/// Sets the server name.
|
||||
pub fn server_name(mut self, server_name: ServerName) -> Self {
|
||||
self.server_name = Some(server_name);
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the root certificates to use for verifying the server's
|
||||
/// certificate.
|
||||
pub fn root_store(mut self, store: RootCertStore) -> Self {
|
||||
self.root_store = Some(store);
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets a DER-encoded certificate chain and a matching private key for
|
||||
/// client authentication.
|
||||
///
|
||||
/// Often the chain will consist of a single end-entity certificate.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `cert_key` - A tuple containing the certificate chain and the private
|
||||
/// key.
|
||||
///
|
||||
/// - Each certificate in the chain must be in the X.509 format.
|
||||
/// - The key must be in the ASN.1 format (either PKCS#8 or PKCS#1).
|
||||
pub fn client_auth(mut self, cert_key: (Vec<CertificateDer>, PrivateKeyDer)) -> Self {
|
||||
self.client_auth = Some(cert_key);
|
||||
self
|
||||
}
|
||||
|
||||
/// Builds the TLS configuration.
|
||||
pub fn build(self) -> Result<TlsClientConfig, TlsConfigError> {
|
||||
let server_name = self.server_name.ok_or(ErrorRepr::MissingField {
|
||||
field: "server_name",
|
||||
})?;
|
||||
|
||||
let root_store = self.root_store.ok_or(ErrorRepr::MissingField {
|
||||
field: "root_store",
|
||||
})?;
|
||||
|
||||
Ok(TlsClientConfig {
|
||||
server_name,
|
||||
root_store,
|
||||
client_auth: self.client_auth,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// TLS configuration error.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error(transparent)]
|
||||
pub struct TlsConfigError(#[from] ErrorRepr);
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("tls config error")]
|
||||
enum ErrorRepr {
|
||||
#[error("missing required field: {field}")]
|
||||
MissingField { field: &'static str },
|
||||
}
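
A hedged sketch of building this TLS client configuration, mirroring how the example further down in this diff constructs its server name. Placeholder values; client authentication is optional, and the `?` operators assume a caller returning a compatible `Result`.

```rust
let tls_config = TlsClientConfig::builder()
    .server_name(ServerName::Dns("example.com".try_into()?))
    // An empty root store is shown only for brevity; supply real trusted roots
    // (or the server fixture's CA) in practice.
    .root_store(RootCertStore::empty())
    .build()?;
```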
|
||||
@@ -1,94 +0,0 @@
|
||||
//! TLS commitment configuration.
|
||||
|
||||
pub mod mpc;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// TLS commitment configuration.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct TlsCommitConfig {
|
||||
protocol: TlsCommitProtocolConfig,
|
||||
}
|
||||
|
||||
impl TlsCommitConfig {
|
||||
/// Creates a new builder.
|
||||
pub fn builder() -> TlsCommitConfigBuilder {
|
||||
TlsCommitConfigBuilder::default()
|
||||
}
|
||||
|
||||
/// Returns the protocol configuration.
|
||||
pub fn protocol(&self) -> &TlsCommitProtocolConfig {
|
||||
&self.protocol
|
||||
}
|
||||
|
||||
/// Returns a TLS commitment request.
|
||||
pub fn to_request(&self) -> TlsCommitRequest {
|
||||
TlsCommitRequest {
|
||||
config: self.protocol.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Builder for [`TlsCommitConfig`].
|
||||
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
|
||||
pub struct TlsCommitConfigBuilder {
|
||||
protocol: Option<TlsCommitProtocolConfig>,
|
||||
}
|
||||
|
||||
impl TlsCommitConfigBuilder {
|
||||
/// Sets the protocol configuration.
|
||||
pub fn protocol<C>(mut self, protocol: C) -> Self
|
||||
where
|
||||
C: Into<TlsCommitProtocolConfig>,
|
||||
{
|
||||
self.protocol = Some(protocol.into());
|
||||
self
|
||||
}
|
||||
|
||||
/// Builds the configuration.
|
||||
pub fn build(self) -> Result<TlsCommitConfig, TlsCommitConfigError> {
|
||||
let protocol = self
|
||||
.protocol
|
||||
.ok_or(ErrorRepr::MissingField { name: "protocol" })?;
|
||||
|
||||
Ok(TlsCommitConfig { protocol })
|
||||
}
|
||||
}
|
||||
|
||||
/// TLS commitment protocol configuration.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[non_exhaustive]
|
||||
pub enum TlsCommitProtocolConfig {
|
||||
/// MPC-TLS configuration.
|
||||
Mpc(mpc::MpcTlsConfig),
|
||||
}
|
||||
|
||||
impl From<mpc::MpcTlsConfig> for TlsCommitProtocolConfig {
|
||||
fn from(config: mpc::MpcTlsConfig) -> Self {
|
||||
Self::Mpc(config)
|
||||
}
|
||||
}
|
||||
|
||||
/// TLS commitment request.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct TlsCommitRequest {
|
||||
config: TlsCommitProtocolConfig,
|
||||
}
|
||||
|
||||
impl TlsCommitRequest {
|
||||
/// Returns the protocol configuration.
|
||||
pub fn protocol(&self) -> &TlsCommitProtocolConfig {
|
||||
&self.config
|
||||
}
|
||||
}
|
||||
|
||||
/// Error for [`TlsCommitConfig`].
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error(transparent)]
|
||||
pub struct TlsCommitConfigError(#[from] ErrorRepr);
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
enum ErrorRepr {
|
||||
#[error("missing field: {name}")]
|
||||
MissingField { name: &'static str },
|
||||
}
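
For context, a hedged sketch of selecting a protocol for the commitment. The `mpc_config: MpcTlsConfig` value is assumed to come from the MPC-TLS builder shown in the next file.

```rust
let commit_config = TlsCommitConfig::builder()
    .protocol(mpc_config) // uses the From<MpcTlsConfig> conversion above
    .build()?;

// The request form is what gets exchanged with the peer.
let request = commit_config.to_request();
```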
|
||||
@@ -1,241 +0,0 @@
|
||||
//! MPC-TLS commitment protocol configuration.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
// The default of 32 bytes is enough to decrypt the TLS protocol messages.
|
||||
const DEFAULT_MAX_RECV_ONLINE: usize = 32;
|
||||
|
||||
/// MPC-TLS commitment protocol configuration.
|
||||
#[derive(Clone, Debug, Deserialize, Serialize)]
|
||||
#[serde(try_from = "unchecked::MpcTlsConfigUnchecked")]
|
||||
pub struct MpcTlsConfig {
|
||||
/// Maximum number of bytes that can be sent.
|
||||
max_sent_data: usize,
|
||||
/// Maximum number of application data records that can be sent.
|
||||
max_sent_records: Option<usize>,
|
||||
/// Maximum number of bytes that can be decrypted online, i.e. while the
|
||||
/// MPC-TLS connection is active.
|
||||
max_recv_data_online: usize,
|
||||
/// Maximum number of bytes that can be received.
|
||||
max_recv_data: usize,
|
||||
/// Maximum number of received application data records that can be
|
||||
/// decrypted online, i.e. while the MPC-TLS connection is active.
|
||||
max_recv_records_online: Option<usize>,
|
||||
/// Whether the `deferred decryption` feature is toggled on from the start
|
||||
/// of the MPC-TLS connection.
|
||||
defer_decryption_from_start: bool,
|
||||
/// Network settings.
|
||||
network: NetworkSetting,
|
||||
}
|
||||
|
||||
impl MpcTlsConfig {
|
||||
/// Creates a new builder.
|
||||
pub fn builder() -> MpcTlsConfigBuilder {
|
||||
MpcTlsConfigBuilder::default()
|
||||
}
|
||||
|
||||
/// Returns the maximum number of bytes that can be sent.
|
||||
pub fn max_sent_data(&self) -> usize {
|
||||
self.max_sent_data
|
||||
}
|
||||
|
||||
/// Returns the maximum number of application data records that can
|
||||
/// be sent.
|
||||
pub fn max_sent_records(&self) -> Option<usize> {
|
||||
self.max_sent_records
|
||||
}
|
||||
|
||||
/// Returns the maximum number of bytes that can be decrypted online.
|
||||
pub fn max_recv_data_online(&self) -> usize {
|
||||
self.max_recv_data_online
|
||||
}
|
||||
|
||||
/// Returns the maximum number of bytes that can be received.
|
||||
pub fn max_recv_data(&self) -> usize {
|
||||
self.max_recv_data
|
||||
}
|
||||
|
||||
/// Returns the maximum number of received application data records that
|
||||
/// can be decrypted online.
|
||||
pub fn max_recv_records_online(&self) -> Option<usize> {
|
||||
self.max_recv_records_online
|
||||
}
|
||||
|
||||
/// Returns whether the `deferred decryption` feature is toggled on from the
|
||||
/// start of the MPC-TLS connection.
|
||||
pub fn defer_decryption_from_start(&self) -> bool {
|
||||
self.defer_decryption_from_start
|
||||
}
|
||||
|
||||
/// Returns the network settings.
|
||||
pub fn network(&self) -> NetworkSetting {
|
||||
self.network
|
||||
}
|
||||
}
|
||||
|
||||
fn validate(config: MpcTlsConfig) -> Result<MpcTlsConfig, MpcTlsConfigError> {
|
||||
if config.max_recv_data_online > config.max_recv_data {
|
||||
return Err(ErrorRepr::InvalidValue {
|
||||
name: "max_recv_data_online",
|
||||
reason: format!(
|
||||
"must be <= max_recv_data ({} > {})",
|
||||
config.max_recv_data_online, config.max_recv_data
|
||||
),
|
||||
}
|
||||
.into());
|
||||
}
|
||||
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
/// Builder for [`MpcTlsConfig`].
|
||||
#[derive(Debug, Default)]
|
||||
pub struct MpcTlsConfigBuilder {
|
||||
max_sent_data: Option<usize>,
|
||||
max_sent_records: Option<usize>,
|
||||
max_recv_data_online: Option<usize>,
|
||||
max_recv_data: Option<usize>,
|
||||
max_recv_records_online: Option<usize>,
|
||||
defer_decryption_from_start: Option<bool>,
|
||||
network: Option<NetworkSetting>,
|
||||
}
|
||||
|
||||
impl MpcTlsConfigBuilder {
|
||||
/// Sets the maximum number of bytes that can be sent.
|
||||
pub fn max_sent_data(mut self, max_sent_data: usize) -> Self {
|
||||
self.max_sent_data = Some(max_sent_data);
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the maximum number of application data records that can be sent.
|
||||
pub fn max_sent_records(mut self, max_sent_records: usize) -> Self {
|
||||
self.max_sent_records = Some(max_sent_records);
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the maximum number of bytes that can be decrypted online.
|
||||
pub fn max_recv_data_online(mut self, max_recv_data_online: usize) -> Self {
|
||||
self.max_recv_data_online = Some(max_recv_data_online);
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the maximum number of bytes that can be received.
|
||||
pub fn max_recv_data(mut self, max_recv_data: usize) -> Self {
|
||||
self.max_recv_data = Some(max_recv_data);
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the maximum number of received application data records that can
|
||||
/// be decrypted online.
|
||||
pub fn max_recv_records_online(mut self, max_recv_records_online: usize) -> Self {
|
||||
self.max_recv_records_online = Some(max_recv_records_online);
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets whether the `deferred decryption` feature is toggled on from the
|
||||
/// start of the MPC-TLS connection.
|
||||
pub fn defer_decryption_from_start(mut self, defer_decryption_from_start: bool) -> Self {
|
||||
self.defer_decryption_from_start = Some(defer_decryption_from_start);
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the network settings.
|
||||
pub fn network(mut self, network: NetworkSetting) -> Self {
|
||||
self.network = Some(network);
|
||||
self
|
||||
}
|
||||
|
||||
/// Builds the configuration.
|
||||
pub fn build(self) -> Result<MpcTlsConfig, MpcTlsConfigError> {
|
||||
let Self {
|
||||
max_sent_data,
|
||||
max_sent_records,
|
||||
max_recv_data_online,
|
||||
max_recv_data,
|
||||
max_recv_records_online,
|
||||
defer_decryption_from_start,
|
||||
network,
|
||||
} = self;
|
||||
|
||||
let max_sent_data = max_sent_data.ok_or(ErrorRepr::MissingField {
|
||||
name: "max_sent_data",
|
||||
})?;
|
||||
|
||||
let max_recv_data_online = max_recv_data_online.unwrap_or(DEFAULT_MAX_RECV_ONLINE);
|
||||
let max_recv_data = max_recv_data.ok_or(ErrorRepr::MissingField {
|
||||
name: "max_recv_data",
|
||||
})?;
|
||||
|
||||
let defer_decryption_from_start = defer_decryption_from_start.unwrap_or(true);
|
||||
let network = network.unwrap_or_default();
|
||||
|
||||
validate(MpcTlsConfig {
|
||||
max_sent_data,
|
||||
max_sent_records,
|
||||
max_recv_data_online,
|
||||
max_recv_data,
|
||||
max_recv_records_online,
|
||||
defer_decryption_from_start,
|
||||
network,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Settings for the network environment.
|
||||
///
|
||||
/// Provides optimization options to adapt the protocol to different network
|
||||
/// situations.
|
||||
#[derive(Debug, Clone, Copy, Serialize, Deserialize, Default)]
|
||||
pub enum NetworkSetting {
|
||||
/// Reduces network round-trips at the expense of consuming more network
|
||||
/// bandwidth.
|
||||
Bandwidth,
|
||||
/// Reduces network bandwidth utilization at the expense of more network
|
||||
/// round-trips.
|
||||
#[default]
|
||||
Latency,
|
||||
}
|
||||
|
||||
/// Error for [`MpcTlsConfig`].
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error(transparent)]
|
||||
pub struct MpcTlsConfigError(#[from] ErrorRepr);
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
enum ErrorRepr {
|
||||
#[error("missing field: {name}")]
|
||||
MissingField { name: &'static str },
|
||||
#[error("invalid value for field({name}): {reason}")]
|
||||
InvalidValue { name: &'static str, reason: String },
|
||||
}
|
||||
|
||||
mod unchecked {
|
||||
use super::*;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub(super) struct MpcTlsConfigUnchecked {
|
||||
max_sent_data: usize,
|
||||
max_sent_records: Option<usize>,
|
||||
max_recv_data_online: usize,
|
||||
max_recv_data: usize,
|
||||
max_recv_records_online: Option<usize>,
|
||||
defer_decryption_from_start: bool,
|
||||
network: NetworkSetting,
|
||||
}
|
||||
|
||||
impl TryFrom<MpcTlsConfigUnchecked> for MpcTlsConfig {
|
||||
type Error = MpcTlsConfigError;
|
||||
|
||||
fn try_from(value: MpcTlsConfigUnchecked) -> Result<Self, Self::Error> {
|
||||
validate(MpcTlsConfig {
|
||||
max_sent_data: value.max_sent_data,
|
||||
max_sent_records: value.max_sent_records,
|
||||
max_recv_data_online: value.max_recv_data_online,
|
||||
max_recv_data: value.max_recv_data,
|
||||
max_recv_records_online: value.max_recv_records_online,
|
||||
defer_decryption_from_start: value.defer_decryption_from_start,
|
||||
network: value.network,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
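
A hedged sketch of this builder: only `max_sent_data` and `max_recv_data` are required, the online receive limit defaults to 32 bytes, and `validate` rejects an online limit larger than the total receive limit.

```rust
let config = MpcTlsConfig::builder()
    .max_sent_data(4096)
    .max_recv_data(16384)
    .network(NetworkSetting::Latency)
    .build()?;
assert_eq!(config.max_recv_data_online(), 32);

// Rejected by validate(): the online limit exceeds the total receive limit.
assert!(MpcTlsConfig::builder()
    .max_sent_data(4096)
    .max_recv_data(1024)
    .max_recv_data_online(2048)
    .build()
    .is_err());
```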
|
||||
@@ -1,56 +0,0 @@
//! Verifier configuration.

use serde::{Deserialize, Serialize};

use crate::webpki::RootCertStore;

/// Verifier configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VerifierConfig {
    root_store: RootCertStore,
}

impl VerifierConfig {
    /// Creates a new builder.
    pub fn builder() -> VerifierConfigBuilder {
        VerifierConfigBuilder::default()
    }

    /// Returns the root certificate store.
    pub fn root_store(&self) -> &RootCertStore {
        &self.root_store
    }
}

/// Builder for [`VerifierConfig`].
#[derive(Debug, Default)]
pub struct VerifierConfigBuilder {
    root_store: Option<RootCertStore>,
}

impl VerifierConfigBuilder {
    /// Sets the root certificate store.
    pub fn root_store(mut self, root_store: RootCertStore) -> Self {
        self.root_store = Some(root_store);
        self
    }

    /// Builds the configuration.
    pub fn build(self) -> Result<VerifierConfig, VerifierConfigError> {
        let root_store = self
            .root_store
            .ok_or(ErrorRepr::MissingField { name: "root_store" })?;
        Ok(VerifierConfig { root_store })
    }
}

/// Error for [`VerifierConfig`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct VerifierConfigError(#[from] ErrorRepr);

#[derive(Debug, thiserror::Error)]
enum ErrorRepr {
    #[error("missing field: {name}")]
    MissingField { name: &'static str },
}
|
||||
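
A hedged usage sketch of the verifier configuration above; `RootCertStore::empty()` is used only to keep the snippet short.

```rust
// Placeholder roots for brevity; a real deployment would load trusted roots.
let verifier_config = VerifierConfig::builder()
    .root_store(RootCertStore::empty())
    .build()?;
```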
@@ -116,75 +116,84 @@ pub enum KeyType {
|
||||
SECP256R1 = 0x0017,
|
||||
}
|
||||
|
||||
/// Signature algorithm used on the key exchange parameters.
|
||||
/// Signature scheme on the key exchange parameters.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
#[allow(non_camel_case_types, missing_docs)]
|
||||
pub enum SignatureAlgorithm {
|
||||
ECDSA_NISTP256_SHA256,
|
||||
ECDSA_NISTP256_SHA384,
|
||||
ECDSA_NISTP384_SHA256,
|
||||
ECDSA_NISTP384_SHA384,
|
||||
ED25519,
|
||||
RSA_PKCS1_2048_8192_SHA256,
|
||||
RSA_PKCS1_2048_8192_SHA384,
|
||||
RSA_PKCS1_2048_8192_SHA512,
|
||||
RSA_PSS_2048_8192_SHA256_LEGACY_KEY,
|
||||
RSA_PSS_2048_8192_SHA384_LEGACY_KEY,
|
||||
RSA_PSS_2048_8192_SHA512_LEGACY_KEY,
|
||||
pub enum SignatureScheme {
|
||||
RSA_PKCS1_SHA1 = 0x0201,
|
||||
ECDSA_SHA1_Legacy = 0x0203,
|
||||
RSA_PKCS1_SHA256 = 0x0401,
|
||||
ECDSA_NISTP256_SHA256 = 0x0403,
|
||||
RSA_PKCS1_SHA384 = 0x0501,
|
||||
ECDSA_NISTP384_SHA384 = 0x0503,
|
||||
RSA_PKCS1_SHA512 = 0x0601,
|
||||
ECDSA_NISTP521_SHA512 = 0x0603,
|
||||
RSA_PSS_SHA256 = 0x0804,
|
||||
RSA_PSS_SHA384 = 0x0805,
|
||||
RSA_PSS_SHA512 = 0x0806,
|
||||
ED25519 = 0x0807,
|
||||
}
|
||||
|
||||
impl fmt::Display for SignatureAlgorithm {
|
||||
impl fmt::Display for SignatureScheme {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
SignatureAlgorithm::ECDSA_NISTP256_SHA256 => write!(f, "ECDSA_NISTP256_SHA256"),
|
||||
SignatureAlgorithm::ECDSA_NISTP256_SHA384 => write!(f, "ECDSA_NISTP256_SHA384"),
|
||||
SignatureAlgorithm::ECDSA_NISTP384_SHA256 => write!(f, "ECDSA_NISTP384_SHA256"),
|
||||
SignatureAlgorithm::ECDSA_NISTP384_SHA384 => write!(f, "ECDSA_NISTP384_SHA384"),
|
||||
SignatureAlgorithm::ED25519 => write!(f, "ED25519"),
|
||||
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256 => {
|
||||
write!(f, "RSA_PKCS1_2048_8192_SHA256")
|
||||
}
|
||||
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384 => {
|
||||
write!(f, "RSA_PKCS1_2048_8192_SHA384")
|
||||
}
|
||||
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512 => {
|
||||
write!(f, "RSA_PKCS1_2048_8192_SHA512")
|
||||
}
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
|
||||
write!(f, "RSA_PSS_2048_8192_SHA256_LEGACY_KEY")
|
||||
}
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
|
||||
write!(f, "RSA_PSS_2048_8192_SHA384_LEGACY_KEY")
|
||||
}
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
|
||||
write!(f, "RSA_PSS_2048_8192_SHA512_LEGACY_KEY")
|
||||
}
|
||||
SignatureScheme::RSA_PKCS1_SHA1 => write!(f, "RSA_PKCS1_SHA1"),
|
||||
SignatureScheme::ECDSA_SHA1_Legacy => write!(f, "ECDSA_SHA1_Legacy"),
|
||||
SignatureScheme::RSA_PKCS1_SHA256 => write!(f, "RSA_PKCS1_SHA256"),
|
||||
SignatureScheme::ECDSA_NISTP256_SHA256 => write!(f, "ECDSA_NISTP256_SHA256"),
|
||||
SignatureScheme::RSA_PKCS1_SHA384 => write!(f, "RSA_PKCS1_SHA384"),
|
||||
SignatureScheme::ECDSA_NISTP384_SHA384 => write!(f, "ECDSA_NISTP384_SHA384"),
|
||||
SignatureScheme::RSA_PKCS1_SHA512 => write!(f, "RSA_PKCS1_SHA512"),
|
||||
SignatureScheme::ECDSA_NISTP521_SHA512 => write!(f, "ECDSA_NISTP521_SHA512"),
|
||||
SignatureScheme::RSA_PSS_SHA256 => write!(f, "RSA_PSS_SHA256"),
|
||||
SignatureScheme::RSA_PSS_SHA384 => write!(f, "RSA_PSS_SHA384"),
|
||||
SignatureScheme::RSA_PSS_SHA512 => write!(f, "RSA_PSS_SHA512"),
|
||||
SignatureScheme::ED25519 => write!(f, "ED25519"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<tls_core::verify::SignatureAlgorithm> for SignatureAlgorithm {
|
||||
fn from(value: tls_core::verify::SignatureAlgorithm) -> Self {
|
||||
use tls_core::verify::SignatureAlgorithm as Core;
|
||||
impl TryFrom<tls_core::msgs::enums::SignatureScheme> for SignatureScheme {
|
||||
type Error = &'static str;
|
||||
|
||||
fn try_from(value: tls_core::msgs::enums::SignatureScheme) -> Result<Self, Self::Error> {
|
||||
use tls_core::msgs::enums::SignatureScheme as Core;
|
||||
use SignatureScheme::*;
|
||||
Ok(match value {
|
||||
Core::RSA_PKCS1_SHA1 => RSA_PKCS1_SHA1,
|
||||
Core::ECDSA_SHA1_Legacy => ECDSA_SHA1_Legacy,
|
||||
Core::RSA_PKCS1_SHA256 => RSA_PKCS1_SHA256,
|
||||
Core::ECDSA_NISTP256_SHA256 => ECDSA_NISTP256_SHA256,
|
||||
Core::RSA_PKCS1_SHA384 => RSA_PKCS1_SHA384,
|
||||
Core::ECDSA_NISTP384_SHA384 => ECDSA_NISTP384_SHA384,
|
||||
Core::RSA_PKCS1_SHA512 => RSA_PKCS1_SHA512,
|
||||
Core::ECDSA_NISTP521_SHA512 => ECDSA_NISTP521_SHA512,
|
||||
Core::RSA_PSS_SHA256 => RSA_PSS_SHA256,
|
||||
Core::RSA_PSS_SHA384 => RSA_PSS_SHA384,
|
||||
Core::RSA_PSS_SHA512 => RSA_PSS_SHA512,
|
||||
Core::ED25519 => ED25519,
|
||||
_ => return Err("unsupported signature scheme"),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SignatureScheme> for tls_core::msgs::enums::SignatureScheme {
|
||||
fn from(value: SignatureScheme) -> Self {
|
||||
use tls_core::msgs::enums::SignatureScheme::*;
|
||||
match value {
|
||||
Core::ECDSA_NISTP256_SHA256 => SignatureAlgorithm::ECDSA_NISTP256_SHA256,
|
||||
Core::ECDSA_NISTP256_SHA384 => SignatureAlgorithm::ECDSA_NISTP256_SHA384,
|
||||
Core::ECDSA_NISTP384_SHA256 => SignatureAlgorithm::ECDSA_NISTP384_SHA256,
|
||||
Core::ECDSA_NISTP384_SHA384 => SignatureAlgorithm::ECDSA_NISTP384_SHA384,
|
||||
Core::ED25519 => SignatureAlgorithm::ED25519,
|
||||
Core::RSA_PKCS1_2048_8192_SHA256 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256,
|
||||
Core::RSA_PKCS1_2048_8192_SHA384 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384,
|
||||
Core::RSA_PKCS1_2048_8192_SHA512 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512,
|
||||
Core::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY
|
||||
}
|
||||
Core::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY
|
||||
}
|
||||
Core::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY
|
||||
}
|
||||
SignatureScheme::RSA_PKCS1_SHA1 => RSA_PKCS1_SHA1,
|
||||
SignatureScheme::ECDSA_SHA1_Legacy => ECDSA_SHA1_Legacy,
|
||||
SignatureScheme::RSA_PKCS1_SHA256 => RSA_PKCS1_SHA256,
|
||||
SignatureScheme::ECDSA_NISTP256_SHA256 => ECDSA_NISTP256_SHA256,
|
||||
SignatureScheme::RSA_PKCS1_SHA384 => RSA_PKCS1_SHA384,
|
||||
SignatureScheme::ECDSA_NISTP384_SHA384 => ECDSA_NISTP384_SHA384,
|
||||
SignatureScheme::RSA_PKCS1_SHA512 => RSA_PKCS1_SHA512,
|
||||
SignatureScheme::ECDSA_NISTP521_SHA512 => ECDSA_NISTP521_SHA512,
|
||||
SignatureScheme::RSA_PSS_SHA256 => RSA_PSS_SHA256,
|
||||
SignatureScheme::RSA_PSS_SHA384 => RSA_PSS_SHA384,
|
||||
SignatureScheme::RSA_PSS_SHA512 => RSA_PSS_SHA512,
|
||||
SignatureScheme::ED25519 => ED25519,
|
||||
}
|
||||
}
|
||||
}
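
A hedged sketch of the conversions above: the listed schemes round-trip between the two enums, while anything outside that set is rejected by `try_from` rather than silently mapped.

```rust
use tls_core::msgs::enums::SignatureScheme as CoreScheme;

let scheme = SignatureScheme::try_from(CoreScheme::ECDSA_NISTP256_SHA256)
    .expect("scheme is supported");
assert_eq!(scheme.to_string(), "ECDSA_NISTP256_SHA256");

// Converting back is infallible.
let _core: CoreScheme = scheme.into();
```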
|
||||
@@ -192,8 +201,8 @@ impl From<tls_core::verify::SignatureAlgorithm> for SignatureAlgorithm {
|
||||
/// Server's signature of the key exchange parameters.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ServerSignature {
|
||||
/// Signature algorithm.
|
||||
pub alg: SignatureAlgorithm,
|
||||
/// Signature scheme.
|
||||
pub scheme: SignatureScheme,
|
||||
/// Signature data.
|
||||
pub sig: Vec<u8>,
|
||||
}
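
A small hedged sketch of constructing the updated struct, mirroring the fixture change further down in this diff; the signature bytes here are placeholders, not real handshake output.

```rust
let sig = ServerSignature {
    scheme: SignatureScheme::ECDSA_NISTP256_SHA256,
    sig: vec![0u8; 64], // placeholder bytes
};
```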
|
||||
@@ -350,23 +359,20 @@ impl HandshakeData {
|
||||
message.extend_from_slice(&server_ephemeral_key.kx_params());
|
||||
|
||||
use webpki::ring as alg;
|
||||
let sig_alg = match self.sig.alg {
|
||||
SignatureAlgorithm::ECDSA_NISTP256_SHA256 => alg::ECDSA_P256_SHA256,
|
||||
SignatureAlgorithm::ECDSA_NISTP256_SHA384 => alg::ECDSA_P256_SHA384,
|
||||
SignatureAlgorithm::ECDSA_NISTP384_SHA256 => alg::ECDSA_P384_SHA256,
|
||||
SignatureAlgorithm::ECDSA_NISTP384_SHA384 => alg::ECDSA_P384_SHA384,
|
||||
SignatureAlgorithm::ED25519 => alg::ED25519,
|
||||
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256 => alg::RSA_PKCS1_2048_8192_SHA256,
|
||||
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384 => alg::RSA_PKCS1_2048_8192_SHA384,
|
||||
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512 => alg::RSA_PKCS1_2048_8192_SHA512,
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
|
||||
alg::RSA_PSS_2048_8192_SHA256_LEGACY_KEY
|
||||
}
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
|
||||
alg::RSA_PSS_2048_8192_SHA384_LEGACY_KEY
|
||||
}
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
|
||||
alg::RSA_PSS_2048_8192_SHA512_LEGACY_KEY
|
||||
let sig_alg = match self.sig.scheme {
|
||||
SignatureScheme::RSA_PKCS1_SHA256 => alg::RSA_PKCS1_2048_8192_SHA256,
|
||||
SignatureScheme::RSA_PKCS1_SHA384 => alg::RSA_PKCS1_2048_8192_SHA384,
|
||||
SignatureScheme::RSA_PKCS1_SHA512 => alg::RSA_PKCS1_2048_8192_SHA512,
|
||||
SignatureScheme::RSA_PSS_SHA256 => alg::RSA_PSS_2048_8192_SHA256_LEGACY_KEY,
|
||||
SignatureScheme::RSA_PSS_SHA384 => alg::RSA_PSS_2048_8192_SHA384_LEGACY_KEY,
|
||||
SignatureScheme::RSA_PSS_SHA512 => alg::RSA_PSS_2048_8192_SHA512_LEGACY_KEY,
|
||||
SignatureScheme::ECDSA_NISTP256_SHA256 => alg::ECDSA_P256_SHA256,
|
||||
SignatureScheme::ECDSA_NISTP384_SHA384 => alg::ECDSA_P384_SHA384,
|
||||
SignatureScheme::ED25519 => alg::ED25519,
|
||||
scheme => {
|
||||
return Err(HandshakeVerificationError::UnsupportedSignatureScheme(
|
||||
scheme,
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
@@ -396,6 +402,8 @@ pub enum HandshakeVerificationError {
|
||||
InvalidServerEphemeralKey,
|
||||
#[error("server certificate verification failed: {0}")]
|
||||
ServerCert(ServerCertVerifierError),
|
||||
#[error("unsupported signature scheme: {0}")]
|
||||
UnsupportedSignatureScheme(SignatureScheme),
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -10,8 +10,7 @@ use hex::FromHex;
|
||||
use crate::{
|
||||
connection::{
|
||||
CertBinding, CertBindingV1_2, ConnectionInfo, DnsName, HandshakeData, KeyType,
|
||||
ServerEphemKey, ServerName, ServerSignature, SignatureAlgorithm, TlsVersion,
|
||||
TranscriptLength,
|
||||
ServerEphemKey, ServerName, ServerSignature, SignatureScheme, TlsVersion, TranscriptLength,
|
||||
},
|
||||
transcript::{
|
||||
encoding::{EncoderSecret, EncodingProvider},
|
||||
@@ -48,7 +47,7 @@ impl ConnectionFixture {
|
||||
CertificateDer(include_bytes!("fixtures/data/tlsnotary.org/ca.der").to_vec()),
|
||||
],
|
||||
sig: ServerSignature {
|
||||
alg: SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256,
|
||||
scheme: SignatureScheme::RSA_PKCS1_SHA256,
|
||||
sig: Vec::<u8>::from_hex(include_bytes!(
|
||||
"fixtures/data/tlsnotary.org/signature"
|
||||
))
|
||||
@@ -93,7 +92,7 @@ impl ConnectionFixture {
|
||||
CertificateDer(include_bytes!("fixtures/data/appliedzkp.org/ca.der").to_vec()),
|
||||
],
|
||||
sig: ServerSignature {
|
||||
alg: SignatureAlgorithm::ECDSA_NISTP256_SHA256,
|
||||
scheme: SignatureScheme::ECDSA_NISTP256_SHA256,
|
||||
sig: Vec::<u8>::from_hex(include_bytes!(
|
||||
"fixtures/data/appliedzkp.org/signature"
|
||||
))
|
||||
|
||||
@@ -2,13 +2,12 @@
|
||||
|
||||
use aead::Payload as AeadPayload;
|
||||
use aes_gcm::{aead::Aead, Aes128Gcm, NewAead};
|
||||
#[allow(deprecated)]
|
||||
use generic_array::GenericArray;
|
||||
use rand::{rngs::StdRng, Rng, SeedableRng};
|
||||
use tls_core::msgs::{
|
||||
base::Payload,
|
||||
codec::Codec,
|
||||
enums::{HandshakeType, ProtocolVersion},
|
||||
enums::{ContentType, HandshakeType, ProtocolVersion},
|
||||
handshake::{HandshakeMessagePayload, HandshakePayload},
|
||||
message::{OpaqueMessage, PlainMessage},
|
||||
};
|
||||
@@ -16,7 +15,7 @@ use tls_core::msgs::{
|
||||
use crate::{
|
||||
connection::{TranscriptLength, VerifyData},
|
||||
fixtures::ConnectionFixture,
|
||||
transcript::{ContentType, Record, TlsTranscript},
|
||||
transcript::{Record, TlsTranscript},
|
||||
};
|
||||
|
||||
/// The key used for encryption of the sent and received transcript.
|
||||
@@ -104,7 +103,7 @@ impl TranscriptGenerator {
|
||||
|
||||
let explicit_nonce: [u8; 8] = seq.to_be_bytes();
|
||||
let msg = PlainMessage {
|
||||
typ: ContentType::ApplicationData.into(),
|
||||
typ: ContentType::ApplicationData,
|
||||
version: ProtocolVersion::TLSv1_2,
|
||||
payload: Payload::new(plaintext),
|
||||
};
|
||||
@@ -139,7 +138,7 @@ impl TranscriptGenerator {
|
||||
handshake_message.encode(&mut plaintext);
|
||||
|
||||
let msg = PlainMessage {
|
||||
typ: ContentType::Handshake.into(),
|
||||
typ: ContentType::Handshake,
|
||||
version: ProtocolVersion::TLSv1_2,
|
||||
payload: Payload::new(plaintext.clone()),
|
||||
};
|
||||
@@ -181,7 +180,6 @@ fn aes_gcm_encrypt(
|
||||
let mut nonce = [0u8; 12];
|
||||
nonce[..4].copy_from_slice(&iv);
|
||||
nonce[4..].copy_from_slice(&explicit_nonce);
|
||||
#[allow(deprecated)]
|
||||
let nonce = GenericArray::from_slice(&nonce);
|
||||
let cipher = Aes128Gcm::new_from_slice(&key).unwrap();
|
||||
|
||||
|
||||
@@ -296,14 +296,14 @@ mod sha2 {
|
||||
fn hash(&self, data: &[u8]) -> super::Hash {
|
||||
let mut hasher = ::sha2::Sha256::default();
|
||||
hasher.update(data);
|
||||
super::Hash::new(hasher.finalize().as_ref())
|
||||
super::Hash::new(hasher.finalize().as_slice())
|
||||
}
|
||||
|
||||
fn hash_prefixed(&self, prefix: &[u8], data: &[u8]) -> super::Hash {
|
||||
let mut hasher = ::sha2::Sha256::default();
|
||||
hasher.update(prefix);
|
||||
hasher.update(data);
|
||||
super::Hash::new(hasher.finalize().as_ref())
|
||||
super::Hash::new(hasher.finalize().as_slice())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,18 +12,196 @@ pub mod merkle;
|
||||
pub mod transcript;
|
||||
pub mod webpki;
|
||||
pub use rangeset;
|
||||
pub mod config;
|
||||
pub(crate) mod display;
|
||||
|
||||
use rangeset::{RangeSet, ToRangeSet, UnionMut};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
connection::ServerName,
|
||||
connection::{HandshakeData, ServerName},
|
||||
transcript::{
|
||||
encoding::EncoderSecret, PartialTranscript, TranscriptCommitment, TranscriptSecret,
|
||||
Direction, PartialTranscript, Transcript, TranscriptCommitConfig, TranscriptCommitRequest,
|
||||
TranscriptCommitment, TranscriptSecret,
|
||||
},
|
||||
};
|
||||
|
||||
/// Configuration to prove information to the verifier.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ProveConfig {
|
||||
server_identity: bool,
|
||||
reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
|
||||
transcript_commit: Option<TranscriptCommitConfig>,
|
||||
}
|
||||
|
||||
impl ProveConfig {
|
||||
/// Creates a new builder.
|
||||
pub fn builder(transcript: &Transcript) -> ProveConfigBuilder<'_> {
|
||||
ProveConfigBuilder::new(transcript)
|
||||
}
|
||||
|
||||
/// Returns `true` if the server identity is to be proven.
|
||||
pub fn server_identity(&self) -> bool {
|
||||
self.server_identity
|
||||
}
|
||||
|
||||
/// Returns the ranges of the transcript to be revealed.
|
||||
pub fn reveal(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
|
||||
self.reveal.as_ref()
|
||||
}
|
||||
|
||||
/// Returns the transcript commitment configuration.
|
||||
pub fn transcript_commit(&self) -> Option<&TranscriptCommitConfig> {
|
||||
self.transcript_commit.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
/// Builder for [`ProveConfig`].
|
||||
#[derive(Debug)]
|
||||
pub struct ProveConfigBuilder<'a> {
|
||||
transcript: &'a Transcript,
|
||||
server_identity: bool,
|
||||
reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
|
||||
transcript_commit: Option<TranscriptCommitConfig>,
|
||||
}
|
||||
|
||||
impl<'a> ProveConfigBuilder<'a> {
|
||||
/// Creates a new builder.
|
||||
pub fn new(transcript: &'a Transcript) -> Self {
|
||||
Self {
|
||||
transcript,
|
||||
server_identity: false,
|
||||
reveal: None,
|
||||
transcript_commit: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Proves the server identity.
|
||||
pub fn server_identity(&mut self) -> &mut Self {
|
||||
self.server_identity = true;
|
||||
self
|
||||
}
|
||||
|
||||
/// Configures transcript commitments.
|
||||
pub fn transcript_commit(&mut self, transcript_commit: TranscriptCommitConfig) -> &mut Self {
|
||||
self.transcript_commit = Some(transcript_commit);
|
||||
self
|
||||
}
|
||||
|
||||
/// Reveals the given ranges of the transcript.
|
||||
pub fn reveal(
|
||||
&mut self,
|
||||
direction: Direction,
|
||||
ranges: &dyn ToRangeSet<usize>,
|
||||
) -> Result<&mut Self, ProveConfigBuilderError> {
|
||||
let idx = ranges.to_range_set();
|
||||
|
||||
if idx.end().unwrap_or(0) > self.transcript.len_of_direction(direction) {
|
||||
return Err(ProveConfigBuilderError(
|
||||
ProveConfigBuilderErrorRepr::IndexOutOfBounds {
|
||||
direction,
|
||||
actual: idx.end().unwrap_or(0),
|
||||
len: self.transcript.len_of_direction(direction),
|
||||
},
|
||||
));
|
||||
}
|
||||
|
||||
let (sent, recv) = self.reveal.get_or_insert_default();
|
||||
match direction {
|
||||
Direction::Sent => sent.union_mut(&idx),
|
||||
Direction::Received => recv.union_mut(&idx),
|
||||
}
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Reveals the given ranges of the sent data transcript.
|
||||
pub fn reveal_sent(
|
||||
&mut self,
|
||||
ranges: &dyn ToRangeSet<usize>,
|
||||
) -> Result<&mut Self, ProveConfigBuilderError> {
|
||||
self.reveal(Direction::Sent, ranges)
|
||||
}
|
||||
|
||||
/// Reveals the given ranges of the received data transcript.
|
||||
pub fn reveal_recv(
|
||||
&mut self,
|
||||
ranges: &dyn ToRangeSet<usize>,
|
||||
) -> Result<&mut Self, ProveConfigBuilderError> {
|
||||
self.reveal(Direction::Received, ranges)
|
||||
}
|
||||
|
||||
/// Builds the configuration.
|
||||
pub fn build(self) -> Result<ProveConfig, ProveConfigBuilderError> {
|
||||
Ok(ProveConfig {
|
||||
server_identity: self.server_identity,
|
||||
reveal: self.reveal,
|
||||
transcript_commit: self.transcript_commit,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Error for [`ProveConfigBuilder`].
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error(transparent)]
|
||||
pub struct ProveConfigBuilderError(#[from] ProveConfigBuilderErrorRepr);
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
enum ProveConfigBuilderErrorRepr {
|
||||
#[error("range is out of bounds of the transcript ({direction}): {actual} > {len}")]
|
||||
IndexOutOfBounds {
|
||||
direction: Direction,
|
||||
actual: usize,
|
||||
len: usize,
|
||||
},
|
||||
}
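
A hedged sketch of the bounds check above. The byte-string arguments to `Transcript::new` are an assumption based on the fixtures used elsewhere in this diff.

```rust
let transcript = Transcript::new(b"GET / HTTP/1.1", b"HTTP/1.1 200 OK");
let mut builder = ProveConfig::builder(&transcript);

// 1024 exceeds the sent length, so this surfaces IndexOutOfBounds.
assert!(builder.reveal_sent(&(0..1024)).is_err());
// In-range reveals succeed.
assert!(builder.reveal_sent(&(0..4)).is_ok());
```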
|
||||
|
||||
/// Configuration to verify information from the prover.
|
||||
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
|
||||
pub struct VerifyConfig {}
|
||||
|
||||
impl VerifyConfig {
|
||||
/// Creates a new builder.
|
||||
pub fn builder() -> VerifyConfigBuilder {
|
||||
VerifyConfigBuilder::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// Builder for [`VerifyConfig`].
|
||||
#[derive(Debug, Default)]
|
||||
pub struct VerifyConfigBuilder {}
|
||||
|
||||
impl VerifyConfigBuilder {
|
||||
/// Creates a new builder.
|
||||
pub fn new() -> Self {
|
||||
Self {}
|
||||
}
|
||||
|
||||
/// Builds the configuration.
|
||||
pub fn build(self) -> Result<VerifyConfig, VerifyConfigBuilderError> {
|
||||
Ok(VerifyConfig {})
|
||||
}
|
||||
}
|
||||
|
||||
/// Error for [`VerifyConfigBuilder`].
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error(transparent)]
|
||||
pub struct VerifyConfigBuilderError(#[from] VerifyConfigBuilderErrorRepr);
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
enum VerifyConfigBuilderErrorRepr {}
|
||||
|
||||
/// Payload sent to the verifier.
|
||||
#[doc(hidden)]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct ProvePayload {
|
||||
/// Handshake data.
|
||||
pub handshake: Option<(ServerName, HandshakeData)>,
|
||||
/// Transcript data.
|
||||
pub transcript: Option<PartialTranscript>,
|
||||
/// Transcript commitment configuration.
|
||||
pub transcript_commit: Option<TranscriptCommitRequest>,
|
||||
}
|
||||
|
||||
/// Prover output.
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct ProverOutput {
|
||||
@@ -42,8 +220,6 @@ pub struct VerifierOutput {
|
||||
pub server_name: Option<ServerName>,
|
||||
/// Transcript data.
|
||||
pub transcript: Option<PartialTranscript>,
|
||||
/// Encoding commitment secret.
|
||||
pub encoder_secret: Option<EncoderSecret>,
|
||||
/// Transcript commitments.
|
||||
pub transcript_commitments: Vec<TranscriptCommitment>,
|
||||
}
|
||||
|
||||
@@ -38,7 +38,8 @@ pub use commit::{
|
||||
pub use proof::{
|
||||
TranscriptProof, TranscriptProofBuilder, TranscriptProofBuilderError, TranscriptProofError,
|
||||
};
|
||||
pub use tls::{ContentType, Record, TlsTranscript};
|
||||
pub use tls::{Record, TlsTranscript};
|
||||
pub use tls_core::msgs::enums::ContentType;
|
||||
|
||||
/// A transcript contains the plaintext of all application data communicated
|
||||
/// between the Prover and the Server.
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
use std::{collections::HashSet, fmt};
|
||||
|
||||
use rangeset::{ToRangeSet, UnionMut};
|
||||
use rangeset::ToRangeSet;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
@@ -114,19 +114,7 @@ impl TranscriptCommitConfig {
|
||||
/// Returns a request for the transcript commitments.
|
||||
pub fn to_request(&self) -> TranscriptCommitRequest {
|
||||
TranscriptCommitRequest {
|
||||
encoding: self.has_encoding.then(|| {
|
||||
let mut sent = RangeSet::default();
|
||||
let mut recv = RangeSet::default();
|
||||
|
||||
for (dir, idx) in self.iter_encoding() {
|
||||
match dir {
|
||||
Direction::Sent => sent.union_mut(idx),
|
||||
Direction::Received => recv.union_mut(idx),
|
||||
}
|
||||
}
|
||||
|
||||
(sent, recv)
|
||||
}),
|
||||
encoding: self.has_encoding,
|
||||
hash: self
|
||||
.iter_hash()
|
||||
.map(|((dir, idx), alg)| (*dir, idx.clone(), *alg))
|
||||
@@ -301,14 +289,14 @@ impl fmt::Display for TranscriptCommitConfigBuilderError {
|
||||
/// Request to compute transcript commitments.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct TranscriptCommitRequest {
|
||||
encoding: Option<(RangeSet<usize>, RangeSet<usize>)>,
|
||||
encoding: bool,
|
||||
hash: Vec<(Direction, RangeSet<usize>, HashAlgId)>,
|
||||
}
|
||||
|
||||
impl TranscriptCommitRequest {
|
||||
/// Returns `true` if an encoding commitment is requested.
|
||||
pub fn has_encoding(&self) -> bool {
|
||||
self.encoding.is_some()
|
||||
pub fn encoding(&self) -> bool {
|
||||
self.encoding
|
||||
}
|
||||
|
||||
/// Returns `true` if a hash commitment is requested.
|
||||
@@ -320,11 +308,6 @@ impl TranscriptCommitRequest {
|
||||
pub fn iter_hash(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>, HashAlgId)> {
|
||||
self.hash.iter()
|
||||
}
|
||||
|
||||
/// Returns the ranges of the encoding commitments.
|
||||
pub fn encoding(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
|
||||
self.encoding.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -19,4 +19,6 @@ use crate::hash::TypedHash;
|
||||
pub struct EncodingCommitment {
|
||||
/// Merkle root of the encoding commitments.
|
||||
pub root: TypedHash,
|
||||
/// Seed used to generate the encodings.
|
||||
pub secret: EncoderSecret,
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ use crate::{
|
||||
merkle::{MerkleError, MerkleProof},
|
||||
transcript::{
|
||||
commit::MAX_TOTAL_COMMITTED_DATA,
|
||||
encoding::{new_encoder, Encoder, EncoderSecret, EncodingCommitment},
|
||||
encoding::{new_encoder, Encoder, EncodingCommitment},
|
||||
Direction,
|
||||
},
|
||||
};
|
||||
@@ -48,14 +48,13 @@ impl EncodingProof {
|
||||
pub fn verify_with_provider(
|
||||
&self,
|
||||
provider: &HashProvider,
|
||||
secret: &EncoderSecret,
|
||||
commitment: &EncodingCommitment,
|
||||
sent: &[u8],
|
||||
recv: &[u8],
|
||||
) -> Result<(RangeSet<usize>, RangeSet<usize>), EncodingProofError> {
|
||||
let hasher = provider.get(&commitment.root.alg)?;
|
||||
|
||||
let encoder = new_encoder(secret);
|
||||
let encoder = new_encoder(&commitment.secret);
|
||||
let Self {
|
||||
inclusion_proof,
|
||||
openings,
|
||||
@@ -233,7 +232,10 @@ mod test {
|
||||
use crate::{
|
||||
fixtures::{encoder_secret, encoder_secret_tampered_seed, encoding_provider},
|
||||
hash::Blake3,
|
||||
transcript::{encoding::EncodingTree, Transcript},
|
||||
transcript::{
|
||||
encoding::{EncoderSecret, EncodingTree},
|
||||
Transcript,
|
||||
},
|
||||
};
|
||||
|
||||
use super::*;
|
||||
@@ -244,7 +246,7 @@ mod test {
|
||||
commitment: EncodingCommitment,
|
||||
}
|
||||
|
||||
fn new_encoding_fixture() -> EncodingFixture {
|
||||
fn new_encoding_fixture(secret: EncoderSecret) -> EncodingFixture {
|
||||
let transcript = Transcript::new(POST_JSON, OK_JSON);
|
||||
|
||||
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
|
||||
@@ -255,7 +257,10 @@ mod test {
|
||||
|
||||
let proof = tree.proof([&idx_0, &idx_1].into_iter()).unwrap();
|
||||
|
||||
let commitment = EncodingCommitment { root: tree.root() };
|
||||
let commitment = EncodingCommitment {
|
||||
root: tree.root(),
|
||||
secret,
|
||||
};
|
||||
|
||||
EncodingFixture {
|
||||
transcript,
|
||||
@@ -270,12 +275,11 @@ mod test {
|
||||
transcript,
|
||||
proof,
|
||||
commitment,
|
||||
} = new_encoding_fixture();
|
||||
} = new_encoding_fixture(encoder_secret_tampered_seed());
|
||||
|
||||
let err = proof
|
||||
.verify_with_provider(
|
||||
&HashProvider::default(),
|
||||
&encoder_secret_tampered_seed(),
|
||||
&commitment,
|
||||
transcript.sent(),
|
||||
transcript.received(),
|
||||
@@ -291,19 +295,13 @@ mod test {
|
||||
transcript,
|
||||
proof,
|
||||
commitment,
|
||||
} = new_encoding_fixture();
|
||||
} = new_encoding_fixture(encoder_secret());
|
||||
|
||||
let sent = &transcript.sent()[transcript.sent().len() - 1..];
|
||||
let recv = &transcript.received()[transcript.received().len() - 2..];
|
||||
|
||||
let err = proof
|
||||
.verify_with_provider(
|
||||
&HashProvider::default(),
|
||||
&encoder_secret(),
|
||||
&commitment,
|
||||
sent,
|
||||
recv,
|
||||
)
|
||||
.verify_with_provider(&HashProvider::default(), &commitment, sent, recv)
|
||||
.unwrap_err();
|
||||
|
||||
assert!(matches!(err.kind, ErrorKind::Proof));
|
||||
@@ -315,7 +313,7 @@ mod test {
|
||||
transcript,
|
||||
mut proof,
|
||||
commitment,
|
||||
} = new_encoding_fixture();
|
||||
} = new_encoding_fixture(encoder_secret());
|
||||
|
||||
let Opening { idx, .. } = proof.openings.values_mut().next().unwrap();
|
||||
|
||||
@@ -324,7 +322,6 @@ mod test {
|
||||
let err = proof
|
||||
.verify_with_provider(
|
||||
&HashProvider::default(),
|
||||
&encoder_secret(),
|
||||
&commitment,
|
||||
transcript.sent(),
|
||||
transcript.received(),
|
||||
@@ -340,7 +337,7 @@ mod test {
|
||||
transcript,
|
||||
mut proof,
|
||||
commitment,
|
||||
} = new_encoding_fixture();
|
||||
} = new_encoding_fixture(encoder_secret());
|
||||
|
||||
let Opening { blinder, .. } = proof.openings.values_mut().next().unwrap();
|
||||
|
||||
@@ -349,7 +346,6 @@ mod test {
|
||||
let err = proof
|
||||
.verify_with_provider(
|
||||
&HashProvider::default(),
|
||||
&encoder_secret(),
|
||||
&commitment,
|
||||
transcript.sent(),
|
||||
transcript.received(),
|
||||
|
||||
@@ -222,12 +222,14 @@ mod tests {
|
||||
|
||||
let proof = tree.proof([&idx_0, &idx_1].into_iter()).unwrap();
|
||||
|
||||
let commitment = EncodingCommitment { root: tree.root() };
|
||||
let commitment = EncodingCommitment {
|
||||
root: tree.root(),
|
||||
secret: encoder_secret(),
|
||||
};
|
||||
|
||||
let (auth_sent, auth_recv) = proof
|
||||
.verify_with_provider(
|
||||
&HashProvider::default(),
|
||||
&encoder_secret(),
|
||||
&commitment,
|
||||
transcript.sent(),
|
||||
transcript.received(),
|
||||
@@ -258,12 +260,14 @@ mod tests {
|
||||
.proof([&idx_0, &idx_1, &idx_2, &idx_3].into_iter())
|
||||
.unwrap();
|
||||
|
||||
let commitment = EncodingCommitment { root: tree.root() };
|
||||
let commitment = EncodingCommitment {
|
||||
root: tree.root(),
|
||||
secret: encoder_secret(),
|
||||
};
|
||||
|
||||
let (auth_sent, auth_recv) = proof
|
||||
.verify_with_provider(
|
||||
&HashProvider::default(),
|
||||
&encoder_secret(),
|
||||
&commitment,
|
||||
transcript.sent(),
|
||||
transcript.received(),
|
||||
|
||||
@@ -10,7 +10,7 @@ use crate::{
|
||||
hash::{HashAlgId, HashProvider},
|
||||
transcript::{
|
||||
commit::{TranscriptCommitment, TranscriptCommitmentKind},
|
||||
encoding::{EncoderSecret, EncodingProof, EncodingProofError, EncodingTree},
|
||||
encoding::{EncodingProof, EncodingProofError, EncodingTree},
|
||||
hash::{hash_plaintext, PlaintextHash, PlaintextHashSecret},
|
||||
Direction, PartialTranscript, RangeSet, Transcript, TranscriptSecret,
|
||||
},
|
||||
@@ -25,9 +25,6 @@ const DEFAULT_COMMITMENT_KINDS: &[TranscriptCommitmentKind] = &[
|
||||
TranscriptCommitmentKind::Hash {
|
||||
alg: HashAlgId::BLAKE3,
|
||||
},
|
||||
TranscriptCommitmentKind::Hash {
|
||||
alg: HashAlgId::KECCAK256,
|
||||
},
|
||||
TranscriptCommitmentKind::Encoding,
|
||||
];
|
||||
|
||||
@@ -54,7 +51,6 @@ impl TranscriptProof {
|
||||
self,
|
||||
provider: &HashProvider,
|
||||
length: &TranscriptLength,
|
||||
encoder_secret: Option<&EncoderSecret>,
|
||||
commitments: impl IntoIterator<Item = &'a TranscriptCommitment>,
|
||||
) -> Result<PartialTranscript, TranscriptProofError> {
|
||||
let mut encoding_commitment = None;
|
||||
@@ -90,13 +86,6 @@ impl TranscriptProof {
|
||||
|
||||
// Verify encoding proof.
|
||||
if let Some(proof) = self.encoding_proof {
|
||||
let secret = encoder_secret.ok_or_else(|| {
|
||||
TranscriptProofError::new(
|
||||
ErrorKind::Encoding,
|
||||
"contains an encoding proof but missing encoder secret",
|
||||
)
|
||||
})?;
|
||||
|
||||
let commitment = encoding_commitment.ok_or_else(|| {
|
||||
TranscriptProofError::new(
|
||||
ErrorKind::Encoding,
|
||||
@@ -106,7 +95,6 @@ impl TranscriptProof {
|
||||
|
||||
let (auth_sent, auth_recv) = proof.verify_with_provider(
|
||||
provider,
|
||||
secret,
|
||||
commitment,
|
||||
self.transcript.sent_unsafe(),
|
||||
self.transcript.received_unsafe(),
|
||||
@@ -587,7 +575,7 @@ mod tests {
|
||||
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
|
||||
|
||||
use crate::{
|
||||
fixtures::{encoder_secret, encoding_provider},
|
||||
fixtures::encoding_provider,
|
||||
hash::{Blake3, Blinder, HashAlgId},
|
||||
transcript::TranscriptCommitConfigBuilder,
|
||||
};
|
||||
@@ -614,12 +602,7 @@ mod tests {
|
||||
|
||||
let provider = HashProvider::default();
|
||||
let err = transcript_proof
|
||||
.verify_with_provider(
|
||||
&provider,
|
||||
&transcript.length(),
|
||||
Some(&encoder_secret()),
|
||||
&[],
|
||||
)
|
||||
.verify_with_provider(&provider, &transcript.length(), &[])
|
||||
.err()
|
||||
.unwrap();
|
||||
|
||||
@@ -659,7 +642,6 @@ mod tests {
|
||||
#[rstest]
|
||||
#[case::sha256(HashAlgId::SHA256)]
|
||||
#[case::blake3(HashAlgId::BLAKE3)]
|
||||
#[case::keccak256(HashAlgId::KECCAK256)]
|
||||
fn test_reveal_with_hash_commitment(#[case] alg: HashAlgId) {
|
||||
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
|
||||
let provider = HashProvider::default();
|
||||
@@ -694,7 +676,6 @@ mod tests {
|
||||
.verify_with_provider(
|
||||
&provider,
|
||||
&transcript.length(),
|
||||
None,
|
||||
&[TranscriptCommitment::Hash(commitment)],
|
||||
)
|
||||
.unwrap();
|
||||
@@ -708,7 +689,6 @@ mod tests {
|
||||
#[rstest]
|
||||
#[case::sha256(HashAlgId::SHA256)]
|
||||
#[case::blake3(HashAlgId::BLAKE3)]
|
||||
#[case::keccak256(HashAlgId::KECCAK256)]
|
||||
fn test_reveal_with_inconsistent_hash_commitment(#[case] alg: HashAlgId) {
|
||||
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
|
||||
let provider = HashProvider::default();
|
||||
@@ -744,7 +724,6 @@ mod tests {
|
||||
.verify_with_provider(
|
||||
&provider,
|
||||
&transcript.length(),
|
||||
None,
|
||||
&[TranscriptCommitment::Hash(commitment)],
|
||||
)
|
||||
.unwrap_err();
|
||||
|
||||
@@ -10,53 +10,10 @@ use crate::{
|
||||
use tls_core::msgs::{
|
||||
alert::AlertMessagePayload,
|
||||
codec::{Codec, Reader},
|
||||
enums::{AlertDescription, ProtocolVersion},
|
||||
enums::{AlertDescription, ContentType, ProtocolVersion},
|
||||
handshake::{HandshakeMessagePayload, HandshakePayload},
|
||||
};
|
||||
|
||||
/// TLS record content type.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
|
||||
pub enum ContentType {
|
||||
/// Change cipher spec protocol.
|
||||
ChangeCipherSpec,
|
||||
/// Alert protocol.
|
||||
Alert,
|
||||
/// Handshake protocol.
|
||||
Handshake,
|
||||
/// Application data protocol.
|
||||
ApplicationData,
|
||||
/// Heartbeat protocol.
|
||||
Heartbeat,
|
||||
/// Unknown protocol.
|
||||
Unknown(u8),
|
||||
}
|
||||
|
||||
impl From<ContentType> for tls_core::msgs::enums::ContentType {
|
||||
fn from(content_type: ContentType) -> Self {
|
||||
match content_type {
|
||||
ContentType::ChangeCipherSpec => tls_core::msgs::enums::ContentType::ChangeCipherSpec,
|
||||
ContentType::Alert => tls_core::msgs::enums::ContentType::Alert,
|
||||
ContentType::Handshake => tls_core::msgs::enums::ContentType::Handshake,
|
||||
ContentType::ApplicationData => tls_core::msgs::enums::ContentType::ApplicationData,
|
||||
ContentType::Heartbeat => tls_core::msgs::enums::ContentType::Heartbeat,
|
||||
ContentType::Unknown(id) => tls_core::msgs::enums::ContentType::Unknown(id),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<tls_core::msgs::enums::ContentType> for ContentType {
|
||||
fn from(content_type: tls_core::msgs::enums::ContentType) -> Self {
|
||||
match content_type {
|
||||
tls_core::msgs::enums::ContentType::ChangeCipherSpec => ContentType::ChangeCipherSpec,
|
||||
tls_core::msgs::enums::ContentType::Alert => ContentType::Alert,
|
||||
tls_core::msgs::enums::ContentType::Handshake => ContentType::Handshake,
|
||||
tls_core::msgs::enums::ContentType::ApplicationData => ContentType::ApplicationData,
|
||||
tls_core::msgs::enums::ContentType::Heartbeat => ContentType::Heartbeat,
|
||||
tls_core::msgs::enums::ContentType::Unknown(id) => ContentType::Unknown(id),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A transcript of TLS records sent and received by the prover.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct TlsTranscript {
|
||||
|
||||
@@ -53,21 +53,6 @@ impl RootCertStore {
|
||||
pub fn empty() -> Self {
|
||||
Self { roots: Vec::new() }
|
||||
}
|
||||
|
||||
/// Creates a root certificate store with Mozilla root certificates.
|
||||
///
|
||||
/// These certificates are sourced from [`webpki-root-certs`](https://docs.rs/webpki-root-certs/latest/webpki_root_certs/). It is not recommended to use these unless the
|
||||
/// application binary can be recompiled and deployed on-demand in the case
|
||||
/// that the root certificates need to be updated.
|
||||
#[cfg(feature = "mozilla-certs")]
|
||||
pub fn mozilla() -> Self {
|
||||
Self {
|
||||
roots: webpki_root_certs::TLS_SERVER_ROOT_CERTS
|
||||
.iter()
|
||||
.map(|cert| CertificateDer(cert.to_vec()))
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Server certificate verifier.
|
||||
@@ -97,12 +82,8 @@ impl ServerCertVerifier {
|
||||
Ok(Self { roots })
|
||||
}
|
||||
|
||||
/// Creates a server certificate verifier with Mozilla root certificates.
|
||||
///
|
||||
/// These certificates are sourced from [`webpki-root-certs`](https://docs.rs/webpki-root-certs/latest/webpki_root_certs/). It is not recommended to use these unless the
|
||||
/// application binary can be recompiled and deployed on-demand in the case
|
||||
/// that the root certificates need to be updated.
|
||||
#[cfg(feature = "mozilla-certs")]
|
||||
/// Creates a new server certificate verifier with Mozilla root
|
||||
/// certificates.
|
||||
pub fn mozilla() -> Self {
|
||||
Self {
|
||||
roots: webpki_roots::TLS_SERVER_ROOTS.to_vec(),
|
||||
|
||||
@@ -15,7 +15,6 @@ tlsn-server-fixture = { workspace = true }
|
||||
tlsn-server-fixture-certs = { workspace = true }
|
||||
spansy = { workspace = true }
|
||||
|
||||
anyhow = { workspace = true }
|
||||
bincode = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
clap = { version = "4.5", features = ["derive"] }
|
||||
@@ -38,9 +37,8 @@ tokio = { workspace = true, features = [
|
||||
tokio-util = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
tracing-subscriber = { workspace = true }
|
||||
noir = { git = "https://github.com/zkmopro/noir-rs", tag = "v1.0.0-beta.8", features = [
|
||||
"barretenberg",
|
||||
] }
|
||||
noir = { git = "https://github.com/zkmopro/noir-rs", tag = "v1.0.0-beta.8", features = ["barretenberg"] }
|
||||
blake3 = { workspace = true }
|
||||
|
||||
[[example]]
|
||||
name = "interactive"
|
||||
|
||||
@@ -4,7 +4,5 @@ This folder contains examples demonstrating how to use the TLSNotary protocol.
|
||||
|
||||
* [Interactive](./interactive/README.md): Interactive Prover and Verifier session without a trusted notary.
|
||||
* [Attestation](./attestation/README.md): Performing a simple notarization with a trusted notary.
|
||||
* [Interactive_zk](./interactive_zk/README.md): Interactive Prover and Verifier session demonstrating zero-knowledge age verification using Noir.
|
||||
|
||||
|
||||
Refer to <https://tlsnotary.org/docs/quick_start> for a quick start guide to using TLSNotary with these examples.
|
||||
@@ -4,7 +4,6 @@
|
||||
|
||||
use std::env;
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use clap::Parser;
|
||||
use http_body_util::Empty;
|
||||
use hyper::{body::Bytes, Request, StatusCode};
|
||||
@@ -24,17 +23,12 @@ use tlsn::{
|
||||
Attestation, AttestationConfig, CryptoProvider, Secrets,
|
||||
},
|
||||
config::{
|
||||
prove::ProveConfig,
|
||||
prover::ProverConfig,
|
||||
tls::TlsClientConfig,
|
||||
tls_commit::{mpc::MpcTlsConfig, TlsCommitConfig},
|
||||
verifier::VerifierConfig,
|
||||
CertificateDer, PrivateKeyDer, ProtocolConfig, ProtocolConfigValidator, RootCertStore,
|
||||
},
|
||||
connection::{ConnectionInfo, HandshakeData, ServerName, TranscriptLength},
|
||||
prover::{state::Committed, Prover, ProverOutput},
|
||||
prover::{state::Committed, ProveConfig, Prover, ProverConfig, ProverOutput, TlsConfig},
|
||||
transcript::{ContentType, TranscriptCommitConfig},
|
||||
verifier::{Verifier, VerifierOutput},
|
||||
webpki::{CertificateDer, PrivateKeyDer, RootCertStore},
|
||||
verifier::{Verifier, VerifierConfig, VerifierOutput, VerifyConfig},
|
||||
};
|
||||
use tlsn_examples::ExampleType;
|
||||
use tlsn_formats::http::{DefaultHttpCommitter, HttpCommit, HttpTranscript};
|
||||
@@ -53,7 +47,7 @@ struct Args {
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<()> {
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
tracing_subscriber::fmt::init();
|
||||
|
||||
let args = Args::parse();
|
||||
@@ -93,63 +87,64 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
|
||||
uri: &str,
|
||||
extra_headers: Vec<(&str, &str)>,
|
||||
example_type: &ExampleType,
|
||||
) -> Result<()> {
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let server_host: String = env::var("SERVER_HOST").unwrap_or("127.0.0.1".into());
|
||||
let server_port: u16 = env::var("SERVER_PORT")
|
||||
.map(|port| port.parse().expect("port should be valid integer"))
|
||||
.unwrap_or(DEFAULT_FIXTURE_PORT);
|
||||
|
||||
// Create a new prover and perform necessary setup.
|
||||
let prover = Prover::new(ProverConfig::builder().build()?)
|
||||
.commit(
|
||||
TlsCommitConfig::builder()
|
||||
// Select the TLS commitment protocol.
|
||||
.protocol(
|
||||
MpcTlsConfig::builder()
|
||||
// We must configure the amount of data we expect to exchange beforehand,
|
||||
// which will be preprocessed prior to the
|
||||
// connection. Reducing these limits will improve
|
||||
// performance.
|
||||
.max_sent_data(tlsn_examples::MAX_SENT_DATA)
|
||||
.max_recv_data(tlsn_examples::MAX_RECV_DATA)
|
||||
.build()?,
|
||||
)
|
||||
// Create a root certificate store with the server-fixture's self-signed
|
||||
// certificate. This is only required for offline testing with the
|
||||
// server-fixture.
|
||||
let mut tls_config_builder = TlsConfig::builder();
|
||||
tls_config_builder
|
||||
.root_store(RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
})
|
||||
// (Optional) Set up TLS client authentication if required by the server.
|
||||
.client_auth((
|
||||
vec![CertificateDer(CLIENT_CERT_DER.to_vec())],
|
||||
PrivateKeyDer(CLIENT_KEY_DER.to_vec()),
|
||||
));
|
||||
|
||||
let tls_config = tls_config_builder.build().unwrap();
|
||||
|
||||
// Set up protocol configuration for prover.
|
||||
let mut prover_config_builder = ProverConfig::builder();
|
||||
prover_config_builder
|
||||
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
|
||||
.tls_config(tls_config)
|
||||
.protocol_config(
|
||||
ProtocolConfig::builder()
|
||||
// We must configure the amount of data we expect to exchange beforehand, which will
|
||||
// be preprocessed prior to the connection. Reducing these limits will improve
|
||||
// performance.
|
||||
.max_sent_data(tlsn_examples::MAX_SENT_DATA)
|
||||
.max_recv_data(tlsn_examples::MAX_RECV_DATA)
|
||||
.build()?,
|
||||
socket.compat(),
|
||||
)
|
||||
.await?;
|
||||
);
|
||||
|
||||
let prover_config = prover_config_builder.build()?;
|
||||
|
||||
// Create a new prover and perform necessary setup.
|
||||
let prover = Prover::new(prover_config).setup(socket.compat()).await?;
|
||||
|
||||
// Open a TCP connection to the server.
|
||||
let client_socket = tokio::net::TcpStream::connect((server_host, server_port)).await?;
|
||||
|
||||
// Bind the prover to the server connection.
|
||||
let (tls_connection, prover_fut) = prover
|
||||
.connect(
|
||||
TlsClientConfig::builder()
|
||||
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into()?))
|
||||
// Create a root certificate store with the server-fixture's self-signed
|
||||
// certificate. This is only required for offline testing with the
|
||||
// server-fixture.
|
||||
.root_store(RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
})
|
||||
// (Optional) Set up TLS client authentication if required by the server.
|
||||
.client_auth((
|
||||
vec![CertificateDer(CLIENT_CERT_DER.to_vec())],
|
||||
PrivateKeyDer(CLIENT_KEY_DER.to_vec()),
|
||||
))
|
||||
.build()?,
|
||||
client_socket.compat(),
|
||||
)
|
||||
.await?;
|
||||
let tls_connection = TokioIo::new(tls_connection.compat());
|
||||
// The returned `mpc_tls_connection` is an MPC TLS connection to the server: all
|
||||
// data written to/read from it will be encrypted/decrypted using MPC with
|
||||
// the notary.
|
||||
let (mpc_tls_connection, prover_fut) = prover.connect(client_socket.compat()).await?;
|
||||
let mpc_tls_connection = TokioIo::new(mpc_tls_connection.compat());
|
||||
|
||||
// Spawn the prover task to be run concurrently in the background.
|
||||
let prover_task = tokio::spawn(prover_fut);
|
||||
|
||||
// Attach the hyper HTTP client to the connection.
|
||||
let (mut request_sender, connection) =
|
||||
hyper::client::conn::http1::handshake(tls_connection).await?;
|
||||
hyper::client::conn::http1::handshake(mpc_tls_connection).await?;
|
||||
|
||||
// Spawn the HTTP task to be run concurrently in the background.
|
||||
tokio::spawn(connection);
|
||||
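The comments in this hunk stress that the MPC-TLS data limits have to be fixed before the connection, because they determine how much work is preprocessed up front. A minimal sketch of just that configuration step, mirroring the builder calls shown above; the `MpcTlsConfig`/`TlsCommitConfig` paths and the `tlsn_examples` limits are taken from this diff and assumed to be in scope:

    // Hedged sketch: choose the data limits up front; smaller limits mean less
    // preprocessing before the TLS connection, so size them to the expected traffic.
    use tlsn::config::tls_commit::{mpc::MpcTlsConfig, TlsCommitConfig};

    fn commit_config() -> anyhow::Result<TlsCommitConfig> {
        let mpc = MpcTlsConfig::builder()
            .max_sent_data(tlsn_examples::MAX_SENT_DATA) // bytes the prover may send
            .max_recv_data(tlsn_examples::MAX_RECV_DATA) // bytes the prover may receive
            .build()?;
        Ok(TlsCommitConfig::builder().protocol(mpc).build()?)
    }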
@@ -170,7 +165,7 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
|
||||
}
|
||||
let request = request_builder.body(Empty::<Bytes>::new())?;
|
||||
|
||||
info!("Starting connection with the server");
|
||||
info!("Starting an MPC TLS connection with the server");
|
||||
|
||||
// Send the request to the server and wait for the response.
|
||||
let response = request_sender.send_request(request).await?;
|
||||
@@ -180,7 +175,7 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
|
||||
assert!(response.status() == StatusCode::OK);
|
||||
|
||||
// The prover task should be done now, so we can await it.
|
||||
let prover = prover_task.await??;
|
||||
let mut prover = prover_task.await??;
|
||||
|
||||
// Parse the HTTP transcript.
|
||||
let transcript = HttpTranscript::parse(prover.transcript())?;
|
||||
@@ -222,7 +217,7 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
|
||||
|
||||
let request_config = builder.build()?;
|
||||
|
||||
let (attestation, secrets) = notarize(prover, &request_config, req_tx, resp_rx).await?;
|
||||
let (attestation, secrets) = notarize(&mut prover, &request_config, req_tx, resp_rx).await?;
|
||||
|
||||
// Write the attestation to disk.
|
||||
let attestation_path = tlsn_examples::get_file_path(example_type, "attestation");
|
||||
@@ -243,11 +238,11 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
|
||||
}
|
||||
|
||||
async fn notarize(
|
||||
mut prover: Prover<Committed>,
|
||||
prover: &mut Prover<Committed>,
|
||||
config: &RequestConfig,
|
||||
request_tx: Sender<AttestationRequest>,
|
||||
attestation_rx: Receiver<Attestation>,
|
||||
) -> Result<(Attestation, Secrets)> {
|
||||
) -> Result<(Attestation, Secrets), Box<dyn std::error::Error>> {
|
||||
let mut builder = ProveConfig::builder(prover.transcript());
|
||||
|
||||
if let Some(config) = config.transcript_commit() {
|
||||
@@ -262,27 +257,25 @@ async fn notarize(
|
||||
..
|
||||
} = prover.prove(&disclosure_config).await?;
|
||||
|
||||
let transcript = prover.transcript().clone();
|
||||
let tls_transcript = prover.tls_transcript().clone();
|
||||
prover.close().await?;
|
||||
|
||||
// Build an attestation request.
|
||||
let mut builder = AttestationRequest::builder(config);
|
||||
|
||||
builder
|
||||
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
|
||||
.handshake_data(HandshakeData {
|
||||
certs: tls_transcript
|
||||
certs: prover
|
||||
.tls_transcript()
|
||||
.server_cert_chain()
|
||||
.expect("server cert chain is present")
|
||||
.to_vec(),
|
||||
sig: tls_transcript
|
||||
sig: prover
|
||||
.tls_transcript()
|
||||
.server_signature()
|
||||
.expect("server signature is present")
|
||||
.clone(),
|
||||
binding: tls_transcript.certificate_binding().clone(),
|
||||
binding: prover.tls_transcript().certificate_binding().clone(),
|
||||
})
|
||||
.transcript(transcript)
|
||||
.transcript(prover.transcript().clone())
|
||||
.transcript_commitments(transcript_secrets, transcript_commitments);
|
||||
|
||||
let (request, secrets) = builder.build(&CryptoProvider::default())?;
|
||||
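Because prover and notary run in the same process in this example, the attestation request and the signed attestation travel over in-memory channels (`req_tx`/`resp_rx` above). A self-contained sketch of that handoff, assuming plain `tokio::sync::oneshot` channels and placeholder types rather than the example's real `AttestationRequest`/`Attestation`:

    use tokio::sync::oneshot;

    #[derive(Debug)]
    struct Request(Vec<u8>); // stand-in for the real AttestationRequest
    #[derive(Debug)]
    struct Signed(Vec<u8>); // stand-in for the notary's Attestation

    #[tokio::main]
    async fn main() {
        let (req_tx, req_rx) = oneshot::channel::<Request>(); // prover -> notary
        let (att_tx, att_rx) = oneshot::channel::<Signed>(); // notary -> prover

        let notary = tokio::spawn(async move {
            let req = req_rx.await.expect("prover should send a request");
            // ... a real notary validates the request and signs an attestation here ...
            att_tx.send(Signed(req.0)).expect("prover should be listening");
        });

        req_tx.send(Request(vec![1, 2, 3])).expect("notary should be listening");
        let attestation = att_rx.await.expect("notary should respond");
        notary.await.unwrap();
        println!("received {:?}", attestation);
    }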
@@ -290,18 +283,15 @@ async fn notarize(
|
||||
// Send attestation request to notary.
|
||||
request_tx
|
||||
.send(request.clone())
|
||||
.map_err(|_| anyhow!("notary is not receiving attestation request"))?;
|
||||
.map_err(|_| "notary is not receiving attestation request".to_string())?;
|
||||
|
||||
// Receive attestation from notary.
|
||||
let attestation = attestation_rx
|
||||
.await
|
||||
.map_err(|err| anyhow!("notary did not respond with attestation: {err}"))?;
|
||||
|
||||
// Signature verifier for the signature algorithm in the request.
|
||||
let provider = CryptoProvider::default();
|
||||
.map_err(|err| format!("notary did not respond with attestation: {err}"))?;
|
||||
|
||||
// Check the attestation is consistent with the Prover's view.
|
||||
request.validate(&attestation, &provider)?;
|
||||
request.validate(&attestation)?;
|
||||
|
||||
Ok((attestation, secrets))
|
||||
}
|
||||
@@ -310,7 +300,14 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
|
||||
socket: S,
|
||||
request_rx: Receiver<AttestationRequest>,
|
||||
attestation_tx: Sender<Attestation>,
|
||||
) -> Result<()> {
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Set up Verifier.
|
||||
let config_validator = ProtocolConfigValidator::builder()
|
||||
.max_sent_data(tlsn_examples::MAX_SENT_DATA)
|
||||
.max_recv_data(tlsn_examples::MAX_RECV_DATA)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
// Create a root certificate store with the server-fixture's self-signed
|
||||
// certificate. This is only required for offline testing with the
|
||||
// server-fixture.
|
||||
@@ -318,25 +315,20 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
|
||||
.root_store(RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
})
|
||||
.protocol_config_validator(config_validator)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let verifier = Verifier::new(verifier_config)
|
||||
.commit(socket.compat())
|
||||
.await?
|
||||
.accept()
|
||||
let mut verifier = Verifier::new(verifier_config)
|
||||
.setup(socket.compat())
|
||||
.await?
|
||||
.run()
|
||||
.await?;
|
||||
|
||||
let (
|
||||
VerifierOutput {
|
||||
transcript_commitments,
|
||||
encoder_secret,
|
||||
..
|
||||
},
|
||||
verifier,
|
||||
) = verifier.verify().await?.accept().await?;
|
||||
let VerifierOutput {
|
||||
transcript_commitments,
|
||||
..
|
||||
} = verifier.verify(&VerifyConfig::default()).await?;
|
||||
|
||||
let tls_transcript = verifier.tls_transcript().clone();
|
||||
|
||||
@@ -393,16 +385,12 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
|
||||
.server_ephemeral_key(tls_transcript.server_ephemeral_key().clone())
|
||||
.transcript_commitments(transcript_commitments);
|
||||
|
||||
if let Some(encoder_secret) = encoder_secret {
|
||||
builder.encoder_secret(encoder_secret);
|
||||
}
|
||||
|
||||
let attestation = builder.build(&provider)?;
|
||||
|
||||
// Send attestation to prover.
|
||||
attestation_tx
|
||||
.send(attestation)
|
||||
.map_err(|_| anyhow!("prover is not receiving attestation"))?;
|
||||
.map_err(|_| "prover is not receiving attestation".to_string())?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -12,8 +12,8 @@ use tlsn::{
|
||||
signing::VerifyingKey,
|
||||
CryptoProvider,
|
||||
},
|
||||
config::{CertificateDer, RootCertStore},
|
||||
verifier::ServerCertVerifier,
|
||||
webpki::{CertificateDer, RootCertStore},
|
||||
};
|
||||
use tlsn_examples::ExampleType;
|
||||
use tlsn_server_fixture_certs::CA_CERT_DER;
|
||||
|
||||
@@ -3,7 +3,6 @@ use std::{
|
||||
net::{IpAddr, SocketAddr},
|
||||
};
|
||||
|
||||
use anyhow::Result;
|
||||
use http_body_util::Empty;
|
||||
use hyper::{body::Bytes, Request, StatusCode, Uri};
|
||||
use hyper_util::rt::TokioIo;
|
||||
@@ -12,18 +11,11 @@ use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
|
||||
use tracing::instrument;
|
||||
|
||||
use tlsn::{
|
||||
config::{
|
||||
prove::ProveConfig,
|
||||
prover::ProverConfig,
|
||||
tls::TlsClientConfig,
|
||||
tls_commit::{mpc::MpcTlsConfig, TlsCommitConfig, TlsCommitProtocolConfig},
|
||||
verifier::VerifierConfig,
|
||||
},
|
||||
config::{CertificateDer, ProtocolConfig, ProtocolConfigValidator, RootCertStore},
|
||||
connection::ServerName,
|
||||
prover::Prover,
|
||||
prover::{ProveConfig, Prover, ProverConfig, TlsConfig},
|
||||
transcript::PartialTranscript,
|
||||
verifier::{Verifier, VerifierOutput},
|
||||
webpki::{CertificateDer, RootCertStore},
|
||||
verifier::{Verifier, VerifierConfig, VerifierOutput, VerifyConfig},
|
||||
};
|
||||
use tlsn_server_fixture::DEFAULT_FIXTURE_PORT;
|
||||
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
|
||||
@@ -54,7 +46,7 @@ async fn main() {
|
||||
let (prover_socket, verifier_socket) = tokio::io::duplex(1 << 23);
|
||||
let prover = prover(prover_socket, &server_addr, &uri);
|
||||
let verifier = verifier(verifier_socket);
|
||||
let (_, transcript) = tokio::try_join!(prover, verifier).unwrap();
|
||||
let (_, transcript) = tokio::join!(prover, verifier);
|
||||
|
||||
println!("Successfully verified {}", &uri);
|
||||
println!(
|
||||
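The `main` above wires prover and verifier together in one process: an 8 MiB in-memory duplex pipe carries the protocol, and both futures are awaited concurrently. A self-contained sketch of that wiring pattern, with trivial byte-echo tasks standing in for the real prover and verifier futures:

    use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};

    #[tokio::main]
    async fn main() {
        // 1 << 23 bytes = 8 MiB of in-memory buffer, as in the example.
        let (mut prover_socket, mut verifier_socket) = duplex(1 << 23);

        let prover = async move {
            prover_socket.write_all(b"hello verifier").await?;
            prover_socket.shutdown().await?; // signal EOF to the other side
            Ok::<_, std::io::Error>(())
        };

        let verifier = async move {
            let mut buf = Vec::new();
            verifier_socket.read_to_end(&mut buf).await?;
            Ok::<_, std::io::Error>(buf)
        };

        // Drive both sides to completion; either error aborts the join.
        let (_, received) = tokio::try_join!(prover, verifier).unwrap();
        assert_eq!(received, b"hello verifier".to_vec());
    }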
@@ -72,57 +64,61 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
|
||||
verifier_socket: T,
|
||||
server_addr: &SocketAddr,
|
||||
uri: &str,
|
||||
) -> Result<()> {
|
||||
) {
|
||||
let uri = uri.parse::<Uri>().unwrap();
|
||||
assert_eq!(uri.scheme().unwrap().as_str(), "https");
|
||||
let server_domain = uri.authority().unwrap().host();
|
||||
|
||||
// Create a new prover and perform necessary setup.
|
||||
let prover = Prover::new(ProverConfig::builder().build()?)
|
||||
.commit(
|
||||
TlsCommitConfig::builder()
|
||||
// Select the TLS commitment protocol.
|
||||
.protocol(
|
||||
MpcTlsConfig::builder()
|
||||
// We must configure the amount of data we expect to exchange beforehand,
|
||||
// which will be preprocessed prior to the
|
||||
// connection. Reducing these limits will improve
|
||||
// performance.
|
||||
.max_sent_data(tlsn_examples::MAX_SENT_DATA)
|
||||
.max_recv_data(tlsn_examples::MAX_RECV_DATA)
|
||||
.build()?,
|
||||
)
|
||||
.build()?,
|
||||
verifier_socket.compat(),
|
||||
)
|
||||
.await?;
|
||||
// Create a root certificate store with the server-fixture's self-signed
|
||||
// certificate. This is only required for offline testing with the
|
||||
// server-fixture.
|
||||
let mut tls_config_builder = TlsConfig::builder();
|
||||
tls_config_builder.root_store(RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
});
|
||||
let tls_config = tls_config_builder.build().unwrap();
|
||||
|
||||
// Open a TCP connection to the server.
|
||||
let client_socket = tokio::net::TcpStream::connect(server_addr).await?;
|
||||
// Set up protocol configuration for prover.
|
||||
let mut prover_config_builder = ProverConfig::builder();
|
||||
prover_config_builder
|
||||
.server_name(ServerName::Dns(server_domain.try_into().unwrap()))
|
||||
.tls_config(tls_config)
|
||||
.protocol_config(
|
||||
ProtocolConfig::builder()
|
||||
.max_sent_data(MAX_SENT_DATA)
|
||||
.max_recv_data(MAX_RECV_DATA)
|
||||
.build()
|
||||
.unwrap(),
|
||||
);
|
||||
|
||||
// Bind the prover to the server connection.
|
||||
let (tls_connection, prover_fut) = prover
|
||||
.connect(
|
||||
TlsClientConfig::builder()
|
||||
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into()?))
|
||||
// Create a root certificate store with the server-fixture's self-signed
|
||||
// certificate. This is only required for offline testing with the
|
||||
// server-fixture.
|
||||
.root_store(RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
})
|
||||
.build()?,
|
||||
client_socket.compat(),
|
||||
)
|
||||
.await?;
|
||||
let tls_connection = TokioIo::new(tls_connection.compat());
|
||||
let prover_config = prover_config_builder.build().unwrap();
|
||||
|
||||
// Create prover and connect to verifier.
|
||||
//
|
||||
// Perform the setup phase with the verifier.
|
||||
let prover = Prover::new(prover_config)
|
||||
.setup(verifier_socket.compat())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Connect to TLS Server.
|
||||
let tls_client_socket = tokio::net::TcpStream::connect(server_addr).await.unwrap();
|
||||
|
||||
// Pass server connection into the prover.
|
||||
let (mpc_tls_connection, prover_fut) =
|
||||
prover.connect(tls_client_socket.compat()).await.unwrap();
|
||||
|
||||
// Wrap the connection in a TokioIo compatibility layer to use it with hyper.
|
||||
let mpc_tls_connection = TokioIo::new(mpc_tls_connection.compat());
|
||||
|
||||
// Spawn the Prover to run in the background.
|
||||
let prover_task = tokio::spawn(prover_fut);
|
||||
|
||||
// MPC-TLS Handshake.
|
||||
let (mut request_sender, connection) =
|
||||
hyper::client::conn::http1::handshake(tls_connection).await?;
|
||||
hyper::client::conn::http1::handshake(mpc_tls_connection)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Spawn the connection to run in the background.
|
||||
tokio::spawn(connection);
|
||||
@@ -134,13 +130,14 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
|
||||
.header("Connection", "close")
|
||||
.header("Secret", SECRET)
|
||||
.method("GET")
|
||||
.body(Empty::<Bytes>::new())?;
|
||||
let response = request_sender.send_request(request).await?;
|
||||
.body(Empty::<Bytes>::new())
|
||||
.unwrap();
|
||||
let response = request_sender.send_request(request).await.unwrap();
|
||||
|
||||
assert!(response.status() == StatusCode::OK);
|
||||
|
||||
// Create proof for the Verifier.
|
||||
let mut prover = prover_task.await??;
|
||||
let mut prover = prover_task.await.unwrap().unwrap();
|
||||
|
||||
let mut builder = ProveConfig::builder(prover.transcript());
|
||||
|
||||
@@ -156,8 +153,10 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
|
||||
.expect("the secret should be in the sent data");
|
||||
|
||||
// Reveal everything except for the secret.
|
||||
builder.reveal_sent(&(0..pos))?;
|
||||
builder.reveal_sent(&(pos + SECRET.len()..prover.transcript().sent().len()))?;
|
||||
builder.reveal_sent(&(0..pos)).unwrap();
|
||||
builder
|
||||
.reveal_sent(&(pos + SECRET.len()..prover.transcript().sent().len()))
|
||||
.unwrap();
|
||||
|
||||
// Find the substring "Dick".
|
||||
let pos = prover
|
||||
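The hunk above hides the `Secret` header value by revealing only the byte ranges before and after it. The range computation itself is independent of the tlsn builders; a self-contained sketch over a raw byte transcript:

    use std::ops::Range;

    /// Ranges of `data` to reveal so that `secret` stays hidden.
    /// Assumes a non-empty secret that occurs at most once.
    fn reveal_all_but(data: &[u8], secret: &[u8]) -> Vec<Range<usize>> {
        match data.windows(secret.len()).position(|w| w == secret) {
            Some(pos) => vec![0..pos, pos + secret.len()..data.len()],
            None => vec![0..data.len()],
        }
    }

    fn main() {
        let sent = b"AAAA-SECRET-BBBB";
        assert_eq!(reveal_all_but(sent, b"SECRET"), vec![0..5, 11..16]);
    }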
@@ -168,21 +167,28 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
|
||||
.expect("the substring 'Dick' should be in the received data");
|
||||
|
||||
// Reveal everything except for the substring.
|
||||
builder.reveal_recv(&(0..pos))?;
|
||||
builder.reveal_recv(&(pos + 4..prover.transcript().received().len()))?;
|
||||
builder.reveal_recv(&(0..pos)).unwrap();
|
||||
builder
|
||||
.reveal_recv(&(pos + 4..prover.transcript().received().len()))
|
||||
.unwrap();
|
||||
|
||||
let config = builder.build()?;
|
||||
let config = builder.build().unwrap();
|
||||
|
||||
prover.prove(&config).await?;
|
||||
prover.close().await?;
|
||||
|
||||
Ok(())
|
||||
prover.prove(&config).await.unwrap();
|
||||
prover.close().await.unwrap();
|
||||
}
|
||||
|
||||
#[instrument(skip(socket))]
|
||||
async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
|
||||
socket: T,
|
||||
) -> Result<PartialTranscript> {
|
||||
) -> PartialTranscript {
|
||||
// Set up Verifier.
|
||||
let config_validator = ProtocolConfigValidator::builder()
|
||||
.max_sent_data(MAX_SENT_DATA)
|
||||
.max_recv_data(MAX_RECV_DATA)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
// Create a root certificate store with the server-fixture's self-signed
|
||||
// certificate. This is only required for offline testing with the
|
||||
// server-fixture.
|
||||
@@ -190,56 +196,20 @@ async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
|
||||
.root_store(RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
})
|
||||
.build()?;
|
||||
.protocol_config_validator(config_validator)
|
||||
.build()
|
||||
.unwrap();
|
||||
let verifier = Verifier::new(verifier_config);
|
||||
|
||||
// Validate the proposed configuration and then run the TLS commitment protocol.
|
||||
let verifier = verifier.commit(socket.compat()).await?;
|
||||
|
||||
// This is the opportunity to ensure the prover does not attempt to overload the
|
||||
// verifier.
|
||||
let reject = if let TlsCommitProtocolConfig::Mpc(mpc_tls_config) = verifier.request().protocol()
|
||||
{
|
||||
if mpc_tls_config.max_sent_data() > MAX_SENT_DATA {
|
||||
Some("max_sent_data is too large")
|
||||
} else if mpc_tls_config.max_recv_data() > MAX_RECV_DATA {
|
||||
Some("max_recv_data is too large")
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
Some("expecting to use MPC-TLS")
|
||||
};
|
||||
|
||||
if reject.is_some() {
|
||||
verifier.reject(reject).await?;
|
||||
return Err(anyhow::anyhow!("protocol configuration rejected"));
|
||||
}
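This is the point where the verifier checks the limits proposed by the prover before agreeing to run MPC-TLS, so a malicious prover cannot force it into unbounded preprocessing. The decision is plain comparisons; a standalone sketch with illustrative constants and the rejection messages mirrored from above:

    const MAX_SENT_DATA: usize = 1 << 12; // illustrative limits
    const MAX_RECV_DATA: usize = 1 << 14;

    /// Returns a rejection reason if the proposed limits exceed what this
    /// verifier is willing to preprocess, or `None` to accept.
    fn check_limits(max_sent: usize, max_recv: usize) -> Option<&'static str> {
        if max_sent > MAX_SENT_DATA {
            Some("max_sent_data is too large")
        } else if max_recv > MAX_RECV_DATA {
            Some("max_recv_data is too large")
        } else {
            None
        }
    }

    fn main() {
        assert_eq!(check_limits(1 << 10, 1 << 13), None);
        assert_eq!(check_limits(1 << 20, 1 << 13), Some("max_sent_data is too large"));
    }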
|
||||
|
||||
// Runs the TLS commitment protocol to completion.
|
||||
let verifier = verifier.accept().await?.run().await?;
|
||||
|
||||
// Validate the proving request and then verify.
|
||||
let verifier = verifier.verify().await?;
|
||||
|
||||
if !verifier.request().server_identity() {
|
||||
let verifier = verifier
|
||||
.reject(Some("expecting to verify the server name"))
|
||||
.await?;
|
||||
verifier.close().await?;
|
||||
return Err(anyhow::anyhow!("prover did not reveal the server name"));
|
||||
}
|
||||
|
||||
let (
|
||||
VerifierOutput {
|
||||
server_name,
|
||||
transcript,
|
||||
..
|
||||
},
|
||||
verifier,
|
||||
) = verifier.accept().await?;
|
||||
|
||||
verifier.close().await?;
|
||||
// Receive authenticated data.
|
||||
let VerifierOutput {
|
||||
server_name,
|
||||
transcript,
|
||||
..
|
||||
} = verifier
|
||||
.verify(socket.compat(), &VerifyConfig::default())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let server_name = server_name.expect("prover should have revealed server name");
|
||||
let transcript = transcript.expect("prover should have revealed transcript data");
|
||||
@@ -262,7 +232,7 @@ async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
|
||||
let ServerName::Dns(server_name) = server_name;
|
||||
assert_eq!(server_name.as_str(), SERVER_DOMAIN);
|
||||
|
||||
Ok(transcript)
|
||||
transcript
|
||||
}
|
||||
|
||||
/// Render redacted bytes as `🙈`.
|
||||
|
||||
@@ -2,7 +2,6 @@ mod prover;
|
||||
mod types;
|
||||
mod verifier;
|
||||
|
||||
use anyhow::Result;
|
||||
use prover::prover;
|
||||
use std::{
|
||||
env,
|
||||
@@ -13,7 +12,7 @@ use tlsn_server_fixture_certs::SERVER_DOMAIN;
|
||||
use verifier::verifier;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<()> {
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
tracing_subscriber::fmt::init();
|
||||
|
||||
let server_host: String = env::var("SERVER_HOST").unwrap_or("127.0.0.1".into());
|
||||
@@ -26,7 +25,7 @@ async fn main() -> Result<()> {
|
||||
let uri = format!("https://{SERVER_DOMAIN}:{server_port}/elster");
|
||||
let server_ip: IpAddr = server_host
|
||||
.parse()
|
||||
.map_err(|e| anyhow::anyhow!("Invalid IP address '{server_host}': {e}"))?;
|
||||
.map_err(|e| format!("Invalid IP address '{}': {}", server_host, e))?;
|
||||
let server_addr = SocketAddr::from((server_ip, server_port));
|
||||
|
||||
// Connect prover and verifier.
|
||||
|
||||
@@ -4,5 +4,4 @@ type = "bin"
|
||||
authors = [""]
|
||||
|
||||
[dependencies]
|
||||
sha256 = { tag = "v0.1.5", git = "https://github.com/noir-lang/sha256" }
|
||||
date = { tag = "v0.5.4", git = "https://github.com/madztheo/noir-date.git" }
|
||||
|
||||
@@ -7,14 +7,13 @@ edition = "2021"
|
||||
publish = false
|
||||
|
||||
[dependencies]
|
||||
sha2 = "0.10"
|
||||
blake3 = "1.5"
|
||||
rand = "0.8"
|
||||
chrono = "0.4"
|
||||
---
|
||||
use chrono::Datelike;
|
||||
use chrono::Local;
|
||||
use rand::RngCore;
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
fn main() {
|
||||
// 1. Birthdate string (fixed)
|
||||
@@ -35,7 +34,8 @@ fn main() {
|
||||
preimage.extend_from_slice(&blinder);
|
||||
|
||||
// 4. Hash it
|
||||
let hash = Sha256::digest(&preimage);
|
||||
let hash = blake3::hash(&preimage);
|
||||
let hash = hash.as_bytes();
|
||||
|
||||
let blinder = blinder
|
||||
.iter()
|
||||
|
||||
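The helper above switches the commitment hash from SHA-256 to BLAKE3: it draws a random 16-byte blinder, appends it to the fixed-format birthdate string, and hashes the 26-byte preimage. A compact sketch of that commitment step using the `blake3` and `rand` crates listed in this diff:

    use rand::RngCore;

    /// Commit to a `YYYY-MM-DD` birthdate with a random 16-byte blinder.
    /// Returns (blinder, blake3(birthdate || blinder)).
    fn commit_birthdate(birthdate: &str) -> ([u8; 16], [u8; 32]) {
        let mut blinder = [0u8; 16];
        rand::thread_rng().fill_bytes(&mut blinder);

        let mut preimage = Vec::with_capacity(birthdate.len() + blinder.len());
        preimage.extend_from_slice(birthdate.as_bytes());
        preimage.extend_from_slice(&blinder);

        (blinder, *blake3::hash(&preimage).as_bytes())
    }

    fn main() {
        let (blinder, hash) = commit_birthdate("1985-03-12");
        // The Noir circuit later re-derives this hash from the same 26-byte preimage.
        println!("blinder = {blinder:?}\nhash    = {hash:?}");
    }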
@@ -25,7 +25,7 @@ fn check_18(date_of_birth: str<10>, proof_date: date::Date) -> bool {
|
||||
|
||||
fn check_hash(date_of_birth: str<10>, blinder: [u8; 16], committed_hash: [u8; 32]) -> bool {
|
||||
let hash_input: [u8; 26] = make_hash_input(date_of_birth, blinder);
|
||||
let computed_hash = sha256::sha256_var(hash_input, 26);
|
||||
let computed_hash = std::hash::blake3(hash_input);
|
||||
let correct_hash = computed_hash == committed_hash;
|
||||
println(f"Correct hash? {correct_hash}");
|
||||
correct_hash
|
||||
@@ -53,13 +53,13 @@ pub fn parse_birth_date(birth_date: str<10>) -> date::Date {
|
||||
fn test_max_is_over_18() {
|
||||
// Private input
|
||||
let date_of_birth = "1985-03-12";
|
||||
let blinder = [120, 80, 62, 10, 76, 60, 130, 98, 147, 161, 139, 126, 27, 236, 36, 56];
|
||||
let blinder = [109, 224, 222, 179, 60, 44, 41, 65, 166, 94, 111, 216, 73, 231, 63, 83];
|
||||
|
||||
// Public input
|
||||
let proof_date = date::Date { year: 2025, month: 9, day: 2 };
|
||||
let proof_date = date::Date { year: 2025, month: 9, day: 26 };
|
||||
let committed_hash = [
|
||||
229, 118, 202, 216, 213, 230, 125, 163, 48, 178, 118, 225, 84, 7, 140, 63, 173, 255, 163,
|
||||
208, 163, 3, 63, 204, 37, 120, 254, 246, 202, 116, 122, 145,
|
||||
114, 34, 41, 235, 91, 156, 13, 57, 254, 112, 250, 35, 104, 217, 20, 182, 240, 170, 57, 39,
|
||||
187, 154, 14, 39, 91, 67, 50, 199, 149, 231, 78, 46,
|
||||
];
|
||||
|
||||
main(proof_date, committed_hash, date_of_birth, blinder);
|
||||
|
||||
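The updated test pins a new blinder, proof date and BLAKE3 commitment for the circuit. The `check_18` rule the circuit enforces is ordinary date arithmetic; a hedged Rust cross-check with `chrono`, useful for sanity-checking circuit inputs outside the proof (the helper name and the leap-day fallback are illustrative):

    use chrono::{Datelike, NaiveDate};

    /// True if someone born on `birth` is at least 18 on `proof_date`.
    fn is_over_18(birth: NaiveDate, proof_date: NaiveDate) -> bool {
        // The 18th birthday: same month/day, 18 years later (Feb 29 falls back to Mar 1).
        let cutoff = birth
            .with_year(birth.year() + 18)
            .unwrap_or_else(|| NaiveDate::from_ymd_opt(birth.year() + 18, 3, 1).unwrap());
        proof_date >= cutoff
    }

    fn main() {
        let birth = NaiveDate::from_ymd_opt(1985, 3, 12).unwrap();
        let proof_date = NaiveDate::from_ymd_opt(2025, 9, 26).unwrap();
        assert!(is_over_18(birth, proof_date));
    }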
File diff suppressed because one or more lines are too long
@@ -4,12 +4,11 @@ use crate::types::received_commitments;
|
||||
|
||||
use super::types::ZKProofBundle;
|
||||
|
||||
use anyhow::Result;
|
||||
use chrono::{Datelike, Local, NaiveDate};
|
||||
use http_body_util::Empty;
|
||||
use hyper::{body::Bytes, header, Request, StatusCode, Uri};
|
||||
use hyper_util::rt::TokioIo;
|
||||
use k256::sha2::{Digest, Sha256};
|
||||
use k256::sha2::Digest;
|
||||
use noir::{
|
||||
barretenberg::{
|
||||
prove::prove_ultra_honk, srs::setup_srs_from_bytecode,
|
||||
@@ -22,27 +21,24 @@ use spansy::{
|
||||
http::{BodyContent, Requests, Responses},
|
||||
Spanned,
|
||||
};
|
||||
use tls_server_fixture::{CA_CERT_DER, SERVER_DOMAIN};
|
||||
use tls_server_fixture::CA_CERT_DER;
|
||||
use tlsn::{
|
||||
config::{
|
||||
prove::{ProveConfig, ProveConfigBuilder},
|
||||
prover::ProverConfig,
|
||||
tls::TlsClientConfig,
|
||||
tls_commit::{mpc::MpcTlsConfig, TlsCommitConfig},
|
||||
},
|
||||
config::{CertificateDer, ProtocolConfig, RootCertStore},
|
||||
connection::ServerName,
|
||||
hash::HashAlgId,
|
||||
prover::Prover,
|
||||
prover::{ProveConfig, ProveConfigBuilder, Prover, ProverConfig, TlsConfig},
|
||||
transcript::{
|
||||
hash::{PlaintextHash, PlaintextHashSecret},
|
||||
Direction, TranscriptCommitConfig, TranscriptCommitConfigBuilder, TranscriptCommitmentKind,
|
||||
TranscriptSecret,
|
||||
},
|
||||
webpki::{CertificateDer, RootCertStore},
|
||||
};
|
||||
|
||||
use tlsn_examples::{MAX_RECV_DATA, MAX_SENT_DATA};
|
||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
|
||||
use tlsn_examples::MAX_RECV_DATA;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
|
||||
use tlsn_examples::MAX_SENT_DATA;
|
||||
use tokio::io::{AsyncRead, AsyncWrite};
|
||||
use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
|
||||
use tracing::instrument;
|
||||
|
||||
@@ -52,64 +48,60 @@ pub async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
|
||||
mut verifier_extra_socket: T,
|
||||
server_addr: &SocketAddr,
|
||||
uri: &str,
|
||||
) -> Result<()> {
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let uri = uri.parse::<Uri>()?;
|
||||
|
||||
if uri.scheme().map(|s| s.as_str()) != Some("https") {
|
||||
return Err(anyhow::anyhow!("URI must use HTTPS scheme"));
|
||||
return Err("URI must use HTTPS scheme".into());
|
||||
}
|
||||
|
||||
let server_domain = uri
|
||||
.authority()
|
||||
.ok_or_else(|| anyhow::anyhow!("URI must have authority"))?
|
||||
.host();
|
||||
let server_domain = uri.authority().ok_or("URI must have authority")?.host();
|
||||
|
||||
// Create a new prover and perform necessary setup.
|
||||
let prover = Prover::new(ProverConfig::builder().build()?)
|
||||
.commit(
|
||||
TlsCommitConfig::builder()
|
||||
// Select the TLS commitment protocol.
|
||||
.protocol(
|
||||
MpcTlsConfig::builder()
|
||||
// We must configure the amount of data we expect to exchange beforehand,
|
||||
// which will be preprocessed prior to the
|
||||
// connection. Reducing these limits will improve
|
||||
// performance.
|
||||
.max_sent_data(MAX_SENT_DATA)
|
||||
.max_recv_data(MAX_RECV_DATA)
|
||||
.build()?,
|
||||
)
|
||||
// Create a root certificate store with the server-fixture's self-signed
|
||||
// certificate. This is only required for offline testing with the
|
||||
// server-fixture.
|
||||
let mut tls_config_builder = TlsConfig::builder();
|
||||
tls_config_builder.root_store(RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
});
|
||||
let tls_config = tls_config_builder.build()?;
|
||||
|
||||
// Set up protocol configuration for prover.
|
||||
let mut prover_config_builder = ProverConfig::builder();
|
||||
prover_config_builder
|
||||
.server_name(ServerName::Dns(server_domain.try_into()?))
|
||||
.tls_config(tls_config)
|
||||
.protocol_config(
|
||||
ProtocolConfig::builder()
|
||||
.max_sent_data(MAX_SENT_DATA)
|
||||
.max_recv_data(MAX_RECV_DATA)
|
||||
.build()?,
|
||||
verifier_socket.compat(),
|
||||
)
|
||||
);
|
||||
|
||||
let prover_config = prover_config_builder.build()?;
|
||||
|
||||
// Create prover and connect to verifier.
|
||||
//
|
||||
// Perform the setup phase with the verifier.
|
||||
let prover = Prover::new(prover_config)
|
||||
.setup(verifier_socket.compat())
|
||||
.await?;
|
||||
|
||||
// Open a TCP connection to the server.
|
||||
let client_socket = tokio::net::TcpStream::connect(server_addr).await?;
|
||||
// Connect to TLS Server.
|
||||
let tls_client_socket = tokio::net::TcpStream::connect(server_addr).await?;
|
||||
|
||||
// Bind the prover to the server connection.
|
||||
let (tls_connection, prover_fut) = prover
|
||||
.connect(
|
||||
TlsClientConfig::builder()
|
||||
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into()?))
|
||||
// Create a root certificate store with the server-fixture's self-signed
|
||||
// certificate. This is only required for offline testing with the
|
||||
// server-fixture.
|
||||
.root_store(RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
})
|
||||
.build()?,
|
||||
client_socket.compat(),
|
||||
)
|
||||
.await?;
|
||||
let tls_connection = TokioIo::new(tls_connection.compat());
|
||||
// Pass server connection into the prover.
|
||||
let (mpc_tls_connection, prover_fut) = prover.connect(tls_client_socket.compat()).await?;
|
||||
|
||||
// Wrap the connection in a TokioIo compatibility layer to use it with hyper.
|
||||
let mpc_tls_connection = TokioIo::new(mpc_tls_connection.compat());
|
||||
|
||||
// Spawn the Prover to run in the background.
|
||||
let prover_task = tokio::spawn(prover_fut);
|
||||
|
||||
// MPC-TLS Handshake.
|
||||
let (mut request_sender, connection) =
|
||||
hyper::client::conn::http1::handshake(tls_connection).await?;
|
||||
hyper::client::conn::http1::handshake(mpc_tls_connection).await?;
|
||||
|
||||
// Spawn the connection to run in the background.
|
||||
tokio::spawn(connection);
|
||||
@@ -126,10 +118,7 @@ pub async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
|
||||
let response = request_sender.send_request(request).await?;
|
||||
|
||||
if response.status() != StatusCode::OK {
|
||||
return Err(anyhow::anyhow!(
|
||||
"MPC-TLS request failed with status {}",
|
||||
response.status()
|
||||
));
|
||||
return Err(format!("MPC-TLS request failed with status {}", response.status()).into());
|
||||
}
|
||||
|
||||
// Create proof for the Verifier.
|
||||
@@ -153,7 +142,7 @@ pub async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
|
||||
// Create hash commitment for the date of birth field from the response
|
||||
let mut transcript_commitment_builder = TranscriptCommitConfig::builder(&transcript);
|
||||
transcript_commitment_builder.default_kind(TranscriptCommitmentKind::Hash {
|
||||
alg: HashAlgId::SHA256,
|
||||
alg: HashAlgId::BLAKE3,
|
||||
});
|
||||
reveal_received(
|
||||
received,
|
||||
@@ -174,13 +163,21 @@ pub async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
|
||||
let received_commitments = received_commitments(&prover_output.transcript_commitments);
|
||||
let received_commitment = received_commitments
|
||||
.first()
|
||||
.ok_or_else(|| anyhow::anyhow!("No received commitments found"))?; // committed hash (of date of birth string)
|
||||
.ok_or("No received commitments found")?; // committed hash (of date of birth string)
|
||||
let received_secrets = received_secrets(&prover_output.transcript_secrets);
|
||||
let received_secret = received_secrets
|
||||
.first()
|
||||
.ok_or_else(|| anyhow::anyhow!("No received secrets found"))?; // hash blinder
|
||||
.ok_or("No received secrets found")?; // hash blinder
|
||||
|
||||
let start_time = std::time::Instant::now();
|
||||
let proof_input = prepare_zk_proof_input(received, received_commitment, received_secret)?;
|
||||
let prepare_duration = start_time.elapsed();
|
||||
tracing::info!("🔢 prepare_zk_proof_input took: {:?}", prepare_duration);
|
||||
|
||||
let start_time = std::time::Instant::now();
|
||||
let proof_bundle = generate_zk_proof(&proof_input)?;
|
||||
let generate_duration = start_time.elapsed();
|
||||
tracing::info!("⚡ generate_zk_proof took: {:?}", generate_duration);
|
||||
|
||||
// Send zk proof bundle to verifier
|
||||
let serialized_proof = bincode::serialize(&proof_bundle)?;
|
||||
@@ -191,30 +188,28 @@ pub async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
|
||||
}
|
||||
|
||||
// Reveal everything from the request, except for the authorization token.
|
||||
fn reveal_request(request: &[u8], builder: &mut ProveConfigBuilder<'_>) -> Result<()> {
|
||||
fn reveal_request(
|
||||
request: &[u8],
|
||||
builder: &mut ProveConfigBuilder<'_>,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let reqs = Requests::new_from_slice(request).collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
let req = reqs
|
||||
.first()
|
||||
.ok_or_else(|| anyhow::anyhow!("No requests found"))?;
|
||||
let req = reqs.first().ok_or("No requests found")?;
|
||||
|
||||
if req.request.method.as_str() != "GET" {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Expected GET method, found {}",
|
||||
req.request.method.as_str()
|
||||
));
|
||||
return Err(format!("Expected GET method, found {}", req.request.method.as_str()).into());
|
||||
}
|
||||
|
||||
let authorization_header = req
|
||||
.headers_with_name(header::AUTHORIZATION.as_str())
|
||||
.next()
|
||||
.ok_or_else(|| anyhow::anyhow!("Authorization header not found"))?;
|
||||
.ok_or("Authorization header not found")?;
|
||||
|
||||
let start_pos = authorization_header
|
||||
.span()
|
||||
.indices()
|
||||
.min()
|
||||
.ok_or_else(|| anyhow::anyhow!("Could not find authorization header start position"))?
|
||||
.ok_or("Could not find authorization header start position")?
|
||||
+ header::AUTHORIZATION.as_str().len()
|
||||
+ 2;
|
||||
let end_pos =
|
||||
@@ -230,43 +225,38 @@ fn reveal_received(
|
||||
received: &[u8],
|
||||
builder: &mut ProveConfigBuilder<'_>,
|
||||
transcript_commitment_builder: &mut TranscriptCommitConfigBuilder,
|
||||
) -> Result<()> {
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let resp = Responses::new_from_slice(received).collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
let response = resp
|
||||
.first()
|
||||
.ok_or_else(|| anyhow::anyhow!("No responses found"))?;
|
||||
let body = response
|
||||
.body
|
||||
.as_ref()
|
||||
.ok_or_else(|| anyhow::anyhow!("Response body not found"))?;
|
||||
let response = resp.first().ok_or("No responses found")?;
|
||||
let body = response.body.as_ref().ok_or("Response body not found")?;
|
||||
|
||||
let BodyContent::Json(json) = &body.content else {
|
||||
return Err(anyhow::anyhow!("Expected JSON body content"));
|
||||
return Err("Expected JSON body content".into());
|
||||
};
|
||||
|
||||
// reveal tax year
|
||||
let tax_year = json
|
||||
.get("tax_year")
|
||||
.ok_or_else(|| anyhow::anyhow!("tax_year field not found in JSON"))?;
|
||||
.ok_or("tax_year field not found in JSON")?;
|
||||
let start_pos = tax_year
|
||||
.span()
|
||||
.indices()
|
||||
.min()
|
||||
.ok_or_else(|| anyhow::anyhow!("Could not find tax_year start position"))?
|
||||
.ok_or("Could not find tax_year start position")?
|
||||
- 11;
|
||||
let end_pos = tax_year
|
||||
.span()
|
||||
.indices()
|
||||
.max()
|
||||
.ok_or_else(|| anyhow::anyhow!("Could not find tax_year end position"))?
|
||||
.ok_or("Could not find tax_year end position")?
|
||||
+ 1;
|
||||
builder.reveal_recv(&(start_pos..end_pos))?;
|
||||
|
||||
// commit to hash of date of birth
|
||||
let dob = json
|
||||
.get("taxpayer.date_of_birth")
|
||||
.ok_or_else(|| anyhow::anyhow!("taxpayer.date_of_birth field not found in JSON"))?;
|
||||
.ok_or("taxpayer.date_of_birth field not found in JSON")?;
|
||||
|
||||
transcript_commitment_builder.commit_recv(dob.span())?;
|
||||
|
||||
@@ -297,37 +287,35 @@ fn prepare_zk_proof_input(
|
||||
received: &[u8],
|
||||
received_commitment: &PlaintextHash,
|
||||
received_secret: &PlaintextHashSecret,
|
||||
) -> Result<ZKProofInput> {
|
||||
) -> Result<ZKProofInput, Box<dyn std::error::Error>> {
|
||||
assert_eq!(received_commitment.direction, Direction::Received);
|
||||
assert_eq!(received_commitment.hash.alg, HashAlgId::SHA256);
|
||||
assert_eq!(received_commitment.hash.alg, HashAlgId::BLAKE3);
|
||||
|
||||
let hash = &received_commitment.hash;
|
||||
|
||||
let dob_start = received_commitment
|
||||
.idx
|
||||
.min()
|
||||
.ok_or_else(|| anyhow::anyhow!("No start index for DOB"))?;
|
||||
.ok_or("No start index for DOB")?;
|
||||
let dob_end = received_commitment
|
||||
.idx
|
||||
.end()
|
||||
.ok_or_else(|| anyhow::anyhow!("No end index for DOB"))?;
|
||||
.ok_or("No end index for DOB")?;
|
||||
let dob = received[dob_start..dob_end].to_vec();
|
||||
let blinder = received_secret.blinder.as_bytes().to_vec();
|
||||
let committed_hash = hash.value.as_bytes().to_vec();
|
||||
let proof_date = Local::now().date_naive();
|
||||
|
||||
assert_eq!(received_secret.direction, Direction::Received);
|
||||
assert_eq!(received_secret.alg, HashAlgId::SHA256);
|
||||
assert_eq!(received_secret.alg, HashAlgId::BLAKE3);
|
||||
|
||||
let mut hasher = Sha256::new();
|
||||
let mut hasher = blake3::Hasher::new();
|
||||
hasher.update(&dob);
|
||||
hasher.update(&blinder);
|
||||
let computed_hash = hasher.finalize();
|
||||
let computed_hash = hasher.finalize().as_bytes().to_vec();
|
||||
|
||||
if committed_hash != computed_hash.as_ref() as &[u8] {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Computed hash does not match committed hash"
|
||||
));
|
||||
if committed_hash != computed_hash.as_slice() {
|
||||
return Err("Computed hash does not match committed hash".into());
|
||||
}
|
||||
|
||||
Ok(ZKProofInput {
|
||||
@@ -338,7 +326,9 @@ fn prepare_zk_proof_input(
|
||||
})
|
||||
}
|
||||
|
||||
fn generate_zk_proof(proof_input: &ZKProofInput) -> Result<ZKProofBundle> {
|
||||
fn generate_zk_proof(
|
||||
proof_input: &ZKProofInput,
|
||||
) -> Result<ZKProofBundle, Box<dyn std::error::Error>> {
|
||||
tracing::info!("🔒 Generating ZK proof with Noir...");
|
||||
|
||||
const PROGRAM_JSON: &str = include_str!("./noir/target/noir.json");
|
||||
@@ -347,7 +337,7 @@ fn generate_zk_proof(proof_input: &ZKProofInput) -> Result<ZKProofBundle> {
|
||||
let json: Value = serde_json::from_str(PROGRAM_JSON)?;
|
||||
let bytecode = json["bytecode"]
|
||||
.as_str()
|
||||
.ok_or_else(|| anyhow::anyhow!("bytecode field not found in program.json"))?;
|
||||
.ok_or("bytecode field not found in program.json")?;
|
||||
|
||||
let mut inputs: Vec<String> = vec![];
|
||||
inputs.push(proof_input.proof_date.day().to_string());
|
||||
@@ -372,17 +362,16 @@ fn generate_zk_proof(proof_input: &ZKProofInput) -> Result<ZKProofBundle> {
|
||||
tracing::debug!("Witness inputs {:?}", inputs);
|
||||
|
||||
let input_refs: Vec<&str> = inputs.iter().map(String::as_str).collect();
|
||||
let witness = from_vec_str_to_witness_map(input_refs).map_err(|e| anyhow::anyhow!(e))?;
|
||||
let witness = from_vec_str_to_witness_map(input_refs)?;
|
||||
|
||||
// Setup SRS
|
||||
setup_srs_from_bytecode(bytecode, None, false).map_err(|e| anyhow::anyhow!(e))?;
|
||||
setup_srs_from_bytecode(bytecode, None, false)?;
|
||||
|
||||
// Verification key
|
||||
let vk = get_ultra_honk_verification_key(bytecode, false).map_err(|e| anyhow::anyhow!(e))?;
|
||||
let vk = get_ultra_honk_verification_key(bytecode, false)?;
|
||||
|
||||
// Generate proof
|
||||
let proof = prove_ultra_honk(bytecode, witness.clone(), vk.clone(), false)
|
||||
.map_err(|e| anyhow::anyhow!(e))?;
|
||||
let proof = prove_ultra_honk(bytecode, witness.clone(), vk.clone(), false)?;
|
||||
tracing::info!("✅ Proof generated ({} bytes)", proof.len());
|
||||
|
||||
let proof_bundle = ZKProofBundle { vk, proof };
|
||||
|
||||
@@ -1,18 +1,16 @@
|
||||
use crate::types::received_commitments;
|
||||
|
||||
use super::types::ZKProofBundle;
|
||||
use anyhow::Result;
|
||||
use chrono::{Local, NaiveDate};
|
||||
use noir::barretenberg::verify::{get_ultra_honk_verification_key, verify_ultra_honk};
|
||||
use serde_json::Value;
|
||||
use tls_server_fixture::CA_CERT_DER;
|
||||
use tlsn::{
|
||||
config::{tls_commit::TlsCommitProtocolConfig, verifier::VerifierConfig},
|
||||
config::{CertificateDer, ProtocolConfigValidator, RootCertStore},
|
||||
connection::ServerName,
|
||||
hash::HashAlgId,
|
||||
transcript::{Direction, PartialTranscript},
|
||||
verifier::{Verifier, VerifierOutput},
|
||||
webpki::{CertificateDer, RootCertStore},
|
||||
verifier::{Verifier, VerifierConfig, VerifierOutput, VerifyConfig},
|
||||
};
|
||||
use tlsn_examples::{MAX_RECV_DATA, MAX_SENT_DATA};
|
||||
use tlsn_server_fixture_certs::SERVER_DOMAIN;
|
||||
@@ -24,104 +22,71 @@ use tracing::instrument;
|
||||
pub async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
|
||||
socket: T,
|
||||
mut extra_socket: T,
|
||||
) -> Result<PartialTranscript> {
|
||||
let verifier = Verifier::new(
|
||||
VerifierConfig::builder()
|
||||
// Create a root certificate store with the server-fixture's self-signed
|
||||
// certificate. This is only required for offline testing with the
|
||||
// server-fixture.
|
||||
.root_store(RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
})
|
||||
.build()?,
|
||||
);
|
||||
) -> Result<PartialTranscript, Box<dyn std::error::Error>> {
|
||||
// Set up Verifier.
|
||||
let config_validator = ProtocolConfigValidator::builder()
|
||||
.max_sent_data(MAX_SENT_DATA)
|
||||
.max_recv_data(MAX_RECV_DATA)
|
||||
.build()?;
|
||||
|
||||
// Validate the proposed configuration and then run the TLS commitment protocol.
|
||||
let verifier = verifier.commit(socket.compat()).await?;
|
||||
// Create a root certificate store with the server-fixture's self-signed
|
||||
// certificate. This is only required for offline testing with the
|
||||
// server-fixture.
|
||||
let verifier_config = VerifierConfig::builder()
|
||||
.root_store(RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
})
|
||||
.protocol_config_validator(config_validator)
|
||||
.build()?;
|
||||
|
||||
// This is the opportunity to ensure the prover does not attempt to overload the
|
||||
// verifier.
|
||||
let reject = if let TlsCommitProtocolConfig::Mpc(mpc_tls_config) = verifier.request().protocol()
|
||||
{
|
||||
if mpc_tls_config.max_sent_data() > MAX_SENT_DATA {
|
||||
Some("max_sent_data is too large")
|
||||
} else if mpc_tls_config.max_recv_data() > MAX_RECV_DATA {
|
||||
Some("max_recv_data is too large")
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
Some("expecting to use MPC-TLS")
|
||||
};
|
||||
let verifier = Verifier::new(verifier_config);
|
||||
|
||||
if reject.is_some() {
|
||||
verifier.reject(reject).await?;
|
||||
return Err(anyhow::anyhow!("protocol configuration rejected"));
|
||||
}
|
||||
// Receive authenticated data.
|
||||
let VerifierOutput {
|
||||
server_name,
|
||||
transcript,
|
||||
transcript_commitments,
|
||||
..
|
||||
} = verifier
|
||||
.verify(socket.compat(), &VerifyConfig::default())
|
||||
.await?;
|
||||
|
||||
// Runs the TLS commitment protocol to completion.
|
||||
let verifier = verifier.accept().await?.run().await?;
|
||||
|
||||
// Validate the proving request and then verify.
|
||||
let verifier = verifier.verify().await?;
|
||||
let request = verifier.request();
|
||||
|
||||
if !request.server_identity() || request.reveal().is_none() {
|
||||
let verifier = verifier
|
||||
.reject(Some(
|
||||
"expecting to verify the server name and transcript data",
|
||||
))
|
||||
.await?;
|
||||
verifier.close().await?;
|
||||
return Err(anyhow::anyhow!(
|
||||
"prover did not reveal the server name and transcript data"
|
||||
));
|
||||
}
|
||||
|
||||
let (
|
||||
VerifierOutput {
|
||||
server_name,
|
||||
transcript,
|
||||
transcript_commitments,
|
||||
..
|
||||
},
|
||||
verifier,
|
||||
) = verifier.accept().await?;
|
||||
|
||||
verifier.close().await?;
|
||||
|
||||
let server_name = server_name.expect("server name should be present");
|
||||
let transcript = transcript.expect("transcript should be present");
|
||||
let server_name = server_name.ok_or("Prover should have revealed server name")?;
|
||||
let transcript = transcript.ok_or("Prover should have revealed transcript data")?;
|
||||
|
||||
// Create hash commitment for the date of birth field from the response
|
||||
let sent = transcript.sent_unsafe().to_vec();
|
||||
let sent_data = String::from_utf8(sent.clone())
|
||||
.map_err(|e| anyhow::anyhow!("Verifier expected valid UTF-8 sent data: {e}"))?;
|
||||
.map_err(|e| format!("Verifier expected valid UTF-8 sent data: {}", e))?;
|
||||
|
||||
if !sent_data.contains(SERVER_DOMAIN) {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Verification failed: Expected host {SERVER_DOMAIN} not found in sent data"
|
||||
));
|
||||
return Err(format!(
|
||||
"Verification failed: Expected host {} not found in sent data",
|
||||
SERVER_DOMAIN
|
||||
)
|
||||
.into());
|
||||
}
|
||||
|
||||
// Check received data.
|
||||
let received_commitments = received_commitments(&transcript_commitments);
|
||||
let received_commitment = received_commitments
|
||||
.first()
|
||||
.ok_or_else(|| anyhow::anyhow!("Missing hash commitment"))?;
|
||||
.ok_or("Missing received hash commitment")?;
|
||||
|
||||
assert!(received_commitment.direction == Direction::Received);
|
||||
assert!(received_commitment.hash.alg == HashAlgId::SHA256);
|
||||
assert!(received_commitment.hash.alg == HashAlgId::BLAKE3);
|
||||
|
||||
let committed_hash = &received_commitment.hash;
|
||||
|
||||
// Check Session info: server name.
|
||||
let ServerName::Dns(server_name) = server_name;
|
||||
if server_name.as_str() != SERVER_DOMAIN {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Server name mismatch: expected {SERVER_DOMAIN}, got {}",
|
||||
return Err(format!(
|
||||
"Server name mismatch: expected {}, got {}",
|
||||
SERVER_DOMAIN,
|
||||
server_name.as_str()
|
||||
));
|
||||
)
|
||||
.into());
|
||||
}
|
||||
|
||||
// Receive ZKProof information from prover
|
||||
@@ -129,28 +94,26 @@ pub async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>
|
||||
extra_socket.read_to_end(&mut buf).await?;
|
||||
|
||||
if buf.is_empty() {
|
||||
return Err(anyhow::anyhow!("No ZK proof data received from prover"));
|
||||
return Err("No ZK proof data received from prover".into());
|
||||
}
|
||||
|
||||
let msg: ZKProofBundle = bincode::deserialize(&buf)
|
||||
.map_err(|e| anyhow::anyhow!("Failed to deserialize ZK proof bundle: {e}"))?;
|
||||
.map_err(|e| format!("Failed to deserialize ZK proof bundle: {}", e))?;
|
||||
|
||||
// Verify zk proof
|
||||
const PROGRAM_JSON: &str = include_str!("./noir/target/noir.json");
|
||||
let json: Value = serde_json::from_str(PROGRAM_JSON)
|
||||
.map_err(|e| anyhow::anyhow!("Failed to parse Noir circuit: {e}"))?;
|
||||
.map_err(|e| format!("Failed to parse Noir circuit: {}", e))?;
|
||||
|
||||
let bytecode = json["bytecode"]
|
||||
.as_str()
|
||||
.ok_or_else(|| anyhow::anyhow!("Bytecode field missing in noir.json"))?;
|
||||
.ok_or("Bytecode field missing in noir.json")?;
|
||||
|
||||
let vk = get_ultra_honk_verification_key(bytecode, false)
|
||||
.map_err(|e| anyhow::anyhow!("Failed to get verification key: {e}"))?;
|
||||
.map_err(|e| format!("Failed to get verification key: {}", e))?;
|
||||
|
||||
if vk != msg.vk {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Verification key mismatch between computed and provided by prover"
|
||||
));
|
||||
return Err("Verification key mismatch between computed and provided by prover".into());
|
||||
}
|
||||
|
||||
let proof = msg.proof.clone();
|
||||
@@ -162,10 +125,12 @@ pub async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>
|
||||
// * and 32*32 bytes for the hash
|
||||
let min_bytes = (32 + 3) * 32;
|
||||
if proof.len() < min_bytes {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Proof too short: expected at least {min_bytes} bytes, got {}",
|
||||
return Err(format!(
|
||||
"Proof too short: expected at least {} bytes, got {}",
|
||||
min_bytes,
|
||||
proof.len()
|
||||
));
|
||||
)
|
||||
.into());
|
||||
}
|
||||
|
||||
// Check that the proof date is correctly included in the proof
|
||||
@@ -174,12 +139,14 @@ pub async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>
|
||||
let proof_date_year: i32 = i32::from_be_bytes(proof[92..96].try_into()?);
|
||||
let proof_date_from_proof =
|
||||
NaiveDate::from_ymd_opt(proof_date_year, proof_date_month, proof_date_day)
|
||||
.ok_or_else(|| anyhow::anyhow!("Invalid proof date in proof"))?;
|
||||
.ok_or("Invalid proof date in proof")?;
|
||||
let today = Local::now().date_naive();
|
||||
if (today - proof_date_from_proof).num_days() < 0 {
|
||||
return Err(anyhow::anyhow!(
|
||||
"The proof date can only be today or in the past: provided {proof_date_from_proof}, today {today}"
|
||||
));
|
||||
return Err(format!(
|
||||
"The proof date can only be today or in the past: provided {}, today {}",
|
||||
proof_date_from_proof, today
|
||||
)
|
||||
.into());
|
||||
}
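The verifier above reads the proof date out of the proof's leading public inputs, each a 32-byte big-endian field element, and rejects dates in the future before verifying the proof itself. A sketch of the two generic pieces; the byte offsets used by the example (e.g. `proof[92..96]` for the year) are specific to its circuit and are not repeated here:

    use chrono::{Local, NaiveDate};

    /// Interpret a 32-byte big-endian field element as a small unsigned integer
    /// by reading its last four bytes. Returns None if the slice is too short.
    fn field_to_u32(field: &[u8]) -> Option<u32> {
        let tail: [u8; 4] = field.get(field.len().checked_sub(4)?..)?.try_into().ok()?;
        Some(u32::from_be_bytes(tail))
    }

    /// The proof date may be today or in the past, never in the future.
    fn date_is_acceptable(proof_date: NaiveDate) -> bool {
        (Local::now().date_naive() - proof_date).num_days() >= 0
    }

    fn main() {
        let mut field = [0u8; 32];
        field[28..32].copy_from_slice(&2025u32.to_be_bytes());
        assert_eq!(field_to_u32(&field), Some(2025));

        // A date far in the past is always acceptable.
        assert!(date_is_acceptable(NaiveDate::from_ymd_opt(2000, 1, 1).unwrap()));
    }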
|
||||
|
||||
// Check that the committed hash in the proof matches the hash from the
|
||||
@@ -197,9 +164,7 @@ pub async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>
|
||||
hex::encode(&committed_hash_in_proof),
|
||||
hex::encode(&expected_hash)
|
||||
);
|
||||
return Err(anyhow::anyhow!(
|
||||
"Hash in proof does not match committed hash in MPC-TLS"
|
||||
));
|
||||
return Err("Hash in proof does not match committed hash in MPC-TLS".into());
|
||||
}
|
||||
tracing::info!(
|
||||
"✅ The hash in the proof matches the committed hash in MPC-TLS ({})",
|
||||
@@ -208,10 +173,10 @@ pub async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>
|
||||
|
||||
// Finally verify the proof
|
||||
let is_valid = verify_ultra_honk(msg.proof, msg.vk)
|
||||
.map_err(|e| anyhow::anyhow!("ZKProof Verification failed: {e}"))?;
|
||||
.map_err(|e| format!("ZKProof Verification failed: {}", e))?;
|
||||
if !is_valid {
|
||||
tracing::error!("❌ Age verification ZKProof failed to verify");
|
||||
return Err(anyhow::anyhow!("Age verification ZKProof failed to verify"));
|
||||
return Err("Age verification ZKProof failed to verify".into());
|
||||
}
|
||||
tracing::info!("✅ Age verification ZKProof successfully verified");
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "tlsn-formats"
|
||||
version = "0.1.0-alpha.14-pre"
|
||||
version = "0.1.0-alpha.13-pre"
|
||||
edition = "2021"
|
||||
|
||||
[lints]
|
||||
|
||||
@@ -3,19 +3,7 @@
|
||||
# Ensure the script runs in the folder that contains this script
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
RUNNER_FEATURES=""
|
||||
EXECUTOR_FEATURES=""
|
||||
|
||||
if [ "$1" = "debug" ]; then
|
||||
RUNNER_FEATURES="--features debug"
|
||||
EXECUTOR_FEATURES="--no-default-features --features debug"
|
||||
fi
|
||||
|
||||
cargo build --release \
|
||||
--package tlsn-harness-runner $RUNNER_FEATURES \
|
||||
--package tlsn-harness-executor $EXECUTOR_FEATURES \
|
||||
--package tlsn-server-fixture \
|
||||
--package tlsn-harness-plot
|
||||
cargo build --release --package tlsn-harness-runner --package tlsn-harness-executor --package tlsn-server-fixture --package tlsn-harness-plot
|
||||
|
||||
mkdir -p bin
|
||||
|
||||
|
||||
@@ -1,14 +1,10 @@
|
||||
[target.wasm32-unknown-unknown]
|
||||
rustflags = [
|
||||
"-Ctarget-feature=+atomics,+bulk-memory,+mutable-globals,+simd128",
|
||||
"-Clink-arg=--shared-memory",
|
||||
"-C",
|
||||
"target-feature=+atomics,+bulk-memory,+mutable-globals,+simd128",
|
||||
"-C",
|
||||
# 4GB
|
||||
"-Clink-arg=--max-memory=4294967296",
|
||||
"-Clink-arg=--import-memory",
|
||||
"-Clink-arg=--export=__wasm_init_tls",
|
||||
"-Clink-arg=--export=__tls_size",
|
||||
"-Clink-arg=--export=__tls_align",
|
||||
"-Clink-arg=--export=__tls_base",
|
||||
"link-arg=--max-memory=4294967296",
|
||||
"--cfg",
|
||||
'getrandom_backend="wasm_js"',
|
||||
]
|
||||
|
||||
@@ -4,12 +4,6 @@ version = "0.1.0"
|
||||
edition = "2024"
|
||||
publish = false
|
||||
|
||||
[features]
|
||||
# Disable tracing events as a workaround for issue 959.
|
||||
default = ["tracing/release_max_level_off"]
|
||||
# Used to debug the executor itself.
|
||||
debug = []
|
||||
|
||||
[lib]
|
||||
name = "harness_executor"
|
||||
crate-type = ["cdylib", "rlib"]
|
||||
@@ -34,7 +28,8 @@ tokio = { workspace = true, features = ["full"] }
|
||||
tokio-util = { workspace = true, features = ["compat"] }
|
||||
|
||||
[target.'cfg(target_arch = "wasm32")'.dependencies]
|
||||
tracing = { workspace = true }
|
||||
# Disable tracing events as a workaround for issue 959.
|
||||
tracing = { workspace = true, features = ["release_max_level_off"] }
|
||||
wasm-bindgen = { workspace = true }
|
||||
tlsn-wasm = { workspace = true }
|
||||
js-sys = { workspace = true }
|
||||
|
||||
@@ -5,15 +5,9 @@ use futures::{AsyncReadExt, AsyncWriteExt, TryFutureExt};
|
||||
|
||||
use harness_core::bench::{Bench, ProverMetrics};
|
||||
use tlsn::{
|
||||
config::{
|
||||
prove::ProveConfig,
|
||||
prover::ProverConfig,
|
||||
tls::TlsClientConfig,
|
||||
tls_commit::{TlsCommitConfig, mpc::MpcTlsConfig},
|
||||
},
|
||||
config::{CertificateDer, ProtocolConfig, RootCertStore},
|
||||
connection::ServerName,
|
||||
prover::Prover,
|
||||
webpki::{CertificateDer, RootCertStore},
|
||||
prover::{ProveConfig, Prover, ProverConfig, TlsConfig},
|
||||
};
|
||||
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
|
||||
|
||||
@@ -28,47 +22,41 @@ pub async fn bench_prover(provider: &IoProvider, config: &Bench) -> Result<Prove
|
||||
let sent = verifier_io.sent();
|
||||
let recv = verifier_io.recv();
|
||||
|
||||
let prover = Prover::new(ProverConfig::builder().build()?);
|
||||
let mut builder = ProtocolConfig::builder();
|
||||
builder.max_sent_data(config.upload_size);
|
||||
|
||||
builder.defer_decryption_from_start(config.defer_decryption);
|
||||
if !config.defer_decryption {
|
||||
builder.max_recv_data_online(config.download_size + RECV_PADDING);
|
||||
}
|
||||
builder.max_recv_data(config.download_size + RECV_PADDING);
|
||||
|
||||
let protocol_config = builder.build()?;
|
||||
|
||||
let mut tls_config_builder = TlsConfig::builder();
|
||||
tls_config_builder.root_store(RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
});
|
||||
let tls_config = tls_config_builder.build()?;
|
||||
|
||||
let prover = Prover::new(
|
||||
ProverConfig::builder()
|
||||
.tls_config(tls_config)
|
||||
.protocol_config(protocol_config)
|
||||
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
|
||||
.build()?,
|
||||
);
|
||||
|
||||
let time_start = web_time::Instant::now();
|
||||
|
||||
let prover = prover
|
||||
.commit(
|
||||
TlsCommitConfig::builder()
|
||||
.protocol({
|
||||
let mut builder = MpcTlsConfig::builder()
|
||||
.max_sent_data(config.upload_size)
|
||||
.defer_decryption_from_start(config.defer_decryption);
|
||||
|
||||
if !config.defer_decryption {
|
||||
builder = builder.max_recv_data_online(config.download_size + RECV_PADDING);
|
||||
}
|
||||
|
||||
builder
|
||||
.max_recv_data(config.download_size + RECV_PADDING)
|
||||
.build()
|
||||
}?)
|
||||
.build()?,
|
||||
verifier_io,
|
||||
)
|
||||
.await?;
|
||||
let prover = prover.setup(verifier_io).await?;
|
||||
|
||||
let time_preprocess = time_start.elapsed().as_millis();
|
||||
let time_start_online = web_time::Instant::now();
|
||||
let uploaded_preprocess = sent.load(Ordering::Relaxed);
|
||||
let downloaded_preprocess = recv.load(Ordering::Relaxed);
|
||||
|
||||
let (mut conn, prover_fut) = prover
|
||||
.connect(
|
||||
TlsClientConfig::builder()
|
||||
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into()?))
|
||||
.root_store(RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
})
|
||||
.build()?,
|
||||
provider.provide_server_io().await?,
|
||||
)
|
||||
.await?;
|
||||
let (mut conn, prover_fut) = prover.connect(provider.provide_server_io().await?).await?;
|
||||
|
||||
let (_, mut prover) = futures::try_join!(
|
||||
async {
|
||||
|
||||
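The benchmark splits its measurements into a preprocessing phase and an online phase by snapshotting a timer and the transport byte counters right after prover setup and again after the connection. A self-contained sketch of that bookkeeping with `std::time::Instant` and atomic counters (the real harness uses `web_time` and its own instrumented I/O):

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::time::Instant;

    fn main() {
        // An instrumented transport would bump these on every write/read.
        let sent = AtomicUsize::new(0);
        let recv = AtomicUsize::new(0);

        let start = Instant::now();
        // ... preprocessing (prover setup) happens here ...
        sent.fetch_add(10_000, Ordering::Relaxed);
        recv.fetch_add(250_000, Ordering::Relaxed);

        let time_preprocess = start.elapsed().as_millis();
        let uploaded_preprocess = sent.load(Ordering::Relaxed);
        let downloaded_preprocess = recv.load(Ordering::Relaxed);

        let start_online = Instant::now();
        // ... online phase (TLS connection + request) happens here ...
        sent.fetch_add(2_000, Ordering::Relaxed);
        recv.fetch_add(50_000, Ordering::Relaxed);

        println!(
            "preprocess: {} ms, {} B up, {} B down; online: {} ms, {} B up, {} B down",
            time_preprocess,
            uploaded_preprocess,
            downloaded_preprocess,
            start_online.elapsed().as_millis(),
            sent.load(Ordering::Relaxed) - uploaded_preprocess,
            recv.load(Ordering::Relaxed) - downloaded_preprocess,
        );
    }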
@@ -2,31 +2,33 @@ use anyhow::Result;
|
||||
|
||||
use harness_core::bench::Bench;
|
||||
use tlsn::{
|
||||
config::verifier::VerifierConfig,
|
||||
verifier::Verifier,
|
||||
webpki::{CertificateDer, RootCertStore},
|
||||
config::{CertificateDer, ProtocolConfigValidator, RootCertStore},
|
||||
verifier::{Verifier, VerifierConfig, VerifyConfig},
|
||||
};
|
||||
use tlsn_server_fixture_certs::CA_CERT_DER;
|
||||
|
||||
use crate::IoProvider;
|
||||
use crate::{IoProvider, bench::RECV_PADDING};
|
||||
|
||||
pub async fn bench_verifier(provider: &IoProvider, config: &Bench) -> Result<()> {
|
||||
let mut builder = ProtocolConfigValidator::builder();
|
||||
builder
|
||||
.max_sent_data(config.upload_size)
|
||||
.max_recv_data(config.download_size + RECV_PADDING);
|
||||
|
||||
let protocol_config = builder.build()?;
|
||||
|
||||
pub async fn bench_verifier(provider: &IoProvider, _config: &Bench) -> Result<()> {
|
||||
let verifier = Verifier::new(
|
||||
VerifierConfig::builder()
|
||||
.root_store(RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
})
|
||||
.protocol_config_validator(protocol_config)
|
||||
.build()?,
|
||||
);
|
||||
|
||||
let verifier = verifier
|
||||
.commit(provider.provide_proto_io().await?)
|
||||
.await?
|
||||
.accept()
|
||||
.await?
|
||||
.run()
|
||||
.await?;
|
||||
let (_, verifier) = verifier.verify().await?.accept().await?;
|
||||
let verifier = verifier.setup(provider.provide_proto_io().await?).await?;
|
||||
let mut verifier = verifier.run().await?;
|
||||
verifier.verify(&VerifyConfig::default()).await?;
|
||||
verifier.close().await?;
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -1,17 +1,10 @@
|
||||
use tlsn::{
|
||||
config::{
|
||||
prove::ProveConfig,
|
||||
prover::ProverConfig,
|
||||
tls::TlsClientConfig,
|
||||
tls_commit::{TlsCommitConfig, mpc::MpcTlsConfig},
|
||||
verifier::VerifierConfig,
|
||||
},
|
||||
config::{CertificateDer, ProtocolConfig, ProtocolConfigValidator, RootCertStore},
|
||||
connection::ServerName,
|
||||
hash::HashAlgId,
|
||||
prover::Prover,
|
||||
prover::{ProveConfig, Prover, ProverConfig, TlsConfig},
|
||||
transcript::{TranscriptCommitConfig, TranscriptCommitment, TranscriptCommitmentKind},
|
||||
verifier::{Verifier, VerifierOutput},
|
||||
webpki::{CertificateDer, RootCertStore},
|
||||
verifier::{Verifier, VerifierConfig, VerifierOutput, VerifyConfig},
|
||||
};
|
||||
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
|
||||
|
||||
@@ -28,35 +21,35 @@ const MAX_RECV_DATA: usize = 1 << 11;
|
||||
crate::test!("basic", prover, verifier);
|
||||
|
||||
async fn prover(provider: &IoProvider) {
|
||||
let prover = Prover::new(ProverConfig::builder().build().unwrap())
|
||||
.commit(
|
||||
TlsCommitConfig::builder()
|
||||
.protocol(
|
||||
MpcTlsConfig::builder()
|
||||
.max_sent_data(MAX_SENT_DATA)
|
||||
.max_recv_data(MAX_RECV_DATA)
|
||||
.defer_decryption_from_start(true)
|
||||
.build()
|
||||
.unwrap(),
|
||||
)
|
||||
.build()
|
||||
.unwrap(),
|
||||
provider.provide_proto_io().await.unwrap(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
let mut tls_config_builder = TlsConfig::builder();
|
||||
tls_config_builder.root_store(RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
});
|
||||
|
||||
let tls_config = tls_config_builder.build().unwrap();
|
||||
|
||||
let server_name = ServerName::Dns(SERVER_DOMAIN.try_into().unwrap());
|
||||
let prover = Prover::new(
|
||||
ProverConfig::builder()
|
||||
.server_name(server_name)
|
||||
.tls_config(tls_config)
|
||||
.protocol_config(
|
||||
ProtocolConfig::builder()
|
||||
.max_sent_data(MAX_SENT_DATA)
|
||||
.max_recv_data(MAX_RECV_DATA)
|
||||
.defer_decryption_from_start(true)
|
||||
.build()
|
||||
.unwrap(),
|
||||
)
|
||||
.build()
|
||||
.unwrap(),
|
||||
)
|
||||
.setup(provider.provide_proto_io().await.unwrap())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let (tls_connection, prover_fut) = prover
|
||||
.connect(
|
||||
TlsClientConfig::builder()
|
||||
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
|
||||
.root_store(RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
})
|
||||
.build()
|
||||
.unwrap(),
|
||||
provider.provide_server_io().await.unwrap(),
|
||||
)
|
||||
.connect(provider.provide_server_io().await.unwrap())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@@ -120,34 +113,33 @@ async fn prover(provider: &IoProvider) {
|
||||
|
||||
async fn verifier(provider: &IoProvider) {
|
||||
let config = VerifierConfig::builder()
|
||||
.protocol_config_validator(
|
||||
ProtocolConfigValidator::builder()
|
||||
.max_sent_data(MAX_SENT_DATA)
|
||||
.max_recv_data(MAX_RECV_DATA)
|
||||
.build()
|
||||
.unwrap(),
|
||||
)
|
||||
.root_store(RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
})
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let verifier = Verifier::new(config)
|
||||
.commit(provider.provide_proto_io().await.unwrap())
|
||||
.await
|
||||
.unwrap()
|
||||
.accept()
|
||||
.await
|
||||
.unwrap()
|
||||
.run()
|
||||
let verifier = Verifier::new(config);
|
||||
|
||||
let VerifierOutput {
|
||||
server_name,
|
||||
transcript_commitments,
|
||||
..
|
||||
} = verifier
|
||||
.verify(
|
||||
provider.provide_proto_io().await.unwrap(),
|
||||
&VerifyConfig::default(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let (
|
||||
VerifierOutput {
|
||||
server_name,
|
||||
transcript_commitments,
|
||||
..
|
||||
},
|
||||
verifier,
|
||||
) = verifier.verify().await.unwrap().accept().await.unwrap();
|
||||
|
||||
verifier.close().await.unwrap();
|
||||
|
||||
let ServerName::Dns(server_name) = server_name.unwrap();
|
||||
|
||||
assert_eq!(server_name.as_str(), SERVER_DOMAIN);
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
FROM rust AS builder
|
||||
WORKDIR /usr/src/tlsn
|
||||
|
||||
ARG DEBUG=0
|
||||
|
||||
RUN \
|
||||
rustup update; \
|
||||
apt update && apt install -y clang; \
|
||||
@@ -12,12 +10,7 @@ RUN \
|
||||
COPY . .
|
||||
RUN \
|
||||
cd crates/harness; \
|
||||
# Pass `--build-arg DEBUG=1` to `docker build` if you need to debug the harness.
|
||||
if [ "$DEBUG" = "1" ]; then \
|
||||
./build.sh debug; \
|
||||
else \
|
||||
./build.sh; \
|
||||
fi
|
||||
./build.sh;
|
||||
|
||||
FROM debian:latest
|
||||
|
||||
|
||||
@@ -7,10 +7,6 @@ publish = false
|
||||
[lib]
|
||||
name = "harness_runner"
|
||||
|
||||
[features]
|
||||
# Used to debug the runner itself.
|
||||
debug = []
|
||||
|
||||
[dependencies]
|
||||
tlsn-harness-core = { workspace = true }
|
||||
tlsn-server-fixture = { workspace = true }
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
#![allow(unused_imports)]
|
||||
pub use futures::FutureExt;
|
||||
|
||||
pub use tracing::{debug, error};
|
||||
|
||||
pub use chromiumoxide::{
|
||||
Browser, Page,
|
||||
cdp::{
|
||||
browser_protocol::{
|
||||
log::{EventEntryAdded, LogEntryLevel},
|
||||
network::{EnableParams, SetCacheDisabledParams},
|
||||
page::ReloadParams,
|
||||
},
|
||||
js_protocol::runtime::EventExceptionThrown,
|
||||
},
|
||||
handler::HandlerConfig,
|
||||
};
|
||||
@@ -21,9 +21,6 @@ use harness_core::{
|
||||
|
||||
use crate::{Target, network::Namespace, rpc::Rpc};
|
||||
|
||||
#[cfg(feature = "debug")]
|
||||
use crate::debug_prelude::*;
|
||||
|
||||
pub struct Executor {
|
||||
ns: Namespace,
|
||||
config: ExecutorConfig,
|
||||
@@ -69,34 +66,20 @@ impl Executor {
|
||||
Id::One => self.config.network().rpc_1,
|
||||
};
|
||||
|
||||
let mut args = vec![
|
||||
"ip".into(),
|
||||
"netns".into(),
|
||||
"exec".into(),
|
||||
self.ns.name().into(),
|
||||
"env".into(),
|
||||
let process = duct::cmd!(
|
||||
"sudo",
|
||||
"ip",
|
||||
"netns",
|
||||
"exec",
|
||||
self.ns.name(),
|
||||
"env",
|
||||
format!("CONFIG={}", serde_json::to_string(&self.config)?),
|
||||
];
|
||||
|
||||
if cfg!(feature = "debug") {
|
||||
let level = &std::env::var("RUST_LOG").unwrap_or("debug".to_string());
|
||||
args.push("env".into());
|
||||
args.push(format!("RUST_LOG={}", level));
|
||||
};
|
||||
|
||||
args.push(executor_path.to_str().expect("valid path").into());
|
||||
|
||||
let process = duct::cmd("sudo", args);
|
||||
|
||||
let process = if !cfg!(feature = "debug") {
|
||||
process
|
||||
.stdout_capture()
|
||||
.stderr_capture()
|
||||
.unchecked()
|
||||
.start()?
|
||||
} else {
|
||||
process.unchecked().start()?
|
||||
};
|
||||
executor_path
|
||||
)
|
||||
.stdout_capture()
|
||||
.stderr_capture()
|
||||
.unchecked()
|
||||
.start()?;
|
||||
|
||||
let rpc = Rpc::new_native(rpc_addr).await?;
|
||||
|
||||
@@ -136,13 +119,10 @@ impl Executor {
|
||||
"--no-sandbox",
|
||||
format!("--user-data-dir={tmp}"),
|
||||
format!("--allowed-ips=10.250.0.1"),
|
||||
);
|
||||
|
||||
let process = if !cfg!(feature = "debug") {
|
||||
process.stderr_capture().stdout_capture().start()?
|
||||
} else {
|
||||
process.start()?
|
||||
};
|
||||
)
|
||||
.stderr_capture()
|
||||
.stdout_capture()
|
||||
.start()?;
|
||||
|
||||
const TIMEOUT: usize = 10000;
|
||||
const DELAY: usize = 100;
|
||||
@@ -191,38 +171,6 @@ impl Executor {
|
||||
.new_page(&format!("http://{wasm_addr}:{wasm_port}/index.html"))
|
||||
.await?;
|
||||
|
||||
#[cfg(feature = "debug")]
|
||||
tokio::spawn(register_listeners(page.clone()).await?);
|
||||
|
||||
#[cfg(feature = "debug")]
|
||||
async fn register_listeners(page: Page) -> Result<impl Future<Output = ()>> {
|
||||
let mut logs = page.event_listener::<EventEntryAdded>().await?.fuse();
|
||||
let mut exceptions =
|
||||
page.event_listener::<EventExceptionThrown>().await?.fuse();
|
||||
|
||||
Ok(futures::future::join(
|
||||
async move {
|
||||
while let Some(event) = logs.next().await {
|
||||
let entry = &event.entry;
|
||||
match entry.level {
|
||||
LogEntryLevel::Error => {
|
||||
error!("{:?}", entry);
|
||||
}
|
||||
_ => {
|
||||
debug!("{:?}: {}", entry.timestamp, entry.text);
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
async move {
|
||||
while let Some(event) = exceptions.next().await {
|
||||
error!("{:?}", event);
|
||||
}
|
||||
},
|
||||
)
|
||||
.map(|_| ()))
|
||||
}
|
||||
|
||||
page.execute(EnableParams::builder().build()).await?;
|
||||
page.execute(SetCacheDisabledParams {
|
||||
cache_disabled: true,
|
||||
|
||||
@@ -6,9 +6,6 @@ mod server_fixture;
|
||||
pub mod wasm_server;
|
||||
mod ws_proxy;
|
||||
|
||||
#[cfg(feature = "debug")]
|
||||
mod debug_prelude;
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Result;
|
||||
@@ -27,18 +24,20 @@ use cli::{Cli, Command};
|
||||
use executor::Executor;
|
||||
use server_fixture::ServerFixture;
|
||||
|
||||
#[cfg(feature = "debug")]
|
||||
use crate::debug_prelude::*;
|
||||
|
||||
use crate::{cli::Route, network::Network, wasm_server::WasmServer, ws_proxy::WsProxy};
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum, Default)]
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum)]
|
||||
pub enum Target {
|
||||
#[default]
|
||||
Native,
|
||||
Browser,
|
||||
}
|
||||
|
||||
impl Default for Target {
|
||||
fn default() -> Self {
|
||||
Self::Native
|
||||
}
|
||||
}
|
||||
|
||||
struct Runner {
|
||||
network: Network,
|
||||
server_fixture: ServerFixture,
|
||||
@@ -114,9 +113,6 @@ impl Runner {
|
||||
}
|
||||
|
||||
pub async fn main() -> Result<()> {
|
||||
#[cfg(feature = "debug")]
|
||||
tracing_subscriber::fmt::init();
|
||||
|
||||
let cli = Cli::parse();
|
||||
let mut runner = Runner::new(&cli)?;
|
||||
|
||||
@@ -231,9 +227,6 @@ pub async fn main() -> Result<()> {
|
||||
// Wait for the network to stabilize
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
#[cfg(feature = "debug")]
|
||||
debug!("Starting bench in group {:?}", config.group);
|
||||
|
||||
let (output, _) = tokio::try_join!(
|
||||
runner.exec_p.bench(BenchCmd {
|
||||
config: config.clone(),
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "TLSNotary MPC-TLS protocol"
|
||||
keywords = ["tls", "mpc", "2pc"]
|
||||
categories = ["cryptography"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
version = "0.1.0-alpha.14-pre"
|
||||
version = "0.1.0-alpha.13-pre"
|
||||
edition = "2021"
|
||||
|
||||
[lints]
|
||||
@@ -33,7 +33,9 @@ mpz-ole = { workspace = true }
|
||||
mpz-share-conversion = { workspace = true }
|
||||
mpz-vm-core = { workspace = true }
|
||||
mpz-memory-core = { workspace = true }
|
||||
mpz-circuits = { workspace = true }
|
||||
|
||||
ludi = { git = "https://github.com/sinui0/ludi", rev = "e511c3b", default-features = false }
|
||||
serio = { workspace = true }
|
||||
|
||||
async-trait = { workspace = true }
|
||||
@@ -55,9 +57,9 @@ pin-project-lite = { workspace = true }
|
||||
web-time = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
mpz-common = { workspace = true, features = ["test-utils"] }
|
||||
mpz-ot = { workspace = true, features = ["ideal"] }
|
||||
mpz-ideal-vm = { workspace = true }
|
||||
mpz-ole = { workspace = true, features = ["test-utils"] }
|
||||
mpz-ot = { workspace = true }
|
||||
mpz-garble = { workspace = true }
|
||||
|
||||
cipher-crate = { package = "cipher", version = "0.4" }
|
||||
generic-array = { workspace = true }
|
||||
@@ -65,6 +67,7 @@ rand_chacha = { workspace = true }
|
||||
rstest = { workspace = true }
|
||||
tls-server-fixture = { workspace = true }
|
||||
tlsn-tls-client = { workspace = true }
|
||||
tlsn-tls-client-async = { workspace = true }
|
||||
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }
|
||||
tokio-util = { workspace = true, features = ["compat"] }
|
||||
tracing-subscriber = { workspace = true }
|
||||
|
||||
@@ -15,6 +15,13 @@ impl MpcTlsError {
|
||||
Self(ErrorRepr::Peer(err.into()))
|
||||
}
|
||||
|
||||
pub(crate) fn actor<E>(err: E) -> Self
|
||||
where
|
||||
E: Into<Box<dyn std::error::Error + Send + Sync + 'static>>,
|
||||
{
|
||||
Self(ErrorRepr::Actor(err.into()))
|
||||
}
|
||||
|
||||
pub(crate) fn state<E>(err: E) -> Self
|
||||
where
|
||||
E: Into<Box<dyn std::error::Error + Send + Sync + 'static>>,
|
||||
@@ -65,6 +72,8 @@ enum ErrorRepr {
|
||||
Peer(Box<dyn std::error::Error + Send + Sync>),
|
||||
#[error("I/O error: {0}")]
|
||||
Io(std::io::Error),
|
||||
#[error("actor error: {0}")]
|
||||
Actor(Box<dyn std::error::Error + Send + Sync>),
|
||||
#[error("state error: {0}")]
|
||||
State(Box<dyn std::error::Error + Send + Sync>),
|
||||
#[error("allocation error: {0}")]
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
mod actor;
|
||||
|
||||
use crate::{
|
||||
error::MpcTlsError,
|
||||
msg::{
|
||||
@@ -12,6 +14,7 @@ use async_trait::async_trait;
|
||||
use hmac_sha256::{MpcPrf, PrfOutput};
|
||||
use ke::KeyExchange;
|
||||
use key_exchange::{self as ke, MpcKeyExchange};
|
||||
use ludi::Context as LudiContext;
|
||||
use mpz_common::{Context, Flush};
|
||||
use mpz_core::{bitvec::BitVec, Block};
|
||||
use mpz_memory_core::DecodeFutureTyped;
|
||||
@@ -38,7 +41,6 @@ use tls_core::{
|
||||
message::{OpaqueMessage, PlainMessage},
|
||||
},
|
||||
suites::SupportedCipherSuite,
|
||||
verify::verify_sig_determine_alg,
|
||||
};
|
||||
use tlsn_core::{
|
||||
connection::{CertBinding, CertBindingV1_2, ServerSignature, TlsVersion, VerifyData},
|
||||
@@ -47,9 +49,13 @@ use tlsn_core::{
|
||||
};
|
||||
use tracing::{debug, instrument, trace, warn};
|
||||
|
||||
/// Controller for MPC-TLS leader.
|
||||
pub type LeaderCtrl = actor::MpcTlsLeaderCtrl;
|
||||
|
||||
/// MPC-TLS leader.
|
||||
#[derive(Debug)]
|
||||
pub struct MpcTlsLeader {
|
||||
self_handle: Option<LeaderCtrl>,
|
||||
config: Config,
|
||||
state: State,
|
||||
|
||||
@@ -107,6 +113,7 @@ impl MpcTlsLeader {
|
||||
|
||||
let is_decrypting = !config.defer_decryption;
|
||||
Self {
|
||||
self_handle: None,
|
||||
config,
|
||||
state: State::Init {
|
||||
ctx,
|
||||
@@ -320,20 +327,12 @@ impl MpcTlsLeader {
|
||||
.map(|cert| CertificateDer(cert.0.clone()))
|
||||
.collect();
|
||||
|
||||
let mut sig_msg = Vec::new();
|
||||
sig_msg.extend_from_slice(&client_random.0);
|
||||
sig_msg.extend_from_slice(&server_random.0);
|
||||
sig_msg.extend_from_slice(server_kx_details.kx_params());
|
||||
|
||||
let server_signature_alg = verify_sig_determine_alg(
|
||||
&server_cert_details.cert_chain()[0],
|
||||
&sig_msg,
|
||||
server_kx_details.kx_sig(),
|
||||
)
|
||||
.expect("only supported signature should have been accepted");
|
||||
|
||||
let server_signature = ServerSignature {
|
||||
alg: server_signature_alg.into(),
|
||||
scheme: server_kx_details
|
||||
.kx_sig()
|
||||
.scheme
|
||||
.try_into()
|
||||
.expect("only supported signature scheme should have been accepted"),
|
||||
sig: server_kx_details.kx_sig().sig.0.clone(),
|
||||
};
|
||||
|
||||
@@ -370,42 +369,18 @@ impl MpcTlsLeader {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Enables or disables the decryption of any incoming messages.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `enable` - Whether to enable or disable decryption.
|
||||
/// Defers decryption of any incoming messages.
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
pub fn enable_decryption(&mut self, enable: bool) -> Result<(), MpcTlsError> {
|
||||
self.is_decrypting = enable;
|
||||
|
||||
if enable {
|
||||
self.notifier.set();
|
||||
} else {
|
||||
self.notifier.clear();
|
||||
}
|
||||
pub async fn defer_decryption(&mut self) -> Result<(), MpcTlsError> {
|
||||
self.is_decrypting = false;
|
||||
self.notifier.clear();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns if incoming messages are decrypted.
|
||||
pub fn is_decrypting(&self) -> bool {
|
||||
self.is_decrypting
|
||||
}
|
||||
|
||||
/// Returns the context and transcript.
|
||||
///
|
||||
/// Should be called after a successful call to [`Backend::server_closed`].
|
||||
pub fn finish(&mut self) -> Option<(Context, TlsTranscript)> {
|
||||
match self.state.take() {
|
||||
State::Closed {
|
||||
ctx, transcript, ..
|
||||
} => Some((ctx, transcript)),
|
||||
state => {
|
||||
self.state = state;
|
||||
None
|
||||
}
|
||||
}
|
||||
/// Stops the actor.
|
||||
pub fn stop(&mut self, ctx: &mut LudiContext<Self>) {
|
||||
ctx.stop();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
1779
crates/mpc-tls/src/leader/actor.rs
Normal file
1779
crates/mpc-tls/src/leader/actor.rs
Normal file
File diff suppressed because it is too large
Load Diff
@@ -16,7 +16,7 @@ pub(crate) mod utils;
|
||||
pub use config::{Config, ConfigBuilder, ConfigBuilderError};
|
||||
pub use error::MpcTlsError;
|
||||
pub use follower::MpcTlsFollower;
|
||||
pub use leader::MpcTlsLeader;
|
||||
pub use leader::{LeaderCtrl, MpcTlsLeader};
|
||||
|
||||
use std::{future::Future, pin::Pin, sync::Arc};
|
||||
|
||||
|
||||
@@ -487,7 +487,7 @@ impl RecordLayer {
|
||||
|
||||
sent_records.push(Record {
|
||||
seq: op.seq,
|
||||
typ: op.typ.into(),
|
||||
typ: op.typ,
|
||||
plaintext: op.plaintext,
|
||||
explicit_nonce: op.explicit_nonce,
|
||||
ciphertext,
|
||||
@@ -505,7 +505,7 @@ impl RecordLayer {
|
||||
|
||||
recv_records.push(Record {
|
||||
seq: op.seq,
|
||||
typ: op.typ.into(),
|
||||
typ: op.typ,
|
||||
plaintext,
|
||||
explicit_nonce: op.explicit_nonce,
|
||||
ciphertext: op.ciphertext,
|
||||
@@ -578,7 +578,7 @@ impl RecordLayer {
|
||||
|
||||
recv_records.push(Record {
|
||||
seq: op.seq,
|
||||
typ: op.typ.into(),
|
||||
typ: op.typ,
|
||||
plaintext,
|
||||
explicit_nonce: op.explicit_nonce,
|
||||
ciphertext: op.ciphertext,
|
||||
|
||||
@@ -456,8 +456,9 @@ mod tests {
|
||||
};
|
||||
use mpz_common::context::test_st_context;
|
||||
use mpz_core::Block;
|
||||
use mpz_ideal_vm::IdealVm;
|
||||
use mpz_memory_core::binary::U8;
|
||||
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
|
||||
use mpz_memory_core::{binary::U8, correlated::Delta};
|
||||
use mpz_ot::ideal::cot::ideal_cot;
|
||||
use mpz_share_conversion::ideal::ideal_share_convert;
|
||||
use rand::{rngs::StdRng, SeedableRng};
|
||||
use rstest::*;
|
||||
@@ -573,8 +574,13 @@ mod tests {
|
||||
}
|
||||
|
||||
fn create_vm(key: [u8; 16], iv: [u8; 4]) -> ((impl Vm<Binary>, Vars), (impl Vm<Binary>, Vars)) {
|
||||
let mut vm_0 = IdealVm::new();
|
||||
let mut vm_1 = IdealVm::new();
|
||||
let mut rng = StdRng::seed_from_u64(0);
|
||||
let block = Block::random(&mut rng);
|
||||
let (sender, receiver) = ideal_cot(block);
|
||||
|
||||
let delta = Delta::new(block);
|
||||
let mut vm_0 = Garbler::new(sender, [0u8; 16], delta);
|
||||
let mut vm_1 = Evaluator::new(receiver);
|
||||
|
||||
let key_ref_0 = vm_0.alloc::<Array<U8, 16>>().unwrap();
|
||||
vm_0.mark_public(key_ref_0).unwrap();
|
||||
|
||||
168
crates/mpc-tls/tests/test.rs
Normal file
168
crates/mpc-tls/tests/test.rs
Normal file
@@ -0,0 +1,168 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use futures::{AsyncReadExt, AsyncWriteExt};
|
||||
use mpc_tls::{Config, MpcTlsFollower, MpcTlsLeader};
|
||||
use mpz_common::context::test_mt_context;
|
||||
use mpz_core::Block;
|
||||
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
|
||||
use mpz_memory_core::correlated::Delta;
|
||||
use mpz_ot::{
|
||||
cot::{DerandCOTReceiver, DerandCOTSender},
|
||||
ideal::rcot::ideal_rcot,
|
||||
rcot::shared::{SharedRCOTReceiver, SharedRCOTSender},
|
||||
};
|
||||
use rand::{rngs::StdRng, Rng, SeedableRng};
|
||||
use rustls_pki_types::CertificateDer;
|
||||
use tls_client::RootCertStore;
|
||||
use tls_client_async::bind_client;
|
||||
use tls_server_fixture::{bind_test_server_hyper, CA_CERT_DER, SERVER_DOMAIN};
|
||||
use tokio::sync::Mutex;
|
||||
use tokio_util::compat::TokioAsyncReadCompatExt;
|
||||
use webpki::anchor_from_trusted_cert;
|
||||
|
||||
const CA_CERT: CertificateDer = CertificateDer::from_slice(CA_CERT_DER);
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
|
||||
#[ignore = "expensive"]
|
||||
async fn mpc_tls_test() {
|
||||
tracing_subscriber::fmt::init();
|
||||
|
||||
let config = Config::builder()
|
||||
.defer_decryption(false)
|
||||
.max_sent(1 << 13)
|
||||
.max_recv_online(1 << 13)
|
||||
.max_recv(1 << 13)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let (leader, follower) = build_pair(config);
|
||||
|
||||
tokio::try_join!(
|
||||
tokio::spawn(leader_task(leader)),
|
||||
tokio::spawn(follower_task(follower))
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
async fn leader_task(mut leader: MpcTlsLeader) {
|
||||
leader.alloc().unwrap();
|
||||
|
||||
leader.preprocess().await.unwrap();
|
||||
|
||||
let (leader_ctrl, leader_fut) = leader.run();
|
||||
tokio::spawn(async { leader_fut.await.unwrap() });
|
||||
|
||||
let config = tls_client::ClientConfig::builder()
|
||||
.with_safe_defaults()
|
||||
.with_root_certificates(RootCertStore {
|
||||
roots: vec![anchor_from_trusted_cert(&CA_CERT).unwrap().to_owned()],
|
||||
})
|
||||
.with_no_client_auth();
|
||||
|
||||
let server_name = SERVER_DOMAIN.try_into().unwrap();
|
||||
|
||||
let client = tls_client::ClientConnection::new(
|
||||
Arc::new(config),
|
||||
Box::new(leader_ctrl.clone()),
|
||||
server_name,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let (client_socket, server_socket) = tokio::io::duplex(1 << 16);
|
||||
tokio::spawn(bind_test_server_hyper(server_socket.compat()));
|
||||
|
||||
let (mut conn, conn_fut) = bind_client(client_socket.compat(), client);
|
||||
let handle = tokio::spawn(async { conn_fut.await.unwrap() });
|
||||
|
||||
let msg = concat!(
|
||||
"POST /echo HTTP/1.1\r\n",
|
||||
"Host: test-server.io\r\n",
|
||||
"Connection: keep-alive\r\n",
|
||||
"Accept-Encoding: identity\r\n",
|
||||
"Content-Length: 5\r\n",
|
||||
"\r\n",
|
||||
"hello",
|
||||
"\r\n"
|
||||
);
|
||||
|
||||
conn.write_all(msg.as_bytes()).await.unwrap();
|
||||
|
||||
let mut buf = vec![0u8; 48];
|
||||
conn.read_exact(&mut buf).await.unwrap();
|
||||
|
||||
leader_ctrl.defer_decryption().await.unwrap();
|
||||
|
||||
let msg = concat!(
|
||||
"POST /echo HTTP/1.1\r\n",
|
||||
"Host: test-server.io\r\n",
|
||||
"Connection: close\r\n",
|
||||
"Accept-Encoding: identity\r\n",
|
||||
"Content-Length: 5\r\n",
|
||||
"\r\n",
|
||||
"hello",
|
||||
"\r\n"
|
||||
);
|
||||
|
||||
conn.write_all(msg.as_bytes()).await.unwrap();
|
||||
conn.close().await.unwrap();
|
||||
|
||||
let mut buf = vec![0u8; 1024];
|
||||
conn.read_to_end(&mut buf).await.unwrap();
|
||||
|
||||
leader_ctrl.stop().await.unwrap();
|
||||
|
||||
handle.await.unwrap();
|
||||
}
|
||||
|
||||
async fn follower_task(mut follower: MpcTlsFollower) {
|
||||
follower.alloc().unwrap();
|
||||
follower.preprocess().await.unwrap();
|
||||
follower.run().await.unwrap();
|
||||
}
|
||||
|
||||
fn build_pair(config: Config) -> (MpcTlsLeader, MpcTlsFollower) {
|
||||
let mut rng = StdRng::seed_from_u64(0);
|
||||
|
||||
let (mut mt_a, mut mt_b) = test_mt_context(8);
|
||||
|
||||
let ctx_a = futures::executor::block_on(mt_a.new_context()).unwrap();
|
||||
let ctx_b = futures::executor::block_on(mt_b.new_context()).unwrap();
|
||||
|
||||
let delta_a = Delta::new(Block::random(&mut rng));
|
||||
let delta_b = Delta::new(Block::random(&mut rng));
|
||||
|
||||
let (rcot_send_a, rcot_recv_b) = ideal_rcot(Block::random(&mut rng), delta_a.into_inner());
|
||||
let (rcot_send_b, rcot_recv_a) = ideal_rcot(Block::random(&mut rng), delta_b.into_inner());
|
||||
|
||||
let rcot_send_a = SharedRCOTSender::new(rcot_send_a);
|
||||
let rcot_send_b = SharedRCOTSender::new(rcot_send_b);
|
||||
let rcot_recv_a = SharedRCOTReceiver::new(rcot_recv_a);
|
||||
let rcot_recv_b = SharedRCOTReceiver::new(rcot_recv_b);
|
||||
|
||||
let mpc_a = Arc::new(Mutex::new(Garbler::new(
|
||||
DerandCOTSender::new(rcot_send_a.clone()),
|
||||
rand::rng().random(),
|
||||
delta_a,
|
||||
)));
|
||||
let mpc_b = Arc::new(Mutex::new(Evaluator::new(DerandCOTReceiver::new(
|
||||
rcot_recv_b.clone(),
|
||||
))));
|
||||
|
||||
let leader = MpcTlsLeader::new(
|
||||
config.clone(),
|
||||
ctx_a,
|
||||
mpc_a,
|
||||
(rcot_send_a.clone(), rcot_send_a.clone(), rcot_send_a),
|
||||
rcot_recv_a,
|
||||
);
|
||||
|
||||
let follower = MpcTlsFollower::new(
|
||||
config,
|
||||
ctx_b,
|
||||
mpc_b,
|
||||
rcot_send_b,
|
||||
(rcot_recv_b.clone(), rcot_recv_b.clone(), rcot_recv_b),
|
||||
);
|
||||
|
||||
(leader, follower)
|
||||
}
|
||||
@@ -5,7 +5,7 @@ description = "A TLS backend trait for TLSNotary"
|
||||
keywords = ["tls", "mpc", "2pc"]
|
||||
categories = ["cryptography"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
version = "0.1.0-alpha.14-pre"
|
||||
version = "0.1.0-alpha.13-pre"
|
||||
edition = "2021"
|
||||
|
||||
[lints]
|
||||
|
||||
39
crates/tls/client-async/Cargo.toml
Normal file
39
crates/tls/client-async/Cargo.toml
Normal file
@@ -0,0 +1,39 @@
|
||||
[package]
|
||||
name = "tlsn-tls-client-async"
|
||||
authors = ["TLSNotary Team"]
|
||||
description = "An async TLS client for TLSNotary"
|
||||
keywords = ["tls", "mpc", "2pc", "client", "async"]
|
||||
categories = ["cryptography"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
version = "0.1.0-alpha.13-pre"
|
||||
edition = "2021"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[lib]
|
||||
name = "tls_client_async"
|
||||
|
||||
[features]
|
||||
default = ["tracing"]
|
||||
tracing = ["dep:tracing"]
|
||||
|
||||
[dependencies]
|
||||
tlsn-tls-client = { workspace = true }
|
||||
|
||||
bytes = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
tokio-util = { workspace = true, features = ["io", "compat"] }
|
||||
tracing = { workspace = true, optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tls-server-fixture = { workspace = true }
|
||||
|
||||
http-body-util = { workspace = true }
|
||||
hyper = { workspace = true, features = ["client", "http1"] }
|
||||
hyper-util = { workspace = true, features = ["full"] }
|
||||
rstest = { workspace = true }
|
||||
tokio = { workspace = true, features = ["rt", "rt-multi-thread", "macros"] }
|
||||
rustls-webpki = { workspace = true }
|
||||
rustls-pki-types = { workspace = true }
|
||||
89
crates/tls/client-async/src/conn.rs
Normal file
89
crates/tls/client-async/src/conn.rs
Normal file
@@ -0,0 +1,89 @@
|
||||
use bytes::Bytes;
|
||||
use futures::{
|
||||
channel::mpsc::{Receiver, SendError, Sender},
|
||||
sink::SinkMapErr,
|
||||
AsyncRead, AsyncWrite, SinkExt,
|
||||
};
|
||||
use std::{
|
||||
io::{Error as IoError, ErrorKind as IoErrorKind},
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
use tokio_util::{
|
||||
compat::{Compat, TokioAsyncReadCompatExt, TokioAsyncWriteCompatExt},
|
||||
io::{CopyToBytes, SinkWriter, StreamReader},
|
||||
};
|
||||
|
||||
type CompatSinkWriter =
|
||||
Compat<SinkWriter<CopyToBytes<SinkMapErr<Sender<Bytes>, fn(SendError) -> IoError>>>>;
|
||||
|
||||
/// A TLS connection to a server.
|
||||
///
|
||||
/// This type implements `AsyncRead` and `AsyncWrite` and can be used to
|
||||
/// communicate with a server using TLS.
|
||||
///
|
||||
/// # Note
|
||||
///
|
||||
/// This connection is closed on a best-effort basis if this is dropped. To
|
||||
/// ensure a clean close, you should call
|
||||
/// [`AsyncWriteExt::close`](futures::io::AsyncWriteExt::close) to close the
|
||||
/// connection.
|
||||
#[derive(Debug)]
|
||||
pub struct TlsConnection {
|
||||
/// The data to be transmitted to the server is sent to this sink.
|
||||
tx_sender: CompatSinkWriter,
|
||||
/// The data to be received from the server is received from this stream.
|
||||
rx_receiver: Compat<StreamReader<Receiver<Result<Bytes, IoError>>, Bytes>>,
|
||||
}
|
||||
|
||||
impl TlsConnection {
|
||||
/// Creates a new TLS connection.
|
||||
pub(crate) fn new(
|
||||
tx_sender: Sender<Bytes>,
|
||||
rx_receiver: Receiver<Result<Bytes, IoError>>,
|
||||
) -> Self {
|
||||
fn convert_error(err: SendError) -> IoError {
|
||||
if err.is_disconnected() {
|
||||
IoErrorKind::BrokenPipe.into()
|
||||
} else {
|
||||
IoErrorKind::WouldBlock.into()
|
||||
}
|
||||
}
|
||||
|
||||
Self {
|
||||
tx_sender: SinkWriter::new(CopyToBytes::new(
|
||||
tx_sender.sink_map_err(convert_error as fn(SendError) -> IoError),
|
||||
))
|
||||
.compat_write(),
|
||||
rx_receiver: StreamReader::new(rx_receiver).compat(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl AsyncRead for TlsConnection {
|
||||
fn poll_read(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &mut [u8],
|
||||
) -> Poll<Result<usize, IoError>> {
|
||||
Pin::new(&mut self.rx_receiver).poll_read(cx, buf)
|
||||
}
|
||||
}
|
||||
|
||||
impl AsyncWrite for TlsConnection {
|
||||
fn poll_write(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &[u8],
|
||||
) -> Poll<Result<usize, IoError>> {
|
||||
Pin::new(&mut self.tx_sender).poll_write(cx, buf)
|
||||
}
|
||||
|
||||
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), IoError>> {
|
||||
Pin::new(&mut self.tx_sender).poll_flush(cx)
|
||||
}
|
||||
|
||||
fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), IoError>> {
|
||||
Pin::new(&mut self.tx_sender).poll_close(cx)
|
||||
}
|
||||
}
|
||||
269
crates/tls/client-async/src/lib.rs
Normal file
269
crates/tls/client-async/src/lib.rs
Normal file
@@ -0,0 +1,269 @@
|
||||
//! Provides a TLS client which exposes an async socket.
|
||||
//!
|
||||
//! This library provides the [bind_client] function which attaches a TLS client
|
||||
//! to a socket connection and then exposes a [TlsConnection] object, which
|
||||
//! provides an async socket API for reading and writing cleartext. The TLS
|
||||
//! client will then automatically encrypt and decrypt traffic and forward that
|
||||
//! to the provided socket.
|
||||
|
||||
#![deny(missing_docs, unreachable_pub, unused_must_use)]
|
||||
#![deny(clippy::all)]
|
||||
#![forbid(unsafe_code)]
|
||||
|
||||
mod conn;
|
||||
|
||||
use bytes::{Buf, Bytes};
|
||||
use futures::{
|
||||
channel::mpsc, future::Fuse, select_biased, stream::Next, AsyncRead, AsyncReadExt, AsyncWrite,
|
||||
AsyncWriteExt, Future, FutureExt, SinkExt, StreamExt,
|
||||
};
|
||||
|
||||
use std::{
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
#[cfg(feature = "tracing")]
|
||||
use tracing::{debug, debug_span, error, trace, warn, Instrument};
|
||||
|
||||
use tls_client::ClientConnection;
|
||||
|
||||
pub use conn::TlsConnection;
|
||||
|
||||
const RX_TLS_BUF_SIZE: usize = 1 << 13; // 8 KiB
|
||||
const RX_BUF_SIZE: usize = 1 << 13; // 8 KiB
|
||||
|
||||
/// An error that can occur during a TLS connection.
|
||||
#[allow(missing_docs)]
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum ConnectionError {
|
||||
#[error(transparent)]
|
||||
TlsError(#[from] tls_client::Error),
|
||||
#[error(transparent)]
|
||||
IOError(#[from] std::io::Error),
|
||||
}
|
||||
|
||||
/// Closed connection data.
|
||||
#[derive(Debug)]
|
||||
pub struct ClosedConnection {
|
||||
/// The connection for the client
|
||||
pub client: ClientConnection,
|
||||
/// Sent plaintext bytes
|
||||
pub sent: Vec<u8>,
|
||||
/// Received plaintext bytes
|
||||
pub recv: Vec<u8>,
|
||||
}
|
||||
|
||||
/// A future which runs the TLS connection to completion.
|
||||
///
|
||||
/// This future must be polled in order for the connection to make progress.
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct ConnectionFuture {
|
||||
fut: Pin<Box<dyn Future<Output = Result<ClosedConnection, ConnectionError>> + Send>>,
|
||||
}
|
||||
|
||||
impl Future for ConnectionFuture {
|
||||
type Output = Result<ClosedConnection, ConnectionError>;
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
self.fut.poll_unpin(cx)
|
||||
}
|
||||
}
|
||||
|
||||
/// Binds a client connection to the provided socket.
|
||||
///
|
||||
/// Returns a connection handle and a future which runs the connection to
|
||||
/// completion.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Any connection errors that occur will be returned from the future, not
|
||||
/// [`TlsConnection`].
|
||||
pub fn bind_client<T: AsyncRead + AsyncWrite + Send + Unpin + 'static>(
|
||||
socket: T,
|
||||
mut client: ClientConnection,
|
||||
) -> (TlsConnection, ConnectionFuture) {
|
||||
let (tx_sender, mut tx_receiver) = mpsc::channel(1 << 14);
|
||||
let (mut rx_sender, rx_receiver) = mpsc::channel(1 << 14);
|
||||
|
||||
let conn = TlsConnection::new(tx_sender, rx_receiver);
|
||||
|
||||
let fut = async move {
|
||||
client.start().await?;
|
||||
let mut notify = client.get_notify().await?;
|
||||
|
||||
let (mut server_rx, mut server_tx) = socket.split();
|
||||
|
||||
let mut rx_tls_buf = [0u8; RX_TLS_BUF_SIZE];
|
||||
let mut rx_buf = [0u8; RX_BUF_SIZE];
|
||||
|
||||
let mut handshake_done = false;
|
||||
let mut client_closed = false;
|
||||
let mut server_closed = false;
|
||||
|
||||
let mut sent = Vec::with_capacity(1024);
|
||||
let mut recv = Vec::with_capacity(1024);
|
||||
|
||||
let mut rx_tls_fut = server_rx.read(&mut rx_tls_buf).fuse();
|
||||
// We don't start writing application data until the handshake is complete.
|
||||
let mut tx_recv_fut: Fuse<Next<'_, mpsc::Receiver<Bytes>>> = Fuse::terminated();
|
||||
|
||||
// Runs both the tx and rx halves of the connection to completion.
|
||||
// This loop does not terminate until the *SERVER* closes the connection and
|
||||
// we've processed all received data. If an error occurs, the `TlsConnection`
|
||||
// channels will be closed and the error will be returned from this future.
|
||||
'conn: loop {
|
||||
// Write all pending TLS data to the server.
|
||||
if client.wants_write() && !client_closed {
|
||||
#[cfg(feature = "tracing")]
|
||||
trace!("client wants to write");
|
||||
while client.wants_write() {
|
||||
let _sent = client.write_tls_async(&mut server_tx).await?;
|
||||
#[cfg(feature = "tracing")]
|
||||
trace!("sent {} tls bytes to server", _sent);
|
||||
}
|
||||
server_tx.flush().await?;
|
||||
}
|
||||
|
||||
// Forward received plaintext to `TlsConnection`.
|
||||
while !client.plaintext_is_empty() {
|
||||
let read = client.read_plaintext(&mut rx_buf)?;
|
||||
recv.extend(&rx_buf[..read]);
|
||||
// Ignore if the receiver has hung up.
|
||||
_ = rx_sender
|
||||
.send(Ok(Bytes::copy_from_slice(&rx_buf[..read])))
|
||||
.await;
|
||||
#[cfg(feature = "tracing")]
|
||||
trace!("forwarded {} plaintext bytes to conn", read);
|
||||
}
|
||||
|
||||
if !client.is_handshaking() && !handshake_done {
|
||||
#[cfg(feature = "tracing")]
|
||||
debug!("handshake complete");
|
||||
handshake_done = true;
|
||||
// Start reading application data that needs to be transmitted from the
|
||||
// `TlsConnection`.
|
||||
tx_recv_fut = tx_receiver.next().fuse();
|
||||
}
|
||||
|
||||
if server_closed && client.plaintext_is_empty() && client.is_empty().await? {
|
||||
break 'conn;
|
||||
}
|
||||
|
||||
select_biased! {
|
||||
// Reads TLS data from the server and writes it into the client.
|
||||
received = &mut rx_tls_fut => {
|
||||
let received = received?;
|
||||
#[cfg(feature = "tracing")]
|
||||
trace!("received {} tls bytes from server", received);
|
||||
|
||||
// Loop until we've processed all the data we received in this read.
|
||||
// Note that we must make one iteration even if `received == 0`.
|
||||
let mut processed = 0;
|
||||
let mut reader = rx_tls_buf[..received].reader();
|
||||
loop {
|
||||
processed += client.read_tls(&mut reader)?;
|
||||
client.process_new_packets().await?;
|
||||
|
||||
debug_assert!(processed <= received);
|
||||
if processed >= received {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "tracing")]
|
||||
trace!("processed {} tls bytes from server", processed);
|
||||
|
||||
// By convention if `AsyncRead::read` returns 0, it means EOF, i.e. the peer
|
||||
// has closed the socket.
|
||||
if received == 0 {
|
||||
#[cfg(feature = "tracing")]
|
||||
debug!("server closed connection");
|
||||
server_closed = true;
|
||||
client.server_closed().await?;
|
||||
// Do not read from the socket again.
|
||||
rx_tls_fut = Fuse::terminated();
|
||||
} else {
|
||||
// Reset the read future so next iteration we can read again.
|
||||
rx_tls_fut = server_rx.read(&mut rx_tls_buf).fuse();
|
||||
}
|
||||
}
|
||||
// If we receive None from `TlsConnection`, it has closed, so we
|
||||
// send a close_notify to the server.
|
||||
data = &mut tx_recv_fut => {
|
||||
if let Some(data) = data {
|
||||
#[cfg(feature = "tracing")]
|
||||
trace!("writing {} plaintext bytes to client", data.len());
|
||||
|
||||
sent.extend(&data);
|
||||
client
|
||||
.write_all_plaintext(&data)
|
||||
.await?;
|
||||
|
||||
tx_recv_fut = tx_receiver.next().fuse();
|
||||
} else {
|
||||
if !server_closed {
|
||||
if let Err(e) = send_close_notify(&mut client, &mut server_tx).await {
|
||||
#[cfg(feature = "tracing")]
|
||||
warn!("failed to send close_notify to server: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
client_closed = true;
|
||||
|
||||
tx_recv_fut = Fuse::terminated();
|
||||
}
|
||||
}
|
||||
// Waits for a notification from the backend that it is ready to decrypt data.
|
||||
_ = &mut notify => {
|
||||
#[cfg(feature = "tracing")]
|
||||
trace!("backend is ready to decrypt");
|
||||
|
||||
client.process_new_packets().await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "tracing")]
|
||||
debug!("client shutdown");
|
||||
|
||||
_ = server_tx.close().await;
|
||||
tx_receiver.close();
|
||||
rx_sender.close_channel();
|
||||
|
||||
#[cfg(feature = "tracing")]
|
||||
trace!(
|
||||
"server close notify: {}, sent: {}, recv: {}",
|
||||
client.received_close_notify(),
|
||||
sent.len(),
|
||||
recv.len()
|
||||
);
|
||||
|
||||
Ok(ClosedConnection { client, sent, recv })
|
||||
};
|
||||
|
||||
#[cfg(feature = "tracing")]
|
||||
let fut = fut.instrument(debug_span!("tls_connection"));
|
||||
|
||||
let fut = ConnectionFuture { fut: Box::pin(fut) };
|
||||
|
||||
(conn, fut)
|
||||
}
|
||||
|
||||
async fn send_close_notify(
|
||||
client: &mut ClientConnection,
|
||||
server_tx: &mut (impl AsyncWrite + Unpin),
|
||||
) -> Result<(), ConnectionError> {
|
||||
#[cfg(feature = "tracing")]
|
||||
trace!("sending close_notify to server");
|
||||
client.send_close_notify().await?;
|
||||
client.process_new_packets().await?;
|
||||
|
||||
// Flush all remaining plaintext
|
||||
while client.wants_write() {
|
||||
client.write_tls_async(server_tx).await?;
|
||||
}
|
||||
server_tx.flush().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
438
crates/tls/client-async/tests/test.rs
Normal file
438
crates/tls/client-async/tests/test.rs
Normal file
@@ -0,0 +1,438 @@
|
||||
use std::{str, sync::Arc};
|
||||
|
||||
use core::future::Future;
|
||||
use futures::{AsyncReadExt, AsyncWriteExt};
|
||||
use http_body_util::{BodyExt as _, Full};
|
||||
use hyper::{body::Bytes, Request, StatusCode};
|
||||
use hyper_util::rt::TokioIo;
|
||||
use rstest::{fixture, rstest};
|
||||
use rustls_pki_types::CertificateDer;
|
||||
use tls_client::{ClientConfig, ClientConnection, RustCryptoBackend, ServerName};
|
||||
use tls_client_async::{bind_client, ClosedConnection, ConnectionError, TlsConnection};
|
||||
use tls_server_fixture::{
|
||||
bind_test_server, bind_test_server_hyper, APP_RECORD_LENGTH, CA_CERT_DER, CLOSE_DELAY,
|
||||
SERVER_DOMAIN,
|
||||
};
|
||||
use tokio::task::JoinHandle;
|
||||
use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
|
||||
use webpki::anchor_from_trusted_cert;
|
||||
|
||||
const CA_CERT: CertificateDer = CertificateDer::from_slice(CA_CERT_DER);
|
||||
|
||||
// An established client TLS connection
|
||||
struct TlsFixture {
|
||||
client_tls_conn: TlsConnection,
|
||||
// a handle that must be `.await`ed to get the result of a TLS connection
|
||||
closed_tls_task: JoinHandle<Result<ClosedConnection, ConnectionError>>,
|
||||
}
|
||||
|
||||
// Sets up a TLS connection between client and server and sends a hello message
|
||||
#[fixture]
|
||||
async fn set_up_tls() -> TlsFixture {
|
||||
let (client_socket, server_socket) = tokio::io::duplex(1 << 16);
|
||||
|
||||
let _server_task = tokio::spawn(bind_test_server(server_socket.compat()));
|
||||
|
||||
let mut root_store = tls_client::RootCertStore::empty();
|
||||
root_store
|
||||
.roots
|
||||
.push(anchor_from_trusted_cert(&CA_CERT).unwrap().to_owned());
|
||||
let config = ClientConfig::builder()
|
||||
.with_safe_defaults()
|
||||
.with_root_certificates(root_store)
|
||||
.with_no_client_auth();
|
||||
let client = ClientConnection::new(
|
||||
Arc::new(config),
|
||||
Box::new(RustCryptoBackend::new()),
|
||||
ServerName::try_from(SERVER_DOMAIN).unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let (mut client_tls_conn, tls_fut) = bind_client(client_socket.compat(), client);
|
||||
|
||||
let closed_tls_task = tokio::spawn(tls_fut);
|
||||
|
||||
client_tls_conn
|
||||
.write_all(&pad("expecting you to send back hello".to_string()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// give the server some time to respond
|
||||
std::thread::sleep(std::time::Duration::from_millis(10));
|
||||
|
||||
let mut plaintext = vec![0u8; 320];
|
||||
let n = client_tls_conn.read(&mut plaintext).await.unwrap();
|
||||
let s = str::from_utf8(&plaintext[0..n]).unwrap();
|
||||
|
||||
assert_eq!(s, "hello");
|
||||
|
||||
TlsFixture {
|
||||
client_tls_conn,
|
||||
closed_tls_task,
|
||||
}
|
||||
}
|
||||
|
||||
// Expect the async tls client wrapped in `hyper::client` to make a successful
|
||||
// request and receive the expected response
|
||||
#[tokio::test]
|
||||
async fn test_hyper_ok() {
|
||||
let (client_socket, server_socket) = tokio::io::duplex(1 << 16);
|
||||
|
||||
let server_task = tokio::spawn(bind_test_server_hyper(server_socket.compat()));
|
||||
|
||||
let mut root_store = tls_client::RootCertStore::empty();
|
||||
root_store
|
||||
.roots
|
||||
.push(anchor_from_trusted_cert(&CA_CERT).unwrap().to_owned());
|
||||
let config = ClientConfig::builder()
|
||||
.with_safe_defaults()
|
||||
.with_root_certificates(root_store)
|
||||
.with_no_client_auth();
|
||||
let client = ClientConnection::new(
|
||||
Arc::new(config),
|
||||
Box::new(RustCryptoBackend::new()),
|
||||
ServerName::try_from(SERVER_DOMAIN).unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let (conn, tls_fut) = bind_client(client_socket.compat(), client);
|
||||
|
||||
let closed_tls_task = tokio::spawn(tls_fut);
|
||||
|
||||
let (mut request_sender, connection) =
|
||||
hyper::client::conn::http1::handshake(TokioIo::new(conn.compat()))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
tokio::spawn(connection);
|
||||
|
||||
let request = Request::builder()
|
||||
.uri(format!("https://{SERVER_DOMAIN}/echo"))
|
||||
.header("Host", SERVER_DOMAIN)
|
||||
.header("Connection", "close")
|
||||
.method("POST")
|
||||
.body(Full::<Bytes>::new("hello".into()))
|
||||
.unwrap();
|
||||
|
||||
let response = request_sender.send_request(request).await.unwrap();
|
||||
|
||||
assert!(response.status() == StatusCode::OK);
|
||||
|
||||
// Process the response body
|
||||
response.into_body().collect().await.unwrap().to_bytes();
|
||||
|
||||
let _ = server_task.await.unwrap();
|
||||
|
||||
let closed_conn = closed_tls_task.await.unwrap().unwrap();
|
||||
|
||||
assert!(closed_conn.client.received_close_notify());
|
||||
}
|
||||
|
||||
// Expect a clean TLS connection closure when server responds to the client's
|
||||
// close_notify but doesn't close the socket
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_ok_server_no_socket_close(set_up_tls: impl Future<Output = TlsFixture>) {
|
||||
let TlsFixture {
|
||||
mut client_tls_conn,
|
||||
closed_tls_task,
|
||||
} = set_up_tls.await;
|
||||
|
||||
// instruct the server to send close_notify back to us after 10 ms
|
||||
client_tls_conn
|
||||
.write_all(&pad("send_close_notify".to_string()))
|
||||
.await
|
||||
.unwrap();
|
||||
client_tls_conn.flush().await.unwrap();
|
||||
|
||||
// closing `client_tls_conn` will cause close_notify to be sent by the client;
|
||||
client_tls_conn.close().await.unwrap();
|
||||
|
||||
let closed_conn = closed_tls_task.await.unwrap().unwrap();
|
||||
|
||||
assert!(closed_conn.client.received_close_notify());
|
||||
}
|
||||
|
||||
// Expect a clean TLS connection closure when server responds to the client's
|
||||
// close_notify AND also closes the socket
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_ok_server_socket_close(set_up_tls: impl Future<Output = TlsFixture>) {
|
||||
let TlsFixture {
|
||||
mut client_tls_conn,
|
||||
closed_tls_task,
|
||||
} = set_up_tls.await;
|
||||
|
||||
// instruct the server to send close_notify back to us AND close the socket
|
||||
// after 10 ms
|
||||
client_tls_conn
|
||||
.write_all(&pad("send_close_notify_and_close_socket".to_string()))
|
||||
.await
|
||||
.unwrap();
|
||||
client_tls_conn.flush().await.unwrap();
|
||||
|
||||
// closing `client_tls_conn` will cause close_notify to be sent by the client;
|
||||
client_tls_conn.close().await.unwrap();
|
||||
|
||||
let closed_conn = closed_tls_task.await.unwrap().unwrap();
|
||||
|
||||
assert!(closed_conn.client.received_close_notify());
|
||||
}
|
||||
|
||||
// Expect a clean TLS connection closure when server sends close_notify first
|
||||
// but doesn't close the socket
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_ok_server_close_notify(set_up_tls: impl Future<Output = TlsFixture>) {
|
||||
let TlsFixture {
|
||||
mut client_tls_conn,
|
||||
closed_tls_task,
|
||||
} = set_up_tls.await;
|
||||
|
||||
// instruct the server to send close_notify back to us after 10 ms
|
||||
client_tls_conn
|
||||
.write_all(&pad("send_close_notify".to_string()))
|
||||
.await
|
||||
.unwrap();
|
||||
client_tls_conn.flush().await.unwrap();
|
||||
|
||||
// give enough time for server's close_notify to arrive
|
||||
tokio::time::sleep(std::time::Duration::from_millis(20)).await;
|
||||
|
||||
client_tls_conn.close().await.unwrap();
|
||||
|
||||
let closed_conn = closed_tls_task.await.unwrap().unwrap();
|
||||
|
||||
assert!(closed_conn.client.received_close_notify());
|
||||
}
|
||||
|
||||
// Expect a clean TLS connection closure when server sends close_notify first
|
||||
// AND also closes the socket
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_ok_server_close_notify_and_socket_close(
|
||||
set_up_tls: impl Future<Output = TlsFixture>,
|
||||
) {
|
||||
let TlsFixture {
|
||||
mut client_tls_conn,
|
||||
closed_tls_task,
|
||||
} = set_up_tls.await;
|
||||
|
||||
// instruct the server to send close_notify back to us after 10 ms
|
||||
client_tls_conn
|
||||
.write_all(&pad("send_close_notify_and_close_socket".to_string()))
|
||||
.await
|
||||
.unwrap();
|
||||
client_tls_conn.flush().await.unwrap();
|
||||
|
||||
// give enough time for server's close_notify to arrive
|
||||
tokio::time::sleep(std::time::Duration::from_millis(20)).await;
|
||||
|
||||
client_tls_conn.close().await.unwrap();
|
||||
|
||||
let closed_conn = closed_tls_task.await.unwrap().unwrap();
|
||||
|
||||
assert!(closed_conn.client.received_close_notify());
|
||||
}
|
||||
|
||||
// Expect to be able to read the data after server closes the socket abruptly
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_ok_read_after_close(set_up_tls: impl Future<Output = TlsFixture>) {
|
||||
let TlsFixture {
|
||||
mut client_tls_conn,
|
||||
..
|
||||
} = set_up_tls.await;
|
||||
|
||||
// instruct the server to send us a hello message
|
||||
client_tls_conn
|
||||
.write_all(&pad("send a hello message".to_string()))
|
||||
.await
|
||||
.unwrap();
|
||||
client_tls_conn.flush().await.unwrap();
|
||||
|
||||
// instruct the server to close the socket
|
||||
client_tls_conn
|
||||
.write_all(&pad("close_socket".to_string()))
|
||||
.await
|
||||
.unwrap();
|
||||
client_tls_conn.flush().await.unwrap();
|
||||
|
||||
// give enough time to close the socket
|
||||
tokio::time::sleep(std::time::Duration::from_millis(10)).await;
|
||||
|
||||
// try to read some more data
|
||||
let mut buf = vec![0u8; 10];
|
||||
let n = client_tls_conn.read(&mut buf).await.unwrap();
|
||||
|
||||
assert_eq!(std::str::from_utf8(&buf[0..n]).unwrap(), "hello");
|
||||
}
|
||||
|
||||
// Expect there to be no error when server DOES NOT send close_notify but just
|
||||
// closes the socket
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_ok_server_no_close_notify(set_up_tls: impl Future<Output = TlsFixture>) {
|
||||
let TlsFixture {
|
||||
mut client_tls_conn,
|
||||
closed_tls_task,
|
||||
} = set_up_tls.await;
|
||||
|
||||
// instruct the server to close the socket
|
||||
client_tls_conn
|
||||
.write_all(&pad("close_socket".to_string()))
|
||||
.await
|
||||
.unwrap();
|
||||
client_tls_conn.flush().await.unwrap();
|
||||
|
||||
// give enough time to close the socket
|
||||
tokio::time::sleep(std::time::Duration::from_millis(10)).await;
|
||||
|
||||
client_tls_conn.close().await.unwrap();
|
||||
|
||||
let closed_conn = closed_tls_task.await.unwrap().unwrap();
|
||||
|
||||
assert!(!closed_conn.client.received_close_notify());
|
||||
}
|
||||
|
||||
// Expect to register a delay when the server delays closing the socket
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_ok_delay_close(set_up_tls: impl Future<Output = TlsFixture>) {
|
||||
let TlsFixture {
|
||||
mut client_tls_conn,
|
||||
closed_tls_task,
|
||||
} = set_up_tls.await;
|
||||
|
||||
client_tls_conn
|
||||
.write_all(&pad("must_delay_when_closing".to_string()))
|
||||
.await
|
||||
.unwrap();
|
||||
client_tls_conn.flush().await.unwrap();
|
||||
|
||||
// closing `client_tls_conn` will cause close_notify to be sent by the client
|
||||
client_tls_conn.close().await.unwrap();
|
||||
|
||||
use std::time::Instant;
|
||||
let now = Instant::now();
|
||||
// this will resolve when the server stops delaying closing the socket
|
||||
let closed_conn = closed_tls_task.await.unwrap().unwrap();
|
||||
let elapsed = now.elapsed();
|
||||
|
||||
// the elapsed time must be roughly equal to the server's delay
|
||||
// (give or take timing variations)
|
||||
assert!(elapsed.as_millis() as u64 > CLOSE_DELAY - 50);
|
||||
|
||||
assert!(!closed_conn.client.received_close_notify());
|
||||
}
|
||||
|
||||
// Expect client to error when server sends a corrupted message
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_err_corrupted(set_up_tls: impl Future<Output = TlsFixture>) {
|
||||
let TlsFixture {
|
||||
mut client_tls_conn,
|
||||
closed_tls_task,
|
||||
} = set_up_tls.await;
|
||||
|
||||
// instruct the server to send a corrupted message
|
||||
client_tls_conn
|
||||
.write_all(&pad("send_corrupted_message".to_string()))
|
||||
.await
|
||||
.unwrap();
|
||||
client_tls_conn.flush().await.unwrap();
|
||||
|
||||
tokio::time::sleep(std::time::Duration::from_millis(10)).await;
|
||||
client_tls_conn.close().await.unwrap();
|
||||
|
||||
assert_eq!(
|
||||
closed_tls_task.await.unwrap().err().unwrap().to_string(),
|
||||
"received corrupt message"
|
||||
);
|
||||
}
|
||||
|
||||
// Expect client to error when server sends a TLS record with a bad MAC
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_err_bad_mac(set_up_tls: impl Future<Output = TlsFixture>) {
|
||||
let TlsFixture {
|
||||
mut client_tls_conn,
|
||||
closed_tls_task,
|
||||
} = set_up_tls.await;
|
||||
|
||||
// instruct the server to send us a TLS record with a bad MAC
|
||||
client_tls_conn
|
||||
.write_all(&pad("send_record_with_bad_mac".to_string()))
|
||||
.await
|
||||
.unwrap();
|
||||
client_tls_conn.flush().await.unwrap();
|
||||
|
||||
tokio::time::sleep(std::time::Duration::from_millis(10)).await;
|
||||
client_tls_conn.close().await.unwrap();
|
||||
|
||||
assert_eq!(
|
||||
closed_tls_task.await.unwrap().err().unwrap().to_string(),
|
||||
"backend error: Decryption error: \"aead::Error\""
|
||||
);
|
||||
}
|
||||
|
||||
// Expect client to error when server sends a fatal alert
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_err_alert(set_up_tls: impl Future<Output = TlsFixture>) {
|
||||
let TlsFixture {
|
||||
mut client_tls_conn,
|
||||
closed_tls_task,
|
||||
} = set_up_tls.await;
|
||||
|
||||
// instruct the server to send us a TLS record with a bad MAC
|
||||
client_tls_conn
|
||||
.write_all(&pad("send_alert".to_string()))
|
||||
.await
|
||||
.unwrap();
|
||||
client_tls_conn.flush().await.unwrap();
|
||||
|
||||
tokio::time::sleep(std::time::Duration::from_millis(10)).await;
|
||||
client_tls_conn.close().await.unwrap();
|
||||
|
||||
assert_eq!(
|
||||
closed_tls_task.await.unwrap().err().unwrap().to_string(),
|
||||
"received fatal alert: BadRecordMac"
|
||||
);
|
||||
}
|
||||
|
||||
// Expect an error when trying to write data to a connection which server closed
|
||||
// abruptly
|
||||
#[rstest]
|
||||
#[tokio::test]
|
||||
async fn test_err_write_after_close(set_up_tls: impl Future<Output = TlsFixture>) {
|
||||
let TlsFixture {
|
||||
mut client_tls_conn,
|
||||
..
|
||||
} = set_up_tls.await;
|
||||
|
||||
// instruct the server to close the socket
|
||||
client_tls_conn
|
||||
.write_all(&pad("close_socket".to_string()))
|
||||
.await
|
||||
.unwrap();
|
||||
client_tls_conn.flush().await.unwrap();
|
||||
|
||||
// give enough time to close the socket
|
||||
tokio::time::sleep(std::time::Duration::from_millis(10)).await;
|
||||
|
||||
// try to send some more data
|
||||
let res = client_tls_conn
|
||||
.write_all(&pad("more data".to_string()))
|
||||
.await;
|
||||
|
||||
assert_eq!(res.err().unwrap().kind(), std::io::ErrorKind::BrokenPipe);
|
||||
}
|
||||
|
||||
// Converts a string into a slice zero-padded to APP_RECORD_LENGTH
|
||||
fn pad(s: String) -> Vec<u8> {
|
||||
assert!(s.len() <= APP_RECORD_LENGTH);
|
||||
let mut buf = vec![0u8; APP_RECORD_LENGTH];
|
||||
buf[..s.len()].copy_from_slice(s.as_bytes());
|
||||
buf
|
||||
}
|
||||
@@ -5,7 +5,7 @@ description = "A TLS client for TLSNotary"
|
||||
keywords = ["tls", "mpc", "2pc", "client", "sync"]
|
||||
categories = ["cryptography"]
|
||||
license = "Apache-2.0 OR ISC OR MIT"
|
||||
version = "0.1.0-alpha.14-pre"
|
||||
version = "0.1.0-alpha.13-pre"
|
||||
edition = "2021"
|
||||
autobenches = false
|
||||
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
use super::{Backend, BackendError};
|
||||
use crate::{DecryptMode, EncryptMode, Error};
|
||||
#[allow(deprecated)]
|
||||
use aes_gcm::{
|
||||
aead::{generic_array::GenericArray, Aead, NewAead, Payload},
|
||||
Aes128Gcm,
|
||||
@@ -508,7 +507,6 @@ impl Encrypter {
|
||||
let mut nonce = [0u8; 12];
|
||||
nonce[..4].copy_from_slice(&self.write_iv);
|
||||
nonce[4..].copy_from_slice(explicit_nonce);
|
||||
#[allow(deprecated)]
|
||||
let nonce = GenericArray::from_slice(&nonce);
|
||||
let cipher = Aes128Gcm::new_from_slice(&self.write_key).unwrap();
|
||||
// ciphertext will have the MAC appended
|
||||
@@ -570,7 +568,6 @@ impl Decrypter {
|
||||
let mut nonce = [0u8; 12];
|
||||
nonce[..4].copy_from_slice(&self.write_iv);
|
||||
nonce[4..].copy_from_slice(&m.payload.0[0..8]);
|
||||
#[allow(deprecated)]
|
||||
let nonce = GenericArray::from_slice(&nonce);
|
||||
let plaintext = cipher
|
||||
.decrypt(nonce, aes_payload)
|
||||
|
||||
@@ -227,7 +227,6 @@ impl ConnectionCommon {

    /// Signals that the server has closed the connection.
    pub async fn server_closed(&mut self) -> Result<(), Error> {
        self.common_state.has_seen_eof = true;
        self.common_state.backend.server_closed().await?;
        Ok(())
    }
@@ -458,9 +457,6 @@ impl ConnectionCommon {
            return Err(Error::CorruptMessage);
        }

        // Process outgoing plaintext buffer and encrypt messages.
        self.flush_plaintext().await?;

        // Process new messages.
        while let Some(msg) = self.message_deframer.frames.pop_front() {
            // If we're not decrypting yet, we process it immediately. Otherwise it will be
@@ -512,22 +508,25 @@ impl ConnectionCommon {
        Ok(state)
    }

    /// Writes plaintext `buf` into an internal buffer. May not fully process the
    /// whole buffer and returns the processed length.
    pub fn write_plaintext(&mut self, buf: &[u8]) -> Result<usize, Error> {
        if buf.is_empty() {
            // Don't send empty fragments.
            return Ok(0);
    /// Write buffer into connection.
    pub async fn write_plaintext(&mut self, buf: &[u8]) -> Result<usize, Error> {
        if let Ok(st) = &mut self.state {
            st.perhaps_write_key_update(&mut self.common_state).await;
        }

        let len = self.sendable_plaintext.append_limited_copy(buf);
        Ok(len)
        self.common_state.send_some_plaintext(buf).await
    }

    /// Writes the entire plaintext `buf` into an internal buffer.
    pub fn write_all_plaintext(&mut self, buf: &[u8]) -> Result<(), Error> {
        self.sendable_plaintext.append(buf.to_vec());
        Ok(())
    /// Write entire buffer into connection.
    pub async fn write_all_plaintext(&mut self, buf: &[u8]) -> Result<usize, Error> {
        let mut pos = 0;
        while pos < buf.len() {
            pos += self.write_plaintext(&buf[pos..]).await?;
        }
        self.backend.flush().await?;
        while let Some(msg) = self.backend.next_outgoing().await? {
            self.queue_tls_message(msg);
        }
        Ok(pos)
    }

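A brief caller-side sketch of the new async write API above (Conn is a hypothetical stand-in for a connection handle exposing write_plaintext, and Error for the crate's error type): a partial write, e.g. under a buffer limit, is handled by resubmitting the unsent tail, which is exactly what write_all_plaintext does internally.

async fn send_all(conn: &mut Conn, buf: &[u8]) -> Result<(), Error> {
    let mut pos = 0;
    while pos < buf.len() {
        // write_plaintext may accept only part of `buf`; keep writing the rest.
        pos += conn.write_plaintext(&buf[pos..]).await?;
    }
    Ok(())
}
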
    /// Read TLS content from `rd`. This method does internal
@@ -691,11 +690,6 @@ impl CommonState {
        self.received_plaintext.is_empty()
    }

    /// Returns true if the buffer for sendable plaintext is full.
    pub fn sendable_plaintext_is_full(&self) -> bool {
        self.sendable_plaintext.is_full()
    }

    /// Returns true if the connection is currently performing the TLS
    /// handshake.
    ///
@@ -788,6 +782,15 @@ impl CommonState {
        }
    }

    /// Send plaintext application data, fragmenting and
    /// encrypting it as it goes out.
    ///
    /// If internal buffers are too small, this function will not accept
    /// all the data.
    pub(crate) async fn send_some_plaintext(&mut self, data: &[u8]) -> Result<usize, Error> {
        self.send_plain(data, Limit::Yes).await
    }

    // Changing the keys must not span any fragmented handshake
    // messages. Otherwise the defragmented messages will have
    // been protected with two different record layer protections,
@@ -928,6 +931,32 @@ impl CommonState {
        self.sendable_tls.write_to_async(wr).await
    }

    /// Encrypt and send some plaintext `data`. `limit` controls
    /// whether the per-connection buffer limits apply.
    ///
    /// Returns the number of bytes written from `data`: this might
    /// be less than `data.len()` if buffer limits were exceeded.
    async fn send_plain(&mut self, data: &[u8], limit: Limit) -> Result<usize, Error> {
        if !self.may_send_application_data {
            // If we haven't completed handshaking, buffer
            // plaintext to send once we do.
            let len = match limit {
                Limit::Yes => self.sendable_plaintext.append_limited_copy(data),
                Limit::No => self.sendable_plaintext.append(data.to_vec()),
            };
            return Ok(len);
        }

        debug_assert!(self.record_layer.is_encrypting());

        if data.is_empty() {
            // Don't send empty fragments.
            return Ok(0);
        }

        self.send_appdata_encrypt(data, limit).await
    }

    pub(crate) async fn start_outgoing_traffic(&mut self) -> Result<(), Error> {
        self.may_send_application_data = true;
        self.flush_plaintext().await
@@ -983,14 +1012,15 @@ impl CommonState {
        self.sendable_tls.set_limit(limit);
    }

    /// Send and encrypt any buffered plaintext. Does nothing during handshake.
    pub async fn flush_plaintext(&mut self) -> Result<(), Error> {
    /// Send any buffered plaintext. Plaintext is buffered if
    /// written during handshake.
    async fn flush_plaintext(&mut self) -> Result<(), Error> {
        if !self.may_send_application_data {
            return Ok(());
        }

        while let Some(buf) = self.sendable_plaintext.pop() {
            self.send_appdata_encrypt(&buf, Limit::No).await?;
            self.send_plain(&buf, Limit::No).await?;
        }

        Ok(())

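To make the buffering rule above concrete, here is a self-contained toy model (not the real types): plaintext written before the handshake completes is queued in a sendable_plaintext buffer, and only once application data may flow is it drained, encrypted and sent.

use std::collections::VecDeque;

struct ToyConn {
    may_send_application_data: bool,
    sendable_plaintext: VecDeque<Vec<u8>>,
    wire: Vec<Vec<u8>>,
}

impl ToyConn {
    fn send_plain(&mut self, data: &[u8]) -> usize {
        if !self.may_send_application_data {
            // Mirrors the pre-handshake branch: buffer the plaintext for later.
            self.sendable_plaintext.push_back(data.to_vec());
            return data.len();
        }
        // Stands in for send_appdata_encrypt.
        self.wire.push(data.to_vec());
        data.len()
    }

    fn start_outgoing_traffic(&mut self) {
        self.may_send_application_data = true;
        // flush_plaintext: drain everything buffered during the handshake.
        while let Some(buf) = self.sendable_plaintext.pop_front() {
            let _ = self.send_plain(&buf);
        }
    }
}
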
@@ -35,15 +35,6 @@ impl ChunkVecBuffer {
        self.chunks.is_empty()
    }

    /// If the buffer has reached limit.
    pub(crate) fn is_full(&self) -> bool {
        if let Some(limit) = self.limit {
            self.len() >= limit
        } else {
            false
        }
    }

    /// How many bytes we're storing
    pub(crate) fn len(&self) -> usize {
        let mut len = 0;

@@ -247,8 +247,7 @@ async fn servered_client_data_sent() {
    let (mut client, mut server) =
        make_pair_for_arc_configs(&Arc::new(client_config), &server_config).await;

    assert_eq!(5, client.write_plaintext(b"hello").unwrap());
    client.flush_plaintext().await.unwrap();
    assert_eq!(5, client.write_plaintext(b"hello").await.unwrap());

    do_handshake(&mut client, &mut server).await;
    send(&mut client, &mut server);
@@ -287,7 +286,7 @@ async fn servered_both_data_sent() {
        make_pair_for_arc_configs(&Arc::new(client_config), &server_config).await;

    assert_eq!(12, server.writer().write(b"from-server!").unwrap());
    assert_eq!(12, client.write_plaintext(b"from-client!").unwrap());
    assert_eq!(12, client.write_plaintext(b"from-client!").await.unwrap());

    do_handshake(&mut client, &mut server).await;

@@ -433,7 +432,7 @@ async fn server_close_notify() {

    // check that alerts don't overtake appdata
    assert_eq!(12, server.writer().write(b"from-server!").unwrap());
    assert_eq!(12, client.write_plaintext(b"from-client!").unwrap());
    assert_eq!(12, client.write_plaintext(b"from-client!").await.unwrap());
    server.send_close_notify();

    receive(&mut server, &mut client);
@@ -461,8 +460,7 @@ async fn client_close_notify() {

    // check that alerts don't overtake appdata
    assert_eq!(12, server.writer().write(b"from-server!").unwrap());
    assert_eq!(12, client.write_plaintext(b"from-client!").unwrap());
    client.flush_plaintext().await.unwrap();
    assert_eq!(12, client.write_plaintext(b"from-client!").await.unwrap());
    client.send_close_notify().await.unwrap();

    send(&mut client, &mut server);
@@ -489,7 +487,7 @@ async fn server_closes_uncleanly() {

    // check that unclean EOF reporting does not overtake appdata
    assert_eq!(12, server.writer().write(b"from-server!").unwrap());
    assert_eq!(12, client.write_plaintext(b"from-client!").unwrap());
    assert_eq!(12, client.write_plaintext(b"from-client!").await.unwrap());

    receive(&mut server, &mut client);
    transfer_eof(&mut client);
@@ -520,7 +518,7 @@ async fn client_closes_uncleanly() {

    // check that unclean EOF reporting does not overtake appdata
    assert_eq!(12, server.writer().write(b"from-server!").unwrap());
    assert_eq!(12, client.write_plaintext(b"from-client!").unwrap());
    assert_eq!(12, client.write_plaintext(b"from-client!").await.unwrap());
    client.process_new_packets().await.unwrap();

    send(&mut client, &mut server);
@@ -902,9 +900,20 @@ async fn client_respects_buffer_limit_pre_handshake() {

    client.set_buffer_limit(Some(32));

    assert_eq!(client.write_plaintext(b"01234567890123456789").unwrap(), 20);
    assert_eq!(client.write_plaintext(b"01234567890123456789").unwrap(), 12);
    client.flush_plaintext().await.unwrap();
    assert_eq!(
        client
            .write_plaintext(b"01234567890123456789")
            .await
            .unwrap(),
        20
    );
    assert_eq!(
        client
            .write_plaintext(b"01234567890123456789")
            .await
            .unwrap(),
        12
    );

    do_handshake(&mut client, &mut server).await;
    send(&mut client, &mut server);

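The expected values in the pre-handshake buffer-limit test follow from simple arithmetic; a minimal sketch assuming the 32-byte limit set by set_buffer_limit(Some(32)) above:

fn main() {
    let limit = 32usize;
    let first = 20usize.min(limit);          // first 20-byte write fits entirely
    let second = 20usize.min(limit - first); // second write: only 12 bytes of room left
    assert_eq!((first, second), (20, 12));
}
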
@@ -944,9 +953,20 @@ async fn client_respects_buffer_limit_post_handshake() {
    do_handshake(&mut client, &mut server).await;
    client.set_buffer_limit(Some(48));

    assert_eq!(client.write_plaintext(b"01234567890123456789").unwrap(), 20);
    assert_eq!(client.write_plaintext(b"01234567890123456789").unwrap(), 6);
    client.flush_plaintext().await.unwrap();
    assert_eq!(
        client
            .write_plaintext(b"01234567890123456789")
            .await
            .unwrap(),
        20
    );
    assert_eq!(
        client
            .write_plaintext(b"01234567890123456789")
            .await
            .unwrap(),
        6
    );

    send(&mut client, &mut server);
    server.process_new_packets().unwrap();
@@ -1191,8 +1211,14 @@ async fn client_complete_io_for_write() {

    do_handshake(&mut client, &mut server).await;

    client.write_plaintext(b"01234567890123456789").unwrap();
    client.write_plaintext(b"01234567890123456789").unwrap();
    client
        .write_plaintext(b"01234567890123456789")
        .await
        .unwrap();
    client
        .write_plaintext(b"01234567890123456789")
        .await
        .unwrap();
    {
        let mut pipe = ServerSession::new(&mut server);
        let (rdlen, wrlen) = client
@@ -1324,8 +1350,7 @@ async fn server_stream_read() {
    for kt in ALL_KEY_TYPES.iter() {
        let (mut client, mut server) = make_pair(*kt).await;

        client.write_all_plaintext(b"world").unwrap();
        client.process_new_packets().await.unwrap();
        client.write_all_plaintext(b"world").await.unwrap();

        {
            let mut pipe = ClientSession::new(&mut client);
@@ -1341,8 +1366,7 @@ async fn server_streamowned_read() {
    for kt in ALL_KEY_TYPES.iter() {
        let (mut client, server) = make_pair(*kt).await;

        client.write_all_plaintext(b"world").unwrap();
        client.process_new_packets().await.unwrap();
        client.write_all_plaintext(b"world").await.unwrap();

        {
            let pipe = ClientSession::new(&mut client);
@@ -1361,9 +1385,7 @@ async fn server_streamowned_read() {
// errkind: io::ErrorKind::ConnectionAborted,
// after: 0,
// };
// client.write_all_plaintext(b"hello").unwrap();
// client.process_new_packets().await.unwrap();
//
// client.write_all_plaintext(b"hello").await.unwrap();
// let mut client_stream = Stream::new(&mut client, &mut pipe);
// let rc = client_stream.write(b"world");
// assert!(rc.is_err());
@@ -1380,9 +1402,7 @@ async fn server_streamowned_read() {
// errkind: io::ErrorKind::ConnectionAborted,
// after: 1,
// };
// client.write_all_plaintext(b"hello").unwrap();
// client.process_new_packets().await.unwrap();
//
// client.write_all_plaintext(b"hello").await.unwrap();
// let mut client_stream = Stream::new(&mut client, &mut pipe);
// let rc = client_stream.write(b"world");
// assert_eq!(format!("{:?}", rc), "Ok(5)");
@@ -1880,9 +1900,14 @@ async fn servered_write_for_client_appdata() {
    let (mut client, mut server) = make_pair(KeyType::Rsa).await;
    do_handshake(&mut client, &mut server).await;

    client.write_all_plaintext(b"01234567890123456789").unwrap();
    client.write_all_plaintext(b"01234567890123456789").unwrap();
    client.process_new_packets().await.unwrap();
    client
        .write_all_plaintext(b"01234567890123456789")
        .await
        .unwrap();
    client
        .write_all_plaintext(b"01234567890123456789")
        .await
        .unwrap();
    {
        let mut pipe = ServerSession::new(&mut server);
        let wrlen = client.write_tls(&mut pipe).unwrap();
@@ -1994,10 +2019,11 @@ async fn servered_write_for_server_handshake_no_half_rtt_by_default() {
async fn servered_write_for_client_handshake() {
    let (mut client, mut server) = make_pair(KeyType::Rsa).await;

    client.write_all_plaintext(b"01234567890123456789").unwrap();
    client.write_all_plaintext(b"0123456789").unwrap();
    client.process_new_packets().await.unwrap();

    client
        .write_all_plaintext(b"01234567890123456789")
        .await
        .unwrap();
    client.write_all_plaintext(b"0123456789").await.unwrap();
    {
        let mut pipe = ServerSession::new(&mut server);
        let wrlen = client.write_tls(&mut pipe).unwrap();

@@ -5,7 +5,7 @@ description = "Cryptographic operations for the TLSNotary TLS client"
keywords = ["tls", "mpc", "2pc"]
categories = ["cryptography"]
license = "Apache-2.0 OR ISC OR MIT"
version = "0.1.0-alpha.14-pre"
version = "0.1.0-alpha.13-pre"
edition = "2021"

[lints]

@@ -465,81 +465,19 @@ fn convert_scheme(scheme: SignatureScheme) -> Result<SignatureAlgorithms, Error>
    }
}

/// Signature algorithm.
#[derive(Debug, Clone, Copy, PartialEq)]
#[allow(non_camel_case_types)]
pub enum SignatureAlgorithm {
    ECDSA_NISTP256_SHA256,
    ECDSA_NISTP256_SHA384,
    ECDSA_NISTP384_SHA256,
    ECDSA_NISTP384_SHA384,
    ED25519,
    RSA_PKCS1_2048_8192_SHA256,
    RSA_PKCS1_2048_8192_SHA384,
    RSA_PKCS1_2048_8192_SHA512,
    RSA_PSS_2048_8192_SHA256_LEGACY_KEY,
    RSA_PSS_2048_8192_SHA384_LEGACY_KEY,
    RSA_PSS_2048_8192_SHA512_LEGACY_KEY,
}

impl SignatureAlgorithm {
    pub fn from_alg(alg: &dyn pki_types::SignatureVerificationAlgorithm) -> Self {
        let id = alg.signature_alg_id();
        if id == webpki::ring::ECDSA_P256_SHA256.signature_alg_id() {
            SignatureAlgorithm::ECDSA_NISTP256_SHA256
        } else if id == webpki::ring::ECDSA_P256_SHA384.signature_alg_id() {
            SignatureAlgorithm::ECDSA_NISTP256_SHA384
        } else if id == webpki::ring::ECDSA_P384_SHA256.signature_alg_id() {
            SignatureAlgorithm::ECDSA_NISTP384_SHA256
        } else if id == webpki::ring::ECDSA_P384_SHA384.signature_alg_id() {
            SignatureAlgorithm::ECDSA_NISTP384_SHA384
        } else if id == webpki::ring::ED25519.signature_alg_id() {
            SignatureAlgorithm::ED25519
        } else if id == webpki::ring::RSA_PKCS1_2048_8192_SHA256.signature_alg_id() {
            SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256
        } else if id == webpki::ring::RSA_PKCS1_2048_8192_SHA384.signature_alg_id() {
            SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384
        } else if id == webpki::ring::RSA_PKCS1_2048_8192_SHA512.signature_alg_id() {
            SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512
        } else if id == webpki::ring::RSA_PSS_2048_8192_SHA256_LEGACY_KEY.signature_alg_id() {
            SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY
        } else if id == webpki::ring::RSA_PSS_2048_8192_SHA384_LEGACY_KEY.signature_alg_id() {
            SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY
        } else if id == webpki::ring::RSA_PSS_2048_8192_SHA512_LEGACY_KEY.signature_alg_id() {
            SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY
        } else {
            unreachable!()
        }
    }
}

/// Verify the signature and return the algorithm which passed verification.
pub fn verify_sig_determine_alg(
    cert: &Certificate,
    message: &[u8],
    dss: &DigitallySignedStruct,
) -> Result<SignatureAlgorithm, Error> {
    let cert = pki_types::CertificateDer::from(cert.0.as_slice());
    let cert = webpki::EndEntityCert::try_from(&cert).map_err(pki_error)?;

    verify_sig_using_any_alg(&cert, convert_scheme(dss.scheme)?, message, &dss.sig.0)
        .map_err(pki_error)
}

fn verify_sig_using_any_alg(
    cert: &webpki::EndEntityCert,
    algs: SignatureAlgorithms,
    message: &[u8],
    sig: &[u8],
) -> Result<SignatureAlgorithm, webpki::Error> {
) -> Result<(), webpki::Error> {
    // TLS doesn't itself give us enough info to map to a single
    // webpki::SignatureAlgorithm. Therefore, convert_algs maps to several and
    // we try them all.
    for alg in algs {
        match cert.verify_signature(*alg, message, sig) {
            Ok(_) => return Ok(SignatureAlgorithm::from_alg(*alg)),
            Err(webpki::Error::UnsupportedSignatureAlgorithmForPublicKeyContext(_)) => continue,
            Err(e) => return Err(e),
            res => return res,
        }
    }

@@ -4,7 +4,7 @@ authors = ["TLSNotary Team"]
keywords = ["tls", "mpc", "2pc", "prover"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.14-pre"
version = "0.1.0-alpha.13-pre"
edition = "2024"

[lints]
@@ -12,7 +12,6 @@ workspace = true

[features]
default = ["rayon"]
mozilla-certs = ["tlsn-core/mozilla-certs"]
rayon = ["mpz-zk/rayon", "mpz-garble/rayon"]
web = ["dep:web-spawn"]

@@ -21,16 +20,15 @@ tlsn-attestation = { workspace = true }
tlsn-core = { workspace = true }
tlsn-deap = { workspace = true }
tlsn-tls-client = { workspace = true }
tlsn-tls-client-async = { workspace = true }
tlsn-tls-core = { workspace = true }
tlsn-mpc-tls = { workspace = true }
tlsn-cipher = { workspace = true }

futures-plex = { workspace = true }
serio = { workspace = true, features = ["compat"] }
uid-mux = { workspace = true, features = ["serio"] }
web-spawn = { workspace = true, optional = true }

mpz-circuits = { workspace = true, features = ["aes"] }
mpz-common = { workspace = true }
mpz-core = { workspace = true }
mpz-garble = { workspace = true }
@@ -41,10 +39,8 @@ mpz-ole = { workspace = true }
mpz-ot = { workspace = true }
mpz-vm-core = { workspace = true }
mpz-zk = { workspace = true }
mpz-ideal-vm = { workspace = true }

aes = { workspace = true }
ctr = { workspace = true }
derive_builder = { workspace = true }
futures = { workspace = true }
opaque-debug = { workspace = true }
rand = { workspace = true }
@@ -61,9 +57,7 @@ rangeset = { workspace = true }
webpki-roots = { workspace = true }

[dev-dependencies]
mpz-ideal-vm = { workspace = true }
rstest = { workspace = true }
tlsn-core = { workspace = true, features = ["fixtures"] }
tlsn-server-fixture = { workspace = true }
tlsn-server-fixture-certs = { workspace = true }
tokio = { workspace = true, features = ["full"] }

@@ -1,3 +0,0 @@
fn main() {
    println!("cargo:rustc-check-cfg=cfg(tlsn_insecure)");
}

109 crates/tlsn/src/commit.rs Normal file
@@ -0,0 +1,109 @@
//! Plaintext commitment and proof of encryption.

pub(crate) mod hash;
pub(crate) mod transcript;

use mpz_core::bitvec::BitVec;
use mpz_memory_core::{
    DecodeFutureTyped, Vector,
    binary::{Binary, U8},
};
use mpz_vm_core::{Vm, prelude::*};
use tlsn_core::transcript::Record;

use crate::{
    Role,
    zk_aes_ctr::{ZkAesCtr, ZkAesCtrError},
};

/// Commits the plaintext of the provided records, returning a proof of
/// encryption.
///
/// Writes the plaintext VM reference to the provided records.
pub(crate) fn commit_records<'record>(
    vm: &mut dyn Vm<Binary>,
    aes: &mut ZkAesCtr,
    records: impl IntoIterator<Item = &'record Record>,
) -> Result<(Vec<Vector<U8>>, RecordProof), RecordProofError> {
    let mut plaintexts = Vec::new();
    let mut ciphertexts = Vec::new();
    for record in records {
        let (plaintext_ref, ciphertext_ref) = aes
            .encrypt(vm, record.explicit_nonce.clone(), record.ciphertext.len())
            .map_err(ErrorRepr::Aes)?;

        if let Role::Prover = aes.role() {
            let Some(plaintext) = record.plaintext.clone() else {
                return Err(ErrorRepr::MissingPlaintext.into());
            };

            vm.assign(plaintext_ref, plaintext)
                .map_err(RecordProofError::vm)?;
        }
        vm.commit(plaintext_ref).map_err(RecordProofError::vm)?;

        let ciphertext = vm.decode(ciphertext_ref).map_err(RecordProofError::vm)?;

        plaintexts.push(plaintext_ref);
        ciphertexts.push((ciphertext, record.ciphertext.clone()));
    }

    Ok((plaintexts, RecordProof { ciphertexts }))
}

/// Proof of encryption.
#[derive(Debug)]
#[must_use]
#[allow(clippy::type_complexity)]
pub(crate) struct RecordProof {
    ciphertexts: Vec<(DecodeFutureTyped<BitVec, Vec<u8>>, Vec<u8>)>,
}

impl RecordProof {
    /// Verifies the proof.
    pub(crate) fn verify(self) -> Result<(), RecordProofError> {
        let Self { ciphertexts } = self;

        for (mut ciphertext, expected) in ciphertexts {
            let ciphertext = ciphertext
                .try_recv()
                .map_err(RecordProofError::vm)?
                .ok_or_else(|| ErrorRepr::NotDecoded)?;

            if ciphertext != expected {
                return Err(ErrorRepr::InvalidCiphertext.into());
            }
        }

        Ok(())
    }
}

/// Error for [`RecordProof`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub(crate) struct RecordProofError(#[from] ErrorRepr);

impl RecordProofError {
    fn vm<E>(err: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync + 'static>>,
    {
        Self(ErrorRepr::Vm(err.into()))
    }
}

#[derive(Debug, thiserror::Error)]
#[error("record proof error: {0}")]
enum ErrorRepr {
    #[error("VM error: {0}")]
    Vm(Box<dyn std::error::Error + Send + Sync + 'static>),
    #[error("zk aes error: {0}")]
    Aes(ZkAesCtrError),
    #[error("plaintext is missing")]
    MissingPlaintext,
    #[error("ciphertext was not decoded")]
    NotDecoded,
    #[error("ciphertext does not match expected")]
    InvalidCiphertext,
}

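The new commit.rs above commits the record plaintexts and proves correct encryption by decoding the ciphertext produced inside the ZK VM and comparing it against the ciphertext seen on the wire. Below is a self-contained toy of that final equality check, assuming the aes 0.8 and ctr 0.9 crates (already dependencies in the manifest above); in the real protocol the AES-CTR evaluation happens inside ZkAesCtr, so the verifier never handles the plaintext itself.

use aes::Aes128;
use ctr::cipher::{KeyIvInit, StreamCipher};

type Aes128Ctr32BE = ctr::Ctr32BE<Aes128>;

// Re-encrypt the claimed plaintext and compare with the ciphertext observed on
// the wire; equality is what binds the committed plaintext to the record.
fn check_record(key: [u8; 16], iv: [u8; 16], plaintext: &[u8], wire_ciphertext: &[u8]) -> bool {
    let mut buf = plaintext.to_vec();
    let mut cipher = Aes128Ctr32BE::new(&key.into(), &iv.into());
    cipher.apply_keystream(&mut buf);
    buf == wire_ciphertext
}
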
@@ -3,7 +3,7 @@
use std::collections::HashMap;

use mpz_core::bitvec::BitVec;
use mpz_hash::{blake3::Blake3, keccak256::Keccak256, sha256::Sha256};
use mpz_hash::{blake3::Blake3, sha256::Sha256};
use mpz_memory_core::{
    DecodeFutureTyped, MemoryExt, Vector,
    binary::{Binary, U8},
@@ -18,7 +18,7 @@ use tlsn_core::{
    },
};

use crate::{Role, transcript_internal::TranscriptRefs};
use crate::{Role, commit::transcript::TranscriptRefs};

/// Future which will resolve to the committed hash values.
#[derive(Debug)]
@@ -111,7 +111,6 @@ pub(crate) fn verify_hash(
enum Hasher {
    Sha256(Sha256),
    Blake3(Blake3),
    Keccak256(Keccak256),
}

/// Commit plaintext hashes of the transcript.
@@ -150,15 +149,9 @@ fn hash_commit_inner(
                hasher
            };

            let refs = match direction {
                Direction::Sent => &refs.sent,
                Direction::Received => &refs.recv,
            };

            for range in idx.iter_ranges() {
                hasher.update(&refs.get(range).expect("plaintext refs are valid"));
            for plaintext in refs.get(direction, &idx).expect("plaintext refs are valid") {
                hasher.update(&plaintext);
            }

            hasher.update(&blinder);
            hasher.finalize(vm).map_err(HashCommitError::hasher)?
        }
@@ -171,46 +164,16 @@ fn hash_commit_inner(
                hasher
            };

            let refs = match direction {
                Direction::Sent => &refs.sent,
                Direction::Received => &refs.recv,
            };

            for range in idx.iter_ranges() {
            for plaintext in refs.get(direction, &idx).expect("plaintext refs are valid") {
                hasher
                    .update(vm, &refs.get(range).expect("plaintext refs are valid"))
                    .update(vm, &plaintext)
                    .map_err(HashCommitError::hasher)?;
            }
            hasher
                .update(vm, &blinder)
                .map_err(HashCommitError::hasher)?;
            hasher.finalize(vm).map_err(HashCommitError::hasher)?
        }
        HashAlgId::KECCAK256 => {
            let mut hasher = if let Some(Hasher::Keccak256(hasher)) = hashers.get(&alg).cloned()
            {
                hasher
            } else {
                let hasher = Keccak256::new_with_init(vm).map_err(HashCommitError::hasher)?;
                hashers.insert(alg, Hasher::Keccak256(hasher.clone()));
                hasher
            };

            let refs = match direction {
                Direction::Sent => &refs.sent,
                Direction::Received => &refs.recv,
            };

            for range in idx.iter_ranges() {
                hasher
                    .update(vm, &refs.get(range).expect("plaintext refs are valid"))
                    .map_err(HashCommitError::hasher)?;
            }

            hasher
                .update(vm, &blinder)
                .map_err(HashCommitError::hasher)?;
            hasher.finalize(vm).map_err(HashCommitError::hasher)?

        }
        alg => {
            return Err(HashCommitError::unsupported_alg(alg));
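The hash commitment built above (with Keccak256 support removed, leaving SHA-256 and BLAKE3) has the shape H(plaintext_1 || ... || plaintext_n || blinder). A self-contained sketch of that shape using the blake3 1.x crate; the real code evaluates the hash inside the VM over committed plaintext references rather than over cleartext bytes.

fn hash_commit(plaintexts: &[&[u8]], blinder: &[u8]) -> [u8; 32] {
    let mut hasher = blake3::Hasher::new();
    for plaintext in plaintexts {
        hasher.update(plaintext);
    }
    // The blinder keeps the commitment hiding.
    hasher.update(blinder);
    *hasher.finalize().as_bytes()
}
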
Some files were not shown because too many files have changed in this diff.