Mirror of https://github.com/tlsnotary/tlsn.git (synced 2026-01-11 07:37:58 -05:00)

Compare commits: feat/tls_1 ... main (15 commits)
Commits:
1801c30599
0885d40ddf
610411aae4
37df1baed7
aeaebc5c60
2e7e3db11d
0a68837d0a
0ec2392716
f99fce5b5a
6b9f44e7e5
bf1cf2302a
2884be17e0
df8d79c152
82d509266b
d5ad768e7c
.github/workflows/ci.yml (vendored, 2 changed lines)

@@ -41,7 +41,7 @@ jobs:
 uses: Swatinem/rust-cache@v2.7.7

 - name: Clippy
-run: cargo clippy --keep-going --all-features --all-targets --locked -- -D warnings
+run: cargo clippy --keep-going --all-features --all-targets --locked

 fmt:
 name: Check formatting
.github/workflows/releng.yml (vendored, 2 changed lines)

@@ -6,7 +6,7 @@ on:
 tag:
 description: 'Tag to publish to NPM'
 required: true
-default: 'v0.1.0-alpha.13-pre'
+default: 'v0.1.0-alpha.13'

 jobs:
 release:
Cargo.lock (generated, 817 changed lines): diff suppressed because it is too large.
Cargo.toml (27 changed lines)

@@ -66,19 +66,20 @@ tlsn-harness-runner = { path = "crates/harness/runner" }
 tlsn-wasm = { path = "crates/wasm" }
 tlsn = { path = "crates/tlsn" }

-mpz-circuits = { git = "https://github.com/privacy-ethereum/mpz", rev = "3d90b6c" }
-mpz-memory-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "3d90b6c" }
-mpz-common = { git = "https://github.com/privacy-ethereum/mpz", rev = "3d90b6c" }
-mpz-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "3d90b6c" }
-mpz-vm-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "3d90b6c" }
-mpz-garble = { git = "https://github.com/privacy-ethereum/mpz", rev = "3d90b6c" }
-mpz-garble-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "3d90b6c" }
-mpz-ole = { git = "https://github.com/privacy-ethereum/mpz", rev = "3d90b6c" }
-mpz-ot = { git = "https://github.com/privacy-ethereum/mpz", rev = "3d90b6c" }
-mpz-share-conversion = { git = "https://github.com/privacy-ethereum/mpz", rev = "3d90b6c" }
-mpz-fields = { git = "https://github.com/privacy-ethereum/mpz", rev = "3d90b6c" }
-mpz-zk = { git = "https://github.com/privacy-ethereum/mpz", rev = "3d90b6c" }
-mpz-hash = { git = "https://github.com/privacy-ethereum/mpz", rev = "3d90b6c" }
+mpz-circuits = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
+mpz-memory-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
+mpz-common = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
+mpz-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
+mpz-vm-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
+mpz-garble = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
+mpz-garble-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
+mpz-ole = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
+mpz-ot = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
+mpz-share-conversion = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
+mpz-fields = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
+mpz-zk = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
+mpz-hash = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
+mpz-ideal-vm = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }

 rangeset = { version = "0.2" }
 serio = { version = "0.2" }
@@ -1,6 +1,6 @@
 [package]
 name = "tlsn-attestation"
-version = "0.1.0-alpha.13-pre"
+version = "0.1.0-alpha.13"
 edition = "2024"

 [features]
@@ -5,7 +5,7 @@ use rand::{Rng, rng};
 use tlsn_core::{
 connection::{ConnectionInfo, ServerEphemKey},
 hash::HashAlgId,
-transcript::TranscriptCommitment,
+transcript::{TranscriptCommitment, encoding::EncoderSecret},
 };

 use crate::{

@@ -25,6 +25,7 @@ pub struct Sign {
 connection_info: Option<ConnectionInfo>,
 server_ephemeral_key: Option<ServerEphemKey>,
 cert_commitment: ServerCertCommitment,
+encoder_secret: Option<EncoderSecret>,
 extensions: Vec<Extension>,
 transcript_commitments: Vec<TranscriptCommitment>,
 }

@@ -86,6 +87,7 @@ impl<'a> AttestationBuilder<'a, Accept> {
 connection_info: None,
 server_ephemeral_key: None,
 cert_commitment,
+encoder_secret: None,
 transcript_commitments: Vec::new(),
 extensions,
 },

@@ -106,6 +108,12 @@ impl AttestationBuilder<'_, Sign> {
 self
 }

+/// Sets the secret for encoding commitments.
+pub fn encoder_secret(&mut self, secret: EncoderSecret) -> &mut Self {
+self.state.encoder_secret = Some(secret);
+self
+}
+
 /// Adds an extension to the attestation.
 pub fn extension(&mut self, extension: Extension) -> &mut Self {
 self.state.extensions.push(extension);

@@ -129,6 +137,7 @@ impl AttestationBuilder<'_, Sign> {
 connection_info,
 server_ephemeral_key,
 cert_commitment,
+encoder_secret,
 extensions,
 transcript_commitments,
 } = self.state;

@@ -159,6 +168,7 @@ impl AttestationBuilder<'_, Sign> {
 AttestationBuilderError::new(ErrorKind::Field, "handshake data was not set")
 })?),
 cert_commitment: field_id.next(cert_commitment),
+encoder_secret: encoder_secret.map(|secret| field_id.next(secret)),
 extensions: extensions
 .into_iter()
 .map(|extension| field_id.next(extension))
@@ -219,7 +219,7 @@ use tlsn_core::{
 connection::{ConnectionInfo, ServerEphemKey},
 hash::{Hash, HashAlgorithm, TypedHash},
 merkle::MerkleTree,
-transcript::TranscriptCommitment,
+transcript::{TranscriptCommitment, encoding::EncoderSecret},
 };

 use crate::{

@@ -327,6 +327,7 @@ pub struct Body {
 connection_info: Field<ConnectionInfo>,
 server_ephemeral_key: Field<ServerEphemKey>,
 cert_commitment: Field<ServerCertCommitment>,
+encoder_secret: Option<Field<EncoderSecret>>,
 extensions: Vec<Field<Extension>>,
 transcript_commitments: Vec<Field<TranscriptCommitment>>,
 }

@@ -372,6 +373,7 @@ impl Body {
 connection_info: conn_info,
 server_ephemeral_key,
 cert_commitment,
+encoder_secret,
 extensions,
 transcript_commitments,
 } = self;

@@ -389,6 +391,13 @@ impl Body {
 ),
 ];

+if let Some(encoder_secret) = encoder_secret {
+fields.push((
+encoder_secret.id,
+hasher.hash_separated(&encoder_secret.data),
+));
+}
+
 for field in extensions.iter() {
 fields.push((field.id, hasher.hash_separated(&field.data)));
 }
@@ -91,6 +91,11 @@ impl Presentation {
 transcript.verify_with_provider(
 &provider.hash,
 &attestation.body.connection_info().transcript_length,
+attestation
+.body
+.encoder_secret
+.as_ref()
+.map(|field| &field.data),
 attestation.body.transcript_commitments(),
 )
 })
@@ -49,5 +49,6 @@ impl_domain_separator!(tlsn_core::connection::ConnectionInfo);
 impl_domain_separator!(tlsn_core::connection::CertBinding);
 impl_domain_separator!(tlsn_core::transcript::TranscriptCommitment);
 impl_domain_separator!(tlsn_core::transcript::TranscriptSecret);
+impl_domain_separator!(tlsn_core::transcript::encoding::EncoderSecret);
 impl_domain_separator!(tlsn_core::transcript::encoding::EncodingCommitment);
 impl_domain_separator!(tlsn_core::transcript::hash::PlaintextHash);
@@ -64,7 +64,6 @@ fn test_api() {

 let encoding_commitment = EncodingCommitment {
 root: encoding_tree.root(),
-secret: encoder_secret(),
 };

 let request_config = RequestConfig::default();

@@ -96,6 +95,7 @@ fn test_api() {
 .connection_info(connection_info.clone())
 // Server key Notary received during handshake
 .server_ephemeral_key(server_ephemeral_key)
+.encoder_secret(encoder_secret())
 .transcript_commitments(vec![TranscriptCommitment::Encoding(encoding_commitment)]);

 let attestation = attestation_builder.build(&provider).unwrap();
@@ -5,7 +5,7 @@ description = "This crate provides implementations of ciphers for two parties"
 keywords = ["tls", "mpc", "2pc", "aes"]
 categories = ["cryptography"]
 license = "MIT OR Apache-2.0"
-version = "0.1.0-alpha.13-pre"
+version = "0.1.0-alpha.13"
 edition = "2021"

 [lints]

@@ -1,6 +1,6 @@
 [package]
 name = "tlsn-deap"
-version = "0.1.0-alpha.13-pre"
+version = "0.1.0-alpha.13"
 edition = "2021"

 [lints]

@@ -5,7 +5,7 @@ description = "A 2PC implementation of TLS HMAC-SHA256 PRF"
 keywords = ["tls", "mpc", "2pc", "hmac", "sha256"]
 categories = ["cryptography"]
 license = "MIT OR Apache-2.0"
-version = "0.1.0-alpha.13-pre"
+version = "0.1.0-alpha.13"
 edition = "2021"

 [lints]

@@ -5,7 +5,7 @@ description = "Implementation of the 3-party key-exchange protocol"
 keywords = ["tls", "mpc", "2pc", "pms", "key-exchange"]
 categories = ["cryptography"]
 license = "MIT OR Apache-2.0"
-version = "0.1.0-alpha.13-pre"
+version = "0.1.0-alpha.13"
 edition = "2021"

 [lints]

@@ -5,7 +5,7 @@ description = "Core types for TLSNotary"
 keywords = ["tls", "mpc", "2pc", "types"]
 categories = ["cryptography"]
 license = "MIT OR Apache-2.0"
-version = "0.1.0-alpha.13-pre"
+version = "0.1.0-alpha.13"
 edition = "2021"

 [lints]
@@ -116,84 +116,75 @@ pub enum KeyType {
 SECP256R1 = 0x0017,
 }

-/// Signature scheme on the key exchange parameters.
+/// Signature algorithm used on the key exchange parameters.
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
 #[serde(rename_all = "lowercase")]
 #[allow(non_camel_case_types, missing_docs)]
-pub enum SignatureScheme {
-RSA_PKCS1_SHA1 = 0x0201,
-ECDSA_SHA1_Legacy = 0x0203,
-RSA_PKCS1_SHA256 = 0x0401,
-ECDSA_NISTP256_SHA256 = 0x0403,
-RSA_PKCS1_SHA384 = 0x0501,
-ECDSA_NISTP384_SHA384 = 0x0503,
-RSA_PKCS1_SHA512 = 0x0601,
-ECDSA_NISTP521_SHA512 = 0x0603,
-RSA_PSS_SHA256 = 0x0804,
-RSA_PSS_SHA384 = 0x0805,
-RSA_PSS_SHA512 = 0x0806,
-ED25519 = 0x0807,
+pub enum SignatureAlgorithm {
+ECDSA_NISTP256_SHA256,
+ECDSA_NISTP256_SHA384,
+ECDSA_NISTP384_SHA256,
+ECDSA_NISTP384_SHA384,
+ED25519,
+RSA_PKCS1_2048_8192_SHA256,
+RSA_PKCS1_2048_8192_SHA384,
+RSA_PKCS1_2048_8192_SHA512,
+RSA_PSS_2048_8192_SHA256_LEGACY_KEY,
+RSA_PSS_2048_8192_SHA384_LEGACY_KEY,
+RSA_PSS_2048_8192_SHA512_LEGACY_KEY,
 }

-impl fmt::Display for SignatureScheme {
+impl fmt::Display for SignatureAlgorithm {
 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 match self {
-SignatureScheme::RSA_PKCS1_SHA1 => write!(f, "RSA_PKCS1_SHA1"),
-SignatureScheme::ECDSA_SHA1_Legacy => write!(f, "ECDSA_SHA1_Legacy"),
-SignatureScheme::RSA_PKCS1_SHA256 => write!(f, "RSA_PKCS1_SHA256"),
-SignatureScheme::ECDSA_NISTP256_SHA256 => write!(f, "ECDSA_NISTP256_SHA256"),
-SignatureScheme::RSA_PKCS1_SHA384 => write!(f, "RSA_PKCS1_SHA384"),
-SignatureScheme::ECDSA_NISTP384_SHA384 => write!(f, "ECDSA_NISTP384_SHA384"),
-SignatureScheme::RSA_PKCS1_SHA512 => write!(f, "RSA_PKCS1_SHA512"),
-SignatureScheme::ECDSA_NISTP521_SHA512 => write!(f, "ECDSA_NISTP521_SHA512"),
-SignatureScheme::RSA_PSS_SHA256 => write!(f, "RSA_PSS_SHA256"),
-SignatureScheme::RSA_PSS_SHA384 => write!(f, "RSA_PSS_SHA384"),
-SignatureScheme::RSA_PSS_SHA512 => write!(f, "RSA_PSS_SHA512"),
-SignatureScheme::ED25519 => write!(f, "ED25519"),
+SignatureAlgorithm::ECDSA_NISTP256_SHA256 => write!(f, "ECDSA_NISTP256_SHA256"),
+SignatureAlgorithm::ECDSA_NISTP256_SHA384 => write!(f, "ECDSA_NISTP256_SHA384"),
+SignatureAlgorithm::ECDSA_NISTP384_SHA256 => write!(f, "ECDSA_NISTP384_SHA256"),
+SignatureAlgorithm::ECDSA_NISTP384_SHA384 => write!(f, "ECDSA_NISTP384_SHA384"),
+SignatureAlgorithm::ED25519 => write!(f, "ED25519"),
+SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256 => {
+write!(f, "RSA_PKCS1_2048_8192_SHA256")
+}
+SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384 => {
+write!(f, "RSA_PKCS1_2048_8192_SHA384")
+}
+SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512 => {
+write!(f, "RSA_PKCS1_2048_8192_SHA512")
+}
+SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
+write!(f, "RSA_PSS_2048_8192_SHA256_LEGACY_KEY")
+}
+SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
+write!(f, "RSA_PSS_2048_8192_SHA384_LEGACY_KEY")
+}
+SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
+write!(f, "RSA_PSS_2048_8192_SHA512_LEGACY_KEY")
+}
 }
 }
 }

-impl TryFrom<tls_core::msgs::enums::SignatureScheme> for SignatureScheme {
-type Error = &'static str;
-
-fn try_from(value: tls_core::msgs::enums::SignatureScheme) -> Result<Self, Self::Error> {
-use tls_core::msgs::enums::SignatureScheme as Core;
-use SignatureScheme::*;
-Ok(match value {
-Core::RSA_PKCS1_SHA1 => RSA_PKCS1_SHA1,
-Core::ECDSA_SHA1_Legacy => ECDSA_SHA1_Legacy,
-Core::RSA_PKCS1_SHA256 => RSA_PKCS1_SHA256,
-Core::ECDSA_NISTP256_SHA256 => ECDSA_NISTP256_SHA256,
-Core::RSA_PKCS1_SHA384 => RSA_PKCS1_SHA384,
-Core::ECDSA_NISTP384_SHA384 => ECDSA_NISTP384_SHA384,
-Core::RSA_PKCS1_SHA512 => RSA_PKCS1_SHA512,
-Core::ECDSA_NISTP521_SHA512 => ECDSA_NISTP521_SHA512,
-Core::RSA_PSS_SHA256 => RSA_PSS_SHA256,
-Core::RSA_PSS_SHA384 => RSA_PSS_SHA384,
-Core::RSA_PSS_SHA512 => RSA_PSS_SHA512,
-Core::ED25519 => ED25519,
-_ => return Err("unsupported signature scheme"),
-})
-}
-}
-
-impl From<SignatureScheme> for tls_core::msgs::enums::SignatureScheme {
-fn from(value: SignatureScheme) -> Self {
-use tls_core::msgs::enums::SignatureScheme::*;
+impl From<tls_core::verify::SignatureAlgorithm> for SignatureAlgorithm {
+fn from(value: tls_core::verify::SignatureAlgorithm) -> Self {
+use tls_core::verify::SignatureAlgorithm as Core;
 match value {
-SignatureScheme::RSA_PKCS1_SHA1 => RSA_PKCS1_SHA1,
-SignatureScheme::ECDSA_SHA1_Legacy => ECDSA_SHA1_Legacy,
-SignatureScheme::RSA_PKCS1_SHA256 => RSA_PKCS1_SHA256,
-SignatureScheme::ECDSA_NISTP256_SHA256 => ECDSA_NISTP256_SHA256,
-SignatureScheme::RSA_PKCS1_SHA384 => RSA_PKCS1_SHA384,
-SignatureScheme::ECDSA_NISTP384_SHA384 => ECDSA_NISTP384_SHA384,
-SignatureScheme::RSA_PKCS1_SHA512 => RSA_PKCS1_SHA512,
-SignatureScheme::ECDSA_NISTP521_SHA512 => ECDSA_NISTP521_SHA512,
-SignatureScheme::RSA_PSS_SHA256 => RSA_PSS_SHA256,
-SignatureScheme::RSA_PSS_SHA384 => RSA_PSS_SHA384,
-SignatureScheme::RSA_PSS_SHA512 => RSA_PSS_SHA512,
-SignatureScheme::ED25519 => ED25519,
+Core::ECDSA_NISTP256_SHA256 => SignatureAlgorithm::ECDSA_NISTP256_SHA256,
+Core::ECDSA_NISTP256_SHA384 => SignatureAlgorithm::ECDSA_NISTP256_SHA384,
+Core::ECDSA_NISTP384_SHA256 => SignatureAlgorithm::ECDSA_NISTP384_SHA256,
+Core::ECDSA_NISTP384_SHA384 => SignatureAlgorithm::ECDSA_NISTP384_SHA384,
+Core::ED25519 => SignatureAlgorithm::ED25519,
+Core::RSA_PKCS1_2048_8192_SHA256 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256,
+Core::RSA_PKCS1_2048_8192_SHA384 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384,
+Core::RSA_PKCS1_2048_8192_SHA512 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512,
+Core::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
+SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY
+}
+Core::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
+SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY
+}
+Core::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
+SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY
+}
 }
 }
 }

@@ -201,8 +192,8 @@ impl From<SignatureScheme> for tls_core::msgs::enums::SignatureScheme {
 /// Server's signature of the key exchange parameters.
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct ServerSignature {
-/// Signature scheme.
-pub scheme: SignatureScheme,
+/// Signature algorithm.
+pub alg: SignatureAlgorithm,
 /// Signature data.
 pub sig: Vec<u8>,
 }

@@ -359,20 +350,23 @@ impl HandshakeData {
 message.extend_from_slice(&server_ephemeral_key.kx_params());

 use webpki::ring as alg;
-let sig_alg = match self.sig.scheme {
-SignatureScheme::RSA_PKCS1_SHA256 => alg::RSA_PKCS1_2048_8192_SHA256,
-SignatureScheme::RSA_PKCS1_SHA384 => alg::RSA_PKCS1_2048_8192_SHA384,
-SignatureScheme::RSA_PKCS1_SHA512 => alg::RSA_PKCS1_2048_8192_SHA512,
-SignatureScheme::RSA_PSS_SHA256 => alg::RSA_PSS_2048_8192_SHA256_LEGACY_KEY,
-SignatureScheme::RSA_PSS_SHA384 => alg::RSA_PSS_2048_8192_SHA384_LEGACY_KEY,
-SignatureScheme::RSA_PSS_SHA512 => alg::RSA_PSS_2048_8192_SHA512_LEGACY_KEY,
-SignatureScheme::ECDSA_NISTP256_SHA256 => alg::ECDSA_P256_SHA256,
-SignatureScheme::ECDSA_NISTP384_SHA384 => alg::ECDSA_P384_SHA384,
-SignatureScheme::ED25519 => alg::ED25519,
-scheme => {
-return Err(HandshakeVerificationError::UnsupportedSignatureScheme(
-scheme,
-))
+let sig_alg = match self.sig.alg {
+SignatureAlgorithm::ECDSA_NISTP256_SHA256 => alg::ECDSA_P256_SHA256,
+SignatureAlgorithm::ECDSA_NISTP256_SHA384 => alg::ECDSA_P256_SHA384,
+SignatureAlgorithm::ECDSA_NISTP384_SHA256 => alg::ECDSA_P384_SHA256,
+SignatureAlgorithm::ECDSA_NISTP384_SHA384 => alg::ECDSA_P384_SHA384,
+SignatureAlgorithm::ED25519 => alg::ED25519,
+SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256 => alg::RSA_PKCS1_2048_8192_SHA256,
+SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384 => alg::RSA_PKCS1_2048_8192_SHA384,
+SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512 => alg::RSA_PKCS1_2048_8192_SHA512,
+SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
+alg::RSA_PSS_2048_8192_SHA256_LEGACY_KEY
+}
+SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
+alg::RSA_PSS_2048_8192_SHA384_LEGACY_KEY
+}
+SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
+alg::RSA_PSS_2048_8192_SHA512_LEGACY_KEY
+}
 }
 };

@@ -402,8 +396,6 @@ pub enum HandshakeVerificationError {
 InvalidServerEphemeralKey,
 #[error("server certificate verification failed: {0}")]
 ServerCert(ServerCertVerifierError),
-#[error("unsupported signature scheme: {0}")]
-UnsupportedSignatureScheme(SignatureScheme),
 }

 #[cfg(test)]
@@ -10,7 +10,8 @@ use hex::FromHex;
 use crate::{
 connection::{
 CertBinding, CertBindingV1_2, ConnectionInfo, DnsName, HandshakeData, KeyType,
-ServerEphemKey, ServerName, ServerSignature, SignatureScheme, TlsVersion, TranscriptLength,
+ServerEphemKey, ServerName, ServerSignature, SignatureAlgorithm, TlsVersion,
+TranscriptLength,
 },
 transcript::{
 encoding::{EncoderSecret, EncodingProvider},

@@ -47,7 +48,7 @@ impl ConnectionFixture {
 CertificateDer(include_bytes!("fixtures/data/tlsnotary.org/ca.der").to_vec()),
 ],
 sig: ServerSignature {
-scheme: SignatureScheme::RSA_PKCS1_SHA256,
+alg: SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256,
 sig: Vec::<u8>::from_hex(include_bytes!(
 "fixtures/data/tlsnotary.org/signature"
 ))

@@ -92,7 +93,7 @@ impl ConnectionFixture {
 CertificateDer(include_bytes!("fixtures/data/appliedzkp.org/ca.der").to_vec()),
 ],
 sig: ServerSignature {
-scheme: SignatureScheme::ECDSA_NISTP256_SHA256,
+alg: SignatureAlgorithm::ECDSA_NISTP256_SHA256,
 sig: Vec::<u8>::from_hex(include_bytes!(
 "fixtures/data/appliedzkp.org/signature"
 ))
@@ -20,8 +20,8 @@ use serde::{Deserialize, Serialize};
 use crate::{
 connection::{HandshakeData, ServerName},
 transcript::{
-Direction, PartialTranscript, Transcript, TranscriptCommitConfig, TranscriptCommitRequest,
-TranscriptCommitment, TranscriptSecret,
+encoding::EncoderSecret, Direction, PartialTranscript, Transcript, TranscriptCommitConfig,
+TranscriptCommitRequest, TranscriptCommitment, TranscriptSecret,
 },
 };

@@ -122,6 +122,14 @@ impl<'a> ProveConfigBuilder<'a> {
 self.reveal(Direction::Sent, ranges)
 }

+/// Reveals all of the sent data transcript.
+pub fn reveal_sent_all(&mut self) -> Result<&mut Self, ProveConfigBuilderError> {
+let len = self.transcript.len_of_direction(Direction::Sent);
+let (sent, _) = self.reveal.get_or_insert_default();
+sent.union_mut(&(0..len));
+Ok(self)
+}
+
 /// Reveals the given ranges of the received data transcript.
 pub fn reveal_recv(
 &mut self,

@@ -130,6 +138,14 @@ impl<'a> ProveConfigBuilder<'a> {
 self.reveal(Direction::Received, ranges)
 }

+/// Reveals all of the received data transcript.
+pub fn reveal_recv_all(&mut self) -> Result<&mut Self, ProveConfigBuilderError> {
+let len = self.transcript.len_of_direction(Direction::Received);
+let (_, recv) = self.reveal.get_or_insert_default();
+recv.union_mut(&(0..len));
+Ok(self)
+}
+
 /// Builds the configuration.
 pub fn build(self) -> Result<ProveConfig, ProveConfigBuilderError> {
 Ok(ProveConfig {

@@ -190,10 +206,10 @@ pub struct VerifyConfigBuilderError(#[from] VerifyConfigBuilderErrorRepr);
 #[derive(Debug, thiserror::Error)]
 enum VerifyConfigBuilderErrorRepr {}

-/// Payload sent to the verifier.
+/// Request to prove statements about the connection.
 #[doc(hidden)]
 #[derive(Debug, Serialize, Deserialize)]
-pub struct ProvePayload {
+pub struct ProveRequest {
 /// Handshake data.
 pub handshake: Option<(ServerName, HandshakeData)>,
 /// Transcript data.

@@ -220,6 +236,8 @@ pub struct VerifierOutput {
 pub server_name: Option<ServerName>,
 /// Transcript data.
 pub transcript: Option<PartialTranscript>,
+/// Encoding commitment secret.
+pub encoder_secret: Option<EncoderSecret>,
 /// Transcript commitments.
 pub transcript_commitments: Vec<TranscriptCommitment>,
 }
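The two reveal-all helpers added above are not exercised anywhere else in this change. A minimal sketch of how a caller might use them follows; only the builder methods visible in the hunk are assumed to exist, and how the ProveConfigBuilder is obtained is left to the caller.

// Sketch, not part of the diff: reveal both directions of the transcript in full.
// Assumes only `reveal_sent_all`, `reveal_recv_all` and `build` from the hunk above.
fn full_reveal_config(
    mut builder: ProveConfigBuilder<'_>,
) -> Result<ProveConfig, ProveConfigBuilderError> {
    builder.reveal_sent_all()?; // unions 0..sent_len into the reveal set
    builder.reveal_recv_all()?; // unions 0..recv_len into the reveal set
    builder.build()
}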
@@ -2,7 +2,7 @@

 use std::{collections::HashSet, fmt};

-use rangeset::ToRangeSet;
+use rangeset::{ToRangeSet, UnionMut};
 use serde::{Deserialize, Serialize};

 use crate::{

@@ -114,7 +114,19 @@ impl TranscriptCommitConfig {
 /// Returns a request for the transcript commitments.
 pub fn to_request(&self) -> TranscriptCommitRequest {
 TranscriptCommitRequest {
-encoding: self.has_encoding,
+encoding: self.has_encoding.then(|| {
+let mut sent = RangeSet::default();
+let mut recv = RangeSet::default();
+
+for (dir, idx) in self.iter_encoding() {
+match dir {
+Direction::Sent => sent.union_mut(idx),
+Direction::Received => recv.union_mut(idx),
+}
+}
+
+(sent, recv)
+}),
 hash: self
 .iter_hash()
 .map(|((dir, idx), alg)| (*dir, idx.clone(), *alg))

@@ -289,14 +301,14 @@ impl fmt::Display for TranscriptCommitConfigBuilderError {
 /// Request to compute transcript commitments.
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct TranscriptCommitRequest {
-encoding: bool,
+encoding: Option<(RangeSet<usize>, RangeSet<usize>)>,
 hash: Vec<(Direction, RangeSet<usize>, HashAlgId)>,
 }

 impl TranscriptCommitRequest {
 /// Returns `true` if an encoding commitment is requested.
-pub fn encoding(&self) -> bool {
-self.encoding
+pub fn has_encoding(&self) -> bool {
+self.encoding.is_some()
 }

 /// Returns `true` if a hash commitment is requested.

@@ -308,6 +320,11 @@ impl TranscriptCommitRequest {
 pub fn iter_hash(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>, HashAlgId)> {
 self.hash.iter()
 }

+/// Returns the ranges of the encoding commitments.
+pub fn encoding(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
+self.encoding.as_ref()
+}
 }

 #[cfg(test)]
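TranscriptCommitRequest now carries the requested encoding ranges rather than a bare flag, exposed through has_encoding() and the new encoding() accessor. An illustrative sketch of a consumer is below; the function itself is not from the diff, and RangeSet::len() returning the number of covered indices is an assumption about the rangeset crate.

// Sketch: count how many transcript bytes the request asks to commit via encodings.
fn requested_encoding_bytes(request: &TranscriptCommitRequest) -> usize {
    if !request.has_encoding() {
        return 0;
    }
    let (sent, recv) = request.encoding().expect("checked by has_encoding");
    sent.len() + recv.len()
}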
@@ -19,6 +19,4 @@ use crate::hash::TypedHash;
 pub struct EncodingCommitment {
 /// Merkle root of the encoding commitments.
 pub root: TypedHash,
-/// Seed used to generate the encodings.
-pub secret: EncoderSecret,
 }
@@ -8,7 +8,7 @@ use crate::{
 merkle::{MerkleError, MerkleProof},
 transcript::{
 commit::MAX_TOTAL_COMMITTED_DATA,
-encoding::{new_encoder, Encoder, EncodingCommitment},
+encoding::{new_encoder, Encoder, EncoderSecret, EncodingCommitment},
 Direction,
 },
 };

@@ -48,13 +48,14 @@ impl EncodingProof {
 pub fn verify_with_provider(
 &self,
 provider: &HashProvider,
+secret: &EncoderSecret,
 commitment: &EncodingCommitment,
 sent: &[u8],
 recv: &[u8],
 ) -> Result<(RangeSet<usize>, RangeSet<usize>), EncodingProofError> {
 let hasher = provider.get(&commitment.root.alg)?;

-let encoder = new_encoder(&commitment.secret);
+let encoder = new_encoder(secret);
 let Self {
 inclusion_proof,
 openings,

@@ -232,10 +233,7 @@ mod test {
 use crate::{
 fixtures::{encoder_secret, encoder_secret_tampered_seed, encoding_provider},
 hash::Blake3,
-transcript::{
-encoding::{EncoderSecret, EncodingTree},
-Transcript,
-},
+transcript::{encoding::EncodingTree, Transcript},
 };

 use super::*;

@@ -246,7 +244,7 @@ mod test {
 commitment: EncodingCommitment,
 }

-fn new_encoding_fixture(secret: EncoderSecret) -> EncodingFixture {
+fn new_encoding_fixture() -> EncodingFixture {
 let transcript = Transcript::new(POST_JSON, OK_JSON);

 let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));

@@ -257,10 +255,7 @@ mod test {
 let proof = tree.proof([&idx_0, &idx_1].into_iter()).unwrap();

-let commitment = EncodingCommitment {
-root: tree.root(),
-secret,
-};
+let commitment = EncodingCommitment { root: tree.root() };

 EncodingFixture {
 transcript,

@@ -275,11 +270,12 @@ mod test {
 transcript,
 proof,
 commitment,
-} = new_encoding_fixture(encoder_secret_tampered_seed());
+} = new_encoding_fixture();

 let err = proof
 .verify_with_provider(
 &HashProvider::default(),
+&encoder_secret_tampered_seed(),
 &commitment,
 transcript.sent(),
 transcript.received(),

@@ -295,13 +291,19 @@ mod test {
 transcript,
 proof,
 commitment,
-} = new_encoding_fixture(encoder_secret());
+} = new_encoding_fixture();

 let sent = &transcript.sent()[transcript.sent().len() - 1..];
 let recv = &transcript.received()[transcript.received().len() - 2..];

 let err = proof
-.verify_with_provider(&HashProvider::default(), &commitment, sent, recv)
+.verify_with_provider(
+&HashProvider::default(),
+&encoder_secret(),
+&commitment,
+sent,
+recv,
+)
 .unwrap_err();

 assert!(matches!(err.kind, ErrorKind::Proof));

@@ -313,7 +315,7 @@ mod test {
 transcript,
 mut proof,
 commitment,
-} = new_encoding_fixture(encoder_secret());
+} = new_encoding_fixture();

 let Opening { idx, .. } = proof.openings.values_mut().next().unwrap();

@@ -322,6 +324,7 @@ mod test {
 let err = proof
 .verify_with_provider(
 &HashProvider::default(),
+&encoder_secret(),
 &commitment,
 transcript.sent(),
 transcript.received(),

@@ -337,7 +340,7 @@ mod test {
 transcript,
 mut proof,
 commitment,
-} = new_encoding_fixture(encoder_secret());
+} = new_encoding_fixture();

 let Opening { blinder, .. } = proof.openings.values_mut().next().unwrap();

@@ -346,6 +349,7 @@ mod test {
 let err = proof
 .verify_with_provider(
 &HashProvider::default(),
+&encoder_secret(),
 &commitment,
 transcript.sent(),
 transcript.received(),
@@ -222,14 +222,12 @@ mod tests {

 let proof = tree.proof([&idx_0, &idx_1].into_iter()).unwrap();

-let commitment = EncodingCommitment {
-root: tree.root(),
-secret: encoder_secret(),
-};
+let commitment = EncodingCommitment { root: tree.root() };

 let (auth_sent, auth_recv) = proof
 .verify_with_provider(
 &HashProvider::default(),
+&encoder_secret(),
 &commitment,
 transcript.sent(),
 transcript.received(),

@@ -260,14 +258,12 @@ mod tests {
 .proof([&idx_0, &idx_1, &idx_2, &idx_3].into_iter())
 .unwrap();

-let commitment = EncodingCommitment {
-root: tree.root(),
-secret: encoder_secret(),
-};
+let commitment = EncodingCommitment { root: tree.root() };

 let (auth_sent, auth_recv) = proof
 .verify_with_provider(
 &HashProvider::default(),
+&encoder_secret(),
 &commitment,
 transcript.sent(),
 transcript.received(),
@@ -10,7 +10,7 @@ use crate::{
 hash::{HashAlgId, HashProvider},
 transcript::{
 commit::{TranscriptCommitment, TranscriptCommitmentKind},
-encoding::{EncodingProof, EncodingProofError, EncodingTree},
+encoding::{EncoderSecret, EncodingProof, EncodingProofError, EncodingTree},
 hash::{hash_plaintext, PlaintextHash, PlaintextHashSecret},
 Direction, PartialTranscript, RangeSet, Transcript, TranscriptSecret,
 },

@@ -22,6 +22,9 @@ const DEFAULT_COMMITMENT_KINDS: &[TranscriptCommitmentKind] = &[
 TranscriptCommitmentKind::Hash {
 alg: HashAlgId::SHA256,
 },
+TranscriptCommitmentKind::Hash {
+alg: HashAlgId::BLAKE3,
+},
 TranscriptCommitmentKind::Encoding,
 ];

@@ -48,6 +51,7 @@ impl TranscriptProof {
 self,
 provider: &HashProvider,
 length: &TranscriptLength,
+encoder_secret: Option<&EncoderSecret>,
 commitments: impl IntoIterator<Item = &'a TranscriptCommitment>,
 ) -> Result<PartialTranscript, TranscriptProofError> {
 let mut encoding_commitment = None;

@@ -83,6 +87,13 @@ impl TranscriptProof {

 // Verify encoding proof.
 if let Some(proof) = self.encoding_proof {
+let secret = encoder_secret.ok_or_else(|| {
+TranscriptProofError::new(
+ErrorKind::Encoding,
+"contains an encoding proof but missing encoder secret",
+)
+})?;
+
 let commitment = encoding_commitment.ok_or_else(|| {
 TranscriptProofError::new(
 ErrorKind::Encoding,

@@ -92,6 +103,7 @@ impl TranscriptProof {

 let (auth_sent, auth_recv) = proof.verify_with_provider(
 provider,
+secret,
 commitment,
 self.transcript.sent_unsafe(),
 self.transcript.received_unsafe(),

@@ -572,7 +584,7 @@ mod tests {
 use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};

 use crate::{
-fixtures::encoding_provider,
+fixtures::{encoder_secret, encoding_provider},
 hash::{Blake3, Blinder, HashAlgId},
 transcript::TranscriptCommitConfigBuilder,
 };

@@ -599,7 +611,12 @@ mod tests {

 let provider = HashProvider::default();
 let err = transcript_proof
-.verify_with_provider(&provider, &transcript.length(), &[])
+.verify_with_provider(
+&provider,
+&transcript.length(),
+Some(&encoder_secret()),
+&[],
+)
 .err()
 .unwrap();

@@ -637,7 +654,9 @@ mod tests {
 }

 #[rstest]
-fn test_reveal_with_hash_commitment() {
+#[case::sha256(HashAlgId::SHA256)]
+#[case::blake3(HashAlgId::BLAKE3)]
+fn test_reveal_with_hash_commitment(#[case] alg: HashAlgId) {
 let mut rng = rand::rngs::StdRng::seed_from_u64(0);
 let provider = HashProvider::default();
 let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);

@@ -645,7 +664,6 @@ mod tests {
 let direction = Direction::Sent;
 let idx = RangeSet::from(0..10);
 let blinder: Blinder = rng.random();
-let alg = HashAlgId::SHA256;
 let hasher = provider.get(&alg).unwrap();

 let commitment = PlaintextHash {

@@ -672,6 +690,7 @@ mod tests {
 .verify_with_provider(
 &provider,
 &transcript.length(),
+None,
 &[TranscriptCommitment::Hash(commitment)],
 )
 .unwrap();

@@ -683,7 +702,9 @@ mod tests {
 }

 #[rstest]
-fn test_reveal_with_inconsistent_hash_commitment() {
+#[case::sha256(HashAlgId::SHA256)]
+#[case::blake3(HashAlgId::BLAKE3)]
+fn test_reveal_with_inconsistent_hash_commitment(#[case] alg: HashAlgId) {
 let mut rng = rand::rngs::StdRng::seed_from_u64(0);
 let provider = HashProvider::default();
 let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);

@@ -691,7 +712,6 @@ mod tests {
 let direction = Direction::Sent;
 let idx = RangeSet::from(0..10);
 let blinder: Blinder = rng.random();
-let alg = HashAlgId::SHA256;
 let hasher = provider.get(&alg).unwrap();

 let commitment = PlaintextHash {

@@ -719,6 +739,7 @@ mod tests {
 .verify_with_provider(
 &provider,
 &transcript.length(),
+None,
 &[TranscriptCommitment::Hash(commitment)],
 )
 .unwrap_err();
@@ -175,7 +175,7 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
 assert!(response.status() == StatusCode::OK);

 // The prover task should be done now, so we can await it.
-let mut prover = prover_task.await??;
+let prover = prover_task.await??;

 // Parse the HTTP transcript.
 let transcript = HttpTranscript::parse(prover.transcript())?;

@@ -217,7 +217,7 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(

 let request_config = builder.build()?;

-let (attestation, secrets) = notarize(&mut prover, &request_config, req_tx, resp_rx).await?;
+let (attestation, secrets) = notarize(prover, &request_config, req_tx, resp_rx).await?;

 // Write the attestation to disk.
 let attestation_path = tlsn_examples::get_file_path(example_type, "attestation");

@@ -238,7 +238,7 @@ async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
 }

 async fn notarize(
-prover: &mut Prover<Committed>,
+mut prover: Prover<Committed>,
 config: &RequestConfig,
 request_tx: Sender<AttestationRequest>,
 attestation_rx: Receiver<Attestation>,

@@ -257,25 +257,27 @@ async fn notarize(
 ..
 } = prover.prove(&disclosure_config).await?;

+let transcript = prover.transcript().clone();
+let tls_transcript = prover.tls_transcript().clone();
+prover.close().await?;
+
 // Build an attestation request.
 let mut builder = AttestationRequest::builder(config);

 builder
 .server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
 .handshake_data(HandshakeData {
-certs: prover
-.tls_transcript()
+certs: tls_transcript
 .server_cert_chain()
 .expect("server cert chain is present")
 .to_vec(),
-sig: prover
-.tls_transcript()
+sig: tls_transcript
 .server_signature()
 .expect("server signature is present")
 .clone(),
-binding: prover.tls_transcript().certificate_binding().clone(),
+binding: tls_transcript.certificate_binding().clone(),
 })
-.transcript(prover.transcript().clone())
+.transcript(transcript)
 .transcript_commitments(transcript_secrets, transcript_commitments);

 let (request, secrets) = builder.build(&CryptoProvider::default())?;

@@ -327,6 +329,7 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(

 let VerifierOutput {
 transcript_commitments,
+encoder_secret,
 ..
 } = verifier.verify(&VerifyConfig::default()).await?;

@@ -385,6 +388,10 @@ async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
 .server_ephemeral_key(tls_transcript.server_ephemeral_key().clone())
 .transcript_commitments(transcript_commitments);

+if let Some(encoder_secret) = encoder_secret {
+builder.encoder_secret(encoder_secret);
+}
+
 let attestation = builder.build(&provider)?;

 // Send attestation to prover.
@@ -1,6 +1,6 @@
 [package]
 name = "tlsn-formats"
-version = "0.1.0-alpha.13-pre"
+version = "0.1.0-alpha.13"
 edition = "2021"

 [lints]
@@ -3,7 +3,19 @@
 # Ensure the script runs in the folder that contains this script
 cd "$(dirname "$0")"

-cargo build --release --package tlsn-harness-runner --package tlsn-harness-executor --package tlsn-server-fixture --package tlsn-harness-plot
+RUNNER_FEATURES=""
+EXECUTOR_FEATURES=""
+
+if [ "$1" = "debug" ]; then
+RUNNER_FEATURES="--features debug"
+EXECUTOR_FEATURES="--no-default-features --features debug"
+fi
+
+cargo build --release \
+--package tlsn-harness-runner $RUNNER_FEATURES \
+--package tlsn-harness-executor $EXECUTOR_FEATURES \
+--package tlsn-server-fixture \
+--package tlsn-harness-plot

 mkdir -p bin
@@ -1,10 +1,14 @@
 [target.wasm32-unknown-unknown]
 rustflags = [
-"-C",
-"target-feature=+atomics,+bulk-memory,+mutable-globals,+simd128",
-"-C",
+"-Ctarget-feature=+atomics,+bulk-memory,+mutable-globals,+simd128",
+"-Clink-arg=--shared-memory",
 # 4GB
-"link-arg=--max-memory=4294967296",
+"-Clink-arg=--max-memory=4294967296",
+"-Clink-arg=--import-memory",
+"-Clink-arg=--export=__wasm_init_tls",
+"-Clink-arg=--export=__tls_size",
+"-Clink-arg=--export=__tls_align",
+"-Clink-arg=--export=__tls_base",
 "--cfg",
 'getrandom_backend="wasm_js"',
 ]
@@ -4,6 +4,12 @@ version = "0.1.0"
 edition = "2024"
 publish = false

+[features]
+# Disable tracing events as a workaround for issue 959.
+default = ["tracing/release_max_level_off"]
+# Used to debug the executor itself.
+debug = []
+
 [lib]
 name = "harness_executor"
 crate-type = ["cdylib", "rlib"]

@@ -28,8 +34,7 @@ tokio = { workspace = true, features = ["full"] }
 tokio-util = { workspace = true, features = ["compat"] }

 [target.'cfg(target_arch = "wasm32")'.dependencies]
-# Disable tracing events as a workaround for issue 959.
-tracing = { workspace = true, features = ["release_max_level_off"] }
+tracing = { workspace = true }
 wasm-bindgen = { workspace = true }
 tlsn-wasm = { workspace = true }
 js-sys = { workspace = true }
@@ -1,6 +1,8 @@
 FROM rust AS builder
 WORKDIR /usr/src/tlsn

+ARG DEBUG=0
+
 RUN \
 rustup update; \
 apt update && apt install -y clang; \

@@ -10,7 +12,12 @@ RUN \
 COPY . .
 RUN \
 cd crates/harness; \
-./build.sh;
+# Pass `--build-arg DEBUG=1` to `docker build` if you need to debug the harness.
+if [ "$DEBUG" = "1" ]; then \
+./build.sh debug; \
+else \
+./build.sh; \
+fi

 FROM debian:latest
@@ -7,6 +7,10 @@ publish = false
 [lib]
 name = "harness_runner"

+[features]
+# Used to debug the runner itself.
+debug = []
+
 [dependencies]
 tlsn-harness-core = { workspace = true }
 tlsn-server-fixture = { workspace = true }
crates/harness/runner/src/debug_prelude.rs (new file, 17 lines)

@@ -0,0 +1,17 @@
+#![allow(unused_imports)]
+pub use futures::FutureExt;
+
+pub use tracing::{debug, error};
+
+pub use chromiumoxide::{
+Browser, Page,
+cdp::{
+browser_protocol::{
+log::{EventEntryAdded, LogEntryLevel},
+network::{EnableParams, SetCacheDisabledParams},
+page::ReloadParams,
+},
+js_protocol::runtime::EventExceptionThrown,
+},
+handler::HandlerConfig,
+};
@@ -21,6 +21,9 @@ use harness_core::{

 use crate::{Target, network::Namespace, rpc::Rpc};

+#[cfg(feature = "debug")]
+use crate::debug_prelude::*;
+
 pub struct Executor {
 ns: Namespace,
 config: ExecutorConfig,

@@ -66,20 +69,34 @@ impl Executor {
 Id::One => self.config.network().rpc_1,
 };

-let process = duct::cmd!(
-"sudo",
-"ip",
-"netns",
-"exec",
-self.ns.name(),
-"env",
+let mut args = vec![
+"ip".into(),
+"netns".into(),
+"exec".into(),
+self.ns.name().into(),
+"env".into(),
 format!("CONFIG={}", serde_json::to_string(&self.config)?),
-executor_path
-)
-.stdout_capture()
-.stderr_capture()
-.unchecked()
-.start()?;
+];
+
+if cfg!(feature = "debug") {
+let level = &std::env::var("RUST_LOG").unwrap_or("debug".to_string());
+args.push("env".into());
+args.push(format!("RUST_LOG={}", level));
+};
+
+args.push(executor_path.to_str().expect("valid path").into());
+
+let process = duct::cmd("sudo", args);
+
+let process = if !cfg!(feature = "debug") {
+process
+.stdout_capture()
+.stderr_capture()
+.unchecked()
+.start()?
+} else {
+process.unchecked().start()?
+};

 let rpc = Rpc::new_native(rpc_addr).await?;

@@ -119,10 +136,13 @@ impl Executor {
 "--no-sandbox",
 format!("--user-data-dir={tmp}"),
 format!("--allowed-ips=10.250.0.1"),
-)
-.stderr_capture()
-.stdout_capture()
-.start()?;
+);
+
+let process = if !cfg!(feature = "debug") {
+process.stderr_capture().stdout_capture().start()?
+} else {
+process.start()?
+};

 const TIMEOUT: usize = 10000;
 const DELAY: usize = 100;

@@ -171,6 +191,38 @@ impl Executor {
 .new_page(&format!("http://{wasm_addr}:{wasm_port}/index.html"))
 .await?;

+#[cfg(feature = "debug")]
+tokio::spawn(register_listeners(page.clone()).await?);
+
+#[cfg(feature = "debug")]
+async fn register_listeners(page: Page) -> Result<impl Future<Output = ()>> {
+let mut logs = page.event_listener::<EventEntryAdded>().await?.fuse();
+let mut exceptions =
+page.event_listener::<EventExceptionThrown>().await?.fuse();
+
+Ok(futures::future::join(
+async move {
+while let Some(event) = logs.next().await {
+let entry = &event.entry;
+match entry.level {
+LogEntryLevel::Error => {
+error!("{:?}", entry);
+}
+_ => {
+debug!("{:?}: {}", entry.timestamp, entry.text);
+}
+}
+}
+},
+async move {
+while let Some(event) = exceptions.next().await {
+error!("{:?}", event);
+}
+},
+)
+.map(|_| ()))
+}
+
 page.execute(EnableParams::builder().build()).await?;
 page.execute(SetCacheDisabledParams {
 cache_disabled: true,
|
||||
pub mod wasm_server;
|
||||
mod ws_proxy;
|
||||
|
||||
#[cfg(feature = "debug")]
|
||||
mod debug_prelude;
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Result;
|
||||
@@ -24,6 +27,9 @@ use cli::{Cli, Command};
|
||||
use executor::Executor;
|
||||
use server_fixture::ServerFixture;
|
||||
|
||||
#[cfg(feature = "debug")]
|
||||
use crate::debug_prelude::*;
|
||||
|
||||
use crate::{cli::Route, network::Network, wasm_server::WasmServer, ws_proxy::WsProxy};
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum)]
|
||||
@@ -113,6 +119,9 @@ impl Runner {
|
||||
}
|
||||
|
||||
pub async fn main() -> Result<()> {
|
||||
#[cfg(feature = "debug")]
|
||||
tracing_subscriber::fmt::init();
|
||||
|
||||
let cli = Cli::parse();
|
||||
let mut runner = Runner::new(&cli)?;
|
||||
|
||||
@@ -227,6 +236,9 @@ pub async fn main() -> Result<()> {
|
||||
// Wait for the network to stabilize
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
#[cfg(feature = "debug")]
|
||||
debug!("Starting bench in group {:?}", config.group);
|
||||
|
||||
let (output, _) = tokio::try_join!(
|
||||
runner.exec_p.bench(BenchCmd {
|
||||
config: config.clone(),
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "TLSNotary MPC-TLS protocol"
 keywords = ["tls", "mpc", "2pc"]
 categories = ["cryptography"]
 license = "MIT OR Apache-2.0"
-version = "0.1.0-alpha.13-pre"
+version = "0.1.0-alpha.13"
 edition = "2021"

 [lints]
@@ -41,6 +41,7 @@ use tls_core::{
 message::{OpaqueMessage, PlainMessage},
 },
 suites::SupportedCipherSuite,
+verify::verify_sig_determine_alg,
 };
 use tlsn_core::{
 connection::{CertBinding, CertBindingV1_2, ServerSignature, TlsVersion, VerifyData},

@@ -327,12 +328,20 @@ impl MpcTlsLeader {
 .map(|cert| CertificateDer(cert.0.clone()))
 .collect();

+let mut sig_msg = Vec::new();
+sig_msg.extend_from_slice(&client_random.0);
+sig_msg.extend_from_slice(&server_random.0);
+sig_msg.extend_from_slice(server_kx_details.kx_params());
+
+let server_signature_alg = verify_sig_determine_alg(
+&server_cert_details.cert_chain()[0],
+&sig_msg,
+server_kx_details.kx_sig(),
+)
+.expect("only supported signature should have been accepted");
+
 let server_signature = ServerSignature {
-scheme: server_kx_details
-.kx_sig()
-.scheme
-.try_into()
-.expect("only supported signature scheme should have been accepted"),
+alg: server_signature_alg.into(),
 sig: server_kx_details.kx_sig().sig.0.clone(),
 };
@@ -5,7 +5,7 @@ description = "A TLS backend trait for TLSNotary"
 keywords = ["tls", "mpc", "2pc"]
 categories = ["cryptography"]
 license = "MIT OR Apache-2.0"
-version = "0.1.0-alpha.13-pre"
+version = "0.1.0-alpha.13"
 edition = "2021"

 [lints]

@@ -5,7 +5,7 @@ description = "An async TLS client for TLSNotary"
 keywords = ["tls", "mpc", "2pc", "client", "async"]
 categories = ["cryptography"]
 license = "MIT OR Apache-2.0"
-version = "0.1.0-alpha.13-pre"
+version = "0.1.0-alpha.13"
 edition = "2021"

 [lints]

@@ -5,7 +5,7 @@ description = "A TLS client for TLSNotary"
 keywords = ["tls", "mpc", "2pc", "client", "sync"]
 categories = ["cryptography"]
 license = "Apache-2.0 OR ISC OR MIT"
-version = "0.1.0-alpha.13-pre"
+version = "0.1.0-alpha.13"
 edition = "2021"
 autobenches = false

@@ -5,7 +5,7 @@ description = "Cryptographic operations for the TLSNotary TLS client"
 keywords = ["tls", "mpc", "2pc"]
 categories = ["cryptography"]
 license = "Apache-2.0 OR ISC OR MIT"
-version = "0.1.0-alpha.13-pre"
+version = "0.1.0-alpha.13"
 edition = "2021"

 [lints]
@@ -465,19 +465,81 @@ fn convert_scheme(scheme: SignatureScheme) -> Result<SignatureAlgorithms, Error>
 }
 }

+/// Signature algorithm.
+#[derive(Debug, Clone, Copy, PartialEq)]
+#[allow(non_camel_case_types)]
+pub enum SignatureAlgorithm {
+ECDSA_NISTP256_SHA256,
+ECDSA_NISTP256_SHA384,
+ECDSA_NISTP384_SHA256,
+ECDSA_NISTP384_SHA384,
+ED25519,
+RSA_PKCS1_2048_8192_SHA256,
+RSA_PKCS1_2048_8192_SHA384,
+RSA_PKCS1_2048_8192_SHA512,
+RSA_PSS_2048_8192_SHA256_LEGACY_KEY,
+RSA_PSS_2048_8192_SHA384_LEGACY_KEY,
+RSA_PSS_2048_8192_SHA512_LEGACY_KEY,
+}
+
+impl SignatureAlgorithm {
+pub fn from_alg(alg: &dyn pki_types::SignatureVerificationAlgorithm) -> Self {
+let id = alg.signature_alg_id();
+if id == webpki::ring::ECDSA_P256_SHA256.signature_alg_id() {
+SignatureAlgorithm::ECDSA_NISTP256_SHA256
+} else if id == webpki::ring::ECDSA_P256_SHA384.signature_alg_id() {
+SignatureAlgorithm::ECDSA_NISTP256_SHA384
+} else if id == webpki::ring::ECDSA_P384_SHA256.signature_alg_id() {
+SignatureAlgorithm::ECDSA_NISTP384_SHA256
+} else if id == webpki::ring::ECDSA_P384_SHA384.signature_alg_id() {
+SignatureAlgorithm::ECDSA_NISTP384_SHA384
+} else if id == webpki::ring::ED25519.signature_alg_id() {
+SignatureAlgorithm::ED25519
+} else if id == webpki::ring::RSA_PKCS1_2048_8192_SHA256.signature_alg_id() {
+SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256
+} else if id == webpki::ring::RSA_PKCS1_2048_8192_SHA384.signature_alg_id() {
+SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384
+} else if id == webpki::ring::RSA_PKCS1_2048_8192_SHA512.signature_alg_id() {
+SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512
+} else if id == webpki::ring::RSA_PSS_2048_8192_SHA256_LEGACY_KEY.signature_alg_id() {
+SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY
+} else if id == webpki::ring::RSA_PSS_2048_8192_SHA384_LEGACY_KEY.signature_alg_id() {
+SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY
+} else if id == webpki::ring::RSA_PSS_2048_8192_SHA512_LEGACY_KEY.signature_alg_id() {
+SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY
+} else {
+unreachable!()
+}
+}
+}
+
+/// Verify the signature and return the algorithm which passed verification.
+pub fn verify_sig_determine_alg(
+cert: &Certificate,
+message: &[u8],
+dss: &DigitallySignedStruct,
+) -> Result<SignatureAlgorithm, Error> {
+let cert = pki_types::CertificateDer::from(cert.0.as_slice());
+let cert = webpki::EndEntityCert::try_from(&cert).map_err(pki_error)?;
+
+verify_sig_using_any_alg(&cert, convert_scheme(dss.scheme)?, message, &dss.sig.0)
+.map_err(pki_error)
+}
+
 fn verify_sig_using_any_alg(
 cert: &webpki::EndEntityCert,
 algs: SignatureAlgorithms,
 message: &[u8],
 sig: &[u8],
-) -> Result<(), webpki::Error> {
+) -> Result<SignatureAlgorithm, webpki::Error> {
 // TLS doesn't itself give us enough info to map to a single
 // webpki::SignatureAlgorithm. Therefore, convert_algs maps to several and
 // we try them all.
 for alg in algs {
 match cert.verify_signature(*alg, message, sig) {
+Ok(_) => return Ok(SignatureAlgorithm::from_alg(*alg)),
 Err(webpki::Error::UnsupportedSignatureAlgorithmForPublicKeyContext(_)) => continue,
-res => return res,
+Err(e) => return Err(e),
 }
 }
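The mpc-tls leader hunk earlier in this diff is the intended caller of verify_sig_determine_alg; restated here as a compact sketch so the two halves of the change read together. Variable names are placeholders, and the input types (Certificate, DigitallySignedStruct) are the tls-core ones already used above.

// Sketch: determine which algorithm actually verified the key-exchange signature,
// then record it in the serializable tlsn-core ServerSignature via the `From` impl
// added earlier in this diff.
let alg = verify_sig_determine_alg(cert, &sig_msg, kx_sig)
    .expect("only supported signatures should have been accepted");

let server_signature = ServerSignature {
    alg: alg.into(), // tls_core::verify::SignatureAlgorithm -> tlsn-core SignatureAlgorithm
    sig: kx_sig.sig.0.clone(),
};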
@@ -4,7 +4,7 @@ authors = ["TLSNotary Team"]
 keywords = ["tls", "mpc", "2pc", "prover"]
 categories = ["cryptography"]
 license = "MIT OR Apache-2.0"
-version = "0.1.0-alpha.13-pre"
+version = "0.1.0-alpha.13"
 edition = "2024"

 [lints]

@@ -31,6 +31,7 @@ web-spawn = { workspace = true, optional = true }

 mpz-common = { workspace = true }
 mpz-core = { workspace = true }
+mpz-circuits = { workspace = true }
 mpz-garble = { workspace = true }
 mpz-garble-core = { workspace = true }
 mpz-hash = { workspace = true }

@@ -40,6 +41,8 @@ mpz-ot = { workspace = true }
 mpz-vm-core = { workspace = true }
 mpz-zk = { workspace = true }

+aes = { workspace = true }
+ctr = { workspace = true }
 derive_builder = { workspace = true }
 futures = { workspace = true }
 opaque-debug = { workspace = true }

@@ -57,7 +60,9 @@ rangeset = { workspace = true }
 webpki-roots = { workspace = true }

 [dev-dependencies]
+mpz-ideal-vm = { workspace = true }
 rstest = { workspace = true }
+tlsn-core = { workspace = true, features = ["fixtures"] }
 tlsn-server-fixture = { workspace = true }
 tlsn-server-fixture-certs = { workspace = true }
 tokio = { workspace = true, features = ["full"] }
@@ -1,109 +0,0 @@
-//! Plaintext commitment and proof of encryption.
-
-pub(crate) mod hash;
-pub(crate) mod transcript;
-
-use mpz_core::bitvec::BitVec;
-use mpz_memory_core::{
-DecodeFutureTyped, Vector,
-binary::{Binary, U8},
-};
-use mpz_vm_core::{Vm, prelude::*};
-use tlsn_core::transcript::Record;
-
-use crate::{
-Role,
-zk_aes_ctr::{ZkAesCtr, ZkAesCtrError},
-};
-
-/// Commits the plaintext of the provided records, returning a proof of
-/// encryption.
-///
-/// Writes the plaintext VM reference to the provided records.
-pub(crate) fn commit_records<'record>(
-vm: &mut dyn Vm<Binary>,
-aes: &mut ZkAesCtr,
-records: impl IntoIterator<Item = &'record Record>,
-) -> Result<(Vec<Vector<U8>>, RecordProof), RecordProofError> {
-let mut plaintexts = Vec::new();
-let mut ciphertexts = Vec::new();
-for record in records {
-let (plaintext_ref, ciphertext_ref) = aes
-.encrypt(vm, record.explicit_nonce.clone(), record.ciphertext.len())
-.map_err(ErrorRepr::Aes)?;
-
-if let Role::Prover = aes.role() {
-let Some(plaintext) = record.plaintext.clone() else {
-return Err(ErrorRepr::MissingPlaintext.into());
-};
-
-vm.assign(plaintext_ref, plaintext)
-.map_err(RecordProofError::vm)?;
-}
-vm.commit(plaintext_ref).map_err(RecordProofError::vm)?;
-
-let ciphertext = vm.decode(ciphertext_ref).map_err(RecordProofError::vm)?;
-
-plaintexts.push(plaintext_ref);
-ciphertexts.push((ciphertext, record.ciphertext.clone()));
-}
-
-Ok((plaintexts, RecordProof { ciphertexts }))
-}
-
-/// Proof of encryption.
-#[derive(Debug)]
-#[must_use]
-#[allow(clippy::type_complexity)]
-pub(crate) struct RecordProof {
-ciphertexts: Vec<(DecodeFutureTyped<BitVec, Vec<u8>>, Vec<u8>)>,
-}
-
-impl RecordProof {
-/// Verifies the proof.
-pub(crate) fn verify(self) -> Result<(), RecordProofError> {
-let Self { ciphertexts } = self;
-
-for (mut ciphertext, expected) in ciphertexts {
-let ciphertext = ciphertext
-.try_recv()
-.map_err(RecordProofError::vm)?
-.ok_or_else(|| ErrorRepr::NotDecoded)?;
-
-if ciphertext != expected {
-return Err(ErrorRepr::InvalidCiphertext.into());
-}
-}
-
-Ok(())
-}
-}
-
-/// Error for [`RecordProof`].
-#[derive(Debug, thiserror::Error)]
-#[error(transparent)]
-pub(crate) struct RecordProofError(#[from] ErrorRepr);
-
-impl RecordProofError {
-fn vm<E>(err: E) -> Self
-where
-E: Into<Box<dyn std::error::Error + Send + Sync + 'static>>,
-{
-Self(ErrorRepr::Vm(err.into()))
-}
-}
-
-#[derive(Debug, thiserror::Error)]
-#[error("record proof error: {0}")]
-enum ErrorRepr {
-#[error("VM error: {0}")]
-Vm(Box<dyn std::error::Error + Send + Sync + 'static>),
-#[error("zk aes error: {0}")]
-Aes(ZkAesCtrError),
-#[error("plaintext is missing")]
-MissingPlaintext,
-#[error("ciphertext was not decoded")]
-NotDecoded,
-#[error("ciphertext does not match expected")]
-InvalidCiphertext,
-}
@@ -1,211 +0,0 @@
|
||||
use mpz_memory_core::{
|
||||
MemoryExt, Vector,
|
||||
binary::{Binary, U8},
|
||||
};
|
||||
use mpz_vm_core::{Vm, VmError};
|
||||
use rangeset::{Intersection, RangeSet};
|
||||
use tlsn_core::transcript::{Direction, PartialTranscript};
|
||||
|
||||
/// References to the application plaintext in the transcript.
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub(crate) struct TranscriptRefs {
|
||||
sent: Vec<Vector<U8>>,
|
||||
recv: Vec<Vector<U8>>,
|
||||
}
|
||||
|
||||
impl TranscriptRefs {
|
||||
pub(crate) fn new(sent: Vec<Vector<U8>>, recv: Vec<Vector<U8>>) -> Self {
|
||||
Self { sent, recv }
|
||||
}
|
||||
|
||||
/// Returns the sent plaintext references.
|
||||
pub(crate) fn sent(&self) -> &[Vector<U8>] {
|
||||
&self.sent
|
||||
}
|
||||
|
||||
/// Returns the received plaintext references.
|
||||
pub(crate) fn recv(&self) -> &[Vector<U8>] {
|
||||
&self.recv
|
||||
}
|
||||
|
||||
/// Returns the transcript lengths.
|
||||
pub(crate) fn len(&self) -> (usize, usize) {
|
||||
let sent = self.sent.iter().map(|v| v.len()).sum();
|
||||
let recv = self.recv.iter().map(|v| v.len()).sum();
|
||||
|
||||
(sent, recv)
|
||||
}
|
||||
|
||||
/// Returns VM references for the given direction and index, otherwise
|
||||
/// `None` if the index is out of bounds.
|
||||
pub(crate) fn get(
|
||||
&self,
|
||||
direction: Direction,
|
||||
idx: &RangeSet<usize>,
|
||||
) -> Option<Vec<Vector<U8>>> {
|
||||
if idx.is_empty() {
|
||||
return Some(Vec::new());
|
||||
}
|
||||
|
||||
let refs = match direction {
|
||||
Direction::Sent => &self.sent,
|
||||
Direction::Received => &self.recv,
|
||||
};
|
||||
|
||||
// Computes the transcript range for each reference.
|
||||
let mut start = 0;
|
||||
let mut slice_iter = refs.iter().map(move |slice| {
|
||||
let out = (slice, start..start + slice.len());
|
||||
start += slice.len();
|
||||
out
|
||||
});
|
||||
|
||||
let mut slices = Vec::new();
|
||||
let (mut slice, mut slice_range) = slice_iter.next()?;
|
||||
for range in idx.iter_ranges() {
|
||||
loop {
|
||||
if let Some(intersection) = slice_range.intersection(&range) {
|
||||
let start = intersection.start - slice_range.start;
|
||||
let end = intersection.end - slice_range.start;
|
||||
slices.push(slice.get(start..end).expect("range should be in bounds"));
|
||||
}
|
||||
|
||||
// Proceed to next range if the current slice extends beyond. Otherwise, proceed
|
||||
// to the next slice.
|
||||
if range.end <= slice_range.end {
|
||||
break;
|
||||
} else {
|
||||
(slice, slice_range) = slice_iter.next()?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Some(slices)
|
||||
}
|
||||
}
|
||||
|
||||
/// Decodes the transcript.
|
||||
pub(crate) fn decode_transcript(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
sent: &RangeSet<usize>,
|
||||
recv: &RangeSet<usize>,
|
||||
refs: &TranscriptRefs,
|
||||
) -> Result<(), VmError> {
|
||||
let sent_refs = refs.get(Direction::Sent, sent).expect("index is in bounds");
|
||||
let recv_refs = refs
|
||||
.get(Direction::Received, recv)
|
||||
.expect("index is in bounds");
|
||||
|
||||
for slice in sent_refs.into_iter().chain(recv_refs) {
|
||||
// Drop the future, we don't need it.
|
||||
drop(vm.decode(slice)?);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Verifies a partial transcript.
|
||||
pub(crate) fn verify_transcript(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
transcript: &PartialTranscript,
|
||||
refs: &TranscriptRefs,
|
||||
) -> Result<(), InconsistentTranscript> {
|
||||
let sent_refs = refs
|
||||
.get(Direction::Sent, transcript.sent_authed())
|
||||
.expect("index is in bounds");
|
||||
let recv_refs = refs
|
||||
.get(Direction::Received, transcript.received_authed())
|
||||
.expect("index is in bounds");
|
||||
|
||||
let mut authenticated_data = Vec::new();
|
||||
for data in sent_refs.into_iter().chain(recv_refs) {
|
||||
let plaintext = vm
|
||||
.get(data)
|
||||
.expect("reference is valid")
|
||||
.expect("plaintext is decoded");
|
||||
authenticated_data.extend_from_slice(&plaintext);
|
||||
}
|
||||
|
||||
let mut purported_data = Vec::with_capacity(authenticated_data.len());
|
||||
for range in transcript.sent_authed().iter_ranges() {
|
||||
purported_data.extend_from_slice(&transcript.sent_unsafe()[range]);
|
||||
}
|
||||
|
||||
for range in transcript.received_authed().iter_ranges() {
|
||||
purported_data.extend_from_slice(&transcript.received_unsafe()[range]);
|
||||
}
|
||||
|
||||
if purported_data != authenticated_data {
|
||||
return Err(InconsistentTranscript {});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Error for [`verify_transcript`].
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("inconsistent transcript")]
|
||||
pub(crate) struct InconsistentTranscript {}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::TranscriptRefs;
|
||||
use mpz_memory_core::{FromRaw, Slice, Vector, binary::U8};
|
||||
use rangeset::RangeSet;
|
||||
use std::ops::Range;
|
||||
use tlsn_core::transcript::Direction;
|
||||
|
||||
// TRANSCRIPT_REFS:
|
||||
//
|
||||
// 48..96 -> 6 slots
|
||||
// 112..176 -> 8 slots
|
||||
// 240..288 -> 6 slots
|
||||
// 352..392 -> 5 slots
|
||||
// 440..480 -> 5 slots
|
||||
const TRANSCRIPT_REFS: &[Range<usize>] = &[48..96, 112..176, 240..288, 352..392, 440..480];
|
||||
|
||||
const IDXS: &[Range<usize>] = &[0..4, 5..10, 14..16, 16..28];
|
||||
|
||||
// 1. Take slots 0..4, 4 slots -> 48..80 (4)
|
||||
// 2. Take slots 5..10, 5 slots -> 88..96 (1) + 112..144 (4)
|
||||
// 3. Take slots 14..16, 2 slots -> 240..256 (2)
|
||||
// 4. Take slots 16..28, 12 slots -> 256..288 (4) + 352..392 (5) + 440..464 (3)
|
||||
//
|
||||
// 5. Merge slots 240..256 and 256..288 => 240..288 and get EXPECTED_REFS
|
||||
const EXPECTED_REFS: &[Range<usize>] =
|
||||
&[48..80, 88..96, 112..144, 240..288, 352..392, 440..464];
|
||||
|
||||
#[test]
|
||||
fn test_transcript_refs_get() {
|
||||
let transcript_refs: Vec<Vector<U8>> = TRANSCRIPT_REFS
|
||||
.iter()
|
||||
.cloned()
|
||||
.map(|range| Vector::from_raw(Slice::from_range_unchecked(range)))
|
||||
.collect();
|
||||
|
||||
let transcript_refs = TranscriptRefs {
|
||||
sent: transcript_refs.clone(),
|
||||
recv: transcript_refs,
|
||||
};
|
||||
|
||||
let vm_refs = transcript_refs
|
||||
.get(Direction::Sent, &RangeSet::from(IDXS))
|
||||
.unwrap();
|
||||
|
||||
let expected_refs: Vec<Vector<U8>> = EXPECTED_REFS
|
||||
.iter()
|
||||
.cloned()
|
||||
.map(|range| Vector::from_raw(Slice::from_range_unchecked(range)))
|
||||
.collect();
|
||||
|
||||
assert_eq!(
|
||||
vm_refs.len(),
|
||||
expected_refs.len(),
|
||||
"Length of actual and expected refs are not equal"
|
||||
);
|
||||
|
||||
for (&expected, actual) in expected_refs.iter().zip(vm_refs) {
|
||||
assert_eq!(expected, actual);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,249 +0,0 @@
|
||||
//! Encoding commitment protocol.
|
||||
|
||||
use std::ops::Range;
|
||||
|
||||
use mpz_common::Context;
|
||||
use mpz_memory_core::{
|
||||
Vector,
|
||||
binary::U8,
|
||||
correlated::{Delta, Key, Mac},
|
||||
};
|
||||
use rand::Rng;
|
||||
use rangeset::RangeSet;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serio::{SinkExt, stream::IoStreamExt};
|
||||
use tlsn_core::{
|
||||
hash::HashAlgorithm,
|
||||
transcript::{
|
||||
Direction,
|
||||
encoding::{
|
||||
Encoder, EncoderSecret, EncodingCommitment, EncodingProvider, EncodingProviderError,
|
||||
EncodingTree, EncodingTreeError, new_encoder,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
use crate::commit::transcript::TranscriptRefs;
|
||||
|
||||
/// Bytes of encoding, per byte.
|
||||
const ENCODING_SIZE: usize = 128;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
struct Encodings {
|
||||
sent: Vec<u8>,
|
||||
recv: Vec<u8>,
|
||||
}
|
||||
|
||||
/// Transfers the encodings using the provided seed and keys.
|
||||
///
|
||||
/// The keys must be consistent with the global delta used in the encodings.
|
||||
pub(crate) async fn transfer<'a>(
|
||||
ctx: &mut Context,
|
||||
refs: &TranscriptRefs,
|
||||
delta: &Delta,
|
||||
f: impl Fn(Vector<U8>) -> &'a [Key],
|
||||
) -> Result<EncodingCommitment, EncodingError> {
|
||||
let secret = EncoderSecret::new(rand::rng().random(), delta.as_block().to_bytes());
|
||||
let encoder = new_encoder(&secret);
|
||||
|
||||
let sent_keys: Vec<u8> = refs
|
||||
.sent()
|
||||
.iter()
|
||||
.copied()
|
||||
.flat_map(&f)
|
||||
.flat_map(|key| key.as_block().as_bytes())
|
||||
.copied()
|
||||
.collect();
|
||||
let recv_keys: Vec<u8> = refs
|
||||
.recv()
|
||||
.iter()
|
||||
.copied()
|
||||
.flat_map(&f)
|
||||
.flat_map(|key| key.as_block().as_bytes())
|
||||
.copied()
|
||||
.collect();
|
||||
|
||||
assert_eq!(sent_keys.len() % ENCODING_SIZE, 0);
|
||||
assert_eq!(recv_keys.len() % ENCODING_SIZE, 0);
|
||||
|
||||
let mut sent_encoding = Vec::with_capacity(sent_keys.len());
|
||||
let mut recv_encoding = Vec::with_capacity(recv_keys.len());
|
||||
|
||||
encoder.encode_range(
|
||||
Direction::Sent,
|
||||
0..sent_keys.len() / ENCODING_SIZE,
|
||||
&mut sent_encoding,
|
||||
);
|
||||
encoder.encode_range(
|
||||
Direction::Received,
|
||||
0..recv_keys.len() / ENCODING_SIZE,
|
||||
&mut recv_encoding,
|
||||
);
|
||||
|
||||
sent_encoding
|
||||
.iter_mut()
|
||||
.zip(sent_keys)
|
||||
.for_each(|(enc, key)| *enc ^= key);
|
||||
recv_encoding
|
||||
.iter_mut()
|
||||
.zip(recv_keys)
|
||||
.for_each(|(enc, key)| *enc ^= key);
|
||||
|
||||
// Set frame limit and add some extra bytes cushion room.
|
||||
let (sent, recv) = refs.len();
|
||||
let frame_limit = ENCODING_SIZE * (sent + recv) + ctx.io().limit();
|
||||
|
||||
ctx.io_mut()
|
||||
.with_limit(frame_limit)
|
||||
.send(Encodings {
|
||||
sent: sent_encoding,
|
||||
recv: recv_encoding,
|
||||
})
|
||||
.await?;
|
||||
|
||||
let root = ctx.io_mut().expect_next().await?;
|
||||
ctx.io_mut().send(secret.clone()).await?;
|
||||
|
||||
Ok(EncodingCommitment {
|
||||
root,
|
||||
secret: secret.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Receives the encodings using the provided MACs.
|
||||
///
|
||||
/// The MACs must be consistent with the global delta used in the encodings.
|
||||
pub(crate) async fn receive<'a>(
|
||||
ctx: &mut Context,
|
||||
hasher: &(dyn HashAlgorithm + Send + Sync),
|
||||
refs: &TranscriptRefs,
|
||||
f: impl Fn(Vector<U8>) -> &'a [Mac],
|
||||
idxs: impl IntoIterator<Item = &(Direction, RangeSet<usize>)>,
|
||||
) -> Result<(EncodingCommitment, EncodingTree), EncodingError> {
|
||||
// Set frame limit and add some extra bytes cushion room.
|
||||
let (sent, recv) = refs.len();
|
||||
let frame_limit = ENCODING_SIZE * (sent + recv) + ctx.io().limit();
|
||||
|
||||
let Encodings { mut sent, mut recv } =
|
||||
ctx.io_mut().with_limit(frame_limit).expect_next().await?;
|
||||
|
||||
let sent_macs: Vec<u8> = refs
|
||||
.sent()
|
||||
.iter()
|
||||
.copied()
|
||||
.flat_map(&f)
|
||||
.flat_map(|mac| mac.as_bytes())
|
||||
.copied()
|
||||
.collect();
|
||||
let recv_macs: Vec<u8> = refs
|
||||
.recv()
|
||||
.iter()
|
||||
.copied()
|
||||
.flat_map(&f)
|
||||
.flat_map(|mac| mac.as_bytes())
|
||||
.copied()
|
||||
.collect();
|
||||
|
||||
assert_eq!(sent_macs.len() % ENCODING_SIZE, 0);
|
||||
assert_eq!(recv_macs.len() % ENCODING_SIZE, 0);
|
||||
|
||||
if sent.len() != sent_macs.len() {
|
||||
return Err(ErrorRepr::IncorrectMacCount {
|
||||
direction: Direction::Sent,
|
||||
expected: sent_macs.len(),
|
||||
got: sent.len(),
|
||||
}
|
||||
.into());
|
||||
}
|
||||
|
||||
if recv.len() != recv_macs.len() {
|
||||
return Err(ErrorRepr::IncorrectMacCount {
|
||||
direction: Direction::Received,
|
||||
expected: recv_macs.len(),
|
||||
got: recv.len(),
|
||||
}
|
||||
.into());
|
||||
}
|
||||
|
||||
sent.iter_mut()
|
||||
.zip(sent_macs)
|
||||
.for_each(|(enc, mac)| *enc ^= mac);
|
||||
recv.iter_mut()
|
||||
.zip(recv_macs)
|
||||
.for_each(|(enc, mac)| *enc ^= mac);
|
||||
|
||||
let provider = Provider { sent, recv };
|
||||
|
||||
let tree = EncodingTree::new(hasher, idxs, &provider)?;
|
||||
let root = tree.root();
|
||||
|
||||
ctx.io_mut().send(root.clone()).await?;
|
||||
let secret = ctx.io_mut().expect_next().await?;
|
||||
|
||||
let commitment = EncodingCommitment { root, secret };
|
||||
|
||||
Ok((commitment, tree))
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct Provider {
|
||||
sent: Vec<u8>,
|
||||
recv: Vec<u8>,
|
||||
}
|
||||
|
||||
impl EncodingProvider for Provider {
|
||||
fn provide_encoding(
|
||||
&self,
|
||||
direction: Direction,
|
||||
range: Range<usize>,
|
||||
dest: &mut Vec<u8>,
|
||||
) -> Result<(), EncodingProviderError> {
|
||||
let encodings = match direction {
|
||||
Direction::Sent => &self.sent,
|
||||
Direction::Received => &self.recv,
|
||||
};
|
||||
|
||||
let start = range.start * ENCODING_SIZE;
|
||||
let end = range.end * ENCODING_SIZE;
|
||||
|
||||
if end > encodings.len() {
|
||||
return Err(EncodingProviderError);
|
||||
}
|
||||
|
||||
dest.extend_from_slice(&encodings[start..end]);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Encoding protocol error.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error(transparent)]
|
||||
pub struct EncodingError(#[from] ErrorRepr);
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("encoding protocol error: {0}")]
|
||||
enum ErrorRepr {
|
||||
#[error("I/O error: {0}")]
|
||||
Io(std::io::Error),
|
||||
#[error("incorrect MAC count for {direction}: expected {expected}, got {got}")]
|
||||
IncorrectMacCount {
|
||||
direction: Direction,
|
||||
expected: usize,
|
||||
got: usize,
|
||||
},
|
||||
#[error("encoding tree error: {0}")]
|
||||
EncodingTree(EncodingTreeError),
|
||||
}
|
||||
|
||||
impl From<std::io::Error> for EncodingError {
|
||||
fn from(value: std::io::Error) -> Self {
|
||||
Self(ErrorRepr::Io(value))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<EncodingTreeError> for EncodingError {
|
||||
fn from(value: EncodingTreeError) -> Self {
|
||||
Self(ErrorRepr::EncodingTree(value))
|
||||
}
|
||||
}
|
||||
@@ -4,17 +4,15 @@
#![deny(clippy::all)]
#![forbid(unsafe_code)]

pub(crate) mod commit;
pub mod config;
pub(crate) mod context;
pub(crate) mod encoding;
pub(crate) mod ghash;
pub(crate) mod msg;
pub(crate) mod map;
pub(crate) mod mux;
pub mod prover;
pub(crate) mod tag;
pub(crate) mod transcript_internal;
pub mod verifier;
pub(crate) mod zk_aes_ctr;

pub use tlsn_attestation as attestation;
pub use tlsn_core::{connection, hash, transcript};
crates/tlsn/src/map.rs
@@ -0,0 +1,208 @@
|
||||
use std::ops::Range;
|
||||
|
||||
use mpz_memory_core::{Vector, binary::U8};
|
||||
use rangeset::RangeSet;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub(crate) struct RangeMap<T> {
|
||||
map: Vec<(usize, T)>,
|
||||
}
|
||||
|
||||
impl<T> Default for RangeMap<T>
|
||||
where
|
||||
T: Item,
|
||||
{
|
||||
fn default() -> Self {
|
||||
Self { map: Vec::new() }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> RangeMap<T>
|
||||
where
|
||||
T: Item,
|
||||
{
|
||||
pub(crate) fn new(map: Vec<(usize, T)>) -> Self {
|
||||
let mut pos = 0;
|
||||
for (idx, item) in &map {
|
||||
assert!(
|
||||
*idx >= pos,
|
||||
"items must be sorted by index and non-overlapping"
|
||||
);
|
||||
|
||||
pos = *idx + item.length();
|
||||
}
|
||||
|
||||
Self { map }
|
||||
}
|
||||
|
||||
/// Returns `true` if the map is empty.
|
||||
pub(crate) fn is_empty(&self) -> bool {
|
||||
self.map.is_empty()
|
||||
}
|
||||
|
||||
/// Returns the keys of the map.
|
||||
pub(crate) fn keys(&self) -> impl Iterator<Item = Range<usize>> {
|
||||
self.map
|
||||
.iter()
|
||||
.map(|(idx, item)| *idx..*idx + item.length())
|
||||
}
|
||||
|
||||
/// Returns the length of the map.
|
||||
pub(crate) fn len(&self) -> usize {
|
||||
self.map.iter().map(|(_, item)| item.length()).sum()
|
||||
}
|
||||
|
||||
pub(crate) fn iter(&self) -> impl Iterator<Item = (Range<usize>, &T)> {
|
||||
self.map
|
||||
.iter()
|
||||
.map(|(idx, item)| (*idx..*idx + item.length(), item))
|
||||
}
|
||||
|
||||
pub(crate) fn get(&self, range: Range<usize>) -> Option<T::Slice<'_>> {
|
||||
if range.start >= range.end {
|
||||
return None;
|
||||
}
|
||||
|
||||
// Find the item with the greatest start index <= range.start
|
||||
let pos = match self.map.binary_search_by(|(idx, _)| idx.cmp(&range.start)) {
|
||||
Ok(i) => i,
|
||||
Err(0) => return None,
|
||||
Err(i) => i - 1,
|
||||
};
|
||||
|
||||
let (base, item) = &self.map[pos];
|
||||
|
||||
item.slice(range.start - *base..range.end - *base)
|
||||
}
|
||||
|
||||
pub(crate) fn index(&self, idx: &RangeSet<usize>) -> Option<Self> {
|
||||
let mut map = Vec::new();
|
||||
for idx in idx.iter_ranges() {
|
||||
let pos = match self.map.binary_search_by(|(base, _)| base.cmp(&idx.start)) {
|
||||
Ok(i) => i,
|
||||
Err(0) => return None,
|
||||
Err(i) => i - 1,
|
||||
};
|
||||
|
||||
let (base, item) = self.map.get(pos)?;
|
||||
if idx.start < *base || idx.end > *base + item.length() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let start = idx.start - *base;
|
||||
let end = start + idx.len();
|
||||
|
||||
map.push((
|
||||
idx.start,
|
||||
item.slice(start..end)
|
||||
.expect("slice length is checked")
|
||||
.into(),
|
||||
));
|
||||
}
|
||||
|
||||
Some(Self { map })
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> FromIterator<(usize, T)> for RangeMap<T>
|
||||
where
|
||||
T: Item,
|
||||
{
|
||||
fn from_iter<I: IntoIterator<Item = (usize, T)>>(items: I) -> Self {
|
||||
let mut pos = 0;
|
||||
let mut map = Vec::new();
|
||||
for (idx, item) in items {
|
||||
assert!(
|
||||
idx >= pos,
|
||||
"items must be sorted by index and non-overlapping"
|
||||
);
|
||||
|
||||
pos = idx + item.length();
|
||||
map.push((idx, item));
|
||||
}
|
||||
|
||||
Self { map }
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) trait Item: Sized {
|
||||
type Slice<'a>: Into<Self>
|
||||
where
|
||||
Self: 'a;
|
||||
|
||||
fn length(&self) -> usize;
|
||||
|
||||
fn slice<'a>(&'a self, range: Range<usize>) -> Option<Self::Slice<'a>>;
|
||||
}
|
||||
|
||||
impl Item for Vector<U8> {
|
||||
type Slice<'a> = Vector<U8>;
|
||||
|
||||
fn length(&self) -> usize {
|
||||
self.len()
|
||||
}
|
||||
|
||||
fn slice<'a>(&'a self, range: Range<usize>) -> Option<Self::Slice<'a>> {
|
||||
self.get(range)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
impl Item for Range<usize> {
|
||||
type Slice<'a> = Range<usize>;
|
||||
|
||||
fn length(&self) -> usize {
|
||||
self.end - self.start
|
||||
}
|
||||
|
||||
fn slice(&self, range: Range<usize>) -> Option<Self> {
|
||||
if range.end > self.end - self.start {
|
||||
return None;
|
||||
}
|
||||
|
||||
Some(range.start + self.start..range.end + self.start)
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_range_map() {
|
||||
let map = RangeMap::from_iter([(0, 10..14), (10, 20..24), (20, 30..32)]);
|
||||
|
||||
assert_eq!(map.get(0..4), Some(10..14));
|
||||
assert_eq!(map.get(10..14), Some(20..24));
|
||||
assert_eq!(map.get(20..22), Some(30..32));
|
||||
assert_eq!(map.get(0..2), Some(10..12));
|
||||
assert_eq!(map.get(11..13), Some(21..23));
|
||||
assert_eq!(map.get(0..10), None);
|
||||
assert_eq!(map.get(10..20), None);
|
||||
assert_eq!(map.get(20..30), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_range_map_index() {
|
||||
let map = RangeMap::from_iter([(0, 10..14), (10, 20..24), (20, 30..32)]);
|
||||
|
||||
let idx = RangeSet::from([0..4, 10..14, 20..22]);
|
||||
assert_eq!(map.index(&idx), Some(map.clone()));
|
||||
|
||||
let idx = RangeSet::from(25..30);
|
||||
assert_eq!(map.index(&idx), None);
|
||||
|
||||
let idx = RangeSet::from(15..20);
|
||||
assert_eq!(map.index(&idx), None);
|
||||
|
||||
let idx = RangeSet::from([1..3, 11..12, 13..14, 21..22]);
|
||||
assert_eq!(
|
||||
map.index(&idx),
|
||||
Some(RangeMap::from_iter([
|
||||
(1, 11..13),
|
||||
(11, 21..22),
|
||||
(13, 23..24),
|
||||
(21, 31..32)
|
||||
]))
|
||||
);
|
||||
}
|
||||
}
|
||||
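The lookup at the heart of `RangeMap::get` above is a binary search for the entry whose start index is the greatest one at or below the queried range, followed by a bounds check against that entry's length. A minimal standalone sketch of the same lookup, using plain `Range<usize>` values in place of VM references (the `lookup` helper and the `main` driver are illustrative names, not part of the crate):

use std::ops::Range;

/// Finds the stored range covering `query`, if one entry fully contains it.
/// Entries must be sorted by start index and non-overlapping.
fn lookup(map: &[(usize, Range<usize>)], query: Range<usize>) -> Option<Range<usize>> {
    if query.start >= query.end {
        return None;
    }
    // Entry with the greatest start index <= query.start.
    let pos = match map.binary_search_by(|(idx, _)| idx.cmp(&query.start)) {
        Ok(i) => i,
        Err(0) => return None,
        Err(i) => i - 1,
    };
    let (base, item) = &map[pos];
    let len = item.end - item.start;
    if query.end > *base + len {
        return None;
    }
    // Translate the query into the stored item's coordinates.
    let start = item.start + (query.start - base);
    Some(start..start + (query.end - query.start))
}

fn main() {
    let map = vec![(0usize, 10..14), (10, 20..24), (20, 30..32)];
    assert_eq!(lookup(&map, 0..4), Some(10..14));
    assert_eq!(lookup(&map, 11..13), Some(21..23));
    assert_eq!(lookup(&map, 10..20), None);
}

The assertions mirror the `test_range_map` cases above: a query is only answered when it falls entirely inside a single mapped entry.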
@@ -1,15 +0,0 @@
//! Message types.

use serde::{Deserialize, Serialize};

use tlsn_core::connection::{HandshakeData, ServerName};

/// Message sent from Prover to Verifier to prove the server identity.
#[derive(Debug, Serialize, Deserialize)]
#[allow(dead_code)]
pub(crate) struct ServerIdentityProof {
    /// Server name.
    pub name: ServerName,
    /// Server identity data.
    pub data: HandshakeData,
}
@@ -3,6 +3,7 @@
|
||||
mod config;
|
||||
mod error;
|
||||
mod future;
|
||||
mod prove;
|
||||
pub mod state;
|
||||
|
||||
pub use config::{ProverConfig, ProverConfigBuilder, TlsConfig, TlsConfigBuilder};
|
||||
@@ -18,19 +19,7 @@ use mpz_vm_core::prelude::*;
|
||||
use mpz_zk::ProverConfig as ZkProverConfig;
|
||||
use webpki::anchor_from_trusted_cert;
|
||||
|
||||
use crate::{
|
||||
Role,
|
||||
commit::{
|
||||
commit_records,
|
||||
hash::prove_hash,
|
||||
transcript::{TranscriptRefs, decode_transcript},
|
||||
},
|
||||
context::build_mt_context,
|
||||
encoding,
|
||||
mux::attach_mux,
|
||||
tag::verify_tags,
|
||||
zk_aes_ctr::ZkAesCtr,
|
||||
};
|
||||
use crate::{Role, context::build_mt_context, mux::attach_mux, tag::verify_tags};
|
||||
|
||||
use futures::{AsyncRead, AsyncWrite, TryFutureExt};
|
||||
use mpc_tls::{LeaderCtrl, MpcTlsLeader, SessionKeys};
|
||||
@@ -39,12 +28,9 @@ use serio::SinkExt;
|
||||
use std::sync::Arc;
|
||||
use tls_client::{ClientConnection, ServerName as TlsServerName};
|
||||
use tls_client_async::{TlsConnection, bind_client};
|
||||
use tls_core::msgs::enums::ContentType;
|
||||
use tlsn_core::{
|
||||
ProvePayload,
|
||||
connection::{HandshakeData, ServerName},
|
||||
hash::{Blake3, HashAlgId, HashAlgorithm, Keccak256, Sha256},
|
||||
transcript::{TlsTranscript, Transcript, TranscriptCommitment, TranscriptSecret},
|
||||
connection::ServerName,
|
||||
transcript::{TlsTranscript, Transcript},
|
||||
};
|
||||
use tlsn_deap::Deap;
|
||||
use tokio::sync::Mutex;
|
||||
@@ -115,22 +101,6 @@ impl Prover<state::Initialized> {
|
||||
let mut keys = mpc_tls.alloc()?;
|
||||
let vm_lock = vm.try_lock().expect("VM is not locked");
|
||||
translate_keys(&mut keys, &vm_lock)?;
|
||||
|
||||
// Allocate for committing to plaintext.
|
||||
let mut zk_aes_ctr_sent = ZkAesCtr::new(Role::Prover);
|
||||
zk_aes_ctr_sent.set_key(keys.client_write_key, keys.client_write_iv);
|
||||
zk_aes_ctr_sent.alloc(
|
||||
&mut *vm_lock.zk(),
|
||||
self.config.protocol_config().max_sent_data(),
|
||||
)?;
|
||||
|
||||
let mut zk_aes_ctr_recv = ZkAesCtr::new(Role::Prover);
|
||||
zk_aes_ctr_recv.set_key(keys.server_write_key, keys.server_write_iv);
|
||||
zk_aes_ctr_recv.alloc(
|
||||
&mut *vm_lock.zk(),
|
||||
self.config.protocol_config().max_recv_data(),
|
||||
)?;
|
||||
|
||||
drop(vm_lock);
|
||||
|
||||
debug!("setting up mpc-tls");
|
||||
@@ -146,8 +116,6 @@ impl Prover<state::Initialized> {
|
||||
mux_ctrl,
|
||||
mux_fut,
|
||||
mpc_tls,
|
||||
zk_aes_ctr_sent,
|
||||
zk_aes_ctr_recv,
|
||||
keys,
|
||||
vm,
|
||||
},
|
||||
@@ -173,8 +141,6 @@ impl Prover<state::Setup> {
|
||||
mux_ctrl,
|
||||
mut mux_fut,
|
||||
mpc_tls,
|
||||
mut zk_aes_ctr_sent,
|
||||
mut zk_aes_ctr_recv,
|
||||
keys,
|
||||
vm,
|
||||
..
|
||||
@@ -281,28 +247,6 @@ impl Prover<state::Setup> {
|
||||
)
|
||||
.map_err(ProverError::zk)?;
|
||||
|
||||
// Prove received plaintext. Prover drops the proof output, as
|
||||
// they trust themselves.
|
||||
let (sent_refs, _) = commit_records(
|
||||
&mut vm,
|
||||
&mut zk_aes_ctr_sent,
|
||||
tls_transcript
|
||||
.sent()
|
||||
.iter()
|
||||
.filter(|record| record.typ == ContentType::ApplicationData),
|
||||
)
|
||||
.map_err(ProverError::zk)?;
|
||||
|
||||
let (recv_refs, _) = commit_records(
|
||||
&mut vm,
|
||||
&mut zk_aes_ctr_recv,
|
||||
tls_transcript
|
||||
.recv()
|
||||
.iter()
|
||||
.filter(|record| record.typ == ContentType::ApplicationData),
|
||||
)
|
||||
.map_err(ProverError::zk)?;
|
||||
|
||||
mux_fut
|
||||
.poll_with(vm.execute_all(&mut ctx).map_err(ProverError::zk))
|
||||
.await?;
|
||||
@@ -310,7 +254,6 @@ impl Prover<state::Setup> {
|
||||
let transcript = tls_transcript
|
||||
.to_transcript()
|
||||
.expect("transcript is complete");
|
||||
let transcript_refs = TranscriptRefs::new(sent_refs, recv_refs);
|
||||
|
||||
Ok(Prover {
|
||||
config: self.config,
|
||||
@@ -320,9 +263,9 @@ impl Prover<state::Setup> {
|
||||
mux_fut,
|
||||
ctx,
|
||||
vm,
|
||||
keys,
|
||||
tls_transcript,
|
||||
transcript,
|
||||
transcript_refs,
|
||||
},
|
||||
})
|
||||
}
|
||||
@@ -361,117 +304,24 @@ impl Prover<state::Committed> {
|
||||
mux_fut,
|
||||
ctx,
|
||||
vm,
|
||||
keys,
|
||||
tls_transcript,
|
||||
transcript,
|
||||
transcript_refs,
|
||||
..
|
||||
} = &mut self.state;
|
||||
|
||||
let mut output = ProverOutput {
|
||||
transcript_commitments: Vec::new(),
|
||||
transcript_secrets: Vec::new(),
|
||||
};
|
||||
|
||||
let partial_transcript = if let Some((sent, recv)) = config.reveal() {
|
||||
decode_transcript(vm, sent, recv, transcript_refs).map_err(ProverError::zk)?;
|
||||
|
||||
Some(transcript.to_partial(sent.clone(), recv.clone()))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let payload = ProvePayload {
|
||||
handshake: config.server_identity().then(|| {
|
||||
(
|
||||
self.config.server_name().clone(),
|
||||
HandshakeData {
|
||||
certs: tls_transcript
|
||||
.server_cert_chain()
|
||||
.expect("server cert chain is present")
|
||||
.to_vec(),
|
||||
sig: tls_transcript
|
||||
.server_signature()
|
||||
.expect("server signature is present")
|
||||
.clone(),
|
||||
binding: tls_transcript.certificate_binding().clone(),
|
||||
},
|
||||
)
|
||||
}),
|
||||
transcript: partial_transcript,
|
||||
transcript_commit: config.transcript_commit().map(|config| config.to_request()),
|
||||
};
|
||||
|
||||
// Send payload.
|
||||
mux_fut
|
||||
.poll_with(ctx.io_mut().send(payload).map_err(ProverError::from))
|
||||
let output = mux_fut
|
||||
.poll_with(prove::prove(
|
||||
ctx,
|
||||
vm,
|
||||
keys,
|
||||
self.config.server_name(),
|
||||
transcript,
|
||||
tls_transcript,
|
||||
config,
|
||||
))
|
||||
.await?;
|
||||
|
||||
let mut hash_commitments = None;
|
||||
if let Some(commit_config) = config.transcript_commit() {
|
||||
if commit_config.has_encoding() {
|
||||
let hasher: &(dyn HashAlgorithm + Send + Sync) =
|
||||
match *commit_config.encoding_hash_alg() {
|
||||
HashAlgId::SHA256 => &Sha256::default(),
|
||||
HashAlgId::KECCAK256 => &Keccak256::default(),
|
||||
HashAlgId::BLAKE3 => &Blake3::default(),
|
||||
alg => {
|
||||
return Err(ProverError::config(format!(
|
||||
"unsupported hash algorithm for encoding commitment: {alg}"
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
let (commitment, tree) = mux_fut
|
||||
.poll_with(
|
||||
encoding::receive(
|
||||
ctx,
|
||||
hasher,
|
||||
transcript_refs,
|
||||
|plaintext| vm.get_macs(plaintext).expect("reference is valid"),
|
||||
commit_config.iter_encoding(),
|
||||
)
|
||||
.map_err(ProverError::commit),
|
||||
)
|
||||
.await?;
|
||||
|
||||
output
|
||||
.transcript_commitments
|
||||
.push(TranscriptCommitment::Encoding(commitment));
|
||||
output
|
||||
.transcript_secrets
|
||||
.push(TranscriptSecret::Encoding(tree));
|
||||
}
|
||||
|
||||
if commit_config.has_hash() {
|
||||
hash_commitments = Some(
|
||||
prove_hash(
|
||||
vm,
|
||||
transcript_refs,
|
||||
commit_config
|
||||
.iter_hash()
|
||||
.map(|((dir, idx), alg)| (*dir, idx.clone(), *alg)),
|
||||
)
|
||||
.map_err(ProverError::commit)?,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
mux_fut
|
||||
.poll_with(vm.execute_all(ctx).map_err(ProverError::zk))
|
||||
.await?;
|
||||
|
||||
if let Some((hash_fut, hash_secrets)) = hash_commitments {
|
||||
let hash_commitments = hash_fut.try_recv().map_err(ProverError::commit)?;
|
||||
for (commitment, secret) in hash_commitments.into_iter().zip(hash_secrets) {
|
||||
output
|
||||
.transcript_commitments
|
||||
.push(TranscriptCommitment::Hash(commitment));
|
||||
output
|
||||
.transcript_secrets
|
||||
.push(TranscriptSecret::Hash(secret));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(output)
|
||||
}
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@ use std::{error::Error, fmt};

use mpc_tls::MpcTlsError;

use crate::{encoding::EncodingError, zk_aes_ctr::ZkAesCtrError};
use crate::transcript_internal::commit::encoding::EncodingError;

/// Error for [`Prover`](crate::Prover).
#[derive(Debug, thiserror::Error)]
@@ -110,12 +110,6 @@ impl From<MpcTlsError> for ProverError {
    }
}

impl From<ZkAesCtrError> for ProverError {
    fn from(e: ZkAesCtrError) -> Self {
        Self::new(ErrorKind::Zk, e)
    }
}

impl From<EncodingError> for ProverError {
    fn from(e: EncodingError) -> Self {
        Self::new(ErrorKind::Commit, e)
crates/tlsn/src/prover/prove.rs
@@ -0,0 +1,187 @@
|
||||
use mpc_tls::SessionKeys;
|
||||
use mpz_common::Context;
|
||||
use mpz_memory_core::binary::Binary;
|
||||
use mpz_vm_core::Vm;
|
||||
use rangeset::{RangeSet, UnionMut};
|
||||
use serio::SinkExt;
|
||||
use tlsn_core::{
|
||||
ProveConfig, ProveRequest, ProverOutput,
|
||||
connection::{HandshakeData, ServerName},
|
||||
transcript::{
|
||||
ContentType, Direction, TlsTranscript, Transcript, TranscriptCommitment, TranscriptSecret,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
prover::ProverError,
|
||||
transcript_internal::{
|
||||
TranscriptRefs,
|
||||
auth::prove_plaintext,
|
||||
commit::{
|
||||
encoding::{self, MacStore},
|
||||
hash::prove_hash,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
pub(crate) async fn prove<T: Vm<Binary> + MacStore + Send + Sync>(
|
||||
ctx: &mut Context,
|
||||
vm: &mut T,
|
||||
keys: &SessionKeys,
|
||||
server_name: &ServerName,
|
||||
transcript: &Transcript,
|
||||
tls_transcript: &TlsTranscript,
|
||||
config: &ProveConfig,
|
||||
) -> Result<ProverOutput, ProverError> {
|
||||
let mut output = ProverOutput {
|
||||
transcript_commitments: Vec::default(),
|
||||
transcript_secrets: Vec::default(),
|
||||
};
|
||||
|
||||
let request = ProveRequest {
|
||||
handshake: config.server_identity().then(|| {
|
||||
(
|
||||
server_name.clone(),
|
||||
HandshakeData {
|
||||
certs: tls_transcript
|
||||
.server_cert_chain()
|
||||
.expect("server cert chain is present")
|
||||
.to_vec(),
|
||||
sig: tls_transcript
|
||||
.server_signature()
|
||||
.expect("server signature is present")
|
||||
.clone(),
|
||||
binding: tls_transcript.certificate_binding().clone(),
|
||||
},
|
||||
)
|
||||
}),
|
||||
transcript: config
|
||||
.reveal()
|
||||
.map(|(sent, recv)| transcript.to_partial(sent.clone(), recv.clone())),
|
||||
transcript_commit: config.transcript_commit().map(|config| config.to_request()),
|
||||
};
|
||||
|
||||
ctx.io_mut()
|
||||
.send(request)
|
||||
.await
|
||||
.map_err(ProverError::from)?;
|
||||
|
||||
let (reveal_sent, reveal_recv) = config.reveal().cloned().unwrap_or_default();
|
||||
let (mut commit_sent, mut commit_recv) = (RangeSet::default(), RangeSet::default());
|
||||
if let Some(commit_config) = config.transcript_commit() {
|
||||
commit_config
|
||||
.iter_hash()
|
||||
.for_each(|((direction, idx), _)| match direction {
|
||||
Direction::Sent => commit_sent.union_mut(idx),
|
||||
Direction::Received => commit_recv.union_mut(idx),
|
||||
});
|
||||
|
||||
commit_config
|
||||
.iter_encoding()
|
||||
.for_each(|(direction, idx)| match direction {
|
||||
Direction::Sent => commit_sent.union_mut(idx),
|
||||
Direction::Received => commit_recv.union_mut(idx),
|
||||
});
|
||||
}
|
||||
|
||||
let transcript_refs = TranscriptRefs {
|
||||
sent: prove_plaintext(
|
||||
vm,
|
||||
keys.client_write_key,
|
||||
keys.client_write_iv,
|
||||
transcript.sent(),
|
||||
tls_transcript
|
||||
.sent()
|
||||
.iter()
|
||||
.filter(|record| record.typ == ContentType::ApplicationData),
|
||||
&reveal_sent,
|
||||
&commit_sent,
|
||||
)
|
||||
.map_err(ProverError::commit)?,
|
||||
recv: prove_plaintext(
|
||||
vm,
|
||||
keys.server_write_key,
|
||||
keys.server_write_iv,
|
||||
transcript.received(),
|
||||
tls_transcript
|
||||
.recv()
|
||||
.iter()
|
||||
.filter(|record| record.typ == ContentType::ApplicationData),
|
||||
&reveal_recv,
|
||||
&commit_recv,
|
||||
)
|
||||
.map_err(ProverError::commit)?,
|
||||
};
|
||||
|
||||
let hash_commitments = if let Some(commit_config) = config.transcript_commit()
|
||||
&& commit_config.has_hash()
|
||||
{
|
||||
Some(
|
||||
prove_hash(
|
||||
vm,
|
||||
&transcript_refs,
|
||||
commit_config
|
||||
.iter_hash()
|
||||
.map(|((dir, idx), alg)| (*dir, idx.clone(), *alg)),
|
||||
)
|
||||
.map_err(ProverError::commit)?,
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
vm.execute_all(ctx).await.map_err(ProverError::zk)?;
|
||||
|
||||
if let Some(commit_config) = config.transcript_commit()
|
||||
&& commit_config.has_encoding()
|
||||
{
|
||||
let mut sent_ranges = RangeSet::default();
|
||||
let mut recv_ranges = RangeSet::default();
|
||||
for (dir, idx) in commit_config.iter_encoding() {
|
||||
match dir {
|
||||
Direction::Sent => sent_ranges.union_mut(idx),
|
||||
Direction::Received => recv_ranges.union_mut(idx),
|
||||
}
|
||||
}
|
||||
|
||||
let sent_map = transcript_refs
|
||||
.sent
|
||||
.index(&sent_ranges)
|
||||
.expect("indices are valid");
|
||||
let recv_map = transcript_refs
|
||||
.recv
|
||||
.index(&recv_ranges)
|
||||
.expect("indices are valid");
|
||||
|
||||
let (commitment, tree) = encoding::receive(
|
||||
ctx,
|
||||
vm,
|
||||
*commit_config.encoding_hash_alg(),
|
||||
&sent_map,
|
||||
&recv_map,
|
||||
commit_config.iter_encoding(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
output
|
||||
.transcript_commitments
|
||||
.push(TranscriptCommitment::Encoding(commitment));
|
||||
output
|
||||
.transcript_secrets
|
||||
.push(TranscriptSecret::Encoding(tree));
|
||||
}
|
||||
|
||||
if let Some((hash_fut, hash_secrets)) = hash_commitments {
|
||||
let hash_commitments = hash_fut.try_recv().map_err(ProverError::commit)?;
|
||||
for (commitment, secret) in hash_commitments.into_iter().zip(hash_secrets) {
|
||||
output
|
||||
.transcript_commitments
|
||||
.push(TranscriptCommitment::Hash(commitment));
|
||||
output
|
||||
.transcript_secrets
|
||||
.push(TranscriptSecret::Hash(secret));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(output)
|
||||
}
|
||||
@@ -9,10 +9,8 @@ use tlsn_deap::Deap;
use tokio::sync::Mutex;

use crate::{
    commit::transcript::TranscriptRefs,
    mux::{MuxControl, MuxFuture},
    prover::{Mpc, Zk},
    zk_aes_ctr::ZkAesCtr,
};

/// Entry state
@@ -25,8 +23,6 @@ pub struct Setup {
    pub(crate) mux_ctrl: MuxControl,
    pub(crate) mux_fut: MuxFuture,
    pub(crate) mpc_tls: MpcTlsLeader,
    pub(crate) zk_aes_ctr_sent: ZkAesCtr,
    pub(crate) zk_aes_ctr_recv: ZkAesCtr,
    pub(crate) keys: SessionKeys,
    pub(crate) vm: Arc<Mutex<Deap<Mpc, Zk>>>,
}
@@ -39,9 +35,9 @@ pub struct Committed {
    pub(crate) mux_fut: MuxFuture,
    pub(crate) ctx: Context,
    pub(crate) vm: Zk,
    pub(crate) keys: SessionKeys,
    pub(crate) tls_transcript: TlsTranscript,
    pub(crate) transcript: Transcript,
    pub(crate) transcript_refs: TranscriptRefs,
}

opaque_debug::implement!(Committed);
crates/tlsn/src/transcript_internal.rs
@@ -0,0 +1,16 @@
pub(crate) mod auth;
pub(crate) mod commit;

use mpz_memory_core::{Vector, binary::U8};

use crate::map::RangeMap;

/// Maps transcript ranges to VM references.
pub(crate) type ReferenceMap = RangeMap<Vector<U8>>;

/// References to the application plaintext in the transcript.
#[derive(Debug, Default, Clone)]
pub(crate) struct TranscriptRefs {
    pub(crate) sent: ReferenceMap,
    pub(crate) recv: ReferenceMap,
}
crates/tlsn/src/transcript_internal/auth.rs
@@ -0,0 +1,639 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use aes::Aes128;
|
||||
use ctr::{
|
||||
Ctr32BE,
|
||||
cipher::{KeyIvInit, StreamCipher, StreamCipherSeek},
|
||||
};
|
||||
use mpz_circuits::circuits::{AES128, xor};
|
||||
use mpz_core::bitvec::BitVec;
|
||||
use mpz_memory_core::{
|
||||
Array, DecodeFutureTyped, MemoryExt, Vector, ViewExt,
|
||||
binary::{Binary, U8},
|
||||
};
|
||||
use mpz_vm_core::{Call, CallableExt, Vm};
|
||||
use rangeset::{Difference, RangeSet, Union};
|
||||
use tlsn_core::transcript::Record;
|
||||
|
||||
use crate::transcript_internal::ReferenceMap;
|
||||
|
||||
pub(crate) fn prove_plaintext<'a>(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
key: Array<U8, 16>,
|
||||
iv: Array<U8, 4>,
|
||||
plaintext: &[u8],
|
||||
records: impl IntoIterator<Item = &'a Record>,
|
||||
reveal: &RangeSet<usize>,
|
||||
commit: &RangeSet<usize>,
|
||||
) -> Result<ReferenceMap, PlaintextAuthError> {
|
||||
let is_reveal_all = reveal == (0..plaintext.len());
|
||||
|
||||
let alloc_ranges = if is_reveal_all {
|
||||
commit.clone()
|
||||
} else {
|
||||
// The plaintext is only partially revealed, so we need to authenticate in ZK.
|
||||
commit.union(reveal)
|
||||
};
|
||||
|
||||
let plaintext_refs = alloc_plaintext(vm, &alloc_ranges)?;
|
||||
let records = RecordParams::from_iter(records).collect::<Vec<_>>();
|
||||
|
||||
if is_reveal_all {
|
||||
drop(vm.decode(key).map_err(PlaintextAuthError::vm)?);
|
||||
drop(vm.decode(iv).map_err(PlaintextAuthError::vm)?);
|
||||
|
||||
for (range, slice) in plaintext_refs.iter() {
|
||||
vm.mark_public(*slice).map_err(PlaintextAuthError::vm)?;
|
||||
vm.assign(*slice, plaintext[range].to_vec())
|
||||
.map_err(PlaintextAuthError::vm)?;
|
||||
vm.commit(*slice).map_err(PlaintextAuthError::vm)?;
|
||||
}
|
||||
} else {
|
||||
let private = commit.difference(reveal);
|
||||
for (_, slice) in plaintext_refs
|
||||
.index(&private)
|
||||
.expect("all ranges are allocated")
|
||||
.iter()
|
||||
{
|
||||
vm.mark_private(*slice).map_err(PlaintextAuthError::vm)?;
|
||||
}
|
||||
|
||||
for (_, slice) in plaintext_refs
|
||||
.index(reveal)
|
||||
.expect("all ranges are allocated")
|
||||
.iter()
|
||||
{
|
||||
vm.mark_public(*slice).map_err(PlaintextAuthError::vm)?;
|
||||
}
|
||||
|
||||
for (range, slice) in plaintext_refs.iter() {
|
||||
vm.assign(*slice, plaintext[range].to_vec())
|
||||
.map_err(PlaintextAuthError::vm)?;
|
||||
vm.commit(*slice).map_err(PlaintextAuthError::vm)?;
|
||||
}
|
||||
|
||||
let ciphertext = alloc_ciphertext(vm, key, iv, plaintext_refs.clone(), &records)?;
|
||||
for (_, slice) in ciphertext.iter() {
|
||||
drop(vm.decode(*slice).map_err(PlaintextAuthError::vm)?);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(plaintext_refs)
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub(crate) fn verify_plaintext<'a>(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
key: Array<U8, 16>,
|
||||
iv: Array<U8, 4>,
|
||||
plaintext: &'a [u8],
|
||||
ciphertext: &'a [u8],
|
||||
records: impl IntoIterator<Item = &'a Record>,
|
||||
reveal: &RangeSet<usize>,
|
||||
commit: &RangeSet<usize>,
|
||||
) -> Result<(ReferenceMap, PlaintextProof<'a>), PlaintextAuthError> {
|
||||
let is_reveal_all = reveal == (0..plaintext.len());
|
||||
|
||||
let alloc_ranges = if is_reveal_all {
|
||||
commit.clone()
|
||||
} else {
|
||||
// The plaintext is only partially revealed, so we need to authenticate in ZK.
|
||||
commit.union(reveal)
|
||||
};
|
||||
|
||||
let plaintext_refs = alloc_plaintext(vm, &alloc_ranges)?;
|
||||
let records = RecordParams::from_iter(records).collect::<Vec<_>>();
|
||||
|
||||
let plaintext_proof = if is_reveal_all {
|
||||
let key = vm.decode(key).map_err(PlaintextAuthError::vm)?;
|
||||
let iv = vm.decode(iv).map_err(PlaintextAuthError::vm)?;
|
||||
|
||||
for (range, slice) in plaintext_refs.iter() {
|
||||
vm.mark_public(*slice).map_err(PlaintextAuthError::vm)?;
|
||||
vm.assign(*slice, plaintext[range].to_vec())
|
||||
.map_err(PlaintextAuthError::vm)?;
|
||||
vm.commit(*slice).map_err(PlaintextAuthError::vm)?;
|
||||
}
|
||||
|
||||
PlaintextProof(ProofInner::WithKey {
|
||||
key,
|
||||
iv,
|
||||
records,
|
||||
plaintext,
|
||||
ciphertext,
|
||||
})
|
||||
} else {
|
||||
let private = commit.difference(reveal);
|
||||
for (_, slice) in plaintext_refs
|
||||
.index(&private)
|
||||
.expect("all ranges are allocated")
|
||||
.iter()
|
||||
{
|
||||
vm.mark_blind(*slice).map_err(PlaintextAuthError::vm)?;
|
||||
}
|
||||
|
||||
for (range, slice) in plaintext_refs
|
||||
.index(reveal)
|
||||
.expect("all ranges are allocated")
|
||||
.iter()
|
||||
{
|
||||
vm.mark_public(*slice).map_err(PlaintextAuthError::vm)?;
|
||||
vm.assign(*slice, plaintext[range].to_vec())
|
||||
.map_err(PlaintextAuthError::vm)?;
|
||||
}
|
||||
|
||||
for (_, slice) in plaintext_refs.iter() {
|
||||
vm.commit(*slice).map_err(PlaintextAuthError::vm)?;
|
||||
}
|
||||
|
||||
let ciphertext_map = alloc_ciphertext(vm, key, iv, plaintext_refs.clone(), &records)?;
|
||||
|
||||
let mut ciphertexts = Vec::new();
|
||||
for (range, chunk) in ciphertext_map.iter() {
|
||||
ciphertexts.push((
|
||||
&ciphertext[range],
|
||||
vm.decode(*chunk).map_err(PlaintextAuthError::vm)?,
|
||||
));
|
||||
}
|
||||
|
||||
PlaintextProof(ProofInner::WithZk { ciphertexts })
|
||||
};
|
||||
|
||||
Ok((plaintext_refs, plaintext_proof))
|
||||
}
|
||||
|
||||
fn alloc_plaintext(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
ranges: &RangeSet<usize>,
|
||||
) -> Result<ReferenceMap, PlaintextAuthError> {
|
||||
let len = ranges.len();
|
||||
|
||||
if len == 0 {
|
||||
return Ok(ReferenceMap::default());
|
||||
}
|
||||
|
||||
let plaintext = vm.alloc_vec::<U8>(len).map_err(PlaintextAuthError::vm)?;
|
||||
|
||||
let mut pos = 0;
|
||||
Ok(ReferenceMap::from_iter(ranges.iter_ranges().map(
|
||||
move |range| {
|
||||
let chunk = plaintext
|
||||
.get(pos..pos + range.len())
|
||||
.expect("length was checked");
|
||||
pos += range.len();
|
||||
(range.start, chunk)
|
||||
},
|
||||
)))
|
||||
}
|
||||
|
||||
fn alloc_ciphertext<'a>(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
key: Array<U8, 16>,
|
||||
iv: Array<U8, 4>,
|
||||
plaintext: ReferenceMap,
|
||||
records: impl IntoIterator<Item = &'a RecordParams>,
|
||||
) -> Result<ReferenceMap, PlaintextAuthError> {
|
||||
if plaintext.is_empty() {
|
||||
return Ok(ReferenceMap::default());
|
||||
}
|
||||
|
||||
let ranges = RangeSet::from(plaintext.keys().collect::<Vec<_>>());
|
||||
|
||||
let keystream = alloc_keystream(vm, key, iv, &ranges, records)?;
|
||||
let mut builder = Call::builder(Arc::new(xor(ranges.len() * 8)));
|
||||
for (_, slice) in plaintext.iter() {
|
||||
builder = builder.arg(*slice);
|
||||
}
|
||||
for slice in keystream {
|
||||
builder = builder.arg(slice);
|
||||
}
|
||||
let call = builder.build().expect("call should be valid");
|
||||
|
||||
let ciphertext: Vector<U8> = vm.call(call).map_err(PlaintextAuthError::vm)?;
|
||||
|
||||
let mut pos = 0;
|
||||
Ok(ReferenceMap::from_iter(ranges.iter_ranges().map(
|
||||
move |range| {
|
||||
let chunk = ciphertext
|
||||
.get(pos..pos + range.len())
|
||||
.expect("length was checked");
|
||||
pos += range.len();
|
||||
(range.start, chunk)
|
||||
},
|
||||
)))
|
||||
}
|
||||
|
||||
fn alloc_keystream<'a>(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
key: Array<U8, 16>,
|
||||
iv: Array<U8, 4>,
|
||||
ranges: &RangeSet<usize>,
|
||||
records: impl IntoIterator<Item = &'a RecordParams>,
|
||||
) -> Result<Vec<Vector<U8>>, PlaintextAuthError> {
|
||||
let mut keystream = Vec::new();
|
||||
|
||||
let mut pos = 0;
|
||||
let mut range_iter = ranges.iter_ranges();
|
||||
let mut current_range = range_iter.next();
|
||||
for record in records {
|
||||
let mut explicit_nonce = None;
|
||||
let mut current_block = None;
|
||||
loop {
|
||||
let Some(range) = current_range.take().or_else(|| range_iter.next()) else {
|
||||
return Ok(keystream);
|
||||
};
|
||||
|
||||
let record_range = pos..pos + record.len;
|
||||
if range.start >= record_range.end {
|
||||
current_range = Some(range);
|
||||
break;
|
||||
}
|
||||
|
||||
// Range with record offset applied.
|
||||
let offset_range = range.start - pos..range.end - pos;
|
||||
|
||||
let explicit_nonce = if let Some(explicit_nonce) = explicit_nonce {
|
||||
explicit_nonce
|
||||
} else {
|
||||
let nonce = alloc_explicit_nonce(vm, record.explicit_nonce.clone())?;
|
||||
explicit_nonce = Some(nonce);
|
||||
nonce
|
||||
};
|
||||
|
||||
const BLOCK_SIZE: usize = 16;
|
||||
let block_num = offset_range.start / BLOCK_SIZE;
|
||||
let block = if let Some((current_block_num, block)) = current_block.take()
|
||||
&& current_block_num == block_num
|
||||
{
|
||||
block
|
||||
} else {
|
||||
let block = alloc_block(vm, key, iv, explicit_nonce, block_num)?;
|
||||
current_block = Some((block_num, block));
|
||||
block
|
||||
};
|
||||
|
||||
// Range within the block.
|
||||
let block_range_start = offset_range.start % BLOCK_SIZE;
|
||||
let len =
|
||||
(range.end.min(record_range.end) - range.start).min(BLOCK_SIZE - block_range_start);
|
||||
let block_range = block_range_start..block_range_start + len;
|
||||
|
||||
keystream.push(block.get(block_range).expect("range is checked"));
|
||||
|
||||
// If the range extends past the block, process the tail.
|
||||
if range.start + len < range.end {
|
||||
current_range = Some(range.start + len..range.end);
|
||||
}
|
||||
}
|
||||
|
||||
pos += record.len;
|
||||
}
|
||||
|
||||
Err(ErrorRepr::OutOfBounds.into())
|
||||
}
|
||||
|
||||
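For the block arithmetic in `alloc_keystream` above: TLS 1.2 AES-GCM reserves counter value 1 for the tag mask, so the keystream that covers application plaintext starts at counter 2 (`START_CTR`), and a byte at offset `i` within a record falls in AES block `i / 16` at position `i % 16`. A tiny standalone sketch of that mapping (the `block_for_offset` name is illustrative, not part of the crate):

/// Maps a plaintext byte offset within one TLS record to the AES-CTR block
/// covering it. Returns (counter value, index within the 16-byte block).
fn block_for_offset(offset: usize) -> (u32, usize) {
    const BLOCK_SIZE: usize = 16;
    // Counter 1 is consumed by the GCM tag mask; plaintext starts at 2.
    const START_CTR: u32 = 2;
    (START_CTR + (offset / BLOCK_SIZE) as u32, offset % BLOCK_SIZE)
}

fn main() {
    assert_eq!(block_for_offset(0), (2, 0));
    assert_eq!(block_for_offset(15), (2, 15));
    assert_eq!(block_for_offset(16), (3, 0));
    assert_eq!(block_for_offset(40), (4, 8));
}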
fn alloc_explicit_nonce(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
explicit_nonce: Vec<u8>,
|
||||
) -> Result<Vector<U8>, PlaintextAuthError> {
|
||||
const EXPLICIT_NONCE_LEN: usize = 8;
|
||||
let nonce = vm
|
||||
.alloc_vec::<U8>(EXPLICIT_NONCE_LEN)
|
||||
.map_err(PlaintextAuthError::vm)?;
|
||||
vm.mark_public(nonce).map_err(PlaintextAuthError::vm)?;
|
||||
vm.assign(nonce, explicit_nonce)
|
||||
.map_err(PlaintextAuthError::vm)?;
|
||||
vm.commit(nonce).map_err(PlaintextAuthError::vm)?;
|
||||
|
||||
Ok(nonce)
|
||||
}
|
||||
|
||||
fn alloc_block(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
key: Array<U8, 16>,
|
||||
iv: Array<U8, 4>,
|
||||
explicit_nonce: Vector<U8>,
|
||||
block: usize,
|
||||
) -> Result<Vector<U8>, PlaintextAuthError> {
|
||||
let ctr: Array<U8, 4> = vm.alloc().map_err(PlaintextAuthError::vm)?;
|
||||
vm.mark_public(ctr).map_err(PlaintextAuthError::vm)?;
|
||||
const START_CTR: u32 = 2;
|
||||
vm.assign(ctr, (START_CTR + block as u32).to_be_bytes())
|
||||
.map_err(PlaintextAuthError::vm)?;
|
||||
vm.commit(ctr).map_err(PlaintextAuthError::vm)?;
|
||||
|
||||
let block: Array<U8, 16> = vm
|
||||
.call(
|
||||
Call::builder(AES128.clone())
|
||||
.arg(key)
|
||||
.arg(iv)
|
||||
.arg(explicit_nonce)
|
||||
.arg(ctr)
|
||||
.build()
|
||||
.expect("call should be valid"),
|
||||
)
|
||||
.map_err(PlaintextAuthError::vm)?;
|
||||
|
||||
Ok(Vector::from(block))
|
||||
}
|
||||
|
||||
struct RecordParams {
|
||||
explicit_nonce: Vec<u8>,
|
||||
len: usize,
|
||||
}
|
||||
|
||||
impl RecordParams {
|
||||
fn from_iter<'a>(records: impl IntoIterator<Item = &'a Record>) -> impl Iterator<Item = Self> {
|
||||
records.into_iter().map(|record| Self {
|
||||
explicit_nonce: record.explicit_nonce.clone(),
|
||||
len: record.ciphertext.len(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub(crate) struct PlaintextProof<'a>(ProofInner<'a>);
|
||||
|
||||
impl<'a> PlaintextProof<'a> {
|
||||
pub(crate) fn verify(self) -> Result<(), PlaintextAuthError> {
|
||||
match self.0 {
|
||||
ProofInner::WithKey {
|
||||
mut key,
|
||||
mut iv,
|
||||
records,
|
||||
plaintext,
|
||||
ciphertext,
|
||||
} => {
|
||||
let key = key
|
||||
.try_recv()
|
||||
.map_err(PlaintextAuthError::vm)?
|
||||
.ok_or(ErrorRepr::MissingDecoding)?;
|
||||
let iv = iv
|
||||
.try_recv()
|
||||
.map_err(PlaintextAuthError::vm)?
|
||||
.ok_or(ErrorRepr::MissingDecoding)?;
|
||||
|
||||
verify_plaintext_with_key(key, iv, &records, plaintext, ciphertext)?;
|
||||
}
|
||||
ProofInner::WithZk { ciphertexts } => {
|
||||
for (expected, mut actual) in ciphertexts {
|
||||
let actual = actual
|
||||
.try_recv()
|
||||
.map_err(PlaintextAuthError::vm)?
|
||||
.ok_or(PlaintextAuthError(ErrorRepr::MissingDecoding))?;
|
||||
|
||||
if actual != expected {
|
||||
return Err(PlaintextAuthError(ErrorRepr::InvalidPlaintext));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
enum ProofInner<'a> {
|
||||
WithKey {
|
||||
key: DecodeFutureTyped<BitVec, [u8; 16]>,
|
||||
iv: DecodeFutureTyped<BitVec, [u8; 4]>,
|
||||
records: Vec<RecordParams>,
|
||||
plaintext: &'a [u8],
|
||||
ciphertext: &'a [u8],
|
||||
},
|
||||
WithZk {
|
||||
// (expected, actual)
|
||||
#[allow(clippy::type_complexity)]
|
||||
ciphertexts: Vec<(&'a [u8], DecodeFutureTyped<BitVec, Vec<u8>>)>,
|
||||
},
|
||||
}
|
||||
|
||||
fn aes_ctr_apply_keystream(key: &[u8; 16], iv: &[u8; 4], explicit_nonce: &[u8], input: &mut [u8]) {
|
||||
let mut full_iv = [0u8; 16];
|
||||
full_iv[0..4].copy_from_slice(iv);
|
||||
full_iv[4..12].copy_from_slice(&explicit_nonce[..8]);
|
||||
|
||||
const START_CTR: u32 = 2;
|
||||
let mut cipher = Ctr32BE::<Aes128>::new(key.into(), &full_iv.into());
|
||||
cipher
|
||||
.try_seek(START_CTR * 16)
|
||||
.expect("start counter is less than keystream length");
|
||||
cipher.apply_keystream(input);
|
||||
}
|
||||
|
||||
fn verify_plaintext_with_key<'a>(
|
||||
key: [u8; 16],
|
||||
iv: [u8; 4],
|
||||
records: impl IntoIterator<Item = &'a RecordParams>,
|
||||
plaintext: &[u8],
|
||||
ciphertext: &[u8],
|
||||
) -> Result<(), PlaintextAuthError> {
|
||||
let mut pos = 0;
|
||||
let mut text = Vec::new();
|
||||
for record in records {
|
||||
text.clear();
|
||||
text.extend_from_slice(&plaintext[pos..pos + record.len]);
|
||||
|
||||
aes_ctr_apply_keystream(&key, &iv, &record.explicit_nonce, &mut text);
|
||||
|
||||
if text != ciphertext[pos..pos + record.len] {
|
||||
return Err(PlaintextAuthError(ErrorRepr::InvalidPlaintext));
|
||||
}
|
||||
|
||||
pos += record.len;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("plaintext authentication error: {0}")]
|
||||
pub(crate) struct PlaintextAuthError(#[from] ErrorRepr);
|
||||
|
||||
impl PlaintextAuthError {
|
||||
fn vm<E>(err: E) -> Self
|
||||
where
|
||||
E: Into<Box<dyn std::error::Error + Send + Sync + 'static>>,
|
||||
{
|
||||
Self(ErrorRepr::Vm(err.into()))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
enum ErrorRepr {
|
||||
#[error("vm error: {0}")]
|
||||
Vm(Box<dyn std::error::Error + Send + Sync + 'static>),
|
    #[error("plaintext out of bounds of records. This should never happen and is an internal bug.")]
    OutOfBounds,
    #[error("missing decoding")]
    MissingDecoding,
    #[error("plaintext does not match ciphertext")]
    InvalidPlaintext,
}

#[cfg(test)]
#[allow(clippy::all)]
mod tests {
    use super::*;
    use mpz_common::context::test_st_context;
    use mpz_ideal_vm::IdealVm;
    use mpz_vm_core::prelude::*;
    use rand::{Rng, SeedableRng, rngs::StdRng};
    use rstest::*;
    use std::ops::Range;

    fn build_vm(key: [u8; 16], iv: [u8; 4]) -> (IdealVm, Array<U8, 16>, Array<U8, 4>) {
        let mut vm = IdealVm::new();
        let key_ref = vm.alloc::<Array<U8, 16>>().unwrap();
        let iv_ref = vm.alloc::<Array<U8, 4>>().unwrap();

        vm.mark_public(key_ref).unwrap();
        vm.mark_public(iv_ref).unwrap();
        vm.assign(key_ref, key).unwrap();
        vm.assign(iv_ref, iv).unwrap();
        vm.commit(key_ref).unwrap();
        vm.commit(iv_ref).unwrap();

        (vm, key_ref, iv_ref)
    }

    fn expected_aes_ctr<'a>(
        key: [u8; 16],
        iv: [u8; 4],
        records: impl IntoIterator<Item = &'a RecordParams>,
        ranges: &RangeSet<usize>,
    ) -> Vec<u8> {
        let mut keystream = Vec::new();
        let mut pos = 0;
        for record in records {
            let mut record_keystream = vec![0u8; record.len];
            aes_ctr_apply_keystream(&key, &iv, &record.explicit_nonce, &mut record_keystream);
            for mut range in ranges.iter_ranges() {
                range.start = range.start.max(pos);
                range.end = range.end.min(pos + record.len);
                if range.start < range.end {
                    keystream
                        .extend_from_slice(&record_keystream[range.start - pos..range.end - pos]);
                }
            }
            pos += record.len;
        }

        keystream
    }

    #[rstest]
    #[case::single_record_empty([0], [])]
    #[case::multiple_empty_records_empty([0, 0], [])]
    #[case::multiple_records_empty([128, 64], [])]
    #[case::single_block_full([16], [0..16])]
    #[case::single_block_partial([16], [2..14])]
    #[case::partial_block_full([15], [0..15])]
    #[case::out_of_bounds([16], [0..17])]
    #[case::multiple_records_full([128, 63, 33, 15, 4], [0..243])]
    #[case::multiple_records_partial([128, 63, 33, 15, 4], [1..15, 16..17, 18..19, 126..130, 224..225, 242..243])]
    #[tokio::test]
    async fn test_alloc_keystream(
        #[case] record_lens: impl IntoIterator<Item = usize>,
        #[case] ranges: impl IntoIterator<Item = Range<usize>>,
    ) {
        let mut rng = StdRng::seed_from_u64(0);
        let mut key = [0u8; 16];
        let mut iv = [0u8; 4];
        rng.fill(&mut key);
        rng.fill(&mut iv);

        let mut total_len = 0;
        let records = record_lens
            .into_iter()
            .map(|len| {
                let mut explicit_nonce = [0u8; 8];
                rng.fill(&mut explicit_nonce);
                total_len += len;
                RecordParams {
                    explicit_nonce: explicit_nonce.to_vec(),
                    len,
                }
            })
            .collect::<Vec<_>>();

        let ranges = RangeSet::from(ranges.into_iter().collect::<Vec<_>>());
        let is_out_of_bounds = ranges.end().unwrap_or(0) > total_len;

        let (mut ctx, _) = test_st_context(1024);
        let (mut vm, key_ref, iv_ref) = build_vm(key, iv);

        let keystream = match alloc_keystream(&mut vm, key_ref, iv_ref, &ranges, &records) {
            Ok(_) if is_out_of_bounds => panic!("should be out of bounds"),
            Ok(keystream) => keystream,
            Err(PlaintextAuthError(ErrorRepr::OutOfBounds)) if is_out_of_bounds => {
                return;
            }
            Err(e) => panic!("unexpected error: {:?}", e),
        };

        vm.execute(&mut ctx).await.unwrap();

        let keystream: Vec<u8> = keystream
            .iter()
            .flat_map(|slice| vm.get(*slice).unwrap().unwrap())
            .collect();

        assert_eq!(keystream.len(), ranges.len());

        let expected = expected_aes_ctr(key, iv, &records, &ranges);

        assert_eq!(keystream, expected);
    }

    #[rstest]
    #[case::single_record_empty([0])]
    #[case::single_record([32])]
    #[case::multiple_records([128, 63, 33, 15, 4])]
    #[case::multiple_records_with_empty([128, 63, 33, 0, 15, 4])]
    fn test_verify_plaintext_with_key(
        #[case] record_lens: impl IntoIterator<Item = usize>,
        #[values(false, true)] tamper: bool,
    ) {
        let mut rng = StdRng::seed_from_u64(0);
        let mut key = [0u8; 16];
        let mut iv = [0u8; 4];
        rng.fill(&mut key);
        rng.fill(&mut iv);

        let mut total_len = 0;
        let records = record_lens
            .into_iter()
            .map(|len| {
                let mut explicit_nonce = [0u8; 8];
                rng.fill(&mut explicit_nonce);
                total_len += len;
                RecordParams {
                    explicit_nonce: explicit_nonce.to_vec(),
                    len,
                }
            })
            .collect::<Vec<_>>();

        let mut plaintext = vec![0u8; total_len];
        rng.fill(plaintext.as_mut_slice());

        let mut ciphertext = plaintext.clone();
        expected_aes_ctr(key, iv, &records, &(0..total_len).into())
            .iter()
            .zip(ciphertext.iter_mut())
            .for_each(|(key, pt)| {
                *pt ^= *key;
            });

        if tamper {
            plaintext.first_mut().map(|pt| *pt ^= 1);
        }

        match verify_plaintext_with_key(key, iv, &records, &plaintext, &ciphertext) {
            Ok(_) if tamper && !plaintext.is_empty() => panic!("should be invalid"),
            Err(e) if !tamper => panic!("unexpected error: {:?}", e),
            _ => {}
        }
    }
}
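Note: the `aes_ctr_apply_keystream` helper used by the tests above is defined earlier in this file and is not part of this excerpt. Purely as an illustration (not the crate's actual helper), a stand-alone equivalent could look like the sketch below, assuming the RustCrypto `aes`, `ctr`, and `cipher` crates and the TLS 1.2 counter-block layout of 4-byte implicit IV || 8-byte explicit nonce || 32-bit big-endian counter starting at 2 (the `START_CTR` used elsewhere in this change).

```rust
// Hypothetical helper, for illustration only; requires the `aes`, `ctr` and `cipher` crates.
use aes::Aes128;
use cipher::{KeyIvInit, StreamCipher};

type Aes128Ctr32 = ctr::Ctr32BE<Aes128>;

fn aes_ctr_apply_keystream(key: &[u8; 16], iv: &[u8; 4], explicit_nonce: &[u8], buf: &mut [u8]) {
    // Counter block: implicit IV (4) || explicit nonce (8) || counter (4), counter starts at 2.
    assert_eq!(explicit_nonce.len(), 8, "explicit nonce must be 8 bytes");
    let mut counter_block = [0u8; 16];
    counter_block[..4].copy_from_slice(iv);
    counter_block[4..12].copy_from_slice(explicit_nonce);
    counter_block[12..].copy_from_slice(&2u32.to_be_bytes());

    let mut cipher = Aes128Ctr32::new(key.into(), &counter_block.into());
    // XORs the keystream into `buf`; with an all-zero buffer this yields the raw keystream,
    // which is how the tests above use it.
    cipher.apply_keystream(buf);
}
```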
crates/tlsn/src/transcript_internal/commit.rs (new file, +4 lines)
@@ -0,0 +1,4 @@
//! Plaintext commitment and proof of encryption.

pub(crate) mod encoding;
pub(crate) mod hash;
crates/tlsn/src/transcript_internal/commit/encoding.rs (new file, +283 lines)
@@ -0,0 +1,283 @@
//! Encoding commitment protocol.

use std::ops::Range;

use mpz_common::Context;
use mpz_memory_core::{
    Vector,
    binary::U8,
    correlated::{Delta, Key, Mac},
};
use rand::Rng;
use rangeset::RangeSet;
use serde::{Deserialize, Serialize};
use serio::{SinkExt, stream::IoStreamExt};
use tlsn_core::{
    hash::{Blake3, HashAlgId, HashAlgorithm, Keccak256, Sha256},
    transcript::{
        Direction,
        encoding::{
            Encoder, EncoderSecret, EncodingCommitment, EncodingProvider, EncodingProviderError,
            EncodingTree, EncodingTreeError, new_encoder,
        },
    },
};

use crate::{
    map::{Item, RangeMap},
    transcript_internal::ReferenceMap,
};

/// Bytes of encoding, per byte.
const ENCODING_SIZE: usize = 128;

#[derive(Debug, Serialize, Deserialize)]
struct Encodings {
    sent: Vec<u8>,
    recv: Vec<u8>,
}

/// Transfers encodings for the provided plaintext ranges.
pub(crate) async fn transfer<K: KeyStore>(
    ctx: &mut Context,
    store: &K,
    sent: &ReferenceMap,
    recv: &ReferenceMap,
) -> Result<(EncoderSecret, EncodingCommitment), EncodingError> {
    let secret = EncoderSecret::new(rand::rng().random(), store.delta().as_block().to_bytes());
    let encoder = new_encoder(&secret);

    // Collects the encodings for the provided plaintext ranges.
    fn collect_encodings(
        encoder: &impl Encoder,
        store: &impl KeyStore,
        direction: Direction,
        map: &ReferenceMap,
    ) -> Vec<u8> {
        let mut encodings = Vec::with_capacity(map.len() * ENCODING_SIZE);
        for (range, chunk) in map.iter() {
            let start = encodings.len();
            encoder.encode_range(direction, range, &mut encodings);
            let keys = store
                .get_keys(*chunk)
                .expect("keys are present for provided plaintext ranges");
            encodings[start..]
                .iter_mut()
                .zip(keys.iter().flat_map(|key| key.as_block().as_bytes()))
                .for_each(|(encoding, key)| {
                    *encoding ^= *key;
                });
        }
        encodings
    }

    let encodings = Encodings {
        sent: collect_encodings(&encoder, store, Direction::Sent, sent),
        recv: collect_encodings(&encoder, store, Direction::Received, recv),
    };

    let frame_limit = ctx
        .io()
        .limit()
        .saturating_add(encodings.sent.len() + encodings.recv.len());
    ctx.io_mut().with_limit(frame_limit).send(encodings).await?;

    let root = ctx.io_mut().expect_next().await?;

    Ok((secret, EncodingCommitment { root }))
}

/// Receives and commits to the encodings for the provided plaintext ranges.
pub(crate) async fn receive<M: MacStore>(
    ctx: &mut Context,
    store: &M,
    hash_alg: HashAlgId,
    sent: &ReferenceMap,
    recv: &ReferenceMap,
    idxs: impl IntoIterator<Item = &(Direction, RangeSet<usize>)>,
) -> Result<(EncodingCommitment, EncodingTree), EncodingError> {
    let hasher: &(dyn HashAlgorithm + Send + Sync) = match hash_alg {
        HashAlgId::SHA256 => &Sha256::default(),
        HashAlgId::KECCAK256 => &Keccak256::default(),
        HashAlgId::BLAKE3 => &Blake3::default(),
        alg => {
            return Err(ErrorRepr::UnsupportedHashAlgorithm(alg).into());
        }
    };

    let (sent_len, recv_len) = (sent.len(), recv.len());
    let frame_limit = ctx
        .io()
        .limit()
        .saturating_add(ENCODING_SIZE * (sent_len + recv_len));
    let encodings: Encodings = ctx.io_mut().with_limit(frame_limit).expect_next().await?;

    if encodings.sent.len() != sent_len * ENCODING_SIZE {
        return Err(ErrorRepr::IncorrectMacCount {
            direction: Direction::Sent,
            expected: sent_len,
            got: encodings.sent.len() / ENCODING_SIZE,
        }
        .into());
    }

    if encodings.recv.len() != recv_len * ENCODING_SIZE {
        return Err(ErrorRepr::IncorrectMacCount {
            direction: Direction::Received,
            expected: recv_len,
            got: encodings.recv.len() / ENCODING_SIZE,
        }
        .into());
    }

    // Collects a map of plaintext ranges to their encodings.
    fn collect_map(
        store: &impl MacStore,
        mut encodings: Vec<u8>,
        map: &ReferenceMap,
    ) -> RangeMap<EncodingSlice> {
        let mut encoding_map = Vec::new();
        let mut pos = 0;
        for (range, chunk) in map.iter() {
            let macs = store
                .get_macs(*chunk)
                .expect("MACs are present for provided plaintext ranges");
            let encoding = &mut encodings[pos..pos + range.len() * ENCODING_SIZE];
            encoding
                .iter_mut()
                .zip(macs.iter().flat_map(|mac| mac.as_bytes()))
                .for_each(|(encoding, mac)| {
                    *encoding ^= *mac;
                });

            encoding_map.push((range.start, EncodingSlice::from(&(*encoding))));
            pos += range.len() * ENCODING_SIZE;
        }
        RangeMap::new(encoding_map)
    }

    let provider = Provider {
        sent: collect_map(store, encodings.sent, sent),
        recv: collect_map(store, encodings.recv, recv),
    };

    let tree = EncodingTree::new(hasher, idxs, &provider)?;
    let root = tree.root();

    ctx.io_mut().send(root.clone()).await?;

    let commitment = EncodingCommitment { root };

    Ok((commitment, tree))
}

pub(crate) trait KeyStore {
    fn delta(&self) -> &Delta;

    fn get_keys(&self, data: Vector<U8>) -> Option<&[Key]>;
}

impl KeyStore for crate::verifier::Zk {
    fn delta(&self) -> &Delta {
        crate::verifier::Zk::delta(self)
    }

    fn get_keys(&self, data: Vector<U8>) -> Option<&[Key]> {
        self.get_keys(data).ok()
    }
}

pub(crate) trait MacStore {
    fn get_macs(&self, data: Vector<U8>) -> Option<&[Mac]>;
}

impl MacStore for crate::prover::Zk {
    fn get_macs(&self, data: Vector<U8>) -> Option<&[Mac]> {
        self.get_macs(data).ok()
    }
}

#[derive(Debug)]
struct Provider {
    sent: RangeMap<EncodingSlice>,
    recv: RangeMap<EncodingSlice>,
}

impl EncodingProvider for Provider {
    fn provide_encoding(
        &self,
        direction: Direction,
        range: Range<usize>,
        dest: &mut Vec<u8>,
    ) -> Result<(), EncodingProviderError> {
        let encodings = match direction {
            Direction::Sent => &self.sent,
            Direction::Received => &self.recv,
        };

        let encoding = encodings.get(range).ok_or(EncodingProviderError)?;

        dest.extend_from_slice(encoding);

        Ok(())
    }
}

#[derive(Debug)]
struct EncodingSlice(Vec<u8>);

impl From<&[u8]> for EncodingSlice {
    fn from(value: &[u8]) -> Self {
        Self(value.to_vec())
    }
}

impl Item for EncodingSlice {
    type Slice<'a>
        = &'a [u8]
    where
        Self: 'a;

    fn length(&self) -> usize {
        self.0.len() / ENCODING_SIZE
    }

    fn slice<'a>(&'a self, range: Range<usize>) -> Option<Self::Slice<'a>> {
        self.0
            .get(range.start * ENCODING_SIZE..range.end * ENCODING_SIZE)
    }
}

/// Encoding protocol error.
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct EncodingError(#[from] ErrorRepr);

#[derive(Debug, thiserror::Error)]
#[error("encoding protocol error: {0}")]
enum ErrorRepr {
    #[error("I/O error: {0}")]
    Io(std::io::Error),
    #[error("incorrect MAC count for {direction}: expected {expected}, got {got}")]
    IncorrectMacCount {
        direction: Direction,
        expected: usize,
        got: usize,
    },
    #[error("encoding tree error: {0}")]
    EncodingTree(EncodingTreeError),
    #[error("unsupported hash algorithm: {0}")]
    UnsupportedHashAlgorithm(HashAlgId),
}

impl From<std::io::Error> for EncodingError {
    fn from(value: std::io::Error) -> Self {
        Self(ErrorRepr::Io(value))
    }
}

impl From<EncodingTreeError> for EncodingError {
    fn from(value: EncodingTreeError) -> Self {
        Self(ErrorRepr::EncodingTree(value))
    }
}
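For orientation (this is not part of the diff): the XOR masking in `transfer`/`receive` above relies on the standard correlated-MAC identity. The verifier holds a key for each bit, the prover holds `mac = key ⊕ bit·Δ`, and the encoder is seeded with that same Δ, so masking the zero-encoding with the key and unmasking with the MAC leaves the prover with the encoding of its actual bit without learning Δ. A self-contained sketch of that algebra over `u128` blocks, with purely illustrative constants:

```rust
// Illustrative values only; in the protocol these are 128-bit blocks handled by mpz.
fn main() {
    let delta: u128 = 0x1111_2222_3333_4444; // global correlation (verifier secret)
    let key: u128 = 0xaaaa_bbbb_cccc_dddd;   // verifier's key for one bit
    let bit: u128 = 1;                       // prover's plaintext bit
    let mac: u128 = key ^ (bit * delta);     // prover's MAC for that bit

    let zero_encoding: u128 = 0x0123_4567_89ab_cdef; // encoder output for bit = 0
    let masked = zero_encoding ^ key;                // what the verifier transfers
    let active = masked ^ mac;                       // what the prover recovers

    // The prover ends up with the encoding of its own bit: zero_encoding ^ bit*delta.
    assert_eq!(active, zero_encoding ^ (bit * delta));
}
```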
@@ -3,7 +3,7 @@
use std::collections::HashMap;

use mpz_core::bitvec::BitVec;
use mpz_hash::sha256::Sha256;
use mpz_hash::{blake3::Blake3, sha256::Sha256};
use mpz_memory_core::{
    DecodeFutureTyped, MemoryExt, Vector,
    binary::{Binary, U8},
@@ -18,7 +18,7 @@ use tlsn_core::{
    },
};

use crate::{Role, commit::transcript::TranscriptRefs};
use crate::{Role, transcript_internal::TranscriptRefs};

/// Future which will resolve to the committed hash values.
#[derive(Debug)]
@@ -107,6 +107,12 @@ pub(crate) fn verify_hash(
    Ok(HashCommitFuture { futs })
}

#[derive(Clone)]
enum Hasher {
    Sha256(Sha256),
    Blake3(Blake3),
}

/// Commit plaintext hashes of the transcript.
#[allow(clippy::type_complexity)]
fn hash_commit_inner(
@@ -135,20 +141,50 @@ fn hash_commit_inner(

        let hash = match alg {
            HashAlgId::SHA256 => {
                let mut hasher = if let Some(hasher) = hashers.get(&alg).cloned() {
                let mut hasher = if let Some(Hasher::Sha256(hasher)) = hashers.get(&alg).cloned() {
                    hasher
                } else {
                    let hasher = Sha256::new_with_init(vm).map_err(HashCommitError::hasher)?;
                    hashers.insert(alg, hasher.clone());
                    hashers.insert(alg, Hasher::Sha256(hasher.clone()));
                    hasher
                };

                for plaintext in refs.get(direction, &idx).expect("plaintext refs are valid") {
                    hasher.update(&plaintext);
                let refs = match direction {
                    Direction::Sent => &refs.sent,
                    Direction::Received => &refs.recv,
                };

                for range in idx.iter_ranges() {
                    hasher.update(&refs.get(range).expect("plaintext refs are valid"));
                }

                hasher.update(&blinder);
                hasher.finalize(vm).map_err(HashCommitError::hasher)?
            }
            HashAlgId::BLAKE3 => {
                let mut hasher = if let Some(Hasher::Blake3(hasher)) = hashers.get(&alg).cloned() {
                    hasher
                } else {
                    let hasher = Blake3::new(vm).map_err(HashCommitError::hasher)?;
                    hashers.insert(alg, Hasher::Blake3(hasher.clone()));
                    hasher
                };

                let refs = match direction {
                    Direction::Sent => &refs.sent,
                    Direction::Received => &refs.recv,
                };

                for range in idx.iter_ranges() {
                    hasher
                        .update(vm, &refs.get(range).expect("plaintext refs are valid"))
                        .map_err(HashCommitError::hasher)?;
                }
                hasher
                    .update(vm, &blinder)
                    .map_err(HashCommitError::hasher)?;
                hasher.finalize(vm).map_err(HashCommitError::hasher)?
            }
            alg => {
                return Err(HashCommitError::unsupported_alg(alg));
            }
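As a point of reference (not part of the change): the value these in-VM hashers commit to is simply the chosen digest over the selected plaintext ranges followed by a random blinder. A plain, non-MPC analogue using the `sha2` crate, for illustration only (the 16-byte blinder length is an assumption for concreteness):

```rust
use sha2::{Digest, Sha256};

/// H(range_1 || range_2 || ... || blinder) — the same message layout the VM hashers process.
fn hash_commitment(ranges: &[&[u8]], blinder: &[u8; 16]) -> [u8; 32] {
    let mut hasher = Sha256::new();
    for range in ranges {
        hasher.update(range);
    }
    hasher.update(blinder);
    hasher.finalize().into()
}

fn main() {
    // Illustrative inputs only.
    let commitment = hash_commitment(&[b"GET /info", b"200 OK"], &[7u8; 16]);
    let hex: String = commitment.iter().map(|b| format!("{b:02x}")).collect();
    println!("{hex}");
}
```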
@@ -3,6 +3,7 @@
pub(crate) mod config;
mod error;
pub mod state;
mod verify;

use std::sync::Arc;

@@ -14,18 +15,7 @@ pub use tlsn_core::{
};

use crate::{
    Role,
    commit::{
        commit_records,
        hash::verify_hash,
        transcript::{TranscriptRefs, decode_transcript, verify_transcript},
    },
    config::ProtocolConfig,
    context::build_mt_context,
    encoding,
    mux::attach_mux,
    tag::verify_tags,
    zk_aes_ctr::ZkAesCtr,
    Role, config::ProtocolConfig, context::build_mt_context, mux::attach_mux, tag::verify_tags,
};
use futures::{AsyncRead, AsyncWrite, TryFutureExt};
use mpc_tls::{MpcTlsFollower, SessionKeys};
@@ -35,11 +25,9 @@ use mpz_garble_core::Delta;
use mpz_vm_core::prelude::*;
use mpz_zk::VerifierConfig as ZkVerifierConfig;
use serio::stream::IoStreamExt;
use tls_core::msgs::enums::ContentType;
use tlsn_core::{
    ProvePayload,
    connection::{ConnectionInfo, ServerName},
    transcript::{TlsTranscript, TranscriptCommitment},
    transcript::TlsTranscript,
};
use tlsn_deap::Deap;
use tokio::sync::Mutex;
@@ -114,23 +102,12 @@ impl Verifier<state::Initialized> {
        })
        .await?;

        let delta = Delta::random(&mut rand::rng());
        let (vm, mut mpc_tls) = build_mpc_tls(&self.config, &protocol_config, delta, ctx);
        let (vm, mut mpc_tls) = build_mpc_tls(&self.config, &protocol_config, ctx);

        // Allocate resources for MPC-TLS in the VM.
        let mut keys = mpc_tls.alloc()?;
        let vm_lock = vm.try_lock().expect("VM is not locked");
        translate_keys(&mut keys, &vm_lock)?;

        // Allocate for committing to plaintext.
        let mut zk_aes_ctr_sent = ZkAesCtr::new(Role::Verifier);
        zk_aes_ctr_sent.set_key(keys.client_write_key, keys.client_write_iv);
        zk_aes_ctr_sent.alloc(&mut *vm_lock.zk(), protocol_config.max_sent_data())?;

        let mut zk_aes_ctr_recv = ZkAesCtr::new(Role::Verifier);
        zk_aes_ctr_recv.set_key(keys.server_write_key, keys.server_write_iv);
        zk_aes_ctr_recv.alloc(&mut *vm_lock.zk(), protocol_config.max_recv_data())?;

        drop(vm_lock);

        debug!("setting up mpc-tls");
@@ -145,10 +122,7 @@ impl Verifier<state::Initialized> {
            state: state::Setup {
                mux_ctrl,
                mux_fut,
                delta,
                mpc_tls,
                zk_aes_ctr_sent,
                zk_aes_ctr_recv,
                keys,
                vm,
            },
@@ -186,10 +160,7 @@ impl Verifier<state::Setup> {
        let state::Setup {
            mux_ctrl,
            mut mux_fut,
            delta,
            mpc_tls,
            mut zk_aes_ctr_sent,
            mut zk_aes_ctr_recv,
            vm,
            keys,
        } = self.state;
@@ -230,27 +201,6 @@ impl Verifier<state::Setup> {
        )
        .map_err(VerifierError::zk)?;

        // Prepare for the prover to prove received plaintext.
        let (sent_refs, sent_proof) = commit_records(
            &mut vm,
            &mut zk_aes_ctr_sent,
            tls_transcript
                .sent()
                .iter()
                .filter(|record| record.typ == ContentType::ApplicationData),
        )
        .map_err(VerifierError::zk)?;

        let (recv_refs, recv_proof) = commit_records(
            &mut vm,
            &mut zk_aes_ctr_recv,
            tls_transcript
                .recv()
                .iter()
                .filter(|record| record.typ == ContentType::ApplicationData),
        )
        .map_err(VerifierError::zk)?;

        mux_fut
            .poll_with(vm.execute_all(&mut ctx).map_err(VerifierError::zk))
            .await?;
@@ -260,23 +210,16 @@ impl Verifier<state::Setup> {
        // authenticated from the verifier's perspective.
        tag_proof.verify().map_err(VerifierError::zk)?;

        // Verify the plaintext proofs.
        sent_proof.verify().map_err(VerifierError::zk)?;
        recv_proof.verify().map_err(VerifierError::zk)?;

        let transcript_refs = TranscriptRefs::new(sent_refs, recv_refs);

        Ok(Verifier {
            config: self.config,
            span: self.span,
            state: state::Committed {
                mux_ctrl,
                mux_fut,
                delta,
                ctx,
                vm,
                keys,
                tls_transcript,
                transcript_refs,
            },
        })
    }
@@ -301,130 +244,34 @@ impl Verifier<state::Committed> {
        let state::Committed {
            mux_fut,
            ctx,
            delta,
            vm,
            keys,
            tls_transcript,
            transcript_refs,
            ..
        } = &mut self.state;

        let ProvePayload {
            handshake,
            transcript,
            transcript_commit,
        } = mux_fut
            .poll_with(ctx.io_mut().expect_next().map_err(VerifierError::from))
            .await?;

        let verifier = if let Some(root_store) = self.config.root_store() {
        let cert_verifier = if let Some(root_store) = self.config.root_store() {
            ServerCertVerifier::new(root_store).map_err(VerifierError::config)?
        } else {
            ServerCertVerifier::mozilla()
        };

        let server_name = if let Some((name, cert_data)) = handshake {
            cert_data
                .verify(
                    &verifier,
                    tls_transcript.time(),
                    tls_transcript.server_ephemeral_key(),
                    &name,
                )
                .map_err(VerifierError::verify)?;

            Some(name)
        } else {
            None
        };

        if let Some(partial_transcript) = &transcript {
            let sent_len = tls_transcript
                .sent()
                .iter()
                .filter_map(|record| {
                    if let ContentType::ApplicationData = record.typ {
                        Some(record.ciphertext.len())
                    } else {
                        None
                    }
                })
                .sum::<usize>();

            let recv_len = tls_transcript
                .recv()
                .iter()
                .filter_map(|record| {
                    if let ContentType::ApplicationData = record.typ {
                        Some(record.ciphertext.len())
                    } else {
                        None
                    }
                })
                .sum::<usize>();

            // Check ranges.
            if partial_transcript.len_sent() != sent_len
                || partial_transcript.len_received() != recv_len
            {
                return Err(VerifierError::verify(
                    "prover sent transcript with incorrect length",
                ));
            }

            decode_transcript(
                vm,
                partial_transcript.sent_authed(),
                partial_transcript.received_authed(),
                transcript_refs,
            )
            .map_err(VerifierError::zk)?;
        }

        let mut transcript_commitments = Vec::new();
        let mut hash_commitments = None;
        if let Some(commit_config) = transcript_commit {
            if commit_config.encoding() {
                let commitment = mux_fut
                    .poll_with(encoding::transfer(
                        ctx,
                        transcript_refs,
                        delta,
                        |plaintext| vm.get_keys(plaintext).expect("reference is valid"),
                    ))
                    .await?;

                transcript_commitments.push(TranscriptCommitment::Encoding(commitment));
            }

            if commit_config.has_hash() {
                hash_commitments = Some(
                    verify_hash(vm, transcript_refs, commit_config.iter_hash().cloned())
                        .map_err(VerifierError::verify)?,
                );
            }
        }

        mux_fut
            .poll_with(vm.execute_all(ctx).map_err(VerifierError::zk))
        let request = mux_fut
            .poll_with(ctx.io_mut().expect_next().map_err(VerifierError::from))
            .await?;

        // Verify revealed data.
        if let Some(partial_transcript) = &transcript {
            verify_transcript(vm, partial_transcript, transcript_refs)
                .map_err(VerifierError::verify)?;
        }
        let output = mux_fut
            .poll_with(verify::verify(
                ctx,
                vm,
                keys,
                &cert_verifier,
                tls_transcript,
                request,
            ))
            .await?;

        if let Some(hash_commitments) = hash_commitments {
            for commitment in hash_commitments.try_recv().map_err(VerifierError::verify)? {
                transcript_commitments.push(TranscriptCommitment::Hash(commitment));
            }
        }

        Ok(VerifierOutput {
            server_name,
            transcript,
            transcript_commitments,
        })
        Ok(output)
    }

    /// Closes the connection with the prover.
@@ -447,11 +294,11 @@ impl Verifier<state::Committed> {
fn build_mpc_tls(
    config: &VerifierConfig,
    protocol_config: &ProtocolConfig,
    delta: Delta,
    ctx: Context,
) -> (Arc<Mutex<Deap<Mpc, Zk>>>, MpcTlsFollower) {
    let mut rng = rand::rng();

    let delta = Delta::random(&mut rng);
    let base_ot_send = mpz_ot::chou_orlandi::Sender::default();
    let base_ot_recv = mpz_ot::chou_orlandi::Receiver::default();
    let rcot_send = mpz_ot::kos::Sender::new(
@@ -1,7 +1,9 @@
use crate::{encoding::EncodingError, zk_aes_ctr::ZkAesCtrError};
use mpc_tls::MpcTlsError;
use std::{error::Error, fmt};

use mpc_tls::MpcTlsError;

use crate::transcript_internal::commit::encoding::EncodingError;

/// Error for [`Verifier`](crate::Verifier).
#[derive(Debug, thiserror::Error)]
pub struct VerifierError {
@@ -110,12 +112,6 @@ impl From<MpcTlsError> for VerifierError {
    }
}

impl From<ZkAesCtrError> for VerifierError {
    fn from(e: ZkAesCtrError) -> Self {
        Self::new(ErrorKind::Zk, e)
    }
}

impl From<EncodingError> for VerifierError {
    fn from(e: EncodingError) -> Self {
        Self::new(ErrorKind::Commit, e)
@@ -2,14 +2,9 @@

use std::sync::Arc;

use crate::{
    commit::transcript::TranscriptRefs,
    mux::{MuxControl, MuxFuture},
    zk_aes_ctr::ZkAesCtr,
};
use crate::mux::{MuxControl, MuxFuture};
use mpc_tls::{MpcTlsFollower, SessionKeys};
use mpz_common::Context;
use mpz_memory_core::correlated::Delta;
use tlsn_core::transcript::TlsTranscript;
use tlsn_deap::Deap;
use tokio::sync::Mutex;
@@ -28,10 +23,7 @@ opaque_debug::implement!(Initialized);
pub struct Setup {
    pub(crate) mux_ctrl: MuxControl,
    pub(crate) mux_fut: MuxFuture,
    pub(crate) delta: Delta,
    pub(crate) mpc_tls: MpcTlsFollower,
    pub(crate) zk_aes_ctr_sent: ZkAesCtr,
    pub(crate) zk_aes_ctr_recv: ZkAesCtr,
    pub(crate) keys: SessionKeys,
    pub(crate) vm: Arc<Mutex<Deap<Mpc, Zk>>>,
}
@@ -40,11 +32,10 @@ pub struct Setup {
pub struct Committed {
    pub(crate) mux_ctrl: MuxControl,
    pub(crate) mux_fut: MuxFuture,
    pub(crate) delta: Delta,
    pub(crate) ctx: Context,
    pub(crate) vm: Zk,
    pub(crate) keys: SessionKeys,
    pub(crate) tls_transcript: TlsTranscript,
    pub(crate) transcript_refs: TranscriptRefs,
}

opaque_debug::implement!(Committed);
crates/tlsn/src/verifier/verify.rs (new file, +179 lines)
@@ -0,0 +1,179 @@
use mpc_tls::SessionKeys;
use mpz_common::Context;
use mpz_memory_core::binary::Binary;
use mpz_vm_core::Vm;
use rangeset::{RangeSet, UnionMut};
use tlsn_core::{
    ProveRequest, VerifierOutput,
    transcript::{
        ContentType, Direction, PartialTranscript, Record, TlsTranscript, TranscriptCommitment,
    },
    webpki::ServerCertVerifier,
};

use crate::{
    transcript_internal::{
        TranscriptRefs,
        auth::verify_plaintext,
        commit::{
            encoding::{self, KeyStore},
            hash::verify_hash,
        },
    },
    verifier::VerifierError,
};

pub(crate) async fn verify<T: Vm<Binary> + KeyStore + Send + Sync>(
    ctx: &mut Context,
    vm: &mut T,
    keys: &SessionKeys,
    cert_verifier: &ServerCertVerifier,
    tls_transcript: &TlsTranscript,
    request: ProveRequest,
) -> Result<VerifierOutput, VerifierError> {
    let ProveRequest {
        handshake,
        transcript,
        transcript_commit,
    } = request;

    let ciphertext_sent = collect_ciphertext(tls_transcript.sent());
    let ciphertext_recv = collect_ciphertext(tls_transcript.recv());

    let has_reveal = transcript.is_some();
    let transcript = if let Some(transcript) = transcript {
        if transcript.len_sent() != ciphertext_sent.len()
            || transcript.len_received() != ciphertext_recv.len()
        {
            return Err(VerifierError::verify(
                "prover sent transcript with incorrect length",
            ));
        }

        transcript
    } else {
        PartialTranscript::new(ciphertext_sent.len(), ciphertext_recv.len())
    };

    let server_name = if let Some((name, cert_data)) = handshake {
        cert_data
            .verify(
                cert_verifier,
                tls_transcript.time(),
                tls_transcript.server_ephemeral_key(),
                &name,
            )
            .map_err(VerifierError::verify)?;

        Some(name)
    } else {
        None
    };

    let (mut commit_sent, mut commit_recv) = (RangeSet::default(), RangeSet::default());
    if let Some(commit_config) = transcript_commit.as_ref() {
        commit_config
            .iter_hash()
            .for_each(|(direction, idx, _)| match direction {
                Direction::Sent => commit_sent.union_mut(idx),
                Direction::Received => commit_recv.union_mut(idx),
            });

        if let Some((sent, recv)) = commit_config.encoding() {
            commit_sent.union_mut(sent);
            commit_recv.union_mut(recv);
        }
    }

    let (sent_refs, sent_proof) = verify_plaintext(
        vm,
        keys.client_write_key,
        keys.client_write_iv,
        transcript.sent_unsafe(),
        &ciphertext_sent,
        tls_transcript
            .sent()
            .iter()
            .filter(|record| record.typ == ContentType::ApplicationData),
        transcript.sent_authed(),
        &commit_sent,
    )
    .map_err(VerifierError::zk)?;
    let (recv_refs, recv_proof) = verify_plaintext(
        vm,
        keys.server_write_key,
        keys.server_write_iv,
        transcript.received_unsafe(),
        &ciphertext_recv,
        tls_transcript
            .recv()
            .iter()
            .filter(|record| record.typ == ContentType::ApplicationData),
        transcript.received_authed(),
        &commit_recv,
    )
    .map_err(VerifierError::zk)?;

    let transcript_refs = TranscriptRefs {
        sent: sent_refs,
        recv: recv_refs,
    };

    let mut transcript_commitments = Vec::new();
    let mut hash_commitments = None;
    if let Some(commit_config) = transcript_commit.as_ref()
        && commit_config.has_hash()
    {
        hash_commitments = Some(
            verify_hash(vm, &transcript_refs, commit_config.iter_hash().cloned())
                .map_err(VerifierError::verify)?,
        );
    }

    vm.execute_all(ctx).await.map_err(VerifierError::zk)?;

    sent_proof.verify().map_err(VerifierError::verify)?;
    recv_proof.verify().map_err(VerifierError::verify)?;

    let mut encoder_secret = None;
    if let Some(commit_config) = transcript_commit
        && let Some((sent, recv)) = commit_config.encoding()
    {
        let sent_map = transcript_refs
            .sent
            .index(sent)
            .expect("ranges were authenticated");
        let recv_map = transcript_refs
            .recv
            .index(recv)
            .expect("ranges were authenticated");

        let (secret, commitment) = encoding::transfer(ctx, vm, &sent_map, &recv_map).await?;
        encoder_secret = Some(secret);
        transcript_commitments.push(TranscriptCommitment::Encoding(commitment));
    }

    if let Some(hash_commitments) = hash_commitments {
        for commitment in hash_commitments.try_recv().map_err(VerifierError::verify)? {
            transcript_commitments.push(TranscriptCommitment::Hash(commitment));
        }
    }

    Ok(VerifierOutput {
        server_name,
        transcript: has_reveal.then_some(transcript),
        encoder_secret,
        transcript_commitments,
    })
}

fn collect_ciphertext<'a>(records: impl IntoIterator<Item = &'a Record>) -> Vec<u8> {
    let mut ciphertext = Vec::new();
    records
        .into_iter()
        .filter(|record| record.typ == ContentType::ApplicationData)
        .for_each(|record| {
            ciphertext.extend_from_slice(&record.ciphertext);
        });
    ciphertext
}
@@ -1,214 +0,0 @@
//! Zero-knowledge AES-CTR encryption.

use cipher::{
    Cipher, CipherError, Keystream,
    aes::{Aes128, AesError},
};
use mpz_memory_core::{
    Array, Vector,
    binary::{Binary, U8},
};
use mpz_vm_core::{Vm, prelude::*};

use crate::Role;

type Nonce = Array<U8, 8>;
type Ctr = Array<U8, 4>;
type Block = Array<U8, 16>;

const START_CTR: u32 = 2;

/// ZK AES-CTR encryption.
#[derive(Debug)]
pub(crate) struct ZkAesCtr {
    role: Role,
    aes: Aes128,
    state: State,
}

impl ZkAesCtr {
    /// Creates a new ZK AES-CTR instance.
    pub(crate) fn new(role: Role) -> Self {
        Self {
            role,
            aes: Aes128::default(),
            state: State::Init,
        }
    }

    /// Returns the role.
    pub(crate) fn role(&self) -> &Role {
        &self.role
    }

    /// Allocates `len` bytes for encryption.
    pub(crate) fn alloc(
        &mut self,
        vm: &mut dyn Vm<Binary>,
        len: usize,
    ) -> Result<(), ZkAesCtrError> {
        let State::Init = self.state.take() else {
            Err(ErrorRepr::State {
                reason: "must be in init state to allocate",
            })?
        };

        // Round up to the nearest block size.
        let len = 16 * len.div_ceil(16);

        let input = vm.alloc_vec::<U8>(len).map_err(ZkAesCtrError::vm)?;
        let keystream = self.aes.alloc_keystream(vm, len)?;

        match self.role {
            Role::Prover => vm.mark_private(input).map_err(ZkAesCtrError::vm)?,
            Role::Verifier => vm.mark_blind(input).map_err(ZkAesCtrError::vm)?,
        }

        self.state = State::Ready { input, keystream };

        Ok(())
    }

    /// Sets the key and IV for the cipher.
    pub(crate) fn set_key(&mut self, key: Array<U8, 16>, iv: Array<U8, 4>) {
        self.aes.set_key(key);
        self.aes.set_iv(iv);
    }

    /// Proves the encryption of `len` bytes.
    ///
    /// Here we only assign certain values in the VM but the actual proving
    /// happens later when the plaintext is assigned and the VM is executed.
    ///
    /// # Arguments
    ///
    /// * `vm` - Virtual machine.
    /// * `explicit_nonce` - Explicit nonce.
    /// * `len` - Length of the plaintext in bytes.
    ///
    /// # Returns
    ///
    /// A VM reference to the plaintext and the ciphertext.
    pub(crate) fn encrypt(
        &mut self,
        vm: &mut dyn Vm<Binary>,
        explicit_nonce: Vec<u8>,
        len: usize,
    ) -> Result<(Vector<U8>, Vector<U8>), ZkAesCtrError> {
        let State::Ready { input, keystream } = &mut self.state else {
            Err(ErrorRepr::State {
                reason: "must be in ready state to encrypt",
            })?
        };

        let explicit_nonce: [u8; 8] =
            explicit_nonce
                .try_into()
                .map_err(|explicit_nonce: Vec<_>| ErrorRepr::ExplicitNonceLength {
                    expected: 8,
                    actual: explicit_nonce.len(),
                })?;

        let block_count = len.div_ceil(16);
        let padded_len = block_count * 16;
        let padding_len = padded_len - len;

        if padded_len > input.len() {
            Err(ErrorRepr::InsufficientPreprocessing {
                expected: padded_len,
                actual: input.len(),
            })?
        }

        let mut input = input.split_off(input.len() - padded_len);
        let keystream = keystream.consume(padded_len)?;
        let mut output = keystream.apply(vm, input)?;

        // Assign counter block inputs.
        let mut ctr = START_CTR..;
        keystream.assign(vm, explicit_nonce, move || {
            ctr.next().expect("range is unbounded").to_be_bytes()
        })?;

        // Assign zeroes to the padding.
        if padding_len > 0 {
            let padding = input.split_off(input.len() - padding_len);
            // To simplify the impl, we don't mark the padding as public, that's why only
            // the prover assigns it.
            if let Role::Prover = self.role {
                vm.assign(padding, vec![0; padding_len])
                    .map_err(ZkAesCtrError::vm)?;
            }
            vm.commit(padding).map_err(ZkAesCtrError::vm)?;
            output.truncate(len);
        }

        Ok((input, output))
    }
}

enum State {
    Init,
    Ready {
        input: Vector<U8>,
        keystream: Keystream<Nonce, Ctr, Block>,
    },
    Error,
}

impl State {
    fn take(&mut self) -> Self {
        std::mem::replace(self, State::Error)
    }
}

impl std::fmt::Debug for State {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            State::Init => write!(f, "Init"),
            State::Ready { .. } => write!(f, "Ready"),
            State::Error => write!(f, "Error"),
        }
    }
}

/// Error for [`ZkAesCtr`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct ZkAesCtrError(#[from] ErrorRepr);

impl ZkAesCtrError {
    fn vm<E>(err: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync + 'static>>,
    {
        Self(ErrorRepr::Vm(err.into()))
    }
}

#[derive(Debug, thiserror::Error)]
#[error("zk aes error")]
enum ErrorRepr {
    #[error("invalid state: {reason}")]
    State { reason: &'static str },
    #[error("cipher error: {0}")]
    Cipher(Box<dyn std::error::Error + Send + Sync + 'static>),
    #[error("vm error: {0}")]
    Vm(Box<dyn std::error::Error + Send + Sync + 'static>),
    #[error("invalid explicit nonce length: expected {expected}, got {actual}")]
    ExplicitNonceLength { expected: usize, actual: usize },
    #[error("insufficient preprocessing: expected {expected}, got {actual}")]
    InsufficientPreprocessing { expected: usize, actual: usize },
}

impl From<AesError> for ZkAesCtrError {
    fn from(err: AesError) -> Self {
        Self(ErrorRepr::Cipher(Box::new(err)))
    }
}

impl From<CipherError> for ZkAesCtrError {
    fn from(err: CipherError) -> Self {
        Self(ErrorRepr::Cipher(Box::new(err)))
    }
}
@@ -1,11 +1,17 @@
use futures::{AsyncReadExt, AsyncWriteExt};
use rangeset::RangeSet;
use tlsn::{
    config::{CertificateDer, ProtocolConfig, ProtocolConfigValidator, RootCertStore},
    connection::ServerName,
    hash::{HashAlgId, HashProvider},
    prover::{ProveConfig, Prover, ProverConfig, TlsConfig},
    transcript::{TranscriptCommitConfig, TranscriptCommitment},
    transcript::{
        Direction, Transcript, TranscriptCommitConfig, TranscriptCommitment,
        TranscriptCommitmentKind, TranscriptSecret,
    },
    verifier::{Verifier, VerifierConfig, VerifierOutput, VerifyConfig},
};
use tlsn_core::ProverOutput;
use tlsn_server_fixture::bind;
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};

@@ -29,11 +35,80 @@ async fn test() {

    let (socket_0, socket_1) = tokio::io::duplex(2 << 23);

    tokio::join!(prover(socket_0), verifier(socket_1));
    let ((full_transcript, prover_output), verifier_output) =
        tokio::join!(prover(socket_0), verifier(socket_1));

    let partial_transcript = verifier_output.transcript.unwrap();
    let ServerName::Dns(server_name) = verifier_output.server_name.unwrap();

    assert_eq!(server_name.as_str(), SERVER_DOMAIN);
    assert!(!partial_transcript.is_complete());
    assert_eq!(
        partial_transcript
            .sent_authed()
            .iter_ranges()
            .next()
            .unwrap(),
        0..10
    );
    assert_eq!(
        partial_transcript
            .received_authed()
            .iter_ranges()
            .next()
            .unwrap(),
        0..10
    );

    let encoding_tree = prover_output
        .transcript_secrets
        .iter()
        .find_map(|secret| {
            if let TranscriptSecret::Encoding(tree) = secret {
                Some(tree)
            } else {
                None
            }
        })
        .unwrap();

    let encoding_commitment = prover_output
        .transcript_commitments
        .iter()
        .find_map(|commitment| {
            if let TranscriptCommitment::Encoding(commitment) = commitment {
                Some(commitment)
            } else {
                None
            }
        })
        .unwrap();

    let prove_sent = RangeSet::from(1..full_transcript.sent().len() - 1);
    let prove_recv = RangeSet::from(1..full_transcript.received().len() - 1);
    let idxs = [
        (Direction::Sent, prove_sent.clone()),
        (Direction::Received, prove_recv.clone()),
    ];
    let proof = encoding_tree.proof(idxs.iter()).unwrap();
    let (auth_sent, auth_recv) = proof
        .verify_with_provider(
            &HashProvider::default(),
            &verifier_output.encoder_secret.unwrap(),
            encoding_commitment,
            full_transcript.sent(),
            full_transcript.received(),
        )
        .unwrap();

    assert_eq!(auth_sent, prove_sent);
    assert_eq!(auth_recv, prove_recv);
}

#[instrument(skip(verifier_socket))]
async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(verifier_socket: T) {
async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
    verifier_socket: T,
) -> (Transcript, ProverOutput) {
    let (client_socket, server_socket) = tokio::io::duplex(2 << 16);

    let server_task = tokio::spawn(bind(server_socket.compat()));
@@ -86,9 +161,25 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(verifier_soc

    let mut builder = TranscriptCommitConfig::builder(prover.transcript());

    // Commit to everything
    builder.commit_sent(&(0..sent_tx_len)).unwrap();
    builder.commit_recv(&(0..recv_tx_len)).unwrap();
    for kind in [
        TranscriptCommitmentKind::Encoding,
        TranscriptCommitmentKind::Hash {
            alg: HashAlgId::SHA256,
        },
    ] {
        builder
            .commit_with_kind(&(0..sent_tx_len), Direction::Sent, kind)
            .unwrap();
        builder
            .commit_with_kind(&(0..recv_tx_len), Direction::Received, kind)
            .unwrap();
        builder
            .commit_with_kind(&(1..sent_tx_len - 1), Direction::Sent, kind)
            .unwrap();
        builder
            .commit_with_kind(&(1..recv_tx_len - 1), Direction::Received, kind)
            .unwrap();
    }

    let transcript_commit = builder.build().unwrap();

@@ -102,13 +193,17 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(verifier_soc
    builder.transcript_commit(transcript_commit);

    let config = builder.build().unwrap();

    prover.prove(&config).await.unwrap();
    let transcript = prover.transcript().clone();
    let output = prover.prove(&config).await.unwrap();
    prover.close().await.unwrap();

    (transcript, output)
}

#[instrument(skip(socket))]
async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(socket: T) {
async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
    socket: T,
) -> VerifierOutput {
    let config_validator = ProtocolConfigValidator::builder()
        .max_sent_data(MAX_SENT_DATA)
        .max_recv_data(MAX_RECV_DATA)
@@ -125,31 +220,16 @@ async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(soc
            .unwrap(),
    );

    let VerifierOutput {
        server_name,
        transcript,
        transcript_commitments,
    } = verifier
        .verify(socket.compat(), &VerifyConfig::default())
    let mut verifier = verifier
        .setup(socket.compat())
        .await
        .unwrap()
        .run()
        .await
        .unwrap();

    let transcript = transcript.unwrap();
    let output = verifier.verify(&VerifyConfig::default()).await.unwrap();
    verifier.close().await.unwrap();

    let ServerName::Dns(server_name) = server_name.unwrap();

    assert_eq!(server_name.as_str(), SERVER_DOMAIN);
    assert!(!transcript.is_complete());
    assert_eq!(
        transcript.sent_authed().iter_ranges().next().unwrap(),
        0..10
    );
    assert_eq!(
        transcript.received_authed().iter_ranges().next().unwrap(),
        0..10
    );
    assert!(matches!(
        transcript_commitments[0],
        TranscriptCommitment::Encoding(_)
    ));
    output
}

@@ -6,11 +6,15 @@ build-std = ["panic_abort", "std"]

[target.wasm32-unknown-unknown]
rustflags = [
    "-C",
    "target-feature=+atomics,+bulk-memory,+mutable-globals,+simd128",
    "-C",
    "-Ctarget-feature=+atomics,+bulk-memory,+mutable-globals,+simd128",
    "-Clink-arg=--shared-memory",
    # 4GB
    "link-arg=--max-memory=4294967296",
    "-Clink-arg=--max-memory=4294967296",
    "-Clink-arg=--import-memory",
    "-Clink-arg=--export=__wasm_init_tls",
    "-Clink-arg=--export=__tls_size",
    "-Clink-arg=--export=__tls_align",
    "-Clink-arg=--export=__tls_base",
    "--cfg",
    'getrandom_backend="wasm_js"',
]

@@ -1,6 +1,6 @@
[package]
name = "tlsn-wasm"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.13"
edition = "2021"
repository = "https://github.com/tlsnotary/tlsn.git"
description = "A core WebAssembly package for TLSNotary."