Compare commits


42 Commits

Author SHA1 Message Date
sinu.eth
1801c30599 fix: attestation example (#1030)
* fix(example): close prover (#1025)

* fix: provide encoder secret to attestation
2025-10-21 11:10:29 -07:00
sinu.eth
0885d40ddf chore: release v0.1.0-alpha.13 (#1019) 2025-10-15 09:38:52 -07:00
sinu.eth
610411aae4 ci: relax clippy (#1020) 2025-10-15 09:27:55 -07:00
sinu.eth
37df1baed7 feat(core): proof config builder reveal all methods (#1017)
Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>
2025-10-14 08:56:28 -07:00
dan
aeaebc5c60 chore(harness): expose debug flag in dockerfile (#1018) 2025-10-14 11:19:30 +00:00
sinu.eth
2e7e3db11d fix: fully identify signature algorithm (#1015) 2025-10-13 09:57:34 +02:00
sinu.eth
0a68837d0a fix: empty auth ranges (#1016) 2025-10-10 15:44:38 -07:00
sinu.eth
0ec2392716 chore(tlsn): add transcript auth tests (#1014)
* chore(tlsn): add transcript auth tests

* clippy
2025-10-10 14:10:17 -07:00
sinu.eth
f99fce5b5a fix(tlsn): do not implicitly reveal encoder secret (#1011) 2025-10-10 08:39:24 -07:00
sinu.eth
6b9f44e7e5 feat(tlsn): disclose encryption key (#1010)
Co-authored-by: th4s <th4s@metavoid.xyz>
2025-10-10 08:32:50 -07:00
dan
bf1cf2302a fix(harness): add harness debug feature (#1012) 2025-10-10 14:20:42 +00:00
sinu.eth
2884be17e0 feat(tlsn): partial plaintext auth (#1006)
Co-authored-by: th4s <th4s@metavoid.xyz>
2025-10-09 11:22:23 -07:00
sinu.eth
df8d79c152 fix(wasm): explicitly enable link args for wasm (#1007) 2025-10-09 08:34:11 -07:00
yuroitaki
82d509266b feat: add blake3 transcript commitment (#1000)
* Add blake3.

* Update mpz version.

---------

Co-authored-by: yuroitaki <>
2025-10-08 10:13:07 +08:00
dan
d5ad768e7c chore: improve error msg (#1003) 2025-10-03 05:43:58 +00:00
Hendrik Eeckhaut
d25fb320d4 build: update Rust to version 1.90.0 2025-09-24 09:32:56 +02:00
Hendrik Eeckhaut
0539268da7 Interactive Noir example (#981)
Demo for an interactive ZK age proof

Co-authored-by: th4s <th4s@metavoid.xyz>
2025-09-19 16:55:10 +02:00
dan
427b2896b5 allow root_store to be None (#995) 2025-09-19 15:15:04 +02:00
Hendrik Eeckhaut
89d1e594d1 privacy-scaling-explorations -> privacy-ethereum (#993) 2025-09-11 16:48:01 +02:00
sinu.eth
b4380f021e refactor: decouple ProveConfig from PartialTranscript (#991) 2025-09-11 09:13:52 +02:00
sinu.eth
8a823d18ec refactor(core): replace Idx with RangeSet (#988)
* refactor(core): replace Idx with RangeSet

* clippy
2025-09-10 15:44:40 -07:00
sinu.eth
7bcfc56bd8 fix(tls-core): remove deprecated webpki error variants (#992)
* fix(tls-core): remove deprecated webpki error variants

* clippy
2025-09-10 15:24:07 -07:00
sinu.eth
2909d5ebaa chore: bump mpz to 3d90b6c (#990) 2025-09-10 14:38:48 -07:00
sinu.eth
7918494ccc fix(core): fix dev dependencies (#989) 2025-09-10 14:25:04 -07:00
sinu.eth
92dd47b376 fix(core): enable zeroize derive (#987) 2025-09-10 14:11:41 -07:00
th4s
5474a748ce feat(core): Add transcript fixture (#983)
* feat(core): add transcript fixture for testing

* add feedback

* remove packages from dev dependencies
2025-09-10 22:58:10 +02:00
yuroitaki
92da5adc24 chore: update attestation example (#966)
* Add attestation example.

* Apply fmt.

* Apply clippy fix.

* Rebase.

* Improved readme + more default logging in prove example

* Removed wrong AI-generated "learn more" links

* re-export ContentType in tlsn-core

* remove unnecessary checks from example

---------

Co-authored-by: yuroitaki <>
Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>
Co-authored-by: sinu <65924192+sinui0@users.noreply.github.com>
2025-09-10 09:37:17 -07:00
Hendrik Eeckhaut
e0ce1ad31a build: Update to unpatched ws_stream_wasm crate (#975) 2025-09-01 16:33:00 +02:00
Hendrik Eeckhaut
3b76877920 build: reduce wasm size (#977) 2025-09-01 11:28:12 +02:00
Hendrik Eeckhaut
783355772a docs: corrected commands in docker.md of the harness (#976) 2025-08-28 17:00:18 +02:00
dan
e5c59da90b chore: fix tests (#974) 2025-08-26 08:42:48 +00:00
dan
f059c53c2d use zk config; bump mpz (#973) 2025-08-26 08:23:24 +00:00
sinu.eth
a1367b5428 refactor(tlsn): change network setting default to reduce data transfer (#971) 2025-08-22 14:00:23 -07:00
sinu.eth
9d8124ac9d chore: bump mpz to 1b00912 (#970) 2025-08-21 09:46:29 -07:00
dan
5034366c72 fix(hmac-sha256): compute PHash and AHash concurrently (#969)
---------

Co-authored-by: th4s <th4s@metavoid.xyz>
2025-08-21 06:41:59 +00:00
sinu.eth
afd8f44261 feat(tlsn): serializable config (#968) 2025-08-18 09:03:04 -07:00
sinu.eth
21086d2883 refactor: clean up web pki (#967)
* refactor: clean up web pki

* fix time import

* clippy

* fix wasm
2025-08-18 08:36:04 -07:00
dan
cca9a318a4 fix(harness): improve harness stability (#962) 2025-08-15 09:17:20 +00:00
dan
cb804a6025 fix(harness): disable tracing events (#961) 2025-08-15 07:13:12 +00:00
th4s
9f849e7c18 fix(encoding): set correct frame limit (#963)
* fix(encoding): set correct frame limit

* bugfix for `TranscriptRefs::len`

* use current frame limit as cushion room
2025-08-13 09:57:00 +02:00
th4s
389bceddef chore: bump rust version, fix lints and satisfy clippy (#964)
* chore(lints): fix lints and satisfy clippy

* bump rust version in ci
2025-08-12 10:50:31 -07:00
th4s
657838671a chore: remove notarize methods for prover and verifier (#952)
* feat: remove notarize methods for prover and verifier

* clean up imports

* remove remaining notarize methods

* clean up imports

* remove wasm attestation bindings

---------

Co-authored-by: sinu <65924192+sinui0@users.noreply.github.com>
2025-08-06 09:38:43 -07:00
142 changed files with 7677 additions and 5316 deletions

View File

@@ -18,10 +18,10 @@ env:
# We need a higher number of parallel rayon tasks than the default (which is 4)
# in order to prevent a deadlock, c.f.
# - https://github.com/tlsnotary/tlsn/issues/548
# - https://github.com/privacy-scaling-explorations/mpz/issues/178
# - https://github.com/privacy-ethereum/mpz/issues/178
# 32 seems to be big enough for the foreseeable future
RAYON_NUM_THREADS: 32
RUST_VERSION: 1.88.0
RUST_VERSION: 1.90.0
jobs:
clippy:
@@ -32,7 +32,7 @@ jobs:
uses: actions/checkout@v4
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_VERSION }}
components: clippy
@@ -41,7 +41,7 @@ jobs:
uses: Swatinem/rust-cache@v2.7.7
- name: Clippy
run: cargo clippy --keep-going --all-features --all-targets --locked -- -D warnings
run: cargo clippy --keep-going --all-features --all-targets --locked
fmt:
name: Check formatting
@@ -198,4 +198,4 @@ jobs:
draft: true
tag_name: ${{ github.ref_name }}
prerelease: true
generate_release_notes: true
generate_release_notes: true

View File

@@ -6,7 +6,7 @@ on:
tag:
description: 'Tag to publish to NPM'
required: true
default: 'v0.1.0-alpha.13-pre'
default: 'v0.1.0-alpha.13'
jobs:
release:

View File

@@ -23,7 +23,6 @@ jobs:
- name: "rustdoc"
run: crates/wasm/build-docs.sh
- name: Deploy
uses: peaceiris/actions-gh-pages@v3
if: ${{ github.ref == 'refs/heads/dev' }}

Cargo.lock (generated, 3140 changes): file diff suppressed because it is too large.

View File

@@ -39,6 +39,8 @@ opt-level = 1
[profile.wasm]
inherits = "release"
lto = true
panic = "abort"
codegen-units = 1
[workspace.dependencies]
tls-server-fixture = { path = "crates/tls/server-fixture" }
@@ -64,19 +66,20 @@ tlsn-harness-runner = { path = "crates/harness/runner" }
tlsn-wasm = { path = "crates/wasm" }
tlsn = { path = "crates/tlsn" }
mpz-circuits = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-memory-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-common = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-vm-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-garble = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-garble-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-ole = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-ot = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-share-conversion = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-fields = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-zk = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-hash = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-circuits = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-memory-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-common = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-vm-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-garble = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-garble-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-ole = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-ot = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-share-conversion = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-fields = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-zk = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-hash = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-ideal-vm = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
rangeset = { version = "0.2" }
serio = { version = "0.2" }
@@ -84,6 +87,7 @@ spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
uid-mux = { version = "0.2" }
websocket-relay = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
aead = { version = "0.4" }
aes = { version = "0.8" }
aes-gcm = { version = "0.9" }
anyhow = { version = "1.0" }
@@ -107,7 +111,7 @@ elliptic-curve = { version = "0.13" }
enum-try-as-inner = { version = "0.1" }
env_logger = { version = "0.10" }
futures = { version = "0.3" }
futures-rustls = { version = "0.26" }
futures-rustls = { version = "0.25" }
generic-array = { version = "0.14" }
ghash = { version = "0.5" }
hex = { version = "0.4" }
@@ -137,6 +141,8 @@ rs_merkle = { git = "https://github.com/tlsnotary/rs-merkle.git", rev = "85f3e82
rstest = { version = "0.17" }
rustls = { version = "0.21" }
rustls-pemfile = { version = "1.0" }
rustls-webpki = { version = "0.103" }
rustls-pki-types = { version = "1.12" }
sct = { version = "0.7" }
semver = { version = "1.0" }
serde = { version = "1.0" }
@@ -157,7 +163,7 @@ wasm-bindgen = { version = "0.2" }
wasm-bindgen-futures = { version = "0.4" }
web-spawn = { version = "0.2" }
web-time = { version = "0.2" }
webpki = { version = "0.22" }
webpki-roots = { version = "0.26" }
# Use the patched ws_stream_wasm to fix the issue https://github.com/najamelan/ws_stream_wasm/issues/12#issuecomment-1711902958
ws_stream_wasm = { git = "https://github.com/tlsnotary/ws_stream_wasm", rev = "2ed12aad9f0236e5321f577672f309920b2aef51" }
webpki-roots = { version = "1.0" }
webpki-root-certs = { version = "1.0" }
ws_stream_wasm = { version = "0.7.5" }
zeroize = { version = "1.8" }

View File

@@ -1,6 +1,6 @@
[package]
name = "tlsn-attestation"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.13"
edition = "2024"
[features]
@@ -21,12 +21,11 @@ rand = { workspace = true }
serde = { workspace = true, features = ["derive"] }
thiserror = { workspace = true }
tiny-keccak = { workspace = true, features = ["keccak"] }
webpki-roots = { workspace = true }
[dev-dependencies]
alloy-primitives = { version = "0.8.22", default-features = false }
alloy-signer = { version = "0.12", default-features = false }
alloy-signer-local = { version = "0.12", default-features = false }
alloy-primitives = { version = "1.3.1", default-features = false }
alloy-signer = { version = "1.0", default-features = false }
alloy-signer-local = { version = "1.0", default-features = false }
rand06-compat = { workspace = true }
rstest = { workspace = true }
tlsn-core = { workspace = true, features = ["fixtures"] }

View File

@@ -5,7 +5,7 @@ use rand::{Rng, rng};
use tlsn_core::{
connection::{ConnectionInfo, ServerEphemKey},
hash::HashAlgId,
transcript::TranscriptCommitment,
transcript::{TranscriptCommitment, encoding::EncoderSecret},
};
use crate::{
@@ -25,6 +25,7 @@ pub struct Sign {
connection_info: Option<ConnectionInfo>,
server_ephemeral_key: Option<ServerEphemKey>,
cert_commitment: ServerCertCommitment,
encoder_secret: Option<EncoderSecret>,
extensions: Vec<Extension>,
transcript_commitments: Vec<TranscriptCommitment>,
}
@@ -86,6 +87,7 @@ impl<'a> AttestationBuilder<'a, Accept> {
connection_info: None,
server_ephemeral_key: None,
cert_commitment,
encoder_secret: None,
transcript_commitments: Vec::new(),
extensions,
},
@@ -106,6 +108,12 @@ impl AttestationBuilder<'_, Sign> {
self
}
/// Sets the secret for encoding commitments.
pub fn encoder_secret(&mut self, secret: EncoderSecret) -> &mut Self {
self.state.encoder_secret = Some(secret);
self
}
/// Adds an extension to the attestation.
pub fn extension(&mut self, extension: Extension) -> &mut Self {
self.state.extensions.push(extension);
@@ -129,6 +137,7 @@ impl AttestationBuilder<'_, Sign> {
connection_info,
server_ephemeral_key,
cert_commitment,
encoder_secret,
extensions,
transcript_commitments,
} = self.state;
@@ -159,6 +168,7 @@ impl AttestationBuilder<'_, Sign> {
AttestationBuilderError::new(ErrorKind::Field, "handshake data was not set")
})?),
cert_commitment: field_id.next(cert_commitment),
encoder_secret: encoder_secret.map(|secret| field_id.next(secret)),
extensions: extensions
.into_iter()
.map(|extension| field_id.next(extension))
@@ -242,7 +252,7 @@ impl std::fmt::Display for AttestationBuilderError {
mod test {
use rstest::{fixture, rstest};
use tlsn_core::{
connection::{HandshakeData, HandshakeDataV1_2},
connection::{CertBinding, CertBindingV1_2},
fixtures::{ConnectionFixture, encoding_provider},
hash::Blake3,
transcript::Transcript,
@@ -399,10 +409,10 @@ mod test {
server_cert_data, ..
} = connection;
let HandshakeData::V1_2(HandshakeDataV1_2 {
let CertBinding::V1_2(CertBindingV1_2 {
server_ephemeral_key,
..
}) = server_cert_data.handshake
}) = server_cert_data.binding
else {
panic!("expected v1.2 handshake data");
};
@@ -470,10 +480,10 @@ mod test {
..
} = connection;
let HandshakeData::V1_2(HandshakeDataV1_2 {
let CertBinding::V1_2(CertBindingV1_2 {
server_ephemeral_key,
..
}) = server_cert_data.handshake
}) = server_cert_data.binding
else {
panic!("expected v1.2 handshake data");
};
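
For quick orientation, a condensed and hedged sketch of the new setter in use. It mirrors the test_api update later in this diff; the builder, connection data, encoder secret, commitment, and provider are assumed to already be in scope, so this is a fragment rather than a complete program.

// Sketch only: the signer now supplies the encoder secret explicitly rather
// than it being revealed implicitly alongside the encoding commitment.
attestation_builder
    .connection_info(connection_info)
    .server_ephemeral_key(server_ephemeral_key)
    .encoder_secret(encoder_secret)
    .transcript_commitments(vec![TranscriptCommitment::Encoding(encoding_commitment)]);
let attestation = attestation_builder.build(&provider).unwrap();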

View File

@@ -22,7 +22,7 @@
use serde::{Deserialize, Serialize};
use tlsn_core::{
connection::{CertificateVerificationError, ServerCertData, ServerEphemKey, ServerName},
connection::{HandshakeData, HandshakeVerificationError, ServerEphemKey, ServerName},
hash::{Blinded, HashAlgorithm, HashProviderError, TypedHash},
};
@@ -30,14 +30,14 @@ use crate::{CryptoProvider, hash::HashAlgorithmExt, serialize::impl_domain_separ
/// Opens a [`ServerCertCommitment`].
#[derive(Clone, Serialize, Deserialize)]
pub struct ServerCertOpening(Blinded<ServerCertData>);
pub struct ServerCertOpening(Blinded<HandshakeData>);
impl_domain_separator!(ServerCertOpening);
opaque_debug::implement!(ServerCertOpening);
impl ServerCertOpening {
pub(crate) fn new(data: ServerCertData) -> Self {
pub(crate) fn new(data: HandshakeData) -> Self {
Self(Blinded::new(data))
}
@@ -49,7 +49,7 @@ impl ServerCertOpening {
}
/// Returns the server identity data.
pub fn data(&self) -> &ServerCertData {
pub fn data(&self) -> &HandshakeData {
self.0.data()
}
}
@@ -122,8 +122,8 @@ impl From<HashProviderError> for ServerIdentityProofError {
}
}
impl From<CertificateVerificationError> for ServerIdentityProofError {
fn from(err: CertificateVerificationError) -> Self {
impl From<HandshakeVerificationError> for ServerIdentityProofError {
fn from(err: HandshakeVerificationError) -> Self {
Self {
kind: ErrorKind::Certificate,
message: err.to_string(),

View File

@@ -1,7 +1,7 @@
//! Attestation fixtures.
use tlsn_core::{
connection::{HandshakeData, HandshakeDataV1_2},
connection::{CertBinding, CertBindingV1_2},
fixtures::ConnectionFixture,
hash::HashAlgorithm,
transcript::{
@@ -67,7 +67,7 @@ pub fn request_fixture(
let mut request_builder = Request::builder(&request_config);
request_builder
.server_name(server_name)
.server_cert_data(server_cert_data)
.handshake_data(server_cert_data)
.transcript(transcript);
let (request, _) = request_builder.build(&provider).unwrap();
@@ -91,12 +91,12 @@ pub fn attestation_fixture(
..
} = connection;
let HandshakeData::V1_2(HandshakeDataV1_2 {
let CertBinding::V1_2(CertBindingV1_2 {
server_ephemeral_key,
..
}) = server_cert_data.handshake
}) = server_cert_data.binding
else {
panic!("expected v1.2 handshake data");
panic!("expected v1.2 binding data");
};
let mut provider = CryptoProvider::default();

View File

@@ -219,7 +219,7 @@ use tlsn_core::{
connection::{ConnectionInfo, ServerEphemKey},
hash::{Hash, HashAlgorithm, TypedHash},
merkle::MerkleTree,
transcript::TranscriptCommitment,
transcript::{TranscriptCommitment, encoding::EncoderSecret},
};
use crate::{
@@ -327,6 +327,7 @@ pub struct Body {
connection_info: Field<ConnectionInfo>,
server_ephemeral_key: Field<ServerEphemKey>,
cert_commitment: Field<ServerCertCommitment>,
encoder_secret: Option<Field<EncoderSecret>>,
extensions: Vec<Field<Extension>>,
transcript_commitments: Vec<Field<TranscriptCommitment>>,
}
@@ -372,6 +373,7 @@ impl Body {
connection_info: conn_info,
server_ephemeral_key,
cert_commitment,
encoder_secret,
extensions,
transcript_commitments,
} = self;
@@ -389,6 +391,13 @@ impl Body {
),
];
if let Some(encoder_secret) = encoder_secret {
fields.push((
encoder_secret.id,
hasher.hash_separated(&encoder_secret.data),
));
}
for field in extensions.iter() {
fields.push((field.id, hasher.hash_separated(&field.data)));
}

View File

@@ -91,6 +91,11 @@ impl Presentation {
transcript.verify_with_provider(
&provider.hash,
&attestation.body.connection_info().transcript_length,
attestation
.body
.encoder_secret
.as_ref()
.map(|field| &field.data),
attestation.body.transcript_commitments(),
)
})

View File

@@ -1,8 +1,4 @@
use tls_core::{
anchors::{OwnedTrustAnchor, RootCertStore},
verify::WebPkiVerifier,
};
use tlsn_core::hash::HashProvider;
use tlsn_core::{hash::HashProvider, webpki::ServerCertVerifier};
use crate::signing::{SignatureVerifierProvider, SignerProvider};
@@ -28,7 +24,7 @@ pub struct CryptoProvider {
/// This is used to verify the server's certificate chain.
///
/// The default verifier uses the Mozilla root certificates.
pub cert: WebPkiVerifier,
pub cert: ServerCertVerifier,
/// Signer provider.
///
/// This is used for signing attestations.
@@ -45,21 +41,9 @@ impl Default for CryptoProvider {
fn default() -> Self {
Self {
hash: Default::default(),
cert: default_cert_verifier(),
cert: ServerCertVerifier::mozilla(),
signer: Default::default(),
signature: Default::default(),
}
}
}
pub(crate) fn default_cert_verifier() -> WebPkiVerifier {
let mut root_store = RootCertStore::empty();
root_store.add_server_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| {
OwnedTrustAnchor::from_subject_spki_name_constraints(
ta.subject.as_ref(),
ta.subject_public_key_info.as_ref(),
ta.name_constraints.as_ref().map(|nc| nc.as_ref()),
)
}));
WebPkiVerifier::new(root_store, None)
}
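
A hedged sketch of overriding the new default verifier with a custom root store; the CryptoProvider re-export path is assumed, and RootCertStore / ServerCertVerifier::new follow the usage shown in the tlsn-core tests later in this diff.

use tlsn_attestation::CryptoProvider; // re-export path assumed
use tlsn_core::webpki::{RootCertStore, ServerCertVerifier};

// Sketch only: the default now uses ServerCertVerifier::mozilla(); a custom
// root store can still be plugged in by replacing the public `cert` field.
fn provider_with_roots(roots: &RootCertStore) -> CryptoProvider {
    let mut provider = CryptoProvider::default();
    provider.cert = ServerCertVerifier::new(roots).expect("valid root store");
    provider
}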

View File

@@ -36,7 +36,7 @@ pub struct Request {
impl Request {
/// Returns a new request builder.
pub fn builder(config: &RequestConfig) -> RequestBuilder {
pub fn builder(config: &RequestConfig) -> RequestBuilder<'_> {
RequestBuilder::new(config)
}

View File

@@ -1,5 +1,5 @@
use tlsn_core::{
connection::{ServerCertData, ServerName},
connection::{HandshakeData, ServerName},
transcript::{Transcript, TranscriptCommitment, TranscriptSecret},
};
@@ -13,7 +13,7 @@ use crate::{
pub struct RequestBuilder<'a> {
config: &'a RequestConfig,
server_name: Option<ServerName>,
server_cert_data: Option<ServerCertData>,
handshake_data: Option<HandshakeData>,
transcript: Option<Transcript>,
transcript_commitments: Vec<TranscriptCommitment>,
transcript_commitment_secrets: Vec<TranscriptSecret>,
@@ -25,7 +25,7 @@ impl<'a> RequestBuilder<'a> {
Self {
config,
server_name: None,
server_cert_data: None,
handshake_data: None,
transcript: None,
transcript_commitments: Vec::new(),
transcript_commitment_secrets: Vec::new(),
@@ -38,9 +38,9 @@ impl<'a> RequestBuilder<'a> {
self
}
/// Sets the server identity data.
pub fn server_cert_data(&mut self, data: ServerCertData) -> &mut Self {
self.server_cert_data = Some(data);
/// Sets the handshake data.
pub fn handshake_data(&mut self, data: HandshakeData) -> &mut Self {
self.handshake_data = Some(data);
self
}
@@ -69,7 +69,7 @@ impl<'a> RequestBuilder<'a> {
let Self {
config,
server_name,
server_cert_data,
handshake_data: server_cert_data,
transcript,
transcript_commitments,
transcript_commitment_secrets,
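
A hedged sketch of the renamed setter, following the fixture and test updates later in this diff; the tlsn_attestation re-export paths and the provider argument type are assumptions.

use tlsn_attestation::{CryptoProvider, Request, RequestConfig}; // paths assumed
use tlsn_core::{
    connection::{HandshakeData, ServerName},
    transcript::Transcript,
};

// Sketch only: the request builder setter formerly named `server_cert_data`
// is now `handshake_data`.
fn build_request(
    server_name: ServerName,
    handshake_data: HandshakeData,
    transcript: Transcript,
    provider: &CryptoProvider,
) -> Request {
    let config = RequestConfig::default();
    let mut builder = Request::builder(&config);
    builder
        .server_name(server_name)
        .handshake_data(handshake_data)
        .transcript(transcript);
    let (request, _secrets) = builder.build(provider).expect("request should build");
    request
}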

View File

@@ -46,8 +46,9 @@ pub(crate) use impl_domain_separator;
impl_domain_separator!(tlsn_core::connection::ServerEphemKey);
impl_domain_separator!(tlsn_core::connection::ConnectionInfo);
impl_domain_separator!(tlsn_core::connection::HandshakeData);
impl_domain_separator!(tlsn_core::connection::CertBinding);
impl_domain_separator!(tlsn_core::transcript::TranscriptCommitment);
impl_domain_separator!(tlsn_core::transcript::TranscriptSecret);
impl_domain_separator!(tlsn_core::transcript::encoding::EncoderSecret);
impl_domain_separator!(tlsn_core::transcript::encoding::EncodingCommitment);
impl_domain_separator!(tlsn_core::transcript::hash::PlaintextHash);

View File

@@ -5,7 +5,7 @@ use tlsn_attestation::{
signing::SignatureAlgId,
};
use tlsn_core::{
connection::{HandshakeData, HandshakeDataV1_2},
connection::{CertBinding, CertBindingV1_2},
fixtures::{self, ConnectionFixture, encoder_secret},
hash::Blake3,
transcript::{
@@ -36,10 +36,10 @@ fn test_api() {
server_cert_data,
} = ConnectionFixture::tlsnotary(transcript.length());
let HandshakeData::V1_2(HandshakeDataV1_2 {
let CertBinding::V1_2(CertBindingV1_2 {
server_ephemeral_key,
..
}) = server_cert_data.handshake.clone()
}) = server_cert_data.binding.clone()
else {
unreachable!()
};
@@ -64,7 +64,6 @@ fn test_api() {
let encoding_commitment = EncodingCommitment {
root: encoding_tree.root(),
secret: encoder_secret(),
};
let request_config = RequestConfig::default();
@@ -72,7 +71,7 @@ fn test_api() {
request_builder
.server_name(server_name.clone())
.server_cert_data(server_cert_data)
.handshake_data(server_cert_data)
.transcript(transcript)
.transcript_commitments(
vec![TranscriptSecret::Encoding(encoding_tree)],
@@ -96,6 +95,7 @@ fn test_api() {
.connection_info(connection_info.clone())
// Server key Notary received during handshake
.server_ephemeral_key(server_ephemeral_key)
.encoder_secret(encoder_secret())
.transcript_commitments(vec![TranscriptCommitment::Encoding(encoding_commitment)]);
let attestation = attestation_builder.build(&provider).unwrap();

View File

@@ -5,7 +5,7 @@ description = "This crate provides implementations of ciphers for two parties"
keywords = ["tls", "mpc", "2pc", "aes"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.13"
edition = "2021"
[lints]

View File

@@ -1,6 +1,6 @@
[package]
name = "tlsn-deap"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.13"
edition = "2021"
[lints]

View File

@@ -391,7 +391,7 @@ mod tests {
memory::{binary::U8, correlated::Delta, Array},
prelude::*,
};
use mpz_zk::{Prover, Verifier};
use mpz_zk::{Prover, ProverConfig, Verifier, VerifierConfig};
use rand::{rngs::StdRng, SeedableRng};
use super::*;
@@ -408,8 +408,8 @@ mod tests {
let gb = Garbler::new(cot_send, [0u8; 16], delta_mpc);
let ev = Evaluator::new(cot_recv);
let prover = Prover::new(rcot_recv);
let verifier = Verifier::new(delta_zk, rcot_send);
let prover = Prover::new(ProverConfig::default(), rcot_recv);
let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send);
let mut leader = Deap::new(Role::Leader, gb, prover);
let mut follower = Deap::new(Role::Follower, ev, verifier);
@@ -488,8 +488,8 @@ mod tests {
let gb = Garbler::new(cot_send, [0u8; 16], delta_mpc);
let ev = Evaluator::new(cot_recv);
let prover = Prover::new(rcot_recv);
let verifier = Verifier::new(delta_zk, rcot_send);
let prover = Prover::new(ProverConfig::default(), rcot_recv);
let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send);
let mut leader = Deap::new(Role::Leader, gb, prover);
let mut follower = Deap::new(Role::Follower, ev, verifier);
@@ -574,8 +574,8 @@ mod tests {
let gb = Garbler::new(cot_send, [1u8; 16], delta_mpc);
let ev = Evaluator::new(cot_recv);
let prover = Prover::new(rcot_recv);
let verifier = Verifier::new(delta_zk, rcot_send);
let prover = Prover::new(ProverConfig::default(), rcot_recv);
let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send);
let mut leader = Deap::new(Role::Leader, gb, prover);
let mut follower = Deap::new(Role::Follower, ev, verifier);

View File

@@ -5,7 +5,7 @@ description = "A 2PC implementation of TLS HMAC-SHA256 PRF"
keywords = ["tls", "mpc", "2pc", "hmac", "sha256"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.13"
edition = "2021"
[lints]

View File

@@ -40,7 +40,6 @@ enum PrfState {
inner_partial: [u32; 8],
a_output: DecodeFutureTyped<BitVec, [u8; 32]>,
},
FinishLastP,
Done,
}
@@ -137,16 +136,18 @@ impl PrfFunction {
assign_inner_local(vm, p.inner_local, *inner_partial, &msg)?;
if *iter == self.iterations {
self.state = PrfState::FinishLastP;
self.state = PrfState::Done;
} else {
self.state = PrfState::ComputeA {
iter: *iter + 1,
inner_partial: *inner_partial,
msg: output.to_vec(),
}
};
};
// We recurse, so that this PHash and the next AHash could
// be computed in a single VM execute call.
self.flush(vm)?;
}
}
PrfState::FinishLastP => self.state = PrfState::Done,
_ => (),
}

View File

@@ -5,7 +5,7 @@ description = "Implementation of the 3-party key-exchange protocol"
keywords = ["tls", "mpc", "2pc", "pms", "key-exchange"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.13"
edition = "2021"
[lints]

View File

@@ -5,7 +5,7 @@ description = "Core types for TLSNotary"
keywords = ["tls", "mpc", "2pc", "types"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.13"
edition = "2021"
[lints]
@@ -13,7 +13,13 @@ workspace = true
[features]
default = []
fixtures = ["dep:hex", "dep:tlsn-data-fixtures"]
fixtures = [
"dep:hex",
"dep:tlsn-data-fixtures",
"dep:aead",
"dep:aes-gcm",
"dep:generic-array",
]
[dependencies]
tlsn-data-fixtures = { workspace = true, optional = true }
@@ -21,6 +27,9 @@ tlsn-tls-core = { workspace = true, features = ["serde"] }
tlsn-utils = { workspace = true }
rangeset = { workspace = true, features = ["serde"] }
aead = { workspace = true, features = ["alloc"], optional = true }
aes-gcm = { workspace = true, optional = true }
generic-array = { workspace = true, optional = true }
bimap = { version = "0.6", features = ["serde"] }
blake3 = { workspace = true }
hex = { workspace = true, optional = true }
@@ -36,10 +45,17 @@ thiserror = { workspace = true }
tiny-keccak = { workspace = true, features = ["keccak"] }
web-time = { workspace = true }
webpki-roots = { workspace = true }
rustls-webpki = { workspace = true, features = ["ring"] }
rustls-pki-types = { workspace = true }
itybity = { workspace = true }
zeroize = { workspace = true, features = ["zeroize_derive"] }
[dev-dependencies]
aead = { workspace = true, features = ["alloc"] }
aes-gcm = { workspace = true }
generic-array = { workspace = true }
bincode = { workspace = true }
hex = { workspace = true }
rstest = { workspace = true }
tlsn-data-fixtures = { workspace = true }
webpki-root-certs = { workspace = true }

View File

@@ -2,16 +2,11 @@
use std::fmt;
use rustls_pki_types as webpki_types;
use serde::{Deserialize, Serialize};
use tls_core::{
msgs::{
codec::Codec,
enums::NamedGroup,
handshake::{DigitallySignedStruct, ServerECDHParams},
},
verify::{ServerCertVerifier as _, WebPkiVerifier},
};
use web_time::{Duration, UNIX_EPOCH};
use tls_core::msgs::{codec::Codec, enums::NamedGroup, handshake::ServerECDHParams};
use crate::webpki::{CertificateDer, ServerCertVerifier, ServerCertVerifierError};
/// TLS version.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
@@ -35,40 +30,82 @@ impl TryFrom<tls_core::msgs::enums::ProtocolVersion> for TlsVersion {
}
}
/// Server's name, a.k.a. the DNS name.
/// Server's name.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct ServerName(String);
pub enum ServerName {
/// DNS name.
Dns(DnsName),
}
impl ServerName {
/// Creates a new server name.
pub fn new(name: String) -> Self {
Self(name)
}
/// Returns the name as a string.
pub fn as_str(&self) -> &str {
&self.0
}
}
impl From<&str> for ServerName {
fn from(name: &str) -> Self {
Self(name.to_string())
}
}
impl AsRef<str> for ServerName {
fn as_ref(&self) -> &str {
&self.0
pub(crate) fn to_webpki(&self) -> webpki_types::ServerName<'static> {
match self {
ServerName::Dns(name) => webpki_types::ServerName::DnsName(
webpki_types::DnsName::try_from(name.0.as_str())
.expect("name was validated")
.to_owned(),
),
}
}
}
impl fmt::Display for ServerName {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ServerName::Dns(name) => write!(f, "{name}"),
}
}
}
/// DNS name.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(try_from = "String")]
pub struct DnsName(String);
impl DnsName {
/// Returns the DNS name as a string.
pub fn as_str(&self) -> &str {
self.0.as_str()
}
}
impl fmt::Display for DnsName {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}
impl AsRef<str> for DnsName {
fn as_ref(&self) -> &str {
&self.0
}
}
/// Error returned when a DNS name is invalid.
#[derive(Debug, thiserror::Error)]
#[error("invalid DNS name")]
pub struct InvalidDnsNameError {}
impl TryFrom<&str> for DnsName {
type Error = InvalidDnsNameError;
fn try_from(value: &str) -> Result<Self, Self::Error> {
// Borrow validation from rustls
match webpki_types::DnsName::try_from_str(value) {
Ok(_) => Ok(DnsName(value.to_string())),
Err(_) => Err(InvalidDnsNameError {}),
}
}
}
impl TryFrom<String> for DnsName {
type Error = InvalidDnsNameError;
fn try_from(value: String) -> Result<Self, Self::Error> {
Self::try_from(value.as_str())
}
}
/// Type of a public key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
@@ -79,84 +116,84 @@ pub enum KeyType {
SECP256R1 = 0x0017,
}
/// Signature scheme on the key exchange parameters.
/// Signature algorithm used on the key exchange parameters.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
#[allow(non_camel_case_types, missing_docs)]
pub enum SignatureScheme {
RSA_PKCS1_SHA1 = 0x0201,
ECDSA_SHA1_Legacy = 0x0203,
RSA_PKCS1_SHA256 = 0x0401,
ECDSA_NISTP256_SHA256 = 0x0403,
RSA_PKCS1_SHA384 = 0x0501,
ECDSA_NISTP384_SHA384 = 0x0503,
RSA_PKCS1_SHA512 = 0x0601,
ECDSA_NISTP521_SHA512 = 0x0603,
RSA_PSS_SHA256 = 0x0804,
RSA_PSS_SHA384 = 0x0805,
RSA_PSS_SHA512 = 0x0806,
ED25519 = 0x0807,
pub enum SignatureAlgorithm {
ECDSA_NISTP256_SHA256,
ECDSA_NISTP256_SHA384,
ECDSA_NISTP384_SHA256,
ECDSA_NISTP384_SHA384,
ED25519,
RSA_PKCS1_2048_8192_SHA256,
RSA_PKCS1_2048_8192_SHA384,
RSA_PKCS1_2048_8192_SHA512,
RSA_PSS_2048_8192_SHA256_LEGACY_KEY,
RSA_PSS_2048_8192_SHA384_LEGACY_KEY,
RSA_PSS_2048_8192_SHA512_LEGACY_KEY,
}
impl TryFrom<tls_core::msgs::enums::SignatureScheme> for SignatureScheme {
type Error = &'static str;
fn try_from(value: tls_core::msgs::enums::SignatureScheme) -> Result<Self, Self::Error> {
use tls_core::msgs::enums::SignatureScheme as Core;
use SignatureScheme::*;
Ok(match value {
Core::RSA_PKCS1_SHA1 => RSA_PKCS1_SHA1,
Core::ECDSA_SHA1_Legacy => ECDSA_SHA1_Legacy,
Core::RSA_PKCS1_SHA256 => RSA_PKCS1_SHA256,
Core::ECDSA_NISTP256_SHA256 => ECDSA_NISTP256_SHA256,
Core::RSA_PKCS1_SHA384 => RSA_PKCS1_SHA384,
Core::ECDSA_NISTP384_SHA384 => ECDSA_NISTP384_SHA384,
Core::RSA_PKCS1_SHA512 => RSA_PKCS1_SHA512,
Core::ECDSA_NISTP521_SHA512 => ECDSA_NISTP521_SHA512,
Core::RSA_PSS_SHA256 => RSA_PSS_SHA256,
Core::RSA_PSS_SHA384 => RSA_PSS_SHA384,
Core::RSA_PSS_SHA512 => RSA_PSS_SHA512,
Core::ED25519 => ED25519,
_ => return Err("unsupported signature scheme"),
})
}
}
impl From<SignatureScheme> for tls_core::msgs::enums::SignatureScheme {
fn from(value: SignatureScheme) -> Self {
use tls_core::msgs::enums::SignatureScheme::*;
match value {
SignatureScheme::RSA_PKCS1_SHA1 => RSA_PKCS1_SHA1,
SignatureScheme::ECDSA_SHA1_Legacy => ECDSA_SHA1_Legacy,
SignatureScheme::RSA_PKCS1_SHA256 => RSA_PKCS1_SHA256,
SignatureScheme::ECDSA_NISTP256_SHA256 => ECDSA_NISTP256_SHA256,
SignatureScheme::RSA_PKCS1_SHA384 => RSA_PKCS1_SHA384,
SignatureScheme::ECDSA_NISTP384_SHA384 => ECDSA_NISTP384_SHA384,
SignatureScheme::RSA_PKCS1_SHA512 => RSA_PKCS1_SHA512,
SignatureScheme::ECDSA_NISTP521_SHA512 => ECDSA_NISTP521_SHA512,
SignatureScheme::RSA_PSS_SHA256 => RSA_PSS_SHA256,
SignatureScheme::RSA_PSS_SHA384 => RSA_PSS_SHA384,
SignatureScheme::RSA_PSS_SHA512 => RSA_PSS_SHA512,
SignatureScheme::ED25519 => ED25519,
impl fmt::Display for SignatureAlgorithm {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
SignatureAlgorithm::ECDSA_NISTP256_SHA256 => write!(f, "ECDSA_NISTP256_SHA256"),
SignatureAlgorithm::ECDSA_NISTP256_SHA384 => write!(f, "ECDSA_NISTP256_SHA384"),
SignatureAlgorithm::ECDSA_NISTP384_SHA256 => write!(f, "ECDSA_NISTP384_SHA256"),
SignatureAlgorithm::ECDSA_NISTP384_SHA384 => write!(f, "ECDSA_NISTP384_SHA384"),
SignatureAlgorithm::ED25519 => write!(f, "ED25519"),
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256 => {
write!(f, "RSA_PKCS1_2048_8192_SHA256")
}
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384 => {
write!(f, "RSA_PKCS1_2048_8192_SHA384")
}
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512 => {
write!(f, "RSA_PKCS1_2048_8192_SHA512")
}
SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
write!(f, "RSA_PSS_2048_8192_SHA256_LEGACY_KEY")
}
SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
write!(f, "RSA_PSS_2048_8192_SHA384_LEGACY_KEY")
}
SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
write!(f, "RSA_PSS_2048_8192_SHA512_LEGACY_KEY")
}
}
}
}
/// X.509 certificate, DER encoded.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Certificate(pub Vec<u8>);
impl From<tls_core::key::Certificate> for Certificate {
fn from(cert: tls_core::key::Certificate) -> Self {
Self(cert.0)
impl From<tls_core::verify::SignatureAlgorithm> for SignatureAlgorithm {
fn from(value: tls_core::verify::SignatureAlgorithm) -> Self {
use tls_core::verify::SignatureAlgorithm as Core;
match value {
Core::ECDSA_NISTP256_SHA256 => SignatureAlgorithm::ECDSA_NISTP256_SHA256,
Core::ECDSA_NISTP256_SHA384 => SignatureAlgorithm::ECDSA_NISTP256_SHA384,
Core::ECDSA_NISTP384_SHA256 => SignatureAlgorithm::ECDSA_NISTP384_SHA256,
Core::ECDSA_NISTP384_SHA384 => SignatureAlgorithm::ECDSA_NISTP384_SHA384,
Core::ED25519 => SignatureAlgorithm::ED25519,
Core::RSA_PKCS1_2048_8192_SHA256 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256,
Core::RSA_PKCS1_2048_8192_SHA384 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384,
Core::RSA_PKCS1_2048_8192_SHA512 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512,
Core::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY
}
Core::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY
}
Core::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY
}
}
}
}
/// Server's signature of the key exchange parameters.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServerSignature {
/// Signature scheme.
pub scheme: SignatureScheme,
/// Signature algorithm.
pub alg: SignatureAlgorithm,
/// Signature data.
pub sig: Vec<u8>,
}
@@ -220,9 +257,9 @@ pub struct TranscriptLength {
pub received: u32,
}
/// TLS 1.2 handshake data.
/// TLS 1.2 certificate binding.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HandshakeDataV1_2 {
pub struct CertBindingV1_2 {
/// Client random.
pub client_random: [u8; 32],
/// Server random.
@@ -231,13 +268,18 @@ pub struct HandshakeDataV1_2 {
pub server_ephemeral_key: ServerEphemKey,
}
/// TLS handshake data.
/// TLS certificate binding.
///
/// This is the data that the server signs using its public key in the
/// certificate it presents during the TLS handshake. This provides a binding
/// between the server's identity and the ephemeral keys used to authenticate
/// the TLS session.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
#[non_exhaustive]
pub enum HandshakeData {
/// TLS 1.2 handshake data.
V1_2(HandshakeDataV1_2),
pub enum CertBinding {
/// TLS 1.2 certificate binding.
V1_2(CertBindingV1_2),
}
/// Verify data from the TLS handshake finished messages.
@@ -249,19 +291,19 @@ pub struct VerifyData {
pub server_finished: Vec<u8>,
}
/// Server certificate and handshake data.
/// TLS handshake data.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServerCertData {
/// Certificate chain.
pub certs: Vec<Certificate>,
/// Server signature of the key exchange parameters.
pub struct HandshakeData {
/// Server certificate chain.
pub certs: Vec<CertificateDer>,
/// Server certificate signature over the binding message.
pub sig: ServerSignature,
/// TLS handshake data.
pub handshake: HandshakeData,
/// Certificate binding.
pub binding: CertBinding,
}
impl ServerCertData {
/// Verifies the server certificate data.
impl HandshakeData {
/// Verifies the handshake data.
///
/// # Arguments
///
@@ -271,53 +313,35 @@ impl ServerCertData {
/// * `server_name` - The server name.
pub fn verify(
&self,
verifier: &WebPkiVerifier,
verifier: &ServerCertVerifier,
time: u64,
server_ephemeral_key: &ServerEphemKey,
server_name: &ServerName,
) -> Result<(), CertificateVerificationError> {
) -> Result<(), HandshakeVerificationError> {
#[allow(irrefutable_let_patterns)]
let HandshakeData::V1_2(HandshakeDataV1_2 {
let CertBinding::V1_2(CertBindingV1_2 {
client_random,
server_random,
server_ephemeral_key: expected_server_ephemeral_key,
}) = &self.handshake
}) = &self.binding
else {
unreachable!("only TLS 1.2 is implemented")
};
if server_ephemeral_key != expected_server_ephemeral_key {
return Err(CertificateVerificationError::InvalidServerEphemeralKey);
return Err(HandshakeVerificationError::InvalidServerEphemeralKey);
}
// Verify server name.
let server_name = tls_core::dns::ServerName::try_from(server_name.as_ref())
.map_err(|_| CertificateVerificationError::InvalidIdentity(server_name.clone()))?;
// Verify server certificate.
let cert_chain = self
let (end_entity, intermediates) = self
.certs
.clone()
.into_iter()
.map(|cert| tls_core::key::Certificate(cert.0))
.collect::<Vec<_>>();
let (end_entity, intermediates) = cert_chain
.split_first()
.ok_or(CertificateVerificationError::MissingCerts)?;
.ok_or(HandshakeVerificationError::MissingCerts)?;
// Verify the end entity cert is valid for the provided server name
// and that it chains to at least one of the roots we trust.
verifier
.verify_server_cert(
end_entity,
intermediates,
&server_name,
&mut [].into_iter(),
&[],
UNIX_EPOCH + Duration::from_secs(time),
)
.map_err(|_| CertificateVerificationError::InvalidCert)?;
.verify_server_cert(end_entity, intermediates, server_name, time)
.map_err(HandshakeVerificationError::ServerCert)?;
// Verify the signature matches the certificate and key exchange parameters.
let mut message = Vec::new();
@@ -325,11 +349,34 @@ impl ServerCertData {
message.extend_from_slice(server_random);
message.extend_from_slice(&server_ephemeral_key.kx_params());
let dss = DigitallySignedStruct::new(self.sig.scheme.into(), self.sig.sig.clone());
use webpki::ring as alg;
let sig_alg = match self.sig.alg {
SignatureAlgorithm::ECDSA_NISTP256_SHA256 => alg::ECDSA_P256_SHA256,
SignatureAlgorithm::ECDSA_NISTP256_SHA384 => alg::ECDSA_P256_SHA384,
SignatureAlgorithm::ECDSA_NISTP384_SHA256 => alg::ECDSA_P384_SHA256,
SignatureAlgorithm::ECDSA_NISTP384_SHA384 => alg::ECDSA_P384_SHA384,
SignatureAlgorithm::ED25519 => alg::ED25519,
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256 => alg::RSA_PKCS1_2048_8192_SHA256,
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384 => alg::RSA_PKCS1_2048_8192_SHA384,
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512 => alg::RSA_PKCS1_2048_8192_SHA512,
SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
alg::RSA_PSS_2048_8192_SHA256_LEGACY_KEY
}
SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
alg::RSA_PSS_2048_8192_SHA384_LEGACY_KEY
}
SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
alg::RSA_PSS_2048_8192_SHA512_LEGACY_KEY
}
};
verifier
.verify_tls12_signature(&message, end_entity, &dss)
.map_err(|_| CertificateVerificationError::InvalidServerSignature)?;
let end_entity = webpki_types::CertificateDer::from(end_entity.0.as_slice());
let end_entity = webpki::EndEntityCert::try_from(&end_entity)
.map_err(|_| HandshakeVerificationError::InvalidEndEntityCertificate)?;
end_entity
.verify_signature(sig_alg, &message, &self.sig.sig)
.map_err(|_| HandshakeVerificationError::InvalidServerSignature)?;
Ok(())
}
@@ -338,58 +385,49 @@ impl ServerCertData {
/// Errors that can occur when verifying a certificate chain or signature.
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
pub enum CertificateVerificationError {
#[error("invalid server identity: {0}")]
InvalidIdentity(ServerName),
pub enum HandshakeVerificationError {
#[error("invalid end entity certificate")]
InvalidEndEntityCertificate,
#[error("missing server certificates")]
MissingCerts,
#[error("invalid server certificate")]
InvalidCert,
#[error("invalid server signature")]
InvalidServerSignature,
#[error("invalid server ephemeral key")]
InvalidServerEphemeralKey,
#[error("server certificate verification failed: {0}")]
ServerCert(ServerCertVerifierError),
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{fixtures::ConnectionFixture, transcript::Transcript};
use crate::{fixtures::ConnectionFixture, transcript::Transcript, webpki::RootCertStore};
use hex::FromHex;
use rstest::*;
use tls_core::{
anchors::{OwnedTrustAnchor, RootCertStore},
verify::WebPkiVerifier,
};
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
#[fixture]
#[once]
fn verifier() -> WebPkiVerifier {
let mut root_store = RootCertStore::empty();
root_store.add_server_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| {
OwnedTrustAnchor::from_subject_spki_name_constraints(
ta.subject.as_ref(),
ta.subject_public_key_info.as_ref(),
ta.name_constraints.as_ref().map(|nc| nc.as_ref()),
)
}));
fn verifier() -> ServerCertVerifier {
let mut root_store = RootCertStore {
roots: webpki_root_certs::TLS_SERVER_ROOT_CERTS
.iter()
.map(|c| CertificateDer(c.to_vec()))
.collect(),
};
// Add a cert which is no longer included in the Mozilla root store.
let cert = tls_core::key::Certificate(
root_store.roots.push(
appliedzkp()
.server_cert_data
.certs
.last()
.expect("chain is valid")
.0
.clone(),
);
root_store.add(&cert).unwrap();
WebPkiVerifier::new(root_store, None)
ServerCertVerifier::new(&root_store).unwrap()
}
fn tlsnotary() -> ConnectionFixture {
@@ -405,7 +443,7 @@ mod tests {
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_verify_cert_chain_sucess_ca_implicit(
verifier: &WebPkiVerifier,
verifier: &ServerCertVerifier,
#[case] mut data: ConnectionFixture,
) {
// Remove the CA cert
@@ -417,7 +455,7 @@ mod tests {
verifier,
data.connection_info.time,
data.server_ephemeral_key(),
&ServerName::from(data.server_name.as_ref()),
&data.server_name,
)
.is_ok());
}
@@ -428,7 +466,7 @@ mod tests {
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_verify_cert_chain_success_ca_explicit(
verifier: &WebPkiVerifier,
verifier: &ServerCertVerifier,
#[case] data: ConnectionFixture,
) {
assert!(data
@@ -437,7 +475,7 @@ mod tests {
verifier,
data.connection_info.time,
data.server_ephemeral_key(),
&ServerName::from(data.server_name.as_ref()),
&data.server_name,
)
.is_ok());
}
@@ -447,7 +485,7 @@ mod tests {
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_verify_cert_chain_fail_bad_time(
verifier: &WebPkiVerifier,
verifier: &ServerCertVerifier,
#[case] data: ConnectionFixture,
) {
// unix time when the cert chain was NOT valid
@@ -457,12 +495,12 @@ mod tests {
verifier,
bad_time,
data.server_ephemeral_key(),
&ServerName::from(data.server_name.as_ref()),
&data.server_name,
);
assert!(matches!(
err.unwrap_err(),
CertificateVerificationError::InvalidCert
HandshakeVerificationError::ServerCert(_)
));
}
@@ -471,7 +509,7 @@ mod tests {
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_verify_cert_chain_fail_no_interm_cert(
verifier: &WebPkiVerifier,
verifier: &ServerCertVerifier,
#[case] mut data: ConnectionFixture,
) {
// Remove the CA cert
@@ -483,12 +521,12 @@ mod tests {
verifier,
data.connection_info.time,
data.server_ephemeral_key(),
&ServerName::from(data.server_name.as_ref()),
&data.server_name,
);
assert!(matches!(
err.unwrap_err(),
CertificateVerificationError::InvalidCert
HandshakeVerificationError::ServerCert(_)
));
}
@@ -498,7 +536,7 @@ mod tests {
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_verify_cert_chain_fail_no_interm_cert_with_ca_cert(
verifier: &WebPkiVerifier,
verifier: &ServerCertVerifier,
#[case] mut data: ConnectionFixture,
) {
// Remove the intermediate cert
@@ -508,12 +546,12 @@ mod tests {
verifier,
data.connection_info.time,
data.server_ephemeral_key(),
&ServerName::from(data.server_name.as_ref()),
&data.server_name,
);
assert!(matches!(
err.unwrap_err(),
CertificateVerificationError::InvalidCert
HandshakeVerificationError::ServerCert(_)
));
}
@@ -522,24 +560,24 @@ mod tests {
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_verify_cert_chain_fail_bad_ee_cert(
verifier: &WebPkiVerifier,
verifier: &ServerCertVerifier,
#[case] mut data: ConnectionFixture,
) {
let ee: &[u8] = include_bytes!("./fixtures/data/unknown/ee.der");
// Change the end entity cert
data.server_cert_data.certs[0] = Certificate(ee.to_vec());
data.server_cert_data.certs[0] = CertificateDer(ee.to_vec());
let err = data.server_cert_data.verify(
verifier,
data.connection_info.time,
data.server_ephemeral_key(),
&ServerName::from(data.server_name.as_ref()),
&data.server_name,
);
assert!(matches!(
err.unwrap_err(),
CertificateVerificationError::InvalidCert
HandshakeVerificationError::ServerCert(_)
));
}
@@ -548,23 +586,23 @@ mod tests {
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_verify_sig_ke_params_fail_bad_client_random(
verifier: &WebPkiVerifier,
verifier: &ServerCertVerifier,
#[case] mut data: ConnectionFixture,
) {
let HandshakeData::V1_2(HandshakeDataV1_2 { client_random, .. }) =
&mut data.server_cert_data.handshake;
let CertBinding::V1_2(CertBindingV1_2 { client_random, .. }) =
&mut data.server_cert_data.binding;
client_random[31] = client_random[31].wrapping_add(1);
let err = data.server_cert_data.verify(
verifier,
data.connection_info.time,
data.server_ephemeral_key(),
&ServerName::from(data.server_name.as_ref()),
&data.server_name,
);
assert!(matches!(
err.unwrap_err(),
CertificateVerificationError::InvalidServerSignature
HandshakeVerificationError::InvalidServerSignature
));
}
@@ -573,7 +611,7 @@ mod tests {
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_verify_sig_ke_params_fail_bad_sig(
verifier: &WebPkiVerifier,
verifier: &ServerCertVerifier,
#[case] mut data: ConnectionFixture,
) {
data.server_cert_data.sig.sig[31] = data.server_cert_data.sig.sig[31].wrapping_add(1);
@@ -582,12 +620,12 @@ mod tests {
verifier,
data.connection_info.time,
data.server_ephemeral_key(),
&ServerName::from(data.server_name.as_ref()),
&data.server_name,
);
assert!(matches!(
err.unwrap_err(),
CertificateVerificationError::InvalidServerSignature
HandshakeVerificationError::InvalidServerSignature
));
}
@@ -596,10 +634,10 @@ mod tests {
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_check_dns_name_present_in_cert_fail_bad_host(
verifier: &WebPkiVerifier,
verifier: &ServerCertVerifier,
#[case] data: ConnectionFixture,
) {
let bad_name = ServerName::from("badhost.com");
let bad_name = ServerName::Dns(DnsName::try_from("badhost.com").unwrap());
let err = data.server_cert_data.verify(
verifier,
@@ -610,7 +648,7 @@ mod tests {
assert!(matches!(
err.unwrap_err(),
CertificateVerificationError::InvalidCert
HandshakeVerificationError::ServerCert(_)
));
}
@@ -618,7 +656,7 @@ mod tests {
#[rstest]
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_invalid_ephemeral_key(verifier: &WebPkiVerifier, #[case] data: ConnectionFixture) {
fn test_invalid_ephemeral_key(verifier: &ServerCertVerifier, #[case] data: ConnectionFixture) {
let wrong_ephemeral_key = ServerEphemKey {
typ: KeyType::SECP256R1,
key: Vec::<u8>::from_hex(include_bytes!("./fixtures/data/unknown/pubkey")).unwrap(),
@@ -628,12 +666,12 @@ mod tests {
verifier,
data.connection_info.time,
&wrong_ephemeral_key,
&ServerName::from(data.server_name.as_ref()),
&data.server_name,
);
assert!(matches!(
err.unwrap_err(),
CertificateVerificationError::InvalidServerEphemeralKey
HandshakeVerificationError::InvalidServerEphemeralKey
));
}
@@ -642,7 +680,7 @@ mod tests {
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_verify_cert_chain_fail_no_cert(
verifier: &WebPkiVerifier,
verifier: &ServerCertVerifier,
#[case] mut data: ConnectionFixture,
) {
// Empty certs
@@ -652,12 +690,12 @@ mod tests {
verifier,
data.connection_info.time,
data.server_ephemeral_key(),
&ServerName::from(data.server_name.as_ref()),
&data.server_name,
);
assert!(matches!(
err.unwrap_err(),
CertificateVerificationError::MissingCerts
HandshakeVerificationError::MissingCerts
));
}
}
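
A hedged sketch of the reworked name types; the tlsn_core::connection paths follow the imports used elsewhere in this diff, and the helper function is illustrative.

use tlsn_core::connection::{DnsName, InvalidDnsNameError, ServerName};

// Sketch only: ServerName now wraps a validated DnsName instead of an
// arbitrary String, so invalid host names are rejected up front.
fn parse_server_name(host: &str) -> Result<ServerName, InvalidDnsNameError> {
    Ok(ServerName::Dns(DnsName::try_from(host)?))
}

// Display still yields the bare host name, e.g.
// parse_server_name("tlsnotary.org").unwrap().to_string() == "tlsnotary.org"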

View File

@@ -0,0 +1,16 @@
use rangeset::RangeSet;
pub(crate) struct FmtRangeSet<'a>(pub &'a RangeSet<usize>);
impl<'a> std::fmt::Display for FmtRangeSet<'a> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("{")?;
for range in self.0.iter_ranges() {
write!(f, "{}..{}", range.start, range.end)?;
if range.end < self.0.end().unwrap_or(0) {
f.write_str(", ")?;
}
}
f.write_str("}")
}
}
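
A hedged sketch of how the new crate-private helper might be used when rendering an error message; the surrounding function is hypothetical.

use rangeset::RangeSet;

// Sketch only: FmtRangeSet renders a RangeSet as e.g. "{0..4, 8..12}".
pub(crate) fn describe_uncommitted(ranges: &RangeSet<usize>) -> String {
    format!("uncommitted ranges: {}", FmtRangeSet(ranges))
}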

View File

@@ -1,6 +1,7 @@
//! Fixtures for testing
mod provider;
pub mod transcript;
pub use provider::FixtureEncodingProvider;
@@ -8,13 +9,15 @@ use hex::FromHex;
use crate::{
connection::{
Certificate, ConnectionInfo, HandshakeData, HandshakeDataV1_2, KeyType, ServerCertData,
ServerEphemKey, ServerName, ServerSignature, SignatureScheme, TlsVersion, TranscriptLength,
CertBinding, CertBindingV1_2, ConnectionInfo, DnsName, HandshakeData, KeyType,
ServerEphemKey, ServerName, ServerSignature, SignatureAlgorithm, TlsVersion,
TranscriptLength,
},
transcript::{
encoding::{EncoderSecret, EncodingProvider},
Transcript,
},
webpki::CertificateDer,
};
/// A fixture containing various TLS connection data.
@@ -23,33 +26,35 @@ use crate::{
pub struct ConnectionFixture {
pub server_name: ServerName,
pub connection_info: ConnectionInfo,
pub server_cert_data: ServerCertData,
pub server_cert_data: HandshakeData,
}
impl ConnectionFixture {
/// Returns a connection fixture for tlsnotary.org.
pub fn tlsnotary(transcript_length: TranscriptLength) -> Self {
ConnectionFixture {
server_name: ServerName::new("tlsnotary.org".to_string()),
server_name: ServerName::Dns(DnsName::try_from("tlsnotary.org").unwrap()),
connection_info: ConnectionInfo {
time: 1671637529,
version: TlsVersion::V1_2,
transcript_length,
},
server_cert_data: ServerCertData {
server_cert_data: HandshakeData {
certs: vec![
Certificate(include_bytes!("fixtures/data/tlsnotary.org/ee.der").to_vec()),
Certificate(include_bytes!("fixtures/data/tlsnotary.org/inter.der").to_vec()),
Certificate(include_bytes!("fixtures/data/tlsnotary.org/ca.der").to_vec()),
CertificateDer(include_bytes!("fixtures/data/tlsnotary.org/ee.der").to_vec()),
CertificateDer(
include_bytes!("fixtures/data/tlsnotary.org/inter.der").to_vec(),
),
CertificateDer(include_bytes!("fixtures/data/tlsnotary.org/ca.der").to_vec()),
],
sig: ServerSignature {
scheme: SignatureScheme::RSA_PKCS1_SHA256,
alg: SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256,
sig: Vec::<u8>::from_hex(include_bytes!(
"fixtures/data/tlsnotary.org/signature"
))
.unwrap(),
},
handshake: HandshakeData::V1_2(HandshakeDataV1_2 {
binding: CertBinding::V1_2(CertBindingV1_2 {
client_random: <[u8; 32]>::from_hex(include_bytes!(
"fixtures/data/tlsnotary.org/client_random"
))
@@ -73,26 +78,28 @@ impl ConnectionFixture {
/// Returns a connection fixture for appliedzkp.org.
pub fn appliedzkp(transcript_length: TranscriptLength) -> Self {
ConnectionFixture {
server_name: ServerName::new("appliedzkp.org".to_string()),
server_name: ServerName::Dns(DnsName::try_from("appliedzkp.org").unwrap()),
connection_info: ConnectionInfo {
time: 1671637529,
version: TlsVersion::V1_2,
transcript_length,
},
server_cert_data: ServerCertData {
server_cert_data: HandshakeData {
certs: vec![
Certificate(include_bytes!("fixtures/data/appliedzkp.org/ee.der").to_vec()),
Certificate(include_bytes!("fixtures/data/appliedzkp.org/inter.der").to_vec()),
Certificate(include_bytes!("fixtures/data/appliedzkp.org/ca.der").to_vec()),
CertificateDer(include_bytes!("fixtures/data/appliedzkp.org/ee.der").to_vec()),
CertificateDer(
include_bytes!("fixtures/data/appliedzkp.org/inter.der").to_vec(),
),
CertificateDer(include_bytes!("fixtures/data/appliedzkp.org/ca.der").to_vec()),
],
sig: ServerSignature {
scheme: SignatureScheme::ECDSA_NISTP256_SHA256,
alg: SignatureAlgorithm::ECDSA_NISTP256_SHA256,
sig: Vec::<u8>::from_hex(include_bytes!(
"fixtures/data/appliedzkp.org/signature"
))
.unwrap(),
},
handshake: HandshakeData::V1_2(HandshakeDataV1_2 {
binding: CertBinding::V1_2(CertBindingV1_2 {
client_random: <[u8; 32]>::from_hex(include_bytes!(
"fixtures/data/appliedzkp.org/client_random"
))
@@ -115,10 +122,10 @@ impl ConnectionFixture {
/// Returns the server_ephemeral_key fixture.
pub fn server_ephemeral_key(&self) -> &ServerEphemKey {
let HandshakeData::V1_2(HandshakeDataV1_2 {
let CertBinding::V1_2(CertBindingV1_2 {
server_ephemeral_key,
..
}) = &self.server_cert_data.handshake;
}) = &self.server_cert_data.binding;
server_ephemeral_key
}
}

View File

@@ -0,0 +1,199 @@
//! Transcript fixtures for testing.
use aead::Payload as AeadPayload;
use aes_gcm::{aead::Aead, Aes128Gcm, NewAead};
use generic_array::GenericArray;
use rand::{rngs::StdRng, Rng, SeedableRng};
use tls_core::msgs::{
base::Payload,
codec::Codec,
enums::{ContentType, HandshakeType, ProtocolVersion},
handshake::{HandshakeMessagePayload, HandshakePayload},
message::{OpaqueMessage, PlainMessage},
};
use crate::{
connection::{TranscriptLength, VerifyData},
fixtures::ConnectionFixture,
transcript::{Record, TlsTranscript},
};
/// The key used for encryption of the sent and received transcript.
pub const KEY: [u8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
/// The iv used for encryption of the sent and received transcript.
pub const IV: [u8; 4] = [1, 3, 3, 7];
/// The record size in bytes.
pub const RECORD_SIZE: usize = 512;
/// Creates a transcript fixture for testing.
pub fn transcript_fixture(sent: &[u8], recv: &[u8]) -> TlsTranscript {
TranscriptGenerator::new(KEY, IV).generate(sent, recv)
}
struct TranscriptGenerator {
key: [u8; 16],
iv: [u8; 4],
}
impl TranscriptGenerator {
fn new(key: [u8; 16], iv: [u8; 4]) -> Self {
Self { key, iv }
}
fn generate(&self, sent: &[u8], recv: &[u8]) -> TlsTranscript {
let mut rng = StdRng::from_seed([1; 32]);
let transcript_len = TranscriptLength {
sent: sent.len() as u32,
received: recv.len() as u32,
};
let tlsn = ConnectionFixture::tlsnotary(transcript_len);
let time = tlsn.connection_info.time;
let version = tlsn.connection_info.version;
let server_cert_chain = tlsn.server_cert_data.certs;
let server_signature = tlsn.server_cert_data.sig;
let cert_binding = tlsn.server_cert_data.binding;
let cf_vd: [u8; 12] = rng.random();
let sf_vd: [u8; 12] = rng.random();
let verify_data = VerifyData {
client_finished: cf_vd.to_vec(),
server_finished: sf_vd.to_vec(),
};
let sent = self.gen_records(cf_vd, sent);
let recv = self.gen_records(sf_vd, recv);
TlsTranscript::new(
time,
version,
Some(server_cert_chain),
Some(server_signature),
cert_binding,
verify_data,
sent,
recv,
)
.unwrap()
}
fn gen_records(&self, vd: [u8; 12], plaintext: &[u8]) -> Vec<Record> {
let mut records = Vec::new();
let handshake = self.gen_handshake(vd);
records.push(handshake);
for (seq, msg) in (1_u64..).zip(plaintext.chunks(RECORD_SIZE)) {
let record = self.gen_app_data(seq, msg);
records.push(record);
}
records
}
fn gen_app_data(&self, seq: u64, plaintext: &[u8]) -> Record {
assert!(
plaintext.len() <= 1 << 14,
"plaintext len per record must be smaller than 2^14 bytes"
);
let explicit_nonce: [u8; 8] = seq.to_be_bytes();
let msg = PlainMessage {
typ: ContentType::ApplicationData,
version: ProtocolVersion::TLSv1_2,
payload: Payload::new(plaintext),
};
let opaque = aes_gcm_encrypt(self.key, self.iv, seq, explicit_nonce, &msg);
let mut payload = opaque.payload.0;
let mut ciphertext = payload.split_off(8);
let tag = ciphertext.split_off(ciphertext.len() - 16);
Record {
seq,
typ: ContentType::ApplicationData,
plaintext: Some(plaintext.to_vec()),
explicit_nonce: explicit_nonce.to_vec(),
ciphertext,
tag: Some(tag),
}
}
fn gen_handshake(&self, vd: [u8; 12]) -> Record {
let seq = 0_u64;
let explicit_nonce = seq.to_be_bytes();
let mut plaintext = Vec::new();
let payload = Payload(vd.to_vec());
let hs_payload = HandshakePayload::Finished(payload);
let handshake_message = HandshakeMessagePayload {
typ: HandshakeType::Finished,
payload: hs_payload,
};
handshake_message.encode(&mut plaintext);
let msg = PlainMessage {
typ: ContentType::Handshake,
version: ProtocolVersion::TLSv1_2,
payload: Payload::new(plaintext.clone()),
};
let opaque = aes_gcm_encrypt(self.key, self.iv, seq, explicit_nonce, &msg);
let mut payload = opaque.payload.0;
let mut ciphertext = payload.split_off(8);
let tag = ciphertext.split_off(ciphertext.len() - 16);
Record {
seq,
typ: ContentType::Handshake,
plaintext: Some(plaintext),
explicit_nonce: explicit_nonce.to_vec(),
ciphertext,
tag: Some(tag),
}
}
}
fn aes_gcm_encrypt(
key: [u8; 16],
iv: [u8; 4],
seq: u64,
explicit_nonce: [u8; 8],
msg: &PlainMessage,
) -> OpaqueMessage {
let mut aad = [0u8; 13];
aad[..8].copy_from_slice(&seq.to_be_bytes());
aad[8] = msg.typ.get_u8();
aad[9..11].copy_from_slice(&msg.version.get_u16().to_be_bytes());
aad[11..13].copy_from_slice(&(msg.payload.0.len() as u16).to_be_bytes());
let payload = AeadPayload {
msg: &msg.payload.0,
aad: &aad,
};
let mut nonce = [0u8; 12];
nonce[..4].copy_from_slice(&iv);
nonce[4..].copy_from_slice(&explicit_nonce);
let nonce = GenericArray::from_slice(&nonce);
let cipher = Aes128Gcm::new_from_slice(&key).unwrap();
// ciphertext will have the MAC appended
let ciphertext = cipher.encrypt(nonce, payload).unwrap();
// prepend the explicit nonce
let mut nonce_ct_mac = vec![0u8; 0];
nonce_ct_mac.extend(explicit_nonce.iter());
nonce_ct_mac.extend(ciphertext.iter());
OpaqueMessage {
typ: msg.typ,
version: msg.version,
payload: Payload::new(nonce_ct_mac),
}
}
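A minimal usage sketch for the fixture module above, assuming it is reachable as `tlsn_core::fixtures::transcript_fixture` (possibly behind a fixtures feature); the HTTP payloads are placeholders.

use tlsn_core::fixtures::transcript_fixture;

fn demo_transcript_fixture() {
    let sent = b"GET / HTTP/1.1\r\nHost: tlsnotary.org\r\n\r\n";
    let recv = b"HTTP/1.1 200 OK\r\n\r\n";

    // Each direction starts with a Finished handshake record at sequence 0, followed by
    // application data split into 512-byte records, AES-128-GCM encrypted under KEY/IV
    // with the record sequence number as the explicit nonce.
    let _transcript = transcript_fixture(sent, recv);
}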


@@ -191,6 +191,11 @@ impl Hash {
len: value.len(),
}
}
/// Returns a byte slice of the hash value.
pub fn as_bytes(&self) -> &[u8] {
&self.value[..self.len]
}
}
impl rs_merkle::Hash for Hash {


@@ -10,29 +10,32 @@ pub mod fixtures;
pub mod hash;
pub mod merkle;
pub mod transcript;
pub mod webpki;
pub use rangeset;
pub(crate) mod display;
use rangeset::ToRangeSet;
use rangeset::{RangeSet, ToRangeSet, UnionMut};
use serde::{Deserialize, Serialize};
use crate::{
connection::{ServerCertData, ServerName},
connection::{HandshakeData, ServerName},
transcript::{
Direction, Idx, PartialTranscript, Transcript, TranscriptCommitConfig,
encoding::EncoderSecret, Direction, PartialTranscript, Transcript, TranscriptCommitConfig,
TranscriptCommitRequest, TranscriptCommitment, TranscriptSecret,
},
};
/// Configuration to prove information to the verifier.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProveConfig {
server_identity: bool,
transcript: Option<PartialTranscript>,
reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
transcript_commit: Option<TranscriptCommitConfig>,
}
impl ProveConfig {
/// Creates a new builder.
pub fn builder(transcript: &Transcript) -> ProveConfigBuilder {
pub fn builder(transcript: &Transcript) -> ProveConfigBuilder<'_> {
ProveConfigBuilder::new(transcript)
}
@@ -41,9 +44,9 @@ impl ProveConfig {
self.server_identity
}
/// Returns the transcript to be proven.
pub fn transcript(&self) -> Option<&PartialTranscript> {
self.transcript.as_ref()
/// Returns the ranges of the transcript to be revealed.
pub fn reveal(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
self.reveal.as_ref()
}
/// Returns the transcript commitment configuration.
@@ -57,8 +60,7 @@ impl ProveConfig {
pub struct ProveConfigBuilder<'a> {
transcript: &'a Transcript,
server_identity: bool,
reveal_sent: Idx,
reveal_recv: Idx,
reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
transcript_commit: Option<TranscriptCommitConfig>,
}
@@ -68,8 +70,7 @@ impl<'a> ProveConfigBuilder<'a> {
Self {
transcript,
server_identity: false,
reveal_sent: Idx::default(),
reveal_recv: Idx::default(),
reveal: None,
transcript_commit: None,
}
}
@@ -92,22 +93,24 @@ impl<'a> ProveConfigBuilder<'a> {
direction: Direction,
ranges: &dyn ToRangeSet<usize>,
) -> Result<&mut Self, ProveConfigBuilderError> {
let idx = Idx::new(ranges.to_range_set());
let idx = ranges.to_range_set();
if idx.end() > self.transcript.len_of_direction(direction) {
if idx.end().unwrap_or(0) > self.transcript.len_of_direction(direction) {
return Err(ProveConfigBuilderError(
ProveConfigBuilderErrorRepr::IndexOutOfBounds {
direction,
actual: idx.end(),
actual: idx.end().unwrap_or(0),
len: self.transcript.len_of_direction(direction),
},
));
}
let (sent, recv) = self.reveal.get_or_insert_default();
match direction {
Direction::Sent => self.reveal_sent.union_mut(&idx),
Direction::Received => self.reveal_recv.union_mut(&idx),
Direction::Sent => sent.union_mut(&idx),
Direction::Received => recv.union_mut(&idx),
}
Ok(self)
}
@@ -119,6 +122,14 @@ impl<'a> ProveConfigBuilder<'a> {
self.reveal(Direction::Sent, ranges)
}
/// Reveals all of the sent data transcript.
pub fn reveal_sent_all(&mut self) -> Result<&mut Self, ProveConfigBuilderError> {
let len = self.transcript.len_of_direction(Direction::Sent);
let (sent, _) = self.reveal.get_or_insert_default();
sent.union_mut(&(0..len));
Ok(self)
}
/// Reveals the given ranges of the received data transcript.
pub fn reveal_recv(
&mut self,
@@ -127,20 +138,19 @@ impl<'a> ProveConfigBuilder<'a> {
self.reveal(Direction::Received, ranges)
}
/// Reveals all of the received data transcript.
pub fn reveal_recv_all(&mut self) -> Result<&mut Self, ProveConfigBuilderError> {
let len = self.transcript.len_of_direction(Direction::Received);
let (_, recv) = self.reveal.get_or_insert_default();
recv.union_mut(&(0..len));
Ok(self)
}
/// Builds the configuration.
pub fn build(self) -> Result<ProveConfig, ProveConfigBuilderError> {
let transcript = if !self.reveal_sent.is_empty() || !self.reveal_recv.is_empty() {
Some(
self.transcript
.to_partial(self.reveal_sent, self.reveal_recv),
)
} else {
None
};
Ok(ProveConfig {
server_identity: self.server_identity,
transcript,
reveal: self.reveal,
transcript_commit: self.transcript_commit,
})
}
@@ -162,7 +172,7 @@ enum ProveConfigBuilderErrorRepr {
}
/// Configuration to verify information from the prover.
#[derive(Debug, Default, Clone)]
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct VerifyConfig {}
impl VerifyConfig {
@@ -196,12 +206,12 @@ pub struct VerifyConfigBuilderError(#[from] VerifyConfigBuilderErrorRepr);
#[derive(Debug, thiserror::Error)]
enum VerifyConfigBuilderErrorRepr {}
/// Payload sent to the verifier.
/// Request to prove statements about the connection.
#[doc(hidden)]
#[derive(Debug, Serialize, Deserialize)]
pub struct ProvePayload {
/// Server identity data.
pub server_identity: Option<(ServerName, ServerCertData)>,
pub struct ProveRequest {
/// Handshake data.
pub handshake: Option<(ServerName, HandshakeData)>,
/// Transcript data.
pub transcript: Option<PartialTranscript>,
/// Transcript commitment configuration.
@@ -209,6 +219,7 @@ pub struct ProvePayload {
}
/// Prover output.
#[derive(Serialize, Deserialize)]
pub struct ProverOutput {
/// Transcript commitments.
pub transcript_commitments: Vec<TranscriptCommitment>,
@@ -219,11 +230,14 @@ pub struct ProverOutput {
opaque_debug::implement!(ProverOutput);
/// Verifier output.
#[derive(Serialize, Deserialize)]
pub struct VerifierOutput {
/// Server identity.
pub server_name: Option<ServerName>,
/// Transcript data.
pub transcript: Option<PartialTranscript>,
/// Encoding commitment secret.
pub encoder_secret: Option<EncoderSecret>,
/// Transcript commitments.
pub transcript_commitments: Vec<TranscriptCommitment>,
}
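A hedged sketch of the new reveal-based builder flow introduced above. The `tlsn_core` crate path and the use of a plain `Range` as a `ToRangeSet` argument are assumptions; the builder methods (`reveal_sent_all`, `reveal_recv`, `build`) come from this diff, and the fixture constants are the ones used in the transcript tests below.

use tlsn_core::{transcript::Transcript, ProveConfig};
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};

fn build_prove_config() -> ProveConfig {
    let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);

    let mut builder = ProveConfig::builder(&transcript);

    // Reveal everything that was sent, and only the first 15 received bytes. The builder
    // now accumulates `RangeSet<usize>` reveal ranges instead of materializing a
    // `PartialTranscript` at build time.
    builder.reveal_sent_all().unwrap();
    builder.reveal_recv(&(0..15)).unwrap();

    builder.build().unwrap()
}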


@@ -26,7 +26,7 @@ mod tls;
use std::{fmt, ops::Range};
use rangeset::{Difference, IndexRanges, RangeSet, Subset, ToRangeSet, Union, UnionMut};
use rangeset::{Difference, IndexRanges, RangeSet, Union};
use serde::{Deserialize, Serialize};
use crate::connection::TranscriptLength;
@@ -39,6 +39,7 @@ pub use proof::{
TranscriptProof, TranscriptProofBuilder, TranscriptProofBuilderError, TranscriptProofError,
};
pub use tls::{Record, TlsTranscript};
pub use tls_core::msgs::enums::ContentType;
/// A transcript contains the plaintext of all application data communicated
/// between the Prover and the Server.
@@ -95,18 +96,18 @@ impl Transcript {
/// Returns the subsequence of the transcript with the provided index,
/// returning `None` if the index is out of bounds.
pub fn get(&self, direction: Direction, idx: &Idx) -> Option<Subsequence> {
pub fn get(&self, direction: Direction, idx: &RangeSet<usize>) -> Option<Subsequence> {
let data = match direction {
Direction::Sent => &self.sent,
Direction::Received => &self.received,
};
if idx.end() > data.len() {
if idx.end().unwrap_or(0) > data.len() {
return None;
}
Some(
Subsequence::new(idx.clone(), data.index_ranges(&idx.0))
Subsequence::new(idx.clone(), data.index_ranges(idx))
.expect("data is same length as index"),
)
}
@@ -121,7 +122,11 @@ impl Transcript {
///
/// * `sent_idx` - The indices of the sent data to include.
/// * `recv_idx` - The indices of the received data to include.
pub fn to_partial(&self, sent_idx: Idx, recv_idx: Idx) -> PartialTranscript {
pub fn to_partial(
&self,
sent_idx: RangeSet<usize>,
recv_idx: RangeSet<usize>,
) -> PartialTranscript {
let mut sent = vec![0; self.sent.len()];
let mut received = vec![0; self.received.len()];
@@ -156,9 +161,9 @@ pub struct PartialTranscript {
/// Data received by the Prover from the Server.
received: Vec<u8>,
/// Index of `sent` which have been authenticated.
sent_authed_idx: Idx,
sent_authed_idx: RangeSet<usize>,
/// Index of `received` which have been authenticated.
received_authed_idx: Idx,
received_authed_idx: RangeSet<usize>,
}
/// `PartialTranscript` in a compressed form.
@@ -170,9 +175,9 @@ pub struct CompressedPartialTranscript {
/// Received data which has been authenticated.
received_authed: Vec<u8>,
/// Index of `sent_authed`.
sent_idx: Idx,
sent_idx: RangeSet<usize>,
/// Index of `received_authed`.
recv_idx: Idx,
recv_idx: RangeSet<usize>,
/// Total bytelength of sent data in the original partial transcript.
sent_total: usize,
/// Total bytelength of received data in the original partial transcript.
@@ -184,10 +189,10 @@ impl From<PartialTranscript> for CompressedPartialTranscript {
Self {
sent_authed: uncompressed
.sent
.index_ranges(&uncompressed.sent_authed_idx.0),
.index_ranges(&uncompressed.sent_authed_idx),
received_authed: uncompressed
.received
.index_ranges(&uncompressed.received_authed_idx.0),
.index_ranges(&uncompressed.received_authed_idx),
sent_idx: uncompressed.sent_authed_idx,
recv_idx: uncompressed.received_authed_idx,
sent_total: uncompressed.sent.len(),
@@ -237,8 +242,8 @@ impl PartialTranscript {
Self {
sent: vec![0; sent_len],
received: vec![0; received_len],
sent_authed_idx: Idx::default(),
received_authed_idx: Idx::default(),
sent_authed_idx: RangeSet::default(),
received_authed_idx: RangeSet::default(),
}
}
@@ -259,10 +264,10 @@ impl PartialTranscript {
}
/// Returns whether the index is in bounds of the transcript.
pub fn contains(&self, direction: Direction, idx: &Idx) -> bool {
pub fn contains(&self, direction: Direction, idx: &RangeSet<usize>) -> bool {
match direction {
Direction::Sent => idx.end() <= self.sent.len(),
Direction::Received => idx.end() <= self.received.len(),
Direction::Sent => idx.end().unwrap_or(0) <= self.sent.len(),
Direction::Received => idx.end().unwrap_or(0) <= self.received.len(),
}
}
@@ -289,23 +294,23 @@ impl PartialTranscript {
}
/// Returns the index of sent data which have been authenticated.
pub fn sent_authed(&self) -> &Idx {
pub fn sent_authed(&self) -> &RangeSet<usize> {
&self.sent_authed_idx
}
/// Returns the index of received data which have been authenticated.
pub fn received_authed(&self) -> &Idx {
pub fn received_authed(&self) -> &RangeSet<usize> {
&self.received_authed_idx
}
/// Returns the index of sent data which haven't been authenticated.
pub fn sent_unauthed(&self) -> Idx {
Idx(RangeSet::from(0..self.sent.len()).difference(&self.sent_authed_idx.0))
pub fn sent_unauthed(&self) -> RangeSet<usize> {
(0..self.sent.len()).difference(&self.sent_authed_idx)
}
/// Returns the index of received data which haven't been authenticated.
pub fn received_unauthed(&self) -> Idx {
Idx(RangeSet::from(0..self.received.len()).difference(&self.received_authed_idx.0))
pub fn received_unauthed(&self) -> RangeSet<usize> {
(0..self.received.len()).difference(&self.received_authed_idx)
}
/// Returns an iterator over the authenticated data in the transcript.
@@ -315,7 +320,7 @@ impl PartialTranscript {
Direction::Received => (&self.received, &self.received_authed_idx),
};
authed.0.iter().map(|i| data[i])
authed.iter().map(|i| data[i])
}
/// Unions the authenticated data of this transcript with another.
@@ -337,8 +342,7 @@ impl PartialTranscript {
for range in other
.sent_authed_idx
.0
.difference(&self.sent_authed_idx.0)
.difference(&self.sent_authed_idx)
.iter_ranges()
{
self.sent[range.clone()].copy_from_slice(&other.sent[range]);
@@ -346,8 +350,7 @@ impl PartialTranscript {
for range in other
.received_authed_idx
.0
.difference(&self.received_authed_idx.0)
.difference(&self.received_authed_idx)
.iter_ranges()
{
self.received[range.clone()].copy_from_slice(&other.received[range]);
@@ -399,12 +402,12 @@ impl PartialTranscript {
pub fn set_unauthed_range(&mut self, value: u8, direction: Direction, range: Range<usize>) {
match direction {
Direction::Sent => {
for range in range.difference(&self.sent_authed_idx.0).iter_ranges() {
for range in range.difference(&self.sent_authed_idx).iter_ranges() {
self.sent[range].fill(value);
}
}
Direction::Received => {
for range in range.difference(&self.received_authed_idx.0).iter_ranges() {
for range in range.difference(&self.received_authed_idx).iter_ranges() {
self.received[range].fill(value);
}
}
@@ -433,130 +436,19 @@ impl fmt::Display for Direction {
}
}
/// Transcript index.
#[derive(Debug, Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct Idx(RangeSet<usize>);
impl Idx {
/// Creates a new index builder.
pub fn builder() -> IdxBuilder {
IdxBuilder::default()
}
/// Creates an empty index.
pub fn empty() -> Self {
Self(RangeSet::default())
}
/// Creates a new transcript index.
pub fn new(ranges: impl Into<RangeSet<usize>>) -> Self {
Self(ranges.into())
}
/// Returns the start of the index.
pub fn start(&self) -> usize {
self.0.min().unwrap_or_default()
}
/// Returns the end of the index, non-inclusive.
pub fn end(&self) -> usize {
self.0.end().unwrap_or_default()
}
/// Returns an iterator over the values in the index.
pub fn iter(&self) -> impl Iterator<Item = usize> + '_ {
self.0.iter()
}
/// Returns an iterator over the ranges of the index.
pub fn iter_ranges(&self) -> impl Iterator<Item = Range<usize>> + '_ {
self.0.iter_ranges()
}
/// Returns the number of values in the index.
pub fn len(&self) -> usize {
self.0.len()
}
/// Returns whether the index is empty.
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Returns the number of disjoint ranges in the index.
pub fn count(&self) -> usize {
self.0.len_ranges()
}
pub(crate) fn as_range_set(&self) -> &RangeSet<usize> {
&self.0
}
/// Returns the union of this index with another.
pub(crate) fn union(&self, other: &Idx) -> Idx {
Idx(self.0.union(&other.0))
}
/// Unions this index with another.
pub(crate) fn union_mut(&mut self, other: &Idx) {
self.0.union_mut(&other.0);
}
/// Returns the difference between `self` and `other`.
pub(crate) fn difference(&self, other: &Idx) -> Idx {
Idx(self.0.difference(&other.0))
}
/// Returns `true` if `self` is a subset of `other`.
pub(crate) fn is_subset(&self, other: &Idx) -> bool {
self.0.is_subset(&other.0)
}
}
impl std::fmt::Display for Idx {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("Idx([")?;
let count = self.0.len_ranges();
for (i, range) in self.0.iter_ranges().enumerate() {
write!(f, "{}..{}", range.start, range.end)?;
if i < count - 1 {
write!(f, ", ")?;
}
}
f.write_str("])")?;
Ok(())
}
}
/// Builder for [`Idx`].
#[derive(Debug, Default)]
pub struct IdxBuilder(RangeSet<usize>);
impl IdxBuilder {
/// Unions ranges.
pub fn union(self, ranges: &dyn ToRangeSet<usize>) -> Self {
IdxBuilder(self.0.union(&ranges.to_range_set()))
}
/// Builds the index.
pub fn build(self) -> Idx {
Idx(self.0)
}
}
/// Transcript subsequence.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(try_from = "validation::SubsequenceUnchecked")]
pub struct Subsequence {
/// Index of the subsequence.
idx: Idx,
idx: RangeSet<usize>,
/// Data of the subsequence.
data: Vec<u8>,
}
impl Subsequence {
/// Creates a new subsequence.
pub fn new(idx: Idx, data: Vec<u8>) -> Result<Self, InvalidSubsequence> {
pub fn new(idx: RangeSet<usize>, data: Vec<u8>) -> Result<Self, InvalidSubsequence> {
if idx.len() != data.len() {
return Err(InvalidSubsequence(
"index length does not match data length",
@@ -567,7 +459,7 @@ impl Subsequence {
}
/// Returns the index of the subsequence.
pub fn index(&self) -> &Idx {
pub fn index(&self) -> &RangeSet<usize> {
&self.idx
}
@@ -583,7 +475,7 @@ impl Subsequence {
}
/// Returns the inner parts of the subsequence.
pub fn into_parts(self) -> (Idx, Vec<u8>) {
pub fn into_parts(self) -> (RangeSet<usize>, Vec<u8>) {
(self.idx, self.data)
}
@@ -611,7 +503,7 @@ mod validation {
#[derive(Debug, Deserialize)]
pub(super) struct SubsequenceUnchecked {
idx: Idx,
idx: RangeSet<usize>,
data: Vec<u8>,
}
@@ -633,8 +525,8 @@ mod validation {
pub(super) struct CompressedPartialTranscriptUnchecked {
sent_authed: Vec<u8>,
received_authed: Vec<u8>,
sent_idx: Idx,
recv_idx: Idx,
sent_idx: RangeSet<usize>,
recv_idx: RangeSet<usize>,
sent_total: usize,
recv_total: usize,
}
@@ -651,8 +543,8 @@ mod validation {
));
}
if unchecked.sent_idx.end() > unchecked.sent_total
|| unchecked.recv_idx.end() > unchecked.recv_total
if unchecked.sent_idx.end().unwrap_or(0) > unchecked.sent_total
|| unchecked.recv_idx.end().unwrap_or(0) > unchecked.recv_total
{
return Err(InvalidCompressedPartialTranscript(
"ranges are not in bounds of the data",
@@ -681,8 +573,8 @@ mod validation {
CompressedPartialTranscriptUnchecked {
received_authed: vec![1, 2, 3, 11, 12, 13],
sent_authed: vec![4, 5, 6, 14, 15, 16],
recv_idx: Idx(RangeSet::new(&[1..4, 11..14])),
sent_idx: Idx(RangeSet::new(&[4..7, 14..17])),
recv_idx: RangeSet::from([1..4, 11..14]),
sent_idx: RangeSet::from([4..7, 14..17]),
sent_total: 20,
recv_total: 20,
}
@@ -721,7 +613,6 @@ mod validation {
// Change the total to be less than the last range's end bound.
let end = partial_transcript
.sent_idx
.0
.iter_ranges()
.next_back()
.unwrap()
@@ -753,31 +644,25 @@ mod tests {
#[fixture]
fn partial_transcript() -> PartialTranscript {
transcript().to_partial(
Idx::new(RangeSet::new(&[1..4, 6..9])),
Idx::new(RangeSet::new(&[2..5, 7..10])),
)
transcript().to_partial(RangeSet::from([1..4, 6..9]), RangeSet::from([2..5, 7..10]))
}
#[rstest]
fn test_transcript_get_subsequence(transcript: Transcript) {
let subseq = transcript
.get(Direction::Received, &Idx(RangeSet::from([0..4, 7..10])))
.get(Direction::Received, &RangeSet::from([0..4, 7..10]))
.unwrap();
assert_eq!(subseq.data, vec![0, 1, 2, 3, 7, 8, 9]);
let subseq = transcript
.get(Direction::Sent, &Idx(RangeSet::from([0..4, 9..12])))
.get(Direction::Sent, &RangeSet::from([0..4, 9..12]))
.unwrap();
assert_eq!(subseq.data, vec![0, 1, 2, 3, 9, 10, 11]);
let subseq = transcript.get(
Direction::Received,
&Idx(RangeSet::from([0..4, 7..10, 11..13])),
);
let subseq = transcript.get(Direction::Received, &RangeSet::from([0..4, 7..10, 11..13]));
assert_eq!(subseq, None);
let subseq = transcript.get(Direction::Sent, &Idx(RangeSet::from([0..4, 7..10, 11..13])));
let subseq = transcript.get(Direction::Sent, &RangeSet::from([0..4, 7..10, 11..13]));
assert_eq!(subseq, None);
}
@@ -790,7 +675,7 @@ mod tests {
#[rstest]
fn test_transcript_to_partial_success(transcript: Transcript) {
let partial = transcript.to_partial(Idx::new(0..2), Idx::new(3..7));
let partial = transcript.to_partial(RangeSet::from(0..2), RangeSet::from(3..7));
assert_eq!(partial.sent_unsafe(), [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
assert_eq!(
partial.received_unsafe(),
@@ -801,29 +686,30 @@ mod tests {
#[rstest]
#[should_panic]
fn test_transcript_to_partial_failure(transcript: Transcript) {
let _ = transcript.to_partial(Idx::new(0..14), Idx::new(3..7));
let _ = transcript.to_partial(RangeSet::from(0..14), RangeSet::from(3..7));
}
#[rstest]
fn test_partial_transcript_contains(transcript: Transcript) {
let partial = transcript.to_partial(Idx::new(0..2), Idx::new(3..7));
assert!(partial.contains(Direction::Sent, &Idx::new([0..5, 7..10])));
assert!(!partial.contains(Direction::Received, &Idx::new([4..6, 7..13])))
let partial = transcript.to_partial(RangeSet::from(0..2), RangeSet::from(3..7));
assert!(partial.contains(Direction::Sent, &RangeSet::from([0..5, 7..10])));
assert!(!partial.contains(Direction::Received, &RangeSet::from([4..6, 7..13])))
}
#[rstest]
fn test_partial_transcript_unauthed(transcript: Transcript) {
let partial = transcript.to_partial(Idx::new(0..2), Idx::new(3..7));
assert_eq!(partial.sent_unauthed(), Idx::new(2..12));
assert_eq!(partial.received_unauthed(), Idx::new([0..3, 7..12]));
let partial = transcript.to_partial(RangeSet::from(0..2), RangeSet::from(3..7));
assert_eq!(partial.sent_unauthed(), RangeSet::from(2..12));
assert_eq!(partial.received_unauthed(), RangeSet::from([0..3, 7..12]));
}
#[rstest]
fn test_partial_transcript_union_success(transcript: Transcript) {
// Non overlapping ranges.
let mut simple_partial = transcript.to_partial(Idx::new(0..2), Idx::new(3..7));
let mut simple_partial = transcript.to_partial(RangeSet::from(0..2), RangeSet::from(3..7));
let other_simple_partial = transcript.to_partial(Idx::new(3..5), Idx::new(1..2));
let other_simple_partial =
transcript.to_partial(RangeSet::from(3..5), RangeSet::from(1..2));
simple_partial.union_transcript(&other_simple_partial);
@@ -835,12 +721,16 @@ mod tests {
simple_partial.received_unsafe(),
[0, 1, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0]
);
assert_eq!(simple_partial.sent_authed(), &Idx::new([0..2, 3..5]));
assert_eq!(simple_partial.received_authed(), &Idx::new([1..2, 3..7]));
assert_eq!(simple_partial.sent_authed(), &RangeSet::from([0..2, 3..5]));
assert_eq!(
simple_partial.received_authed(),
&RangeSet::from([1..2, 3..7])
);
// Overwrite with another partial transcript.
let another_simple_partial = transcript.to_partial(Idx::new(1..4), Idx::new(6..9));
let another_simple_partial =
transcript.to_partial(RangeSet::from(1..4), RangeSet::from(6..9));
simple_partial.union_transcript(&another_simple_partial);
@@ -852,13 +742,17 @@ mod tests {
simple_partial.received_unsafe(),
[0, 1, 0, 3, 4, 5, 6, 7, 8, 0, 0, 0]
);
assert_eq!(simple_partial.sent_authed(), &Idx::new(0..5));
assert_eq!(simple_partial.received_authed(), &Idx::new([1..2, 3..9]));
assert_eq!(simple_partial.sent_authed(), &RangeSet::from(0..5));
assert_eq!(
simple_partial.received_authed(),
&RangeSet::from([1..2, 3..9])
);
// Overlapping ranges.
let mut overlap_partial = transcript.to_partial(Idx::new(4..6), Idx::new(3..7));
let mut overlap_partial = transcript.to_partial(RangeSet::from(4..6), RangeSet::from(3..7));
let other_overlap_partial = transcript.to_partial(Idx::new(3..5), Idx::new(5..9));
let other_overlap_partial =
transcript.to_partial(RangeSet::from(3..5), RangeSet::from(5..9));
overlap_partial.union_transcript(&other_overlap_partial);
@@ -870,13 +764,16 @@ mod tests {
overlap_partial.received_unsafe(),
[0, 0, 0, 3, 4, 5, 6, 7, 8, 0, 0, 0]
);
assert_eq!(overlap_partial.sent_authed(), &Idx::new([3..5, 4..6]));
assert_eq!(overlap_partial.received_authed(), &Idx::new([3..7, 5..9]));
assert_eq!(overlap_partial.sent_authed(), &RangeSet::from([3..5, 4..6]));
assert_eq!(
overlap_partial.received_authed(),
&RangeSet::from([3..7, 5..9])
);
// Equal ranges.
let mut equal_partial = transcript.to_partial(Idx::new(4..6), Idx::new(3..7));
let mut equal_partial = transcript.to_partial(RangeSet::from(4..6), RangeSet::from(3..7));
let other_equal_partial = transcript.to_partial(Idx::new(4..6), Idx::new(3..7));
let other_equal_partial = transcript.to_partial(RangeSet::from(4..6), RangeSet::from(3..7));
equal_partial.union_transcript(&other_equal_partial);
@@ -888,13 +785,15 @@ mod tests {
equal_partial.received_unsafe(),
[0, 0, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0]
);
assert_eq!(equal_partial.sent_authed(), &Idx::new(4..6));
assert_eq!(equal_partial.received_authed(), &Idx::new(3..7));
assert_eq!(equal_partial.sent_authed(), &RangeSet::from(4..6));
assert_eq!(equal_partial.received_authed(), &RangeSet::from(3..7));
// Subset ranges.
let mut subset_partial = transcript.to_partial(Idx::new(4..10), Idx::new(3..11));
let mut subset_partial =
transcript.to_partial(RangeSet::from(4..10), RangeSet::from(3..11));
let other_subset_partial = transcript.to_partial(Idx::new(6..9), Idx::new(5..6));
let other_subset_partial =
transcript.to_partial(RangeSet::from(6..9), RangeSet::from(5..6));
subset_partial.union_transcript(&other_subset_partial);
@@ -906,30 +805,32 @@ mod tests {
subset_partial.received_unsafe(),
[0, 0, 0, 3, 4, 5, 6, 7, 8, 9, 10, 0]
);
assert_eq!(subset_partial.sent_authed(), &Idx::new(4..10));
assert_eq!(subset_partial.received_authed(), &Idx::new(3..11));
assert_eq!(subset_partial.sent_authed(), &RangeSet::from(4..10));
assert_eq!(subset_partial.received_authed(), &RangeSet::from(3..11));
}
#[rstest]
#[should_panic]
fn test_partial_transcript_union_failure(transcript: Transcript) {
let mut partial = transcript.to_partial(Idx::new(4..10), Idx::new(3..11));
let mut partial = transcript.to_partial(RangeSet::from(4..10), RangeSet::from(3..11));
let other_transcript = Transcript::new(
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
);
let other_partial = other_transcript.to_partial(Idx::new(6..9), Idx::new(5..6));
let other_partial = other_transcript.to_partial(RangeSet::from(6..9), RangeSet::from(5..6));
partial.union_transcript(&other_partial);
}
#[rstest]
fn test_partial_transcript_union_subseq_success(transcript: Transcript) {
let mut partial = transcript.to_partial(Idx::new(4..10), Idx::new(3..11));
let sent_seq = Subsequence::new(Idx::new([0..3, 5..7]), [0, 1, 2, 5, 6].into()).unwrap();
let recv_seq = Subsequence::new(Idx::new([0..4, 5..7]), [0, 1, 2, 3, 5, 6].into()).unwrap();
let mut partial = transcript.to_partial(RangeSet::from(4..10), RangeSet::from(3..11));
let sent_seq =
Subsequence::new(RangeSet::from([0..3, 5..7]), [0, 1, 2, 5, 6].into()).unwrap();
let recv_seq =
Subsequence::new(RangeSet::from([0..4, 5..7]), [0, 1, 2, 3, 5, 6].into()).unwrap();
partial.union_subsequence(Direction::Sent, &sent_seq);
partial.union_subsequence(Direction::Received, &recv_seq);
@@ -939,30 +840,31 @@ mod tests {
partial.received_unsafe(),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0]
);
assert_eq!(partial.sent_authed(), &Idx::new([0..3, 4..10]));
assert_eq!(partial.received_authed(), &Idx::new(0..11));
assert_eq!(partial.sent_authed(), &RangeSet::from([0..3, 4..10]));
assert_eq!(partial.received_authed(), &RangeSet::from(0..11));
// Overwrite with another subseq.
let other_sent_seq = Subsequence::new(Idx::new(0..3), [3, 2, 1].into()).unwrap();
let other_sent_seq = Subsequence::new(RangeSet::from(0..3), [3, 2, 1].into()).unwrap();
partial.union_subsequence(Direction::Sent, &other_sent_seq);
assert_eq!(partial.sent_unsafe(), [3, 2, 1, 0, 4, 5, 6, 7, 8, 9, 0, 0]);
assert_eq!(partial.sent_authed(), &Idx::new([0..3, 4..10]));
assert_eq!(partial.sent_authed(), &RangeSet::from([0..3, 4..10]));
}
#[rstest]
#[should_panic]
fn test_partial_transcript_union_subseq_failure(transcript: Transcript) {
let mut partial = transcript.to_partial(Idx::new(4..10), Idx::new(3..11));
let mut partial = transcript.to_partial(RangeSet::from(4..10), RangeSet::from(3..11));
let sent_seq = Subsequence::new(Idx::new([0..3, 13..15]), [0, 1, 2, 5, 6].into()).unwrap();
let sent_seq =
Subsequence::new(RangeSet::from([0..3, 13..15]), [0, 1, 2, 5, 6].into()).unwrap();
partial.union_subsequence(Direction::Sent, &sent_seq);
}
#[rstest]
fn test_partial_transcript_set_unauthed_range(transcript: Transcript) {
let mut partial = transcript.to_partial(Idx::new(4..10), Idx::new(3..7));
let mut partial = transcript.to_partial(RangeSet::from(4..10), RangeSet::from(3..7));
partial.set_unauthed_range(7, Direction::Sent, 2..5);
partial.set_unauthed_range(5, Direction::Sent, 0..2);
@@ -979,13 +881,13 @@ mod tests {
#[rstest]
#[should_panic]
fn test_subsequence_new_invalid_len() {
let _ = Subsequence::new(Idx::new([0..3, 5..8]), [0, 1, 2, 5, 6].into()).unwrap();
let _ = Subsequence::new(RangeSet::from([0..3, 5..8]), [0, 1, 2, 5, 6].into()).unwrap();
}
#[rstest]
#[should_panic]
fn test_subsequence_copy_to_invalid_len() {
let seq = Subsequence::new(Idx::new([0..3, 5..7]), [0, 1, 2, 5, 6].into()).unwrap();
let seq = Subsequence::new(RangeSet::from([0..3, 5..7]), [0, 1, 2, 5, 6].into()).unwrap();
let mut data: [u8; 3] = [0, 1, 2];
seq.copy_to(&mut data);

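A short migration sketch reflecting the `Idx` removal above: callers now pass `RangeSet<usize>` values directly, reusing the crate's `rangeset` re-export. The crate paths are assumptions; the call shapes mirror the updated tests.

use tlsn_core::rangeset::RangeSet;
use tlsn_core::transcript::{Direction, Transcript};
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};

fn ranges_instead_of_idx() {
    let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);

    // Old: transcript.to_partial(Idx::new(0..2), Idx::new(3..7))
    let partial = transcript.to_partial(RangeSet::from(0..2), RangeSet::from(3..7));

    // `get` and `contains` likewise take `RangeSet<usize>` now.
    let subseq = transcript.get(Direction::Sent, &RangeSet::from([0..2, 4..6]));

    assert!(partial.contains(Direction::Sent, &RangeSet::from(0..2)));
    assert!(subseq.is_some());
}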

@@ -2,7 +2,7 @@
use std::{collections::HashSet, fmt};
use rangeset::ToRangeSet;
use rangeset::{ToRangeSet, UnionMut};
use serde::{Deserialize, Serialize};
use crate::{
@@ -10,7 +10,7 @@ use crate::{
transcript::{
encoding::{EncodingCommitment, EncodingTree},
hash::{PlaintextHash, PlaintextHashSecret},
Direction, Idx, Transcript,
Direction, RangeSet, Transcript,
},
};
@@ -66,17 +66,17 @@ pub enum TranscriptSecret {
}
/// Configuration for transcript commitments.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TranscriptCommitConfig {
encoding_hash_alg: HashAlgId,
has_encoding: bool,
has_hash: bool,
commits: Vec<((Direction, Idx), TranscriptCommitmentKind)>,
commits: Vec<((Direction, RangeSet<usize>), TranscriptCommitmentKind)>,
}
impl TranscriptCommitConfig {
/// Creates a new commit config builder.
pub fn builder(transcript: &Transcript) -> TranscriptCommitConfigBuilder {
pub fn builder(transcript: &Transcript) -> TranscriptCommitConfigBuilder<'_> {
TranscriptCommitConfigBuilder::new(transcript)
}
@@ -96,7 +96,7 @@ impl TranscriptCommitConfig {
}
/// Returns an iterator over the encoding commitment indices.
pub fn iter_encoding(&self) -> impl Iterator<Item = &(Direction, Idx)> {
pub fn iter_encoding(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>)> {
self.commits.iter().filter_map(|(idx, kind)| match kind {
TranscriptCommitmentKind::Encoding => Some(idx),
_ => None,
@@ -104,7 +104,7 @@ impl TranscriptCommitConfig {
}
/// Returns an iterator over the hash commitment indices.
pub fn iter_hash(&self) -> impl Iterator<Item = (&(Direction, Idx), &HashAlgId)> {
pub fn iter_hash(&self) -> impl Iterator<Item = (&(Direction, RangeSet<usize>), &HashAlgId)> {
self.commits.iter().filter_map(|(idx, kind)| match kind {
TranscriptCommitmentKind::Hash { alg } => Some((idx, alg)),
_ => None,
@@ -114,7 +114,19 @@ impl TranscriptCommitConfig {
/// Returns a request for the transcript commitments.
pub fn to_request(&self) -> TranscriptCommitRequest {
TranscriptCommitRequest {
encoding: self.has_encoding,
encoding: self.has_encoding.then(|| {
let mut sent = RangeSet::default();
let mut recv = RangeSet::default();
for (dir, idx) in self.iter_encoding() {
match dir {
Direction::Sent => sent.union_mut(idx),
Direction::Received => recv.union_mut(idx),
}
}
(sent, recv)
}),
hash: self
.iter_hash()
.map(|((dir, idx), alg)| (*dir, idx.clone(), *alg))
@@ -134,7 +146,7 @@ pub struct TranscriptCommitConfigBuilder<'a> {
has_encoding: bool,
has_hash: bool,
default_kind: TranscriptCommitmentKind,
commits: HashSet<((Direction, Idx), TranscriptCommitmentKind)>,
commits: HashSet<((Direction, RangeSet<usize>), TranscriptCommitmentKind)>,
}
impl<'a> TranscriptCommitConfigBuilder<'a> {
@@ -175,15 +187,15 @@ impl<'a> TranscriptCommitConfigBuilder<'a> {
direction: Direction,
kind: TranscriptCommitmentKind,
) -> Result<&mut Self, TranscriptCommitConfigBuilderError> {
let idx = Idx::new(ranges.to_range_set());
let idx = ranges.to_range_set();
if idx.end() > self.transcript.len_of_direction(direction) {
if idx.end().unwrap_or(0) > self.transcript.len_of_direction(direction) {
return Err(TranscriptCommitConfigBuilderError::new(
ErrorKind::Index,
format!(
"range is out of bounds of the transcript ({}): {} > {}",
direction,
idx.end(),
idx.end().unwrap_or(0),
self.transcript.len_of_direction(direction)
),
));
@@ -289,14 +301,14 @@ impl fmt::Display for TranscriptCommitConfigBuilderError {
/// Request to compute transcript commitments.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TranscriptCommitRequest {
encoding: bool,
hash: Vec<(Direction, Idx, HashAlgId)>,
encoding: Option<(RangeSet<usize>, RangeSet<usize>)>,
hash: Vec<(Direction, RangeSet<usize>, HashAlgId)>,
}
impl TranscriptCommitRequest {
/// Returns `true` if an encoding commitment is requested.
pub fn encoding(&self) -> bool {
self.encoding
pub fn has_encoding(&self) -> bool {
self.encoding.is_some()
}
/// Returns `true` if a hash commitment is requested.
@@ -305,9 +317,14 @@ impl TranscriptCommitRequest {
}
/// Returns an iterator over the hash commitments.
pub fn iter_hash(&self) -> impl Iterator<Item = &(Direction, Idx, HashAlgId)> {
pub fn iter_hash(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>, HashAlgId)> {
self.hash.iter()
}
/// Returns the ranges of the encoding commitments.
pub fn encoding(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
self.encoding.as_ref()
}
}
#[cfg(test)]

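A hedged sketch of the new `TranscriptCommitRequest` shape. `commit_sent` and `commit_recv` are assumed builder helpers not shown in this hunk, and passing a bare `Range` assumes it implements `ToRangeSet`; only `to_request`, `has_encoding`, `encoding`, and `iter_hash` are taken from the changes above.

use tlsn_core::transcript::{Transcript, TranscriptCommitConfig};
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};

fn inspect_commit_request() {
    let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);

    let mut builder = TranscriptCommitConfig::builder(&transcript);
    builder.commit_sent(&(0..10)).unwrap(); // assumed helper using the default commitment kind
    builder.commit_recv(&(0..10)).unwrap(); // assumed helper using the default commitment kind
    let config = builder.build().unwrap();

    let request = config.to_request();
    if request.has_encoding() {
        // The request now carries the union of committed ranges per direction
        // instead of a bare boolean flag.
        let (sent, recv) = request.encoding().expect("ranges exist when has_encoding() is true");
        println!("encoding commitment over {} sent / {} recv bytes", sent.len(), recv.len());
    }
    println!("{} hash commitments requested", request.iter_hash().count());
}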

@@ -19,6 +19,4 @@ use crate::hash::TypedHash;
pub struct EncodingCommitment {
/// Merkle root of the encoding commitments.
pub root: TypedHash,
/// Seed used to generate the encodings.
pub secret: EncoderSecret,
}


@@ -8,8 +8,8 @@ use crate::{
merkle::{MerkleError, MerkleProof},
transcript::{
commit::MAX_TOTAL_COMMITTED_DATA,
encoding::{new_encoder, Encoder, EncodingCommitment},
Direction, Idx,
encoding::{new_encoder, Encoder, EncoderSecret, EncodingCommitment},
Direction,
},
};
@@ -17,7 +17,7 @@ use crate::{
#[derive(Clone, Serialize, Deserialize)]
pub(super) struct Opening {
pub(super) direction: Direction,
pub(super) idx: Idx,
pub(super) idx: RangeSet<usize>,
pub(super) blinder: Blinder,
}
@@ -48,13 +48,14 @@ impl EncodingProof {
pub fn verify_with_provider(
&self,
provider: &HashProvider,
secret: &EncoderSecret,
commitment: &EncodingCommitment,
sent: &[u8],
recv: &[u8],
) -> Result<(Idx, Idx), EncodingProofError> {
) -> Result<(RangeSet<usize>, RangeSet<usize>), EncodingProofError> {
let hasher = provider.get(&commitment.root.alg)?;
let encoder = new_encoder(&commitment.secret);
let encoder = new_encoder(secret);
let Self {
inclusion_proof,
openings,
@@ -89,13 +90,13 @@ impl EncodingProof {
};
// Make sure the ranges are within the bounds of the transcript.
if idx.end() > data.len() {
if idx.end().unwrap_or(0) > data.len() {
return Err(EncodingProofError::new(
ErrorKind::Proof,
format!(
"index out of bounds of the transcript ({}): {} > {}",
direction,
idx.end(),
idx.end().unwrap_or(0),
data.len()
),
));
@@ -111,7 +112,7 @@ impl EncodingProof {
// present in the merkle tree.
leaves.push((*id, hasher.hash(&expected_leaf)));
auth.union_mut(idx.as_range_set());
auth.union_mut(idx);
}
// Verify that the expected hashes are present in the merkle tree.
@@ -121,7 +122,7 @@ impl EncodingProof {
// data is authentic.
inclusion_proof.verify(hasher, &commitment.root, leaves)?;
Ok((Idx(auth_sent), Idx(auth_recv)))
Ok((auth_sent, auth_recv))
}
}
@@ -232,10 +233,7 @@ mod test {
use crate::{
fixtures::{encoder_secret, encoder_secret_tampered_seed, encoding_provider},
hash::Blake3,
transcript::{
encoding::{EncoderSecret, EncodingTree},
Idx, Transcript,
},
transcript::{encoding::EncodingTree, Transcript},
};
use super::*;
@@ -246,21 +244,18 @@ mod test {
commitment: EncodingCommitment,
}
fn new_encoding_fixture(secret: EncoderSecret) -> EncodingFixture {
fn new_encoding_fixture() -> EncodingFixture {
let transcript = Transcript::new(POST_JSON, OK_JSON);
let idx_0 = (Direction::Sent, Idx::new(0..POST_JSON.len()));
let idx_1 = (Direction::Received, Idx::new(0..OK_JSON.len()));
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
let idx_1 = (Direction::Received, RangeSet::from(0..OK_JSON.len()));
let provider = encoding_provider(transcript.sent(), transcript.received());
let tree = EncodingTree::new(&Blake3::default(), [&idx_0, &idx_1], &provider).unwrap();
let proof = tree.proof([&idx_0, &idx_1].into_iter()).unwrap();
let commitment = EncodingCommitment {
root: tree.root(),
secret,
};
let commitment = EncodingCommitment { root: tree.root() };
EncodingFixture {
transcript,
@@ -275,11 +270,12 @@ mod test {
transcript,
proof,
commitment,
} = new_encoding_fixture(encoder_secret_tampered_seed());
} = new_encoding_fixture();
let err = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret_tampered_seed(),
&commitment,
transcript.sent(),
transcript.received(),
@@ -295,13 +291,19 @@ mod test {
transcript,
proof,
commitment,
} = new_encoding_fixture(encoder_secret());
} = new_encoding_fixture();
let sent = &transcript.sent()[transcript.sent().len() - 1..];
let recv = &transcript.received()[transcript.received().len() - 2..];
let err = proof
.verify_with_provider(&HashProvider::default(), &commitment, sent, recv)
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
sent,
recv,
)
.unwrap_err();
assert!(matches!(err.kind, ErrorKind::Proof));
@@ -313,15 +315,16 @@ mod test {
transcript,
mut proof,
commitment,
} = new_encoding_fixture(encoder_secret());
} = new_encoding_fixture();
let Opening { idx, .. } = proof.openings.values_mut().next().unwrap();
*idx = Idx::new([0..3, 13..15]);
*idx = RangeSet::from([0..3, 13..15]);
let err = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
transcript.sent(),
transcript.received(),
@@ -337,7 +340,7 @@ mod test {
transcript,
mut proof,
commitment,
} = new_encoding_fixture(encoder_secret());
} = new_encoding_fixture();
let Opening { blinder, .. } = proof.openings.values_mut().next().unwrap();
@@ -346,6 +349,7 @@ mod test {
let err = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
transcript.sent(),
transcript.received(),

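A hedged sketch of the updated verification call: the encoder secret is now an explicit argument rather than a field of the commitment. The module paths are assumed to be public as written; the argument order matches the tests above.

use tlsn_core::hash::HashProvider;
use tlsn_core::rangeset::RangeSet;
use tlsn_core::transcript::encoding::{EncoderSecret, EncodingCommitment, EncodingProof};

fn verify_encoding(
    proof: &EncodingProof,
    secret: &EncoderSecret,
    commitment: &EncodingCommitment,
    sent: &[u8],
    recv: &[u8],
) -> (RangeSet<usize>, RangeSet<usize>) {
    // Returns the authenticated sent/received ranges on success; verification fails if the
    // secret does not match the one used to build the encoding tree behind `commitment`.
    proof
        .verify_with_provider(&HashProvider::default(), secret, commitment, sent, recv)
        .expect("encoding proof verifies against the provided secret and commitment")
}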

@@ -1,6 +1,7 @@
use std::collections::HashMap;
use bimap::BiMap;
use rangeset::{RangeSet, UnionMut};
use serde::{Deserialize, Serialize};
use crate::{
@@ -11,7 +12,7 @@ use crate::{
proof::{EncodingProof, Opening},
EncodingProvider,
},
Direction, Idx,
Direction,
},
};
@@ -22,7 +23,7 @@ pub enum EncodingTreeError {
#[error("index is out of bounds of the transcript")]
OutOfBounds {
/// The index.
index: Idx,
index: RangeSet<usize>,
/// The transcript length.
transcript_length: usize,
},
@@ -30,13 +31,13 @@ pub enum EncodingTreeError {
#[error("encoding provider is missing an encoding for an index")]
MissingEncoding {
/// The index which is missing.
index: Idx,
index: RangeSet<usize>,
},
/// Index is missing from the tree.
#[error("index is missing from the tree")]
MissingLeaf {
/// The index which is missing.
index: Idx,
index: RangeSet<usize>,
},
}
@@ -49,11 +50,11 @@ pub struct EncodingTree {
blinders: Vec<Blinder>,
/// Mapping between the index of a leaf and the transcript index it
/// corresponds to.
idxs: BiMap<usize, (Direction, Idx)>,
idxs: BiMap<usize, (Direction, RangeSet<usize>)>,
/// Union of all transcript indices in the sent direction.
sent_idx: Idx,
sent_idx: RangeSet<usize>,
/// Union of all transcript indices in the received direction.
received_idx: Idx,
received_idx: RangeSet<usize>,
}
opaque_debug::implement!(EncodingTree);
@@ -68,15 +69,15 @@ impl EncodingTree {
/// * `provider` - The encoding provider.
pub fn new<'idx>(
hasher: &dyn HashAlgorithm,
idxs: impl IntoIterator<Item = &'idx (Direction, Idx)>,
idxs: impl IntoIterator<Item = &'idx (Direction, RangeSet<usize>)>,
provider: &dyn EncodingProvider,
) -> Result<Self, EncodingTreeError> {
let mut this = Self {
tree: MerkleTree::new(hasher.id()),
blinders: Vec::new(),
idxs: BiMap::new(),
sent_idx: Idx::empty(),
received_idx: Idx::empty(),
sent_idx: RangeSet::default(),
received_idx: RangeSet::default(),
};
let mut leaves = Vec::new();
@@ -138,7 +139,7 @@ impl EncodingTree {
/// * `idxs` - The transcript indices to prove.
pub fn proof<'idx>(
&self,
idxs: impl Iterator<Item = &'idx (Direction, Idx)>,
idxs: impl Iterator<Item = &'idx (Direction, RangeSet<usize>)>,
) -> Result<EncodingProof, EncodingTreeError> {
let mut openings = HashMap::new();
for dir_idx in idxs {
@@ -171,11 +172,11 @@ impl EncodingTree {
}
/// Returns whether the tree contains the given transcript index.
pub fn contains(&self, idx: &(Direction, Idx)) -> bool {
pub fn contains(&self, idx: &(Direction, RangeSet<usize>)) -> bool {
self.idxs.contains_right(idx)
}
pub(crate) fn idx(&self, direction: Direction) -> &Idx {
pub(crate) fn idx(&self, direction: Direction) -> &RangeSet<usize> {
match direction {
Direction::Sent => &self.sent_idx,
Direction::Received => &self.received_idx,
@@ -183,7 +184,7 @@ impl EncodingTree {
}
/// Returns the committed transcript indices.
pub(crate) fn transcript_indices(&self) -> impl Iterator<Item = &(Direction, Idx)> {
pub(crate) fn transcript_indices(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>)> {
self.idxs.right_values()
}
}
@@ -200,7 +201,7 @@ mod tests {
fn new_tree<'seq>(
transcript: &Transcript,
idxs: impl Iterator<Item = &'seq (Direction, Idx)>,
idxs: impl Iterator<Item = &'seq (Direction, RangeSet<usize>)>,
) -> Result<EncodingTree, EncodingTreeError> {
let provider = encoding_provider(transcript.sent(), transcript.received());
@@ -211,8 +212,8 @@ mod tests {
fn test_encoding_tree() {
let transcript = Transcript::new(POST_JSON, OK_JSON);
let idx_0 = (Direction::Sent, Idx::new(0..POST_JSON.len()));
let idx_1 = (Direction::Received, Idx::new(0..OK_JSON.len()));
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
let idx_1 = (Direction::Received, RangeSet::from(0..OK_JSON.len()));
let tree = new_tree(&transcript, [&idx_0, &idx_1].into_iter()).unwrap();
@@ -221,14 +222,12 @@ mod tests {
let proof = tree.proof([&idx_0, &idx_1].into_iter()).unwrap();
let commitment = EncodingCommitment {
root: tree.root(),
secret: encoder_secret(),
};
let commitment = EncodingCommitment { root: tree.root() };
let (auth_sent, auth_recv) = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
transcript.sent(),
transcript.received(),
@@ -243,10 +242,10 @@ mod tests {
fn test_encoding_tree_multiple_ranges() {
let transcript = Transcript::new(POST_JSON, OK_JSON);
let idx_0 = (Direction::Sent, Idx::new(0..1));
let idx_1 = (Direction::Sent, Idx::new(1..POST_JSON.len()));
let idx_2 = (Direction::Received, Idx::new(0..1));
let idx_3 = (Direction::Received, Idx::new(1..OK_JSON.len()));
let idx_0 = (Direction::Sent, RangeSet::from(0..1));
let idx_1 = (Direction::Sent, RangeSet::from(1..POST_JSON.len()));
let idx_2 = (Direction::Received, RangeSet::from(0..1));
let idx_3 = (Direction::Received, RangeSet::from(1..OK_JSON.len()));
let tree = new_tree(&transcript, [&idx_0, &idx_1, &idx_2, &idx_3].into_iter()).unwrap();
@@ -259,25 +258,23 @@ mod tests {
.proof([&idx_0, &idx_1, &idx_2, &idx_3].into_iter())
.unwrap();
let commitment = EncodingCommitment {
root: tree.root(),
secret: encoder_secret(),
};
let commitment = EncodingCommitment { root: tree.root() };
let (auth_sent, auth_recv) = proof
.verify_with_provider(
&HashProvider::default(),
&encoder_secret(),
&commitment,
transcript.sent(),
transcript.received(),
)
.unwrap();
let mut expected_auth_sent = Idx::default();
let mut expected_auth_sent = RangeSet::default();
expected_auth_sent.union_mut(&idx_0.1);
expected_auth_sent.union_mut(&idx_1.1);
let mut expected_auth_recv = Idx::default();
let mut expected_auth_recv = RangeSet::default();
expected_auth_recv.union_mut(&idx_2.1);
expected_auth_recv.union_mut(&idx_3.1);
@@ -289,9 +286,9 @@ mod tests {
fn test_encoding_tree_proof_missing_leaf() {
let transcript = Transcript::new(POST_JSON, OK_JSON);
let idx_0 = (Direction::Sent, Idx::new(0..POST_JSON.len()));
let idx_1 = (Direction::Received, Idx::new(0..4));
let idx_2 = (Direction::Received, Idx::new(4..OK_JSON.len()));
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
let idx_1 = (Direction::Received, RangeSet::from(0..4));
let idx_2 = (Direction::Received, RangeSet::from(4..OK_JSON.len()));
let tree = new_tree(&transcript, [&idx_0, &idx_1].into_iter()).unwrap();
@@ -305,8 +302,8 @@ mod tests {
fn test_encoding_tree_out_of_bounds() {
let transcript = Transcript::new(POST_JSON, OK_JSON);
let idx_0 = (Direction::Sent, Idx::new(0..POST_JSON.len() + 1));
let idx_1 = (Direction::Received, Idx::new(0..OK_JSON.len() + 1));
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len() + 1));
let idx_1 = (Direction::Received, RangeSet::from(0..OK_JSON.len() + 1));
let result = new_tree(&transcript, [&idx_0].into_iter()).unwrap_err();
assert!(matches!(result, EncodingTreeError::MissingEncoding { .. }));
@@ -321,7 +318,7 @@ mod tests {
let result = EncodingTree::new(
&Blake3::default(),
[(Direction::Sent, Idx::new(0..8))].iter(),
[(Direction::Sent, RangeSet::from(0..8))].iter(),
&provider,
)
.unwrap_err();

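A hedged sketch of building an encoding tree keyed by `(Direction, RangeSet<usize>)` pairs, following the updated tests; `encoding_provider` is the fixture provider used there (possibly behind a fixtures feature), and the crate paths are assumptions.

use tlsn_core::fixtures::encoding_provider;
use tlsn_core::hash::Blake3;
use tlsn_core::rangeset::RangeSet;
use tlsn_core::transcript::{encoding::EncodingTree, Direction, Transcript};
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};

fn build_encoding_tree() -> EncodingTree {
    let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);

    // Leaves are keyed by (direction, range set) pairs now that `Idx` is gone.
    let idx_sent = (Direction::Sent, RangeSet::from(0..GET_WITH_HEADER.len()));
    let idx_recv = (Direction::Received, RangeSet::from(0..OK_JSON.len()));

    let provider = encoding_provider(transcript.sent(), transcript.received());
    EncodingTree::new(&Blake3::default(), [&idx_sent, &idx_recv], &provider).unwrap()
}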

@@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize};
use crate::{
hash::{Blinder, HashAlgId, HashAlgorithm, TypedHash},
transcript::{Direction, Idx},
transcript::{Direction, RangeSet},
};
/// Hashes plaintext with a blinder.
@@ -23,7 +23,7 @@ pub struct PlaintextHash {
/// Direction of the plaintext.
pub direction: Direction,
/// Index of plaintext.
pub idx: Idx,
pub idx: RangeSet<usize>,
/// The hash of the data.
pub hash: TypedHash,
}
@@ -34,7 +34,7 @@ pub struct PlaintextHashSecret {
/// Direction of the plaintext.
pub direction: Direction,
/// Index of plaintext.
pub idx: Idx,
pub idx: RangeSet<usize>,
/// The algorithm of the hash.
pub alg: HashAlgId,
/// Blinder for the hash.


@@ -1,17 +1,18 @@
//! Transcript proofs.
use rangeset::{Cover, ToRangeSet};
use rangeset::{Cover, Difference, Subset, ToRangeSet, UnionMut};
use serde::{Deserialize, Serialize};
use std::{collections::HashSet, fmt};
use crate::{
connection::TranscriptLength,
display::FmtRangeSet,
hash::{HashAlgId, HashProvider},
transcript::{
commit::{TranscriptCommitment, TranscriptCommitmentKind},
encoding::{EncodingProof, EncodingProofError, EncodingTree},
encoding::{EncoderSecret, EncodingProof, EncodingProofError, EncodingTree},
hash::{hash_plaintext, PlaintextHash, PlaintextHashSecret},
Direction, Idx, PartialTranscript, Transcript, TranscriptSecret,
Direction, PartialTranscript, RangeSet, Transcript, TranscriptSecret,
},
};
@@ -21,6 +22,9 @@ const DEFAULT_COMMITMENT_KINDS: &[TranscriptCommitmentKind] = &[
TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256,
},
TranscriptCommitmentKind::Hash {
alg: HashAlgId::BLAKE3,
},
TranscriptCommitmentKind::Encoding,
];
@@ -47,6 +51,7 @@ impl TranscriptProof {
self,
provider: &HashProvider,
length: &TranscriptLength,
encoder_secret: Option<&EncoderSecret>,
commitments: impl IntoIterator<Item = &'a TranscriptCommitment>,
) -> Result<PartialTranscript, TranscriptProofError> {
let mut encoding_commitment = None;
@@ -77,11 +82,18 @@ impl TranscriptProof {
));
}
let mut total_auth_sent = Idx::default();
let mut total_auth_recv = Idx::default();
let mut total_auth_sent = RangeSet::default();
let mut total_auth_recv = RangeSet::default();
// Verify encoding proof.
if let Some(proof) = self.encoding_proof {
let secret = encoder_secret.ok_or_else(|| {
TranscriptProofError::new(
ErrorKind::Encoding,
"contains an encoding proof but missing encoder secret",
)
})?;
let commitment = encoding_commitment.ok_or_else(|| {
TranscriptProofError::new(
ErrorKind::Encoding,
@@ -91,6 +103,7 @@ impl TranscriptProof {
let (auth_sent, auth_recv) = proof.verify_with_provider(
provider,
secret,
commitment,
self.transcript.sent_unsafe(),
self.transcript.received_unsafe(),
@@ -120,7 +133,7 @@ impl TranscriptProof {
Direction::Received => (self.transcript.received_unsafe(), &mut total_auth_recv),
};
if idx.end() > plaintext.len() {
if idx.end().unwrap_or(0) > plaintext.len() {
return Err(TranscriptProofError::new(
ErrorKind::Hash,
"hash opening index is out of bounds",
@@ -215,15 +228,15 @@ impl From<EncodingProofError> for TranscriptProofError {
/// Union of ranges to reveal.
#[derive(Clone, Debug, PartialEq)]
struct QueryIdx {
sent: Idx,
recv: Idx,
sent: RangeSet<usize>,
recv: RangeSet<usize>,
}
impl QueryIdx {
fn new() -> Self {
Self {
sent: Idx::empty(),
recv: Idx::empty(),
sent: RangeSet::default(),
recv: RangeSet::default(),
}
}
@@ -231,7 +244,7 @@ impl QueryIdx {
self.sent.is_empty() && self.recv.is_empty()
}
fn union(&mut self, direction: &Direction, other: &Idx) {
fn union(&mut self, direction: &Direction, other: &RangeSet<usize>) {
match direction {
Direction::Sent => self.sent.union_mut(other),
Direction::Received => self.recv.union_mut(other),
@@ -241,7 +254,12 @@ impl QueryIdx {
impl std::fmt::Display for QueryIdx {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "sent: {}, received: {}", self.sent, self.recv)
write!(
f,
"sent: {}, received: {}",
FmtRangeSet(&self.sent),
FmtRangeSet(&self.recv)
)
}
}
@@ -253,8 +271,8 @@ pub struct TranscriptProofBuilder<'a> {
transcript: &'a Transcript,
encoding_tree: Option<&'a EncodingTree>,
hash_secrets: Vec<&'a PlaintextHashSecret>,
committed_sent: Idx,
committed_recv: Idx,
committed_sent: RangeSet<usize>,
committed_recv: RangeSet<usize>,
query_idx: QueryIdx,
}
@@ -264,8 +282,8 @@ impl<'a> TranscriptProofBuilder<'a> {
transcript: &'a Transcript,
secrets: impl IntoIterator<Item = &'a TranscriptSecret>,
) -> Self {
let mut committed_sent = Idx::empty();
let mut committed_recv = Idx::empty();
let mut committed_sent = RangeSet::default();
let mut committed_recv = RangeSet::default();
let mut encoding_tree = None;
let mut hash_secrets = Vec::new();
@@ -323,15 +341,15 @@ impl<'a> TranscriptProofBuilder<'a> {
ranges: &dyn ToRangeSet<usize>,
direction: Direction,
) -> Result<&mut Self, TranscriptProofBuilderError> {
let idx = Idx::new(ranges.to_range_set());
let idx = ranges.to_range_set();
if idx.end() > self.transcript.len_of_direction(direction) {
if idx.end().unwrap_or(0) > self.transcript.len_of_direction(direction) {
return Err(TranscriptProofBuilderError::new(
BuilderErrorKind::Index,
format!(
"range is out of bounds of the transcript ({}): {} > {}",
direction,
idx.end(),
idx.end().unwrap_or(0),
self.transcript.len_of_direction(direction)
),
));
@@ -348,7 +366,10 @@ impl<'a> TranscriptProofBuilder<'a> {
let missing = idx.difference(committed);
return Err(TranscriptProofBuilderError::new(
BuilderErrorKind::MissingCommitment,
format!("commitment is missing for ranges in {direction} transcript: {missing}"),
format!(
"commitment is missing for ranges in {direction} transcript: {}",
FmtRangeSet(&missing)
),
));
}
Ok(self)
@@ -403,25 +424,23 @@ impl<'a> TranscriptProofBuilder<'a> {
continue;
};
let (sent_dir_idxs, sent_uncovered) =
uncovered_query_idx.sent.as_range_set().cover_by(
encoding_tree
.transcript_indices()
.filter(|(dir, _)| *dir == Direction::Sent),
|(_, idx)| &idx.0,
);
let (sent_dir_idxs, sent_uncovered) = uncovered_query_idx.sent.cover_by(
encoding_tree
.transcript_indices()
.filter(|(dir, _)| *dir == Direction::Sent),
|(_, idx)| idx,
);
// Uncovered ranges will be checked with ranges of the next
// preferred commitment kind.
uncovered_query_idx.sent = Idx(sent_uncovered);
uncovered_query_idx.sent = sent_uncovered;
let (recv_dir_idxs, recv_uncovered) =
uncovered_query_idx.recv.as_range_set().cover_by(
encoding_tree
.transcript_indices()
.filter(|(dir, _)| *dir == Direction::Received),
|(_, idx)| &idx.0,
);
uncovered_query_idx.recv = Idx(recv_uncovered);
let (recv_dir_idxs, recv_uncovered) = uncovered_query_idx.recv.cover_by(
encoding_tree
.transcript_indices()
.filter(|(dir, _)| *dir == Direction::Received),
|(_, idx)| idx,
);
uncovered_query_idx.recv = recv_uncovered;
let dir_idxs = sent_dir_idxs
.into_iter()
@@ -439,25 +458,23 @@ impl<'a> TranscriptProofBuilder<'a> {
}
}
TranscriptCommitmentKind::Hash { alg } => {
let (sent_hashes, sent_uncovered) =
uncovered_query_idx.sent.as_range_set().cover_by(
self.hash_secrets.iter().filter(|hash| {
hash.direction == Direction::Sent && &hash.alg == alg
}),
|hash| &hash.idx.0,
);
let (sent_hashes, sent_uncovered) = uncovered_query_idx.sent.cover_by(
self.hash_secrets.iter().filter(|hash| {
hash.direction == Direction::Sent && &hash.alg == alg
}),
|hash| &hash.idx,
);
// Uncovered ranges will be checked with ranges of the next
// preferred commitment kind.
uncovered_query_idx.sent = Idx(sent_uncovered);
uncovered_query_idx.sent = sent_uncovered;
let (recv_hashes, recv_uncovered) =
uncovered_query_idx.recv.as_range_set().cover_by(
self.hash_secrets.iter().filter(|hash| {
hash.direction == Direction::Received && &hash.alg == alg
}),
|hash| &hash.idx.0,
);
uncovered_query_idx.recv = Idx(recv_uncovered);
let (recv_hashes, recv_uncovered) = uncovered_query_idx.recv.cover_by(
self.hash_secrets.iter().filter(|hash| {
hash.direction == Direction::Received && &hash.alg == alg
}),
|hash| &hash.idx,
);
uncovered_query_idx.recv = recv_uncovered;
transcript_proof.hash_secrets.extend(
sent_hashes
@@ -567,7 +584,7 @@ mod tests {
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
use crate::{
fixtures::encoding_provider,
fixtures::{encoder_secret, encoding_provider},
hash::{Blake3, Blinder, HashAlgId},
transcript::TranscriptCommitConfigBuilder,
};
@@ -577,7 +594,7 @@ mod tests {
#[rstest]
fn test_verify_missing_encoding_commitment_root() {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let idxs = vec![(Direction::Received, Idx::new(0..transcript.len().1))];
let idxs = vec![(Direction::Received, RangeSet::from(0..transcript.len().1))];
let encoding_tree = EncodingTree::new(
&Blake3::default(),
&idxs,
@@ -594,7 +611,12 @@ mod tests {
let provider = HashProvider::default();
let err = transcript_proof
.verify_with_provider(&provider, &transcript.length(), &[])
.verify_with_provider(
&provider,
&transcript.length(),
Some(&encoder_secret()),
&[],
)
.err()
.unwrap();
@@ -632,15 +654,16 @@ mod tests {
}
#[rstest]
fn test_reveal_with_hash_commitment() {
#[case::sha256(HashAlgId::SHA256)]
#[case::blake3(HashAlgId::BLAKE3)]
fn test_reveal_with_hash_commitment(#[case] alg: HashAlgId) {
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
let provider = HashProvider::default();
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let direction = Direction::Sent;
let idx = Idx::new(0..10);
let idx = RangeSet::from(0..10);
let blinder: Blinder = rng.random();
let alg = HashAlgId::SHA256;
let hasher = provider.get(&alg).unwrap();
let commitment = PlaintextHash {
@@ -667,6 +690,7 @@ mod tests {
.verify_with_provider(
&provider,
&transcript.length(),
None,
&[TranscriptCommitment::Hash(commitment)],
)
.unwrap();
@@ -678,15 +702,16 @@ mod tests {
}
#[rstest]
fn test_reveal_with_inconsistent_hash_commitment() {
#[case::sha256(HashAlgId::SHA256)]
#[case::blake3(HashAlgId::BLAKE3)]
fn test_reveal_with_inconsistent_hash_commitment(#[case] alg: HashAlgId) {
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
let provider = HashProvider::default();
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let direction = Direction::Sent;
let idx = Idx::new(0..10);
let idx = RangeSet::from(0..10);
let blinder: Blinder = rng.random();
let alg = HashAlgId::SHA256;
let hasher = provider.get(&alg).unwrap();
let commitment = PlaintextHash {
@@ -714,6 +739,7 @@ mod tests {
.verify_with_provider(
&provider,
&transcript.length(),
None,
&[TranscriptCommitment::Hash(commitment)],
)
.unwrap_err();
@@ -894,10 +920,10 @@ mod tests {
match kind {
BuilderErrorKind::Cover { uncovered, .. } => {
if !uncovered_sent_rangeset.is_empty() {
assert_eq!(uncovered.sent, Idx(uncovered_sent_rangeset));
assert_eq!(uncovered.sent, uncovered_sent_rangeset);
}
if !uncovered_recv_rangeset.is_empty() {
assert_eq!(uncovered.recv, Idx(uncovered_recv_rangeset));
assert_eq!(uncovered.recv, uncovered_recv_rangeset);
}
}
_ => panic!("unexpected error kind: {kind:?}"),

View File

@@ -2,10 +2,10 @@
use crate::{
connection::{
Certificate, HandshakeData, HandshakeDataV1_2, ServerEphemKey, ServerSignature, TlsVersion,
VerifyData,
CertBinding, CertBindingV1_2, ServerEphemKey, ServerSignature, TlsVersion, VerifyData,
},
transcript::{Direction, Transcript},
webpki::CertificateDer,
};
use tls_core::msgs::{
alert::AlertMessagePayload,
@@ -19,9 +19,9 @@ use tls_core::msgs::{
pub struct TlsTranscript {
time: u64,
version: TlsVersion,
server_cert_chain: Option<Vec<Certificate>>,
server_cert_chain: Option<Vec<CertificateDer>>,
server_signature: Option<ServerSignature>,
handshake_data: HandshakeData,
certificate_binding: CertBinding,
sent: Vec<Record>,
recv: Vec<Record>,
}
@@ -32,9 +32,9 @@ impl TlsTranscript {
pub fn new(
time: u64,
version: TlsVersion,
server_cert_chain: Option<Vec<Certificate>>,
server_cert_chain: Option<Vec<CertificateDer>>,
server_signature: Option<ServerSignature>,
handshake_data: HandshakeData,
certificate_binding: CertBinding,
verify_data: VerifyData,
sent: Vec<Record>,
recv: Vec<Record>,
@@ -198,7 +198,7 @@ impl TlsTranscript {
version,
server_cert_chain,
server_signature,
handshake_data,
certificate_binding,
sent,
recv,
})
@@ -215,7 +215,7 @@ impl TlsTranscript {
}
/// Returns the server certificate chain.
pub fn server_cert_chain(&self) -> Option<&[Certificate]> {
pub fn server_cert_chain(&self) -> Option<&[CertificateDer]> {
self.server_cert_chain.as_deref()
}
@@ -226,17 +226,17 @@ impl TlsTranscript {
/// Returns the server ephemeral key used in the TLS handshake.
pub fn server_ephemeral_key(&self) -> &ServerEphemKey {
match &self.handshake_data {
HandshakeData::V1_2(HandshakeDataV1_2 {
match &self.certificate_binding {
CertBinding::V1_2(CertBindingV1_2 {
server_ephemeral_key,
..
}) => server_ephemeral_key,
}
}
/// Returns the handshake data.
pub fn handshake_data(&self) -> &HandshakeData {
&self.handshake_data
/// Returns the certificate binding data.
pub fn certificate_binding(&self) -> &CertBinding {
&self.certificate_binding
}
/// Returns the sent records.

168
crates/core/src/webpki.rs Normal file
View File

@@ -0,0 +1,168 @@
//! Web PKI types.
use std::time::Duration;
use rustls_pki_types::{self as webpki_types, pem::PemObject};
use serde::{Deserialize, Serialize};
use crate::connection::ServerName;
/// X.509 certificate, DER encoded.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CertificateDer(pub Vec<u8>);
impl CertificateDer {
/// Creates a DER-encoded certificate from a PEM-encoded certificate.
pub fn from_pem_slice(pem: &[u8]) -> Result<Self, PemError> {
let der = webpki_types::CertificateDer::from_pem_slice(pem).map_err(|_| PemError {})?;
Ok(Self(der.to_vec()))
}
}
/// Private key, DER encoded.
#[derive(Debug, Clone, zeroize::ZeroizeOnDrop, Serialize, Deserialize)]
pub struct PrivateKeyDer(pub Vec<u8>);
impl PrivateKeyDer {
/// Creates a DER-encoded private key from a PEM-encoded private key.
pub fn from_pem_slice(pem: &[u8]) -> Result<Self, PemError> {
let der = webpki_types::PrivateKeyDer::from_pem_slice(pem).map_err(|_| PemError {})?;
Ok(Self(der.secret_der().to_vec()))
}
}
/// PEM parsing error.
#[derive(Debug, thiserror::Error)]
#[error("failed to parse PEM object")]
pub struct PemError {}
/// Root certificate store.
///
/// This stores root certificates which are used to verify end-entity
/// certificates presented by a TLS server.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RootCertStore {
/// Unvalidated DER-encoded X.509 root certificates.
pub roots: Vec<CertificateDer>,
}
impl RootCertStore {
/// Creates an empty root certificate store.
pub fn empty() -> Self {
Self { roots: Vec::new() }
}
}
/// Server certificate verifier.
#[derive(Debug)]
pub struct ServerCertVerifier {
roots: Vec<webpki_types::TrustAnchor<'static>>,
}
impl ServerCertVerifier {
/// Creates a new server certificate verifier.
pub fn new(roots: &RootCertStore) -> Result<Self, ServerCertVerifierError> {
let roots = roots
.roots
.iter()
.map(|cert| {
webpki::anchor_from_trusted_cert(&webpki_types::CertificateDer::from(
cert.0.as_slice(),
))
.map(|anchor| anchor.to_owned())
.map_err(|err| ServerCertVerifierError::InvalidRootCertificate {
cert: cert.clone(),
reason: err.to_string(),
})
})
.collect::<Result<Vec<_>, _>>()?;
Ok(Self { roots })
}
/// Creates a new server certificate verifier with Mozilla root
/// certificates.
pub fn mozilla() -> Self {
Self {
roots: webpki_roots::TLS_SERVER_ROOTS.to_vec(),
}
}
/// Verifies the server certificate was valid at the given time of
/// presentation.
///
/// # Arguments
///
/// * `end_entity` - End-entity certificate to verify.
/// * `intermediates` - Intermediate certificates to a trust anchor.
/// * `server_name` - Server DNS name.
/// * `time` - Unix time the certificate was presented.
pub fn verify_server_cert(
&self,
end_entity: &CertificateDer,
intermediates: &[CertificateDer],
server_name: &ServerName,
time: u64,
) -> Result<(), ServerCertVerifierError> {
let cert = webpki_types::CertificateDer::from(end_entity.0.as_slice());
let cert = webpki::EndEntityCert::try_from(&cert).map_err(|e| {
ServerCertVerifierError::InvalidEndEntityCertificate {
cert: end_entity.clone(),
reason: e.to_string(),
}
})?;
let intermediates = intermediates
.iter()
.map(|c| webpki_types::CertificateDer::from(c.0.as_slice()))
.collect::<Vec<_>>();
let server_name = server_name.to_webpki();
let time = webpki_types::UnixTime::since_unix_epoch(Duration::from_secs(time));
cert.verify_for_usage(
webpki::ALL_VERIFICATION_ALGS,
&self.roots,
&intermediates,
time,
webpki::KeyUsage::server_auth(),
None,
None,
)
.map(|_| ())
.map_err(|_| ServerCertVerifierError::InvalidPath)?;
cert.verify_is_valid_for_subject_name(&server_name)
.map_err(|_| ServerCertVerifierError::InvalidServerName)?;
Ok(())
}
}
/// Error for [`ServerCertVerifier`].
#[derive(Debug, thiserror::Error)]
#[error("server certificate verification failed: {0}")]
pub enum ServerCertVerifierError {
/// Root certificate store contains invalid certificate.
#[error("root certificate store contains invalid certificate: {reason}")]
InvalidRootCertificate {
/// Invalid certificate.
cert: CertificateDer,
/// Reason for invalidity.
reason: String,
},
/// End-entity certificate is invalid.
#[error("end-entity certificate is invalid: {reason}")]
InvalidEndEntityCertificate {
/// Invalid certificate.
cert: CertificateDer,
/// Reason for invalidity.
reason: String,
},
/// Failed to verify certificate path to provided trust anchors.
#[error("failed to verify certificate path to provided trust anchors")]
InvalidPath,
/// Failed to verify certificate is valid for provided server name.
#[error("failed to verify certificate is valid for provided server name")]
InvalidServerName,
}
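For orientation, here is a minimal usage sketch of the types added in this file, using the re-exports the examples below rely on (`tlsn::config::{CertificateDer, RootCertStore}`, `tlsn::verifier::ServerCertVerifier`, `tlsn::connection::ServerName`); the inputs are placeholders, not part of this change:
```rust
use tlsn::{
    config::{CertificateDer, RootCertStore},
    connection::ServerName,
    verifier::ServerCertVerifier,
};

// Hypothetical inputs: a PEM-encoded root CA, the DER-encoded end-entity and
// intermediate certificates presented by the server, the expected DNS name,
// and the Unix time at which the certificates were presented.
fn check_server_cert(
    ca_pem: &[u8],
    end_entity: &CertificateDer,
    intermediates: &[CertificateDer],
    server_name: &ServerName,
    time: u64,
) -> Result<(), Box<dyn std::error::Error>> {
    // Build a root store containing only the custom CA certificate.
    let roots = RootCertStore {
        roots: vec![CertificateDer::from_pem_slice(ca_pem)?],
    };
    // `ServerCertVerifier::mozilla()` would use the bundled Mozilla trust
    // anchors instead of a custom store.
    let verifier = ServerCertVerifier::new(&roots)?;
    // Checks the certificate path to a trust anchor and that the certificate
    // is valid for the given server name at the given time.
    verifier.verify_server_cert(end_entity, intermediates, server_name, time)?;
    Ok(())
}
```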

View File

@@ -8,10 +8,8 @@ version = "0.0.0"
workspace = true
[dependencies]
tlsn-core = { workspace = true }
tlsn = { workspace = true }
tlsn-formats = { workspace = true }
tlsn-tls-core = { workspace = true }
tls-server-fixture = { workspace = true }
tlsn-server-fixture = { workspace = true }
tlsn-server-fixture-certs = { workspace = true }
@@ -20,13 +18,13 @@ spansy = { workspace = true }
bincode = { workspace = true }
chrono = { workspace = true }
clap = { version = "4.5", features = ["derive"] }
dotenv = { version = "0.15.0" }
futures = { workspace = true }
http-body-util = { workspace = true }
hex = { workspace = true }
hyper = { workspace = true, features = ["client", "http1"] }
hyper-util = { workspace = true, features = ["full"] }
k256 = { workspace = true, features = ["ecdsa"] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
tokio = { workspace = true, features = [
"rt",
@@ -39,7 +37,24 @@ tokio = { workspace = true, features = [
tokio-util = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
noir = { git = "https://github.com/zkmopro/noir-rs", tag = "v1.0.0-beta.8", features = ["barretenberg"] }
[[example]]
name = "interactive"
path = "interactive/interactive.rs"
[[example]]
name = "interactive_zk"
path = "interactive_zk/interactive_zk.rs"
[[example]]
name = "attestation_prove"
path = "attestation/prove.rs"
[[example]]
name = "attestation_present"
path = "attestation/present.rs"
[[example]]
name = "attestation_verify"
path = "attestation/verify.rs"

View File

@@ -5,4 +5,4 @@ This folder contains examples demonstrating how to use the TLSNotary protocol.
* [Interactive](./interactive/README.md): Interactive Prover and Verifier session without a trusted notary.
* [Attestation](./attestation/README.md): Performing a simple notarization with a trusted notary.
Refer to <https://docs.tlsnotary.org/quick_start/index.html> for a quick start guide to using TLSNotary with these examples.
Refer to <https://tlsnotary.org/docs/quick_start> for a quick start guide to using TLSNotary with these examples.

View File

@@ -0,0 +1,164 @@
# Attestation Example
This example demonstrates a **TLSNotary attestation workflow**: notarizing data from a server with a trusted third party (Notary), then creating verifiable presentations with selective disclosure of sensitive information to a Verifier.
## 🔍 How It Works
```mermaid
sequenceDiagram
participant P as Prover
participant N as MPC-TLS<br/>Verifier
participant S as Server<br/>Fixture
participant V as Attestation<br/>Verifier
Note over P,S: 1. Notarization Phase
P->>N: Establish MPC-TLS connection
P->>S: Request (MPC-TLS)
S->>P: Response (MPC-TLS)
N->>P: Issue signed attestation
Note over P: 2. Presentation Phase
P->>P: Create redacted presentation
Note over P,V: 3. Verification Phase
P->>V: Share presentation
V->>V: Verify attestation signature
```
### The Three-Step Process
1. **🔐 Notarize**: Prover collaborates with Notary to create an authenticated TLS session and obtain a signed attestation
2. **✂️ Present**: Prover creates a selective presentation, choosing which data to reveal or redact
3. **✅ Verify**: Anyone can verify the presentation's authenticity using the Notary's public key
## 🚀 Quick Start
### Step 1: Notarize Data
**Start the test server** (from repository root):
```bash
RUST_LOG=info PORT=4000 cargo run --bin tlsn-server-fixture
```
**Run the notarization** (in a new terminal):
```bash
RUST_LOG=info SERVER_PORT=4000 cargo run --release --example attestation_prove
```
**Expected output:**
```
Notarization completed successfully!
The attestation has been written to `example-json.attestation.tlsn` and the corresponding secrets to `example-json.secrets.tlsn`.
```
### Step 2: Create Verifiable Presentation
**Generate a redacted presentation:**
```bash
cargo run --release --example attestation_present
```
**Expected output:**
```
Presentation built successfully!
The presentation has been written to `example-json.presentation.tlsn`.
```
> 💡 **Tip**: You can create multiple presentations from the same attestation, each with different redactions!
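As a rough sketch of what that looks like in code (the `build_two_presentations` helper and the particular reveal choices are hypothetical; the `Secrets`/`Attestation` calls follow `present.rs`):
```rust
use tlsn::attestation::{presentation::Presentation, Attestation, CryptoProvider, Secrets};
use tlsn_formats::http::HttpTranscript;

// Builds two presentations from one attestation, each revealing different data.
// Assumes `transcript_proof_builder()` and `identity_proof()` take `&self`, as
// their usage in `present.rs` suggests.
fn build_two_presentations(
    attestation: &Attestation,
    secrets: &Secrets,
) -> Result<(Presentation, Presentation), Box<dyn std::error::Error>> {
    let transcript = HttpTranscript::parse(secrets.transcript())?;
    let provider = CryptoProvider::default();

    // Presentation A: reveal only the response headers.
    let mut proof_a = secrets.transcript_proof_builder();
    for header in &transcript.responses[0].headers {
        proof_a.reveal_recv(header)?;
    }
    let mut builder_a = attestation.presentation_builder(&provider);
    builder_a
        .identity_proof(secrets.identity_proof())
        .transcript_proof(proof_a.build()?);

    // Presentation B: reveal only the request target.
    let mut proof_b = secrets.transcript_proof_builder();
    proof_b.reveal_sent(&transcript.requests[0].request.target)?;
    let mut builder_b = attestation.presentation_builder(&provider);
    builder_b
        .identity_proof(secrets.identity_proof())
        .transcript_proof(proof_b.build()?);

    Ok((builder_a.build()?, builder_b.build()?))
}
```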
### Step 3: Verify the Presentation
**Verify the presentation:**
```bash
cargo run --release --example attestation_verify
```
**Expected output:**
```
Verifying presentation with {key algorithm} key: { hex encoded key }
**Ask yourself, do you trust this key?**
-------------------------------------------------------------------
Successfully verified that the data below came from a session with test-server.io at { time }.
Note that the data which the Prover chose not to disclose are shown as X.
Data sent:
GET /formats/json HTTP/1.1
host: test-server.io
accept: */*
accept-encoding: identity
connection: close
user-agent: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
Data received:
HTTP/1.1 200 OK
content-type: application/json
content-length: 722
connection: close
date: Mon, 08 Sep 2025 09:18:29 GMT
XXXXXX1234567890XXXXXXXXXXXXXXXXXXXXXXXXJohn DoeXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX1.2XX
```
## 🎯 Use Cases & Examples
### JSON Data (Default)
Perfect for API responses, configuration data, or structured information:
```bash
# All three steps use JSON by default
SERVER_PORT=4000 cargo run --release --example attestation_prove
cargo run --release --example attestation_present
cargo run --release --example attestation_verify
```
### HTML Content
Ideal for web pages, forms, or any HTML-based data:
```bash
# Notarize HTML content
SERVER_PORT=4000 cargo run --release --example attestation_prove -- html
cargo run --release --example attestation_present -- html
cargo run --release --example attestation_verify -- html
```
### Authenticated/Private Data
For APIs requiring authentication tokens, cookies, or private access:
```bash
# Notarize private data with authentication
SERVER_PORT=4000 cargo run --release --example attestation_prove -- authenticated
cargo run --release --example attestation_present -- authenticated
cargo run --release --example attestation_verify -- authenticated
```
### Debug Mode
For detailed logging and troubleshooting:
```bash
RUST_LOG=debug,yamux=info,uid_mux=info SERVER_PORT=4000 cargo run --release --example attestation_prove
```
### Generated Files
After running the examples, you'll find the following files (a loading sketch follows this list):
- **`*.attestation.tlsn`**: The cryptographically signed attestation from the Notary
- **`*.secrets.tlsn`**: Cryptographic secrets needed to create presentations
- **`*.presentation.tlsn`**: The verifiable presentation with your chosen redactions
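The `*.tlsn` files are plain bincode-serialized values, so they can be loaded back with the same types the examples use. A minimal sketch (file names as produced by the default JSON run):
```rust
use tlsn::attestation::{presentation::Presentation, Attestation, Secrets};

// Loads the three artifacts written by the JSON example run.
fn load_artifacts() -> Result<(Attestation, Secrets, Presentation), Box<dyn std::error::Error>> {
    let attestation: Attestation =
        bincode::deserialize(&std::fs::read("example-json.attestation.tlsn")?)?;
    let secrets: Secrets =
        bincode::deserialize(&std::fs::read("example-json.secrets.tlsn")?)?;
    let presentation: Presentation =
        bincode::deserialize(&std::fs::read("example-json.presentation.tlsn")?)?;
    Ok((attestation, secrets, presentation))
}
```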
## 🔐 Security Considerations
### Trust Model
- ✅ **Notary Key**: The presentation includes the Notary's verifying key, which the verifier must trust
- ✅ **Data Authenticity**: Cryptographically guaranteed that the data came from the specified server
- ✅ **Tamper Evidence**: Any modification to the presentation will fail verification
- ⚠️ **Notary Trust**: The verifier must trust the Notary not to collude with the Prover
### Production Deployment
- 🏭 **Independent Notary**: Use a trusted third-party Notary service (not a local one)
- 🔒 **Key Management**: Implement proper Notary key distribution and verification
- 📋 **Audit Trail**: Maintain logs of notarization and verification events
- 🔄 **Key Rotation**: Plan for Notary key updates and migration
> ⚠️ **Demo Notice**: This example uses a local test server and local Notary for demonstration. In production, use trusted third-party Notary services and real server endpoints.

View File

@@ -0,0 +1,117 @@
// This example demonstrates how to build a verifiable presentation from an
// attestation and the corresponding connection secrets. See the `prove.rs`
// example to learn how to acquire an attestation from a Notary.
use clap::Parser;
use hyper::header;
use tlsn::attestation::{presentation::Presentation, Attestation, CryptoProvider, Secrets};
use tlsn_examples::ExampleType;
use tlsn_formats::http::HttpTranscript;
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Args {
/// What data to notarize
#[clap(default_value_t, value_enum)]
example_type: ExampleType,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let args = Args::parse();
create_presentation(&args.example_type).await
}
async fn create_presentation(example_type: &ExampleType) -> Result<(), Box<dyn std::error::Error>> {
let attestation_path = tlsn_examples::get_file_path(example_type, "attestation");
let secrets_path = tlsn_examples::get_file_path(example_type, "secrets");
// Read attestation from disk.
let attestation: Attestation = bincode::deserialize(&std::fs::read(attestation_path)?)?;
// Read secrets from disk.
let secrets: Secrets = bincode::deserialize(&std::fs::read(secrets_path)?)?;
// Parse the HTTP transcript.
let transcript = HttpTranscript::parse(secrets.transcript())?;
// Build a transcript proof.
let mut builder = secrets.transcript_proof_builder();
// Here is where we reveal all or some of the parts we committed in `prove.rs`
// previously.
let request = &transcript.requests[0];
// Reveal the structure of the request without the headers or body.
builder.reveal_sent(&request.without_data())?;
// Reveal the request target.
builder.reveal_sent(&request.request.target)?;
// Reveal all request headers except the values of User-Agent and Authorization.
for header in &request.headers {
if !(header
.name
.as_str()
.eq_ignore_ascii_case(header::USER_AGENT.as_str())
|| header
.name
.as_str()
.eq_ignore_ascii_case(header::AUTHORIZATION.as_str()))
{
builder.reveal_sent(header)?;
} else {
builder.reveal_sent(&header.without_value())?;
}
}
// Reveal only parts of the response.
let response = &transcript.responses[0];
// Reveal the structure of the response without the headers or body.
builder.reveal_recv(&response.without_data())?;
// Reveal all response headers.
for header in &response.headers {
builder.reveal_recv(header)?;
}
let content = &response.body.as_ref().unwrap().content;
match content {
tlsn_formats::http::BodyContent::Json(json) => {
// For experimentation, reveal the entire response or just a selection.
let reveal_all = false;
if reveal_all {
builder.reveal_recv(response)?;
} else {
builder.reveal_recv(json.get("id").unwrap())?;
builder.reveal_recv(json.get("information.name").unwrap())?;
builder.reveal_recv(json.get("meta.version").unwrap())?;
}
}
tlsn_formats::http::BodyContent::Unknown(span) => {
builder.reveal_recv(span)?;
}
_ => {}
}
let transcript_proof = builder.build()?;
// Use default crypto provider to build the presentation.
let provider = CryptoProvider::default();
let mut builder = attestation.presentation_builder(&provider);
builder
.identity_proof(secrets.identity_proof())
.transcript_proof(transcript_proof);
let presentation: Presentation = builder.build()?;
let presentation_path = tlsn_examples::get_file_path(example_type, "presentation");
// Write the presentation to disk.
std::fs::write(&presentation_path, bincode::serialize(&presentation)?)?;
println!("Presentation built successfully!");
println!("The presentation has been written to `{presentation_path}`.");
Ok(())
}

View File

@@ -0,0 +1,403 @@
// This example demonstrates how to use the Prover to acquire an attestation for
// an HTTP request sent to a server fixture. The attestation and secrets are
// saved to disk.
use std::env;
use clap::Parser;
use http_body_util::Empty;
use hyper::{body::Bytes, Request, StatusCode};
use hyper_util::rt::TokioIo;
use spansy::Spanned;
use tokio::{
io::{AsyncRead, AsyncWrite},
sync::oneshot::{self, Receiver, Sender},
};
use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
use tracing::info;
use tlsn::{
attestation::{
request::{Request as AttestationRequest, RequestConfig},
signing::Secp256k1Signer,
Attestation, AttestationConfig, CryptoProvider, Secrets,
},
config::{
CertificateDer, PrivateKeyDer, ProtocolConfig, ProtocolConfigValidator, RootCertStore,
},
connection::{ConnectionInfo, HandshakeData, ServerName, TranscriptLength},
prover::{state::Committed, ProveConfig, Prover, ProverConfig, ProverOutput, TlsConfig},
transcript::{ContentType, TranscriptCommitConfig},
verifier::{Verifier, VerifierConfig, VerifierOutput, VerifyConfig},
};
use tlsn_examples::ExampleType;
use tlsn_formats::http::{DefaultHttpCommitter, HttpCommit, HttpTranscript};
use tlsn_server_fixture::DEFAULT_FIXTURE_PORT;
use tlsn_server_fixture_certs::{CA_CERT_DER, CLIENT_CERT_DER, CLIENT_KEY_DER, SERVER_DOMAIN};
// Settings for the request to the application server.
const USER_AGENT: &str = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36";
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Args {
/// What data to notarize.
#[clap(default_value_t, value_enum)]
example_type: ExampleType,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
tracing_subscriber::fmt::init();
let args = Args::parse();
let (uri, extra_headers) = match args.example_type {
ExampleType::Json => ("/formats/json", vec![]),
ExampleType::Html => ("/formats/html", vec![]),
ExampleType::Authenticated => ("/protected", vec![("Authorization", "random_auth_token")]),
};
let (notary_socket, prover_socket) = tokio::io::duplex(1 << 23);
let (request_tx, request_rx) = oneshot::channel();
let (attestation_tx, attestation_rx) = oneshot::channel();
tokio::spawn(async move {
notary(notary_socket, request_rx, attestation_tx)
.await
.unwrap()
});
prover(
prover_socket,
request_tx,
attestation_rx,
uri,
extra_headers,
&args.example_type,
)
.await?;
Ok(())
}
async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
socket: S,
req_tx: Sender<AttestationRequest>,
resp_rx: Receiver<Attestation>,
uri: &str,
extra_headers: Vec<(&str, &str)>,
example_type: &ExampleType,
) -> Result<(), Box<dyn std::error::Error>> {
let server_host: String = env::var("SERVER_HOST").unwrap_or("127.0.0.1".into());
let server_port: u16 = env::var("SERVER_PORT")
.map(|port| port.parse().expect("port should be valid integer"))
.unwrap_or(DEFAULT_FIXTURE_PORT);
// Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
let mut tls_config_builder = TlsConfig::builder();
tls_config_builder
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
// (Optional) Set up TLS client authentication if required by the server.
.client_auth((
vec![CertificateDer(CLIENT_CERT_DER.to_vec())],
PrivateKeyDer(CLIENT_KEY_DER.to_vec()),
));
let tls_config = tls_config_builder.build().unwrap();
// Set up protocol configuration for prover.
let mut prover_config_builder = ProverConfig::builder();
prover_config_builder
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
.tls_config(tls_config)
.protocol_config(
ProtocolConfig::builder()
// We must configure the amount of data we expect to exchange beforehand, which will
// be preprocessed prior to the connection. Reducing these limits will improve
// performance.
.max_sent_data(tlsn_examples::MAX_SENT_DATA)
.max_recv_data(tlsn_examples::MAX_RECV_DATA)
.build()?,
);
let prover_config = prover_config_builder.build()?;
// Create a new prover and perform necessary setup.
let prover = Prover::new(prover_config).setup(socket.compat()).await?;
// Open a TCP connection to the server.
let client_socket = tokio::net::TcpStream::connect((server_host, server_port)).await?;
// Bind the prover to the server connection.
// The returned `mpc_tls_connection` is an MPC TLS connection to the server: all
// data written to/read from it will be encrypted/decrypted using MPC with
// the notary.
let (mpc_tls_connection, prover_fut) = prover.connect(client_socket.compat()).await?;
let mpc_tls_connection = TokioIo::new(mpc_tls_connection.compat());
// Spawn the prover task to be run concurrently in the background.
let prover_task = tokio::spawn(prover_fut);
// Attach the hyper HTTP client to the connection.
let (mut request_sender, connection) =
hyper::client::conn::http1::handshake(mpc_tls_connection).await?;
// Spawn the HTTP task to be run concurrently in the background.
tokio::spawn(connection);
// Build a simple HTTP request with common headers.
let request_builder = Request::builder()
.uri(uri)
.header("Host", SERVER_DOMAIN)
.header("Accept", "*/*")
// Using "identity" instructs the Server not to use compression for its HTTP response.
// TLSNotary tooling does not support compression.
.header("Accept-Encoding", "identity")
.header("Connection", "close")
.header("User-Agent", USER_AGENT);
let mut request_builder = request_builder;
for (key, value) in extra_headers {
request_builder = request_builder.header(key, value);
}
let request = request_builder.body(Empty::<Bytes>::new())?;
info!("Starting an MPC TLS connection with the server");
// Send the request to the server and wait for the response.
let response = request_sender.send_request(request).await?;
info!("Got a response from the server: {}", response.status());
assert!(response.status() == StatusCode::OK);
// The prover task should be done now, so we can await it.
let prover = prover_task.await??;
// Parse the HTTP transcript.
let transcript = HttpTranscript::parse(prover.transcript())?;
let body_content = &transcript.responses[0].body.as_ref().unwrap().content;
let body = String::from_utf8_lossy(body_content.span().as_bytes());
match body_content {
tlsn_formats::http::BodyContent::Json(_json) => {
let parsed = serde_json::from_str::<serde_json::Value>(&body)?;
info!("{}", serde_json::to_string_pretty(&parsed)?);
}
tlsn_formats::http::BodyContent::Unknown(_span) => {
info!("{}", &body);
}
_ => {}
}
// Commit to the transcript.
let mut builder = TranscriptCommitConfig::builder(prover.transcript());
// This commits to various parts of the transcript separately (e.g. request
// headers, response headers, response body and more). See https://docs.tlsnotary.org/protocol/commit_strategy.html
// for other strategies that can be used to generate commitments.
DefaultHttpCommitter::default().commit_transcript(&mut builder, &transcript)?;
let transcript_commit = builder.build()?;
// Build an attestation request.
let mut builder = RequestConfig::builder();
builder.transcript_commit(transcript_commit);
// Optionally, add an extension to the attestation if the notary supports it.
// builder.extension(Extension {
// id: b"example.name".to_vec(),
// value: b"Bobert".to_vec(),
// });
let request_config = builder.build()?;
let (attestation, secrets) = notarize(prover, &request_config, req_tx, resp_rx).await?;
// Write the attestation to disk.
let attestation_path = tlsn_examples::get_file_path(example_type, "attestation");
let secrets_path = tlsn_examples::get_file_path(example_type, "secrets");
tokio::fs::write(&attestation_path, bincode::serialize(&attestation)?).await?;
// Write the secrets to disk.
tokio::fs::write(&secrets_path, bincode::serialize(&secrets)?).await?;
println!("Notarization completed successfully!");
println!(
"The attestation has been written to `{attestation_path}` and the \
corresponding secrets to `{secrets_path}`."
);
Ok(())
}
async fn notarize(
mut prover: Prover<Committed>,
config: &RequestConfig,
request_tx: Sender<AttestationRequest>,
attestation_rx: Receiver<Attestation>,
) -> Result<(Attestation, Secrets), Box<dyn std::error::Error>> {
let mut builder = ProveConfig::builder(prover.transcript());
if let Some(config) = config.transcript_commit() {
builder.transcript_commit(config.clone());
}
let disclosure_config = builder.build()?;
let ProverOutput {
transcript_commitments,
transcript_secrets,
..
} = prover.prove(&disclosure_config).await?;
let transcript = prover.transcript().clone();
let tls_transcript = prover.tls_transcript().clone();
prover.close().await?;
// Build an attestation request.
let mut builder = AttestationRequest::builder(config);
builder
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
.handshake_data(HandshakeData {
certs: tls_transcript
.server_cert_chain()
.expect("server cert chain is present")
.to_vec(),
sig: tls_transcript
.server_signature()
.expect("server signature is present")
.clone(),
binding: tls_transcript.certificate_binding().clone(),
})
.transcript(transcript)
.transcript_commitments(transcript_secrets, transcript_commitments);
let (request, secrets) = builder.build(&CryptoProvider::default())?;
// Send attestation request to notary.
request_tx
.send(request.clone())
.map_err(|_| "notary is not receiving attestation request".to_string())?;
// Receive attestation from notary.
let attestation = attestation_rx
.await
.map_err(|err| format!("notary did not respond with attestation: {err}"))?;
// Check the attestation is consistent with the Prover's view.
request.validate(&attestation)?;
Ok((attestation, secrets))
}
async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
socket: S,
request_rx: Receiver<AttestationRequest>,
attestation_tx: Sender<Attestation>,
) -> Result<(), Box<dyn std::error::Error>> {
// Set up Verifier.
let config_validator = ProtocolConfigValidator::builder()
.max_sent_data(tlsn_examples::MAX_SENT_DATA)
.max_recv_data(tlsn_examples::MAX_RECV_DATA)
.build()
.unwrap();
// Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
let verifier_config = VerifierConfig::builder()
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.protocol_config_validator(config_validator)
.build()
.unwrap();
let mut verifier = Verifier::new(verifier_config)
.setup(socket.compat())
.await?
.run()
.await?;
let VerifierOutput {
transcript_commitments,
encoder_secret,
..
} = verifier.verify(&VerifyConfig::default()).await?;
let tls_transcript = verifier.tls_transcript().clone();
verifier.close().await?;
let sent_len = tls_transcript
.sent()
.iter()
.filter_map(|record| {
if let ContentType::ApplicationData = record.typ {
Some(record.ciphertext.len())
} else {
None
}
})
.sum::<usize>();
let recv_len = tls_transcript
.recv()
.iter()
.filter_map(|record| {
if let ContentType::ApplicationData = record.typ {
Some(record.ciphertext.len())
} else {
None
}
})
.sum::<usize>();
// Receive attestation request from prover.
let request = request_rx.await?;
// Load a dummy signing key.
let signing_key = k256::ecdsa::SigningKey::from_bytes(&[1u8; 32].into())?;
let signer = Box::new(Secp256k1Signer::new(&signing_key.to_bytes())?);
let mut provider = CryptoProvider::default();
provider.signer.set_signer(signer);
// Build an attestation.
let mut att_config_builder = AttestationConfig::builder();
att_config_builder.supported_signature_algs(Vec::from_iter(provider.signer.supported_algs()));
let att_config = att_config_builder.build()?;
let mut builder = Attestation::builder(&att_config).accept_request(request)?;
builder
.connection_info(ConnectionInfo {
time: tls_transcript.time(),
version: (*tls_transcript.version()),
transcript_length: TranscriptLength {
sent: sent_len as u32,
received: recv_len as u32,
},
})
.server_ephemeral_key(tls_transcript.server_ephemeral_key().clone())
.transcript_commitments(transcript_commitments);
if let Some(encoder_secret) = encoder_secret {
builder.encoder_secret(encoder_secret);
}
let attestation = builder.build(&provider)?;
// Send attestation to prover.
attestation_tx
.send(attestation)
.map_err(|_| "prover is not receiving attestation".to_string())?;
Ok(())
}

View File

@@ -0,0 +1,96 @@
// This example demonstrates how to verify a presentation. See `present.rs` for
// an example of how to build a presentation from an attestation and connection
// secrets.
use std::time::Duration;
use clap::Parser;
use tlsn::{
attestation::{
presentation::{Presentation, PresentationOutput},
signing::VerifyingKey,
CryptoProvider,
},
config::{CertificateDer, RootCertStore},
verifier::ServerCertVerifier,
};
use tlsn_examples::ExampleType;
use tlsn_server_fixture_certs::CA_CERT_DER;
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Args {
/// What data to notarize.
#[clap(default_value_t, value_enum)]
example_type: ExampleType,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let args = Args::parse();
verify_presentation(&args.example_type).await
}
async fn verify_presentation(example_type: &ExampleType) -> Result<(), Box<dyn std::error::Error>> {
// Read the presentation from disk.
let presentation_path = tlsn_examples::get_file_path(example_type, "presentation");
let presentation: Presentation = bincode::deserialize(&std::fs::read(presentation_path)?)?;
// Create a crypto provider accepting the server-fixture's self-signed
// root certificate.
//
// This is only required for offline testing with the server-fixture. In
// production, use `CryptoProvider::default()` instead.
let root_cert_store = RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
};
let crypto_provider = CryptoProvider {
cert: ServerCertVerifier::new(&root_cert_store)?,
..Default::default()
};
let VerifyingKey {
alg,
data: key_data,
} = presentation.verifying_key();
println!(
"Verifying presentation with {alg} key: {}\n\n**Ask yourself, do you trust this key?**\n",
hex::encode(key_data)
);
// Verify the presentation.
let PresentationOutput {
server_name,
connection_info,
transcript,
// extensions, // Optionally, verify any custom extensions from prover/notary.
..
} = presentation.verify(&crypto_provider).unwrap();
// The time at which the connection was started.
let time = chrono::DateTime::UNIX_EPOCH + Duration::from_secs(connection_info.time);
let server_name = server_name.unwrap();
let mut partial_transcript = transcript.unwrap();
// Set the unauthenticated bytes so they are distinguishable.
partial_transcript.set_unauthed(b'X');
let sent = String::from_utf8_lossy(partial_transcript.sent_unsafe());
let recv = String::from_utf8_lossy(partial_transcript.received_unsafe());
println!("-------------------------------------------------------------------");
println!(
"Successfully verified that the data below came from a session with {server_name} at {time}.",
);
println!("Note that the data which the Prover chose not to disclose are shown as X.\n");
println!("Data sent:\n");
println!("{sent}\n");
println!("Data received:\n");
println!("{recv}\n");
println!("-------------------------------------------------------------------");
Ok(())
}

View File

@@ -10,15 +10,15 @@ use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
use tracing::instrument;
use tls_server_fixture::CA_CERT_DER;
use tlsn::{
config::{ProtocolConfig, ProtocolConfigValidator},
config::{CertificateDer, ProtocolConfig, ProtocolConfigValidator, RootCertStore},
connection::ServerName,
prover::{ProveConfig, Prover, ProverConfig, TlsConfig},
transcript::PartialTranscript,
verifier::{Verifier, VerifierConfig, VerifierOutput, VerifyConfig},
};
use tlsn_server_fixture::DEFAULT_FIXTURE_PORT;
use tlsn_server_fixture_certs::SERVER_DOMAIN;
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
const SECRET: &str = "TLSNotary's private key 🤡";
@@ -72,18 +72,16 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
// Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
let mut root_store = tls_core::anchors::RootCertStore::empty();
root_store
.add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
.unwrap();
let mut tls_config_builder = TlsConfig::builder();
tls_config_builder.root_store(root_store);
tls_config_builder.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
});
let tls_config = tls_config_builder.build().unwrap();
// Set up protocol configuration for prover.
let mut prover_config_builder = ProverConfig::builder();
prover_config_builder
.server_name(server_domain)
.server_name(ServerName::Dns(server_domain.try_into().unwrap()))
.tls_config(tls_config)
.protocol_config(
ProtocolConfig::builder()
@@ -194,13 +192,10 @@ async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
// Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
let mut root_store = tls_core::anchors::RootCertStore::empty();
root_store
.add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
.unwrap();
let verifier_config = VerifierConfig::builder()
.root_store(root_store)
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.protocol_config_validator(config_validator)
.build()
.unwrap();
@@ -234,6 +229,7 @@ async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
.unwrap_or_else(|| panic!("Expected valid data from {SERVER_DOMAIN}"));
// Check Session info: server name.
let ServerName::Dns(server_name) = server_name;
assert_eq!(server_name.as_str(), SERVER_DOMAIN);
transcript

View File

@@ -0,0 +1,5 @@
!noir/target/
# Ignore everything inside noir/target
noir/target/*
# Except noir.json
!noir/target/noir.json

View File

@@ -0,0 +1,167 @@
# Interactive Zero-Knowledge Age Verification with TLSNotary
This example demonstrates **privacy-preserving age verification** using TLSNotary and zero-knowledge proofs. It allows a prover to demonstrate they are 18+ years old without revealing their actual birth date or any other personal information.
## 🔍 How It Works (simplified overview)
```mermaid
sequenceDiagram
participant S as Tax Server<br/>(fixture)
participant P as Prover
participant V as Verifier
P->>S: Request tax data (with auth token) (MPC-TLS)
S->>P: Tax data including `date_of_birth` (MPC-TLS)
P->>V: Share transcript with redactions
P->>V: Commit to blinded hash of birth date
P->>P: Generate ZK proof of age ≥ 18
P->>V: Send ZK proof
V->>V: Verify transcript & ZK proof
V->>V: ✅ Confirm: Prover is 18+ (no birth date revealed)
```
### The Process
1. **MPC-TLS Session**: The Prover fetches tax information containing their birth date, while the Verifier jointly verifies the TLS session to ensure the data comes from the authentic server.
2. **Selective Disclosure**:
* The authorization token is **redacted**: the Verifier sees the plaintext request but not the token.
* The birth date is **committed** as a blinded hash: the Verifier cannot see the date, but the Prover is cryptographically bound to it.
(Depending on the use case, more data can be redacted or revealed; the blinded hash is sketched right after this list.)
3. **Zero-Knowledge Proof**: The Prover generates a ZK proof that the committed birth date corresponds to an age ≥ 18.
4. **Verification**: The Verifier checks both the TLS transcript and the ZK proof, confirming age ≥ 18 without learning the actual date of birth.
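For reference, the blinded hash in step 2 is a plain SHA-256 over the birth-date bytes followed by a random 16-byte blinder, matching the helper script `generate_test_data.rs` and the consistency check in `prover.rs`. A minimal sketch (the `commit_birth_date` helper is hypothetical):
```rust
use rand::RngCore;
use sha2::{Digest, Sha256};

// Computes the blinded commitment: SHA-256(date_of_birth || blinder).
fn commit_birth_date(date_of_birth: &str) -> ([u8; 16], [u8; 32]) {
    // A random 16-byte blinder prevents the short, guessable date string
    // from being brute-forced out of the hash.
    let mut blinder = [0u8; 16];
    rand::thread_rng().fill_bytes(&mut blinder);

    let mut hasher = Sha256::new();
    hasher.update(date_of_birth.as_bytes());
    hasher.update(blinder);
    (blinder, hasher.finalize().into())
}
```
In the actual protocol run, the Prover registers this commitment through `TranscriptCommitConfig` with `TranscriptCommitmentKind::Hash { alg: HashAlgId::SHA256 }`, as shown in `prover.rs`.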
### Example Data
The tax server returns data like this:
```json
{
"tax_year": 2024,
"taxpayer": {
"idnr": "12345678901",
"first_name": "Max",
"last_name": "Mustermann",
"date_of_birth": "1985-03-12",
// ...
}
}
```
## 🔐 Zero-Knowledge Proof Details
The ZK circuit proves: **"I know a birth date that hashes to the committed value AND indicates I am 18+ years old"**
**Public Inputs:**
- ✅ Verification date
- ✅ Committed blinded hash of birth date
**Private Inputs (Hidden):**
- 🔒 Actual birth date plaintext
- 🔒 Random blinder used in hash commitment
**What the Verifier Learns:**
- ✅ The prover is 18+ years old
- ✅ The birth date is authentic (from the MPC-TLS session)
Everything else remains private.
## 🏃 Run the Example
1. **Start the test server** (from repository root):
```bash
RUST_LOG=info PORT=4000 cargo run --bin tlsn-server-fixture
```
2. **Run the age verification** (in a new terminal):
```bash
SERVER_PORT=4000 cargo run --release --example interactive_zk
```
3. **For detailed logs**:
```bash
RUST_LOG=debug,yamux=info,uid_mux=info SERVER_PORT=4000 cargo run --release --example interactive_zk
```
### Expected Output
```
Successfully verified https://test-server.io:4000/elster
Age verified in ZK: 18+ ✅
Verified sent data:
GET https://test-server.io:4000/elster HTTP/1.1
host: test-server.io
connection: close
authorization: 🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈
Verified received data:
🙈🙈🙈🙈🙈🙈🙈🙈[truncated for brevity]...🙈🙈🙈🙈🙈"tax_year":2024🙈🙈🙈🙈🙈...
```
> 💡 **Note**: In this demo, both Prover and Verifier run on the same machine. In production, they would operate on separate systems.
> 💡 **Note**: This demo assumes that the tax server serves correct data, and that only the submitter of the tax data has access to the specified page.
## 🛠 Development
### Project Structure
```
interactive_zk/
├── prover.rs             # Prover implementation
├── verifier.rs           # Verifier implementation
├── types.rs              # Shared types
├── interactive_zk.rs     # Main example runner
├── noir/                 # Zero-knowledge circuit
│   ├── src/main.nr       # Noir circuit code
│   ├── target/           # Compiled circuit artifacts
│   ├── Nargo.toml        # Noir project config
│   ├── Prover.toml       # Example input for `nargo execute`
│   └── generate_test_data.rs  # Rust script to generate Noir test data
└── README.md
```
### Noir Circuit Commands
We use [Mopro's `noir_rs`](https://zkmopro.org/docs/crates/noir-rs/) for ZK proof generation. The **circuit is pre-compiled and ready to use**. You don't need to install Noir tools to run the example. But if you want to change or test the circuit in isolation, you can use the following instructions.
Before you proceed, we recommend double-checking that your Noir tooling matches the versions used in Mopro's `noir_rs`:
```sh
# Install correct Noir and BB versions (important for compatibility!)
noirup --version 1.0.0-beta.8
bbup -v 1.0.0-nightly.20250723
```
If you don't have `noirup` and `bbup` installed yet, check [Noir's Quick Start](https://noir-lang.org/docs/getting_started/quick_start).
To compile the circuit, go to the `noir` folder and run `nargo compile`.
To check and experiment with the Noir circuit, you can use these commands:
* Execute Circuit: Compile the circuit and run it with sample data from `Prover.toml`:
```sh
nargo execute
```
* Generate Verification Key: Create the verification key needed to verify proofs
```sh
bb write_vk -b ./target/noir.json -o ./target
```
* Generate Proof: Create a zero-knowledge proof using the circuit and witness data.
```sh
bb prove --bytecode_path ./target/noir.json --witness_path ./target/noir.gz -o ./target
```
* Verify Proof: Verify that a proof is valid using the verification key.
```sh
bb verify -k ./target/vk -p ./target/proof
```
* Run the Noir tests:
```sh
nargo test --show-output
```
To create extra tests, you can use `./generate_test_data.rs` to help with generating correct blinders and hashes.
## 📚 Learn More
- [TLSNotary Documentation](https://docs.tlsnotary.org/)
- [Noir Language Guide](https://noir-lang.org/)
- [Zero-Knowledge Proofs Explained](https://ethereum.org/en/zero-knowledge-proofs/)
- [Mopro ZK Toolkit](https://zkmopro.org/)

View File

@@ -0,0 +1,59 @@
mod prover;
mod types;
mod verifier;
use prover::prover;
use std::{
env,
net::{IpAddr, SocketAddr},
};
use tlsn_server_fixture::DEFAULT_FIXTURE_PORT;
use tlsn_server_fixture_certs::SERVER_DOMAIN;
use verifier::verifier;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
tracing_subscriber::fmt::init();
let server_host: String = env::var("SERVER_HOST").unwrap_or("127.0.0.1".into());
let server_port: u16 = env::var("SERVER_PORT")
.map(|port| port.parse().expect("port should be valid integer"))
.unwrap_or(DEFAULT_FIXTURE_PORT);
// We use SERVER_DOMAIN here to make sure it matches the domain in the test
// server's certificate.
let uri = format!("https://{SERVER_DOMAIN}:{server_port}/elster");
let server_ip: IpAddr = server_host
.parse()
.map_err(|e| format!("Invalid IP address '{}': {}", server_host, e))?;
let server_addr = SocketAddr::from((server_ip, server_port));
// Connect prover and verifier.
let (prover_socket, verifier_socket) = tokio::io::duplex(1 << 23);
let (prover_extra_socket, verifier_extra_socket) = tokio::io::duplex(1 << 23);
let (_, transcript) = tokio::try_join!(
prover(prover_socket, prover_extra_socket, &server_addr, &uri),
verifier(verifier_socket, verifier_extra_socket)
)?;
println!("---");
println!("Successfully verified {}", &uri);
println!("Age verified in ZK: 18+ ✅\n");
println!(
"Verified sent data:\n{}",
bytes_to_redacted_string(transcript.sent_unsafe())
);
println!(
"Verified received data:\n{}",
bytes_to_redacted_string(transcript.received_unsafe())
);
Ok(())
}
/// Render redacted bytes as `🙈`.
pub fn bytes_to_redacted_string(bytes: &[u8]) -> String {
String::from_utf8_lossy(bytes).replace('\0', "🙈")
}

View File

@@ -0,0 +1,8 @@
[package]
name = "noir"
type = "bin"
authors = [""]
[dependencies]
sha256 = { tag = "v0.1.5", git = "https://github.com/noir-lang/sha256" }
date = { tag = "v0.5.4", git = "https://github.com/madztheo/noir-date.git" }

View File

@@ -0,0 +1,8 @@
blinder = [108, 93, 120, 205, 15, 35, 159, 124, 243, 96, 22, 128, 16, 149, 219, 216]
committed_hash = [186, 158, 101, 39, 49, 48, 26, 83, 242, 96, 10, 221, 121, 174, 62, 50, 136, 132, 232, 58, 25, 32, 66, 196, 99, 85, 66, 85, 255, 1, 202, 254]
date_of_birth = "1985-03-12"
[proof_date]
day = "29"
month = "08"
year = "2025"

View File

@@ -0,0 +1,64 @@
#!/usr/bin/env -S cargo +nightly -Zscript
---
[package]
name = "generate_test_data"
version = "0.0.0"
edition = "2021"
publish = false
[dependencies]
sha2 = "0.10"
rand = "0.8"
chrono = "0.4"
---
use chrono::Datelike;
use chrono::Local;
use rand::RngCore;
use sha2::{Digest, Sha256};
fn main() {
// 1. Birthdate string (fixed)
let dob_str = "1985-03-12"; // 10 bytes long
let proof_date = Local::now().date_naive();
let proof_year = proof_date.year();
let proof_month = proof_date.month();
let proof_day = proof_date.day();
// 2. Generate random 16-byte blinder
let mut blinder = [0u8; 16];
rand::thread_rng().fill_bytes(&mut blinder);
// 3. Concatenate blinder + dob string bytes
let mut preimage = Vec::with_capacity(26);
preimage.extend_from_slice(dob_str.as_bytes());
preimage.extend_from_slice(&blinder);
// 4. Hash it
let hash = Sha256::digest(&preimage);
let blinder = blinder
.iter()
.map(|b| b.to_string())
.collect::<Vec<_>>()
.join(", ");
let committed_hash = hash
.iter()
.map(|b| b.to_string())
.collect::<Vec<_>>()
.join(", ");
println!(
"
// Private input
let date_of_birth = \"{dob_str}\";
let blinder = [{blinder}];
// Public input
let proof_date = date::Date {{ year: {proof_year}, month: {proof_month}, day: {proof_day} }};
let committed_hash = [{committed_hash}];
main(proof_date, committed_hash, date_of_birth, blinder);
"
);
}

View File

@@ -0,0 +1,82 @@
use dep::date::Date;
fn main(
// Public inputs
proof_date: pub date::Date, // "2025-08-29"
committed_hash: pub [u8; 32], // Hash of (blinder || dob string)
// Private inputs
date_of_birth: str<10>, // "1985-03-12"
blinder: [u8; 16], // Random 16-byte blinder
) {
let is_18 = check_18(date_of_birth, proof_date);
let correct_hash = check_hash(date_of_birth, blinder, committed_hash);
assert(correct_hash);
assert(is_18);
}
fn check_18(date_of_birth: str<10>, proof_date: date::Date) -> bool {
let dob = parse_birth_date(date_of_birth);
let is_18 = dob.add_years(18).lt(proof_date);
println(f"Is 18? {is_18}");
is_18
}
fn check_hash(date_of_birth: str<10>, blinder: [u8; 16], committed_hash: [u8; 32]) -> bool {
let hash_input: [u8; 26] = make_hash_input(date_of_birth, blinder);
let computed_hash = sha256::sha256_var(hash_input, 26);
let correct_hash = computed_hash == committed_hash;
println(f"Correct hash? {correct_hash}");
correct_hash
}
fn make_hash_input(dob: str<10>, blinder: [u8; 16]) -> [u8; 26] {
let mut input: [u8; 26] = [0; 26];
for i in 0..10 {
input[i] = dob.as_bytes()[i];
}
for i in 0..16 {
input[10 + i] = blinder[i];
}
input
}
pub fn parse_birth_date(birth_date: str<10>) -> date::Date {
let date: [u8; 10] = birth_date.as_bytes();
let date_str: str<8> =
[date[0], date[1], date[2], date[3], date[5], date[6], date[8], date[9]].as_str_unchecked();
Date::from_str_long_year(date_str)
}
#[test]
fn test_max_is_over_18() {
// Private input
let date_of_birth = "1985-03-12";
let blinder = [120, 80, 62, 10, 76, 60, 130, 98, 147, 161, 139, 126, 27, 236, 36, 56];
// Public input
let proof_date = date::Date { year: 2025, month: 9, day: 2 };
let committed_hash = [
229, 118, 202, 216, 213, 230, 125, 163, 48, 178, 118, 225, 84, 7, 140, 63, 173, 255, 163,
208, 163, 3, 63, 204, 37, 120, 254, 246, 202, 116, 122, 145,
];
main(proof_date, committed_hash, date_of_birth, blinder);
}
#[test(should_fail)]
fn test_under_18() {
// Private input
let date_of_birth = "2010-08-01";
let blinder = [160, 23, 57, 158, 141, 195, 155, 132, 109, 242, 48, 220, 70, 217, 229, 189];
// Public input
let proof_date = date::Date { year: 2025, month: 8, day: 29 };
let committed_hash = [
16, 132, 194, 62, 232, 90, 157, 153, 4, 231, 1, 54, 226, 3, 87, 174, 129, 177, 80, 69, 37,
222, 209, 91, 168, 156, 9, 109, 108, 144, 168, 109,
];
main(proof_date, committed_hash, date_of_birth, blinder);
}

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,371 @@
use std::net::SocketAddr;
use crate::types::received_commitments;
use super::types::ZKProofBundle;
use chrono::{Datelike, Local, NaiveDate};
use http_body_util::Empty;
use hyper::{body::Bytes, header, Request, StatusCode, Uri};
use hyper_util::rt::TokioIo;
use k256::sha2::{Digest, Sha256};
use noir::{
barretenberg::{
prove::prove_ultra_honk, srs::setup_srs_from_bytecode,
verify::get_ultra_honk_verification_key,
},
witness::from_vec_str_to_witness_map,
};
use serde_json::Value;
use spansy::{
http::{BodyContent, Requests, Responses},
Spanned,
};
use tls_server_fixture::CA_CERT_DER;
use tlsn::{
config::{CertificateDer, ProtocolConfig, RootCertStore},
connection::ServerName,
hash::HashAlgId,
prover::{ProveConfig, ProveConfigBuilder, Prover, ProverConfig, TlsConfig},
transcript::{
hash::{PlaintextHash, PlaintextHashSecret},
Direction, TranscriptCommitConfig, TranscriptCommitConfigBuilder, TranscriptCommitmentKind,
TranscriptSecret,
},
};
use tlsn_examples::MAX_RECV_DATA;
use tokio::io::AsyncWriteExt;
use tlsn_examples::MAX_SENT_DATA;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
use tracing::instrument;
#[instrument(skip(verifier_socket, verifier_extra_socket))]
pub async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
verifier_socket: T,
mut verifier_extra_socket: T,
server_addr: &SocketAddr,
uri: &str,
) -> Result<(), Box<dyn std::error::Error>> {
let uri = uri.parse::<Uri>()?;
if uri.scheme().map(|s| s.as_str()) != Some("https") {
return Err("URI must use HTTPS scheme".into());
}
let server_domain = uri.authority().ok_or("URI must have authority")?.host();
// Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
let mut tls_config_builder = TlsConfig::builder();
tls_config_builder.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
});
let tls_config = tls_config_builder.build()?;
// Set up protocol configuration for prover.
let mut prover_config_builder = ProverConfig::builder();
prover_config_builder
.server_name(ServerName::Dns(server_domain.try_into()?))
.tls_config(tls_config)
.protocol_config(
ProtocolConfig::builder()
.max_sent_data(MAX_SENT_DATA)
.max_recv_data(MAX_RECV_DATA)
.build()?,
);
let prover_config = prover_config_builder.build()?;
// Create prover and connect to verifier.
//
// Perform the setup phase with the verifier.
let prover = Prover::new(prover_config)
.setup(verifier_socket.compat())
.await?;
// Connect to TLS Server.
let tls_client_socket = tokio::net::TcpStream::connect(server_addr).await?;
// Pass server connection into the prover.
let (mpc_tls_connection, prover_fut) = prover.connect(tls_client_socket.compat()).await?;
// Wrap the connection in a TokioIo compatibility layer to use it with hyper.
let mpc_tls_connection = TokioIo::new(mpc_tls_connection.compat());
// Spawn the Prover to run in the background.
let prover_task = tokio::spawn(prover_fut);
// MPC-TLS Handshake.
let (mut request_sender, connection) =
hyper::client::conn::http1::handshake(mpc_tls_connection).await?;
// Spawn the connection to run in the background.
tokio::spawn(connection);
// MPC-TLS: Send Request and wait for Response.
let request = Request::builder()
.uri(uri.clone())
.header("Host", server_domain)
.header("Connection", "close")
.header(header::AUTHORIZATION, "Bearer random_auth_token")
.method("GET")
.body(Empty::<Bytes>::new())?;
let response = request_sender.send_request(request).await?;
if response.status() != StatusCode::OK {
return Err(format!("MPC-TLS request failed with status {}", response.status()).into());
}
// Create proof for the Verifier.
let mut prover = prover_task.await??;
let transcript = prover.transcript().clone();
let mut prove_config_builder = ProveConfig::builder(&transcript);
// Reveal the DNS name.
prove_config_builder.server_identity();
let sent: &[u8] = transcript.sent();
let received: &[u8] = transcript.received();
let sent_len = sent.len();
let recv_len = received.len();
tracing::info!("Sent length: {}, Received length: {}", sent_len, recv_len);
// Reveal the entire HTTP request except for the authorization bearer token
reveal_request(sent, &mut prove_config_builder)?;
// Create hash commitment for the date of birth field from the response
let mut transcript_commitment_builder = TranscriptCommitConfig::builder(&transcript);
transcript_commitment_builder.default_kind(TranscriptCommitmentKind::Hash {
alg: HashAlgId::SHA256,
});
reveal_received(
received,
&mut prove_config_builder,
&mut transcript_commitment_builder,
)?;
let transcripts_commitment_config = transcript_commitment_builder.build()?;
prove_config_builder.transcript_commit(transcripts_commitment_config);
let prove_config = prove_config_builder.build()?;
// MPC-TLS prove
let prover_output = prover.prove(&prove_config).await?;
prover.close().await?;
// Prove birthdate is more than 18 years ago.
let received_commitments = received_commitments(&prover_output.transcript_commitments);
let received_commitment = received_commitments
.first()
.ok_or("No received commitments found")?; // committed hash (of date of birth string)
let received_secrets = received_secrets(&prover_output.transcript_secrets);
let received_secret = received_secrets
.first()
.ok_or("No received secrets found")?; // hash blinder
let proof_input = prepare_zk_proof_input(received, received_commitment, received_secret)?;
let proof_bundle = generate_zk_proof(&proof_input)?;
// Send the ZK proof bundle to the verifier.
let serialized_proof = bincode::serialize(&proof_bundle)?;
verifier_extra_socket.write_all(&serialized_proof).await?;
verifier_extra_socket.shutdown().await?;
Ok(())
}
// Reveal everything from the request, except for the authorization token.
fn reveal_request(
request: &[u8],
builder: &mut ProveConfigBuilder<'_>,
) -> Result<(), Box<dyn std::error::Error>> {
let reqs = Requests::new_from_slice(request).collect::<Result<Vec<_>, _>>()?;
let req = reqs.first().ok_or("No requests found")?;
if req.request.method.as_str() != "GET" {
return Err(format!("Expected GET method, found {}", req.request.method.as_str()).into());
}
let authorization_header = req
.headers_with_name(header::AUTHORIZATION.as_str())
.next()
.ok_or("Authorization header not found")?;
let start_pos = authorization_header
.span()
.indices()
.min()
.ok_or("Could not find authorization header start position")?
+ header::AUTHORIZATION.as_str().len()
+ 2;
let end_pos =
start_pos + authorization_header.span().len() - header::AUTHORIZATION.as_str().len() - 2;
builder.reveal_sent(&(0..start_pos))?;
builder.reveal_sent(&(end_pos..request.len()))?;
Ok(())
}
fn reveal_received(
received: &[u8],
builder: &mut ProveConfigBuilder<'_>,
transcript_commitment_builder: &mut TranscriptCommitConfigBuilder,
) -> Result<(), Box<dyn std::error::Error>> {
let resp = Responses::new_from_slice(received).collect::<Result<Vec<_>, _>>()?;
let response = resp.first().ok_or("No responses found")?;
let body = response.body.as_ref().ok_or("Response body not found")?;
let BodyContent::Json(json) = &body.content else {
return Err("Expected JSON body content".into());
};
// reveal tax year
let tax_year = json
.get("tax_year")
.ok_or("tax_year field not found in JSON")?;
let start_pos = tax_year
.span()
.indices()
.min()
.ok_or("Could not find tax_year start position")?
- 11;
let end_pos = tax_year
.span()
.indices()
.max()
.ok_or("Could not find tax_year end position")?
+ 1;
builder.reveal_recv(&(start_pos..end_pos))?;
// commit to hash of date of birth
let dob = json
.get("taxpayer.date_of_birth")
.ok_or("taxpayer.date_of_birth field not found in JSON")?;
transcript_commitment_builder.commit_recv(dob.span())?;
Ok(())
}
// Extract the hash secrets for received data from the prover output.
fn received_secrets(transcript_secrets: &[TranscriptSecret]) -> Vec<&PlaintextHashSecret> {
transcript_secrets
.iter()
.filter_map(|secret| match secret {
TranscriptSecret::Hash(hash) if hash.direction == Direction::Received => Some(hash),
_ => None,
})
.collect()
}
#[derive(Debug)]
pub struct ZKProofInput {
dob: Vec<u8>,
proof_date: NaiveDate,
blinder: Vec<u8>,
committed_hash: Vec<u8>,
}
// Build the ZK proof input and verify that the committed hash matches the blinded hash of the date of birth.
fn prepare_zk_proof_input(
received: &[u8],
received_commitment: &PlaintextHash,
received_secret: &PlaintextHashSecret,
) -> Result<ZKProofInput, Box<dyn std::error::Error>> {
assert_eq!(received_commitment.direction, Direction::Received);
assert_eq!(received_commitment.hash.alg, HashAlgId::SHA256);
let hash = &received_commitment.hash;
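// `idx` is the byte range of the committed plaintext within the received
// transcript; slice it out to recover the date-of-birth string.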
let dob_start = received_commitment
.idx
.min()
.ok_or("No start index for DOB")?;
let dob_end = received_commitment
.idx
.end()
.ok_or("No end index for DOB")?;
let dob = received[dob_start..dob_end].to_vec();
let blinder = received_secret.blinder.as_bytes().to_vec();
let committed_hash = hash.value.as_bytes().to_vec();
let proof_date = Local::now().date_naive();
assert_eq!(received_secret.direction, Direction::Received);
assert_eq!(received_secret.alg, HashAlgId::SHA256);
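// The transcript commitment is a blinded hash: recompute SHA-256(dob || blinder)
// locally and make sure it matches the hash committed during MPC-TLS.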
let mut hasher = Sha256::new();
hasher.update(&dob);
hasher.update(&blinder);
let computed_hash = hasher.finalize();
if committed_hash != computed_hash.as_slice() {
return Err("Computed hash does not match committed hash".into());
}
Ok(ZKProofInput {
dob,
proof_date,
committed_hash,
blinder,
})
}
fn generate_zk_proof(
proof_input: &ZKProofInput,
) -> Result<ZKProofBundle, Box<dyn std::error::Error>> {
tracing::info!("🔒 Generating ZK proof with Noir...");
const PROGRAM_JSON: &str = include_str!("./noir/target/noir.json");
// 1. Load bytecode from program.json
let json: Value = serde_json::from_str(PROGRAM_JSON)?;
let bytecode = json["bytecode"]
.as_str()
.ok_or("bytecode field not found in program.json")?;
let mut inputs: Vec<String> = vec![];
inputs.push(proof_input.proof_date.day().to_string());
inputs.push(proof_input.proof_date.month().to_string());
inputs.push(proof_input.proof_date.year().to_string());
inputs.extend(proof_input.committed_hash.iter().map(|b| b.to_string()));
inputs.extend(proof_input.dob.iter().map(|b| b.to_string()));
inputs.extend(proof_input.blinder.iter().map(|b| b.to_string()));
let proof_date = proof_input.proof_date.to_string();
tracing::info!(
"Public inputs : Proof date ({}) and committed hash ({})",
proof_date,
hex::encode(&proof_input.committed_hash)
);
tracing::info!(
"Private inputs: Blinder ({}) and Date of Birth ({})",
hex::encode(&proof_input.blinder),
String::from_utf8_lossy(&proof_input.dob)
);
tracing::debug!("Witness inputs {:?}", inputs);
let input_refs: Vec<&str> = inputs.iter().map(String::as_str).collect();
let witness = from_vec_str_to_witness_map(input_refs)?;
// Setup SRS
setup_srs_from_bytecode(bytecode, None, false)?;
// Verification key
let vk = get_ultra_honk_verification_key(bytecode, false)?;
// Generate proof
let proof = prove_ultra_honk(bytecode, witness.clone(), vk.clone(), false)?;
tracing::info!("✅ Proof generated ({} bytes)", proof.len());
let proof_bundle = ZKProofBundle { vk, proof };
Ok(proof_bundle)
}

View File

@@ -0,0 +1,21 @@
use serde::{Deserialize, Serialize};
use tlsn::transcript::{hash::PlaintextHash, Direction, TranscriptCommitment};
#[derive(Serialize, Deserialize, Debug)]
pub struct ZKProofBundle {
pub vk: Vec<u8>,
pub proof: Vec<u8>,
}
// Extract the hash commitments for received data from the prover output.
pub fn received_commitments(
transcript_commitments: &[TranscriptCommitment],
) -> Vec<&PlaintextHash> {
transcript_commitments
.iter()
.filter_map(|commitment| match commitment {
TranscriptCommitment::Hash(hash) if hash.direction == Direction::Received => Some(hash),
_ => None,
})
.collect()
}

View File

@@ -0,0 +1,184 @@
use crate::types::received_commitments;
use super::types::ZKProofBundle;
use chrono::{Local, NaiveDate};
use noir::barretenberg::verify::{get_ultra_honk_verification_key, verify_ultra_honk};
use serde_json::Value;
use tls_server_fixture::CA_CERT_DER;
use tlsn::{
config::{CertificateDer, ProtocolConfigValidator, RootCertStore},
connection::ServerName,
hash::HashAlgId,
transcript::{Direction, PartialTranscript},
verifier::{Verifier, VerifierConfig, VerifierOutput, VerifyConfig},
};
use tlsn_examples::{MAX_RECV_DATA, MAX_SENT_DATA};
use tlsn_server_fixture_certs::SERVER_DOMAIN;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite};
use tokio_util::compat::TokioAsyncReadCompatExt;
use tracing::instrument;
#[instrument(skip(socket, extra_socket))]
pub async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
socket: T,
mut extra_socket: T,
) -> Result<PartialTranscript, Box<dyn std::error::Error>> {
// Set up Verifier.
let config_validator = ProtocolConfigValidator::builder()
.max_sent_data(MAX_SENT_DATA)
.max_recv_data(MAX_RECV_DATA)
.build()?;
// Create a root certificate store with the server-fixture's self-signed
// certificate. This is only required for offline testing with the
// server-fixture.
let verifier_config = VerifierConfig::builder()
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.protocol_config_validator(config_validator)
.build()?;
let verifier = Verifier::new(verifier_config);
// Receive authenticated data.
let VerifierOutput {
server_name,
transcript,
transcript_commitments,
..
} = verifier
.verify(socket.compat(), &VerifyConfig::default())
.await?;
let server_name = server_name.ok_or("Prover should have revealed server name")?;
let transcript = transcript.ok_or("Prover should have revealed transcript data")?;
// Check the sent data: the request must have been made to the expected host.
let sent = transcript.sent_unsafe().to_vec();
let sent_data = String::from_utf8(sent.clone())
.map_err(|e| format!("Verifier expected valid UTF-8 sent data: {}", e))?;
if !sent_data.contains(SERVER_DOMAIN) {
return Err(format!(
"Verification failed: Expected host {} not found in sent data",
SERVER_DOMAIN
)
.into());
}
// Check the hash commitment over the received data.
let received_commitments = received_commitments(&transcript_commitments);
let received_commitment = received_commitments
.first()
.ok_or("Missing received hash commitment")?;
assert!(received_commitment.direction == Direction::Received);
assert!(received_commitment.hash.alg == HashAlgId::SHA256);
let committed_hash = &received_commitment.hash;
// Check Session info: server name.
let ServerName::Dns(server_name) = server_name;
if server_name.as_str() != SERVER_DOMAIN {
return Err(format!(
"Server name mismatch: expected {}, got {}",
SERVER_DOMAIN,
server_name.as_str()
)
.into());
}
// Receive ZKProof information from prover
let mut buf = Vec::new();
extra_socket.read_to_end(&mut buf).await?;
if buf.is_empty() {
return Err("No ZK proof data received from prover".into());
}
let msg: ZKProofBundle = bincode::deserialize(&buf)
.map_err(|e| format!("Failed to deserialize ZK proof bundle: {}", e))?;
// Verify zk proof
const PROGRAM_JSON: &str = include_str!("./noir/target/noir.json");
let json: Value = serde_json::from_str(PROGRAM_JSON)
.map_err(|e| format!("Failed to parse Noir circuit: {}", e))?;
let bytecode = json["bytecode"]
.as_str()
.ok_or("Bytecode field missing in noir.json")?;
let vk = get_ultra_honk_verification_key(bytecode, false)
.map_err(|e| format!("Failed to get verification key: {}", e))?;
if vk != msg.vk {
return Err("Verification key mismatch between computed and provided by prover".into());
}
let proof = msg.proof.clone();
// Validate proof has enough data.
// The proof should start with the public inputs:
// * We expect at least 3 * 32 bytes for the three date fields (day, month,
// year)
// * and 32*32 bytes for the hash
let min_bytes = (32 + 3) * 32;
if proof.len() < min_bytes {
return Err(format!(
"Proof too short: expected at least {} bytes, got {}",
min_bytes,
proof.len()
)
.into());
}
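// The checks below assume each public input is serialized as a 32-byte
// big-endian field element: the three date components sit in the last bytes
// of the first three chunks, and each hash byte occupies a chunk of its own.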
// Check that the proof date is correctly included in the proof
let proof_date_day: u32 = u32::from_be_bytes(proof[28..32].try_into()?);
let proof_date_month: u32 = u32::from_be_bytes(proof[60..64].try_into()?);
let proof_date_year: i32 = i32::from_be_bytes(proof[92..96].try_into()?);
let proof_date_from_proof =
NaiveDate::from_ymd_opt(proof_date_year, proof_date_month, proof_date_day)
.ok_or("Invalid proof date in proof")?;
let today = Local::now().date_naive();
if (today - proof_date_from_proof).num_days() < 0 {
return Err(format!(
"The proof date can only be today or in the past: provided {}, today {}",
proof_date_from_proof, today
)
.into());
}
// Check that the committed hash in the proof matches the hash from the
// commitment
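// Each hash byte is its own public input, so take the last byte of each of
// the 32 field elements that follow the three date chunks.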
let committed_hash_in_proof: Vec<u8> = proof
.chunks(32)
.skip(3) // skip the first 3 chunks
.take(32)
.map(|chunk| *chunk.last().unwrap_or(&0))
.collect();
let expected_hash = committed_hash.value.as_bytes().to_vec();
if committed_hash_in_proof != expected_hash {
tracing::error!(
"❌ The hash in the proof does not match the committed hash in MPC-TLS: {} != {}",
hex::encode(&committed_hash_in_proof),
hex::encode(&expected_hash)
);
return Err("Hash in proof does not match committed hash in MPC-TLS".into());
}
tracing::info!(
"✅ The hash in the proof matches the committed hash in MPC-TLS ({})",
hex::encode(&expected_hash)
);
// Finally verify the proof
let is_valid = verify_ultra_honk(msg.proof, msg.vk)
.map_err(|e| format!("ZKProof Verification failed: {}", e))?;
if !is_valid {
tracing::error!("❌ Age verification ZKProof failed to verify");
return Err("Age verification ZKProof failed to verify".into());
}
tracing::info!("✅ Age verification ZKProof successfully verified");
Ok(transcript)
}

View File

@@ -1,6 +1,6 @@
[package]
name = "tlsn-formats"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.13"
edition = "2021"
[lints]

View File

@@ -3,7 +3,19 @@
# Ensure the script runs in the folder that contains this script
cd "$(dirname "$0")"
cargo build --release --package tlsn-harness-runner --package tlsn-harness-executor --package tlsn-server-fixture --package tlsn-harness-plot
RUNNER_FEATURES=""
EXECUTOR_FEATURES=""
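# Pass "debug" as the first argument to build the harness with its debug features.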
if [ "$1" = "debug" ]; then
RUNNER_FEATURES="--features debug"
EXECUTOR_FEATURES="--no-default-features --features debug"
fi
cargo build --release \
--package tlsn-harness-runner $RUNNER_FEATURES \
--package tlsn-harness-executor $EXECUTOR_FEATURES \
--package tlsn-server-fixture \
--package tlsn-harness-plot
mkdir -p bin

View File

@@ -26,7 +26,7 @@ pub enum Id {
One,
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub enum IoMode {
Client,
Server,

View File

@@ -7,12 +7,12 @@ docker build --pull -t tlsn-bench . -f ./crates/harness/harness.Dockerfile
Next run the benches with:
```
docker run -it --privileged -v ./crates/harness/:/benches tlsn-bench bash -c "runner setup; runner bench"
docker run -it --privileged -v $(pwd)/crates/harness/:/benches tlsn-bench bash -c "runner setup; runner bench"
```
The `--privileged` parameter is required because this test bench needs permission to create networks with certain parameters.
To run the benches in a browser run:
```
docker run -it --privileged -v ./crates/harness/:/benches tlsn-bench bash -c "cd /; runner setup; runner --target browser bench"
docker run -it --privileged -v $(pwd)/crates/harness/:/benches tlsn-bench bash -c "runner setup; runner --target browser bench"
```

View File

@@ -1,10 +1,14 @@
[target.wasm32-unknown-unknown]
rustflags = [
"-C",
"target-feature=+atomics,+bulk-memory,+mutable-globals,+simd128",
"-C",
"-Ctarget-feature=+atomics,+bulk-memory,+mutable-globals,+simd128",
"-Clink-arg=--shared-memory",
# 4GB
"link-arg=--max-memory=4294967296",
"-Clink-arg=--max-memory=4294967296",
"-Clink-arg=--import-memory",
"-Clink-arg=--export=__wasm_init_tls",
"-Clink-arg=--export=__tls_size",
"-Clink-arg=--export=__tls_align",
"-Clink-arg=--export=__tls_base",
"--cfg",
'getrandom_backend="wasm_js"',
]

View File

@@ -4,18 +4,19 @@ version = "0.1.0"
edition = "2024"
publish = false
[features]
# Disable tracing events as a workaround for issue 959.
default = ["tracing/release_max_level_off"]
# Used to debug the executor itself.
debug = []
[lib]
name = "harness_executor"
crate-type = ["cdylib", "rlib"]
[package.metadata.wasm-pack.profile.custom]
wasm-opt = ["-O3"]
[dependencies]
tlsn-harness-core = { workspace = true }
tlsn = { workspace = true }
tlsn-core = { workspace = true }
tlsn-tls-core = { workspace = true }
tlsn-server-fixture-certs = { workspace = true }
inventory = { workspace = true }
@@ -33,6 +34,7 @@ tokio = { workspace = true, features = ["full"] }
tokio-util = { workspace = true, features = ["compat"] }
[target.'cfg(target_arch = "wasm32")'.dependencies]
tracing = { workspace = true }
wasm-bindgen = { workspace = true }
tlsn-wasm = { workspace = true }
js-sys = { workspace = true }

View File

@@ -5,7 +5,8 @@ use futures::{AsyncReadExt, AsyncWriteExt, TryFutureExt};
use harness_core::bench::{Bench, ProverMetrics};
use tlsn::{
config::ProtocolConfig,
config::{CertificateDer, ProtocolConfig, RootCertStore},
connection::ServerName,
prover::{ProveConfig, Prover, ProverConfig, TlsConfig},
};
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
@@ -32,20 +33,17 @@ pub async fn bench_prover(provider: &IoProvider, config: &Bench) -> Result<Prove
let protocol_config = builder.build()?;
let mut root_store = tls_core::anchors::RootCertStore::empty();
root_store
.add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
.unwrap();
let mut tls_config_builder = TlsConfig::builder();
tls_config_builder.root_store(root_store);
tls_config_builder.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
});
let tls_config = tls_config_builder.build()?;
let prover = Prover::new(
ProverConfig::builder()
.tls_config(tls_config)
.protocol_config(protocol_config)
.server_name(SERVER_DOMAIN)
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
.build()?,
);

View File

@@ -2,7 +2,7 @@ use anyhow::Result;
use harness_core::bench::Bench;
use tlsn::{
config::ProtocolConfigValidator,
config::{CertificateDer, ProtocolConfigValidator, RootCertStore},
verifier::{Verifier, VerifierConfig, VerifyConfig},
};
use tlsn_server_fixture_certs::CA_CERT_DER;
@@ -17,14 +17,11 @@ pub async fn bench_verifier(provider: &IoProvider, config: &Bench) -> Result<()>
let protocol_config = builder.build()?;
let mut root_store = tls_core::anchors::RootCertStore::empty();
root_store
.add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
.unwrap();
let verifier = Verifier::new(
VerifierConfig::builder()
.root_store(root_store)
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.protocol_config_validator(protocol_config)
.build()?,
);

View File

@@ -81,7 +81,11 @@ mod native {
mod wasm {
use super::IoProvider;
use crate::io::Io;
use anyhow::Result;
use anyhow::{Result, anyhow};
use std::time::Duration;
const CHECK_WS_OPEN_DELAY_MS: usize = 50;
const MAX_RETRIES: usize = 50;
impl IoProvider {
/// Provides a connection to the server.
@@ -107,7 +111,27 @@ mod wasm {
&self.config.proto_1.0,
self.config.proto_1.1,
);
let (_, io) = ws_stream_wasm::WsMeta::connect(url, None).await?;
let mut retries = 0;
let io = loop {
// Connect to the websocket relay.
let (_, io) = ws_stream_wasm::WsMeta::connect(url.clone(), None).await?;
// Allow some time for the relay to initiate a connection to
// the verifier.
std::thread::sleep(Duration::from_millis(CHECK_WS_OPEN_DELAY_MS as u64));
// If the relay didn't close the io, most likely the verifier
// accepted the connection.
if io.ready_state() == ws_stream_wasm::WsState::Open {
break io;
}
retries += 1;
if retries > MAX_RETRIES {
return Err(anyhow!("verifier did not accept connection"));
}
};
Ok(io.into_io())
}

View File

@@ -1,6 +1,6 @@
use tls_core::anchors::RootCertStore;
use tlsn::{
config::{ProtocolConfig, ProtocolConfigValidator},
config::{CertificateDer, ProtocolConfig, ProtocolConfigValidator, RootCertStore},
connection::ServerName,
hash::HashAlgId,
prover::{ProveConfig, Prover, ProverConfig, TlsConfig},
transcript::{TranscriptCommitConfig, TranscriptCommitment, TranscriptCommitmentKind},
@@ -21,19 +21,17 @@ const MAX_RECV_DATA: usize = 1 << 11;
crate::test!("basic", prover, verifier);
async fn prover(provider: &IoProvider) {
let mut root_store = RootCertStore::empty();
root_store
.add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
.unwrap();
let mut tls_config_builder = TlsConfig::builder();
tls_config_builder.root_store(root_store);
tls_config_builder.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
});
let tls_config = tls_config_builder.build().unwrap();
let server_name = ServerName::Dns(SERVER_DOMAIN.try_into().unwrap());
let prover = Prover::new(
ProverConfig::builder()
.server_name(SERVER_DOMAIN)
.server_name(server_name)
.tls_config(tls_config)
.protocol_config(
ProtocolConfig::builder()
@@ -114,11 +112,6 @@ async fn prover(provider: &IoProvider) {
}
async fn verifier(provider: &IoProvider) {
let mut root_store = RootCertStore::empty();
root_store
.add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
.unwrap();
let config = VerifierConfig::builder()
.protocol_config_validator(
ProtocolConfigValidator::builder()
@@ -127,7 +120,9 @@ async fn verifier(provider: &IoProvider) {
.build()
.unwrap(),
)
.root_store(root_store)
.root_store(RootCertStore {
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
})
.build()
.unwrap();
@@ -145,7 +140,9 @@ async fn verifier(provider: &IoProvider) {
.await
.unwrap();
assert_eq!(server_name.unwrap().as_str(), SERVER_DOMAIN);
let ServerName::Dns(server_name) = server_name.unwrap();
assert_eq!(server_name.as_str(), SERVER_DOMAIN);
assert!(
transcript_commitments
.iter()

View File

@@ -1,6 +1,8 @@
FROM rust AS builder
WORKDIR /usr/src/tlsn
ARG DEBUG=0
RUN \
rustup update; \
apt update && apt install -y clang; \
@@ -10,7 +12,12 @@ RUN \
COPY . .
RUN \
cd crates/harness; \
./build.sh;
# Pass `--build-arg DEBUG=1` to `docker build` if you need to debug the harness.
if [ "$DEBUG" = "1" ]; then \
./build.sh debug; \
else \
./build.sh; \
fi
FROM debian:latest

View File

@@ -7,6 +7,10 @@ publish = false
[lib]
name = "harness_runner"
[features]
# Used to debug the runner itself.
debug = []
[dependencies]
tlsn-harness-core = { workspace = true }
tlsn-server-fixture = { workspace = true }

View File

@@ -0,0 +1,17 @@
#![allow(unused_imports)]
pub use futures::FutureExt;
pub use tracing::{debug, error};
pub use chromiumoxide::{
Browser, Page,
cdp::{
browser_protocol::{
log::{EventEntryAdded, LogEntryLevel},
network::{EnableParams, SetCacheDisabledParams},
page::ReloadParams,
},
js_protocol::runtime::EventExceptionThrown,
},
handler::HandlerConfig,
};

View File

@@ -8,6 +8,7 @@ use chromiumoxide::{
network::{EnableParams, SetCacheDisabledParams},
page::ReloadParams,
},
handler::HandlerConfig,
};
use futures::StreamExt;
use harness_core::{
@@ -20,6 +21,9 @@ use harness_core::{
use crate::{Target, network::Namespace, rpc::Rpc};
#[cfg(feature = "debug")]
use crate::debug_prelude::*;
pub struct Executor {
ns: Namespace,
config: ExecutorConfig,
@@ -65,20 +69,34 @@ impl Executor {
Id::One => self.config.network().rpc_1,
};
let process = duct::cmd!(
"sudo",
"ip",
"netns",
"exec",
self.ns.name(),
"env",
let mut args = vec![
"ip".into(),
"netns".into(),
"exec".into(),
self.ns.name().into(),
"env".into(),
format!("CONFIG={}", serde_json::to_string(&self.config)?),
executor_path
)
.stdout_capture()
.stderr_capture()
.unchecked()
.start()?;
];
if cfg!(feature = "debug") {
let level = &std::env::var("RUST_LOG").unwrap_or("debug".to_string());
args.push("env".into());
args.push(format!("RUST_LOG={}", level));
};
args.push(executor_path.to_str().expect("valid path").into());
let process = duct::cmd("sudo", args);
let process = if !cfg!(feature = "debug") {
process
.stdout_capture()
.stderr_capture()
.unchecked()
.start()?
} else {
process.unchecked().start()?
};
let rpc = Rpc::new_native(rpc_addr).await?;
@@ -118,16 +136,29 @@ impl Executor {
"--no-sandbox",
format!("--user-data-dir={tmp}"),
format!("--allowed-ips=10.250.0.1"),
)
.stderr_capture()
.stdout_capture()
.start()?;
);
let process = if !cfg!(feature = "debug") {
process.stderr_capture().stdout_capture().start()?
} else {
process.start()?
};
const TIMEOUT: usize = 10000;
const DELAY: usize = 100;
let mut retries = 0;
let config = HandlerConfig {
// Bump the timeout for long-running benches.
request_timeout: Duration::from_secs(120),
..Default::default()
};
let (browser, mut handler) = loop {
match Browser::connect(format!("http://{}:{}", rpc_addr.0, PORT_BROWSER)).await
match Browser::connect_with_config(
format!("http://{}:{}", rpc_addr.0, PORT_BROWSER),
config.clone(),
)
.await
{
Ok(browser) => break browser,
Err(e) => {
@@ -143,6 +174,14 @@ impl Executor {
tokio::spawn(async move {
while let Some(res) = handler.next().await {
if let Err(e) = res {
if e.to_string()
== "data did not match any variant of untagged enum Message"
{
// Do not log this error. It appears to be
// caused by a bug upstream.
// https://github.com/mattsse/chromiumoxide/issues/167
continue;
}
eprintln!("chromium error: {e:?}");
}
}
@@ -152,6 +191,38 @@ impl Executor {
.new_page(&format!("http://{wasm_addr}:{wasm_port}/index.html"))
.await?;
#[cfg(feature = "debug")]
tokio::spawn(register_listeners(page.clone()).await?);
#[cfg(feature = "debug")]
async fn register_listeners(page: Page) -> Result<impl Future<Output = ()>> {
let mut logs = page.event_listener::<EventEntryAdded>().await?.fuse();
let mut exceptions =
page.event_listener::<EventExceptionThrown>().await?.fuse();
Ok(futures::future::join(
async move {
while let Some(event) = logs.next().await {
let entry = &event.entry;
match entry.level {
LogEntryLevel::Error => {
error!("{:?}", entry);
}
_ => {
debug!("{:?}: {}", entry.timestamp, entry.text);
}
}
}
},
async move {
while let Some(event) = exceptions.next().await {
error!("{:?}", event);
}
},
)
.map(|_| ()))
}
page.execute(EnableParams::builder().build()).await?;
page.execute(SetCacheDisabledParams {
cache_disabled: true,

View File

@@ -6,6 +6,9 @@ mod server_fixture;
pub mod wasm_server;
mod ws_proxy;
#[cfg(feature = "debug")]
mod debug_prelude;
use std::time::Duration;
use anyhow::Result;
@@ -24,6 +27,9 @@ use cli::{Cli, Command};
use executor::Executor;
use server_fixture::ServerFixture;
#[cfg(feature = "debug")]
use crate::debug_prelude::*;
use crate::{cli::Route, network::Network, wasm_server::WasmServer, ws_proxy::WsProxy};
#[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum)]
@@ -113,6 +119,9 @@ impl Runner {
}
pub async fn main() -> Result<()> {
#[cfg(feature = "debug")]
tracing_subscriber::fmt::init();
let cli = Cli::parse();
let mut runner = Runner::new(&cli)?;
@@ -227,6 +236,9 @@ pub async fn main() -> Result<()> {
// Wait for the network to stabilize
tokio::time::sleep(Duration::from_millis(100)).await;
#[cfg(feature = "debug")]
debug!("Starting bench in group {:?}", config.group);
let (output, _) = tokio::try_join!(
runner.exec_p.bench(BenchCmd {
config: config.clone(),

View File

@@ -5,7 +5,7 @@ description = "TLSNotary MPC-TLS protocol"
keywords = ["tls", "mpc", "2pc"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.13"
edition = "2021"
[lints]
@@ -72,3 +72,5 @@ tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }
tokio-util = { workspace = true, features = ["compat"] }
tracing-subscriber = { workspace = true }
uid-mux = { workspace = true, features = ["serio", "test-utils"] }
rustls-pki-types = { workspace = true }
rustls-webpki = { workspace = true }

View File

@@ -22,7 +22,7 @@ use serio::stream::IoStreamExt;
use std::mem;
use tls_core::msgs::enums::NamedGroup;
use tlsn_core::{
connection::{HandshakeData, HandshakeDataV1_2, TlsVersion, VerifyData},
connection::{CertBinding, CertBindingV1_2, TlsVersion, VerifyData},
transcript::TlsTranscript,
};
use tracing::{debug, instrument};
@@ -405,7 +405,7 @@ impl MpcTlsFollower {
let cf_vd = cf_vd.ok_or(MpcTlsError::hs("client finished VD not computed"))?;
let sf_vd = sf_vd.ok_or(MpcTlsError::hs("server finished VD not computed"))?;
let handshake_data = HandshakeData::V1_2(HandshakeDataV1_2 {
let handshake_data = CertBinding::V1_2(CertBindingV1_2 {
client_random,
server_random,
server_ephemeral_key: server_key

View File

@@ -41,12 +41,12 @@ use tls_core::{
message::{OpaqueMessage, PlainMessage},
},
suites::SupportedCipherSuite,
verify::verify_sig_determine_alg,
};
use tlsn_core::{
connection::{
Certificate, HandshakeData, HandshakeDataV1_2, ServerSignature, TlsVersion, VerifyData,
},
connection::{CertBinding, CertBindingV1_2, ServerSignature, TlsVersion, VerifyData},
transcript::TlsTranscript,
webpki::CertificateDer,
};
use tracing::{debug, instrument, trace, warn};
@@ -325,19 +325,27 @@ impl MpcTlsLeader {
let server_cert_chain = server_cert_details
.cert_chain()
.iter()
.map(|cert| Certificate(cert.0.clone()))
.map(|cert| CertificateDer(cert.0.clone()))
.collect();
let mut sig_msg = Vec::new();
sig_msg.extend_from_slice(&client_random.0);
sig_msg.extend_from_slice(&server_random.0);
sig_msg.extend_from_slice(server_kx_details.kx_params());
let server_signature_alg = verify_sig_determine_alg(
&server_cert_details.cert_chain()[0],
&sig_msg,
server_kx_details.kx_sig(),
)
.expect("only supported signature should have been accepted");
let server_signature = ServerSignature {
scheme: server_kx_details
.kx_sig()
.scheme
.try_into()
.expect("only supported signature scheme should have been accepted"),
alg: server_signature_alg.into(),
sig: server_kx_details.kx_sig().sig.0.clone(),
};
let handshake_data = HandshakeData::V1_2(HandshakeDataV1_2 {
let handshake_data = CertBinding::V1_2(CertBindingV1_2 {
client_random: client_random.0,
server_random: server_random.0,
server_ephemeral_key: server_key

View File

@@ -72,4 +72,5 @@ pub(crate) struct ServerFinishedVd {
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(dead_code)]
pub(crate) struct CloseConnection;

View File

@@ -193,7 +193,7 @@ where
};
// Divide by block length and round up.
let block_count = input.len() / 16 + (input.len() % 16 != 0) as usize;
let block_count = input.len() / 16 + !input.len().is_multiple_of(16) as usize;
if block_count > MAX_POWER {
return Err(ErrorRepr::InputLength {
@@ -282,11 +282,11 @@ fn build_ghash_data(mut aad: Vec<u8>, mut ciphertext: Vec<u8>) -> Vec<u8> {
let len_block = ((associated_data_bitlen as u128) << 64) + (text_bitlen as u128);
// Pad data to be a multiple of 16 bytes.
let aad_padded_block_count = (aad.len() / 16) + (aad.len() % 16 != 0) as usize;
let aad_padded_block_count = (aad.len() / 16) + !aad.len().is_multiple_of(16) as usize;
aad.resize(aad_padded_block_count * 16, 0);
let ciphertext_padded_block_count =
(ciphertext.len() / 16) + (ciphertext.len() % 16 != 0) as usize;
(ciphertext.len() / 16) + !ciphertext.len().is_multiple_of(16) as usize;
ciphertext.resize(ciphertext_padded_block_count * 16, 0);
let mut data: Vec<u8> = Vec::with_capacity(aad.len() + ciphertext.len() + 16);

View File

@@ -12,11 +12,15 @@ use mpz_ot::{
rcot::shared::{SharedRCOTReceiver, SharedRCOTSender},
};
use rand::{rngs::StdRng, Rng, SeedableRng};
use tls_client::Certificate;
use rustls_pki_types::CertificateDer;
use tls_client::RootCertStore;
use tls_client_async::bind_client;
use tls_server_fixture::{bind_test_server_hyper, CA_CERT_DER, SERVER_DOMAIN};
use tokio::sync::Mutex;
use tokio_util::compat::TokioAsyncReadCompatExt;
use webpki::anchor_from_trusted_cert;
const CA_CERT: CertificateDer = CertificateDer::from_slice(CA_CERT_DER);
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
#[ignore = "expensive"]
@@ -48,11 +52,11 @@ async fn leader_task(mut leader: MpcTlsLeader) {
let (leader_ctrl, leader_fut) = leader.run();
tokio::spawn(async { leader_fut.await.unwrap() });
let mut root_store = tls_client::RootCertStore::empty();
root_store.add(&Certificate(CA_CERT_DER.to_vec())).unwrap();
let config = tls_client::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_store)
.with_root_certificates(RootCertStore {
roots: vec![anchor_from_trusted_cert(&CA_CERT).unwrap().to_owned()],
})
.with_no_client_auth();
let server_name = SERVER_DOMAIN.try_into().unwrap();

View File

@@ -6,7 +6,7 @@ pub static SERVER_CERT_DER: &[u8] = include_bytes!("tls/test_server_cert.der");
pub static SERVER_KEY_DER: &[u8] = include_bytes!("tls/test_server_private_key.der");
/// The domain name bound to the server certificate.
pub static SERVER_DOMAIN: &str = "test-server.io";
/// A client certificate fixture PEM-encoded.
pub static CLIENT_CERT: &[u8] = include_bytes!("tls/client_cert.pem");
/// A client private key fixture PEM-encoded.
pub static CLIENT_KEY: &[u8] = include_bytes!("tls/client_cert.key");
/// A client certificate fixture.
pub static CLIENT_CERT_DER: &[u8] = include_bytes!("tls/client_cert.der");
/// A client private key fixture.
pub static CLIENT_KEY_DER: &[u8] = include_bytes!("tls/client_cert_private_key.der");

View File

@@ -33,5 +33,8 @@ openssl req -new -key client_cert.key -out client_cert.csr -subj "/C=US/ST=State
# Sign the CSR with the root CA to create the end entity certificate (100 years validity)
openssl x509 -req -in client_cert.csr -CA root_ca.crt -CAkey root_ca.key -CAcreateserial -out client_cert.crt -days 36525 -sha256 -extfile openssl.cnf -extensions v3_req
# Convert the end entity certificate to PEM format
openssl x509 -in client_cert.crt -outform pem -out client_cert.pem
# Convert the end entity certificate to DER format
openssl x509 -in client_cert.crt -outform der -out client_cert.der
# Convert the end entity certificate private key to DER format
openssl pkcs8 -topk8 -inform PEM -outform DER -in client_cert.key -out client_cert_private_key.der -nocrypt

Binary file not shown.

View File

@@ -1,23 +0,0 @@
-----BEGIN CERTIFICATE-----
MIID2jCCAsKgAwIBAgIUG5JKIz/fbUDdpX1+TAw33mS+mWwwDQYJKoZIhvcNAQEL
BQAwZTELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBVN0YXRlMQ0wCwYDVQQHDARDaXR5
MRIwEAYDVQQKDAl0bHNub3RhcnkxCzAJBgNVBAsMAklUMRYwFAYDVQQDDA10bHNu
b3Rhcnkub3JnMCAXDTI1MDYxMDA3MTYxOVoYDzIxMjUwNjExMDcxNjE5WjBwMQsw
CQYDVQQGEwJVUzEOMAwGA1UECAwFU3RhdGUxDTALBgNVBAcMBENpdHkxEjAQBgNV
BAoMCXRsc25vdGFyeTELMAkGA1UECwwCSVQxITAfBgNVBAMMGGNsaWVudC1hdXRo
ZW50aWNhdGlvbi5pbzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANsx
Tf3JqWdAMGFzOwbO64vJ5fV/IPSrdBwKY/Fjef0REZC1Z/gGzmp0nnlaHZzZLtLS
Z9kyfdUrL6PuG3HfP6wxhiaBpUay+1O9KZsuhkKSif4KMPjlYKm+oZLvD12Qj62r
TFlui4+1wKgPrTGUUO6SQdoRxKU4nzuzRYRLyzDi0pO5YD9RLaruBj+IDEOVRW7d
1uleheVMg61lbQle5Fo0c4I0Sif96Z+7aotj3j9F2lK52jaLpA1kvC3oLajfAT30
BzpNLZTnWa1b5PRRxkuOYUXeNr+aNO90fL80K1YeIlea0f7qmKL9uDLtQbrqIJv5
tBaf8Uf0UghtBm//kx8CAwEAAaN1MHMwCQYDVR0TBAIwADALBgNVHQ8EBAMCBeAw
GQYDVR0RBBIwEIIOdGVzdC1zZXJ2ZXIuaW8wHQYDVR0OBBYEFH1qCgl04Y5i75aF
cT0V3fn9423iMB8GA1UdIwQYMBaAFMmBciQ/DZlWROxwXH8IplmuHKbNMA0GCSqG
SIb3DQEBCwUAA4IBAQB8Gvj3dsENAn0u6PS9uTFm46MaA9Dm+Fa+KbXuEHp3ADs2
7m4Hb3eojM3yae93/v/stYn8IVcB5zWmMvg6WA6obe86muuB+SZeMC/AnSD8P4pm
AzO3eTSR1s5Dr4O0qVPd2VP36e7NWXfojQg4W9t9UQtC64bVOaCDQvbe0xeWT+AR
w0y7GwnuCr/8bisqQZS8+Er1JU3zxBEjQwMiMxlOWHnYtjGeA6pdWaeLp0E6Ss3x
ecsTjmrLt6oY+BdfRSyWU4qVEOpuZLCeikUWXFzpxRX7NWYRtJUfVnoRWwuD2lzG
LybzCW2qxwHJe4biGIfWKQ7Ne7DrwQwFxVRJxCm0
-----END CERTIFICATE-----

View File

@@ -0,0 +1,37 @@
{
"tax_year": 2024,
"taxpayer": {
"idnr": "12345678901",
"first_name": "Max",
"last_name": "Mustermann",
"date_of_birth": "1985-03-12",
"address": {
"street": "Musterstraße 1",
"postal_code": "10115",
"city": "Berlin"
}
},
"income": {
"employment_income": 54200.00,
"other_income": 1200.00,
"capital_gains": 350.00
},
"deductions": {
"pension_insurance": 4200.00,
"health_insurance": 3600.00,
"donations": 500.00,
"work_related_expenses": 1100.00
},
"assessment": {
"taxable_income": 49200.00,
"income_tax": 9156.00,
"solidarity_surcharge": 503.58,
"total_tax": 9659.58,
"prepaid_tax": 9500.00,
"refund": 159.58
},
"submission": {
"submitted_at": "2025-03-01T14:22:30Z",
"submitted_by": "ElsterOnline-Portal"
}
}

View File

@@ -47,6 +47,7 @@ fn app(state: AppState) -> Router {
.route("/formats/json", get(json))
.route("/formats/html", get(html))
.route("/protected", get(protected_route))
.route("/elster", get(elster_route))
.layer(TraceLayer::new_for_http())
.with_state(Arc::new(Mutex::new(state)))
}
@@ -196,6 +197,12 @@ async fn protected_route(_: AuthenticatedUser) -> Result<Json<Value>, StatusCode
get_json_value(include_str!("data/protected_data.json"))
}
async fn elster_route(_: AuthenticatedUser) -> Result<Json<Value>, StatusCode> {
info!("Handling /elster");
get_json_value(include_str!("data/elster.json"))
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -5,7 +5,7 @@ description = "A TLS backend trait for TLSNotary"
keywords = ["tls", "mpc", "2pc"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.13"
edition = "2021"
[lints]

View File

@@ -5,7 +5,7 @@ description = "An async TLS client for TLSNotary"
keywords = ["tls", "mpc", "2pc", "client", "async"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.13"
edition = "2021"
[lints]
@@ -35,3 +35,5 @@ hyper = { workspace = true, features = ["client", "http1"] }
hyper-util = { workspace = true, features = ["full"] }
rstest = { workspace = true }
tokio = { workspace = true, features = ["rt", "rt-multi-thread", "macros"] }
rustls-webpki = { workspace = true }
rustls-pki-types = { workspace = true }

View File

@@ -6,7 +6,8 @@ use http_body_util::{BodyExt as _, Full};
use hyper::{body::Bytes, Request, StatusCode};
use hyper_util::rt::TokioIo;
use rstest::{fixture, rstest};
use tls_client::{Certificate, ClientConfig, ClientConnection, RustCryptoBackend, ServerName};
use rustls_pki_types::CertificateDer;
use tls_client::{ClientConfig, ClientConnection, RustCryptoBackend, ServerName};
use tls_client_async::{bind_client, ClosedConnection, ConnectionError, TlsConnection};
use tls_server_fixture::{
bind_test_server, bind_test_server_hyper, APP_RECORD_LENGTH, CA_CERT_DER, CLOSE_DELAY,
@@ -14,6 +15,9 @@ use tls_server_fixture::{
};
use tokio::task::JoinHandle;
use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
use webpki::anchor_from_trusted_cert;
const CA_CERT: CertificateDer = CertificateDer::from_slice(CA_CERT_DER);
// An established client TLS connection
struct TlsFixture {
@@ -30,7 +34,9 @@ async fn set_up_tls() -> TlsFixture {
let _server_task = tokio::spawn(bind_test_server(server_socket.compat()));
let mut root_store = tls_client::RootCertStore::empty();
root_store.add(&Certificate(CA_CERT_DER.to_vec())).unwrap();
root_store
.roots
.push(anchor_from_trusted_cert(&CA_CERT).unwrap().to_owned());
let config = ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_store)
@@ -75,7 +81,9 @@ async fn test_hyper_ok() {
let server_task = tokio::spawn(bind_test_server_hyper(server_socket.compat()));
let mut root_store = tls_client::RootCertStore::empty();
root_store.add(&Certificate(CA_CERT_DER.to_vec())).unwrap();
root_store
.roots
.push(anchor_from_trusted_cert(&CA_CERT).unwrap().to_owned());
let config = ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_store)

View File

@@ -5,7 +5,7 @@ description = "A TLS client for TLSNotary"
keywords = ["tls", "mpc", "2pc", "client", "sync"]
categories = ["cryptography"]
license = "Apache-2.0 OR ISC OR MIT"
version = "0.1.0-alpha.13-pre"
version = "0.1.0-alpha.13"
edition = "2021"
autobenches = false
@@ -23,7 +23,8 @@ async-trait = { workspace = true }
log = { workspace = true, optional = true }
ring = { workspace = true }
sct = { workspace = true }
webpki = { workspace = true, features = ["alloc", "std"] }
rustls-pki-types = { workspace = true }
rustls-webpki = { workspace = true }
aes-gcm = { workspace = true }
p256 = { workspace = true, features = ["ecdh"] }
rand = { workspace = true }

View File

@@ -7,7 +7,6 @@ use crate::{
conn::{CommonState, ConnectionRandoms, State},
error::Error,
hash_hs::HandshakeHashBuffer,
msgs::persist,
ticketer::TimeBase,
};
use tls_core::{
@@ -42,35 +41,6 @@ pub(super) type NextState = Box<dyn State<ClientConnectionData>>;
pub(super) type NextStateOrError = Result<NextState, Error>;
pub(super) type ClientContext<'a> = crate::conn::Context<'a>;
fn find_session(
server_name: &ServerName,
config: &ClientConfig,
) -> Option<persist::Retrieved<persist::ClientSessionValue>> {
let key = persist::ClientSessionKey::session_for_server_name(server_name);
let key_buf = key.get_encoding();
let value = config.session_storage.get(&key_buf).or_else(|| {
debug!("No cached session for {:?}", server_name);
None
})?;
#[allow(unused_mut)]
let mut reader = Reader::init(&value[2..]);
#[allow(clippy::bind_instead_of_map)] // https://github.com/rust-lang/rust-clippy/issues/8082
CipherSuite::read_bytes(&value[..2])
.and_then(|suite| {
persist::ClientSessionValue::read(&mut reader, suite, &config.cipher_suites)
})
.and_then(|resuming| {
let retrieved = persist::Retrieved::new(resuming, TimeBase::now().ok()?);
match retrieved.has_expired() {
false => Some(retrieved),
true => None,
}
})
.and_then(Some)
}
pub(super) async fn start_handshake(
server_name: ServerName,
extra_exts: Vec<ClientExtension>,
@@ -123,7 +93,6 @@ pub(super) async fn start_handshake(
emit_client_hello_for_retry(
config,
cx,
None,
random,
false,
transcript_buffer,
@@ -142,7 +111,6 @@ pub(super) async fn start_handshake(
struct ExpectServerHello {
config: Arc<ClientConfig>,
resuming_session: Option<persist::Retrieved<persist::ClientSessionValue>>,
server_name: ServerName,
random: Random,
using_ems: bool,
@@ -162,7 +130,6 @@ struct ExpectServerHelloOrHelloRetryRequest {
async fn emit_client_hello_for_retry(
config: Arc<ClientConfig>,
cx: &mut ClientContext<'_>,
resuming_session: Option<persist::Retrieved<persist::ClientSessionValue>>,
random: Random,
using_ems: bool,
mut transcript_buffer: HandshakeHashBuffer,
@@ -176,25 +143,6 @@ async fn emit_client_hello_for_retry(
may_send_sct_list: bool,
suite: Option<SupportedCipherSuite>,
) -> Result<NextState, Error> {
// For now we do not support session resumption
//
// Do we have a SessionID or ticket cached for this host?
// let (ticket, resume_version) = if let Some(resuming) = &resuming_session {
// match &resuming.value {
// persist::ClientSessionValue::Tls13(inner) => {
// (inner.ticket().to_vec(), ProtocolVersion::TLSv1_3)
// }
// #[cfg(feature = "tls12")]
// persist::ClientSessionValue::Tls12(inner) => {
// (inner.ticket().to_vec(), ProtocolVersion::TLSv1_2)
// }
// }
// } else {
// (Vec::new(), ProtocolVersion::Unknown(0))
// };
// let (ticket, resume_version) = (Vec::new(), ProtocolVersion::Unknown(0));
let support_tls12 = config.supports_version(ProtocolVersion::TLSv1_2);
let support_tls13 = config.supports_version(ProtocolVersion::TLSv1_3);
@@ -256,48 +204,6 @@ async fn emit_client_hello_for_retry(
// Extra extensions must be placed before the PSK extension
exts.extend(extra_exts.iter().cloned());
// let fill_in_binder = if support_tls13
// && config.enable_tickets
// && resume_version == ProtocolVersion::TLSv1_3
// && !ticket.is_empty()
// {
// let resuming =
// resuming_session
// .as_ref()
// .and_then(|resuming| match (suite, resuming.tls13()) {
// (Some(suite), Some(resuming)) => {
// suite.tls13()?.can_resume_from(resuming.suite())?;
// Some(resuming)
// }
// (None, Some(resuming)) => Some(resuming),
// _ => None,
// });
// if let Some(ref resuming) = resuming {
// tls13::prepare_resumption(
// &config,
// cx,
// ticket,
// &resuming,
// &mut exts,
// retryreq.is_some(),
// )
// .await;
// }
// resuming
// } else if config.enable_tickets {
// // If we have a ticket, include it. Otherwise, request one.
// if ticket.is_empty() {
// exts.push(ClientExtension::SessionTicket(ClientSessionTicket::Request));
// } else {
// exts.push(ClientExtension::SessionTicket(ClientSessionTicket::Offer(
// Payload::new(ticket),
// )));
// }
// None
// } else {
// None
// };
// Note what extensions we sent.
hello.sent_extensions = exts.iter().map(ClientExtension::get_type).collect();
@@ -319,8 +225,8 @@ async fn emit_client_hello_for_retry(
};
// let early_key_schedule = if let Some(resuming) = fill_in_binder {
// let schedule = tls13::fill_in_psk_binder(&resuming, &transcript_buffer, &mut chp);
// Some((resuming.suite(), schedule))
// let schedule = tls13::fill_in_psk_binder(&resuming, &transcript_buffer,
// &mut chp); Some((resuming.suite(), schedule))
// } else {
// None
// };
@@ -350,7 +256,6 @@ async fn emit_client_hello_for_retry(
let next = ExpectServerHello {
config,
resuming_session,
server_name,
random,
using_ems,
@@ -551,19 +456,10 @@ impl State<ClientConnectionData> for ExpectServerHello {
// handshake_traffic_secret.
match suite {
SupportedCipherSuite::Tls13(suite) => {
let resuming_session =
self.resuming_session
.and_then(|resuming| match resuming.value {
persist::ClientSessionValue::Tls13(inner) => Some(inner),
#[cfg(feature = "tls12")]
persist::ClientSessionValue::Tls12(_) => None,
});
tls13::handle_server_hello(
self.config,
cx,
server_hello,
resuming_session,
self.server_name,
randoms,
suite,
@@ -577,16 +473,8 @@ impl State<ClientConnectionData> for ExpectServerHello {
}
#[cfg(feature = "tls12")]
SupportedCipherSuite::Tls12(suite) => {
let resuming_session =
self.resuming_session
.and_then(|resuming| match resuming.value {
persist::ClientSessionValue::Tls12(inner) => Some(inner),
persist::ClientSessionValue::Tls13(_) => None,
});
tls12::CompleteServerHelloHandling {
config: self.config,
resuming_session,
server_name: self.server_name,
randoms,
using_ems: self.using_ems,
@@ -723,7 +611,6 @@ impl ExpectServerHelloOrHelloRetryRequest {
emit_client_hello_for_retry(
self.next.config,
cx,
self.next.resuming_session,
self.next.random,
self.next.using_ems,
transcript_buffer,

View File

@@ -10,7 +10,6 @@ use crate::{
conn::{CommonState, ConnectionRandoms, State},
error::Error,
hash_hs::HandshakeHash,
msgs::persist,
sign::Signer,
ticketer::TimeBase,
verify,
@@ -49,7 +48,6 @@ mod server_hello {
pub(in crate::client) struct CompleteServerHelloHandling {
pub(in crate::client) config: Arc<ClientConfig>,
pub(in crate::client) resuming_session: Option<persist::Tls12ClientSessionValue>,
pub(in crate::client) server_name: ServerName,
pub(in crate::client) randoms: ConnectionRandoms,
pub(in crate::client) using_ems: bool,
@@ -113,76 +111,8 @@ mod server_hello {
None
};
// See if we're successfully resuming.
if let Some(ref _resuming) = self.resuming_session {
return Err(Error::General(
"client does not support resumption".to_string(),
));
// if resuming.session_id == server_hello.session_id {
// debug!("Server agreed to resume");
// // Is the server telling lies about the ciphersuite?
// if resuming.suite() != suite {
// let error_msg =
// "abbreviated handshake offered, but with varied cs".to_string();
// return Err(Error::PeerMisbehavedError(error_msg));
// }
// // And about EMS support?
// if resuming.extended_ms() != self.using_ems {
// let error_msg = "server varied ems support over resume".to_string();
// return Err(Error::PeerMisbehavedError(error_msg));
// }
// let secrets =
// ConnectionSecrets::new_resume(self.randoms, suite, resuming.secret());
// self.config.key_log.log(
// "CLIENT_RANDOM",
// &secrets.randoms.client,
// &secrets.master_secret,
// );
// cx.common.start_encryption_tls12(&secrets, Side::Client);
// // Since we're resuming, we verified the certificate and
// // proof of possession in the prior session.
// cx.common.peer_certificates = Some(resuming.server_cert_chain().to_vec());
// let cert_verified = verify::ServerCertVerified::assertion();
// let sig_verified = verify::HandshakeSignatureValid::assertion();
// return if must_issue_new_ticket {
// Ok(Box::new(ExpectNewTicket {
// config: self.config,
// secrets,
// resuming_session: self.resuming_session,
// session_id: server_hello.session_id,
// server_name: self.server_name,
// using_ems: self.using_ems,
// transcript: self.transcript,
// resuming: true,
// cert_verified,
// sig_verified,
// }))
// } else {
// Ok(Box::new(ExpectCcs {
// config: self.config,
// secrets,
// resuming_session: self.resuming_session,
// session_id: server_hello.session_id,
// server_name: self.server_name,
// using_ems: self.using_ems,
// transcript: self.transcript,
// ticket: None,
// resuming: true,
// cert_verified,
// sig_verified,
// }))
// };
// }
}
Ok(Box::new(ExpectCertificate {
config: self.config,
resuming_session: self.resuming_session,
session_id: server_hello.session_id,
server_name: self.server_name,
randoms: self.randoms,
@@ -199,7 +129,6 @@ mod server_hello {
struct ExpectCertificate {
config: Arc<ClientConfig>,
resuming_session: Option<persist::Tls12ClientSessionValue>,
session_id: SessionID,
server_name: ServerName,
randoms: ConnectionRandoms,
@@ -228,7 +157,6 @@ impl State<ClientConnectionData> for ExpectCertificate {
if self.may_send_cert_status {
Ok(Box::new(ExpectCertificateStatusOrServerKx {
config: self.config,
resuming_session: self.resuming_session,
session_id: self.session_id,
server_name: self.server_name,
randoms: self.randoms,
@@ -250,7 +178,6 @@ impl State<ClientConnectionData> for ExpectCertificate {
Ok(Box::new(ExpectServerKx {
config: self.config,
resuming_session: self.resuming_session,
session_id: self.session_id,
server_name: self.server_name,
randoms: self.randoms,
@@ -266,7 +193,6 @@ impl State<ClientConnectionData> for ExpectCertificate {
struct ExpectCertificateStatusOrServerKx {
config: Arc<ClientConfig>,
resuming_session: Option<persist::Tls12ClientSessionValue>,
session_id: SessionID,
server_name: ServerName,
randoms: ConnectionRandoms,
@@ -303,7 +229,6 @@ impl State<ClientConnectionData> for ExpectCertificateStatusOrServerKx {
Box::new(ExpectServerKx {
config: self.config,
resuming_session: self.resuming_session,
session_id: self.session_id,
server_name: self.server_name,
randoms: self.randoms,
@@ -322,7 +247,6 @@ impl State<ClientConnectionData> for ExpectCertificateStatusOrServerKx {
}) => {
Box::new(ExpectCertificateStatus {
config: self.config,
resuming_session: self.resuming_session,
session_id: self.session_id,
server_name: self.server_name,
randoms: self.randoms,
@@ -350,7 +274,6 @@ impl State<ClientConnectionData> for ExpectCertificateStatusOrServerKx {
struct ExpectCertificateStatus {
config: Arc<ClientConfig>,
resuming_session: Option<persist::Tls12ClientSessionValue>,
session_id: SessionID,
server_name: ServerName,
randoms: ConnectionRandoms,
@@ -395,7 +318,6 @@ impl State<ClientConnectionData> for ExpectCertificateStatus {
Ok(Box::new(ExpectServerKx {
config: self.config,
resuming_session: self.resuming_session,
session_id: self.session_id,
server_name: self.server_name,
randoms: self.randoms,
@@ -410,7 +332,6 @@ impl State<ClientConnectionData> for ExpectCertificateStatus {
struct ExpectServerKx {
config: Arc<ClientConfig>,
resuming_session: Option<persist::Tls12ClientSessionValue>,
session_id: SessionID,
server_name: ServerName,
randoms: ConnectionRandoms,
@@ -458,7 +379,6 @@ impl State<ClientConnectionData> for ExpectServerKx {
Ok(Box::new(ExpectServerDoneOrCertReq {
config: self.config,
resuming_session: self.resuming_session,
session_id: self.session_id,
server_name: self.server_name,
randoms: self.randoms,
@@ -570,7 +490,6 @@ async fn emit_finished(
// client auth. Otherwise we go straight to ServerHelloDone.
struct ExpectServerDoneOrCertReq {
config: Arc<ClientConfig>,
resuming_session: Option<persist::Tls12ClientSessionValue>,
session_id: SessionID,
server_name: ServerName,
randoms: ConnectionRandoms,
@@ -598,7 +517,6 @@ impl State<ClientConnectionData> for ExpectServerDoneOrCertReq {
) {
Box::new(ExpectCertificateRequest {
config: self.config,
resuming_session: self.resuming_session,
session_id: self.session_id,
server_name: self.server_name,
randoms: self.randoms,
@@ -616,7 +534,6 @@ impl State<ClientConnectionData> for ExpectServerDoneOrCertReq {
Box::new(ExpectServerDone {
config: self.config,
resuming_session: self.resuming_session,
session_id: self.session_id,
server_name: self.server_name,
randoms: self.randoms,
@@ -636,7 +553,6 @@ impl State<ClientConnectionData> for ExpectServerDoneOrCertReq {
struct ExpectCertificateRequest {
config: Arc<ClientConfig>,
resuming_session: Option<persist::Tls12ClientSessionValue>,
session_id: SessionID,
server_name: ServerName,
randoms: ConnectionRandoms,
@@ -679,7 +595,6 @@ impl State<ClientConnectionData> for ExpectCertificateRequest {
Ok(Box::new(ExpectServerDone {
config: self.config,
resuming_session: self.resuming_session,
session_id: self.session_id,
server_name: self.server_name,
randoms: self.randoms,
@@ -696,7 +611,6 @@ impl State<ClientConnectionData> for ExpectCertificateRequest {
struct ExpectServerDone {
config: Arc<ClientConfig>,
resuming_session: Option<persist::Tls12ClientSessionValue>,
session_id: SessionID,
server_name: ServerName,
randoms: ConnectionRandoms,
@@ -745,6 +659,7 @@ impl State<ClientConnectionData> for ExpectServerDone {
// 3. Verify that the top certificate signed their kx.
// 4. If doing client auth, send our Certificate.
// 5. Complete the key exchange:
//
// a) generate our kx pair
// b) emit a ClientKeyExchange containing it
// c) if doing client auth, emit a CertificateVerify
@@ -891,7 +806,6 @@ impl State<ClientConnectionData> for ExpectServerDone {
if st.must_issue_new_ticket {
Ok(Box::new(ExpectNewTicket {
config: st.config,
resuming_session: st.resuming_session,
session_id: st.session_id,
server_name: st.server_name,
using_ems: st.using_ems,
@@ -903,7 +817,6 @@ impl State<ClientConnectionData> for ExpectServerDone {
} else {
Ok(Box::new(ExpectCcs {
config: st.config,
resuming_session: st.resuming_session,
session_id: st.session_id,
server_name: st.server_name,
using_ems: st.using_ems,
@@ -919,7 +832,6 @@ impl State<ClientConnectionData> for ExpectServerDone {
struct ExpectNewTicket {
config: Arc<ClientConfig>,
resuming_session: Option<persist::Tls12ClientSessionValue>,
session_id: SessionID,
server_name: ServerName,
using_ems: bool,
@@ -946,7 +858,6 @@ impl State<ClientConnectionData> for ExpectNewTicket {
Ok(Box::new(ExpectCcs {
config: self.config,
resuming_session: self.resuming_session,
session_id: self.session_id,
server_name: self.server_name,
using_ems: self.using_ems,
@@ -962,7 +873,6 @@ impl State<ClientConnectionData> for ExpectNewTicket {
// -- Waiting for their CCS --
struct ExpectCcs {
config: Arc<ClientConfig>,
resuming_session: Option<persist::Tls12ClientSessionValue>,
session_id: SessionID,
server_name: ServerName,
using_ems: bool,
@@ -998,7 +908,6 @@ impl State<ClientConnectionData> for ExpectCcs {
Ok(Box::new(ExpectFinished {
config: self.config,
resuming_session: self.resuming_session,
session_id: self.session_id,
server_name: self.server_name,
using_ems: self.using_ems,
@@ -1013,7 +922,6 @@ impl State<ClientConnectionData> for ExpectCcs {
struct ExpectFinished {
config: Arc<ClientConfig>,
resuming_session: Option<persist::Tls12ClientSessionValue>,
session_id: SessionID,
server_name: ServerName,
using_ems: bool,
@@ -1024,60 +932,6 @@ struct ExpectFinished {
sig_verified: verify::HandshakeSignatureValid,
}
// impl ExpectFinished {
// // -- Waiting for their finished --
// fn save_session(&mut self, cx: &mut ClientContext<'_>) {
// // Save a ticket. If we got a new ticket, save that. Otherwise, save the
// // original ticket again.
// let (mut ticket, lifetime) = match self.ticket.take() {
// Some(nst) => (nst.ticket.0, nst.lifetime_hint),
// None => (Vec::new(), 0),
// };
// if ticket.is_empty() {
// if let Some(resuming_session) = &mut self.resuming_session {
// ticket = resuming_session.take_ticket();
// }
// }
// if self.session_id.is_empty() && ticket.is_empty() {
// debug!("Session not saved: server didn't allocate id or ticket");
// return;
// }
// let time_now = match TimeBase::now() {
// Ok(time_now) => time_now,
// Err(e) => {
// debug!("Session not saved: {}", e);
// return;
// }
// };
// let key = persist::ClientSessionKey::session_for_server_name(&self.server_name);
// let value = persist::Tls12ClientSessionValue::new(
// self.secrets.suite(),
// self.session_id,
// ticket,
// self.secrets.get_master_secret(),
// cx.common.peer_certificates.clone().unwrap_or_default(),
// time_now,
// lifetime,
// self.using_ems,
// );
// let worked = self
// .config
// .session_storage
// .put(key.get_encoding(), value.get_encoding());
// if worked {
// debug!("Session saved");
// } else {
// debug!("Session not saved");
// }
// }
// }
#[async_trait]
impl State<ClientConnectionData> for ExpectFinished {
async fn handle(

View File

@@ -11,7 +11,6 @@ use crate::{
conn::{CommonState, ConnectionRandoms, State},
error::Error,
hash_hs::{HandshakeHash, HandshakeHashBuffer},
msgs::persist,
sign, verify, KeyLog,
};
#[allow(deprecated)]
@@ -60,7 +59,6 @@ pub(super) async fn handle_server_hello(
config: Arc<ClientConfig>,
cx: &mut ClientContext<'_>,
server_hello: &ServerHelloPayload,
resuming_session: Option<persist::Tls13ClientSessionValue>,
server_name: ServerName,
randoms: ConnectionRandoms,
suite: &'static Tls13CipherSuite,
@@ -102,8 +100,8 @@ pub(super) async fn handle_server_hello(
// };
// if server_hello.get_psk_index() != Some(0) {
// return Err(cx.common.illegal_param("server selected invalid psk").await);
// }
// return Err(cx.common.illegal_param("server selected invalid
// psk").await); }
// debug!("Resuming using PSK");
// // The key schedule has been initialized and set in fill_in_psk_binder()
@@ -143,7 +141,6 @@ pub(super) async fn handle_server_hello(
Ok(Box::new(ExpectEncryptedExtensions {
config,
resuming_session,
server_name,
randoms,
suite,
@@ -170,69 +167,6 @@ async fn validate_server_hello(
Ok(())
}
// fn save_kx_hint(config: &ClientConfig, server_name: &ServerName, group: NamedGroup) {
// let key = persist::ClientSessionKey::hint_for_server_name(server_name);
// config
// .session_storage
// .put(key.get_encoding(), group.get_encoding());
// }
// /// This implements the horrifying TLS1.3 hack where PSK binders have a
// /// data dependency on the message they are contained within.
// pub(super) fn fill_in_psk_binder(
// resuming: &persist::Tls13ClientSessionValue,
// transcript: &HandshakeHashBuffer,
// hmp: &mut HandshakeMessagePayload,
// ) -> KeyScheduleEarly {
// // We need to know the hash function of the suite we're trying to resume into.
// let hkdf_alg = &resuming.suite().hkdf_algorithm;
// let suite_hash = resuming.suite().hash_algorithm();
// // The binder is calculated over the clienthello, but doesn't include itself or its
// // length, or the length of its container.
// let binder_plaintext = hmp.get_encoding_for_binder_signing();
// let handshake_hash = transcript.get_hash_given(suite_hash, &binder_plaintext);
// // Run a fake key_schedule to simulate what the server will do if it chooses
// // to resume.
// let key_schedule = KeyScheduleEarly::new(hkdf_alg, resuming.secret());
// let real_binder = key_schedule.resumption_psk_binder_key_and_sign_verify_data(&handshake_hash);
// if let HandshakePayload::ClientHello(ref mut ch) = hmp.payload {
// ch.set_psk_binder(real_binder.as_ref());
// };
// key_schedule
// }
// pub(super) async fn prepare_resumption(
// config: &ClientConfig,
// cx: &mut ClientContext<'_>,
// ticket: Vec<u8>,
// resuming_session: &persist::Retrieved<&persist::Tls13ClientSessionValue>,
// exts: &mut Vec<ClientExtension>,
// doing_retry: bool,
// ) {
// let resuming_suite = resuming_session.suite();
// cx.common.suite = Some(resuming_suite.into());
// cx.data.resumption_ciphersuite = Some(resuming_suite.into());
// // Finally, and only for TLS1.3 with a ticket resumption, include a binder
// // for our ticket. This must go last.
// //
// // Include an empty binder. It gets filled in below because it depends on
// // the message it's contained in (!!!).
// let obfuscated_ticket_age = resuming_session.obfuscated_ticket_age();
// let binder_len = resuming_suite.hash_algorithm().output_len();
// let binder = vec![0u8; binder_len];
// let psk_identity = PresharedKeyIdentity::new(ticket, obfuscated_ticket_age);
// let psk_ext = PresharedKeyOffer::new(psk_identity, binder);
// exts.push(ClientExtension::PresharedKey(psk_ext));
// }
pub(super) async fn emit_fake_ccs(
sent_tls13_fake_ccs: &mut bool,
common: &mut CommonState,
@@ -287,7 +221,6 @@ async fn validate_encrypted_extensions(
struct ExpectEncryptedExtensions {
config: Arc<ClientConfig>,
resuming_session: Option<persist::Tls13ClientSessionValue>,
server_name: ServerName,
randoms: ConnectionRandoms,
suite: &'static Tls13CipherSuite,
@@ -313,52 +246,19 @@ impl State<ClientConnectionData> for ExpectEncryptedExtensions {
validate_encrypted_extensions(cx.common, &self.hello, exts).await?;
hs::process_alpn_protocol(cx.common, &self.config, exts.get_alpn_protocol()).await?;
if let Some(resuming_session) = self.resuming_session {
let was_early_traffic = cx.common.early_traffic;
if was_early_traffic {
if exts.early_data_extension_offered() {
cx.data.early_data.accepted();
} else {
cx.data.early_data.rejected();
cx.common.early_traffic = false;
}
}
if was_early_traffic && !cx.common.early_traffic {
// If no early traffic, set the encryption key for handshakes
cx.common.record_layer.set_message_encrypter();
}
cx.common.peer_certificates = Some(resuming_session.server_cert_chain().to_vec());
// We *don't* reverify the certificate chain here: resumption is a
// continuation of the previous session in terms of security policy.
let cert_verified = verify::ServerCertVerified::assertion();
let sig_verified = verify::HandshakeSignatureValid::assertion();
Ok(Box::new(ExpectFinished {
config: self.config,
server_name: self.server_name,
randoms: self.randoms,
suite: self.suite,
transcript: self.transcript,
client_auth: None,
cert_verified,
sig_verified,
}))
} else {
if exts.early_data_extension_offered() {
let msg = "server sent early data extension without resumption".to_string();
return Err(Error::PeerMisbehavedError(msg));
}
Ok(Box::new(ExpectCertificateOrCertReq {
config: self.config,
server_name: self.server_name,
randoms: self.randoms,
suite: self.suite,
transcript: self.transcript,
may_send_sct_list: self.hello.server_may_send_sct_list(),
}))
if exts.early_data_extension_offered() {
let msg = "server sent early data extension without resumption".to_string();
return Err(Error::PeerMisbehavedError(msg));
}
Ok(Box::new(ExpectCertificateOrCertReq {
config: self.config,
server_name: self.server_name,
randoms: self.randoms,
suite: self.suite,
transcript: self.transcript,
may_send_sct_list: self.hello.server_may_send_sct_list(),
}))
}
}
@@ -422,9 +322,9 @@ impl State<ClientConnectionData> for ExpectCertificateOrCertReq {
}
}
// TLS1.3 version of CertificateRequest handling. We then move to expecting the server
// Certificate. Unfortunately the CertificateRequest type changed in an annoying way
// in TLS1.3.
// TLS1.3 version of CertificateRequest handling. We then move to expecting the
// server Certificate. Unfortunately the CertificateRequest type changed in an
// annoying way in TLS1.3.
struct ExpectCertificateRequest {
config: Arc<ClientConfig>,
server_name: ServerName,
@@ -787,8 +687,8 @@ impl State<ClientConnectionData> for ExpectFinished {
st.transcript.add_message(&m);
/* The EndOfEarlyData message to server is still encrypted with early data keys,
* but appears in the transcript after the server Finished. */
/* The EndOfEarlyData message to server is still encrypted with early data
* keys, but appears in the transcript after the server Finished. */
if cx.common.early_traffic {
emit_end_of_early_data_tls13(&mut st.transcript, cx.common).await?;
cx.common.early_traffic = false;
@@ -893,42 +793,6 @@ impl ExpectTraffic {
));
}
// let handshake_hash = self.transcript.get_current_hash();
// let secret = self
// .key_schedule
// .resumption_master_secret_and_derive_ticket_psk(&handshake_hash, &nst.nonce.0);
// let time_now = match TimeBase::now() {
// Ok(t) => t,
// #[allow(unused_variables)]
// Err(e) => {
// debug!("Session not saved: {}", e);
// return Ok(());
// }
// };
// let value = persist::Tls13ClientSessionValue::new(
// self.suite,
// nst.ticket.0.clone(),
// secret,
// cx.common.peer_certificates.clone().unwrap_or_default(),
// time_now,
// nst.lifetime,
// nst.age_add,
// nst.get_max_early_data_size().unwrap_or_default(),
// );
// let key = persist::ClientSessionKey::session_for_server_name(&self.server_name);
// #[allow(unused_mut)]
// let mut ticket = value.get_encoding();
// let worked = self.session_storage.put(key.get_encoding(), ticket);
// if worked {
// debug!("Ticket saved");
// } else {
// debug!("Ticket not saved");
// }
Ok(())
}
@@ -948,27 +812,6 @@ impl ExpectTraffic {
Err(Error::General(
"received unsupported key update request from peer".to_string(),
))
// match kur {
// KeyUpdateRequest::UpdateNotRequested => {}
// KeyUpdateRequest::UpdateRequested => {
// self.want_write_key_update = true;
// }
// _ => {
// common
// .send_fatal_alert(AlertDescription::IllegalParameter)
// .await;
// return Err(Error::CorruptMessagePayload(ContentType::Handshake));
// }
// }
// // Update our read-side keys.
// let new_read_key = self.key_schedule.next_server_application_traffic_secret();
// common
// .record_layer
// .set_message_decrypter(self.suite.derive_decrypter(&new_read_key));
// Ok(())
}
}
@@ -1022,10 +865,11 @@ impl State<ClientConnectionData> for ExpectTraffic {
// .send_msg_encrypt(Message::build_key_update_notify().into())
// .await;
// let write_key = self.key_schedule.next_client_application_traffic_secret();
// let write_key =
// self.key_schedule.next_client_application_traffic_secret();
// common
// .record_layer
// .set_message_encrypter(self.suite.derive_encrypter(&write_key));
// }
// .set_message_encrypter(self.suite.derive_encrypter(&
// write_key)); }
}
}

View File

@@ -182,7 +182,7 @@ impl ConnectionCommon {
}
/// Returns an object that allows reading plaintext.
pub fn reader(&mut self) -> Reader {
pub fn reader(&mut self) -> Reader<'_> {
Reader {
received_plaintext: &mut self.common_state.received_plaintext,
// Are we done? i.e., have we processed all received messages, and received a

View File

@@ -301,15 +301,11 @@ mod conn;
mod error;
mod hash_hs;
mod limited_cache;
mod msgs;
mod rand;
mod record_layer;
//mod stream;
mod vecbuf;
pub(crate) use tls_core::verify;
#[cfg(test)]
mod verifybench;
pub(crate) use tls_core::x509;
pub(crate) use tls_core::{verify, x509};
#[macro_use]
mod check;
mod bs_debug;
@@ -330,7 +326,7 @@ pub mod internal {
// The public interface is:
pub use crate::{
anchors::{OwnedTrustAnchor, RootCertStore},
anchors::RootCertStore,
builder::{ConfigBuilder, WantsCipherSuites, WantsKxGroups, WantsVerifier, WantsVersions},
conn::{CommonState, ConnectionCommon, IoState, Reader, SideData},
error::Error,

View File

@@ -1,4 +0,0 @@
pub(crate) mod persist;
#[cfg(test)]
mod persist_test;

View File

@@ -1,526 +0,0 @@
use crate::{client::ServerName, ticketer::TimeBase};
use std::cmp;
#[cfg(feature = "tls12")]
use std::mem;
#[cfg(feature = "tls12")]
use tls_core::suites::Tls12CipherSuite;
use tls_core::{
msgs::{
base::{PayloadU16, PayloadU8},
codec::{Codec, Reader},
enums::{CipherSuite, ProtocolVersion},
handshake::{CertificatePayload, SessionID},
},
suites::{SupportedCipherSuite, Tls13CipherSuite},
};
// These are the keys and values we store in session storage.
// --- Client types ---
/// Keys for session resumption and tickets.
/// Matching value is a `ClientSessionValue`.
#[derive(Debug)]
pub struct ClientSessionKey {
kind: &'static [u8],
name: Vec<u8>,
}
impl Codec for ClientSessionKey {
fn encode(&self, bytes: &mut Vec<u8>) {
bytes.extend_from_slice(self.kind);
bytes.extend_from_slice(&self.name);
}
// Don't need to read these.
fn read(_r: &mut Reader) -> Option<Self> {
None
}
}
impl ClientSessionKey {
pub fn session_for_server_name(server_name: &ServerName) -> Self {
Self {
kind: b"session",
name: server_name.encode(),
}
}
pub fn hint_for_server_name(server_name: &ServerName) -> Self {
Self {
kind: b"kx-hint",
name: server_name.encode(),
}
}
}
#[derive(Debug)]
pub enum ClientSessionValue {
Tls13(Tls13ClientSessionValue),
#[cfg(feature = "tls12")]
Tls12(Tls12ClientSessionValue),
}
impl ClientSessionValue {
pub fn read(
reader: &mut Reader<'_>,
suite: CipherSuite,
supported: &[SupportedCipherSuite],
) -> Option<Self> {
match supported.iter().find(|s| s.suite() == suite)? {
SupportedCipherSuite::Tls13(inner) => {
Tls13ClientSessionValue::read(inner, reader).map(ClientSessionValue::Tls13)
}
#[cfg(feature = "tls12")]
SupportedCipherSuite::Tls12(inner) => {
Tls12ClientSessionValue::read(inner, reader).map(ClientSessionValue::Tls12)
}
}
}
fn common(&self) -> &ClientSessionCommon {
match self {
Self::Tls13(inner) => &inner.common,
#[cfg(feature = "tls12")]
Self::Tls12(inner) => &inner.common,
}
}
}
impl From<Tls13ClientSessionValue> for ClientSessionValue {
fn from(v: Tls13ClientSessionValue) -> Self {
Self::Tls13(v)
}
}
#[cfg(feature = "tls12")]
impl From<Tls12ClientSessionValue> for ClientSessionValue {
fn from(v: Tls12ClientSessionValue) -> Self {
Self::Tls12(v)
}
}
pub struct Retrieved<T> {
pub value: T,
retrieved_at: TimeBase,
}
impl<T> Retrieved<T> {
pub fn new(value: T, retrieved_at: TimeBase) -> Self {
Self {
value,
retrieved_at,
}
}
}
impl Retrieved<&Tls13ClientSessionValue> {
pub fn obfuscated_ticket_age(&self) -> u32 {
let age_secs = self
.retrieved_at
.as_secs()
.saturating_sub(self.value.common.epoch);
let age_millis = age_secs as u32 * 1000;
age_millis.wrapping_add(self.value.age_add)
}
}
impl Retrieved<ClientSessionValue> {
pub fn tls13(&self) -> Option<Retrieved<&Tls13ClientSessionValue>> {
match &self.value {
ClientSessionValue::Tls13(value) => Some(Retrieved::new(value, self.retrieved_at)),
#[cfg(feature = "tls12")]
ClientSessionValue::Tls12(_) => None,
}
}
pub fn has_expired(&self) -> bool {
let common = self.value.common();
common.lifetime_secs != 0
&& common.epoch + u64::from(common.lifetime_secs) < self.retrieved_at.as_secs()
}
}
impl<T> std::ops::Deref for Retrieved<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.value
}
}
#[derive(Debug)]
pub struct Tls13ClientSessionValue {
suite: &'static Tls13CipherSuite,
age_add: u32,
max_early_data_size: u32,
pub common: ClientSessionCommon,
}
impl Tls13ClientSessionValue {
pub fn new(
suite: &'static Tls13CipherSuite,
ticket: Vec<u8>,
secret: Vec<u8>,
server_cert_chain: Vec<tls_core::key::Certificate>,
time_now: TimeBase,
lifetime_secs: u32,
age_add: u32,
max_early_data_size: u32,
) -> Self {
Self {
suite,
age_add,
max_early_data_size,
common: ClientSessionCommon::new(
ticket,
secret,
time_now,
lifetime_secs,
server_cert_chain,
),
}
}
/// [`Codec::read()`] with an extra `suite` argument.
///
/// We decode the `suite` argument separately because it allows us to
/// decide whether we're decoding a TLS 1.2 or TLS 1.3 session value.
pub fn read(suite: &'static Tls13CipherSuite, r: &mut Reader) -> Option<Self> {
Some(Self {
suite,
age_add: u32::read(r)?,
max_early_data_size: u32::read(r)?,
common: ClientSessionCommon::read(r)?,
})
}
/// Inherent implementation of the [`Codec::get_encoding()`] method.
///
/// (See `read()` for why this is inherent here.)
pub fn get_encoding(&self) -> Vec<u8> {
let mut bytes = Vec::with_capacity(16);
self.suite.common.suite.encode(&mut bytes);
self.age_add.encode(&mut bytes);
self.max_early_data_size.encode(&mut bytes);
self.common.encode(&mut bytes);
bytes
}
pub fn max_early_data_size(&self) -> u32 {
self.max_early_data_size
}
pub fn suite(&self) -> &'static Tls13CipherSuite {
self.suite
}
}
impl std::ops::Deref for Tls13ClientSessionValue {
type Target = ClientSessionCommon;
fn deref(&self) -> &Self::Target {
&self.common
}
}
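Because get_encoding writes the cipher-suite id before the version-specific fields, a reader recovers the suite first and lets ClientSessionValue::read pick the matching decoder, which is the point of the inherent read methods above. A round-trip sketch against these types, assuming the imports at the top of this file plus tls_core::suites::TLS13_AES_128_GCM_SHA256; the ticket, secret and ages are placeholder values:
fn tls13_session_value_roundtrip_sketch() {
    let supported = TLS13_AES_128_GCM_SHA256;
    let suite = supported.tls13().unwrap();
    let value = Tls13ClientSessionValue::new(
        suite,
        b"ticket".to_vec(),       // ticket
        b"secret".to_vec(),       // resumption secret
        vec![],                   // server certificate chain
        TimeBase::now().unwrap(),
        7200,                     // lifetime_secs
        0x1234_5678,              // age_add
        0,                        // max_early_data_size
    );

    let bytes = value.get_encoding();
    let mut r = Reader::init(&bytes);
    // The suite id comes first; ClientSessionValue::read then dispatches to
    // the TLS 1.2 or TLS 1.3 decoder for the remaining fields.
    let suite_id = CipherSuite::read(&mut r).unwrap();
    let decoded = ClientSessionValue::read(&mut r, suite_id, &[supported]);
    assert!(matches!(decoded, Some(ClientSessionValue::Tls13(_))));
}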
#[cfg(feature = "tls12")]
#[derive(Debug)]
pub struct Tls12ClientSessionValue {
suite: &'static Tls12CipherSuite,
pub session_id: SessionID,
extended_ms: bool,
pub common: ClientSessionCommon,
}
#[cfg(feature = "tls12")]
impl Tls12ClientSessionValue {
pub fn new(
suite: &'static Tls12CipherSuite,
session_id: SessionID,
ticket: Vec<u8>,
master_secret: Vec<u8>,
server_cert_chain: Vec<tls_core::key::Certificate>,
time_now: TimeBase,
lifetime_secs: u32,
extended_ms: bool,
) -> Self {
Self {
suite,
session_id,
extended_ms,
common: ClientSessionCommon::new(
ticket,
master_secret,
time_now,
lifetime_secs,
server_cert_chain,
),
}
}
/// [`Codec::read()`] with an extra `suite` argument.
///
/// We decode the `suite` argument separately because it allows us to
/// decide whether we're decoding a TLS 1.2 or TLS 1.3 session value.
fn read(suite: &'static Tls12CipherSuite, r: &mut Reader) -> Option<Self> {
Some(Self {
suite,
session_id: SessionID::read(r)?,
extended_ms: u8::read(r)? == 1,
common: ClientSessionCommon::read(r)?,
})
}
/// Inherent implementation of the [`Codec::get_encoding()`] method.
///
/// (See `read()` for why this is inherent here.)
pub fn get_encoding(&self) -> Vec<u8> {
let mut bytes = Vec::with_capacity(16);
self.suite.common.suite.encode(&mut bytes);
self.session_id.encode(&mut bytes);
(if self.extended_ms { 1u8 } else { 0u8 }).encode(&mut bytes);
self.common.encode(&mut bytes);
bytes
}
pub fn take_ticket(&mut self) -> Vec<u8> {
mem::take(&mut self.common.ticket.0)
}
pub fn extended_ms(&self) -> bool {
self.extended_ms
}
pub fn suite(&self) -> &'static Tls12CipherSuite {
self.suite
}
}
#[cfg(feature = "tls12")]
impl std::ops::Deref for Tls12ClientSessionValue {
type Target = ClientSessionCommon;
fn deref(&self) -> &Self::Target {
&self.common
}
}
#[derive(Debug)]
pub struct ClientSessionCommon {
ticket: PayloadU16,
secret: PayloadU8,
epoch: u64,
lifetime_secs: u32,
server_cert_chain: CertificatePayload,
}
impl ClientSessionCommon {
fn new(
ticket: Vec<u8>,
secret: Vec<u8>,
time_now: TimeBase,
lifetime_secs: u32,
server_cert_chain: Vec<tls_core::key::Certificate>,
) -> Self {
Self {
ticket: PayloadU16(ticket),
secret: PayloadU8(secret),
epoch: time_now.as_secs(),
lifetime_secs: cmp::min(lifetime_secs, MAX_TICKET_LIFETIME),
server_cert_chain,
}
}
/// [`Codec::read()`] is inherent here to avoid leaking the [`Codec`]
/// implementation through [`Deref`] implementations on
/// [`Tls12ClientSessionValue`] and [`Tls13ClientSessionValue`].
fn read(r: &mut Reader) -> Option<Self> {
Some(Self {
ticket: PayloadU16::read(r)?,
secret: PayloadU8::read(r)?,
epoch: u64::read(r)?,
lifetime_secs: u32::read(r)?,
server_cert_chain: CertificatePayload::read(r)?,
})
}
/// [`Codec::encode()`] is inherent here to avoid leaking the [`Codec`]
/// implementation through [`Deref`] implementations on
/// [`Tls12ClientSessionValue`] and [`Tls13ClientSessionValue`].
fn encode(&self, bytes: &mut Vec<u8>) {
self.ticket.encode(bytes);
self.secret.encode(bytes);
self.epoch.encode(bytes);
self.lifetime_secs.encode(bytes);
self.server_cert_chain.encode(bytes);
}
pub fn server_cert_chain(&self) -> &[tls_core::key::Certificate] {
self.server_cert_chain.as_ref()
}
pub fn secret(&self) -> &[u8] {
self.secret.0.as_ref()
}
pub fn ticket(&self) -> &[u8] {
self.ticket.0.as_ref()
}
/// Test only: wind back epoch by delta seconds.
pub fn rewind_epoch(&mut self, delta: u32) {
self.epoch -= delta as u64;
}
}
static MAX_TICKET_LIFETIME: u32 = 7 * 24 * 60 * 60;
/// This is the maximum allowed skew between server and client clocks, over
/// the maximum ticket lifetime period. This encompasses TCP retransmission
/// times in case packet loss occurs when the client sends the ClientHello
/// or receives the NewSessionTicket, _and_ actual clock skew over this period.
static MAX_FRESHNESS_SKEW_MS: u32 = 60 * 1000;
// --- Server types ---
pub type ServerSessionKey = SessionID;
#[derive(Debug)]
pub struct ServerSessionValue {
pub sni: Option<webpki::DnsName>,
pub version: ProtocolVersion,
pub cipher_suite: CipherSuite,
pub master_secret: PayloadU8,
pub extended_ms: bool,
pub client_cert_chain: Option<CertificatePayload>,
pub alpn: Option<PayloadU8>,
pub application_data: PayloadU16,
pub creation_time_sec: u64,
pub age_obfuscation_offset: u32,
freshness: Option<bool>,
}
impl Codec for ServerSessionValue {
fn encode(&self, bytes: &mut Vec<u8>) {
if let Some(ref sni) = self.sni {
1u8.encode(bytes);
let sni_bytes: &str = sni.as_ref().into();
PayloadU8::new(Vec::from(sni_bytes)).encode(bytes);
} else {
0u8.encode(bytes);
}
self.version.encode(bytes);
self.cipher_suite.encode(bytes);
self.master_secret.encode(bytes);
(if self.extended_ms { 1u8 } else { 0u8 }).encode(bytes);
if let Some(ref chain) = self.client_cert_chain {
1u8.encode(bytes);
chain.encode(bytes);
} else {
0u8.encode(bytes);
}
if let Some(ref alpn) = self.alpn {
1u8.encode(bytes);
alpn.encode(bytes);
} else {
0u8.encode(bytes);
}
self.application_data.encode(bytes);
self.creation_time_sec.encode(bytes);
self.age_obfuscation_offset.encode(bytes);
}
fn read(r: &mut Reader) -> Option<Self> {
let has_sni = u8::read(r)?;
let sni = if has_sni == 1 {
let dns_name = PayloadU8::read(r)?;
let dns_name = webpki::DnsNameRef::try_from_ascii(&dns_name.0).ok()?;
Some(dns_name.into())
} else {
None
};
let v = ProtocolVersion::read(r)?;
let cs = CipherSuite::read(r)?;
let ms = PayloadU8::read(r)?;
let ems = u8::read(r)?;
let has_ccert = u8::read(r)? == 1;
let ccert = if has_ccert {
Some(CertificatePayload::read(r)?)
} else {
None
};
let has_alpn = u8::read(r)? == 1;
let alpn = if has_alpn {
Some(PayloadU8::read(r)?)
} else {
None
};
let application_data = PayloadU16::read(r)?;
let creation_time_sec = u64::read(r)?;
let age_obfuscation_offset = u32::read(r)?;
Some(Self {
sni,
version: v,
cipher_suite: cs,
master_secret: ms,
extended_ms: ems == 1u8,
client_cert_chain: ccert,
alpn,
application_data,
creation_time_sec,
age_obfuscation_offset,
freshness: None,
})
}
}
impl ServerSessionValue {
pub fn new(
sni: Option<&webpki::DnsName>,
v: ProtocolVersion,
cs: CipherSuite,
ms: Vec<u8>,
client_cert_chain: Option<CertificatePayload>,
alpn: Option<Vec<u8>>,
application_data: Vec<u8>,
creation_time: TimeBase,
age_obfuscation_offset: u32,
) -> Self {
Self {
sni: sni.cloned(),
version: v,
cipher_suite: cs,
master_secret: PayloadU8::new(ms),
extended_ms: false,
client_cert_chain,
alpn: alpn.map(PayloadU8::new),
application_data: PayloadU16::new(application_data),
creation_time_sec: creation_time.as_secs(),
age_obfuscation_offset,
freshness: None,
}
}
pub fn set_extended_ms_used(&mut self) {
self.extended_ms = true;
}
pub fn set_freshness(mut self, obfuscated_client_age_ms: u32, time_now: TimeBase) -> Self {
let client_age_ms = obfuscated_client_age_ms.wrapping_sub(self.age_obfuscation_offset);
let server_age_ms =
(time_now.as_secs().saturating_sub(self.creation_time_sec) as u32).saturating_mul(1000);
let age_difference = if client_age_ms < server_age_ms {
server_age_ms - client_age_ms
} else {
client_age_ms - server_age_ms
};
self.freshness = Some(age_difference <= MAX_FRESHNESS_SKEW_MS);
self
}
pub fn is_fresh(&self) -> bool {
self.freshness.unwrap_or_default()
}
}
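The obfuscated_ticket_age and set_freshness methods above implement RFC 8446's obfuscated ticket age: the client adds the server-supplied age_add to its ticket age modulo 2^32, and the server subtracts the same offset before comparing the result against the age implied by the ticket's creation time. A small worked example of that round-trip; the concrete numbers are illustrative:
fn obfuscated_age_sketch() {
    let age_add: u32 = 0xdead_beef;  // random offset issued with the ticket
    let client_age_ms: u32 = 42_000; // ticket retrieved 42 s after it was stored

    // Client side (obfuscated_ticket_age): hide the true age behind the offset.
    let obfuscated = client_age_ms.wrapping_add(age_add);

    // Server side (set_freshness): undo the offset and compare with the age
    // derived from the ticket's creation time.
    let recovered = obfuscated.wrapping_sub(age_add);
    assert_eq!(recovered, client_age_ms);

    let server_age_ms: u32 = 41_500; // server's view; 500 ms of skew and latency
    let fresh = recovered.abs_diff(server_age_ms) <= 60_000; // MAX_FRESHNESS_SKEW_MS
    assert!(fresh);
}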

View File

@@ -1,78 +0,0 @@
use super::persist::*;
use crate::ticketer::TimeBase;
use std::convert::TryInto;
use tls_core::{
key::Certificate,
msgs::{
codec::{Codec, Reader},
enums::*,
},
suites::TLS13_AES_128_GCM_SHA256,
};
#[test]
fn clientsessionkey_is_debug() {
let name = "hello".try_into().unwrap();
let csk = ClientSessionKey::session_for_server_name(&name);
println!("{:?}", csk);
}
#[test]
fn clientsessionkey_cannot_be_read() {
let bytes = [0; 1];
let mut rd = Reader::init(&bytes);
assert!(ClientSessionKey::read(&mut rd).is_none());
}
#[test]
fn clientsessionvalue_is_debug() {
let csv = ClientSessionValue::from(Tls13ClientSessionValue::new(
TLS13_AES_128_GCM_SHA256.tls13().unwrap(),
vec![],
vec![1, 2, 3],
vec![Certificate(b"abc".to_vec()), Certificate(b"def".to_vec())],
TimeBase::now().unwrap(),
15,
10,
128,
));
println!("{:?}", csv);
}
#[test]
fn serversessionvalue_is_debug() {
let ssv = ServerSessionValue::new(
None,
ProtocolVersion::TLSv1_3,
CipherSuite::TLS13_AES_128_GCM_SHA256,
vec![1, 2, 3],
None,
None,
vec![4, 5, 6],
TimeBase::now().unwrap(),
0x12345678,
);
println!("{:?}", ssv);
}
#[test]
fn serversessionvalue_no_sni() {
let bytes = [
0x00, 0x03, 0x03, 0xc0, 0x23, 0x03, 0x01, 0x02, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12,
0x23, 0x34, 0x45, 0x56, 0x67, 0x78, 0x89, 0xfe, 0xed, 0xf0, 0x0d,
];
let mut rd = Reader::init(&bytes);
let ssv = ServerSessionValue::read(&mut rd).unwrap();
assert_eq!(ssv.get_encoding(), bytes);
}
#[test]
fn serversessionvalue_with_cert() {
let bytes = [
0x00, 0x03, 0x03, 0xc0, 0x23, 0x03, 0x01, 0x02, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12,
0x23, 0x34, 0x45, 0x56, 0x67, 0x78, 0x89, 0xfe, 0xed, 0xf0, 0x0d,
];
let mut rd = Reader::init(&bytes);
let ssv = ServerSessionValue::read(&mut rd).unwrap();
assert_eq!(ssv.get_encoding(), bytes);
}

View File

@@ -6,6 +6,7 @@ use ring::{
io::der,
signature::{self, EcdsaKeyPair, Ed25519KeyPair, RsaKeyPair},
};
use rustls_pki_types as pki_types;
use std::{convert::TryFrom, error::Error as StdError, fmt, sync::Arc};
use tls_core::{
key,
@@ -71,51 +72,6 @@ impl CertifiedKey {
pub fn end_entity_cert(&self) -> Result<&key::Certificate, SignError> {
self.cert.first().ok_or(SignError(()))
}
/// Check the certificate chain for validity:
/// - it should be a non-empty list,
/// - the first certificate should be parsable as an x509v3 certificate,
/// - the first certificate should be valid for the given server name
/// (if provided).
///
/// These checks are not security-sensitive. They are the
/// *server* attempting to detect accidental misconfiguration.
pub(crate) fn cross_check_end_entity_cert(
&self,
name: Option<webpki::DnsNameRef>,
) -> Result<(), Error> {
// Always reject an empty certificate chain.
let end_entity_cert = self.end_entity_cert().map_err(|SignError(())| {
Error::General("No end-entity certificate in certificate chain".to_string())
})?;
// Reject syntactically-invalid end-entity certificates.
let end_entity_cert =
webpki::EndEntityCert::try_from(end_entity_cert.as_ref()).map_err(|_| {
Error::General(
"End-entity certificate in certificate \
chain is syntactically invalid"
.to_string(),
)
})?;
if let Some(name) = name {
// If SNI was offered then the certificate must be valid for
// that hostname. Note that this doesn't fully validate that the
// certificate is valid; it only validates that the name is one
// that the certificate is valid for, if the certificate is
// valid.
if end_entity_cert.verify_is_valid_for_dns_name(name).is_err() {
return Err(Error::General(
"The server certificate is not \
valid for the given name"
.to_string(),
));
}
}
Ok(())
}
}
/// Parse `der` as any supported key encoding/type, returning

View File

@@ -1,223 +0,0 @@
// This program benchmarks the functions in verify.rs that do certificate
// chain validation and signature verification.
//
// Note: we don't use any of the standard 'cargo bench', 'test::Bencher',
// etc. because it's unstable at the time of writing.
use crate::{anchors, verify, verify::ServerCertVerifier, OwnedTrustAnchor};
use std::convert::TryInto;
use web_time::{Duration, Instant, SystemTime};
use webpki_roots;
fn duration_nanos(d: Duration) -> u64 {
((d.as_secs() as f64) * 1e9 + (d.subsec_nanos() as f64)) as u64
}
#[test]
fn test_reddit_cert() {
Context::new(
"reddit",
"reddit.com",
&[
include_bytes!("testdata/cert-reddit.0.der"),
include_bytes!("testdata/cert-reddit.1.der"),
],
)
.bench(100)
}
#[test]
fn test_github_cert() {
Context::new(
"github",
"github.com",
&[
include_bytes!("testdata/cert-github.0.der"),
include_bytes!("testdata/cert-github.1.der"),
],
)
.bench(100)
}
#[test]
fn test_arstechnica_cert() {
Context::new(
"arstechnica",
"arstechnica.com",
&[
include_bytes!("testdata/cert-arstechnica.0.der"),
include_bytes!("testdata/cert-arstechnica.1.der"),
include_bytes!("testdata/cert-arstechnica.2.der"),
include_bytes!("testdata/cert-arstechnica.3.der"),
],
)
.bench(100)
}
#[test]
fn test_twitter_cert() {
Context::new(
"twitter",
"twitter.com",
&[
include_bytes!("testdata/cert-twitter.0.der"),
include_bytes!("testdata/cert-twitter.1.der"),
],
)
.bench(100)
}
#[test]
fn test_wikipedia_cert() {
Context::new(
"wikipedia",
"wikipedia.org",
&[
include_bytes!("testdata/cert-wikipedia.0.der"),
include_bytes!("testdata/cert-wikipedia.1.der"),
],
)
.bench(100)
}
#[test]
fn test_google_cert() {
Context::new(
"google",
"www.google.com",
&[
include_bytes!("testdata/cert-google.0.der"),
include_bytes!("testdata/cert-google.1.der"),
],
)
.bench(100)
}
#[test]
fn test_hn_cert() {
Context::new(
"hn",
"news.ycombinator.com",
&[
include_bytes!("testdata/cert-hn.0.der"),
include_bytes!("testdata/cert-hn.1.der"),
],
)
.bench(100)
}
#[test]
fn test_stackoverflow_cert() {
Context::new(
"stackoverflow",
"stackoverflow.com",
&[
include_bytes!("testdata/cert-stackoverflow.0.der"),
include_bytes!("testdata/cert-stackoverflow.1.der"),
],
)
.bench(100)
}
#[test]
fn test_duckduckgo_cert() {
Context::new(
"duckduckgo",
"duckduckgo.com",
&[
include_bytes!("testdata/cert-duckduckgo.0.der"),
include_bytes!("testdata/cert-duckduckgo.1.der"),
],
)
.bench(100)
}
#[test]
fn test_rustlang_cert() {
Context::new(
"rustlang",
"www.rust-lang.org",
&[
include_bytes!("testdata/cert-rustlang.0.der"),
include_bytes!("testdata/cert-rustlang.1.der"),
include_bytes!("testdata/cert-rustlang.2.der"),
],
)
.bench(100)
}
#[test]
fn test_wapo_cert() {
Context::new(
"wapo",
"www.washingtonpost.com",
&[
include_bytes!("testdata/cert-wapo.0.der"),
include_bytes!("testdata/cert-wapo.1.der"),
],
)
.bench(100)
}
struct Context {
name: &'static str,
domain: &'static str,
roots: anchors::RootCertStore,
chain: Vec<tls_core::key::Certificate>,
now: SystemTime,
}
impl Context {
fn new(name: &'static str, domain: &'static str, certs: &[&'static [u8]]) -> Self {
let mut roots = anchors::RootCertStore::empty();
roots.add_server_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| {
OwnedTrustAnchor::from_subject_spki_name_constraints(
ta.subject.as_ref(),
ta.subject_public_key_info.as_ref(),
ta.name_constraints.as_ref().map(|nc| nc.as_ref()),
)
}));
Self {
name,
domain,
roots,
chain: certs
.iter()
.copied()
.map(|bytes| tls_core::key::Certificate(bytes.to_vec()))
.collect(),
now: SystemTime::UNIX_EPOCH + Duration::from_secs(1640870720),
}
}
fn bench(&self, count: usize) {
let verifier = verify::WebPkiVerifier::new(self.roots.clone(), None);
const SCTS: &[&[u8]] = &[];
const OCSP_RESPONSE: &[u8] = &[];
let mut times = Vec::new();
let (end_entity, intermediates) = self.chain.split_first().unwrap();
for _ in 0..count {
let start = Instant::now();
let server_name = self.domain.try_into().unwrap();
verifier
.verify_server_cert(
end_entity,
intermediates,
&server_name,
&mut SCTS.iter().copied(),
OCSP_RESPONSE,
self.now,
)
.unwrap();
times.push(duration_nanos(Instant::now().duration_since(start)));
}
println!(
"verify_server_cert({}): min {:?}us",
self.name,
times.iter().min().unwrap() / 1000
);
}
}

View File

@@ -780,14 +780,12 @@ async fn client_checks_server_certificate_with_given_name() {
let mut server = ServerConnection::new(Arc::clone(&server_config)).unwrap();
let err = do_handshake_until_error(&mut client, &mut server).await;
assert_eq!(
assert!(matches!(
err,
Err(ErrorFromPeer::Client(Error::CoreError(
tls_core::Error::InvalidCertificateData(
"invalid peer certificate: CertNotValidForName".into(),
)
tls_core::Error::InvalidCertificateData(_)
)))
);
));
}
}
}
@@ -889,6 +887,7 @@ async fn client_error_is_sticky() {
#[tokio::test]
#[allow(clippy::no_effect)]
#[allow(clippy::unnecessary_operation)]
async fn client_is_send() {
let (client, _) = make_pair(KeyType::Rsa).await;
&client as &dyn Send;

View File

@@ -2,6 +2,7 @@
use futures::{AsyncRead, AsyncWrite};
use rustls::{server::AllowAnyAuthenticatedClient, ServerConfig, ServerConnection};
use rustls_pki_types::CertificateDer;
use std::{
convert::{TryFrom, TryInto},
io,
@@ -15,6 +16,7 @@ use tls_client::{
Certificate, ClientConfig, ClientConnection, Error, PrivateKey, RootCertStore,
RustCryptoBackend,
};
use webpki::anchor_from_trusted_cert;
macro_rules! embed_files {
(
@@ -409,9 +411,17 @@ pub fn finish_client_config(
kt: KeyType,
config: tls_client::ConfigBuilder<tls_client::WantsVerifier>,
) -> ClientConfig {
let mut root_store = RootCertStore::empty();
let mut rootbuf = io::BufReader::new(kt.bytes_for("ca.cert"));
root_store.add_parsable_certificates(&rustls_pemfile::certs(&mut rootbuf).unwrap());
let roots = rustls_pemfile::certs(&mut rootbuf)
.unwrap()
.into_iter()
.map(|cert| {
let der = CertificateDer::from_slice(&cert);
anchor_from_trusted_cert(&der).unwrap().to_owned()
})
.collect();
let root_store = RootCertStore { roots };
config
.with_root_certificates(root_store)
@@ -422,9 +432,17 @@ pub fn finish_client_config_with_creds(
kt: KeyType,
config: tls_client::ConfigBuilder<tls_client::WantsVerifier>,
) -> ClientConfig {
let mut root_store = RootCertStore::empty();
let mut rootbuf = io::BufReader::new(kt.bytes_for("ca.cert"));
root_store.add_parsable_certificates(&rustls_pemfile::certs(&mut rootbuf).unwrap());
let roots = rustls_pemfile::certs(&mut rootbuf)
.unwrap()
.into_iter()
.map(|cert| {
let der = CertificateDer::from_slice(&cert);
anchor_from_trusted_cert(&der).unwrap().to_owned()
})
.collect();
let root_store = RootCertStore { roots };
config
.with_root_certificates(root_store)
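Both finish_client_config and finish_client_config_with_creds now build the root store the same way. If more call sites appear, a small helper could factor the duplication out; a sketch only, reusing the imports already added to this file, with load_root_store as a hypothetical name:
// Hypothetical helper; mirrors the construction used in the two functions above.
fn load_root_store(kt: KeyType) -> RootCertStore {
    let mut rootbuf = io::BufReader::new(kt.bytes_for("ca.cert"));
    let roots = rustls_pemfile::certs(&mut rootbuf)
        .unwrap()
        .into_iter()
        .map(|cert| {
            let der = CertificateDer::from_slice(&cert);
            anchor_from_trusted_cert(&der).unwrap().to_owned()
        })
        .collect();
    RootCertStore { roots }
}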

Some files were not shown because too many files have changed in this diff.