refactor(core): alpha.7 rewrite (#574)

* refactor(core): alpha.7 rewrite

* allow empty idx

* fix empty assumption

* further encapsulate rangeset

* added presentation, finishing touches

* remove unwrap

* update index naming

* add secp256r1 support

* add attestation to presentation output, and serde derives

* handle k256 in KeyAlgId Display

* unnecessary newline

* refactor(core): integrate rewrite changes (#584)

* refactor(core): alpha.7 rewrite

* allow empty idx

* fix empty assumption

* further encapsulate rangeset

* added presentation, finishing touches

* remove unwrap

* refactor(core): integrate rewrite changes

* remove obsolete tests

* add secp256r1 support

* update index naming

* add secp256r1 support

* add attestation to presentation output, and serde derives

* handle k256 in KeyAlgId Display

* unnecessary newline

* fix variable name

* restore changes from dev to tlsn-prover

* use CryptoProvider in config

* clippy

* more clippy
This commit is contained in:
sinu.eth
2024-09-19 07:57:54 -07:00
committed by GitHub
parent a4a0de02f9
commit 53ff873b3a
145 changed files with 7589 additions and 5028 deletions

View File

@@ -10,6 +10,7 @@ members = [
"crates/components/stream-cipher",
"crates/components/universal-hash",
"crates/core",
"crates/data-fixtures",
"crates/examples",
"crates/formats",
"crates/notary/client",
@@ -25,9 +26,9 @@ members = [
"crates/tls/core",
"crates/tls/mpc",
"crates/tls/server-fixture",
"crates/wasm-test-runner",
"crates/verifier",
"crates/wasm",
"crates/wasm-test-runner",
]
resolver = "2"
@@ -43,6 +44,7 @@ tlsn-aead = { path = "crates/components/aead" }
tlsn-block-cipher = { path = "crates/components/block-cipher" }
tlsn-common = { path = "crates/common" }
tlsn-core = { path = "crates/core" }
tlsn-data-fixtures = { path = "crates/data-fixtures" }
tlsn-formats = { path = "crates/formats" }
tlsn-hmac-sha256 = { path = "crates/components/hmac-sha256" }
tlsn-hmac-sha256-circuits = { path = "crates/components/hmac-sha256-circuits" }
@@ -57,8 +59,8 @@ tlsn-tls-client-async = { path = "crates/tls/client-async" }
tlsn-tls-core = { path = "crates/tls/core" }
tlsn-tls-mpc = { path = "crates/tls/mpc" }
tlsn-universal-hash = { path = "crates/components/universal-hash" }
tlsn-utils = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "45370cc" }
tlsn-utils-aio = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "45370cc" }
tlsn-utils = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "e7b2db6" }
tlsn-utils-aio = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "e7b2db6" }
tlsn-verifier = { path = "crates/verifier" }
mpz-circuits = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
@@ -71,7 +73,7 @@ mpz-ot = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b
mpz-share-conversion = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
serio = { version = "0.1" }
spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "45370cc" }
spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "e7b2db6" }
uid-mux = { version = "0.1", features = ["serio"] }
aes = { version = "0.8" }
@@ -80,7 +82,9 @@ anyhow = { version = "1.0" }
async-trait = { version = "0.1" }
async-tungstenite = { version = "0.25" }
axum = { version = "0.7" }
bcs = { version = "0.1" }
bincode = { version = "1.3" }
blake3 = { version = "1.5" }
bytes = { version = "1.4" }
chrono = { version = "0.4" }
cipher = { version = "0.4" }
@@ -101,17 +105,19 @@ http = { version = "1.1" }
http-body-util = { version = "0.1" }
hyper = { version = "1.1" }
hyper-util = { version = "0.1" }
k256 = { version = "0.13" }
log = { version = "0.4" }
once_cell = { version = "1.19" }
opaque-debug = { version = "0.3" }
p256 = { version = "0.13" }
pkcs8 = { version = "0.10" }
pin-project-lite = { version = "0.2" }
rand = { version = "0.8" }
rand_chacha = { version = "0.3" }
rand_core = { version = "0.6" }
regex = { version = "1.10" }
ring = { version = "0.17" }
rs_merkle = { version = "1.4" }
rs_merkle = { git = "https://github.com/tlsnotary/rs-merkle.git", rev = "7fb354c" }
rstest = { version = "0.17" }
rustls = { version = "0.21" }
rustls-pemfile = { version = "1.0" }
@@ -131,3 +137,4 @@ web-time = { version = "0.2" }
webpki = { version = "0.22" }
webpki-roots = { version = "0.26" }
ws_stream_tungstenite = { version = "0.13" }
zeroize = { version = "1.8" }

View File

@@ -9,14 +9,14 @@ use std::{
use anyhow::Context;
use futures::{AsyncReadExt, AsyncWriteExt};
use tls_core::verify::WebPkiVerifier;
use tlsn_benches::{
config::{BenchInstance, Config},
metrics::Metrics,
set_interface, PROVER_INTERFACE,
};
use tlsn_common::config::ProtocolConfig;
use tlsn_core::Direction;
use tlsn_core::{transcript::Idx, CryptoProvider};
use tlsn_server_fixture::bind;
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
use tokio::io::{AsyncRead, AsyncWrite};
@@ -25,7 +25,7 @@ use tokio_util::{
io::{InspectReader, InspectWriter},
};
use tlsn_prover::tls::{Prover, ProverConfig};
use tlsn_prover::{Prover, ProverConfig};
use tracing_subscriber::{fmt::format::FmtSpan, EnvFilter};
#[tokio::main]
@@ -117,6 +117,11 @@ async fn run_instance<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>
let start_time = Instant::now();
let provider = CryptoProvider {
cert: WebPkiVerifier::new(root_store(), None),
..Default::default()
};
let protocol_config = if defer_decryption {
ProtocolConfig::builder()
.max_sent_data(upload_size + 256)
@@ -134,11 +139,10 @@ async fn run_instance<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>
let prover = Prover::new(
ProverConfig::builder()
.id("test")
.server_dns(SERVER_DOMAIN)
.root_cert_store(root_store())
.server_name(SERVER_DOMAIN)
.protocol_config(protocol_config)
.defer_decryption_from_start(defer_decryption)
.crypto_provider(provider)
.build()
.context("invalid prover config")?,
)
@@ -163,12 +167,10 @@ async fn run_instance<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>
let mut prover = prover_task.await??.start_prove();
prover.reveal(0..prover.sent_transcript().data().len(), Direction::Sent)?;
prover.reveal(
0..prover.recv_transcript().data().len(),
Direction::Received,
)?;
prover.prove().await?;
let (sent_len, recv_len) = prover.transcript().len();
prover
.prove_transcript(Idx::new(0..sent_len), Idx::new(0..recv_len))
.await?;
prover.finalize().await?;
Ok(Metrics {

View File

@@ -9,8 +9,9 @@ use tlsn_benches::{
set_interface, VERIFIER_INTERFACE,
};
use tlsn_common::config::ProtocolConfigValidator;
use tlsn_core::CryptoProvider;
use tlsn_server_fixture_certs::CA_CERT_DER;
use tlsn_verifier::tls::{Verifier, VerifierConfig};
use tlsn_verifier::{Verifier, VerifierConfig};
#[tokio::main]
async fn main() -> anyhow::Result<()> {
@@ -64,6 +65,11 @@ async fn run_instance<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>
set_interface(VERIFIER_INTERFACE, download, 1, download_delay)?;
let provider = CryptoProvider {
cert: cert_verifier(),
..Default::default()
};
let config_validator = ProtocolConfigValidator::builder()
.max_sent_data(upload_size + 256)
.max_recv_data(download_size + 256)
@@ -72,9 +78,8 @@ async fn run_instance<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>
let verifier = Verifier::new(
VerifierConfig::builder()
.id("test")
.cert_verifier(cert_verifier())
.protocol_config_validator(config_validator)
.crypto_provider(provider)
.build()?,
);

View File

@@ -8,6 +8,7 @@ edition = "2021"
default = []
[dependencies]
tlsn-core = { workspace = true }
mpz-common = { workspace = true }
mpz-garble = { workspace = true }
mpz-ot = { workspace = true }
@@ -15,12 +16,12 @@ mpz-ot = { workspace = true }
derive_builder = { workspace = true }
futures = { workspace = true }
once_cell = { workspace = true }
serde = { workspace = true }
serio = { workspace = true, features = ["codec", "bincode"] }
thiserror = { workspace = true }
tracing = { workspace = true }
uid-mux = { workspace = true, features = ["serio"] }
serde = { workspace = true, features = ["derive"] }
tlsn-utils = { workspace = true }
semver = { version = "1.0", features = ["serde"] }
[dev-dependencies]

View File

@@ -5,6 +5,7 @@
#![forbid(unsafe_code)]
pub mod config;
pub mod msg;
pub mod mux;
use serio::codec::Codec;

14
crates/common/src/msg.rs Normal file
View File

@@ -0,0 +1,14 @@
//! Message types.
use serde::{Deserialize, Serialize};
use tlsn_core::connection::{ServerCertData, ServerName};
/// Message sent from Prover to Verifier to prove the server identity.
#[derive(Debug, Serialize, Deserialize)]
pub struct ServerIdentityProof {
/// Server name.
pub name: ServerName,
/// Server identity data.
pub data: ServerCertData,
}

View File

@@ -16,28 +16,35 @@ fixtures = ["dep:hex"]
tlsn-tls-core = { workspace = true, features = ["serde"] }
tlsn-utils = { workspace = true }
mpz-circuits = { workspace = true }
mpz-core = { workspace = true }
mpz-garble-core = { workspace = true }
mpz-circuits = { workspace = true }
bimap = { version = "0.6.3", features = ["serde"] }
bytes = { workspace = true, features = ["serde"] }
bcs = { workspace = true }
bimap = { version = "0.6", features = ["serde"] }
blake3 = { workspace = true }
derive_builder = { workspace = true }
hex = { workspace = true, optional = true }
k256 = { workspace = true }
opaque-debug = { workspace = true }
p256 = { workspace = true, features = ["serde"] }
rs_merkle = { workspace = true }
rand = { workspace = true }
rs_merkle = { workspace = true, features = ["serde"] }
rstest = { workspace = true, optional = true }
serde = { workspace = true }
sha2 = { workspace = true }
thiserror = { workspace = true }
tiny-keccak = { version = "2.0", features = ["keccak"] }
web-time = { workspace = true }
webpki-roots = { workspace = true }
[dev-dependencies]
bincode = { workspace = true }
hex = { workspace = true }
rand_chacha = { workspace = true }
rand_core = { workspace = true }
rstest = { workspace = true }
hex = { workspace = true }
rand_core = { workspace = true }
rand_chacha = { workspace = true }
bincode = { workspace = true }
tlsn-data-fixtures = { workspace = true }
[[test]]
name = "api"

View File

@@ -0,0 +1,254 @@
//! Attestation types.
//!
//! An attestation is a cryptographically signed document from a Notary who witnessed a TLS
//! connection. It contains various fields which can be used to verify statements about the
//! connection and the associated application data.
//!
//! Attestations contain a header and a body. The header is signed by a Notary and it contains
//! a merkle root of the body fields. This allows a Prover to disclose only necessary fields
//! to a Verifier depending on the statements being made.
mod builder;
mod config;
mod proof;
use std::fmt;
use rand::distributions::{Distribution, Standard};
use serde::{Deserialize, Serialize};
use crate::{
connection::{ConnectionInfo, ServerCertCommitment, ServerEphemKey},
hash::{impl_domain_separator, Hash, HashAlgorithm, HashAlgorithmExt, TypedHash},
index::Index,
merkle::MerkleTree,
presentation::PresentationBuilder,
signing::{Signature, VerifyingKey},
transcript::{encoding::EncodingCommitment, hash::PlaintextHash},
CryptoProvider,
};
pub use builder::{AttestationBuilder, AttestationBuilderError};
pub use config::AttestationConfig;
pub use proof::{AttestationError, AttestationProof};
/// Current version of attestations.
pub const VERSION: Version = Version(0);
/// Unique identifier for an attestation.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct Uid(pub [u8; 16]);
impl From<[u8; 16]> for Uid {
fn from(id: [u8; 16]) -> Self {
Self(id)
}
}
impl Distribution<Uid> for Standard {
fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> Uid {
Uid(self.sample(rng))
}
}
/// Version of an attestation.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct Version(u32);
impl_domain_separator!(Version);
/// Public attestation field.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Field<T> {
/// Identifier of the field.
pub id: FieldId,
/// Field data.
pub data: T,
}
/// Identifier for a field.
#[derive(
Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize,
)]
pub struct FieldId(pub u32);
impl FieldId {
pub(crate) fn next<T>(&mut self, data: T) -> Field<T> {
let id = *self;
self.0 += 1;
Field { id, data }
}
}
impl fmt::Display for FieldId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}
/// Kind of an attestation field.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[repr(u8)]
pub enum FieldKind {
/// Connection information.
ConnectionInfo = 0x01,
/// Server ephemeral key.
ServerEphemKey = 0x02,
/// Server identity commitment.
ServerIdentityCommitment = 0x03,
/// Encoding commitment.
EncodingCommitment = 0x04,
/// Plaintext hash commitment.
PlaintextHash = 0x05,
}
/// Attestation header.
///
/// A header is the data structure which is signed by the Notary. It contains
/// a unique identifier, the protocol version, and a Merkle root of the
/// attestation fields.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct Header {
/// An identifier for the attestation.
pub id: Uid,
/// Version of the attestation.
pub version: Version,
/// Merkle root of the attestation fields.
pub root: TypedHash,
}
impl_domain_separator!(Header);
/// Attestation body.
///
/// An attestation contains a set of fields which are cryptographically signed by
/// a Notary via a [`Header`]. These fields include data which can be
/// used to verify aspects of a TLS connection, such as the server's identity, and facts
/// about the transcript.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Body {
verifying_key: Field<VerifyingKey>,
connection_info: Field<ConnectionInfo>,
server_ephemeral_key: Field<ServerEphemKey>,
cert_commitment: Field<ServerCertCommitment>,
encoding_commitment: Option<Field<EncodingCommitment>>,
plaintext_hashes: Index<Field<PlaintextHash>>,
}
impl Body {
/// Returns the attestation verifying key.
pub fn verifying_key(&self) -> &VerifyingKey {
&self.verifying_key.data
}
/// Computes the Merkle root of the attestation fields.
///
/// This is only used when building an attestation.
pub(crate) fn root(&self, hasher: &dyn HashAlgorithm) -> TypedHash {
let mut tree = MerkleTree::new(hasher.id());
let fields = self
.hash_fields(hasher)
.into_iter()
.map(|(_, hash)| hash)
.collect::<Vec<_>>();
tree.insert(hasher, fields);
tree.root()
}
/// Returns the fields of the body hashed and sorted by id.
///
/// Each field is hashed with a domain separator to mitigate type confusion attacks.
///
/// # Note
///
/// The order of fields is not stable across versions.
pub(crate) fn hash_fields(&self, hasher: &dyn HashAlgorithm) -> Vec<(FieldId, Hash)> {
// CRITICAL: ensure all fields are included! If a new field is added to the struct
// without including it here it will not be verified to be included in the attestation.
let Self {
verifying_key,
connection_info: conn_info,
server_ephemeral_key,
cert_commitment,
encoding_commitment,
plaintext_hashes,
} = self;
let mut fields: Vec<(FieldId, Hash)> = vec![
(verifying_key.id, hasher.hash_separated(&verifying_key.data)),
(conn_info.id, hasher.hash_separated(&conn_info.data)),
(
server_ephemeral_key.id,
hasher.hash_separated(&server_ephemeral_key.data),
),
(
cert_commitment.id,
hasher.hash_separated(&cert_commitment.data),
),
];
if let Some(encoding_commitment) = encoding_commitment {
fields.push((
encoding_commitment.id,
hasher.hash_separated(&encoding_commitment.data),
));
}
for field in plaintext_hashes.iter() {
fields.push((field.id, hasher.hash_separated(&field.data)));
}
fields.sort_by_key(|(id, _)| *id);
fields
}
/// Returns the connection information.
pub(crate) fn connection_info(&self) -> &ConnectionInfo {
&self.connection_info.data
}
pub(crate) fn server_ephemeral_key(&self) -> &ServerEphemKey {
&self.server_ephemeral_key.data
}
pub(crate) fn cert_commitment(&self) -> &ServerCertCommitment {
&self.cert_commitment.data
}
/// Returns the encoding commitment.
pub(crate) fn encoding_commitment(&self) -> Option<&EncodingCommitment> {
self.encoding_commitment.as_ref().map(|field| &field.data)
}
/// Returns the plaintext hash commitments.
pub(crate) fn plaintext_hashes(&self) -> &Index<Field<PlaintextHash>> {
&self.plaintext_hashes
}
}
/// An attestation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Attestation {
/// The signature of the attestation.
pub signature: Signature,
/// The attestation header.
pub header: Header,
/// The attestation body.
pub body: Body,
}
impl Attestation {
/// Returns an attestation builder.
pub fn builder(config: &AttestationConfig) -> AttestationBuilder<'_> {
AttestationBuilder::new(config)
}
/// Returns a presentation builder.
pub fn presentation_builder<'a>(
&'a self,
provider: &'a CryptoProvider,
) -> PresentationBuilder<'a> {
PresentationBuilder::new(provider, self)
}
}

View File

@@ -0,0 +1,239 @@
use std::error::Error;
use rand::{thread_rng, Rng};
use crate::{
attestation::{
Attestation, AttestationConfig, Body, EncodingCommitment, FieldId, FieldKind, Header,
ServerCertCommitment, VERSION,
},
connection::{ConnectionInfo, ServerEphemKey},
hash::{HashAlgId, TypedHash},
request::Request,
serialize::CanonicalSerialize,
signing::SignatureAlgId,
CryptoProvider,
};
/// Attestation builder state for accepting a request.
pub struct Accept {}
pub struct Sign {
signature_alg: SignatureAlgId,
hash_alg: HashAlgId,
connection_info: Option<ConnectionInfo>,
server_ephemeral_key: Option<ServerEphemKey>,
cert_commitment: ServerCertCommitment,
encoding_commitment_root: Option<TypedHash>,
encoding_seed: Option<Vec<u8>>,
}
/// An attestation builder.
pub struct AttestationBuilder<'a, T = Accept> {
config: &'a AttestationConfig,
state: T,
}
impl<'a> AttestationBuilder<'a, Accept> {
/// Creates a new attestation builder.
pub fn new(config: &'a AttestationConfig) -> Self {
Self {
config,
state: Accept {},
}
}
/// Accepts the attestation request.
pub fn accept_request(
self,
request: Request,
) -> Result<AttestationBuilder<'a, Sign>, AttestationBuilderError> {
let config = self.config;
let Request {
signature_alg,
hash_alg,
server_cert_commitment: cert_commitment,
encoding_commitment_root,
} = request;
if !config.supported_signature_algs().contains(&signature_alg) {
return Err(AttestationBuilderError::new(
ErrorKind::Request,
format!("unsupported signature algorithm: {signature_alg}"),
));
}
if !config.supported_hash_algs().contains(&hash_alg) {
return Err(AttestationBuilderError::new(
ErrorKind::Request,
format!("unsupported hash algorithm: {hash_alg}"),
));
}
if encoding_commitment_root.is_some()
&& !config
.supported_fields()
.contains(&FieldKind::EncodingCommitment)
{
return Err(AttestationBuilderError::new(
ErrorKind::Request,
"encoding commitment is not supported",
));
}
Ok(AttestationBuilder {
config: self.config,
state: Sign {
signature_alg,
hash_alg,
connection_info: None,
server_ephemeral_key: None,
cert_commitment,
encoding_commitment_root,
encoding_seed: None,
},
})
}
}
impl AttestationBuilder<'_, Sign> {
/// Sets the connection information.
pub fn connection_info(&mut self, connection_info: ConnectionInfo) -> &mut Self {
self.state.connection_info = Some(connection_info);
self
}
/// Sets the server ephemeral key.
pub fn server_ephemeral_key(&mut self, key: ServerEphemKey) -> &mut Self {
self.state.server_ephemeral_key = Some(key);
self
}
/// Sets the encoding seed.
pub fn encoding_seed(&mut self, seed: Vec<u8>) -> &mut Self {
self.state.encoding_seed = Some(seed);
self
}
/// Builds the attestation.
pub fn build(self, provider: &CryptoProvider) -> Result<Attestation, AttestationBuilderError> {
let Sign {
signature_alg,
hash_alg,
connection_info,
server_ephemeral_key,
cert_commitment,
encoding_commitment_root,
encoding_seed,
} = self.state;
let hasher = provider.hash.get(&hash_alg).map_err(|_| {
AttestationBuilderError::new(
ErrorKind::Config,
format!("accepted hash algorithm {hash_alg} but it's missing in the provider"),
)
})?;
let signer = provider.signer.get(&signature_alg).map_err(|_| {
AttestationBuilderError::new(
ErrorKind::Config,
format!(
"accepted signature algorithm {signature_alg} but it's missing in the provider"
),
)
})?;
let encoding_commitment = if let Some(root) = encoding_commitment_root {
let Some(seed) = encoding_seed else {
return Err(AttestationBuilderError::new(
ErrorKind::Field,
"encoding commitment requested but seed was not set",
));
};
Some(EncodingCommitment { root, seed })
} else {
None
};
let mut field_id = FieldId::default();
let body = Body {
verifying_key: field_id.next(signer.verifying_key()),
connection_info: field_id.next(connection_info.ok_or_else(|| {
AttestationBuilderError::new(ErrorKind::Field, "connection info was not set")
})?),
server_ephemeral_key: field_id.next(server_ephemeral_key.ok_or_else(|| {
AttestationBuilderError::new(ErrorKind::Field, "handshake data was not set")
})?),
cert_commitment: field_id.next(cert_commitment),
encoding_commitment: encoding_commitment.map(|commitment| field_id.next(commitment)),
plaintext_hashes: Default::default(),
};
let header = Header {
id: thread_rng().gen(),
version: VERSION,
root: body.root(hasher),
};
let signature = signer
.sign(&CanonicalSerialize::serialize(&header))
.map_err(|err| AttestationBuilderError::new(ErrorKind::Signature, err))?;
Ok(Attestation {
signature,
header,
body,
})
}
}
/// Error for [`AttestationBuilder`].
#[derive(Debug, thiserror::Error)]
pub struct AttestationBuilderError {
kind: ErrorKind,
source: Option<Box<dyn Error + Send + Sync + 'static>>,
}
#[derive(Debug)]
enum ErrorKind {
Request,
Config,
Field,
Signature,
}
impl AttestationBuilderError {
fn new<E>(kind: ErrorKind, error: E) -> Self
where
E: Into<Box<dyn Error + Send + Sync + 'static>>,
{
Self {
kind,
source: Some(error.into()),
}
}
/// Returns whether the error originates from a bad request.
pub fn is_request(&self) -> bool {
matches!(self.kind, ErrorKind::Request)
}
}
impl std::fmt::Display for AttestationBuilderError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self.kind {
ErrorKind::Request => f.write_str("request error")?,
ErrorKind::Config => f.write_str("config error")?,
ErrorKind::Field => f.write_str("field error")?,
ErrorKind::Signature => f.write_str("signature error")?,
}
if let Some(source) = &self.source {
write!(f, " caused by: {}", source)?;
}
Ok(())
}
}

View File

@@ -0,0 +1,123 @@
use crate::{
attestation::FieldKind,
hash::{HashAlgId, DEFAULT_SUPPORTED_HASH_ALGS},
signing::SignatureAlgId,
};
const DEFAULT_SUPPORTED_FIELDS: &[FieldKind] = &[
FieldKind::ConnectionInfo,
FieldKind::ServerEphemKey,
FieldKind::ServerIdentityCommitment,
FieldKind::EncodingCommitment,
];
#[derive(Debug)]
#[allow(dead_code)]
enum ErrorKind {
Builder,
}
impl std::fmt::Display for ErrorKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ErrorKind::Builder => write!(f, "builder"),
}
}
}
#[derive(Debug, thiserror::Error)]
#[error("attestation config error: kind: {kind}, reason: {reason}")]
pub struct AttestationConfigError {
kind: ErrorKind,
reason: String,
}
impl AttestationConfigError {
#[allow(dead_code)]
fn builder(reason: impl Into<String>) -> Self {
Self {
kind: ErrorKind::Builder,
reason: reason.into(),
}
}
}
/// Attestation configuration.
#[derive(Debug, Clone)]
pub struct AttestationConfig {
supported_signature_algs: Vec<SignatureAlgId>,
supported_hash_algs: Vec<HashAlgId>,
supported_fields: Vec<FieldKind>,
}
impl AttestationConfig {
/// Creates a new builder.
pub fn builder() -> AttestationConfigBuilder {
AttestationConfigBuilder::default()
}
pub(crate) fn supported_signature_algs(&self) -> &[SignatureAlgId] {
&self.supported_signature_algs
}
pub(crate) fn supported_hash_algs(&self) -> &[HashAlgId] {
&self.supported_hash_algs
}
pub(crate) fn supported_fields(&self) -> &[FieldKind] {
&self.supported_fields
}
}
/// Builder for [`AttestationConfig`].
#[derive(Debug)]
pub struct AttestationConfigBuilder {
supported_signature_algs: Vec<SignatureAlgId>,
supported_hash_algs: Vec<HashAlgId>,
supported_fields: Vec<FieldKind>,
}
impl Default for AttestationConfigBuilder {
fn default() -> Self {
Self {
supported_signature_algs: Vec::default(),
supported_hash_algs: DEFAULT_SUPPORTED_HASH_ALGS.to_vec(),
supported_fields: DEFAULT_SUPPORTED_FIELDS.to_vec(),
}
}
}
impl AttestationConfigBuilder {
/// Sets the supported signature algorithms.
pub fn supported_signature_algs(
&mut self,
supported_signature_algs: impl Into<Vec<SignatureAlgId>>,
) -> &mut Self {
self.supported_signature_algs = supported_signature_algs.into();
self
}
/// Sets the supported hash algorithms.
pub fn supported_hash_algs(
&mut self,
supported_hash_algs: impl Into<Vec<HashAlgId>>,
) -> &mut Self {
self.supported_hash_algs = supported_hash_algs.into();
self
}
/// Sets the supported attestation fields.
pub fn supported_fields(&mut self, supported_fields: impl Into<Vec<FieldKind>>) -> &mut Self {
self.supported_fields = supported_fields.into();
self
}
/// Builds the configuration.
pub fn build(&self) -> Result<AttestationConfig, AttestationConfigError> {
Ok(AttestationConfig {
supported_signature_algs: self.supported_signature_algs.clone(),
supported_hash_algs: self.supported_hash_algs.clone(),
supported_fields: self.supported_fields.clone(),
})
}
}

View File

@@ -0,0 +1,168 @@
use std::fmt;
use serde::{Deserialize, Serialize};
use crate::{
attestation::{Attestation, Body, Header},
hash::HashAlgorithm,
merkle::{MerkleProof, MerkleTree},
serialize::CanonicalSerialize,
signing::Signature,
CryptoProvider,
};
/// Proof of an attestation.
#[derive(Debug, Serialize, Deserialize)]
pub struct AttestationProof {
signature: Signature,
header: Header,
body: BodyProof,
}
impl AttestationProof {
pub(crate) fn new(
provider: &CryptoProvider,
attestation: &Attestation,
) -> Result<Self, AttestationError> {
let hasher = provider
.hash
.get(&attestation.header.root.alg)
.map_err(|e| AttestationError::new(ErrorKind::Provider, e))?;
let body = BodyProof::new(hasher, attestation.body.clone())?;
Ok(Self {
signature: attestation.signature.clone(),
header: attestation.header.clone(),
body,
})
}
/// Verifies the attestation proof.
///
/// # Arguments
///
/// * `provider` - Cryptography provider.
/// * `verifying_key` - Verifying key for the Notary signature.
pub fn verify(self, provider: &CryptoProvider) -> Result<Attestation, AttestationError> {
let signature_verifier = provider
.signature
.get(&self.signature.alg)
.map_err(|e| AttestationError::new(ErrorKind::Provider, e))?;
// Verify body corresponding to the header.
let body = self.body.verify_with_provider(provider, &self.header)?;
// Verify signature of the header.
signature_verifier
.verify(
&body.verifying_key.data,
&CanonicalSerialize::serialize(&self.header),
&self.signature.data,
)
.map_err(|e| AttestationError::new(ErrorKind::Signature, e))?;
Ok(Attestation {
signature: self.signature,
header: self.header,
body,
})
}
}
/// Proof of an attestation body.
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct BodyProof {
body: Body,
proof: MerkleProof,
}
impl BodyProof {
/// Returns a new body proof.
// TODO: Support including a subset of fields instead of the entire body.
pub(crate) fn new(
hasher: &dyn HashAlgorithm,
body: Body,
) -> Result<BodyProof, AttestationError> {
let (indices, leaves): (Vec<_>, Vec<_>) = body
.hash_fields(hasher)
.into_iter()
.map(|(id, hash)| (id.0 as usize, hash))
.unzip();
let mut tree = MerkleTree::new(hasher.id());
tree.insert(hasher, leaves);
let proof = tree.proof(&indices);
Ok(BodyProof { body, proof })
}
/// Verifies the proof against the attestation header.
pub(crate) fn verify_with_provider(
self,
provider: &CryptoProvider,
header: &Header,
) -> Result<Body, AttestationError> {
let hasher = provider
.hash
.get(&header.root.alg)
.map_err(|e| AttestationError::new(ErrorKind::Provider, e))?;
let fields = self
.body
.hash_fields(hasher)
.into_iter()
.map(|(id, hash)| (id.0 as usize, hash));
self.proof
.verify(hasher, &header.root, fields)
.map_err(|e| AttestationError::new(ErrorKind::Body, e))?;
Ok(self.body)
}
}
/// Error for [`AttestationProof`].
#[derive(Debug, thiserror::Error)]
pub struct AttestationError {
kind: ErrorKind,
source: Option<Box<dyn std::error::Error + Send + Sync>>,
}
impl AttestationError {
fn new<E>(kind: ErrorKind, source: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
Self {
kind,
source: Some(source.into()),
}
}
}
impl fmt::Display for AttestationError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("attestation proof error: ")?;
match self.kind {
ErrorKind::Provider => f.write_str("provider error")?,
ErrorKind::Signature => f.write_str("signature error")?,
ErrorKind::Body => f.write_str("body proof error")?,
}
if let Some(source) = &self.source {
write!(f, " caused by: {}", source)?;
}
Ok(())
}
}
#[derive(Debug)]
enum ErrorKind {
Provider,
Signature,
Body,
}

View File

@@ -1,105 +0,0 @@
use crate::commitment::{Commitment, CommitmentOpening};
use mpz_core::{
commit::{Decommitment, HashCommit, Nonce},
hash::Hash,
};
use mpz_garble_core::{encoding_state, encoding_state::Full, EncodedValue};
use serde::{Deserialize, Serialize};
/// A Blake3 commitment to the encodings of the substrings of a [`Transcript`](crate::Transcript).
#[derive(Clone, Copy, Serialize, Deserialize)]
pub struct Blake3Commitment {
hash: Hash,
nonce: Nonce,
}
opaque_debug::implement!(Blake3Commitment);
impl Blake3Commitment {
/// Creates a new Blake3 commitment
pub fn new(encodings: &[EncodedValue<encoding_state::Active>]) -> Self {
let (decommitment, hash) = encodings.hash_commit();
Self {
hash,
nonce: *decommitment.nonce(),
}
}
/// Returns the hash of this commitment
pub fn hash(&self) -> &Hash {
&self.hash
}
/// Returns the nonce of this commitment
pub fn nonce(&self) -> &Nonce {
&self.nonce
}
/// Opens this commitment
pub fn open(&self, data: Vec<u8>) -> Blake3Opening {
Blake3Opening::new(data, self.nonce)
}
}
impl From<Blake3Commitment> for Commitment {
fn from(value: Blake3Commitment) -> Self {
Self::Blake3(value)
}
}
/// A substring opening using Blake3
#[derive(Serialize, Deserialize, Clone)]
pub struct Blake3Opening {
data: Vec<u8>,
nonce: Nonce,
}
impl Blake3Opening {
pub(crate) fn new(data: Vec<u8>, nonce: Nonce) -> Self {
Self { data, nonce }
}
/// Recovers the expected commitment from this opening.
///
/// # Panics
///
/// - If the number of encodings does not match the number of bytes in the opening.
/// - If an encoding is not for a u8.
pub fn recover(&self, encodings: &[EncodedValue<Full>]) -> Blake3Commitment {
assert_eq!(
encodings.len(),
self.data.len(),
"encodings and data must have the same length"
);
let encodings = encodings
.iter()
.zip(&self.data)
.map(|(encoding, data)| encoding.select(*data).expect("encoding is for a u8"))
.collect::<Vec<_>>();
let hash = Decommitment::new_with_nonce(encodings, self.nonce).commit();
Blake3Commitment {
hash,
nonce: self.nonce,
}
}
/// Returns the transcript data corresponding to this opening
pub fn data(&self) -> &[u8] {
&self.data
}
/// Returns the transcript data corresponding to this opening
pub fn into_data(self) -> Vec<u8> {
self.data
}
}
impl From<Blake3Opening> for CommitmentOpening {
fn from(value: Blake3Opening) -> Self {
Self::Blake3(value)
}
}

View File

@@ -1,191 +0,0 @@
use std::collections::HashMap;
use bimap::BiMap;
use mpz_core::hash::Hash;
use utils::range::{RangeSet, ToRangeSet};
use crate::{
commitment::{
blake3::Blake3Commitment, Commitment, CommitmentId, CommitmentInfo, CommitmentKind,
TranscriptCommitments,
},
merkle::MerkleTree,
transcript::get_value_ids,
Direction, EncodingProvider,
};
/// An error for [`TranscriptCommitmentBuilder`]
#[derive(Debug, thiserror::Error)]
pub enum TranscriptCommitmentBuilderError {
    /// Empty range
    #[error("can not commit to an empty range")]
    EmptyRange,
    /// Range out of bounds
    #[error("range out of bounds: {upper_commitment} > {upper_transcript}")]
    RangeOutOfBounds {
        /// The upper bound of the commitment range
        upper_commitment: usize,
        /// The upper bound of the transcript range
        upper_transcript: usize,
    },
    /// Failed to retrieve encodings for the provided transcript ranges.
    #[error("failed to retrieve encodings for the provided transcript ranges")]
    MissingEncodings,
    /// Duplicate commitment
    #[error("attempted to create a duplicate commitment, overwriting: {0:?}")]
    Duplicate(CommitmentId),
    /// No commitments were added
    #[error("no commitments were added")]
    NoCommitments,
}

/// A builder for [`TranscriptCommitments`].
pub struct TranscriptCommitmentBuilder {
    // Commitments keyed by id; the id is the commitment's merkle-leaf index.
    commitments: HashMap<CommitmentId, Commitment>,
    /// Information about the above `commitments`.
    commitment_info: BiMap<CommitmentId, CommitmentInfo>,
    // One hash per commitment, in insertion order.
    merkle_leaves: Vec<Hash>,
    /// A function that returns the encodings for the provided transcript byte ids.
    encoding_provider: EncodingProvider,
    // Total byte length of the sent transcript.
    sent_len: usize,
    // Total byte length of the received transcript.
    recv_len: usize,
}

opaque_debug::implement!(TranscriptCommitmentBuilder);
impl TranscriptCommitmentBuilder {
    /// Creates a new builder.
    ///
    /// # Arguments
    ///
    /// * `encoding_provider` - A function that returns the encodings for the provided transcript byte ids.
    /// * `sent_len` - Total byte length of the sent transcript.
    /// * `recv_len` - Total byte length of the received transcript.
    #[doc(hidden)]
    pub fn new(encoding_provider: EncodingProvider, sent_len: usize, recv_len: usize) -> Self {
        Self {
            commitments: HashMap::default(),
            commitment_info: BiMap::default(),
            merkle_leaves: Vec::default(),
            encoding_provider,
            sent_len,
            recv_len,
        }
    }

    /// Commits to the provided ranges of the `sent` transcript.
    pub fn commit_sent(
        &mut self,
        ranges: &dyn ToRangeSet<usize>,
    ) -> Result<CommitmentId, TranscriptCommitmentBuilderError> {
        self.add_substrings_commitment(&ranges.to_range_set(), Direction::Sent)
    }

    /// Commits to the provided ranges of the `received` transcript.
    pub fn commit_recv(
        &mut self,
        ranges: &dyn ToRangeSet<usize>,
    ) -> Result<CommitmentId, TranscriptCommitmentBuilderError> {
        self.add_substrings_commitment(&ranges.to_range_set(), Direction::Received)
    }

    /// Commits to the provided ranges of the transcript.
    ///
    /// Dispatches to [`Self::commit_sent`] or [`Self::commit_recv`] based on
    /// `direction`.
    pub fn commit(
        &mut self,
        ranges: &dyn ToRangeSet<usize>,
        direction: Direction,
    ) -> Result<CommitmentId, TranscriptCommitmentBuilderError> {
        match direction {
            Direction::Sent => self.commit_sent(ranges),
            Direction::Received => self.commit_recv(ranges),
        }
    }

    /// Gets the commitment id for the provided commitment info.
    ///
    /// Returns `None` if no commitment with matching kind, ranges and
    /// direction has been added.
    pub fn get_id(
        &self,
        kind: CommitmentKind,
        ranges: impl Into<RangeSet<usize>>,
        direction: Direction,
    ) -> Option<CommitmentId> {
        self.commitment_info
            .get_by_right(&CommitmentInfo {
                kind,
                ranges: ranges.into(),
                direction,
            })
            .copied()
    }

    /// Add a commitment to substrings of the transcript
    fn add_substrings_commitment(
        &mut self,
        ranges: &RangeSet<usize>,
        direction: Direction,
    ) -> Result<CommitmentId, TranscriptCommitmentBuilderError> {
        // An empty range set has no maximum and cannot be committed to.
        let max = ranges
            .max()
            .ok_or(TranscriptCommitmentBuilderError::EmptyRange)?;
        let len = match direction {
            Direction::Sent => self.sent_len,
            Direction::Received => self.recv_len,
        };
        // NOTE(review): this assumes `RangeSet::max()` returns the exclusive
        // upper bound; if it instead returns the greatest contained index,
        // the check is off by one (it would accept `max == len`) — confirm
        // against the `utils::range` crate.
        if max > len {
            return Err(TranscriptCommitmentBuilderError::RangeOutOfBounds {
                upper_commitment: max,
                upper_transcript: len,
            });
        }

        // Resolve each byte in the ranges to its value id, then fetch the
        // corresponding encodings from the provider.
        let ids: Vec<_> = get_value_ids(ranges, direction).collect();

        let id_refs = ids.iter().map(|id| id.as_ref()).collect::<Vec<_>>();

        let encodings = (self.encoding_provider)(&id_refs)
            .ok_or(TranscriptCommitmentBuilderError::MissingEncodings)?;

        // We only support BLAKE3 for now
        let commitment = Blake3Commitment::new(&encodings);
        let hash = *commitment.hash();

        // The id doubles as the merkle-leaf index.
        // NOTE(review): `as u32` silently truncates above u32::MAX leaves —
        // presumably unreachable in practice, but worth confirming.
        let id = CommitmentId::new(self.merkle_leaves.len() as u32);

        let commitment: Commitment = commitment.into();

        // Store commitment with its id
        self.commitment_info
            .insert_no_overwrite(
                id,
                CommitmentInfo::new(commitment.kind(), ranges.clone(), direction),
            )
            .map_err(|(id, _)| TranscriptCommitmentBuilderError::Duplicate(id))?;

        if self.commitments.insert(id, commitment).is_some() {
            // This shouldn't be possible, as we check for duplicates above.
            panic!("commitment id already exists");
        }

        // Insert commitment hash into the merkle tree
        self.merkle_leaves.push(hash);

        Ok(id)
    }

    /// Builds the [`TranscriptCommitments`]
    ///
    /// Fails with `NoCommitments` if nothing was committed (the merkle tree
    /// cannot be built from zero leaves).
    pub fn build(self) -> Result<TranscriptCommitments, TranscriptCommitmentBuilderError> {
        let Self {
            commitments,
            commitment_info,
            merkle_leaves,
            ..
        } = self;

        let merkle_tree = MerkleTree::from_leaves(&merkle_leaves)
            .map_err(|_| TranscriptCommitmentBuilderError::NoCommitments)?;

        Ok(TranscriptCommitments {
            merkle_tree,
            commitments,
            commitment_info,
        })
    }
}

View File

@@ -1,193 +0,0 @@
//! Types related to transcript commitments.
/// BLAKE3 commitments.
pub mod blake3;
mod builder;
use std::collections::HashMap;
use bimap::BiMap;
use mpz_core::hash::Hash;
use mpz_garble_core::{encoding_state::Full, EncodedValue};
use serde::{Deserialize, Serialize};
use utils::range::RangeSet;
use crate::{
merkle::{MerkleRoot, MerkleTree},
Direction,
};
pub use builder::{TranscriptCommitmentBuilder, TranscriptCommitmentBuilderError};
/// A commitment id.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct CommitmentId(u32);

impl CommitmentId {
    /// Creates a new commitment id
    pub(crate) fn new(id: u32) -> Self {
        Self(id)
    }

    /// Returns the inner value
    pub(crate) fn to_inner(self) -> u32 {
        self.0
    }
}

/// Info of a transcript commitment
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct CommitmentInfo {
    // Hash/commitment scheme used.
    pub(crate) kind: CommitmentKind,
    // Byte ranges of the transcript covered by the commitment.
    pub(crate) ranges: RangeSet<usize>,
    // Whether the committed bytes were sent or received.
    pub(crate) direction: Direction,
}

impl CommitmentInfo {
    /// Creates new commitment info.
    pub(crate) fn new(kind: CommitmentKind, ranges: RangeSet<usize>, direction: Direction) -> Self {
        Self {
            kind,
            ranges,
            direction,
        }
    }

    /// Returns the kind of this commitment
    pub fn kind(&self) -> CommitmentKind {
        self.kind
    }

    /// Returns the ranges of this commitment
    pub fn ranges(&self) -> &RangeSet<usize> {
        &self.ranges
    }

    /// Returns the direction of this commitment
    pub fn direction(&self) -> &Direction {
        &self.direction
    }
}

/// A commitment to some bytes in a transcript
#[derive(Clone, Serialize, Deserialize)]
#[non_exhaustive]
pub enum Commitment {
    /// A BLAKE3 commitment to encodings of the transcript.
    Blake3(blake3::Blake3Commitment),
}

impl Commitment {
    /// Returns the hash of this commitment
    pub fn hash(&self) -> Hash {
        match self {
            Commitment::Blake3(commitment) => *commitment.hash(),
        }
    }

    /// Returns the kind of this commitment
    pub fn kind(&self) -> CommitmentKind {
        match self {
            Commitment::Blake3(_) => CommitmentKind::Blake3,
        }
    }
}

/// The kind of a [`Commitment`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[non_exhaustive]
pub enum CommitmentKind {
    /// A BLAKE3 commitment to encodings of the transcript.
    Blake3,
}
/// An opening to a commitment to the transcript.
#[derive(Clone, Serialize, Deserialize)]
#[non_exhaustive]
pub enum CommitmentOpening {
/// An opening to a BLAKE3 commitment
Blake3(blake3::Blake3Opening),
}
impl CommitmentOpening {
/// Returns the kind of this opening
pub fn kind(&self) -> CommitmentKind {
match self {
CommitmentOpening::Blake3(_) => CommitmentKind::Blake3,
}
}
/// Recovers the expected commitment from this opening.
///
/// # Panics
///
/// Implementations may panic if the following conditions are not met:
///
/// - If the number of encodings does not match the number of bytes in the opening.
/// - If an encoding is not for a u8.
pub fn recover(&self, encodings: &[EncodedValue<Full>]) -> Commitment {
match self {
CommitmentOpening::Blake3(opening) => opening.recover(encodings).into(),
}
}
/// Returns the transcript data corresponding to this opening
pub fn data(&self) -> &[u8] {
match self {
CommitmentOpening::Blake3(opening) => opening.data(),
}
}
/// Returns the transcript data corresponding to this opening
pub fn into_data(self) -> Vec<u8> {
match self {
CommitmentOpening::Blake3(opening) => opening.into_data(),
}
}
}
/// A collection of transcript commitments.
#[derive(Clone, Serialize, Deserialize)]
pub struct TranscriptCommitments {
    /// A Merkle tree of commitments. Each commitment's index in the tree matches its `CommitmentId`.
    merkle_tree: MerkleTree,
    // Commitments keyed by their id.
    commitments: HashMap<CommitmentId, Commitment>,
    /// Information about the above `commitments`.
    commitment_info: BiMap<CommitmentId, CommitmentInfo>,
}

opaque_debug::implement!(TranscriptCommitments);

impl TranscriptCommitments {
    /// Returns the merkle tree of the commitments.
    pub fn merkle_tree(&self) -> &MerkleTree {
        &self.merkle_tree
    }

    /// Returns the merkle root of the commitments.
    pub fn merkle_root(&self) -> MerkleRoot {
        self.merkle_tree.root()
    }

    /// Returns a commitment if it exists.
    pub fn get(&self, id: &CommitmentId) -> Option<&Commitment> {
        self.commitments.get(id)
    }

    /// Returns the commitment id for a commitment with the given info, if it exists.
    pub fn get_id_by_info(
        &self,
        kind: CommitmentKind,
        ranges: &RangeSet<usize>,
        direction: Direction,
    ) -> Option<CommitmentId> {
        // Reverse lookup via the bimap: info -> id.
        self.commitment_info
            .get_by_right(&CommitmentInfo::new(kind, ranges.clone(), direction))
            .copied()
    }

    /// Returns commitment info, if it exists.
    pub fn get_info(&self, id: &CommitmentId) -> Option<&CommitmentInfo> {
        self.commitment_info.get_by_left(id)
    }
}

View File

@@ -0,0 +1,376 @@
//! TLS connection types.
//!
//! ## Commitment
//!
//! During the TLS handshake the Notary receives the Server's ephemeral public key, and this key
//! serves as a binding commitment to the identity of the Server. The ephemeral key itself does not
//! reveal the Server's identity, but it is bound to it via a signature created using the Server's
//! X.509 certificate.
//!
//! A Prover can withhold the Server's signature and certificate chain from the Notary to
//! improve privacy and censorship resistance.
//!
//! ## Proving the Server's identity
//!
//! A Prover can prove the Server's identity to a Verifier by sending a [`ServerIdentityProof`]. This
//! proof contains all the information required to establish the link between the TLS connection
//! and the Server's X.509 certificate. A Verifier checks the Server's certificate against their own trust
//! anchors, the same way a typical TLS client would.
mod commit;
mod proof;
use std::fmt;
use serde::{Deserialize, Serialize};
use tls_core::{
msgs::{
codec::Codec,
enums::NamedGroup,
handshake::{DigitallySignedStruct, ServerECDHParams},
},
verify::ServerCertVerifier as _,
};
use web_time::{Duration, UNIX_EPOCH};
use crate::{hash::impl_domain_separator, CryptoProvider};
pub use commit::{ServerCertCommitment, ServerCertOpening};
pub use proof::{ServerIdentityProof, ServerIdentityProofError};
/// TLS version.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum TlsVersion {
    /// TLS 1.2.
    V1_2,
    /// TLS 1.3.
    V1_3,
}

impl TryFrom<tls_core::msgs::enums::ProtocolVersion> for TlsVersion {
    type Error = &'static str;

    // Maps the wire protocol version onto the local enum; any version other
    // than 1.2 / 1.3 is rejected.
    fn try_from(value: tls_core::msgs::enums::ProtocolVersion) -> Result<Self, Self::Error> {
        Ok(match value {
            tls_core::msgs::enums::ProtocolVersion::TLSv1_2 => TlsVersion::V1_2,
            tls_core::msgs::enums::ProtocolVersion::TLSv1_3 => TlsVersion::V1_3,
            _ => return Err("unsupported TLS version"),
        })
    }
}
/// Server's name, a.k.a. the DNS name.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct ServerName(String);

impl ServerName {
    /// Creates a new server name.
    pub fn new(name: String) -> Self {
        Self(name)
    }

    /// Returns the name as a string.
    pub fn as_str(&self) -> &str {
        self.0.as_str()
    }
}

impl From<&str> for ServerName {
    fn from(name: &str) -> Self {
        Self(name.to_owned())
    }
}

impl AsRef<str> for ServerName {
    fn as_ref(&self) -> &str {
        self.0.as_ref()
    }
}

impl fmt::Display for ServerName {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.0)
    }
}
/// Type of a public key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
#[non_exhaustive]
#[allow(non_camel_case_types)]
pub enum KeyType {
    /// secp256r1.
    // Discriminant presumably mirrors the TLS NamedGroup code point — TODO confirm.
    SECP256R1 = 0x0017,
}

/// Signature scheme on the key exchange parameters.
// Discriminants presumably mirror the TLS SignatureScheme code points
// (RFC 8446 §4.2.3) — TODO confirm.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
#[allow(non_camel_case_types, missing_docs)]
pub enum SignatureScheme {
    RSA_PKCS1_SHA1 = 0x0201,
    ECDSA_SHA1_Legacy = 0x0203,
    RSA_PKCS1_SHA256 = 0x0401,
    ECDSA_NISTP256_SHA256 = 0x0403,
    RSA_PKCS1_SHA384 = 0x0501,
    ECDSA_NISTP384_SHA384 = 0x0503,
    RSA_PKCS1_SHA512 = 0x0601,
    ECDSA_NISTP521_SHA512 = 0x0603,
    RSA_PSS_SHA256 = 0x0804,
    RSA_PSS_SHA384 = 0x0805,
    RSA_PSS_SHA512 = 0x0806,
    ED25519 = 0x0807,
}
impl TryFrom<tls_core::msgs::enums::SignatureScheme> for SignatureScheme {
type Error = &'static str;
fn try_from(value: tls_core::msgs::enums::SignatureScheme) -> Result<Self, Self::Error> {
use tls_core::msgs::enums::SignatureScheme as Core;
use SignatureScheme::*;
Ok(match value {
Core::RSA_PKCS1_SHA1 => RSA_PKCS1_SHA1,
Core::ECDSA_SHA1_Legacy => ECDSA_SHA1_Legacy,
Core::RSA_PKCS1_SHA256 => RSA_PKCS1_SHA256,
Core::ECDSA_NISTP256_SHA256 => ECDSA_NISTP256_SHA256,
Core::RSA_PKCS1_SHA384 => RSA_PKCS1_SHA384,
Core::ECDSA_NISTP384_SHA384 => ECDSA_NISTP384_SHA384,
Core::RSA_PKCS1_SHA512 => RSA_PKCS1_SHA512,
Core::ECDSA_NISTP521_SHA512 => ECDSA_NISTP521_SHA512,
Core::RSA_PSS_SHA256 => RSA_PSS_SHA256,
Core::RSA_PSS_SHA384 => RSA_PSS_SHA384,
Core::RSA_PSS_SHA512 => RSA_PSS_SHA512,
Core::ED25519 => ED25519,
_ => return Err("unsupported signature scheme"),
})
}
}
impl From<SignatureScheme> for tls_core::msgs::enums::SignatureScheme {
fn from(value: SignatureScheme) -> Self {
use tls_core::msgs::enums::SignatureScheme::*;
match value {
SignatureScheme::RSA_PKCS1_SHA1 => RSA_PKCS1_SHA1,
SignatureScheme::ECDSA_SHA1_Legacy => ECDSA_SHA1_Legacy,
SignatureScheme::RSA_PKCS1_SHA256 => RSA_PKCS1_SHA256,
SignatureScheme::ECDSA_NISTP256_SHA256 => ECDSA_NISTP256_SHA256,
SignatureScheme::RSA_PKCS1_SHA384 => RSA_PKCS1_SHA384,
SignatureScheme::ECDSA_NISTP384_SHA384 => ECDSA_NISTP384_SHA384,
SignatureScheme::RSA_PKCS1_SHA512 => RSA_PKCS1_SHA512,
SignatureScheme::ECDSA_NISTP521_SHA512 => ECDSA_NISTP521_SHA512,
SignatureScheme::RSA_PSS_SHA256 => RSA_PSS_SHA256,
SignatureScheme::RSA_PSS_SHA384 => RSA_PSS_SHA384,
SignatureScheme::RSA_PSS_SHA512 => RSA_PSS_SHA512,
SignatureScheme::ED25519 => ED25519,
}
}
}
/// X.509 certificate, DER encoded.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Certificate(pub Vec<u8>);

impl From<tls_core::key::Certificate> for Certificate {
    fn from(cert: tls_core::key::Certificate) -> Self {
        Self(cert.0)
    }
}

/// Server's signature of the key exchange parameters.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServerSignature {
    /// Signature scheme.
    pub scheme: SignatureScheme,
    /// Signature data.
    pub sig: Vec<u8>,
}

/// Server's ephemeral public key.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct ServerEphemKey {
    /// Type of the public key.
    #[serde(rename = "type")]
    pub typ: KeyType,
    /// Public key data.
    pub key: Vec<u8>,
}

impl_domain_separator!(ServerEphemKey);
impl ServerEphemKey {
    /// Encodes the key exchange parameters as in TLS.
    pub(crate) fn kx_params(&self) -> Vec<u8> {
        // Translate the key type into the corresponding named group.
        let group = match self.typ {
            KeyType::SECP256R1 => NamedGroup::secp256r1,
        };

        let params = ServerECDHParams::new(group, &self.key);

        let mut encoded = Vec::new();
        params.encode(&mut encoded);
        encoded
    }
}
impl TryFrom<tls_core::key::PublicKey> for ServerEphemKey {
    type Error = &'static str;

    fn try_from(value: tls_core::key::PublicKey) -> Result<Self, Self::Error> {
        // Only secp256r1 keys are representable.
        match value.group {
            tls_core::msgs::enums::NamedGroup::secp256r1 => Ok(ServerEphemKey {
                typ: KeyType::SECP256R1,
                key: value.key,
            }),
            _ => Err("unsupported key type"),
        }
    }
}
/// TLS session information.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ConnectionInfo {
    /// UNIX time when the TLS connection started.
    pub time: u64,
    /// TLS version used in the connection.
    pub version: TlsVersion,
    /// Transcript length.
    pub transcript_length: TranscriptLength,
}

impl_domain_separator!(ConnectionInfo);

/// Transcript length information.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct TranscriptLength {
    /// Number of bytes sent by the Prover to the Server.
    pub sent: u32,
    /// Number of bytes received by the Prover from the Server.
    pub received: u32,
}

/// TLS 1.2 handshake data.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HandshakeDataV1_2 {
    /// Client random.
    pub client_random: [u8; 32],
    /// Server random.
    pub server_random: [u8; 32],
    /// Server's ephemeral public key.
    pub server_ephemeral_key: ServerEphemKey,
}

/// TLS handshake data.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
#[non_exhaustive]
pub enum HandshakeData {
    /// TLS 1.2 handshake data.
    V1_2(HandshakeDataV1_2),
}

impl_domain_separator!(HandshakeData);

/// Server certificate and handshake data.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServerCertData {
    /// Certificate chain.
    pub certs: Vec<Certificate>,
    /// Server signature of the key exchange parameters.
    pub sig: ServerSignature,
    /// TLS handshake data.
    pub handshake: HandshakeData,
}
impl ServerCertData {
    /// Verifies the server certificate data.
    ///
    /// # Arguments
    ///
    /// * `provider` - The crypto provider to use for verification.
    /// * `time` - The time of the connection.
    /// * `server_ephemeral_key` - The server's ephemeral key.
    /// * `server_name` - The server name.
    pub fn verify_with_provider(
        &self,
        provider: &CryptoProvider,
        time: u64,
        server_ephemeral_key: &ServerEphemKey,
        server_name: &ServerName,
    ) -> Result<(), CertificateVerificationError> {
        // Only a TLS 1.2 variant exists today, so the pattern is irrefutable.
        #[allow(irrefutable_let_patterns)]
        let HandshakeData::V1_2(HandshakeDataV1_2 {
            client_random,
            server_random,
            server_ephemeral_key: expected_server_ephemeral_key,
        }) = &self.handshake
        else {
            unreachable!("only TLS 1.2 is implemented")
        };

        // The caller-provided ephemeral key must match the one bound into
        // the handshake data.
        if server_ephemeral_key != expected_server_ephemeral_key {
            return Err(CertificateVerificationError::InvalidServerEphemeralKey);
        }

        // Verify server name
        let server_name = tls_core::dns::ServerName::try_from(server_name.as_ref())
            .map_err(|_| CertificateVerificationError::InvalidIdentity(server_name.clone()))?;

        // Verify server certificate
        let cert_chain = self
            .certs
            .clone()
            .into_iter()
            .map(|cert| tls_core::key::Certificate(cert.0))
            .collect::<Vec<_>>();

        // First certificate is the end-entity cert, the rest are
        // intermediates.
        let (end_entity, intermediates) = cert_chain
            .split_first()
            .ok_or(CertificateVerificationError::MissingCerts)?;

        // Verify the end entity cert is valid for the provided server name
        // and that it chains to at least one of the roots we trust.
        provider
            .cert
            .verify_server_cert(
                end_entity,
                intermediates,
                &server_name,
                &mut [].into_iter(),
                &[],
                UNIX_EPOCH + Duration::from_secs(time),
            )
            .map_err(|_| CertificateVerificationError::InvalidCert)?;

        // Verify the signature matches the certificate and key exchange parameters.
        // The signed message is client_random || server_random || kx_params.
        let mut message = Vec::new();
        message.extend_from_slice(client_random);
        message.extend_from_slice(server_random);
        message.extend_from_slice(&server_ephemeral_key.kx_params());

        let dss = DigitallySignedStruct::new(self.sig.scheme.into(), self.sig.sig.clone());
        provider
            .cert
            .verify_tls12_signature(&message, end_entity, &dss)
            .map_err(|_| CertificateVerificationError::InvalidServerSignature)?;

        Ok(())
    }
}
/// Errors that can occur when verifying a certificate chain or signature.
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
pub enum CertificateVerificationError {
    // The provided server name is not a valid DNS name.
    #[error("invalid server identity: {0}")]
    InvalidIdentity(ServerName),
    // The certificate chain was empty.
    #[error("missing server certificates")]
    MissingCerts,
    // Chain validation against the trust anchors failed.
    #[error("invalid server certificate")]
    InvalidCert,
    // The signature over the key exchange parameters did not verify.
    #[error("invalid server signature")]
    InvalidServerSignature,
    // The provided ephemeral key did not match the handshake data.
    #[error("invalid server ephemeral key")]
    InvalidServerEphemeralKey,
}

View File

@@ -0,0 +1,40 @@
//! Types for committing details of a connection.
use serde::{Deserialize, Serialize};
use crate::{
connection::ServerCertData,
hash::{impl_domain_separator, Blinded, HashAlgorithm, HashAlgorithmExt, TypedHash},
};
/// Opens a [`ServerCertCommitment`].
#[derive(Clone, Serialize, Deserialize)]
pub struct ServerCertOpening(Blinded<ServerCertData>);

impl_domain_separator!(ServerCertOpening);

opaque_debug::implement!(ServerCertOpening);

impl ServerCertOpening {
    pub(crate) fn new(data: ServerCertData) -> Self {
        Self(Blinded::new(data))
    }

    // Hashes the blinded data (domain-separated) to produce the commitment.
    pub(crate) fn commit(&self, hasher: &dyn HashAlgorithm) -> ServerCertCommitment {
        ServerCertCommitment(TypedHash {
            alg: hasher.id(),
            value: hasher.hash_separated(self),
        })
    }

    /// Returns the server identity data.
    pub fn data(&self) -> &ServerCertData {
        self.0.data()
    }
}

/// Commitment to a server certificate.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ServerCertCommitment(pub(crate) TypedHash);

impl_domain_separator!(ServerCertCommitment);

View File

@@ -0,0 +1,103 @@
//! Types for proving details of a connection.
use serde::{Deserialize, Serialize};
use crate::{
connection::{
commit::{ServerCertCommitment, ServerCertOpening},
CertificateVerificationError, ServerEphemKey, ServerName,
},
hash::{HashAlgorithmExt, HashProviderError},
CryptoProvider,
};
/// TLS server identity proof.
#[derive(Debug, Serialize, Deserialize)]
pub struct ServerIdentityProof {
    // Claimed DNS name of the server.
    name: ServerName,
    // Opening of the server certificate commitment.
    opening: ServerCertOpening,
}
impl ServerIdentityProof {
    pub(crate) fn new(name: ServerName, opening: ServerCertOpening) -> Self {
        Self { name, opening }
    }

    /// Verifies the server identity proof.
    ///
    /// # Arguments
    ///
    /// * `provider` - The crypto provider to use for verification.
    /// * `time` - The time of the connection.
    /// * `server_ephemeral_key` - The server's ephemeral key.
    /// * `commitment` - Commitment to the server certificate.
    pub fn verify_with_provider(
        self,
        provider: &CryptoProvider,
        time: u64,
        server_ephemeral_key: &ServerEphemKey,
        commitment: &ServerCertCommitment,
    ) -> Result<ServerName, ServerIdentityProofError> {
        // Recompute the commitment using the algorithm recorded in it and
        // check it matches.
        let hasher = provider.hash.get(&commitment.0.alg)?;

        if commitment.0.value != hasher.hash_separated(&self.opening) {
            return Err(ServerIdentityProofError {
                kind: ErrorKind::Commitment,
                message: "certificate opening does not match commitment".to_string(),
            });
        }

        // Verify certificate and identity.
        self.opening.data().verify_with_provider(
            provider,
            time,
            server_ephemeral_key,
            &self.name,
        )?;

        Ok(self.name)
    }
}
/// Error for [`ServerIdentityProof`].
#[derive(Debug, thiserror::Error)]
#[error("server identity proof error: {kind}: {message}")]
pub struct ServerIdentityProofError {
    // Broad category of failure; rendered via `Display`.
    kind: ErrorKind,
    // Human-readable detail.
    message: String,
}

impl From<HashProviderError> for ServerIdentityProofError {
    fn from(err: HashProviderError) -> Self {
        Self {
            kind: ErrorKind::Provider,
            message: err.to_string(),
        }
    }
}

impl From<CertificateVerificationError> for ServerIdentityProofError {
    fn from(err: CertificateVerificationError) -> Self {
        Self {
            kind: ErrorKind::Certificate,
            message: err.to_string(),
        }
    }
}
// Broad failure categories for `ServerIdentityProofError`.
#[derive(Debug)]
enum ErrorKind {
    Provider,
    Commitment,
    Certificate,
}

impl std::fmt::Display for ErrorKind {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            ErrorKind::Provider => "provider",
            ErrorKind::Commitment => "commitment",
            ErrorKind::Certificate => "certificate",
        };
        f.write_str(label)
    }
}

127
crates/core/src/fixtures.rs Normal file
View File

@@ -0,0 +1,127 @@
//! Fixtures for testing
mod provider;
pub use provider::ChaChaProvider;
use hex::FromHex;
use p256::ecdsa::SigningKey;
use crate::{
connection::{
Certificate, ConnectionInfo, HandshakeData, HandshakeDataV1_2, KeyType, ServerCertData,
ServerEphemKey, ServerName, ServerSignature, SignatureScheme, TlsVersion, TranscriptLength,
},
transcript::{encoding::EncodingProvider, Transcript},
};
/// A fixture containing various TLS connection data.
#[allow(missing_docs)]
pub struct ConnectionFixture {
    // DNS name of the fixture server.
    pub server_name: ServerName,
    // Session metadata: time, TLS version, transcript length.
    pub connection_info: ConnectionInfo,
    // Certificate chain, signature and handshake data for the fixture.
    pub server_cert_data: ServerCertData,
}
impl ConnectionFixture {
    /// Returns a connection fixture for tlsnotary.org.
    ///
    /// Chain order is end-entity, intermediate, CA. All byte fixtures are
    /// loaded from `fixtures/data/tlsnotary.org/` at compile time; the
    /// hex-encoded ones are decoded with `FromHex`.
    pub fn tlsnotary(transcript_length: TranscriptLength) -> Self {
        ConnectionFixture {
            server_name: ServerName::new("tlsnotary.org".to_string()),
            connection_info: ConnectionInfo {
                // Fixed handshake timestamp shared by both fixtures.
                time: 1671637529,
                version: TlsVersion::V1_2,
                transcript_length,
            },
            server_cert_data: ServerCertData {
                certs: vec![
                    Certificate(include_bytes!("fixtures/data/tlsnotary.org/ee.der").to_vec()),
                    Certificate(include_bytes!("fixtures/data/tlsnotary.org/inter.der").to_vec()),
                    Certificate(include_bytes!("fixtures/data/tlsnotary.org/ca.der").to_vec()),
                ],
                sig: ServerSignature {
                    scheme: SignatureScheme::RSA_PKCS1_SHA256,
                    sig: Vec::<u8>::from_hex(include_bytes!(
                        "fixtures/data/tlsnotary.org/signature"
                    ))
                    .unwrap(),
                },
                handshake: HandshakeData::V1_2(HandshakeDataV1_2 {
                    client_random: <[u8; 32]>::from_hex(include_bytes!(
                        "fixtures/data/tlsnotary.org/client_random"
                    ))
                    .unwrap(),
                    server_random: <[u8; 32]>::from_hex(include_bytes!(
                        "fixtures/data/tlsnotary.org/server_random"
                    ))
                    .unwrap(),
                    server_ephemeral_key: ServerEphemKey {
                        typ: KeyType::SECP256R1,
                        key: Vec::<u8>::from_hex(include_bytes!(
                            "fixtures/data/tlsnotary.org/pubkey"
                        ))
                        .unwrap(),
                    },
                }),
            },
        }
    }

    /// Returns a connection fixture for appliedzkp.org.
    ///
    /// Mirrors [`Self::tlsnotary`] but uses the `appliedzkp.org` data files.
    pub fn appliedzkp(transcript_length: TranscriptLength) -> Self {
        ConnectionFixture {
            server_name: ServerName::new("appliedzkp.org".to_string()),
            connection_info: ConnectionInfo {
                time: 1671637529,
                version: TlsVersion::V1_2,
                transcript_length,
            },
            server_cert_data: ServerCertData {
                certs: vec![
                    Certificate(include_bytes!("fixtures/data/appliedzkp.org/ee.der").to_vec()),
                    Certificate(include_bytes!("fixtures/data/appliedzkp.org/inter.der").to_vec()),
                    Certificate(include_bytes!("fixtures/data/appliedzkp.org/ca.der").to_vec()),
                ],
                sig: ServerSignature {
                    scheme: SignatureScheme::RSA_PKCS1_SHA256,
                    sig: Vec::<u8>::from_hex(include_bytes!(
                        "fixtures/data/appliedzkp.org/signature"
                    ))
                    .unwrap(),
                },
                handshake: HandshakeData::V1_2(HandshakeDataV1_2 {
                    client_random: <[u8; 32]>::from_hex(include_bytes!(
                        "fixtures/data/appliedzkp.org/client_random"
                    ))
                    .unwrap(),
                    server_random: <[u8; 32]>::from_hex(include_bytes!(
                        "fixtures/data/appliedzkp.org/server_random"
                    ))
                    .unwrap(),
                    server_ephemeral_key: ServerEphemKey {
                        typ: KeyType::SECP256R1,
                        key: Vec::<u8>::from_hex(include_bytes!(
                            "fixtures/data/appliedzkp.org/pubkey"
                        ))
                        .unwrap(),
                    },
                }),
            },
        }
    }
}
/// Returns an encoding provider fixture.
///
/// Builds a [`ChaChaProvider`] over a transcript constructed from the given
/// sent (`tx`) and received (`rx`) bytes, seeded with [`encoder_seed`].
pub fn encoding_provider(tx: &[u8], rx: &[u8]) -> impl EncodingProvider {
    let transcript = Transcript::new(tx, rx);
    ChaChaProvider::new(encoder_seed(), transcript)
}
/// Returns an encoder seed fixture.
///
/// The all-zero seed keeps fixture encodings deterministic across runs.
pub fn encoder_seed() -> [u8; 32] {
    [0; 32]
}
/// Returns a notary signing key fixture.
///
/// A fixed 32-byte secret yields a deterministic key for tests.
pub fn notary_signing_key() -> SigningKey {
    let secret = [1u8; 32];
    SigningKey::from_slice(&secret).unwrap()
}

View File

@@ -1,130 +0,0 @@
use tls_core::{
key::{Certificate, PublicKey},
msgs::{
codec::Codec,
enums::{NamedGroup, SignatureScheme},
handshake::{DigitallySignedStruct, Random, ServerECDHParams},
},
};
use hex::FromHex;
/// Collects data needed for testing
pub struct TestData {
    /// end-entity cert
    pub ee: Certificate,
    /// intermediate cert
    pub inter: Certificate,
    /// CA cert
    pub ca: Certificate,
    /// client random
    pub cr: Random,
    /// server random
    pub sr: Random,
    /// server ephemeral P256 pubkey
    pub pubkey: PublicKey,
    /// server signature over the key exchange parameters
    /// (i.e. over [`TestData::signature_msg`])
    pub sig: Vec<u8>,
    /// unix time when TLS handshake began
    pub time: u64,
    /// algorithm used to create the sig
    pub sig_scheme: SignatureScheme,
    /// DNS name of the website
    pub dns_name: String,
}
impl TestData {
    /// Returns the [ServerECDHParams] in encoded form
    pub fn kx_params(&self) -> Vec<u8> {
        let ecdh_params = ServerECDHParams::new(NamedGroup::secp256r1, &self.pubkey.key);
        let mut encoded = Vec::new();
        ecdh_params.encode(&mut encoded);
        encoded
    }

    /// Returns the [DigitallySignedStruct]
    pub fn dss(&self) -> DigitallySignedStruct {
        DigitallySignedStruct::new(self.sig_scheme, self.sig.clone())
    }

    /// Returns the client random + server random + kx params in encoded form
    pub fn signature_msg(&self) -> Vec<u8> {
        // client_random || server_random || kx_params
        let mut message = Vec::new();
        message.extend_from_slice(&self.cr.0);
        message.extend_from_slice(&self.sr.0);
        message.extend_from_slice(&self.kx_params());
        message
    }
}
/// Returns test data for the tlsnotary.org website
///
/// All fixtures are loaded at compile time from
/// `testdata/key_exchange/tlsnotary.org/`; the non-DER ones are hex-encoded
/// and decoded with `FromHex`. This server signs with RSA-PKCS1-SHA256
/// (cf. [`appliedzkp`], which uses ECDSA).
pub fn tlsnotary() -> TestData {
    TestData {
        ee: Certificate(include_bytes!("testdata/key_exchange/tlsnotary.org/ee.der").to_vec()),
        inter: Certificate(
            include_bytes!("testdata/key_exchange/tlsnotary.org/inter.der").to_vec(),
        ),
        ca: Certificate(include_bytes!("testdata/key_exchange/tlsnotary.org/ca.der").to_vec()),
        cr: Random(
            <[u8; 32]>::from_hex(include_bytes!(
                "testdata/key_exchange/tlsnotary.org/client_random"
            ))
            .unwrap(),
        ),
        sr: Random(
            <[u8; 32]>::from_hex(include_bytes!(
                "testdata/key_exchange/tlsnotary.org/server_random"
            ))
            .unwrap(),
        ),
        pubkey: PublicKey::new(
            NamedGroup::secp256r1,
            &Vec::<u8>::from_hex(include_bytes!("testdata/key_exchange/tlsnotary.org/pubkey"))
                .unwrap(),
        ),
        sig: Vec::<u8>::from_hex(include_bytes!(
            "testdata/key_exchange/tlsnotary.org/signature"
        ))
        .unwrap(),
        time: 1671637529,
        sig_scheme: SignatureScheme::RSA_PKCS1_SHA256,
        dns_name: "tlsnotary.org".to_string(),
    }
}
/// Returns test data for the appliedzkp.org website
///
/// Mirrors [`tlsnotary`] but loads `testdata/key_exchange/appliedzkp.org/`
/// and uses an ECDSA-NISTP256-SHA256 signature scheme.
pub fn appliedzkp() -> TestData {
    TestData {
        ee: Certificate(include_bytes!("testdata/key_exchange/appliedzkp.org/ee.der").to_vec()),
        inter: Certificate(
            include_bytes!("testdata/key_exchange/appliedzkp.org/inter.der").to_vec(),
        ),
        ca: Certificate(include_bytes!("testdata/key_exchange/appliedzkp.org/ca.der").to_vec()),
        cr: Random(
            <[u8; 32]>::from_hex(include_bytes!(
                "testdata/key_exchange/appliedzkp.org/client_random"
            ))
            .unwrap(),
        ),
        sr: Random(
            <[u8; 32]>::from_hex(include_bytes!(
                "testdata/key_exchange/appliedzkp.org/server_random"
            ))
            .unwrap(),
        ),
        pubkey: PublicKey::new(
            NamedGroup::secp256r1,
            &Vec::<u8>::from_hex(include_bytes!(
                "testdata/key_exchange/appliedzkp.org/pubkey"
            ))
            .unwrap(),
        ),
        sig: Vec::<u8>::from_hex(include_bytes!(
            "testdata/key_exchange/appliedzkp.org/signature"
        ))
        .unwrap(),
        time: 1671637529,
        sig_scheme: SignatureScheme::ECDSA_NISTP256_SHA256,
        dns_name: "appliedzkp.org".to_string(),
    }
}

View File

@@ -25,5 +25,4 @@ tshark -r out.pcap -Y "tcp.stream==$STREAM_ID and tcp.srcport == 443" -T fields
# pubkey (ephemeral public key)
tshark -r out.pcap -Y "tcp.stream==$STREAM_ID" -T fields -e tls.handshake.server_point
# signature (over the key exchange parameters)
tshark -r out.pcap -Y "tcp.stream==$STREAM_ID" -T fields -e tls.handshake.sig
tshark -r out.pcap -Y "tcp.stream==$STREAM_ID" -T fields -e tls.handshake.sig

View File

@@ -1,169 +0,0 @@
//! Fixtures for testing
/// Certificate fixtures
pub mod cert;
use std::collections::HashMap;
use hex::FromHex;
use mpz_circuits::types::ValueType;
use mpz_core::{commit::HashCommit, hash::Hash, utils::blake3};
use mpz_garble_core::{ChaChaEncoder, Encoder};
use tls_core::{
cert::ServerCertDetails,
handshake::HandshakeData,
ke::ServerKxDetails,
key::{Certificate, PublicKey},
msgs::{
codec::Codec,
enums::{NamedGroup, SignatureScheme},
handshake::{DigitallySignedStruct, Random, ServerECDHParams},
},
};
use p256::ecdsa::SigningKey;
use crate::{
merkle::MerkleRoot,
session::{HandshakeSummary, SessionHeader},
EncodingProvider,
};
/// Derives a stable 64-bit value id from the BLAKE3 hash of the string id.
fn value_id(id: &str) -> u64 {
    let digest = blake3(id.as_bytes());
    let prefix: [u8; 8] = digest[..8].try_into().unwrap();
    u64::from_be_bytes(prefix)
}
/// Returns a session header fixture using the given transcript lengths and merkle root.
///
/// # Arguments
///
/// * `root` - The merkle root of the transcript commitments.
/// * `sent_len` - The length of the sent transcript.
/// * `recv_len` - The length of the received transcript.
pub fn session_header(root: MerkleRoot, sent_len: usize, recv_len: usize) -> SessionHeader {
    // Combines the fixture encoder seed and handshake summary with the
    // caller-provided root and lengths.
    SessionHeader::new(
        encoder_seed(),
        root,
        sent_len,
        recv_len,
        handshake_summary(),
    )
}
/// Returns an encoding provider fixture using the given transcripts.
pub fn encoding_provider(transcript_tx: &[u8], transcript_rx: &[u8]) -> EncodingProvider {
    let encoder = encoder();
    let mut active_encodings = HashMap::new();
    // Pre-compute the active (value-selected) encoding for every byte of
    // both transcripts, keyed by its "tx/{idx}" / "rx/{idx}" value id.
    for (idx, byte) in transcript_tx.iter().enumerate() {
        let id = format!("tx/{idx}");
        let enc = encoder.encode_by_type(value_id(&id), &ValueType::U8);
        active_encodings.insert(id, enc.select(*byte).unwrap());
    }
    for (idx, byte) in transcript_rx.iter().enumerate() {
        let id = format!("rx/{idx}");
        let enc = encoder.encode_by_type(value_id(&id), &ValueType::U8);
        active_encodings.insert(id, enc.select(*byte).unwrap());
    }

    // The provider resolves each requested id against the pre-computed map.
    // NOTE(review): the collected result type comes from the
    // `EncodingProvider` alias — if it is `Option<Vec<_>>`, a single unknown
    // id makes the whole call return `None`; confirm against the alias.
    Box::new(move |ids: &[&str]| {
        ids.iter()
            .map(|id| active_encodings.get(*id).cloned())
            .collect()
    })
}
/// Returns a handshake summary fixture.
pub fn handshake_summary() -> HandshakeSummary {
HandshakeSummary::new(1671637529, server_ephemeral_key(), handshake_commitment())
}
/// Returns a handshake commitment fixture.
pub fn handshake_commitment() -> Hash {
let (_, hash) = handshake_data().hash_commit();
hash
}
/// Returns a handshake data fixture.
pub fn handshake_data() -> HandshakeData {
HandshakeData::new(
server_cert_details(),
server_kx_details(),
client_random(),
server_random(),
)
}
/// Returns a server certificate details fixture.
pub fn server_cert_details() -> ServerCertDetails {
ServerCertDetails::new(
vec![
Certificate(include_bytes!("testdata/key_exchange/tlsnotary.org/ee.der").to_vec()),
Certificate(include_bytes!("testdata/key_exchange/tlsnotary.org/inter.der").to_vec()),
Certificate(include_bytes!("testdata/key_exchange/tlsnotary.org/ca.der").to_vec()),
],
vec![],
None,
)
}
/// Returns a server key exchange details fixture.
pub fn server_kx_details() -> ServerKxDetails {
let mut params = Vec::new();
let ecdh_params = ServerECDHParams::new(NamedGroup::secp256r1, &server_ephemeral_key().key);
ecdh_params.encode(&mut params);
ServerKxDetails::new(
params,
DigitallySignedStruct::new(
SignatureScheme::RSA_PKCS1_SHA256,
Vec::<u8>::from_hex(include_bytes!(
"testdata/key_exchange/tlsnotary.org/signature"
))
.unwrap(),
),
)
}
/// Returns a client random fixture.
pub fn client_random() -> Random {
Random(
<[u8; 32]>::from_hex(include_bytes!(
"testdata/key_exchange/tlsnotary.org/client_random"
))
.unwrap(),
)
}
/// Returns a server random fixture.
pub fn server_random() -> Random {
Random(
<[u8; 32]>::from_hex(include_bytes!(
"testdata/key_exchange/tlsnotary.org/server_random"
))
.unwrap(),
)
}
/// Returns an encoder fixture.
pub fn encoder() -> ChaChaEncoder {
ChaChaEncoder::new(encoder_seed())
}
/// Returns an encoder seed fixture.
pub fn encoder_seed() -> [u8; 32] {
[0u8; 32]
}
/// Returns a server ephemeral key fixture.
pub fn server_ephemeral_key() -> PublicKey {
PublicKey::new(
NamedGroup::secp256r1,
&Vec::<u8>::from_hex(include_bytes!("testdata/key_exchange/tlsnotary.org/pubkey")).unwrap(),
)
}
/// Returns a notary signing key fixture.
pub fn notary_signing_key() -> SigningKey {
SigningKey::from_slice(&[1; 32]).unwrap()
}

View File

@@ -0,0 +1,29 @@
use mpz_garble_core::ChaChaEncoder;
use crate::transcript::{
encoding::{Encoder, EncodingProvider},
Direction, Idx, Transcript,
};
/// A ChaCha encoding provider fixture.
pub struct ChaChaProvider {
encoder: ChaChaEncoder,
transcript: Transcript,
}
impl ChaChaProvider {
    /// Creates a new ChaCha encoding provider.
    ///
    /// # Arguments
    ///
    /// * `seed` - Seed for the ChaCha encoder.
    /// * `transcript` - The transcript the encodings are provided for.
    pub(crate) fn new(seed: [u8; 32], transcript: Transcript) -> Self {
        Self {
            encoder: ChaChaEncoder::new(seed),
            transcript,
        }
    }
}
impl EncodingProvider for ChaChaProvider {
    /// Returns the encoding of the transcript subsequence at `idx`, or `None`
    /// if the transcript does not contain `idx`.
    fn provide_encoding(&self, direction: Direction, idx: &Idx) -> Option<Vec<u8>> {
        // `Transcript::get` returning `None` short-circuits the whole call.
        let seq = self.transcript.get(direction, idx)?;
        Some(self.encoder.encode_subsequence(direction, &seq))
    }
}

408
crates/core/src/hash.rs Normal file
View File

@@ -0,0 +1,408 @@
//! Hash types.
use std::{collections::HashMap, fmt::Display};
use rand::{distributions::Standard, prelude::Distribution};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::serialize::CanonicalSerialize;
pub(crate) const DEFAULT_SUPPORTED_HASH_ALGS: &[HashAlgId] =
&[HashAlgId::SHA256, HashAlgId::BLAKE3, HashAlgId::KECCAK256];
/// Maximum length of a hash value.
const MAX_LEN: usize = 64;
/// An error for [`HashProvider`].
#[derive(Debug, thiserror::Error)]
#[error("unknown hash algorithm id: {}", self.0)]
pub struct HashProviderError(HashAlgId);
/// Hash provider.
pub struct HashProvider {
algs: HashMap<HashAlgId, Box<dyn HashAlgorithm + Send + Sync>>,
}
impl Default for HashProvider {
fn default() -> Self {
let mut algs: HashMap<_, Box<dyn HashAlgorithm + Send + Sync>> = HashMap::new();
algs.insert(HashAlgId::SHA256, Box::new(Sha256::default()));
algs.insert(HashAlgId::BLAKE3, Box::new(Blake3::default()));
algs.insert(HashAlgId::KECCAK256, Box::new(Keccak256::default()));
Self { algs }
}
}
impl HashProvider {
    /// Sets a hash algorithm.
    ///
    /// This can be used to add or override implementations of hash algorithms.
    pub fn set_algorithm(
        &mut self,
        id: HashAlgId,
        algorithm: Box<dyn HashAlgorithm + Send + Sync>,
    ) {
        self.algs.insert(id, algorithm);
    }

    /// Returns the hash algorithm with the given identifier, or an error if the hash algorithm
    /// does not exist.
    pub fn get(
        &self,
        id: &HashAlgId,
    ) -> Result<&(dyn HashAlgorithm + Send + Sync), HashProviderError> {
        match self.algs.get(id) {
            Some(alg) => Ok(alg.as_ref()),
            None => Err(HashProviderError(*id)),
        }
    }
}
/// A hash algorithm identifier.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct HashAlgId(u8);
impl HashAlgId {
    /// SHA-256 hash algorithm.
    pub const SHA256: Self = Self(1);
    /// BLAKE3 hash algorithm.
    pub const BLAKE3: Self = Self(2);
    /// Keccak-256 hash algorithm.
    pub const KECCAK256: Self = Self(3);

    /// Creates a new hash algorithm identifier.
    ///
    /// Ids 0-127 are reserved for built-in algorithms, so custom
    /// identifiers must use the range 128-255.
    ///
    /// # Panics
    ///
    /// Panics if the identifier is in the reserved range 0-127.
    ///
    /// # Arguments
    ///
    /// * id - Unique identifier for the hash algorithm.
    pub const fn new(id: u8) -> Self {
        assert!(id >= 128, "hash algorithm id range 0-127 is reserved");
        Self(id)
    }

    /// Returns the id as a `u8`.
    pub const fn as_u8(&self) -> u8 {
        self.0
    }
}
impl Display for HashAlgId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Render the id as a two-digit lowercase hex code.
        write!(f, "{:02x}", self.as_u8())
    }
}
/// A typed hash value.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct TypedHash {
/// The algorithm of the hash.
pub alg: HashAlgId,
/// The hash value.
pub value: Hash,
}
/// A hash value.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Hash {
// To avoid heap allocation, we use a fixed-size array.
// 64 bytes should be sufficient for most hash algorithms.
value: [u8; MAX_LEN],
len: usize,
}
impl Default for Hash {
    /// Returns an empty hash: an all-zero buffer with zero significant bytes.
    fn default() -> Self {
        Self {
            value: [0u8; MAX_LEN],
            len: 0,
        }
    }
}
impl Serialize for Hash {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Only the `len` significant bytes are emitted; the zero padding of
        // the fixed-size buffer is not part of the serialized form.
        serializer.collect_seq(&self.value[..self.len])
    }
}
impl<'de> Deserialize<'de> for Hash {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        use core::marker::PhantomData;
        use serde::de::{Error, SeqAccess, Visitor};

        // Visitor that rebuilds the fixed-size buffer from a serialized byte
        // sequence (the counterpart of `Serialize`, which emits only the
        // `len` significant bytes).
        struct HashVisitor<'de>(PhantomData<&'de ()>);

        impl<'de> Visitor<'de> for HashVisitor<'de> {
            type Value = Hash;

            fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
                write!(formatter, "an array at most 64 bytes long")
            }

            fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
            where
                A: SeqAccess<'de>,
            {
                let mut value = [0; MAX_LEN];
                let mut len = 0;

                // Copy bytes into the fixed buffer, rejecting inputs longer
                // than MAX_LEN rather than silently truncating them.
                while let Some(byte) = seq.next_element()? {
                    if len >= MAX_LEN {
                        return Err(A::Error::invalid_length(len, &self));
                    }

                    value[len] = byte;
                    len += 1;
                }

                Ok(Hash { value, len })
            }
        }

        deserializer.deserialize_seq(HashVisitor(PhantomData))
    }
}
impl Hash {
    /// Creates a new hash value.
    ///
    /// # Panics
    ///
    /// Panics if the length of the value is greater than 64 bytes.
    fn new(value: &[u8]) -> Self {
        let len = value.len();
        assert!(len <= MAX_LEN, "hash value must be at most 64 bytes");

        // Copy into the fixed-size buffer, leaving the tail zeroed.
        let mut buf = [0u8; MAX_LEN];
        buf[..len].copy_from_slice(value);

        Self { value: buf, len }
    }
}
impl rs_merkle::Hash for Hash {
const SIZE: usize = MAX_LEN;
}
impl TryFrom<Vec<u8>> for Hash {
    type Error = &'static str;

    /// Converts a byte vector into a `Hash`, failing if it is longer than
    /// the 64-byte internal buffer.
    fn try_from(value: Vec<u8>) -> Result<Self, Self::Error> {
        let len = value.len();
        if len > MAX_LEN {
            return Err("hash value must be at most 64 bytes");
        }

        let mut buf = [0u8; MAX_LEN];
        buf[..len].copy_from_slice(&value);

        Ok(Self { value: buf, len })
    }
}
impl From<Hash> for Vec<u8> {
    /// Copies out only the `len` significant bytes of the hash.
    fn from(hash: Hash) -> Self {
        Vec::from(&hash.value[..hash.len])
    }
}
/// A hashing algorithm.
pub trait HashAlgorithm {
/// Returns the hash algorithm identifier.
fn id(&self) -> HashAlgId;
/// Computes the hash of the provided data.
fn hash(&self, data: &[u8]) -> Hash;
/// Computes the hash of the provided data with a prefix.
fn hash_prefixed(&self, prefix: &[u8], data: &[u8]) -> Hash;
}
pub(crate) trait HashAlgorithmExt: HashAlgorithm {
fn hash_canonical<T: CanonicalSerialize>(&self, data: &T) -> Hash {
self.hash(&data.serialize())
}
fn hash_separated<T: DomainSeparator + CanonicalSerialize>(&self, data: &T) -> Hash {
self.hash_prefixed(data.domain(), &data.serialize())
}
}
impl<T: HashAlgorithm + ?Sized> HashAlgorithmExt for T {}
/// A hash blinder.
#[derive(Clone, Serialize, Deserialize)]
pub(crate) struct Blinder([u8; 16]);
opaque_debug::implement!(Blinder);
impl Distribution<Blinder> for Standard {
fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> Blinder {
let mut blinder = [0; 16];
rng.fill(&mut blinder);
Blinder(blinder)
}
}
/// A blinded pre-image of a hash.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct Blinded<T> {
data: T,
blinder: Blinder,
}
impl<T> Blinded<T> {
    /// Creates a new blinded pre-image with a randomly sampled blinder.
    pub(crate) fn new(data: T) -> Self {
        Self {
            data,
            blinder: rand::random(),
        }
    }

    /// Creates a new blinded pre-image using the provided blinder.
    pub(crate) fn new_with_blinder(data: T, blinder: Blinder) -> Self {
        Self { data, blinder }
    }

    /// Returns a reference to the blinded data.
    pub(crate) fn data(&self) -> &T {
        &self.data
    }

    /// Consumes `self`, returning the data and the blinder.
    pub(crate) fn into_parts(self) -> (T, Blinder) {
        (self.data, self.blinder)
    }
}
/// A type with a domain separator which is used during hashing to mitigate type confusion attacks.
pub(crate) trait DomainSeparator {
    /// Returns the domain separator for the type.
    fn domain(&self) -> &[u8];
}

/// Implements [`DomainSeparator`] for a type, deriving the separator from the
/// type's name so that distinct types hash to distinct pre-images.
macro_rules! impl_domain_separator {
    ($type:ty) => {
        impl $crate::hash::DomainSeparator for $type {
            fn domain(&self) -> &[u8] {
                use std::sync::LazyLock;

                // Computes a 16 byte hash of the type's name to use as a domain separator.
                // The hash is computed once on first use and cached.
                static DOMAIN: LazyLock<[u8; 16]> = LazyLock::new(|| {
                    let domain: [u8; 32] = blake3::hash(stringify!($type).as_bytes()).into();
                    domain[..16].try_into().unwrap()
                });
                &*DOMAIN
            }
        }
    };
}

pub(crate) use impl_domain_separator;
mod sha2 {
    use ::sha2::Digest;

    /// SHA-256 hash algorithm.
    #[derive(Default, Clone)]
    pub struct Sha256 {}

    /// Feeds each chunk, in order, to a fresh SHA-256 state and returns the digest.
    fn digest_chunks(chunks: &[&[u8]]) -> super::Hash {
        let mut hasher = ::sha2::Sha256::default();
        for chunk in chunks {
            hasher.update(chunk);
        }
        super::Hash::new(hasher.finalize().as_slice())
    }

    impl super::HashAlgorithm for Sha256 {
        fn id(&self) -> super::HashAlgId {
            super::HashAlgId::SHA256
        }

        fn hash(&self, data: &[u8]) -> super::Hash {
            digest_chunks(&[data])
        }

        fn hash_prefixed(&self, prefix: &[u8], data: &[u8]) -> super::Hash {
            digest_chunks(&[prefix, data])
        }
    }
}

pub use sha2::Sha256;
mod blake3 {
    /// BLAKE3 hash algorithm.
    #[derive(Default, Clone)]
    pub struct Blake3 {}

    impl super::HashAlgorithm for Blake3 {
        fn id(&self) -> super::HashAlgId {
            super::HashAlgId::BLAKE3
        }

        // One-shot hash of the input data.
        fn hash(&self, data: &[u8]) -> super::Hash {
            super::Hash::new(::blake3::hash(data).as_bytes())
        }

        // Incremental hash of `prefix` followed by `data`, equivalent to
        // hashing their concatenation.
        fn hash_prefixed(&self, prefix: &[u8], data: &[u8]) -> super::Hash {
            let mut hasher = ::blake3::Hasher::new();
            hasher.update(prefix);
            hasher.update(data);
            super::Hash::new(hasher.finalize().as_bytes())
        }
    }
}

pub use blake3::Blake3;
mod keccak {
    use tiny_keccak::Hasher;

    /// Keccak-256 hash algorithm.
    #[derive(Default, Clone)]
    pub struct Keccak256 {}

    impl super::HashAlgorithm for Keccak256 {
        fn id(&self) -> super::HashAlgId {
            super::HashAlgId::KECCAK256
        }

        // Hashes the input data with Keccak-256.
        fn hash(&self, data: &[u8]) -> super::Hash {
            let mut hasher = tiny_keccak::Keccak::v256();
            hasher.update(data);
            // Keccak-256 output is always 32 bytes; use a stack buffer
            // instead of heap-allocating a Vec for every hash.
            let mut output = [0u8; 32];
            hasher.finalize(&mut output);
            super::Hash::new(&output)
        }

        // Hashes `prefix` followed by `data`, equivalent to hashing their
        // concatenation.
        fn hash_prefixed(&self, prefix: &[u8], data: &[u8]) -> super::Hash {
            let mut hasher = tiny_keccak::Keccak::v256();
            hasher.update(prefix);
            hasher.update(data);
            let mut output = [0u8; 32];
            hasher.finalize(&mut output);
            super::Hash::new(&output)
        }
    }
}

pub use keccak::Keccak256;

106
crates/core/src/index.rs Normal file
View File

@@ -0,0 +1,106 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use crate::{
attestation::{Field, FieldId},
transcript::{
hash::{PlaintextHash, PlaintextHashSecret},
Idx,
},
};
/// Index for items which can be looked up by transcript index or field id.
#[derive(Debug, Clone)]
pub(crate) struct Index<T> {
items: Vec<T>,
// Lookup by field id
field_ids: HashMap<FieldId, usize>,
// Lookup by transcript index
transcript_idxs: HashMap<Idx, usize>,
}
impl<T> Default for Index<T> {
fn default() -> Self {
Self {
items: Default::default(),
field_ids: Default::default(),
transcript_idxs: Default::default(),
}
}
}
impl<T: Serialize> Serialize for Index<T> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
self.items.serialize(serializer)
}
}
impl<'de, T: Deserialize<'de>> Deserialize<'de> for Index<T>
where
Index<T>: From<Vec<T>>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
Vec::<T>::deserialize(deserializer).map(Index::from)
}
}
impl<T> From<Index<T>> for Vec<T> {
fn from(value: Index<T>) -> Self {
value.items
}
}
impl<T> Index<T> {
    /// Builds an index over `items`, using `f` to extract the field id and
    /// transcript index lookup keys from each item.
    pub(crate) fn new<F>(items: Vec<T>, f: F) -> Self
    where
        F: Fn(&T) -> (&FieldId, &Idx),
    {
        let mut field_ids = HashMap::with_capacity(items.len());
        let mut transcript_idxs = HashMap::with_capacity(items.len());

        for (pos, item) in items.iter().enumerate() {
            let (field_id, transcript_idx) = f(item);
            field_ids.insert(*field_id, pos);
            transcript_idxs.insert(transcript_idx.clone(), pos);
        }

        Self {
            items,
            field_ids,
            transcript_idxs,
        }
    }

    /// Iterates over the indexed items in insertion order.
    pub(crate) fn iter(&self) -> impl Iterator<Item = &T> {
        self.items.iter()
    }

    /// Looks up an item by its attestation field id.
    pub(crate) fn get_by_field_id(&self, id: &FieldId) -> Option<&T> {
        let pos = self.field_ids.get(id)?;
        self.items.get(*pos)
    }

    /// Looks up an item by its transcript index.
    pub(crate) fn get_by_transcript_idx(&self, idx: &Idx) -> Option<&T> {
        let pos = self.transcript_idxs.get(idx)?;
        self.items.get(*pos)
    }
}
impl From<Vec<Field<PlaintextHash>>> for Index<Field<PlaintextHash>> {
fn from(items: Vec<Field<PlaintextHash>>) -> Self {
Self::new(items, |field: &Field<PlaintextHash>| {
(&field.id, &field.data.idx)
})
}
}
impl From<Vec<PlaintextHashSecret>> for Index<PlaintextHashSecret> {
fn from(items: Vec<PlaintextHashSecret>) -> Self {
Self::new(items, |item: &PlaintextHashSecret| {
(&item.commitment, &item.idx)
})
}
}

View File

@@ -1,78 +1,23 @@
//! TLSNotary core protocol library.
//!
//! This crate contains core types for the TLSNotary protocol, including some functionality for selective disclosure.
//! TLSNotary core library.
#![deny(missing_docs, unreachable_pub, unused_must_use)]
#![deny(clippy::all)]
#![forbid(unsafe_code)]
pub mod commitment;
pub mod attestation;
pub mod connection;
#[cfg(any(test, feature = "fixtures"))]
pub mod fixtures;
pub mod merkle;
pub mod msg;
pub mod proof;
pub mod session;
mod signature;
pub mod hash;
pub(crate) mod index;
pub(crate) mod merkle;
pub mod presentation;
mod provider;
pub mod request;
mod secrets;
pub(crate) mod serialize;
pub mod signing;
pub mod transcript;
pub use session::{HandshakeSummary, NotarizedSession, SessionData, SessionHeader};
pub use signature::{NotaryPublicKey, Signature};
pub use transcript::{Direction, RedactedTranscript, Transcript, TranscriptSlice};
use mpz_garble_core::{encoding_state, EncodedValue};
use serde::{Deserialize, Serialize};
/// The maximum allowed total bytelength of all committed data. Used to prevent DoS during verification.
/// (this will cause the verifier to hash up to a max of 1GB * 128 = 128GB of plaintext encodings if the
/// commitment type is [crate::commitment::Blake3]).
///
/// This value must not exceed bcs's MAX_SEQUENCE_LENGTH limit (which is (1 << 31) - 1 by default)
const MAX_TOTAL_COMMITTED_DATA: usize = 1_000_000_000;
/// A provider of plaintext encodings.
pub(crate) type EncodingProvider =
Box<dyn Fn(&[&str]) -> Option<Vec<EncodedValue<encoding_state::Active>>> + Send>;
/// The encoding id
///
/// A 64 bit Blake3 hash which is used for the plaintext encodings
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)]
pub(crate) struct EncodingId(u64);
impl EncodingId {
/// Create a new encoding ID.
pub(crate) fn new(id: &str) -> Self {
let hash = mpz_core::utils::blake3(id.as_bytes());
Self(u64::from_be_bytes(hash[..8].try_into().unwrap()))
}
/// Returns the encoding ID.
pub(crate) fn to_inner(self) -> u64 {
self.0
}
}
/// A Server's name.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum ServerName {
/// A DNS name.
Dns(String),
}
impl ServerName {
/// Returns a reference to the server name as a string slice.
pub fn as_str(&self) -> &str {
match self {
Self::Dns(name) => name.as_str(),
}
}
}
impl AsRef<str> for ServerName {
fn as_ref(&self) -> &str {
match self {
Self::Dns(name) => name.as_ref(),
}
}
}
pub use provider::CryptoProvider;
pub use secrets::Secrets;

View File

@@ -1,61 +1,31 @@
//! Merkle tree types.
//!
//! # Usage
//!
//! During notarization, the `Prover` generates various commitments to the transcript data, which are subsequently
//! inserted into a `MerkleTree`. Rather than send each commitment to the Notary individually, the `Prover` simply sends the
//! `MerkleRoot`. This hides the number of commitments from the Notary, which is important for privacy as it can leak
//! information about the content of the transcript.
//!
//! Later, during selective disclosure to a `Verifier`, the `Prover` can open any subset of the commitments in the `MerkleTree`
//! by providing a `MerkleProof` for the corresponding `MerkleRoot` which was signed by the Notary.
use mpz_core::hash::Hash;
use rs_merkle::{
algorithms::Sha256, proof_serializers, MerkleProof as MerkleProof_rs_merkle,
MerkleTree as MerkleTree_rs_merkle,
};
use serde::{ser::Serializer, Deserialize, Deserializer, Serialize};
use serde::{Deserialize, Serialize};
use utils::iter::DuplicateCheck;
/// A Merkle root.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub struct MerkleRoot([u8; 32]);
impl MerkleRoot {
/// Returns the inner byte array
pub fn to_inner(self) -> [u8; 32] {
self.0
}
}
impl From<[u8; 32]> for MerkleRoot {
fn from(bytes: [u8; 32]) -> Self {
Self(bytes)
}
}
use crate::hash::{Hash, HashAlgId, HashAlgorithm, TypedHash};
/// Errors that can occur during operations with Merkle tree and Merkle proof
#[derive(Debug, thiserror::Error, PartialEq)]
#[allow(missing_docs)]
pub enum MerkleError {
#[error("Failed to verify a Merkle proof")]
MerkleProofVerificationFailed,
#[error("No leaves were provided when constructing a Merkle tree")]
MerkleNoLeavesProvided,
#[derive(Debug, thiserror::Error)]
#[error("merkle error: {0}")]
pub(crate) struct MerkleError(String);
impl MerkleError {
fn new(msg: impl Into<String>) -> Self {
Self(msg.into())
}
}
/// A Merkle proof.
#[derive(Serialize, Deserialize)]
pub struct MerkleProof {
#[serde(
serialize_with = "merkle_proof_serialize",
deserialize_with = "merkle_proof_deserialize"
)]
proof: MerkleProof_rs_merkle<Sha256>,
total_leaves: usize,
pub(crate) struct MerkleProof {
alg: HashAlgId,
tree_len: usize,
#[serde(flatten)]
proof: rs_merkle::MerkleProof<Hash>,
}
opaque_debug::implement!(MerkleProof);
impl MerkleProof {
/// Checks if indices, hashes and leaves count are valid for the provided root
///
@@ -63,323 +33,275 @@ impl MerkleProof {
///
/// - If the length of `leaf_indices` and `leaf_hashes` does not match.
/// - If `leaf_indices` contains duplicates.
pub fn verify(
pub(crate) fn verify(
&self,
root: &MerkleRoot,
leaf_indices: &[usize],
leaf_hashes: &[Hash],
hasher: &dyn HashAlgorithm,
root: &TypedHash,
leaves: impl IntoIterator<Item = (usize, Hash)>,
) -> Result<(), MerkleError> {
assert_eq!(
leaf_indices.len(),
leaf_hashes.len(),
"leaf indices length must match leaf hashes length"
);
assert!(
!leaf_indices.iter().contains_dups(),
"duplicate indices provided {:?}",
leaf_indices
);
let mut leaves = leaves.into_iter().collect::<Vec<_>>();
// zip indices and hashes
let mut tuples: Vec<(usize, [u8; 32])> = leaf_indices
.iter()
.cloned()
.zip(leaf_hashes.iter().cloned().map(|h| *h.as_bytes()))
.collect();
// Sort by index
leaves.sort_by_key(|(idx, _)| *idx);
// sort by index and unzip
tuples.sort_by(|(a, _), (b, _)| a.cmp(b));
let (indices, hashes): (Vec<usize>, Vec<[u8; 32]>) = tuples.into_iter().unzip();
let (indices, leaves): (Vec<_>, Vec<_>) = leaves.into_iter().unzip();
if !self
.proof
.verify(root.to_inner(), &indices, &hashes, self.total_leaves)
{
return Err(MerkleError::MerkleProofVerificationFailed);
if indices.iter().contains_dups() {
return Err(MerkleError::new("duplicate leaf indices provided"));
}
if !self.proof.verify(
&RsMerkleHasher(hasher),
root.value,
&indices,
&leaves,
self.tree_len,
) {
return Err(MerkleError::new("invalid merkle proof"));
}
Ok(())
}
}
impl Clone for MerkleProof {
fn clone(&self) -> Self {
let bytes = self.proof.to_bytes();
Self {
proof: MerkleProof_rs_merkle::<Sha256>::from_bytes(&bytes).unwrap(),
total_leaves: self.total_leaves,
}
#[derive(Clone)]
struct RsMerkleHasher<'a>(&'a dyn HashAlgorithm);
impl rs_merkle::Hasher for RsMerkleHasher<'_> {
type Hash = Hash;
fn hash(&self, data: &[u8]) -> Hash {
self.0.hash(data)
}
}
fn merkle_proof_serialize<S>(
proof: &MerkleProof_rs_merkle<Sha256>,
serializer: S,
) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let bytes = proof.serialize::<proof_serializers::DirectHashesOrder>();
serializer.serialize_bytes(&bytes)
#[derive(Serialize, Deserialize)]
pub(crate) struct MerkleTree {
alg: HashAlgId,
tree: rs_merkle::MerkleTree<Hash>,
}
fn merkle_proof_deserialize<'de, D>(
deserializer: D,
) -> Result<MerkleProof_rs_merkle<Sha256>, D::Error>
where
D: Deserializer<'de>,
{
let bytes = Vec::deserialize(deserializer)?;
MerkleProof_rs_merkle::<Sha256>::from_bytes(bytes.as_slice()).map_err(serde::de::Error::custom)
}
/// A Merkle tree.
#[derive(Serialize, Deserialize, Default, Clone)]
pub struct MerkleTree(
#[serde(
serialize_with = "merkle_tree_serialize",
deserialize_with = "merkle_tree_deserialize"
)]
pub MerkleTree_rs_merkle<Sha256>,
);
impl MerkleTree {
/// Create a new Merkle tree from the given `leaves`
pub fn from_leaves(leaves: &[Hash]) -> Result<Self, MerkleError> {
if leaves.is_empty() {
return Err(MerkleError::MerkleNoLeavesProvided);
pub(crate) fn new(alg: HashAlgId) -> Self {
Self {
alg,
tree: Default::default(),
}
let leaves: Vec<[u8; 32]> = leaves.iter().map(|h| *h.as_bytes()).collect();
Ok(Self(MerkleTree_rs_merkle::<Sha256>::from_leaves(&leaves)))
}
/// Creates an inclusion proof for the given `indices`
pub(crate) fn algorithm(&self) -> HashAlgId {
self.alg
}
pub(crate) fn root(&self) -> TypedHash {
TypedHash {
alg: self.alg,
value: self.tree.root().expect("tree should not be empty"),
}
}
/// Inserts leaves into the tree.
///
/// # Panics
///
/// - if `indices` is not sorted.
/// - if `indices` contains duplicates
pub fn proof(&self, indices: &[usize]) -> MerkleProof {
/// - If the provided hasher is not the same as the one used to create the tree.
pub(crate) fn insert(&mut self, hasher: &dyn HashAlgorithm, mut leaves: Vec<Hash>) {
assert_eq!(self.alg, hasher.id(), "hash algorithm mismatch");
self.tree.append(&mut leaves);
self.tree.commit(&RsMerkleHasher(hasher))
}
/// Returns a Merkle proof for the provided indices.
///
/// # Panics
///
/// - If the provided indices are not unique and sorted.
pub(crate) fn proof(&self, indices: &[usize]) -> MerkleProof {
assert!(
indices.windows(2).all(|w| w[0] < w[1]),
"indices must be sorted"
"indices must be unique and sorted"
);
let proof = self.0.proof(indices);
MerkleProof {
proof,
total_leaves: self.0.leaves_len(),
alg: self.alg,
tree_len: self.tree.leaves_len(),
proof: self.tree.proof(indices),
}
}
/// Returns the Merkle root for this MerkleTree
pub fn root(&self) -> MerkleRoot {
self.0
.root()
.expect("Merkle root should be available")
.into()
}
}
/// Serialize the rs_merkle's `MerkleTree` type
fn merkle_tree_serialize<S>(
tree: &MerkleTree_rs_merkle<Sha256>,
serializer: S,
) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
// all leaves are sha256 hashes
let hash_size = 32;
let mut bytes: Vec<u8> = Vec::with_capacity(tree.leaves_len() * hash_size);
if let Some(leaves) = tree.leaves() {
for leaf in leaves {
bytes.append(&mut leaf.to_vec());
}
}
serializer.serialize_bytes(&bytes)
}
fn merkle_tree_deserialize<'de, D>(
deserializer: D,
) -> Result<MerkleTree_rs_merkle<Sha256>, D::Error>
where
D: Deserializer<'de>,
{
let bytes: Vec<u8> = Vec::deserialize(deserializer)?;
if bytes.len() % 32 != 0 {
return Err(serde::de::Error::custom("leaves must be 32 bytes"));
}
let leaves: Vec<[u8; 32]> = bytes.chunks(32).map(|c| c.try_into().unwrap()).collect();
Ok(MerkleTree_rs_merkle::<Sha256>::from_leaves(
leaves.as_slice(),
))
}
#[cfg(test)]
mod test {
use crate::hash::{impl_domain_separator, Blake3, HashAlgorithmExt, Keccak256, Sha256};
use super::*;
use rstest::*;
#[derive(Serialize)]
struct T(u64);
impl_domain_separator!(T);
fn leaves<H: HashAlgorithm>(hasher: &H, leaves: impl IntoIterator<Item = T>) -> Vec<Hash> {
leaves
.into_iter()
.map(|x| hasher.hash_canonical(&x))
.collect()
}
fn choose_leaves(
indices: impl IntoIterator<Item = usize>,
leaves: &[Hash],
) -> Vec<(usize, Hash)> {
indices.into_iter().map(|i| (i, leaves[i])).collect()
}
// Expect Merkle proof verification to succeed
#[test]
fn test_verify_success() {
let leaf0 = Hash::from([0u8; 32]);
let leaf1 = Hash::from([1u8; 32]);
let leaf2 = Hash::from([2u8; 32]);
let leaf3 = Hash::from([3u8; 32]);
let leaf4 = Hash::from([4u8; 32]);
let tree = MerkleTree::from_leaves(&[leaf0, leaf1, leaf2, leaf3, leaf4]).unwrap();
#[rstest]
#[case::sha2(Sha256::default())]
#[case::blake3(Blake3::default())]
#[case::keccak(Keccak256::default())]
fn test_verify_success<H: HashAlgorithm>(#[case] hasher: H) {
let mut tree = MerkleTree::new(hasher.id());
let leaves = leaves(&hasher, [T(0), T(1), T(2), T(3), T(4)]);
tree.insert(&hasher, leaves.clone());
let proof = tree.proof(&[2, 3, 4]);
assert!(proof
.verify(&tree.root(), &[2, 3, 4], &[leaf2, leaf3, leaf4])
.is_ok(),);
.verify(&hasher, &tree.root(), choose_leaves([2, 3, 4], &leaves))
.is_ok());
}
#[test]
fn test_verify_fail_wrong_leaf() {
let leaf0 = Hash::from([0u8; 32]);
let leaf1 = Hash::from([1u8; 32]);
let leaf2 = Hash::from([2u8; 32]);
let leaf3 = Hash::from([3u8; 32]);
let leaf4 = Hash::from([4u8; 32]);
let tree = MerkleTree::from_leaves(&[leaf0, leaf1, leaf2, leaf3, leaf4]).unwrap();
#[rstest]
#[case::sha2(Sha256::default())]
#[case::blake3(Blake3::default())]
#[case::keccak(Keccak256::default())]
fn test_verify_fail_wrong_leaf<H: HashAlgorithm>(#[case] hasher: H) {
let mut tree = MerkleTree::new(hasher.id());
let leaves = leaves(&hasher, [T(0), T(1), T(2), T(3), T(4)]);
tree.insert(&hasher, leaves.clone());
let proof = tree.proof(&[2, 3, 4]);
let mut choices = choose_leaves([2, 3, 4], &leaves);
choices[1].1 = leaves[0];
// fail because the leaf is wrong
assert_eq!(
proof
.verify(&tree.root(), &[2, 3, 4], &[leaf1, leaf3, leaf4])
.err()
.unwrap(),
MerkleError::MerkleProofVerificationFailed
);
assert!(proof.verify(&hasher, &tree.root(), choices).is_err());
}
#[test]
#[rstest]
#[case::sha2(Sha256::default())]
#[case::blake3(Blake3::default())]
#[case::keccak(Keccak256::default())]
#[should_panic]
fn test_proof_fail_length_unsorted() {
let leaf0 = Hash::from([0u8; 32]);
let leaf1 = Hash::from([1u8; 32]);
let leaf2 = Hash::from([2u8; 32]);
let leaf3 = Hash::from([3u8; 32]);
let leaf4 = Hash::from([4u8; 32]);
let tree = MerkleTree::from_leaves(&[leaf0, leaf1, leaf2, leaf3, leaf4]).unwrap();
fn test_proof_fail_length_unsorted<H: HashAlgorithm>(#[case] hasher: H) {
let mut tree = MerkleTree::new(hasher.id());
let leaves = leaves(&hasher, [T(0), T(1), T(2), T(3), T(4)]);
tree.insert(&hasher, leaves.clone());
_ = tree.proof(&[2, 4, 3]);
}
#[test]
#[rstest]
#[case::sha2(Sha256::default())]
#[case::blake3(Blake3::default())]
#[case::keccak(Keccak256::default())]
#[should_panic]
fn test_proof_fail_length_duplicates() {
let leaf0 = Hash::from([0u8; 32]);
let leaf1 = Hash::from([1u8; 32]);
let leaf2 = Hash::from([2u8; 32]);
let leaf3 = Hash::from([3u8; 32]);
let leaf4 = Hash::from([4u8; 32]);
let tree = MerkleTree::from_leaves(&[leaf0, leaf1, leaf2, leaf3, leaf4]).unwrap();
fn test_proof_fail_length_duplicates<H: HashAlgorithm>(#[case] hasher: H) {
let mut tree = MerkleTree::new(hasher.id());
let leaves = leaves(&hasher, [T(0), T(1), T(2), T(3), T(4)]);
tree.insert(&hasher, leaves.clone());
_ = tree.proof(&[2, 2, 3]);
}
#[test]
#[should_panic]
fn test_verify_fail_length_mismatch() {
let leaf0 = Hash::from([0u8; 32]);
let leaf1 = Hash::from([1u8; 32]);
let leaf2 = Hash::from([2u8; 32]);
let leaf3 = Hash::from([3u8; 32]);
let leaf4 = Hash::from([4u8; 32]);
let tree = MerkleTree::from_leaves(&[leaf0, leaf1, leaf2, leaf3, leaf4]).unwrap();
#[rstest]
#[case::sha2(Sha256::default())]
#[case::blake3(Blake3::default())]
#[case::keccak(Keccak256::default())]
fn test_verify_fail_duplicates<H: HashAlgorithm>(#[case] hasher: H) {
let mut tree = MerkleTree::new(hasher.id());
let leaves = leaves(&hasher, [T(0), T(1), T(2), T(3), T(4)]);
tree.insert(&hasher, leaves.clone());
let proof = tree.proof(&[2, 3, 4]);
_ = proof.verify(&tree.root(), &[1, 2, 3, 4], &[leaf2, leaf3, leaf4]);
assert!(proof
.verify(&hasher, &tree.root(), choose_leaves([2, 2, 3], &leaves))
.is_err());
}
#[test]
#[should_panic]
fn test_verify_fail_duplicates() {
let leaf0 = Hash::from([0u8; 32]);
let leaf1 = Hash::from([1u8; 32]);
let leaf2 = Hash::from([2u8; 32]);
let leaf3 = Hash::from([3u8; 32]);
let leaf4 = Hash::from([4u8; 32]);
let tree = MerkleTree::from_leaves(&[leaf0, leaf1, leaf2, leaf3, leaf4]).unwrap();
let proof = tree.proof(&[2, 3, 4]);
#[rstest]
#[case::sha2(Sha256::default())]
#[case::blake3(Blake3::default())]
#[case::keccak(Keccak256::default())]
fn test_verify_fail_incorrect_leaf_count<H: HashAlgorithm>(#[case] hasher: H) {
let mut tree = MerkleTree::new(hasher.id());
_ = proof.verify(&tree.root(), &[2, 2, 3], &[leaf2, leaf2, leaf3]);
}
let leaves = leaves(&hasher, [T(0), T(1), T(2), T(3), T(4)]);
tree.insert(&hasher, leaves.clone());
#[test]
fn test_verify_fail_incorrect_leaf_count() {
let leaf0 = Hash::from([0u8; 32]);
let leaf1 = Hash::from([1u8; 32]);
let leaf2 = Hash::from([2u8; 32]);
let leaf3 = Hash::from([3u8; 32]);
let leaf4 = Hash::from([4u8; 32]);
let tree = MerkleTree::from_leaves(&[leaf0, leaf1, leaf2, leaf3, leaf4]).unwrap();
let mut proof = tree.proof(&[2, 3, 4]);
proof.total_leaves = 6;
proof.tree_len += 1;
// fail because leaf count is wrong
assert!(proof
.verify(&tree.root(), &[2, 3, 4], &[leaf2, leaf3, leaf4])
.verify(&hasher, &tree.root(), choose_leaves([2, 3, 4], &leaves))
.is_err());
}
#[test]
fn test_verify_fail_incorrect_indices() {
let leaf0 = Hash::from([0u8; 32]);
let leaf1 = Hash::from([1u8; 32]);
let leaf2 = Hash::from([2u8; 32]);
let leaf3 = Hash::from([3u8; 32]);
let leaf4 = Hash::from([4u8; 32]);
let tree = MerkleTree::from_leaves(&[leaf0, leaf1, leaf2, leaf3, leaf4]).unwrap();
#[rstest]
#[case::sha2(Sha256::default())]
#[case::blake3(Blake3::default())]
#[case::keccak(Keccak256::default())]
fn test_verify_fail_incorrect_indices<H: HashAlgorithm>(#[case] hasher: H) {
let mut tree = MerkleTree::new(hasher.id());
let leaves = leaves(&hasher, [T(0), T(1), T(2), T(3), T(4)]);
tree.insert(&hasher, leaves.clone());
let proof = tree.proof(&[2, 3, 4]);
// fail because tree index is wrong
assert!(proof
.verify(&tree.root(), &[1, 3, 4], &[leaf1, leaf3, leaf4])
.is_err());
let mut choices = choose_leaves([2, 3, 4], &leaves);
choices[1].0 = 1;
// fail because leaf index is wrong
assert!(proof.verify(&hasher, &tree.root(), choices).is_err());
}
#[test]
fn test_verify_fail_fewer_indices() {
let leaf0 = Hash::from([0u8; 32]);
let leaf1 = Hash::from([1u8; 32]);
let leaf2 = Hash::from([2u8; 32]);
let leaf3 = Hash::from([3u8; 32]);
let leaf4 = Hash::from([4u8; 32]);
let tree = MerkleTree::from_leaves(&[leaf0, leaf1, leaf2, leaf3, leaf4]).unwrap();
#[rstest]
#[case::sha2(Sha256::default())]
#[case::blake3(Blake3::default())]
#[case::keccak(Keccak256::default())]
fn test_verify_fail_fewer_indices<H: HashAlgorithm>(#[case] hasher: H) {
let mut tree = MerkleTree::new(hasher.id());
let leaves = leaves(&hasher, [T(0), T(1), T(2), T(3), T(4)]);
tree.insert(&hasher, leaves.clone());
let proof = tree.proof(&[2, 3, 4]);
// trying to verify less leaves than what was included in the proof
assert!(proof
.verify(&tree.root(), &[3, 4], &[leaf3, leaf4])
.verify(&hasher, &tree.root(), choose_leaves([2, 3], &leaves))
.is_err());
}
// Expect MerkleProof/MerkleTree custom serialization/deserialization to work
#[test]
fn test_serialization() {
let leaf0 = Hash::from([0u8; 32]);
let leaf1 = Hash::from([1u8; 32]);
let leaf2 = Hash::from([2u8; 32]);
let leaf3 = Hash::from([3u8; 32]);
let leaf4 = Hash::from([4u8; 32]);
let tree = MerkleTree::from_leaves(&[leaf0, leaf1, leaf2, leaf3, leaf4]).unwrap();
let proof = tree.proof(&[2, 3, 4]);
// serialize
let tree_bytes = bincode::serialize(&tree).unwrap();
let proof_bytes = bincode::serialize(&proof).unwrap();
// deserialize
let tree2: MerkleTree = bincode::deserialize(&tree_bytes).unwrap();
let proof2: MerkleProof = bincode::deserialize(&proof_bytes).unwrap();
assert!(proof2
.verify(&tree2.root(), &[2, 3, 4], &[leaf2, leaf3, leaf4])
.is_ok());
}
}

View File

@@ -1,41 +0,0 @@
//! Protocol message types.
use serde::{Deserialize, Serialize};
use utils::range::RangeSet;
use crate::{merkle::MerkleRoot, proof::SessionInfo, signature::Signature, SessionHeader};
/// Top-level enum for all messages
#[derive(Debug, Serialize, Deserialize)]
pub enum TlsnMessage {
/// A Merkle root for the tree of commitments to the transcript.
TranscriptCommitmentRoot(MerkleRoot),
/// A session header signed by a notary.
SignedSessionHeader(SignedSessionHeader),
/// A session header.
SessionHeader(SessionHeader),
/// Information about the TLS session
SessionInfo(SessionInfo),
/// Information about the values the prover wants to prove
ProvingInfo(ProvingInfo),
}
/// A signed session header.
#[derive(Debug, Serialize, Deserialize)]
pub struct SignedSessionHeader {
/// The session header
pub header: SessionHeader,
/// The notary's signature
pub signature: Signature,
}
/// Information about the values the prover wants to prove
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct ProvingInfo {
/// The ids for the sent transcript
pub sent_ids: RangeSet<usize>,
/// The ids for the received transcript
pub recv_ids: RangeSet<usize>,
/// Purported cleartext values
pub cleartext: Vec<u8>,
}

View File

@@ -0,0 +1,220 @@
//! Verifiable presentation.
use std::fmt;
use serde::{Deserialize, Serialize};
use crate::{
attestation::{Attestation, AttestationError, AttestationProof},
connection::{ConnectionInfo, ServerIdentityProof, ServerIdentityProofError, ServerName},
transcript::{PartialTranscript, TranscriptProof, TranscriptProofError},
CryptoProvider,
};
/// A verifiable presentation.
#[derive(Debug, Serialize, Deserialize)]
pub struct Presentation {
    // Proof of the attestation; always present and verified first.
    attestation: AttestationProof,
    // Optional proof of the server's identity; yields an authenticated
    // `ServerName` when verified.
    identity: Option<ServerIdentityProof>,
    // Optional selective-disclosure proof over the transcript contents.
    transcript: Option<TranscriptProof>,
}
impl Presentation {
    /// Creates a new builder.
    pub fn builder<'a>(
        provider: &'a CryptoProvider,
        attestation: &'a Attestation,
    ) -> PresentationBuilder<'a> {
        PresentationBuilder::new(provider, attestation)
    }

    /// Verifies the presentation.
    ///
    /// The attestation proof is checked first; the optional identity and
    /// transcript proofs are then verified against fields of the (now
    /// trusted) attestation body.
    ///
    /// # Errors
    ///
    /// Returns a [`PresentationError`] if any contained proof fails to verify.
    pub fn verify(
        self,
        provider: &CryptoProvider,
    ) -> Result<PresentationOutput, PresentationError> {
        let Self {
            attestation,
            identity,
            transcript,
        } = self;

        // Must come first: everything below is checked against the verified body.
        let attestation = attestation.verify(provider)?;

        // Identity is optional; when present it is verified against the
        // connection time, server ephemeral key, and certificate commitment
        // recorded in the attestation body.
        let server_name = identity
            .map(|identity| {
                identity.verify_with_provider(
                    provider,
                    attestation.body.connection_info().time,
                    attestation.body.server_ephemeral_key(),
                    attestation.body.cert_commitment(),
                )
            })
            .transpose()?;

        // Transcript disclosure is likewise optional.
        let transcript = transcript
            .map(|transcript| transcript.verify_with_provider(provider, &attestation.body))
            .transpose()?;

        let connection_info = attestation.body.connection_info().clone();

        Ok(PresentationOutput {
            attestation,
            server_name,
            connection_info,
            transcript,
        })
    }
}
/// Output of a verified [`Presentation`].
// `non_exhaustive` so fields can be added without breaking downstream code.
#[derive(Debug)]
#[non_exhaustive]
pub struct PresentationOutput {
    /// Verified attestation.
    pub attestation: Attestation,
    /// Authenticated server name.
    ///
    /// `None` when the presentation did not include an identity proof.
    pub server_name: Option<ServerName>,
    /// Connection information.
    pub connection_info: ConnectionInfo,
    /// Authenticated transcript data.
    ///
    /// `None` when the presentation did not include a transcript proof.
    pub transcript: Option<PartialTranscript>,
}
/// Builder for [`Presentation`].
pub struct PresentationBuilder<'a> {
    // Used to construct the attestation proof in `build`.
    provider: &'a CryptoProvider,
    // The attestation the presentation is built from.
    attestation: &'a Attestation,
    // Optional server identity proof to include.
    identity_proof: Option<ServerIdentityProof>,
    // Optional transcript proof to include.
    transcript_proof: Option<TranscriptProof>,
}
impl<'a> PresentationBuilder<'a> {
pub(crate) fn new(provider: &'a CryptoProvider, attestation: &'a Attestation) -> Self {
Self {
provider,
attestation,
identity_proof: None,
transcript_proof: None,
}
}
/// Includes a server identity proof.
pub fn identity_proof(&mut self, proof: ServerIdentityProof) -> &mut Self {
self.identity_proof = Some(proof);
self
}
/// Includes a transcript proof.
pub fn transcript_proof(&mut self, proof: TranscriptProof) -> &mut Self {
self.transcript_proof = Some(proof);
self
}
/// Builds the presentation.
pub fn build(self) -> Result<Presentation, PresentationBuilderError> {
let attestation = AttestationProof::new(self.provider, self.attestation)?;
Ok(Presentation {
attestation,
identity: self.identity_proof,
transcript: self.transcript_proof,
})
}
}
/// Error for [`PresentationBuilder`].
// NOTE: no `#[error]` attribute — `Display` is implemented manually below.
#[derive(Debug, thiserror::Error)]
pub struct PresentationBuilderError {
    // Broad category of the failure; rendered by the `Display` impl.
    kind: BuilderErrorKind,
    // Underlying cause, if any, preserved for error-chain inspection.
    source: Option<Box<dyn std::error::Error + Send + Sync>>,
}

#[derive(Debug)]
enum BuilderErrorKind {
    // Constructing the attestation proof failed.
    Attestation,
}
impl fmt::Display for PresentationBuilderError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("presentation builder error: ")?;
match self.kind {
BuilderErrorKind::Attestation => f.write_str("attestation error")?,
}
if let Some(source) = &self.source {
write!(f, " caused by: {}", source)?;
}
Ok(())
}
}
// Attestation proof construction is the only fallible step in `build`, so
// this is the sole conversion into the builder error.
impl From<AttestationError> for PresentationBuilderError {
    fn from(error: AttestationError) -> Self {
        Self {
            kind: BuilderErrorKind::Attestation,
            source: Some(Box::new(error)),
        }
    }
}
/// Error for [`Presentation`].
// NOTE: no `#[error]` attribute — `Display` is implemented manually below.
#[derive(Debug, thiserror::Error)]
pub struct PresentationError {
    // Which verification stage failed; rendered by the `Display` impl.
    kind: ErrorKind,
    // Underlying cause, if any, preserved for error-chain inspection.
    source: Option<Box<dyn std::error::Error + Send + Sync>>,
}

#[derive(Debug)]
enum ErrorKind {
    // Attestation proof verification failed.
    Attestation,
    // Server identity proof verification failed.
    Identity,
    // Transcript proof verification failed.
    Transcript,
}
impl fmt::Display for PresentationError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("presentation error: ")?;
match self.kind {
ErrorKind::Attestation => f.write_str("attestation error")?,
ErrorKind::Identity => f.write_str("server identity error")?,
ErrorKind::Transcript => f.write_str("transcript error")?,
}
if let Some(source) = &self.source {
write!(f, " caused by: {}", source)?;
}
Ok(())
}
}
// Conversions from the error type of each verification stage, tagging the
// corresponding `ErrorKind` and preserving the original error as the source.

impl From<AttestationError> for PresentationError {
    fn from(error: AttestationError) -> Self {
        Self {
            kind: ErrorKind::Attestation,
            source: Some(Box::new(error)),
        }
    }
}

impl From<ServerIdentityProofError> for PresentationError {
    fn from(error: ServerIdentityProofError) -> Self {
        Self {
            kind: ErrorKind::Identity,
            source: Some(Box::new(error)),
        }
    }
}

impl From<TranscriptProofError> for PresentationError {
    fn from(error: TranscriptProofError) -> Self {
        Self {
            kind: ErrorKind::Transcript,
            source: Some(Box::new(error)),
        }
    }
}

View File

@@ -1,21 +0,0 @@
//! Different types of proofs used in the TLSNotary protocol.
mod session;
mod substrings;
pub use session::{default_cert_verifier, SessionInfo, SessionProof, SessionProofError};
pub use substrings::{
SubstringsProof, SubstringsProofBuilder, SubstringsProofBuilderError, SubstringsProofError,
};
use serde::{Deserialize, Serialize};
use std::fmt::Debug;
/// Proof that a transcript of communications took place between a Prover and Server.
#[derive(Debug, Serialize, Deserialize)]
pub struct TlsProof {
/// Proof of the TLS handshake, server identity, and commitments to the transcript.
pub session: SessionProof,
/// Proof regarding the contents of the transcript.
pub substrings: SubstringsProof,
}

View File

@@ -1,339 +0,0 @@
use web_time::{Duration, UNIX_EPOCH};
use serde::{Deserialize, Serialize};
use mpz_core::{commit::Decommitment, serialize::CanonicalSerialize};
use tls_core::{
anchors::{OwnedTrustAnchor, RootCertStore},
dns::ServerName as TlsServerName,
handshake::HandshakeData,
verify::{ServerCertVerifier, WebPkiVerifier},
};
use crate::{
session::SessionHeader,
signature::{Signature, SignatureVerifyError},
HandshakeSummary, NotaryPublicKey, ServerName,
};
/// An error that can occur while verifying a [`SessionProof`].
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum SessionProofError {
/// Session proof is missing Notary signature
#[error("session proof is missing notary signature")]
MissingNotarySignature,
/// Invalid signature
#[error(transparent)]
InvalidSignature(#[from] SignatureVerifyError),
/// Invalid server name.
#[error("invalid server name: {0}")]
InvalidServerName(String),
/// Invalid handshake
#[error("handshake verification failed: {0}")]
InvalidHandshake(String),
/// Invalid server certificate
#[error("server certificate verification failed: {0}")]
InvalidServerCertificate(String),
}
/// A session proof which is created from a [crate::session::NotarizedSession]
///
/// Proof of the TLS handshake, server identity, and commitments to the transcript.
#[derive(Debug, Serialize, Deserialize)]
pub struct SessionProof {
/// The session header
pub header: SessionHeader,
/// Signature for the session header, if the notary signed it
pub signature: Option<Signature>,
/// Information about the server
pub session_info: SessionInfo,
}
impl SessionProof {
/// Verify the session proof.
///
/// # Arguments
///
/// * `notary_public_key` - The public key of the notary.
/// * `cert_verifier` - The certificate verifier.
pub fn verify(
&self,
notary_public_key: impl Into<NotaryPublicKey>,
cert_verifier: &impl ServerCertVerifier,
) -> Result<(), SessionProofError> {
// Verify notary signature
let signature = self
.signature
.as_ref()
.ok_or(SessionProofError::MissingNotarySignature)?;
signature.verify(&self.header.to_bytes(), notary_public_key)?;
self.session_info
.verify(self.header.handshake_summary(), cert_verifier)?;
Ok(())
}
/// Verify the session proof using trust anchors from the `webpki-roots` crate.
///
/// # Arguments
///
/// * `notary_public_key` - The public key of the notary.
pub fn verify_with_default_cert_verifier(
&self,
notary_public_key: impl Into<NotaryPublicKey>,
) -> Result<(), SessionProofError> {
self.verify(notary_public_key, &default_cert_verifier())
}
}
/// Contains information about the session
///
/// Includes the [ServerName] and the decommitment to the [HandshakeData].
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SessionInfo {
/// The server name.
pub server_name: ServerName,
/// Decommitment to the TLS handshake and server identity.
pub handshake_decommitment: Decommitment<HandshakeData>,
}
impl SessionInfo {
/// Verify the session info.
pub fn verify(
&self,
handshake_summary: &HandshakeSummary,
cert_verifier: &impl ServerCertVerifier,
) -> Result<(), SessionProofError> {
// Verify server name
let server_name = TlsServerName::try_from(self.server_name.as_ref())
.map_err(|e| SessionProofError::InvalidServerName(e.to_string()))?;
// Verify handshake
self.handshake_decommitment
.verify(handshake_summary.handshake_commitment())
.map_err(|e| SessionProofError::InvalidHandshake(e.to_string()))?;
// Verify server certificate
self.handshake_decommitment
.data()
.verify(
cert_verifier,
UNIX_EPOCH + Duration::from_secs(handshake_summary.time()),
&server_name,
)
.map_err(|e| SessionProofError::InvalidServerCertificate(e.to_string()))?;
Ok(())
}
/// Verify the session info using trust anchors from the `webpki-roots` crate.
///
/// # Arguments
///
/// * `handshake_summary` - The summary of the handshake.
pub fn verify_with_default_cert_verifier(
&self,
handshake_summary: &HandshakeSummary,
) -> Result<(), SessionProofError> {
self.verify(handshake_summary, &default_cert_verifier())
}
}
/// Create a new [`WebPkiVerifier`] with the default trust anchors from the `webpki-roots` crate.
pub fn default_cert_verifier() -> WebPkiVerifier {
let mut root_store = RootCertStore::empty();
root_store.add_server_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| {
OwnedTrustAnchor::from_subject_spki_name_constraints(
ta.subject.as_ref(),
ta.subject_public_key_info.as_ref(),
ta.name_constraints.as_ref().map(|nc| nc.as_ref()),
)
}));
WebPkiVerifier::new(root_store, None)
}
#[cfg(test)]
mod tests {
use super::*;
use rstest::*;
use crate::fixtures::cert::{appliedzkp, tlsnotary, TestData};
use tls_core::{dns::ServerName, key::Certificate};
use web_time::SystemTime;
/// Expect chain verification to succeed
#[rstest]
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_verify_cert_chain_sucess_ca_implicit(#[case] data: TestData) {
assert!(default_cert_verifier()
.verify_server_cert(
&data.ee,
&[data.inter],
&ServerName::try_from(data.dns_name.as_ref()).unwrap(),
&mut std::iter::empty(),
&[],
SystemTime::UNIX_EPOCH + Duration::from_secs(data.time),
)
.is_ok());
}
/// Expect chain verification to succeed even when a trusted CA is provided among the intermediate
/// certs. webpki handles such cases properly.
#[rstest]
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_verify_cert_chain_success_ca_explicit(#[case] data: TestData) {
assert!(default_cert_verifier()
.verify_server_cert(
&data.ee,
&[data.inter, data.ca],
&ServerName::try_from(data.dns_name.as_ref()).unwrap(),
&mut std::iter::empty(),
&[],
SystemTime::UNIX_EPOCH + Duration::from_secs(data.time),
)
.is_ok());
}
/// Expect to fail since the end entity cert was not valid at the time
#[rstest]
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_verify_cert_chain_fail_bad_time(#[case] data: TestData) {
// unix time when the cert chain was NOT valid
let bad_time: u64 = 1571465711;
let err = default_cert_verifier().verify_server_cert(
&data.ee,
&[data.inter],
&ServerName::try_from(data.dns_name.as_ref()).unwrap(),
&mut std::iter::empty(),
&[],
SystemTime::UNIX_EPOCH + Duration::from_secs(bad_time),
);
assert!(matches!(
err.unwrap_err(),
tls_core::Error::InvalidCertificateData(_)
));
}
/// Expect to fail when no intermediate cert provided
#[rstest]
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_verify_cert_chain_fail_no_interm_cert(#[case] data: TestData) {
let err = default_cert_verifier().verify_server_cert(
&data.ee,
&[],
&ServerName::try_from(data.dns_name.as_ref()).unwrap(),
&mut std::iter::empty(),
&[],
SystemTime::UNIX_EPOCH + Duration::from_secs(data.time),
);
assert!(matches!(
err.unwrap_err(),
tls_core::Error::InvalidCertificateData(_)
));
}
/// Expect to fail when no intermediate cert provided even if a trusted CA cert is provided
#[rstest]
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_verify_cert_chain_fail_no_interm_cert_with_ca_cert(#[case] data: TestData) {
let err = default_cert_verifier().verify_server_cert(
&data.ee,
&[data.ca],
&ServerName::try_from(data.dns_name.as_ref()).unwrap(),
&mut std::iter::empty(),
&[],
SystemTime::UNIX_EPOCH + Duration::from_secs(data.time),
);
assert!(matches!(
err.unwrap_err(),
tls_core::Error::InvalidCertificateData(_)
));
}
/// Expect to fail because end-entity cert is wrong
#[rstest]
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_verify_cert_chain_fail_bad_ee_cert(#[case] data: TestData) {
let ee: &[u8] = include_bytes!("../fixtures/testdata/key_exchange/unknown/ee.der");
let err = default_cert_verifier().verify_server_cert(
&Certificate(ee.to_vec()),
&[data.inter],
&ServerName::try_from(data.dns_name.as_ref()).unwrap(),
&mut std::iter::empty(),
&[],
SystemTime::UNIX_EPOCH + Duration::from_secs(data.time),
);
assert!(matches!(
err.unwrap_err(),
tls_core::Error::InvalidCertificateData(_)
));
}
/// Expect to succeed when key exchange params signed correctly with a cert
#[rstest]
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_verify_sig_ke_params_success(#[case] data: TestData) {
assert!(default_cert_verifier()
.verify_tls12_signature(&data.signature_msg(), &data.ee, &data.dss())
.is_ok());
}
/// Expect sig verification to fail because client_random is wrong
#[rstest]
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_verify_sig_ke_params_fail_bad_client_random(#[case] mut data: TestData) {
data.cr.0[31] = data.cr.0[31].wrapping_add(1);
assert!(default_cert_verifier()
.verify_tls12_signature(&data.signature_msg(), &data.ee, &data.dss())
.is_err());
}
/// Expect sig verification to fail because the sig is wrong
#[rstest]
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_verify_sig_ke_params_fail_bad_sig(#[case] mut data: TestData) {
data.sig[31] = data.sig[31].wrapping_add(1);
assert!(default_cert_verifier()
.verify_tls12_signature(&data.signature_msg(), &data.ee, &data.dss())
.is_err());
}
/// Expect to fail because the dns name is not in the cert
#[rstest]
#[case::tlsnotary(tlsnotary())]
#[case::appliedzkp(appliedzkp())]
fn test_check_dns_name_present_in_cert_fail_bad_host(#[case] data: TestData) {
let bad_name = ServerName::try_from("badhost.com").unwrap();
assert!(default_cert_verifier()
.verify_server_cert(
&data.ee,
&[data.inter, data.ca],
&bad_name,
&mut std::iter::empty(),
&[],
SystemTime::UNIX_EPOCH + Duration::from_secs(data.time),
)
.is_err());
}
}

View File

@@ -1,326 +0,0 @@
//! Substrings proofs based on commitments.
use crate::{
commitment::{
Commitment, CommitmentId, CommitmentInfo, CommitmentKind, CommitmentOpening,
TranscriptCommitments,
},
merkle::MerkleProof,
transcript::get_value_ids,
Direction, EncodingId, RedactedTranscript, SessionHeader, Transcript, TranscriptSlice,
MAX_TOTAL_COMMITTED_DATA,
};
use mpz_circuits::types::ValueType;
use mpz_garble_core::Encoder;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use utils::range::{RangeDisjoint, RangeSet, RangeUnion, ToRangeSet};
/// An error for [`SubstringsProofBuilder`]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum SubstringsProofBuilderError {
/// Invalid commitment id.
#[error("invalid commitment id: {0:?}")]
InvalidCommitmentId(CommitmentId),
/// Missing commitment.
#[error("missing commitment")]
MissingCommitment,
/// Invalid commitment type.
#[error("commitment {0:?} is not a substrings commitment")]
InvalidCommitmentType(CommitmentId),
/// Attempted to add a commitment with a duplicate id.
#[error("commitment with id {0:?} already exists")]
DuplicateCommitmentId(CommitmentId),
}
/// A builder for [`SubstringsProof`]
pub struct SubstringsProofBuilder<'a> {
commitments: &'a TranscriptCommitments,
transcript_tx: &'a Transcript,
transcript_rx: &'a Transcript,
openings: HashMap<CommitmentId, (CommitmentInfo, CommitmentOpening)>,
}
opaque_debug::implement!(SubstringsProofBuilder<'_>);
impl<'a> SubstringsProofBuilder<'a> {
/// Creates a new builder.
pub fn new(
commitments: &'a TranscriptCommitments,
transcript_tx: &'a Transcript,
transcript_rx: &'a Transcript,
) -> Self {
Self {
commitments,
transcript_tx,
transcript_rx,
openings: HashMap::default(),
}
}
/// Returns a reference to the commitments.
pub fn commitments(&self) -> &TranscriptCommitments {
self.commitments
}
/// Reveals data corresponding to the provided ranges in the sent direction.
pub fn reveal_sent(
&mut self,
ranges: &dyn ToRangeSet<usize>,
commitment_kind: CommitmentKind,
) -> Result<&mut Self, SubstringsProofBuilderError> {
self.reveal(ranges, Direction::Sent, commitment_kind)
}
/// Reveals data corresponding to the provided transcript subsequence in the received direction.
pub fn reveal_recv(
&mut self,
ranges: &dyn ToRangeSet<usize>,
commitment_kind: CommitmentKind,
) -> Result<&mut Self, SubstringsProofBuilderError> {
self.reveal(ranges, Direction::Received, commitment_kind)
}
/// Reveals data corresponding to the provided ranges and direction.
pub fn reveal(
&mut self,
ranges: &dyn ToRangeSet<usize>,
direction: Direction,
commitment_kind: CommitmentKind,
) -> Result<&mut Self, SubstringsProofBuilderError> {
let com = self
.commitments
.get_id_by_info(commitment_kind, &ranges.to_range_set(), direction)
.ok_or(SubstringsProofBuilderError::MissingCommitment)?;
self.reveal_by_id(com)
}
/// Reveals data corresponding to the provided commitment id
pub fn reveal_by_id(
&mut self,
id: CommitmentId,
) -> Result<&mut Self, SubstringsProofBuilderError> {
let commitment = self
.commitments()
.get(&id)
.ok_or(SubstringsProofBuilderError::InvalidCommitmentId(id))?;
let info = self
.commitments()
.get_info(&id)
.expect("info exists if commitment exists");
#[allow(irrefutable_let_patterns)]
let Commitment::Blake3(commitment) = commitment
else {
return Err(SubstringsProofBuilderError::InvalidCommitmentType(id));
};
let transcript = match info.direction() {
Direction::Sent => self.transcript_tx,
Direction::Received => self.transcript_rx,
};
let data = transcript.get_bytes_in_ranges(info.ranges());
// add commitment to openings and return an error if it is already present
if self
.openings
.insert(id, (info.clone(), commitment.open(data).into()))
.is_some()
{
return Err(SubstringsProofBuilderError::DuplicateCommitmentId(id));
}
Ok(self)
}
/// Builds the [`SubstringsProof`]
pub fn build(self) -> Result<SubstringsProof, SubstringsProofBuilderError> {
let Self {
commitments,
openings,
..
} = self;
let mut indices = openings
.keys()
.map(|id| id.to_inner() as usize)
.collect::<Vec<_>>();
indices.sort();
let inclusion_proof = commitments.merkle_tree().proof(&indices);
Ok(SubstringsProof {
openings,
inclusion_proof,
})
}
}
/// An error relating to [`SubstringsProof`]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum SubstringsProofError {
/// The proof contains more data than the maximum allowed.
#[error(
"substrings proof opens more data than the maximum allowed: {0} > {}",
MAX_TOTAL_COMMITTED_DATA
)]
MaxDataExceeded(usize),
/// The proof contains duplicate transcript data.
#[error("proof contains duplicate transcript data")]
DuplicateData(Direction, RangeSet<usize>),
/// Range of the opening is out of bounds.
#[error("range of opening {0:?} is out of bounds: {1}")]
RangeOutOfBounds(CommitmentId, usize),
/// The proof contains an invalid commitment opening.
#[error("invalid opening for commitment id: {0:?}")]
InvalidOpening(CommitmentId),
/// The proof contains an invalid inclusion proof.
#[error("invalid inclusion proof: {0}")]
InvalidInclusionProof(String),
}
/// A substring proof using commitments
///
/// This substring proof contains the commitment openings and a proof
/// that the corresponding commitments are present in the merkle tree.
#[derive(Serialize, Deserialize)]
pub struct SubstringsProof {
openings: HashMap<CommitmentId, (CommitmentInfo, CommitmentOpening)>,
inclusion_proof: MerkleProof,
}
opaque_debug::implement!(SubstringsProof);
impl SubstringsProof {
/// Verifies this proof and, if successful, returns the redacted sent and received transcripts.
///
/// # Arguments
///
/// * `header` - The session header.
pub fn verify(
self,
header: &SessionHeader,
) -> Result<(RedactedTranscript, RedactedTranscript), SubstringsProofError> {
let Self {
openings,
inclusion_proof,
} = self;
let mut indices = Vec::with_capacity(openings.len());
let mut expected_hashes = Vec::with_capacity(openings.len());
let mut sent = vec![0u8; header.sent_len()];
let mut recv = vec![0u8; header.recv_len()];
let mut sent_ranges = RangeSet::default();
let mut recv_ranges = RangeSet::default();
let mut total_opened = 0u128;
for (id, (info, opening)) in openings {
let CommitmentInfo {
ranges, direction, ..
} = info;
let opened_len = ranges.len();
// Make sure the amount of data being proved is bounded.
total_opened += opened_len as u128;
if total_opened > MAX_TOTAL_COMMITTED_DATA as u128 {
return Err(SubstringsProofError::MaxDataExceeded(total_opened as usize));
}
// Make sure the opening length matches the ranges length.
if opening.data().len() != opened_len {
return Err(SubstringsProofError::InvalidOpening(id));
}
// Make sure duplicate data is not opened.
match direction {
Direction::Sent => {
if !sent_ranges.is_disjoint(&ranges) {
return Err(SubstringsProofError::DuplicateData(direction, ranges));
}
sent_ranges = sent_ranges.union(&ranges);
}
Direction::Received => {
if !recv_ranges.is_disjoint(&ranges) {
return Err(SubstringsProofError::DuplicateData(direction, ranges));
}
recv_ranges = recv_ranges.union(&ranges);
}
}
// Make sure the ranges are within the bounds of the transcript
let max = ranges
.max()
.ok_or(SubstringsProofError::InvalidOpening(id))?;
let transcript_len = match direction {
Direction::Sent => header.sent_len(),
Direction::Received => header.recv_len(),
};
if max > transcript_len {
return Err(SubstringsProofError::RangeOutOfBounds(id, max));
}
// Generate the expected encodings for the purported data in the opening.
let encodings = get_value_ids(&ranges, direction)
.map(|id| {
header
.encoder()
.encode_by_type(EncodingId::new(&id).to_inner(), &ValueType::U8)
})
.collect::<Vec<_>>();
// Compute the expected hash of the commitment to make sure it is
// present in the merkle tree.
indices.push(id.to_inner() as usize);
expected_hashes.push(opening.recover(&encodings).hash());
// Make sure the length of data from the opening matches the commitment.
let mut data = opening.into_data();
if data.len() != ranges.len() {
return Err(SubstringsProofError::InvalidOpening(id));
}
let dest = match direction {
Direction::Sent => &mut sent,
Direction::Received => &mut recv,
};
// Iterate over the ranges backwards, copying the data from the opening
// then truncating it.
for range in ranges.iter_ranges().rev() {
let start = data.len() - range.len();
dest[range].copy_from_slice(&data[start..]);
data.truncate(start);
}
}
// Verify that the expected hashes are present in the merkle tree.
//
// This proves the Prover committed to the purported data prior to the encoder
// seed being revealed.
inclusion_proof
.verify(header.merkle_root(), &indices, &expected_hashes)
.map_err(|e| SubstringsProofError::InvalidInclusionProof(e.to_string()))?;
// Iterate over the unioned ranges and create TranscriptSlices for each.
// This ensures that the slices are sorted and disjoint.
let sent_slices = sent_ranges
.iter_ranges()
.map(|range| TranscriptSlice::new(range.clone(), sent[range].to_vec()))
.collect();
let recv_slices = recv_ranges
.iter_ranges()
.map(|range| TranscriptSlice::new(range.clone(), recv[range].to_vec()))
.collect();
Ok((
RedactedTranscript::new(header.sent_len(), sent_slices),
RedactedTranscript::new(header.recv_len(), recv_slices),
))
}
}

View File

@@ -0,0 +1,64 @@
use tls_core::{
anchors::{OwnedTrustAnchor, RootCertStore},
verify::WebPkiVerifier,
};
use crate::{
hash::HashProvider,
signing::{SignatureVerifierProvider, SignerProvider},
};
/// Cryptography provider.
///
/// ## Custom Algorithms
///
/// This is the primary interface for extending cryptographic functionality. The various
/// providers can be configured with custom algorithms and implementations.
///
/// Algorithms are uniquely identified using an 8-bit ID, eg. [`HashAlgId`](crate::hash::HashAlgId),
/// half of which is reserved for the officially supported algorithms. If you think that a new
/// algorithm should be added to the official set, please open an issue. Beware that other parties
/// may assign different algorithms to the same ID as you, and we make no effort to mitigate this.
pub struct CryptoProvider {
    /// Hash provider.
    pub hash: HashProvider,
    /// Certificate verifier.
    ///
    /// This is used to verify the server's certificate chain.
    ///
    /// The default verifier uses the Mozilla root certificates.
    // The default is built by `default_cert_verifier` from the
    // `webpki-roots` bundle.
    pub cert: WebPkiVerifier,
    /// Signer provider.
    ///
    /// This is used for signing attestations.
    pub signer: SignerProvider,
    /// Signature verifier provider.
    ///
    /// This is used for verifying signatures of attestations.
    pub signature: SignatureVerifierProvider,
}
opaque_debug::implement!(CryptoProvider);
impl Default for CryptoProvider {
fn default() -> Self {
Self {
hash: Default::default(),
cert: default_cert_verifier(),
signer: Default::default(),
signature: Default::default(),
}
}
}
/// Builds a [`WebPkiVerifier`] seeded with the Mozilla root certificates
/// shipped in the `webpki-roots` crate.
pub(crate) fn default_cert_verifier() -> WebPkiVerifier {
    // Convert each bundled trust anchor into the owned form the root store
    // expects.
    let anchors = webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| {
        OwnedTrustAnchor::from_subject_spki_name_constraints(
            ta.subject.as_ref(),
            ta.subject_public_key_info.as_ref(),
            ta.name_constraints.as_ref().map(|nc| nc.as_ref()),
        )
    });

    let mut roots = RootCertStore::empty();
    roots.add_server_trust_anchors(anchors);

    WebPkiVerifier::new(roots, None)
}

View File

@@ -0,0 +1,86 @@
//! Attestation requests.
//!
//! After the TLS connection, a Prover can request an attestation from the Notary which contains
//! various information about the connection. During this process the Prover has the opportunity
//! to configure certain aspects of the attestation, such as which signature algorithm the Notary
//! should use to sign the attestation. Or which hash algorithm the Notary should use to merkelize
//! the fields.
//!
//! A [`Request`] can be created using a [`RequestBuilder`]. The builder will take both configuration
//! via a [`RequestConfig`] as well as the Prover's secret data. The [`Secrets`](crate::Secrets) are of
//! course not shared with the Notary but are used to create commitments which are included in the attestation.
mod builder;
mod config;
use serde::{Deserialize, Serialize};
use crate::{
attestation::Attestation,
connection::ServerCertCommitment,
hash::{HashAlgId, TypedHash},
signing::SignatureAlgId,
};
pub use builder::{RequestBuilder, RequestBuilderError};
pub use config::{RequestConfig, RequestConfigBuilder, RequestConfigBuilderError};
/// Attestation request.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Request {
    // Signature algorithm the Notary is expected to sign the attestation with.
    pub(crate) signature_alg: SignatureAlgId,
    // Hash algorithm expected for the attestation header root.
    pub(crate) hash_alg: HashAlgId,
    // Commitment to the server's certificate data.
    pub(crate) server_cert_commitment: ServerCertCommitment,
    // Merkle root of the transcript encoding commitments, if one was made.
    pub(crate) encoding_commitment_root: Option<TypedHash>,
}
impl Request {
    /// Returns a new request builder.
    pub fn builder(config: &RequestConfig) -> RequestBuilder {
        RequestBuilder::new(config)
    }

    /// Validates the content of the attestation against this request.
    ///
    /// Checks that the Notary honored the requested configuration: signature
    /// algorithm, hash algorithm, server certificate commitment, and — when
    /// one was requested — the encoding commitment root.
    ///
    /// # Errors
    ///
    /// Returns [`InconsistentAttestation`] describing the first mismatch found.
    pub fn validate(&self, attestation: &Attestation) -> Result<(), InconsistentAttestation> {
        if attestation.signature.alg != self.signature_alg {
            return Err(InconsistentAttestation(format!(
                "signature algorithm: expected {:?}, got {:?}",
                self.signature_alg, attestation.signature.alg
            )));
        }

        if attestation.header.root.alg != self.hash_alg {
            return Err(InconsistentAttestation(format!(
                "hash algorithm: expected {:?}, got {:?}",
                self.hash_alg, attestation.header.root.alg
            )));
        }

        if attestation.body.cert_commitment() != &self.server_cert_commitment {
            return Err(InconsistentAttestation(
                "server certificate commitment does not match".to_string(),
            ));
        }

        // The encoding commitment is only checked when the request asked for
        // one: the attestation must then contain it and its root must match.
        if let Some(encoding_commitment_root) = &self.encoding_commitment_root {
            let Some(encoding_commitment) = attestation.body.encoding_commitment() else {
                return Err(InconsistentAttestation(
                    "encoding commitment is missing".to_string(),
                ));
            };

            if &encoding_commitment.root != encoding_commitment_root {
                return Err(InconsistentAttestation(
                    "encoding commitment root does not match".to_string(),
                ));
            }
        }

        Ok(())
    }
}
/// Error for [`Request::validate`].
///
/// Carries a human-readable description of the first inconsistency found
/// between the request and the attestation.
#[derive(Debug, thiserror::Error)]
#[error("inconsistent attestation: {0}")]
pub struct InconsistentAttestation(String);

View File

@@ -0,0 +1,122 @@
use crate::{
connection::{ServerCertData, ServerCertOpening, ServerName},
index::Index,
request::{Request, RequestConfig},
secrets::Secrets,
transcript::{encoding::EncodingTree, Transcript},
CryptoProvider,
};
/// Builder for [`Request`].
///
/// Collects the data the Prover wants attested; `build` turns it into a
/// [`Request`] plus the corresponding [`Secrets`](crate::secrets::Secrets).
pub struct RequestBuilder<'a> {
    // Supplies the signature and hash algorithm choices.
    config: &'a RequestConfig,
    // Required before `build`.
    server_name: Option<ServerName>,
    // Required before `build`.
    server_cert_data: Option<ServerCertData>,
    // Optional commitment to the transcript encodings.
    encoding_tree: Option<EncodingTree>,
    // Required before `build`.
    transcript: Option<Transcript>,
}
impl<'a> RequestBuilder<'a> {
    /// Creates a new request builder.
    pub fn new(config: &'a RequestConfig) -> Self {
        Self {
            config,
            server_name: None,
            server_cert_data: None,
            encoding_tree: None,
            transcript: None,
        }
    }

    /// Sets the server name.
    pub fn server_name(&mut self, name: ServerName) -> &mut Self {
        self.server_name = Some(name);
        self
    }

    /// Sets the server identity data.
    pub fn server_cert_data(&mut self, data: ServerCertData) -> &mut Self {
        self.server_cert_data = Some(data);
        self
    }

    /// Sets the tree to commit to the transcript encodings.
    pub fn encoding_tree(&mut self, tree: EncodingTree) -> &mut Self {
        self.encoding_tree = Some(tree);
        self
    }

    /// Sets the transcript.
    pub fn transcript(&mut self, transcript: Transcript) -> &mut Self {
        self.transcript = Some(transcript);
        self
    }

    /// Builds the attestation request and returns the corresponding secrets.
    ///
    /// Fails if the configured hash algorithm is not supported by `provider`,
    /// or if the server name, server identity data, or transcript was never
    /// set.
    pub fn build(
        self,
        provider: &CryptoProvider,
    ) -> Result<(Request, Secrets), RequestBuilderError> {
        let signature_alg = *self.config.signature_alg();
        let hash_alg = *self.config.hash_alg();

        let hasher = provider.hash.get(&hash_alg).map_err(|_| {
            RequestBuilderError::new(format!("unsupported hash algorithm: {hash_alg}"))
        })?;

        let server_name = self
            .server_name
            .ok_or_else(|| RequestBuilderError::new("server name is missing"))?;
        let cert_data = self
            .server_cert_data
            .ok_or_else(|| RequestBuilderError::new("server identity data is missing"))?;
        let transcript = self
            .transcript
            .ok_or_else(|| RequestBuilderError::new("transcript is missing"))?;

        let server_cert_opening = ServerCertOpening::new(cert_data);
        let server_cert_commitment = server_cert_opening.commit(hasher);

        let encoding_tree = self.encoding_tree;
        let encoding_commitment_root = encoding_tree.as_ref().map(|tree| tree.root());

        let request = Request {
            signature_alg,
            hash_alg,
            server_cert_commitment,
            encoding_commitment_root,
        };

        let secrets = Secrets {
            server_name,
            server_cert_opening,
            encoding_tree,
            // No plaintext hash commitments are produced by this builder.
            plaintext_hashes: Index::default(),
            transcript,
        };

        Ok((request, secrets))
    }
}
/// Error for [`RequestBuilder`].
///
/// Produced when a required field is missing or an algorithm is unsupported.
#[derive(Debug, thiserror::Error)]
#[error("request builder error: {message}")]
pub struct RequestBuilderError {
    // Human-readable description of what went wrong.
    message: String,
}
impl RequestBuilderError {
    /// Constructs a builder error from anything convertible to a string.
    fn new(message: impl Into<String>) -> Self {
        let message = message.into();
        Self { message }
    }
}

View File

@@ -0,0 +1,76 @@
use crate::{hash::HashAlgId, signing::SignatureAlgId};
/// Request configuration.
///
/// Selects the algorithms used when building an attestation request.
#[derive(Debug, Clone)]
pub struct RequestConfig {
    // Signature algorithm the Notary should sign with.
    signature_alg: SignatureAlgId,
    // Hash algorithm used for commitments.
    hash_alg: HashAlgId,
}
impl Default for RequestConfig {
    /// Builds the default configuration.
    ///
    /// The builder's `build` is infallible with default settings, so the
    /// `unwrap` cannot panic.
    fn default() -> Self {
        Self::builder().build().unwrap()
    }
}
impl RequestConfig {
    /// Creates a new builder.
    pub fn builder() -> RequestConfigBuilder {
        RequestConfigBuilder::default()
    }

    /// Returns the signature algorithm.
    pub fn signature_alg(&self) -> &SignatureAlgId {
        &self.signature_alg
    }

    /// Returns the hash algorithm.
    pub fn hash_alg(&self) -> &HashAlgId {
        &self.hash_alg
    }
}
/// Builder for [`RequestConfig`].
#[derive(Debug)]
pub struct RequestConfigBuilder {
    // Defaults to secp256k1 (see `Default` impl).
    signature_alg: SignatureAlgId,
    // Defaults to BLAKE3 (see `Default` impl).
    hash_alg: HashAlgId,
}
impl Default for RequestConfigBuilder {
    /// Default algorithm selection: secp256k1 signatures and BLAKE3 hashing.
    fn default() -> Self {
        Self {
            signature_alg: SignatureAlgId::SECP256K1,
            hash_alg: HashAlgId::BLAKE3,
        }
    }
}
impl RequestConfigBuilder {
    /// Sets the signature algorithm.
    pub fn signature_alg(&mut self, signature_alg: SignatureAlgId) -> &mut Self {
        self.signature_alg = signature_alg;
        self
    }

    /// Sets the hash algorithm.
    pub fn hash_alg(&mut self, hash_alg: HashAlgId) -> &mut Self {
        self.hash_alg = hash_alg;
        self
    }

    /// Builds the config.
    ///
    /// Currently infallible; the `Result` keeps the API stable in case
    /// validation is added later.
    pub fn build(self) -> Result<RequestConfig, RequestConfigBuilderError> {
        let Self {
            signature_alg,
            hash_alg,
        } = self;

        Ok(RequestConfig {
            signature_alg,
            hash_alg,
        })
    }
}
/// Error for [`RequestConfigBuilder`].
///
/// Note: `RequestConfigBuilder::build` currently never constructs this; it
/// exists so the builder API can become fallible without breaking callers.
#[derive(Debug, thiserror::Error)]
#[error("request configuration builder error: {message}")]
pub struct RequestConfigBuilderError {
    // Human-readable description of the failure.
    message: String,
}

View File

@@ -0,0 +1,47 @@
use serde::{Deserialize, Serialize};
use crate::{
connection::{ServerCertOpening, ServerIdentityProof, ServerName},
index::Index,
transcript::{
encoding::EncodingTree, hash::PlaintextHashSecret, Transcript, TranscriptProofBuilder,
},
};
/// Secret data of an [`Attestation`](crate::attestation::Attestation).
#[derive(Serialize, Deserialize)]
pub struct Secrets {
    // The server the Prover connected to.
    pub(crate) server_name: ServerName,
    // Opening of the server certificate commitment.
    pub(crate) server_cert_opening: ServerCertOpening,
    // Encoding commitment tree, if encodings were committed to.
    pub(crate) encoding_tree: Option<EncodingTree>,
    // Secrets for plaintext hash commitments.
    pub(crate) plaintext_hashes: Index<PlaintextHashSecret>,
    // The full transcript of the TLS session.
    pub(crate) transcript: Transcript,
}

// Debug is intentionally opaque so secret material does not end up in logs.
opaque_debug::implement!(Secrets);
impl Secrets {
    /// Returns the server name.
    pub fn server_name(&self) -> &ServerName {
        &self.server_name
    }

    /// Returns the transcript.
    pub fn transcript(&self) -> &Transcript {
        &self.transcript
    }

    /// Returns a server identity proof.
    pub fn identity_proof(&self) -> ServerIdentityProof {
        let name = self.server_name.clone();
        let opening = self.server_cert_opening.clone();

        ServerIdentityProof::new(name, opening)
    }

    /// Returns a transcript proof builder.
    pub fn transcript_proof_builder(&self) -> TranscriptProofBuilder<'_> {
        let encoding_tree = self.encoding_tree.as_ref();

        TranscriptProofBuilder::new(&self.transcript, encoding_tree, &self.plaintext_hashes)
    }
}

View File

@@ -0,0 +1,18 @@
/// Canonical serialization of TLSNotary types.
///
/// This trait is used to serialize types into a canonical byte
/// representation, e.g. for hashing or signing, where two equal values must
/// always produce the same bytes.
pub(crate) trait CanonicalSerialize {
    /// Serializes the type.
    fn serialize(&self) -> Vec<u8>;
}
// Blanket impl: any serde-serializable type gets canonical serialization.
impl<T> CanonicalSerialize for T
where
    T: serde::Serialize,
{
    fn serialize(&self) -> Vec<u8> {
        // For now we use BCS for serialization. In future releases we will want to
        // consider this further, particularly with respect to EVM compatibility.
        // NOTE(review): the unwrap assumes BCS serialization of these in-memory
        // types is infallible — confirm no type here contains maps/floats BCS rejects.
        bcs::to_bytes(self).unwrap()
    }
}

View File

@@ -1,77 +0,0 @@
use crate::{
commitment::TranscriptCommitments,
proof::{SessionInfo, SubstringsProofBuilder},
ServerName, Transcript,
};
use mpz_core::commit::Decommitment;
use serde::{Deserialize, Serialize};
use tls_core::handshake::HandshakeData;
/// Session data used for notarization.
///
/// This contains all the private data held by the `Prover` after notarization including
/// commitments to the parts of the transcript.
///
/// # Selective disclosure
///
/// The `Prover` can selectively disclose parts of the transcript to a `Verifier` using a
/// [`SubstringsProof`](crate::proof::SubstringsProof).
///
/// See [`build_substrings_proof`](SessionData::build_substrings_proof).
#[derive(Serialize, Deserialize)]
pub struct SessionData {
    // Server name and handshake decommitment.
    session_info: SessionInfo,
    // Transcript of data sent to the server.
    transcript_tx: Transcript,
    // Transcript of data received from the server.
    transcript_rx: Transcript,
    // Commitments to parts of the transcript.
    commitments: TranscriptCommitments,
}
impl SessionData {
    /// Creates new session data.
    ///
    /// # Arguments
    ///
    /// * `server_name` - The name of the server.
    /// * `handshake_data_decommitment` - Decommitment to the handshake data.
    /// * `transcript_tx` - Transcript of data sent to the server.
    /// * `transcript_rx` - Transcript of data received from the server.
    /// * `commitments` - Commitments to parts of the transcript.
    pub fn new(
        server_name: ServerName,
        handshake_data_decommitment: Decommitment<HandshakeData>,
        transcript_tx: Transcript,
        transcript_rx: Transcript,
        commitments: TranscriptCommitments,
    ) -> Self {
        Self {
            session_info: SessionInfo {
                server_name,
                handshake_decommitment: handshake_data_decommitment,
            },
            transcript_tx,
            transcript_rx,
            commitments,
        }
    }

    /// Returns the session info.
    pub fn session_info(&self) -> &SessionInfo {
        &self.session_info
    }

    /// Returns the transcript for data sent to the server.
    pub fn sent_transcript(&self) -> &Transcript {
        &self.transcript_tx
    }

    /// Returns the transcript for data received from the server.
    pub fn recv_transcript(&self) -> &Transcript {
        &self.transcript_rx
    }

    /// Returns the transcript commitments.
    pub fn commitments(&self) -> &TranscriptCommitments {
        &self.commitments
    }

    /// Returns a substrings proof builder.
    pub fn build_substrings_proof(&self) -> SubstringsProofBuilder {
        SubstringsProofBuilder::new(&self.commitments, &self.transcript_tx, &self.transcript_rx)
    }
}

// Debug is intentionally opaque: this struct holds private session data.
opaque_debug::implement!(SessionData);

View File

@@ -1,82 +0,0 @@
use mpz_core::{commit::Decommitment, hash::Hash};
use serde::{Deserialize, Serialize};
use tls_core::{handshake::HandshakeData, key::PublicKey, msgs::handshake::ServerECDHParams};
/// An error that can occur while verifying a handshake summary.
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum HandshakeVerifyError {
    /// The handshake data does not match the commitment.
    #[error("Handshake data does not match commitment")]
    Commitment,
    /// The key exchange parameters are invalid (failed to decode).
    #[error("Key exchange parameters are invalid")]
    KxParams,
    /// The server ephemeral key does not match the one in the summary.
    #[error("Server ephemeral key does not match")]
    ServerEphemKey,
}
/// Handshake summary is part of the session header signed by the Notary.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HandshakeSummary {
    /// Time when Notary signed the session header.
    // TODO: we can change this to be the time when the Notary started the TLS handshake 2PC
    time: u64,
    /// Server ephemeral public key.
    server_public_key: PublicKey,
    /// Prover's commitment to [crate::handshake_data::HandshakeData].
    handshake_commitment: Hash,
}
impl HandshakeSummary {
/// Creates a new HandshakeSummary
pub fn new(time: u64, ephemeral_ec_pubkey: PublicKey, handshake_commitment: Hash) -> Self {
Self {
time,
server_public_key: ephemeral_ec_pubkey,
handshake_commitment,
}
}
/// Time of the TLS session, in seconds since the UNIX epoch.
///
/// # Note
///
/// This time is not necessarily exactly aligned with the TLS handshake.
pub fn time(&self) -> u64 {
self.time
}
/// Returns the server ephemeral public key
pub fn server_public_key(&self) -> &PublicKey {
&self.server_public_key
}
/// Returns commitment to the handshake data
pub fn handshake_commitment(&self) -> &Hash {
&self.handshake_commitment
}
/// Verifies that the provided handshake data matches this handshake summary
pub fn verify(&self, data: &Decommitment<HandshakeData>) -> Result<(), HandshakeVerifyError> {
// Verify the handshake data matches the commitment in the session header
data.verify(&self.handshake_commitment)
.map_err(|_| HandshakeVerifyError::Commitment)?;
let ecdh_params = tls_core::suites::tls12::decode_ecdh_params::<ServerECDHParams>(
data.data().server_kx_details().kx_params(),
)
.ok_or(HandshakeVerifyError::KxParams)?;
let server_public_key =
PublicKey::new(ecdh_params.curve_params.named_group, &ecdh_params.public.0);
// Ephemeral pubkey must match the one which the Notary signed
if server_public_key != self.server_public_key {
return Err(HandshakeVerifyError::ServerEphemKey);
}
Ok(())
}
}

View File

@@ -1,120 +0,0 @@
use mpz_core::commit::Decommitment;
use serde::{Deserialize, Serialize};
use mpz_garble_core::ChaChaEncoder;
use tls_core::{handshake::HandshakeData, key::PublicKey};
use crate::{merkle::MerkleRoot, HandshakeSummary};
/// An error that can occur while verifying a session header.
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum SessionHeaderVerifyError {
    /// The session header is not consistent with the provided data.
    #[error("session header is not consistent with the provided data")]
    InconsistentHeader,
}
/// An authentic session header from the Notary.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SessionHeader {
    /// A PRG seed used to generate encodings for the plaintext.
    encoder_seed: [u8; 32],
    /// The root of the Merkle tree of all the commitments. The Prover must prove that each one of the
    /// `commitments` is included in the Merkle tree.
    /// This approach allows the Prover to hide from the Notary the exact amount of commitments thus
    /// increasing Prover privacy against the Notary.
    /// The root was made known to the Notary before the Notary opened his garbled circuits
    /// to the Prover.
    merkle_root: MerkleRoot,
    /// Bytelength of all data which was sent to the webserver.
    sent_len: usize,
    /// Bytelength of all data which was received from the webserver.
    recv_len: usize,
    /// Summary of the TLS handshake (time, server ephemeral key, commitment).
    handshake_summary: HandshakeSummary,
}
impl SessionHeader {
/// Create a new instance of SessionHeader
pub fn new(
encoder_seed: [u8; 32],
merkle_root: MerkleRoot,
sent_len: usize,
recv_len: usize,
handshake_summary: HandshakeSummary,
) -> Self {
Self {
encoder_seed,
merkle_root,
sent_len,
recv_len,
handshake_summary,
}
}
/// Verify the data in the header is consistent with the Prover's view
pub fn verify(
&self,
time: u64,
server_public_key: &PublicKey,
root: &MerkleRoot,
encoder_seed: &[u8; 32],
handshake_data_decommitment: &Decommitment<HandshakeData>,
) -> Result<(), SessionHeaderVerifyError> {
let ok_time = self.handshake_summary.time().abs_diff(time) <= 300;
let ok_root = &self.merkle_root == root;
let ok_encoder_seed = &self.encoder_seed == encoder_seed;
let ok_handshake_data = handshake_data_decommitment
.verify(self.handshake_summary.handshake_commitment())
.is_ok();
let ok_server_public_key = self.handshake_summary.server_public_key() == server_public_key;
if !(ok_time && ok_root && ok_encoder_seed && ok_handshake_data && ok_server_public_key) {
return Err(SessionHeaderVerifyError::InconsistentHeader);
}
Ok(())
}
/// Create a new [ChaChaEncoder] from encoder_seed
pub fn encoder(&self) -> ChaChaEncoder {
ChaChaEncoder::new(self.encoder_seed)
}
/// Returns the seed used to generate plaintext encodings
pub fn encoder_seed(&self) -> &[u8; 32] {
&self.encoder_seed
}
/// Returns the merkle_root of the merkle tree of the prover's commitments
pub fn merkle_root(&self) -> &MerkleRoot {
&self.merkle_root
}
/// Returns the [HandshakeSummary] of the TLS session between prover and server
pub fn handshake_summary(&self) -> &HandshakeSummary {
&self.handshake_summary
}
/// Time of the TLS session, in seconds since the UNIX epoch.
///
/// # Note
///
/// This time is not necessarily exactly aligned with the TLS handshake.
pub fn time(&self) -> u64 {
self.handshake_summary.time()
}
/// Returns the number of bytes sent to the server
pub fn sent_len(&self) -> usize {
self.sent_len
}
/// Returns the number of bytes received by the server
pub fn recv_len(&self) -> usize {
self.recv_len
}
}

View File

@@ -1,66 +0,0 @@
//! TLS session types.
mod data;
mod handshake;
mod header;
use serde::{Deserialize, Serialize};
pub use data::SessionData;
pub use handshake::{HandshakeSummary, HandshakeVerifyError};
pub use header::{SessionHeader, SessionHeaderVerifyError};
use crate::{
proof::{SessionInfo, SessionProof},
signature::Signature,
};
/// A validated notarized session stored by the Prover.
#[derive(Serialize, Deserialize)]
pub struct NotarizedSession {
    // Session header, possibly signed by the Notary (see `signature`).
    header: SessionHeader,
    // Notary's signature over the header, if one was produced.
    signature: Option<Signature>,
    // The Prover's private session data.
    data: SessionData,
}

// Debug is intentionally opaque: `data` contains private session data.
opaque_debug::implement!(NotarizedSession);
impl NotarizedSession {
    /// Create a new notarized session.
    pub fn new(header: SessionHeader, signature: Option<Signature>, data: SessionData) -> Self {
        Self {
            header,
            signature,
            data,
        }
    }

    /// Returns a proof of the TLS session.
    pub fn session_proof(&self) -> SessionProof {
        let info = self.data.session_info();

        SessionProof {
            header: self.header.clone(),
            signature: self.signature.clone(),
            session_info: SessionInfo {
                server_name: info.server_name.clone(),
                handshake_decommitment: info.handshake_decommitment.clone(),
            },
        }
    }

    /// Returns the [SessionHeader].
    pub fn header(&self) -> &SessionHeader {
        &self.header
    }

    /// Returns the signature for the session header, if the notary signed it.
    pub fn signature(&self) -> &Option<Signature> {
        &self.signature
    }

    /// Returns the [SessionData].
    pub fn data(&self) -> &SessionData {
        &self.data
    }
}

View File

@@ -1,63 +0,0 @@
use serde::{Deserialize, Serialize};
use p256::ecdsa::{signature::Verifier, VerifyingKey};
/// A Notary public key.
#[derive(Debug, Clone, Deserialize, Serialize)]
#[non_exhaustive]
pub enum NotaryPublicKey {
    /// A NIST P-256 public key.
    P256(p256::PublicKey),
}
// Allows callers to pass a raw p256 key wherever `impl Into<NotaryPublicKey>`
// is accepted.
impl From<p256::PublicKey> for NotaryPublicKey {
    fn from(key: p256::PublicKey) -> Self {
        Self::P256(key)
    }
}
/// An error occurred while verifying a signature.
///
/// The inner string carries the underlying verification failure message.
#[derive(Debug, thiserror::Error)]
#[error("signature verification failed: {0}")]
pub struct SignatureVerifyError(String);
/// A Notary signature.
#[derive(Debug, Clone, Deserialize, Serialize)]
#[non_exhaustive]
pub enum Signature {
    /// A secp256r1 signature.
    P256(p256::ecdsa::Signature),
}
// Allows ergonomic conversion from a raw p256 ECDSA signature.
impl From<p256::ecdsa::Signature> for Signature {
    fn from(sig: p256::ecdsa::Signature) -> Self {
        Self::P256(sig)
    }
}
impl Signature {
/// Returns the bytes of this signature.
pub fn to_bytes(&self) -> Vec<u8> {
match self {
Self::P256(sig) => sig.to_vec(),
}
}
/// Verifies the signature.
///
/// # Arguments
///
/// * `msg` - The message to verify.
/// * `notary_public_key` - The public key of the notary.
pub fn verify(
&self,
msg: &[u8],
notary_public_key: impl Into<NotaryPublicKey>,
) -> Result<(), SignatureVerifyError> {
match (self, notary_public_key.into()) {
(Self::P256(sig), NotaryPublicKey::P256(key)) => VerifyingKey::from(key)
.verify(msg, sig)
.map_err(|e| SignatureVerifyError(e.to_string())),
}
}
}

374
crates/core/src/signing.rs Normal file
View File

@@ -0,0 +1,374 @@
//! Cryptographic signatures.
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use crate::hash::impl_domain_separator;
/// Key algorithm identifier.
///
/// Ids 0-127 are reserved for algorithms defined by this crate; see
/// `KeyAlgId::new` for custom ids.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct KeyAlgId(u8);
impl KeyAlgId {
    /// secp256k1 elliptic curve key algorithm.
    pub const K256: Self = Self(1);

    /// NIST P-256 elliptic curve key algorithm.
    pub const P256: Self = Self(2);

    /// Creates a new key algorithm identifier.
    ///
    /// # Panics
    ///
    /// Panics if the identifier is in the reserved range 0-127.
    ///
    /// # Arguments
    ///
    /// * id - Unique identifier for the key algorithm.
    pub const fn new(id: u8) -> Self {
        // Ids 0-127 are reserved for algorithms built into this crate.
        assert!(id >= 128, "key algorithm id range 0-127 is reserved");

        Self(id)
    }

    /// Returns the id as a `u8`.
    pub const fn as_u8(&self) -> u8 {
        self.0
    }
}
impl std::fmt::Display for KeyAlgId {
    /// Renders the two built-in identifiers by name; any other id is shown
    /// as its hex value.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if *self == KeyAlgId::K256 {
            f.write_str("k256")
        } else if *self == KeyAlgId::P256 {
            f.write_str("p256")
        } else {
            write!(f, "custom({:02x})", self.0)
        }
    }
}
/// Signature algorithm identifier.
///
/// Ids 0-127 are reserved for algorithms defined by this crate; see
/// `SignatureAlgId::new` for custom ids.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct SignatureAlgId(u8);
impl SignatureAlgId {
    /// secp256k1 signature algorithm.
    pub const SECP256K1: Self = Self(1);

    /// secp256r1 signature algorithm.
    pub const SECP256R1: Self = Self(2);

    /// Creates a new signature algorithm identifier.
    ///
    /// # Panics
    ///
    /// Panics if the identifier is in the reserved range 0-127.
    ///
    /// # Arguments
    ///
    /// * id - Unique identifier for the signature algorithm.
    pub const fn new(id: u8) -> Self {
        // Ids 0-127 are reserved for algorithms built into this crate.
        assert!(id >= 128, "signature algorithm id range 0-127 is reserved");

        Self(id)
    }

    /// Returns the id as a `u8`.
    pub const fn as_u8(&self) -> u8 {
        self.0
    }
}
impl std::fmt::Display for SignatureAlgId {
    /// Renders the two built-in identifiers by name; any other id is shown
    /// as its hex value.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if *self == SignatureAlgId::SECP256K1 {
            f.write_str("secp256k1")
        } else if *self == SignatureAlgId::SECP256R1 {
            f.write_str("secp256r1")
        } else {
            write!(f, "custom({:02x})", self.0)
        }
    }
}
/// Unknown signature algorithm error.
///
/// Returned when a provider has no signer/verifier registered for the id.
#[derive(Debug, thiserror::Error)]
#[error("unknown signature algorithm id: {0:?}")]
pub struct UnknownSignatureAlgId(SignatureAlgId);
/// Provider of signers.
#[derive(Default)]
pub struct SignerProvider {
    // One signer per algorithm id; later registrations replace earlier ones.
    signers: HashMap<SignatureAlgId, Box<dyn Signer + Send + Sync>>,
}
impl SignerProvider {
    /// Returns the supported signature algorithms.
    pub fn supported_algs(&self) -> impl Iterator<Item = SignatureAlgId> + '_ {
        self.signers.keys().copied()
    }

    /// Configures a signer, keyed by the algorithm it reports.
    pub fn set_signer(&mut self, signer: Box<dyn Signer + Send + Sync>) {
        let alg = signer.alg_id();
        self.signers.insert(alg, signer);
    }

    /// Configures a secp256k1 signer with the provided signing key.
    pub fn set_secp256k1(&mut self, key: &[u8]) -> Result<&mut Self, SignerError> {
        let signer = Secp256k1Signer::new(key)?;
        self.set_signer(Box::new(signer));

        Ok(self)
    }

    /// Configures a secp256r1 signer with the provided signing key.
    pub fn set_secp256r1(&mut self, key: &[u8]) -> Result<&mut Self, SignerError> {
        let signer = Secp256r1Signer::new(key)?;
        self.set_signer(Box::new(signer));

        Ok(self)
    }

    /// Returns a signer for the given algorithm.
    pub(crate) fn get(
        &self,
        alg: &SignatureAlgId,
    ) -> Result<&(dyn Signer + Send + Sync), UnknownSignatureAlgId> {
        match self.signers.get(alg) {
            Some(signer) => Ok(signer.as_ref()),
            None => Err(UnknownSignatureAlgId(*alg)),
        }
    }
}
/// Error for [`Signer`].
///
/// The inner string describes the failure (e.g. an invalid signing key).
#[derive(Debug, thiserror::Error)]
#[error("signer error: {0}")]
pub struct SignerError(String);
/// Cryptographic signer.
pub trait Signer {
    /// Returns the algorithm used by this signer.
    fn alg_id(&self) -> SignatureAlgId;

    /// Signs the message.
    fn sign(&self, msg: &[u8]) -> Result<Signature, SignatureError>;

    /// Returns the verifying key for this signer.
    fn verifying_key(&self) -> VerifyingKey;
}
/// Provider of signature verifiers.
///
/// The `Default` impl pre-registers the built-in secp256k1 and secp256r1
/// verifiers.
pub struct SignatureVerifierProvider {
    // One verifier per algorithm id; later registrations replace earlier ones.
    verifiers: HashMap<SignatureAlgId, Box<dyn SignatureVerifier + Send + Sync>>,
}
impl Default for SignatureVerifierProvider {
fn default() -> Self {
let mut verifiers = HashMap::new();
verifiers.insert(SignatureAlgId::SECP256K1, Box::new(Secp256k1Verifier) as _);
verifiers.insert(SignatureAlgId::SECP256R1, Box::new(Secp256r1Verifier) as _);
Self { verifiers }
}
}
impl SignatureVerifierProvider {
    /// Configures a signature verifier, keyed by the algorithm it reports.
    pub fn set_verifier(&mut self, verifier: Box<dyn SignatureVerifier + Send + Sync>) {
        let alg = verifier.alg_id();
        self.verifiers.insert(alg, verifier);
    }

    /// Returns the verifier for the given algorithm.
    pub(crate) fn get(
        &self,
        alg: &SignatureAlgId,
    ) -> Result<&(dyn SignatureVerifier + Send + Sync), UnknownSignatureAlgId> {
        match self.verifiers.get(alg) {
            Some(verifier) => Ok(verifier.as_ref()),
            None => Err(UnknownSignatureAlgId(*alg)),
        }
    }
}
/// Signature verifier.
pub trait SignatureVerifier {
    /// Returns the algorithm used by this verifier.
    fn alg_id(&self) -> SignatureAlgId;

    /// Verifies the signature `sig` over `msg` with the given key.
    fn verify(&self, key: &VerifyingKey, msg: &[u8], sig: &[u8]) -> Result<(), SignatureError>;
}
/// Verifying key.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VerifyingKey {
    /// The key algorithm.
    pub alg: KeyAlgId,
    /// The key data.
    pub data: Vec<u8>,
}

// Gives the key a unique domain separator for hashing.
impl_domain_separator!(VerifyingKey);
/// Error occurred while signing or verifying a signature.
///
/// The inner string describes the failure.
#[derive(Debug, thiserror::Error)]
#[error("signature verification failed: {0}")]
pub struct SignatureError(String);
/// A signature.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct Signature {
    /// The algorithm used to sign the data.
    pub alg: SignatureAlgId,
    /// The signature data.
    pub data: Vec<u8>,
}
mod secp256k1 {
    use std::sync::{Arc, Mutex};

    use k256::ecdsa::{
        signature::{SignerMut, Verifier},
        Signature as Secp256K1Signature, SigningKey,
    };

    use super::*;

    /// secp256k1 signer.
    pub struct Secp256k1Signer(Arc<Mutex<SigningKey>>);

    impl Secp256k1Signer {
        /// Creates a new secp256k1 signer with the provided signing key.
        pub fn new(key: &[u8]) -> Result<Self, SignerError> {
            let key =
                SigningKey::from_slice(key).map_err(|_| SignerError("invalid key".to_string()))?;

            Ok(Self(Arc::new(Mutex::new(key))))
        }
    }

    impl Signer for Secp256k1Signer {
        fn alg_id(&self) -> SignatureAlgId {
            SignatureAlgId::SECP256K1
        }

        fn sign(&self, msg: &[u8]) -> Result<Signature, SignatureError> {
            // `SignerMut::sign` needs mutable access, hence the mutex.
            let mut key = self.0.lock().unwrap();
            let sig: Secp256K1Signature = key.sign(msg);

            Ok(Signature {
                alg: SignatureAlgId::SECP256K1,
                data: sig.to_vec(),
            })
        }

        fn verifying_key(&self) -> VerifyingKey {
            // SEC1-encoded public key bytes.
            let data = self.0.lock().unwrap().verifying_key().to_sec1_bytes().to_vec();

            VerifyingKey {
                alg: KeyAlgId::K256,
                data,
            }
        }
    }

    /// secp256k1 verifier.
    pub struct Secp256k1Verifier;

    impl SignatureVerifier for Secp256k1Verifier {
        fn alg_id(&self) -> SignatureAlgId {
            SignatureAlgId::SECP256K1
        }

        fn verify(&self, key: &VerifyingKey, msg: &[u8], sig: &[u8]) -> Result<(), SignatureError> {
            if key.alg != KeyAlgId::K256 {
                return Err(SignatureError("key algorithm is not k256".to_string()));
            }

            let verifying_key = k256::ecdsa::VerifyingKey::from_sec1_bytes(&key.data)
                .map_err(|_| SignatureError("invalid k256 key".to_string()))?;
            let signature = Secp256K1Signature::from_slice(sig)
                .map_err(|_| SignatureError("invalid secp256k1 signature".to_string()))?;

            verifying_key.verify(msg, &signature).map_err(|_| {
                SignatureError("secp256k1 signature verification failed".to_string())
            })
        }
    }
}
pub use secp256k1::{Secp256k1Signer, Secp256k1Verifier};
mod secp256r1 {
    use std::sync::{Arc, Mutex};

    use p256::ecdsa::{
        signature::{SignerMut, Verifier},
        Signature as Secp256R1Signature, SigningKey,
    };

    use super::*;

    /// secp256r1 signer.
    pub struct Secp256r1Signer(Arc<Mutex<SigningKey>>);

    impl Secp256r1Signer {
        /// Creates a new secp256r1 signer with the provided signing key.
        pub fn new(key: &[u8]) -> Result<Self, SignerError> {
            let key =
                SigningKey::from_slice(key).map_err(|_| SignerError("invalid key".to_string()))?;

            Ok(Self(Arc::new(Mutex::new(key))))
        }
    }

    impl Signer for Secp256r1Signer {
        fn alg_id(&self) -> SignatureAlgId {
            SignatureAlgId::SECP256R1
        }

        fn sign(&self, msg: &[u8]) -> Result<Signature, SignatureError> {
            // `SignerMut::sign` needs mutable access, hence the mutex.
            let mut key = self.0.lock().unwrap();
            let sig: Secp256R1Signature = key.sign(msg);

            Ok(Signature {
                alg: SignatureAlgId::SECP256R1,
                data: sig.to_vec(),
            })
        }

        fn verifying_key(&self) -> VerifyingKey {
            // SEC1-encoded public key bytes.
            let data = self.0.lock().unwrap().verifying_key().to_sec1_bytes().to_vec();

            VerifyingKey {
                alg: KeyAlgId::P256,
                data,
            }
        }
    }

    /// secp256r1 verifier.
    pub struct Secp256r1Verifier;

    impl SignatureVerifier for Secp256r1Verifier {
        fn alg_id(&self) -> SignatureAlgId {
            SignatureAlgId::SECP256R1
        }

        fn verify(&self, key: &VerifyingKey, msg: &[u8], sig: &[u8]) -> Result<(), SignatureError> {
            if key.alg != KeyAlgId::P256 {
                return Err(SignatureError("key algorithm is not p256".to_string()));
            }

            let verifying_key = p256::ecdsa::VerifyingKey::from_sec1_bytes(&key.data)
                .map_err(|_| SignatureError("invalid p256 key".to_string()))?;
            let signature = Secp256R1Signature::from_slice(sig)
                .map_err(|_| SignatureError("invalid secp256r1 signature".to_string()))?;

            verifying_key.verify(msg, &signature).map_err(|_| {
                SignatureError("secp256r1 signature verification failed".to_string())
            })
        }
    }
}
pub use secp256r1::{Secp256r1Signer, Secp256r1Verifier};

View File

@@ -1,159 +1,353 @@
//! Transcript data types.
//! Transcript types.
//!
//! All application data communicated over a TLS connection is referred to as a [`Transcript`]. A transcript is essentially
//! just two vectors of bytes, each corresponding to a [`Direction`].
//!
//! TLS operates over a bidirectional byte stream, and thus there are no application layer semantics present in the transcript.
//! For example, HTTPS is an application layer protocol that runs *over TLS* so there is no concept of "requests" or "responses"
//! in the transcript itself. These semantics must be recovered by parsing the application data and relating it to the bytes
//! in the transcript.
//!
//! ## Commitments
//!
//! During the attestation process a Prover can generate multiple commitments to various parts of the transcript.
//! These commitments are inserted into the attestation body and can be used by the Verifier to verify transcript proofs
//! later.
//!
//! To configure the transcript commitments, use the [`TranscriptCommitConfigBuilder`].
//!
//! ## Selective Disclosure
//!
//! Using a [`TranscriptProof`] a Prover can selectively disclose parts of a transcript to a Verifier
//! in the form of a [`PartialTranscript`]. A Verifier always learns the length of the transcript, but sensitive
//! data can be withheld.
//!
//! To create a proof, use the [`TranscriptProofBuilder`] which is returned by
//! [`Secrets::transcript_proof_builder`](crate::Secrets::transcript_proof_builder).
use std::ops::Range;
mod commit;
#[doc(hidden)]
pub mod encoding;
pub(crate) mod hash;
mod proof;
use std::{fmt, ops::Range};
use bytes::Bytes;
use serde::{Deserialize, Serialize};
use utils::range::{RangeDifference, RangeSet, RangeUnion};
use utils::range::{Difference, IndexRanges, RangeSet, ToRangeSet, Union};
pub(crate) static TX_TRANSCRIPT_ID: &str = "tx";
pub(crate) static RX_TRANSCRIPT_ID: &str = "rx";
use crate::connection::TranscriptLength;
/// A transcript contains a subset of bytes from a TLS session
#[derive(Default, Serialize, Deserialize, Clone, Debug)]
pub use commit::{
TranscriptCommitConfig, TranscriptCommitConfigBuilder, TranscriptCommitConfigBuilderError,
TranscriptCommitmentKind,
};
pub use proof::{
TranscriptProof, TranscriptProofBuilder, TranscriptProofBuilderError, TranscriptProofError,
};
/// Sent data transcript ID.
pub static TX_TRANSCRIPT_ID: &str = "tx";
/// Received data transcript ID.
pub static RX_TRANSCRIPT_ID: &str = "rx";
/// A transcript contains all the data communicated over a TLS connection.
#[derive(Clone, Serialize, Deserialize)]
pub struct Transcript {
data: Bytes,
/// Data sent from the Prover to the Server.
sent: Vec<u8>,
/// Data received by the Prover from the Server.
received: Vec<u8>,
}
opaque_debug::implement!(Transcript);
impl Transcript {
/// Creates a new transcript with the given ID and data
pub fn new(data: impl Into<Bytes>) -> Self {
Self { data: data.into() }
/// Creates a new transcript.
pub fn new(sent: impl Into<Vec<u8>>, received: impl Into<Vec<u8>>) -> Self {
Self {
sent: sent.into(),
received: received.into(),
}
}
/// Returns the actual traffic data of this transcript
pub fn data(&self) -> &Bytes {
&self.data
/// Returns a reference to the sent data.
pub fn sent(&self) -> &[u8] {
&self.sent
}
/// Returns a concatenated bytestring located in the given ranges of the transcript.
/// Returns a reference to the received data.
pub fn received(&self) -> &[u8] {
&self.received
}
/// Returns the length of the sent and received data, respectively.
#[allow(clippy::len_without_is_empty)]
pub fn len(&self) -> (usize, usize) {
(self.sent.len(), self.received.len())
}
/// Returns the length of the transcript in the given direction.
pub(crate) fn len_of_direction(&self, direction: Direction) -> usize {
match direction {
Direction::Sent => self.sent.len(),
Direction::Received => self.received.len(),
}
}
/// Returns the transcript length.
pub fn length(&self) -> TranscriptLength {
TranscriptLength {
sent: self.sent.len() as u32,
received: self.received.len() as u32,
}
}
/// Returns the subsequence of the transcript with the provided index, returning `None`
/// if the index is out of bounds.
pub fn get(&self, direction: Direction, idx: &Idx) -> Option<Subsequence> {
let data = match direction {
Direction::Sent => &self.sent,
Direction::Received => &self.received,
};
if idx.end() > data.len() {
return None;
}
Some(
Subsequence::new(idx.clone(), data.index_ranges(&idx.0))
.expect("data is same length as index"),
)
}
/// Returns a partial transcript containing the provided indices.
///
/// # Panics
///
/// Panics if the range set is empty or is out of bounds.
pub(crate) fn get_bytes_in_ranges(&self, ranges: &RangeSet<usize>) -> Vec<u8> {
let max = ranges.max().expect("range set is not empty");
assert!(max <= self.data.len(), "range set is out of bounds");
ranges
.iter_ranges()
.flat_map(|range| &self.data[range])
.copied()
.collect()
}
}
/// A transcript which may have some data redacted.
#[derive(Debug)]
pub struct RedactedTranscript {
data: Vec<u8>,
/// Ranges of `data` which have been authenticated
auth: RangeSet<usize>,
/// Ranges of `data` which have been redacted
redacted: RangeSet<usize>,
}
impl RedactedTranscript {
/// Creates a new redacted transcript with the given length.
///
/// All bytes in the transcript are initialized to 0.
/// Panics if the indices are out of bounds.
///
/// # Arguments
///
/// * `len` - The length of the transcript
/// * `slices` - A list of slices of data which have been authenticated
pub fn new(len: usize, slices: Vec<TranscriptSlice>) -> Self {
let mut data = vec![0u8; len];
let mut auth = RangeSet::default();
for slice in slices {
data[slice.range()].copy_from_slice(slice.data());
auth = auth.union(&slice.range());
}
let redacted = RangeSet::from(0..len).difference(&auth);
    /// Returns a partial transcript containing the data at the provided indices;
    /// all other bytes are zeroed out.
    ///
    /// # Arguments
    ///
    /// * `sent_idx` - The indices of the sent data to include.
    /// * `recv_idx` - The indices of the received data to include.
    ///
    /// # Panics
    ///
    /// Panics if an index is out of bounds of the transcript (slicing below).
    pub fn to_partial(&self, sent_idx: Idx, recv_idx: Idx) -> PartialTranscript {
        let mut sent = vec![0; self.sent.len()];
        let mut received = vec![0; self.received.len()];
        for range in sent_idx.iter_ranges() {
            sent[range.clone()].copy_from_slice(&self.sent[range]);
        }
        for range in recv_idx.iter_ranges() {
            received[range.clone()].copy_from_slice(&self.received[range]);
        }
        PartialTranscript {
            sent,
            received,
            sent_authed: sent_idx,
            received_authed: recv_idx,
        }
    }
}
/// A partial transcript.
///
/// A partial transcript is a transcript which may not have all the data authenticated.
// Deserialization goes through `PartialTranscriptUnchecked`, which bounds-checks
// the authenticated indices and zeroes unauthenticated bytes.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(try_from = "validation::PartialTranscriptUnchecked")]
pub struct PartialTranscript {
    /// Data sent from the Prover to the Server.
    sent: Vec<u8>,
    /// Data received by the Prover from the Server.
    received: Vec<u8>,
    /// Index of `sent` which have been authenticated.
    sent_authed: Idx,
    /// Index of `received` which have been authenticated.
    received_authed: Idx,
}
impl PartialTranscript {
/// Creates a new partial transcript initalized to all 0s.
///
/// # Arguments
///
/// * `sent_len` - The length of the sent data.
/// * `received_len` - The length of the received data.
pub fn new(sent_len: usize, received_len: usize) -> Self {
Self {
data,
auth,
redacted,
sent: vec![0; sent_len],
received: vec![0; received_len],
sent_authed: Idx::default(),
received_authed: Idx::default(),
}
}
    /// Returns the length of the sent transcript.
    pub fn len_sent(&self) -> usize {
        self.sent.len()
    }

    /// Returns the length of the received transcript.
    pub fn len_received(&self) -> usize {
        self.received.len()
    }

    /// Returns whether the transcript is complete, i.e. every byte in both
    /// directions has been authenticated.
    pub fn is_complete(&self) -> bool {
        self.sent_authed.len() == self.sent.len()
            && self.received_authed.len() == self.received.len()
    }

    /// Returns whether the index is in bounds of the transcript.
    pub fn contains(&self, direction: Direction, idx: &Idx) -> bool {
        match direction {
            Direction::Sent => idx.end() <= self.sent.len(),
            Direction::Received => idx.end() <= self.received.len(),
        }
    }
/// Returns a reference to the sent data.
///
/// # Warning
///
/// Not all of the data in the transcript may have been authenticated. See
/// [authed](RedactedTranscript::authed) for a set of ranges which have been.
pub fn data(&self) -> &[u8] {
&self.data
/// [sent_authed](PartialTranscript::sent_authed) for a set of ranges which have been.
pub fn sent_unsafe(&self) -> &[u8] {
&self.sent
}
/// Returns all the ranges of data which have been authenticated.
pub fn authed(&self) -> &RangeSet<usize> {
&self.auth
/// Returns a reference to the received data.
///
/// # Warning
///
/// Not all of the data in the transcript may have been authenticated. See
/// [received_authed](PartialTranscript::received_authed) for a set of ranges which have been.
pub fn received_unsafe(&self) -> &[u8] {
&self.received
}
/// Returns all the ranges of data which have been redacted.
pub fn redacted(&self) -> &RangeSet<usize> {
&self.redacted
/// Returns the index of sent data which have been authenticated.
pub fn sent_authed(&self) -> &Idx {
&self.sent_authed
}
/// Sets all bytes in the transcript which were redacted.
///
/// # Arguments
///
/// * `value` - The value to set the redacted bytes to
pub fn set_redacted(&mut self, value: u8) {
for range in self.redacted().clone().iter_ranges() {
self.data[range].fill(value);
}
/// Returns the index of received data which have been authenticated.
pub fn received_authed(&self) -> &Idx {
&self.received_authed
}
    /// Returns the index of sent data which haven't been authenticated.
    pub fn sent_unauthed(&self) -> Idx {
        Idx(RangeSet::from(0..self.sent.len()).difference(&self.sent_authed.0))
    }

    /// Returns the index of received data which haven't been authenticated.
    pub fn received_unauthed(&self) -> Idx {
        Idx(RangeSet::from(0..self.received.len()).difference(&self.received_authed.0))
    }

    /// Returns an iterator over the authenticated data in the transcript.
    pub fn iter(&self, direction: Direction) -> impl Iterator<Item = u8> + '_ {
        let (data, authed) = match direction {
            Direction::Sent => (&self.sent, &self.sent_authed),
            Direction::Received => (&self.received, &self.received_authed),
        };

        // Yield only bytes at authenticated positions, in index order.
        authed.0.iter().map(|i| data[i])
    }
/// Unions the authenticated data of this transcript with another.
///
/// # Arguments
/// # Panics
///
/// * `value` - The value to set the redacted bytes to
/// * `range` - The range of redacted bytes to set
pub fn set_redacted_range(&mut self, value: u8, range: Range<usize>) {
for range in self
.redacted
.difference(&(0..self.data.len()).difference(&range))
/// Panics if the other transcript is not the same length.
pub fn union_transcript(&mut self, other: &PartialTranscript) {
assert_eq!(
self.sent.len(),
other.sent.len(),
"sent data are not the same length"
);
assert_eq!(
self.received.len(),
other.received.len(),
"received data are not the same length"
);
for range in other
.sent_authed
.0
.difference(&self.sent_authed.0)
.iter_ranges()
{
self.data[range].fill(value);
self.sent[range.clone()].copy_from_slice(&other.sent[range]);
}
for range in other
.received_authed
.0
.difference(&self.received_authed.0)
.iter_ranges()
{
self.received[range.clone()].copy_from_slice(&other.received[range]);
}
self.sent_authed = self.sent_authed.union(&other.sent_authed);
self.received_authed = self.received_authed.union(&other.received_authed);
}
/// Unions an authenticated subsequence into this transcript.
///
/// # Panics
///
/// Panics if the subsequence is outside the bounds of the transcript.
pub fn union_subsequence(&mut self, direction: Direction, seq: &Subsequence) {
match direction {
Direction::Sent => {
seq.copy_to(&mut self.sent);
self.sent_authed = self.sent_authed.union(&seq.idx);
}
Direction::Received => {
seq.copy_to(&mut self.received);
self.received_authed = self.received_authed.union(&seq.idx);
}
}
}
}
/// Slice of a transcript.
#[derive(PartialEq, Debug, Clone, Default)]
pub struct TranscriptSlice {
    /// A byte range of this slice
    range: Range<usize>,
    /// The actual byte content of the slice
    data: Vec<u8>,
}
impl TranscriptSlice {
/// Creates a new transcript slice.
pub fn new(range: Range<usize>, data: Vec<u8>) -> Self {
Self { range, data }
/// Sets all bytes in the transcript which haven't been authenticated.
///
/// # Arguments
///
/// * `value` - The value to set the unauthenticated bytes to
pub fn set_unauthed(&mut self, value: u8) {
for range in self.sent_unauthed().iter_ranges() {
self.sent[range].fill(value);
}
for range in self.received_unauthed().iter_ranges() {
self.received[range].fill(value);
}
}
    /// Returns the range of bytes this slice refers to in the transcript
    pub fn range(&self) -> Range<usize> {
        self.range.clone()
    }

    /// Returns the bytes of this slice
    pub fn data(&self) -> &[u8] {
        &self.data
    }
/// Returns the bytes of this slice
pub fn into_bytes(self) -> Vec<u8> {
self.data
/// Sets all bytes in the transcript which haven't been authenticated within the given range.
///
/// # Arguments
///
/// * `value` - The value to set the unauthenticated bytes to
/// * `range` - The range of bytes to set
pub fn set_unauthed_range(&mut self, value: u8, direction: Direction, range: Range<usize>) {
match direction {
Direction::Sent => {
for range in range.difference(&self.sent_authed.0).iter_ranges() {
self.sent[range].fill(value);
}
}
Direction::Received => {
for range in range.difference(&self.received_authed.0).iter_ranges() {
self.received[range].fill(value);
}
}
}
}
}
@@ -164,22 +358,232 @@ impl TranscriptSlice {
/// The direction of data communicated over a TLS connection.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum Direction {
    /// Sent from the Prover to the TLS peer.
    Sent = 0x00,
    /// Received by the prover from the TLS peer.
    Received = 0x01,
}
/// Returns the value ID for each byte in the provided range set
pub fn get_value_ids(
ranges: &RangeSet<usize>,
direction: Direction,
) -> impl Iterator<Item = String> + '_ {
impl fmt::Display for Direction {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Lowercase labels; used in error messages elsewhere in this module.
        let label = match self {
            Direction::Sent => "sent",
            Direction::Received => "received",
        };
        f.write_str(label)
    }
}
/// Transcript index.
#[derive(Debug, Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct Idx(RangeSet<usize>);

impl Idx {
    /// Creates a new index builder.
    pub fn builder() -> IdxBuilder {
        IdxBuilder::default()
    }

    /// Creates an empty index.
    pub fn empty() -> Self {
        Self(RangeSet::default())
    }

    /// Creates a new transcript index.
    pub fn new(ranges: impl Into<RangeSet<usize>>) -> Self {
        Self(ranges.into())
    }

    /// Returns the start of the index, or 0 if the index is empty.
    pub fn start(&self) -> usize {
        self.0.min().unwrap_or_default()
    }

    /// Returns the end of the index, non-inclusive, or 0 if the index is empty.
    pub fn end(&self) -> usize {
        self.0.end().unwrap_or_default()
    }

    /// Returns an iterator over the values in the index.
    pub fn iter(&self) -> impl Iterator<Item = usize> + '_ {
        self.0.iter()
    }

    /// Returns an iterator over the ranges of the index.
    pub fn iter_ranges(&self) -> impl Iterator<Item = Range<usize>> + '_ {
        self.0.iter_ranges()
    }

    /// Returns the number of values in the index.
    pub fn len(&self) -> usize {
        self.0.len()
    }

    /// Returns whether the index is empty.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Returns the number of disjoint ranges in the index.
    pub fn count(&self) -> usize {
        self.0.len_ranges()
    }

    /// Returns the union of this index with another.
    pub fn union(&self, other: &Idx) -> Idx {
        Idx(self.0.union(&other.0))
    }
}

/// Builder for [`Idx`].
#[derive(Debug, Default)]
pub struct IdxBuilder(RangeSet<usize>);

impl IdxBuilder {
    /// Unions ranges.
    pub fn union(self, ranges: &dyn ToRangeSet<usize>) -> Self {
        IdxBuilder(self.0.union(&ranges.to_range_set()))
    }

    /// Builds the index.
    pub fn build(self) -> Idx {
        Idx(self.0)
    }
}
/// Transcript subsequence.
// Deserialization goes through `SubsequenceUnchecked`, which enforces the
// index-length == data-length invariant below.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(try_from = "validation::SubsequenceUnchecked")]
pub struct Subsequence {
    /// Index of the subsequence.
    idx: Idx,
    /// Data of the subsequence.
    data: Vec<u8>,
}

impl Subsequence {
    /// Creates a new subsequence.
    ///
    /// Returns an error if the index length does not match the data length.
    pub fn new(idx: Idx, data: Vec<u8>) -> Result<Self, InvalidSubsequence> {
        if idx.len() != data.len() {
            return Err(InvalidSubsequence(
                "index length does not match data length",
            ));
        }

        Ok(Self { idx, data })
    }

    /// Returns the index of the subsequence.
    pub fn index(&self) -> &Idx {
        &self.idx
    }

    /// Returns the data of the subsequence.
    pub fn data(&self) -> &[u8] {
        &self.data
    }

    /// Returns the length of the subsequence.
    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// Returns the inner parts of the subsequence.
    pub fn into_parts(self) -> (Idx, Vec<u8>) {
        (self.idx, self.data)
    }

    /// Copies the subsequence data into the given destination.
    ///
    /// # Panics
    ///
    /// Panics if the subsequence ranges are out of bounds.
    pub(crate) fn copy_to(&self, dest: &mut [u8]) {
        // `data` stores the bytes of each range back-to-back, so walk the
        // ranges while advancing an offset into `data`.
        let mut offset = 0;
        for range in self.idx.iter_ranges() {
            dest[range.clone()].copy_from_slice(&self.data[offset..offset + range.len()]);
            offset += range.len();
        }
    }
}

/// Invalid subsequence error.
#[derive(Debug, thiserror::Error)]
#[error("invalid subsequence: {0}")]
pub struct InvalidSubsequence(&'static str);
/// Returns the value ID for each byte position in the provided index.
#[doc(hidden)]
pub fn get_value_ids(direction: Direction, idx: &Idx) -> impl Iterator<Item = String> + '_ {
    // Ids are namespaced by the direction's transcript id.
    let id = match direction {
        Direction::Sent => TX_TRANSCRIPT_ID,
        Direction::Received => RX_TRANSCRIPT_ID,
    };

    idx.iter().map(move |idx| format!("{}/{}", id, idx))
}
/// Serde validation shims: raw deserialized forms are converted into the
/// public types via `TryFrom`, enforcing their invariants.
mod validation {
    use super::*;

    /// `Subsequence` prior to validation of the index/data length invariant.
    #[derive(Debug, Deserialize)]
    pub(super) struct SubsequenceUnchecked {
        idx: Idx,
        data: Vec<u8>,
    }

    impl TryFrom<SubsequenceUnchecked> for Subsequence {
        type Error = InvalidSubsequence;

        fn try_from(unchecked: SubsequenceUnchecked) -> Result<Self, Self::Error> {
            Self::new(unchecked.idx, unchecked.data)
        }
    }

    /// Invalid partial transcript error.
    #[derive(Debug, thiserror::Error)]
    #[error("invalid partial transcript: {0}")]
    pub struct InvalidPartialTranscript(&'static str);

    /// `PartialTranscript` prior to bounds validation.
    #[derive(Debug, Deserialize)]
    pub(super) struct PartialTranscriptUnchecked {
        sent: Vec<u8>,
        received: Vec<u8>,
        sent_authed: Idx,
        received_authed: Idx,
    }

    impl TryFrom<PartialTranscriptUnchecked> for PartialTranscript {
        type Error = InvalidPartialTranscript;

        fn try_from(unchecked: PartialTranscriptUnchecked) -> Result<Self, Self::Error> {
            if unchecked.sent_authed.end() > unchecked.sent.len()
                || unchecked.received_authed.end() > unchecked.received.len()
            {
                return Err(InvalidPartialTranscript(
                    "authenticated ranges are not in bounds of the data",
                ));
            }

            // Rewrite the data to ensure that unauthenticated data is zeroed out.
            let mut sent = vec![0; unchecked.sent.len()];
            let mut received = vec![0; unchecked.received.len()];

            for range in unchecked.sent_authed.iter_ranges() {
                sent[range.clone()].copy_from_slice(&unchecked.sent[range]);
            }

            for range in unchecked.received_authed.iter_ranges() {
                received[range.clone()].copy_from_slice(&unchecked.received[range]);
            }

            Ok(Self {
                sent,
                received,
                sent_authed: unchecked.sent_authed,
                received_authed: unchecked.received_authed,
            })
        }
    }
}
#[cfg(test)]
@@ -189,57 +593,32 @@ mod tests {
use super::*;
#[fixture]
fn transcripts() -> (Transcript, Transcript) {
let sent = "data sent 123456789".as_bytes().to_vec();
let recv = "data received 987654321".as_bytes().to_vec();
(Transcript::new(sent), Transcript::new(recv))
fn transcript() -> Transcript {
Transcript::new(
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
)
}
#[rstest]
fn test_get_bytes_in_ranges(transcripts: (Transcript, Transcript)) {
let (sent, recv) = transcripts;
fn test_get_subsequence(transcript: Transcript) {
let subseq = transcript
.get(Direction::Received, &Idx(RangeSet::from([0..4, 7..10])))
.unwrap();
assert_eq!(subseq.data, vec![0, 1, 2, 3, 7, 8, 9]);
let range1 = Range { start: 2, end: 4 };
let range2 = Range { start: 10, end: 15 };
// a full range spanning the entirety of the data
let range3 = Range {
start: 0,
end: sent.data().len(),
};
let subseq = transcript
.get(Direction::Sent, &Idx(RangeSet::from([0..4, 9..12])))
.unwrap();
assert_eq!(subseq.data, vec![0, 1, 2, 3, 9, 10, 11]);
let expected = "ta12345".as_bytes().to_vec();
assert_eq!(
expected,
sent.get_bytes_in_ranges(&RangeSet::from([range1.clone(), range2.clone()]))
let subseq = transcript.get(
Direction::Received,
&Idx(RangeSet::from([0..4, 7..10, 11..13])),
);
assert_eq!(subseq, None);
let expected = "taved 9".as_bytes().to_vec();
assert_eq!(
expected,
recv.get_bytes_in_ranges(&RangeSet::from([range1, range2]))
);
assert_eq!(
sent.data().as_ref(),
sent.get_bytes_in_ranges(&RangeSet::from([range3]))
);
}
#[rstest]
#[should_panic]
fn test_get_bytes_in_ranges_empty(transcripts: (Transcript, Transcript)) {
let (sent, _) = transcripts;
sent.get_bytes_in_ranges(&RangeSet::default());
}
#[rstest]
#[should_panic]
fn test_get_bytes_in_ranges_out_of_bounds(transcripts: (Transcript, Transcript)) {
let (sent, _) = transcripts;
let range = Range {
start: 0,
end: sent.data().len() + 1,
};
sent.get_bytes_in_ranges(&RangeSet::from([range]));
let subseq = transcript.get(Direction::Sent, &Idx(RangeSet::from([0..4, 7..10, 11..13])));
assert_eq!(subseq, None);
}
}

View File

@@ -0,0 +1,234 @@
//! Transcript commitments.
use std::{collections::HashSet, fmt};
use serde::{Deserialize, Serialize};
use utils::range::ToRangeSet;
use crate::{
hash::HashAlgId,
transcript::{Direction, Idx, Transcript},
};
/// Kind of transcript commitment.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum TranscriptCommitmentKind {
    /// A commitment to encodings of the transcript.
    Encoding,
    /// A hash commitment to plaintext in the transcript.
    Hash {
        /// The hash algorithm used.
        alg: HashAlgId,
    },
}

/// Configuration for transcript commitments.
#[derive(Debug, Clone)]
pub struct TranscriptCommitConfig {
    /// Hash algorithm used for encoding commitments.
    encoding_hash_alg: HashAlgId,
    /// The committed (direction, index) pairs with the kind of each commitment.
    commits: Vec<((Direction, Idx), TranscriptCommitmentKind)>,
}
impl TranscriptCommitConfig {
    /// Creates a new commit config builder.
    pub fn builder(transcript: &Transcript) -> TranscriptCommitConfigBuilder {
        TranscriptCommitConfigBuilder::new(transcript)
    }

    /// Returns the hash algorithm to use for encoding commitments.
    pub fn encoding_hash_alg(&self) -> &HashAlgId {
        &self.encoding_hash_alg
    }

    /// Returns whether the configuration has any encoding commitments.
    pub fn has_encoding(&self) -> bool {
        self.commits
            .iter()
            .any(|(_, kind)| matches!(kind, TranscriptCommitmentKind::Encoding))
    }

    /// Returns an iterator over the encoding commitment indices.
    pub fn iter_encoding(&self) -> impl Iterator<Item = &(Direction, Idx)> {
        self.commits.iter().filter_map(|(idx, kind)| {
            matches!(kind, TranscriptCommitmentKind::Encoding).then_some(idx)
        })
    }

    /// Returns an iterator over the hash commitment indices.
    pub fn iter_hash(&self) -> impl Iterator<Item = (&(Direction, Idx), &HashAlgId)> {
        self.commits.iter().filter_map(|(idx, kind)| {
            if let TranscriptCommitmentKind::Hash { alg } = kind {
                Some((idx, alg))
            } else {
                None
            }
        })
    }
}
/// A builder for [`TranscriptCommitConfig`].
///
/// The default hash algorithm is [`HashAlgId::BLAKE3`] and the default kind
/// is [`TranscriptCommitmentKind::Encoding`].
#[derive(Debug)]
pub struct TranscriptCommitConfigBuilder<'a> {
    /// Transcript the commitments are scoped to; used for bounds checks.
    transcript: &'a Transcript,
    /// Hash algorithm for encoding commitments.
    encoding_hash_alg: HashAlgId,
    /// Kind applied by [`commit`](Self::commit) when none is given explicitly.
    default_kind: TranscriptCommitmentKind,
    /// Set of commitments; a `HashSet` deduplicates identical commitments.
    commits: HashSet<((Direction, Idx), TranscriptCommitmentKind)>,
}
impl<'a> TranscriptCommitConfigBuilder<'a> {
    /// Creates a new commit config builder.
    pub fn new(transcript: &'a Transcript) -> Self {
        Self {
            transcript,
            encoding_hash_alg: HashAlgId::BLAKE3,
            default_kind: TranscriptCommitmentKind::Encoding,
            commits: HashSet::default(),
        }
    }

    /// Sets the hash algorithm to use for encoding commitments.
    pub fn encoding_hash_alg(&mut self, alg: HashAlgId) -> &mut Self {
        self.encoding_hash_alg = alg;
        self
    }

    /// Sets the default kind of commitment to use.
    pub fn default_kind(&mut self, default_kind: TranscriptCommitmentKind) -> &mut Self {
        self.default_kind = default_kind;
        self
    }

    /// Adds a commitment.
    ///
    /// Returns an error if the ranges exceed the transcript length in the
    /// given direction.
    ///
    /// # Arguments
    ///
    /// * `ranges` - The ranges of the commitment.
    /// * `direction` - The direction of the transcript.
    /// * `kind` - The kind of commitment.
    pub fn commit_with_kind(
        &mut self,
        ranges: &dyn ToRangeSet<usize>,
        direction: Direction,
        kind: TranscriptCommitmentKind,
    ) -> Result<&mut Self, TranscriptCommitConfigBuilderError> {
        let idx = Idx::new(ranges.to_range_set());

        if idx.end() > self.transcript.len_of_direction(direction) {
            return Err(TranscriptCommitConfigBuilderError::new(
                ErrorKind::Index,
                format!(
                    "range is out of bounds of the transcript ({}): {} > {}",
                    direction,
                    idx.end(),
                    self.transcript.len_of_direction(direction)
                ),
            ));
        }

        self.commits.insert(((direction, idx), kind));

        Ok(self)
    }

    /// Adds a commitment with the default kind.
    ///
    /// # Arguments
    ///
    /// * `ranges` - The ranges of the commitment.
    /// * `direction` - The direction of the transcript.
    pub fn commit(
        &mut self,
        ranges: &dyn ToRangeSet<usize>,
        direction: Direction,
    ) -> Result<&mut Self, TranscriptCommitConfigBuilderError> {
        self.commit_with_kind(ranges, direction, self.default_kind)
    }

    /// Adds a commitment with the default kind to the sent data transcript.
    ///
    /// # Arguments
    ///
    /// * `ranges` - The ranges of the commitment.
    pub fn commit_sent(
        &mut self,
        ranges: &dyn ToRangeSet<usize>,
    ) -> Result<&mut Self, TranscriptCommitConfigBuilderError> {
        self.commit(ranges, Direction::Sent)
    }

    /// Adds a commitment with the default kind to the received data transcript.
    ///
    /// # Arguments
    ///
    /// * `ranges` - The ranges of the commitment.
    pub fn commit_recv(
        &mut self,
        ranges: &dyn ToRangeSet<usize>,
    ) -> Result<&mut Self, TranscriptCommitConfigBuilderError> {
        self.commit(ranges, Direction::Received)
    }

    /// Builds the configuration.
    pub fn build(self) -> Result<TranscriptCommitConfig, TranscriptCommitConfigBuilderError> {
        Ok(TranscriptCommitConfig {
            encoding_hash_alg: self.encoding_hash_alg,
            commits: Vec::from_iter(self.commits),
        })
    }
}
/// Error for [`TranscriptCommitConfigBuilder`].
#[derive(Debug, thiserror::Error)]
pub struct TranscriptCommitConfigBuilderError {
    /// Category of the failure.
    kind: ErrorKind,
    /// Underlying cause, if any.
    source: Option<Box<dyn std::error::Error + Send + Sync>>,
}

impl TranscriptCommitConfigBuilderError {
    /// Creates a new error of the given kind with a source error.
    fn new<E>(kind: ErrorKind, source: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        Self {
            kind,
            source: Some(source.into()),
        }
    }
}

/// Category of failure for [`TranscriptCommitConfigBuilderError`].
#[derive(Debug)]
enum ErrorKind {
    Index,
}
impl fmt::Display for TranscriptCommitConfigBuilderError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Kind label first, then the source chained on when present.
        match self.kind {
            ErrorKind::Index => f.write_str("index error")?,
        }

        match &self.source {
            Some(source) => write!(f, " caused by: {}", source),
            None => Ok(()),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Commits that exceed the transcript bounds must be rejected.
    #[test]
    fn test_range_out_of_bounds() {
        let transcript = Transcript::new(
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
        );
        let mut builder = TranscriptCommitConfigBuilder::new(&transcript);

        // Both directions have length 12, so 10..15 is out of bounds.
        assert!(builder.commit_sent(&(10..15)).is_err());
        assert!(builder.commit_recv(&(10..15)).is_err());
    }
}

View File

@@ -0,0 +1,35 @@
//! Transcript encoding commitments and proofs.
//!
//! This is an internal module that is not intended to be used directly by users.
mod encoder;
mod proof;
mod provider;
mod tree;
pub(crate) use encoder::{new_encoder, Encoder};
pub use proof::{EncodingProof, EncodingProofError};
pub use provider::EncodingProvider;
pub use tree::EncodingTree;
use serde::{Deserialize, Serialize};
use crate::hash::{impl_domain_separator, TypedHash};
/// The maximum allowed total bytelength of all committed data. Used to prevent
/// DoS during verification.
///
// NOTE(review): the original doc linked `crate::commitment::Blake3`, a module
// path that no longer exists; presumably the 128x factor below comes from the
// per-byte encoding size produced by the encoder — confirm against `encoder.rs`.
/// (This caps the verifier at hashing up to roughly 1GB * 128 = 128GB of
/// plaintext encodings.)
///
/// This value must not exceed bcs's MAX_SEQUENCE_LENGTH limit (which is (1 << 31) - 1 by default)
const MAX_TOTAL_COMMITTED_DATA: usize = 1_000_000_000;

/// Transcript encoding commitment.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct EncodingCommitment {
    /// Merkle root of the encoding commitments.
    pub root: TypedHash,
    /// Seed used to generate the encodings.
    pub seed: Vec<u8>,
}
impl_domain_separator!(EncodingCommitment);

View File

@@ -0,0 +1,49 @@
use mpz_circuits::types::ValueType;
use mpz_core::serialize::CanonicalSerialize;
use mpz_garble_core::ChaChaEncoder;
use crate::transcript::{Direction, Subsequence, RX_TRANSCRIPT_ID, TX_TRANSCRIPT_ID};
/// Creates a new encoder keyed by the given seed.
pub(crate) fn new_encoder(seed: [u8; 32]) -> impl Encoder {
    ChaChaEncoder::new(seed)
}

/// A transcript encoder.
///
/// This is an internal implementation detail that should not be exposed to the
/// public API.
pub(crate) trait Encoder {
    /// Returns the encoding for the given subsequence of the transcript.
    ///
    /// # Arguments
    ///
    /// * `direction` - The direction of the subsequence within the transcript.
    /// * `seq` - The subsequence to encode.
    fn encode_subsequence(&self, direction: Direction, seq: &Subsequence) -> Vec<u8>;
}

impl Encoder for ChaChaEncoder {
    fn encode_subsequence(&self, direction: Direction, seq: &Subsequence) -> Vec<u8> {
        // The transcript id namespaces byte positions by direction.
        let id = match direction {
            Direction::Sent => TX_TRANSCRIPT_ID,
            Direction::Received => RX_TRANSCRIPT_ID,
        };

        let mut encoding = Vec::with_capacity(seq.len() * 16);
        for (byte_id, &byte) in seq.index().iter().zip(seq.data()) {
            // Derive a u64 value id from the blake3 hash of "{id}/{position}".
            let id_hash = mpz_core::utils::blake3(format!("{}/{}", id, byte_id).as_bytes());
            let id = u64::from_be_bytes(id_hash[..8].try_into().unwrap());
            encoding.extend(
                <ChaChaEncoder as mpz_garble_core::Encoder>::encode_by_type(
                    self,
                    id,
                    &ValueType::U8,
                )
                // Select the labels corresponding to the actual byte value.
                .select(byte)
                .expect("encoding is a byte encoding")
                .to_bytes(),
            )
        }
        encoding
    }
}

View File

@@ -0,0 +1,182 @@
use std::{collections::HashMap, fmt};
use serde::{Deserialize, Serialize};
use crate::{
connection::TranscriptLength,
hash::{Blinded, Blinder, HashAlgorithmExt, HashProviderError},
merkle::{MerkleError, MerkleProof},
transcript::{
encoding::{
new_encoder, tree::EncodingLeaf, Encoder, EncodingCommitment, MAX_TOTAL_COMMITTED_DATA,
},
Direction, PartialTranscript, Subsequence,
},
CryptoProvider,
};
/// An opening of a leaf in the encoding tree.
#[derive(Serialize, Deserialize)]
pub(super) struct Opening {
    /// Direction of the opened subsequence.
    pub(super) direction: Direction,
    /// The opened plaintext subsequence.
    pub(super) seq: Subsequence,
    /// Blinder used when hashing the leaf.
    pub(super) blinder: Blinder,
}

opaque_debug::implement!(Opening);

/// An encoding proof.
#[derive(Debug, Serialize, Deserialize)]
pub struct EncodingProof {
    /// Merkle inclusion proof for the opened leaves.
    pub(super) inclusion_proof: MerkleProof,
    /// Openings keyed by leaf index in the tree.
    pub(super) openings: HashMap<usize, Opening>,
}
impl EncodingProof {
    /// Verifies the proof against the commitment.
    ///
    /// Returns the partial transcript of authenticated data.
    ///
    /// # Arguments
    ///
    /// * `provider` - The crypto provider supplying the hash algorithm.
    /// * `transcript_length` - The length of the transcript.
    /// * `commitment` - The encoding commitment to verify against.
    pub fn verify_with_provider(
        self,
        provider: &CryptoProvider,
        transcript_length: &TranscriptLength,
        commitment: &EncodingCommitment,
    ) -> Result<PartialTranscript, EncodingProofError> {
        let hasher = provider.hash.get(&commitment.root.alg)?;

        let seed: [u8; 32] = commitment.seed.clone().try_into().map_err(|_| {
            EncodingProofError::new(ErrorKind::Commitment, "encoding seed not 32 bytes")
        })?;

        let encoder = new_encoder(seed);
        let Self {
            inclusion_proof,
            openings,
        } = self;
        let (sent_len, recv_len) = (
            transcript_length.sent as usize,
            transcript_length.received as usize,
        );

        let mut leaves = Vec::with_capacity(openings.len());
        let mut transcript = PartialTranscript::new(sent_len, recv_len);
        let mut total_opened = 0u128;
        for (
            id,
            Opening {
                direction,
                seq,
                blinder,
            },
        ) in openings
        {
            // Make sure the amount of data being proved is bounded.
            total_opened += seq.len() as u128;
            if total_opened > MAX_TOTAL_COMMITTED_DATA as u128 {
                // Fixed: previously `return Err(..)?;` applied a redundant `?`
                // to an already-constructed error value.
                return Err(EncodingProofError::new(
                    ErrorKind::Proof,
                    "exceeded maximum allowed data",
                ));
            }

            // Make sure the ranges are within the bounds of the transcript
            let transcript_len = match direction {
                Direction::Sent => sent_len,
                Direction::Received => recv_len,
            };

            if seq.index().end() > transcript_len {
                return Err(EncodingProofError::new(
                    ErrorKind::Proof,
                    format!(
                        "index out of bounds of the transcript ({}): {} > {}",
                        direction,
                        seq.index().end(),
                        transcript_len
                    ),
                ));
            }

            let expected_encoding = encoder.encode_subsequence(direction, &seq);
            let expected_leaf =
                Blinded::new_with_blinder(EncodingLeaf::new(expected_encoding), blinder);

            // Compute the expected hash of the commitment to make sure it is
            // present in the merkle tree.
            leaves.push((id, hasher.hash_canonical(&expected_leaf)));

            // Union the authenticated subsequence into the transcript.
            transcript.union_subsequence(direction, &seq);
        }

        // Verify that the expected hashes are present in the merkle tree.
        //
        // This proves the Prover committed to the purported data prior to the encoder
        // seed being revealed. Ergo, if the encodings are authentic then the purported
        // data is authentic.
        inclusion_proof.verify(hasher, &commitment.root, leaves)?;

        Ok(transcript)
    }
}
/// Error for [`EncodingProof`].
#[derive(Debug, thiserror::Error)]
pub struct EncodingProofError {
    /// Category of the failure.
    kind: ErrorKind,
    /// Underlying cause, if any.
    source: Option<Box<dyn std::error::Error + Send + Sync>>,
}

impl EncodingProofError {
    /// Creates a new error of the given kind with a source error.
    fn new<E>(kind: ErrorKind, source: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        Self {
            kind,
            source: Some(source.into()),
        }
    }
}

/// Category of failure for [`EncodingProofError`].
#[derive(Debug)]
enum ErrorKind {
    Provider,
    Commitment,
    Proof,
}

impl fmt::Display for EncodingProofError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str("encoding proof error: ")?;

        match self.kind {
            ErrorKind::Provider => f.write_str("provider error")?,
            ErrorKind::Commitment => f.write_str("commitment error")?,
            ErrorKind::Proof => f.write_str("proof error")?,
        }

        if let Some(source) = &self.source {
            write!(f, " caused by: {}", source)?;
        }

        Ok(())
    }
}

impl From<HashProviderError> for EncodingProofError {
    fn from(error: HashProviderError) -> Self {
        Self::new(ErrorKind::Provider, error)
    }
}

impl From<MerkleError> for EncodingProofError {
    fn from(error: MerkleError) -> Self {
        Self::new(ErrorKind::Proof, error)
    }
}

View File

@@ -0,0 +1,7 @@
use crate::transcript::{Direction, Idx};
/// A provider of plaintext encodings.
pub trait EncodingProvider {
/// Provides the encoding of a subsequence of plaintext.
fn provide_encoding(&self, direction: Direction, idx: &Idx) -> Option<Vec<u8>>;
}

View File

@@ -0,0 +1,330 @@
use std::collections::HashMap;
use bimap::BiMap;
use serde::{Deserialize, Serialize};
use crate::{
connection::TranscriptLength,
hash::{Blinded, Blinder, HashAlgId, HashAlgorithm, TypedHash},
merkle::MerkleTree,
serialize::CanonicalSerialize,
transcript::{
encoding::{
proof::{EncodingProof, Opening},
EncodingProvider,
},
Direction, Idx, Transcript,
},
};
/// Encoding tree builder error.
#[derive(Debug, thiserror::Error)]
pub enum EncodingTreeError {
    /// Index is out of bounds of the transcript.
    #[error("index is out of bounds of the transcript")]
    OutOfBounds {
        /// The index.
        index: Idx,
        /// The transcript length.
        transcript_length: usize,
    },
    /// Encoding provider is missing an encoding for an index.
    #[error("encoding provider is missing an encoding for an index")]
    MissingEncoding {
        /// The index which is missing.
        index: Idx,
    },
    /// Index is missing from the tree.
    #[error("index is missing from the tree")]
    MissingLeaf {
        /// The index which is missing.
        index: Idx,
    },
}

/// Leaf of the encoding tree: raw encoding bytes of one subsequence.
#[derive(Serialize)]
pub(crate) struct EncodingLeaf(Vec<u8>);

impl EncodingLeaf {
    /// Wraps the given encoding as a leaf.
    pub(super) fn new(encoding: Vec<u8>) -> Self {
        Self(encoding)
    }
}

/// A merkle tree of transcript encodings.
#[derive(Serialize, Deserialize)]
pub struct EncodingTree {
    /// Merkle tree of the commitments.
    tree: MerkleTree,
    /// Nonces used to blind the hashes.
    nonces: Vec<Blinder>,
    /// Mapping between the index of a leaf and the transcript index it
    /// corresponds to.
    idxs: BiMap<usize, (Direction, Idx)>,
}
opaque_debug::implement!(EncodingTree);
impl EncodingTree {
    /// Creates a new encoding tree.
    ///
    /// # Arguments
    ///
    /// * `hasher` - The hash algorithm to use.
    /// * `idxs` - The subsequence indices to commit to.
    /// * `provider` - The encoding provider.
    /// * `transcript_length` - The length of the transcript.
    pub fn new<'idx>(
        hasher: &dyn HashAlgorithm,
        idxs: impl IntoIterator<Item = &'idx (Direction, Idx)>,
        provider: &dyn EncodingProvider,
        transcript_length: &TranscriptLength,
    ) -> Result<Self, EncodingTreeError> {
        let mut this = Self {
            tree: MerkleTree::new(hasher.id()),
            nonces: Vec::new(),
            idxs: BiMap::new(),
        };

        let mut leaves = Vec::new();
        for dir_idx in idxs {
            let direction = dir_idx.0;
            let idx = &dir_idx.1;

            // Ignore empty indices.
            if idx.is_empty() {
                continue;
            }

            let len = match direction {
                Direction::Sent => transcript_length.sent as usize,
                Direction::Received => transcript_length.received as usize,
            };

            if idx.end() > len {
                return Err(EncodingTreeError::OutOfBounds {
                    index: idx.clone(),
                    transcript_length: len,
                });
            }

            if this.idxs.contains_right(dir_idx) {
                // The subsequence is already in the tree.
                continue;
            }

            let encoding = provider
                .provide_encoding(direction, idx)
                .ok_or_else(|| EncodingTreeError::MissingEncoding { index: idx.clone() })?;

            // Blind the leaf hash; the blinder (nonce) is kept so the leaf can
            // be opened later in a proof.
            let leaf = Blinded::new(EncodingLeaf::new(encoding));

            leaves.push(hasher.hash(&CanonicalSerialize::serialize(&leaf)));
            this.nonces.push(leaf.into_parts().1);
            // Leaf position doubles as the key; `nonces` is kept in lockstep.
            this.idxs.insert(this.idxs.len(), dir_idx.clone());
        }

        this.tree.insert(hasher, leaves);

        Ok(this)
    }

    /// Returns the root of the tree.
    pub fn root(&self) -> TypedHash {
        self.tree.root()
    }

    /// Returns the hash algorithm of the tree.
    pub fn algorithm(&self) -> HashAlgId {
        self.tree.algorithm()
    }

    /// Generates a proof for the given indices.
    ///
    /// # Arguments
    ///
    /// * `transcript` - The transcript to prove against.
    /// * `idxs` - The transcript indices to prove.
    pub fn proof<'idx>(
        &self,
        transcript: &Transcript,
        idxs: impl Iterator<Item = &'idx (Direction, Idx)>,
    ) -> Result<EncodingProof, EncodingTreeError> {
        let mut openings = HashMap::new();
        for dir_idx in idxs {
            let direction = dir_idx.0;
            let idx = &dir_idx.1;

            let leaf_idx = *self
                .idxs
                .get_by_right(dir_idx)
                .ok_or_else(|| EncodingTreeError::MissingLeaf { index: idx.clone() })?;

            let seq =
                transcript
                    .get(direction, idx)
                    .ok_or_else(|| EncodingTreeError::OutOfBounds {
                        index: idx.clone(),
                        transcript_length: transcript.len_of_direction(direction),
                    })?;

            let nonce = self.nonces[leaf_idx].clone();

            openings.insert(
                leaf_idx,
                Opening {
                    direction,
                    seq,
                    blinder: nonce,
                },
            );
        }

        // Merkle proofs are generated against sorted leaf indices.
        let mut indices = openings.keys().copied().collect::<Vec<_>>();
        indices.sort();

        Ok(EncodingProof {
            inclusion_proof: self.tree.proof(&indices),
            openings,
        })
    }

    /// Returns whether the tree contains the given transcript index.
    pub fn contains(&self, idx: &(Direction, Idx)) -> bool {
        self.idxs.contains_right(idx)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        fixtures::{encoder_seed, encoding_provider},
        hash::Blake3,
        transcript::encoding::EncodingCommitment,
        CryptoProvider,
    };
    use tlsn_data_fixtures::http::{request::POST_JSON, response::OK_JSON};

    /// Builds an encoding tree over `transcript` committing to `idxs`,
    /// using the fixture encoding provider and a Blake3 hasher.
    fn new_tree<'seq>(
        transcript: &Transcript,
        idxs: impl Iterator<Item = &'seq (Direction, Idx)>,
    ) -> Result<EncodingTree, EncodingTreeError> {
        let provider = encoding_provider(transcript.sent(), transcript.received());
        let transcript_length = TranscriptLength {
            sent: transcript.sent().len() as u32,
            received: transcript.received().len() as u32,
        };
        EncodingTree::new(&Blake3::default(), idxs, &provider, &transcript_length)
    }

    /// Committing to the full sent and received ranges and then proving
    /// them verifies round-trip against the commitment.
    #[test]
    fn test_encoding_tree() {
        let transcript = Transcript::new(POST_JSON, OK_JSON);
        let idx_0 = (Direction::Sent, Idx::new(0..POST_JSON.len()));
        let idx_1 = (Direction::Received, Idx::new(0..OK_JSON.len()));
        let tree = new_tree(&transcript, [&idx_0, &idx_1].into_iter()).unwrap();
        assert!(tree.contains(&idx_0));
        assert!(tree.contains(&idx_1));
        let proof = tree
            .proof(&transcript, [&idx_0, &idx_1].into_iter())
            .unwrap();
        let commitment = EncodingCommitment {
            root: tree.root(),
            seed: encoder_seed().to_vec(),
        };
        let partial_transcript = proof
            .verify_with_provider(
                &CryptoProvider::default(),
                &transcript.length(),
                &commitment,
            )
            .unwrap();
        assert_eq!(partial_transcript.sent_unsafe(), transcript.sent());
        assert_eq!(partial_transcript.received_unsafe(), transcript.received());
    }

    /// Multiple disjoint ranges per direction verify round-trip.
    #[test]
    fn test_encoding_tree_multiple_ranges() {
        let transcript = Transcript::new(POST_JSON, OK_JSON);
        let idx_0 = (Direction::Sent, Idx::new(0..1));
        let idx_1 = (Direction::Sent, Idx::new(1..POST_JSON.len()));
        let idx_2 = (Direction::Received, Idx::new(0..1));
        let idx_3 = (Direction::Received, Idx::new(1..OK_JSON.len()));
        let tree = new_tree(&transcript, [&idx_0, &idx_1, &idx_2, &idx_3].into_iter()).unwrap();
        assert!(tree.contains(&idx_0));
        assert!(tree.contains(&idx_1));
        assert!(tree.contains(&idx_2));
        assert!(tree.contains(&idx_3));
        let proof = tree
            .proof(&transcript, [&idx_0, &idx_1, &idx_2, &idx_3].into_iter())
            .unwrap();
        let commitment = EncodingCommitment {
            root: tree.root(),
            seed: encoder_seed().to_vec(),
        };
        let partial_transcript = proof
            .verify_with_provider(
                &CryptoProvider::default(),
                &transcript.length(),
                &commitment,
            )
            .unwrap();
        assert_eq!(partial_transcript.sent_unsafe(), transcript.sent());
        assert_eq!(partial_transcript.received_unsafe(), transcript.received());
    }

    /// Indices extending past the transcript are rejected in both directions.
    #[test]
    fn test_encoding_tree_out_of_bounds() {
        let transcript = Transcript::new(POST_JSON, OK_JSON);
        let idx_0 = (Direction::Sent, Idx::new(0..POST_JSON.len() + 1));
        let idx_1 = (Direction::Received, Idx::new(0..OK_JSON.len() + 1));
        let result = new_tree(&transcript, [&idx_0].into_iter()).unwrap_err();
        assert!(matches!(result, EncodingTreeError::OutOfBounds { .. }));
        let result = new_tree(&transcript, [&idx_1].into_iter()).unwrap_err();
        assert!(matches!(result, EncodingTreeError::OutOfBounds { .. }));
    }

    /// A provider with no encodings yields `MissingEncoding` for both
    /// directions.
    #[test]
    fn test_encoding_tree_missing_encoding() {
        let provider = encoding_provider(&[], &[]);
        let transcript_length = TranscriptLength {
            sent: 8,
            received: 8,
        };
        let result = EncodingTree::new(
            &Blake3::default(),
            [(Direction::Sent, Idx::new(0..8))].iter(),
            &provider,
            &transcript_length,
        )
        .unwrap_err();
        assert!(matches!(result, EncodingTreeError::MissingEncoding { .. }));
        // Fix: this second check previously duplicated the `Sent` case
        // verbatim, leaving the `Received` path untested.
        let result = EncodingTree::new(
            &Blake3::default(),
            [(Direction::Received, Idx::new(0..8))].iter(),
            &provider,
            &transcript_length,
        )
        .unwrap_err();
        assert!(matches!(result, EncodingTreeError::MissingEncoding { .. }));
    }
}

View File

@@ -0,0 +1,99 @@
use serde::{Deserialize, Serialize};
use crate::{
attestation::FieldId,
hash::{
impl_domain_separator, Blinded, Blinder, HashAlgorithmExt, HashProvider, HashProviderError,
TypedHash,
},
transcript::{Direction, Idx, InvalidSubsequence, Subsequence},
};
/// Hash of plaintext in the transcript.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub(crate) struct PlaintextHash {
    /// Direction of the plaintext.
    pub direction: Direction,
    /// Index of plaintext.
    pub idx: Idx,
    /// The hash of the data.
    pub hash: TypedHash,
}

// Gives this type a crate-defined hash domain separator so its hash cannot
// collide with other attested types.
impl_domain_separator!(PlaintextHash);
/// Secret data for a plaintext hash commitment.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct PlaintextHashSecret {
    /// Direction of the committed plaintext in the transcript.
    pub(crate) direction: Direction,
    /// Index of the committed plaintext.
    pub(crate) idx: Idx,
    /// Id of the attestation field containing the commitment.
    pub(crate) commitment: FieldId,
    /// Blinder used in the hash commitment.
    pub(crate) blinder: Blinder,
}
/// Proof of the plaintext of a hash.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct PlaintextHashProof {
    /// Blinded plaintext being revealed.
    data: Blinded<Vec<u8>>,
    /// Id of the attestation field containing the commitment.
    commitment: FieldId,
}

// Note: the two adjacent `impl PlaintextHashProof` blocks were merged into
// one; they served no purpose as separate blocks.
impl PlaintextHashProof {
    /// Creates a new proof opening the commitment field `commitment` with
    /// the blinded plaintext `data`.
    pub(crate) fn new(data: Blinded<Vec<u8>>, commitment: FieldId) -> Self {
        Self { data, commitment }
    }

    /// Returns the field id of the commitment this opening corresponds to.
    pub(crate) fn commitment_id(&self) -> &FieldId {
        &self.commitment
    }

    /// Verifies the proof, returning the subsequence of plaintext.
    ///
    /// # Arguments
    ///
    /// * `provider` - The hash provider used to look up the hash algorithm.
    /// * `commitment` - The commitment attested to by a Notary.
    ///
    /// # Errors
    ///
    /// Returns an error if the hash algorithm is unavailable, the recomputed
    /// hash does not match the commitment, or the data is inconsistent with
    /// the committed index.
    pub(crate) fn verify(
        self,
        provider: &HashProvider,
        commitment: &PlaintextHash,
    ) -> Result<(Direction, Subsequence), PlaintextHashProofError> {
        let alg = provider.get(&commitment.hash.alg)?;

        // Recompute the hash over the blinded data and compare with the
        // attested commitment.
        if commitment.hash.value != alg.hash_canonical(&self.data) {
            return Err(PlaintextHashProofError::new(
                "hash does not match commitment",
            ));
        }

        Ok((
            commitment.direction,
            Subsequence::new(commitment.idx.clone(), self.data.into_parts().0)?,
        ))
    }
}
/// Error for [`PlaintextHashProof`].
#[derive(Debug, thiserror::Error)]
#[error("invalid plaintext hash proof: {0}")]
pub(crate) struct PlaintextHashProofError(String);

impl PlaintextHashProofError {
    /// Creates a new error with the given message.
    fn new<T: Into<String>>(msg: T) -> Self {
        Self(msg.into())
    }
}

// Hash-algorithm lookup failures surface as proof errors.
impl From<HashProviderError> for PlaintextHashProofError {
    fn from(err: HashProviderError) -> Self {
        Self(err.to_string())
    }
}

// Data inconsistent with the committed index surfaces as a proof error.
impl From<InvalidSubsequence> for PlaintextHashProofError {
    fn from(err: InvalidSubsequence) -> Self {
        Self(err.to_string())
    }
}

View File

@@ -0,0 +1,346 @@
//! Transcript proofs.
use std::{collections::HashSet, fmt};
use serde::{Deserialize, Serialize};
use utils::range::ToRangeSet;
use crate::{
attestation::Body,
hash::Blinded,
index::Index,
transcript::{
commit::TranscriptCommitmentKind,
encoding::{EncodingProof, EncodingProofError, EncodingTree},
hash::{PlaintextHashProof, PlaintextHashProofError, PlaintextHashSecret},
Direction, Idx, PartialTranscript, Transcript,
},
CryptoProvider,
};
/// Proof of the contents of a transcript.
#[derive(Serialize, Deserialize)]
pub struct TranscriptProof {
    /// Proof opening encoding commitments, if any were revealed.
    encoding_proof: Option<EncodingProof>,
    /// Proofs opening plaintext hash commitments.
    hash_proofs: Vec<PlaintextHashProof>,
}

// Opaque Debug: avoids leaking proof contents into debug logs.
opaque_debug::implement!(TranscriptProof);
impl TranscriptProof {
    /// Verifies the proof.
    ///
    /// Returns a partial transcript of authenticated data.
    ///
    /// # Arguments
    ///
    /// * `provider` - The crypto provider to use for verification.
    /// * `attestation_body` - The attestation body to verify against.
    ///
    /// # Errors
    ///
    /// Returns an error if the attestation lacks a commitment referenced by
    /// the proof, or if any opening fails to verify.
    pub fn verify_with_provider(
        self,
        provider: &CryptoProvider,
        attestation_body: &Body,
    ) -> Result<PartialTranscript, TranscriptProofError> {
        let info = attestation_body.connection_info();

        // Start from an empty partial transcript sized to the attested
        // lengths; verified subsequences are unioned in below.
        let mut transcript = PartialTranscript::new(
            info.transcript_length.sent as usize,
            info.transcript_length.received as usize,
        );
        // Verify encoding proof.
        if let Some(proof) = self.encoding_proof {
            let commitment = attestation_body.encoding_commitment().ok_or_else(|| {
                TranscriptProofError::new(
                    ErrorKind::Encoding,
                    "contains an encoding proof but attestation is missing encoding commitment",
                )
            })?;
            let seq = proof.verify_with_provider(provider, &info.transcript_length, commitment)?;
            transcript.union_transcript(&seq);
        }
        // Verify hash openings.
        for proof in self.hash_proofs {
            // Each opening must reference a plaintext-hash field present in
            // the attestation body.
            let commitment = attestation_body
                .plaintext_hashes()
                .get_by_field_id(proof.commitment_id())
                .map(|field| &field.data)
                .ok_or_else(|| {
                    TranscriptProofError::new(
                        ErrorKind::Hash,
                        format!("contains a hash opening but attestation is missing corresponding commitment (id: {})", proof.commitment_id()),
                    )
                })?;
            let (direction, seq) = proof.verify(&provider.hash, commitment)?;
            transcript.union_subsequence(direction, &seq);
        }
        Ok(transcript)
    }
}
/// Error for [`TranscriptProof`].
#[derive(Debug, thiserror::Error)]
pub struct TranscriptProofError {
    kind: ErrorKind,
    source: Option<Box<dyn std::error::Error + Send + Sync>>,
}

impl TranscriptProofError {
    /// Creates a new error of the given kind wrapping `source`.
    fn new<E>(kind: ErrorKind, source: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        Self {
            kind,
            source: Some(source.into()),
        }
    }
}

/// Which class of proof failed to verify.
#[derive(Debug)]
enum ErrorKind {
    /// Encoding commitment/proof failure.
    Encoding,
    /// Plaintext hash commitment/proof failure.
    Hash,
}

// Manual Display (thiserror derives only `Error` when no `#[error]`
// attribute is present).
impl fmt::Display for TranscriptProofError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("transcript proof error: ")?;
        match self.kind {
            ErrorKind::Encoding => f.write_str("encoding error")?,
            ErrorKind::Hash => f.write_str("hash error")?,
        }
        if let Some(source) = &self.source {
            write!(f, " caused by: {}", source)?;
        }
        Ok(())
    }
}

impl From<EncodingProofError> for TranscriptProofError {
    fn from(e: EncodingProofError) -> Self {
        TranscriptProofError::new(ErrorKind::Encoding, e)
    }
}

impl From<PlaintextHashProofError> for TranscriptProofError {
    fn from(e: PlaintextHashProofError) -> Self {
        TranscriptProofError::new(ErrorKind::Hash, e)
    }
}
/// Builder for [`TranscriptProof`].
#[derive(Debug)]
pub struct TranscriptProofBuilder<'a> {
    /// Commitment kind used by `reveal` when none is specified.
    default_kind: TranscriptCommitmentKind,
    /// Transcript being proven.
    transcript: &'a Transcript,
    /// Encoding tree, present only if encoding commitments were made.
    encoding_tree: Option<&'a EncodingTree>,
    /// Secrets for plaintext hash commitments.
    plaintext_hashes: &'a Index<PlaintextHashSecret>,
    /// Indices queued for a single combined encoding proof at build time.
    encoding_proof_idxs: HashSet<(Direction, Idx)>,
    /// Hash opening proofs, completed eagerly as ranges are revealed.
    hash_proofs: Vec<PlaintextHashProof>,
}
impl<'a> TranscriptProofBuilder<'a> {
    /// Creates a new proof config builder.
    ///
    /// Defaults to revealing via encoding commitments.
    pub(crate) fn new(
        transcript: &'a Transcript,
        encoding_tree: Option<&'a EncodingTree>,
        plaintext_hashes: &'a Index<PlaintextHashSecret>,
    ) -> Self {
        Self {
            default_kind: TranscriptCommitmentKind::Encoding,
            transcript,
            encoding_tree,
            plaintext_hashes,
            encoding_proof_idxs: HashSet::default(),
            hash_proofs: Vec::new(),
        }
    }

    /// Sets the default kind of commitment to open when revealing ranges.
    pub fn default_kind(&mut self, kind: TranscriptCommitmentKind) -> &mut Self {
        self.default_kind = kind;
        self
    }

    /// Reveals the given ranges in the transcript using the provided kind of commitment.
    ///
    /// # Arguments
    ///
    /// * `ranges` - The ranges to reveal.
    /// * `direction` - The direction of the transcript.
    /// * `kind` - The kind of commitment to open.
    ///
    /// # Errors
    ///
    /// Returns an error if the ranges are out of bounds, or if no commitment
    /// of the requested kind exists for them.
    pub fn reveal_with_kind(
        &mut self,
        ranges: &dyn ToRangeSet<usize>,
        direction: Direction,
        kind: TranscriptCommitmentKind,
    ) -> Result<&mut Self, TranscriptProofBuilderError> {
        let idx = Idx::new(ranges.to_range_set());
        if idx.end() > self.transcript.len_of_direction(direction) {
            return Err(TranscriptProofBuilderError::new(
                BuilderErrorKind::Index,
                format!(
                    "range is out of bounds of the transcript ({}): {} > {}",
                    direction,
                    idx.end(),
                    self.transcript.len_of_direction(direction)
                ),
            ));
        }
        match kind {
            TranscriptCommitmentKind::Encoding => {
                let Some(encoding_tree) = self.encoding_tree else {
                    return Err(TranscriptProofBuilderError::new(
                        BuilderErrorKind::MissingCommitment,
                        "encoding tree is missing",
                    ));
                };
                let dir_idx = (direction, idx);
                // The exact (direction, index) pair must have been committed;
                // sub-ranges of a committed range are not sufficient.
                if !encoding_tree.contains(&dir_idx) {
                    return Err(TranscriptProofBuilderError::new(
                        BuilderErrorKind::MissingCommitment,
                        format!(
                            "encoding commitment is missing for ranges in {} transcript",
                            direction
                        ),
                    ));
                }
                // Proof generation is deferred to `build`, where all queued
                // indices are combined into one encoding proof.
                self.encoding_proof_idxs.insert(dir_idx);
            }
            TranscriptCommitmentKind::Hash { .. } => {
                // NOTE(review): lookup is keyed by the transcript index only.
                // If a hash commitment exists for this idx in the opposite
                // direction, its secret would be used instead of the requested
                // one — confirm `Index` guarantees direction consistency.
                let Some(PlaintextHashSecret {
                    direction,
                    commitment,
                    blinder,
                    ..
                }) = self.plaintext_hashes.get_by_transcript_idx(&idx)
                else {
                    return Err(TranscriptProofBuilderError::new(
                        BuilderErrorKind::MissingCommitment,
                        format!(
                            "hash commitment is missing for ranges in {} transcript",
                            direction
                        ),
                    ));
                };
                let (_, data) = self
                    .transcript
                    .get(*direction, &idx)
                    .expect("subsequence was checked to be in transcript")
                    .into_parts();
                // Hash proofs are constructed eagerly since they need no
                // global state at build time.
                self.hash_proofs.push(PlaintextHashProof::new(
                    Blinded::new_with_blinder(data, blinder.clone()),
                    *commitment,
                ));
            }
        }
        Ok(self)
    }

    /// Reveals the given ranges in the transcript using the default kind of commitment.
    ///
    /// # Arguments
    ///
    /// * `ranges` - The ranges to reveal.
    /// * `direction` - The direction of the transcript.
    pub fn reveal(
        &mut self,
        ranges: &dyn ToRangeSet<usize>,
        direction: Direction,
    ) -> Result<&mut Self, TranscriptProofBuilderError> {
        self.reveal_with_kind(ranges, direction, self.default_kind)
    }

    /// Builds the transcript proof.
    pub fn build(self) -> Result<TranscriptProof, TranscriptProofBuilderError> {
        let encoding_proof = if !self.encoding_proof_idxs.is_empty() {
            // Invariant: idxs are only inserted when the tree exists and
            // contains them, so these expects cannot fire.
            let encoding_tree = self.encoding_tree.expect("encoding tree is present");
            let proof = encoding_tree
                .proof(self.transcript, self.encoding_proof_idxs.iter())
                .expect("subsequences were checked to be in tree");
            Some(proof)
        } else {
            None
        };
        Ok(TranscriptProof {
            encoding_proof,
            hash_proofs: self.hash_proofs,
        })
    }
}
/// Error for [`TranscriptProofBuilder`].
#[derive(Debug, thiserror::Error)]
pub struct TranscriptProofBuilderError {
    kind: BuilderErrorKind,
    source: Option<Box<dyn std::error::Error + Send + Sync>>,
}

impl TranscriptProofBuilderError {
    /// Creates a new error of the given kind wrapping `source`.
    fn new<E>(kind: BuilderErrorKind, source: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        Self {
            kind,
            source: Some(source.into()),
        }
    }
}

/// Which class of builder failure occurred.
#[derive(Debug)]
enum BuilderErrorKind {
    /// A requested range was out of bounds.
    Index,
    /// No commitment exists for a requested range.
    MissingCommitment,
}

// Manual Display (thiserror derives only `Error` when no `#[error]`
// attribute is present).
impl fmt::Display for TranscriptProofBuilderError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str("transcript proof builder error: ")?;
        match self.kind {
            BuilderErrorKind::Index => f.write_str("index error")?,
            BuilderErrorKind::MissingCommitment => f.write_str("commitment error")?,
        }
        if let Some(source) = &self.source {
            write!(f, " caused by: {}", source)?;
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Revealing a range that extends past the end of the transcript must be
    /// rejected in both directions.
    #[test]
    fn test_range_out_of_bounds() {
        let bytes: [u8; 12] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11];
        let transcript = Transcript::new(bytes, bytes);
        let secrets = Index::default();
        let mut builder = TranscriptProofBuilder::new(&transcript, None, &secrets);

        for direction in [Direction::Sent, Direction::Received] {
            assert!(builder.reveal(&(10..15), direction).is_err());
        }
    }
}

View File

@@ -1,192 +1,133 @@
use std::ops::Range;
use p256::{
ecdsa::{
signature::{SignerMut, Verifier},
Signature as P256Signature, SigningKey,
},
PublicKey,
};
use rand_chacha::ChaCha20Rng;
use rand_core::SeedableRng;
use tls_core::{
cert::ServerCertDetails,
handshake::HandshakeData,
ke::ServerKxDetails,
msgs::{enums::SignatureScheme, handshake::DigitallySignedStruct},
};
use mpz_core::{commit::HashCommit, serialize::CanonicalSerialize};
use tlsn_core::{
commitment::TranscriptCommitmentBuilder,
fixtures,
msg::SignedSessionHeader,
proof::{SessionProof, SubstringsProof},
HandshakeSummary, NotarizedSession, ServerName, SessionData, SessionHeader, Signature,
Transcript,
attestation::{Attestation, AttestationConfig},
connection::{HandshakeData, HandshakeDataV1_2},
fixtures::{self, encoder_seed, ConnectionFixture},
hash::Blake3,
presentation::PresentationOutput,
request::{Request, RequestConfig},
signing::SignatureAlgId,
transcript::{encoding::EncodingTree, Direction, Transcript, TranscriptCommitConfigBuilder},
CryptoProvider,
};
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
/// Tests that the attestation protocol and verification work end-to-end
#[test]
/// Tests that the commitment creation protocol and verification work end-to-end
fn test_api() {
let testdata = fixtures::cert::tlsnotary();
// Prover's transcript
let data_sent = "sent data".as_bytes();
let data_recv = "received data".as_bytes();
let transcript_tx = Transcript::new(data_sent.to_vec());
let transcript_rx = Transcript::new(data_recv.to_vec());
let mut provider = CryptoProvider::default();
// Ranges of plaintext for which the Prover wants to create a commitment
let range1: Range<usize> = Range { start: 0, end: 2 };
let range2: Range<usize> = Range { start: 1, end: 3 };
// Configure signer for Notary
provider.signer.set_secp256k1(&[42u8; 32]).unwrap();
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let (sent_len, recv_len) = transcript.len();
// Plaintext encodings which the Prover obtained from GC evaluation
let encodings_provider = fixtures::encoding_provider(data_sent, data_recv);
let encodings_provider = fixtures::encoding_provider(GET_WITH_HEADER, OK_JSON);
// At the end of the session the Prover holds the:
// - time when the TLS handshake began
// - server ephemeral key
// - handshake data (to which the Prover sent a commitment earlier)
// - encoder seed revealed by the Notary at the end of the label commitment protocol
// At the end of the TLS connection the Prover holds the:
let ConnectionFixture {
server_name,
connection_info,
server_cert_data,
} = ConnectionFixture::tlsnotary(transcript.length());
let time = testdata.time;
let ephem_key = testdata.pubkey.clone();
let handshake_data = HandshakeData::new(
ServerCertDetails::new(
vec![
testdata.ee.clone(),
testdata.inter.clone(),
testdata.ca.clone(),
],
vec![],
None,
),
ServerKxDetails::new(
testdata.kx_params(),
DigitallySignedStruct::new(SignatureScheme::RSA_PKCS1_SHA256, testdata.sig.clone()),
),
testdata.cr,
testdata.sr,
);
// Commitment to the handshake which the Prover sent at the start of the TLS handshake
let (hs_decommitment, hs_commitment) = handshake_data.hash_commit();
let mut commitment_builder =
TranscriptCommitmentBuilder::new(encodings_provider, data_sent.len(), data_recv.len());
let commitment_id_1 = commitment_builder.commit_sent(&range1).unwrap();
let commitment_id_2 = commitment_builder.commit_recv(&range2).unwrap();
let commitments = commitment_builder.build().unwrap();
let notarized_session_data = SessionData::new(
ServerName::Dns(testdata.dns_name.clone()),
hs_decommitment.clone(),
transcript_tx,
transcript_rx,
commitments,
);
// Some outer context generates an (ephemeral) signing key for the Notary, e.g.
let mut rng = ChaCha20Rng::from_seed([6u8; 32]);
let signing_key = SigningKey::random(&mut rng);
let raw_key = signing_key.to_bytes();
// Notary receives the raw signing key from some outer context
let mut signer = SigningKey::from_bytes(&raw_key).unwrap();
let notary_pubkey = PublicKey::from(*signer.verifying_key());
let notary_verifing_key = *signer.verifying_key();
// Notary creates the session header
assert!(data_sent.len() <= (u32::MAX as usize) && data_recv.len() <= (u32::MAX as usize));
let header = SessionHeader::new(
fixtures::encoder_seed(),
notarized_session_data.commitments().merkle_root(),
data_sent.len(),
data_recv.len(),
// the session's end time and TLS handshake start time may be a few mins apart
HandshakeSummary::new(time + 60, ephem_key.clone(), hs_commitment),
);
let signature: P256Signature = signer.sign(&header.to_bytes());
// Notary creates a msg and sends it to Prover
let msg = SignedSessionHeader {
header,
signature: signature.into(),
};
//---------------------------------------
let msg_bytes = bincode::serialize(&msg).unwrap();
let SignedSessionHeader { header, signature } = bincode::deserialize(&msg_bytes).unwrap();
//---------------------------------------
// Prover verifies the signature
#[allow(irrefutable_let_patterns)]
if let Signature::P256(signature) = signature {
notary_verifing_key
.verify(&header.to_bytes(), &signature)
.unwrap();
} else {
panic!("Notary signature is not P256");
};
// Prover verifies the header and stores it with the signature in NotarizedSession
header
.verify(
time,
&ephem_key,
&notarized_session_data.commitments().merkle_root(),
header.encoder_seed(),
&notarized_session_data.session_info().handshake_decommitment,
)
.unwrap();
let session = NotarizedSession::new(header, Some(signature), notarized_session_data);
// Prover converts NotarizedSession into SessionProof and SubstringsProof and sends them to the Verifier
let session_proof = session.session_proof();
let mut substrings_proof_builder = session.data().build_substrings_proof();
substrings_proof_builder
.reveal_by_id(commitment_id_1)
.unwrap()
.reveal_by_id(commitment_id_2)
.unwrap();
let substrings_proof = substrings_proof_builder.build().unwrap();
//---------------------------------------
let session_proof_bytes = bincode::serialize(&session_proof).unwrap();
let substrings_proof_bytes = bincode::serialize(&substrings_proof).unwrap();
let session_proof: SessionProof = bincode::deserialize(&session_proof_bytes).unwrap();
let substrings_proof: SubstringsProof = bincode::deserialize(&substrings_proof_bytes).unwrap();
//---------------------------------------
// The Verifier does:
session_proof
.verify_with_default_cert_verifier(notary_pubkey)
.unwrap();
let SessionProof {
header,
session_info,
let HandshakeData::V1_2(HandshakeDataV1_2 {
server_ephemeral_key,
..
} = session_proof;
}) = server_cert_data.handshake.clone()
else {
unreachable!()
};
// Prover specifies the ranges it wants to commit to.
let mut transcript_commitment_builder = TranscriptCommitConfigBuilder::new(&transcript);
transcript_commitment_builder
.commit_sent(&(0..sent_len))
.unwrap()
.commit_recv(&(0..recv_len))
.unwrap();
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap();
// Prover constructs encoding tree.
let encoding_tree = EncodingTree::new(
&Blake3::default(),
transcripts_commitment_config.iter_encoding(),
&encodings_provider,
&transcript.length(),
)
.unwrap();
let request_config = RequestConfig::default();
let mut request_builder = Request::builder(&request_config);
request_builder
.server_name(server_name.clone())
.server_cert_data(server_cert_data)
.transcript(transcript)
.encoding_tree(encoding_tree);
let (request, secrets) = request_builder.build(&provider).unwrap();
let attestation_config = AttestationConfig::builder()
.supported_signature_algs([SignatureAlgId::SECP256K1])
.build()
.unwrap();
// Notary signs an attestation according to their view of the connection.
let mut attestation_builder = Attestation::builder(&attestation_config)
.accept_request(request.clone())
.unwrap();
attestation_builder
// Notary's view of the connection
.connection_info(connection_info.clone())
// Server key Notary received during handshake
.server_ephemeral_key(server_ephemeral_key)
.encoding_seed(encoder_seed().to_vec());
let attestation = attestation_builder.build(&provider).unwrap();
// Prover validates the attestation is consistent with its request.
request.validate(&attestation).unwrap();
let mut transcript_proof_builder = secrets.transcript_proof_builder();
transcript_proof_builder
.reveal(&(0..sent_len), Direction::Sent)
.unwrap();
transcript_proof_builder
.reveal(&(0..recv_len), Direction::Received)
.unwrap();
let transcript_proof = transcript_proof_builder.build().unwrap();
let mut builder = attestation.presentation_builder(&provider);
builder.identity_proof(secrets.identity_proof());
builder.transcript_proof(transcript_proof);
let presentation = builder.build().unwrap();
// Verifier verifies the presentation.
let PresentationOutput {
server_name: presented_server_name,
connection_info: presented_connection_info,
transcript: presented_transcript,
..
} = presentation.verify(&provider).unwrap();
assert_eq!(presented_server_name.unwrap(), server_name);
assert_eq!(presented_connection_info, connection_info);
let presented_transcript = presented_transcript.unwrap();
// assert dns name is expected
assert_eq!(
session_info.server_name.as_ref(),
testdata.dns_name.as_str()
presented_transcript.sent_unsafe(),
secrets.transcript().sent()
);
assert_eq!(
presented_transcript.received_unsafe(),
secrets.transcript().received()
);
let (sent, recv) = substrings_proof.verify(&header).unwrap();
assert_eq!(&sent.data()[range1], b"se".as_slice());
assert_eq!(&recv.data()[range2], b"ec".as_slice());
}

View File

@@ -0,0 +1,5 @@
[package]
name = "tlsn-data-fixtures"
version = "0.0.0"
edition = "2021"
publish = false

View File

@@ -1,6 +1,6 @@
HTTP/1.1 200 OK
Cookie: very-secret-cookie
Content-Length: 14
Content-Length: 44
Content-Type: application/json
{"foo": "bar"}
{"foo": "bar", "bazz": 123, "buzz": [1,"5"]}

View File

@@ -0,0 +1,53 @@
//! HTTP data fixtures
/// HTTP requests
///
/// Each constant is the raw bytes of a request fixture file, embedded at
/// compile time via `define_fixture!`.
pub mod request {
    use crate::define_fixture;

    define_fixture!(
        GET_EMPTY,
        "A GET request without a body or headers.",
        "../data/http/request_get_empty"
    );
    define_fixture!(
        GET_EMPTY_HEADER,
        "A GET request with an empty header.",
        "../data/http/request_get_empty_header"
    );
    define_fixture!(
        GET_WITH_HEADER,
        "A GET request with a header.",
        "../data/http/request_get_with_header"
    );
    define_fixture!(
        POST_JSON,
        "A POST request with a JSON body.",
        "../data/http/request_post_json"
    );
}
/// HTTP responses
///
/// Each constant is the raw bytes of a response fixture file, embedded at
/// compile time via `define_fixture!`.
pub mod response {
    use crate::define_fixture;

    define_fixture!(
        OK_EMPTY,
        "An OK response without a body.",
        "../data/http/response_empty"
    );
    // NOTE(review): this fixture points at the same file as `OK_EMPTY`.
    // Following the naming pattern of `GET_EMPTY_HEADER`, the intended path
    // is presumably "../data/http/response_empty_header" — confirm the data
    // file exists and update the path.
    define_fixture!(
        OK_EMPTY_HEADER,
        "An OK response with an empty header.",
        "../data/http/response_empty"
    );
    define_fixture!(
        OK_TEXT,
        "An OK response with a text body.",
        "../data/http/response_text"
    );
    define_fixture!(
        OK_JSON,
        "An OK response with a JSON body.",
        "../data/http/response_json"
    );
}

View File

@@ -0,0 +1,14 @@
pub mod http;
/// Defines a byte-string fixture constant.
///
/// Expands to `pub const $name: &[u8]` containing the file at `$path`
/// (included at compile time with `include_bytes!`). The file's contents
/// are also embedded in the generated rustdoc as a non-compiled code block.
macro_rules! define_fixture {
    ($name:ident, $doc:tt, $path:tt) => {
        #[doc = $doc]
        ///
        /// ```ignore
        #[doc = include_str!($path)]
        /// ```
        pub const $name: &[u8] = include_bytes!($path);
    };
}

pub(crate) use define_fixture;

View File

@@ -18,7 +18,8 @@ futures = { workspace = true }
http-body-util = { workspace = true }
hyper = { workspace = true, features = ["client", "http1"] }
hyper-util = { workspace = true, features = ["full"] }
p256 = { workspace = true, features = ["ecdsa"] }
k256 = { workspace = true, features = ["ecdsa"] }
regex = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
tokio = { workspace = true, features = [
@@ -51,4 +52,4 @@ path = "discord/discord_dm.rs"
[[example]]
name = "discord_dm_verifier"
path = "discord/discord_dm_verifier.rs"
path = "discord/discord_dm_verifier.rs"

View File

@@ -11,7 +11,7 @@ use tokio::io::AsyncWriteExt as _;
use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
use tlsn_examples::run_notary;
use tlsn_prover::tls::{state::Notarize, Prover, ProverConfig};
use tlsn_prover::{state::Notarize, Prover, ProverConfig};
// Setting of the application server
const SERVER_DOMAIN: &str = "example.com";
@@ -31,7 +31,7 @@ async fn main() {
// Prover configuration.
let config = ProverConfig::builder()
.id("example")
.server_dns(SERVER_DOMAIN)
.server_name(SERVER_DOMAIN)
.protocol_config(
ProtocolConfig::builder()
// Configure the limit of the data sent and received.
@@ -159,8 +159,8 @@ async fn build_proof_without_redactions(mut prover: Prover<Notarize>) -> TlsProo
let sent_commitment = builder.commit_sent(&(0..sent_len)).unwrap();
let recv_commitment = builder.commit_recv(&(0..recv_len)).unwrap();
// Finalize, returning the notarized session
let notarized_session = prover.finalize().await.unwrap();
// Finalize, returning the attestation and secrets.
let (attestation, secrets) = prover.finalize().await.unwrap();
// Create a proof for all committed data in this session
let mut proof_builder = notarized_session.data().build_substrings_proof();

View File

@@ -9,7 +9,7 @@ use notary_client::{Accepted, NotarizationRequest, NotaryClient};
use std::{env, ops::Range, str};
use tlsn_common::config::ProtocolConfig;
use tlsn_core::proof::TlsProof;
use tlsn_prover::tls::{Prover, ProverConfig};
use tlsn_prover::{Prover, ProverConfig};
use tokio::io::AsyncWriteExt as _;
use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
use tracing::debug;
@@ -72,7 +72,7 @@ async fn main() {
// Configure a new prover with the unique session id returned from notary client.
let prover_config = ProverConfig::builder()
.id(session_id)
.server_dns(SERVER_DOMAIN)
.server_name(SERVER_DOMAIN)
.protocol_config(protocol_config)
.build()
.unwrap();

View File

@@ -3,7 +3,7 @@ use hyper::{body::Bytes, Request, StatusCode, Uri};
use hyper_util::rt::TokioIo;
use tlsn_common::config::{ProtocolConfig, ProtocolConfigValidator};
use tlsn_core::{proof::SessionInfo, Direction, RedactedTranscript};
use tlsn_prover::tls::{state::Prove, Prover, ProverConfig};
use tlsn_prover::{state::Prove, Prover, ProverConfig};
use tlsn_verifier::tls::{Verifier, VerifierConfig};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
@@ -58,7 +58,7 @@ async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
let prover = Prover::new(
ProverConfig::builder()
.id(id)
.server_dns(server_domain)
.server_name(server_domain)
.protocol_config(
ProtocolConfig::builder()
.max_sent_data(MAX_SENT_DATA)

View File

@@ -1,7 +1,10 @@
use elliptic_curve::pkcs8::DecodePrivateKey;
use futures::{AsyncRead, AsyncWrite};
use tlsn_common::config::ProtocolConfigValidator;
use tlsn_verifier::tls::{Verifier, VerifierConfig};
use tlsn_core::{attestation::AttestationConfig, signing::SignatureAlgId, CryptoProvider};
use tlsn_verifier::{Verifier, VerifierConfig};
/// The private key used by the Notary for signing attestations.
pub const NOTARY_PRIVATE_KEY: &[u8] = &[1u8; 32];
// Maximum number of bytes that can be sent from prover to server
const MAX_SENT_DATA: usize = 1 << 12;
@@ -10,12 +13,8 @@ const MAX_RECV_DATA: usize = 1 << 14;
/// Runs a simple Notary with the provided connection to the Prover.
pub async fn run_notary<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(conn: T) {
// Load the notary signing key
let signing_key_str = std::str::from_utf8(include_bytes!(
"../../notary/server/fixture/notary/notary.key"
))
.unwrap();
let signing_key = p256::ecdsa::SigningKey::from_pkcs8_pem(signing_key_str).unwrap();
let mut provider = CryptoProvider::default();
provider.signer.set_secp256k1(NOTARY_PRIVATE_KEY).unwrap();
// Setup the config. Normally a different ID would be generated
// for each notarization.
@@ -26,13 +25,18 @@ pub async fn run_notary<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(conn
.unwrap();
let config = VerifierConfig::builder()
.id("example")
.protocol_config_validator(config_validator)
.crypto_provider(provider)
.build()
.unwrap();
let attestation_config = AttestationConfig::builder()
.supported_signature_algs(vec![SignatureAlgId::SECP256K1])
.build()
.unwrap();
Verifier::new(config)
.notarize::<_, p256::ecdsa::Signature>(conn, &signing_key)
.notarize(conn, &attestation_config)
.await
.unwrap();
}

View File

@@ -14,4 +14,5 @@ thiserror = { workspace = true }
[dev-dependencies]
tlsn-core = { workspace = true, features = ["fixtures"] }
tlsn-data-fixtures = { workspace = true }
rstest = { workspace = true }

View File

@@ -1,7 +1,7 @@
use std::error::Error;
use spansy::Spanned;
use tlsn_core::{commitment::TranscriptCommitmentBuilder, Direction};
use tlsn_core::transcript::{Direction, TranscriptCommitConfigBuilder};
use crate::{
http::{Body, BodyContent, Header, HttpTranscript, MessageKind, Request, Response, Target},
@@ -88,7 +88,7 @@ pub trait HttpCommit {
/// * `transcript` - The transcript to commit.
fn commit_transcript(
&mut self,
builder: &mut TranscriptCommitmentBuilder,
builder: &mut TranscriptCommitConfigBuilder,
transcript: &HttpTranscript,
) -> Result<(), HttpCommitError> {
for request in &transcript.requests {
@@ -114,7 +114,7 @@ pub trait HttpCommit {
/// * `request` - The request to commit to.
fn commit_request(
&mut self,
builder: &mut TranscriptCommitmentBuilder,
builder: &mut TranscriptCommitConfigBuilder,
direction: Direction,
request: &Request,
) -> Result<(), HttpCommitError> {
@@ -163,7 +163,7 @@ pub trait HttpCommit {
/// * `target` - The target to commit to.
fn commit_target(
&mut self,
builder: &mut TranscriptCommitmentBuilder,
builder: &mut TranscriptCommitConfigBuilder,
direction: Direction,
request: &Request,
target: &Target,
@@ -191,7 +191,7 @@ pub trait HttpCommit {
/// * `header` - The header to commit to.
fn commit_request_header(
&mut self,
builder: &mut TranscriptCommitmentBuilder,
builder: &mut TranscriptCommitConfigBuilder,
direction: Direction,
parent: &Request,
header: &Header,
@@ -236,7 +236,7 @@ pub trait HttpCommit {
/// * `body` - The body to commit to.
fn commit_request_body(
&mut self,
builder: &mut TranscriptCommitmentBuilder,
builder: &mut TranscriptCommitConfigBuilder,
direction: Direction,
parent: &Request,
body: &Body,
@@ -279,7 +279,7 @@ pub trait HttpCommit {
/// * `response` - The response to commit to.
fn commit_response(
&mut self,
builder: &mut TranscriptCommitmentBuilder,
builder: &mut TranscriptCommitConfigBuilder,
direction: Direction,
response: &Response,
) -> Result<(), HttpCommitError> {
@@ -326,7 +326,7 @@ pub trait HttpCommit {
/// * `header` - The header to commit to.
fn commit_response_header(
&mut self,
builder: &mut TranscriptCommitmentBuilder,
builder: &mut TranscriptCommitConfigBuilder,
direction: Direction,
parent: &Response,
header: &Header,
@@ -371,7 +371,7 @@ pub trait HttpCommit {
/// * `body` - The body to commit to.
fn commit_response_body(
&mut self,
builder: &mut TranscriptCommitmentBuilder,
builder: &mut TranscriptCommitConfigBuilder,
direction: Direction,
parent: &Response,
body: &Body,
@@ -414,18 +414,19 @@ mod tests {
use super::*;
use rstest::*;
use spansy::http::{parse_request, parse_response};
use tlsn_core::fixtures;
use tlsn_core::transcript::Transcript;
use tlsn_data_fixtures::http as fixtures;
#[rstest]
#[case::get_empty(include_bytes!("../../tests/fixtures/http/request_get_empty"))]
#[case::get_empty_header(include_bytes!("../../tests/fixtures/http/request_get_empty_header"))]
#[case::get_with_header(include_bytes!("../../tests/fixtures/http/request_get_with_header"))]
#[case::post_json(include_bytes!("../../tests/fixtures/http/request_post_json"))]
#[case::get_empty(fixtures::request::GET_EMPTY)]
#[case::get_empty_header(fixtures::request::GET_EMPTY_HEADER)]
#[case::get_with_header(fixtures::request::GET_WITH_HEADER)]
#[case::post_json(fixtures::request::POST_JSON)]
fn test_http_default_commit_request(#[case] src: &'static [u8]) {
let transcript = Transcript::new(src, []);
let request = parse_request(src).unwrap();
let mut committer = DefaultHttpCommitter::default();
let mut builder =
TranscriptCommitmentBuilder::new(fixtures::encoding_provider(src, &[]), src.len(), 0);
let mut builder = TranscriptCommitConfigBuilder::new(&transcript);
committer
.commit_request(&mut builder, Direction::Sent, &request)
@@ -435,15 +436,15 @@ mod tests {
}
#[rstest]
#[case::empty(include_bytes!("../../tests/fixtures/http/response_empty"))]
#[case::empty_header(include_bytes!("../../tests/fixtures/http/response_empty_header"))]
#[case::json(include_bytes!("../../tests/fixtures/http/response_json"))]
#[case::text(include_bytes!("../../tests/fixtures/http/response_text"))]
#[case::empty(fixtures::response::OK_EMPTY)]
#[case::empty_header(fixtures::response::OK_EMPTY_HEADER)]
#[case::json(fixtures::response::OK_JSON)]
#[case::text(fixtures::response::OK_TEXT)]
fn test_http_default_commit_response(#[case] src: &'static [u8]) {
let transcript = Transcript::new([], src);
let response = parse_response(src).unwrap();
let mut committer = DefaultHttpCommitter::default();
let mut builder =
TranscriptCommitmentBuilder::new(fixtures::encoding_provider(&[], src), 0, src.len());
let mut builder = TranscriptCommitConfigBuilder::new(&transcript);
committer
.commit_response(&mut builder, Direction::Received, &response)

View File

@@ -1,10 +1,9 @@
//! Tooling for working with HTTP data.
mod commit;
mod session;
use bytes::Bytes;
pub use commit::{DefaultHttpCommitter, HttpCommit, HttpCommitError};
pub use session::NotarizedHttpSession;
#[doc(hidden)]
pub use spansy::http;
@@ -13,7 +12,7 @@ pub use http::{
parse_request, parse_response, Body, BodyContent, Header, HeaderName, HeaderValue, Method,
Reason, Request, RequestLine, Requests, Response, Responses, Status, Target,
};
use tlsn_core::Transcript;
use tlsn_core::transcript::Transcript;
/// The kind of HTTP message.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
@@ -35,9 +34,11 @@ pub struct HttpTranscript {
impl HttpTranscript {
/// Parses the HTTP transcript from the provided transcripts.
pub fn parse(tx: &Transcript, rx: &Transcript) -> Result<Self, spansy::ParseError> {
let requests = Requests::new(tx.data().clone()).collect::<Result<Vec<_>, _>>()?;
let responses = Responses::new(rx.data().clone()).collect::<Result<Vec<_>, _>>()?;
pub fn parse(transcript: &Transcript) -> Result<Self, spansy::ParseError> {
let requests = Requests::new(Bytes::copy_from_slice(transcript.sent()))
.collect::<Result<Vec<_>, _>>()?;
let responses = Responses::new(Bytes::copy_from_slice(transcript.received()))
.collect::<Result<Vec<_>, _>>()?;
Ok(Self {
requests,
@@ -45,158 +46,3 @@ impl HttpTranscript {
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use tlsn_core::{
commitment::{CommitmentKind, TranscriptCommitmentBuilder},
fixtures,
proof::SubstringsProofBuilder,
Direction, Transcript,
};
use crate::json::JsonValue;
static TX: &[u8] = b"GET / HTTP/1.1\r\nHost: localhost\r\n\r\n\
POST /hello HTTP/1.1\r\nHost: localhost\r\nContent-Length: 44\r\nContent-Type: application/json\r\n\r\n\
{\"foo\": \"bar\", \"bazz\": 123, \"buzz\": [1,\"5\"]}";
static RX: &[u8] =
b"HTTP/1.1 200 OK\r\nCookie: very-secret-cookie\r\nContent-Length: 14\r\nContent-Type: application/json\r\n\r\n\
{\"foo\": \"bar\"}\r\n\
HTTP/1.1 200 OK\r\nContent-Length: 14\r\nContent-Type: text/plain\r\n\r\n\
Hello World!!!";
#[test]
fn test_http_commit() {
let transcript_tx = Transcript::new(TX);
let transcript_rx = Transcript::new(RX);
let mut builder = TranscriptCommitmentBuilder::new(
fixtures::encoding_provider(TX, RX),
TX.len(),
RX.len(),
);
let transcript = HttpTranscript::parse(&transcript_tx, &transcript_rx).unwrap();
let mut committer = DefaultHttpCommitter::default();
committer
.commit_transcript(&mut builder, &transcript)
.unwrap();
let commitments = builder.build().unwrap();
// Path
assert!(commitments
.get_id_by_info(CommitmentKind::Blake3, &(4..5).into(), Direction::Sent)
.is_some());
// Host header
assert!(commitments
.get_id_by_info(CommitmentKind::Blake3, &(16..33).into(), Direction::Sent)
.is_some());
// foo value
assert!(commitments
.get_id_by_info(CommitmentKind::Blake3, &(137..140).into(), Direction::Sent)
.is_some());
// Cookie header
assert!(commitments
.get_id_by_info(
CommitmentKind::Blake3,
&(17..45).into(),
Direction::Received
)
.is_some());
// Body
assert!(commitments
.get_id_by_info(
CommitmentKind::Blake3,
&(180..194).into(),
Direction::Received
)
.is_some());
}
#[test]
fn test_http_prove() {
let transcript_tx = Transcript::new(TX);
let transcript_rx = Transcript::new(RX);
let mut builder = TranscriptCommitmentBuilder::new(
fixtures::encoding_provider(TX, RX),
TX.len(),
RX.len(),
);
let transcript = HttpTranscript::parse(&transcript_tx, &transcript_rx).unwrap();
let mut committer = DefaultHttpCommitter::default();
committer
.commit_transcript(&mut builder, &transcript)
.unwrap();
let commitments = builder.build().unwrap();
let mut builder = SubstringsProofBuilder::new(&commitments, &transcript_tx, &transcript_rx);
let req_0 = &transcript.requests[0];
let req_1 = &transcript.requests[1];
let BodyContent::Json(JsonValue::Object(req_1_body)) =
&req_1.body.as_ref().unwrap().content
else {
unreachable!();
};
let resp_0 = &transcript.responses[0];
let resp_1 = &transcript.responses[1];
builder
.reveal_sent(&req_0.without_data(), CommitmentKind::Blake3)
.unwrap()
.reveal_sent(&req_0.request.target, CommitmentKind::Blake3)
.unwrap()
.reveal_sent(
req_0.headers_with_name("host").next().unwrap(),
CommitmentKind::Blake3,
)
.unwrap();
builder
.reveal_sent(&req_1.without_data(), CommitmentKind::Blake3)
.unwrap()
.reveal_sent(&req_1_body.without_pairs(), CommitmentKind::Blake3)
.unwrap()
.reveal_sent(req_1_body.get("bazz").unwrap(), CommitmentKind::Blake3)
.unwrap();
builder
.reveal_recv(&resp_0.without_data(), CommitmentKind::Blake3)
.unwrap()
.reveal_recv(
resp_0.headers_with_name("cookie").next().unwrap(),
CommitmentKind::Blake3,
)
.unwrap();
builder
.reveal_recv(&resp_1.without_data(), CommitmentKind::Blake3)
.unwrap()
.reveal_recv(resp_1.body.as_ref().unwrap(), CommitmentKind::Blake3)
.unwrap();
let proof = builder.build().unwrap();
let header = fixtures::session_header(commitments.merkle_root(), TX.len(), RX.len());
let (sent, recv) = proof.verify(&header).unwrap();
assert_eq!(&sent.data()[4..5], b"/");
assert_eq!(&sent.data()[22..31], b"localhost");
assert_eq!(&sent.data()[151..154], b"123");
assert_eq!(&recv.data()[25..43], b"very-secret-cookie");
assert_eq!(&recv.data()[180..194], b"Hello World!!!");
}
}

View File

@@ -1,36 +0,0 @@
use tlsn_core::{proof::SessionProof, NotarizedSession};
use crate::http::HttpTranscript;
/// A notarized HTTP session.
///
/// Pairs a notarized TLS session with the HTTP transcript parsed from it.
#[derive(Debug)]
pub struct NotarizedHttpSession {
    // The underlying notarized TLS session produced by the notary.
    session: NotarizedSession,
    // The HTTP requests/responses parsed out of the session transcript.
    transcript: HttpTranscript,
}
impl NotarizedHttpSession {
    /// Creates a new notarized HTTP session.
    ///
    /// Hidden from docs: intended for construction by library internals, not
    /// end users.
    #[doc(hidden)]
    pub fn new(session: NotarizedSession, transcript: HttpTranscript) -> Self {
        Self {
            session,
            transcript,
        }
    }

    /// Returns the notarized TLS session.
    pub fn session(&self) -> &NotarizedSession {
        &self.session
    }

    /// Returns the HTTP transcript.
    pub fn transcript(&self) -> &HttpTranscript {
        &self.transcript
    }

    /// Returns a proof for the TLS session.
    pub fn session_proof(&self) -> SessionProof {
        self.session.session_proof()
    }
}

View File

@@ -1,7 +1,7 @@
use std::error::Error;
use spansy::{json::KeyValue, Spanned};
use tlsn_core::{commitment::TranscriptCommitmentBuilder, Direction};
use tlsn_core::transcript::{Direction, TranscriptCommitConfigBuilder};
use crate::json::{Array, Bool, JsonValue, Null, Number, Object, String as JsonString};
@@ -60,7 +60,7 @@ pub trait JsonCommit {
/// * `direction` - The direction of the data (sent or received).
fn commit_value(
&mut self,
builder: &mut TranscriptCommitmentBuilder,
builder: &mut TranscriptCommitConfigBuilder,
value: &JsonValue,
direction: Direction,
) -> Result<(), JsonCommitError> {
@@ -86,7 +86,7 @@ pub trait JsonCommit {
/// * `direction` - The direction of the data (sent or received).
fn commit_object(
&mut self,
builder: &mut TranscriptCommitmentBuilder,
builder: &mut TranscriptCommitConfigBuilder,
object: &Object,
direction: Direction,
) -> Result<(), JsonCommitError> {
@@ -113,7 +113,7 @@ pub trait JsonCommit {
/// * `direction` - The direction of the data (sent or received).
fn commit_key_value(
&mut self,
builder: &mut TranscriptCommitmentBuilder,
builder: &mut TranscriptCommitConfigBuilder,
kv: &KeyValue,
direction: Direction,
) -> Result<(), JsonCommitError> {
@@ -141,7 +141,7 @@ pub trait JsonCommit {
/// * `direction` - The direction of the data (sent or received).
fn commit_array(
&mut self,
builder: &mut TranscriptCommitmentBuilder,
builder: &mut TranscriptCommitConfigBuilder,
array: &Array,
direction: Direction,
) -> Result<(), JsonCommitError> {
@@ -172,7 +172,7 @@ pub trait JsonCommit {
/// * `direction` - The direction of the data (sent or received).
fn commit_string(
&mut self,
builder: &mut TranscriptCommitmentBuilder,
builder: &mut TranscriptCommitConfigBuilder,
string: &JsonString,
direction: Direction,
) -> Result<(), JsonCommitError> {
@@ -196,7 +196,7 @@ pub trait JsonCommit {
/// * `direction` - The direction of the data (sent or received).
fn commit_number(
&mut self,
builder: &mut TranscriptCommitmentBuilder,
builder: &mut TranscriptCommitConfigBuilder,
number: &Number,
direction: Direction,
) -> Result<(), JsonCommitError> {
@@ -215,7 +215,7 @@ pub trait JsonCommit {
/// * `direction` - The direction of the data (sent or received).
fn commit_bool(
&mut self,
builder: &mut TranscriptCommitmentBuilder,
builder: &mut TranscriptCommitConfigBuilder,
boolean: &Bool,
direction: Direction,
) -> Result<(), JsonCommitError> {
@@ -234,7 +234,7 @@ pub trait JsonCommit {
/// * `direction` - The direction of the data (sent or received).
fn commit_null(
&mut self,
builder: &mut TranscriptCommitmentBuilder,
builder: &mut TranscriptCommitConfigBuilder,
null: &Null,
direction: Direction,
) -> Result<(), JsonCommitError> {

View File

@@ -4,6 +4,7 @@ version = "0.1.0-alpha.6"
edition = "2021"
[dependencies]
tlsn-core = { workspace = true }
tlsn-common = { workspace = true }
tlsn-verifier = { workspace = true }
@@ -22,10 +23,12 @@ http = { workspace = true }
http-body-util = { workspace = true }
hyper = { workspace = true, features = ["client", "http1", "server"] }
hyper-util = { workspace = true, features = ["full"] }
k256 = { workspace = true }
notify = { version = "6.1.1", default-features = false, features = [
"macos_kqueue",
] }
p256 = { workspace = true }
pkcs8 = { workspace = true, features = ["pem"] }
rustls = { workspace = true }
rustls-pemfile = { workspace = true }
serde = { workspace = true, features = ["derive"] }
@@ -44,3 +47,4 @@ tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
uuid = { workspace = true, features = ["v4", "fast-rng"] }
ws_stream_tungstenite = { workspace = true, features = ["tokio_io"] }
zeroize = { workspace = true }

View File

@@ -1,9 +1,9 @@
use p256::ecdsa::SigningKey;
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
sync::{Arc, Mutex},
};
use tlsn_core::CryptoProvider;
use crate::{config::NotarizationProperties, domain::auth::AuthorizationWhitelistRecord};
@@ -46,7 +46,7 @@ pub enum ClientType {
/// Global data that needs to be shared with the axum handlers
#[derive(Clone, Debug)]
pub struct NotaryGlobals {
pub notary_signing_key: SigningKey,
pub crypto_provider: Arc<CryptoProvider>,
pub notarization_config: NotarizationProperties,
/// A temporary storage to store session_id
pub store: Arc<Mutex<HashMap<String, ()>>>,
@@ -56,12 +56,12 @@ pub struct NotaryGlobals {
impl NotaryGlobals {
pub fn new(
notary_signing_key: SigningKey,
crypto_provider: Arc<CryptoProvider>,
notarization_config: NotarizationProperties,
authorization_whitelist: Option<Arc<Mutex<HashMap<String, AuthorizationWhitelistRecord>>>>,
) -> Self {
Self {
notary_signing_key,
crypto_provider,
notarization_config,
store: Default::default(),
authorization_whitelist,

View File

@@ -4,7 +4,7 @@ use eyre::Report;
use std::error::Error;
use tlsn_common::config::ProtocolConfigValidatorBuilderError;
use tlsn_verifier::tls::{VerifierConfigBuilderError, VerifierError};
use tlsn_verifier::{VerifierConfigBuilderError, VerifierError};
#[derive(Debug, thiserror::Error)]
pub enum NotaryServerError {

View File

@@ -5,6 +5,7 @@ mod middleware;
mod server;
mod server_tracing;
mod service;
mod signing;
mod util;
pub use config::{

View File

@@ -13,7 +13,7 @@ use hyper_util::rt::TokioIo;
use notify::{
event::ModifyKind, Error, Event, EventKind, RecommendedWatcher, RecursiveMode, Watcher,
};
use p256::{ecdsa::SigningKey, pkcs8::DecodePrivateKey};
use pkcs8::DecodePrivateKey;
use rustls::{Certificate, PrivateKey, ServerConfig};
use std::{
collections::HashMap,
@@ -24,11 +24,13 @@ use std::{
pin::Pin,
sync::{Arc, Mutex},
};
use tokio::{fs::File, net::TcpListener};
use tlsn_core::CryptoProvider;
use tokio::{fs::File, io::AsyncReadExt, net::TcpListener};
use tokio_rustls::TlsAcceptor;
use tower_http::cors::CorsLayer;
use tower_service::Service;
use tracing::{debug, error, info};
use zeroize::Zeroize;
use crate::{
config::{NotaryServerProperties, NotarySigningKeyProperties},
@@ -40,6 +42,7 @@ use crate::{
error::NotaryServerError,
middleware::AuthorizationMiddleware,
service::{initialize, upgrade_protocol},
signing::AttestationKey,
util::parse_csv_file,
};
@@ -47,7 +50,9 @@ use crate::{
#[tracing::instrument(skip(config))]
pub async fn run_server(config: &NotaryServerProperties) -> Result<(), NotaryServerError> {
// Load the private key for notarized transcript signing
let notary_signing_key = load_notary_signing_key(&config.notary_key).await?;
let attestation_key = load_attestation_key(&config.notary_key).await?;
let crypto_provider = build_crypto_provider(attestation_key);
// Build TLS acceptor if it is turned on
let tls_acceptor = if !config.tls.enabled {
debug!("Skipping TLS setup as it is turned off.");
@@ -95,7 +100,7 @@ pub async fn run_server(config: &NotaryServerProperties) -> Result<(), NotarySer
let protocol = Arc::new(http1::Builder::new());
let notary_globals = NotaryGlobals::new(
notary_signing_key,
Arc::new(crypto_provider),
config.notarization.clone(),
authorization_whitelist,
);
@@ -216,15 +221,30 @@ pub async fn run_server(config: &NotaryServerProperties) -> Result<(), NotarySer
}
}
/// Load notary signing key from static file
async fn load_notary_signing_key(config: &NotarySigningKeyProperties) -> Result<SigningKey> {
fn build_crypto_provider(attestation_key: AttestationKey) -> CryptoProvider {
let mut provider = CryptoProvider::default();
provider.signer.set_signer(attestation_key.into_signer());
provider
}
/// Load notary signing key for attestations from static file
async fn load_attestation_key(config: &NotarySigningKeyProperties) -> Result<AttestationKey> {
debug!("Loading notary server's signing key");
let notary_signing_key = SigningKey::read_pkcs8_pem_file(&config.private_key_pem_path)
let mut file = File::open(&config.private_key_pem_path).await?;
let mut pem = String::new();
file.read_to_string(&mut pem)
.await
.map_err(|_| eyre!("pem file does not contain valid UTF-8"))?;
let key = AttestationKey::from_pkcs8_pem(&pem)
.map_err(|err| eyre!("Failed to load notary signing key for notarization: {err}"))?;
pem.zeroize();
debug!("Successfully loaded notary server's signing key!");
Ok(notary_signing_key)
Ok(key)
}
/// Read a PEM-formatted file and return its buffer reader
@@ -355,13 +375,12 @@ mod test {
}
#[tokio::test]
async fn test_load_notary_signing_key() {
async fn test_load_attestation_key() {
let config = NotarySigningKeyProperties {
private_key_pem_path: "./fixture/notary/notary.key".to_string(),
public_key_pem_path: "./fixture/notary/notary.pub".to_string(),
};
let result: Result<SigningKey> = load_notary_signing_key(&config).await;
assert!(result.is_ok(), "Could not load notary private key");
load_attestation_key(&config).await.unwrap();
}
#[tokio::test]

View File

@@ -2,6 +2,8 @@ pub mod axum_websocket;
pub mod tcp;
pub mod websocket;
use std::sync::Arc;
use async_trait::async_trait;
use axum::{
extract::{rejection::JsonRejection, FromRequestParts, Query, State},
@@ -9,9 +11,9 @@ use axum::{
response::{IntoResponse, Json, Response},
};
use axum_macros::debug_handler;
use p256::ecdsa::{Signature, SigningKey};
use tlsn_common::config::ProtocolConfigValidator;
use tlsn_verifier::tls::{Verifier, VerifierConfig};
use tlsn_core::{attestation::AttestationConfig, CryptoProvider};
use tlsn_verifier::{Verifier, VerifierConfig};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::TokioAsyncReadCompatExt;
use tracing::{debug, error, info, trace};
@@ -172,25 +174,30 @@ pub async fn initialize(
/// Run the notarization
pub async fn notary_service<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
socket: T,
signing_key: &SigningKey,
crypto_provider: Arc<CryptoProvider>,
session_id: &str,
max_sent_data: usize,
max_recv_data: usize,
) -> Result<(), NotaryServerError> {
debug!(?session_id, "Starting notarization...");
let config_validator = ProtocolConfigValidator::builder()
.max_sent_data(max_sent_data)
.max_recv_data(max_recv_data)
.build()?;
let att_config = AttestationConfig::builder()
.supported_signature_algs(Vec::from_iter(crypto_provider.signer.supported_algs()))
.build()
.map_err(|err| NotaryServerError::Notarization(Box::new(err)))?;
let config = VerifierConfig::builder()
.id(session_id)
.protocol_config_validator(config_validator)
.protocol_config_validator(
ProtocolConfigValidator::builder()
.max_sent_data(max_sent_data)
.max_recv_data(max_recv_data)
.build()?,
)
.crypto_provider(crypto_provider)
.build()?;
Verifier::new(config)
.notarize::<_, Signature>(socket.compat(), signing_key)
.notarize(socket.compat(), &att_config)
.await?;
Ok(())

View File

@@ -87,7 +87,7 @@ pub async fn tcp_notarize(
debug!(?session_id, "Upgraded to tcp connection");
match notary_service(
stream,
&notary_globals.notary_signing_key,
notary_globals.crypto_provider.clone(),
&session_id,
notary_globals.notarization_config.max_sent_data,
notary_globals.notarization_config.max_recv_data,

View File

@@ -17,7 +17,7 @@ pub async fn websocket_notarize(
let stream = WsStream::new(socket.into_inner());
match notary_service(
stream,
&notary_globals.notary_signing_key,
notary_globals.crypto_provider.clone(),
&session_id,
notary_globals.notarization_config.max_sent_data,
notary_globals.notarization_config.max_recv_data,

View File

@@ -0,0 +1,74 @@
use core::fmt;
use pkcs8::{der::Encode, AssociatedOid, DecodePrivateKey, ObjectIdentifier, PrivateKeyInfo};
use tlsn_core::signing::{Secp256k1Signer, Secp256r1Signer, SignatureAlgId, Signer};
use tracing::error;
/// A cryptographic key used for signing attestations.
///
/// Wraps a curve-specific ECDSA signing key together with the identifier of
/// the signature algorithm it implements.
pub struct AttestationKey {
    // Which signature algorithm this key produces (secp256k1 or secp256r1).
    alg_id: SignatureAlgId,
    // The underlying curve-specific ECDSA private key.
    key: SigningKey,
}
impl TryFrom<PrivateKeyInfo<'_>> for AttestationKey {
    type Error = pkcs8::Error;

    /// Parses an attestation key from PKCS#8 private-key info.
    ///
    /// Only elliptic-curve keys are accepted; the curve is selected from the
    /// algorithm parameters OID (secp256k1 or NIST P-256). Any other key
    /// algorithm or curve is rejected with `pkcs8::Error::KeyMalformed`
    /// (after logging the offending OID).
    fn try_from(pkcs8: PrivateKeyInfo<'_>) -> Result<Self, Self::Error> {
        // OID for `id-ecPublicKey` (RFC 5480), common to all EC private keys.
        const OID_EC_PUBLIC_KEY: ObjectIdentifier =
            ObjectIdentifier::new_unwrap("1.2.840.10045.2.1");

        // For now we only support elliptic curve keys
        if pkcs8.algorithm.oid != OID_EC_PUBLIC_KEY {
            error!("unsupported key algorithm OID: {:?}", pkcs8.algorithm.oid);
            return Err(pkcs8::Error::KeyMalformed);
        }

        // Dispatch on the curve OID carried in the algorithm parameters,
        // re-encoding the full PKCS#8 document for the curve-specific parser.
        let (alg_id, key) = match pkcs8.algorithm.parameters_oid()? {
            k256::Secp256k1::OID => {
                let key = k256::ecdsa::SigningKey::from_pkcs8_der(&pkcs8.to_der()?)
                    .map_err(|_| pkcs8::Error::KeyMalformed)?;
                (SignatureAlgId::SECP256K1, SigningKey::Secp256k1(key))
            }
            p256::NistP256::OID => {
                let key = p256::ecdsa::SigningKey::from_pkcs8_der(&pkcs8.to_der()?)
                    .map_err(|_| pkcs8::Error::KeyMalformed)?;
                (SignatureAlgId::SECP256R1, SigningKey::Secp256r1(key))
            }
            oid => {
                error!("unsupported curve OID: {:?}", oid);
                return Err(pkcs8::Error::KeyMalformed);
            }
        };

        Ok(Self { alg_id, key })
    }
}
impl AttestationKey {
    /// Creates a new signer using this key.
    ///
    /// Consumes the key and returns a boxed signer matching the key's curve.
    pub fn into_signer(self) -> Box<dyn Signer + Send + Sync> {
        match self.key {
            SigningKey::Secp256k1(key) => {
                // The raw scalar bytes come from a key that already passed
                // PKCS#8 parsing, so signer construction is expected to succeed.
                Box::new(Secp256k1Signer::new(&key.to_bytes()).expect("key should be valid"))
            }
            SigningKey::Secp256r1(key) => {
                Box::new(Secp256r1Signer::new(&key.to_bytes()).expect("key should be valid"))
            }
        }
    }
}
// Manual Debug impl: prints only the algorithm id and elides the secret key
// material so it can never leak into logs.
impl fmt::Debug for AttestationKey {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("AttestationKey")
            .field("alg_id", &self.alg_id)
            .finish_non_exhaustive()
    }
}
// Curve-specific ECDSA signing-key variants supported for attestations.
enum SigningKey {
    Secp256k1(k256::ecdsa::SigningKey),
    Secp256r1(p256::ecdsa::SigningKey),
}

View File

@@ -11,12 +11,15 @@ tls-server-fixture = { workspace = true }
tlsn-common = { workspace = true }
tlsn-prover = { workspace = true }
tlsn-tls-core = { workspace = true }
tlsn-core = { workspace = true }
async-tungstenite = { workspace = true, features = ["tokio-native-tls"] }
http = { workspace = true }
http-body-util = { workspace = true }
hyper = { workspace = true, features = ["client", "http1", "server"] }
hyper-tls = { version = "0.6", features = ["vendored"] } # specify vendored feature to use statically linked copy of OpenSSL
hyper-tls = { version = "0.6", features = [
"vendored",
] } # specify vendored feature to use statically linked copy of OpenSSL
hyper-util = { workspace = true, features = ["full"] }
rstest = { workspace = true }
rustls = { workspace = true }

Some files were not shown because too many files have changed in this diff Show More