Mirror of https://github.com/tlsnotary/tlsn.git — synced 2026-01-12 16:18:43 -05:00

Compare commits · 1 commit

| Author | SHA1 | Date |
|---|---|---|
| | 2dcde0d954 | |
8  .github/workflows/ci.yml (vendored)
@@ -18,10 +18,10 @@ env:
# We need a higher number of parallel rayon tasks than the default (which is 4)
# in order to prevent a deadlock, c.f.
# - https://github.com/tlsnotary/tlsn/issues/548
# - https://github.com/privacy-ethereum/mpz/issues/178
# - https://github.com/privacy-scaling-explorations/mpz/issues/178
# 32 seems to be big enough for the foreseeable future
RAYON_NUM_THREADS: 32
RUST_VERSION: 1.90.0
RUST_VERSION: 1.89.0

jobs:
clippy:
@@ -32,7 +32,7 @@ jobs:
uses: actions/checkout@v4

- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
uses: dtolnay/rust-toolchain@stable
with:
toolchain: ${{ env.RUST_VERSION }}
components: clippy
@@ -41,7 +41,7 @@ jobs:
uses: Swatinem/rust-cache@v2.7.7

- name: Clippy
run: cargo clippy --keep-going --all-features --all-targets --locked
run: cargo clippy --keep-going --all-features --all-targets --locked -- -D warnings

fmt:
name: Check formatting
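As an aside (this code is not part of the repository), the RAYON_NUM_THREADS comment above concerns rayon's global thread pool: the environment variable and the programmatic setting below are equivalent ways of raising the pool size beyond the runner's core count.

```rust
// Illustrative only: rayon honours RAYON_NUM_THREADS for its default global pool;
// the same limit can be set in code before the pool is first used.
fn main() {
    rayon::ThreadPoolBuilder::new()
        .num_threads(32)
        .build_global()
        .expect("the global pool has not been initialized yet");
}
```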
2  .github/workflows/releng.yml (vendored)
@@ -6,7 +6,7 @@ on:
tag:
description: 'Tag to publish to NPM'
required: true
default: 'v0.1.0-alpha.13'
default: 'v0.1.0-alpha.13-pre'

jobs:
release:
1  .github/workflows/rustdoc.yml (vendored)
@@ -23,6 +23,7 @@ jobs:
- name: "rustdoc"
run: crates/wasm/build-docs.sh


- name: Deploy
uses: peaceiris/actions-gh-pages@v3
if: ${{ github.ref == 'refs/heads/dev' }}
3581  Cargo.lock (generated)

File diff suppressed because it is too large
41  Cargo.toml
@@ -22,7 +22,10 @@ members = [
"crates/harness/executor",
"crates/harness/runner",
"crates/harness/plot",
"crates/sdk-core",
"crates/pdk",
"crates/tlsn",
"crates/sdk-plugin-test",
]
resolver = "2"

@@ -39,8 +42,6 @@ opt-level = 1
[profile.wasm]
inherits = "release"
lto = true
panic = "abort"
codegen-units = 1

[workspace.dependencies]
tls-server-fixture = { path = "crates/tls/server-fixture" }
@@ -53,6 +54,7 @@ tlsn-formats = { path = "crates/formats" }
tlsn-hmac-sha256 = { path = "crates/components/hmac-sha256" }
tlsn-key-exchange = { path = "crates/components/key-exchange" }
tlsn-mpc-tls = { path = "crates/mpc-tls" }
tlsn-pdk = { path = "crates/pdk" }
tlsn-server-fixture = { path = "crates/server-fixture/server" }
tlsn-server-fixture-certs = { path = "crates/server-fixture/certs" }
tlsn-tls-backend = { path = "crates/tls/backend" }
@@ -66,28 +68,27 @@ tlsn-harness-runner = { path = "crates/harness/runner" }
tlsn-wasm = { path = "crates/wasm" }
tlsn = { path = "crates/tlsn" }

mpz-circuits = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-memory-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-common = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-vm-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-garble = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-garble-core = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-ole = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-ot = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-share-conversion = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-fields = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-zk = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-hash = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-ideal-vm = { git = "https://github.com/privacy-ethereum/mpz", tag = "v0.1.0-alpha.4" }
mpz-circuits = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "1b00912" }
mpz-memory-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "1b00912" }
mpz-common = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "1b00912" }
mpz-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "1b00912" }
mpz-vm-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "1b00912" }
mpz-garble = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "1b00912" }
mpz-garble-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "1b00912" }
mpz-ole = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "1b00912" }
mpz-ot = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "1b00912" }
mpz-share-conversion = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "1b00912" }
mpz-fields = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "1b00912" }
mpz-zk = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "1b00912" }
mpz-hash = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "1b00912" }

rangeset = { version = "0.2" }
serio = { version = "0.2" }
spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
uid-mux = { version = "0.2" }
websocket-relay = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
futures-plex = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "eb48413" }

aead = { version = "0.4" }
aes = { version = "0.8" }
aes-gcm = { version = "0.9" }
anyhow = { version = "1.0" }
@@ -111,7 +112,7 @@ elliptic-curve = { version = "0.13" }
enum-try-as-inner = { version = "0.1" }
env_logger = { version = "0.10" }
futures = { version = "0.3" }
futures-rustls = { version = "0.25" }
futures-rustls = { version = "0.26" }
generic-array = { version = "0.14" }
ghash = { version = "0.5" }
hex = { version = "0.4" }
@@ -165,5 +166,7 @@ web-spawn = { version = "0.2" }
web-time = { version = "0.2" }
webpki-roots = { version = "1.0" }
webpki-root-certs = { version = "1.0" }
ws_stream_wasm = { version = "0.7.5" }
# Use the patched ws_stream_wasm to fix the issue https://github.com/najamelan/ws_stream_wasm/issues/12#issuecomment-1711902958
ws_stream_wasm = { git = "https://github.com/tlsnotary/ws_stream_wasm", rev = "2ed12aad9f0236e5321f577672f309920b2aef51" }
zeroize = { version = "1.8" }
zerocopy = { version = "0.8" }
@@ -1,6 +1,6 @@
[package]
name = "tlsn-attestation"
version = "0.1.0-alpha.13"
version = "0.1.0-alpha.13-pre"
edition = "2024"

[features]
@@ -23,9 +23,9 @@ thiserror = { workspace = true }
tiny-keccak = { workspace = true, features = ["keccak"] }

[dev-dependencies]
alloy-primitives = { version = "1.3.1", default-features = false }
alloy-signer = { version = "1.0", default-features = false }
alloy-signer-local = { version = "1.0", default-features = false }
alloy-primitives = { version = "0.8.22", default-features = false }
alloy-signer = { version = "0.12", default-features = false }
alloy-signer-local = { version = "0.12", default-features = false }
rand06-compat = { workspace = true }
rstest = { workspace = true }
tlsn-core = { workspace = true, features = ["fixtures"] }
@@ -5,7 +5,7 @@ use rand::{Rng, rng};
use tlsn_core::{
connection::{ConnectionInfo, ServerEphemKey},
hash::HashAlgId,
transcript::{TranscriptCommitment, encoding::EncoderSecret},
transcript::TranscriptCommitment,
};

use crate::{
@@ -25,7 +25,6 @@ pub struct Sign {
connection_info: Option<ConnectionInfo>,
server_ephemeral_key: Option<ServerEphemKey>,
cert_commitment: ServerCertCommitment,
encoder_secret: Option<EncoderSecret>,
extensions: Vec<Extension>,
transcript_commitments: Vec<TranscriptCommitment>,
}
@@ -87,7 +86,6 @@ impl<'a> AttestationBuilder<'a, Accept> {
connection_info: None,
server_ephemeral_key: None,
cert_commitment,
encoder_secret: None,
transcript_commitments: Vec::new(),
extensions,
},
@@ -108,12 +106,6 @@ impl AttestationBuilder<'_, Sign> {
self
}

/// Sets the secret for encoding commitments.
pub fn encoder_secret(&mut self, secret: EncoderSecret) -> &mut Self {
self.state.encoder_secret = Some(secret);
self
}

/// Adds an extension to the attestation.
pub fn extension(&mut self, extension: Extension) -> &mut Self {
self.state.extensions.push(extension);
@@ -137,7 +129,6 @@ impl AttestationBuilder<'_, Sign> {
connection_info,
server_ephemeral_key,
cert_commitment,
encoder_secret,
extensions,
transcript_commitments,
} = self.state;
@@ -168,7 +159,6 @@ impl AttestationBuilder<'_, Sign> {
AttestationBuilderError::new(ErrorKind::Field, "handshake data was not set")
})?),
cert_commitment: field_id.next(cert_commitment),
encoder_secret: encoder_secret.map(|secret| field_id.next(secret)),
extensions: extensions
.into_iter()
.map(|extension| field_id.next(extension))
@@ -219,7 +219,7 @@ use tlsn_core::{
connection::{ConnectionInfo, ServerEphemKey},
hash::{Hash, HashAlgorithm, TypedHash},
merkle::MerkleTree,
transcript::{TranscriptCommitment, encoding::EncoderSecret},
transcript::TranscriptCommitment,
};

use crate::{
@@ -327,7 +327,6 @@ pub struct Body {
connection_info: Field<ConnectionInfo>,
server_ephemeral_key: Field<ServerEphemKey>,
cert_commitment: Field<ServerCertCommitment>,
encoder_secret: Option<Field<EncoderSecret>>,
extensions: Vec<Field<Extension>>,
transcript_commitments: Vec<Field<TranscriptCommitment>>,
}
@@ -373,7 +372,6 @@ impl Body {
connection_info: conn_info,
server_ephemeral_key,
cert_commitment,
encoder_secret,
extensions,
transcript_commitments,
} = self;
@@ -391,13 +389,6 @@ impl Body {
),
];

if let Some(encoder_secret) = encoder_secret {
fields.push((
encoder_secret.id,
hasher.hash_separated(&encoder_secret.data),
));
}

for field in extensions.iter() {
fields.push((field.id, hasher.hash_separated(&field.data)));
}
@@ -91,11 +91,6 @@ impl Presentation {
transcript.verify_with_provider(
&provider.hash,
&attestation.body.connection_info().transcript_length,
attestation
.body
.encoder_secret
.as_ref()
.map(|field| &field.data),
attestation.body.transcript_commitments(),
)
})
@@ -49,6 +49,5 @@ impl_domain_separator!(tlsn_core::connection::ConnectionInfo);
impl_domain_separator!(tlsn_core::connection::CertBinding);
impl_domain_separator!(tlsn_core::transcript::TranscriptCommitment);
impl_domain_separator!(tlsn_core::transcript::TranscriptSecret);
impl_domain_separator!(tlsn_core::transcript::encoding::EncoderSecret);
impl_domain_separator!(tlsn_core::transcript::encoding::EncodingCommitment);
impl_domain_separator!(tlsn_core::transcript::hash::PlaintextHash);
@@ -64,6 +64,7 @@ fn test_api() {

let encoding_commitment = EncodingCommitment {
root: encoding_tree.root(),
secret: encoder_secret(),
};

let request_config = RequestConfig::default();
@@ -95,7 +96,6 @@ fn test_api() {
.connection_info(connection_info.clone())
// Server key Notary received during handshake
.server_ephemeral_key(server_ephemeral_key)
.encoder_secret(encoder_secret())
.transcript_commitments(vec![TranscriptCommitment::Encoding(encoding_commitment)]);

let attestation = attestation_builder.build(&provider).unwrap();
@@ -5,7 +5,7 @@ description = "This crate provides implementations of ciphers for two parties"
|
||||
keywords = ["tls", "mpc", "2pc", "aes"]
|
||||
categories = ["cryptography"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
version = "0.1.0-alpha.13"
|
||||
version = "0.1.0-alpha.13-pre"
|
||||
edition = "2021"
|
||||
|
||||
[lints]
|
||||
|
||||
@@ -1,6 +1,6 @@
[package]
name = "tlsn-deap"
version = "0.1.0-alpha.13"
version = "0.1.0-alpha.13-pre"
edition = "2021"

[lints]
@@ -391,7 +391,7 @@ mod tests {
memory::{binary::U8, correlated::Delta, Array},
prelude::*,
};
use mpz_zk::{Prover, ProverConfig, Verifier, VerifierConfig};
use mpz_zk::{Prover, Verifier};
use rand::{rngs::StdRng, SeedableRng};

use super::*;
@@ -408,8 +408,8 @@ mod tests {

let gb = Garbler::new(cot_send, [0u8; 16], delta_mpc);
let ev = Evaluator::new(cot_recv);
let prover = Prover::new(ProverConfig::default(), rcot_recv);
let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send);
let prover = Prover::new(rcot_recv);
let verifier = Verifier::new(delta_zk, rcot_send);

let mut leader = Deap::new(Role::Leader, gb, prover);
let mut follower = Deap::new(Role::Follower, ev, verifier);
@@ -488,8 +488,8 @@ mod tests {

let gb = Garbler::new(cot_send, [0u8; 16], delta_mpc);
let ev = Evaluator::new(cot_recv);
let prover = Prover::new(ProverConfig::default(), rcot_recv);
let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send);
let prover = Prover::new(rcot_recv);
let verifier = Verifier::new(delta_zk, rcot_send);

let mut leader = Deap::new(Role::Leader, gb, prover);
let mut follower = Deap::new(Role::Follower, ev, verifier);
@@ -574,8 +574,8 @@ mod tests {

let gb = Garbler::new(cot_send, [1u8; 16], delta_mpc);
let ev = Evaluator::new(cot_recv);
let prover = Prover::new(ProverConfig::default(), rcot_recv);
let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send);
let prover = Prover::new(rcot_recv);
let verifier = Verifier::new(delta_zk, rcot_send);

let mut leader = Deap::new(Role::Leader, gb, prover);
let mut follower = Deap::new(Role::Follower, ev, verifier);
@@ -5,7 +5,7 @@ description = "A 2PC implementation of TLS HMAC-SHA256 PRF"
|
||||
keywords = ["tls", "mpc", "2pc", "hmac", "sha256"]
|
||||
categories = ["cryptography"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
version = "0.1.0-alpha.13"
|
||||
version = "0.1.0-alpha.13-pre"
|
||||
edition = "2021"
|
||||
|
||||
[lints]
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "Implementation of the 3-party key-exchange protocol"
|
||||
keywords = ["tls", "mpc", "2pc", "pms", "key-exchange"]
|
||||
categories = ["cryptography"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
version = "0.1.0-alpha.13"
|
||||
version = "0.1.0-alpha.13-pre"
|
||||
edition = "2021"
|
||||
|
||||
[lints]
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "Core types for TLSNotary"
|
||||
keywords = ["tls", "mpc", "2pc", "types"]
|
||||
categories = ["cryptography"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
version = "0.1.0-alpha.13"
|
||||
version = "0.1.0-alpha.13-pre"
|
||||
edition = "2021"
|
||||
|
||||
[lints]
|
||||
@@ -13,13 +13,7 @@ workspace = true
|
||||
|
||||
[features]
|
||||
default = []
|
||||
fixtures = [
|
||||
"dep:hex",
|
||||
"dep:tlsn-data-fixtures",
|
||||
"dep:aead",
|
||||
"dep:aes-gcm",
|
||||
"dep:generic-array",
|
||||
]
|
||||
fixtures = ["dep:hex", "dep:tlsn-data-fixtures"]
|
||||
|
||||
[dependencies]
|
||||
tlsn-data-fixtures = { workspace = true, optional = true }
|
||||
@@ -27,11 +21,9 @@ tlsn-tls-core = { workspace = true, features = ["serde"] }
|
||||
tlsn-utils = { workspace = true }
|
||||
rangeset = { workspace = true, features = ["serde"] }
|
||||
|
||||
aead = { workspace = true, features = ["alloc"], optional = true }
|
||||
aes-gcm = { workspace = true, optional = true }
|
||||
generic-array = { workspace = true, optional = true }
|
||||
bimap = { version = "0.6", features = ["serde"] }
|
||||
blake3 = { workspace = true }
|
||||
derive_builder = { workspace = true }
|
||||
hex = { workspace = true, optional = true }
|
||||
opaque-debug = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
@@ -40,20 +32,17 @@ rand_chacha = { workspace = true }
|
||||
rs_merkle = { workspace = true, features = ["serde"] }
|
||||
rstest = { workspace = true, optional = true }
|
||||
serde = { workspace = true }
|
||||
semver = { workspace = true, features = ["serde"] }
|
||||
sha2 = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
tiny-keccak = { workspace = true, features = ["keccak"] }
|
||||
web-time = { workspace = true }
|
||||
webpki-roots = { workspace = true }
|
||||
rustls-webpki = { workspace = true, features = ["ring"] }
|
||||
rustls-pki-types = { workspace = true }
|
||||
itybity = { workspace = true }
|
||||
zeroize = { workspace = true, features = ["zeroize_derive"] }
|
||||
zeroize = { workspace = true, features = ["derive"] }
|
||||
|
||||
[dev-dependencies]
|
||||
aead = { workspace = true, features = ["alloc"] }
|
||||
aes-gcm = { workspace = true }
|
||||
generic-array = { workspace = true }
|
||||
bincode = { workspace = true }
|
||||
hex = { workspace = true }
|
||||
rstest = { workspace = true }
|
||||
|
||||
@@ -1,11 +1,16 @@
|
||||
//! TLSNotary protocol config and config utilities.
|
||||
//! Configuration types.
|
||||
|
||||
use core::fmt;
|
||||
use once_cell::sync::Lazy;
|
||||
use rangeset::ToRangeSet;
|
||||
use semver::Version;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::error::Error;
|
||||
use std::{error::Error, sync::LazyLock};
|
||||
|
||||
pub use tlsn_core::webpki::{CertificateDer, PrivateKeyDer, RootCertStore};
|
||||
use crate::{
|
||||
connection::ServerName,
|
||||
transcript::{Direction, Idx, PartialTranscript, Transcript, TranscriptCommitConfig},
|
||||
webpki::{CertificateDer, PrivateKeyDer, RootCertStore},
|
||||
};
|
||||
|
||||
// Default is 32 bytes to decrypt the TLS protocol messages.
|
||||
const DEFAULT_MAX_RECV_ONLINE: usize = 32;
|
||||
@@ -15,12 +20,160 @@ const DEFAULT_MAX_RECV_ONLINE: usize = 32;
|
||||
const DEFAULT_RECORDS_LIMIT: usize = 256;
|
||||
|
||||
// Current version that is running.
|
||||
static VERSION: Lazy<Version> = Lazy::new(|| {
|
||||
static VERSION: LazyLock<Version> = LazyLock::new(|| {
|
||||
Version::parse(env!("CARGO_PKG_VERSION"))
|
||||
.map_err(|err| ProtocolConfigError::new(ErrorKind::Version, err))
|
||||
.unwrap()
|
||||
});
|
||||
|
||||
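One side of this hunk uses once_cell's `Lazy`, the other the standard library's `std::sync::LazyLock` (stable since Rust 1.80); the two initialize the static on first access in the same way, so only the import and type change. A minimal sketch of the `LazyLock` form, with the error mapping elided:

```rust
use std::sync::LazyLock;

use semver::Version;

// LazyLock runs the closure once, on first access, exactly like once_cell's Lazy,
// so the parsing logic carries over unchanged.
static VERSION: LazyLock<Version> = LazyLock::new(|| {
    Version::parse(env!("CARGO_PKG_VERSION")).expect("the crate version is valid semver")
});
```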
/// Configuration for the prover.
|
||||
#[derive(Debug, Clone, derive_builder::Builder, Serialize, Deserialize)]
|
||||
pub struct ProverConfig {
|
||||
/// The server DNS name.
|
||||
#[builder(setter(into))]
|
||||
server_name: ServerName,
|
||||
/// Protocol configuration to be checked with the verifier.
|
||||
protocol_config: ProtocolConfig,
|
||||
/// TLS configuration.
|
||||
#[builder(default)]
|
||||
tls_config: TlsConfig,
|
||||
}
|
||||
|
||||
impl ProverConfig {
|
||||
/// Creates a new builder for `ProverConfig`.
|
||||
pub fn builder() -> ProverConfigBuilder {
|
||||
ProverConfigBuilder::default()
|
||||
}
|
||||
|
||||
/// Returns the server DNS name.
|
||||
pub fn server_name(&self) -> &ServerName {
|
||||
&self.server_name
|
||||
}
|
||||
|
||||
/// Returns the protocol configuration.
|
||||
pub fn protocol_config(&self) -> &ProtocolConfig {
|
||||
&self.protocol_config
|
||||
}
|
||||
|
||||
/// Returns the TLS configuration.
|
||||
pub fn tls_config(&self) -> &TlsConfig {
|
||||
&self.tls_config
|
||||
}
|
||||
}
|
||||
|
||||
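A hypothetical usage sketch (not part of the diff) of the derive_builder-generated builder for `ProverConfig`: the `server_name` and `protocol_config` values are taken as parameters here because their construction is outside this hunk, `tls_config` falls back to its `#[builder(default)]`, and the `ProverConfigBuilderError` name is assumed from derive_builder's convention.

```rust
// Sketch only: build a ProverConfig from values prepared elsewhere.
fn make_prover_config(
    server_name: ServerName,
    protocol_config: ProtocolConfig,
) -> Result<ProverConfig, ProverConfigBuilderError> {
    ProverConfig::builder()
        .server_name(server_name)
        .protocol_config(protocol_config)
        // tls_config is #[builder(default)], so it can be omitted.
        .build()
}
```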
/// Configuration for the [`Verifier`](crate::tls::Verifier).
|
||||
#[allow(missing_docs)]
|
||||
#[derive(derive_builder::Builder, Serialize, Deserialize)]
|
||||
#[builder(pattern = "owned")]
|
||||
pub struct VerifierConfig {
|
||||
protocol_config_validator: ProtocolConfigValidator,
|
||||
#[builder(setter(strip_option), default)]
|
||||
root_store: Option<RootCertStore>,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for VerifierConfig {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("VerifierConfig")
|
||||
.field("protocol_config_validator", &self.protocol_config_validator)
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
impl VerifierConfig {
|
||||
/// Creates a new configuration builder.
|
||||
pub fn builder() -> VerifierConfigBuilder {
|
||||
VerifierConfigBuilder::default()
|
||||
}
|
||||
|
||||
/// Returns the protocol configuration validator.
|
||||
pub fn protocol_config_validator(&self) -> &ProtocolConfigValidator {
|
||||
&self.protocol_config_validator
|
||||
}
|
||||
|
||||
/// Returns the root certificate store.
|
||||
pub fn root_store(&self) -> Option<&RootCertStore> {
|
||||
self.root_store.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration for the prover's TLS connection.
|
||||
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct TlsConfig {
|
||||
/// Root certificates.
|
||||
root_store: Option<RootCertStore>,
|
||||
/// Certificate chain and a matching private key for client
|
||||
/// authentication.
|
||||
client_auth: Option<(Vec<CertificateDer>, PrivateKeyDer)>,
|
||||
}
|
||||
|
||||
impl TlsConfig {
|
||||
/// Creates a new builder for `TlsConfig`.
|
||||
pub fn builder() -> TlsConfigBuilder {
|
||||
TlsConfigBuilder::default()
|
||||
}
|
||||
|
||||
/// Returns the root certificate store.
|
||||
pub fn root_store(&self) -> Option<&RootCertStore> {
|
||||
self.root_store.as_ref()
|
||||
}
|
||||
|
||||
/// Returns a certificate chain and a matching private key for client
|
||||
/// authentication.
|
||||
pub fn client_auth(&self) -> &Option<(Vec<CertificateDer>, PrivateKeyDer)> {
|
||||
&self.client_auth
|
||||
}
|
||||
}
|
||||
|
||||
/// Builder for [`TlsConfig`].
|
||||
#[derive(Debug, Default)]
|
||||
pub struct TlsConfigBuilder {
|
||||
root_store: Option<RootCertStore>,
|
||||
client_auth: Option<(Vec<CertificateDer>, PrivateKeyDer)>,
|
||||
}
|
||||
|
||||
impl TlsConfigBuilder {
|
||||
/// Sets the root certificates to use for verifying the server's
|
||||
/// certificate.
|
||||
pub fn root_store(&mut self, store: RootCertStore) -> &mut Self {
|
||||
self.root_store = Some(store);
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets a DER-encoded certificate chain and a matching private key for
|
||||
/// client authentication.
|
||||
///
|
||||
/// Often the chain will consist of a single end-entity certificate.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `cert_key` - A tuple containing the certificate chain and the private
|
||||
/// key.
|
||||
///
|
||||
/// - Each certificate in the chain must be in the X.509 format.
|
||||
/// - The key must be in the ASN.1 format (either PKCS#8 or PKCS#1).
|
||||
pub fn client_auth(&mut self, cert_key: (Vec<CertificateDer>, PrivateKeyDer)) -> &mut Self {
|
||||
self.client_auth = Some(cert_key);
|
||||
self
|
||||
}
|
||||
|
||||
/// Builds the TLS configuration.
|
||||
pub fn build(self) -> Result<TlsConfig, TlsConfigError> {
|
||||
Ok(TlsConfig {
|
||||
root_store: self.root_store,
|
||||
client_auth: self.client_auth,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
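A hypothetical usage sketch wiring together the `TlsConfigBuilder` methods shown above; loading the `RootCertStore` and the DER-encoded chain and key is outside this diff, so they are taken as parameters.

```rust
// Sketch only: assemble a TlsConfig for client authentication.
fn make_tls_config(
    root_store: RootCertStore,
    cert_chain: Vec<CertificateDer>,
    private_key: PrivateKeyDer,
) -> Result<TlsConfig, TlsConfigError> {
    let mut builder = TlsConfig::builder();
    // Root certificates used to verify the server's certificate chain.
    builder.root_store(root_store);
    // X.509 chain plus matching PKCS#8/PKCS#1 key for client authentication.
    builder.client_auth((cert_chain, private_key));
    builder.build()
}
```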
/// TLS configuration error.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error(transparent)]
|
||||
pub struct TlsConfigError(#[from] ErrorRepr);
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("tls config error")]
|
||||
enum ErrorRepr {}
|
||||
|
||||
/// Protocol configuration to be set up initially by prover and verifier.
|
||||
#[derive(derive_builder::Builder, Clone, Debug, Deserialize, Serialize)]
|
||||
#[builder(build_fn(validate = "Self::validate"))]
|
||||
@@ -192,22 +345,22 @@ impl ProtocolConfigValidator {
|
||||
max_sent_records: Option<usize>,
|
||||
max_recv_records_online: Option<usize>,
|
||||
) -> Result<(), ProtocolConfigError> {
|
||||
if let Some(max_sent_records) = max_sent_records
|
||||
&& max_sent_records > self.max_sent_records
|
||||
{
|
||||
return Err(ProtocolConfigError::max_record_count(format!(
|
||||
"max_sent_records {} is greater than the configured limit {}",
|
||||
max_sent_records, self.max_sent_records,
|
||||
)));
|
||||
if let Some(max_sent_records) = max_sent_records {
|
||||
if max_sent_records > self.max_sent_records {
|
||||
return Err(ProtocolConfigError::max_record_count(format!(
|
||||
"max_sent_records {} is greater than the configured limit {}",
|
||||
max_sent_records, self.max_sent_records,
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(max_recv_records_online) = max_recv_records_online
|
||||
&& max_recv_records_online > self.max_recv_records_online
|
||||
{
|
||||
return Err(ProtocolConfigError::max_record_count(format!(
|
||||
"max_recv_records_online {} is greater than the configured limit {}",
|
||||
max_recv_records_online, self.max_recv_records_online,
|
||||
)));
|
||||
if let Some(max_recv_records_online) = max_recv_records_online {
|
||||
if max_recv_records_online > self.max_recv_records_online {
|
||||
return Err(ProtocolConfigError::max_record_count(format!(
|
||||
"max_recv_records_online {} is greater than the configured limit {}",
|
||||
max_recv_records_online, self.max_recv_records_online,
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
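The two sides of this hunk differ only in syntax: one uses Rust's let-chain form (`if let ... && cond`), the other the equivalent nested `if` blocks accepted by older toolchains. An illustrative sketch of the equivalence (names are made up):

```rust
// Sketch only: the nested form used in one side of the hunk.
fn check(value: Option<usize>, limit: usize) -> Result<(), String> {
    // Let-chain form, needing a toolchain/edition where let chains are stable:
    //
    //     if let Some(v) = value && v > limit { return Err(...); }
    //
    // Nested form, equivalent and accepted more widely:
    if let Some(v) = value {
        if v > limit {
            return Err(format!("{v} is greater than the configured limit {limit}"));
        }
    }
    Ok(())
}
```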
@@ -233,17 +386,15 @@ impl ProtocolConfigValidator {
|
||||
/// situations.
|
||||
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
|
||||
pub enum NetworkSetting {
|
||||
/// Reduces network round-trips at the expense of consuming more network
|
||||
/// bandwidth.
|
||||
/// Prefers a bandwidth-heavy protocol.
|
||||
Bandwidth,
|
||||
/// Reduces network bandwidth utilization at the expense of more network
|
||||
/// round-trips.
|
||||
/// Prefers a latency-heavy protocol.
|
||||
Latency,
|
||||
}
|
||||
|
||||
impl Default for NetworkSetting {
|
||||
fn default() -> Self {
|
||||
Self::Latency
|
||||
Self::Bandwidth
|
||||
}
|
||||
}
|
||||
|
||||
@@ -311,6 +462,180 @@ enum ErrorKind {
|
||||
Version,
|
||||
}
|
||||
|
||||
/// Configuration to prove information to the verifier.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ProveConfig {
|
||||
server_identity: bool,
|
||||
transcript: Option<PartialTranscript>,
|
||||
transcript_commit: Option<TranscriptCommitConfig>,
|
||||
}
|
||||
|
||||
impl ProveConfig {
|
||||
/// Creates a new builder.
|
||||
pub fn builder(transcript: &Transcript) -> ProveConfigBuilder<'_> {
|
||||
ProveConfigBuilder::new(transcript)
|
||||
}
|
||||
|
||||
/// Returns `true` if the server identity is to be proven.
|
||||
pub fn server_identity(&self) -> bool {
|
||||
self.server_identity
|
||||
}
|
||||
|
||||
/// Returns the transcript to be proven.
|
||||
pub fn transcript(&self) -> Option<&PartialTranscript> {
|
||||
self.transcript.as_ref()
|
||||
}
|
||||
|
||||
/// Returns the transcript commitment configuration.
|
||||
pub fn transcript_commit(&self) -> Option<&TranscriptCommitConfig> {
|
||||
self.transcript_commit.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
/// Builder for [`ProveConfig`].
|
||||
#[derive(Debug)]
|
||||
pub struct ProveConfigBuilder<'a> {
|
||||
transcript: &'a Transcript,
|
||||
server_identity: bool,
|
||||
reveal_sent: Idx,
|
||||
reveal_recv: Idx,
|
||||
transcript_commit: Option<TranscriptCommitConfig>,
|
||||
}
|
||||
|
||||
impl<'a> ProveConfigBuilder<'a> {
|
||||
/// Creates a new builder.
|
||||
pub fn new(transcript: &'a Transcript) -> Self {
|
||||
Self {
|
||||
transcript,
|
||||
server_identity: false,
|
||||
reveal_sent: Idx::default(),
|
||||
reveal_recv: Idx::default(),
|
||||
transcript_commit: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Proves the server identity.
|
||||
pub fn server_identity(&mut self) -> &mut Self {
|
||||
self.server_identity = true;
|
||||
self
|
||||
}
|
||||
|
||||
/// Configures transcript commitments.
|
||||
pub fn transcript_commit(&mut self, transcript_commit: TranscriptCommitConfig) -> &mut Self {
|
||||
self.transcript_commit = Some(transcript_commit);
|
||||
self
|
||||
}
|
||||
|
||||
/// Reveals the given ranges of the transcript.
|
||||
pub fn reveal(
|
||||
&mut self,
|
||||
direction: Direction,
|
||||
ranges: &dyn ToRangeSet<usize>,
|
||||
) -> Result<&mut Self, ProveConfigBuilderError> {
|
||||
let idx = Idx::new(ranges.to_range_set());
|
||||
|
||||
if idx.end() > self.transcript.len_of_direction(direction) {
|
||||
return Err(ProveConfigBuilderError(
|
||||
ProveConfigBuilderErrorRepr::IndexOutOfBounds {
|
||||
direction,
|
||||
actual: idx.end(),
|
||||
len: self.transcript.len_of_direction(direction),
|
||||
},
|
||||
));
|
||||
}
|
||||
|
||||
match direction {
|
||||
Direction::Sent => self.reveal_sent.union_mut(&idx),
|
||||
Direction::Received => self.reveal_recv.union_mut(&idx),
|
||||
}
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Reveals the given ranges of the sent data transcript.
|
||||
pub fn reveal_sent(
|
||||
&mut self,
|
||||
ranges: &dyn ToRangeSet<usize>,
|
||||
) -> Result<&mut Self, ProveConfigBuilderError> {
|
||||
self.reveal(Direction::Sent, ranges)
|
||||
}
|
||||
|
||||
/// Reveals the given ranges of the received data transcript.
|
||||
pub fn reveal_recv(
|
||||
&mut self,
|
||||
ranges: &dyn ToRangeSet<usize>,
|
||||
) -> Result<&mut Self, ProveConfigBuilderError> {
|
||||
self.reveal(Direction::Received, ranges)
|
||||
}
|
||||
|
||||
/// Builds the configuration.
|
||||
pub fn build(self) -> Result<ProveConfig, ProveConfigBuilderError> {
|
||||
let transcript = if !self.reveal_sent.is_empty() || !self.reveal_recv.is_empty() {
|
||||
Some(
|
||||
self.transcript
|
||||
.to_partial(self.reveal_sent, self.reveal_recv),
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(ProveConfig {
|
||||
server_identity: self.server_identity,
|
||||
transcript,
|
||||
transcript_commit: self.transcript_commit,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
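A hypothetical usage sketch of `ProveConfigBuilder` as defined above: reveal part of both transcript directions and prove the server identity. It assumes plain `Range<usize>` values implement `ToRangeSet<usize>` and that the ranges are within the transcript bounds (out-of-bounds ranges return `IndexOutOfBounds`).

```rust
// Sketch only: build a ProveConfig from an existing Transcript.
fn make_prove_config(transcript: &Transcript) -> Result<ProveConfig, ProveConfigBuilderError> {
    let mut builder = ProveConfig::builder(transcript);
    builder.server_identity();
    builder.reveal_sent(&(0..10))?;
    builder.reveal_recv(&(0..20))?;
    builder.build()
}
```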
/// Error for [`ProveConfigBuilder`].
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error(transparent)]
|
||||
pub struct ProveConfigBuilderError(#[from] ProveConfigBuilderErrorRepr);
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
enum ProveConfigBuilderErrorRepr {
|
||||
#[error("range is out of bounds of the transcript ({direction}): {actual} > {len}")]
|
||||
IndexOutOfBounds {
|
||||
direction: Direction,
|
||||
actual: usize,
|
||||
len: usize,
|
||||
},
|
||||
}
|
||||
|
||||
/// Configuration to verify information from the prover.
|
||||
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
|
||||
pub struct VerifyConfig {}
|
||||
|
||||
impl VerifyConfig {
|
||||
/// Creates a new builder.
|
||||
pub fn builder() -> VerifyConfigBuilder {
|
||||
VerifyConfigBuilder::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// Builder for [`VerifyConfig`].
|
||||
#[derive(Debug, Default)]
|
||||
pub struct VerifyConfigBuilder {}
|
||||
|
||||
impl VerifyConfigBuilder {
|
||||
/// Creates a new builder.
|
||||
pub fn new() -> Self {
|
||||
Self {}
|
||||
}
|
||||
|
||||
/// Builds the configuration.
|
||||
pub fn build(self) -> Result<VerifyConfig, VerifyConfigBuilderError> {
|
||||
Ok(VerifyConfig {})
|
||||
}
|
||||
}
|
||||
|
||||
/// Error for [`VerifyConfigBuilder`].
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error(transparent)]
|
||||
pub struct VerifyConfigBuilderError(#[from] VerifyConfigBuilderErrorRepr);
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
enum VerifyConfigBuilderErrorRepr {}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
@@ -116,75 +116,84 @@ pub enum KeyType {
|
||||
SECP256R1 = 0x0017,
|
||||
}
|
||||
|
||||
/// Signature algorithm used on the key exchange parameters.
|
||||
/// Signature scheme on the key exchange parameters.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
#[allow(non_camel_case_types, missing_docs)]
|
||||
pub enum SignatureAlgorithm {
|
||||
ECDSA_NISTP256_SHA256,
|
||||
ECDSA_NISTP256_SHA384,
|
||||
ECDSA_NISTP384_SHA256,
|
||||
ECDSA_NISTP384_SHA384,
|
||||
ED25519,
|
||||
RSA_PKCS1_2048_8192_SHA256,
|
||||
RSA_PKCS1_2048_8192_SHA384,
|
||||
RSA_PKCS1_2048_8192_SHA512,
|
||||
RSA_PSS_2048_8192_SHA256_LEGACY_KEY,
|
||||
RSA_PSS_2048_8192_SHA384_LEGACY_KEY,
|
||||
RSA_PSS_2048_8192_SHA512_LEGACY_KEY,
|
||||
pub enum SignatureScheme {
|
||||
RSA_PKCS1_SHA1 = 0x0201,
|
||||
ECDSA_SHA1_Legacy = 0x0203,
|
||||
RSA_PKCS1_SHA256 = 0x0401,
|
||||
ECDSA_NISTP256_SHA256 = 0x0403,
|
||||
RSA_PKCS1_SHA384 = 0x0501,
|
||||
ECDSA_NISTP384_SHA384 = 0x0503,
|
||||
RSA_PKCS1_SHA512 = 0x0601,
|
||||
ECDSA_NISTP521_SHA512 = 0x0603,
|
||||
RSA_PSS_SHA256 = 0x0804,
|
||||
RSA_PSS_SHA384 = 0x0805,
|
||||
RSA_PSS_SHA512 = 0x0806,
|
||||
ED25519 = 0x0807,
|
||||
}
|
||||
|
||||
impl fmt::Display for SignatureAlgorithm {
|
||||
impl fmt::Display for SignatureScheme {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
SignatureAlgorithm::ECDSA_NISTP256_SHA256 => write!(f, "ECDSA_NISTP256_SHA256"),
|
||||
SignatureAlgorithm::ECDSA_NISTP256_SHA384 => write!(f, "ECDSA_NISTP256_SHA384"),
|
||||
SignatureAlgorithm::ECDSA_NISTP384_SHA256 => write!(f, "ECDSA_NISTP384_SHA256"),
|
||||
SignatureAlgorithm::ECDSA_NISTP384_SHA384 => write!(f, "ECDSA_NISTP384_SHA384"),
|
||||
SignatureAlgorithm::ED25519 => write!(f, "ED25519"),
|
||||
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256 => {
|
||||
write!(f, "RSA_PKCS1_2048_8192_SHA256")
|
||||
}
|
||||
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384 => {
|
||||
write!(f, "RSA_PKCS1_2048_8192_SHA384")
|
||||
}
|
||||
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512 => {
|
||||
write!(f, "RSA_PKCS1_2048_8192_SHA512")
|
||||
}
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
|
||||
write!(f, "RSA_PSS_2048_8192_SHA256_LEGACY_KEY")
|
||||
}
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
|
||||
write!(f, "RSA_PSS_2048_8192_SHA384_LEGACY_KEY")
|
||||
}
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
|
||||
write!(f, "RSA_PSS_2048_8192_SHA512_LEGACY_KEY")
|
||||
}
|
||||
SignatureScheme::RSA_PKCS1_SHA1 => write!(f, "RSA_PKCS1_SHA1"),
|
||||
SignatureScheme::ECDSA_SHA1_Legacy => write!(f, "ECDSA_SHA1_Legacy"),
|
||||
SignatureScheme::RSA_PKCS1_SHA256 => write!(f, "RSA_PKCS1_SHA256"),
|
||||
SignatureScheme::ECDSA_NISTP256_SHA256 => write!(f, "ECDSA_NISTP256_SHA256"),
|
||||
SignatureScheme::RSA_PKCS1_SHA384 => write!(f, "RSA_PKCS1_SHA384"),
|
||||
SignatureScheme::ECDSA_NISTP384_SHA384 => write!(f, "ECDSA_NISTP384_SHA384"),
|
||||
SignatureScheme::RSA_PKCS1_SHA512 => write!(f, "RSA_PKCS1_SHA512"),
|
||||
SignatureScheme::ECDSA_NISTP521_SHA512 => write!(f, "ECDSA_NISTP521_SHA512"),
|
||||
SignatureScheme::RSA_PSS_SHA256 => write!(f, "RSA_PSS_SHA256"),
|
||||
SignatureScheme::RSA_PSS_SHA384 => write!(f, "RSA_PSS_SHA384"),
|
||||
SignatureScheme::RSA_PSS_SHA512 => write!(f, "RSA_PSS_SHA512"),
|
||||
SignatureScheme::ED25519 => write!(f, "ED25519"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<tls_core::verify::SignatureAlgorithm> for SignatureAlgorithm {
|
||||
fn from(value: tls_core::verify::SignatureAlgorithm) -> Self {
|
||||
use tls_core::verify::SignatureAlgorithm as Core;
|
||||
impl TryFrom<tls_core::msgs::enums::SignatureScheme> for SignatureScheme {
|
||||
type Error = &'static str;
|
||||
|
||||
fn try_from(value: tls_core::msgs::enums::SignatureScheme) -> Result<Self, Self::Error> {
|
||||
use tls_core::msgs::enums::SignatureScheme as Core;
|
||||
use SignatureScheme::*;
|
||||
Ok(match value {
|
||||
Core::RSA_PKCS1_SHA1 => RSA_PKCS1_SHA1,
|
||||
Core::ECDSA_SHA1_Legacy => ECDSA_SHA1_Legacy,
|
||||
Core::RSA_PKCS1_SHA256 => RSA_PKCS1_SHA256,
|
||||
Core::ECDSA_NISTP256_SHA256 => ECDSA_NISTP256_SHA256,
|
||||
Core::RSA_PKCS1_SHA384 => RSA_PKCS1_SHA384,
|
||||
Core::ECDSA_NISTP384_SHA384 => ECDSA_NISTP384_SHA384,
|
||||
Core::RSA_PKCS1_SHA512 => RSA_PKCS1_SHA512,
|
||||
Core::ECDSA_NISTP521_SHA512 => ECDSA_NISTP521_SHA512,
|
||||
Core::RSA_PSS_SHA256 => RSA_PSS_SHA256,
|
||||
Core::RSA_PSS_SHA384 => RSA_PSS_SHA384,
|
||||
Core::RSA_PSS_SHA512 => RSA_PSS_SHA512,
|
||||
Core::ED25519 => ED25519,
|
||||
_ => return Err("unsupported signature scheme"),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
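A hypothetical sketch of the conversion above in use: a scheme reported by the TLS stack is mapped into the local `SignatureScheme`, and code points outside the supported list surface as an error rather than a panic.

```rust
use tls_core::msgs::enums::SignatureScheme as CoreScheme;

// Sketch only: decide whether a negotiated scheme is representable locally.
fn accept_scheme(scheme: CoreScheme) -> bool {
    match SignatureScheme::try_from(scheme) {
        Ok(local) => {
            println!("negotiated {local}");
            true
        }
        // Unsupported code points fall into the catch-all arm of TryFrom.
        Err(_) => false,
    }
}
```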
impl From<SignatureScheme> for tls_core::msgs::enums::SignatureScheme {
|
||||
fn from(value: SignatureScheme) -> Self {
|
||||
use tls_core::msgs::enums::SignatureScheme::*;
|
||||
match value {
|
||||
Core::ECDSA_NISTP256_SHA256 => SignatureAlgorithm::ECDSA_NISTP256_SHA256,
|
||||
Core::ECDSA_NISTP256_SHA384 => SignatureAlgorithm::ECDSA_NISTP256_SHA384,
|
||||
Core::ECDSA_NISTP384_SHA256 => SignatureAlgorithm::ECDSA_NISTP384_SHA256,
|
||||
Core::ECDSA_NISTP384_SHA384 => SignatureAlgorithm::ECDSA_NISTP384_SHA384,
|
||||
Core::ED25519 => SignatureAlgorithm::ED25519,
|
||||
Core::RSA_PKCS1_2048_8192_SHA256 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256,
|
||||
Core::RSA_PKCS1_2048_8192_SHA384 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384,
|
||||
Core::RSA_PKCS1_2048_8192_SHA512 => SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512,
|
||||
Core::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY
|
||||
}
|
||||
Core::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY
|
||||
}
|
||||
Core::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY
|
||||
}
|
||||
SignatureScheme::RSA_PKCS1_SHA1 => RSA_PKCS1_SHA1,
|
||||
SignatureScheme::ECDSA_SHA1_Legacy => ECDSA_SHA1_Legacy,
|
||||
SignatureScheme::RSA_PKCS1_SHA256 => RSA_PKCS1_SHA256,
|
||||
SignatureScheme::ECDSA_NISTP256_SHA256 => ECDSA_NISTP256_SHA256,
|
||||
SignatureScheme::RSA_PKCS1_SHA384 => RSA_PKCS1_SHA384,
|
||||
SignatureScheme::ECDSA_NISTP384_SHA384 => ECDSA_NISTP384_SHA384,
|
||||
SignatureScheme::RSA_PKCS1_SHA512 => RSA_PKCS1_SHA512,
|
||||
SignatureScheme::ECDSA_NISTP521_SHA512 => ECDSA_NISTP521_SHA512,
|
||||
SignatureScheme::RSA_PSS_SHA256 => RSA_PSS_SHA256,
|
||||
SignatureScheme::RSA_PSS_SHA384 => RSA_PSS_SHA384,
|
||||
SignatureScheme::RSA_PSS_SHA512 => RSA_PSS_SHA512,
|
||||
SignatureScheme::ED25519 => ED25519,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -192,8 +201,8 @@ impl From<tls_core::verify::SignatureAlgorithm> for SignatureAlgorithm {
|
||||
/// Server's signature of the key exchange parameters.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ServerSignature {
|
||||
/// Signature algorithm.
|
||||
pub alg: SignatureAlgorithm,
|
||||
/// Signature scheme.
|
||||
pub scheme: SignatureScheme,
|
||||
/// Signature data.
|
||||
pub sig: Vec<u8>,
|
||||
}
|
||||
@@ -350,23 +359,20 @@ impl HandshakeData {
|
||||
message.extend_from_slice(&server_ephemeral_key.kx_params());
|
||||
|
||||
use webpki::ring as alg;
|
||||
let sig_alg = match self.sig.alg {
|
||||
SignatureAlgorithm::ECDSA_NISTP256_SHA256 => alg::ECDSA_P256_SHA256,
|
||||
SignatureAlgorithm::ECDSA_NISTP256_SHA384 => alg::ECDSA_P256_SHA384,
|
||||
SignatureAlgorithm::ECDSA_NISTP384_SHA256 => alg::ECDSA_P384_SHA256,
|
||||
SignatureAlgorithm::ECDSA_NISTP384_SHA384 => alg::ECDSA_P384_SHA384,
|
||||
SignatureAlgorithm::ED25519 => alg::ED25519,
|
||||
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256 => alg::RSA_PKCS1_2048_8192_SHA256,
|
||||
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384 => alg::RSA_PKCS1_2048_8192_SHA384,
|
||||
SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512 => alg::RSA_PKCS1_2048_8192_SHA512,
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY => {
|
||||
alg::RSA_PSS_2048_8192_SHA256_LEGACY_KEY
|
||||
}
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY => {
|
||||
alg::RSA_PSS_2048_8192_SHA384_LEGACY_KEY
|
||||
}
|
||||
SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY => {
|
||||
alg::RSA_PSS_2048_8192_SHA512_LEGACY_KEY
|
||||
let sig_alg = match self.sig.scheme {
|
||||
SignatureScheme::RSA_PKCS1_SHA256 => alg::RSA_PKCS1_2048_8192_SHA256,
|
||||
SignatureScheme::RSA_PKCS1_SHA384 => alg::RSA_PKCS1_2048_8192_SHA384,
|
||||
SignatureScheme::RSA_PKCS1_SHA512 => alg::RSA_PKCS1_2048_8192_SHA512,
|
||||
SignatureScheme::RSA_PSS_SHA256 => alg::RSA_PSS_2048_8192_SHA256_LEGACY_KEY,
|
||||
SignatureScheme::RSA_PSS_SHA384 => alg::RSA_PSS_2048_8192_SHA384_LEGACY_KEY,
|
||||
SignatureScheme::RSA_PSS_SHA512 => alg::RSA_PSS_2048_8192_SHA512_LEGACY_KEY,
|
||||
SignatureScheme::ECDSA_NISTP256_SHA256 => alg::ECDSA_P256_SHA256,
|
||||
SignatureScheme::ECDSA_NISTP384_SHA384 => alg::ECDSA_P384_SHA384,
|
||||
SignatureScheme::ED25519 => alg::ED25519,
|
||||
scheme => {
|
||||
return Err(HandshakeVerificationError::UnsupportedSignatureScheme(
|
||||
scheme,
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
@@ -396,6 +402,8 @@ pub enum HandshakeVerificationError {
|
||||
InvalidServerEphemeralKey,
|
||||
#[error("server certificate verification failed: {0}")]
|
||||
ServerCert(ServerCertVerifierError),
|
||||
#[error("unsupported signature scheme: {0}")]
|
||||
UnsupportedSignatureScheme(SignatureScheme),
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
use rangeset::RangeSet;
|
||||
|
||||
pub(crate) struct FmtRangeSet<'a>(pub &'a RangeSet<usize>);
|
||||
|
||||
impl<'a> std::fmt::Display for FmtRangeSet<'a> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str("{")?;
|
||||
for range in self.0.iter_ranges() {
|
||||
write!(f, "{}..{}", range.start, range.end)?;
|
||||
if range.end < self.0.end().unwrap_or(0) {
|
||||
f.write_str(", ")?;
|
||||
}
|
||||
}
|
||||
f.write_str("}")
|
||||
}
|
||||
}
|
||||
@@ -1,7 +1,6 @@
|
||||
//! Fixtures for testing
|
||||
|
||||
mod provider;
|
||||
pub mod transcript;
|
||||
|
||||
pub use provider::FixtureEncodingProvider;
|
||||
|
||||
@@ -10,8 +9,7 @@ use hex::FromHex;
|
||||
use crate::{
|
||||
connection::{
|
||||
CertBinding, CertBindingV1_2, ConnectionInfo, DnsName, HandshakeData, KeyType,
|
||||
ServerEphemKey, ServerName, ServerSignature, SignatureAlgorithm, TlsVersion,
|
||||
TranscriptLength,
|
||||
ServerEphemKey, ServerName, ServerSignature, SignatureScheme, TlsVersion, TranscriptLength,
|
||||
},
|
||||
transcript::{
|
||||
encoding::{EncoderSecret, EncodingProvider},
|
||||
@@ -48,7 +46,7 @@ impl ConnectionFixture {
|
||||
CertificateDer(include_bytes!("fixtures/data/tlsnotary.org/ca.der").to_vec()),
|
||||
],
|
||||
sig: ServerSignature {
|
||||
alg: SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256,
|
||||
scheme: SignatureScheme::RSA_PKCS1_SHA256,
|
||||
sig: Vec::<u8>::from_hex(include_bytes!(
|
||||
"fixtures/data/tlsnotary.org/signature"
|
||||
))
|
||||
@@ -93,7 +91,7 @@ impl ConnectionFixture {
|
||||
CertificateDer(include_bytes!("fixtures/data/appliedzkp.org/ca.der").to_vec()),
|
||||
],
|
||||
sig: ServerSignature {
|
||||
alg: SignatureAlgorithm::ECDSA_NISTP256_SHA256,
|
||||
scheme: SignatureScheme::ECDSA_NISTP256_SHA256,
|
||||
sig: Vec::<u8>::from_hex(include_bytes!(
|
||||
"fixtures/data/appliedzkp.org/signature"
|
||||
))
|
||||
|
||||
@@ -1,199 +0,0 @@
|
||||
//! Transcript fixtures for testing.
|
||||
|
||||
use aead::Payload as AeadPayload;
|
||||
use aes_gcm::{aead::Aead, Aes128Gcm, NewAead};
|
||||
use generic_array::GenericArray;
|
||||
use rand::{rngs::StdRng, Rng, SeedableRng};
|
||||
use tls_core::msgs::{
|
||||
base::Payload,
|
||||
codec::Codec,
|
||||
enums::{ContentType, HandshakeType, ProtocolVersion},
|
||||
handshake::{HandshakeMessagePayload, HandshakePayload},
|
||||
message::{OpaqueMessage, PlainMessage},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
connection::{TranscriptLength, VerifyData},
|
||||
fixtures::ConnectionFixture,
|
||||
transcript::{Record, TlsTranscript},
|
||||
};
|
||||
|
||||
/// The key used for encryption of the sent and received transcript.
|
||||
pub const KEY: [u8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
|
||||
|
||||
/// The iv used for encryption of the sent and received transcript.
|
||||
pub const IV: [u8; 4] = [1, 3, 3, 7];
|
||||
|
||||
/// The record size in bytes.
|
||||
pub const RECORD_SIZE: usize = 512;
|
||||
|
||||
/// Creates a transcript fixture for testing.
|
||||
pub fn transcript_fixture(sent: &[u8], recv: &[u8]) -> TlsTranscript {
|
||||
TranscriptGenerator::new(KEY, IV).generate(sent, recv)
|
||||
}
|
||||
|
||||
struct TranscriptGenerator {
|
||||
key: [u8; 16],
|
||||
iv: [u8; 4],
|
||||
}
|
||||
|
||||
impl TranscriptGenerator {
|
||||
fn new(key: [u8; 16], iv: [u8; 4]) -> Self {
|
||||
Self { key, iv }
|
||||
}
|
||||
|
||||
fn generate(&self, sent: &[u8], recv: &[u8]) -> TlsTranscript {
|
||||
let mut rng = StdRng::from_seed([1; 32]);
|
||||
|
||||
let transcript_len = TranscriptLength {
|
||||
sent: sent.len() as u32,
|
||||
received: recv.len() as u32,
|
||||
};
|
||||
let tlsn = ConnectionFixture::tlsnotary(transcript_len);
|
||||
|
||||
let time = tlsn.connection_info.time;
|
||||
let version = tlsn.connection_info.version;
|
||||
let server_cert_chain = tlsn.server_cert_data.certs;
|
||||
let server_signature = tlsn.server_cert_data.sig;
|
||||
let cert_binding = tlsn.server_cert_data.binding;
|
||||
|
||||
let cf_vd: [u8; 12] = rng.random();
|
||||
let sf_vd: [u8; 12] = rng.random();
|
||||
|
||||
let verify_data = VerifyData {
|
||||
client_finished: cf_vd.to_vec(),
|
||||
server_finished: sf_vd.to_vec(),
|
||||
};
|
||||
|
||||
let sent = self.gen_records(cf_vd, sent);
|
||||
let recv = self.gen_records(sf_vd, recv);
|
||||
|
||||
TlsTranscript::new(
|
||||
time,
|
||||
version,
|
||||
Some(server_cert_chain),
|
||||
Some(server_signature),
|
||||
cert_binding,
|
||||
verify_data,
|
||||
sent,
|
||||
recv,
|
||||
)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn gen_records(&self, vd: [u8; 12], plaintext: &[u8]) -> Vec<Record> {
|
||||
let mut records = Vec::new();
|
||||
|
||||
let handshake = self.gen_handshake(vd);
|
||||
records.push(handshake);
|
||||
|
||||
for (seq, msg) in (1_u64..).zip(plaintext.chunks(RECORD_SIZE)) {
|
||||
let record = self.gen_app_data(seq, msg);
|
||||
records.push(record);
|
||||
}
|
||||
|
||||
records
|
||||
}
|
||||
|
||||
fn gen_app_data(&self, seq: u64, plaintext: &[u8]) -> Record {
|
||||
assert!(
|
||||
plaintext.len() <= 1 << 14,
|
||||
"plaintext len per record must be smaller than 2^14 bytes"
|
||||
);
|
||||
|
||||
let explicit_nonce: [u8; 8] = seq.to_be_bytes();
|
||||
let msg = PlainMessage {
|
||||
typ: ContentType::ApplicationData,
|
||||
version: ProtocolVersion::TLSv1_2,
|
||||
payload: Payload::new(plaintext),
|
||||
};
|
||||
let opaque = aes_gcm_encrypt(self.key, self.iv, seq, explicit_nonce, &msg);
|
||||
|
||||
let mut payload = opaque.payload.0;
|
||||
let mut ciphertext = payload.split_off(8);
|
||||
let tag = ciphertext.split_off(ciphertext.len() - 16);
|
||||
|
||||
Record {
|
||||
seq,
|
||||
typ: ContentType::ApplicationData,
|
||||
plaintext: Some(plaintext.to_vec()),
|
||||
explicit_nonce: explicit_nonce.to_vec(),
|
||||
ciphertext,
|
||||
tag: Some(tag),
|
||||
}
|
||||
}
|
||||
|
||||
fn gen_handshake(&self, vd: [u8; 12]) -> Record {
|
||||
let seq = 0_u64;
|
||||
let explicit_nonce = seq.to_be_bytes();
|
||||
|
||||
let mut plaintext = Vec::new();
|
||||
|
||||
let payload = Payload(vd.to_vec());
|
||||
let hs_payload = HandshakePayload::Finished(payload);
|
||||
let handshake_message = HandshakeMessagePayload {
|
||||
typ: HandshakeType::Finished,
|
||||
payload: hs_payload,
|
||||
};
|
||||
handshake_message.encode(&mut plaintext);
|
||||
|
||||
let msg = PlainMessage {
|
||||
typ: ContentType::Handshake,
|
||||
version: ProtocolVersion::TLSv1_2,
|
||||
payload: Payload::new(plaintext.clone()),
|
||||
};
|
||||
|
||||
let opaque = aes_gcm_encrypt(self.key, self.iv, seq, explicit_nonce, &msg);
|
||||
let mut payload = opaque.payload.0;
|
||||
let mut ciphertext = payload.split_off(8);
|
||||
let tag = ciphertext.split_off(ciphertext.len() - 16);
|
||||
|
||||
Record {
|
||||
seq,
|
||||
typ: ContentType::Handshake,
|
||||
plaintext: Some(plaintext),
|
||||
explicit_nonce: explicit_nonce.to_vec(),
|
||||
ciphertext,
|
||||
tag: Some(tag),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn aes_gcm_encrypt(
|
||||
key: [u8; 16],
|
||||
iv: [u8; 4],
|
||||
seq: u64,
|
||||
explicit_nonce: [u8; 8],
|
||||
msg: &PlainMessage,
|
||||
) -> OpaqueMessage {
|
||||
let mut aad = [0u8; 13];
|
||||
|
||||
aad[..8].copy_from_slice(&seq.to_be_bytes());
|
||||
aad[8] = msg.typ.get_u8();
|
||||
aad[9..11].copy_from_slice(&msg.version.get_u16().to_be_bytes());
|
||||
aad[11..13].copy_from_slice(&(msg.payload.0.len() as u16).to_be_bytes());
|
||||
let payload = AeadPayload {
|
||||
msg: &msg.payload.0,
|
||||
aad: &aad,
|
||||
};
|
||||
|
||||
let mut nonce = [0u8; 12];
|
||||
nonce[..4].copy_from_slice(&iv);
|
||||
nonce[4..].copy_from_slice(&explicit_nonce);
|
||||
let nonce = GenericArray::from_slice(&nonce);
|
||||
let cipher = Aes128Gcm::new_from_slice(&key).unwrap();
|
||||
|
||||
// ciphertext will have the MAC appended
|
||||
let ciphertext = cipher.encrypt(nonce, payload).unwrap();
|
||||
|
||||
// prepend the explicit nonce
|
||||
let mut nonce_ct_mac = vec![0u8; 0];
|
||||
nonce_ct_mac.extend(explicit_nonce.iter());
|
||||
nonce_ct_mac.extend(ciphertext.iter());
|
||||
|
||||
OpaqueMessage {
|
||||
typ: msg.typ,
|
||||
version: msg.version,
|
||||
payload: Payload::new(nonce_ct_mac),
|
||||
}
|
||||
}
|
||||
@@ -191,11 +191,6 @@ impl Hash {
|
||||
len: value.len(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a byte slice of the hash value.
|
||||
pub fn as_bytes(&self) -> &[u8] {
|
||||
&self.value[..self.len]
|
||||
}
|
||||
}
|
||||
|
||||
impl rs_merkle::Hash for Hash {
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
#![deny(clippy::all)]
|
||||
#![forbid(unsafe_code)]
|
||||
|
||||
pub mod config;
|
||||
pub mod connection;
|
||||
#[cfg(any(test, feature = "fixtures"))]
|
||||
pub mod fixtures;
|
||||
@@ -11,205 +12,20 @@ pub mod hash;
|
||||
pub mod merkle;
|
||||
pub mod transcript;
|
||||
pub mod webpki;
|
||||
pub use rangeset;
|
||||
pub(crate) mod display;
|
||||
|
||||
use rangeset::{RangeSet, ToRangeSet, UnionMut};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
connection::{HandshakeData, ServerName},
|
||||
transcript::{
|
||||
encoding::EncoderSecret, Direction, PartialTranscript, Transcript, TranscriptCommitConfig,
|
||||
TranscriptCommitRequest, TranscriptCommitment, TranscriptSecret,
|
||||
PartialTranscript, TranscriptCommitRequest, TranscriptCommitment, TranscriptSecret,
|
||||
},
|
||||
};
|
||||
|
||||
/// Configuration to prove information to the verifier.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ProveConfig {
|
||||
server_identity: bool,
|
||||
reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
|
||||
transcript_commit: Option<TranscriptCommitConfig>,
|
||||
}
|
||||
|
||||
impl ProveConfig {
|
||||
/// Creates a new builder.
|
||||
pub fn builder(transcript: &Transcript) -> ProveConfigBuilder<'_> {
|
||||
ProveConfigBuilder::new(transcript)
|
||||
}
|
||||
|
||||
/// Returns `true` if the server identity is to be proven.
|
||||
pub fn server_identity(&self) -> bool {
|
||||
self.server_identity
|
||||
}
|
||||
|
||||
/// Returns the ranges of the transcript to be revealed.
|
||||
pub fn reveal(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
|
||||
self.reveal.as_ref()
|
||||
}
|
||||
|
||||
/// Returns the transcript commitment configuration.
|
||||
pub fn transcript_commit(&self) -> Option<&TranscriptCommitConfig> {
|
||||
self.transcript_commit.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
/// Builder for [`ProveConfig`].
|
||||
#[derive(Debug)]
|
||||
pub struct ProveConfigBuilder<'a> {
|
||||
transcript: &'a Transcript,
|
||||
server_identity: bool,
|
||||
reveal: Option<(RangeSet<usize>, RangeSet<usize>)>,
|
||||
transcript_commit: Option<TranscriptCommitConfig>,
|
||||
}
|
||||
|
||||
impl<'a> ProveConfigBuilder<'a> {
|
||||
/// Creates a new builder.
|
||||
pub fn new(transcript: &'a Transcript) -> Self {
|
||||
Self {
|
||||
transcript,
|
||||
server_identity: false,
|
||||
reveal: None,
|
||||
transcript_commit: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Proves the server identity.
|
||||
pub fn server_identity(&mut self) -> &mut Self {
|
||||
self.server_identity = true;
|
||||
self
|
||||
}
|
||||
|
||||
/// Configures transcript commitments.
|
||||
pub fn transcript_commit(&mut self, transcript_commit: TranscriptCommitConfig) -> &mut Self {
|
||||
self.transcript_commit = Some(transcript_commit);
|
||||
self
|
||||
}
|
||||
|
||||
/// Reveals the given ranges of the transcript.
|
||||
pub fn reveal(
|
||||
&mut self,
|
||||
direction: Direction,
|
||||
ranges: &dyn ToRangeSet<usize>,
|
||||
) -> Result<&mut Self, ProveConfigBuilderError> {
|
||||
let idx = ranges.to_range_set();
|
||||
|
||||
if idx.end().unwrap_or(0) > self.transcript.len_of_direction(direction) {
|
||||
return Err(ProveConfigBuilderError(
|
||||
ProveConfigBuilderErrorRepr::IndexOutOfBounds {
|
||||
direction,
|
||||
actual: idx.end().unwrap_or(0),
|
||||
len: self.transcript.len_of_direction(direction),
|
||||
},
|
||||
));
|
||||
}
|
||||
|
||||
let (sent, recv) = self.reveal.get_or_insert_default();
|
||||
match direction {
|
||||
Direction::Sent => sent.union_mut(&idx),
|
||||
Direction::Received => recv.union_mut(&idx),
|
||||
}
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Reveals the given ranges of the sent data transcript.
|
||||
pub fn reveal_sent(
|
||||
&mut self,
|
||||
ranges: &dyn ToRangeSet<usize>,
|
||||
) -> Result<&mut Self, ProveConfigBuilderError> {
|
||||
self.reveal(Direction::Sent, ranges)
|
||||
}
|
||||
|
||||
/// Reveals all of the sent data transcript.
|
||||
pub fn reveal_sent_all(&mut self) -> Result<&mut Self, ProveConfigBuilderError> {
|
||||
let len = self.transcript.len_of_direction(Direction::Sent);
|
||||
let (sent, _) = self.reveal.get_or_insert_default();
|
||||
sent.union_mut(&(0..len));
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Reveals the given ranges of the received data transcript.
|
||||
pub fn reveal_recv(
|
||||
&mut self,
|
||||
ranges: &dyn ToRangeSet<usize>,
|
||||
) -> Result<&mut Self, ProveConfigBuilderError> {
|
||||
self.reveal(Direction::Received, ranges)
|
||||
}
|
||||
|
||||
/// Reveals all of the received data transcript.
|
||||
pub fn reveal_recv_all(&mut self) -> Result<&mut Self, ProveConfigBuilderError> {
|
||||
let len = self.transcript.len_of_direction(Direction::Received);
|
||||
let (_, recv) = self.reveal.get_or_insert_default();
|
||||
recv.union_mut(&(0..len));
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Builds the configuration.
|
||||
pub fn build(self) -> Result<ProveConfig, ProveConfigBuilderError> {
|
||||
Ok(ProveConfig {
|
||||
server_identity: self.server_identity,
|
||||
reveal: self.reveal,
|
||||
transcript_commit: self.transcript_commit,
|
||||
})
|
||||
}
|
||||
}

/// Error for [`ProveConfigBuilder`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct ProveConfigBuilderError(#[from] ProveConfigBuilderErrorRepr);

#[derive(Debug, thiserror::Error)]
enum ProveConfigBuilderErrorRepr {
    #[error("range is out of bounds of the transcript ({direction}): {actual} > {len}")]
    IndexOutOfBounds {
        direction: Direction,
        actual: usize,
        len: usize,
    },
}

/// Configuration to verify information from the prover.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct VerifyConfig {}

impl VerifyConfig {
    /// Creates a new builder.
    pub fn builder() -> VerifyConfigBuilder {
        VerifyConfigBuilder::new()
    }
}

/// Builder for [`VerifyConfig`].
#[derive(Debug, Default)]
pub struct VerifyConfigBuilder {}

impl VerifyConfigBuilder {
    /// Creates a new builder.
    pub fn new() -> Self {
        Self {}
    }

    /// Builds the configuration.
    pub fn build(self) -> Result<VerifyConfig, VerifyConfigBuilderError> {
        Ok(VerifyConfig {})
    }
}

/// Error for [`VerifyConfigBuilder`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct VerifyConfigBuilderError(#[from] VerifyConfigBuilderErrorRepr);

#[derive(Debug, thiserror::Error)]
enum VerifyConfigBuilderErrorRepr {}

/// Request to prove statements about the connection.
/// Payload sent to the verifier.
#[doc(hidden)]
#[derive(Debug, Serialize, Deserialize)]
pub struct ProveRequest {
pub struct ProvePayload {
    /// Handshake data.
    pub handshake: Option<(ServerName, HandshakeData)>,
    /// Transcript data.
@@ -236,8 +52,6 @@ pub struct VerifierOutput {
    pub server_name: Option<ServerName>,
    /// Transcript data.
    pub transcript: Option<PartialTranscript>,
    /// Encoding commitment secret.
    pub encoder_secret: Option<EncoderSecret>,
    /// Transcript commitments.
    pub transcript_commitments: Vec<TranscriptCommitment>,
}

@@ -26,7 +26,7 @@ mod tls;

use std::{fmt, ops::Range};

use rangeset::{Difference, IndexRanges, RangeSet, Union};
use rangeset::{Difference, IndexRanges, RangeSet, Subset, ToRangeSet, Union, UnionMut};
use serde::{Deserialize, Serialize};

use crate::connection::TranscriptLength;
@@ -39,7 +39,6 @@ pub use proof::{
    TranscriptProof, TranscriptProofBuilder, TranscriptProofBuilderError, TranscriptProofError,
};
pub use tls::{Record, TlsTranscript};
pub use tls_core::msgs::enums::ContentType;

/// A transcript contains the plaintext of all application data communicated
/// between the Prover and the Server.
@@ -96,18 +95,18 @@ impl Transcript {

    /// Returns the subsequence of the transcript with the provided index,
    /// returning `None` if the index is out of bounds.
    pub fn get(&self, direction: Direction, idx: &RangeSet<usize>) -> Option<Subsequence> {
    pub fn get(&self, direction: Direction, idx: &Idx) -> Option<Subsequence> {
        let data = match direction {
            Direction::Sent => &self.sent,
            Direction::Received => &self.received,
        };

        if idx.end().unwrap_or(0) > data.len() {
        if idx.end() > data.len() {
            return None;
        }

        Some(
            Subsequence::new(idx.clone(), data.index_ranges(idx))
            Subsequence::new(idx.clone(), data.index_ranges(&idx.0))
                .expect("data is same length as index"),
        )
    }
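
For orientation, a short sketch in the style of the tests further down, illustrative only, showing the new `Idx`-based accessor on a toy transcript:

// Sketch: `get` now takes `&Idx` rather than `&RangeSet<usize>`.
let transcript = Transcript::new([0u8, 1, 2, 3, 4], [9u8, 8, 7, 6, 5]);
let subseq = transcript
    .get(Direction::Sent, &Idx::new([0..2, 3..5]))
    .unwrap();
assert_eq!(subseq.data, vec![0, 1, 3, 4]);
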
@@ -122,11 +121,7 @@ impl Transcript {
    ///
    /// * `sent_idx` - The indices of the sent data to include.
    /// * `recv_idx` - The indices of the received data to include.
    pub fn to_partial(
        &self,
        sent_idx: RangeSet<usize>,
        recv_idx: RangeSet<usize>,
    ) -> PartialTranscript {
    pub fn to_partial(&self, sent_idx: Idx, recv_idx: Idx) -> PartialTranscript {
        let mut sent = vec![0; self.sent.len()];
        let mut received = vec![0; self.received.len()];

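A minimal sketch of the updated signature, reusing the toy transcript from the previous sketch; unauthenticated bytes are zeroed in the resulting partial transcript.

// Sketch: keep only the given Idx ranges, redact everything else.
let partial = transcript.to_partial(Idx::new(0..2), Idx::new(3..5));
assert_eq!(partial.sent_unsafe(), [0, 1, 0, 0, 0]);
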
@@ -161,9 +156,9 @@ pub struct PartialTranscript {
    /// Data received by the Prover from the Server.
    received: Vec<u8>,
    /// Index of `sent` which have been authenticated.
    sent_authed_idx: RangeSet<usize>,
    sent_authed_idx: Idx,
    /// Index of `received` which have been authenticated.
    received_authed_idx: RangeSet<usize>,
    received_authed_idx: Idx,
}

/// `PartialTranscript` in a compressed form.
@@ -175,9 +170,9 @@ pub struct CompressedPartialTranscript {
    /// Received data which has been authenticated.
    received_authed: Vec<u8>,
    /// Index of `sent_authed`.
    sent_idx: RangeSet<usize>,
    sent_idx: Idx,
    /// Index of `received_authed`.
    recv_idx: RangeSet<usize>,
    recv_idx: Idx,
    /// Total bytelength of sent data in the original partial transcript.
    sent_total: usize,
    /// Total bytelength of received data in the original partial transcript.
@@ -189,10 +184,10 @@ impl From<PartialTranscript> for CompressedPartialTranscript {
        Self {
            sent_authed: uncompressed
                .sent
                .index_ranges(&uncompressed.sent_authed_idx),
                .index_ranges(&uncompressed.sent_authed_idx.0),
            received_authed: uncompressed
                .received
                .index_ranges(&uncompressed.received_authed_idx),
                .index_ranges(&uncompressed.received_authed_idx.0),
            sent_idx: uncompressed.sent_authed_idx,
            recv_idx: uncompressed.received_authed_idx,
            sent_total: uncompressed.sent.len(),
@@ -242,8 +237,8 @@ impl PartialTranscript {
        Self {
            sent: vec![0; sent_len],
            received: vec![0; received_len],
            sent_authed_idx: RangeSet::default(),
            received_authed_idx: RangeSet::default(),
            sent_authed_idx: Idx::default(),
            received_authed_idx: Idx::default(),
        }
    }

@@ -264,10 +259,10 @@ impl PartialTranscript {
    }

    /// Returns whether the index is in bounds of the transcript.
    pub fn contains(&self, direction: Direction, idx: &RangeSet<usize>) -> bool {
    pub fn contains(&self, direction: Direction, idx: &Idx) -> bool {
        match direction {
            Direction::Sent => idx.end().unwrap_or(0) <= self.sent.len(),
            Direction::Received => idx.end().unwrap_or(0) <= self.received.len(),
            Direction::Sent => idx.end() <= self.sent.len(),
            Direction::Received => idx.end() <= self.received.len(),
        }
    }

@@ -294,23 +289,23 @@ impl PartialTranscript {
    }

    /// Returns the index of sent data which have been authenticated.
    pub fn sent_authed(&self) -> &RangeSet<usize> {
    pub fn sent_authed(&self) -> &Idx {
        &self.sent_authed_idx
    }

    /// Returns the index of received data which have been authenticated.
    pub fn received_authed(&self) -> &RangeSet<usize> {
    pub fn received_authed(&self) -> &Idx {
        &self.received_authed_idx
    }

    /// Returns the index of sent data which haven't been authenticated.
    pub fn sent_unauthed(&self) -> RangeSet<usize> {
        (0..self.sent.len()).difference(&self.sent_authed_idx)
    pub fn sent_unauthed(&self) -> Idx {
        Idx(RangeSet::from(0..self.sent.len()).difference(&self.sent_authed_idx.0))
    }

    /// Returns the index of received data which haven't been authenticated.
    pub fn received_unauthed(&self) -> RangeSet<usize> {
        (0..self.received.len()).difference(&self.received_authed_idx)
    pub fn received_unauthed(&self) -> Idx {
        Idx(RangeSet::from(0..self.received.len()).difference(&self.received_authed_idx.0))
    }

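Illustrative sketch of the accessors above, reusing the toy `transcript` from the earlier sketch:

// Sketch: authenticated vs. unauthenticated indices of a partial transcript.
let partial = transcript.to_partial(Idx::new(0..2), Idx::new(3..5));
assert_eq!(partial.sent_authed(), &Idx::new(0..2));
assert_eq!(partial.sent_unauthed(), Idx::new(2..5));
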
    /// Returns an iterator over the authenticated data in the transcript.
@@ -320,7 +315,7 @@ impl PartialTranscript {
            Direction::Received => (&self.received, &self.received_authed_idx),
        };

        authed.iter().map(|i| data[i])
        authed.0.iter().map(|i| data[i])
    }

    /// Unions the authenticated data of this transcript with another.
@@ -342,7 +337,8 @@ impl PartialTranscript {

        for range in other
            .sent_authed_idx
            .difference(&self.sent_authed_idx)
            .0
            .difference(&self.sent_authed_idx.0)
            .iter_ranges()
        {
            self.sent[range.clone()].copy_from_slice(&other.sent[range]);
@@ -350,7 +346,8 @@ impl PartialTranscript {

        for range in other
            .received_authed_idx
            .difference(&self.received_authed_idx)
            .0
            .difference(&self.received_authed_idx.0)
            .iter_ranges()
        {
            self.received[range.clone()].copy_from_slice(&other.received[range]);
@@ -402,12 +399,12 @@ impl PartialTranscript {
    pub fn set_unauthed_range(&mut self, value: u8, direction: Direction, range: Range<usize>) {
        match direction {
            Direction::Sent => {
                for range in range.difference(&self.sent_authed_idx).iter_ranges() {
                for range in range.difference(&self.sent_authed_idx.0).iter_ranges() {
                    self.sent[range].fill(value);
                }
            }
            Direction::Received => {
                for range in range.difference(&self.received_authed_idx).iter_ranges() {
                for range in range.difference(&self.received_authed_idx.0).iter_ranges() {
                    self.received[range].fill(value);
                }
            }
@@ -436,19 +433,130 @@ impl fmt::Display for Direction {
    }
}

/// Transcript index.
#[derive(Debug, Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct Idx(RangeSet<usize>);

impl Idx {
    /// Creates a new index builder.
    pub fn builder() -> IdxBuilder {
        IdxBuilder::default()
    }

    /// Creates an empty index.
    pub fn empty() -> Self {
        Self(RangeSet::default())
    }

    /// Creates a new transcript index.
    pub fn new(ranges: impl Into<RangeSet<usize>>) -> Self {
        Self(ranges.into())
    }

    /// Returns the start of the index.
    pub fn start(&self) -> usize {
        self.0.min().unwrap_or_default()
    }

    /// Returns the end of the index, non-inclusive.
    pub fn end(&self) -> usize {
        self.0.end().unwrap_or_default()
    }

    /// Returns an iterator over the values in the index.
    pub fn iter(&self) -> impl Iterator<Item = usize> + '_ {
        self.0.iter()
    }

    /// Returns an iterator over the ranges of the index.
    pub fn iter_ranges(&self) -> impl Iterator<Item = Range<usize>> + '_ {
        self.0.iter_ranges()
    }

    /// Returns the number of values in the index.
    pub fn len(&self) -> usize {
        self.0.len()
    }

    /// Returns whether the index is empty.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Returns the number of disjoint ranges in the index.
    pub fn count(&self) -> usize {
        self.0.len_ranges()
    }

    pub(crate) fn as_range_set(&self) -> &RangeSet<usize> {
        &self.0
    }

    /// Returns the union of this index with another.
    pub(crate) fn union(&self, other: &Idx) -> Idx {
        Idx(self.0.union(&other.0))
    }

    /// Unions this index with another.
    pub(crate) fn union_mut(&mut self, other: &Idx) {
        self.0.union_mut(&other.0);
    }

    /// Returns the difference between `self` and `other`.
    pub(crate) fn difference(&self, other: &Idx) -> Idx {
        Idx(self.0.difference(&other.0))
    }

    /// Returns `true` if `self` is a subset of `other`.
    pub(crate) fn is_subset(&self, other: &Idx) -> bool {
        self.0.is_subset(&other.0)
    }
}

impl std::fmt::Display for Idx {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("Idx([")?;
        let count = self.0.len_ranges();
        for (i, range) in self.0.iter_ranges().enumerate() {
            write!(f, "{}..{}", range.start, range.end)?;
            if i < count - 1 {
                write!(f, ", ")?;
            }
        }
        f.write_str("])")?;
        Ok(())
    }
}

/// Builder for [`Idx`].
#[derive(Debug, Default)]
pub struct IdxBuilder(RangeSet<usize>);

impl IdxBuilder {
    /// Unions ranges.
    pub fn union(self, ranges: &dyn ToRangeSet<usize>) -> Self {
        IdxBuilder(self.0.union(&ranges.to_range_set()))
    }

    /// Builds the index.
    pub fn build(self) -> Idx {
        Idx(self.0)
    }
}

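A brief sketch of how `Idx` is meant to be constructed and queried, illustrative only, assuming `Range<usize>` implements `ToRangeSet<usize>` as used elsewhere in this module:

// Sketch: build an index from disjoint ranges and inspect it.
let idx = Idx::builder().union(&(0..4)).union(&(8..10)).build();
assert_eq!(idx.start(), 0);
assert_eq!(idx.end(), 10);
assert_eq!(idx.len(), 6);
assert_eq!(idx.count(), 2);
assert_eq!(idx.to_string(), "Idx([0..4, 8..10])");
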
/// Transcript subsequence.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(try_from = "validation::SubsequenceUnchecked")]
|
||||
pub struct Subsequence {
|
||||
/// Index of the subsequence.
|
||||
idx: RangeSet<usize>,
|
||||
idx: Idx,
|
||||
/// Data of the subsequence.
|
||||
data: Vec<u8>,
|
||||
}
|
||||
|
||||
impl Subsequence {
|
||||
/// Creates a new subsequence.
|
||||
pub fn new(idx: RangeSet<usize>, data: Vec<u8>) -> Result<Self, InvalidSubsequence> {
|
||||
pub fn new(idx: Idx, data: Vec<u8>) -> Result<Self, InvalidSubsequence> {
|
||||
if idx.len() != data.len() {
|
||||
return Err(InvalidSubsequence(
|
||||
"index length does not match data length",
|
||||
@@ -459,7 +567,7 @@ impl Subsequence {
|
||||
}
|
||||
|
||||
/// Returns the index of the subsequence.
|
||||
pub fn index(&self) -> &RangeSet<usize> {
|
||||
pub fn index(&self) -> &Idx {
|
||||
&self.idx
|
||||
}
|
||||
|
||||
@@ -475,7 +583,7 @@ impl Subsequence {
|
||||
}
|
||||
|
||||
/// Returns the inner parts of the subsequence.
|
||||
pub fn into_parts(self) -> (RangeSet<usize>, Vec<u8>) {
|
||||
pub fn into_parts(self) -> (Idx, Vec<u8>) {
|
||||
(self.idx, self.data)
|
||||
}
|
||||
|
||||
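
Illustrative sketch of the `Idx`-based `Subsequence` constructor shown above; the index length must match the data length or construction fails:

// Sketch: the index covers 5 positions, so the data must be 5 bytes.
let seq = Subsequence::new(Idx::new([0..3, 5..7]), vec![0, 1, 2, 5, 6]).unwrap();
assert_eq!(seq.index(), &Idx::new([0..3, 5..7]));
assert_eq!(seq.into_parts().1, vec![0, 1, 2, 5, 6]);
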
@@ -503,7 +611,7 @@ mod validation {
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub(super) struct SubsequenceUnchecked {
|
||||
idx: RangeSet<usize>,
|
||||
idx: Idx,
|
||||
data: Vec<u8>,
|
||||
}
|
||||
|
||||
@@ -525,8 +633,8 @@ mod validation {
|
||||
pub(super) struct CompressedPartialTranscriptUnchecked {
|
||||
sent_authed: Vec<u8>,
|
||||
received_authed: Vec<u8>,
|
||||
sent_idx: RangeSet<usize>,
|
||||
recv_idx: RangeSet<usize>,
|
||||
sent_idx: Idx,
|
||||
recv_idx: Idx,
|
||||
sent_total: usize,
|
||||
recv_total: usize,
|
||||
}
|
||||
@@ -543,8 +651,8 @@ mod validation {
|
||||
));
|
||||
}
|
||||
|
||||
if unchecked.sent_idx.end().unwrap_or(0) > unchecked.sent_total
|
||||
|| unchecked.recv_idx.end().unwrap_or(0) > unchecked.recv_total
|
||||
if unchecked.sent_idx.end() > unchecked.sent_total
|
||||
|| unchecked.recv_idx.end() > unchecked.recv_total
|
||||
{
|
||||
return Err(InvalidCompressedPartialTranscript(
|
||||
"ranges are not in bounds of the data",
|
||||
@@ -573,8 +681,8 @@ mod validation {
|
||||
CompressedPartialTranscriptUnchecked {
|
||||
received_authed: vec![1, 2, 3, 11, 12, 13],
|
||||
sent_authed: vec![4, 5, 6, 14, 15, 16],
|
||||
recv_idx: RangeSet::from([1..4, 11..14]),
|
||||
sent_idx: RangeSet::from([4..7, 14..17]),
|
||||
recv_idx: Idx(RangeSet::new(&[1..4, 11..14])),
|
||||
sent_idx: Idx(RangeSet::new(&[4..7, 14..17])),
|
||||
sent_total: 20,
|
||||
recv_total: 20,
|
||||
}
|
||||
@@ -613,6 +721,7 @@ mod validation {
|
||||
// Change the total to be less than the last range's end bound.
|
||||
let end = partial_transcript
|
||||
.sent_idx
|
||||
.0
|
||||
.iter_ranges()
|
||||
.next_back()
|
||||
.unwrap()
|
||||
@@ -644,25 +753,31 @@ mod tests {
|
||||
|
||||
#[fixture]
|
||||
fn partial_transcript() -> PartialTranscript {
|
||||
transcript().to_partial(RangeSet::from([1..4, 6..9]), RangeSet::from([2..5, 7..10]))
|
||||
transcript().to_partial(
|
||||
Idx::new(RangeSet::new(&[1..4, 6..9])),
|
||||
Idx::new(RangeSet::new(&[2..5, 7..10])),
|
||||
)
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_transcript_get_subsequence(transcript: Transcript) {
|
||||
let subseq = transcript
|
||||
.get(Direction::Received, &RangeSet::from([0..4, 7..10]))
|
||||
.get(Direction::Received, &Idx(RangeSet::from([0..4, 7..10])))
|
||||
.unwrap();
|
||||
assert_eq!(subseq.data, vec![0, 1, 2, 3, 7, 8, 9]);
|
||||
|
||||
let subseq = transcript
|
||||
.get(Direction::Sent, &RangeSet::from([0..4, 9..12]))
|
||||
.get(Direction::Sent, &Idx(RangeSet::from([0..4, 9..12])))
|
||||
.unwrap();
|
||||
assert_eq!(subseq.data, vec![0, 1, 2, 3, 9, 10, 11]);
|
||||
|
||||
let subseq = transcript.get(Direction::Received, &RangeSet::from([0..4, 7..10, 11..13]));
|
||||
let subseq = transcript.get(
|
||||
Direction::Received,
|
||||
&Idx(RangeSet::from([0..4, 7..10, 11..13])),
|
||||
);
|
||||
assert_eq!(subseq, None);
|
||||
|
||||
let subseq = transcript.get(Direction::Sent, &RangeSet::from([0..4, 7..10, 11..13]));
|
||||
let subseq = transcript.get(Direction::Sent, &Idx(RangeSet::from([0..4, 7..10, 11..13])));
|
||||
assert_eq!(subseq, None);
|
||||
}
|
||||
|
||||
@@ -675,7 +790,7 @@ mod tests {
|
||||
|
||||
#[rstest]
|
||||
fn test_transcript_to_partial_success(transcript: Transcript) {
|
||||
let partial = transcript.to_partial(RangeSet::from(0..2), RangeSet::from(3..7));
|
||||
let partial = transcript.to_partial(Idx::new(0..2), Idx::new(3..7));
|
||||
assert_eq!(partial.sent_unsafe(), [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
|
||||
assert_eq!(
|
||||
partial.received_unsafe(),
|
||||
@@ -686,30 +801,29 @@ mod tests {
|
||||
#[rstest]
|
||||
#[should_panic]
|
||||
fn test_transcript_to_partial_failure(transcript: Transcript) {
|
||||
let _ = transcript.to_partial(RangeSet::from(0..14), RangeSet::from(3..7));
|
||||
let _ = transcript.to_partial(Idx::new(0..14), Idx::new(3..7));
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_partial_transcript_contains(transcript: Transcript) {
|
||||
let partial = transcript.to_partial(RangeSet::from(0..2), RangeSet::from(3..7));
|
||||
assert!(partial.contains(Direction::Sent, &RangeSet::from([0..5, 7..10])));
|
||||
assert!(!partial.contains(Direction::Received, &RangeSet::from([4..6, 7..13])))
|
||||
let partial = transcript.to_partial(Idx::new(0..2), Idx::new(3..7));
|
||||
assert!(partial.contains(Direction::Sent, &Idx::new([0..5, 7..10])));
|
||||
assert!(!partial.contains(Direction::Received, &Idx::new([4..6, 7..13])))
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_partial_transcript_unauthed(transcript: Transcript) {
|
||||
let partial = transcript.to_partial(RangeSet::from(0..2), RangeSet::from(3..7));
|
||||
assert_eq!(partial.sent_unauthed(), RangeSet::from(2..12));
|
||||
assert_eq!(partial.received_unauthed(), RangeSet::from([0..3, 7..12]));
|
||||
let partial = transcript.to_partial(Idx::new(0..2), Idx::new(3..7));
|
||||
assert_eq!(partial.sent_unauthed(), Idx::new(2..12));
|
||||
assert_eq!(partial.received_unauthed(), Idx::new([0..3, 7..12]));
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_partial_transcript_union_success(transcript: Transcript) {
|
||||
// Non overlapping ranges.
|
||||
let mut simple_partial = transcript.to_partial(RangeSet::from(0..2), RangeSet::from(3..7));
|
||||
let mut simple_partial = transcript.to_partial(Idx::new(0..2), Idx::new(3..7));
|
||||
|
||||
let other_simple_partial =
|
||||
transcript.to_partial(RangeSet::from(3..5), RangeSet::from(1..2));
|
||||
let other_simple_partial = transcript.to_partial(Idx::new(3..5), Idx::new(1..2));
|
||||
|
||||
simple_partial.union_transcript(&other_simple_partial);
|
||||
|
||||
@@ -721,16 +835,12 @@ mod tests {
|
||||
simple_partial.received_unsafe(),
|
||||
[0, 1, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0]
|
||||
);
|
||||
assert_eq!(simple_partial.sent_authed(), &RangeSet::from([0..2, 3..5]));
|
||||
assert_eq!(
|
||||
simple_partial.received_authed(),
|
||||
&RangeSet::from([1..2, 3..7])
|
||||
);
|
||||
assert_eq!(simple_partial.sent_authed(), &Idx::new([0..2, 3..5]));
|
||||
assert_eq!(simple_partial.received_authed(), &Idx::new([1..2, 3..7]));
|
||||
|
||||
// Overwrite with another partial transcript.
|
||||
|
||||
let another_simple_partial =
|
||||
transcript.to_partial(RangeSet::from(1..4), RangeSet::from(6..9));
|
||||
let another_simple_partial = transcript.to_partial(Idx::new(1..4), Idx::new(6..9));
|
||||
|
||||
simple_partial.union_transcript(&another_simple_partial);
|
||||
|
||||
@@ -742,17 +852,13 @@ mod tests {
|
||||
simple_partial.received_unsafe(),
|
||||
[0, 1, 0, 3, 4, 5, 6, 7, 8, 0, 0, 0]
|
||||
);
|
||||
assert_eq!(simple_partial.sent_authed(), &RangeSet::from(0..5));
|
||||
assert_eq!(
|
||||
simple_partial.received_authed(),
|
||||
&RangeSet::from([1..2, 3..9])
|
||||
);
|
||||
assert_eq!(simple_partial.sent_authed(), &Idx::new(0..5));
|
||||
assert_eq!(simple_partial.received_authed(), &Idx::new([1..2, 3..9]));
|
||||
|
||||
// Overlapping ranges.
|
||||
let mut overlap_partial = transcript.to_partial(RangeSet::from(4..6), RangeSet::from(3..7));
|
||||
let mut overlap_partial = transcript.to_partial(Idx::new(4..6), Idx::new(3..7));
|
||||
|
||||
let other_overlap_partial =
|
||||
transcript.to_partial(RangeSet::from(3..5), RangeSet::from(5..9));
|
||||
let other_overlap_partial = transcript.to_partial(Idx::new(3..5), Idx::new(5..9));
|
||||
|
||||
overlap_partial.union_transcript(&other_overlap_partial);
|
||||
|
||||
@@ -764,16 +870,13 @@ mod tests {
|
||||
overlap_partial.received_unsafe(),
|
||||
[0, 0, 0, 3, 4, 5, 6, 7, 8, 0, 0, 0]
|
||||
);
|
||||
assert_eq!(overlap_partial.sent_authed(), &RangeSet::from([3..5, 4..6]));
|
||||
assert_eq!(
|
||||
overlap_partial.received_authed(),
|
||||
&RangeSet::from([3..7, 5..9])
|
||||
);
|
||||
assert_eq!(overlap_partial.sent_authed(), &Idx::new([3..5, 4..6]));
|
||||
assert_eq!(overlap_partial.received_authed(), &Idx::new([3..7, 5..9]));
|
||||
|
||||
// Equal ranges.
|
||||
let mut equal_partial = transcript.to_partial(RangeSet::from(4..6), RangeSet::from(3..7));
|
||||
let mut equal_partial = transcript.to_partial(Idx::new(4..6), Idx::new(3..7));
|
||||
|
||||
let other_equal_partial = transcript.to_partial(RangeSet::from(4..6), RangeSet::from(3..7));
|
||||
let other_equal_partial = transcript.to_partial(Idx::new(4..6), Idx::new(3..7));
|
||||
|
||||
equal_partial.union_transcript(&other_equal_partial);
|
||||
|
||||
@@ -785,15 +888,13 @@ mod tests {
|
||||
equal_partial.received_unsafe(),
|
||||
[0, 0, 0, 3, 4, 5, 6, 0, 0, 0, 0, 0]
|
||||
);
|
||||
assert_eq!(equal_partial.sent_authed(), &RangeSet::from(4..6));
|
||||
assert_eq!(equal_partial.received_authed(), &RangeSet::from(3..7));
|
||||
assert_eq!(equal_partial.sent_authed(), &Idx::new(4..6));
|
||||
assert_eq!(equal_partial.received_authed(), &Idx::new(3..7));
|
||||
|
||||
// Subset ranges.
|
||||
let mut subset_partial =
|
||||
transcript.to_partial(RangeSet::from(4..10), RangeSet::from(3..11));
|
||||
let mut subset_partial = transcript.to_partial(Idx::new(4..10), Idx::new(3..11));
|
||||
|
||||
let other_subset_partial =
|
||||
transcript.to_partial(RangeSet::from(6..9), RangeSet::from(5..6));
|
||||
let other_subset_partial = transcript.to_partial(Idx::new(6..9), Idx::new(5..6));
|
||||
|
||||
subset_partial.union_transcript(&other_subset_partial);
|
||||
|
||||
@@ -805,32 +906,30 @@ mod tests {
|
||||
subset_partial.received_unsafe(),
|
||||
[0, 0, 0, 3, 4, 5, 6, 7, 8, 9, 10, 0]
|
||||
);
|
||||
assert_eq!(subset_partial.sent_authed(), &RangeSet::from(4..10));
|
||||
assert_eq!(subset_partial.received_authed(), &RangeSet::from(3..11));
|
||||
assert_eq!(subset_partial.sent_authed(), &Idx::new(4..10));
|
||||
assert_eq!(subset_partial.received_authed(), &Idx::new(3..11));
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[should_panic]
|
||||
fn test_partial_transcript_union_failure(transcript: Transcript) {
|
||||
let mut partial = transcript.to_partial(RangeSet::from(4..10), RangeSet::from(3..11));
|
||||
let mut partial = transcript.to_partial(Idx::new(4..10), Idx::new(3..11));
|
||||
|
||||
let other_transcript = Transcript::new(
|
||||
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
|
||||
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
|
||||
);
|
||||
|
||||
let other_partial = other_transcript.to_partial(RangeSet::from(6..9), RangeSet::from(5..6));
|
||||
let other_partial = other_transcript.to_partial(Idx::new(6..9), Idx::new(5..6));
|
||||
|
||||
partial.union_transcript(&other_partial);
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_partial_transcript_union_subseq_success(transcript: Transcript) {
|
||||
let mut partial = transcript.to_partial(RangeSet::from(4..10), RangeSet::from(3..11));
|
||||
let sent_seq =
|
||||
Subsequence::new(RangeSet::from([0..3, 5..7]), [0, 1, 2, 5, 6].into()).unwrap();
|
||||
let recv_seq =
|
||||
Subsequence::new(RangeSet::from([0..4, 5..7]), [0, 1, 2, 3, 5, 6].into()).unwrap();
|
||||
let mut partial = transcript.to_partial(Idx::new(4..10), Idx::new(3..11));
|
||||
let sent_seq = Subsequence::new(Idx::new([0..3, 5..7]), [0, 1, 2, 5, 6].into()).unwrap();
|
||||
let recv_seq = Subsequence::new(Idx::new([0..4, 5..7]), [0, 1, 2, 3, 5, 6].into()).unwrap();
|
||||
|
||||
partial.union_subsequence(Direction::Sent, &sent_seq);
|
||||
partial.union_subsequence(Direction::Received, &recv_seq);
|
||||
@@ -840,31 +939,30 @@ mod tests {
|
||||
partial.received_unsafe(),
|
||||
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0]
|
||||
);
|
||||
assert_eq!(partial.sent_authed(), &RangeSet::from([0..3, 4..10]));
|
||||
assert_eq!(partial.received_authed(), &RangeSet::from(0..11));
|
||||
assert_eq!(partial.sent_authed(), &Idx::new([0..3, 4..10]));
|
||||
assert_eq!(partial.received_authed(), &Idx::new(0..11));
|
||||
|
||||
// Overwrite with another subseq.
|
||||
let other_sent_seq = Subsequence::new(RangeSet::from(0..3), [3, 2, 1].into()).unwrap();
|
||||
let other_sent_seq = Subsequence::new(Idx::new(0..3), [3, 2, 1].into()).unwrap();
|
||||
|
||||
partial.union_subsequence(Direction::Sent, &other_sent_seq);
|
||||
assert_eq!(partial.sent_unsafe(), [3, 2, 1, 0, 4, 5, 6, 7, 8, 9, 0, 0]);
|
||||
assert_eq!(partial.sent_authed(), &RangeSet::from([0..3, 4..10]));
|
||||
assert_eq!(partial.sent_authed(), &Idx::new([0..3, 4..10]));
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[should_panic]
|
||||
fn test_partial_transcript_union_subseq_failure(transcript: Transcript) {
|
||||
let mut partial = transcript.to_partial(RangeSet::from(4..10), RangeSet::from(3..11));
|
||||
let mut partial = transcript.to_partial(Idx::new(4..10), Idx::new(3..11));
|
||||
|
||||
let sent_seq =
|
||||
Subsequence::new(RangeSet::from([0..3, 13..15]), [0, 1, 2, 5, 6].into()).unwrap();
|
||||
let sent_seq = Subsequence::new(Idx::new([0..3, 13..15]), [0, 1, 2, 5, 6].into()).unwrap();
|
||||
|
||||
partial.union_subsequence(Direction::Sent, &sent_seq);
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_partial_transcript_set_unauthed_range(transcript: Transcript) {
|
||||
let mut partial = transcript.to_partial(RangeSet::from(4..10), RangeSet::from(3..7));
|
||||
let mut partial = transcript.to_partial(Idx::new(4..10), Idx::new(3..7));
|
||||
|
||||
partial.set_unauthed_range(7, Direction::Sent, 2..5);
|
||||
partial.set_unauthed_range(5, Direction::Sent, 0..2);
|
||||
@@ -881,13 +979,13 @@ mod tests {
|
||||
#[rstest]
|
||||
#[should_panic]
|
||||
fn test_subsequence_new_invalid_len() {
|
||||
let _ = Subsequence::new(RangeSet::from([0..3, 5..8]), [0, 1, 2, 5, 6].into()).unwrap();
|
||||
let _ = Subsequence::new(Idx::new([0..3, 5..8]), [0, 1, 2, 5, 6].into()).unwrap();
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[should_panic]
|
||||
fn test_subsequence_copy_to_invalid_len() {
|
||||
let seq = Subsequence::new(RangeSet::from([0..3, 5..7]), [0, 1, 2, 5, 6].into()).unwrap();
|
||||
let seq = Subsequence::new(Idx::new([0..3, 5..7]), [0, 1, 2, 5, 6].into()).unwrap();
|
||||
|
||||
let mut data: [u8; 3] = [0, 1, 2];
|
||||
seq.copy_to(&mut data);
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
use std::{collections::HashSet, fmt};
|
||||
|
||||
use rangeset::{ToRangeSet, UnionMut};
|
||||
use rangeset::ToRangeSet;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
@@ -10,7 +10,7 @@ use crate::{
|
||||
transcript::{
|
||||
encoding::{EncodingCommitment, EncodingTree},
|
||||
hash::{PlaintextHash, PlaintextHashSecret},
|
||||
Direction, RangeSet, Transcript,
|
||||
Direction, Idx, Transcript,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -71,7 +71,7 @@ pub struct TranscriptCommitConfig {
|
||||
encoding_hash_alg: HashAlgId,
|
||||
has_encoding: bool,
|
||||
has_hash: bool,
|
||||
commits: Vec<((Direction, RangeSet<usize>), TranscriptCommitmentKind)>,
|
||||
commits: Vec<((Direction, Idx), TranscriptCommitmentKind)>,
|
||||
}
|
||||
|
||||
impl TranscriptCommitConfig {
|
||||
@@ -96,7 +96,7 @@ impl TranscriptCommitConfig {
|
||||
}
|
||||
|
||||
/// Returns an iterator over the encoding commitment indices.
|
||||
pub fn iter_encoding(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>)> {
|
||||
pub fn iter_encoding(&self) -> impl Iterator<Item = &(Direction, Idx)> {
|
||||
self.commits.iter().filter_map(|(idx, kind)| match kind {
|
||||
TranscriptCommitmentKind::Encoding => Some(idx),
|
||||
_ => None,
|
||||
@@ -104,7 +104,7 @@ impl TranscriptCommitConfig {
|
||||
}
|
||||
|
||||
/// Returns an iterator over the hash commitment indices.
|
||||
pub fn iter_hash(&self) -> impl Iterator<Item = (&(Direction, RangeSet<usize>), &HashAlgId)> {
|
||||
pub fn iter_hash(&self) -> impl Iterator<Item = (&(Direction, Idx), &HashAlgId)> {
|
||||
self.commits.iter().filter_map(|(idx, kind)| match kind {
|
||||
TranscriptCommitmentKind::Hash { alg } => Some((idx, alg)),
|
||||
_ => None,
|
||||
@@ -114,19 +114,7 @@ impl TranscriptCommitConfig {
|
||||
/// Returns a request for the transcript commitments.
|
||||
pub fn to_request(&self) -> TranscriptCommitRequest {
|
||||
TranscriptCommitRequest {
|
||||
encoding: self.has_encoding.then(|| {
|
||||
let mut sent = RangeSet::default();
|
||||
let mut recv = RangeSet::default();
|
||||
|
||||
for (dir, idx) in self.iter_encoding() {
|
||||
match dir {
|
||||
Direction::Sent => sent.union_mut(idx),
|
||||
Direction::Received => recv.union_mut(idx),
|
||||
}
|
||||
}
|
||||
|
||||
(sent, recv)
|
||||
}),
|
||||
encoding: self.has_encoding,
|
||||
hash: self
|
||||
.iter_hash()
|
||||
.map(|((dir, idx), alg)| (*dir, idx.clone(), *alg))
|
||||
@@ -146,7 +134,7 @@ pub struct TranscriptCommitConfigBuilder<'a> {
|
||||
has_encoding: bool,
|
||||
has_hash: bool,
|
||||
default_kind: TranscriptCommitmentKind,
|
||||
commits: HashSet<((Direction, RangeSet<usize>), TranscriptCommitmentKind)>,
|
||||
commits: HashSet<((Direction, Idx), TranscriptCommitmentKind)>,
|
||||
}
|
||||
|
||||
impl<'a> TranscriptCommitConfigBuilder<'a> {
|
||||
@@ -187,15 +175,15 @@ impl<'a> TranscriptCommitConfigBuilder<'a> {
|
||||
direction: Direction,
|
||||
kind: TranscriptCommitmentKind,
|
||||
) -> Result<&mut Self, TranscriptCommitConfigBuilderError> {
|
||||
let idx = ranges.to_range_set();
|
||||
let idx = Idx::new(ranges.to_range_set());
|
||||
|
||||
if idx.end().unwrap_or(0) > self.transcript.len_of_direction(direction) {
|
||||
if idx.end() > self.transcript.len_of_direction(direction) {
|
||||
return Err(TranscriptCommitConfigBuilderError::new(
|
||||
ErrorKind::Index,
|
||||
format!(
|
||||
"range is out of bounds of the transcript ({}): {} > {}",
|
||||
direction,
|
||||
idx.end().unwrap_or(0),
|
||||
idx.end(),
|
||||
self.transcript.len_of_direction(direction)
|
||||
),
|
||||
));
|
||||
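
For context, a hedged sketch of how this commitment builder is typically driven; the `commit_sent`/`commit_recv` helpers are assumed here, as they sit outside this hunk:

// Sketch: commit to ranges of both directions, then build the config.
let mut builder = TranscriptCommitConfigBuilder::new(&transcript);
builder.commit_sent(&(0..10)).unwrap();
builder.commit_recv(&(0..20)).unwrap();
let config = builder.build().unwrap();
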
@@ -301,14 +289,14 @@ impl fmt::Display for TranscriptCommitConfigBuilderError {
|
||||
/// Request to compute transcript commitments.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct TranscriptCommitRequest {
|
||||
encoding: Option<(RangeSet<usize>, RangeSet<usize>)>,
|
||||
hash: Vec<(Direction, RangeSet<usize>, HashAlgId)>,
|
||||
encoding: bool,
|
||||
hash: Vec<(Direction, Idx, HashAlgId)>,
|
||||
}
|
||||
|
||||
impl TranscriptCommitRequest {
|
||||
/// Returns `true` if an encoding commitment is requested.
|
||||
pub fn has_encoding(&self) -> bool {
|
||||
self.encoding.is_some()
|
||||
pub fn encoding(&self) -> bool {
|
||||
self.encoding
|
||||
}
|
||||
|
||||
/// Returns `true` if a hash commitment is requested.
|
||||
@@ -317,14 +305,9 @@ impl TranscriptCommitRequest {
|
||||
}
|
||||
|
||||
/// Returns an iterator over the hash commitments.
|
||||
pub fn iter_hash(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>, HashAlgId)> {
|
||||
pub fn iter_hash(&self) -> impl Iterator<Item = &(Direction, Idx, HashAlgId)> {
|
||||
self.hash.iter()
|
||||
}
|
||||
|
||||
/// Returns the ranges of the encoding commitments.
|
||||
pub fn encoding(&self) -> Option<&(RangeSet<usize>, RangeSet<usize>)> {
|
||||
self.encoding.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -19,4 +19,6 @@ use crate::hash::TypedHash;
|
||||
pub struct EncodingCommitment {
|
||||
/// Merkle root of the encoding commitments.
|
||||
pub root: TypedHash,
|
||||
/// Seed used to generate the encodings.
|
||||
pub secret: EncoderSecret,
|
||||
}
|
||||
|
||||
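
A small sketch of the updated commitment shape: the encoder secret now travels inside the commitment rather than being passed to verification separately. The `tree`, `proof`, `sent`, and `recv` names are assumed from the tests further down.

// Sketch: the verifier derives the encoder from `commitment.secret` internally.
let commitment = EncodingCommitment {
    root: tree.root(),
    secret: encoder_secret(),
};
let (auth_sent, auth_recv) = proof
    .verify_with_provider(&HashProvider::default(), &commitment, sent, recv)
    .unwrap();
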
@@ -8,8 +8,8 @@ use crate::{
|
||||
merkle::{MerkleError, MerkleProof},
|
||||
transcript::{
|
||||
commit::MAX_TOTAL_COMMITTED_DATA,
|
||||
encoding::{new_encoder, Encoder, EncoderSecret, EncodingCommitment},
|
||||
Direction,
|
||||
encoding::{new_encoder, Encoder, EncodingCommitment},
|
||||
Direction, Idx,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -17,7 +17,7 @@ use crate::{
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
pub(super) struct Opening {
|
||||
pub(super) direction: Direction,
|
||||
pub(super) idx: RangeSet<usize>,
|
||||
pub(super) idx: Idx,
|
||||
pub(super) blinder: Blinder,
|
||||
}
|
||||
|
||||
@@ -48,14 +48,13 @@ impl EncodingProof {
|
||||
pub fn verify_with_provider(
|
||||
&self,
|
||||
provider: &HashProvider,
|
||||
secret: &EncoderSecret,
|
||||
commitment: &EncodingCommitment,
|
||||
sent: &[u8],
|
||||
recv: &[u8],
|
||||
) -> Result<(RangeSet<usize>, RangeSet<usize>), EncodingProofError> {
|
||||
) -> Result<(Idx, Idx), EncodingProofError> {
|
||||
let hasher = provider.get(&commitment.root.alg)?;
|
||||
|
||||
let encoder = new_encoder(secret);
|
||||
let encoder = new_encoder(&commitment.secret);
|
||||
let Self {
|
||||
inclusion_proof,
|
||||
openings,
|
||||
@@ -90,13 +89,13 @@ impl EncodingProof {
|
||||
};
|
||||
|
||||
// Make sure the ranges are within the bounds of the transcript.
|
||||
if idx.end().unwrap_or(0) > data.len() {
|
||||
if idx.end() > data.len() {
|
||||
return Err(EncodingProofError::new(
|
||||
ErrorKind::Proof,
|
||||
format!(
|
||||
"index out of bounds of the transcript ({}): {} > {}",
|
||||
direction,
|
||||
idx.end().unwrap_or(0),
|
||||
idx.end(),
|
||||
data.len()
|
||||
),
|
||||
));
|
||||
@@ -112,7 +111,7 @@ impl EncodingProof {
|
||||
// present in the merkle tree.
|
||||
leaves.push((*id, hasher.hash(&expected_leaf)));
|
||||
|
||||
auth.union_mut(idx);
|
||||
auth.union_mut(idx.as_range_set());
|
||||
}
|
||||
|
||||
// Verify that the expected hashes are present in the merkle tree.
|
||||
@@ -122,7 +121,7 @@ impl EncodingProof {
|
||||
// data is authentic.
|
||||
inclusion_proof.verify(hasher, &commitment.root, leaves)?;
|
||||
|
||||
Ok((auth_sent, auth_recv))
|
||||
Ok((Idx(auth_sent), Idx(auth_recv)))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -233,7 +232,10 @@ mod test {
|
||||
use crate::{
|
||||
fixtures::{encoder_secret, encoder_secret_tampered_seed, encoding_provider},
|
||||
hash::Blake3,
|
||||
transcript::{encoding::EncodingTree, Transcript},
|
||||
transcript::{
|
||||
encoding::{EncoderSecret, EncodingTree},
|
||||
Idx, Transcript,
|
||||
},
|
||||
};
|
||||
|
||||
use super::*;
|
||||
@@ -244,18 +246,21 @@ mod test {
|
||||
commitment: EncodingCommitment,
|
||||
}
|
||||
|
||||
fn new_encoding_fixture() -> EncodingFixture {
|
||||
fn new_encoding_fixture(secret: EncoderSecret) -> EncodingFixture {
|
||||
let transcript = Transcript::new(POST_JSON, OK_JSON);
|
||||
|
||||
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
|
||||
let idx_1 = (Direction::Received, RangeSet::from(0..OK_JSON.len()));
|
||||
let idx_0 = (Direction::Sent, Idx::new(0..POST_JSON.len()));
|
||||
let idx_1 = (Direction::Received, Idx::new(0..OK_JSON.len()));
|
||||
|
||||
let provider = encoding_provider(transcript.sent(), transcript.received());
|
||||
let tree = EncodingTree::new(&Blake3::default(), [&idx_0, &idx_1], &provider).unwrap();
|
||||
|
||||
let proof = tree.proof([&idx_0, &idx_1].into_iter()).unwrap();
|
||||
|
||||
let commitment = EncodingCommitment { root: tree.root() };
|
||||
let commitment = EncodingCommitment {
|
||||
root: tree.root(),
|
||||
secret,
|
||||
};
|
||||
|
||||
EncodingFixture {
|
||||
transcript,
|
||||
@@ -270,12 +275,11 @@ mod test {
|
||||
transcript,
|
||||
proof,
|
||||
commitment,
|
||||
} = new_encoding_fixture();
|
||||
} = new_encoding_fixture(encoder_secret_tampered_seed());
|
||||
|
||||
let err = proof
|
||||
.verify_with_provider(
|
||||
&HashProvider::default(),
|
||||
&encoder_secret_tampered_seed(),
|
||||
&commitment,
|
||||
transcript.sent(),
|
||||
transcript.received(),
|
||||
@@ -291,19 +295,13 @@ mod test {
|
||||
transcript,
|
||||
proof,
|
||||
commitment,
|
||||
} = new_encoding_fixture();
|
||||
} = new_encoding_fixture(encoder_secret());
|
||||
|
||||
let sent = &transcript.sent()[transcript.sent().len() - 1..];
|
||||
let recv = &transcript.received()[transcript.received().len() - 2..];
|
||||
|
||||
let err = proof
|
||||
.verify_with_provider(
|
||||
&HashProvider::default(),
|
||||
&encoder_secret(),
|
||||
&commitment,
|
||||
sent,
|
||||
recv,
|
||||
)
|
||||
.verify_with_provider(&HashProvider::default(), &commitment, sent, recv)
|
||||
.unwrap_err();
|
||||
|
||||
assert!(matches!(err.kind, ErrorKind::Proof));
|
||||
@@ -315,16 +313,15 @@ mod test {
|
||||
transcript,
|
||||
mut proof,
|
||||
commitment,
|
||||
} = new_encoding_fixture();
|
||||
} = new_encoding_fixture(encoder_secret());
|
||||
|
||||
let Opening { idx, .. } = proof.openings.values_mut().next().unwrap();
|
||||
|
||||
*idx = RangeSet::from([0..3, 13..15]);
|
||||
*idx = Idx::new([0..3, 13..15]);
|
||||
|
||||
let err = proof
|
||||
.verify_with_provider(
|
||||
&HashProvider::default(),
|
||||
&encoder_secret(),
|
||||
&commitment,
|
||||
transcript.sent(),
|
||||
transcript.received(),
|
||||
@@ -340,7 +337,7 @@ mod test {
|
||||
transcript,
|
||||
mut proof,
|
||||
commitment,
|
||||
} = new_encoding_fixture();
|
||||
} = new_encoding_fixture(encoder_secret());
|
||||
|
||||
let Opening { blinder, .. } = proof.openings.values_mut().next().unwrap();
|
||||
|
||||
@@ -349,7 +346,6 @@ mod test {
|
||||
let err = proof
|
||||
.verify_with_provider(
|
||||
&HashProvider::default(),
|
||||
&encoder_secret(),
|
||||
&commitment,
|
||||
transcript.sent(),
|
||||
transcript.received(),
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use bimap::BiMap;
|
||||
use rangeset::{RangeSet, UnionMut};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
@@ -12,7 +11,7 @@ use crate::{
|
||||
proof::{EncodingProof, Opening},
|
||||
EncodingProvider,
|
||||
},
|
||||
Direction,
|
||||
Direction, Idx,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -23,7 +22,7 @@ pub enum EncodingTreeError {
|
||||
#[error("index is out of bounds of the transcript")]
|
||||
OutOfBounds {
|
||||
/// The index.
|
||||
index: RangeSet<usize>,
|
||||
index: Idx,
|
||||
/// The transcript length.
|
||||
transcript_length: usize,
|
||||
},
|
||||
@@ -31,13 +30,13 @@ pub enum EncodingTreeError {
|
||||
#[error("encoding provider is missing an encoding for an index")]
|
||||
MissingEncoding {
|
||||
/// The index which is missing.
|
||||
index: RangeSet<usize>,
|
||||
index: Idx,
|
||||
},
|
||||
/// Index is missing from the tree.
|
||||
#[error("index is missing from the tree")]
|
||||
MissingLeaf {
|
||||
/// The index which is missing.
|
||||
index: RangeSet<usize>,
|
||||
index: Idx,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -50,11 +49,11 @@ pub struct EncodingTree {
|
||||
blinders: Vec<Blinder>,
|
||||
/// Mapping between the index of a leaf and the transcript index it
|
||||
/// corresponds to.
|
||||
idxs: BiMap<usize, (Direction, RangeSet<usize>)>,
|
||||
idxs: BiMap<usize, (Direction, Idx)>,
|
||||
/// Union of all transcript indices in the sent direction.
|
||||
sent_idx: RangeSet<usize>,
|
||||
sent_idx: Idx,
|
||||
/// Union of all transcript indices in the received direction.
|
||||
received_idx: RangeSet<usize>,
|
||||
received_idx: Idx,
|
||||
}
|
||||
|
||||
opaque_debug::implement!(EncodingTree);
|
||||
@@ -69,15 +68,15 @@ impl EncodingTree {
|
||||
/// * `provider` - The encoding provider.
|
||||
pub fn new<'idx>(
|
||||
hasher: &dyn HashAlgorithm,
|
||||
idxs: impl IntoIterator<Item = &'idx (Direction, RangeSet<usize>)>,
|
||||
idxs: impl IntoIterator<Item = &'idx (Direction, Idx)>,
|
||||
provider: &dyn EncodingProvider,
|
||||
) -> Result<Self, EncodingTreeError> {
|
||||
let mut this = Self {
|
||||
tree: MerkleTree::new(hasher.id()),
|
||||
blinders: Vec::new(),
|
||||
idxs: BiMap::new(),
|
||||
sent_idx: RangeSet::default(),
|
||||
received_idx: RangeSet::default(),
|
||||
sent_idx: Idx::empty(),
|
||||
received_idx: Idx::empty(),
|
||||
};
|
||||
|
||||
let mut leaves = Vec::new();
|
||||
@@ -139,7 +138,7 @@ impl EncodingTree {
|
||||
/// * `idxs` - The transcript indices to prove.
|
||||
pub fn proof<'idx>(
|
||||
&self,
|
||||
idxs: impl Iterator<Item = &'idx (Direction, RangeSet<usize>)>,
|
||||
idxs: impl Iterator<Item = &'idx (Direction, Idx)>,
|
||||
) -> Result<EncodingProof, EncodingTreeError> {
|
||||
let mut openings = HashMap::new();
|
||||
for dir_idx in idxs {
|
||||
@@ -172,11 +171,11 @@ impl EncodingTree {
|
||||
}
|
||||
|
||||
/// Returns whether the tree contains the given transcript index.
|
||||
pub fn contains(&self, idx: &(Direction, RangeSet<usize>)) -> bool {
|
||||
pub fn contains(&self, idx: &(Direction, Idx)) -> bool {
|
||||
self.idxs.contains_right(idx)
|
||||
}
|
||||
|
||||
pub(crate) fn idx(&self, direction: Direction) -> &RangeSet<usize> {
|
||||
pub(crate) fn idx(&self, direction: Direction) -> &Idx {
|
||||
match direction {
|
||||
Direction::Sent => &self.sent_idx,
|
||||
Direction::Received => &self.received_idx,
|
||||
@@ -184,7 +183,7 @@ impl EncodingTree {
|
||||
}
|
||||
|
||||
/// Returns the committed transcript indices.
|
||||
pub(crate) fn transcript_indices(&self) -> impl Iterator<Item = &(Direction, RangeSet<usize>)> {
|
||||
pub(crate) fn transcript_indices(&self) -> impl Iterator<Item = &(Direction, Idx)> {
|
||||
self.idxs.right_values()
|
||||
}
|
||||
}
|
||||
@@ -201,7 +200,7 @@ mod tests {
|
||||
|
||||
fn new_tree<'seq>(
|
||||
transcript: &Transcript,
|
||||
idxs: impl Iterator<Item = &'seq (Direction, RangeSet<usize>)>,
|
||||
idxs: impl Iterator<Item = &'seq (Direction, Idx)>,
|
||||
) -> Result<EncodingTree, EncodingTreeError> {
|
||||
let provider = encoding_provider(transcript.sent(), transcript.received());
|
||||
|
||||
@@ -212,8 +211,8 @@ mod tests {
|
||||
fn test_encoding_tree() {
|
||||
let transcript = Transcript::new(POST_JSON, OK_JSON);
|
||||
|
||||
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
|
||||
let idx_1 = (Direction::Received, RangeSet::from(0..OK_JSON.len()));
|
||||
let idx_0 = (Direction::Sent, Idx::new(0..POST_JSON.len()));
|
||||
let idx_1 = (Direction::Received, Idx::new(0..OK_JSON.len()));
|
||||
|
||||
let tree = new_tree(&transcript, [&idx_0, &idx_1].into_iter()).unwrap();
|
||||
|
||||
@@ -222,12 +221,14 @@ mod tests {
|
||||
|
||||
let proof = tree.proof([&idx_0, &idx_1].into_iter()).unwrap();
|
||||
|
||||
let commitment = EncodingCommitment { root: tree.root() };
|
||||
let commitment = EncodingCommitment {
|
||||
root: tree.root(),
|
||||
secret: encoder_secret(),
|
||||
};
|
||||
|
||||
let (auth_sent, auth_recv) = proof
|
||||
.verify_with_provider(
|
||||
&HashProvider::default(),
|
||||
&encoder_secret(),
|
||||
&commitment,
|
||||
transcript.sent(),
|
||||
transcript.received(),
|
||||
@@ -242,10 +243,10 @@ mod tests {
|
||||
fn test_encoding_tree_multiple_ranges() {
|
||||
let transcript = Transcript::new(POST_JSON, OK_JSON);
|
||||
|
||||
let idx_0 = (Direction::Sent, RangeSet::from(0..1));
|
||||
let idx_1 = (Direction::Sent, RangeSet::from(1..POST_JSON.len()));
|
||||
let idx_2 = (Direction::Received, RangeSet::from(0..1));
|
||||
let idx_3 = (Direction::Received, RangeSet::from(1..OK_JSON.len()));
|
||||
let idx_0 = (Direction::Sent, Idx::new(0..1));
|
||||
let idx_1 = (Direction::Sent, Idx::new(1..POST_JSON.len()));
|
||||
let idx_2 = (Direction::Received, Idx::new(0..1));
|
||||
let idx_3 = (Direction::Received, Idx::new(1..OK_JSON.len()));
|
||||
|
||||
let tree = new_tree(&transcript, [&idx_0, &idx_1, &idx_2, &idx_3].into_iter()).unwrap();
|
||||
|
||||
@@ -258,23 +259,25 @@ mod tests {
|
||||
.proof([&idx_0, &idx_1, &idx_2, &idx_3].into_iter())
|
||||
.unwrap();
|
||||
|
||||
let commitment = EncodingCommitment { root: tree.root() };
|
||||
let commitment = EncodingCommitment {
|
||||
root: tree.root(),
|
||||
secret: encoder_secret(),
|
||||
};
|
||||
|
||||
let (auth_sent, auth_recv) = proof
|
||||
.verify_with_provider(
|
||||
&HashProvider::default(),
|
||||
&encoder_secret(),
|
||||
&commitment,
|
||||
transcript.sent(),
|
||||
transcript.received(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let mut expected_auth_sent = RangeSet::default();
|
||||
let mut expected_auth_sent = Idx::default();
|
||||
expected_auth_sent.union_mut(&idx_0.1);
|
||||
expected_auth_sent.union_mut(&idx_1.1);
|
||||
|
||||
let mut expected_auth_recv = RangeSet::default();
|
||||
let mut expected_auth_recv = Idx::default();
|
||||
expected_auth_recv.union_mut(&idx_2.1);
|
||||
expected_auth_recv.union_mut(&idx_3.1);
|
||||
|
||||
@@ -286,9 +289,9 @@ mod tests {
|
||||
fn test_encoding_tree_proof_missing_leaf() {
|
||||
let transcript = Transcript::new(POST_JSON, OK_JSON);
|
||||
|
||||
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len()));
|
||||
let idx_1 = (Direction::Received, RangeSet::from(0..4));
|
||||
let idx_2 = (Direction::Received, RangeSet::from(4..OK_JSON.len()));
|
||||
let idx_0 = (Direction::Sent, Idx::new(0..POST_JSON.len()));
|
||||
let idx_1 = (Direction::Received, Idx::new(0..4));
|
||||
let idx_2 = (Direction::Received, Idx::new(4..OK_JSON.len()));
|
||||
|
||||
let tree = new_tree(&transcript, [&idx_0, &idx_1].into_iter()).unwrap();
|
||||
|
||||
@@ -302,8 +305,8 @@ mod tests {
|
||||
fn test_encoding_tree_out_of_bounds() {
|
||||
let transcript = Transcript::new(POST_JSON, OK_JSON);
|
||||
|
||||
let idx_0 = (Direction::Sent, RangeSet::from(0..POST_JSON.len() + 1));
|
||||
let idx_1 = (Direction::Received, RangeSet::from(0..OK_JSON.len() + 1));
|
||||
let idx_0 = (Direction::Sent, Idx::new(0..POST_JSON.len() + 1));
|
||||
let idx_1 = (Direction::Received, Idx::new(0..OK_JSON.len() + 1));
|
||||
|
||||
let result = new_tree(&transcript, [&idx_0].into_iter()).unwrap_err();
|
||||
assert!(matches!(result, EncodingTreeError::MissingEncoding { .. }));
|
||||
@@ -318,7 +321,7 @@ mod tests {
|
||||
|
||||
let result = EncodingTree::new(
|
||||
&Blake3::default(),
|
||||
[(Direction::Sent, RangeSet::from(0..8))].iter(),
|
||||
[(Direction::Sent, Idx::new(0..8))].iter(),
|
||||
&provider,
|
||||
)
|
||||
.unwrap_err();
|
||||
|
||||
@@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
hash::{Blinder, HashAlgId, HashAlgorithm, TypedHash},
|
||||
transcript::{Direction, RangeSet},
|
||||
transcript::{Direction, Idx},
|
||||
};
|
||||
|
||||
/// Hashes plaintext with a blinder.
|
||||
@@ -23,7 +23,7 @@ pub struct PlaintextHash {
|
||||
/// Direction of the plaintext.
|
||||
pub direction: Direction,
|
||||
/// Index of plaintext.
|
||||
pub idx: RangeSet<usize>,
|
||||
pub idx: Idx,
|
||||
/// The hash of the data.
|
||||
pub hash: TypedHash,
|
||||
}
|
||||
@@ -34,7 +34,7 @@ pub struct PlaintextHashSecret {
|
||||
/// Direction of the plaintext.
|
||||
pub direction: Direction,
|
||||
/// Index of plaintext.
|
||||
pub idx: RangeSet<usize>,
|
||||
pub idx: Idx,
|
||||
/// The algorithm of the hash.
|
||||
pub alg: HashAlgId,
|
||||
/// Blinder for the hash.
|
||||
|
||||
@@ -1,18 +1,17 @@
|
||||
//! Transcript proofs.
|
||||
|
||||
use rangeset::{Cover, Difference, Subset, ToRangeSet, UnionMut};
|
||||
use rangeset::{Cover, ToRangeSet};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{collections::HashSet, fmt};
|
||||
|
||||
use crate::{
|
||||
connection::TranscriptLength,
|
||||
display::FmtRangeSet,
|
||||
hash::{HashAlgId, HashProvider},
|
||||
transcript::{
|
||||
commit::{TranscriptCommitment, TranscriptCommitmentKind},
|
||||
encoding::{EncoderSecret, EncodingProof, EncodingProofError, EncodingTree},
|
||||
encoding::{EncodingProof, EncodingProofError, EncodingTree},
|
||||
hash::{hash_plaintext, PlaintextHash, PlaintextHashSecret},
|
||||
Direction, PartialTranscript, RangeSet, Transcript, TranscriptSecret,
|
||||
Direction, Idx, PartialTranscript, Transcript, TranscriptSecret,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -22,9 +21,6 @@ const DEFAULT_COMMITMENT_KINDS: &[TranscriptCommitmentKind] = &[
|
||||
TranscriptCommitmentKind::Hash {
|
||||
alg: HashAlgId::SHA256,
|
||||
},
|
||||
TranscriptCommitmentKind::Hash {
|
||||
alg: HashAlgId::BLAKE3,
|
||||
},
|
||||
TranscriptCommitmentKind::Encoding,
|
||||
];
|
||||
|
||||
@@ -51,7 +47,6 @@ impl TranscriptProof {
|
||||
self,
|
||||
provider: &HashProvider,
|
||||
length: &TranscriptLength,
|
||||
encoder_secret: Option<&EncoderSecret>,
|
||||
commitments: impl IntoIterator<Item = &'a TranscriptCommitment>,
|
||||
) -> Result<PartialTranscript, TranscriptProofError> {
|
||||
let mut encoding_commitment = None;
|
||||
@@ -82,18 +77,11 @@ impl TranscriptProof {
|
||||
));
|
||||
}
|
||||
|
||||
let mut total_auth_sent = RangeSet::default();
|
||||
let mut total_auth_recv = RangeSet::default();
|
||||
let mut total_auth_sent = Idx::default();
|
||||
let mut total_auth_recv = Idx::default();
|
||||
|
||||
// Verify encoding proof.
|
||||
if let Some(proof) = self.encoding_proof {
|
||||
let secret = encoder_secret.ok_or_else(|| {
|
||||
TranscriptProofError::new(
|
||||
ErrorKind::Encoding,
|
||||
"contains an encoding proof but missing encoder secret",
|
||||
)
|
||||
})?;
|
||||
|
||||
let commitment = encoding_commitment.ok_or_else(|| {
|
||||
TranscriptProofError::new(
|
||||
ErrorKind::Encoding,
|
||||
@@ -103,7 +91,6 @@ impl TranscriptProof {
|
||||
|
||||
let (auth_sent, auth_recv) = proof.verify_with_provider(
|
||||
provider,
|
||||
secret,
|
||||
commitment,
|
||||
self.transcript.sent_unsafe(),
|
||||
self.transcript.received_unsafe(),
|
||||
@@ -133,7 +120,7 @@ impl TranscriptProof {
|
||||
Direction::Received => (self.transcript.received_unsafe(), &mut total_auth_recv),
|
||||
};
|
||||
|
||||
if idx.end().unwrap_or(0) > plaintext.len() {
|
||||
if idx.end() > plaintext.len() {
|
||||
return Err(TranscriptProofError::new(
|
||||
ErrorKind::Hash,
|
||||
"hash opening index is out of bounds",
|
||||
@@ -228,15 +215,15 @@ impl From<EncodingProofError> for TranscriptProofError {
|
||||
/// Union of ranges to reveal.
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
struct QueryIdx {
|
||||
sent: RangeSet<usize>,
|
||||
recv: RangeSet<usize>,
|
||||
sent: Idx,
|
||||
recv: Idx,
|
||||
}
|
||||
|
||||
impl QueryIdx {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
sent: RangeSet::default(),
|
||||
recv: RangeSet::default(),
|
||||
sent: Idx::empty(),
|
||||
recv: Idx::empty(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -244,7 +231,7 @@ impl QueryIdx {
|
||||
self.sent.is_empty() && self.recv.is_empty()
|
||||
}
|
||||
|
||||
fn union(&mut self, direction: &Direction, other: &RangeSet<usize>) {
|
||||
fn union(&mut self, direction: &Direction, other: &Idx) {
|
||||
match direction {
|
||||
Direction::Sent => self.sent.union_mut(other),
|
||||
Direction::Received => self.recv.union_mut(other),
|
||||
@@ -254,12 +241,7 @@ impl QueryIdx {
|
||||
|
||||
impl std::fmt::Display for QueryIdx {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"sent: {}, received: {}",
|
||||
FmtRangeSet(&self.sent),
|
||||
FmtRangeSet(&self.recv)
|
||||
)
|
||||
write!(f, "sent: {}, received: {}", self.sent, self.recv)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -271,8 +253,8 @@ pub struct TranscriptProofBuilder<'a> {
|
||||
transcript: &'a Transcript,
|
||||
encoding_tree: Option<&'a EncodingTree>,
|
||||
hash_secrets: Vec<&'a PlaintextHashSecret>,
|
||||
committed_sent: RangeSet<usize>,
|
||||
committed_recv: RangeSet<usize>,
|
||||
committed_sent: Idx,
|
||||
committed_recv: Idx,
|
||||
query_idx: QueryIdx,
|
||||
}
|
||||
|
||||
@@ -282,8 +264,8 @@ impl<'a> TranscriptProofBuilder<'a> {
|
||||
transcript: &'a Transcript,
|
||||
secrets: impl IntoIterator<Item = &'a TranscriptSecret>,
|
||||
) -> Self {
|
||||
let mut committed_sent = RangeSet::default();
|
||||
let mut committed_recv = RangeSet::default();
|
||||
let mut committed_sent = Idx::empty();
|
||||
let mut committed_recv = Idx::empty();
|
||||
|
||||
let mut encoding_tree = None;
|
||||
let mut hash_secrets = Vec::new();
|
||||
@@ -341,15 +323,15 @@ impl<'a> TranscriptProofBuilder<'a> {
|
||||
ranges: &dyn ToRangeSet<usize>,
|
||||
direction: Direction,
|
||||
) -> Result<&mut Self, TranscriptProofBuilderError> {
|
||||
let idx = ranges.to_range_set();
|
||||
let idx = Idx::new(ranges.to_range_set());
|
||||
|
||||
if idx.end().unwrap_or(0) > self.transcript.len_of_direction(direction) {
|
||||
if idx.end() > self.transcript.len_of_direction(direction) {
|
||||
return Err(TranscriptProofBuilderError::new(
|
||||
BuilderErrorKind::Index,
|
||||
format!(
|
||||
"range is out of bounds of the transcript ({}): {} > {}",
|
||||
direction,
|
||||
idx.end().unwrap_or(0),
|
||||
idx.end(),
|
||||
self.transcript.len_of_direction(direction)
|
||||
),
|
||||
));
|
||||
@@ -366,10 +348,7 @@ impl<'a> TranscriptProofBuilder<'a> {
|
||||
let missing = idx.difference(committed);
|
||||
return Err(TranscriptProofBuilderError::new(
|
||||
BuilderErrorKind::MissingCommitment,
|
||||
format!(
|
||||
"commitment is missing for ranges in {direction} transcript: {}",
|
||||
FmtRangeSet(&missing)
|
||||
),
|
||||
format!("commitment is missing for ranges in {direction} transcript: {missing}"),
|
||||
));
|
||||
}
|
||||
Ok(self)
|
||||
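
Illustrative sketch of driving the reveal API above; the `reveal_sent`/`reveal_recv`/`build` helpers are assumed here, and `builder` stands for a `TranscriptProofBuilder` obtained from the prover's transcript and secrets:

// Sketch: reveal committed ranges, then build the transcript proof.
builder.reveal_sent(&(0..5)).unwrap();
builder.reveal_recv(&(0..10)).unwrap();
let proof = builder.build().unwrap();
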
@@ -424,23 +403,25 @@ impl<'a> TranscriptProofBuilder<'a> {
|
||||
continue;
|
||||
};
|
||||
|
||||
let (sent_dir_idxs, sent_uncovered) = uncovered_query_idx.sent.cover_by(
|
||||
encoding_tree
|
||||
.transcript_indices()
|
||||
.filter(|(dir, _)| *dir == Direction::Sent),
|
||||
|(_, idx)| idx,
|
||||
);
|
||||
let (sent_dir_idxs, sent_uncovered) =
|
||||
uncovered_query_idx.sent.as_range_set().cover_by(
|
||||
encoding_tree
|
||||
.transcript_indices()
|
||||
.filter(|(dir, _)| *dir == Direction::Sent),
|
||||
|(_, idx)| &idx.0,
|
||||
);
|
||||
// Uncovered ranges will be checked with ranges of the next
|
||||
// preferred commitment kind.
|
||||
uncovered_query_idx.sent = sent_uncovered;
|
||||
uncovered_query_idx.sent = Idx(sent_uncovered);
|
||||
|
||||
let (recv_dir_idxs, recv_uncovered) = uncovered_query_idx.recv.cover_by(
|
||||
encoding_tree
|
||||
.transcript_indices()
|
||||
.filter(|(dir, _)| *dir == Direction::Received),
|
||||
|(_, idx)| idx,
|
||||
);
|
||||
uncovered_query_idx.recv = recv_uncovered;
|
||||
let (recv_dir_idxs, recv_uncovered) =
|
||||
uncovered_query_idx.recv.as_range_set().cover_by(
|
||||
encoding_tree
|
||||
.transcript_indices()
|
||||
.filter(|(dir, _)| *dir == Direction::Received),
|
||||
|(_, idx)| &idx.0,
|
||||
);
|
||||
uncovered_query_idx.recv = Idx(recv_uncovered);
|
||||
|
||||
let dir_idxs = sent_dir_idxs
|
||||
.into_iter()
|
||||
@@ -458,23 +439,25 @@ impl<'a> TranscriptProofBuilder<'a> {
|
||||
}
|
||||
}
|
||||
TranscriptCommitmentKind::Hash { alg } => {
|
||||
let (sent_hashes, sent_uncovered) = uncovered_query_idx.sent.cover_by(
|
||||
self.hash_secrets.iter().filter(|hash| {
|
||||
hash.direction == Direction::Sent && &hash.alg == alg
|
||||
}),
|
||||
|hash| &hash.idx,
|
||||
);
|
||||
let (sent_hashes, sent_uncovered) =
|
||||
uncovered_query_idx.sent.as_range_set().cover_by(
|
||||
self.hash_secrets.iter().filter(|hash| {
|
||||
hash.direction == Direction::Sent && &hash.alg == alg
|
||||
}),
|
||||
|hash| &hash.idx.0,
|
||||
);
|
||||
// Uncovered ranges will be checked with ranges of the next
|
||||
// preferred commitment kind.
|
||||
uncovered_query_idx.sent = sent_uncovered;
|
||||
uncovered_query_idx.sent = Idx(sent_uncovered);
|
||||
|
||||
let (recv_hashes, recv_uncovered) = uncovered_query_idx.recv.cover_by(
|
||||
self.hash_secrets.iter().filter(|hash| {
|
||||
hash.direction == Direction::Received && &hash.alg == alg
|
||||
}),
|
||||
|hash| &hash.idx,
|
||||
);
|
||||
uncovered_query_idx.recv = recv_uncovered;
|
||||
let (recv_hashes, recv_uncovered) =
|
||||
uncovered_query_idx.recv.as_range_set().cover_by(
|
||||
self.hash_secrets.iter().filter(|hash| {
|
||||
hash.direction == Direction::Received && &hash.alg == alg
|
||||
}),
|
||||
|hash| &hash.idx.0,
|
||||
);
|
||||
uncovered_query_idx.recv = Idx(recv_uncovered);
|
||||
|
||||
transcript_proof.hash_secrets.extend(
|
||||
sent_hashes
|
||||
@@ -584,7 +567,7 @@ mod tests {
|
||||
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
|
||||
|
||||
use crate::{
|
||||
fixtures::{encoder_secret, encoding_provider},
|
||||
fixtures::encoding_provider,
|
||||
hash::{Blake3, Blinder, HashAlgId},
|
||||
transcript::TranscriptCommitConfigBuilder,
|
||||
};
|
||||
@@ -594,7 +577,7 @@ mod tests {
|
||||
#[rstest]
|
||||
fn test_verify_missing_encoding_commitment_root() {
|
||||
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
|
||||
let idxs = vec![(Direction::Received, RangeSet::from(0..transcript.len().1))];
|
||||
let idxs = vec![(Direction::Received, Idx::new(0..transcript.len().1))];
|
||||
let encoding_tree = EncodingTree::new(
|
||||
&Blake3::default(),
|
||||
&idxs,
|
||||
@@ -611,12 +594,7 @@ mod tests {
|
||||
|
||||
let provider = HashProvider::default();
|
||||
let err = transcript_proof
|
||||
.verify_with_provider(
|
||||
&provider,
|
||||
&transcript.length(),
|
||||
Some(&encoder_secret()),
|
||||
&[],
|
||||
)
|
||||
.verify_with_provider(&provider, &transcript.length(), &[])
|
||||
.err()
|
||||
.unwrap();
|
||||
|
||||
@@ -654,16 +632,15 @@ mod tests {
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[case::sha256(HashAlgId::SHA256)]
|
||||
#[case::blake3(HashAlgId::BLAKE3)]
|
||||
fn test_reveal_with_hash_commitment(#[case] alg: HashAlgId) {
|
||||
fn test_reveal_with_hash_commitment() {
|
||||
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
|
||||
let provider = HashProvider::default();
|
||||
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
|
||||
|
||||
let direction = Direction::Sent;
|
||||
let idx = RangeSet::from(0..10);
|
||||
let idx = Idx::new(0..10);
|
||||
let blinder: Blinder = rng.random();
|
||||
let alg = HashAlgId::SHA256;
|
||||
let hasher = provider.get(&alg).unwrap();
|
||||
|
||||
let commitment = PlaintextHash {
|
||||
@@ -690,7 +667,6 @@ mod tests {
|
||||
.verify_with_provider(
|
||||
&provider,
|
||||
&transcript.length(),
|
||||
None,
|
||||
&[TranscriptCommitment::Hash(commitment)],
|
||||
)
|
||||
.unwrap();
|
||||
@@ -702,16 +678,15 @@ mod tests {
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[case::sha256(HashAlgId::SHA256)]
|
||||
#[case::blake3(HashAlgId::BLAKE3)]
|
||||
fn test_reveal_with_inconsistent_hash_commitment(#[case] alg: HashAlgId) {
|
||||
fn test_reveal_with_inconsistent_hash_commitment() {
|
||||
let mut rng = rand::rngs::StdRng::seed_from_u64(0);
|
||||
let provider = HashProvider::default();
|
||||
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
|
||||
|
||||
let direction = Direction::Sent;
|
||||
let idx = RangeSet::from(0..10);
|
||||
let idx = Idx::new(0..10);
|
||||
let blinder: Blinder = rng.random();
|
||||
let alg = HashAlgId::SHA256;
|
||||
let hasher = provider.get(&alg).unwrap();
|
||||
|
||||
let commitment = PlaintextHash {
|
||||
@@ -739,7 +714,6 @@ mod tests {
|
||||
.verify_with_provider(
|
||||
&provider,
|
||||
&transcript.length(),
|
||||
None,
|
||||
&[TranscriptCommitment::Hash(commitment)],
|
||||
)
|
||||
.unwrap_err();
|
||||
@@ -920,10 +894,10 @@ mod tests {
|
||||
match kind {
|
||||
BuilderErrorKind::Cover { uncovered, .. } => {
|
||||
if !uncovered_sent_rangeset.is_empty() {
|
||||
assert_eq!(uncovered.sent, uncovered_sent_rangeset);
|
||||
assert_eq!(uncovered.sent, Idx(uncovered_sent_rangeset));
|
||||
}
|
||||
if !uncovered_recv_rangeset.is_empty() {
|
||||
assert_eq!(uncovered.recv, uncovered_recv_rangeset);
|
||||
assert_eq!(uncovered.recv, Idx(uncovered_recv_rangeset));
|
||||
}
|
||||
}
|
||||
_ => panic!("unexpected error kind: {kind:?}"),
|
||||
|
||||
@@ -7,6 +7,7 @@ use crate::{
|
||||
transcript::{Direction, Transcript},
|
||||
webpki::CertificateDer,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tls_core::msgs::{
|
||||
alert::AlertMessagePayload,
|
||||
codec::{Codec, Reader},
|
||||
@@ -15,7 +16,7 @@ use tls_core::msgs::{
|
||||
};
|
||||
|
||||
/// A transcript of TLS records sent and received by the prover.
|
||||
#[derive(Debug, Clone)]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct TlsTranscript {
|
||||
time: u64,
|
||||
version: TlsVersion,
|
||||
@@ -291,7 +292,7 @@ impl TlsTranscript {
|
||||
}
|
||||
|
||||
/// A TLS record.
|
||||
#[derive(Clone)]
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
pub struct Record {
|
||||
/// Sequence number.
|
||||
pub seq: u64,
|
||||
|
||||
@@ -18,13 +18,13 @@ spansy = { workspace = true }
|
||||
bincode = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
clap = { version = "4.5", features = ["derive"] }
|
||||
dotenv = { version = "0.15.0" }
|
||||
futures = { workspace = true }
|
||||
http-body-util = { workspace = true }
|
||||
hex = { workspace = true }
|
||||
hyper = { workspace = true, features = ["client", "http1"] }
|
||||
hyper-util = { workspace = true, features = ["full"] }
|
||||
k256 = { workspace = true, features = ["ecdsa"] }
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
serde_json = { workspace = true }
|
||||
tokio = { workspace = true, features = [
|
||||
"rt",
|
||||
@@ -37,24 +37,7 @@ tokio = { workspace = true, features = [
|
||||
tokio-util = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
tracing-subscriber = { workspace = true }
|
||||
noir = { git = "https://github.com/zkmopro/noir-rs", tag = "v1.0.0-beta.8", features = ["barretenberg"] }
|
||||
|
||||
[[example]]
|
||||
name = "interactive"
|
||||
path = "interactive/interactive.rs"
|
||||
|
||||
[[example]]
|
||||
name = "interactive_zk"
|
||||
path = "interactive_zk/interactive_zk.rs"
|
||||
|
||||
[[example]]
|
||||
name = "attestation_prove"
|
||||
path = "attestation/prove.rs"
|
||||
|
||||
[[example]]
|
||||
name = "attestation_present"
|
||||
path = "attestation/present.rs"
|
||||
|
||||
[[example]]
|
||||
name = "attestation_verify"
|
||||
path = "attestation/verify.rs"
|
||||
|
||||
@@ -5,4 +5,4 @@ This folder contains examples demonstrating how to use the TLSNotary protocol.
|
||||
* [Interactive](./interactive/README.md): Interactive Prover and Verifier session without a trusted notary.
|
||||
* [Attestation](./attestation/README.md): Performing a simple notarization with a trusted notary.
|
||||
|
||||
Refer to <https://tlsnotary.org/docs/quick_start> for a quick start guide to using TLSNotary with these examples.
|
||||
Refer to <https://docs.tlsnotary.org/quick_start/index.html> for a quick start guide to using TLSNotary with these examples.
|
||||
@@ -1,164 +0,0 @@
|
||||
# Attestation Example
|
||||
|
||||
|
||||
This example demonstrates a **TLSNotary attestation workflow**: notarizing data from a server with a trusted third party (Notary), then creating verifiable presentations with selective disclosure of sensitive information to a Verifier.
|
||||
|
||||
## 🔍 How It Works
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant P as Prover
|
||||
participant N as MPC-TLS<br/>Verifier
|
||||
participant S as Server<br/>Fixture
|
||||
participant V as Attestation<br/>Verifier
|
||||
|
||||
Note over P,S: 1. Notarization Phase
|
||||
P->>N: Establish MPC-TLS connection
|
||||
P->>S: Request (MPC-TLS)
|
||||
S->>P: Response (MPC-TLS)
|
||||
N->>P: Issue signed attestation
|
||||
|
||||
Note over P: 2. Presentation Phase
|
||||
P->>P: Create redacted presentation
|
||||
|
||||
Note over P,V: 3. Verification Phase
|
||||
P->>V: Share presentation
|
||||
V->>V: Verify attestation signature
|
||||
```
|
||||
|
||||
### The Three-Step Process
|
||||
|
||||
1. **🔐 Notarize**: Prover collaborates with Notary to create an authenticated TLS session and obtain a signed attestation
|
||||
2. **✂️ Present**: Prover creates a selective presentation, choosing which data to reveal or redact
|
||||
3. **✅ Verify**: Anyone can verify the presentation's authenticity using the Notary's public key
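
At the API level, the verification step boils down to deserializing the presentation, checking the Notary's verifying key, and calling `verify`. Here is a minimal sketch, assuming the same `tlsn` presentation API as the full `attestation_verify` example further down in this diff (error handling is simplified):

```rust
use tlsn::attestation::{
    presentation::{Presentation, PresentationOutput},
    signing::VerifyingKey,
    CryptoProvider,
};

fn check_presentation(bytes: &[u8]) -> Result<(), Box<dyn std::error::Error>> {
    // Deserialize the presentation produced by `attestation_present`.
    let presentation: Presentation = bincode::deserialize(bytes)?;

    // Inspect the Notary's verifying key: the result is only as trustworthy
    // as this key.
    let VerifyingKey { alg, data } = presentation.verifying_key();
    println!("Notary key ({alg}): {}", hex::encode(data));

    // Verification fails if any part of the presentation was tampered with.
    let PresentationOutput { server_name, .. } =
        presentation.verify(&CryptoProvider::default())?;
    println!("Data came from a session with: {}", server_name.unwrap());

    Ok(())
}
```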
|
||||
|
||||
## 🚀 Quick Start
|
||||
|
||||
### Step 1: Notarize Data
|
||||
|
||||
**Start the test server** (from repository root):
|
||||
```bash
|
||||
RUST_LOG=info PORT=4000 cargo run --bin tlsn-server-fixture
|
||||
```
|
||||
|
||||
**Run the notarization** (in a new terminal):
|
||||
```bash
|
||||
RUST_LOG=info SERVER_PORT=4000 cargo run --release --example attestation_prove
|
||||
```
|
||||
|
||||
**Expected output:**
|
||||
```
|
||||
Notarization completed successfully!
|
||||
The attestation has been written to `example-json.attestation.tlsn` and the corresponding secrets to `example-json.secrets.tlsn`.
|
||||
```
|
||||
|
||||
### Step 2: Create Verifiable Presentation
|
||||
|
||||
**Generate a redacted presentation:**
|
||||
```bash
|
||||
cargo run --release --example attestation_present
|
||||
```
|
||||
|
||||
**Expected output:**
|
||||
```
|
||||
Presentation built successfully!
|
||||
The presentation has been written to `example-json.presentation.tlsn`.
|
||||
```
|
||||
|
||||
> 💡 **Tip**: You can create multiple presentations from the same attestation, each with different redactions!
|
||||
|
||||
### Step 3: Verify the Presentation
|
||||
|
||||
**Verify the presentation:**
|
||||
```bash
|
||||
cargo run --release --example attestation_verify
|
||||
```
|
||||
|
||||
**Expected output:**
|
||||
```
|
||||
Verifying presentation with {key algorithm} key: { hex encoded key }
|
||||
|
||||
**Ask yourself, do you trust this key?**
|
||||
|
||||
-------------------------------------------------------------------
|
||||
Successfully verified that the data below came from a session with test-server.io at { time }.
|
||||
Note that the data which the Prover chose not to disclose are shown as X.
|
||||
|
||||
Data sent:
|
||||
|
||||
GET /formats/json HTTP/1.1
|
||||
host: test-server.io
|
||||
accept: */*
|
||||
accept-encoding: identity
|
||||
connection: close
|
||||
user-agent: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|
||||
|
||||
Data received:
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
content-type: application/json
|
||||
content-length: 722
|
||||
connection: close
|
||||
date: Mon, 08 Sep 2025 09:18:29 GMT
|
||||
|
||||
XXXXXX1234567890XXXXXXXXXXXXXXXXXXXXXXXXJohn DoeXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX1.2XX
|
||||
```
|
||||
|
||||
## 🎯 Use Cases & Examples
|
||||
|
||||
### JSON Data (Default)
|
||||
Perfect for API responses, configuration data, or structured information:
|
||||
```bash
|
||||
# All three steps use JSON by default
|
||||
SERVER_PORT=4000 cargo run --release --example attestation_prove
|
||||
cargo run --release --example attestation_present
|
||||
cargo run --release --example attestation_verify
|
||||
```
|
||||
|
||||
### HTML Content
|
||||
Ideal for web pages, forms, or any HTML-based data:
|
||||
```bash
|
||||
# Notarize HTML content
|
||||
SERVER_PORT=4000 cargo run --release --example attestation_prove -- html
|
||||
cargo run --release --example attestation_present -- html
|
||||
cargo run --release --example attestation_verify -- html
|
||||
```
|
||||
|
||||
### Authenticated/Private Data
|
||||
For APIs requiring authentication tokens, cookies, or private access:
|
||||
```bash
|
||||
# Notarize private data with authentication
|
||||
SERVER_PORT=4000 cargo run --release --example attestation_prove -- authenticated
|
||||
cargo run --release --example attestation_present -- authenticated
|
||||
cargo run --release --example attestation_verify -- authenticated
|
||||
```
|
||||
|
||||
### Debug Mode
|
||||
|
||||
For detailed logging and troubleshooting:
|
||||
```bash
|
||||
RUST_LOG=debug,yamux=info,uid_mux=info SERVER_PORT=4000 cargo run --release --example attestation_prove
|
||||
```
|
||||
|
||||
### Generated Files
|
||||
|
||||
After running the examples, you'll find:
|
||||
- **`*.attestation.tlsn`**: The cryptographically signed attestation from the Notary
|
||||
- **`*.secrets.tlsn`**: Cryptographic secrets needed to create presentations
|
||||
- **`*.presentation.tlsn`**: The verifiable presentation with your chosen redactions
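
These files are plain `bincode`-serialized structs, so they can be loaded back programmatically. A minimal sketch, mirroring the deserialization done by `present.rs` below (the `example-json` prefix is just the default file name used by these examples):

```rust
use tlsn::attestation::{Attestation, Secrets};

fn load_artifacts() -> Result<(Attestation, Secrets), Box<dyn std::error::Error>> {
    // Read the attestation issued by the Notary.
    let attestation: Attestation =
        bincode::deserialize(&std::fs::read("example-json.attestation.tlsn")?)?;
    // Read the secrets needed to build presentations from that attestation.
    let secrets: Secrets =
        bincode::deserialize(&std::fs::read("example-json.secrets.tlsn")?)?;
    Ok((attestation, secrets))
}
```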
|
||||
|
||||
## 🔐 Security Considerations
|
||||
|
||||
### Trust Model
|
||||
- ✅ **Notary Key**: The presentation includes the Notary's verifying key; the verifier must trust this key
|
||||
- ✅ **Data Authenticity**: Cryptographically guaranteed that data came from the specified server
|
||||
- ✅ **Tamper Evidence**: Any modification to the presentation will fail verification
|
||||
- ⚠️ **Notary Trust**: The verifier must trust the Notary not to collude with the Prover
|
||||
|
||||
### Production Deployment
|
||||
- 🏭 **Independent Notary**: Use a trusted third-party Notary service (not a local one)
|
||||
- 🔒 **Key Management**: Implement proper Notary key distribution and verification
|
||||
- 📋 **Audit Trail**: Maintain logs of notarization and verification events
|
||||
- 🔄 **Key Rotation**: Plan for Notary key updates and migration
|
||||
|
||||
> ⚠️ **Demo Notice**: This example uses a local test server and local Notary for demonstration. In production, use trusted third-party Notary services and real server endpoints.
|
||||
@@ -1,117 +0,0 @@
|
||||
// This example demonstrates how to build a verifiable presentation from an
|
||||
// attestation and the corresponding connection secrets. See the `prove.rs`
|
||||
// example to learn how to acquire an attestation from a Notary.
|
||||
|
||||
use clap::Parser;
|
||||
use hyper::header;
|
||||
|
||||
use tlsn::attestation::{presentation::Presentation, Attestation, CryptoProvider, Secrets};
|
||||
use tlsn_examples::ExampleType;
|
||||
use tlsn_formats::http::HttpTranscript;
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(version, about, long_about = None)]
|
||||
struct Args {
|
||||
/// What data to notarize
|
||||
#[clap(default_value_t, value_enum)]
|
||||
example_type: ExampleType,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let args = Args::parse();
|
||||
|
||||
create_presentation(&args.example_type).await
|
||||
}
|
||||
|
||||
async fn create_presentation(example_type: &ExampleType) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let attestation_path = tlsn_examples::get_file_path(example_type, "attestation");
|
||||
let secrets_path = tlsn_examples::get_file_path(example_type, "secrets");
|
||||
|
||||
// Read attestation from disk.
|
||||
let attestation: Attestation = bincode::deserialize(&std::fs::read(attestation_path)?)?;
|
||||
|
||||
// Read secrets from disk.
|
||||
let secrets: Secrets = bincode::deserialize(&std::fs::read(secrets_path)?)?;
|
||||
|
||||
// Parse the HTTP transcript.
|
||||
let transcript = HttpTranscript::parse(secrets.transcript())?;
|
||||
|
||||
// Build a transcript proof.
|
||||
let mut builder = secrets.transcript_proof_builder();
|
||||
|
||||
// Here is where we reveal all or some of the parts we committed in `prove.rs`
|
||||
// previously.
|
||||
let request = &transcript.requests[0];
|
||||
// Reveal the structure of the request without the headers or body.
|
||||
builder.reveal_sent(&request.without_data())?;
|
||||
// Reveal the request target.
|
||||
builder.reveal_sent(&request.request.target)?;
|
||||
// Reveal all request headers except the values of User-Agent and Authorization.
|
||||
for header in &request.headers {
|
||||
if !(header
|
||||
.name
|
||||
.as_str()
|
||||
.eq_ignore_ascii_case(header::USER_AGENT.as_str())
|
||||
|| header
|
||||
.name
|
||||
.as_str()
|
||||
.eq_ignore_ascii_case(header::AUTHORIZATION.as_str()))
|
||||
{
|
||||
builder.reveal_sent(header)?;
|
||||
} else {
|
||||
builder.reveal_sent(&header.without_value())?;
|
||||
}
|
||||
}
|
||||
|
||||
// Reveal only parts of the response.
|
||||
let response = &transcript.responses[0];
|
||||
// Reveal the structure of the response without the headers or body.
|
||||
builder.reveal_recv(&response.without_data())?;
|
||||
// Reveal all response headers.
|
||||
for header in &response.headers {
|
||||
builder.reveal_recv(header)?;
|
||||
}
|
||||
|
||||
let content = &response.body.as_ref().unwrap().content;
|
||||
match content {
|
||||
tlsn_formats::http::BodyContent::Json(json) => {
|
||||
// For experimentation, reveal the entire response or just a selection.
|
||||
let reveal_all = false;
|
||||
if reveal_all {
|
||||
builder.reveal_recv(response)?;
|
||||
} else {
|
||||
builder.reveal_recv(json.get("id").unwrap())?;
|
||||
builder.reveal_recv(json.get("information.name").unwrap())?;
|
||||
builder.reveal_recv(json.get("meta.version").unwrap())?;
|
||||
}
|
||||
}
|
||||
tlsn_formats::http::BodyContent::Unknown(span) => {
|
||||
builder.reveal_recv(span)?;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
|
||||
let transcript_proof = builder.build()?;
|
||||
|
||||
// Use default crypto provider to build the presentation.
|
||||
let provider = CryptoProvider::default();
|
||||
|
||||
let mut builder = attestation.presentation_builder(&provider);
|
||||
|
||||
builder
|
||||
.identity_proof(secrets.identity_proof())
|
||||
.transcript_proof(transcript_proof);
|
||||
|
||||
let presentation: Presentation = builder.build()?;
|
||||
|
||||
let presentation_path = tlsn_examples::get_file_path(example_type, "presentation");
|
||||
|
||||
// Write the presentation to disk.
|
||||
std::fs::write(&presentation_path, bincode::serialize(&presentation)?)?;
|
||||
|
||||
println!("Presentation built successfully!");
|
||||
println!("The presentation has been written to `{presentation_path}`.");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,403 +0,0 @@
|
||||
// This example demonstrates how to use the Prover to acquire an attestation for
|
||||
// an HTTP request sent to a server fixture. The attestation and secrets are
|
||||
// saved to disk.
|
||||
|
||||
use std::env;
|
||||
|
||||
use clap::Parser;
|
||||
use http_body_util::Empty;
|
||||
use hyper::{body::Bytes, Request, StatusCode};
|
||||
use hyper_util::rt::TokioIo;
|
||||
use spansy::Spanned;
|
||||
use tokio::{
|
||||
io::{AsyncRead, AsyncWrite},
|
||||
sync::oneshot::{self, Receiver, Sender},
|
||||
};
|
||||
use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
|
||||
use tracing::info;
|
||||
|
||||
use tlsn::{
|
||||
attestation::{
|
||||
request::{Request as AttestationRequest, RequestConfig},
|
||||
signing::Secp256k1Signer,
|
||||
Attestation, AttestationConfig, CryptoProvider, Secrets,
|
||||
},
|
||||
config::{
|
||||
CertificateDer, PrivateKeyDer, ProtocolConfig, ProtocolConfigValidator, RootCertStore,
|
||||
},
|
||||
connection::{ConnectionInfo, HandshakeData, ServerName, TranscriptLength},
|
||||
prover::{state::Committed, ProveConfig, Prover, ProverConfig, ProverOutput, TlsConfig},
|
||||
transcript::{ContentType, TranscriptCommitConfig},
|
||||
verifier::{Verifier, VerifierConfig, VerifierOutput, VerifyConfig},
|
||||
};
|
||||
use tlsn_examples::ExampleType;
|
||||
use tlsn_formats::http::{DefaultHttpCommitter, HttpCommit, HttpTranscript};
|
||||
use tlsn_server_fixture::DEFAULT_FIXTURE_PORT;
|
||||
use tlsn_server_fixture_certs::{CA_CERT_DER, CLIENT_CERT_DER, CLIENT_KEY_DER, SERVER_DOMAIN};
|
||||
|
||||
// Setting of the application server.
|
||||
const USER_AGENT: &str = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36";
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(version, about, long_about = None)]
|
||||
struct Args {
|
||||
/// What data to notarize.
|
||||
#[clap(default_value_t, value_enum)]
|
||||
example_type: ExampleType,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
tracing_subscriber::fmt::init();
|
||||
|
||||
let args = Args::parse();
|
||||
let (uri, extra_headers) = match args.example_type {
|
||||
ExampleType::Json => ("/formats/json", vec![]),
|
||||
ExampleType::Html => ("/formats/html", vec![]),
|
||||
ExampleType::Authenticated => ("/protected", vec![("Authorization", "random_auth_token")]),
|
||||
};
|
||||
|
||||
let (notary_socket, prover_socket) = tokio::io::duplex(1 << 23);
|
||||
let (request_tx, request_rx) = oneshot::channel();
|
||||
let (attestation_tx, attestation_rx) = oneshot::channel();
|
||||
|
||||
tokio::spawn(async move {
|
||||
notary(notary_socket, request_rx, attestation_tx)
|
||||
.await
|
||||
.unwrap()
|
||||
});
|
||||
|
||||
prover(
|
||||
prover_socket,
|
||||
request_tx,
|
||||
attestation_rx,
|
||||
uri,
|
||||
extra_headers,
|
||||
&args.example_type,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn prover<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
|
||||
socket: S,
|
||||
req_tx: Sender<AttestationRequest>,
|
||||
resp_rx: Receiver<Attestation>,
|
||||
uri: &str,
|
||||
extra_headers: Vec<(&str, &str)>,
|
||||
example_type: &ExampleType,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let server_host: String = env::var("SERVER_HOST").unwrap_or("127.0.0.1".into());
|
||||
let server_port: u16 = env::var("SERVER_PORT")
|
||||
.map(|port| port.parse().expect("port should be valid integer"))
|
||||
.unwrap_or(DEFAULT_FIXTURE_PORT);
|
||||
|
||||
// Create a root certificate store with the server-fixture's self-signed
|
||||
// certificate. This is only required for offline testing with the
|
||||
// server-fixture.
|
||||
let mut tls_config_builder = TlsConfig::builder();
|
||||
tls_config_builder
|
||||
.root_store(RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
})
|
||||
// (Optional) Set up TLS client authentication if required by the server.
|
||||
.client_auth((
|
||||
vec![CertificateDer(CLIENT_CERT_DER.to_vec())],
|
||||
PrivateKeyDer(CLIENT_KEY_DER.to_vec()),
|
||||
));
|
||||
|
||||
let tls_config = tls_config_builder.build().unwrap();
|
||||
|
||||
// Set up protocol configuration for prover.
|
||||
let mut prover_config_builder = ProverConfig::builder();
|
||||
prover_config_builder
|
||||
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
|
||||
.tls_config(tls_config)
|
||||
.protocol_config(
|
||||
ProtocolConfig::builder()
|
||||
// We must configure the amount of data we expect to exchange beforehand, which will
|
||||
// be preprocessed prior to the connection. Reducing these limits will improve
|
||||
// performance.
|
||||
.max_sent_data(tlsn_examples::MAX_SENT_DATA)
|
||||
.max_recv_data(tlsn_examples::MAX_RECV_DATA)
|
||||
.build()?,
|
||||
);
|
||||
|
||||
let prover_config = prover_config_builder.build()?;
|
||||
|
||||
// Create a new prover and perform necessary setup.
|
||||
let prover = Prover::new(prover_config).setup(socket.compat()).await?;
|
||||
|
||||
// Open a TCP connection to the server.
|
||||
let client_socket = tokio::net::TcpStream::connect((server_host, server_port)).await?;
|
||||
|
||||
// Bind the prover to the server connection.
|
||||
// The returned `mpc_tls_connection` is an MPC TLS connection to the server: all
|
||||
// data written to/read from it will be encrypted/decrypted using MPC with
|
||||
// the notary.
|
||||
let (mpc_tls_connection, prover_fut) = prover.connect(client_socket.compat()).await?;
|
||||
let mpc_tls_connection = TokioIo::new(mpc_tls_connection.compat());
|
||||
|
||||
// Spawn the prover task to be run concurrently in the background.
|
||||
let prover_task = tokio::spawn(prover_fut);
|
||||
|
||||
// Attach the hyper HTTP client to the connection.
|
||||
let (mut request_sender, connection) =
|
||||
hyper::client::conn::http1::handshake(mpc_tls_connection).await?;
|
||||
|
||||
// Spawn the HTTP task to be run concurrently in the background.
|
||||
tokio::spawn(connection);
|
||||
|
||||
// Build a simple HTTP request with common headers.
|
||||
let request_builder = Request::builder()
|
||||
.uri(uri)
|
||||
.header("Host", SERVER_DOMAIN)
|
||||
.header("Accept", "*/*")
|
||||
// Using "identity" instructs the Server not to use compression for its HTTP response.
|
||||
// TLSNotary tooling does not support compression.
|
||||
.header("Accept-Encoding", "identity")
|
||||
.header("Connection", "close")
|
||||
.header("User-Agent", USER_AGENT);
|
||||
let mut request_builder = request_builder;
|
||||
for (key, value) in extra_headers {
|
||||
request_builder = request_builder.header(key, value);
|
||||
}
|
||||
let request = request_builder.body(Empty::<Bytes>::new())?;
|
||||
|
||||
info!("Starting an MPC TLS connection with the server");
|
||||
|
||||
// Send the request to the server and wait for the response.
|
||||
let response = request_sender.send_request(request).await?;
|
||||
|
||||
info!("Got a response from the server: {}", response.status());
|
||||
|
||||
assert!(response.status() == StatusCode::OK);
|
||||
|
||||
// The prover task should be done now, so we can await it.
|
||||
let prover = prover_task.await??;
|
||||
|
||||
// Parse the HTTP transcript.
|
||||
let transcript = HttpTranscript::parse(prover.transcript())?;
|
||||
|
||||
let body_content = &transcript.responses[0].body.as_ref().unwrap().content;
|
||||
let body = String::from_utf8_lossy(body_content.span().as_bytes());
|
||||
|
||||
match body_content {
|
||||
tlsn_formats::http::BodyContent::Json(_json) => {
|
||||
let parsed = serde_json::from_str::<serde_json::Value>(&body)?;
|
||||
info!("{}", serde_json::to_string_pretty(&parsed)?);
|
||||
}
|
||||
tlsn_formats::http::BodyContent::Unknown(_span) => {
|
||||
info!("{}", &body);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
|
||||
// Commit to the transcript.
|
||||
let mut builder = TranscriptCommitConfig::builder(prover.transcript());
|
||||
|
||||
// This commits to various parts of the transcript separately (e.g. request
|
||||
// headers, response headers, response body and more). See https://docs.tlsnotary.org//protocol/commit_strategy.html
|
||||
// for other strategies that can be used to generate commitments.
|
||||
DefaultHttpCommitter::default().commit_transcript(&mut builder, &transcript)?;
|
||||
|
||||
let transcript_commit = builder.build()?;
|
||||
|
||||
// Build an attestation request.
|
||||
let mut builder = RequestConfig::builder();
|
||||
|
||||
builder.transcript_commit(transcript_commit);
|
||||
|
||||
// Optionally, add an extension to the attestation if the notary supports it.
|
||||
// builder.extension(Extension {
|
||||
// id: b"example.name".to_vec(),
|
||||
// value: b"Bobert".to_vec(),
|
||||
// });
|
||||
|
||||
let request_config = builder.build()?;
|
||||
|
||||
let (attestation, secrets) = notarize(prover, &request_config, req_tx, resp_rx).await?;
|
||||
|
||||
// Write the attestation to disk.
|
||||
let attestation_path = tlsn_examples::get_file_path(example_type, "attestation");
|
||||
let secrets_path = tlsn_examples::get_file_path(example_type, "secrets");
|
||||
|
||||
tokio::fs::write(&attestation_path, bincode::serialize(&attestation)?).await?;
|
||||
|
||||
// Write the secrets to disk.
|
||||
tokio::fs::write(&secrets_path, bincode::serialize(&secrets)?).await?;
|
||||
|
||||
println!("Notarization completed successfully!");
|
||||
println!(
|
||||
"The attestation has been written to `{attestation_path}` and the \
|
||||
corresponding secrets to `{secrets_path}`."
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn notarize(
|
||||
mut prover: Prover<Committed>,
|
||||
config: &RequestConfig,
|
||||
request_tx: Sender<AttestationRequest>,
|
||||
attestation_rx: Receiver<Attestation>,
|
||||
) -> Result<(Attestation, Secrets), Box<dyn std::error::Error>> {
|
||||
let mut builder = ProveConfig::builder(prover.transcript());
|
||||
|
||||
if let Some(config) = config.transcript_commit() {
|
||||
builder.transcript_commit(config.clone());
|
||||
}
|
||||
|
||||
let disclosure_config = builder.build()?;
|
||||
|
||||
let ProverOutput {
|
||||
transcript_commitments,
|
||||
transcript_secrets,
|
||||
..
|
||||
} = prover.prove(&disclosure_config).await?;
|
||||
|
||||
let transcript = prover.transcript().clone();
|
||||
let tls_transcript = prover.tls_transcript().clone();
|
||||
prover.close().await?;
|
||||
|
||||
// Build an attestation request.
|
||||
let mut builder = AttestationRequest::builder(config);
|
||||
|
||||
builder
|
||||
.server_name(ServerName::Dns(SERVER_DOMAIN.try_into().unwrap()))
|
||||
.handshake_data(HandshakeData {
|
||||
certs: tls_transcript
|
||||
.server_cert_chain()
|
||||
.expect("server cert chain is present")
|
||||
.to_vec(),
|
||||
sig: tls_transcript
|
||||
.server_signature()
|
||||
.expect("server signature is present")
|
||||
.clone(),
|
||||
binding: tls_transcript.certificate_binding().clone(),
|
||||
})
|
||||
.transcript(transcript)
|
||||
.transcript_commitments(transcript_secrets, transcript_commitments);
|
||||
|
||||
let (request, secrets) = builder.build(&CryptoProvider::default())?;
|
||||
|
||||
// Send attestation request to notary.
|
||||
request_tx
|
||||
.send(request.clone())
|
||||
.map_err(|_| "notary is not receiving attestation request".to_string())?;
|
||||
|
||||
// Receive attestation from notary.
|
||||
let attestation = attestation_rx
|
||||
.await
|
||||
.map_err(|err| format!("notary did not respond with attestation: {err}"))?;
|
||||
|
||||
// Check the attestation is consistent with the Prover's view.
|
||||
request.validate(&attestation)?;
|
||||
|
||||
Ok((attestation, secrets))
|
||||
}
|
||||
|
||||
async fn notary<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
|
||||
socket: S,
|
||||
request_rx: Receiver<AttestationRequest>,
|
||||
attestation_tx: Sender<Attestation>,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Set up Verifier.
|
||||
let config_validator = ProtocolConfigValidator::builder()
|
||||
.max_sent_data(tlsn_examples::MAX_SENT_DATA)
|
||||
.max_recv_data(tlsn_examples::MAX_RECV_DATA)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
// Create a root certificate store with the server-fixture's self-signed
|
||||
// certificate. This is only required for offline testing with the
|
||||
// server-fixture.
|
||||
let verifier_config = VerifierConfig::builder()
|
||||
.root_store(RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
})
|
||||
.protocol_config_validator(config_validator)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let mut verifier = Verifier::new(verifier_config)
|
||||
.setup(socket.compat())
|
||||
.await?
|
||||
.run()
|
||||
.await?;
|
||||
|
||||
let VerifierOutput {
|
||||
transcript_commitments,
|
||||
encoder_secret,
|
||||
..
|
||||
} = verifier.verify(&VerifyConfig::default()).await?;
|
||||
|
||||
let tls_transcript = verifier.tls_transcript().clone();
|
||||
|
||||
verifier.close().await?;
|
||||
|
||||
let sent_len = tls_transcript
|
||||
.sent()
|
||||
.iter()
|
||||
.filter_map(|record| {
|
||||
if let ContentType::ApplicationData = record.typ {
|
||||
Some(record.ciphertext.len())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.sum::<usize>();
|
||||
|
||||
let recv_len = tls_transcript
|
||||
.recv()
|
||||
.iter()
|
||||
.filter_map(|record| {
|
||||
if let ContentType::ApplicationData = record.typ {
|
||||
Some(record.ciphertext.len())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.sum::<usize>();
|
||||
|
||||
// Receive attestation request from prover.
|
||||
let request = request_rx.await?;
|
||||
|
||||
// Load a dummy signing key.
|
||||
let signing_key = k256::ecdsa::SigningKey::from_bytes(&[1u8; 32].into())?;
|
||||
let signer = Box::new(Secp256k1Signer::new(&signing_key.to_bytes())?);
|
||||
let mut provider = CryptoProvider::default();
|
||||
provider.signer.set_signer(signer);
|
||||
|
||||
// Build an attestation.
|
||||
let mut att_config_builder = AttestationConfig::builder();
|
||||
att_config_builder.supported_signature_algs(Vec::from_iter(provider.signer.supported_algs()));
|
||||
let att_config = att_config_builder.build()?;
|
||||
|
||||
let mut builder = Attestation::builder(&att_config).accept_request(request)?;
|
||||
builder
|
||||
.connection_info(ConnectionInfo {
|
||||
time: tls_transcript.time(),
|
||||
version: (*tls_transcript.version()),
|
||||
transcript_length: TranscriptLength {
|
||||
sent: sent_len as u32,
|
||||
received: recv_len as u32,
|
||||
},
|
||||
})
|
||||
.server_ephemeral_key(tls_transcript.server_ephemeral_key().clone())
|
||||
.transcript_commitments(transcript_commitments);
|
||||
|
||||
if let Some(encoder_secret) = encoder_secret {
|
||||
builder.encoder_secret(encoder_secret);
|
||||
}
|
||||
|
||||
let attestation = builder.build(&provider)?;
|
||||
|
||||
// Send attestation to prover.
|
||||
attestation_tx
|
||||
.send(attestation)
|
||||
.map_err(|_| "prover is not receiving attestation".to_string())?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,96 +0,0 @@
|
||||
// This example demonstrates how to verify a presentation. See `present.rs` for
|
||||
// an example of how to build a presentation from an attestation and connection
|
||||
// secrets.
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use clap::Parser;
|
||||
|
||||
use tlsn::{
|
||||
attestation::{
|
||||
presentation::{Presentation, PresentationOutput},
|
||||
signing::VerifyingKey,
|
||||
CryptoProvider,
|
||||
},
|
||||
config::{CertificateDer, RootCertStore},
|
||||
verifier::ServerCertVerifier,
|
||||
};
|
||||
use tlsn_examples::ExampleType;
|
||||
use tlsn_server_fixture_certs::CA_CERT_DER;
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(version, about, long_about = None)]
|
||||
struct Args {
|
||||
/// What data to notarize.
|
||||
#[clap(default_value_t, value_enum)]
|
||||
example_type: ExampleType,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let args = Args::parse();
|
||||
|
||||
verify_presentation(&args.example_type).await
|
||||
}
|
||||
|
||||
async fn verify_presentation(example_type: &ExampleType) -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Read the presentation from disk.
|
||||
let presentation_path = tlsn_examples::get_file_path(example_type, "presentation");
|
||||
|
||||
let presentation: Presentation = bincode::deserialize(&std::fs::read(presentation_path)?)?;
|
||||
|
||||
// Create a crypto provider accepting the server-fixture's self-signed
|
||||
// root certificate.
|
||||
//
|
||||
// This is only required for offline testing with the server-fixture. In
|
||||
// production, use `CryptoProvider::default()` instead.
|
||||
let root_cert_store = RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
};
|
||||
let crypto_provider = CryptoProvider {
|
||||
cert: ServerCertVerifier::new(&root_cert_store)?,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let VerifyingKey {
|
||||
alg,
|
||||
data: key_data,
|
||||
} = presentation.verifying_key();
|
||||
|
||||
println!(
|
||||
"Verifying presentation with {alg} key: {}\n\n**Ask yourself, do you trust this key?**\n",
|
||||
hex::encode(key_data)
|
||||
);
|
||||
|
||||
// Verify the presentation.
|
||||
let PresentationOutput {
|
||||
server_name,
|
||||
connection_info,
|
||||
transcript,
|
||||
// extensions, // Optionally, verify any custom extensions from prover/notary.
|
||||
..
|
||||
} = presentation.verify(&crypto_provider).unwrap();
|
||||
|
||||
// The time at which the connection was started.
|
||||
let time = chrono::DateTime::UNIX_EPOCH + Duration::from_secs(connection_info.time);
|
||||
let server_name = server_name.unwrap();
|
||||
let mut partial_transcript = transcript.unwrap();
|
||||
// Set the unauthenticated bytes so they are distinguishable.
|
||||
partial_transcript.set_unauthed(b'X');
|
||||
|
||||
let sent = String::from_utf8_lossy(partial_transcript.sent_unsafe());
|
||||
let recv = String::from_utf8_lossy(partial_transcript.received_unsafe());
|
||||
|
||||
println!("-------------------------------------------------------------------");
|
||||
println!(
|
||||
"Successfully verified that the data below came from a session with {server_name} at {time}.",
|
||||
);
|
||||
println!("Note that the data which the Prover chose not to disclose are shown as X.\n");
|
||||
println!("Data sent:\n");
|
||||
println!("{sent}\n");
|
||||
println!("Data received:\n");
|
||||
println!("{recv}\n");
|
||||
println!("-------------------------------------------------------------------");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -10,6 +10,7 @@ use tokio::io::{AsyncRead, AsyncWrite};
|
||||
use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
|
||||
use tracing::instrument;
|
||||
|
||||
use tls_server_fixture::CA_CERT_DER;
|
||||
use tlsn::{
|
||||
config::{CertificateDer, ProtocolConfig, ProtocolConfigValidator, RootCertStore},
|
||||
connection::ServerName,
|
||||
@@ -18,7 +19,7 @@ use tlsn::{
|
||||
verifier::{Verifier, VerifierConfig, VerifierOutput, VerifyConfig},
|
||||
};
|
||||
use tlsn_server_fixture::DEFAULT_FIXTURE_PORT;
|
||||
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
|
||||
use tlsn_server_fixture_certs::SERVER_DOMAIN;
|
||||
|
||||
const SECRET: &str = "TLSNotary's private key 🤡";
|
||||
|
||||
|
||||
5
crates/examples/interactive_zk/.gitignore
vendored
5
crates/examples/interactive_zk/.gitignore
vendored
@@ -1,5 +0,0 @@
|
||||
!noir/target/
|
||||
# Ignore everything inside noir/target
|
||||
noir/target/*
|
||||
# Except noir.json
|
||||
!noir/target/noir.json
|
||||
@@ -1,167 +0,0 @@
|
||||
# Interactive Zero-Knowledge Age Verification with TLSNotary
|
||||
|
||||
This example demonstrates **privacy-preserving age verification** using TLSNotary and zero-knowledge proofs. It allows a prover to demonstrate they are 18+ years old without revealing their actual birth date or any other personal information.
|
||||
|
||||
## 🔍 How It Works (simplified overview)
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant S as Tax Server<br/>(fixture)
|
||||
participant P as Prover
|
||||
participant V as Verifier
|
||||
|
||||
P->>S: Request tax data (with auth token) (MPC-TLS)
|
||||
S->>P: Tax data including `date_of_birth` (MPC-TLS)
|
||||
P->>V: Share transcript with redactions
|
||||
P->>V: Commit to blinded hash of birth date
|
||||
P->>P: Generate ZK proof of age ≥ 18
|
||||
P->>V: Send ZK proof
|
||||
V->>V: Verify transcript & ZK proof
|
||||
V->>V: ✅ Confirm: Prover is 18+ (no birth date revealed)
|
||||
```
|
||||
|
||||
### The Process
|
||||
|
||||
1. **MPC-TLS Session**: The Prover fetches tax information containing their birth date, while the Verifier jointly verifies the TLS session to ensure the data comes from the authentic server.
|
||||
2. **Selective Disclosure**:
|
||||
* The authorization token is **redacted**: the Verifier sees the plaintext request but not the token.
|
||||
* The birth date is **committed** as a blinded hash: the Verifier cannot see the date, but the Prover is cryptographically bound to it.
|
||||
(Depending on the use case, more data can be redacted or revealed.)
|
||||
3. **Zero-Knowledge Proof**: The Prover generates a ZK proof that the committed birth date corresponds to an age ≥ 18.
|
||||
4. **Verification**: The Verifier checks both the TLS transcript and the ZK proof, confirming age ≥ 18 without learning the actual date of birth.
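
On the Prover side, the blinded-hash commitment from step 2 is configured on the transcript before proving. A minimal sketch using the same `TranscriptCommitConfig` builder as `prover.rs` later in this example; the `commit_recv` call and the `dob_range` input are assumptions for illustration, since the real code locates the range by parsing the HTTP response:

```rust
use tlsn::{
    hash::HashAlgId,
    transcript::{Transcript, TranscriptCommitConfig, TranscriptCommitmentKind},
};

/// `dob_range` is the byte range of the `date_of_birth` value within the
/// received transcript (hypothetical input for this sketch).
fn commit_to_dob_range(
    transcript: &Transcript,
    dob_range: std::ops::Range<usize>,
) -> Result<TranscriptCommitConfig, Box<dyn std::error::Error>> {
    let mut builder = TranscriptCommitConfig::builder(transcript);
    // Commit with a blinded SHA-256 hash rather than the default kind.
    builder.default_kind(TranscriptCommitmentKind::Hash {
        alg: HashAlgId::SHA256,
    });
    builder.commit_recv(&dob_range)?;
    Ok(builder.build()?)
}
```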
|
||||
|
||||
|
||||
### Example Data
|
||||
|
||||
The tax server returns data like this:
|
||||
```json
|
||||
{
|
||||
"tax_year": 2024,
|
||||
"taxpayer": {
|
||||
"idnr": "12345678901",
|
||||
"first_name": "Max",
|
||||
"last_name": "Mustermann",
|
||||
"date_of_birth": "1985-03-12",
|
||||
// ...
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 🔐 Zero-Knowledge Proof Details
|
||||
|
||||
The ZK circuit proves: **"I know a birth date that hashes to the committed value AND indicates I am 18+ years old"**
|
||||
|
||||
**Public Inputs:**
|
||||
- ✅ Verification date
|
||||
- ✅ Committed blinded hash of birth date
|
||||
|
||||
**Private Inputs (Hidden):**
|
||||
- 🔒 Actual birth date plaintext
|
||||
- 🔒 Random blinder used in hash commitment
|
||||
|
||||
**What the Verifier Learns:**
|
||||
- ✅ The prover is 18+ years old
|
||||
- ✅ The birth date is authentic (from the MPC-TLS session)
|
||||
|
||||
Everything else remains private.
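
Concretely, the committed value the circuit opens is a SHA-256 hash over the 10-byte date string followed by the 16-byte random blinder, the same construction used by `generate_test_data.rs` and the Noir circuit shown further down. A minimal Rust sketch of the commitment (the function name is illustrative):

```rust
use rand::RngCore;
use sha2::{Digest, Sha256};

/// Returns the random blinder and the blinded hash committing to `dob`
/// (e.g. "1985-03-12").
fn commit_to_dob(dob: &str) -> ([u8; 16], [u8; 32]) {
    // A 16-byte blinder keeps the low-entropy birth date from being
    // brute-forced from the hash alone.
    let mut blinder = [0u8; 16];
    rand::thread_rng().fill_bytes(&mut blinder);

    // Preimage is the 10 date bytes followed by the 16 blinder bytes.
    let mut preimage = Vec::with_capacity(26);
    preimage.extend_from_slice(dob.as_bytes());
    preimage.extend_from_slice(&blinder);

    (blinder, Sha256::digest(&preimage).into())
}
```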
|
||||
|
||||
## 🏃 Run the Example
|
||||
|
||||
1. **Start the test server** (from repository root):
|
||||
```bash
|
||||
RUST_LOG=info PORT=4000 cargo run --bin tlsn-server-fixture
|
||||
```
|
||||
|
||||
2. **Run the age verification** (in a new terminal):
|
||||
```bash
|
||||
SERVER_PORT=4000 cargo run --release --example interactive_zk
|
||||
```
|
||||
|
||||
3. **For detailed logs**:
|
||||
```bash
|
||||
RUST_LOG=debug,yamux=info,uid_mux=info SERVER_PORT=4000 cargo run --release --example interactive_zk
|
||||
```
|
||||
|
||||
### Expected Output
|
||||
|
||||
```
|
||||
Successfully verified https://test-server.io:4000/elster
|
||||
Age verified in ZK: 18+ ✅
|
||||
|
||||
Verified sent data:
|
||||
GET https://test-server.io:4000/elster HTTP/1.1
|
||||
host: test-server.io
|
||||
connection: close
|
||||
authorization: 🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈🙈
|
||||
|
||||
Verified received data:
|
||||
🙈🙈🙈🙈🙈🙈🙈🙈[truncated for brevity]...🙈🙈🙈🙈🙈"tax_year":2024🙈🙈🙈🙈🙈...
|
||||
```
|
||||
|
||||
> 💡 **Note**: In this demo, both Prover and Verifier run on the same machine. In production, they would operate on separate systems.
|
||||
> 💡 **Note**: This demo assumes that the tax server serves correct data, and that only the submitter of the tax data has access to the specified page.
|
||||
|
||||
## 🛠 Development
|
||||
|
||||
### Project Structure
|
||||
|
||||
```
|
||||
interactive_zk/
|
||||
├── prover.rs # Prover implementation
|
||||
├── verifier.rs # Verifier implementation
|
||||
├── types.rs # Shared types
|
||||
└── interactive_zk.rs # Main example runner
|
||||
├── noir/ # Zero-knowledge circuit
|
||||
│ ├── src/main.nr # Noir circuit code
|
||||
│ ├── target/ # Compiled circuit artifacts
|
||||
│ └── Nargo.toml # Noir project config
|
||||
│ └── Prover.toml # Example input for `nargo execute`
|
||||
│ └── generate_test_data.rs # Rust script to generate Noir test data
|
||||
└── README.md
|
||||
```
|
||||
|
||||
### Noir Circuit Commands
|
||||
|
||||
We use [Mopro's `noir_rs`](https://zkmopro.org/docs/crates/noir-rs/) for ZK proof generation. The **circuit is pre-compiled and ready to use**. You don't need to install Noir tools to run the example. But if you want to change or test the circuit in isolation, you can use the following instructions.
|
||||
|
||||
Before you proceed, we recommend double-checking that your Noir tooling matches the versions used in Mopro's `noir_rs`:
|
||||
```sh
|
||||
# Install correct Noir and BB versions (important for compatibility!)
|
||||
noirup --version 1.0.0-beta.8
|
||||
bbup -v 1.0.0-nightly.20250723
|
||||
```
|
||||
|
||||
If you don't have `noirup` and `bbup` installed yet, check [Noir's Quick Start](https://noir-lang.org/docs/getting_started/quick_start).
|
||||
|
||||
To compile the circuit, go to the `noir` folder and run `nargo compile`.
|
||||
|
||||
To check and experiment with the Noir circuit, you can use these commands:
|
||||
|
||||
* Execute Circuit: Compile the circuit and run it with sample data from `Prover.toml`:
|
||||
```sh
|
||||
nargo execute
|
||||
```
|
||||
* Generate Verification Key: Create the verification key needed to verify proofs
|
||||
```sh
|
||||
bb write_vk -b ./target/noir.json -o ./target
|
||||
```
|
||||
* Generate Proof: Create a zero-knowledge proof using the circuit and witness data.
|
||||
```sh
|
||||
bb prove --bytecode_path ./target/noir.json --witness_path ./target/noir.gz -o ./target
|
||||
```
|
||||
* Verify Proof: Verify that a proof is valid using the verification key.
|
||||
```sh
|
||||
bb verify -k ./target/vk -p ./target/proof
|
||||
```
|
||||
* Run the Noir tests:
|
||||
```sh
|
||||
nargo test --show-output
|
||||
```
|
||||
To create extra tests, you can use `./generate_test_data.rs` to help with generating correct blinders and hashes.
|
||||
|
||||
## 📚 Learn More
|
||||
|
||||
- [TLSNotary Documentation](https://docs.tlsnotary.org/)
|
||||
- [Noir Language Guide](https://noir-lang.org/)
|
||||
- [Zero-Knowledge Proofs Explained](https://ethereum.org/en/zero-knowledge-proofs/)
|
||||
- [Mopro ZK Toolkit](https://zkmopro.org/)
|
||||
@@ -1,59 +0,0 @@
|
||||
mod prover;
|
||||
mod types;
|
||||
mod verifier;
|
||||
|
||||
use prover::prover;
|
||||
use std::{
|
||||
env,
|
||||
net::{IpAddr, SocketAddr},
|
||||
};
|
||||
use tlsn_server_fixture::DEFAULT_FIXTURE_PORT;
|
||||
use tlsn_server_fixture_certs::SERVER_DOMAIN;
|
||||
use verifier::verifier;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
tracing_subscriber::fmt::init();
|
||||
|
||||
let server_host: String = env::var("SERVER_HOST").unwrap_or("127.0.0.1".into());
|
||||
let server_port: u16 = env::var("SERVER_PORT")
|
||||
.map(|port| port.parse().expect("port should be valid integer"))
|
||||
.unwrap_or(DEFAULT_FIXTURE_PORT);
|
||||
|
||||
// We use SERVER_DOMAIN here to make sure it matches the domain in the test
|
||||
// server's certificate.
|
||||
let uri = format!("https://{SERVER_DOMAIN}:{server_port}/elster");
|
||||
let server_ip: IpAddr = server_host
|
||||
.parse()
|
||||
.map_err(|e| format!("Invalid IP address '{}': {}", server_host, e))?;
|
||||
let server_addr = SocketAddr::from((server_ip, server_port));
|
||||
|
||||
// Connect prover and verifier.
|
||||
let (prover_socket, verifier_socket) = tokio::io::duplex(1 << 23);
|
||||
let (prover_extra_socket, verifier_extra_socket) = tokio::io::duplex(1 << 23);
|
||||
|
||||
let (_, transcript) = tokio::try_join!(
|
||||
prover(prover_socket, prover_extra_socket, &server_addr, &uri),
|
||||
verifier(verifier_socket, verifier_extra_socket)
|
||||
)?;
|
||||
|
||||
println!("---");
|
||||
println!("Successfully verified {}", &uri);
|
||||
println!("Age verified in ZK: 18+ ✅\n");
|
||||
|
||||
println!(
|
||||
"Verified sent data:\n{}",
|
||||
bytes_to_redacted_string(transcript.sent_unsafe())
|
||||
);
|
||||
println!(
|
||||
"Verified received data:\n{}",
|
||||
bytes_to_redacted_string(transcript.received_unsafe())
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Render redacted bytes as `🙈`.
|
||||
pub fn bytes_to_redacted_string(bytes: &[u8]) -> String {
|
||||
String::from_utf8_lossy(bytes).replace('\0', "🙈")
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
[package]
|
||||
name = "noir"
|
||||
type = "bin"
|
||||
authors = [""]
|
||||
|
||||
[dependencies]
|
||||
sha256 = { tag = "v0.1.5", git = "https://github.com/noir-lang/sha256" }
|
||||
date = { tag = "v0.5.4", git = "https://github.com/madztheo/noir-date.git" }
|
||||
@@ -1,8 +0,0 @@
|
||||
blinder = [108, 93, 120, 205, 15, 35, 159, 124, 243, 96, 22, 128, 16, 149, 219, 216]
|
||||
committed_hash = [186, 158, 101, 39, 49, 48, 26, 83, 242, 96, 10, 221, 121, 174, 62, 50, 136, 132, 232, 58, 25, 32, 66, 196, 99, 85, 66, 85, 255, 1, 202, 254]
|
||||
date_of_birth = "1985-03-12"
|
||||
|
||||
[proof_date]
|
||||
day = "29"
|
||||
month = "08"
|
||||
year = "2025"
|
||||
@@ -1,64 +0,0 @@
|
||||
#!/usr/bin/env -S cargo +nightly -Zscript
|
||||
---
|
||||
[package]
|
||||
name = "generate_test_data"
|
||||
version = "0.0.0"
|
||||
edition = "2021"
|
||||
publish = false
|
||||
|
||||
[dependencies]
|
||||
sha2 = "0.10"
|
||||
rand = "0.8"
|
||||
chrono = "0.4"
|
||||
---
|
||||
use chrono::Datelike;
|
||||
use chrono::Local;
|
||||
use rand::RngCore;
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
fn main() {
|
||||
// 1. Birthdate string (fixed)
|
||||
let dob_str = "1985-03-12"; // 10 bytes long
|
||||
|
||||
let proof_date = Local::now().date_naive();
|
||||
let proof_year = proof_date.year();
|
||||
let proof_month = proof_date.month();
|
||||
let proof_day = proof_date.day();
|
||||
|
||||
// 2. Generate random 16-byte blinder
|
||||
let mut blinder = [0u8; 16];
|
||||
rand::thread_rng().fill_bytes(&mut blinder);
|
||||
|
||||
// 3. Concatenate dob string bytes + blinder
|
||||
let mut preimage = Vec::with_capacity(26);
|
||||
preimage.extend_from_slice(dob_str.as_bytes());
|
||||
preimage.extend_from_slice(&blinder);
|
||||
|
||||
// 4. Hash it
|
||||
let hash = Sha256::digest(&preimage);
|
||||
|
||||
let blinder = blinder
|
||||
.iter()
|
||||
.map(|b| b.to_string())
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ");
|
||||
let committed_hash = hash
|
||||
.iter()
|
||||
.map(|b| b.to_string())
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ");
|
||||
|
||||
println!(
|
||||
"
|
||||
// Private input
|
||||
let date_of_birth = \"{dob_str}\";
|
||||
let blinder = [{blinder}];
|
||||
|
||||
// Public input
|
||||
let proof_date = date::Date {{ year: {proof_year}, month: {proof_month}, day: {proof_day} }};
|
||||
let committed_hash = [{committed_hash}];
|
||||
|
||||
main(proof_date, committed_hash, date_of_birth, blinder);
|
||||
"
|
||||
);
|
||||
}
|
||||
@@ -1,82 +0,0 @@
|
||||
use dep::date::Date;
|
||||
|
||||
fn main(
|
||||
// Public inputs
|
||||
proof_date: pub date::Date, // "2025-08-29"
|
||||
committed_hash: pub [u8; 32], // Hash of (dob string || blinder)
|
||||
// Private inputs
|
||||
date_of_birth: str<10>, // "1985-03-12"
|
||||
blinder: [u8; 16], // Random 16-byte blinder
|
||||
) {
|
||||
let is_18 = check_18(date_of_birth, proof_date);
|
||||
|
||||
let correct_hash = check_hash(date_of_birth, blinder, committed_hash);
|
||||
|
||||
assert(correct_hash);
|
||||
assert(is_18);
|
||||
}
|
||||
|
||||
fn check_18(date_of_birth: str<10>, proof_date: date::Date) -> bool {
|
||||
let dob = parse_birth_date(date_of_birth);
|
||||
let is_18 = dob.add_years(18).lt(proof_date);
|
||||
println(f"Is 18? {is_18}");
|
||||
is_18
|
||||
}
|
||||
|
||||
fn check_hash(date_of_birth: str<10>, blinder: [u8; 16], committed_hash: [u8; 32]) -> bool {
|
||||
let hash_input: [u8; 26] = make_hash_input(date_of_birth, blinder);
|
||||
let computed_hash = sha256::sha256_var(hash_input, 26);
|
||||
let correct_hash = computed_hash == committed_hash;
|
||||
println(f"Correct hash? {correct_hash}");
|
||||
correct_hash
|
||||
}
|
||||
|
||||
fn make_hash_input(dob: str<10>, blinder: [u8; 16]) -> [u8; 26] {
|
||||
let mut input: [u8; 26] = [0; 26];
|
||||
for i in 0..10 {
|
||||
input[i] = dob.as_bytes()[i];
|
||||
}
|
||||
for i in 0..16 {
|
||||
input[10 + i] = blinder[i];
|
||||
}
|
||||
input
|
||||
}
|
||||
|
||||
pub fn parse_birth_date(birth_date: str<10>) -> date::Date {
|
||||
let date: [u8; 10] = birth_date.as_bytes();
|
||||
let date_str: str<8> =
|
||||
[date[0], date[1], date[2], date[3], date[5], date[6], date[8], date[9]].as_str_unchecked();
|
||||
Date::from_str_long_year(date_str)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_max_is_over_18() {
|
||||
// Private input
|
||||
let date_of_birth = "1985-03-12";
|
||||
let blinder = [120, 80, 62, 10, 76, 60, 130, 98, 147, 161, 139, 126, 27, 236, 36, 56];
|
||||
|
||||
// Public input
|
||||
let proof_date = date::Date { year: 2025, month: 9, day: 2 };
|
||||
let committed_hash = [
|
||||
229, 118, 202, 216, 213, 230, 125, 163, 48, 178, 118, 225, 84, 7, 140, 63, 173, 255, 163,
|
||||
208, 163, 3, 63, 204, 37, 120, 254, 246, 202, 116, 122, 145,
|
||||
];
|
||||
|
||||
main(proof_date, committed_hash, date_of_birth, blinder);
|
||||
}
|
||||
|
||||
#[test(should_fail)]
|
||||
fn test_under_18() {
|
||||
// Private input
|
||||
let date_of_birth = "2010-08-01";
|
||||
let blinder = [160, 23, 57, 158, 141, 195, 155, 132, 109, 242, 48, 220, 70, 217, 229, 189];
|
||||
|
||||
// Public input
|
||||
let proof_date = date::Date { year: 2025, month: 8, day: 29 };
|
||||
let committed_hash = [
|
||||
16, 132, 194, 62, 232, 90, 157, 153, 4, 231, 1, 54, 226, 3, 87, 174, 129, 177, 80, 69, 37,
|
||||
222, 209, 91, 168, 156, 9, 109, 108, 144, 168, 109,
|
||||
];
|
||||
|
||||
main(proof_date, committed_hash, date_of_birth, blinder);
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
@@ -1,371 +0,0 @@
|
||||
use std::net::SocketAddr;
|
||||
|
||||
use crate::types::received_commitments;
|
||||
|
||||
use super::types::ZKProofBundle;
|
||||
|
||||
use chrono::{Datelike, Local, NaiveDate};
|
||||
use http_body_util::Empty;
|
||||
use hyper::{body::Bytes, header, Request, StatusCode, Uri};
|
||||
use hyper_util::rt::TokioIo;
|
||||
use k256::sha2::{Digest, Sha256};
|
||||
use noir::{
|
||||
barretenberg::{
|
||||
prove::prove_ultra_honk, srs::setup_srs_from_bytecode,
|
||||
verify::get_ultra_honk_verification_key,
|
||||
},
|
||||
witness::from_vec_str_to_witness_map,
|
||||
};
|
||||
use serde_json::Value;
|
||||
use spansy::{
|
||||
http::{BodyContent, Requests, Responses},
|
||||
Spanned,
|
||||
};
|
||||
use tls_server_fixture::CA_CERT_DER;
|
||||
use tlsn::{
|
||||
config::{CertificateDer, ProtocolConfig, RootCertStore},
|
||||
connection::ServerName,
|
||||
hash::HashAlgId,
|
||||
prover::{ProveConfig, ProveConfigBuilder, Prover, ProverConfig, TlsConfig},
|
||||
transcript::{
|
||||
hash::{PlaintextHash, PlaintextHashSecret},
|
||||
Direction, TranscriptCommitConfig, TranscriptCommitConfigBuilder, TranscriptCommitmentKind,
|
||||
TranscriptSecret,
|
||||
},
|
||||
};
|
||||
|
||||
use tlsn_examples::MAX_RECV_DATA;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
|
||||
use tlsn_examples::MAX_SENT_DATA;
|
||||
use tokio::io::{AsyncRead, AsyncWrite};
|
||||
use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt};
|
||||
use tracing::instrument;
|
||||
|
||||
#[instrument(skip(verifier_socket, verifier_extra_socket))]
|
||||
pub async fn prover<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
|
||||
verifier_socket: T,
|
||||
mut verifier_extra_socket: T,
|
||||
server_addr: &SocketAddr,
|
||||
uri: &str,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let uri = uri.parse::<Uri>()?;
|
||||
|
||||
if uri.scheme().map(|s| s.as_str()) != Some("https") {
|
||||
return Err("URI must use HTTPS scheme".into());
|
||||
}
|
||||
|
||||
let server_domain = uri.authority().ok_or("URI must have authority")?.host();
|
||||
|
||||
// Create a root certificate store with the server-fixture's self-signed
|
||||
// certificate. This is only required for offline testing with the
|
||||
// server-fixture.
|
||||
let mut tls_config_builder = TlsConfig::builder();
|
||||
tls_config_builder.root_store(RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
});
|
||||
let tls_config = tls_config_builder.build()?;
|
||||
|
||||
// Set up protocol configuration for prover.
|
||||
let mut prover_config_builder = ProverConfig::builder();
|
||||
prover_config_builder
|
||||
.server_name(ServerName::Dns(server_domain.try_into()?))
|
||||
.tls_config(tls_config)
|
||||
.protocol_config(
|
||||
ProtocolConfig::builder()
|
||||
.max_sent_data(MAX_SENT_DATA)
|
||||
.max_recv_data(MAX_RECV_DATA)
|
||||
.build()?,
|
||||
);
|
||||
|
||||
let prover_config = prover_config_builder.build()?;
|
||||
|
||||
// Create prover and connect to verifier.
|
||||
//
|
||||
// Perform the setup phase with the verifier.
|
||||
let prover = Prover::new(prover_config)
|
||||
.setup(verifier_socket.compat())
|
||||
.await?;
|
||||
|
||||
// Connect to TLS Server.
|
||||
let tls_client_socket = tokio::net::TcpStream::connect(server_addr).await?;
|
||||
|
||||
// Pass server connection into the prover.
|
||||
let (mpc_tls_connection, prover_fut) = prover.connect(tls_client_socket.compat()).await?;
|
||||
|
||||
// Wrap the connection in a TokioIo compatibility layer to use it with hyper.
|
||||
let mpc_tls_connection = TokioIo::new(mpc_tls_connection.compat());
|
||||
|
||||
// Spawn the Prover to run in the background.
|
||||
let prover_task = tokio::spawn(prover_fut);
|
||||
|
||||
// MPC-TLS Handshake.
|
||||
let (mut request_sender, connection) =
|
||||
hyper::client::conn::http1::handshake(mpc_tls_connection).await?;
|
||||
|
||||
// Spawn the connection to run in the background.
|
||||
tokio::spawn(connection);
|
||||
|
||||
// MPC-TLS: Send Request and wait for Response.
|
||||
let request = Request::builder()
|
||||
.uri(uri.clone())
|
||||
.header("Host", server_domain)
|
||||
.header("Connection", "close")
|
||||
.header(header::AUTHORIZATION, "Bearer random_auth_token")
|
||||
.method("GET")
|
||||
.body(Empty::<Bytes>::new())?;
|
||||
|
||||
let response = request_sender.send_request(request).await?;
|
||||
|
||||
if response.status() != StatusCode::OK {
|
||||
return Err(format!("MPC-TLS request failed with status {}", response.status()).into());
|
||||
}
|
||||
|
||||
// Create proof for the Verifier.
|
||||
let mut prover = prover_task.await??;
|
||||
|
||||
let transcript = prover.transcript().clone();
|
||||
let mut prove_config_builder = ProveConfig::builder(&transcript);
|
||||
|
||||
// Reveal the DNS name.
|
||||
prove_config_builder.server_identity();
|
||||
|
||||
let sent: &[u8] = transcript.sent();
|
||||
let received: &[u8] = transcript.received();
|
||||
let sent_len = sent.len();
|
||||
let recv_len = received.len();
|
||||
tracing::info!("Sent length: {}, Received length: {}", sent_len, recv_len);
|
||||
|
||||
// Reveal the entire HTTP request except for the authorization bearer token
|
||||
reveal_request(sent, &mut prove_config_builder)?;
|
||||
|
||||
// Create hash commitment for the date of birth field from the response
|
||||
let mut transcript_commitment_builder = TranscriptCommitConfig::builder(&transcript);
|
||||
transcript_commitment_builder.default_kind(TranscriptCommitmentKind::Hash {
|
||||
alg: HashAlgId::SHA256,
|
||||
});
|
||||
reveal_received(
|
||||
received,
|
||||
&mut prove_config_builder,
|
||||
&mut transcript_commitment_builder,
|
||||
)?;
|
||||
|
||||
let transcripts_commitment_config = transcript_commitment_builder.build()?;
|
||||
prove_config_builder.transcript_commit(transcripts_commitment_config);
|
||||
|
||||
let prove_config = prove_config_builder.build()?;
|
||||
|
||||
// MPC-TLS prove
|
||||
let prover_output = prover.prove(&prove_config).await?;
|
||||
prover.close().await?;
|
||||
|
||||
// Prove birthdate is more than 18 years ago.
|
||||
let received_commitments = received_commitments(&prover_output.transcript_commitments);
|
||||
let received_commitment = received_commitments
|
||||
.first()
|
||||
.ok_or("No received commitments found")?; // committed hash (of date of birth string)
|
||||
let received_secrets = received_secrets(&prover_output.transcript_secrets);
|
||||
let received_secret = received_secrets
|
||||
.first()
|
||||
.ok_or("No received secrets found")?; // hash blinder
|
||||
let proof_input = prepare_zk_proof_input(received, received_commitment, received_secret)?;
|
||||
let proof_bundle = generate_zk_proof(&proof_input)?;
|
||||
|
||||
// Send the ZK proof bundle to the verifier.
|
||||
let serialized_proof = bincode::serialize(&proof_bundle)?;
|
||||
verifier_extra_socket.write_all(&serialized_proof).await?;
|
||||
verifier_extra_socket.shutdown().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Reveal everything from the request, except for the authorization token.
|
||||
fn reveal_request(
|
||||
request: &[u8],
|
||||
builder: &mut ProveConfigBuilder<'_>,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let reqs = Requests::new_from_slice(request).collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
let req = reqs.first().ok_or("No requests found")?;
|
||||
|
||||
if req.request.method.as_str() != "GET" {
|
||||
return Err(format!("Expected GET method, found {}", req.request.method.as_str()).into());
|
||||
}
|
||||
|
||||
let authorization_header = req
|
||||
.headers_with_name(header::AUTHORIZATION.as_str())
|
||||
.next()
|
||||
.ok_or("Authorization header not found")?;
|
||||
|
||||
let start_pos = authorization_header
|
||||
.span()
|
||||
.indices()
|
||||
.min()
|
||||
.ok_or("Could not find authorization header start position")?
|
||||
+ header::AUTHORIZATION.as_str().len()
|
||||
+ 2;
|
||||
let end_pos =
|
||||
start_pos + authorization_header.span().len() - header::AUTHORIZATION.as_str().len() - 2;
|
||||
|
||||
builder.reveal_sent(&(0..start_pos))?;
|
||||
builder.reveal_sent(&(end_pos..request.len()))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn reveal_received(
|
||||
received: &[u8],
|
||||
builder: &mut ProveConfigBuilder<'_>,
|
||||
transcript_commitment_builder: &mut TranscriptCommitConfigBuilder,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let resp = Responses::new_from_slice(received).collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
let response = resp.first().ok_or("No responses found")?;
|
||||
let body = response.body.as_ref().ok_or("Response body not found")?;
|
||||
|
||||
let BodyContent::Json(json) = &body.content else {
|
||||
return Err("Expected JSON body content".into());
|
||||
};
|
||||
|
||||
// reveal tax year
|
||||
let tax_year = json
|
||||
.get("tax_year")
|
||||
.ok_or("tax_year field not found in JSON")?;
|
||||
let start_pos = tax_year
|
||||
.span()
|
||||
.indices()
|
||||
.min()
|
||||
.ok_or("Could not find tax_year start position")?
|
||||
- 11;
|
||||
let end_pos = tax_year
|
||||
.span()
|
||||
.indices()
|
||||
.max()
|
||||
.ok_or("Could not find tax_year end position")?
|
||||
+ 1;
|
||||
builder.reveal_recv(&(start_pos..end_pos))?;
|
||||
|
||||
// commit to hash of date of birth
|
||||
let dob = json
|
||||
.get("taxpayer.date_of_birth")
|
||||
.ok_or("taxpayer.date_of_birth field not found in JSON")?;
|
||||
|
||||
transcript_commitment_builder.commit_recv(dob.span())?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// extract secret from prover output
|
||||
fn received_secrets(transcript_secrets: &[TranscriptSecret]) -> Vec<&PlaintextHashSecret> {
|
||||
transcript_secrets
|
||||
.iter()
|
||||
.filter_map(|secret| match secret {
|
||||
TranscriptSecret::Hash(hash) if hash.direction == Direction::Received => Some(hash),
|
||||
_ => None,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ZKProofInput {
|
||||
dob: Vec<u8>,
|
||||
proof_date: NaiveDate,
|
||||
blinder: Vec<u8>,
|
||||
committed_hash: Vec<u8>,
|
||||
}
|
||||
|
||||
// Verify that the blinded, committed hash is correct
|
||||
fn prepare_zk_proof_input(
|
||||
received: &[u8],
|
||||
received_commitment: &PlaintextHash,
|
||||
received_secret: &PlaintextHashSecret,
|
||||
) -> Result<ZKProofInput, Box<dyn std::error::Error>> {
|
||||
assert_eq!(received_commitment.direction, Direction::Received);
|
||||
assert_eq!(received_commitment.hash.alg, HashAlgId::SHA256);
|
||||
|
||||
let hash = &received_commitment.hash;
|
||||
|
||||
let dob_start = received_commitment
|
||||
.idx
|
||||
.min()
|
||||
.ok_or("No start index for DOB")?;
|
||||
let dob_end = received_commitment
|
||||
.idx
|
||||
.end()
|
||||
.ok_or("No end index for DOB")?;
|
||||
let dob = received[dob_start..dob_end].to_vec();
|
||||
let blinder = received_secret.blinder.as_bytes().to_vec();
|
||||
let committed_hash = hash.value.as_bytes().to_vec();
|
||||
let proof_date = Local::now().date_naive();
|
||||
|
||||
assert_eq!(received_secret.direction, Direction::Received);
|
||||
assert_eq!(received_secret.alg, HashAlgId::SHA256);
|
||||
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(&dob);
|
||||
hasher.update(&blinder);
|
||||
let computed_hash = hasher.finalize();
|
||||
|
||||
if committed_hash != computed_hash.as_slice() {
|
||||
return Err("Computed hash does not match committed hash".into());
|
||||
}
|
||||
|
||||
Ok(ZKProofInput {
|
||||
dob,
|
||||
proof_date,
|
||||
committed_hash,
|
||||
blinder,
|
||||
})
|
||||
}
|
||||
|
||||
fn generate_zk_proof(
|
||||
proof_input: &ZKProofInput,
|
||||
) -> Result<ZKProofBundle, Box<dyn std::error::Error>> {
|
||||
tracing::info!("🔒 Generating ZK proof with Noir...");
|
||||
|
||||
const PROGRAM_JSON: &str = include_str!("./noir/target/noir.json");
|
||||
|
||||
// 1. Load bytecode from program.json
|
||||
let json: Value = serde_json::from_str(PROGRAM_JSON)?;
|
||||
let bytecode = json["bytecode"]
|
||||
.as_str()
|
||||
.ok_or("bytecode field not found in program.json")?;
|
||||
|
||||
let mut inputs: Vec<String> = vec![];
|
||||
inputs.push(proof_input.proof_date.day().to_string());
|
||||
inputs.push(proof_input.proof_date.month().to_string());
|
||||
inputs.push(proof_input.proof_date.year().to_string());
|
||||
inputs.extend(proof_input.committed_hash.iter().map(|b| b.to_string()));
|
||||
inputs.extend(proof_input.dob.iter().map(|b| b.to_string()));
|
||||
inputs.extend(proof_input.blinder.iter().map(|b| b.to_string()));
|
||||
|
||||
let proof_date = proof_input.proof_date.to_string();
|
||||
tracing::info!(
|
||||
"Public inputs : Proof date ({}) and committed hash ({})",
|
||||
proof_date,
|
||||
hex::encode(&proof_input.committed_hash)
|
||||
);
|
||||
tracing::info!(
|
||||
"Private inputs: Blinder ({}) and Date of Birth ({})",
|
||||
hex::encode(&proof_input.blinder),
|
||||
String::from_utf8_lossy(&proof_input.dob)
|
||||
);
|
||||
|
||||
tracing::debug!("Witness inputs {:?}", inputs);
|
||||
|
||||
let input_refs: Vec<&str> = inputs.iter().map(String::as_str).collect();
|
||||
let witness = from_vec_str_to_witness_map(input_refs)?;
|
||||
|
||||
// Setup SRS
|
||||
setup_srs_from_bytecode(bytecode, None, false)?;
|
||||
|
||||
// Verification key
|
||||
let vk = get_ultra_honk_verification_key(bytecode, false)?;
|
||||
|
||||
// Generate proof
|
||||
let proof = prove_ultra_honk(bytecode, witness.clone(), vk.clone(), false)?;
|
||||
tracing::info!("✅ Proof generated ({} bytes)", proof.len());
|
||||
|
||||
let proof_bundle = ZKProofBundle { vk, proof };
|
||||
Ok(proof_bundle)
|
||||
}
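The witness vector above is ordered as proof-date day, month, year, the 32 committed-hash bytes, then the private date-of-birth and blinder bytes. The first 3 + 32 entries are the circuit's public inputs, which the verifier below re-reads as consecutive 32-byte big-endian field elements. A hedged helper sketch of that layout (illustrative only; assumes `chrono::{Datelike, NaiveDate}` is in scope, as in the prover above):

```
// Illustrative sketch: mirrors how the verifier below parses the UltraHonk
// public inputs, where each public input occupies a 32-byte big-endian field
// element (the date components in the last 4 bytes, each hash byte in the last byte).
fn expected_public_input_prefix(proof_date: NaiveDate, committed_hash: &[u8]) -> Vec<[u8; 32]> {
    let mut fields = Vec::new();
    for v in [proof_date.day(), proof_date.month(), proof_date.year() as u32] {
        let mut fe = [0u8; 32];
        fe[28..32].copy_from_slice(&v.to_be_bytes());
        fields.push(fe);
    }
    for &byte in committed_hash {
        let mut fe = [0u8; 32];
        fe[31] = byte;
        fields.push(fe);
    }
    fields
}
```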
|
||||
@@ -1,21 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tlsn::transcript::{hash::PlaintextHash, Direction, TranscriptCommitment};
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct ZKProofBundle {
|
||||
pub vk: Vec<u8>,
|
||||
pub proof: Vec<u8>,
|
||||
}
|
||||
|
||||
// extract commitment from prover output
|
||||
pub fn received_commitments(
|
||||
transcript_commitments: &[TranscriptCommitment],
|
||||
) -> Vec<&PlaintextHash> {
|
||||
transcript_commitments
|
||||
.iter()
|
||||
.filter_map(|commitment| match commitment {
|
||||
TranscriptCommitment::Hash(hash) if hash.direction == Direction::Received => Some(hash),
|
||||
_ => None,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
@@ -1,184 +0,0 @@
|
||||
use crate::types::received_commitments;
|
||||
|
||||
use super::types::ZKProofBundle;
|
||||
use chrono::{Local, NaiveDate};
|
||||
use noir::barretenberg::verify::{get_ultra_honk_verification_key, verify_ultra_honk};
|
||||
use serde_json::Value;
|
||||
use tls_server_fixture::CA_CERT_DER;
|
||||
use tlsn::{
|
||||
config::{CertificateDer, ProtocolConfigValidator, RootCertStore},
|
||||
connection::ServerName,
|
||||
hash::HashAlgId,
|
||||
transcript::{Direction, PartialTranscript},
|
||||
verifier::{Verifier, VerifierConfig, VerifierOutput, VerifyConfig},
|
||||
};
|
||||
use tlsn_examples::{MAX_RECV_DATA, MAX_SENT_DATA};
|
||||
use tlsn_server_fixture_certs::SERVER_DOMAIN;
|
||||
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite};
|
||||
use tokio_util::compat::TokioAsyncReadCompatExt;
|
||||
use tracing::instrument;
|
||||
|
||||
#[instrument(skip(socket, extra_socket))]
|
||||
pub async fn verifier<T: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
|
||||
socket: T,
|
||||
mut extra_socket: T,
|
||||
) -> Result<PartialTranscript, Box<dyn std::error::Error>> {
|
||||
// Set up Verifier.
|
||||
let config_validator = ProtocolConfigValidator::builder()
|
||||
.max_sent_data(MAX_SENT_DATA)
|
||||
.max_recv_data(MAX_RECV_DATA)
|
||||
.build()?;
|
||||
|
||||
// Create a root certificate store with the server-fixture's self-signed
|
||||
// certificate. This is only required for offline testing with the
|
||||
// server-fixture.
|
||||
let verifier_config = VerifierConfig::builder()
|
||||
.root_store(RootCertStore {
|
||||
roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
|
||||
})
|
||||
.protocol_config_validator(config_validator)
|
||||
.build()?;
|
||||
|
||||
let verifier = Verifier::new(verifier_config);
|
||||
|
||||
// Receive authenticated data.
|
||||
let VerifierOutput {
|
||||
server_name,
|
||||
transcript,
|
||||
transcript_commitments,
|
||||
..
|
||||
} = verifier
|
||||
.verify(socket.compat(), &VerifyConfig::default())
|
||||
.await?;
|
||||
|
||||
let server_name = server_name.ok_or("Prover should have revealed server name")?;
|
||||
let transcript = transcript.ok_or("Prover should have revealed transcript data")?;
|
||||
|
||||
// Check the sent data: the request must have been made to the expected host.
|
||||
let sent = transcript.sent_unsafe().to_vec();
|
||||
let sent_data = String::from_utf8(sent.clone())
|
||||
.map_err(|e| format!("Verifier expected valid UTF-8 sent data: {}", e))?;
|
||||
|
||||
if !sent_data.contains(SERVER_DOMAIN) {
|
||||
return Err(format!(
|
||||
"Verification failed: Expected host {} not found in sent data",
|
||||
SERVER_DOMAIN
|
||||
)
|
||||
.into());
|
||||
}
|
||||
|
||||
// Check received data.
|
||||
let received_commitments = received_commitments(&transcript_commitments);
|
||||
let received_commitment = received_commitments
|
||||
.first()
|
||||
.ok_or("Missing received hash commitment")?;
|
||||
|
||||
assert!(received_commitment.direction == Direction::Received);
|
||||
assert!(received_commitment.hash.alg == HashAlgId::SHA256);
|
||||
|
||||
let committed_hash = &received_commitment.hash;
|
||||
|
||||
// Check Session info: server name.
|
||||
let ServerName::Dns(server_name) = server_name;
|
||||
if server_name.as_str() != SERVER_DOMAIN {
|
||||
return Err(format!(
|
||||
"Server name mismatch: expected {}, got {}",
|
||||
SERVER_DOMAIN,
|
||||
server_name.as_str()
|
||||
)
|
||||
.into());
|
||||
}
|
||||
|
||||
// Receive ZKProof information from prover
|
||||
let mut buf = Vec::new();
|
||||
extra_socket.read_to_end(&mut buf).await?;
|
||||
|
||||
if buf.is_empty() {
|
||||
return Err("No ZK proof data received from prover".into());
|
||||
}
|
||||
|
||||
let msg: ZKProofBundle = bincode::deserialize(&buf)
|
||||
.map_err(|e| format!("Failed to deserialize ZK proof bundle: {}", e))?;
|
||||
|
||||
// Verify zk proof
|
||||
const PROGRAM_JSON: &str = include_str!("./noir/target/noir.json");
|
||||
let json: Value = serde_json::from_str(PROGRAM_JSON)
|
||||
.map_err(|e| format!("Failed to parse Noir circuit: {}", e))?;
|
||||
|
||||
let bytecode = json["bytecode"]
|
||||
.as_str()
|
||||
.ok_or("Bytecode field missing in noir.json")?;
|
||||
|
||||
let vk = get_ultra_honk_verification_key(bytecode, false)
|
||||
.map_err(|e| format!("Failed to get verification key: {}", e))?;
|
||||
|
||||
if vk != msg.vk {
|
||||
return Err("Verification key mismatch between computed and provided by prover".into());
|
||||
}
|
||||
|
||||
let proof = msg.proof.clone();
|
||||
|
||||
// Validate proof has enough data.
|
||||
// The proof should start with the public inputs:
|
||||
// * We expect at least 3 * 32 bytes for the three date fields (day, month,
|
||||
// year)
|
||||
// * and 32*32 bytes for the hash
|
||||
let min_bytes = (32 + 3) * 32;
|
||||
if proof.len() < min_bytes {
|
||||
return Err(format!(
|
||||
"Proof too short: expected at least {} bytes, got {}",
|
||||
min_bytes,
|
||||
proof.len()
|
||||
)
|
||||
.into());
|
||||
}
|
||||
|
||||
// Check that the proof date is correctly included in the proof
|
||||
let proof_date_day: u32 = u32::from_be_bytes(proof[28..32].try_into()?);
|
||||
let proof_date_month: u32 = u32::from_be_bytes(proof[60..64].try_into()?);
|
||||
let proof_date_year: i32 = i32::from_be_bytes(proof[92..96].try_into()?);
|
||||
let proof_date_from_proof =
|
||||
NaiveDate::from_ymd_opt(proof_date_year, proof_date_month, proof_date_day)
|
||||
.ok_or("Invalid proof date in proof")?;
|
||||
let today = Local::now().date_naive();
|
||||
if (today - proof_date_from_proof).num_days() < 0 {
|
||||
return Err(format!(
|
||||
"The proof date can only be today or in the past: provided {}, today {}",
|
||||
proof_date_from_proof, today
|
||||
)
|
||||
.into());
|
||||
}
|
||||
|
||||
// Check that the committed hash in the proof matches the hash from the
|
||||
// commitment
|
||||
let committed_hash_in_proof: Vec<u8> = proof
|
||||
.chunks(32)
|
||||
.skip(3) // skip the first 3 chunks
|
||||
.take(32)
|
||||
.map(|chunk| *chunk.last().unwrap_or(&0))
|
||||
.collect();
|
||||
let expected_hash = committed_hash.value.as_bytes().to_vec();
|
||||
if committed_hash_in_proof != expected_hash {
|
||||
tracing::error!(
|
||||
"❌ The hash in the proof does not match the committed hash in MPC-TLS: {} != {}",
|
||||
hex::encode(&committed_hash_in_proof),
|
||||
hex::encode(&expected_hash)
|
||||
);
|
||||
return Err("Hash in proof does not match committed hash in MPC-TLS".into());
|
||||
}
|
||||
tracing::info!(
|
||||
"✅ The hash in the proof matches the committed hash in MPC-TLS ({})",
|
||||
hex::encode(&expected_hash)
|
||||
);
|
||||
|
||||
// Finally verify the proof
|
||||
let is_valid = verify_ultra_honk(msg.proof, msg.vk)
|
||||
.map_err(|e| format!("ZKProof Verification failed: {}", e))?;
|
||||
if !is_valid {
|
||||
tracing::error!("❌ Age verification ZKProof failed to verify");
|
||||
return Err("Age verification ZKProof failed to verify".into());
|
||||
}
|
||||
tracing::info!("✅ Age verification ZKProof successfully verified");
|
||||
|
||||
Ok(transcript)
|
||||
}
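Taken together, the prover and verifier above only need two byte streams between them: the main MPC-TLS channel and the extra socket carrying the ZK proof bundle. A minimal wiring sketch, assuming an in-memory transport and a locally running server fixture; the address and URI below are placeholders:

```
// Sketch only: wires the `prover` and `verifier` functions above over
// in-memory duplex pipes. The server address and URI are placeholders.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let (prover_main, verifier_main) = tokio::io::duplex(1 << 16);
    let (prover_extra, verifier_extra) = tokio::io::duplex(1 << 16);

    let server_addr: std::net::SocketAddr = "127.0.0.1:4000".parse()?;

    let (prover_res, verifier_res) = tokio::join!(
        prover(prover_main, prover_extra, &server_addr, "https://test-server.io/api"),
        verifier(verifier_main, verifier_extra),
    );
    prover_res?;
    let _transcript = verifier_res?;
    Ok(())
}
```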
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "tlsn-formats"
|
||||
version = "0.1.0-alpha.13"
|
||||
version = "0.1.0-alpha.13-pre"
|
||||
edition = "2021"
|
||||
|
||||
[lints]
|
||||
|
||||
@@ -3,19 +3,7 @@
|
||||
# Ensure the script runs in the folder that contains this script
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
RUNNER_FEATURES=""
|
||||
EXECUTOR_FEATURES=""
|
||||
|
||||
if [ "$1" = "debug" ]; then
|
||||
RUNNER_FEATURES="--features debug"
|
||||
EXECUTOR_FEATURES="--no-default-features --features debug"
|
||||
fi
|
||||
|
||||
cargo build --release \
|
||||
--package tlsn-harness-runner $RUNNER_FEATURES \
|
||||
--package tlsn-harness-executor $EXECUTOR_FEATURES \
|
||||
--package tlsn-server-fixture \
|
||||
--package tlsn-harness-plot
|
||||
cargo build --release --package tlsn-harness-runner --package tlsn-harness-executor --package tlsn-server-fixture --package tlsn-harness-plot
|
||||
|
||||
mkdir -p bin
|
||||
|
||||
|
||||
@@ -7,12 +7,12 @@ docker build --pull -t tlsn-bench . -f ./crates/harness/harness.Dockerfile
|
||||
|
||||
Next run the benches with:
|
||||
```
|
||||
docker run -it --privileged -v $(pwd)/crates/harness/:/benches tlsn-bench bash -c "runner setup; runner bench"
|
||||
docker run -it --privileged -v ./crates/harness/:/benches tlsn-bench bash -c "runner setup; runner bench"
|
||||
```
|
||||
The `--privileged` parameter is required because this test bench needs permission to create networks with certain parameters
|
||||
|
||||
To run the benches in a browser run:
|
||||
```
|
||||
docker run -it --privileged -v $(pwd)/crates/harness/:/benches tlsn-bench bash -c "runner setup; runner --target browser bench"
|
||||
docker run -it --privileged -v ./crates/harness/:/benches tlsn-bench bash -c "cd /; runner setup; runner --target browser bench"
|
||||
```
|
||||
|
||||
|
||||
@@ -1,14 +1,10 @@
|
||||
[target.wasm32-unknown-unknown]
|
||||
rustflags = [
|
||||
"-Ctarget-feature=+atomics,+bulk-memory,+mutable-globals,+simd128",
|
||||
"-Clink-arg=--shared-memory",
|
||||
"-C",
|
||||
"target-feature=+atomics,+bulk-memory,+mutable-globals,+simd128",
|
||||
"-C",
|
||||
# 4GB
|
||||
"-Clink-arg=--max-memory=4294967296",
|
||||
"-Clink-arg=--import-memory",
|
||||
"-Clink-arg=--export=__wasm_init_tls",
|
||||
"-Clink-arg=--export=__tls_size",
|
||||
"-Clink-arg=--export=__tls_align",
|
||||
"-Clink-arg=--export=__tls_base",
|
||||
"link-arg=--max-memory=4294967296",
|
||||
"--cfg",
|
||||
'getrandom_backend="wasm_js"',
|
||||
]
|
||||
|
||||
@@ -4,16 +4,13 @@ version = "0.1.0"
|
||||
edition = "2024"
|
||||
publish = false
|
||||
|
||||
[features]
|
||||
# Disable tracing events as a workaround for issue 959.
|
||||
default = ["tracing/release_max_level_off"]
|
||||
# Used to debug the executor itself.
|
||||
debug = []
|
||||
|
||||
[lib]
|
||||
name = "harness_executor"
|
||||
crate-type = ["cdylib", "rlib"]
|
||||
|
||||
[package.metadata.wasm-pack.profile.custom]
|
||||
wasm-opt = ["-O3"]
|
||||
|
||||
[dependencies]
|
||||
tlsn-harness-core = { workspace = true }
|
||||
tlsn = { workspace = true }
|
||||
@@ -34,7 +31,8 @@ tokio = { workspace = true, features = ["full"] }
|
||||
tokio-util = { workspace = true, features = ["compat"] }
|
||||
|
||||
[target.'cfg(target_arch = "wasm32")'.dependencies]
|
||||
tracing = { workspace = true }
|
||||
# Disable tracing events as a workaround for issue 959.
|
||||
tracing = { workspace = true, features = ["release_max_level_off"] }
|
||||
wasm-bindgen = { workspace = true }
|
||||
tlsn-wasm = { workspace = true }
|
||||
js-sys = { workspace = true }
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
FROM rust AS builder
|
||||
WORKDIR /usr/src/tlsn
|
||||
|
||||
ARG DEBUG=0
|
||||
|
||||
RUN \
|
||||
rustup update; \
|
||||
apt update && apt install -y clang; \
|
||||
@@ -12,12 +10,7 @@ RUN \
|
||||
COPY . .
|
||||
RUN \
|
||||
cd crates/harness; \
|
||||
# Pass `--build-arg DEBUG=1` to `docker build` if you need to debug the harness.
|
||||
if [ "$DEBUG" = "1" ]; then \
|
||||
./build.sh debug; \
|
||||
else \
|
||||
./build.sh; \
|
||||
fi
|
||||
./build.sh;
|
||||
|
||||
FROM debian:latest
|
||||
|
||||
|
||||
@@ -7,10 +7,6 @@ publish = false
|
||||
[lib]
|
||||
name = "harness_runner"
|
||||
|
||||
[features]
|
||||
# Used to debug the runner itself.
|
||||
debug = []
|
||||
|
||||
[dependencies]
|
||||
tlsn-harness-core = { workspace = true }
|
||||
tlsn-server-fixture = { workspace = true }
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
#![allow(unused_imports)]
|
||||
pub use futures::FutureExt;
|
||||
|
||||
pub use tracing::{debug, error};
|
||||
|
||||
pub use chromiumoxide::{
|
||||
Browser, Page,
|
||||
cdp::{
|
||||
browser_protocol::{
|
||||
log::{EventEntryAdded, LogEntryLevel},
|
||||
network::{EnableParams, SetCacheDisabledParams},
|
||||
page::ReloadParams,
|
||||
},
|
||||
js_protocol::runtime::EventExceptionThrown,
|
||||
},
|
||||
handler::HandlerConfig,
|
||||
};
|
||||
@@ -21,9 +21,6 @@ use harness_core::{
|
||||
|
||||
use crate::{Target, network::Namespace, rpc::Rpc};
|
||||
|
||||
#[cfg(feature = "debug")]
|
||||
use crate::debug_prelude::*;
|
||||
|
||||
pub struct Executor {
|
||||
ns: Namespace,
|
||||
config: ExecutorConfig,
|
||||
@@ -69,34 +66,20 @@ impl Executor {
|
||||
Id::One => self.config.network().rpc_1,
|
||||
};
|
||||
|
||||
let mut args = vec![
|
||||
"ip".into(),
|
||||
"netns".into(),
|
||||
"exec".into(),
|
||||
self.ns.name().into(),
|
||||
"env".into(),
|
||||
let process = duct::cmd!(
|
||||
"sudo",
|
||||
"ip",
|
||||
"netns",
|
||||
"exec",
|
||||
self.ns.name(),
|
||||
"env",
|
||||
format!("CONFIG={}", serde_json::to_string(&self.config)?),
|
||||
];
|
||||
|
||||
if cfg!(feature = "debug") {
|
||||
let level = &std::env::var("RUST_LOG").unwrap_or("debug".to_string());
|
||||
args.push("env".into());
|
||||
args.push(format!("RUST_LOG={}", level));
|
||||
};
|
||||
|
||||
args.push(executor_path.to_str().expect("valid path").into());
|
||||
|
||||
let process = duct::cmd("sudo", args);
|
||||
|
||||
let process = if !cfg!(feature = "debug") {
|
||||
process
|
||||
.stdout_capture()
|
||||
.stderr_capture()
|
||||
.unchecked()
|
||||
.start()?
|
||||
} else {
|
||||
process.unchecked().start()?
|
||||
};
|
||||
executor_path
|
||||
)
|
||||
.stdout_capture()
|
||||
.stderr_capture()
|
||||
.unchecked()
|
||||
.start()?;
|
||||
|
||||
let rpc = Rpc::new_native(rpc_addr).await?;
|
||||
|
||||
@@ -136,13 +119,10 @@ impl Executor {
|
||||
"--no-sandbox",
|
||||
format!("--user-data-dir={tmp}"),
|
||||
format!("--allowed-ips=10.250.0.1"),
|
||||
);
|
||||
|
||||
let process = if !cfg!(feature = "debug") {
|
||||
process.stderr_capture().stdout_capture().start()?
|
||||
} else {
|
||||
process.start()?
|
||||
};
|
||||
)
|
||||
.stderr_capture()
|
||||
.stdout_capture()
|
||||
.start()?;
|
||||
|
||||
const TIMEOUT: usize = 10000;
|
||||
const DELAY: usize = 100;
|
||||
@@ -191,38 +171,6 @@ impl Executor {
|
||||
.new_page(&format!("http://{wasm_addr}:{wasm_port}/index.html"))
|
||||
.await?;
|
||||
|
||||
#[cfg(feature = "debug")]
|
||||
tokio::spawn(register_listeners(page.clone()).await?);
|
||||
|
||||
#[cfg(feature = "debug")]
|
||||
async fn register_listeners(page: Page) -> Result<impl Future<Output = ()>> {
|
||||
let mut logs = page.event_listener::<EventEntryAdded>().await?.fuse();
|
||||
let mut exceptions =
|
||||
page.event_listener::<EventExceptionThrown>().await?.fuse();
|
||||
|
||||
Ok(futures::future::join(
|
||||
async move {
|
||||
while let Some(event) = logs.next().await {
|
||||
let entry = &event.entry;
|
||||
match entry.level {
|
||||
LogEntryLevel::Error => {
|
||||
error!("{:?}", entry);
|
||||
}
|
||||
_ => {
|
||||
debug!("{:?}: {}", entry.timestamp, entry.text);
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
async move {
|
||||
while let Some(event) = exceptions.next().await {
|
||||
error!("{:?}", event);
|
||||
}
|
||||
},
|
||||
)
|
||||
.map(|_| ()))
|
||||
}
|
||||
|
||||
page.execute(EnableParams::builder().build()).await?;
|
||||
page.execute(SetCacheDisabledParams {
|
||||
cache_disabled: true,
|
||||
|
||||
@@ -6,9 +6,6 @@ mod server_fixture;
|
||||
pub mod wasm_server;
|
||||
mod ws_proxy;
|
||||
|
||||
#[cfg(feature = "debug")]
|
||||
mod debug_prelude;
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Result;
|
||||
@@ -27,9 +24,6 @@ use cli::{Cli, Command};
|
||||
use executor::Executor;
|
||||
use server_fixture::ServerFixture;
|
||||
|
||||
#[cfg(feature = "debug")]
|
||||
use crate::debug_prelude::*;
|
||||
|
||||
use crate::{cli::Route, network::Network, wasm_server::WasmServer, ws_proxy::WsProxy};
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum)]
|
||||
@@ -119,9 +113,6 @@ impl Runner {
|
||||
}
|
||||
|
||||
pub async fn main() -> Result<()> {
|
||||
#[cfg(feature = "debug")]
|
||||
tracing_subscriber::fmt::init();
|
||||
|
||||
let cli = Cli::parse();
|
||||
let mut runner = Runner::new(&cli)?;
|
||||
|
||||
@@ -236,9 +227,6 @@ pub async fn main() -> Result<()> {
|
||||
// Wait for the network to stabilize
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
#[cfg(feature = "debug")]
|
||||
debug!("Starting bench in group {:?}", config.group);
|
||||
|
||||
let (output, _) = tokio::try_join!(
|
||||
runner.exec_p.bench(BenchCmd {
|
||||
config: config.clone(),
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "TLSNotary MPC-TLS protocol"
|
||||
keywords = ["tls", "mpc", "2pc"]
|
||||
categories = ["cryptography"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
version = "0.1.0-alpha.13"
|
||||
version = "0.1.0-alpha.13-pre"
|
||||
edition = "2021"
|
||||
|
||||
[lints]
|
||||
|
||||
@@ -41,7 +41,6 @@ use tls_core::{
|
||||
message::{OpaqueMessage, PlainMessage},
|
||||
},
|
||||
suites::SupportedCipherSuite,
|
||||
verify::verify_sig_determine_alg,
|
||||
};
|
||||
use tlsn_core::{
|
||||
connection::{CertBinding, CertBindingV1_2, ServerSignature, TlsVersion, VerifyData},
|
||||
@@ -328,20 +327,12 @@ impl MpcTlsLeader {
|
||||
.map(|cert| CertificateDer(cert.0.clone()))
|
||||
.collect();
|
||||
|
||||
let mut sig_msg = Vec::new();
|
||||
sig_msg.extend_from_slice(&client_random.0);
|
||||
sig_msg.extend_from_slice(&server_random.0);
|
||||
sig_msg.extend_from_slice(server_kx_details.kx_params());
|
||||
|
||||
let server_signature_alg = verify_sig_determine_alg(
|
||||
&server_cert_details.cert_chain()[0],
|
||||
&sig_msg,
|
||||
server_kx_details.kx_sig(),
|
||||
)
|
||||
.expect("only supported signature should have been accepted");
|
||||
|
||||
let server_signature = ServerSignature {
|
||||
alg: server_signature_alg.into(),
|
||||
scheme: server_kx_details
|
||||
.kx_sig()
|
||||
.scheme
|
||||
.try_into()
|
||||
.expect("only supported signature scheme should have been accepted"),
|
||||
sig: server_kx_details.kx_sig().sig.0.clone(),
|
||||
};
|
||||
|
||||
|
||||
@@ -72,5 +72,4 @@ pub(crate) struct ServerFinishedVd {
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[allow(dead_code)]
|
||||
pub(crate) struct CloseConnection;
|
||||
|
||||
@@ -193,7 +193,7 @@ where
|
||||
};
|
||||
|
||||
// Divide by block length and round up.
|
||||
let block_count = input.len() / 16 + !input.len().is_multiple_of(16) as usize;
|
||||
let block_count = input.len() / 16 + (input.len() % 16 != 0) as usize;
|
||||
|
||||
if block_count > MAX_POWER {
|
||||
return Err(ErrorRepr::InputLength {
|
||||
@@ -282,11 +282,11 @@ fn build_ghash_data(mut aad: Vec<u8>, mut ciphertext: Vec<u8>) -> Vec<u8> {
|
||||
let len_block = ((associated_data_bitlen as u128) << 64) + (text_bitlen as u128);
|
||||
|
||||
// Pad data to be a multiple of 16 bytes.
|
||||
let aad_padded_block_count = (aad.len() / 16) + !aad.len().is_multiple_of(16) as usize;
|
||||
let aad_padded_block_count = (aad.len() / 16) + (aad.len() % 16 != 0) as usize;
|
||||
aad.resize(aad_padded_block_count * 16, 0);
|
||||
|
||||
let ciphertext_padded_block_count =
|
||||
(ciphertext.len() / 16) + !ciphertext.len().is_multiple_of(16) as usize;
|
||||
(ciphertext.len() / 16) + (ciphertext.len() % 16 != 0) as usize;
|
||||
ciphertext.resize(ciphertext_padded_block_count * 16, 0);
|
||||
|
||||
let mut data: Vec<u8> = Vec::with_capacity(aad.len() + ciphertext.len() + 16);
|
||||
|
||||
crates/pdk/Cargo.toml (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
[package]
|
||||
name = "tlsn-pdk"
|
||||
version = "0.1.0"
|
||||
edition = "2024"
|
||||
|
||||
[dependencies]
|
||||
tlsn-core = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
bincode = { workspace = true }
|
||||
|
||||
wit-bindgen = "0.44"
|
||||
getrandom = { version = "0.3" }
|
||||
getrandom02 = { package = "getrandom", version = "0.2", features = ["custom"] }
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
crates/pdk/src/abi.rs (new file, 74 lines)
@@ -0,0 +1,74 @@
|
||||
mod generated {
|
||||
wit_bindgen::generate!({
|
||||
world: "plugin",
|
||||
path: "../sdk-core/wit/tlsn.wit",
|
||||
pub_export_macro: true,
|
||||
});
|
||||
|
||||
impl From<std::task::Poll<Result<Vec<u8>, String>>> for PollReturn {
|
||||
#[inline]
|
||||
fn from(value: std::task::Poll<Result<Vec<u8>, String>>) -> Self {
|
||||
match value {
|
||||
std::task::Poll::Ready(ret) => PollReturn::Ready(ret),
|
||||
std::task::Poll::Pending => PollReturn::Pending,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
unsafe extern "Rust" {
|
||||
fn __tlsn_entry_trampoline(
|
||||
arg: Vec<u8>,
|
||||
) -> ::std::pin::Pin<Box<dyn ::std::future::Future<Output = Result<Vec<u8>, String>>>>;
|
||||
}
|
||||
|
||||
thread_local! {
|
||||
static MAIN: ::std::cell::RefCell<Option<::std::pin::Pin<Box<dyn ::std::future::Future<Output = Result<Vec<u8>, String>>>>>> = ::std::cell::RefCell::new(None);
|
||||
}
|
||||
|
||||
struct Plugin;
|
||||
impl Guest for Plugin {
|
||||
fn start(arg: Vec<u8>) -> () {
|
||||
MAIN.with_borrow_mut(|fut| {
|
||||
if fut.is_some() {
|
||||
panic!("main future already set");
|
||||
}
|
||||
|
||||
*fut = Some(unsafe { __tlsn_entry_trampoline(arg) });
|
||||
})
|
||||
}
|
||||
|
||||
fn poll() -> PollReturn {
|
||||
MAIN.with_borrow_mut(|fut| {
|
||||
let Some(fut) = fut.as_mut() else {
|
||||
panic!("main future not set, must call start first");
|
||||
};
|
||||
|
||||
let mut cx = ::std::task::Context::from_waker(::std::task::Waker::noop());
|
||||
fut.as_mut().poll(&mut cx).into()
|
||||
})
|
||||
}
|
||||
}
|
||||
export!(Plugin);
|
||||
}
|
||||
|
||||
pub(crate) use generated::tlsn::tlsn::*;
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! entry {
|
||||
($path:path) => {
|
||||
#[unsafe(no_mangle)]
|
||||
extern "Rust" fn __tlsn_entry_trampoline(
|
||||
arg: Vec<u8>,
|
||||
) -> ::std::pin::Pin<Box<dyn ::std::future::Future<Output = Result<Vec<u8>, String>>>> {
|
||||
#[inline(always)]
|
||||
fn assert_async<F>(f: F) -> F
|
||||
where
|
||||
F: Future<Output = Result<Vec<u8>, String>>,
|
||||
{
|
||||
f
|
||||
}
|
||||
|
||||
Box::pin(assert_async($path(arg)))
|
||||
}
|
||||
};
|
||||
}
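For a plugin author, the `entry!` macro above is the only required glue: it registers an async entry function as the `__tlsn_entry_trampoline` that `start` and `poll` drive. A minimal sketch of a plugin using it (the function name and body are placeholders):

```
// Sketch of a plugin's entry point; `plugin_main` is an arbitrary name.
use tlsn_pdk::entry;

async fn plugin_main(arg: Vec<u8>) -> Result<Vec<u8>, String> {
    // ... build a Prover or Verifier from the PDK and run the session ...
    Ok(arg)
}

entry!(plugin_main);
```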
|
||||
crates/pdk/src/lib.rs (new file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
mod abi;
|
||||
|
||||
pub mod prover;
|
||||
pub mod verifier;
|
||||
|
||||
pub use tlsn_core::{config, connection, webpki};
|
||||
|
||||
#[unsafe(no_mangle)]
|
||||
unsafe extern "Rust" fn __getrandom_v03_custom(
|
||||
dest: *mut u8,
|
||||
len: usize,
|
||||
) -> Result<(), getrandom::Error> {
|
||||
Err(getrandom::Error::UNSUPPORTED)
|
||||
}
|
||||
|
||||
fn always_fail(buf: &mut [u8]) -> Result<(), getrandom02::Error> {
|
||||
let code = core::num::NonZeroU32::new(1).unwrap();
|
||||
Err(getrandom02::Error::from(code))
|
||||
}
|
||||
|
||||
getrandom02::register_custom_getrandom!(always_fail);
|
||||
crates/pdk/src/prover.rs (new file, 257 lines)
@@ -0,0 +1,257 @@
|
||||
use std::{
|
||||
future::poll_fn,
|
||||
io,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
use futures::{AsyncRead, AsyncWrite};
|
||||
use tlsn_core::transcript::{TlsTranscript, Transcript};
|
||||
|
||||
use crate::abi;
|
||||
|
||||
pub use tlsn_core::{
|
||||
ProverOutput,
|
||||
config::{ProveConfig, ProverConfig},
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ProverError {}
|
||||
|
||||
impl std::error::Error for ProverError {}
|
||||
|
||||
impl std::fmt::Display for ProverError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "ProverError")
|
||||
}
|
||||
}
|
||||
|
||||
pub mod state {
|
||||
use tlsn_core::transcript::{TlsTranscript, Transcript};
|
||||
|
||||
mod sealed {
|
||||
pub trait Sealed {}
|
||||
}
|
||||
|
||||
pub trait ProverState: sealed::Sealed {}
|
||||
|
||||
pub struct Initialized {}
|
||||
pub struct Setup {}
|
||||
pub struct Committed {
|
||||
pub(super) tls_transcript: TlsTranscript,
|
||||
pub(super) transcript: Transcript,
|
||||
}
|
||||
|
||||
impl sealed::Sealed for Initialized {}
|
||||
impl sealed::Sealed for Setup {}
|
||||
impl sealed::Sealed for Committed {}
|
||||
|
||||
impl ProverState for Initialized {}
|
||||
impl ProverState for Setup {}
|
||||
impl ProverState for Committed {}
|
||||
}
|
||||
|
||||
pub struct Prover<T: state::ProverState = state::Initialized> {
|
||||
handle: abi::prove::Prover,
|
||||
state: T,
|
||||
}
|
||||
|
||||
impl Prover {
|
||||
/// Creates a new prover.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `config` - The configuration for the prover.
|
||||
pub fn new(config: ProverConfig) -> Self {
|
||||
let config = bincode::serialize(&config).unwrap();
|
||||
|
||||
let handle = abi::prove::Prover::new(&config);
|
||||
|
||||
Self {
|
||||
handle,
|
||||
state: state::Initialized {},
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn setup(self) -> Result<Prover<state::Setup>, ProverError> {
|
||||
poll_fn(|_| {
|
||||
if let abi::prove::SetupReturn::Ready = self.handle.setup() {
|
||||
Poll::Ready(())
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
})
|
||||
.await;
|
||||
|
||||
Ok(Prover {
|
||||
handle: self.handle,
|
||||
state: state::Setup {},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Prover<state::Setup> {
|
||||
pub async fn connect(self) -> Result<(TlsConnection, ProverFuture), ProverError> {
|
||||
let io = poll_fn(|_| {
|
||||
if let abi::prove::ConnectReturn::Ready(io) = self.handle.connect() {
|
||||
Poll::Ready(io)
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
})
|
||||
.await;
|
||||
|
||||
Ok((
|
||||
TlsConnection { handle: io },
|
||||
ProverFuture {
|
||||
handle: Some(self.handle),
|
||||
},
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl Prover<state::Committed> {
|
||||
pub fn tls_transcript(&self) -> &TlsTranscript {
|
||||
&self.state.tls_transcript
|
||||
}
|
||||
|
||||
pub fn transcript(&self) -> &Transcript {
|
||||
&self.state.transcript
|
||||
}
|
||||
|
||||
pub async fn prove(&mut self, config: &ProveConfig) -> Result<ProverOutput, ProverError> {
|
||||
let config = bincode::serialize(&config).unwrap();
|
||||
|
||||
self.handle.prove(&config);
|
||||
|
||||
let res = poll_fn(|_| {
|
||||
if let abi::prove::ProveReturn::Ready(res) = self.handle.finish_prove() {
|
||||
Poll::Ready(res)
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
})
|
||||
.await;
|
||||
|
||||
res.map(|output| bincode::deserialize(&output).unwrap())
|
||||
.map_err(|_| todo!())
|
||||
}
|
||||
|
||||
pub async fn close(self) -> Result<(), ProverError> {
|
||||
poll_fn(|_| {
|
||||
if let abi::prove::CloseReturn::Ready = self.handle.close() {
|
||||
Poll::Ready(())
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
})
|
||||
.await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ProverFuture {
|
||||
handle: Option<abi::prove::Prover>,
|
||||
}
|
||||
|
||||
impl Future for ProverFuture {
|
||||
type Output = Result<Prover<state::Committed>, ProverError>;
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let handle = self
|
||||
.handle
|
||||
.take()
|
||||
.expect("future should not be polled after completion.");
|
||||
|
||||
if let abi::prove::CommitReturn::Ready(res) = handle.finish_commit() {
|
||||
Poll::Ready(
|
||||
res.map(|data| {
|
||||
let (tls_transcript, transcript) = bincode::deserialize(&data).unwrap();
|
||||
|
||||
Prover {
|
||||
handle: handle,
|
||||
state: state::Committed {
|
||||
tls_transcript,
|
||||
transcript,
|
||||
},
|
||||
}
|
||||
})
|
||||
.map_err(|_| todo!()),
|
||||
)
|
||||
} else {
|
||||
self.handle = Some(handle);
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TlsConnection {
|
||||
handle: abi::io::Io,
|
||||
}
|
||||
|
||||
impl AsyncWrite for TlsConnection {
|
||||
fn poll_write(
|
||||
self: Pin<&mut Self>,
|
||||
_cx: &mut Context<'_>,
|
||||
buf: &[u8],
|
||||
) -> Poll<io::Result<usize>> {
|
||||
let n = match self.handle.check_write() {
|
||||
abi::io::CheckWriteReturn::Pending => {
|
||||
return Poll::Pending;
|
||||
}
|
||||
abi::io::CheckWriteReturn::Ready(Ok(n)) => (n as usize).min(buf.len()),
|
||||
abi::io::CheckWriteReturn::Ready(Err(e)) => {
|
||||
return Poll::Ready(Err(match e {
|
||||
abi::io::Error::Closed => {
|
||||
io::Error::new(io::ErrorKind::ConnectionAborted, "connection closed")
|
||||
}
|
||||
abi::io::Error::Other(e) => io::Error::new(io::ErrorKind::Other, e),
|
||||
}));
|
||||
}
|
||||
};
|
||||
|
||||
self.handle.write(&buf[..n]).unwrap();
|
||||
|
||||
Poll::Ready(Ok(n))
|
||||
}
|
||||
|
||||
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||
match self.handle.close() {
|
||||
abi::io::CloseReturn::Pending => Poll::Pending,
|
||||
abi::io::CloseReturn::Ready(Ok(())) => Poll::Ready(Ok(())),
|
||||
abi::io::CloseReturn::Ready(Err(e)) => Poll::Ready(Err(match e {
|
||||
abi::io::Error::Closed => {
|
||||
io::Error::new(io::ErrorKind::ConnectionAborted, "connection closed")
|
||||
}
|
||||
abi::io::Error::Other(e) => io::Error::new(io::ErrorKind::Other, e),
|
||||
})),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl AsyncRead for TlsConnection {
|
||||
fn poll_read(
|
||||
self: Pin<&mut Self>,
|
||||
_cx: &mut Context<'_>,
|
||||
buf: &mut [u8],
|
||||
) -> Poll<io::Result<usize>> {
|
||||
match self.handle.read(buf.len() as u32) {
|
||||
abi::io::ReadReturn::Pending => Poll::Pending,
|
||||
abi::io::ReadReturn::Ready(Ok(data)) => {
|
||||
assert!(data.len() <= buf.len());
|
||||
buf[..data.len()].copy_from_slice(&data);
|
||||
|
||||
Poll::Ready(Ok(data.len()))
|
||||
}
|
||||
abi::io::ReadReturn::Ready(Err(abi::io::Error::Closed)) => Poll::Ready(Ok(0)),
|
||||
abi::io::ReadReturn::Ready(Err(e)) => {
|
||||
Poll::Ready(Err(io::Error::new(io::ErrorKind::Other, e)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
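The typestates above encode the prover call order. A hedged sketch of the sequence a plugin would follow (configuration values omitted; the application-layer client is not shown):

```
// Sketch only: the call order implied by the typestates above.
async fn run_prover(config: ProverConfig, prove_config: ProveConfig) -> Result<(), ProverError> {
    let prover = Prover::new(config).setup().await?;
    let (_tls_conn, prover_fut) = prover.connect().await?;

    // In real code, drive `_tls_conn` with an application-layer client (e.g. an
    // HTTP exchange) concurrently with awaiting `prover_fut`.
    let mut prover = prover_fut.await?;

    let _transcript = prover.transcript();
    let _output = prover.prove(&prove_config).await?;
    prover.close().await?;
    Ok(())
}
```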
|
||||
crates/pdk/src/verifier.rs (new file, 131 lines)
@@ -0,0 +1,131 @@
|
||||
use std::{future::poll_fn, task::Poll};
|
||||
|
||||
pub use tlsn_core::{
|
||||
VerifierOutput,
|
||||
config::{VerifierConfig, VerifyConfig},
|
||||
};
|
||||
|
||||
use crate::abi;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct VerifierError {}
|
||||
|
||||
impl std::error::Error for VerifierError {}
|
||||
|
||||
impl std::fmt::Display for VerifierError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "VerifierError")
|
||||
}
|
||||
}
|
||||
|
||||
pub mod state {
|
||||
use tlsn_core::transcript::TlsTranscript;
|
||||
|
||||
mod sealed {
|
||||
pub trait Sealed {}
|
||||
}
|
||||
|
||||
pub trait VerifierState: sealed::Sealed {}
|
||||
|
||||
pub struct Initialized {}
|
||||
pub struct Setup {}
|
||||
pub struct Committed {
|
||||
pub(super) tls_transcript: TlsTranscript,
|
||||
}
|
||||
|
||||
impl sealed::Sealed for Initialized {}
|
||||
impl sealed::Sealed for Setup {}
|
||||
impl sealed::Sealed for Committed {}
|
||||
|
||||
impl VerifierState for Initialized {}
|
||||
impl VerifierState for Setup {}
|
||||
impl VerifierState for Committed {}
|
||||
}
|
||||
|
||||
pub struct Verifier<T: state::VerifierState = state::Initialized> {
|
||||
handle: abi::verify::Verifier,
|
||||
state: T,
|
||||
}
|
||||
|
||||
impl Verifier {
|
||||
pub fn new(config: VerifierConfig) -> Self {
|
||||
let config = bincode::serialize(&config).unwrap();
|
||||
|
||||
let handle = abi::verify::Verifier::new(&config);
|
||||
|
||||
Self {
|
||||
handle,
|
||||
state: state::Initialized {},
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn setup(self) -> Result<Verifier<state::Setup>, VerifierError> {
|
||||
poll_fn(|_| {
|
||||
if let abi::verify::SetupReturn::Ready = self.handle.setup() {
|
||||
Poll::Ready(())
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
})
|
||||
.await;
|
||||
|
||||
Ok(Verifier {
|
||||
handle: self.handle,
|
||||
state: state::Setup {},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Verifier<state::Setup> {
|
||||
pub async fn run(self) -> Result<Verifier<state::Committed>, VerifierError> {
|
||||
let res = poll_fn(|_| {
|
||||
if let abi::verify::CommitReturn::Ready(res) = self.handle.commit() {
|
||||
Poll::Ready(res)
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
})
|
||||
.await;
|
||||
|
||||
res.map(|data| Verifier {
|
||||
handle: self.handle,
|
||||
state: state::Committed {
|
||||
tls_transcript: bincode::deserialize(&data).unwrap(),
|
||||
},
|
||||
})
|
||||
.map_err(|_| todo!())
|
||||
}
|
||||
}
|
||||
|
||||
impl Verifier<state::Committed> {
|
||||
pub async fn verify(&mut self, config: &VerifyConfig) -> Result<VerifierOutput, VerifierError> {
|
||||
let config = bincode::serialize(&config).unwrap();
|
||||
|
||||
self.handle.verify(&config);
|
||||
|
||||
let res = poll_fn(|_| {
|
||||
if let abi::verify::VerifyReturn::Ready(res) = self.handle.finish_verify() {
|
||||
Poll::Ready(res)
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
})
|
||||
.await;
|
||||
|
||||
res.map(|output| bincode::deserialize(&output).unwrap())
|
||||
.map_err(|_| todo!())
|
||||
}
|
||||
|
||||
pub async fn close(self) -> Result<(), VerifierError> {
|
||||
poll_fn(|_| {
|
||||
if let abi::verify::CloseReturn::Ready = self.handle.close() {
|
||||
Poll::Ready(())
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
})
|
||||
.await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
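The verifier mirrors the prover's typestate flow. A hedged sketch of the call order implied by the states above:

```
// Sketch only: the verifier-side call order implied by the typestates above.
async fn run_verifier(
    config: VerifierConfig,
    verify_config: VerifyConfig,
) -> Result<VerifierOutput, VerifierError> {
    let verifier = Verifier::new(config).setup().await?;
    let mut verifier = verifier.run().await?;
    let output = verifier.verify(&verify_config).await?;
    verifier.close().await?;
    Ok(output)
}
```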
|
||||
crates/sdk-core/Cargo.toml (new file, 19 lines)
@@ -0,0 +1,19 @@
|
||||
[package]
|
||||
name = "tlsn-sdk-core"
|
||||
version = "0.1.0"
|
||||
edition = "2024"
|
||||
|
||||
[dependencies]
|
||||
tlsn = { workspace = true }
|
||||
wasmtime = { version = "34.0", features = ["component-model"] }
|
||||
bincode = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
pin-project-lite = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
futures-plex = { workspace = true }
|
||||
tlsn-server-fixture = { workspace = true }
|
||||
tlsn-server-fixture-certs = { workspace = true }
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
crates/sdk-core/src/instance.rs (new file, 155 lines)
@@ -0,0 +1,155 @@
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
task::{Context as StdContext, Poll},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
Error, IoProvider,
|
||||
io::{IoId, IoInstance},
|
||||
prover::{ProverId, ProverInstance},
|
||||
verifier::{VerifierId, VerifierInstance},
|
||||
};
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct Instance {
|
||||
pub state: State,
|
||||
pub cx: Context,
|
||||
}
|
||||
|
||||
pub struct Context {
|
||||
pub waker: Waker,
|
||||
}
|
||||
|
||||
impl Default for Context {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
waker: Waker::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Waker {
|
||||
wants_wake: bool,
|
||||
wants_call: bool,
|
||||
}
|
||||
|
||||
impl Waker {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
wants_wake: false,
|
||||
wants_call: true,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_wake(&mut self) {
|
||||
self.wants_wake = true;
|
||||
}
|
||||
|
||||
pub fn clear_wake(&mut self) {
|
||||
self.wants_wake = false;
|
||||
}
|
||||
|
||||
pub fn set_call(&mut self) {
|
||||
self.wants_call = true;
|
||||
}
|
||||
|
||||
pub fn clear_call(&mut self) {
|
||||
self.wants_call = false;
|
||||
}
|
||||
|
||||
pub fn wants_wake(&self) -> bool {
|
||||
self.wants_wake
|
||||
}
|
||||
|
||||
pub fn wants_call(&self) -> bool {
|
||||
self.wants_call
|
||||
}
|
||||
}
|
||||
|
||||
/// Plugin instance.
|
||||
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
|
||||
pub struct InstanceId(pub usize);
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct State {
|
||||
provers: HashMap<ProverId, ProverInstance>,
|
||||
verifiers: HashMap<VerifierId, VerifierInstance>,
|
||||
}
|
||||
|
||||
impl State {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
provers: HashMap::new(),
|
||||
verifiers: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_prover(&mut self, config: Vec<u8>) -> Result<ProverId, Error> {
|
||||
let instance = ProverInstance::new(config)?;
|
||||
|
||||
let id = ProverId(self.provers.len());
|
||||
self.provers.insert(id, instance);
|
||||
|
||||
Ok(id)
|
||||
}
|
||||
|
||||
pub fn new_verifier(&mut self, config: Vec<u8>) -> Result<VerifierId, Error> {
|
||||
let instance = VerifierInstance::new(config)?;
|
||||
|
||||
let id = VerifierId(self.verifiers.len());
|
||||
self.verifiers.insert(id, instance);
|
||||
|
||||
Ok(id)
|
||||
}
|
||||
|
||||
pub fn get_prover_mut(&mut self, id: ProverId) -> Result<&mut ProverInstance, Error> {
|
||||
self.provers.get_mut(&id).ok_or_else(|| todo!())
|
||||
}
|
||||
|
||||
pub fn get_io_mut(&mut self, id: IoId) -> Result<&mut IoInstance, Error> {
|
||||
self.get_prover_mut(ProverId(id.0))?
|
||||
.io_mut()
|
||||
.ok_or_else(|| todo!())
|
||||
}
|
||||
|
||||
pub fn get_verifier_mut(&mut self, id: VerifierId) -> Result<&mut VerifierInstance, Error> {
|
||||
self.verifiers.get_mut(&id).ok_or_else(|| todo!())
|
||||
}
|
||||
|
||||
pub fn poll(
|
||||
&mut self,
|
||||
cx_std: &mut StdContext<'_>,
|
||||
cx: &mut Context,
|
||||
io: &mut impl IoProvider,
|
||||
) -> Poll<Result<(), Error>> {
|
||||
let mut ready = Vec::new();
|
||||
for (&id, prover) in self.provers.iter_mut() {
|
||||
if let Poll::Ready(res) = prover.poll(cx_std, cx, io) {
|
||||
res.unwrap();
|
||||
ready.push(id);
|
||||
}
|
||||
}
|
||||
|
||||
for id in ready {
|
||||
self.provers.remove(&id);
|
||||
}
|
||||
|
||||
let mut ready = Vec::new();
|
||||
for (&id, verifier) in self.verifiers.iter_mut() {
|
||||
if let Poll::Ready(res) = verifier.poll(cx_std, cx, io) {
|
||||
res.unwrap();
|
||||
ready.push(id);
|
||||
}
|
||||
}
|
||||
|
||||
for id in ready {
|
||||
self.verifiers.remove(&id);
|
||||
}
|
||||
|
||||
if self.provers.is_empty() && self.verifiers.is_empty() {
|
||||
return Poll::Ready(Ok(()));
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
}
|
||||
crates/sdk-core/src/io.rs (new file, 262 lines)
@@ -0,0 +1,262 @@
|
||||
use std::{
|
||||
pin::pin,
|
||||
task::{Context as StdContext, Poll},
|
||||
};
|
||||
|
||||
use futures::{AsyncRead, AsyncWrite};
|
||||
use tlsn::prover::TlsConnection;
|
||||
|
||||
use crate::{Error, instance::Context};
|
||||
|
||||
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
|
||||
pub struct IoId(pub usize);
|
||||
|
||||
pub struct IoInstance {
|
||||
conn: TlsConnection,
|
||||
write_buf: Buf,
|
||||
wants_write_close: bool,
|
||||
write_closed: bool,
|
||||
read_buf: Buf,
|
||||
read_closed: bool,
|
||||
wants_write: bool,
|
||||
wants_read: bool,
|
||||
}
|
||||
|
||||
impl IoInstance {
|
||||
pub(crate) fn new(conn: TlsConnection) -> Self {
|
||||
const BUF_SIZE: usize = 8192;
|
||||
Self {
|
||||
conn,
|
||||
write_buf: Buf::new(BUF_SIZE),
|
||||
wants_write_close: false,
|
||||
write_closed: false,
|
||||
read_buf: Buf::new(BUF_SIZE),
|
||||
read_closed: false,
|
||||
wants_write: false,
|
||||
wants_read: false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn check_write(&mut self, cx: &mut Context) -> Poll<Result<usize, std::io::Error>> {
|
||||
if self.write_closed {
|
||||
return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into()));
|
||||
}
|
||||
|
||||
match self.write_buf.remaining_mut() {
|
||||
0 => {
|
||||
self.wants_write = true;
|
||||
cx.waker.set_wake();
|
||||
Poll::Pending
|
||||
}
|
||||
n => Poll::Ready(Ok(n)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn write(&mut self, buf: &[u8]) -> Result<(), std::io::Error> {
|
||||
if self.write_closed {
|
||||
return Err(std::io::ErrorKind::BrokenPipe.into());
|
||||
}
|
||||
|
||||
let remaining_capacity = self.write_buf.remaining_mut();
|
||||
if buf.len() > remaining_capacity {
|
||||
todo!()
|
||||
}
|
||||
|
||||
let n = buf.len().min(remaining_capacity);
|
||||
|
||||
self.write_buf.chunk_mut()[..n].copy_from_slice(&buf[..n]);
|
||||
self.write_buf.advance_mut(n);
|
||||
|
||||
self.wants_write = false;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn flush(&mut self) -> Result<(), std::io::Error> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn available(&self) -> usize {
|
||||
self.read_buf.remaining()
|
||||
}
|
||||
|
||||
pub fn read_closed(&self) -> bool {
|
||||
self.read_closed
|
||||
}
|
||||
|
||||
pub fn read(&mut self, len: usize, cx: &mut Context) -> Poll<Result<Vec<u8>, std::io::Error>> {
|
||||
let chunk = self.read_buf.chunk();
|
||||
let available = chunk.len();
|
||||
|
||||
if available == 0 && !self.read_closed {
|
||||
self.wants_read = true;
|
||||
cx.waker.set_wake();
|
||||
return Poll::Pending;
|
||||
}
|
||||
|
||||
let len = available.min(len);
|
||||
let out = chunk[..len].to_vec();
|
||||
self.read_buf.advance(len);
|
||||
|
||||
self.wants_read = false;
|
||||
|
||||
Poll::Ready(Ok(out))
|
||||
}
|
||||
|
||||
pub fn close(&mut self, cx: &mut Context) -> Poll<Result<(), std::io::Error>> {
|
||||
if self.write_closed {
|
||||
Poll::Ready(Ok(()))
|
||||
} else {
|
||||
self.wants_write_close = true;
|
||||
cx.waker.set_wake();
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
|
||||
pub fn poll(
|
||||
&mut self,
|
||||
cx_std: &mut StdContext<'_>,
|
||||
cx: &mut Context,
|
||||
) -> Poll<Result<(), Error>> {
|
||||
while self.read_buf.remaining_mut() > 0 && !self.read_closed {
|
||||
if let Poll::Ready(res) =
|
||||
pin!(&mut self.conn).poll_read(cx_std, self.read_buf.chunk_mut())
|
||||
{
|
||||
let n = res.unwrap();
|
||||
self.read_buf.advance_mut(n);
|
||||
|
||||
if n == 0 {
|
||||
println!("server closed conn");
|
||||
self.read_closed = true;
|
||||
}
|
||||
|
||||
if self.wants_read {
|
||||
cx.waker.set_call();
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
while self.write_buf.remaining() > 0 {
|
||||
if let Poll::Ready(res) =
|
||||
pin!(&mut self.conn).poll_write(cx_std, self.write_buf.chunk())
|
||||
{
|
||||
let n = res.unwrap();
|
||||
println!("prover wrote {n} bytes to server");
|
||||
self.write_buf.advance(n);
|
||||
|
||||
if self.wants_write {
|
||||
cx.waker.set_call();
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if self.write_buf.remaining() == 0 && self.wants_write_close && !self.write_closed {
|
||||
if let Poll::Ready(res) = pin!(&mut self.conn).poll_close(cx_std) {
|
||||
res.unwrap();
|
||||
self.write_closed = true;
|
||||
if self.wants_write_close {
|
||||
cx.waker.set_call();
|
||||
}
|
||||
println!("prover closed conn");
|
||||
}
|
||||
}
|
||||
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
|
||||
/// A fixed-size buffer that is guaranteed to be initialized.
|
||||
pub(crate) struct Buf {
|
||||
data: Box<[u8]>,
|
||||
len: usize,
|
||||
pos: usize,
|
||||
cap: usize,
|
||||
}
|
||||
|
||||
impl Buf {
|
||||
pub(crate) fn new(size: usize) -> Self {
|
||||
// SAFETY: It is critical that memory of the buffer is initialized.
|
||||
#[allow(unused_unsafe)]
|
||||
let buf = unsafe { vec![0; size].into_boxed_slice() };
|
||||
|
||||
Self {
|
||||
data: buf,
|
||||
len: 0,
|
||||
pos: 0,
|
||||
cap: size,
|
||||
}
|
||||
}
|
||||
|
||||
/// Remaining bytes in the buffer.
|
||||
pub(crate) fn remaining(&self) -> usize {
|
||||
self.len - self.pos
|
||||
}
|
||||
|
||||
/// Returns a reference to the bytes in the buffer.
|
||||
pub(crate) fn chunk(&self) -> &[u8] {
|
||||
&self.data[self.pos..self.len]
|
||||
}
|
||||
|
||||
/// Advance the position of the buffer.
|
||||
pub(crate) fn advance(&mut self, cnt: usize) {
|
||||
assert!(cnt <= self.remaining(), "advance past end of buffer");
|
||||
self.pos += cnt;
|
||||
if self.pos == self.len {
|
||||
self.pos = 0;
|
||||
self.len = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/// Remaining room in the buffer.
|
||||
pub(crate) fn remaining_mut(&self) -> usize {
|
||||
self.cap - self.len
|
||||
}
|
||||
|
||||
/// Advance the length of the buffer.
|
||||
pub(crate) fn advance_mut(&mut self, cnt: usize) {
|
||||
assert!(self.len + cnt <= self.cap, "advance past end of buffer");
|
||||
self.len += cnt;
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the remaining room in the buffer.
|
||||
pub(crate) fn chunk_mut(&mut self) -> &mut [u8] {
|
||||
&mut self.data[self.len..]
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_fixed_buf() {
|
||||
let mut buf = Buf::new(10);
|
||||
|
||||
assert_eq!(buf.remaining(), 0);
|
||||
assert_eq!(buf.remaining_mut(), 10);
|
||||
assert_eq!(buf.chunk(), &[] as &[u8]);
|
||||
assert_eq!(buf.chunk_mut(), &[0; 10]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fixed_buf_advance() {
|
||||
let mut buf = Buf::new(10);
|
||||
|
||||
buf.advance_mut(5);
|
||||
assert_eq!(buf.remaining_mut(), 5);
|
||||
assert_eq!(buf.remaining(), 5);
|
||||
|
||||
buf.advance(3);
|
||||
assert_eq!(buf.remaining_mut(), 5);
|
||||
assert_eq!(buf.remaining(), 2);
|
||||
|
||||
buf.advance(buf.remaining());
|
||||
assert_eq!(buf.remaining(), 0);
|
||||
// Buffer should reset.
|
||||
assert_eq!(buf.remaining_mut(), 10);
|
||||
}
|
||||
}
|
||||
crates/sdk-core/src/lib.rs (new file, 105 lines)
@@ -0,0 +1,105 @@
|
||||
pub(crate) mod instance;
|
||||
pub(crate) mod io;
|
||||
pub(crate) mod prover;
|
||||
pub(crate) mod provider;
|
||||
pub(crate) mod verifier;
|
||||
pub(crate) mod wasm;
|
||||
mod wasmtime;
|
||||
|
||||
pub use provider::IoProvider;
|
||||
pub use wasmtime::Wasmtime;
|
||||
|
||||
use std::{
|
||||
future::poll_fn,
|
||||
sync::{Arc, Mutex},
|
||||
task::Poll,
|
||||
};
|
||||
|
||||
use crate::{instance::State, wasm::WasmRuntime};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Error;
|
||||
|
||||
pub struct Plugin {
|
||||
pub manifest: Manifest,
|
||||
pub binary: Binary,
|
||||
}
|
||||
|
||||
pub struct Manifest {}
|
||||
|
||||
/// Plugin WASM binary.
|
||||
pub struct Binary(pub Vec<u8>);
|
||||
|
||||
pub struct Runtime<T, I> {
|
||||
wasm: T,
|
||||
io_provider: I,
|
||||
}
|
||||
|
||||
impl<T, I> Runtime<T, I> {
|
||||
pub fn new(wasm: T, io_provider: I) -> Self {
|
||||
Self { wasm, io_provider }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, I> Runtime<T, I>
|
||||
where
|
||||
T: WasmRuntime,
|
||||
I: IoProvider,
|
||||
{
|
||||
pub async fn run_plugin(&mut self, plugin: &Plugin, input: &[u8]) -> Vec<u8> {
|
||||
let wasm = self.wasm.load(&plugin.binary).unwrap();
|
||||
|
||||
let instance = Arc::new(Mutex::new(instance::Instance::default()));
|
||||
let id = self
|
||||
.wasm
|
||||
.instantiate(wasm, instance.clone(), input)
|
||||
.unwrap();
|
||||
|
||||
let mut output = None;
|
||||
let mut ready_0 = false;
|
||||
let mut ready_1 = false;
|
||||
let output = poll_fn(|cx_std| {
|
||||
let wants_call = instance.lock().unwrap().cx.waker.wants_call();
|
||||
if !ready_0
|
||||
&& wants_call
|
||||
&& let Poll::Ready(res) = self.wasm.poll(id).unwrap()
|
||||
{
|
||||
output = Some(res.inspect(|data| {
|
||||
println!("plugin output: {}", String::from_utf8_lossy(data));
|
||||
}));
|
||||
ready_0 = true;
|
||||
}
|
||||
|
||||
let mut instance = instance.lock().unwrap();
|
||||
if !ready_0 {
|
||||
if instance.cx.waker.wants_wake() {
|
||||
cx_std.waker().wake_by_ref();
|
||||
} else {
|
||||
panic!("plugin isn't waiting for anything");
|
||||
}
|
||||
}
|
||||
|
||||
if !ready_1 {
|
||||
let instance = &mut (*instance);
|
||||
if let Poll::Ready(res) =
|
||||
instance
|
||||
.state
|
||||
.poll(cx_std, &mut instance.cx, &mut self.io_provider)
|
||||
{
|
||||
res.unwrap();
|
||||
ready_1 = true;
|
||||
}
|
||||
}
|
||||
|
||||
if ready_0 && ready_1 {
|
||||
return Poll::Ready(Ok::<_, Error>(output.take().unwrap()));
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
output.unwrap()
|
||||
}
|
||||
}
|
||||
388 crates/sdk-core/src/prover.rs Normal file
@@ -0,0 +1,388 @@
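// Prover-side state machine for the plugin host. The plugin-facing methods
// (`setup`, `connect`, `finish_commit`, `prove`, `finish_prove`, `close`) are
// non-blocking: each one advances `State`, returns `Poll`, and requests a wake,
// while `poll` drives the underlying futures and the server I/O on the host executor.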
|
||||
use std::{
|
||||
pin::Pin,
|
||||
task::{Context as StdContext, Poll},
|
||||
};
|
||||
|
||||
use futures::FutureExt;
|
||||
use tlsn::{
|
||||
config::ProverConfig,
|
||||
connection::ServerName,
|
||||
prover::{ProveConfig, Prover, ProverFuture, ProverOutput, TlsConnection, state},
|
||||
transcript::{TlsTranscript, Transcript},
|
||||
};
|
||||
|
||||
use crate::{Error, IoProvider, instance::Context, io::IoInstance};
|
||||
|
||||
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
|
||||
pub struct ProverId(pub usize);
|
||||
|
||||
pub struct ProverInstance {
|
||||
state: State,
|
||||
server_name: ServerName,
|
||||
server_io: Option<IoInstance>,
|
||||
wants_state_update: bool,
|
||||
}
|
||||
|
||||
impl ProverInstance {
|
||||
pub fn new(config: Vec<u8>) -> Result<Self, Error> {
|
||||
println!("prover new");
|
||||
let config: ProverConfig = bincode::deserialize(&config).unwrap();
|
||||
|
||||
let server_name = config.server_name().clone();
|
||||
let prover = Prover::new(config);
|
||||
|
||||
Ok(Self {
|
||||
state: State::Init(prover),
|
||||
server_name,
|
||||
server_io: None,
|
||||
wants_state_update: false,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn io_mut(&mut self) -> Option<&mut IoInstance> {
|
||||
self.server_io.as_mut()
|
||||
}
|
||||
|
||||
pub fn setup(&mut self, cx: &mut Context) -> Result<Poll<()>, Error> {
|
||||
match self.state.take() {
|
||||
State::Init(prover) => {
|
||||
self.state = State::Preprocess(prover);
|
||||
}
|
||||
State::Preprocess(prover) => {
|
||||
self.state = State::Preprocess(prover);
|
||||
}
|
||||
State::Preprocessing(fut) => {
|
||||
self.state = State::Preprocessing(fut);
|
||||
}
|
||||
State::Setup(prover) => {
|
||||
self.state = State::Setup(prover);
|
||||
return Ok(Poll::Ready(()));
|
||||
}
|
||||
state => todo!(),
|
||||
};
|
||||
|
||||
self.wants_state_update = true;
|
||||
cx.waker.set_wake();
|
||||
|
||||
Ok(Poll::Pending)
|
||||
}
|
||||
|
||||
pub fn connect(&mut self, cx: &mut Context) -> Result<Poll<()>, Error> {
|
||||
match self.state.take() {
|
||||
State::Setup(prover) => {
|
||||
self.state = State::Connect(prover);
|
||||
}
|
||||
State::Connect(prover) => {
|
||||
self.state = State::Connect(prover);
|
||||
}
|
||||
State::Connecting(fut) => {
|
||||
self.state = State::Connecting(fut);
|
||||
}
|
||||
State::Online(fut) => {
|
||||
self.state = State::Online(fut);
|
||||
|
||||
return Ok(Poll::Ready(()));
|
||||
}
|
||||
state => todo!(),
|
||||
}
|
||||
|
||||
self.wants_state_update = true;
|
||||
cx.waker.set_wake();
|
||||
|
||||
Ok(Poll::Pending)
|
||||
}
|
||||
|
||||
pub fn finish_commit(
|
||||
&mut self,
|
||||
cx: &mut Context,
|
||||
) -> Result<Poll<(TlsTranscript, Transcript)>, Error> {
|
||||
match self.state.take() {
|
||||
State::Online(fut) => {
|
||||
self.state = State::Online(fut);
|
||||
}
|
||||
State::FinishCommit(prover) => {
|
||||
let tls_transcript = prover.tls_transcript().clone();
|
||||
let transcript = prover.transcript().clone();
|
||||
|
||||
self.state = State::Committed(prover);
|
||||
|
||||
return Ok(Poll::Ready((tls_transcript, transcript)));
|
||||
}
|
||||
state => todo!(),
|
||||
}
|
||||
|
||||
self.wants_state_update = true;
|
||||
cx.waker.set_wake();
|
||||
|
||||
Ok(Poll::Pending)
|
||||
}
|
||||
|
||||
pub fn prove(&mut self, config: ProveConfig) -> Result<(), Error> {
|
||||
match self.state.take() {
|
||||
State::Committed(prover) => {
|
||||
self.state = State::StartProve(prover, config);
|
||||
}
|
||||
state => todo!(),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn finish_prove(&mut self, cx: &mut Context) -> Result<Poll<ProverOutput>, Error> {
|
||||
match self.state.take() {
|
||||
State::StartProve(prover, config) => {
|
||||
self.state = State::StartProve(prover, config);
|
||||
}
|
||||
State::Proving(fut) => {
|
||||
self.state = State::Proving(fut);
|
||||
}
|
||||
State::FinishProve(prover, output) => {
|
||||
self.state = State::Committed(prover);
|
||||
|
||||
return Ok(Poll::Ready(output));
|
||||
}
|
||||
state => todo!(),
|
||||
}
|
||||
|
||||
self.wants_state_update = true;
|
||||
cx.waker.set_wake();
|
||||
|
||||
Ok(Poll::Pending)
|
||||
}
|
||||
|
||||
pub fn close(&mut self, cx: &mut Context) -> Result<Poll<()>, Error> {
|
||||
match self.state.take() {
|
||||
State::Committed(prover) => {
|
||||
self.state = State::Close(prover);
|
||||
}
|
||||
State::Close(prover) => {
|
||||
self.state = State::Close(prover);
|
||||
}
|
||||
State::Closing(fut) => {
|
||||
self.state = State::Closing(fut);
|
||||
}
|
||||
State::FinishClose => {
|
||||
self.state = State::Done;
|
||||
|
||||
return Ok(Poll::Ready(()));
|
||||
}
|
||||
state => {
|
||||
dbg!(state);
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
self.wants_state_update = true;
|
||||
cx.waker.set_wake();
|
||||
|
||||
Ok(Poll::Pending)
|
||||
}
|
||||
|
||||
pub fn poll(
|
||||
&mut self,
|
||||
cx_std: &mut StdContext<'_>,
|
||||
cx: &mut Context,
|
||||
io: &mut impl IoProvider,
|
||||
) -> Poll<Result<(), Error>> {
|
||||
if let Some(io) = self.server_io.as_mut() {
|
||||
if let Poll::Ready(res) = io.poll(cx_std, cx) {
|
||||
res?;
|
||||
}
|
||||
}
|
||||
|
||||
match self.state.take() {
|
||||
State::Init(prover) => {
|
||||
self.state = State::Init(prover);
|
||||
}
|
||||
State::Preprocess(prover) => {
|
||||
let io_fut = io.connect_peer();
|
||||
self.state = State::Preprocessing(Box::pin(async move {
|
||||
prover
|
||||
.setup(io_fut.await.unwrap())
|
||||
.await
|
||||
.map_err(|_| todo!())
|
||||
}));
|
||||
|
||||
return self.poll(cx_std, cx, io);
|
||||
}
|
||||
State::Preprocessing(mut fut) => {
|
||||
if let Poll::Ready(res) = fut.poll_unpin(cx_std) {
|
||||
let prover = res.unwrap();
|
||||
|
||||
self.state = State::Setup(prover);
|
||||
if self.wants_state_update {
|
||||
self.wants_state_update = false;
|
||||
cx.waker.set_call();
|
||||
}
|
||||
|
||||
println!("prover setup");
|
||||
} else {
|
||||
self.state = State::Preprocessing(fut);
|
||||
}
|
||||
}
|
||||
State::Setup(prover) => {
|
||||
self.state = State::Setup(prover);
|
||||
}
|
||||
State::Connect(prover) => {
|
||||
let io_fut = io.connect_server(&self.server_name);
|
||||
self.state = State::Connecting(Box::pin(async move {
|
||||
prover
|
||||
.connect(io_fut.await.unwrap())
|
||||
.await
|
||||
.map_err(|_| todo!())
|
||||
}));
|
||||
|
||||
println!("prover connect");
|
||||
|
||||
return self.poll(cx_std, cx, io);
|
||||
}
|
||||
State::Connecting(mut fut) => {
|
||||
if let Poll::Ready(res) = fut.poll_unpin(cx_std) {
|
||||
let (conn, fut) = res.unwrap();
|
||||
|
||||
self.state = State::Online(fut);
|
||||
self.server_io = Some(IoInstance::new(conn));
|
||||
if self.wants_state_update {
|
||||
self.wants_state_update = false;
|
||||
cx.waker.set_call();
|
||||
}
|
||||
|
||||
println!("prover online");
|
||||
|
||||
return self.poll(cx_std, cx, io);
|
||||
} else {
|
||||
self.state = State::Connecting(fut);
|
||||
}
|
||||
}
|
||||
State::Online(mut fut) => {
|
||||
if let Poll::Ready(res) = fut.poll_unpin(cx_std) {
|
||||
let prover = res.unwrap();
|
||||
|
||||
self.state = State::FinishCommit(prover);
|
||||
if self.wants_state_update {
|
||||
self.wants_state_update = false;
|
||||
cx.waker.set_call();
|
||||
}
|
||||
|
||||
println!("prover committed");
|
||||
} else {
|
||||
self.state = State::Online(fut);
|
||||
}
|
||||
}
|
||||
State::FinishCommit(prover) => {
|
||||
self.state = State::FinishCommit(prover);
|
||||
}
|
||||
State::Committed(prover) => {
|
||||
self.state = State::Committed(prover);
|
||||
}
|
||||
State::StartProve(mut prover, config) => {
|
||||
self.state = State::Proving(Box::pin(async move {
|
||||
let output = prover.prove(&config).await.map_err(|_| todo!())?;
|
||||
|
||||
Ok((prover, output))
|
||||
}));
|
||||
|
||||
return self.poll(cx_std, cx, io);
|
||||
}
|
||||
State::Proving(mut fut) => {
|
||||
if let Poll::Ready(res) = fut.poll_unpin(cx_std) {
|
||||
let (prover, output) = res.unwrap();
|
||||
|
||||
self.state = State::FinishProve(prover, output);
|
||||
if self.wants_state_update {
|
||||
self.wants_state_update = false;
|
||||
cx.waker.set_call();
|
||||
}
|
||||
} else {
|
||||
self.state = State::Proving(fut);
|
||||
}
|
||||
}
|
||||
State::FinishProve(prover, output) => {
|
||||
self.state = State::FinishProve(prover, output);
|
||||
}
|
||||
State::Close(prover) => {
|
||||
self.state = State::Closing(Box::pin(async move {
|
||||
prover.close().await.map_err(|_| todo!())
|
||||
}));
|
||||
|
||||
return self.poll(cx_std, cx, io);
|
||||
}
|
||||
State::Closing(mut fut) => {
|
||||
if let Poll::Ready(res) = fut.poll_unpin(cx_std) {
|
||||
res?;
|
||||
|
||||
println!("prover closed");
|
||||
|
||||
self.state = State::FinishClose;
|
||||
if self.wants_state_update {
|
||||
self.wants_state_update = false;
|
||||
cx.waker.set_call();
|
||||
}
|
||||
} else {
|
||||
self.state = State::Closing(fut);
|
||||
}
|
||||
}
|
||||
State::FinishClose => {
|
||||
self.state = State::FinishClose;
|
||||
}
|
||||
State::Done => {
|
||||
self.state = State::Done;
|
||||
|
||||
return Poll::Ready(Ok(()));
|
||||
}
|
||||
State::Error => todo!(),
|
||||
}
|
||||
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
|
||||
enum State {
|
||||
Init(Prover<state::Initialized>),
|
||||
Preprocess(Prover<state::Initialized>),
|
||||
Preprocessing(Pin<Box<dyn Future<Output = Result<Prover<state::Setup>, Error>>>>),
|
||||
Setup(Prover<state::Setup>),
|
||||
Connect(Prover<state::Setup>),
|
||||
Connecting(Pin<Box<dyn Future<Output = Result<(TlsConnection, ProverFuture), Error>>>>),
|
||||
Online(ProverFuture),
|
||||
FinishCommit(Prover<state::Committed>),
|
||||
Committed(Prover<state::Committed>),
|
||||
StartProve(Prover<state::Committed>, ProveConfig),
|
||||
Proving(Pin<Box<dyn Future<Output = Result<(Prover<state::Committed>, ProverOutput), Error>>>>),
|
||||
FinishProve(Prover<state::Committed>, ProverOutput),
|
||||
Close(Prover<state::Committed>),
|
||||
Closing(Pin<Box<dyn Future<Output = Result<(), Error>>>>),
|
||||
FinishClose,
|
||||
Done,
|
||||
Error,
|
||||
}
|
||||
|
||||
impl State {
|
||||
fn take(&mut self) -> Self {
|
||||
std::mem::replace(self, Self::Error)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for State {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Self::Init(_) => f.debug_tuple("Init").finish_non_exhaustive(),
|
||||
Self::Preprocess(_) => f.debug_tuple("Preprocess").finish_non_exhaustive(),
|
||||
Self::Preprocessing(_) => f.debug_tuple("Preprocessing").finish_non_exhaustive(),
|
||||
Self::Setup(_) => f.debug_tuple("Setup").finish_non_exhaustive(),
|
||||
Self::Connect(_) => f.debug_tuple("Connect").finish_non_exhaustive(),
|
||||
Self::Connecting(_) => f.debug_tuple("Connecting").finish_non_exhaustive(),
|
||||
Self::Online(_) => f.debug_tuple("Online").finish_non_exhaustive(),
|
||||
Self::FinishCommit(_) => f.debug_tuple("FinishCommit").finish_non_exhaustive(),
|
||||
Self::Committed(_) => f.debug_tuple("Committed").finish_non_exhaustive(),
|
||||
Self::StartProve(_, _) => f.debug_tuple("StartProve").finish_non_exhaustive(),
|
||||
Self::Proving(_) => f.debug_tuple("Proving").finish_non_exhaustive(),
|
||||
Self::FinishProve(_, _) => f.debug_tuple("FinishProve").finish_non_exhaustive(),
|
||||
Self::Close(_) => f.debug_tuple("Close").finish_non_exhaustive(),
|
||||
Self::Closing(_) => f.debug_tuple("Closing").finish_non_exhaustive(),
|
||||
Self::FinishClose => f.write_str("FinishClose"),
|
||||
Self::Done => f.write_str("Done"),
|
||||
Self::Error => f.write_str("Error"),
|
||||
}
|
||||
}
|
||||
}
|
||||
18 crates/sdk-core/src/provider.rs Normal file
@@ -0,0 +1,18 @@
use std::pin::Pin;

use futures::{AsyncRead, AsyncWrite};
use tlsn::connection::ServerName;

pub trait IoProvider {
    type Io: AsyncRead + AsyncWrite + Send + Unpin + 'static;
    type Error: std::error::Error + Send + Sync + 'static;

    fn connect_server(
        &mut self,
        name: &ServerName,
    ) -> Pin<Box<dyn Future<Output = Result<Self::Io, Self::Error>> + Send>>;

    fn connect_peer(
        &mut self,
    ) -> Pin<Box<dyn Future<Output = Result<Self::Io, Self::Error>> + Send>>;
}
309 crates/sdk-core/src/verifier.rs Normal file
@@ -0,0 +1,309 @@
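// Verifier-side counterpart to `ProverInstance`: the same non-blocking pattern,
// with `setup`/`commit`/`verify`/`finish_verify`/`close` advancing `State` and
// `poll` driving the underlying verifier futures on the host executor.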
|
||||
use std::{
|
||||
pin::Pin,
|
||||
task::{Context as StdContext, Poll},
|
||||
};
|
||||
|
||||
use futures::FutureExt;
|
||||
use tlsn::{
|
||||
config::{VerifierConfig, VerifyConfig},
|
||||
transcript::TlsTranscript,
|
||||
verifier::{Verifier, VerifierOutput, state},
|
||||
};
|
||||
|
||||
use crate::{Error, IoProvider, instance::Context};
|
||||
|
||||
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
|
||||
pub struct VerifierId(pub usize);
|
||||
|
||||
pub struct VerifierInstance {
|
||||
state: State,
|
||||
wants_state_update: bool,
|
||||
}
|
||||
|
||||
impl VerifierInstance {
|
||||
pub fn new(config: Vec<u8>) -> Result<Self, Error> {
|
||||
let config: VerifierConfig = bincode::deserialize(&config).unwrap();
|
||||
|
||||
let verifier = Verifier::new(config);
|
||||
|
||||
Ok(Self {
|
||||
state: State::Init(verifier),
|
||||
wants_state_update: false,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn setup(&mut self, cx: &mut Context) -> Result<Poll<()>, Error> {
|
||||
match self.state.take() {
|
||||
State::Init(verifier) => {
|
||||
self.state = State::Preprocess(verifier);
|
||||
}
|
||||
State::Preprocess(verifier) => {
|
||||
self.state = State::Preprocess(verifier);
|
||||
}
|
||||
State::Preprocessing(fut) => {
|
||||
self.state = State::Preprocessing(fut);
|
||||
}
|
||||
State::Setup(verifier) => {
|
||||
self.state = State::Setup(verifier);
|
||||
return Ok(Poll::Ready(()));
|
||||
}
|
||||
state => todo!(),
|
||||
};
|
||||
|
||||
self.wants_state_update = true;
|
||||
cx.waker.set_wake();
|
||||
|
||||
Ok(Poll::Pending)
|
||||
}
|
||||
|
||||
pub fn commit(&mut self, cx: &mut Context) -> Result<Poll<TlsTranscript>, Error> {
|
||||
match self.state.take() {
|
||||
State::Setup(verifier) => {
|
||||
self.state = State::StartCommit(verifier);
|
||||
}
|
||||
State::StartCommit(verifier) => {
|
||||
self.state = State::StartCommit(verifier);
|
||||
}
|
||||
State::Online(fut) => {
|
||||
self.state = State::Online(fut);
|
||||
}
|
||||
State::FinishCommit(verifier) => {
|
||||
let tls_transcript = verifier.tls_transcript().clone();
|
||||
|
||||
self.state = State::Committed(verifier);
|
||||
|
||||
println!("verifier committed");
|
||||
|
||||
return Ok(Poll::Ready(tls_transcript));
|
||||
}
|
||||
state => todo!(),
|
||||
}
|
||||
|
||||
self.wants_state_update = true;
|
||||
cx.waker.set_wake();
|
||||
|
||||
Ok(Poll::Pending)
|
||||
}
|
||||
|
||||
pub fn verify(&mut self, config: VerifyConfig) -> Result<(), Error> {
|
||||
match self.state.take() {
|
||||
State::Committed(verifier) => {
|
||||
self.state = State::StartVerify(verifier, config);
|
||||
}
|
||||
State::StartVerify(verifier, config) => {
|
||||
self.state = State::StartVerify(verifier, config);
|
||||
}
|
||||
state => todo!(),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn finish_verify(&mut self, cx: &mut Context) -> Result<Poll<VerifierOutput>, Error> {
|
||||
match self.state.take() {
|
||||
State::StartVerify(verifier, config) => {
|
||||
self.state = State::StartVerify(verifier, config);
|
||||
}
|
||||
State::Verifying(fut) => {
|
||||
self.state = State::Verifying(fut);
|
||||
}
|
||||
State::FinishVerify(verifier, output) => {
|
||||
self.state = State::Committed(verifier);
|
||||
|
||||
return Ok(Poll::Ready(output));
|
||||
}
|
||||
state => todo!(),
|
||||
}
|
||||
|
||||
self.wants_state_update = true;
|
||||
cx.waker.set_wake();
|
||||
|
||||
Ok(Poll::Pending)
|
||||
}
|
||||
|
||||
pub fn close(&mut self, cx: &mut Context) -> Result<Poll<()>, Error> {
|
||||
match self.state.take() {
|
||||
State::Committed(verifier) => {
|
||||
self.state = State::Close(verifier);
|
||||
}
|
||||
State::Close(verifier) => {
|
||||
self.state = State::Close(verifier);
|
||||
}
|
||||
State::Closing(fut) => {
|
||||
self.state = State::Closing(fut);
|
||||
}
|
||||
State::FinishClose => {
|
||||
self.state = State::Done;
|
||||
|
||||
println!("verifier closed");
|
||||
|
||||
return Ok(Poll::Ready(()));
|
||||
}
|
||||
state => todo!(),
|
||||
}
|
||||
|
||||
self.wants_state_update = true;
|
||||
cx.waker.set_wake();
|
||||
|
||||
Ok(Poll::Pending)
|
||||
}
|
||||
|
||||
pub fn poll(
|
||||
&mut self,
|
||||
cx_std: &mut StdContext<'_>,
|
||||
cx: &mut Context,
|
||||
io: &mut impl IoProvider,
|
||||
) -> Poll<Result<(), Error>> {
|
||||
match self.state.take() {
|
||||
State::Init(verifier) => {
|
||||
self.state = State::Init(verifier);
|
||||
}
|
||||
State::Preprocess(verifier) => {
|
||||
let io_fut = io.connect_peer();
|
||||
self.state = State::Preprocessing(Box::pin(async move {
|
||||
verifier
|
||||
.setup(io_fut.await.unwrap())
|
||||
.await
|
||||
.map_err(|_| todo!())
|
||||
}));
|
||||
|
||||
return self.poll(cx_std, cx, io);
|
||||
}
|
||||
State::Preprocessing(mut fut) => {
|
||||
if let Poll::Ready(res) = fut.poll_unpin(cx_std) {
|
||||
let verifier = res.unwrap();
|
||||
|
||||
println!("verifier setup");
|
||||
|
||||
self.state = State::Setup(verifier);
|
||||
if self.wants_state_update {
|
||||
self.wants_state_update = false;
|
||||
cx.waker.set_call();
|
||||
}
|
||||
} else {
|
||||
self.state = State::Preprocessing(fut);
|
||||
}
|
||||
}
|
||||
State::Setup(verifier) => {
|
||||
self.state = State::Setup(verifier);
|
||||
}
|
||||
State::StartCommit(verifier) => {
|
||||
self.state = State::Online(Box::pin(async move {
|
||||
verifier.run().await.map_err(|_| todo!())
|
||||
}));
|
||||
|
||||
println!("verifier start commit");
|
||||
|
||||
return self.poll(cx_std, cx, io);
|
||||
}
|
||||
State::Online(mut fut) => {
|
||||
if let Poll::Ready(res) = fut.poll_unpin(cx_std) {
|
||||
let verifier = res.unwrap();
|
||||
|
||||
self.state = State::FinishCommit(verifier);
|
||||
if self.wants_state_update {
|
||||
self.wants_state_update = false;
|
||||
cx.waker.set_call();
|
||||
}
|
||||
println!("verifier finish commit");
|
||||
} else {
|
||||
self.state = State::Online(fut);
|
||||
}
|
||||
}
|
||||
State::FinishCommit(verifier) => {
|
||||
self.state = State::FinishCommit(verifier);
|
||||
}
|
||||
State::Committed(verifier) => {
|
||||
self.state = State::Committed(verifier);
|
||||
}
|
||||
State::StartVerify(mut verifier, config) => {
|
||||
self.state = State::Verifying(Box::pin(async move {
|
||||
let output = verifier.verify(&config).await.map_err(|_| todo!())?;
|
||||
|
||||
Ok((verifier, output))
|
||||
}));
|
||||
|
||||
return self.poll(cx_std, cx, io);
|
||||
}
|
||||
State::Verifying(mut fut) => {
|
||||
if let Poll::Ready(res) = fut.poll_unpin(cx_std) {
|
||||
let (verifier, output) = res.unwrap();
|
||||
|
||||
self.state = State::FinishVerify(verifier, output);
|
||||
if self.wants_state_update {
|
||||
self.wants_state_update = false;
|
||||
cx.waker.set_call();
|
||||
}
|
||||
println!("verifier finish verify");
|
||||
} else {
|
||||
self.state = State::Verifying(fut);
|
||||
}
|
||||
}
|
||||
State::FinishVerify(verifier, output) => {
|
||||
self.state = State::FinishVerify(verifier, output);
|
||||
}
|
||||
State::Close(verifier) => {
|
||||
let fut = Box::pin(async move { verifier.close().await.map_err(|_| todo!()) });
|
||||
|
||||
self.state = State::Closing(fut);
|
||||
|
||||
println!("verifier start close");
|
||||
|
||||
return self.poll(cx_std, cx, io);
|
||||
}
|
||||
State::Closing(mut fut) => {
|
||||
if let Poll::Ready(res) = fut.poll_unpin(cx_std) {
|
||||
res?;
|
||||
|
||||
println!("verifier closed");
|
||||
|
||||
self.state = State::FinishClose;
|
||||
if self.wants_state_update {
|
||||
self.wants_state_update = false;
|
||||
cx.waker.set_call();
|
||||
}
|
||||
} else {
|
||||
self.state = State::Closing(fut);
|
||||
}
|
||||
}
|
||||
State::FinishClose => {
|
||||
self.state = State::FinishClose;
|
||||
}
|
||||
State::Done => {
|
||||
self.state = State::Done;
|
||||
|
||||
return Poll::Ready(Ok(()));
|
||||
}
|
||||
State::Error => todo!(),
|
||||
}
|
||||
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
|
||||
enum State {
|
||||
Init(Verifier<state::Initialized>),
|
||||
Preprocess(Verifier<state::Initialized>),
|
||||
Preprocessing(Pin<Box<dyn Future<Output = Result<Verifier<state::Setup>, Error>>>>),
|
||||
Setup(Verifier<state::Setup>),
|
||||
StartCommit(Verifier<state::Setup>),
|
||||
Online(Pin<Box<dyn Future<Output = Result<Verifier<state::Committed>, Error>>>>),
|
||||
FinishCommit(Verifier<state::Committed>),
|
||||
Committed(Verifier<state::Committed>),
|
||||
StartVerify(Verifier<state::Committed>, VerifyConfig),
|
||||
Verifying(
|
||||
Pin<Box<dyn Future<Output = Result<(Verifier<state::Committed>, VerifierOutput), Error>>>>,
|
||||
),
|
||||
FinishVerify(Verifier<state::Committed>, VerifierOutput),
|
||||
Close(Verifier<state::Committed>),
|
||||
Closing(Pin<Box<dyn Future<Output = Result<(), Error>>>>),
|
||||
FinishClose,
|
||||
Done,
|
||||
Error,
|
||||
}
|
||||
|
||||
impl State {
|
||||
fn take(&mut self) -> Self {
|
||||
std::mem::replace(self, Self::Error)
|
||||
}
|
||||
}
|
||||
24 crates/sdk-core/src/wasm.rs Normal file
@@ -0,0 +1,24 @@
use std::{
    sync::{Arc, Mutex},
    task::Poll,
};

use crate::{
    Binary, Error,
    instance::{Instance, InstanceId},
};

pub struct WasmId(pub usize);

pub trait WasmRuntime {
    fn load(&mut self, bin: &Binary) -> Result<WasmId, Error>;

    fn instantiate(
        &mut self,
        id: WasmId,
        instance: Arc<Mutex<Instance>>,
        arg: &[u8],
    ) -> Result<InstanceId, Error>;

    fn poll(&mut self, id: InstanceId) -> Result<Poll<Result<Vec<u8>, String>>, Error>;
}
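For context, a host drives `WasmRuntime` as load, then instantiate, then poll. The sketch below is illustrative only and relies on this module's existing imports; the real driver, `Runtime::run_plugin` in `lib.rs`, interleaves this with polling the instance state and the `IoProvider` connections rather than spinning.

// Hypothetical driver loop; assumes an `Instance` has already been constructed.
fn drive<R: WasmRuntime>(
    rt: &mut R,
    binary: &Binary,
    instance: Arc<Mutex<Instance>>,
    input: &[u8],
) -> Result<Result<Vec<u8>, String>, Error> {
    let wasm = rt.load(binary)?;
    let id = rt.instantiate(wasm, instance, input)?;
    loop {
        // A real driver only re-polls once the instance signals it wants a call.
        if let Poll::Ready(output) = rt.poll(id)? {
            return Ok(output);
        }
    }
}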
399 crates/sdk-core/src/wasmtime.rs Normal file
@@ -0,0 +1,399 @@
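// Wasmtime-backed implementation of `WasmRuntime`. The `generated` module binds
// the WIT world from `wit/tlsn.wit`; the host trait impls below (io, prove,
// verify) proxy plugin calls into the shared `instance::Instance` state.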
|
||||
use std::{
|
||||
sync::{Arc, Mutex},
|
||||
task::Poll,
|
||||
};
|
||||
|
||||
use wasmtime::{
|
||||
Engine, Store,
|
||||
component::{Component, HasSelf, Linker, Resource},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
Error, WasmRuntime,
|
||||
instance::{self, Context, InstanceId},
|
||||
io::IoId,
|
||||
prover::ProverId,
|
||||
verifier::VerifierId,
|
||||
wasm::WasmId,
|
||||
};
|
||||
|
||||
mod generated {
|
||||
wasmtime::component::bindgen!({
|
||||
world: "plugin",
|
||||
path: "wit/tlsn.wit",
|
||||
trappable_imports: true,
|
||||
with: {
|
||||
"tlsn:tlsn/prove/prover": crate::prover::ProverId,
|
||||
"tlsn:tlsn/verify/verifier": crate::verifier::VerifierId,
|
||||
"tlsn:tlsn/io/io": crate::io::IoId,
|
||||
}
|
||||
});
|
||||
}
|
||||
use generated::{Plugin, tlsn::tlsn as abi};
|
||||
|
||||
struct InstanceState {
|
||||
inner: Arc<Mutex<instance::Instance>>,
|
||||
}
|
||||
|
||||
impl abi::io::Host for InstanceState {}
|
||||
impl abi::io::HostIo for InstanceState {
|
||||
fn check_write(
|
||||
&mut self,
|
||||
self_: Resource<IoId>,
|
||||
) -> Result<abi::io::CheckWriteReturn, wasmtime::Error> {
|
||||
let id = self_.rep();
|
||||
|
||||
let mut guard = self.inner.lock().unwrap();
|
||||
let instance = &mut (*guard);
|
||||
let io = instance.state.get_io_mut(IoId(id as usize)).unwrap();
|
||||
|
||||
match io.check_write(&mut instance.cx) {
|
||||
Poll::Pending => Ok(abi::io::CheckWriteReturn::Pending),
|
||||
Poll::Ready(Ok(n)) => Ok(abi::io::CheckWriteReturn::Ready(Ok(n as u32))),
|
||||
Poll::Ready(Err(e)) => Ok(abi::io::CheckWriteReturn::Ready(Err(
|
||||
abi::io::Error::Other(e.to_string()),
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
fn write(
|
||||
&mut self,
|
||||
self_: Resource<IoId>,
|
||||
buf: Vec<u8>,
|
||||
) -> Result<Result<(), abi::io::Error>, wasmtime::Error> {
|
||||
let id = self_.rep();
|
||||
|
||||
let mut guard = self.inner.lock().unwrap();
|
||||
let instance = &mut (*guard);
|
||||
let io = instance.state.get_io_mut(IoId(id as usize)).unwrap();
|
||||
|
||||
Ok(io.write(buf.as_slice()).map_err(|_| todo!()))
|
||||
}
|
||||
|
||||
fn close(&mut self, self_: Resource<IoId>) -> Result<abi::io::CloseReturn, wasmtime::Error> {
|
||||
let id = self_.rep();
|
||||
|
||||
let mut guard = self.inner.lock().unwrap();
|
||||
let instance = &mut (*guard);
|
||||
let io = instance.state.get_io_mut(IoId(id as usize)).unwrap();
|
||||
|
||||
match io.close(&mut instance.cx) {
|
||||
Poll::Pending => Ok(abi::io::CloseReturn::Pending),
|
||||
Poll::Ready(Ok(())) => Ok(abi::io::CloseReturn::Ready(Ok(()))),
|
||||
Poll::Ready(Err(e)) => Ok(abi::io::CloseReturn::Ready(Err(abi::io::Error::Other(
|
||||
e.to_string(),
|
||||
)))),
|
||||
}
|
||||
}
|
||||
|
||||
fn read(
|
||||
&mut self,
|
||||
self_: Resource<IoId>,
|
||||
len: u32,
|
||||
) -> Result<abi::io::ReadReturn, wasmtime::Error> {
|
||||
let id = self_.rep();
|
||||
|
||||
let mut guard = self.inner.lock().unwrap();
|
||||
let instance = &mut (*guard);
|
||||
let io = instance.state.get_io_mut(IoId(id as usize)).unwrap();
|
||||
|
||||
match io.read(len as usize, &mut instance.cx) {
|
||||
Poll::Pending => Ok(abi::io::ReadReturn::Pending),
|
||||
Poll::Ready(Ok(data)) => Ok(abi::io::ReadReturn::Ready(Ok(data))),
|
||||
Poll::Ready(Err(e)) => Ok(abi::io::ReadReturn::Ready(Err(abi::io::Error::Other(
|
||||
e.to_string(),
|
||||
)))),
|
||||
}
|
||||
}
|
||||
|
||||
fn drop(&mut self, rep: Resource<IoId>) -> wasmtime::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl abi::prove::Host for InstanceState {}
|
||||
impl abi::prove::HostProver for InstanceState {
|
||||
fn new(&mut self, config: Vec<u8>) -> wasmtime::Result<Resource<ProverId>> {
|
||||
let id = self.inner.lock().unwrap().state.new_prover(config).unwrap();
|
||||
|
||||
Ok(Resource::new_own(id.0 as u32))
|
||||
}
|
||||
|
||||
fn setup(&mut self, self_: Resource<ProverId>) -> wasmtime::Result<abi::prove::SetupReturn> {
|
||||
let id = ProverId(self_.rep() as usize);
|
||||
|
||||
let mut guard = self.inner.lock().unwrap();
|
||||
let instance = &mut (*guard);
|
||||
let prover = instance.state.get_prover_mut(id).unwrap();
|
||||
|
||||
if let Poll::Ready(()) = prover.setup(&mut instance.cx).unwrap() {
|
||||
Ok(abi::prove::SetupReturn::Ready)
|
||||
} else {
|
||||
Ok(abi::prove::SetupReturn::Pending)
|
||||
}
|
||||
}
|
||||
|
||||
fn connect(
|
||||
&mut self,
|
||||
self_: Resource<ProverId>,
|
||||
) -> wasmtime::Result<abi::prove::ConnectReturn> {
|
||||
let id = ProverId(self_.rep() as usize);
|
||||
|
||||
let mut guard = self.inner.lock().unwrap();
|
||||
let instance = &mut (*guard);
|
||||
let prover = instance.state.get_prover_mut(id).unwrap();
|
||||
|
||||
if let Poll::Ready(()) = prover.connect(&mut instance.cx).unwrap() {
|
||||
Ok(abi::prove::ConnectReturn::Ready(Resource::new_own(
|
||||
id.0 as u32,
|
||||
)))
|
||||
} else {
|
||||
Ok(abi::prove::ConnectReturn::Pending)
|
||||
}
|
||||
}
|
||||
|
||||
fn finish_commit(
|
||||
&mut self,
|
||||
self_: Resource<ProverId>,
|
||||
) -> wasmtime::Result<abi::prove::CommitReturn> {
|
||||
let id = ProverId(self_.rep() as usize);
|
||||
|
||||
let mut guard = self.inner.lock().unwrap();
|
||||
let instance = &mut (*guard);
|
||||
let prover = instance.state.get_prover_mut(id).unwrap();
|
||||
|
||||
if let Poll::Ready((tls_transcript, transcript)) =
|
||||
prover.finish_commit(&mut instance.cx).unwrap()
|
||||
{
|
||||
Ok(abi::prove::CommitReturn::Ready(Ok(bincode::serialize(&(
|
||||
tls_transcript,
|
||||
transcript,
|
||||
))
|
||||
.unwrap())))
|
||||
} else {
|
||||
Ok(abi::prove::CommitReturn::Pending)
|
||||
}
|
||||
}
|
||||
|
||||
fn prove(&mut self, self_: Resource<ProverId>, config: Vec<u8>) -> wasmtime::Result<()> {
|
||||
let config = bincode::deserialize(&config).unwrap();
|
||||
|
||||
let id = ProverId(self_.rep() as usize);
|
||||
|
||||
let mut guard = self.inner.lock().unwrap();
|
||||
let instance = &mut (*guard);
|
||||
let prover = instance.state.get_prover_mut(id).unwrap();
|
||||
|
||||
prover.prove(config).unwrap();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn finish_prove(
|
||||
&mut self,
|
||||
self_: Resource<ProverId>,
|
||||
) -> wasmtime::Result<abi::prove::ProveReturn> {
|
||||
let id = ProverId(self_.rep() as usize);
|
||||
|
||||
let mut guard = self.inner.lock().unwrap();
|
||||
let instance = &mut (*guard);
|
||||
let prover = instance.state.get_prover_mut(id).unwrap();
|
||||
|
||||
if let Poll::Ready(output) = prover.finish_prove(&mut instance.cx).unwrap() {
|
||||
Ok(abi::prove::ProveReturn::Ready(Ok(bincode::serialize(
|
||||
&output,
|
||||
)
|
||||
.unwrap())))
|
||||
} else {
|
||||
Ok(abi::prove::ProveReturn::Pending)
|
||||
}
|
||||
}
|
||||
|
||||
fn close(&mut self, self_: Resource<ProverId>) -> wasmtime::Result<abi::prove::CloseReturn> {
|
||||
let id = ProverId(self_.rep() as usize);
|
||||
|
||||
let mut guard = self.inner.lock().unwrap();
|
||||
let instance = &mut (*guard);
|
||||
let prover = instance.state.get_prover_mut(id).unwrap();
|
||||
|
||||
if let Poll::Ready(()) = prover.close(&mut instance.cx).unwrap() {
|
||||
Ok(abi::prove::CloseReturn::Ready)
|
||||
} else {
|
||||
Ok(abi::prove::CloseReturn::Pending)
|
||||
}
|
||||
}
|
||||
|
||||
fn drop(&mut self, rep: Resource<ProverId>) -> wasmtime::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl abi::verify::Host for InstanceState {}
|
||||
impl abi::verify::HostVerifier for InstanceState {
|
||||
fn new(&mut self, config: Vec<u8>) -> wasmtime::Result<Resource<VerifierId>> {
|
||||
let id = self
|
||||
.inner
|
||||
.lock()
|
||||
.unwrap()
|
||||
.state
|
||||
.new_verifier(config)
|
||||
.unwrap();
|
||||
|
||||
Ok(Resource::new_own(id.0 as u32))
|
||||
}
|
||||
|
||||
fn setup(&mut self, self_: Resource<VerifierId>) -> wasmtime::Result<abi::verify::SetupReturn> {
|
||||
let id = VerifierId(self_.rep() as usize);
|
||||
|
||||
let mut guard = self.inner.lock().unwrap();
|
||||
let instance = &mut (*guard);
|
||||
let verifier = instance.state.get_verifier_mut(VerifierId(id.0)).unwrap();
|
||||
|
||||
if let Poll::Ready(()) = verifier.setup(&mut instance.cx).unwrap() {
|
||||
Ok(abi::verify::SetupReturn::Ready)
|
||||
} else {
|
||||
Ok(abi::verify::SetupReturn::Pending)
|
||||
}
|
||||
}
|
||||
|
||||
fn commit(
|
||||
&mut self,
|
||||
self_: Resource<VerifierId>,
|
||||
) -> wasmtime::Result<abi::verify::CommitReturn> {
|
||||
let id = VerifierId(self_.rep() as usize);
|
||||
|
||||
let mut guard = self.inner.lock().unwrap();
|
||||
let instance = &mut (*guard);
|
||||
let verifier = instance.state.get_verifier_mut(VerifierId(id.0)).unwrap();
|
||||
|
||||
if let Poll::Ready(tls_transcript) = verifier.commit(&mut instance.cx).unwrap() {
|
||||
Ok(abi::verify::CommitReturn::Ready(Ok(bincode::serialize(
|
||||
&tls_transcript,
|
||||
)
|
||||
.unwrap())))
|
||||
} else {
|
||||
Ok(abi::verify::CommitReturn::Pending)
|
||||
}
|
||||
}
|
||||
|
||||
fn verify(&mut self, self_: Resource<VerifierId>, config: Vec<u8>) -> wasmtime::Result<()> {
|
||||
let config = bincode::deserialize(&config).unwrap();
|
||||
|
||||
let id = VerifierId(self_.rep() as usize);
|
||||
|
||||
let mut guard = self.inner.lock().unwrap();
|
||||
let instance = &mut (*guard);
|
||||
let verifier = instance.state.get_verifier_mut(VerifierId(id.0)).unwrap();
|
||||
|
||||
verifier.verify(config).unwrap();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn finish_verify(
|
||||
&mut self,
|
||||
self_: Resource<VerifierId>,
|
||||
) -> wasmtime::Result<abi::verify::VerifyReturn> {
|
||||
let id = VerifierId(self_.rep() as usize);
|
||||
|
||||
let mut guard = self.inner.lock().unwrap();
|
||||
let instance = &mut (*guard);
|
||||
let verifier = instance.state.get_verifier_mut(VerifierId(id.0)).unwrap();
|
||||
|
||||
if let Poll::Ready(output) = verifier.finish_verify(&mut instance.cx).unwrap() {
|
||||
Ok(abi::verify::VerifyReturn::Ready(Ok(bincode::serialize(
|
||||
&output,
|
||||
)
|
||||
.unwrap())))
|
||||
} else {
|
||||
Ok(abi::verify::VerifyReturn::Pending)
|
||||
}
|
||||
}
|
||||
|
||||
fn close(&mut self, self_: Resource<VerifierId>) -> wasmtime::Result<abi::verify::CloseReturn> {
|
||||
let id = VerifierId(self_.rep() as usize);
|
||||
|
||||
let mut guard = self.inner.lock().unwrap();
|
||||
let instance = &mut (*guard);
|
||||
let verifier = instance.state.get_verifier_mut(VerifierId(id.0)).unwrap();
|
||||
|
||||
if let Poll::Ready(()) = verifier.close(&mut instance.cx).unwrap() {
|
||||
Ok(abi::verify::CloseReturn::Ready)
|
||||
} else {
|
||||
Ok(abi::verify::CloseReturn::Pending)
|
||||
}
|
||||
}
|
||||
|
||||
fn drop(&mut self, rep: Resource<VerifierId>) -> wasmtime::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
use crate::Binary;
|
||||
|
||||
pub struct Wasmtime {
|
||||
engine: Engine,
|
||||
components: Vec<Component>,
|
||||
instances: Vec<(Plugin, Store<InstanceState>)>,
|
||||
}
|
||||
|
||||
impl Wasmtime {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
engine: Engine::default(),
|
||||
components: vec![],
|
||||
instances: vec![],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl WasmRuntime for Wasmtime {
|
||||
fn load(&mut self, bin: &Binary) -> Result<WasmId, Error> {
|
||||
let id = self.components.len();
|
||||
|
||||
let component = Component::from_binary(&self.engine, &bin.0).unwrap();
|
||||
|
||||
self.components.push(component);
|
||||
|
||||
Ok(WasmId(id))
|
||||
}
|
||||
|
||||
fn instantiate(
|
||||
&mut self,
|
||||
id: WasmId,
|
||||
instance: Arc<Mutex<instance::Instance>>,
|
||||
arg: &[u8],
|
||||
) -> Result<InstanceId, Error> {
|
||||
let state = InstanceState { inner: instance };
|
||||
let mut store = Store::new(&self.engine, state);
|
||||
let mut linker = Linker::new(&self.engine);
|
||||
|
||||
generated::tlsn::tlsn::io::add_to_linker::<_, HasSelf<_>>(&mut linker, |state| state)
|
||||
.unwrap();
|
||||
generated::tlsn::tlsn::prove::add_to_linker::<_, HasSelf<_>>(&mut linker, |state| state)
|
||||
.unwrap();
|
||||
generated::tlsn::tlsn::verify::add_to_linker::<_, HasSelf<_>>(&mut linker, |state| state)
|
||||
.unwrap();
|
||||
|
||||
let component = self.components.get(id.0).unwrap();
|
||||
let instance = Plugin::instantiate(&mut store, component, &linker).unwrap();
|
||||
|
||||
instance.call_start(&mut store, arg).unwrap();
|
||||
|
||||
let id = self.instances.len();
|
||||
self.instances.push((instance, store));
|
||||
|
||||
Ok(InstanceId(id))
|
||||
}
|
||||
|
||||
fn poll(&mut self, id: InstanceId) -> Result<Poll<Result<Vec<u8>, String>>, Error> {
|
||||
let (instance, store) = self.instances.get_mut(id.0).unwrap();
|
||||
|
||||
let res = match instance.call_poll(store).unwrap() {
|
||||
generated::PollReturn::Pending => Poll::Pending,
|
||||
generated::PollReturn::Ready(ret) => Poll::Ready(ret),
|
||||
};
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
84 crates/sdk-core/tests/test.rs Normal file
@@ -0,0 +1,84 @@
use std::{fmt::Display, pin::Pin};

use futures_plex::{DuplexStream, duplex};
use tlsn_sdk_core::{Binary, IoProvider, Manifest, Plugin, Runtime, Wasmtime};
use tlsn_server_fixture::bind;

#[test]
fn test_plugin() {
    futures::executor::block_on(async {
        let plugin = include_bytes!("../../sdk-plugin-test/sdk_plugin_test-component.wasm");

        let plugin = Plugin {
            manifest: Manifest {},
            binary: Binary(plugin.to_vec()),
        };

        let (server_io_0, server_io_1) = duplex(1024);
        let (io_0, io_1) = duplex(1024);

        let mut rt_p = Runtime::new(
            Wasmtime::new(),
            DummyIo {
                server_io: Some(server_io_0),
                io: Some(io_0),
            },
        );

        let mut rt_v = Runtime::new(
            Wasmtime::new(),
            DummyIo {
                server_io: None,
                io: Some(io_1),
            },
        );

        let server_fut = bind(server_io_1);
        futures::join!(
            async {
                let output = rt_p.run_plugin(&plugin, &[0]).await;
            },
            async {
                let output = rt_v.run_plugin(&plugin, &[1]).await;
            },
            async {
                server_fut.await.unwrap();
            },
        );
    });
}

pub struct DummyIo {
    server_io: Option<DuplexStream>,
    io: Option<DuplexStream>,
}

#[derive(Debug)]
pub struct Error;

impl Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Error")
    }
}
impl std::error::Error for Error {}

impl IoProvider for DummyIo {
    type Io = DuplexStream;
    type Error = Error;

    fn connect_server(
        &mut self,
        _name: &tlsn::connection::ServerName,
    ) -> Pin<Box<dyn Future<Output = Result<Self::Io, Self::Error>> + Send>> {
        let io = self.server_io.take().unwrap();
        Box::pin(async move { Ok(io) })
    }

    fn connect_peer(
        &mut self,
    ) -> Pin<Box<dyn Future<Output = Result<Self::Io, Self::Error>> + Send>> {
        let io = self.io.take().unwrap();
        Box::pin(async move { Ok(io) })
    }
}
115 crates/sdk-core/wit/tlsn.wit Normal file
@@ -0,0 +1,115 @@
package tlsn:tlsn;

world plugin {
  import prove;
  import verify;

  variant poll-return {
    pending,
    ready(result<list<u8>, string>)
  }

  export start: func(arg: list<u8>);
  export poll: func() -> poll-return;
}

interface prove {
  use io.{io};

  variant setup-return {
    pending,
    ready
  }

  variant connect-return {
    pending,
    ready(io)
  }

  variant commit-return {
    pending,
    ready(result<list<u8>, string>)
  }

  variant prove-return {
    pending,
    ready(result<list<u8>, string>)
  }

  variant close-return {
    pending,
    ready,
  }

  resource prover {
    constructor(config: list<u8>);

    setup: func() -> setup-return;
    connect: func() -> connect-return;
    finish-commit: func() -> commit-return;
    prove: func(config: list<u8>);
    finish-prove: func() -> prove-return;
    close: func() -> close-return;
  }
}

interface verify {
  variant setup-return {
    pending,
    ready
  }

  variant commit-return {
    pending,
    ready(result<list<u8>, string>)
  }

  variant verify-return {
    pending,
    ready(result<list<u8>, string>)
  }

  variant close-return {
    pending,
    ready
  }

  resource verifier {
    constructor(config: list<u8>);

    setup: func() -> setup-return;
    commit: func() -> commit-return;
    verify: func(config: list<u8>);
    finish-verify: func() -> verify-return;
    close: func() -> close-return;
  }
}

interface io {
  variant error {
    closed,
    other(string),
  }

  variant check-write-return {
    pending,
    ready(result<u32, error>)
  }

  variant close-return {
    pending,
    ready(result<_, error>)
  }

  variant read-return {
    pending,
    ready(result<list<u8>, error>)
  }

  resource io {
    check-write: func() -> check-write-return;
    write: func(buf: list<u8>) -> result<_, error>;
    close: func() -> close-return;
    read: func(len: u32) -> read-return;
  }
}
5 crates/sdk-plugin-test/.cargo/config.toml Normal file
@@ -0,0 +1,5 @@
[build]
#target = "wasm32-unknown-unknown"

[target.wasm32-unknown-unknown]
rustflags = ["--cfg", 'getrandom_backend="custom"']

15 crates/sdk-plugin-test/Cargo.toml Normal file
@@ -0,0 +1,15 @@
[package]
name = "sdk-plugin-test"
version = "0.1.0"
edition = "2024"

[lib]
crate-type = ["cdylib"]

[dependencies]
tlsn-pdk = { workspace = true }
futures = { workspace = true }
tlsn-server-fixture-certs = { workspace = true }

[lints]
workspace = true
106 crates/sdk-plugin-test/src/lib.rs Normal file
@@ -0,0 +1,106 @@
use futures::{AsyncReadExt, AsyncWriteExt};

use tlsn_pdk::{
    config::{
        ProtocolConfig, ProtocolConfigValidator, ProverConfig, TlsConfig, VerifierConfig,
        VerifyConfig,
    },
    connection::ServerName,
    entry,
    prover::{ProveConfig, Prover},
    verifier::Verifier,
    webpki::{CertificateDer, RootCertStore},
};
use tlsn_server_fixture_certs::CA_CERT_DER;

async fn main(arg: Vec<u8>) -> Result<Vec<u8>, String> {
    if arg[0] == 0 {
        prover().await
    } else {
        verifier().await
    }
}

entry!(main);

async fn prover() -> Result<Vec<u8>, String> {
    let name = ServerName::Dns("test-server.io".try_into().unwrap());

    let mut builder = TlsConfig::builder();
    builder.root_store(RootCertStore {
        roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
    });
    let tls_config = builder.build().unwrap();

    let config = ProverConfig::builder()
        .server_name(name)
        .tls_config(tls_config)
        .protocol_config(
            ProtocolConfig::builder()
                .max_sent_data(1024)
                .max_recv_data(1024)
                .build()
                .unwrap(),
        )
        .build()
        .unwrap();

    let mut prover = Prover::new(config).setup().await.unwrap();

    let (mut conn, prover_fut) = prover.connect().await.unwrap();

    let (response, prover) = futures::join!(
        async {
            conn.write_all(b"GET / HTTP/1.1\r\nConnection: close\r\n\r\n")
                .await
                .unwrap();
            conn.close().await.unwrap();

            let mut response = vec![0u8; 1024];
            conn.read_to_end(&mut response).await.unwrap();

            response
        },
        prover_fut,
    );

    let mut prover = prover.unwrap();

    let mut builder = ProveConfig::builder(prover.transcript());

    let output = prover.prove(&builder.build().unwrap()).await.unwrap();

    prover.close().await.unwrap();

    Ok(response)
}

async fn verifier() -> Result<Vec<u8>, String> {
    let config = VerifierConfig::builder()
        .root_store(RootCertStore {
            roots: vec![CertificateDer(CA_CERT_DER.to_vec())],
        })
        .protocol_config_validator(
            ProtocolConfigValidator::builder()
                .max_sent_data(1024)
                .max_recv_data(4096)
                .build()
                .unwrap(),
        )
        .build()
        .unwrap();

    let mut verifier = Verifier::new(config)
        .setup()
        .await
        .unwrap()
        .run()
        .await
        .unwrap();

    let output = verifier.verify(&VerifyConfig::default()).await.unwrap();

    verifier.close().await.unwrap();

    Ok(vec![])
}
@@ -6,7 +6,7 @@ pub static SERVER_CERT_DER: &[u8] = include_bytes!("tls/test_server_cert.der");
pub static SERVER_KEY_DER: &[u8] = include_bytes!("tls/test_server_private_key.der");
/// The domain name bound to the server certificate.
pub static SERVER_DOMAIN: &str = "test-server.io";
/// A client certificate fixture.
pub static CLIENT_CERT_DER: &[u8] = include_bytes!("tls/client_cert.der");
/// A client private key fixture.
pub static CLIENT_KEY_DER: &[u8] = include_bytes!("tls/client_cert_private_key.der");
/// A client certificate fixture PEM-encoded.
pub static CLIENT_CERT: &[u8] = include_bytes!("tls/client_cert.pem");
/// A client private key fixture PEM-encoded.
pub static CLIENT_KEY: &[u8] = include_bytes!("tls/client_cert.key");

@@ -33,8 +33,5 @@ openssl req -new -key client_cert.key -out client_cert.csr -subj "/C=US/ST=State
# Sign the CSR with the root CA to create the end entity certificate (100 years validity)
openssl x509 -req -in client_cert.csr -CA root_ca.crt -CAkey root_ca.key -CAcreateserial -out client_cert.crt -days 36525 -sha256 -extfile openssl.cnf -extensions v3_req

# Convert the end entity certificate to DER format
openssl x509 -in client_cert.crt -outform der -out client_cert.der

# Convert the end entity certificate private key to DER format
openssl pkcs8 -topk8 -inform PEM -outform DER -in client_cert.key -out client_cert_private_key.der -nocrypt
# Convert the end entity certificate to PEM format
openssl x509 -in client_cert.crt -outform pem -out client_cert.pem
Binary file not shown.
23 crates/server-fixture/certs/src/tls/client_cert.pem Normal file
@@ -0,0 +1,23 @@
-----BEGIN CERTIFICATE-----
MIID2jCCAsKgAwIBAgIUG5JKIz/fbUDdpX1+TAw33mS+mWwwDQYJKoZIhvcNAQEL
BQAwZTELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBVN0YXRlMQ0wCwYDVQQHDARDaXR5
MRIwEAYDVQQKDAl0bHNub3RhcnkxCzAJBgNVBAsMAklUMRYwFAYDVQQDDA10bHNu
b3Rhcnkub3JnMCAXDTI1MDYxMDA3MTYxOVoYDzIxMjUwNjExMDcxNjE5WjBwMQsw
CQYDVQQGEwJVUzEOMAwGA1UECAwFU3RhdGUxDTALBgNVBAcMBENpdHkxEjAQBgNV
BAoMCXRsc25vdGFyeTELMAkGA1UECwwCSVQxITAfBgNVBAMMGGNsaWVudC1hdXRo
ZW50aWNhdGlvbi5pbzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANsx
Tf3JqWdAMGFzOwbO64vJ5fV/IPSrdBwKY/Fjef0REZC1Z/gGzmp0nnlaHZzZLtLS
Z9kyfdUrL6PuG3HfP6wxhiaBpUay+1O9KZsuhkKSif4KMPjlYKm+oZLvD12Qj62r
TFlui4+1wKgPrTGUUO6SQdoRxKU4nzuzRYRLyzDi0pO5YD9RLaruBj+IDEOVRW7d
1uleheVMg61lbQle5Fo0c4I0Sif96Z+7aotj3j9F2lK52jaLpA1kvC3oLajfAT30
BzpNLZTnWa1b5PRRxkuOYUXeNr+aNO90fL80K1YeIlea0f7qmKL9uDLtQbrqIJv5
tBaf8Uf0UghtBm//kx8CAwEAAaN1MHMwCQYDVR0TBAIwADALBgNVHQ8EBAMCBeAw
GQYDVR0RBBIwEIIOdGVzdC1zZXJ2ZXIuaW8wHQYDVR0OBBYEFH1qCgl04Y5i75aF
cT0V3fn9423iMB8GA1UdIwQYMBaAFMmBciQ/DZlWROxwXH8IplmuHKbNMA0GCSqG
SIb3DQEBCwUAA4IBAQB8Gvj3dsENAn0u6PS9uTFm46MaA9Dm+Fa+KbXuEHp3ADs2
7m4Hb3eojM3yae93/v/stYn8IVcB5zWmMvg6WA6obe86muuB+SZeMC/AnSD8P4pm
AzO3eTSR1s5Dr4O0qVPd2VP36e7NWXfojQg4W9t9UQtC64bVOaCDQvbe0xeWT+AR
w0y7GwnuCr/8bisqQZS8+Er1JU3zxBEjQwMiMxlOWHnYtjGeA6pdWaeLp0E6Ss3x
ecsTjmrLt6oY+BdfRSyWU4qVEOpuZLCeikUWXFzpxRX7NWYRtJUfVnoRWwuD2lzG
LybzCW2qxwHJe4biGIfWKQ7Ne7DrwQwFxVRJxCm0
-----END CERTIFICATE-----
Binary file not shown.
@@ -1,37 +0,0 @@
{
  "tax_year": 2024,
  "taxpayer": {
    "idnr": "12345678901",
    "first_name": "Max",
    "last_name": "Mustermann",
    "date_of_birth": "1985-03-12",
    "address": {
      "street": "Musterstraße 1",
      "postal_code": "10115",
      "city": "Berlin"
    }
  },
  "income": {
    "employment_income": 54200.00,
    "other_income": 1200.00,
    "capital_gains": 350.00
  },
  "deductions": {
    "pension_insurance": 4200.00,
    "health_insurance": 3600.00,
    "donations": 500.00,
    "work_related_expenses": 1100.00
  },
  "assessment": {
    "taxable_income": 49200.00,
    "income_tax": 9156.00,
    "solidarity_surcharge": 503.58,
    "total_tax": 9659.58,
    "prepaid_tax": 9500.00,
    "refund": 159.58
  },
  "submission": {
    "submitted_at": "2025-03-01T14:22:30Z",
    "submitted_by": "ElsterOnline-Portal"
  }
}
@@ -47,7 +47,6 @@ fn app(state: AppState) -> Router {
        .route("/formats/json", get(json))
        .route("/formats/html", get(html))
        .route("/protected", get(protected_route))
        .route("/elster", get(elster_route))
        .layer(TraceLayer::new_for_http())
        .with_state(Arc::new(Mutex::new(state)))
}
@@ -197,12 +196,6 @@ async fn protected_route(_: AuthenticatedUser) -> Result<Json<Value>, StatusCode
    get_json_value(include_str!("data/protected_data.json"))
}

async fn elster_route(_: AuthenticatedUser) -> Result<Json<Value>, StatusCode> {
    info!("Handling /elster");

    get_json_value(include_str!("data/elster.json"))
}

#[cfg(test)]
mod tests {
    use super::*;

@@ -5,7 +5,7 @@ description = "A TLS backend trait for TLSNotary"
keywords = ["tls", "mpc", "2pc"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13"
version = "0.1.0-alpha.13-pre"
edition = "2021"

[lints]

@@ -5,7 +5,7 @@ description = "An async TLS client for TLSNotary"
keywords = ["tls", "mpc", "2pc", "client", "async"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13"
version = "0.1.0-alpha.13-pre"
edition = "2021"

[lints]

@@ -5,7 +5,7 @@ description = "A TLS client for TLSNotary"
keywords = ["tls", "mpc", "2pc", "client", "sync"]
categories = ["cryptography"]
license = "Apache-2.0 OR ISC OR MIT"
version = "0.1.0-alpha.13"
version = "0.1.0-alpha.13-pre"
edition = "2021"
autobenches = false


@@ -886,7 +886,6 @@ async fn client_error_is_sticky() {
}

#[tokio::test]
#[allow(clippy::no_effect)]
#[allow(clippy::unnecessary_operation)]
async fn client_is_send() {
    let (client, _) = make_pair(KeyType::Rsa).await;

@@ -5,7 +5,7 @@ description = "Cryptographic operations for the TLSNotary TLS client"
keywords = ["tls", "mpc", "2pc"]
categories = ["cryptography"]
license = "Apache-2.0 OR ISC OR MIT"
version = "0.1.0-alpha.13"
version = "0.1.0-alpha.13-pre"
edition = "2021"

[lints]
@@ -22,6 +22,7 @@ handshake = []
ghash = []
logging = ["tracing"]
prf = ["dep:hmac", "dep:sha2"]
web = ["dep:web-time"]

[dependencies]
futures = { workspace = true }
@@ -34,6 +35,6 @@ serde = { workspace = true, optional = true, features = ["derive"] }
sha2 = { workspace = true, optional = true }
thiserror = { workspace = true }
tracing = { workspace = true, optional = true }
web-time = { workspace = true }
web-time = { workspace = true, optional = true }
rustls-webpki = { workspace = true, features = ["ring"] }
rustls-pki-types = { workspace = true }

@@ -1,5 +1,9 @@
use crate::msgs::enums::{AlertDescription, ContentType, HandshakeType};
use std::{error::Error as StdError, fmt};

#[cfg(not(feature = "web"))]
use std::time::SystemTimeError;
#[cfg(feature = "web")]
use web_time::SystemTimeError;

/// rustls reports protocol errors using this type.

@@ -1,3 +1,6 @@
#[cfg(not(feature = "web"))]
use std::time::SystemTime;
#[cfg(feature = "web")]
use web_time::SystemTime;

use crate::{

@@ -10,6 +10,10 @@ use crate::{
};
use ring::digest::Digest;
use rustls_pki_types as pki_types;

#[cfg(not(feature = "web"))]
use std::time::{SystemTime, UNIX_EPOCH};
#[cfg(feature = "web")]
use web_time::{SystemTime, UNIX_EPOCH};

type SignatureAlgorithms = &'static [&'static dyn pki_types::SignatureVerificationAlgorithm];
@@ -415,8 +419,7 @@ pub(crate) fn pki_error(error: webpki::Error) -> Error {
|
||||
match error {
|
||||
BadDer | BadDerTime => Error::InvalidCertificateEncoding,
|
||||
InvalidSignatureForPublicKey => Error::InvalidCertificateSignature,
|
||||
UnsupportedSignatureAlgorithmContext(_)
|
||||
| UnsupportedSignatureAlgorithmForPublicKeyContext(_) => {
|
||||
UnsupportedSignatureAlgorithm | UnsupportedSignatureAlgorithmForPublicKey => {
|
||||
Error::InvalidCertificateSignatureType
|
||||
}
|
||||
e => Error::InvalidCertificateData(format!("invalid peer certificate: {e}")),
|
||||
@@ -465,90 +468,23 @@ fn convert_scheme(scheme: SignatureScheme) -> Result<SignatureAlgorithms, Error>
    }
}

/// Signature algorithm.
#[derive(Debug, Clone, Copy, PartialEq)]
#[allow(non_camel_case_types)]
pub enum SignatureAlgorithm {
    ECDSA_NISTP256_SHA256,
    ECDSA_NISTP256_SHA384,
    ECDSA_NISTP384_SHA256,
    ECDSA_NISTP384_SHA384,
    ED25519,
    RSA_PKCS1_2048_8192_SHA256,
    RSA_PKCS1_2048_8192_SHA384,
    RSA_PKCS1_2048_8192_SHA512,
    RSA_PSS_2048_8192_SHA256_LEGACY_KEY,
    RSA_PSS_2048_8192_SHA384_LEGACY_KEY,
    RSA_PSS_2048_8192_SHA512_LEGACY_KEY,
}

impl SignatureAlgorithm {
    pub fn from_alg(alg: &dyn pki_types::SignatureVerificationAlgorithm) -> Self {
        let id = alg.signature_alg_id();
        if id == webpki::ring::ECDSA_P256_SHA256.signature_alg_id() {
            SignatureAlgorithm::ECDSA_NISTP256_SHA256
        } else if id == webpki::ring::ECDSA_P256_SHA384.signature_alg_id() {
            SignatureAlgorithm::ECDSA_NISTP256_SHA384
        } else if id == webpki::ring::ECDSA_P384_SHA256.signature_alg_id() {
            SignatureAlgorithm::ECDSA_NISTP384_SHA256
        } else if id == webpki::ring::ECDSA_P384_SHA384.signature_alg_id() {
            SignatureAlgorithm::ECDSA_NISTP384_SHA384
        } else if id == webpki::ring::ED25519.signature_alg_id() {
            SignatureAlgorithm::ED25519
        } else if id == webpki::ring::RSA_PKCS1_2048_8192_SHA256.signature_alg_id() {
            SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA256
        } else if id == webpki::ring::RSA_PKCS1_2048_8192_SHA384.signature_alg_id() {
            SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA384
        } else if id == webpki::ring::RSA_PKCS1_2048_8192_SHA512.signature_alg_id() {
            SignatureAlgorithm::RSA_PKCS1_2048_8192_SHA512
        } else if id == webpki::ring::RSA_PSS_2048_8192_SHA256_LEGACY_KEY.signature_alg_id() {
            SignatureAlgorithm::RSA_PSS_2048_8192_SHA256_LEGACY_KEY
        } else if id == webpki::ring::RSA_PSS_2048_8192_SHA384_LEGACY_KEY.signature_alg_id() {
            SignatureAlgorithm::RSA_PSS_2048_8192_SHA384_LEGACY_KEY
        } else if id == webpki::ring::RSA_PSS_2048_8192_SHA512_LEGACY_KEY.signature_alg_id() {
            SignatureAlgorithm::RSA_PSS_2048_8192_SHA512_LEGACY_KEY
        } else {
            unreachable!()
        }
    }
}

/// Verify the signature and return the algorithm which passed verification.
pub fn verify_sig_determine_alg(
    cert: &Certificate,
    message: &[u8],
    dss: &DigitallySignedStruct,
) -> Result<SignatureAlgorithm, Error> {
    let cert = pki_types::CertificateDer::from(cert.0.as_slice());
    let cert = webpki::EndEntityCert::try_from(&cert).map_err(pki_error)?;

    verify_sig_using_any_alg(&cert, convert_scheme(dss.scheme)?, message, &dss.sig.0)
        .map_err(pki_error)
}

fn verify_sig_using_any_alg(
    cert: &webpki::EndEntityCert,
    algs: SignatureAlgorithms,
    message: &[u8],
    sig: &[u8],
) -> Result<SignatureAlgorithm, webpki::Error> {
) -> Result<(), webpki::Error> {
    // TLS doesn't itself give us enough info to map to a single
    // webpki::SignatureAlgorithm. Therefore, convert_algs maps to several and
    // we try them all.
    for alg in algs {
        match cert.verify_signature(*alg, message, sig) {
            Ok(_) => return Ok(SignatureAlgorithm::from_alg(*alg)),
            Err(webpki::Error::UnsupportedSignatureAlgorithmForPublicKeyContext(_)) => continue,
            Err(e) => return Err(e),
            Err(webpki::Error::UnsupportedSignatureAlgorithmForPublicKey) => continue,
            res => return res,
        }
    }

    Err(webpki::Error::UnsupportedSignatureAlgorithmContext(
        webpki::UnsupportedSignatureAlgorithmContext {
            signature_algorithm_id: vec![],
            supported_algorithms: algs.iter().map(|alg| alg.signature_alg_id()).collect(),
        },
    ))
    Err(webpki::Error::UnsupportedSignatureAlgorithmForPublicKey)
}

fn verify_signed_struct(

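The comment inside verify_sig_using_any_alg captures the idea behind this hunk: a TLS SignatureScheme does not pin down a single webpki algorithm, so verification tries every candidate until one accepts the signature, and on the removed side of the hunk it also reports which one did. Below is a hedged sketch of how a caller could use that removed API; describe_server_sig, its label strings, and the assumption that cert, message, and dss come from the TLS handshake are illustrative and not part of the diff.

// Sketch only: hypothetical caller of the removed verify_sig_determine_alg.
fn describe_server_sig(
    cert: &Certificate,
    message: &[u8],
    dss: &DigitallySignedStruct,
) -> Result<&'static str, Error> {
    // Tries every candidate webpki algorithm for `dss.scheme` and returns
    // the one that actually verified the signature over `message`.
    let alg = verify_sig_determine_alg(cert, message, dss)?;
    Ok(match alg {
        SignatureAlgorithm::ED25519 => "Ed25519",
        SignatureAlgorithm::ECDSA_NISTP256_SHA256
        | SignatureAlgorithm::ECDSA_NISTP256_SHA384
        | SignatureAlgorithm::ECDSA_NISTP384_SHA256
        | SignatureAlgorithm::ECDSA_NISTP384_SHA384 => "ECDSA",
        _ => "RSA (PKCS#1 v1.5 or PSS)",
    })
}
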
@@ -4,7 +4,7 @@ authors = ["TLSNotary Team"]
|
||||
keywords = ["tls", "mpc", "2pc", "prover"]
|
||||
categories = ["cryptography"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
version = "0.1.0-alpha.13"
|
||||
version = "0.1.0-alpha.13-pre"
|
||||
edition = "2024"
|
||||
|
||||
[lints]
|
||||
@@ -31,7 +31,6 @@ web-spawn = { workspace = true, optional = true }
|
||||
|
||||
mpz-common = { workspace = true }
|
||||
mpz-core = { workspace = true }
|
||||
mpz-circuits = { workspace = true }
|
||||
mpz-garble = { workspace = true }
|
||||
mpz-garble-core = { workspace = true }
|
||||
mpz-hash = { workspace = true }
|
||||
@@ -41,8 +40,6 @@ mpz-ot = { workspace = true }
|
||||
mpz-vm-core = { workspace = true }
|
||||
mpz-zk = { workspace = true }
|
||||
|
||||
aes = { workspace = true }
|
||||
ctr = { workspace = true }
|
||||
derive_builder = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
opaque-debug = { workspace = true }
|
||||
@@ -60,9 +57,7 @@ rangeset = { workspace = true }
|
||||
webpki-roots = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
mpz-ideal-vm = { workspace = true }
|
||||
rstest = { workspace = true }
|
||||
tlsn-core = { workspace = true, features = ["fixtures"] }
|
||||
tlsn-server-fixture = { workspace = true }
|
||||
tlsn-server-fixture-certs = { workspace = true }
|
||||
tokio = { workspace = true, features = ["full"] }
|
||||
|
||||
109 crates/tlsn/src/commit.rs Normal file
@@ -0,0 +1,109 @@
//! Plaintext commitment and proof of encryption.

pub(crate) mod hash;
pub(crate) mod transcript;

use mpz_core::bitvec::BitVec;
use mpz_memory_core::{
    DecodeFutureTyped, Vector,
    binary::{Binary, U8},
};
use mpz_vm_core::{Vm, prelude::*};
use tlsn_core::transcript::Record;

use crate::{
    Role,
    zk_aes_ctr::{ZkAesCtr, ZkAesCtrError},
};

/// Commits the plaintext of the provided records, returning a proof of
/// encryption.
///
/// Writes the plaintext VM reference to the provided records.
pub(crate) fn commit_records<'record>(
    vm: &mut dyn Vm<Binary>,
    aes: &mut ZkAesCtr,
    records: impl IntoIterator<Item = &'record Record>,
) -> Result<(Vec<Vector<U8>>, RecordProof), RecordProofError> {
    let mut plaintexts = Vec::new();
    let mut ciphertexts = Vec::new();
    for record in records {
        let (plaintext_ref, ciphertext_ref) = aes
            .encrypt(vm, record.explicit_nonce.clone(), record.ciphertext.len())
            .map_err(ErrorRepr::Aes)?;

        if let Role::Prover = aes.role() {
            let Some(plaintext) = record.plaintext.clone() else {
                return Err(ErrorRepr::MissingPlaintext.into());
            };

            vm.assign(plaintext_ref, plaintext)
                .map_err(RecordProofError::vm)?;
        }
        vm.commit(plaintext_ref).map_err(RecordProofError::vm)?;

        let ciphertext = vm.decode(ciphertext_ref).map_err(RecordProofError::vm)?;

        plaintexts.push(plaintext_ref);
        ciphertexts.push((ciphertext, record.ciphertext.clone()));
    }

    Ok((plaintexts, RecordProof { ciphertexts }))
}

/// Proof of encryption.
#[derive(Debug)]
#[must_use]
#[allow(clippy::type_complexity)]
pub(crate) struct RecordProof {
    ciphertexts: Vec<(DecodeFutureTyped<BitVec, Vec<u8>>, Vec<u8>)>,
}

impl RecordProof {
    /// Verifies the proof.
    pub(crate) fn verify(self) -> Result<(), RecordProofError> {
        let Self { ciphertexts } = self;

        for (mut ciphertext, expected) in ciphertexts {
            let ciphertext = ciphertext
                .try_recv()
                .map_err(RecordProofError::vm)?
                .ok_or_else(|| ErrorRepr::NotDecoded)?;

            if ciphertext != expected {
                return Err(ErrorRepr::InvalidCiphertext.into());
            }
        }

        Ok(())
    }
}

/// Error for [`RecordProof`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub(crate) struct RecordProofError(#[from] ErrorRepr);

impl RecordProofError {
    fn vm<E>(err: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync + 'static>>,
    {
        Self(ErrorRepr::Vm(err.into()))
    }
}

#[derive(Debug, thiserror::Error)]
#[error("record proof error: {0}")]
enum ErrorRepr {
    #[error("VM error: {0}")]
    Vm(Box<dyn std::error::Error + Send + Sync + 'static>),
    #[error("zk aes error: {0}")]
    Aes(ZkAesCtrError),
    #[error("plaintext is missing")]
    MissingPlaintext,
    #[error("ciphertext was not decoded")]
    NotDecoded,
    #[error("ciphertext does not match expected")]
    InvalidCiphertext,
}
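For orientation only, not part of the diff: both parties drive commit_records over the transcript records, the prover's role causes the plaintext to be assigned and committed into the VM, and once the VM has executed, RecordProof::verify checks that every decoded ciphertext equals the ciphertext seen on the wire. The driver below is a hedged sketch; prove_record_encryption and the elided execution step are hypothetical, while commit_records and RecordProof::verify are the items defined in the new file above.

// Sketch only: a hypothetical in-crate driver for the new commit module.
use mpz_memory_core::binary::Binary;
use mpz_vm_core::Vm;
use tlsn_core::transcript::Record;

use crate::{
    commit::{commit_records, RecordProofError},
    zk_aes_ctr::ZkAesCtr,
};

fn prove_record_encryption(
    vm: &mut dyn Vm<Binary>,
    aes: &mut ZkAesCtr,
    records: &[Record],
) -> Result<(), RecordProofError> {
    // Commit to the plaintext of every record; this also registers decode
    // futures for the corresponding ciphertexts.
    let (_plaintext_refs, proof) = commit_records(vm, aes, records)?;

    // ... the VM is executed here (omitted) so the ciphertext futures resolve ...

    // Check that each decoded ciphertext matches the one observed on the wire.
    proof.verify()
}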
Some files were not shown because too many files have changed in this diff.