Compare commits

...

161 Commits

Author SHA1 Message Date
Hendrik Eeckhaut
5e6c5c13aa Blake3 example 2025-09-26 14:13:36 +02:00
yuroitaki
4ab73bdfb5 Merge branch 'dev' into feat/blake3 2025-09-25 18:42:05 +08:00
yuroitaki
a8fa57f2cb Add blake3. 2025-09-25 18:41:52 +08:00
Hendrik Eeckhaut
d25fb320d4 build: update Rust to version 1.90.0 2025-09-24 09:32:56 +02:00
Hendrik Eeckhaut
0539268da7 Interactive noir example (#981)
demo for interactive zk age proof

Co-authored-by: th4s <th4s@metavoid.xyz>
2025-09-19 16:55:10 +02:00
dan
427b2896b5 allow root_store to be None (#995) 2025-09-19 15:15:04 +02:00
Hendrik Eeckhaut
89d1e594d1 privacy-scaling-explorations -> privacy-ethereum (#993) 2025-09-11 16:48:01 +02:00
sinu.eth
b4380f021e refactor: decouple ProveConfig from PartialTranscript (#991) 2025-09-11 09:13:52 +02:00
sinu.eth
8a823d18ec refactor(core): replace Idx with RangeSet (#988)
* refactor(core): replace Idx with RangeSet

* clippy
2025-09-10 15:44:40 -07:00
sinu.eth
7bcfc56bd8 fix(tls-core): remove deprecated webpki error variants (#992)
* fix(tls-core): remove deprecated webpki error variants

* clippy
2025-09-10 15:24:07 -07:00
sinu.eth
2909d5ebaa chore: bump mpz to 3d90b6c (#990) 2025-09-10 14:38:48 -07:00
sinu.eth
7918494ccc fix(core): fix dev dependencies (#989) 2025-09-10 14:25:04 -07:00
sinu.eth
92dd47b376 fix(core): enable zeroize derive (#987) 2025-09-10 14:11:41 -07:00
th4s
5474a748ce feat(core): Add transcript fixture (#983)
* feat(core): add transcript fixture for testing

* add feedback

* remove packages from dev dependencies
2025-09-10 22:58:10 +02:00
yuroitaki
92da5adc24 chore: update attestation example (#966)
* Add attestation example.

* Apply fmt.

* Apply clippy fix.

* Rebase.

* Improved readme + more default logging in prove example

* Removed wrong AI generated "learn more" links

* re-export ContentType in tlsn-core

* remove unnecessary checks from example

---------

Co-authored-by: yuroitaki <>
Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>
Co-authored-by: sinu <65924192+sinui0@users.noreply.github.com>
2025-09-10 09:37:17 -07:00
Hendrik Eeckhaut
e0ce1ad31a build: Update to unpatched ws_stream_wasm crate (#975) 2025-09-01 16:33:00 +02:00
Hendrik Eeckhaut
3b76877920 build: reduce wasm size (#977) 2025-09-01 11:28:12 +02:00
Hendrik Eeckhaut
783355772a docs: corrected commands in docker.md of the harness (#976) 2025-08-28 17:00:18 +02:00
dan
e5c59da90b chore: fix tests (#974) 2025-08-26 08:42:48 +00:00
dan
f059c53c2d use zk config; bump mpz (#973) 2025-08-26 08:23:24 +00:00
sinu.eth
a1367b5428 refactor(tlsn): change network setting default to reduce data transfer (#971) 2025-08-22 14:00:23 -07:00
sinu.eth
9d8124ac9d chore: bump mpz to 1b00912 (#970) 2025-08-21 09:46:29 -07:00
dan
5034366c72 fix(hmac-sha256): compute PHash and AHash concurrently (#969)
---------

Co-authored-by: th4s <th4s@metavoid.xyz>
2025-08-21 06:41:59 +00:00
sinu.eth
afd8f44261 feat(tlsn): serializable config (#968) 2025-08-18 09:03:04 -07:00
sinu.eth
21086d2883 refactor: clean up web pki (#967)
* refactor: clean up web pki

* fix time import

* clippy

* fix wasm
2025-08-18 08:36:04 -07:00
dan
cca9a318a4 fix(harness): improve harness stability (#962) 2025-08-15 09:17:20 +00:00
dan
cb804a6025 fix(harness): disable tracing events (#961) 2025-08-15 07:13:12 +00:00
th4s
9f849e7c18 fix(encoding): set correct frame limit (#963)
* fix(encoding): set correct frame limit

* bugfix for `TranscriptRefs::len`

* use current frame limit as cushion room
2025-08-13 09:57:00 +02:00
th4s
389bceddef chore: bump rust version, fix lints and satisfy clippy (#964)
* chore(lints): fix lints and satisfy clippy

* bump rust version in ci
2025-08-12 10:50:31 -07:00
th4s
657838671a chore: remove notarize methods for prover and verifier (#952)
* feat: remove notarize methods for prover and verifier

* clean up imports

* remove remaining notarize methods

* clean up imports

* remove wasm attestation bindings

---------

Co-authored-by: sinu <65924192+sinui0@users.noreply.github.com>
2025-08-06 09:38:43 -07:00
yuroitaki
2f072b2578 chore: remove notary crates (#953)
Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>
2025-08-04 10:41:45 +02:00
sinu.eth
33153d1124 refactor: move web-spawn under web feature (#949)
* refactor: move web-spawn under web feature

* add arch conditional
2025-07-29 07:11:16 -07:00
Hendrik Eeckhaut
2d399d5e24 chore: Update latency/bandwidth plots for new harness (#923)
* Updated latency/bandwidth plots for new harness
* Fix harness Docker build
2025-07-23 10:58:46 +02:00
dan
b6d7249b6d fix(harness): restore multithreading for browser benches (#943) 2025-07-23 05:40:58 +00:00
dan
2a8c1c3382 fix(harness): add custom wasm-pack to build script (#940) 2025-07-22 06:29:12 +00:00
dan
7c27162875 fix(harness): pull latest docker images (#941) 2025-07-21 06:07:48 +00:00
sinu.eth
eef813712d refactor: extract attestation functionality into dedicated crate (#936)
* refactor: extract attestation functionality into dedicated crate

* commit lock

* fix integration test

* clippy

* fix docs

* fix import

* fix wasm types

* fix doctest

* verifier config default rootstore

* fix integration test

* fix notary integration tests
2025-07-09 09:54:11 -07:00
sinu.eth
2e94e08fa6 build(wasm): enable simd128 feature (#937) 2025-07-04 11:22:56 -07:00
dan
97d9475335 fix(harness): do not close connection too early (#935) 2025-07-02 09:40:52 -07:00
sinu.eth
38820d6a3f refactor: consolidate into tlsn crate (#934)
* refactor: consolidate into tlsn crate

* clean up dead code

* bump lock file

* rustfmt

* fix examples

* fix docs script

* clippy

* clippy
2025-07-02 09:40:28 -07:00
sinu.eth
af85fa100f build(wasm): add wasm profile and optimize for perf (#933) 2025-07-02 08:52:47 -07:00
Hendrik Eeckhaut
008b901913 ci: docker image for new harness
* update Docker for new harness
* disable shm in Chrome
2025-06-27 17:58:12 +01:00
Hendrik Eeckhaut
db85f68328 build: update Rust to version 1.88.0 2025-06-27 16:40:29 +01:00
Hendrik Eeckhaut
fb80aa4cc9 chore: Set version number to 0.1.0-alpha.13-pre (#931) 2025-06-20 14:41:33 +02:00
Hendrik Eeckhaut
8dae57d6a7 ci: fix problem with multiple tlsn-wasm build artefacts (#930) 2025-06-20 10:57:35 +02:00
dan
f2ff4ba792 chore: release v0.1.0-alpha.12 (#928) 2025-06-19 09:05:34 +00:00
dan
9bf3371873 chore(wasm): expose client auth config to js (#927) 2025-06-19 07:15:09 +00:00
dan
9d853eb496 feat(prover): client authentication (#916) 2025-06-17 14:02:14 +00:00
sinu.eth
6923ceefd3 fix(harness): iptable rule and bench config variable (#925)
* fix(harness): iptable rule and bench config variable

* rustfmt
2025-06-16 13:18:34 -04:00
sinu.eth
5239c2328a chore: bump mpz to ccc0057 (#924) 2025-06-16 07:42:49 -07:00
Hendrik Eeckhaut
6a7c5384a9 build: fixed version numbers 2025-06-12 14:24:55 +02:00
th4s
7e469006c0 fix(prf): adapt logic to new default setting (#920) 2025-06-11 20:34:47 +02:00
dan
55091b5e94 fix: set TCP_NODELAY for prover and notary (#911) 2025-06-10 08:13:12 +00:00
dan
bc1eba18c9 feat(mpc-tls): use concurrent ot setup and gc preprocessing (#910)
* feat(mpc-tls): use concurrent ot setup and gc preprocessing

* bump mpz

* increase muxer stream count

* update Cargo.lock

---------

Co-authored-by: sinu.eth <65924192+sinui0@users.noreply.github.com>
2025-06-06 15:39:35 -07:00
sinu.eth
c128ab16ce fix(harness): retry browser connection until timeout (#914)
* fix(harness): retry browser connection until timeout

* add timeout to executor shutdown

* shutdown timeout error msg

* clippy
2025-06-06 15:01:28 -07:00
sinu.eth
a87125ff88 fix(ci): wasm tests (#913) 2025-06-06 13:51:34 -07:00
sinu.eth
0933d711d2 feat: harness (#703)
* feat: harness

* delete tests.rs build artifact

* fix binary path

* seconds -> milliseconds

* update lock

* add empty tests module

* rustfmt

* ToString -> Display

* output tests module into build artifacts

* clippy

* rustfmt
2025-06-06 13:34:32 -07:00
sinu.eth
79c230f2fa refactor(mpc-tls): remove commit-reveal from tag verification (#907) 2025-06-06 06:39:12 +00:00
dan
345d5d45ad feat: prove server mac key (#868)
* feat(mpc-tls): prove server mac key

* remove stray dep

* move mac key into `SessionKeys`

* fix key translation

* remove dangling dep

* move ghash mod to tlsn-common

* fix clippy lints

* treat all recv recs as unauthenticated

* detach zkvm first, then prove

* decrypt with aes_gcm, decode mac key only in zkvm

* encapsulate into `fn verify_tags`; inline mod `zk_aes_ecb`

* handle error

* fix dangling and clippy

* bump Cargo.lock
2025-06-05 09:19:41 -07:00
Hendrik Eeckhaut
55a26aad77 build: Lock + document Cargo.lock (#885) 2025-06-04 09:12:06 +02:00
Hendrik Eeckhaut
1132d441e1 docs: improve example readme (#904) 2025-06-04 08:56:55 +02:00
Hendrik Eeckhaut
fa2fdfd601 feat: add logging to server fixture (#903) 2025-06-04 08:49:33 +02:00
Hendrik Eeckhaut
24e10d664f Fix wasm-pack warnings (#888) 2025-06-03 22:38:54 +02:00
yuroitaki
c0e084c1ca fix(wasm): expose revealing server identity. (#898)
* Add reveal server identity.

* Fix test.

* Remove default.

---------

Co-authored-by: yuroitaki <>
2025-05-30 10:39:13 +08:00
Jakub Konka
b6845dfc5c feat(notary): add JWT-based authorization mode (#817)
* feat(server): add JWT-based authorization mode

This mode is an alternative to whitelist authorization mode.
It extracts the JWT from the authorization header (bearer token),
validates token's signature, claimed expiry times and additional
(user-configurable) claims.

* Fix formatting and lints

* Address review comments

* feat(server): remove JwtClaimType config property

* Fix missing README comments

* Address review comments

* Address review comments

---------

Co-authored-by: yuroitaki <25913766+yuroitaki@users.noreply.github.com>
2025-05-28 12:51:18 +08:00
sinu.eth
31def9ea81 chore: bump prerelease version (#895) 2025-05-27 11:43:42 -07:00
sinu.eth
878fe7e87d chore: release v0.1.0-alpha.11 (#894) 2025-05-27 09:27:26 -07:00
Hendrik Eeckhaut
3348ac34b6 Release automation (#890)
* ci: create release draft for tagged builds

* Apply suggestions from code review

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: sinu.eth <65924192+sinui0@users.noreply.github.com>
2025-05-27 08:43:57 -07:00
Hendrik Eeckhaut
82767ca2d5 Automatic workflow to update main after a release (#891) 2025-05-27 09:06:38 +02:00
sinu.eth
c9aaf2e0fa refactor(mpc-tls): default to full-mpc PRF (#892) 2025-05-27 08:57:34 +02:00
sinu.eth
241ed3b5a3 chore: bump mpz to alpha.3 (#893) 2025-05-27 08:34:35 +02:00
Hendrik Eeckhaut
56f088db7d ci: build ci with explicit, fixed rust version (1.87.0) (#879) 2025-05-24 21:25:36 +02:00
Hendrik Eeckhaut
f5250479bd docs: correct notary-server command in example readme (#883) 2025-05-23 11:06:14 +02:00
yuroitaki
0e2eabb833 misc(notary): update doc, docker, tee, ci (#874)
* Update docs, docker, tee, ci.

* Restore deleted dockerfile.

* Add concurrency in readme.

* Apply suggestions.

* Correct file path.

---------

Co-authored-by: yuroitaki <>
2025-05-23 11:55:36 +08:00
sinu.eth
ad530ca500 feat: SHA256 transcript commitments (#881)
* feat: SHA256 transcript commitments

* clippy
2025-05-22 09:10:21 -07:00
sinu.eth
8b1cac6fe0 refactor(core): decouple attestation from core api (#875)
* refactor(core): decouple attestation from core api

* remove dead test

* fix encoding tree test

* clippy

* fix comment
2025-05-22 09:00:43 -07:00
Hendrik Eeckhaut
555f65e6b2 fix: expose network setting type in WASM (#880) 2025-05-22 09:35:57 +02:00
dan
046485188c chore: add Cargo.lock to .gitignore (#870) 2025-05-21 09:56:08 +00:00
th4s
db53814ee7 fix(prf): set correct default logic (#873) 2025-05-20 15:22:34 +02:00
yuroitaki
d924bd6deb misc(notary): add common crate for server and client (#871)
* Add notary-common crate.

* Add cargo lock changes.

* Add copy.

---------

Co-authored-by: yuroitaki <>
2025-05-20 12:24:27 +08:00
yuroitaki
b3558bef9c feat(notary): add support for custom extension (#872)
* Add dos extension validator.

* Revert to allow any extensions.

---------

Co-authored-by: yuroitaki <>
2025-05-20 11:19:05 +08:00
yuroitaki
33c4b9d16f chore(notary): ignore clippy warning on large enum (#869)
* Fix clippy.

* Fix clippy.

---------

Co-authored-by: yuroitaki <>
2025-05-16 08:45:29 -07:00
yuroitaki
edc2a1783d refactor(notary): default to ephemeral key, remove config file & fixtures (#818)
* Add default values, refactor.

* Prepend file paths.

* Remove config and refactor.

* Fix fmt, add missing export.

* Simplify error.

* Use serde to print.

* Update crates/notary/server/src/config.rs

Co-authored-by: dan <themighty1@users.noreply.github.com>

* fixture removal + generate signing key (#819)

* Default to ephemeral key gen, remove fixtures.

* Fix wording.

* Add configuring sig alg, comment fixes.

* Fix sig alg id parsing.

* Refactor pub key to pem.

* Return error, add test.

* Update crates/notary/server/src/signing.rs

Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>

---------

Co-authored-by: yuroitaki <>
Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>

---------

Co-authored-by: yuroitaki <>
Co-authored-by: dan <themighty1@users.noreply.github.com>
Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>
2025-05-16 19:02:20 +08:00
sinu.eth
c2a6546deb refactor(core): encode by ref and rip out dead hash functionality (#866) 2025-05-15 09:10:05 -07:00
th4s
2dfa386415 chore: bump mpz and adapt update method call in hmac-sha256 (#867)
* fix(hmac-sha256): use new `update` method from mpz-hash

* use `into` conversion
2025-05-15 15:58:32 +02:00
sinu.eth
5a188e75c7 refactor(cipher): remove contiguous memory assumption (#864)
* refactor(cipher): remove contiguous memory assumption

* fix mpc-tls and upstream crates
2025-05-13 09:41:55 -07:00
sinu.eth
a8bf1026ca feat(deap): address space mapping (#809) 2025-05-13 09:38:39 -07:00
sinu.eth
f900fc51cd chore: bump mpz to abd02e6 (#825) 2025-05-13 09:35:51 -07:00
th4s
6ccf102ec8 feat(prf): reduced MPC variant (#735)
* feat(prf): reduced MPC variant

* move sending `client_random` from `alloc` to `preprocess`

* rename `Config` -> `Mode` and rename variants

* add feedback for handling of prf config

* fix formatting to nightly

* simplify `MpcPrf`

* improve external flush handling

* improve control flow

* improved inner control flow for normal prf version

* rename leftover `config` -> `mode`

* remove unnecessary pub(crate)

* rewrite state flow for reduced prf

* improve state transition for reduced prf

* repair prf bench

* WIP: Adapting to new `Sha256` from mpz

* repair failing test

* fixed all tests

* remove output decoding for p

* do not use mod.rs file hierarchy

* remove pub(crate) from function

* improve config handling

* use `Array::try_from`

* simplify hmac to function

* remove `merge_vecs`

* move `mark_public` to allocation

* minor fixes

* simplify state logic for reduced prf even more

* simplify reduced prf even more

* set reduced prf as default

* temporarily fix commit for mpz

* add part of feedback

* simplify state transition

* adapt comment

* improve state transition in flush

* simplify flush

* fix wasm prover config

---------

Co-authored-by: sinu <65924192+sinui0@users.noreply.github.com>
2025-05-13 09:26:43 -07:00
sinu.eth
2c500b13bd chore: bump mpz to alpha.3 (#806)
* temporary remove hmac crates

* wip: adapting cipher crate...

* wip: adapting key-exchange crate...

* wip: adapt most of mpc-tls...

* adapt prover and verifier crates

* remove unnecessary rand compat import for deap

* adapt mpc-tls

* fix: endianness of key-exchange circuit

* fix: output endianness of ke circuit

* fix variable name

---------

Co-authored-by: th4s <th4s@metavoid.xyz>
2025-05-13 09:03:09 -07:00
Hendrik Eeckhaut
2da0c242cb build: Check in Cargo lock files (#742) 2025-05-12 10:22:13 +02:00
th4s
798c22409a chore(config): move defer_decryption_from_start to ProtocolConfig 2025-05-10 11:41:01 +02:00
dan
3b5ac20d5b fix(benches): browser bench fixes (#821)
* fix(benches): make browser benches work again

* Update crates/benches/binary/README.md

Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>

* Update crates/benches/browser/wasm/Cargo.toml

Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>

* add --release flag

---------

Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>
2025-05-08 06:13:15 +00:00
Hendrik Eeckhaut
a063f8cc14 ci: build gramine-sgx for dev and tagged builds only (#805) 2025-05-05 17:16:50 +02:00
dan
6f6b24e76c test: fix failing tests (#823) 2025-05-05 17:01:42 +02:00
dan
a28718923b chore(examples): inline custom crypto provider for clarity (#815)
Co-authored-by: sinu.eth <65924192+sinui0@users.noreply.github.com>
2025-04-30 06:41:07 +00:00
Hendrik Eeckhaut
19447aabe5 Tee dev cleanup (#759)
* build: added scripts for local tee/sgx development
* Improved documentation: move all explanation to one README file
2025-04-28 14:46:32 +02:00
Jakub Konka
8afb7a4c11 fix(notary): use custom HTTP header for authorization: X-API-Key (#804)
Co-authored-by: yuroitaki <25913766+yuroitaki@users.noreply.github.com>
2025-04-28 14:24:32 +08:00
dan
43c6877ec0 chore: support workspace lints in all crates (#797) 2025-04-25 13:58:26 +02:00
dan
39e14949a0 chore: add rustls licence and attribution (#795)
* chore: add rustls licence and attribution

* add missing commit
2025-04-25 07:10:49 +00:00
dan
31f62982b5 feat(wasm): allow max records config (#810) 2025-04-25 06:34:49 +00:00
yuroitaki
6623734ca0 doc(example): add comments on verifying custom extension (#788)
* Add comments.

* Fix comment.

---------

Co-authored-by: yuroitaki <>
2025-04-25 11:18:47 +08:00
Hendrik Eeckhaut
41e215f912 chore: set version number to 0.1.0-alpha.11-pre (#798) 2025-04-23 13:19:05 +02:00
dan
9e0f79125b misc(notary): improve error msg when tls is expected (#776)
* misc(notary): improve error msg when tls is expected

* change wording

* fix nested if

* process hyper error

* refactor into a fn

* fix error msg

Co-authored-by: yuroitaki <25913766+yuroitaki@users.noreply.github.com>

* do not catch hyper error

---------

Co-authored-by: yuroitaki <25913766+yuroitaki@users.noreply.github.com>
2025-04-22 12:03:23 +00:00
Hendrik Eeckhaut
7bdd3a724b fix: Add missing concurrency param in tee config (#791) 2025-04-22 11:19:35 +02:00
dan
baa486ccfd chore(examples): fix formatting (#793) 2025-04-21 08:46:28 +00:00
sinu.eth
de7a47de5b feat: expose record count config (#786)
* expose record config

* update default record counts

* make fields optional

* override record count in integration test
2025-04-18 14:58:28 +07:00
sinu.eth
3a57134b3a chore: update version to alpha.10 (#785) 2025-04-18 08:54:55 +02:00
sinu.eth
86fed1a90c refactor: remove extension api from request builder (#787) 2025-04-18 13:01:28 +07:00
sinu.eth
82964c273b feat: attestation extensions (#755)
* feat: attestation extensions

* rustfmt

* fix doctest example

* add extensions getter to public api

* add tests

* fix prover so it includes extensions
2025-04-17 23:15:27 +07:00
yuroitaki
81aaa338e6 feat(core): find set cover across different commitment kinds in TranscriptProofBuilder (#765)
* Init.

* Cover range in order of preference of kinds.

* Fix comment.

* Adjust error message.

* Return tuple from set cover and address comments.

* Fix comments.

* Update utils version.

---------

Co-authored-by: yuroitaki <>
Co-authored-by: dan <themighty1@users.noreply.github.com>
2025-04-17 15:16:06 +08:00
dan
f331a7a3c5 chore: improve naming and comments (#780) 2025-04-17 06:43:30 +00:00
dan
adb407d03b misc(core): simplify encoding logic (#781)
* perf(core): simplify encoding logic

* make constant-time
2025-04-15 14:50:53 +00:00
dan
3e54119867 feat(notary): add concurrency limit (#770)
* feat(notary): add concurrency limit

* switch to 503 status code

* remove test-api feature

* improve naming and comments

* set default concurrency to 32
2025-04-15 12:31:16 +00:00
Hendrik Eeckhaut
71aa90de88 Add tlsn-wasm to API docs (#768) 2025-04-10 13:35:20 +02:00
sinu.eth
93535ca955 feat(mpc-tls): improve error message for incorrect transcript config (#754)
* feat(mpc-tls): improve error message for incorrect transcript config

* rustfmt

---------

Co-authored-by: dan <themighty1@users.noreply.github.com>
2025-04-07 10:44:02 +00:00
sinu.eth
a34dd57926 refactor: remove utils-aio dep (#760) 2025-04-03 04:58:14 +07:00
yuroitaki
92d7b59ee8 doc(example): add minor comments (#761)
* Add comments.

* Remove commented leftover.

* Remove example tweak.

* fmt.

---------

Co-authored-by: yuroitaki <>
2025-04-02 14:29:26 +08:00
Leonid Logvinov
c8e9cb370e feat(notary): Log notarization elapsed time (#746)
* Log notarisation elapsed time

* Fix formatting

* Include time units in field name
2025-03-27 08:08:29 -07:00
dan
4dc5570a31 Misc comments (#747)
* fix comments

* fix comment

Co-authored-by: sinu.eth <65924192+sinui0@users.noreply.github.com>

* describe all args

* change decrypted plaintext -> plaintext

* remove redundant comments

---------

Co-authored-by: sinu.eth <65924192+sinui0@users.noreply.github.com>
2025-03-27 13:42:41 +00:00
Hendrik Eeckhaut
198e24c5e4 ci: manual workflow for tlsn-wasm release (#757) 2025-03-27 14:33:46 +01:00
dan
f16d7238e5 refactor(core): DoS mitigation and additional validation (#648)
* add encoding proof validation

* check that merkle tree indices are not out of bounds

* limit opened plaintext hash data

* add test

* formatting

* bump commitment tree size cap to 30 bits

* remove unnecessary test

* fix stray lines
2025-03-27 12:54:05 +00:00
dan
9253adaaa4 fix: avoid mutating self in TagShare::add (#748) 2025-03-27 12:46:27 +00:00
Hendrik Eeckhaut
8c889ac498 ci: SGX build: drop TEE GH environment, use regular secret (#751) 2025-03-27 11:40:04 +01:00
Hendrik Eeckhaut
f0e2200d22 ci: disable codecov annotation and comments in Github (#752) 2025-03-26 14:49:14 +01:00
Hendrik Eeckhaut
224e41a186 chore: Bump version to 0.1.0-alpha.10-pre 2025-03-25 14:28:26 +01:00
Hendrik Eeckhaut
328c2af162 fix: do not enable tee_quote feature by default (#745) 2025-03-25 11:24:43 +01:00
sinu.eth
cdb80e1458 fix: compute recv record count from max_recv (#743)
* fix: compute recv record count from max_recv

* pad after check

* fix: add `max_recv` to mpc-tls integration test

---------

Co-authored-by: th4s <th4s@metavoid.xyz>
2025-03-25 11:04:07 +01:00
Hendrik Eeckhaut
eeccbef909 ci: script to patch imports in the tlsn-wasm build result (#727) 2025-03-20 21:47:47 +01:00
sinu
190b7b0bf6 ci: update tlsn-wasm release workflow 2025-03-20 11:10:28 -07:00
sinu
c70caa5ed9 chore: release v0.1.0-alpha.9 2025-03-20 11:06:57 -07:00
sinu.eth
20137b8c6c fix(notary): install libclang in docker image (#740) 2025-03-20 10:53:32 -07:00
yuroitaki
4cdd1395e8 feat(core): find set cover solution for user in TranscriptProofBuilder (#664)
* Add reveal groups of ranges.

* Reveal committed ranges given a rangeset.

* Fix test and wordings.

* Fix wordings.

* Add reveal feature for hash commitments.

* Formatting.

* Fix wording.

* Add subset check.

* Add subset check.

* Add clippy allow.

* Fix missing direction in transcript index lookup.

* Fix prune subset.

* Refactor proof_idxs.

* Throw error if only one subset detected.

* Fix superset reveal.

* Fmt.

* Refactored Ord for Idx.

* Update crates/core/src/transcript/proof.rs

Co-authored-by: dan <themighty1@users.noreply.github.com>

* Adjust example and comments.

* Adjust comments.

* Remove comment.

* Change comment style.

* Change comment.

* Add comments.

* Change to lazily check set cover.

* use rangeset and simplify

* restore examples

* fix import

* rustfmt

* clippy

---------

Co-authored-by: yuroitaki <>
Co-authored-by: dan <themighty1@users.noreply.github.com>
Co-authored-by: sinu <65924192+sinui0@users.noreply.github.com>
2025-03-20 07:55:13 -07:00
Leonid Logvinov
c1b3d64d5d feat(notary): Make logging format configurable (#719)
* Make logging format configurable

* Document logging format

* Fix formatting

* Init server config with default values in notary integration tests
2025-03-19 10:57:00 -07:00
sinu.eth
61ce838f8c refactor: migrate to rand 0.9 (#734)
* refactor: migrate to rand 0.9

* fix: enable wasm_js feature for getrandom

* fix: set getrandom cfg

* fix: clippy

* fix: notary server rand

* fix cargo config
2025-03-19 10:36:24 -07:00
dan
efca281222 feat: Ethereum compatible signer (#731)
* feat: add ethereum-compatible signer

* fix recovery id

* test with a reference signer
2025-03-19 10:17:47 -07:00
sinu.eth
b24041b9f5 fix: record layer handshake control flow (#733) 2025-03-17 11:04:41 -07:00
th4s
9649d6e4cf test(common): Add test for TranscriptRefs::get (#712)
* test(common): add test for transcript refs

* doc: improve doc for test

---------

Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>
Co-authored-by: sinu.eth <65924192+sinui0@users.noreply.github.com>
2025-03-17 10:02:19 -07:00
Hendrik Eeckhaut
bc69683ecf ci: build notary docker image for both dev branch and releases (#726) 2025-03-12 18:03:01 +01:00
dan
6c468a91cf test: improve test, fix grammar 2025-03-11 10:44:11 +01:00
Hendrik Eeckhaut
dcff0b9152 ci: update cache plugin 2025-03-11 09:55:12 +01:00
sinu
5f91926154 fix: allow deprecated ring (#720) 2025-03-10 12:42:31 -07:00
Hendrik Eeckhaut
0496cbaeb1 chore: Bump version to 0.1.0-alpha.9-pre 2025-03-10 08:41:18 +01:00
sinu
d8747d49e3 chore: release alpha.8 2025-03-07 14:51:11 -08:00
sinu.eth
6fe328581c chore: bump mpz to alpha.2 (#716) 2025-03-07 14:38:47 -08:00
sinu.eth
6d1140355b build: separate clippy and keep going (#715) 2025-03-07 11:15:00 -08:00
sinu.eth
5246beabf5 chore(wasm): bump web spawn to 0.2 (#714) 2025-03-07 10:57:25 -08:00
Hendrik Eeckhaut
29efc35d14 ci: create notary-server-sgx docker image 2025-03-06 11:25:53 +01:00
Hendrik Eeckhaut
32d25e5c69 fix: fixed version of time dependency (v0.3.38 has wasm issue) (#711) 2025-03-06 01:06:16 +01:00
yuroitaki
ca9d364fc9 docs: Update notary server documentation 2025-03-05 13:15:11 +01:00
sinu.eth
5cbafe17f5 chore: removed unused deps (#706) 2025-03-03 12:15:46 -08:00
sinu.eth
acabb7761b chore: delete dead code (#705) 2025-03-03 11:53:20 -08:00
sinu.eth
c384a393bf chore: bump deps (#704) 2025-03-03 11:40:31 -08:00
Hendrik Eeckhaut
be0be19018 ci: calculate SGX mr_enclave for notary server in gramine docker (#701)
* calculate SGX mr_enclave for notary server in gramine docker
* remove old tee github workflow
* attest build result for dev branch builds and releases
2025-03-03 13:29:47 +01:00
Hendrik Eeckhaut
63bd6abc5d docs: corrected example output in examples README 2025-02-26 18:56:49 +01:00
sinu.eth
cb13169b82 perf: MPC-TLS upgrade (#698)
* fix: add new Cargo.toml

* (alpha.8) - Refactor key-exchange crate (#685)

* refactor(key-exchange): adapt key-exchange to new vm

* fix: fix feature flags

* simplify

* delete old msg module

* clean up error

---------

Co-authored-by: sinu <65924192+sinui0@users.noreply.github.com>

* (alpha.8) - Refactor prf crate (#684)

* refactor(prf): adapt prf to new mpz vm

Co-authored-by: sinu <65924192+sinui0@users.noreply.github.com>

* refactor: remove preprocessing bench

* fix: fix feature flags

* clean up attributes

---------

Co-authored-by: sinu <65924192+sinui0@users.noreply.github.com>

* refactor: key exchange interface (#688)

* refactor: prf interface (#689)

* (alpha.8) - Create cipher crate (#683)

* feat(cipher): add cipher crate, replacing stream/block cipher and aead

* delete old config module

* remove mpz generics

---------

Co-authored-by: sinu <65924192+sinui0@users.noreply.github.com>

* refactor(core): decouple encoder from mpz (#692)

* WIP: Adding new encoding logic...

* feat: add new encoder

* add feedback

* rename conversions

* feat: DEAP VM (#690)

* feat: DEAP VM

* use rangeset, add desync guard

* move MPC execution up in finalization

* refactor: MPC-TLS (#693)

* refactor: MPC-TLS

Co-authored-by: th4s <th4s@metavoid.xyz>

* output key references

* bump deps

---------

Co-authored-by: th4s <th4s@metavoid.xyz>

* refactor: prover + verifier (#696)

* refactor: wasm crates (#697)

* chore: appease clippy (#699)

* chore: rustfmt

* chore: appease clippy more

* chore: more rustfmt!

* chore: clippy is stubborn

* chore: rustfmt sorting change is annoying!

* fix: remove wasm bundling hack

* fix: aes ctr test

* chore: clippy

* fix: flush client when sending close notify

* fix: failing tests

---------

Co-authored-by: th4s <th4s@metavoid.xyz>
2025-02-25 13:51:28 -08:00
mac
25d65734c0 chore: improve notary server html info (regular and TEE) 2025-02-21 14:03:47 +01:00
mac
119ae4b2a8 docs: openapi conf update for TEE quote (#651) 2025-02-21 09:04:21 +01:00
Hendrik Eeckhaut
f59153b0a0 ci: fix TEE deployments (#686)
* do not run tee-deployments builds for PR builds
* Remove AWS deployment scripts
* added missing timeout parameter to TEE config
2025-02-20 11:58:13 +01:00
Hendrik Eeckhaut
bffe9ebb0b doc: disclaimer for minor changes PRs in contribution guidelines (#691) 2025-02-04 10:02:38 +01:00
Hendrik Eeckhaut
65299d7def chore: update axum to v0.8 (#681)
chore: update `axum` to v0.8

Co-authored-by: yuroitaki <25913766+yuroitaki@users.noreply.github.com>
2025-01-08 09:24:01 +01:00
440 changed files with 34440 additions and 26489 deletions

3
.github/codecov.yml vendored Normal file
View File

@@ -0,0 +1,3 @@
github_checks:
annotations: false
comment: false

View File

@@ -1,12 +0,0 @@
#!/bin/bash
# https://github.com/tlsnotary/tlsn/pull/419
set -ex
environment=$1
aws s3 sync .git s3://tlsn-deploy/$environment/.git --delete
cargo build -p notary-server --release
aws s3 cp ./target/release/notary-server s3://tlsn-deploy/$environment/
exit 0

View File

@@ -1,27 +0,0 @@
#!/bin/bash
set -ex
environment=$1
branch=$2
INSTANCE_ID=$(aws ec2 describe-instances --filters Name=tag:Name,Values=[tlsnotary-backend-v1] Name=instance-state-name,Values=[running] --query "Reservations[*].Instances[*][InstanceId]" --output text)
aws ec2 create-tags --resources $INSTANCE_ID --tags "Key=$environment,Value=$branch"
COMMIT_HASH=$(git rev-parse HEAD)
DEPLOY_ID=$(aws deploy create-deployment --application-name tlsn-$environment-v1 --deployment-group-name tlsn-$environment-v1-group --github-location repository=$GITHUB_REPOSITORY,commitId=$COMMIT_HASH --ignore-application-stop-failures --file-exists OVERWRITE --output text)
while true; do
STATUS=$(aws deploy get-deployment --deployment-id $DEPLOY_ID --query 'deploymentInfo.status' --output text)
if [ $STATUS != "InProgress" ] && [ $STATUS != "Created" ]; then
if [ $STATUS = "Succeeded" ]; then
echo "SUCCESS"
exit 0
else
echo "Failed"
exit 1
fi
else
echo "Deploying..."
fi
sleep 30
done

View File

@@ -1,43 +0,0 @@
#/bin/sh
# this is to be ran in a docker container via an github action that has gramine set-up already e.g.,
# notaryserverbuilds.azurecr.io/builder/gramine
# with sgx hardware:
# ./gramine.sh sgx
#
# without:
# ./gramine.sh
##
if [ -z "$1" ]
then
run='gramine-direct notary-server &'
else
run='gramine-sgx notary-server &'
fi
curl https://sh.rustup.rs -sSf | sh -s -- -y
. "$HOME/.cargo/env"
apt install libssl-dev
gramine-sgx-gen-private-key
SGX=1 make
gramine-sgx-sign -m notary-server.manifest -o notary-server.sgx
mr_enclave=$(gramine-sgx-sigstruct-view --verbose --output-format=json notary-server.sig |jq .mr_enclave)
echo "mrenclave=$mr_enclave" >> "$GITHUB_OUTPUT"
echo "#### sgx mrenclave" | tee >> $GITHUB_STEP_SUMMARY
echo "\`\`\`${mr_enclave}\`\`\`" | tee >> $GITHUB_STEP_SUMMARY
eval "$run"
sleep 5
if [ "$1" ]; then
curl 127.0.0.1:7047/info
else
quote=$(curl 127.0.0.1:7047/info | jq .quote.rawQuote)
echo $quote
echo "quote=$quote" >> $GITHUB_OUTPUT
echo "#### 🔒 signed quote ${quote}" | tee >> $GITHUB_STEP_SUMMARY
echo "${quote}" | tee >> $GITHUB_STEP_SUMMARY
fi

View File

@@ -1,33 +0,0 @@
#!/bin/bash
# Triggered by the "Deploy server" workflow: sends an execution command for
# cd-scripts/modify_proxy.sh via AWS SSM to the proxy server instance.
set -e
GH_OWNER="tlsnotary"
GH_REPO="tlsn"
# Look up the running backend and proxy EC2 instances by their Name tags.
BACKEND_INSTANCE_ID=$(aws ec2 describe-instances --filters Name=tag:Name,Values=[tlsnotary-backend-v1] Name=instance-state-name,Values=[running] --query "Reservations[*].Instances[*][InstanceId]" --output text)
PROXY_INSTANCE_ID=$(aws ec2 describe-instances --filters Name=tag:Name,Values=[tlsnotary-web] Name=instance-state-name,Values=[running] --query "Reservations[*].Instances[*][InstanceId]" --output text)
# Read the release tag and port from the backend instance's tags.
TAGS=$(aws ec2 describe-instances --instance-ids "$BACKEND_INSTANCE_ID" --query 'Reservations[*].Instances[*].Tags')
TAG=$(echo "$TAGS" | jq -r '.[][][] | select(.Key == "stable").Value')
PORT=$(echo "$TAGS" | jq -r '.[][][] | select(.Key == "port").Value')
# Ask SSM to fetch cd-scripts from GitHub at the tagged branch and run
# modify_proxy.sh on the proxy instance.
COMMAND_ID=$(aws ssm send-command --document-name "AWS-RunRemoteScript" --instance-ids "$PROXY_INSTANCE_ID" --parameters '{"sourceType":["GitHub"],"sourceInfo":["{\"owner\":\"'${GH_OWNER}'\", \"repository\":\"'${GH_REPO}'\", \"getOptions\":\"branch:'${TAG}'\", \"path\": \"cd-scripts\"}"],"commandLine":["modify_proxy.sh '${PORT}' '${TAG}' "]}' --output text --query "Command.CommandId")
# Poll until the SSM command reaches a terminal state. Statuses are quoted:
# right after send-command the invocation list can be empty (""), and an
# unquoted test would crash the script under `set -e`. Pending/Delayed are
# pre-execution wait states, not failures.
while true; do
SSM_STATUS=$(aws ssm list-command-invocations --command-id "$COMMAND_ID" --details --query "CommandInvocations[].Status" --output text)
if [ "$SSM_STATUS" = "Success" ]; then
aws ssm list-command-invocations --command-id "$COMMAND_ID" --details --query "CommandInvocations[].CommandPlugins[].{Status:Status,Output:Output}"
echo "Success"
break
elif [ "$SSM_STATUS" != "InProgress" ] && [ "$SSM_STATUS" != "Pending" ] && [ "$SSM_STATUS" != "Delayed" ] && [ -n "$SSM_STATUS" ]; then
echo "Proxy modification failed"
aws ssm list-command-invocations --command-id "$COMMAND_ID" --details --query "CommandInvocations[].CommandPlugins[].{Status:Status,Output:Output}"
exit 1
fi
sleep 2
done
exit 0

View File

@@ -21,17 +21,21 @@ jobs:
- name: Build Docker Image
run: |
docker build -t tlsn-bench . -f ./crates/benches/binary/benches.Dockerfile --build-arg BENCH_TYPE=${{ github.event.inputs.bench_type }}
docker build -t tlsn-bench . -f ./crates/harness/harness.Dockerfile
- name: Run Benchmarks
run: |
docker run --privileged -v ${{ github.workspace }}/crates/benches/binary:/benches tlsn-bench
docker run --privileged -v ./crates/harness/:/benches tlsn-bench bash -c "runner setup; runner --target ${{ github.event.inputs.bench_type }} bench"
- name: Plot Benchmarks
run: |
docker run -v ./crates/harness/:/benches tlsn-bench bash -c "tlsn-harness-plot /benches/bench.toml /benches/metrics.csv --min-max-band --prover-kind ${{ github.event.inputs.bench_type }}"
- name: Upload graphs
uses: actions/upload-artifact@v4
with:
name: benchmark_graphs
path: |
./crates/benches/binary/runtime_vs_latency.html
./crates/benches/binary/runtime_vs_bandwidth.html
./crates/benches/binary/download_size_vs_memory.html
./crates/harness/metrics.csv
./crates/harness/bench.toml
./crates/harness/runtime_vs_latency.html
./crates/harness/runtime_vs_bandwidth.html

View File

@@ -1,86 +0,0 @@
name: Deploy server
on:
push:
branches:
- dev
tags:
- "[v]?[0-9]+.[0-9]+.[0-9]+*"
workflow_dispatch:
inputs:
environment:
description: "Environment"
required: true
default: "nightly"
type: choice
options:
- nightly
- stable
concurrency:
group: ${{ github.workflow }}
cancel-in-progress: false
jobs:
deploy:
runs-on: ubuntu-latest
env:
DATA_ENV: ${{ github.event.inputs.environment || 'nightly' }}
permissions:
id-token: write
contents: read
steps:
- name: Manipulate Environment
id: manipulate
run: |
if [ "${{ github.event_name }}" = "push" ] && [ "$GITHUB_REF_NAME" = "dev" ]; then
echo "env=nightly" >> $GITHUB_OUTPUT
elif [ "${{ github.event_name }}" = "push" ] && [[ "${{ github.ref }}" = "refs/tags/"* ]]; then
echo "env=stable" >> $GITHUB_OUTPUT
elif [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
echo "env=${{ env.DATA_ENV }}" >> $GITHUB_OUTPUT
else
echo "Operation not permitted"
exit 1
fi
- name: Wait for integration test workflow to succeed
if: github.event_name == 'push'
uses: lewagon/wait-on-check-action@v1.3.1
with:
ref: ${{ github.ref }}
# More details [here](https://github.com/lewagon/wait-on-check-action#check-name)
check-name: 'Run tests release build'
repo-token: ${{ secrets.GITHUB_TOKEN }}
# How frequent (in seconds) this job will call GitHub API to check the status of the job specified at 'check-name'
wait-interval: 60
- name: Checkout
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::490752553772:role/tlsn-deploy-slc
role-duration-seconds: 1800
aws-region: eu-central-1
- name: Install stable rust toolchain
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
- name: Cargo build
run: |
.github/scripts/build-server.sh ${{ steps.manipulate.outputs.env }}
- name: Trigger Deployment
run: |
.github/scripts/deploy-server.sh ${{ steps.manipulate.outputs.env }} $GITHUB_REF_NAME
- name: Modify Proxy
if: ${{ steps.manipulate.outputs.env == 'stable' }}
run: |
.github/scripts/modify-proxy.sh

View File

@@ -1,52 +0,0 @@
name: cd
on:
push:
tags:
- "[v]?[0-9]+.[0-9]+.[0-9]+*"
env:
CONTAINER_REGISTRY: ghcr.io
jobs:
build_and_publish_notary_server_image:
name: Build and publish notary server's image
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
steps:
- name: Wait for integration test workflow to succeed
uses: lewagon/wait-on-check-action@v1.3.1
with:
ref: ${{ github.ref }}
# More details [here](https://github.com/lewagon/wait-on-check-action#check-name)
check-name: 'Run tests release build'
repo-token: ${{ secrets.GITHUB_TOKEN }}
# How frequent (in seconds) this job will call GitHub API to check the status of the job specified at 'check-name'
wait-interval: 60
- name: Checkout repository
uses: actions/checkout@v4
- name: Log in to the Container registry
uses: docker/login-action@v2
with:
registry: ${{ env.CONTAINER_REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker image of notary server
id: meta-notary-server
uses: docker/metadata-action@v4
with:
images: ${{ env.CONTAINER_REGISTRY }}/${{ github.repository }}/notary-server
- name: Build and push Docker image of notary server
uses: docker/build-push-action@v4
with:
context: .
push: true
tags: ${{ steps.meta-notary-server.outputs.tags }}
labels: ${{ steps.meta-notary-server.outputs.labels }}
file: ./crates/notary/server/notary-server.Dockerfile

View File

@@ -8,17 +8,41 @@ on:
- "[v]?[0-9]+.[0-9]+.[0-9]+*"
pull_request:
permissions:
id-token: write
contents: read
env:
CARGO_TERM_COLOR: always
CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse
# We need a higher number of parallel rayon tasks than the default (which is 4)
# in order to prevent a deadlock, c.f.
# - https://github.com/tlsnotary/tlsn/issues/548
# - https://github.com/privacy-scaling-explorations/mpz/issues/178
# - https://github.com/privacy-ethereum/mpz/issues/178
# 32 seems to be big enough for the foreseeable future
RAYON_NUM_THREADS: 32
RUST_VERSION: 1.90.0
jobs:
clippy:
name: Clippy
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ env.RUST_VERSION }}
components: clippy
- name: Use caching
uses: Swatinem/rust-cache@v2.7.7
- name: Clippy
run: cargo clippy --keep-going --all-features --all-targets --locked -- -D warnings
fmt:
name: Check formatting
runs-on: ubuntu-latest
@@ -34,10 +58,11 @@ jobs:
components: rustfmt
- name: Use caching
uses: Swatinem/rust-cache@v2.7.3
uses: Swatinem/rust-cache@v2.7.7
- name: Check formatting
run: cargo +nightly fmt --check --all
build-and-test:
name: Build and test
runs-on: ubuntu-latest
@@ -45,23 +70,20 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install stable rust toolchain
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
components: clippy
toolchain: ${{ env.RUST_VERSION }}
- name: Use caching
uses: Swatinem/rust-cache@v2.7.3
- name: Clippy
run: cargo clippy --all-features --all-targets -- -D warnings
uses: Swatinem/rust-cache@v2.7.7
- name: Build
run: cargo build --all-targets
run: cargo build --all-targets --locked
- name: Test
run: cargo test
run: cargo test --no-fail-fast --locked
wasm:
name: Build and Test wasm
runs-on: ubuntu-latest
@@ -69,11 +91,11 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install stable rust toolchain
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@stable
with:
targets: wasm32-unknown-unknown
toolchain: stable
toolchain: ${{ env.RUST_VERSION }}
- name: Install nightly rust toolchain
uses: dtolnay/rust-toolchain@stable
@@ -88,25 +110,29 @@ jobs:
sudo apt-get install -y chromium-chromedriver
- name: Install wasm-pack
run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
# we install a specific version which supports custom profiles
run: cargo install --git https://github.com/rustwasm/wasm-pack.git --rev 32e52ca
- name: Use caching
uses: Swatinem/rust-cache@v2.7.3
uses: Swatinem/rust-cache@v2.7.7
- name: Build harness
working-directory: crates/harness
run: ./build.sh
- name: Run tests
working-directory: crates/harness
run: |
cd crates/wasm-test-runner
./run.sh
./bin/runner setup
./bin/runner --target browser test
- name: Run build
run: |
cd crates/wasm
./build.sh
working-directory: crates/wasm
run: ./build.sh
- name: Dry Run NPM Publish
run: |
cd crates/wasm/pkg
npm publish --dry-run
working-directory: crates/wasm/pkg
run: npm publish --dry-run
- name: Save tlsn-wasm package for tagged builds
if: startsWith(github.ref, 'refs/tags/')
@@ -123,43 +149,53 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install stable rust toolchain
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
toolchain: ${{ env.RUST_VERSION }}
- name: Use caching
uses: Swatinem/rust-cache@v2.7.3
- name: Add custom DNS entry to /etc/hosts for notary TLS test
run: echo "127.0.0.1 tlsnotaryserver.io" | sudo tee -a /etc/hosts
uses: Swatinem/rust-cache@v2.7.7
- name: Run integration tests
run: cargo test --profile tests-integration --workspace --exclude tlsn-tls-client --exclude tlsn-tls-core -- --include-ignored
run: cargo test --locked --profile tests-integration --workspace --exclude tlsn-tls-client --exclude tlsn-tls-core --no-fail-fast -- --include-ignored
coverage:
runs-on: ubuntu-latest
env:
CARGO_TERM_COLOR: always
steps:
- uses: actions/checkout@v4
- name: Install stable rust toolchain
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
toolchain: ${{ env.RUST_VERSION }}
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Generate code coverage
run: cargo llvm-cov --all-features --workspace --lcov --output-path lcov.info
run: cargo llvm-cov --all-features --workspace --locked --lcov --output-path lcov.info
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: lcov.info
fail_ci_if_error: true
# trigger-deployment:
# doing this here due to feedback @ https://github.com/tlsnotary/tlsn/pull/631#issuecomment-2415806267
# needs: tests-integration
# uses: ./.github/workflows/tee-cd.yml
# with:
# # what this is supposed to do -> $ref is the tag: e.g., v0.1.0-alpha.7; pass the $ref string to the cd script and update reverse proxy / deploy
# ref: ${{ github.ref_name }}
create-release-draft:
name: Create Release Draft
needs: build-and-test
runs-on: ubuntu-latest
permissions:
contents: write
if: startsWith(github.ref, 'refs/tags/v') && contains(github.ref, '.')
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Create GitHub Release Draft
uses: softprops/action-gh-release@v2
with:
draft: true
tag_name: ${{ github.ref_name }}
prerelease: true
generate_release_notes: true

View File

@@ -6,22 +6,57 @@ on:
tag:
description: 'Tag to publish to NPM'
required: true
default: '0.1.0-alpha.8-pre'
default: 'v0.1.0-alpha.13-pre'
jobs:
release:
runs-on: ubuntu-latest
env:
GH_TOKEN: ${{ github.token }}
steps:
- name: Download build artifacts
uses: actions/download-artifact@v4
with:
name: ${{ github.event.inputs.tag }}-tlsn-wasm-pkg
path: tlsn-wasm-pkg
- name: Find and download tlsn-wasm build from the tagged ci workflow
id: find_run
run: |
# Find the workflow run ID for the tag
RUN_ID=$(gh api \
-H "Accept: application/vnd.github+json" \
"/repos/tlsnotary/tlsn/actions/workflows/ci.yml/runs?per_page=100" \
--jq '.workflow_runs[] | select(.head_branch == "${{ github.event.inputs.tag }}") | .id' | sort | tail -1)
- name: NPM Publish for tlsn-wasm
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
run: |
cd tlsn-wasm-pkg
npm publish
if [ -z "$RUN_ID" ]; then
echo "No run found for tag ${{ github.event.inputs.tag }}"
exit 1
fi
echo "Found run: $RUN_ID"
echo "run_id=$RUN_ID" >> "$GITHUB_OUTPUT"
# Find the download URL for the build artifact
DOWNLOAD_URL=$(gh api \
-H "Accept: application/vnd.github+json" \
/repos/tlsnotary/tlsn/actions/runs/${RUN_ID}/artifacts \
--jq '.artifacts[] | select(.name == "${{ github.event.inputs.tag }}-tlsn-wasm-pkg") | .archive_download_url')
if [ -z "$DOWNLOAD_URL" ]; then
echo "No download url for build artifact ${{ github.event.inputs.tag }}-tlsn-wasm-pkg in run $RUN_ID"
exit 1
fi
# Download and unzip the build artifact
mkdir tlsn-wasm-pkg
curl -L -H "Authorization: Bearer ${GH_TOKEN}" \
-H "Accept: application/vnd.github+json" \
-o tlsn-wasm-pkg.zip \
${DOWNLOAD_URL}
unzip -q tlsn-wasm-pkg.zip -d tlsn-wasm-pkg
- name: NPM Publish for tlsn-wasm
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
run: |
cd tlsn-wasm-pkg
echo "//registry.npmjs.org/:_authToken=${NODE_AUTH_TOKEN}" > .npmrc
npm publish
rm .npmrc

View File

@@ -4,7 +4,6 @@ on:
push:
branches: [dev]
pull_request:
branches: [dev]
env:
CARGO_TERM_COLOR: always
@@ -22,18 +21,12 @@ jobs:
toolchain: stable
- name: "rustdoc"
run: cargo doc -p tlsn-core -p tlsn-prover -p tlsn-verifier --no-deps --all-features
# --target-dir ${GITHUB_WORKSPACE}/docs
# https://dev.to/deciduously/prepare-your-rust-api-docs-for-github-pages-2n5i
- name: "Add index file -> tlsn_prover"
run: |
echo "<meta http-equiv=\"refresh\" content=\"0; url=tlsn_prover\">" > target/doc/index.html
run: crates/wasm/build-docs.sh
- name: Deploy
uses: peaceiris/actions-gh-pages@v3
if: ${{ github.ref == 'refs/heads/dev' }}
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: target/doc/
publish_dir: target/wasm32-unknown-unknown/doc/
# cname: rustdocs.tlsnotary.org

View File

@@ -1,156 +0,0 @@
name: azure-tee-release
permissions:
contents: read
id-token: write
attestations: write
on:
workflow_dispatch:
inputs:
ref:
description: 'git branch'
required: false
default: 'dev'
type: string
#on:
# release:
# types: [published]
# branches:
# - 'releases/**'
env:
GIT_COMMIT_HASH: ${{ github.event.pull_request.head.sha || github.sha }}
GIT_COMMIT_TIMESTAMP: ${{ github.event.repository.updated_at}}
REGISTRY: notaryserverbuilds.azurecr.io
IMAGE_NAME: ${{ github.repository }}
jobs:
update-reverse-proxy:
permissions:
contents: write
environment: tee
runs-on: [self-hosted, linux]
outputs:
teeport: ${{ steps.portbump.outputs.newport}}
deploy: ${{ steps.portbump.outputs.deploy}}
steps:
- name: checkout repository
uses: actions/checkout@v4
- name: update caddyfile
id: portbump
env:
RELEASE_TAG: ${{ github.event.release.tag_name || inputs.ref }}
run: |
echo "tag: $RELEASE_TAG"
NEXT_PORT=$(bash cd-scripts/tee/azure/updateproxy.sh 'cd-scripts/tee/azure/Caddyfile' $RELEASE_TAG)
echo "newport=$NEXT_PORT" >> $GITHUB_OUTPUT
echo "new deploy port: $NEXT_PORT 🚀" >> $GITHUB_STEP_SUMMARY
chmod +r -R cd-scripts/tee/azure/
- name: Deploy updated Caddyfile to server
if: ${{ steps.portbump.outputs.deploy == 'new' }}
uses: appleboy/scp-action@v0.1.7
with:
host: ${{ secrets.AZURE_TEE_PROD_HOST }}
username: ${{ secrets.AZURE_PROD_TEE_USERNAME }}
key: ${{ secrets.AZURE_TEE_PROD_KEY }}
source: "cd-scripts/tee/azure/Caddyfile"
target: "~/"
- name: Reload Caddy on server
if: ${{ steps.portbump.outputs.deploy == 'new' }}
uses: appleboy/ssh-action@v1.0.3
with:
host: ${{ secrets.AZURE_TEE_PROD_HOST }}
username: ${{ secrets.AZURE_PROD_TEE_USERNAME }}
key: ${{ secrets.AZURE_TEE_PROD_KEY }}
script: |
sudo cp ~/cd-scripts/tee/azure/Caddyfile /etc/caddy/Caddyfile
sudo systemctl reload caddy
build-measure:
environment: tee
runs-on: [self-hosted, linux]
needs: [ update-reverse-proxy ]
container:
image: notaryserverbuilds.azurecr.io/prod/gramine
credentials:
username: notaryserverbuilds
password: ${{ secrets.AZURE_CR_BUILDS_PW }}
env:
GIT_COMMIT_HASH: ${{ github.event.pull_request.head.sha || github.sha }}
volumes:
- /var/run/aesmd/aesm.socket:/var/run/aesmd/aesm.socket
options: "--device /dev/sgx_enclave"
steps:
- name: get code
uses: actions/checkout@v4
- name: sccache
if: github.event_name != 'release'
# && github.event_name != 'workflow_dispatch'
uses: mozilla-actions/sccache-action@v0.0.6
- name: set rust env for scc
if: github.event_name != 'release'
# && github.event_name != 'workflow_dispatch'
run: |
echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV
echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
- name: reverse proxy port
run: echo "${{needs.update-reverse-proxy.outputs.teeport}}" | tee >> $GITHUB_STEP_SUMMARY
- name: get hardware measurement
working-directory: ${{ github.workspace }}/crates/notary/server/tee
run: |
chmod +x ../../../../.github/scripts/gramine.sh && ../../../../.github/scripts/gramine.sh sgx
artifact-deploy:
environment: tee
runs-on: [self-hosted, linux]
needs: [ build-measure, update-reverse-proxy ]
steps:
- name: auth to registry
uses: docker/login-action@v3
with:
registry: notaryserverbuilds.azurecr.io
username: notaryserverbuilds
password: ${{ secrets.AZURE_CR_BUILDS_PW }}
- name: get code
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Get Git commit timestamps
run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV
- name: Build and push
id: deploypush
uses: docker/build-push-action@v6
with:
provenance: mode=max
no-cache: true
context: ${{ github.workspace }}/crates/notary/server/tee
push: true
tags: notaryserverbuilds.azurecr.io/prod/notary-sgx:${{ env.GIT_COMMIT_HASH }}
labels: ${{needs.update-reverse-proxy.outputs.teeport}}
env:
# reproducible builds: https://github.com/moby/buildkit/blob/master/docs/build-repro.md#source_date_epoch
SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }}
- name: Generate SBOM
uses: anchore/sbom-action@v0
with:
image: notaryserverbuilds.azurecr.io/prod/notary-sgx:${{ env.GIT_COMMIT_HASH }}
format: 'cyclonedx-json'
output-file: 'sbom.cyclonedx.json'
# attestation section ::
# https://docs.docker.com/build/ci/github-actions/attestations/
- name: Attest
uses: actions/attest-build-provenance@v1
with:
subject-name: notaryserverbuilds.azurecr.io/prod/notary-sgx
subject-digest: ${{ steps.deploypush.outputs.digest }}
push-to-registry: true
-
name: run
run: |
if [[ ${{ needs.update-reverse-proxy.outputs.deploy }} == 'new' ]]; then
docker run --device /dev/sgx_enclave --device /dev/sgx_provision --volume=/var/run/aesmd/aesm.socket:/var/run/aesmd/aesm.socket -p ${{needs.update-reverse-proxy.outputs.teeport}}:7047 notaryserverbuilds.azurecr.io/prod/notary-sgx:${{ env.GIT_COMMIT_HASH }} &
else
old=$(docker ps --filter "name=${{needs.update-reverse-proxy.outputs.teeport}}")
docker rm -f $old
docker run --name ${{needs.update-reverse-proxy.outputs.teeport}} --device /dev/sgx_enclave --device /dev/sgx_provision --volume=/var/run/aesmd/aesm.socket:/var/run/aesmd/aesm.socket -p ${{needs.update-reverse-proxy.outputs.teeport}}:7047 notaryserverbuilds.azurecr.io/prod/notary-sgx:${{ env.GIT_COMMIT_HASH }} &
fi

View File

@@ -1,42 +0,0 @@
name: tee-build
on:
push:
branches: [ "dev" ]
pull_request:
branches: [ "dev" ]
concurrency:
group: ${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
build-measure-emulated:
environment: tee
runs-on: [self-hosted, linux]
container:
image: notaryserverbuilds.azurecr.io/prod/gramine
credentials:
username: notaryserverbuilds
password: ${{ secrets.AZURE_CR_BUILDS_PW }}
env:
GIT_COMMIT_HASH: ${{ github.event.pull_request.head.sha || github.sha }}
steps:
- name: get code
uses: actions/checkout@v4
- name: sccache
if: github.event_name != 'release'
# && github.event_name != 'workflow_dispatch'
uses: mozilla-actions/sccache-action@v0.0.6
- name: set rust env for scc
if: github.event_name != 'release'
# && github.event_name != 'workflow_dispatch'
run: |
echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV
echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
- name: get emulated measurement (call gramine.sh without the sgx arg)
working-directory: ${{ github.workspace }}/crates/notary/server/tee
run: |
# this fails the current CI because gramine.sh is part of this PR, so the file doesn't exist yet
# bash .github/scripts/gramine.sh

24
.github/workflows/updatemain.yml vendored Normal file
View File

@@ -0,0 +1,24 @@
# Keeps the main branch pointing at the latest published release: whenever a
# GitHub release is published, fast-forward main to the release tag.
name: Fast-forward main branch to published release tag
on:
release:
types: [published]
jobs:
ff-main-to-release:
runs-on: ubuntu-latest
permissions:
# Needed to push the fast-forwarded main branch back to the repository.
contents: write
steps:
- name: Checkout main
uses: actions/checkout@v4
with:
ref: main
# --ff-only guarantees the workflow fails (rather than creating a merge
# commit) if main has diverged from the release tag.
- name: Fast-forward main to release tag
run: |
tag="${{ github.event.release.tag_name }}"
git fetch origin "refs/tags/$tag:refs/tags/$tag"
git merge --ff-only "refs/tags/$tag"
git push origin main

6
.gitignore vendored
View File

@@ -3,10 +3,6 @@
debug/
target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
@@ -32,4 +28,4 @@ Cargo.lock
*.log
# metrics
*.csv
*.csv

View File

@@ -16,6 +16,8 @@ keywords.
Try to do one pull request per change.
**Disclaimer**: While we appreciate all contributions, we do not prioritize minor grammatical fixes (e.g., correcting typos, rewording sentences) unless they significantly improve clarity in technical documentation. These contributions can be a distraction for the team. If you notice a grammatical error, please let us know on our Discord.
## Linting
Before a Pull Request (PR) can be merged, the Continuous Integration (CI) pipeline automatically lints all code using [Clippy](https://doc.rust-lang.org/stable/clippy/usage.html). To ensure your code is free of linting issues before creating a PR, run the following command:
@@ -59,3 +61,21 @@ Comments for function arguments must adhere to this pattern:
/// * `arg2` - The second argument.
pub fn compute(...
```
## Cargo.lock
We check in `Cargo.lock` to ensure reproducible builds. It must be updated whenever `Cargo.toml` changes. The TLSNotary team typically updates `Cargo.lock` in a separate commit after dependency changes.
If you want to hide `Cargo.lock` changes from your local `git diff`, run:
```sh
git update-index --assume-unchanged Cargo.lock
```
To start tracking changes again:
```sh
git update-index --no-assume-unchanged Cargo.lock
```
> ⚠️ Note: This only affects your local view. The file is still tracked in the repository and will be checked and used in CI.

9076
Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -1,101 +1,107 @@
[workspace]
members = [
"crates/benches/binary",
"crates/benches/browser/core",
"crates/benches/browser/native",
"crates/benches/browser/wasm",
"crates/benches/library",
"crates/common",
"crates/components/aead",
"crates/components/block-cipher",
"crates/attestation",
"crates/components/deap",
"crates/components/cipher",
"crates/components/hmac-sha256",
"crates/components/hmac-sha256-circuits",
"crates/components/key-exchange",
"crates/components/stream-cipher",
"crates/components/universal-hash",
"crates/core",
"crates/data-fixtures",
"crates/examples",
"crates/formats",
"crates/notary/client",
"crates/notary/server",
"crates/notary/tests-integration",
"crates/prover",
"crates/server-fixture/certs",
"crates/server-fixture/server",
"crates/tests-integration",
"crates/tls/backend",
"crates/tls/client",
"crates/tls/client-async",
"crates/tls/core",
"crates/tls/mpc",
"crates/mpc-tls",
"crates/tls/server-fixture",
"crates/verifier",
"crates/wasm",
"crates/wasm-test-runner",
"crates/harness/core",
"crates/harness/executor",
"crates/harness/runner",
"crates/harness/plot",
"crates/tlsn",
]
resolver = "2"
[workspace.lints.rust]
# unsafe_code = "forbid"
[workspace.lints.clippy]
# enum_glob_use = "deny"
[profile.tests-integration]
inherits = "release"
opt-level = 1
[profile.wasm]
inherits = "release"
lto = true
panic = "abort"
codegen-units = 1
[workspace.dependencies]
notary-client = { path = "crates/notary/client" }
notary-server = { path = "crates/notary/server" }
tls-server-fixture = { path = "crates/tls/server-fixture" }
tlsn-aead = { path = "crates/components/aead" }
tlsn-benches-browser-core = { path = "crates/benches/browser/core" }
tlsn-benches-browser-native = { path = "crates/benches/browser/native" }
tlsn-benches-library = { path = "crates/benches/library" }
tlsn-block-cipher = { path = "crates/components/block-cipher" }
tlsn-common = { path = "crates/common" }
tlsn-attestation = { path = "crates/attestation" }
tlsn-cipher = { path = "crates/components/cipher" }
tlsn-core = { path = "crates/core" }
tlsn-data-fixtures = { path = "crates/data-fixtures" }
tlsn-deap = { path = "crates/components/deap" }
tlsn-formats = { path = "crates/formats" }
tlsn-hmac-sha256 = { path = "crates/components/hmac-sha256" }
tlsn-hmac-sha256-circuits = { path = "crates/components/hmac-sha256-circuits" }
tlsn-key-exchange = { path = "crates/components/key-exchange" }
tlsn-prover = { path = "crates/prover" }
tlsn-mpc-tls = { path = "crates/mpc-tls" }
tlsn-server-fixture = { path = "crates/server-fixture/server" }
tlsn-server-fixture-certs = { path = "crates/server-fixture/certs" }
tlsn-stream-cipher = { path = "crates/components/stream-cipher" }
tlsn-tls-backend = { path = "crates/tls/backend" }
tlsn-tls-client = { path = "crates/tls/client" }
tlsn-tls-client-async = { path = "crates/tls/client-async" }
tlsn-tls-core = { path = "crates/tls/core" }
tlsn-tls-mpc = { path = "crates/tls/mpc" }
tlsn-universal-hash = { path = "crates/components/universal-hash" }
tlsn-utils = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "0040a00" }
tlsn-utils-aio = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "0040a00" }
tlsn-verifier = { path = "crates/verifier" }
tlsn-utils = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
tlsn-harness-core = { path = "crates/harness/core" }
tlsn-harness-executor = { path = "crates/harness/executor" }
tlsn-harness-runner = { path = "crates/harness/runner" }
tlsn-wasm = { path = "crates/wasm" }
tlsn = { path = "crates/tlsn" }
mpz-circuits = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-common = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-garble = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-garble-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-ole = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-ot = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-share-conversion = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-circuits = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-memory-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-common = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-vm-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-garble = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-garble-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-ole = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-ot = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-share-conversion = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-fields = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-zk = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
mpz-hash = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
serio = { version = "0.1" }
spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "0040a00" }
uid-mux = { version = "0.1", features = ["serio"] }
websocket-relay = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "0040a00" }
rangeset = { version = "0.2" }
serio = { version = "0.2" }
spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
uid-mux = { version = "0.2" }
websocket-relay = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
aead = { version = "0.4" }
aes = { version = "0.8" }
aes-gcm = { version = "0.9" }
anyhow = { version = "1.0" }
async-trait = { version = "0.1" }
async-tungstenite = { version = "0.25" }
axum = { version = "0.7" }
axum = { version = "0.8" }
bcs = { version = "0.1" }
bincode = { version = "1.3" }
blake3 = { version = "1.5" }
bon = { version = "3.6" }
bytes = { version = "1.4" }
cfg-if = { version = "1" }
chromiumoxide = { version = "0.7" }
chrono = { version = "0.4" }
cipher = { version = "0.4" }
clap = { version = "4.5" }
criterion = { version = "0.5" }
ctr = { version = "0.9" }
derive_builder = { version = "0.12" }
@@ -104,45 +110,59 @@ elliptic-curve = { version = "0.13" }
enum-try-as-inner = { version = "0.1" }
env_logger = { version = "0.10" }
futures = { version = "0.3" }
futures-rustls = { version = "0.26" }
futures-util = { version = "0.3" }
futures-rustls = { version = "0.25" }
generic-array = { version = "0.14" }
ghash = { version = "0.5" }
hex = { version = "0.4" }
hmac = { version = "0.12" }
http = { version = "1.1" }
http-body-util = { version = "0.1" }
hyper = { version = "1.1" }
hyper-util = { version = "0.1" }
ipnet = { version = "2.11" }
inventory = { version = "0.3" }
itybity = { version = "0.2" }
js-sys = { version = "0.3" }
k256 = { version = "0.13" }
log = { version = "0.4" }
once_cell = { version = "1.19" }
opaque-debug = { version = "0.3" }
p256 = { version = "0.13" }
pkcs8 = { version = "0.10" }
pin-project-lite = { version = "0.2" }
rand = { version = "0.8" }
rand_chacha = { version = "0.3" }
rand_core = { version = "0.6" }
pollster = { version = "0.4" }
rand = { version = "0.9" }
rand_chacha = { version = "0.9" }
rand_core = { version = "0.9" }
rand06-compat = { version = "0.1" }
rayon = { version = "1.10" }
regex = { version = "1.10" }
ring = { version = "0.17" }
rs_merkle = { git = "https://github.com/tlsnotary/rs-merkle.git", rev = "85f3e82" }
rstest = { version = "0.17" }
rustls = { version = "0.21" }
rustls-pemfile = { version = "1.0" }
rustls-webpki = { version = "0.103" }
rustls-pki-types = { version = "1.12" }
sct = { version = "0.7" }
semver = { version = "1.0" }
serde = { version = "1.0" }
serde_json = { version = "1.0" }
sha2 = { version = "0.10" }
signature = { version = "2.2" }
thiserror = { version = "1.0" }
tiny-keccak = { version = "2.0" }
tokio = { version = "1.38" }
tokio-rustls = { version = "0.24" }
tokio-util = { version = "0.7" }
toml = { version = "0.8" }
tower = { version = "0.5" }
tower-http = { version = "0.5" }
tower-service = { version = "0.3" }
tracing = { version = "0.1" }
tracing-subscriber = { version = "0.3" }
uuid = { version = "1.4" }
wasm-bindgen = { version = "0.2" }
wasm-bindgen-futures = { version = "0.4" }
web-spawn = { version = "0.2" }
web-time = { version = "0.2" }
webpki = { version = "0.22" }
webpki-roots = { version = "0.26" }
ws_stream_tungstenite = { version = "0.13" }
webpki-roots = { version = "1.0" }
webpki-root-certs = { version = "1.0" }
ws_stream_wasm = { version = "0.7.5" }
zeroize = { version = "1.8" }

View File

@@ -12,7 +12,7 @@
[actions-url]: https://github.com/tlsnotary/tlsn/actions?query=workflow%3Aci+branch%3Adev
[Website](https://tlsnotary.org) |
[Documentation](https://docs.tlsnotary.org) |
[Documentation](https://tlsnotary.org/docs/intro) |
[API Docs](https://tlsnotary.github.io/tlsn) |
[Discord](https://discord.gg/9XwESXtcN7)
@@ -44,12 +44,9 @@ at your option.
## Directory
- [examples](./crates/examples/): Examples on how to use the TLSNotary protocol.
- [tlsn-prover](./crates/prover/): The library for the prover component.
- [tlsn-verifier](./crates/verifier/): The library for the verifier component.
- [notary](./crates/notary/): Implements the [notary server](https://docs.tlsnotary.org/intro.html#tls-verification-with-a-general-purpose-notary) and its client.
- [components](./crates/components/): Houses low-level libraries.
- [tlsn](./crates/tlsn/): The TLSNotary library.
This repository contains the source code for the Rust implementation of the TLSNotary protocol. For additional tools and implementations related to TLSNotary, visit <https://github.com/tlsnotary>. This includes repositories such as [`tlsn-js`](https://github.com/tlsnotary/tlsn-js), [`tlsn-extension`](https://github.com/tlsnotary/tlsn-extension), [`explorer`](https://github.com/tlsnotary/explorer), among others.
This repository contains the source code for the Rust implementation of the TLSNotary protocol. For additional tools and implementations related to TLSNotary, visit <https://github.com/tlsnotary>. This includes repositories such as [`tlsn-js`](https://github.com/tlsnotary/tlsn-js), [`tlsn-extension`](https://github.com/tlsnotary/tlsn-extension), among others.
## Development

View File

@@ -1,31 +0,0 @@
# AWS CodeDeploy application specification file
version: 0.0
os: linux
files:
- source: /
destination: /home/ubuntu/tlsn
permissions:
- object: /home/ubuntu/tlsn
owner: ubuntu
group: ubuntu
hooks:
BeforeInstall:
- location: cd-scripts/appspec-scripts/before_install.sh
timeout: 300
runas: ubuntu
AfterInstall:
- location: cd-scripts/appspec-scripts/after_install.sh
timeout: 300
runas: ubuntu
ApplicationStart:
- location: cd-scripts/appspec-scripts/start_app.sh
timeout: 300
runas: ubuntu
ApplicationStop:
- location: cd-scripts/appspec-scripts/stop_app.sh
timeout: 300
runas: ubuntu
ValidateService:
- location: cd-scripts/appspec-scripts/validate_app.sh
timeout: 300
runas: ubuntu

View File

@@ -1,35 +0,0 @@
#!/bin/bash
set -e
TAG=$(curl http://169.254.169.254/latest/meta-data/tags/instance/stable)
APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
if [ $APP_NAME = "stable" ]; then
# Prepare directories for stable versions
sudo mkdir ~/${APP_NAME}_${TAG}
sudo mv ~/tlsn ~/${APP_NAME}_${TAG}
sudo mkdir -p ~/${APP_NAME}_${TAG}/tlsn/notary/target/release
sudo chown -R ubuntu.ubuntu ~/${APP_NAME}_${TAG}
# Download .git directory
aws s3 cp s3://tlsn-deploy/$APP_NAME/.git ~/${APP_NAME}_${TAG}/tlsn/.git --recursive
# Download binary
aws s3 cp s3://tlsn-deploy/$APP_NAME/notary-server ~/${APP_NAME}_${TAG}/tlsn/notary/target/release
chmod +x ~/${APP_NAME}_${TAG}/tlsn/notary/target/release/notary-server
else
# Prepare directory for dev
sudo rm -rf ~/$APP_NAME/tlsn
sudo mv ~/tlsn/ ~/$APP_NAME
sudo mkdir -p ~/$APP_NAME/tlsn/notary/target/release
sudo chown -R ubuntu.ubuntu ~/$APP_NAME
# Download .git directory
aws s3 cp s3://tlsn-deploy/$APP_NAME/.git ~/$APP_NAME/tlsn/.git --recursive
# Download binary
aws s3 cp s3://tlsn-deploy/$APP_NAME/notary-server ~/$APP_NAME/tlsn/notary/target/release
chmod +x ~/$APP_NAME/tlsn/notary/target/release/notary-server
fi
exit 0

View File

@@ -1,20 +0,0 @@
#!/bin/bash
set -e
APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
if [ $APP_NAME = "stable" ]; then
VERSIONS_DEPLOYED=$(find ~/ -maxdepth 1 -type d -name 'stable_*')
VERSIONS_DEPLOYED_COUNT=$(echo $VERSIONS_DEPLOYED | wc -w)
if [ $VERSIONS_DEPLOYED_COUNT -gt 3 ]; then
echo "More than 3 stable versions found"
exit 1
fi
else
if [ ! -d ~/$APP_NAME ]; then
mkdir ~/$APP_NAME
fi
fi
exit 0

View File

@@ -1,26 +0,0 @@
#!/bin/bash
# Port tagging will also be used to manipulate proxy server via modify_proxy.sh script
set -ex
TAG=$(curl http://169.254.169.254/latest/meta-data/tags/instance/stable)
APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
if [ $APP_NAME = "stable" ]; then
# Check if all stable ports are in use. If true, terminate the deployment
[[ $(netstat -lnt4 | egrep -c ':(7047|7057|7067)\s') -eq 3 ]] && { echo "All stable ports are in use"; exit 1; }
STABLE_PORTS="7047 7057 7067"
for PORT in $STABLE_PORTS; do
PORT_LISTENING=$(netstat -lnt4 | egrep -cw $PORT || true)
if [ $PORT_LISTENING -eq 0 ]; then
~/${APP_NAME}_${TAG}/tlsn/notary/target/release/notary-server --config-file ~/.notary/${APP_NAME}_${PORT}/config.yaml &> ~/${APP_NAME}_${TAG}/tlsn/notary.log &
# Create a tag that will be used for service validation
INSTANCE_ID=$(curl http://169.254.169.254/latest/meta-data/instance-id)
aws ec2 create-tags --resources $INSTANCE_ID --tags "Key=port,Value=$PORT"
break
fi
done
else
~/$APP_NAME/tlsn/notary/target/release/notary-server --config-file ~/.notary/$APP_NAME/config.yaml &> ~/$APP_NAME/tlsn/notary.log &
fi
exit 0

View File

@@ -1,36 +0,0 @@
#!/bin/bash
# AWS CodeDeploy hook sequence: https://docs.aws.amazon.com/codedeploy/latest/userguide/reference-appspec-file-structure-hooks.html#appspec-hooks-server
set -ex
APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
if [ $APP_NAME = "stable" ]; then
VERSIONS_DEPLOYED=$(find ~/ -maxdepth 1 -type d -name 'stable_*')
VERSIONS_DEPLOYED_COUNT=$(echo $VERSIONS_DEPLOYED | wc -w)
# Remove oldest version if exists
if [ $VERSIONS_DEPLOYED_COUNT -eq 3 ]; then
echo "Candidate versions to be removed:"
OLDEST_DIR=""
OLDEST_TIME=""
for DIR in $VERSIONS_DEPLOYED; do
TIME=$(stat -c %W $DIR)
if [ -z $OLDEST_TIME ] || [ $TIME -lt $OLDEST_TIME ]; then
OLDEST_DIR=$DIR
OLDEST_TIME=$TIME
fi
done
echo "The oldest version is running under: $OLDEST_DIR"
PID=$(lsof $OLDEST_DIR/tlsn/notary/target/release/notary-server | awk '{ print $2 }' | tail -1)
kill -15 $PID || true
rm -rf $OLDEST_DIR
fi
else
PID=$(pgrep -f notary.*$APP_NAME)
kill -15 $PID || true
fi
exit 0

View File

@@ -1,21 +0,0 @@
#!/bin/bash
set -e
# Verify proccess is running
APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
# Verify that listening sockets exist
if [ $APP_NAME = "stable" ]; then
PORT=$(curl http://169.254.169.254/latest/meta-data/tags/instance/port)
ps -ef | grep notary.*$APP_NAME.*$PORT | grep -v grep
[ $? -eq 0 ] || exit 1
else
PORT=7048
pgrep -f notary.*$APP_NAME
[ $? -eq 0 ] || exit 1
fi
EXPOSED_PORTS=$(netstat -lnt4 | egrep -cw $PORT)
[ $EXPOSED_PORTS -eq 1 ] || exit 1
exit 0

View File

@@ -1,14 +0,0 @@
#!/bin/bash
# This script is executed on proxy side, in order to assign the available port to latest stable version
set -e
PORT=$1
VERSION=$2
sed -i "/# Port $PORT/{n;s/v[0-9].[0-9].[0-9]-[a-z]*.[0-9]*/$VERSION/g}" /etc/nginx/sites-available/tlsnotary-pse
sed -i "/# Port $PORT/{n;n;s/v[0-9].[0-9].[0-9]-[a-z]*.[0-9]*/$VERSION/g}" /etc/nginx/sites-available/tlsnotary-pse
nginx -t
nginx -s reload
exit 0

View File

@@ -1,90 +0,0 @@
#
# global block =>
# email is for acme
# # # #
{
key_type p256
email mac@pse.dev # for acme
servers {
metrics
}
log {
output stdout
format console {
time_format common_log
time_local
}
level DEBUG
}
}
#
# server block, acme turned on (default when using dns)
# reverse proxy with fail_duration + lb will try upstreams sequentially (fallback)
# e.g. => `reverse_proxy :4000 :5000 10.10.10.10:1000 tlsnotary.org:443`
# will always deliver to :4000 if its up, but if :4000 is down for more than 4s it trys the next one
# # # #
notary.codes {
handle_path /v0.1.0-alpha.8* {
reverse_proxy :4003 :3333 {
lb_try_duration 4s
fail_duration 10s
lb_policy header X-Upstream {
fallback first
}
}
}
handle_path /v0.1.0-alpha.7* {
reverse_proxy :4002 :3333 {
lb_try_duration 4s
fail_duration 10s
lb_policy header X-Upstream {
fallback first
}
}
}
handle_path /v0.1.0-alpha.6* {
reverse_proxy :4001 :3333 {
lb_try_duration 4s
fail_duration 10s
lb_policy header X-Upstream {
fallback first
}
}
}
handle_path /nightly* {
reverse_proxy :3333 {
lb_try_duration 4s
fail_duration 10s
lb_policy header X-Upstream {
fallback first
}
}
}
handle_path /proxy* {
reverse_proxy :55688 proxy.notary.codes:443 {
lb_try_duration 4s
fail_duration 10s
lb_policy header X-Upstream {
fallback first
}
}
}
handle {
root * /srv
file_server
}
handle_errors {
@404 {
expression {http.error.status_code} == 404
}
rewrite @404 /index.html
file_server
}
}
}

View File

@@ -1,7 +0,0 @@
global:
scrape_interval: 15s
scrape_configs:
- job_name: caddy
static_configs:
- targets: ['localhost:2019']

View File

@@ -1,84 +0,0 @@
#!/bin/sh
# Variables (Update these as needed)x
CADDYFILE=${1:-/etc/caddy/Caddyfile} # Path to your Caddyfile
GIT_COMMIT_HASH=${2:-dev}
BASE_PORT=6061 # The starting port for your reverse_proxy directives
# Function to check if handle_path for the given commit hash exists
handle_path_exists() {
local commit_hash=$1
#echo "handle_path_exists $1 -- CADDYFILE: $CADDYFILE"
grep -q "handle_path /${commit_hash}\*" "$CADDYFILE"
}
# Function to extract the port for a given commit hash
extract_port_for_commit() {
local commit_hash=$1
#echo "extract_port_for_commit $1 -- 2: $2"
grep -Pzo "handle_path /${commit_hash}\* \{\n\s*reverse_proxy :(.*) " "$CADDYFILE" | grep -Poa "reverse_proxy :(.*) " | awk '{print $2}'
}
# Function to get the last port in the Caddyfile
get_last_port() {
grep -Po "reverse_proxy :([0-9]+)" "$CADDYFILE" | awk -F: '{print $2}' | sort -n | tail -1
}
# Function to add a new handle_path block with incremented port inside notary.codes block
add_new_handle_path() {
local new_port=$1
local commit_hash=$2
# Use a temporary file for inserting the handle_path block
tmp_file=$(mktemp)
# Add the new handle_path in the notary.codes block
awk -v port="$new_port" -v hash="$commit_hash" '
/notary\.codes \{/ {
print;
print " handle_path /" hash "* {";
print " reverse_proxy :" port " :3333 {";
print " lb_try_duration 4s";
print " fail_duration 10s";
print " lb_policy header X-Upstream {";
print " fallback first";
print " }";
print " }";
print " }";
next;
}
{ print }
' "$CADDYFILE" > "$tmp_file"
# Overwrite the original Caddyfile with the updated content
mv "$tmp_file" "$CADDYFILE"
}
#git action perms +r
chmod 664 cd-scripts/tee/azure/Caddyfile
# Check if the commit hash already exists in a handle_path
if handle_path_exists "$GIT_COMMIT_HASH"; then
existing_port=$(extract_port_for_commit "$GIT_COMMIT_HASH")
echo "${existing_port:1}"
exit 0
else
# Get the last port used and increment it
last_port=$(get_last_port)
if [[ -z "$last_port" ]]; then
last_port=$BASE_PORT
fi
new_port=$((last_port + 1))
# Add the new handle_path block inside notary.codes block
add_new_handle_path "$new_port" "$GIT_COMMIT_HASH"
echo $new_port
# commit the changes
git config user.name github-actions
git config user.email github-actions@github.com
git add -A
git commit --quiet --allow-empty -m "azure tee reverse proxy => port:$NEXT_PORT/${RELEASE_TAG}"
git push --quiet
echo "deploy=new" >> $GITHUB_OUTPUT
exit 0
fi

View File

@@ -0,0 +1,39 @@
[package]
name = "tlsn-attestation"
version = "0.1.0-alpha.13-pre"
edition = "2024"
[features]
default = []
fixtures = ["tlsn-core/fixtures", "dep:tlsn-data-fixtures"]
[dependencies]
tlsn-tls-core = { workspace = true }
tlsn-core = { workspace = true }
tlsn-data-fixtures = { workspace = true, optional = true }
bcs = { workspace = true }
blake3 = { workspace = true }
p256 = { workspace = true, features = ["serde"] }
k256 = { workspace = true }
opaque-debug = { workspace = true }
rand = { workspace = true }
serde = { workspace = true, features = ["derive"] }
thiserror = { workspace = true }
tiny-keccak = { workspace = true, features = ["keccak"] }
[dev-dependencies]
alloy-primitives = { version = "1.3.1", default-features = false }
alloy-signer = { version = "1.0", default-features = false }
alloy-signer-local = { version = "1.0", default-features = false }
rand06-compat = { workspace = true }
rstest = { workspace = true }
tlsn-core = { workspace = true, features = ["fixtures"] }
tlsn-data-fixtures = { workspace = true }
[lints]
workspace = true
[[test]]
name = "api"
required-features = ["fixtures"]

View File

@@ -1,34 +1,36 @@
use std::error::Error;
use rand::{thread_rng, Rng};
use rand::{Rng, rng};
use tlsn_core::{
connection::{ConnectionInfo, ServerEphemKey},
hash::HashAlgId,
transcript::TranscriptCommitment,
};
use crate::{
attestation::{
Attestation, AttestationConfig, Body, EncodingCommitment, FieldId, FieldKind, Header,
ServerCertCommitment, VERSION,
},
connection::{ConnectionInfo, ServerEphemKey},
hash::{HashAlgId, TypedHash},
request::Request,
serialize::CanonicalSerialize,
Attestation, AttestationConfig, Body, CryptoProvider, Extension, FieldId, Header,
ServerCertCommitment, VERSION, request::Request, serialize::CanonicalSerialize,
signing::SignatureAlgId,
CryptoProvider,
};
/// Attestation builder state for accepting a request.
#[derive(Debug)]
pub struct Accept {}
#[derive(Debug)]
pub struct Sign {
signature_alg: SignatureAlgId,
hash_alg: HashAlgId,
connection_info: Option<ConnectionInfo>,
server_ephemeral_key: Option<ServerEphemKey>,
cert_commitment: ServerCertCommitment,
encoding_commitment_root: Option<TypedHash>,
encoding_seed: Option<Vec<u8>>,
extensions: Vec<Extension>,
transcript_commitments: Vec<TranscriptCommitment>,
}
/// An attestation builder.
#[derive(Debug)]
pub struct AttestationBuilder<'a, T = Accept> {
config: &'a AttestationConfig,
state: T,
@@ -54,7 +56,7 @@ impl<'a> AttestationBuilder<'a, Accept> {
signature_alg,
hash_alg,
server_cert_commitment: cert_commitment,
encoding_commitment_root,
extensions,
} = request;
if !config.supported_signature_algs().contains(&signature_alg) {
@@ -71,15 +73,9 @@ impl<'a> AttestationBuilder<'a, Accept> {
));
}
if encoding_commitment_root.is_some()
&& !config
.supported_fields()
.contains(&FieldKind::EncodingCommitment)
{
return Err(AttestationBuilderError::new(
ErrorKind::Request,
"encoding commitment is not supported",
));
if let Some(validator) = config.extension_validator() {
validator(&extensions)
.map_err(|err| AttestationBuilderError::new(ErrorKind::Extension, err))?;
}
Ok(AttestationBuilder {
@@ -90,8 +86,8 @@ impl<'a> AttestationBuilder<'a, Accept> {
connection_info: None,
server_ephemeral_key: None,
cert_commitment,
encoding_commitment_root,
encoding_seed: None,
transcript_commitments: Vec::new(),
extensions,
},
})
}
@@ -110,9 +106,18 @@ impl AttestationBuilder<'_, Sign> {
self
}
/// Sets the encoding seed.
pub fn encoding_seed(&mut self, seed: Vec<u8>) -> &mut Self {
self.state.encoding_seed = Some(seed);
/// Adds an extension to the attestation.
pub fn extension(&mut self, extension: Extension) -> &mut Self {
self.state.extensions.push(extension);
self
}
/// Sets the transcript commitments.
pub fn transcript_commitments(
&mut self,
transcript_commitments: Vec<TranscriptCommitment>,
) -> &mut Self {
self.state.transcript_commitments = transcript_commitments;
self
}
@@ -124,8 +129,8 @@ impl AttestationBuilder<'_, Sign> {
connection_info,
server_ephemeral_key,
cert_commitment,
encoding_commitment_root,
encoding_seed,
extensions,
transcript_commitments,
} = self.state;
let hasher = provider.hash.get(&hash_alg).map_err(|_| {
@@ -143,19 +148,6 @@ impl AttestationBuilder<'_, Sign> {
)
})?;
let encoding_commitment = if let Some(root) = encoding_commitment_root {
let Some(seed) = encoding_seed else {
return Err(AttestationBuilderError::new(
ErrorKind::Field,
"encoding commitment requested but seed was not set",
));
};
Some(EncodingCommitment { root, seed })
} else {
None
};
let mut field_id = FieldId::default();
let body = Body {
@@ -167,12 +159,18 @@ impl AttestationBuilder<'_, Sign> {
AttestationBuilderError::new(ErrorKind::Field, "handshake data was not set")
})?),
cert_commitment: field_id.next(cert_commitment),
encoding_commitment: encoding_commitment.map(|commitment| field_id.next(commitment)),
plaintext_hashes: Default::default(),
extensions: extensions
.into_iter()
.map(|extension| field_id.next(extension))
.collect(),
transcript_commitments: transcript_commitments
.into_iter()
.map(|commitment| field_id.next(commitment))
.collect(),
};
let header = Header {
id: thread_rng().gen(),
id: rng().random(),
version: VERSION,
root: body.root(hasher),
};
@@ -202,6 +200,7 @@ enum ErrorKind {
Config,
Field,
Signature,
Extension,
}
impl AttestationBuilderError {
@@ -228,10 +227,11 @@ impl std::fmt::Display for AttestationBuilderError {
ErrorKind::Config => f.write_str("config error")?,
ErrorKind::Field => f.write_str("field error")?,
ErrorKind::Signature => f.write_str("signature error")?,
ErrorKind::Extension => f.write_str("extension error")?,
}
if let Some(source) = &self.source {
write!(f, " caused by: {}", source)?;
write!(f, " caused by: {source}")?;
}
Ok(())
@@ -241,16 +241,15 @@ impl std::fmt::Display for AttestationBuilderError {
#[cfg(test)]
mod test {
use rstest::{fixture, rstest};
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
use crate::{
connection::{HandshakeData, HandshakeDataV1_2},
fixtures::{
encoder_seed, encoding_provider, request_fixture, ConnectionFixture, RequestFixture,
},
use tlsn_core::{
connection::{CertBinding, CertBindingV1_2},
fixtures::{ConnectionFixture, encoding_provider},
hash::Blake3,
transcript::Transcript,
};
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
use crate::fixtures::{RequestFixture, request_fixture};
use super::*;
@@ -281,6 +280,7 @@ mod test {
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection,
Blake3::default(),
Vec::new(),
);
let attestation_config = AttestationConfig::builder()
@@ -305,6 +305,7 @@ mod test {
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection,
Blake3::default(),
Vec::new(),
);
let attestation_config = AttestationConfig::builder()
@@ -320,35 +321,6 @@ mod test {
assert!(err.is_request());
}
#[rstest]
fn test_attestation_builder_accept_unsupported_encoding_commitment() {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection,
Blake3::default(),
);
let attestation_config = AttestationConfig::builder()
.supported_signature_algs([SignatureAlgId::SECP256K1])
.supported_fields([
FieldKind::ConnectionInfo,
FieldKind::ServerEphemKey,
FieldKind::ServerIdentityCommitment,
])
.build()
.unwrap();
let err = Attestation::builder(&attestation_config)
.accept_request(request)
.err()
.unwrap();
assert!(err.is_request());
}
#[rstest]
fn test_attestation_builder_sign_missing_signer(attestation_config: &AttestationConfig) {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
@@ -359,6 +331,7 @@ mod test {
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection,
Blake3::default(),
Vec::new(),
);
let attestation_builder = Attestation::builder(attestation_config)
@@ -368,48 +341,10 @@ mod test {
let mut provider = CryptoProvider::default();
provider.signer.set_secp256r1(&[42u8; 32]).unwrap();
let err = attestation_builder.build(&provider).err().unwrap();
let err = attestation_builder.build(&provider).unwrap_err();
assert!(matches!(err.kind, ErrorKind::Config));
}
#[rstest]
fn test_attestation_builder_sign_missing_encoding_seed(
attestation_config: &AttestationConfig,
crypto_provider: &CryptoProvider,
) {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
);
let mut attestation_builder = Attestation::builder(attestation_config)
.accept_request(request)
.unwrap();
let ConnectionFixture {
connection_info,
server_cert_data,
..
} = connection;
let HandshakeData::V1_2(HandshakeDataV1_2 {
server_ephemeral_key,
..
}) = server_cert_data.handshake;
attestation_builder
.connection_info(connection_info)
.server_ephemeral_key(server_ephemeral_key);
let err = attestation_builder.build(crypto_provider).err().unwrap();
assert!(matches!(err.kind, ErrorKind::Field));
}
#[rstest]
fn test_attestation_builder_sign_missing_server_ephemeral_key(
attestation_config: &AttestationConfig,
@@ -423,6 +358,7 @@ mod test {
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let mut attestation_builder = Attestation::builder(attestation_config)
@@ -433,11 +369,9 @@ mod test {
connection_info, ..
} = connection;
attestation_builder
.connection_info(connection_info)
.encoding_seed(encoder_seed().to_vec());
attestation_builder.connection_info(connection_info);
let err = attestation_builder.build(crypto_provider).err().unwrap();
let err = attestation_builder.build(crypto_provider).unwrap_err();
assert!(matches!(err.kind, ErrorKind::Field));
}
@@ -454,6 +388,7 @@ mod test {
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let mut attestation_builder = Attestation::builder(attestation_config)
@@ -464,16 +399,91 @@ mod test {
server_cert_data, ..
} = connection;
let HandshakeData::V1_2(HandshakeDataV1_2 {
let CertBinding::V1_2(CertBindingV1_2 {
server_ephemeral_key,
..
}) = server_cert_data.handshake;
}) = server_cert_data.binding
else {
panic!("expected v1.2 handshake data");
};
attestation_builder
.server_ephemeral_key(server_ephemeral_key)
.encoding_seed(encoder_seed().to_vec());
attestation_builder.server_ephemeral_key(server_ephemeral_key);
let err = attestation_builder.build(crypto_provider).err().unwrap();
let err = attestation_builder.build(crypto_provider).unwrap_err();
assert!(matches!(err.kind, ErrorKind::Field));
}
#[rstest]
fn test_attestation_builder_reject_extensions_by_default(
attestation_config: &AttestationConfig,
) {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
vec![Extension {
id: b"foo".to_vec(),
value: b"bar".to_vec(),
}],
);
let err = Attestation::builder(attestation_config)
.accept_request(request)
.unwrap_err();
assert!(matches!(err.kind, ErrorKind::Extension));
}
#[rstest]
fn test_attestation_builder_accept_extension(crypto_provider: &CryptoProvider) {
let attestation_config = AttestationConfig::builder()
.supported_signature_algs([SignatureAlgId::SECP256K1])
.extension_validator(|_| Ok(()))
.build()
.unwrap();
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
vec![Extension {
id: b"foo".to_vec(),
value: b"bar".to_vec(),
}],
);
let mut attestation_builder = Attestation::builder(&attestation_config)
.accept_request(request)
.unwrap();
let ConnectionFixture {
server_cert_data,
connection_info,
..
} = connection;
let CertBinding::V1_2(CertBindingV1_2 {
server_ephemeral_key,
..
}) = server_cert_data.binding
else {
panic!("expected v1.2 handshake data");
};
attestation_builder
.connection_info(connection_info)
.server_ephemeral_key(server_ephemeral_key);
let attestation = attestation_builder.build(crypto_provider).unwrap();
assert_eq!(attestation.body.extensions().count(), 1);
}
}

View File

@@ -1,15 +1,12 @@
use std::{fmt::Debug, sync::Arc};
use tlsn_core::hash::HashAlgId;
use crate::{
attestation::FieldKind,
hash::{HashAlgId, DEFAULT_SUPPORTED_HASH_ALGS},
signing::SignatureAlgId,
Extension, InvalidExtension, hash::DEFAULT_SUPPORTED_HASH_ALGS, signing::SignatureAlgId,
};
const DEFAULT_SUPPORTED_FIELDS: &[FieldKind] = &[
FieldKind::ConnectionInfo,
FieldKind::ServerEphemKey,
FieldKind::ServerIdentityCommitment,
FieldKind::EncodingCommitment,
];
type ExtensionValidator = Arc<dyn Fn(&[Extension]) -> Result<(), InvalidExtension> + Send + Sync>;
#[derive(Debug)]
#[allow(dead_code)]
@@ -44,11 +41,11 @@ impl AttestationConfigError {
}
/// Attestation configuration.
#[derive(Debug, Clone)]
#[derive(Clone)]
pub struct AttestationConfig {
supported_signature_algs: Vec<SignatureAlgId>,
supported_hash_algs: Vec<HashAlgId>,
supported_fields: Vec<FieldKind>,
extension_validator: Option<ExtensionValidator>,
}
impl AttestationConfig {
@@ -65,17 +62,25 @@ impl AttestationConfig {
&self.supported_hash_algs
}
pub(crate) fn supported_fields(&self) -> &[FieldKind] {
&self.supported_fields
pub(crate) fn extension_validator(&self) -> Option<&ExtensionValidator> {
self.extension_validator.as_ref()
}
}
impl Debug for AttestationConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("AttestationConfig")
.field("supported_signature_algs", &self.supported_signature_algs)
.field("supported_hash_algs", &self.supported_hash_algs)
.finish_non_exhaustive()
}
}
/// Builder for [`AttestationConfig`].
#[derive(Debug)]
pub struct AttestationConfigBuilder {
supported_signature_algs: Vec<SignatureAlgId>,
supported_hash_algs: Vec<HashAlgId>,
supported_fields: Vec<FieldKind>,
extension_validator: Option<ExtensionValidator>,
}
impl Default for AttestationConfigBuilder {
@@ -83,7 +88,15 @@ impl Default for AttestationConfigBuilder {
Self {
supported_signature_algs: Vec::default(),
supported_hash_algs: DEFAULT_SUPPORTED_HASH_ALGS.to_vec(),
supported_fields: DEFAULT_SUPPORTED_FIELDS.to_vec(),
extension_validator: Some(Arc::new(|e| {
if !e.is_empty() {
Err(InvalidExtension::new(
"all extensions are disallowed by default",
))
} else {
Ok(())
}
})),
}
}
}
@@ -107,9 +120,26 @@ impl AttestationConfigBuilder {
self
}
/// Sets the supported attestation fields.
pub fn supported_fields(&mut self, supported_fields: impl Into<Vec<FieldKind>>) -> &mut Self {
self.supported_fields = supported_fields.into();
/// Sets the extension validator.
///
/// # Example
/// ```
/// # use tlsn_attestation::{AttestationConfig, InvalidExtension};
/// # let mut builder = AttestationConfig::builder();
/// builder.extension_validator(|extensions| {
/// for extension in extensions {
/// if extension.id != b"example.type" {
/// return Err(InvalidExtension::new("invalid extension type"));
/// }
/// }
/// Ok(())
/// });
/// ```
pub fn extension_validator<F>(&mut self, f: F) -> &mut Self
where
F: Fn(&[Extension]) -> Result<(), InvalidExtension> + Send + Sync + 'static,
{
self.extension_validator = Some(Arc::new(f));
self
}
@@ -118,7 +148,16 @@ impl AttestationConfigBuilder {
Ok(AttestationConfig {
supported_signature_algs: self.supported_signature_algs.clone(),
supported_hash_algs: self.supported_hash_algs.clone(),
supported_fields: self.supported_fields.clone(),
extension_validator: self.extension_validator.clone(),
})
}
}
impl Debug for AttestationConfigBuilder {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("AttestationConfigBuilder")
.field("supported_signature_algs", &self.supported_signature_algs)
.field("supported_hash_algs", &self.supported_hash_algs)
.finish_non_exhaustive()
}
}

View File

@@ -0,0 +1,149 @@
//! Types for committing details of a connection.
//!
//! ## Commitment
//!
//! During the TLS handshake the Notary receives the Server's ephemeral public
//! key, and this key serves as a binding commitment to the identity of the
//! Server. The ephemeral key itself does not reveal the Server's identity, but
//! it is bound to it via a signature created using the Server's
//! X.509 certificate.
//!
//! A Prover can withhold the Server's signature and certificate chain from the
//! Notary to improve privacy and censorship resistance.
//!
//! ## Proving the Server's identity
//!
//! A Prover can prove the Server's identity to a Verifier by sending a
//! [`ServerIdentityProof`]. This proof contains all the information required to
//! establish the link between the TLS connection and the Server's X.509
//! certificate. A Verifier checks the Server's certificate against their own
//! trust anchors, the same way a typical TLS client would.
use serde::{Deserialize, Serialize};
use tlsn_core::{
connection::{HandshakeData, HandshakeVerificationError, ServerEphemKey, ServerName},
hash::{Blinded, HashAlgorithm, HashProviderError, TypedHash},
};
use crate::{CryptoProvider, hash::HashAlgorithmExt, serialize::impl_domain_separator};
/// Opens a [`ServerCertCommitment`].
#[derive(Clone, Serialize, Deserialize)]
pub struct ServerCertOpening(Blinded<HandshakeData>);
impl_domain_separator!(ServerCertOpening);
opaque_debug::implement!(ServerCertOpening);
impl ServerCertOpening {
pub(crate) fn new(data: HandshakeData) -> Self {
Self(Blinded::new(data))
}
pub(crate) fn commit(&self, hasher: &dyn HashAlgorithm) -> ServerCertCommitment {
ServerCertCommitment(TypedHash {
alg: hasher.id(),
value: hasher.hash_separated(self),
})
}
/// Returns the server identity data.
pub fn data(&self) -> &HandshakeData {
self.0.data()
}
}
/// Commitment to a server certificate.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ServerCertCommitment(pub(crate) TypedHash);
impl_domain_separator!(ServerCertCommitment);
/// TLS server identity proof.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServerIdentityProof {
name: ServerName,
opening: ServerCertOpening,
}
impl ServerIdentityProof {
pub(crate) fn new(name: ServerName, opening: ServerCertOpening) -> Self {
Self { name, opening }
}
/// Verifies the server identity proof.
///
/// # Arguments
///
/// * `provider` - Crypto provider.
/// * `time` - The time of the connection.
/// * `server_ephemeral_key` - The server's ephemeral key.
/// * `commitment` - Commitment to the server certificate.
pub fn verify_with_provider(
self,
provider: &CryptoProvider,
time: u64,
server_ephemeral_key: &ServerEphemKey,
commitment: &ServerCertCommitment,
) -> Result<ServerName, ServerIdentityProofError> {
let hasher = provider.hash.get(&commitment.0.alg)?;
if commitment.0.value != hasher.hash_separated(&self.opening) {
return Err(ServerIdentityProofError {
kind: ErrorKind::Commitment,
message: "certificate opening does not match commitment".to_string(),
});
}
// Verify certificate and identity.
self.opening
.data()
.verify(&provider.cert, time, server_ephemeral_key, &self.name)?;
Ok(self.name)
}
}
/// Error for [`ServerIdentityProof`].
#[derive(Debug, thiserror::Error)]
#[error("server identity proof error: {kind}: {message}")]
pub struct ServerIdentityProofError {
kind: ErrorKind,
message: String,
}
impl From<HashProviderError> for ServerIdentityProofError {
fn from(err: HashProviderError) -> Self {
Self {
kind: ErrorKind::Provider,
message: err.to_string(),
}
}
}
impl From<HandshakeVerificationError> for ServerIdentityProofError {
fn from(err: HandshakeVerificationError) -> Self {
Self {
kind: ErrorKind::Certificate,
message: err.to_string(),
}
}
}
#[derive(Debug)]
enum ErrorKind {
Provider,
Commitment,
Certificate,
}
impl std::fmt::Display for ErrorKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ErrorKind::Provider => write!(f, "provider"),
ErrorKind::Commitment => write!(f, "commitment"),
ErrorKind::Certificate => write!(f, "certificate"),
}
}
}

View File

@@ -0,0 +1,35 @@
use std::error::Error;
use serde::{Deserialize, Serialize};
use crate::serialize::impl_domain_separator;
/// An attestation extension.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct Extension {
/// Extension identifier.
pub id: Vec<u8>,
/// Extension data.
pub value: Vec<u8>,
}
impl_domain_separator!(Extension);
/// Invalid extension error.
#[derive(Debug, thiserror::Error)]
#[error("invalid extension: {reason}")]
pub struct InvalidExtension {
reason: Box<dyn Error + Send + Sync + 'static>,
}
impl InvalidExtension {
    /// Creates a new invalid extension error from any boxable error value.
    pub fn new<E>(reason: E) -> Self
    where
        E: Into<Box<dyn Error + Send + Sync + 'static>>,
    {
        let reason = reason.into();
        Self { reason }
    }
}

View File

@@ -0,0 +1,124 @@
//! Attestation fixtures.
use tlsn_core::{
connection::{CertBinding, CertBindingV1_2},
fixtures::ConnectionFixture,
hash::HashAlgorithm,
transcript::{
Transcript, TranscriptCommitConfigBuilder, TranscriptCommitment,
encoding::{EncodingProvider, EncodingTree},
},
};
use crate::{
Attestation, AttestationConfig, CryptoProvider, Extension,
request::{Request, RequestConfig},
signing::SignatureAlgId,
};
/// A Request fixture used for testing.
#[allow(missing_docs)]
pub struct RequestFixture {
    // Encoding tree committing to the transcript encodings.
    pub encoding_tree: EncodingTree,
    // The attestation request built from the fixture transcript.
    pub request: Request,
}
/// Returns a request fixture for testing.
pub fn request_fixture(
    transcript: Transcript,
    encodings_provider: impl EncodingProvider,
    connection: ConnectionFixture,
    encoding_hasher: impl HashAlgorithm,
    extensions: Vec<Extension>,
) -> RequestFixture {
    let crypto = CryptoProvider::default();
    let (sent_len, recv_len) = transcript.len();

    let ConnectionFixture {
        server_name,
        server_cert_data,
        ..
    } = connection;

    // Commit to the full sent and received transcripts.
    let mut commit_builder = TranscriptCommitConfigBuilder::new(&transcript);
    commit_builder
        .commit_sent(&(0..sent_len))
        .unwrap()
        .commit_recv(&(0..recv_len))
        .unwrap();
    let commit_config = commit_builder.build().unwrap();

    // Prover constructs encoding tree.
    let encoding_tree = EncodingTree::new(
        &encoding_hasher,
        commit_config.iter_encoding(),
        &encodings_provider,
    )
    .unwrap();

    // Build the request config, attaching any requested extensions.
    let mut config_builder = RequestConfig::builder();
    for extension in extensions {
        config_builder.extension(extension);
    }
    let request_config = config_builder.build().unwrap();

    let mut request_builder = Request::builder(&request_config);
    request_builder
        .server_name(server_name)
        .handshake_data(server_cert_data)
        .transcript(transcript);
    let (request, _) = request_builder.build(&crypto).unwrap();

    RequestFixture {
        encoding_tree,
        request,
    }
}
/// Returns an attestation fixture for testing.
pub fn attestation_fixture(
    request: Request,
    connection: ConnectionFixture,
    signature_alg: SignatureAlgId,
    transcript_commitments: &[TranscriptCommitment],
) -> Attestation {
    let ConnectionFixture {
        connection_info,
        server_cert_data,
        ..
    } = connection;

    // The fixture only models TLS 1.2 certificate bindings.
    let CertBinding::V1_2(CertBindingV1_2 {
        server_ephemeral_key,
        ..
    }) = server_cert_data.binding
    else {
        panic!("expected v1.2 binding data");
    };

    // Install a deterministic signing key for the requested algorithm.
    let mut crypto = CryptoProvider::default();
    match signature_alg {
        SignatureAlgId::SECP256K1 => crypto.signer.set_secp256k1(&[42u8; 32]).unwrap(),
        SignatureAlgId::SECP256R1 => crypto.signer.set_secp256r1(&[42u8; 32]).unwrap(),
        _ => unimplemented!(),
    };

    let config = AttestationConfig::builder()
        .supported_signature_algs([signature_alg])
        .build()
        .unwrap();

    let mut builder = Attestation::builder(&config)
        .accept_request(request)
        .unwrap();
    builder
        .connection_info(connection_info)
        .server_ephemeral_key(server_ephemeral_key)
        .transcript_commitments(transcript_commitments.to_vec());

    builder.build(&crypto).unwrap()
}

View File

@@ -0,0 +1,19 @@
use tlsn_core::hash::{Hash, HashAlgId, HashAlgorithm};
use crate::serialize::{CanonicalSerialize, DomainSeparator};
// Hash algorithms supported by default.
pub(crate) const DEFAULT_SUPPORTED_HASH_ALGS: &[HashAlgId] =
    &[HashAlgId::SHA256, HashAlgId::BLAKE3, HashAlgId::KECCAK256];

/// Convenience extension methods for hashing canonically-serialized types.
pub(crate) trait HashAlgorithmExt: HashAlgorithm {
    /// Hashes the canonical serialization of `data`.
    #[allow(dead_code)]
    fn hash_canonical<T: CanonicalSerialize>(&self, data: &T) -> Hash {
        self.hash(&data.serialize())
    }

    /// Hashes `data` prefixed with its domain separator, mitigating type
    /// confusion attacks.
    fn hash_separated<T: DomainSeparator + CanonicalSerialize>(&self, data: &T) -> Hash {
        self.hash_prefixed(data.domain(), &data.serialize())
    }
}

// Blanket impl: every hash algorithm gets the extension methods.
impl<T: HashAlgorithm + ?Sized> HashAlgorithmExt for T {}

View File

@@ -0,0 +1,449 @@
//! TLSNotary attestation types.
//!
//! # Introduction
//!
//! This library provides core functionality for TLSNotary **attestations**.
//!
//! Once the TLS commitment protocol has been completed the Prover holds a
//! collection of commitments pertaining to the TLS connection. Most
//! importantly, the Prover is committed to the
//! [`ServerName`](tlsn_core::connection::ServerName),
//! and the [`Transcript`](tlsn_core::transcript::Transcript) of application
//! data. Subsequently, the Prover can request an [`Attestation`] from the
//! Notary who will include the commitments as well as any additional
//! information which may be useful to an attestation Verifier.
//!
//! Holding an attestation, the Prover can construct a
//! [`Presentation`](crate::presentation::Presentation) which facilitates
//! selectively disclosing various aspects of the TLS connection to a Verifier.
//! If the Verifier trusts the Notary, or more specifically the verifying key of
//! the attestation, then the Verifier can trust the authenticity of the
//! information disclosed in the presentation.
//!
//! **Be sure to check out the various submodules for more information.**
//!
//! # Structure
//!
//! An attestation is a cryptographically signed document issued by a Notary who
//! witnessed a TLS connection. It contains various fields which can be used to
//! verify statements about the connection and the associated application data.
//!
//! Attestations are comprised of two parts: a [`Header`] and a [`Body`].
//!
//! The header is the data structure which is signed by a Notary. It
//! contains a unique identifier, the protocol version, and a Merkle root
//! of the body fields.
//!
//! The body contains the fields of the attestation. These fields include data
//! which can be used to verify aspects of a TLS connection, such as the
//! server's identity, and facts about the transcript.
//!
//! # Extensions
//!
//! An attestation may be extended using [`Extension`] fields included in the
//! body. Extensions (currently) have no canonical semantics, but may be used to
//! implement application specific functionality.
//!
//! A Prover may [append
//! extensions](crate::request::RequestConfigBuilder::extension)
//! to their attestation request, provided that the Notary supports them
//! (disallowed by default). A Notary may also be configured to
//! [validate](crate::AttestationConfigBuilder::extension_validator)
//! any extensions requested by a Prover using custom application logic.
//! Additionally, a Notary may
//! [include](crate::AttestationBuilder::extension)
//! their own extensions.
//!
//! # Committing to the transcript
//!
//! The TLS commitment protocol produces commitments to the entire transcript of
//! application data. However, we may want to disclose only a subset of the data
//! in a presentation. Prior to attestation, the Prover has the opportunity to
//! slice and dice the commitments into smaller sections which can be
//! selectively disclosed. Additionally, the Prover may want to use different
//! commitment schemes depending on the context they expect to disclose.
//!
//! The primary API for this process is the
//! [`TranscriptCommitConfigBuilder`](tlsn_core::transcript::TranscriptCommitConfigBuilder)
//! which is used to build up a configuration.
//!
//! ```no_run
//! # use tlsn_core::transcript::{TranscriptCommitConfigBuilder, Transcript, Direction};
//! # use tlsn_core::hash::HashAlgId;
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! # let transcript: Transcript = unimplemented!();
//! let (sent_len, recv_len) = transcript.len();
//!
//! // Create a new configuration builder.
//! let mut builder = TranscriptCommitConfigBuilder::new(&transcript);
//!
//! // Specify all the transcript commitments we want to make.
//! builder
//! // Use BLAKE3 for encoding commitments.
//! .encoding_hash_alg(HashAlgId::BLAKE3)
//! // Commit to all sent data.
//! .commit_sent(&(0..sent_len))?
//! // Commit to the first 10 bytes of sent data.
//! .commit_sent(&(0..10))?
//! // Skip some bytes so it can be omitted in the presentation.
//! .commit_sent(&(20..sent_len))?
//! // Commit to all received data.
//! .commit_recv(&(0..recv_len))?;
//!
//! let config = builder.build()?;
//! # Ok(())
//! # }
//! ```
//!
//! # Requesting an attestation
//!
//! The first step in the attestation protocol is for the Prover to make a
//! [`Request`](crate::request::Request), which can be configured using the
//! associated [builder](crate::request::RequestConfigBuilder). With it the
//! Prover can configure some of the details of the attestation, such as which
//! cryptographic algorithms are used (if the Notary supports them).
//!
//! The Prover may also request for extensions to be added to the attestation,
//! see [here](#extensions) for more information.
//!
//! Upon being issued an attestation, the Prover will also hold a corresponding
//! [`Secrets`] which contains all private information. This pair can be stored
//! and used later to construct a
//! [`Presentation`](crate::presentation::Presentation), [see
//! below](#constructing-a-presentation).
//!
//! # Issuing an attestation
//!
//! Upon receiving a request, the Notary can issue an [`Attestation`] which can
//! be configured using the associated
//! [builder](crate::AttestationConfigBuilder).
//!
//! The Notary's [`CryptoProvider`] must be configured with an appropriate
//! signing key for attestations. See
//! [`SignerProvider`](crate::signing::SignerProvider) for more information.
//!
//! # Constructing a presentation
//!
//! A Prover can use an [`Attestation`] and the corresponding [`Secrets`] to
//! construct a verifiable [`Presentation`](crate::presentation::Presentation).
//!
//! ```no_run
//! # use tlsn_attestation::{Attestation, CryptoProvider, Secrets, presentation::Presentation};
//! # use tlsn_core::transcript::{TranscriptCommitmentKind, Direction};
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! # let attestation: Attestation = unimplemented!();
//! # let secrets: Secrets = unimplemented!();
//! # let crypto_provider: CryptoProvider = unimplemented!();
//! let (_sent_len, recv_len) = secrets.transcript().len();
//!
//! // First, we decide which application data we would like to disclose.
//! let mut builder = secrets.transcript_proof_builder();
//!
//! builder
//! // Use transcript encoding commitments.
//! .commitment_kinds(&[TranscriptCommitmentKind::Encoding])
//! // Disclose the first 10 bytes of the sent data.
//! .reveal(&(0..10), Direction::Sent)?
//! // Disclose all of the received data.
//! .reveal(&(0..recv_len), Direction::Received)?;
//!
//! let transcript_proof = builder.build()?;
//!
//! // In most cases, we will also disclose the server identity.
//! let identity_proof = secrets.identity_proof();
//!
//! // Now we can construct the presentation.
//! let mut builder = attestation.presentation_builder(&crypto_provider);
//!
//! builder
//! .identity_proof(identity_proof)
//! .transcript_proof(transcript_proof);
//!
//! // Finally, we build the presentation. Send it to a verifier!
//! let presentation: Presentation = builder.build()?;
//! # Ok(())
//! # }
//! ```
//!
//! # Verifying a presentation
//!
//! Verifying a presentation is as simple as checking that the verifier trusts
//! the verifying key and then calling
//! [`Presentation::verify`](crate::presentation::Presentation::verify).
//!
//! ```no_run
//! # use tlsn_attestation::{CryptoProvider, presentation::{Presentation, PresentationOutput}, signing::VerifyingKey};
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! # let presentation: Presentation = unimplemented!();
//! # let trusted_key: VerifyingKey = unimplemented!();
//! # let crypto_provider: CryptoProvider = unimplemented!();
//! // Assert that we trust the verifying key.
//! assert_eq!(presentation.verifying_key(), &trusted_key);
//!
//! let PresentationOutput {
//! attestation,
//! server_name,
//! connection_info,
//! transcript,
//! ..
//! } = presentation.verify(&crypto_provider)?;
//! # Ok(())
//! # }
//! ```
#![deny(missing_docs, unreachable_pub, unused_must_use)]
#![deny(clippy::all)]
#![forbid(unsafe_code)]
mod builder;
mod config;
pub mod connection;
mod extension;
#[cfg(any(test, feature = "fixtures"))]
pub mod fixtures;
pub(crate) mod hash;
pub mod presentation;
mod proof;
mod provider;
pub mod request;
mod secrets;
pub(crate) mod serialize;
pub mod signing;
use std::fmt;
use rand::distr::{Distribution, StandardUniform};
use serde::{Deserialize, Serialize};
use tlsn_core::{
connection::{ConnectionInfo, ServerEphemKey},
hash::{Hash, HashAlgorithm, TypedHash},
merkle::MerkleTree,
transcript::TranscriptCommitment,
};
use crate::{
connection::ServerCertCommitment,
hash::HashAlgorithmExt,
presentation::PresentationBuilder,
serialize::impl_domain_separator,
signing::{Signature, VerifyingKey},
};
pub use builder::{AttestationBuilder, AttestationBuilderError};
pub use config::{AttestationConfig, AttestationConfigBuilder, AttestationConfigError};
pub use extension::{Extension, InvalidExtension};
pub use proof::{AttestationError, AttestationProof};
pub use provider::CryptoProvider;
pub use secrets::Secrets;
/// Current version of attestations.
pub const VERSION: Version = Version(0);
/// Unique identifier for an attestation.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct Uid(pub [u8; 16]);
impl From<[u8; 16]> for Uid {
fn from(id: [u8; 16]) -> Self {
Self(id)
}
}
impl Distribution<Uid> for StandardUniform {
    fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> Uid {
        // Draw 16 uniformly random bytes for the identifier.
        let bytes: [u8; 16] = self.sample(rng);
        Uid(bytes)
    }
}
/// Version of an attestation.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct Version(u32);
impl_domain_separator!(Version);
/// Public attestation field.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Field<T> {
/// Identifier of the field.
pub id: FieldId,
/// Field data.
pub data: T,
}
/// Identifier for a field.
#[derive(
Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize,
)]
pub struct FieldId(pub u32);
impl FieldId {
    /// Wraps `data` in a field carrying the current id, then advances the
    /// counter for the next field.
    pub(crate) fn next<T>(&mut self, data: T) -> Field<T> {
        let field = Field { id: *self, data };
        self.0 += 1;
        field
    }
}
impl fmt::Display for FieldId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}
/// Kind of an attestation field.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[repr(u8)]
pub enum FieldKind {
/// Connection information.
ConnectionInfo = 0x01,
/// Server ephemeral key.
ServerEphemKey = 0x02,
/// Server identity commitment.
ServerIdentityCommitment = 0x03,
/// Encoding commitment.
EncodingCommitment = 0x04,
/// Plaintext hash commitment.
PlaintextHash = 0x05,
}
/// Attestation header.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct Header {
/// An identifier for the attestation.
pub id: Uid,
/// Version of the attestation.
pub version: Version,
/// Merkle root of the attestation fields.
pub root: TypedHash,
}
impl_domain_separator!(Header);
/// Attestation body.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Body {
    // The Notary's verifying key, bound into the attestation.
    verifying_key: Field<VerifyingKey>,
    // Information about the TLS connection.
    connection_info: Field<ConnectionInfo>,
    // Server's ephemeral public key.
    server_ephemeral_key: Field<ServerEphemKey>,
    // Commitment to the server's certificate.
    cert_commitment: Field<ServerCertCommitment>,
    // Application-specific extensions.
    extensions: Vec<Field<Extension>>,
    // Commitments to the transcript data.
    transcript_commitments: Vec<Field<TranscriptCommitment>>,
}
impl Body {
    /// Returns an iterator over the extensions.
    pub fn extensions(&self) -> impl Iterator<Item = &Extension> {
        self.extensions.iter().map(|field| &field.data)
    }

    /// Returns the attestation verifying key.
    pub fn verifying_key(&self) -> &VerifyingKey {
        &self.verifying_key.data
    }

    /// Computes the Merkle root of the attestation fields.
    ///
    /// This is only used when building an attestation.
    pub(crate) fn root(&self, hasher: &dyn HashAlgorithm) -> TypedHash {
        // Discard the field ids; only the hashes become Merkle leaves.
        let leaves: Vec<_> = self
            .hash_fields(hasher)
            .into_iter()
            .map(|(_, hash)| hash)
            .collect();
        let mut tree = MerkleTree::new(hasher.id());
        tree.insert(hasher, leaves);
        tree.root()
    }

    /// Returns the fields of the body hashed and sorted by id.
    ///
    /// Each field is hashed with a domain separator to mitigate type confusion
    /// attacks.
    ///
    /// # Note
    ///
    /// The order of fields is not stable across versions.
    pub(crate) fn hash_fields(&self, hasher: &dyn HashAlgorithm) -> Vec<(FieldId, Hash)> {
        // CRITICAL: ensure all fields are included! If a new field is added to the
        // struct without including it here, it will not be included in the attestation.
        // Exhaustive destructuring makes the compiler flag any new field.
        let Self {
            verifying_key,
            connection_info,
            server_ephemeral_key,
            cert_commitment,
            extensions,
            transcript_commitments,
        } = self;

        let mut fields: Vec<(FieldId, Hash)> = vec![
            (verifying_key.id, hasher.hash_separated(&verifying_key.data)),
            (
                connection_info.id,
                hasher.hash_separated(&connection_info.data),
            ),
            (
                server_ephemeral_key.id,
                hasher.hash_separated(&server_ephemeral_key.data),
            ),
            (
                cert_commitment.id,
                hasher.hash_separated(&cert_commitment.data),
            ),
        ];
        fields.extend(
            extensions
                .iter()
                .map(|field| (field.id, hasher.hash_separated(&field.data))),
        );
        fields.extend(
            transcript_commitments
                .iter()
                .map(|field| (field.id, hasher.hash_separated(&field.data))),
        );

        fields.sort_by_key(|(id, _)| *id);
        fields
    }

    /// Returns the connection information.
    pub(crate) fn connection_info(&self) -> &ConnectionInfo {
        &self.connection_info.data
    }

    /// Returns the server's ephemeral public key.
    pub(crate) fn server_ephemeral_key(&self) -> &ServerEphemKey {
        &self.server_ephemeral_key.data
    }

    /// Returns the commitment to a server certificate.
    pub(crate) fn cert_commitment(&self) -> &ServerCertCommitment {
        &self.cert_commitment.data
    }

    /// Returns the transcript commitments.
    pub(crate) fn transcript_commitments(&self) -> impl Iterator<Item = &TranscriptCommitment> {
        self.transcript_commitments.iter().map(|field| &field.data)
    }
}
/// An attestation document.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Attestation {
/// The signature of the attestation.
pub signature: Signature,
/// The attestation header.
pub header: Header,
/// The attestation body.
pub body: Body,
}
impl Attestation {
    /// Returns an attestation builder.
    ///
    /// Used by the Notary to issue an attestation from an accepted request.
    pub fn builder(config: &AttestationConfig) -> AttestationBuilder<'_> {
        AttestationBuilder::new(config)
    }

    /// Returns a presentation builder.
    ///
    /// Used by the Prover to selectively disclose parts of this attestation.
    pub fn presentation_builder<'a>(
        &'a self,
        provider: &'a CryptoProvider,
    ) -> PresentationBuilder<'a> {
        PresentationBuilder::new(provider, self)
    }
}

View File

@@ -26,12 +26,15 @@ use std::fmt;
use serde::{Deserialize, Serialize};
use crate::{
attestation::{Attestation, AttestationError, AttestationProof},
connection::{ConnectionInfo, ServerIdentityProof, ServerIdentityProofError, ServerName},
signing::VerifyingKey,
use tlsn_core::{
connection::{ConnectionInfo, ServerName},
transcript::{PartialTranscript, TranscriptProof, TranscriptProofError},
CryptoProvider,
};
use crate::{
Attestation, AttestationError, AttestationProof, CryptoProvider, Extension,
connection::{ServerIdentityProof, ServerIdentityProofError},
signing::VerifyingKey,
};
/// A verifiable presentation.
@@ -84,16 +87,25 @@ impl Presentation {
.transpose()?;
let transcript = transcript
.map(|transcript| transcript.verify_with_provider(provider, &attestation.body))
.map(|transcript| {
transcript.verify_with_provider(
&provider.hash,
&attestation.body.connection_info().transcript_length,
attestation.body.transcript_commitments(),
)
})
.transpose()?;
let connection_info = attestation.body.connection_info().clone();
let extensions = attestation.body.extensions().cloned().collect();
Ok(PresentationOutput {
attestation,
server_name,
connection_info,
transcript,
extensions,
})
}
}
@@ -110,6 +122,8 @@ pub struct PresentationOutput {
pub connection_info: ConnectionInfo,
/// Authenticated transcript data.
pub transcript: Option<PartialTranscript>,
/// Extensions.
pub extensions: Vec<Extension>,
}
/// Builder for [`Presentation`].
@@ -175,7 +189,7 @@ impl fmt::Display for PresentationBuilderError {
}
if let Some(source) = &self.source {
write!(f, " caused by: {}", source)?;
write!(f, " caused by: {source}")?;
}
Ok(())
@@ -216,7 +230,7 @@ impl fmt::Display for PresentationError {
}
if let Some(source) = &self.source {
write!(f, " caused by: {}", source)?;
write!(f, " caused by: {source}")?;
}
Ok(())

View File

@@ -2,13 +2,15 @@ use std::fmt;
use serde::{Deserialize, Serialize};
use crate::{
attestation::{Attestation, Body, Header},
use tlsn_core::{
hash::HashAlgorithm,
merkle::{MerkleProof, MerkleTree},
};
use crate::{
Attestation, Body, CryptoProvider, Header,
serialize::CanonicalSerialize,
signing::{Signature, VerifyingKey},
CryptoProvider,
};
/// Proof of an attestation.
@@ -165,7 +167,7 @@ impl fmt::Display for AttestationError {
}
if let Some(source) = &self.source {
write!(f, " caused by: {}", source)?;
write!(f, " caused by: {source}")?;
}
Ok(())

View File

@@ -1,12 +1,6 @@
use tls_core::{
anchors::{OwnedTrustAnchor, RootCertStore},
verify::WebPkiVerifier,
};
use tlsn_core::{hash::HashProvider, webpki::ServerCertVerifier};
use crate::{
hash::HashProvider,
signing::{SignatureVerifierProvider, SignerProvider},
};
use crate::signing::{SignatureVerifierProvider, SignerProvider};
/// Cryptography provider.
///
@@ -17,7 +11,7 @@ use crate::{
/// implementations.
///
/// Algorithms are uniquely identified using an 8-bit ID, eg.
/// [`HashAlgId`](crate::hash::HashAlgId), half of which is reserved for the
/// [`HashAlgId`](tlsn_core::hash::HashAlgId), half of which is reserved for the
/// officially supported algorithms. If you think that a new algorithm should be
/// added to the official set, please open an issue. Beware that other parties
/// may assign different algorithms to the same ID as you, and we make no effort
@@ -30,7 +24,7 @@ pub struct CryptoProvider {
/// This is used to verify the server's certificate chain.
///
/// The default verifier uses the Mozilla root certificates.
pub cert: WebPkiVerifier,
pub cert: ServerCertVerifier,
/// Signer provider.
///
/// This is used for signing attestations.
@@ -47,21 +41,9 @@ impl Default for CryptoProvider {
fn default() -> Self {
Self {
hash: Default::default(),
cert: default_cert_verifier(),
cert: ServerCertVerifier::mozilla(),
signer: Default::default(),
signature: Default::default(),
}
}
}
pub(crate) fn default_cert_verifier() -> WebPkiVerifier {
let mut root_store = RootCertStore::empty();
root_store.add_server_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| {
OwnedTrustAnchor::from_subject_spki_name_constraints(
ta.subject.as_ref(),
ta.subject_public_key_info.as_ref(),
ta.name_constraints.as_ref().map(|nc| nc.as_ref()),
)
}));
WebPkiVerifier::new(root_store, None)
}

View File

@@ -18,12 +18,9 @@ mod config;
use serde::{Deserialize, Serialize};
use crate::{
attestation::Attestation,
connection::ServerCertCommitment,
hash::{HashAlgId, TypedHash},
signing::SignatureAlgId,
};
use tlsn_core::hash::HashAlgId;
use crate::{Attestation, Extension, connection::ServerCertCommitment, signing::SignatureAlgId};
pub use builder::{RequestBuilder, RequestBuilderError};
pub use config::{RequestConfig, RequestConfigBuilder, RequestConfigBuilderError};
@@ -34,12 +31,12 @@ pub struct Request {
pub(crate) signature_alg: SignatureAlgId,
pub(crate) hash_alg: HashAlgId,
pub(crate) server_cert_commitment: ServerCertCommitment,
pub(crate) encoding_commitment_root: Option<TypedHash>,
pub(crate) extensions: Vec<Extension>,
}
impl Request {
/// Returns a new request builder.
pub fn builder(config: &RequestConfig) -> RequestBuilder {
pub fn builder(config: &RequestConfig) -> RequestBuilder<'_> {
RequestBuilder::new(config)
}
@@ -65,16 +62,11 @@ impl Request {
));
}
if let Some(encoding_commitment_root) = &self.encoding_commitment_root {
let Some(encoding_commitment) = attestation.body.encoding_commitment() else {
// TODO: improve the O(M*N) complexity of this check.
for extension in &self.extensions {
if !attestation.body.extensions().any(|e| e == extension) {
return Err(InconsistentAttestation(
"encoding commitment is missing".to_string(),
));
};
if &encoding_commitment.root != encoding_commitment_root {
return Err(InconsistentAttestation(
"encoding commitment root does not match".to_string(),
"extension is missing from the attestation".to_string(),
));
}
}
@@ -90,20 +82,19 @@ pub struct InconsistentAttestation(String);
#[cfg(test)]
mod test {
use tlsn_core::{
connection::TranscriptLength,
fixtures::{ConnectionFixture, encoding_provider},
hash::{Blake3, HashAlgId},
transcript::Transcript,
};
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
use super::*;
use crate::{
connection::{ServerCertOpening, TranscriptLength},
fixtures::{
attestation_fixture, encoder_seed, encoding_provider, request_fixture,
ConnectionFixture, RequestFixture,
},
hash::{Blake3, Hash, HashAlgId},
signing::SignatureAlgId,
transcript::Transcript,
CryptoProvider,
connection::ServerCertOpening,
fixtures::{RequestFixture, attestation_fixture, request_fixture},
signing::SignatureAlgId,
};
#[test]
@@ -116,14 +107,11 @@ mod test {
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let attestation = attestation_fixture(
request.clone(),
connection,
SignatureAlgId::SECP256K1,
encoder_seed().to_vec(),
);
let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
assert!(request.validate(&attestation).is_ok())
}
@@ -138,14 +126,11 @@ mod test {
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let attestation = attestation_fixture(
request.clone(),
connection,
SignatureAlgId::SECP256K1,
encoder_seed().to_vec(),
);
let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
request.signature_alg = SignatureAlgId::SECP256R1;
@@ -163,14 +148,11 @@ mod test {
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let attestation = attestation_fixture(
request.clone(),
connection,
SignatureAlgId::SECP256K1,
encoder_seed().to_vec(),
);
let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
request.hash_alg = HashAlgId::SHA256;
@@ -188,14 +170,11 @@ mod test {
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
Vec::new(),
);
let attestation = attestation_fixture(
request.clone(),
connection,
SignatureAlgId::SECP256K1,
encoder_seed().to_vec(),
);
let attestation =
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
let ConnectionFixture {
server_cert_data, ..
@@ -212,32 +191,4 @@ mod test {
let res = request.validate(&attestation);
assert!(res.is_err())
}
#[test]
fn test_wrong_encoding_commitment_root() {
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
let connection = ConnectionFixture::tlsnotary(transcript.length());
let RequestFixture { mut request, .. } = request_fixture(
transcript,
encoding_provider(GET_WITH_HEADER, OK_JSON),
connection.clone(),
Blake3::default(),
);
let attestation = attestation_fixture(
request.clone(),
connection,
SignatureAlgId::SECP256K1,
encoder_seed().to_vec(),
);
request.encoding_commitment_root = Some(TypedHash {
alg: HashAlgId::BLAKE3,
value: Hash::default(),
});
let res = request.validate(&attestation);
assert!(res.is_err())
}
}

View File

@@ -1,19 +1,22 @@
use tlsn_core::{
connection::{HandshakeData, ServerName},
transcript::{Transcript, TranscriptCommitment, TranscriptSecret},
};
use crate::{
connection::{ServerCertData, ServerCertOpening, ServerName},
index::Index,
CryptoProvider, Secrets,
connection::ServerCertOpening,
request::{Request, RequestConfig},
secrets::Secrets,
transcript::{encoding::EncodingTree, Transcript},
CryptoProvider,
};
/// Builder for [`Request`].
pub struct RequestBuilder<'a> {
config: &'a RequestConfig,
server_name: Option<ServerName>,
server_cert_data: Option<ServerCertData>,
encoding_tree: Option<EncodingTree>,
handshake_data: Option<HandshakeData>,
transcript: Option<Transcript>,
transcript_commitments: Vec<TranscriptCommitment>,
transcript_commitment_secrets: Vec<TranscriptSecret>,
}
impl<'a> RequestBuilder<'a> {
@@ -22,9 +25,10 @@ impl<'a> RequestBuilder<'a> {
Self {
config,
server_name: None,
server_cert_data: None,
encoding_tree: None,
handshake_data: None,
transcript: None,
transcript_commitments: Vec::new(),
transcript_commitment_secrets: Vec::new(),
}
}
@@ -34,15 +38,9 @@ impl<'a> RequestBuilder<'a> {
self
}
/// Sets the server identity data.
pub fn server_cert_data(&mut self, data: ServerCertData) -> &mut Self {
self.server_cert_data = Some(data);
self
}
/// Sets the tree to commit to the transcript encodings.
pub fn encoding_tree(&mut self, tree: EncodingTree) -> &mut Self {
self.encoding_tree = Some(tree);
/// Sets the handshake data.
pub fn handshake_data(&mut self, data: HandshakeData) -> &mut Self {
self.handshake_data = Some(data);
self
}
@@ -52,6 +50,17 @@ impl<'a> RequestBuilder<'a> {
self
}
/// Sets the transcript commitments.
pub fn transcript_commitments(
&mut self,
secrets: Vec<TranscriptSecret>,
commitments: Vec<TranscriptCommitment>,
) -> &mut Self {
self.transcript_commitment_secrets = secrets;
self.transcript_commitments = commitments;
self
}
/// Builds the attestation request and returns the corresponding secrets.
pub fn build(
self,
@@ -60,9 +69,10 @@ impl<'a> RequestBuilder<'a> {
let Self {
config,
server_name,
server_cert_data,
encoding_tree,
handshake_data: server_cert_data,
transcript,
transcript_commitments,
transcript_commitment_secrets,
} = self;
let signature_alg = *config.signature_alg();
@@ -85,21 +95,21 @@ impl<'a> RequestBuilder<'a> {
let server_cert_commitment = server_cert_opening.commit(hasher);
let encoding_commitment_root = encoding_tree.as_ref().map(|tree| tree.root());
let extensions = config.extensions().to_vec();
let request = Request {
signature_alg,
hash_alg,
server_cert_commitment,
encoding_commitment_root,
extensions,
};
let secrets = Secrets {
server_name,
server_cert_opening,
encoding_tree,
plaintext_hashes: Index::default(),
transcript,
transcript_commitments,
transcript_commitment_secrets,
};
Ok((request, secrets))

View File

@@ -1,10 +1,14 @@
use crate::{hash::HashAlgId, signing::SignatureAlgId};
use tlsn_core::{hash::HashAlgId, transcript::TranscriptCommitConfig};
use crate::{Extension, signing::SignatureAlgId};
/// Request configuration.
#[derive(Debug, Clone)]
pub struct RequestConfig {
signature_alg: SignatureAlgId,
hash_alg: HashAlgId,
extensions: Vec<Extension>,
transcript_commit: Option<TranscriptCommitConfig>,
}
impl Default for RequestConfig {
@@ -28,6 +32,16 @@ impl RequestConfig {
pub fn hash_alg(&self) -> &HashAlgId {
&self.hash_alg
}
/// Returns the extensions.
pub fn extensions(&self) -> &[Extension] {
&self.extensions
}
/// Returns the transcript commitment configuration.
pub fn transcript_commit(&self) -> Option<&TranscriptCommitConfig> {
self.transcript_commit.as_ref()
}
}
/// Builder for [`RequestConfig`].
@@ -35,6 +49,8 @@ impl RequestConfig {
pub struct RequestConfigBuilder {
signature_alg: SignatureAlgId,
hash_alg: HashAlgId,
extensions: Vec<Extension>,
transcript_commit: Option<TranscriptCommitConfig>,
}
impl Default for RequestConfigBuilder {
@@ -42,6 +58,8 @@ impl Default for RequestConfigBuilder {
Self {
signature_alg: SignatureAlgId::SECP256K1,
hash_alg: HashAlgId::BLAKE3,
extensions: Vec::new(),
transcript_commit: None,
}
}
}
@@ -59,11 +77,25 @@ impl RequestConfigBuilder {
self
}
/// Adds an extension to the request.
pub fn extension(&mut self, extension: Extension) -> &mut Self {
self.extensions.push(extension);
self
}
/// Sets the transcript commitment configuration.
pub fn transcript_commit(&mut self, transcript_commit: TranscriptCommitConfig) -> &mut Self {
self.transcript_commit = Some(transcript_commit);
self
}
/// Builds the config.
pub fn build(self) -> Result<RequestConfig, RequestConfigBuilderError> {
Ok(RequestConfig {
signature_alg: self.signature_alg,
hash_alg: self.hash_alg,
extensions: self.extensions,
transcript_commit: self.transcript_commit,
})
}
}

View File

@@ -1,21 +1,20 @@
use serde::{Deserialize, Serialize};
use crate::{
connection::{ServerCertOpening, ServerIdentityProof, ServerName},
index::Index,
transcript::{
encoding::EncodingTree, hash::PlaintextHashSecret, Transcript, TranscriptProofBuilder,
},
use tlsn_core::{
connection::ServerName,
transcript::{Transcript, TranscriptCommitment, TranscriptProofBuilder, TranscriptSecret},
};
/// Secret data of an [`Attestation`](crate::attestation::Attestation).
use crate::connection::{ServerCertOpening, ServerIdentityProof};
/// Secret data of an [`Attestation`](crate::Attestation).
#[derive(Clone, Serialize, Deserialize)]
pub struct Secrets {
pub(crate) server_name: ServerName,
pub(crate) server_cert_opening: ServerCertOpening,
pub(crate) encoding_tree: Option<EncodingTree>,
pub(crate) plaintext_hashes: Index<PlaintextHashSecret>,
pub(crate) transcript: Transcript,
pub(crate) transcript_commitments: Vec<TranscriptCommitment>,
pub(crate) transcript_commitment_secrets: Vec<TranscriptSecret>,
}
opaque_debug::implement!(Secrets);
@@ -38,10 +37,6 @@ impl Secrets {
/// Returns a transcript proof builder.
pub fn transcript_proof_builder(&self) -> TranscriptProofBuilder<'_> {
TranscriptProofBuilder::new(
&self.transcript,
self.encoding_tree.as_ref(),
&self.plaintext_hashes,
)
TranscriptProofBuilder::new(&self.transcript, &self.transcript_commitment_secrets)
}
}

View File

@@ -0,0 +1,53 @@
/// Canonical serialization of TLSNotary types.
///
/// This trait is used to serialize types into a canonical byte representation.
pub(crate) trait CanonicalSerialize {
/// Serializes the type.
fn serialize(&self) -> Vec<u8>;
}
impl<T> CanonicalSerialize for T
where
T: serde::Serialize,
{
fn serialize(&self) -> Vec<u8> {
// For now we use BCS for serialization. In future releases we will want to
// consider this further, particularly with respect to EVM compatibility.
bcs::to_bytes(self).unwrap()
}
}
/// A type with a domain separator which is used during hashing to mitigate type
/// confusion attacks.
pub(crate) trait DomainSeparator {
/// Returns the domain separator for the type.
fn domain(&self) -> &[u8];
}
macro_rules! impl_domain_separator {
($type:ty) => {
impl $crate::serialize::DomainSeparator for $type {
fn domain(&self) -> &[u8] {
use std::sync::LazyLock;
// Computes a 16 byte hash of the type's name to use as a domain separator.
static DOMAIN: LazyLock<[u8; 16]> = LazyLock::new(|| {
let domain: [u8; 32] = blake3::hash(stringify!($type).as_bytes()).into();
domain[..16].try_into().unwrap()
});
&*DOMAIN
}
}
};
}
pub(crate) use impl_domain_separator;
impl_domain_separator!(tlsn_core::connection::ServerEphemKey);
impl_domain_separator!(tlsn_core::connection::ConnectionInfo);
impl_domain_separator!(tlsn_core::connection::CertBinding);
impl_domain_separator!(tlsn_core::transcript::TranscriptCommitment);
impl_domain_separator!(tlsn_core::transcript::TranscriptSecret);
impl_domain_separator!(tlsn_core::transcript::encoding::EncodingCommitment);
impl_domain_separator!(tlsn_core::transcript::hash::PlaintextHash);

View File

@@ -4,7 +4,7 @@ use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use crate::hash::impl_domain_separator;
use crate::serialize::impl_domain_separator;
/// Key algorithm identifier.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
@@ -52,10 +52,15 @@ impl std::fmt::Display for KeyAlgId {
pub struct SignatureAlgId(u8);
impl SignatureAlgId {
/// secp256k1 signature algorithm.
/// secp256k1 signature algorithm with SHA-256 hashing.
pub const SECP256K1: Self = Self(1);
/// secp256r1 signature algorithm.
/// secp256r1 signature algorithm with SHA-256 hashing.
pub const SECP256R1: Self = Self(2);
/// Ethereum-compatible signature algorithm.
///
/// Uses secp256k1 with Keccak-256 hashing. The signature is a concatenation
/// of `r || s || v` as defined in Solidity's ecrecover().
pub const SECP256K1ETH: Self = Self(3);
/// Creates a new signature algorithm identifier.
///
@@ -83,6 +88,7 @@ impl std::fmt::Display for SignatureAlgId {
match *self {
SignatureAlgId::SECP256K1 => write!(f, "secp256k1"),
SignatureAlgId::SECP256R1 => write!(f, "secp256r1"),
SignatureAlgId::SECP256K1ETH => write!(f, "secp256k1eth"),
_ => write!(f, "custom({:02x})", self.0),
}
}
@@ -124,6 +130,13 @@ impl SignerProvider {
Ok(self)
}
/// Configures a secp256k1eth signer with the provided signing key.
pub fn set_secp256k1eth(&mut self, key: &[u8]) -> Result<&mut Self, SignerError> {
self.set_signer(Box::new(Secp256k1EthSigner::new(key)?));
Ok(self)
}
/// Returns a signer for the given algorithm.
pub(crate) fn get(
&self,
@@ -164,6 +177,10 @@ impl Default for SignatureVerifierProvider {
verifiers.insert(SignatureAlgId::SECP256K1, Box::new(Secp256k1Verifier) as _);
verifiers.insert(SignatureAlgId::SECP256R1, Box::new(Secp256r1Verifier) as _);
verifiers.insert(
SignatureAlgId::SECP256K1ETH,
Box::new(Secp256k1EthVerifier) as _,
);
Self { verifiers }
}
@@ -225,13 +242,13 @@ mod secp256k1 {
use std::sync::{Arc, Mutex};
use k256::ecdsa::{
signature::{SignerMut, Verifier},
Signature as Secp256K1Signature, SigningKey,
signature::{SignerMut, Verifier},
};
use super::*;
/// secp256k1 signer.
/// secp256k1 signer with SHA-256 hashing.
pub struct Secp256k1Signer(Arc<Mutex<SigningKey>>);
impl Secp256k1Signer {
@@ -267,7 +284,7 @@ mod secp256k1 {
}
}
/// secp256k1 verifier.
/// secp256k1 verifier with SHA-256 hashing.
pub struct Secp256k1Verifier;
impl SignatureVerifier for Secp256k1Verifier {
@@ -301,13 +318,13 @@ mod secp256r1 {
use std::sync::{Arc, Mutex};
use p256::ecdsa::{
signature::{SignerMut, Verifier},
Signature as Secp256R1Signature, SigningKey,
signature::{SignerMut, Verifier},
};
use super::*;
/// secp256r1 signer.
/// secp256r1 signer with SHA-256 hashing.
pub struct Secp256r1Signer(Arc<Mutex<SigningKey>>);
impl Secp256r1Signer {
@@ -343,7 +360,7 @@ mod secp256r1 {
}
}
/// secp256r1 verifier.
/// secp256r1 verifier with SHA-256 hashing.
pub struct Secp256r1Verifier;
impl SignatureVerifier for Secp256r1Verifier {
@@ -373,63 +390,208 @@ mod secp256r1 {
pub use secp256r1::{Secp256r1Signer, Secp256r1Verifier};
mod secp256k1eth {
use std::sync::{Arc, Mutex};
use k256::ecdsa::{
Signature as Secp256K1Signature, SigningKey, signature::hazmat::PrehashVerifier,
};
use tiny_keccak::{Hasher, Keccak};
use super::*;
/// secp256k1eth signer.
pub struct Secp256k1EthSigner(Arc<Mutex<SigningKey>>);
impl Secp256k1EthSigner {
/// Creates a new secp256k1eth signer with the provided signing key.
pub fn new(key: &[u8]) -> Result<Self, SignerError> {
SigningKey::from_slice(key)
.map(|key| Self(Arc::new(Mutex::new(key))))
.map_err(|_| SignerError("invalid key".to_string()))
}
}
impl Signer for Secp256k1EthSigner {
fn alg_id(&self) -> SignatureAlgId {
SignatureAlgId::SECP256K1ETH
}
fn sign(&self, msg: &[u8]) -> Result<Signature, SignatureError> {
// Pre-hash the message.
let mut hasher = Keccak::v256();
hasher.update(msg);
let mut output = vec![0; 32];
hasher.finalize(&mut output);
let (signature, recid) = self
.0
.lock()
.unwrap()
.sign_prehash_recoverable(&output)
.map_err(|_| SignatureError("error in sign_prehash_recoverable".to_string()))?;
let mut sig = signature.to_vec();
let recid = recid.to_byte();
// Based on Ethereum Yellow Paper Appendix F, only values 0 and 1 are valid.
if recid > 1 {
return Err(SignatureError(format!(
"expected recovery id 0 or 1, got {recid:?}"
)));
}
// `ecrecover` expects that 0 and 1 are mapped to 27 and 28.
sig.push(recid + 27);
Ok(Signature {
alg: SignatureAlgId::SECP256K1ETH,
data: sig,
})
}
fn verifying_key(&self) -> VerifyingKey {
let key = self.0.lock().unwrap().verifying_key().to_sec1_bytes();
VerifyingKey {
alg: KeyAlgId::K256,
data: key.to_vec(),
}
}
}
/// secp256k1eth verifier.
pub struct Secp256k1EthVerifier;
impl SignatureVerifier for Secp256k1EthVerifier {
fn alg_id(&self) -> SignatureAlgId {
SignatureAlgId::SECP256K1ETH
}
fn verify(&self, key: &VerifyingKey, msg: &[u8], sig: &[u8]) -> Result<(), SignatureError> {
if key.alg != KeyAlgId::K256 {
return Err(SignatureError("key algorithm is not k256".to_string()));
}
if sig.len() != 65 {
return Err(SignatureError(
"ethereum signature length must be 65 bytes".to_string(),
));
}
let key = k256::ecdsa::VerifyingKey::from_sec1_bytes(&key.data)
.map_err(|_| SignatureError("invalid k256 key".to_string()))?;
// `sig` is a concatenation of `r || s || v`. We ignore `v` since it is only
// useful when recovering the verifying key.
let sig = Secp256K1Signature::from_slice(&sig[..64])
.map_err(|_| SignatureError("invalid secp256k1 signature".to_string()))?;
// Pre-hash the message.
let mut hasher = Keccak::v256();
hasher.update(msg);
let mut output = vec![0; 32];
hasher.finalize(&mut output);
key.verify_prehash(&output, &sig).map_err(|_| {
SignatureError("secp256k1 signature verification failed".to_string())
})?;
Ok(())
}
}
}
pub use secp256k1eth::{Secp256k1EthSigner, Secp256k1EthVerifier};
#[cfg(test)]
mod test {
use super::*;
use rand_core::OsRng;
use alloy_primitives::utils::eip191_message;
use alloy_signer::SignerSync;
use alloy_signer_local::PrivateKeySigner;
use rand06_compat::Rand0_6CompatExt;
use rstest::{fixture, rstest};
use super::*;
#[fixture]
#[once]
fn secp256k1_signer() -> Secp256k1Signer {
let signing_key = k256::ecdsa::SigningKey::random(&mut OsRng);
Secp256k1Signer::new(&signing_key.to_bytes()).unwrap()
fn secp256k1_pair() -> (Box<dyn Signer>, Box<dyn SignatureVerifier>) {
let signing_key = k256::ecdsa::SigningKey::random(&mut rand::rng().compat());
(
Box::new(Secp256k1Signer::new(&signing_key.to_bytes()).unwrap()),
Box::new(Secp256k1Verifier {}),
)
}
#[fixture]
#[once]
fn secp256r1_signer() -> Secp256r1Signer {
let signing_key = p256::ecdsa::SigningKey::random(&mut OsRng);
Secp256r1Signer::new(&signing_key.to_bytes()).unwrap()
fn secp256r1_pair() -> (Box<dyn Signer>, Box<dyn SignatureVerifier>) {
let signing_key = p256::ecdsa::SigningKey::random(&mut rand::rng().compat());
(
Box::new(Secp256r1Signer::new(&signing_key.to_bytes()).unwrap()),
Box::new(Secp256r1Verifier {}),
)
}
#[fixture]
#[once]
fn secp256k1eth_pair() -> (Box<dyn Signer>, Box<dyn SignatureVerifier>) {
let signing_key = k256::ecdsa::SigningKey::random(&mut rand::rng().compat());
(
Box::new(Secp256k1EthSigner::new(&signing_key.to_bytes()).unwrap()),
Box::new(Secp256k1EthVerifier {}),
)
}
#[rstest]
fn test_secp256k1_success(secp256k1_signer: &Secp256k1Signer) {
assert_eq!(secp256k1_signer.alg_id(), SignatureAlgId::SECP256K1);
#[case::r1(secp256r1_pair(), SignatureAlgId::SECP256R1)]
#[case::k1(secp256k1_pair(), SignatureAlgId::SECP256K1)]
#[case::k1eth(secp256k1eth_pair(), SignatureAlgId::SECP256K1ETH)]
fn test_success(
#[case] pair: (Box<dyn Signer>, Box<dyn SignatureVerifier>),
#[case] alg: SignatureAlgId,
) {
let (signer, verifier) = pair;
assert_eq!(signer.alg_id(), alg);
let msg = "test payload";
let signature = secp256k1_signer.sign(msg.as_bytes()).unwrap();
let verifying_key = secp256k1_signer.verifying_key();
let signature = signer.sign(msg.as_bytes()).unwrap();
let verifying_key = signer.verifying_key();
assert_eq!(verifier.alg_id(), alg);
let result = verifier.verify(&verifying_key, msg.as_bytes(), &signature.data);
assert!(result.is_ok());
}
#[rstest]
#[case::r1(secp256r1_pair())]
#[case::k1eth(secp256k1eth_pair())]
fn test_wrong_signer(#[case] pair: (Box<dyn Signer>, Box<dyn SignatureVerifier>)) {
let (signer, _) = pair;
let msg = "test payload";
let signature = signer.sign(msg.as_bytes()).unwrap();
let verifying_key = signer.verifying_key();
let verifier = Secp256k1Verifier {};
assert_eq!(verifier.alg_id(), SignatureAlgId::SECP256K1);
let result = verifier.verify(&verifying_key, msg.as_bytes(), &signature.data);
assert!(result.is_ok());
assert!(result.is_err());
}
#[rstest]
fn test_secp256r1_success(secp256r1_signer: &Secp256r1Signer) {
assert_eq!(secp256r1_signer.alg_id(), SignatureAlgId::SECP256R1);
let msg = "test payload";
let signature = secp256r1_signer.sign(msg.as_bytes()).unwrap();
let verifying_key = secp256r1_signer.verifying_key();
let verifier = Secp256r1Verifier {};
assert_eq!(verifier.alg_id(), SignatureAlgId::SECP256R1);
let result = verifier.verify(&verifying_key, msg.as_bytes(), &signature.data);
assert!(result.is_ok());
}
#[rstest]
#[case::wrong_signer(&secp256r1_signer(), false, false)]
#[case::corrupted_signature(&secp256k1_signer(), true, false)]
#[case::wrong_signature(&secp256k1_signer(), false, true)]
#[case::corrupted_signature_r1(secp256r1_pair(), true, false)]
#[case::corrupted_signature_k1(secp256k1_pair(), true, false)]
#[case::corrupted_signature_k1eth(secp256k1eth_pair(), true, false)]
#[case::wrong_signature_r1(secp256r1_pair(), false, true)]
#[case::wrong_signature_k1(secp256k1_pair(), false, true)]
#[case::wrong_signature_k1eth(secp256k1eth_pair(), false, true)]
fn test_failure(
#[case] signer: &dyn Signer,
#[case] pair: (Box<dyn Signer>, Box<dyn SignatureVerifier>),
#[case] corrupted_signature: bool,
#[case] wrong_signature: bool,
) {
let (signer, verifier) = pair;
let msg = "test payload";
let mut signature = signer.sign(msg.as_bytes()).unwrap();
let verifying_key = signer.verifying_key();
@@ -442,8 +604,32 @@ mod test {
signature = signer.sign("different payload".as_bytes()).unwrap();
}
let verifier = Secp256k1Verifier {};
let result = verifier.verify(&verifying_key, msg.as_bytes(), &signature.data);
assert!(result.is_err());
}
#[test]
// Tests secp256k1eth signatures against a reference implementation.
fn test_secp256k1eth_sig() {
// An arbitrary signing key.
let sk = vec![1; 32];
let mut msg = "test message".as_bytes().to_vec();
let signer: Secp256k1EthSigner = Secp256k1EthSigner::new(&sk).unwrap();
// Testing multiple signatures.
for i in 0..10 {
msg.push(i);
// Convert to EIP-191 since the reference signer can't sign raw bytes.
let sig = signer.sign(&eip191_message(&msg)).unwrap().data;
assert_eq!(sig, reference_eth_signature(&sk, &msg));
}
}
// Returns a reference Ethereum signature.
fn reference_eth_signature(sk: &[u8], msg: &[u8]) -> Vec<u8> {
let signer = PrivateKeySigner::from_slice(sk).unwrap();
signer.sign_message_sync(msg).unwrap().as_bytes().to_vec()
}
}

View File

@@ -1,13 +1,18 @@
use tlsn_core::{
attestation::{Attestation, AttestationConfig},
connection::{HandshakeData, HandshakeDataV1_2},
fixtures::{self, encoder_seed, ConnectionFixture},
hash::Blake3,
use tlsn_attestation::{
Attestation, AttestationConfig, CryptoProvider,
presentation::PresentationOutput,
request::{Request, RequestConfig},
signing::SignatureAlgId,
transcript::{encoding::EncodingTree, Direction, Transcript, TranscriptCommitConfigBuilder},
CryptoProvider,
};
use tlsn_core::{
connection::{CertBinding, CertBindingV1_2},
fixtures::{self, ConnectionFixture, encoder_secret},
hash::Blake3,
transcript::{
Direction, Transcript, TranscriptCommitConfigBuilder, TranscriptCommitment,
TranscriptSecret,
encoding::{EncodingCommitment, EncodingTree},
},
};
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
@@ -31,10 +36,10 @@ fn test_api() {
server_cert_data,
} = ConnectionFixture::tlsnotary(transcript.length());
let HandshakeData::V1_2(HandshakeDataV1_2 {
let CertBinding::V1_2(CertBindingV1_2 {
server_ephemeral_key,
..
}) = server_cert_data.handshake.clone()
}) = server_cert_data.binding.clone()
else {
unreachable!()
};
@@ -54,18 +59,25 @@ fn test_api() {
&Blake3::default(),
transcripts_commitment_config.iter_encoding(),
&encodings_provider,
&transcript.length(),
)
.unwrap();
let encoding_commitment = EncodingCommitment {
root: encoding_tree.root(),
secret: encoder_secret(),
};
let request_config = RequestConfig::default();
let mut request_builder = Request::builder(&request_config);
request_builder
.server_name(server_name.clone())
.server_cert_data(server_cert_data)
.handshake_data(server_cert_data)
.transcript(transcript)
.encoding_tree(encoding_tree);
.transcript_commitments(
vec![TranscriptSecret::Encoding(encoding_tree)],
vec![TranscriptCommitment::Encoding(encoding_commitment.clone())],
);
let (request, secrets) = request_builder.build(&provider).unwrap();
@@ -84,7 +96,7 @@ fn test_api() {
.connection_info(connection_info.clone())
// Server key Notary received during handshake
.server_ephemeral_key(server_ephemeral_key)
.encoding_seed(encoder_seed().to_vec());
.transcript_commitments(vec![TranscriptCommitment::Encoding(encoding_commitment)]);
let attestation = attestation_builder.build(&provider).unwrap();

View File

@@ -1,70 +0,0 @@
[package]
edition = "2021"
name = "tlsn-benches"
publish = false
version = "0.0.0"
[features]
default = []
# Enables benchmarks in the browser.
browser-bench = ["tlsn-benches-browser-native"]
[dependencies]
mpz-common = { workspace = true }
mpz-core = { workspace = true }
mpz-garble = { workspace = true }
mpz-ot = { workspace = true, features = ["ideal"] }
tlsn-benches-library = { workspace = true }
tlsn-benches-browser-native = { workspace = true, optional = true}
tlsn-common = { workspace = true }
tlsn-core = { workspace = true }
tlsn-hmac-sha256 = { workspace = true }
tlsn-prover = { workspace = true }
tlsn-server-fixture = { workspace = true }
tlsn-server-fixture-certs = { workspace = true }
tlsn-tls-core = { workspace = true }
tlsn-verifier = { workspace = true }
anyhow = { workspace = true }
async-trait = { workspace = true }
charming = {version = "0.3.1", features = ["ssr"]}
csv = "1.3.0"
dhat = { version = "0.3.3" }
env_logger = { version = "0.6.0", default-features = false }
futures = { workspace = true }
serde = { workspace = true }
tokio = { workspace = true, features = [
"rt",
"rt-multi-thread",
"macros",
"net",
"io-std",
"fs",
] }
tokio-util = { workspace = true }
toml = "0.8.11"
tracing-subscriber = {workspace = true, features = ["env-filter"]}
[[bin]]
name = "bench"
path = "bin/bench.rs"
[[bin]]
name = "prover"
path = "bin/prover.rs"
[[bin]]
name = "prover-memory"
path = "bin/prover_memory.rs"
[[bin]]
name = "verifier"
path = "bin/verifier.rs"
[[bin]]
name = "verifier-memory"
path = "bin/verifier_memory.rs"
[[bin]]
name = "plot"
path = "bin/plot.rs"

View File

@@ -1,53 +0,0 @@
# TLSNotary bench utilities
This crate provides utilities for benchmarking protocol performance under various network conditions and usage patterns.
As the protocol is mostly IO bound, it's important to track how it performs in low bandwidth and/or high latency environments. To do this we set up temporary network namespaces and add virtual ethernet interfaces which we can control using the linux `tc` (Traffic Control) utility.
## Configuration
See the `bench.toml` file for benchmark configurations.
## Preliminaries
To run the benchmarks you will need `iproute2` installed, eg:
```sh
sudo apt-get install iproute2 -y
```
## Running benches
Running the benches requires root privileges because they will set up virtual interfaces. The script is designed to fully clean up when the benches are done, but run them at your own risk.
#### Native benches
Make sure you're in the `crates/benches/` directory, build the binaries, and then run the script:
```sh
cd binary
cargo build --release
sudo ./bench.sh
```
#### Browser benches
(Note, we recommend running browser benches inside a docker container (see docker.md) to avoid
facing incompatibility issues observed in the latest versions of Chrome.)
With a Chrome browser installed on your system, make sure you're in the `crates/benches/`
directory, build the wasm module, build the binaries, and then run the script:
```sh
cd browser/wasm
rustup run nightly wasm-pack build --release --target web
cd ../../binary
cargo build --release --features browser-bench
sudo ./bench.sh
```
## Metrics
After you run the benches you will see a `metrics.csv` file in the working directory. It will be owned by `root`, so you probably want to run
```sh
sudo chown $USER metrics.csv
```

View File

@@ -1,16 +0,0 @@
#! /bin/bash
# Check if we are running as root.
if [ "$EUID" -ne 0 ]; then
echo "This script must be run as root"
exit
fi
# Run the benchmark binary.
../../../target/release/bench
# Run the benchmark binary in memory profiling mode.
../../../target/release/bench --memory-profiling
# Plot the results.
../../../target/release/plot metrics.csv

View File

@@ -1,45 +0,0 @@
[[benches]]
name = "latency"
upload = 250
upload-delay = [10, 25, 50]
download = 250
download-delay = [10, 25, 50]
upload-size = 1024
download-size = 4096
defer-decryption = true
memory-profile = false
[[benches]]
name = "download_bandwidth"
upload = 250
upload-delay = 25
download = [10, 25, 50, 100, 250]
download-delay = 25
upload-size = 1024
download-size = 4096
defer-decryption = true
memory-profile = false
[[benches]]
name = "upload_bandwidth"
upload = [10, 25, 50, 100, 250]
upload-delay = 25
download = 250
download-delay = 25
upload-size = 1024
download-size = 4096
defer-decryption = [false, true]
memory-profile = false
[[benches]]
name = "download_volume"
upload = 250
upload-delay = 25
download = 250
download-delay = 25
upload-size = 1024
# Setting download-size higher than 45000 will cause a `Maximum call stack size exceeded`
# error in the browser.
download-size = [1024, 4096, 16384, 45000]
defer-decryption = true
memory-profile = true

View File

@@ -1,55 +0,0 @@
FROM rust AS builder
WORKDIR /usr/src/tlsn
COPY . .
ARG BENCH_TYPE=native
RUN \
if [ "$BENCH_TYPE" = "browser" ]; then \
# ring's build script needs clang.
apt update && apt install -y clang; \
rustup install nightly; \
rustup component add rust-src --toolchain nightly; \
cargo install wasm-pack; \
cd crates/benches/browser/wasm; \
rustup run nightly wasm-pack build --release --target web; \
cd ../../binary; \
cargo build --release --features browser-bench; \
else \
cd crates/benches/binary; \
cargo build --release; \
fi
FROM debian:latest
ARG BENCH_TYPE=native
RUN apt update && apt upgrade -y && apt install -y --no-install-recommends \
iproute2 \
sudo
RUN \
if [ "$BENCH_TYPE" = "browser" ]; then \
# Using Chromium since Chrome for Linux is not available on ARM.
apt install -y chromium; \
fi
RUN apt clean && rm -rf /var/lib/apt/lists/*
COPY --from=builder \
["/usr/src/tlsn/target/release/bench", \
"/usr/src/tlsn/target/release/prover", \
"/usr/src/tlsn/target/release/prover-memory", \
"/usr/src/tlsn/target/release/verifier", \
"/usr/src/tlsn/target/release/verifier-memory", \
"/usr/src/tlsn/target/release/plot", \
"/usr/local/bin/"]
ENV PROVER_PATH="/usr/local/bin/prover"
ENV VERIFIER_PATH="/usr/local/bin/verifier"
ENV PROVER_MEMORY_PATH="/usr/local/bin/prover-memory"
ENV VERIFIER_MEMORY_PATH="/usr/local/bin/verifier-memory"
VOLUME [ "/benches" ]
WORKDIR "/benches"
CMD ["/bin/bash", "-c", "bench && bench --memory-profiling && plot /benches/metrics.csv && cat /benches/metrics.csv"]

View File

@@ -1,62 +0,0 @@
use std::{env, process::Command, thread, time::Duration};
use tlsn_benches::{clean_up, set_up};
fn main() {
let args: Vec<String> = env::args().collect();
let is_memory_profiling = args.contains(&"--memory-profiling".to_string());
let (prover_path, verifier_path) = if is_memory_profiling {
(
std::env::var("PROVER_MEMORY_PATH")
.unwrap_or_else(|_| "../../../target/release/prover-memory".to_string()),
std::env::var("VERIFIER_MEMORY_PATH")
.unwrap_or_else(|_| "../../../target/release/verifier-memory".to_string()),
)
} else {
(
std::env::var("PROVER_PATH")
.unwrap_or_else(|_| "../../../target/release/prover".to_string()),
std::env::var("VERIFIER_PATH")
.unwrap_or_else(|_| "../../../target/release/verifier".to_string()),
)
};
if let Err(e) = set_up() {
println!("Error setting up: {}", e);
clean_up();
}
// Run prover and verifier binaries in parallel.
let Ok(mut verifier) = Command::new("ip")
.arg("netns")
.arg("exec")
.arg("verifier-ns")
.arg(verifier_path)
.spawn()
else {
println!("Failed to start verifier");
return clean_up();
};
// Allow the verifier some time to start listening before the prover attempts to
// connect.
thread::sleep(Duration::from_secs(1));
let Ok(mut prover) = Command::new("ip")
.arg("netns")
.arg("exec")
.arg("prover-ns")
.arg(prover_path)
.spawn()
else {
println!("Failed to start prover");
return clean_up();
};
// Wait for both to finish.
_ = prover.wait();
_ = verifier.wait();
clean_up();
}

View File

@@ -1,248 +0,0 @@
use tlsn_benches::metrics::Metrics;
use charming::{
component::{
Axis, DataView, Feature, Legend, Restore, SaveAsImage, Title, Toolbox, ToolboxDataZoom,
},
element::{NameLocation, Orient, Tooltip, Trigger},
series::{Line, Scatter},
theme::Theme,
Chart, HtmlRenderer,
};
use csv::Reader;
const THEME: Theme = Theme::Default;
fn main() -> Result<(), Box<dyn std::error::Error>> {
let csv_file = std::env::args()
.nth(1)
.expect("Usage: plot <path_to_csv_file>");
let mut rdr = Reader::from_path(csv_file)?;
// Prepare data for plotting.
let all_data: Vec<Metrics> = rdr
.deserialize::<Metrics>()
.collect::<Result<Vec<_>, _>>()?; // Attempt to collect all results, return an error if any fail.
let _chart = runtime_vs_latency(&all_data)?;
let _chart = runtime_vs_bandwidth(&all_data)?;
// Memory profiling is not compatible with browser benches.
if cfg!(not(feature = "browser-bench")) {
let _chart = download_size_vs_memory(&all_data)?;
}
Ok(())
}
fn download_size_vs_memory(all_data: &[Metrics]) -> Result<Chart, Box<dyn std::error::Error>> {
const TITLE: &str = "Download Size vs Memory";
let prover_kind: String = all_data
.first()
.map(|s| s.kind.clone().into())
.unwrap_or_default();
let data: Vec<Vec<f32>> = all_data
.iter()
.filter(|record| record.name == "download_volume" && record.heap_max_bytes.is_some())
.map(|record| {
vec![
record.download_size as f32,
record.heap_max_bytes.unwrap() as f32 / 1024.0 / 1024.0,
]
})
.collect();
// https://github.com/yuankunzhang/charming
let chart = Chart::new()
.title(
Title::new()
.text(TITLE)
.subtext(format!("{} Prover", prover_kind)),
)
.tooltip(Tooltip::new().trigger(Trigger::Axis))
.legend(Legend::new().orient(Orient::Vertical))
.toolbox(
Toolbox::new().show(true).feature(
Feature::new()
.save_as_image(SaveAsImage::new())
.restore(Restore::new())
.data_zoom(ToolboxDataZoom::new().y_axis_index("none"))
.data_view(DataView::new().read_only(false)),
),
)
.x_axis(
Axis::new()
.scale(true)
.name("Download Size (bytes)")
.name_gap(30)
.name_location(NameLocation::Center),
)
.y_axis(
Axis::new()
.scale(true)
.name("Heap Memory (Mbytes)")
.name_gap(40)
.name_location(NameLocation::Middle),
)
.series(
Scatter::new()
.name("Allocated Heap Memory")
.symbol_size(10)
.data(data),
);
// Save the chart as HTML file.
HtmlRenderer::new(TITLE, 1000, 800)
.theme(THEME)
.save(&chart, "download_size_vs_memory.html")
.unwrap();
Ok(chart)
}
fn runtime_vs_latency(all_data: &[Metrics]) -> Result<Chart, Box<dyn std::error::Error>> {
const TITLE: &str = "Runtime vs Latency";
let prover_kind: String = all_data
.first()
.map(|s| s.kind.clone().into())
.unwrap_or_default();
let data: Vec<Vec<f32>> = all_data
.iter()
.filter(|record| record.name == "latency")
.map(|record| {
let total_delay = record.upload_delay + record.download_delay; // Calculate the sum of upload and download delays.
vec![total_delay as f32, record.runtime as f32]
})
.collect();
// https://github.com/yuankunzhang/charming
let chart = Chart::new()
.title(
Title::new()
.text(TITLE)
.subtext(format!("{} Prover", prover_kind)),
)
.tooltip(Tooltip::new().trigger(Trigger::Axis))
.legend(Legend::new().orient(Orient::Vertical))
.toolbox(
Toolbox::new().show(true).feature(
Feature::new()
.save_as_image(SaveAsImage::new())
.restore(Restore::new())
.data_zoom(ToolboxDataZoom::new().y_axis_index("none"))
.data_view(DataView::new().read_only(false)),
),
)
.x_axis(
Axis::new()
.scale(true)
.name("Upload + Download Latency (ms)")
.name_location(NameLocation::Center),
)
.y_axis(
Axis::new()
.scale(true)
.name("Runtime (s)")
.name_location(NameLocation::Middle),
)
.series(
Scatter::new()
.name("Combined Latency")
.symbol_size(10)
.data(data),
);
// Save the chart as HTML file.
HtmlRenderer::new(TITLE, 1000, 800)
.theme(THEME)
.save(&chart, "runtime_vs_latency.html")
.unwrap();
Ok(chart)
}
fn runtime_vs_bandwidth(all_data: &[Metrics]) -> Result<Chart, Box<dyn std::error::Error>> {
const TITLE: &str = "Runtime vs Bandwidth";
let prover_kind: String = all_data
.first()
.map(|s| s.kind.clone().into())
.unwrap_or_default();
let download_data: Vec<Vec<f32>> = all_data
.iter()
.filter(|record| record.name == "download_bandwidth")
.map(|record| vec![record.download as f32, record.runtime as f32])
.collect();
let upload_deferred_data: Vec<Vec<f32>> = all_data
.iter()
.filter(|record| record.name == "upload_bandwidth" && record.defer_decryption)
.map(|record| vec![record.upload as f32, record.runtime as f32])
.collect();
let upload_non_deferred_data: Vec<Vec<f32>> = all_data
.iter()
.filter(|record| record.name == "upload_bandwidth" && !record.defer_decryption)
.map(|record| vec![record.upload as f32, record.runtime as f32])
.collect();
// https://github.com/yuankunzhang/charming
let chart = Chart::new()
.title(
Title::new()
.text(TITLE)
.subtext(format!("{} Prover", prover_kind)),
)
.tooltip(Tooltip::new().trigger(Trigger::Axis))
.legend(Legend::new().orient(Orient::Vertical))
.toolbox(
Toolbox::new().show(true).feature(
Feature::new()
.save_as_image(SaveAsImage::new())
.restore(Restore::new())
.data_zoom(ToolboxDataZoom::new().y_axis_index("none"))
.data_view(DataView::new().read_only(false)),
),
)
.x_axis(
Axis::new()
.scale(true)
.name("Bandwidth (Mbps)")
.name_location(NameLocation::Center),
)
.y_axis(
Axis::new()
.scale(true)
.name("Runtime (s)")
.name_location(NameLocation::Middle),
)
.series(
Line::new()
.name("Download bandwidth")
.symbol_size(10)
.data(download_data),
)
.series(
Line::new()
.name("Upload bandwidth (deferred decryption)")
.symbol_size(10)
.data(upload_deferred_data),
)
.series(
Line::new()
.name("Upload bandwidth")
.symbol_size(10)
.data(upload_non_deferred_data),
);
// Save the chart as HTML file.
HtmlRenderer::new(TITLE, 1000, 800)
.theme(THEME)
.save(&chart, "runtime_vs_bandwidth.html")
.unwrap();
Ok(chart)
}

View File

@@ -1,8 +0,0 @@
//! A Prover without memory profiling.
use tlsn_benches::prover_main::prover_main;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
prover_main(false).await
}

View File

@@ -1,15 +0,0 @@
//! A Prover with memory profiling.
use tlsn_benches::prover_main::prover_main;
#[global_allocator]
static ALLOC: dhat::Alloc = dhat::Alloc;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
if cfg!(feature = "browser-bench") {
// Memory profiling is not compatible with browser benches.
return Ok(());
}
prover_main(true).await
}

View File

@@ -1,8 +0,0 @@
//! A Verifier without memory profiling.
use tlsn_benches::verifier_main::verifier_main;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
verifier_main(false).await
}

View File

@@ -1,15 +0,0 @@
//! A Verifier with memory profiling.
use tlsn_benches::verifier_main::verifier_main;
#[global_allocator]
static ALLOC: dhat::Alloc = dhat::Alloc;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
if cfg!(feature = "browser-bench") {
// Memory profiling is not compatible with browser benches.
return Ok(());
}
verifier_main(true).await
}

View File

@@ -1,13 +0,0 @@
# Run the TLSN benches with Docker
In the root folder of this repository, run:
```
# Change to BENCH_TYPE=browser if you want benchmarks to run in the browser.
docker build -t tlsn-bench . -f ./crates/benches/binary/benches.Dockerfile --build-arg BENCH_TYPE=native
```
Next run the benches with:
```
docker run -it --privileged -v ./crates/benches/binary:/benches tlsn-bench
```
The `--privileged` parameter is required because this test bench needs permission to create networks with certain parameters

View File

@@ -1,123 +0,0 @@
use serde::{Deserialize, Serialize};
#[derive(Deserialize)]
#[serde(untagged)]
pub enum Field<T> {
Single(T),
Multiple(Vec<T>),
}
#[derive(Deserialize)]
pub struct Config {
pub benches: Vec<Bench>,
}
#[derive(Deserialize)]
pub struct Bench {
pub name: String,
pub upload: Field<usize>,
#[serde(rename = "upload-delay")]
pub upload_delay: Field<usize>,
pub download: Field<usize>,
#[serde(rename = "download-delay")]
pub download_delay: Field<usize>,
#[serde(rename = "upload-size")]
pub upload_size: Field<usize>,
#[serde(rename = "download-size")]
pub download_size: Field<usize>,
#[serde(rename = "defer-decryption")]
pub defer_decryption: Field<bool>,
#[serde(rename = "memory-profile")]
pub memory_profile: Field<bool>,
}
impl Bench {
/// Flattens the config into a list of instances
pub fn flatten(self) -> Vec<BenchInstance> {
let mut instances = vec![];
let upload = match self.upload {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
let upload_delay = match self.upload_delay {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
let download = match self.download {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
let download_latency = match self.download_delay {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
let upload_size = match self.upload_size {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
let download_size = match self.download_size {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
let defer_decryption = match self.defer_decryption {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
let memory_profile = match self.memory_profile {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
for u in upload {
for ul in &upload_delay {
for d in &download {
for dl in &download_latency {
for us in &upload_size {
for ds in &download_size {
for dd in &defer_decryption {
for mp in &memory_profile {
instances.push(BenchInstance {
name: self.name.clone(),
upload: u,
upload_delay: *ul,
download: *d,
download_delay: *dl,
upload_size: *us,
download_size: *ds,
defer_decryption: *dd,
memory_profile: *mp,
});
}
}
}
}
}
}
}
}
instances
}
}
/// A single, fully-concrete benchmark run produced by `Bench::flatten`.
#[derive(Debug, Clone, Serialize)]
pub struct BenchInstance {
    /// Name of the bench this instance belongs to.
    pub name: String,
    /// Upload bandwidth.
    pub upload: usize,
    /// Upload latency.
    pub upload_delay: usize,
    /// Download bandwidth.
    pub download: usize,
    /// Download latency.
    pub download_delay: usize,
    /// Bytes sent to the server.
    pub upload_size: usize,
    /// Bytes received from the server.
    pub download_size: usize,
    /// Whether deferred decryption is used.
    pub defer_decryption: bool,
    /// Whether this instance should be used for memory profiling.
    pub memory_profile: bool,
}

View File

@@ -1,273 +0,0 @@
pub mod config;
pub mod metrics;
mod preprocess;
pub mod prover;
pub mod prover_main;
pub mod verifier_main;
use std::{
io,
process::{Command, Stdio},
};
/// Network namespace in which the prover runs.
pub const PROVER_NAMESPACE: &str = "prover-ns";
/// Name of the prover's end of the veth pair.
pub const PROVER_INTERFACE: &str = "prover-veth";
/// Address (CIDR notation) assigned to the prover's interface.
pub const PROVER_SUBNET: &str = "10.10.1.0/24";
/// Network namespace in which the verifier runs.
pub const VERIFIER_NAMESPACE: &str = "verifier-ns";
/// Name of the verifier's end of the veth pair.
pub const VERIFIER_INTERFACE: &str = "verifier-veth";
/// Address (CIDR notation) assigned to the verifier's interface.
pub const VERIFIER_SUBNET: &str = "10.10.1.1/24";
/// Sets up the benchmark network: two namespaces (prover and verifier)
/// connected by a veth pair, with addresses assigned and default routes
/// installed.
pub fn set_up() -> io::Result<()> {
    // Create network namespaces
    create_network_namespace(PROVER_NAMESPACE)?;
    create_network_namespace(VERIFIER_NAMESPACE)?;
    // Create veth pair and attach to namespaces
    create_veth_pair(
        PROVER_NAMESPACE,
        PROVER_INTERFACE,
        VERIFIER_NAMESPACE,
        VERIFIER_INTERFACE,
    )?;
    // Set devices up
    set_device_up(PROVER_NAMESPACE, PROVER_INTERFACE)?;
    set_device_up(VERIFIER_NAMESPACE, VERIFIER_INTERFACE)?;
    // Bring up the loopback interface.
    set_device_up(PROVER_NAMESPACE, "lo")?;
    set_device_up(VERIFIER_NAMESPACE, "lo")?;
    // Assign IPs
    assign_ip_to_interface(PROVER_NAMESPACE, PROVER_INTERFACE, PROVER_SUBNET)?;
    assign_ip_to_interface(VERIFIER_NAMESPACE, VERIFIER_INTERFACE, VERIFIER_SUBNET)?;
    // Set default routes. The gateway passed is the namespace's own subnet
    // string with the "/prefix" stripped — NOTE(review): for the prover this
    // yields "10.10.1.0" (the network address); confirm this is intended.
    set_default_route(
        PROVER_NAMESPACE,
        PROVER_INTERFACE,
        PROVER_SUBNET.split('/').next().unwrap(),
    )?;
    set_default_route(
        VERIFIER_NAMESPACE,
        VERIFIER_INTERFACE,
        VERIFIER_SUBNET.split('/').next().unwrap(),
    )?;
    Ok(())
}
/// Tears down the network set up by `set_up`: removes the veth pair and
/// deletes both namespaces.
///
/// Failures are reported on stdout and never propagated, so clean-up always
/// runs to completion.
pub fn clean_up() {
    // Delete interface pair.
    let deleted = Command::new("ip")
        .args([
            "netns",
            "exec",
            PROVER_NAMESPACE,
            "ip",
            "link",
            "delete",
            PROVER_INTERFACE,
        ])
        .status();
    if let Err(e) = deleted {
        println!("Error deleting interface {}: {}", PROVER_INTERFACE, e);
    }

    // Delete both namespaces.
    for namespace in [PROVER_NAMESPACE, VERIFIER_NAMESPACE] {
        if let Err(e) = Command::new("ip").args(["netns", "del", namespace]).status() {
            println!("Error deleting namespace {}: {}", namespace, e);
        }
    }
}
/// Sets the interface parameters.
///
/// Must be run in the correct namespace.
///
/// # Arguments
///
/// * `egress` - The egress bandwidth in mbps.
/// * `burst` - The burst in mbps.
/// * `delay` - The delay in ms.
pub fn set_interface(interface: &str, egress: usize, burst: usize, delay: usize) -> io::Result<()> {
    // Drop any pre-existing qdisc configuration on the device.
    let cleared = Command::new("tc")
        .args(["qdisc", "del", "dev", interface, "root"])
        .stdout(Stdio::piped())
        .output()?;

    // "handle of zero" just means there was nothing to delete; any other
    // failure is a real error.
    let nothing_to_delete =
        cleared.stderr == "Error: Cannot delete qdisc with handle of zero.\n".as_bytes();
    if !nothing_to_delete && !cleared.status.success() {
        return Err(io::Error::other("Failed to clear rules"));
    }

    // Shape egress bandwidth with a token bucket filter.
    Command::new("tc")
        .args([
            "qdisc", "add", "dev", interface, "root", "handle", "1:", "tbf", "rate",
        ])
        .arg(format!("{}mbit", egress))
        .arg("burst")
        .arg(format!("{}mbit", burst))
        .args(["latency", "60s"])
        .status()?;

    // Add the configured delay with netem.
    Command::new("tc")
        .args([
            "qdisc", "add", "dev", interface, "parent", "1:1", "handle", "10:", "netem", "delay",
        ])
        .arg(format!("{}ms", delay))
        .status()?;

    Ok(())
}
/// Create a network namespace with the given name if it does not already exist.
///
/// NOTE(review): existence is checked with a naive substring search over
/// `ip netns list` output; fine as long as one namespace name is not a
/// substring of another.
fn create_network_namespace(name: &str) -> io::Result<()> {
    // Check if namespace already exists.
    let list = Command::new("ip").args(["netns", "list"]).output()?;
    if list.stdout.windows(name.len()).any(|w| w == name.as_bytes()) {
        println!("Namespace {} already exists", name);
        return Ok(());
    }

    println!("Creating namespace {}", name);
    Command::new("ip").args(["netns", "add", name]).status()?;
    Ok(())
}
/// Creates a veth pair with the given interface names and moves each end into
/// its namespace. Does nothing if either interface already exists.
fn create_veth_pair(
    left_namespace: &str,
    left_interface: &str,
    right_namespace: &str,
    right_interface: &str,
) -> io::Result<()> {
    // Check if interfaces are already present in namespaces
    if is_interface_present_in_namespace(left_namespace, left_interface)?
        || is_interface_present_in_namespace(right_namespace, right_interface)?
    {
        println!("Virtual interface already exists.");
        return Ok(());
    }
    // Create veth pair
    Command::new("ip")
        .args([
            "link",
            "add",
            left_interface,
            "type",
            "veth",
            "peer",
            "name",
            right_interface,
        ])
        .status()?;
    println!(
        "Created veth pair {} and {}",
        left_interface, right_interface
    );
    // Attach veth pair to namespaces
    attach_interface_to_namespace(left_namespace, left_interface)?;
    attach_interface_to_namespace(right_namespace, right_interface)?;
    Ok(())
}
/// Moves `interface` into the network namespace `namespace`.
fn attach_interface_to_namespace(namespace: &str, interface: &str) -> io::Result<()> {
    let args = ["link", "set", interface, "netns", namespace];
    Command::new("ip").args(args).status()?;
    println!("Attached {} to namespace {}", interface, namespace);
    Ok(())
}
/// Installs a default route via gateway `ip` on `interface` inside
/// `namespace`.
fn set_default_route(namespace: &str, interface: &str, ip: &str) -> io::Result<()> {
    let mut route = Command::new("ip");
    route.args([
        "netns", "exec", namespace, "ip", "route", "add", "default", "via", ip, "dev", interface,
    ]);
    route.status()?;
    println!(
        "Set default route for namespace {} ip {} to {}",
        namespace, ip, interface
    );
    Ok(())
}
/// Returns whether `interface` exists inside `namespace`.
///
/// Searches the output of `ip link list dev <iface>` for the interface name.
fn is_interface_present_in_namespace(namespace: &str, interface: &str) -> io::Result<bool> {
    Ok(Command::new("ip")
        .args([
            "netns", "exec", namespace, "ip", "link", "list", "dev", interface,
        ])
        .output()?
        .stdout
        .windows(interface.len())
        .any(|window| window == interface.as_bytes()))
}
/// Brings `interface` up inside `namespace` (`ip link set <iface> up`).
fn set_device_up(namespace: &str, interface: &str) -> io::Result<()> {
    Command::new("ip")
        .args([
            "netns", "exec", namespace, "ip", "link", "set", interface, "up",
        ])
        .status()?;
    Ok(())
}
/// Assigns the address `ip` (CIDR notation) to `interface` inside
/// `namespace`.
fn assign_ip_to_interface(namespace: &str, interface: &str, ip: &str) -> io::Result<()> {
    Command::new("ip")
        .args([
            "netns", "exec", namespace, "ip", "addr", "add", ip, "dev", interface,
        ])
        .status()?;
    Ok(())
}

View File

@@ -1,31 +0,0 @@
use serde::{Deserialize, Serialize};
use tlsn_benches_library::ProverKind;
/// The metrics recorded for a single benchmark instance, serialized to the
/// metrics CSV.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Metrics {
    /// Name of the bench this run belongs to.
    pub name: String,
    /// The kind of the prover, either native or browser.
    pub kind: ProverKind,
    /// Upload bandwidth in Mbps.
    pub upload: usize,
    /// Upload latency in ms.
    pub upload_delay: usize,
    /// Download bandwidth in Mbps.
    pub download: usize,
    /// Download latency in ms.
    pub download_delay: usize,
    /// Total bytes sent to the server.
    pub upload_size: usize,
    /// Total bytes received from the server.
    pub download_size: usize,
    /// Whether deferred decryption was used.
    pub defer_decryption: bool,
    /// The total runtime of the benchmark in seconds.
    pub runtime: u64,
    /// The total amount of data uploaded to the verifier in bytes.
    pub uploaded: u64,
    /// The total amount of data downloaded from the verifier in bytes.
    pub downloaded: u64,
    /// The peak heap memory usage in bytes.
    pub heap_max_bytes: Option<usize>,
}

View File

@@ -1,71 +0,0 @@
use hmac_sha256::{MpcPrf, Prf, PrfConfig, Role};
use mpz_common::executor::test_st_executor;
use mpz_garble::{config::Role as DEAPRole, protocol::deap::DEAPThread, Memory};
use mpz_ot::ideal::ot::ideal_ot;
/// Preprocesses the PRF circuits by running a leader/follower PRF setup over
/// ideal in-memory executors and OTs.
///
/// Building the PRF circuits allocates a lot of memory; running this once up
/// front keeps that one-off cost out of the benchmark measurements.
pub async fn preprocess_prf_circuits() {
    // Dummy values: only the circuit construction matters here, not the
    // output.
    let pms = [42u8; 32];
    let client_random = [69u8; 32];

    // Ideal channels and OTs connecting the leader and follower sides.
    let (leader_ctx_0, follower_ctx_0) = test_st_executor(128);
    let (leader_ctx_1, follower_ctx_1) = test_st_executor(128);
    let (leader_ot_send_0, follower_ot_recv_0) = ideal_ot();
    let (follower_ot_send_0, leader_ot_recv_0) = ideal_ot();
    let (leader_ot_send_1, follower_ot_recv_1) = ideal_ot();
    let (follower_ot_send_1, leader_ot_recv_1) = ideal_ot();

    let leader_thread_0 = DEAPThread::new(
        DEAPRole::Leader,
        [0u8; 32],
        leader_ctx_0,
        leader_ot_send_0,
        leader_ot_recv_0,
    );
    let leader_thread_1 = leader_thread_0
        .new_thread(leader_ctx_1, leader_ot_send_1, leader_ot_recv_1)
        .unwrap();
    let follower_thread_0 = DEAPThread::new(
        DEAPRole::Follower,
        [0u8; 32],
        follower_ctx_0,
        follower_ot_send_0,
        follower_ot_recv_0,
    );
    let follower_thread_1 = follower_thread_0
        .new_thread(follower_ctx_1, follower_ot_send_1, follower_ot_recv_1)
        .unwrap();

    // Set up public PMS for testing.
    let leader_pms = leader_thread_0.new_public_input::<[u8; 32]>("pms").unwrap();
    let follower_pms = follower_thread_0
        .new_public_input::<[u8; 32]>("pms")
        .unwrap();
    leader_thread_0.assign(&leader_pms, pms).unwrap();

    let mut leader = MpcPrf::new(
        PrfConfig::builder().role(Role::Leader).build().unwrap(),
        leader_thread_0,
        leader_thread_1,
    );
    let mut follower = MpcPrf::new(
        PrfConfig::builder().role(Role::Follower).build().unwrap(),
        follower_thread_0,
        follower_thread_1,
    );

    // Leader and follower run concurrently over the ideal channels; only the
    // leader supplies the client random.
    futures::join!(
        async {
            leader.setup(leader_pms).await.unwrap();
            leader.set_client_random(Some(client_random)).await.unwrap();
            leader.preprocess().await.unwrap();
        },
        async {
            follower.setup(follower_pms).await.unwrap();
            follower.set_client_random(None).await.unwrap();
            follower.preprocess().await.unwrap();
        }
    );
}

View File

@@ -1,57 +0,0 @@
use std::time::Instant;
use tlsn_benches_library::{run_prover, AsyncIo, ProverKind, ProverTrait};
use async_trait::async_trait;
/// A benchmark prover that runs natively (as opposed to in the browser).
pub struct NativeProver {
    /// Number of bytes to send to the server.
    upload_size: usize,
    /// Number of bytes to receive from the server.
    download_size: usize,
    /// Whether deferred decryption is used.
    defer_decryption: bool,
    /// Io to the verifier; consumed by `run`.
    io: Option<Box<dyn AsyncIo>>,
    /// Io to the server; consumed by `run`.
    client_conn: Option<Box<dyn AsyncIo>>,
}
#[async_trait]
impl ProverTrait for NativeProver {
    /// Stores the benchmark parameters and io handles for a later `run`.
    async fn setup(
        upload_size: usize,
        download_size: usize,
        defer_decryption: bool,
        io: Box<dyn AsyncIo>,
        client_conn: Box<dyn AsyncIo>,
    ) -> anyhow::Result<Self>
    where
        Self: Sized,
    {
        Ok(Self {
            upload_size,
            download_size,
            defer_decryption,
            io: Some(io),
            client_conn: Some(client_conn),
        })
    }

    /// Runs the prover once and returns the wall-clock runtime in seconds.
    async fn run(&mut self) -> anyhow::Result<u64> {
        // The io handles are single-use: move them out, leaving `None`
        // behind. `run` must not be called twice.
        let io = self.io.take().expect("io is set by setup");
        let client_conn = self
            .client_conn
            .take()
            .expect("client_conn is set by setup");

        let start_time = Instant::now();

        run_prover(
            self.upload_size,
            self.download_size,
            self.defer_decryption,
            io,
            client_conn,
        )
        .await?;

        Ok(start_time.elapsed().as_secs())
    }

    fn kind(&self) -> ProverKind {
        ProverKind::Native
    }
}

View File

@@ -1,176 +0,0 @@
//! Contains the actual main() function of the prover binary. It is moved here
//! in order to enable cargo to build two prover binaries - with and without
//! memory profiling.
use std::{
fs::metadata,
io::Write,
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
};
use crate::{
config::{BenchInstance, Config},
metrics::Metrics,
preprocess::preprocess_prf_circuits,
set_interface, PROVER_INTERFACE,
};
use anyhow::Context;
use tlsn_benches_library::{AsyncIo, ProverTrait};
use tlsn_server_fixture::bind;
use csv::WriterBuilder;
use tokio_util::{
compat::TokioAsyncReadCompatExt,
io::{InspectReader, InspectWriter},
};
use tracing_subscriber::{fmt::format::FmtSpan, EnvFilter};
#[cfg(not(feature = "browser-bench"))]
use crate::prover::NativeProver as BenchProver;
#[cfg(feature = "browser-bench")]
use tlsn_benches_browser_native::BrowserProver as BenchProver;
/// Entry point logic of the prover binary.
///
/// Reads the bench config (path from the `CFG` env var, default
/// `bench.toml`), connects to the verifier for every bench instance and
/// appends the resulting metrics to `metrics.csv`.
///
/// When `is_memory_profiling` is true, only instances with `memory_profile`
/// set are run.
pub async fn prover_main(is_memory_profiling: bool) -> anyhow::Result<()> {
    let config_path = std::env::var("CFG").unwrap_or_else(|_| "bench.toml".to_string());
    let config: Config = toml::from_str(
        &std::fs::read_to_string(config_path).context("failed to read config file")?,
    )
    .context("failed to parse config")?;

    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
        .init();

    // The verifier's address, overridable via env vars.
    let ip = std::env::var("VERIFIER_IP").unwrap_or_else(|_| "10.10.1.1".to_string());
    let port: u16 = std::env::var("VERIFIER_PORT")
        .map(|port| port.parse().expect("port is valid u16"))
        .unwrap_or(8000);
    let verifier_host = (ip.as_str(), port);

    // Metrics are appended so repeated runs accumulate in one file.
    let mut file = std::fs::OpenOptions::new()
        .create(true)
        .append(true)
        .open("metrics.csv")
        .context("failed to open metrics file")?;

    // Preprocess the PRF circuits as they are allocating a lot of memory, which
    // don't need to be accounted for in the benchmarks.
    preprocess_prf_circuits().await;

    // Scope the CSV writer so its borrow of `file` ends before the final
    // flush below.
    {
        let mut metric_wrt = WriterBuilder::new()
            // If file is not empty, assume that the CSV header is already present in the file.
            .has_headers(metadata("metrics.csv")?.len() == 0)
            .from_writer(&mut file);

        for bench in config.benches {
            let instances = bench.flatten();
            for instance in instances {
                // In profiling mode, only profiled instances are run.
                if is_memory_profiling && !instance.memory_profile {
                    continue;
                }
                println!("{:?}", &instance);

                // One fresh TCP connection to the verifier per instance.
                let io = tokio::net::TcpStream::connect(verifier_host)
                    .await
                    .context("failed to open tcp connection")?;

                metric_wrt.serialize(
                    run_instance(instance, io, is_memory_profiling)
                        .await
                        .context("failed to run instance")?,
                )?;
                metric_wrt.flush()?;
            }
        }
    }
    file.flush()?;
    Ok(())
}
/// Runs a single bench instance against the verifier over `io` and returns
/// the measured metrics.
async fn run_instance(
    instance: BenchInstance,
    io: impl AsyncIo,
    is_memory_profiling: bool,
) -> anyhow::Result<Metrics> {
    // Byte counters for traffic to/from the verifier, updated by the
    // inspection wrappers below on every read/write.
    let uploaded = Arc::new(AtomicU64::new(0));
    let downloaded = Arc::new(AtomicU64::new(0));
    let io = InspectWriter::new(
        InspectReader::new(io, {
            let downloaded = downloaded.clone();
            move |data| {
                downloaded.fetch_add(data.len() as u64, Ordering::Relaxed);
            }
        }),
        {
            let uploaded = uploaded.clone();
            move |data| {
                uploaded.fetch_add(data.len() as u64, Ordering::Relaxed);
            }
        },
    );

    let BenchInstance {
        name,
        upload,
        upload_delay,
        download,
        download_delay,
        upload_size,
        download_size,
        defer_decryption,
        memory_profile,
    } = instance.clone();

    // Shape the prover's network interface for this instance.
    set_interface(PROVER_INTERFACE, upload, 1, upload_delay)?;

    // The profiler records heap usage from here until it is dropped at the
    // end of this function.
    let _profiler = if is_memory_profiling {
        assert!(memory_profile, "Instance doesn't have `memory_profile` set");
        // Build a testing profiler as it won't output to stderr.
        Some(dhat::Profiler::builder().testing().build())
    } else {
        None
    };

    // In-memory duplex stream connecting the prover to the local server
    // fixture.
    let (client_conn, server_conn) = tokio::io::duplex(1 << 16);
    tokio::spawn(bind(server_conn.compat()));

    let mut prover = BenchProver::setup(
        upload_size,
        download_size,
        defer_decryption,
        Box::new(io),
        Box::new(client_conn),
    )
    .await?;

    let runtime = prover.run().await?;

    let heap_max_bytes = if is_memory_profiling {
        Some(dhat::HeapStats::get().max_bytes)
    } else {
        None
    };

    Ok(Metrics {
        name,
        kind: prover.kind(),
        upload,
        upload_delay,
        download,
        download_delay,
        upload_size,
        download_size,
        defer_decryption,
        runtime,
        // NOTE(review): counters are bumped with `Relaxed` but read with
        // `SeqCst`; the reads happen after `run` has completed, so this looks
        // benign — confirm.
        uploaded: uploaded.load(Ordering::SeqCst),
        downloaded: downloaded.load(Ordering::SeqCst),
        heap_max_bytes,
    })
}

View File

@@ -1,131 +0,0 @@
//! Contains the actual main() function of the verifier binary. It is moved here
//! in order to enable cargo to build two verifier binaries - with and without
//! memory profiling.
use crate::{
config::{BenchInstance, Config},
preprocess::preprocess_prf_circuits,
set_interface, VERIFIER_INTERFACE,
};
use tls_core::verify::WebPkiVerifier;
use tlsn_common::config::ProtocolConfigValidator;
use tlsn_core::CryptoProvider;
use tlsn_server_fixture_certs::CA_CERT_DER;
use tlsn_verifier::{Verifier, VerifierConfig};
use anyhow::Context;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::TokioAsyncReadCompatExt;
use tracing_subscriber::{fmt::format::FmtSpan, EnvFilter};
/// Entry point logic of the verifier binary.
///
/// Reads the bench config (path from the `CFG` env var, default
/// `bench.toml`), listens for the prover and runs the verifier side of every
/// bench instance.
///
/// When `is_memory_profiling` is true, only instances with `memory_profile`
/// set are run.
pub async fn verifier_main(is_memory_profiling: bool) -> anyhow::Result<()> {
    let config_path = std::env::var("CFG").unwrap_or_else(|_| "bench.toml".to_string());
    let config: Config = toml::from_str(
        &std::fs::read_to_string(config_path).context("failed to read config file")?,
    )
    .context("failed to parse config")?;

    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
        .init();

    // Address to listen on, overridable via env vars. Must match the
    // prover's VERIFIER_IP/VERIFIER_PORT.
    let ip = std::env::var("VERIFIER_IP").unwrap_or_else(|_| "10.10.1.1".to_string());
    let port: u16 = std::env::var("VERIFIER_PORT")
        .map(|port| port.parse().expect("port is valid u16"))
        .unwrap_or(8000);
    let host = (ip.as_str(), port);

    let listener = tokio::net::TcpListener::bind(host)
        .await
        .context("failed to bind to port")?;

    // Preprocess the PRF circuits as they are allocating a lot of memory, which
    // don't need to be accounted for in the benchmarks.
    preprocess_prf_circuits().await;

    for bench in config.benches {
        for instance in bench.flatten() {
            // Skip non-profiled instances in profiling mode, mirroring the
            // prover's skip logic so both sides stay in lockstep.
            if is_memory_profiling && !instance.memory_profile {
                continue;
            }
            // One prover connection per instance.
            let (io, _) = listener
                .accept()
                .await
                .context("failed to accept connection")?;
            run_instance(instance, io, is_memory_profiling)
                .await
                .context("failed to run instance")?;
        }
    }
    Ok(())
}
/// Runs the verifier side of a single bench instance over `io`.
async fn run_instance<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
    instance: BenchInstance,
    io: S,
    is_memory_profiling: bool,
) -> anyhow::Result<()> {
    // Only the download-direction parameters apply on the verifier side.
    let BenchInstance {
        download,
        download_delay,
        upload_size,
        download_size,
        memory_profile,
        ..
    } = instance;

    // Shape the verifier's network interface for this instance.
    set_interface(VERIFIER_INTERFACE, download, 1, download_delay)?;

    let _profiler = if is_memory_profiling {
        assert!(memory_profile, "Instance doesn't have `memory_profile` set");
        // Build a testing profiler as it won't output to stderr.
        Some(dhat::Profiler::builder().testing().build())
    } else {
        None
    };

    // Trust only the benchmark server fixture's CA.
    let provider = CryptoProvider {
        cert: cert_verifier(),
        ..Default::default()
    };

    // Allow 256 bytes of headroom on top of the configured transcript sizes.
    let config_validator = ProtocolConfigValidator::builder()
        .max_sent_data(upload_size + 256)
        .max_recv_data(download_size + 256)
        .build()
        .unwrap();

    let verifier = Verifier::new(
        VerifierConfig::builder()
            .protocol_config_validator(config_validator)
            .crypto_provider(provider)
            .build()?,
    );

    _ = verifier.verify(io.compat()).await?;

    println!("verifier done");

    if is_memory_profiling {
        // XXX: we may want to profile the Verifier's memory usage at a future
        // point.
        // println!(
        //     "verifier peak heap memory usage: {}",
        //     dhat::HeapStats::get().max_bytes
        // );
    }
    Ok(())
}
/// Builds a certificate verifier that trusts only the server fixture's CA.
fn cert_verifier() -> WebPkiVerifier {
    let ca = tls_core::key::Certificate(CA_CERT_DER.to_vec());
    let mut roots = tls_core::anchors::RootCertStore::empty();
    roots.add(&ca).unwrap();
    WebPkiVerifier::new(roots, None)
}

View File

@@ -1,13 +0,0 @@
[package]
edition = "2021"
name = "tlsn-benches-browser-core"
publish = false
version = "0.0.0"
[dependencies]
tlsn-benches-library = { workspace = true }
serio = { workspace = true }
serde = { workspace = true }
tokio-util = { workspace = true, features = ["compat", "io-util"] }

View File

@@ -1,68 +0,0 @@
//! Contains core types shared by the native and the wasm components.
use std::{
io::Error,
pin::Pin,
task::{Context, Poll},
};
use tlsn_benches_library::AsyncIo;
use serio::{
codec::{Bincode, Framed},
Sink, Stream,
};
use tokio_util::codec::LengthDelimitedCodec;
pub mod msg;
/// A sink/stream for serializable types with a framed transport.
///
/// Messages are length-delimited on the wire and encoded with bincode.
pub struct FramedIo {
    inner:
        serio::Framed<tokio_util::codec::Framed<Box<dyn AsyncIo>, LengthDelimitedCodec>, Bincode>,
}
impl FramedIo {
    /// Creates a new `FramedIo` from the given async `io`.
    // `Bincode` is a unit struct; `default()` is still the conventional way
    // to construct the codec, so the clippy lint is silenced.
    #[allow(clippy::default_constructed_unit_structs)]
    pub fn new(io: Box<dyn AsyncIo>) -> Self {
        // Length-delimited framing underneath, bincode serialization on top.
        let io = LengthDelimitedCodec::builder().new_framed(io);
        Self {
            inner: Framed::new(io, Bincode::default()),
        }
    }
}
// `Sink` is implemented by delegating every call to the inner framed
// transport.
impl Sink for FramedIo {
    type Error = Error;

    fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Pin::new(&mut self.inner).poll_ready(cx)
    }

    fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Pin::new(&mut self.inner).poll_close(cx)
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Pin::new(&mut self.inner).poll_flush(cx)
    }

    fn start_send<Item: serio::Serialize>(
        mut self: Pin<&mut Self>,
        item: Item,
    ) -> std::result::Result<(), Self::Error> {
        Pin::new(&mut self.inner).start_send(item)
    }
}
// `Stream` is implemented by delegating to the inner framed transport.
impl Stream for FramedIo {
    type Error = Error;

    fn poll_next<Item: serio::Deserialize>(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Item, Error>>> {
        Pin::new(&mut self.inner).poll_next(cx)
    }
}

View File

@@ -1,17 +0,0 @@
//! Messages exchanged by the native and the wasm components of the browser
//! prover.
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, PartialEq)]
/// The config sent to the wasm component.
pub struct Config {
    /// Number of bytes to send to the server.
    pub upload_size: usize,
    /// Number of bytes to receive from the server.
    pub download_size: usize,
    /// Whether deferred decryption is used.
    pub defer_decryption: bool,
}

#[derive(Serialize, Deserialize, PartialEq)]
/// Sent by the wasm component when proving process is finished. Contains total
/// runtime in seconds.
pub struct Runtime(pub u64);

View File

@@ -1,22 +0,0 @@
[package]
edition = "2021"
name = "tlsn-benches-browser-native"
publish = false
version = "0.0.0"
[dependencies]
tlsn-benches-browser-core = { workspace = true }
tlsn-benches-library = { workspace = true }
serio = { workspace = true }
websocket-relay = { workspace = true }
anyhow = { workspace = true }
async-trait = { workspace = true }
chromiumoxide = { version = "0.6.0" , features = ["tokio-runtime"] }
futures = { workspace = true }
rust-embed = "8.5.0"
tokio = { workspace = true, features = ["rt", "io-std"] }
tracing = { workspace = true }
warp = "0.3.7"
warp-embed = "0.5.0"

View File

@@ -1,331 +0,0 @@
//! Contains the native component of the browser prover.
//!
//! Conceptually the browser prover consists of the native and the wasm
//! components. The native component is responsible for starting the browser,
//! loading the wasm component and driving it.
use std::{env, net::IpAddr};
use serio::{stream::IoStreamExt, SinkExt as _};
use tlsn_benches_browser_core::{
msg::{Config, Runtime},
FramedIo,
};
use tlsn_benches_library::{AsyncIo, ProverKind, ProverTrait};
use anyhow::{anyhow, Context, Result};
use async_trait::async_trait;
use chromiumoxide::{
cdp::{
browser_protocol::log::{EventEntryAdded, LogEntryLevel},
js_protocol::runtime::EventExceptionThrown,
},
Browser, BrowserConfig, Page,
};
use futures::{Future, FutureExt, StreamExt};
use rust_embed::RustEmbed;
use tokio::{io, io::AsyncWriteExt, net::TcpListener, task::JoinHandle};
use tracing::{debug, error, info};
use warp::Filter;
/// The IP on which the wasm component is served.
pub static DEFAULT_WASM_IP: &str = "127.0.0.1";
/// The IP of the websocket relay.
pub static DEFAULT_WS_IP: &str = "127.0.0.1";
/// The port on which the wasm component is served.
pub static DEFAULT_WASM_PORT: u16 = 9001;
/// The port of the websocket relay.
pub static DEFAULT_WS_PORT: u16 = 9002;
/// The port for the wasm component to communicate with the TLS server.
pub static DEFAULT_WASM_TO_SERVER_PORT: u16 = 9003;
/// The port for the wasm component to communicate with the verifier.
pub static DEFAULT_WASM_TO_VERIFIER_PORT: u16 = 9004;
/// The port for the wasm component to communicate with the native component.
pub static DEFAULT_WASM_TO_NATIVE_PORT: u16 = 9005;

// The `pkg` dir will be embedded into the binary at compile-time and served
// to the browser by the embedded-file http server (see `spawn_http_server`).
#[derive(RustEmbed)]
#[folder = "../wasm/pkg"]
struct Data;
/// The native component of the prover which runs in the browser.
pub struct BrowserProver {
    /// Io for communication with the wasm component.
    wasm_io: FramedIo,
    /// The browser spawned by the prover.
    browser: Browser,
    /// A handle to the http server serving the embedded wasm files.
    http_server: JoinHandle<()>,
    /// Handles to the relays shuttling data between the wasm component and
    /// the server/verifier/native sides.
    relays: Vec<JoinHandle<Result<(), anyhow::Error>>>,
}
#[async_trait]
impl ProverTrait for BrowserProver {
    /// Sets up the browser prover: spawns the relays and the http server,
    /// launches the browser and sends the bench config to the wasm component.
    async fn setup(
        upload_size: usize,
        download_size: usize,
        defer_decryption: bool,
        verifier_io: Box<dyn AsyncIo>,
        server_io: Box<dyn AsyncIo>,
    ) -> anyhow::Result<Self>
    where
        Self: Sized,
    {
        // Reads a port number from the environment, falling back to `default`.
        fn env_port(var: &str, default: u16) -> u16 {
            env::var(var)
                .map(|port| port.parse().expect("port should be valid integer"))
                .unwrap_or(default)
        }

        // Reads an IP address from the environment, falling back to `default`.
        fn env_ip(var: &str, default: &str) -> IpAddr {
            env::var(var)
                .map(|addr| addr.parse().expect("should be valid IP address"))
                .unwrap_or(IpAddr::V4(default.parse().unwrap()))
        }

        let wasm_port = env_port("WASM_PORT", DEFAULT_WASM_PORT);
        let ws_port = env_port("WS_PORT", DEFAULT_WS_PORT);
        let wasm_to_server_port = env_port("WASM_TO_SERVER_PORT", DEFAULT_WASM_TO_SERVER_PORT);
        let wasm_to_verifier_port =
            env_port("WASM_TO_VERIFIER_PORT", DEFAULT_WASM_TO_VERIFIER_PORT);
        let wasm_to_native_port = env_port("WASM_TO_NATIVE_PORT", DEFAULT_WASM_TO_NATIVE_PORT);
        let wasm_ip = env_ip("WASM_IP", DEFAULT_WASM_IP);
        let ws_ip = env_ip("WS_IP", DEFAULT_WS_IP);

        let mut relays = Vec::with_capacity(4);

        relays.push(spawn_websocket_relay(ws_ip, ws_port).await?);

        // Serve the embedded wasm files to the browser.
        let http_server = spawn_http_server(wasm_ip, wasm_port)?;

        // Relay data from the wasm component to the server.
        relays.push(spawn_port_relay(wasm_to_server_port, server_io).await?);

        // Relay data from the wasm component to the verifier.
        relays.push(spawn_port_relay(wasm_to_verifier_port, verifier_io).await?);

        // Create a framed connection to the wasm component.
        let (wasm_left, wasm_right) = tokio::io::duplex(1 << 16);
        relays.push(spawn_port_relay(wasm_to_native_port, Box::new(wasm_right)).await?);
        let mut wasm_io = FramedIo::new(Box::new(wasm_left));

        info!("spawning browser");

        // Note that the browser must be spawned only when the WebSocket relay
        // is running.
        let browser = spawn_browser(
            wasm_ip,
            ws_ip,
            wasm_port,
            ws_port,
            wasm_to_server_port,
            wasm_to_verifier_port,
            wasm_to_native_port,
        )
        .await?;

        info!("sending config to the browser component");
        wasm_io
            .send(Config {
                upload_size,
                download_size,
                defer_decryption,
            })
            .await?;

        Ok(Self {
            wasm_io,
            browser,
            http_server,
            relays,
        })
    }

    /// Waits for the wasm component to report its runtime, then tears
    /// everything down.
    async fn run(&mut self) -> anyhow::Result<u64> {
        let runtime: Runtime = self.wasm_io.expect_next().await.unwrap();
        _ = self.clean_up().await?;
        Ok(runtime.0)
    }

    fn kind(&self) -> ProverKind {
        ProverKind::Browser
    }
}
impl BrowserProver {
    /// Shuts down the auxiliary tasks and closes the browser.
    async fn clean_up(&mut self) -> anyhow::Result<()> {
        // Kill the http server.
        self.http_server.abort();

        // Kill all relays.
        for relay in &self.relays {
            relay.abort();
        }

        // Close the browser.
        self.browser.close().await?;
        self.browser.wait().await?;

        Ok(())
    }
}
/// Binds a listener on `ip`:`port` and spawns the websocket relay on it.
pub async fn spawn_websocket_relay(
    ip: IpAddr,
    port: u16,
) -> anyhow::Result<JoinHandle<Result<(), anyhow::Error>>> {
    let listener = TcpListener::bind((ip, port)).await?;
    Ok(tokio::spawn(websocket_relay::run(listener)))
}
/// Binds to the given localhost `port`, accepts a connection and relays data
/// between the connection and the `channel`.
pub async fn spawn_port_relay(
    port: u16,
    channel: Box<dyn AsyncIo>,
) -> anyhow::Result<JoinHandle<Result<(), anyhow::Error>>> {
    let listener = tokio::net::TcpListener::bind(("127.0.0.1", port))
        .await
        .context("failed to bind to port")?;
    let handle = tokio::spawn(async move {
        // Only one connection is expected; a failure to accept aborts the
        // relay task with a panic.
        let (tcp, _) = listener
            .accept()
            .await
            .context("failed to accept a connection")
            .unwrap();
        relay_data(Box::new(tcp), channel).await
    });
    Ok(handle)
}
/// Relays data between two sources.
///
/// Copies bytes in both directions; once one direction reaches EOF, the
/// opposite write half is shut down. Returns when both directions finish or
/// either errors.
pub async fn relay_data(left: Box<dyn AsyncIo>, right: Box<dyn AsyncIo>) -> Result<()> {
    let (mut read_left, mut write_left) = io::split(left);
    let (mut read_right, mut write_right) = io::split(right);

    let forward = async {
        io::copy(&mut read_left, &mut write_right).await?;
        write_right.shutdown().await
    };
    let backward = async {
        io::copy(&mut read_right, &mut write_left).await?;
        write_left.shutdown().await
    };

    tokio::try_join!(forward, backward)?;
    Ok(())
}
/// Spawns the browser and starts the wasm component.
///
/// Opens the page served at `wasm_ip:wasm_port` and invokes the wasm worker,
/// passing it the websocket relay address and the three relay ports.
async fn spawn_browser(
    wasm_ip: IpAddr,
    ws_ip: IpAddr,
    wasm_port: u16,
    ws_port: u16,
    wasm_to_server_port: u16,
    wasm_to_verifier_port: u16,
    wasm_to_native_port: u16,
) -> anyhow::Result<Browser> {
    // Chrome requires --no-sandbox when running as root.
    let config = BrowserConfig::builder()
        .no_sandbox()
        .incognito()
        .build()
        .map_err(|s| anyhow!(s))?;

    debug!("launching chromedriver");
    let (browser, mut handler) = Browser::launch(config).await?;
    debug!("chromedriver started");

    // Drive the browser's event handler for the lifetime of the browser; any
    // handler error is treated as fatal.
    tokio::spawn(async move {
        while let Some(res) = handler.next().await {
            res.unwrap();
        }
    });

    let page = browser
        .new_page(&format!("http://{}:{}/index.html", wasm_ip, wasm_port))
        .await?;

    // Forward the browser's console logs and exceptions to tracing.
    tokio::spawn(register_listeners(&page).await?);

    page.wait_for_navigation().await?;

    // Note that `format!` needs double {{ }} in order to escape them.
    let _ = page
        .evaluate_function(&format!(
            r#"
            async function() {{
                await window.worker.init();
                // Do not `await` run() or else it will block the browser.
                window.worker.run("{}", {}, {}, {}, {});
            }}
        "#,
            ws_ip, ws_port, wasm_to_server_port, wasm_to_verifier_port, wasm_to_native_port
        ))
        .await?;

    Ok(browser)
}
/// Spawns an http server on `ip`:`port` serving the embedded wasm files.
///
/// COOP/COEP headers are attached to every response — presumably to enable
/// cross-origin isolation for wasm threading; TODO confirm.
pub fn spawn_http_server(ip: IpAddr, port: u16) -> anyhow::Result<JoinHandle<()>> {
    let handle = tokio::spawn(async move {
        // Serve embedded files with additional headers.
        let data_serve = warp_embed::embed(&Data);
        let data_serve_with_headers = data_serve
            .map(|reply| {
                warp::reply::with_header(reply, "Cross-Origin-Opener-Policy", "same-origin")
            })
            .map(|reply| {
                warp::reply::with_header(reply, "Cross-Origin-Embedder-Policy", "require-corp")
            });
        warp::serve(data_serve_with_headers).run((ip, port)).await;
    });
    Ok(handle)
}
/// Registers listeners for the page's log entries and thrown exceptions.
///
/// Returns a future that forwards browser console output to tracing:
/// error-level entries and all exceptions are logged at error level,
/// everything else at debug level.
async fn register_listeners(page: &Page) -> Result<impl Future<Output = ()>> {
    let mut logs = page.event_listener::<EventEntryAdded>().await?.fuse();
    let mut exceptions = page.event_listener::<EventExceptionThrown>().await?.fuse();
    Ok(futures::future::join(
        async move {
            while let Some(event) = logs.next().await {
                let entry = &event.entry;
                match entry.level {
                    LogEntryLevel::Error => {
                        error!("{:?}", entry);
                    }
                    _ => {
                        debug!("{:?}: {}", entry.timestamp, entry.text);
                    }
                }
            }
        },
        async move {
            while let Some(event) = exceptions.next().await {
                error!("{:?}", event);
            }
        },
    )
    .map(|_| ()))
}

View File

@@ -1,11 +0,0 @@
[build]
target = "wasm32-unknown-unknown"
[target.wasm32-unknown-unknown]
rustflags = [
"-C",
"target-feature=+atomics,+bulk-memory,+mutable-globals",
]
[unstable]
build-std = ["panic_abort", "std"]

View File

@@ -1,31 +0,0 @@
[package]
edition = "2021"
name = "tlsn-benches-browser-wasm"
publish = false
version = "0.0.0"
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
tlsn-benches-browser-core = { workspace = true }
tlsn-benches-library = { workspace = true }
tlsn-wasm = { path = "../../../wasm" }
serio = { workspace = true }
anyhow = { workspace = true }
tracing = { workspace = true }
wasm-bindgen = { version = "0.2.87" }
wasm-bindgen-futures = { version = "0.4.37" }
web-time = { workspace = true }
# Use the patched ws_stream_wasm to fix the issue https://github.com/najamelan/ws_stream_wasm/issues/12#issuecomment-1711902958
ws_stream_wasm = { version = "0.7.4", git = "https://github.com/tlsnotary/ws_stream_wasm", rev = "2ed12aad9f0236e5321f577672f309920b2aef51", features = ["tokio_io"]}
[target.'cfg(target_arch = "wasm32")'.dependencies]
wasm-bindgen-rayon = { version = "1.2", features = ["no-bundler"] }
[package.metadata.wasm-pack.profile.release]
# Note: these wasm-pack options should match those in crates/wasm/Cargo.toml
opt-level = "z"
wasm-opt = true

View File

@@ -1,7 +0,0 @@
<!DOCTYPE html>
<!-- Bench harness page: loads the module script that spawns the wasm worker.
     Fixed: the original closed </html> without ever opening <html>, and the
     head lacked the required <title> and a charset declaration. -->
<html lang="en">
<head>
  <meta charset="utf-8">
  <title>TLSNotary browser bench</title>
</head>
<body>
  <script src="index.js" type="module"></script>
</body>
</html>

View File

@@ -1,7 +0,0 @@
import * as Comlink from "./comlink.mjs";

// Spawn the module worker and expose its Comlink proxy on `window`,
// so the native harness can drive it through the page context.
async function init() {
  const workerProxy = Comlink.wrap(
    new Worker("worker.js", { type: "module" })
  );
  window.worker = workerProxy;
}

init();

View File

@@ -1,45 +0,0 @@
import * as Comlink from "./comlink.mjs";
import init, { wasm_main, initThreadPool, init_logging } from './tlsn_benches_browser_wasm.js';

// Comlink-exposed worker that drives the wasm prover.
class Worker {
    // Initializes the wasm module and its rayon thread pool.
    async init() {
        try {
            await init();
            // Tracing may interfere with the benchmark results. We should enable it only for debugging.
            // init_logging({
            //     level: 'Debug',
            //     crate_filters: undefined,
            //     span_events: undefined,
            // });
            await initThreadPool(navigator.hardwareConcurrency);
        } catch (e) {
            console.error(e);
            throw e;
        }
    }
    // Runs the wasm prover. Errors are logged here and re-thrown so they
    // also surface through the Comlink proxy on the page side.
    async run(
        ws_ip,
        ws_port,
        wasm_to_server_port,
        wasm_to_verifier_port,
        wasm_to_native_port
    ) {
        try {
            await wasm_main(
                ws_ip,
                ws_port,
                wasm_to_server_port,
                wasm_to_verifier_port,
                wasm_to_native_port);
        } catch (e) {
            console.error(e);
            throw e;
        }
    }
}
const worker = new Worker();
Comlink.expose(worker);

View File

@@ -1,2 +0,0 @@
# Nightly is required for the `-Z build-std` flags in .cargo/config.
[toolchain]
channel = "nightly"

View File

@@ -1,103 +0,0 @@
//! Contains the wasm component of the browser prover.
//!
//! Conceptually the browser prover consists of the native and the wasm
//! components.
use serio::{stream::IoStreamExt, SinkExt as _};
use tlsn_benches_browser_core::{
msg::{Config, Runtime},
FramedIo,
};
use tlsn_benches_library::run_prover;
pub use tlsn_wasm::init_logging;
use anyhow::Result;
use tracing::info;
use wasm_bindgen::prelude::*;
#[cfg(target_arch = "wasm32")]
pub use wasm_bindgen_rayon::init_thread_pool;
use web_time::Instant;
use ws_stream_wasm::WsMeta;
/// Entry point invoked from the browser.
///
/// Thin wrapper around [`main`]: wasm_bindgen does not support returning
/// `anyhow::Result`, so any error is converted to a `JsError` here.
#[wasm_bindgen]
pub async fn wasm_main(
    ws_ip: String,
    ws_port: u16,
    wasm_to_server_port: u16,
    wasm_to_verifier_port: u16,
    wasm_to_native_port: u16,
) -> Result<(), JsError> {
    let outcome = main(
        ws_ip,
        ws_port,
        wasm_to_server_port,
        wasm_to_verifier_port,
        wasm_to_native_port,
    )
    .await;
    match outcome {
        Ok(()) => Ok(()),
        Err(err) => Err(JsError::new(&err.to_string())),
    }
}
/// Runs the wasm side of the browser prover benchmark.
///
/// Opens three websocket-to-TCP relay connections (server, verifier, and the
/// native component), waits for the benchmark [`Config`] from the native
/// component, runs the prover, and reports the elapsed wall-clock seconds
/// back over the native channel.
pub async fn main(
    ws_ip: String,
    ws_port: u16,
    wasm_to_server_port: u16,
    wasm_to_verifier_port: u16,
    wasm_to_native_port: u16,
) -> Result<()> {
    info!("starting main");
    // Connect to the server.
    // The relay forwards to `localhost:<port>`; `%3A` is the url-encoded `:`
    // inside the `addr` query parameter.
    let (_, server_io_ws) = WsMeta::connect(
        &format!(
            "ws://{}:{}/tcp?addr=localhost%3A{}",
            ws_ip, ws_port, wasm_to_server_port
        ),
        None,
    )
    .await?;
    let server_io = server_io_ws.into_io();
    // Connect to the verifier.
    let (_, verifier_io_ws) = WsMeta::connect(
        &format!(
            "ws://{}:{}/tcp?addr=localhost%3A{}",
            ws_ip, ws_port, wasm_to_verifier_port
        ),
        None,
    )
    .await?;
    let verifier_io = verifier_io_ws.into_io();
    // Connect to the native component of the browser prover.
    let (_, native_io_ws) = WsMeta::connect(
        &format!(
            "ws://{}:{}/tcp?addr=localhost%3A{}",
            ws_ip, ws_port, wasm_to_native_port
        ),
        None,
    )
    .await?;
    // The native channel is length-framed so typed messages can be exchanged.
    let mut native_io = FramedIo::new(Box::new(native_io_ws.into_io()));
    info!("expecting config from the native component");
    let cfg: Config = native_io.expect_next().await?;
    // Timing starts only after all connections and the config are in place,
    // so only the prover run itself is measured.
    let start_time = Instant::now();
    run_prover(
        cfg.upload_size,
        cfg.download_size,
        cfg.defer_decryption,
        Box::new(verifier_io),
        Box::new(server_io),
    )
    .await?;
    native_io
        .send(Runtime(start_time.elapsed().as_secs()))
        .await?;
    Ok(())
}

View File

@@ -1,19 +0,0 @@
# Shared library code for the TLSNotary benches (prover trait + runner).
[package]
edition = "2021"
name = "tlsn-benches-library"
publish = false
version = "0.0.0"
[dependencies]
tlsn-common = { workspace = true }
tlsn-core = { workspace = true }
tlsn-prover = { workspace = true }
tlsn-server-fixture-certs = { workspace = true }
tlsn-tls-core = { workspace = true }
anyhow = "1.0"
async-trait = "0.1.81"
futures = { version = "0.3", features = ["compat"] }
serde = { workspace = true }
tokio = {version = "1", default-features = false, features = ["rt", "macros"]}
tokio-util= {version = "0.7", features = ["compat", "io"]}

View File

@@ -1,131 +0,0 @@
use tls_core::{anchors::RootCertStore, verify::WebPkiVerifier};
use tlsn_common::config::ProtocolConfig;
use tlsn_core::{transcript::Idx, CryptoProvider};
use tlsn_prover::{Prover, ProverConfig};
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
use anyhow::Context;
use async_trait::async_trait;
use futures::{future::join, AsyncReadExt as _, AsyncWriteExt as _};
use serde::{Deserialize, Serialize};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::TokioAsyncReadCompatExt;
/// A bidirectional byte stream that can be boxed and moved across threads.
pub trait AsyncIo: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static {}
// Blanket impl: any stream satisfying the bounds is an `AsyncIo`.
impl<T> AsyncIo for T where T: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static {}
#[async_trait]
/// A benchmark prover, generic over where it runs (native or browser).
pub trait ProverTrait {
    /// Sets up the prover preparing it to be run. Returns a prover ready to be
    /// run.
    async fn setup(
        upload_size: usize,
        download_size: usize,
        defer_decryption: bool,
        verifier_io: Box<dyn AsyncIo>,
        server_io: Box<dyn AsyncIo>,
    ) -> anyhow::Result<Self>
    where
        Self: Sized;
    /// Runs the prover. Returns the total run time in seconds.
    async fn run(&mut self) -> anyhow::Result<u64>;
    /// Returns the kind of the prover.
    fn kind(&self) -> ProverKind;
}
/// The kind of a prover.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ProverKind {
    /// The prover compiled into a native binary.
    Native,
    /// The prover compiled into a wasm binary.
    Browser,
}

impl From<ProverKind> for String {
    /// Converts the kind into its display name ("Native" / "Browser").
    fn from(value: ProverKind) -> Self {
        let name = match value {
            ProverKind::Native => "Native",
            ProverKind::Browser => "Browser",
        };
        name.to_string()
    }
}
/// Runs a single prover benchmark iteration against the server fixture.
///
/// Establishes an MPC-TLS connection over `client_conn`, sends a GET request
/// carrying `upload_size` filler bytes, reads the response, then proves the
/// entire sent and received transcript to the verifier over `io`.
pub async fn run_prover(
    upload_size: usize,
    download_size: usize,
    defer_decryption: bool,
    io: Box<dyn AsyncIo>,
    client_conn: Box<dyn AsyncIo>,
) -> anyhow::Result<()> {
    // Trust only the fixture CA so the fixture server's certificate verifies.
    let provider = CryptoProvider {
        cert: WebPkiVerifier::new(root_store(), None),
        ..Default::default()
    };
    // `max_recv_data_online` is only set when decryption is not deferred.
    // The +256 leaves headroom for HTTP framing around the payload.
    let protocol_config = if defer_decryption {
        ProtocolConfig::builder()
            .max_sent_data(upload_size + 256)
            .max_recv_data(download_size + 256)
            .build()
            .unwrap()
    } else {
        ProtocolConfig::builder()
            .max_sent_data(upload_size + 256)
            .max_recv_data(download_size + 256)
            .max_recv_data_online(download_size + 256)
            .build()
            .unwrap()
    };
    let prover = Prover::new(
        ProverConfig::builder()
            .server_name(SERVER_DOMAIN)
            .protocol_config(protocol_config)
            .defer_decryption_from_start(defer_decryption)
            .crypto_provider(provider)
            .build()
            .context("invalid prover config")?,
    )
    .setup(io.compat())
    .await?;
    let (mut mpc_tls_connection, prover_fut) = prover.connect(client_conn.compat()).await?;
    let tls_fut = async move {
        // The `Data` header pads the request with `upload_size` bytes of 0x42;
        // the fixture's /bytes endpoint returns `download_size` bytes.
        let request = format!(
            "GET /bytes?size={} HTTP/1.1\r\nConnection: close\r\nData: {}\r\n\r\n",
            download_size,
            String::from_utf8(vec![0x42u8; upload_size]).unwrap(),
        );
        mpc_tls_connection.write_all(request.as_bytes()).await?;
        mpc_tls_connection.close().await?;
        let mut response = vec![];
        mpc_tls_connection.read_to_end(&mut response).await?;
        Ok::<(), anyhow::Error>(())
    };
    // Drive the prover future and the TLS client concurrently.
    let (prover_task, _) = join(prover_fut, tls_fut).await;
    let mut prover = prover_task?.start_prove();
    // Disclose the full sent and received transcript ranges.
    let (sent_len, recv_len) = prover.transcript().len();
    prover
        .prove_transcript(Idx::new(0..sent_len), Idx::new(0..recv_len))
        .await?;
    prover.finalize().await?;
    Ok(())
}
/// Builds a root certificate store containing only the fixture CA.
fn root_store() -> RootCertStore {
    let mut store = RootCertStore::empty();
    let ca_cert = tls_core::key::Certificate(CA_CERT_DER.to_vec());
    store.add(&ca_cert).unwrap();
    store
}

View File

@@ -1,27 +0,0 @@
# Shared protocol plumbing used by both the prover and verifier crates.
[package]
name = "tlsn-common"
description = "Common code shared between tlsn-prover and tlsn-verifier"
version = "0.1.0-alpha.8-pre"
edition = "2021"
[features]
default = []
[dependencies]
tlsn-core = { workspace = true }
mpz-common = { workspace = true }
mpz-garble = { workspace = true }
mpz-ot = { workspace = true }
derive_builder = { workspace = true }
futures = { workspace = true }
once_cell = { workspace = true }
serio = { workspace = true, features = ["codec", "bincode"] }
thiserror = { workspace = true }
tracing = { workspace = true }
uid-mux = { workspace = true, features = ["serio"] }
serde = { workspace = true, features = ["derive"] }
semver = { version = "1.0", features = ["serde"] }
[dev-dependencies]
rstest = { workspace = true }

View File

@@ -1,40 +0,0 @@
//! Common code shared between `tlsn-prover` and `tlsn-verifier`.
#![deny(missing_docs, unreachable_pub, unused_must_use)]
#![deny(clippy::all)]
#![forbid(unsafe_code)]
pub mod config;
pub mod msg;
pub mod mux;
use serio::codec::Codec;
use crate::mux::MuxControl;
/// IO type.
///
/// A bincode-framed channel over a yamux multiplexer stream.
pub type Io = <serio::codec::Bincode as Codec<uid_mux::yamux::Stream>>::Framed;
/// Base OT sender.
pub type BaseOTSender = mpz_ot::chou_orlandi::Sender;
/// Base OT receiver.
pub type BaseOTReceiver = mpz_ot::chou_orlandi::Receiver;
/// OT sender.
///
/// KOS extension bootstrapped from the base OT receiver.
pub type OTSender = mpz_ot::kos::SharedSender<BaseOTReceiver>;
/// OT receiver.
///
/// KOS extension bootstrapped from the base OT sender.
pub type OTReceiver = mpz_ot::kos::SharedReceiver<BaseOTSender>;
/// MPC executor.
pub type Executor = mpz_common::executor::MTExecutor<MuxControl>;
/// MPC thread context.
pub type Context = mpz_common::executor::MTContext<MuxControl, Io>;
/// DEAP thread.
pub type DEAPThread = mpz_garble::protocol::deap::DEAPThread<Context, OTSender, OTReceiver>;
/// The party's role in the TLSN protocol.
///
/// A Notary is classified as a Verifier.
pub enum Role {
    /// The prover.
    Prover,
    /// The verifier.
    Verifier,
}

View File

@@ -1,41 +0,0 @@
[package]
name = "tlsn-aead"
authors = ["TLSNotary Team"]
description = "This crate provides an implementation of a two-party version of AES-GCM behind an AEAD trait"
keywords = ["tls", "mpc", "2pc", "aead", "aes", "aes-gcm"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.8-pre"
edition = "2021"
[lib]
name = "aead"
[features]
default = ["mock"]
mock = ["mpz-common/test-utils", "dep:mpz-ot"]
[dependencies]
tlsn-block-cipher = { workspace = true }
tlsn-stream-cipher = { workspace = true }
tlsn-universal-hash = { workspace = true }
# NOTE(review): these mpz deps are pinned by git rev under the old org name
# `privacy-scaling-explorations` (GitHub redirects renamed orgs, but consider
# pointing at the current org) and bypass the workspace-level mpz pins —
# verify the rev matches the workspace before bumping either.
mpz-common = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-garble = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-ot = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac", optional = true, features = [
"ideal",
] }
serio = { workspace = true }
async-trait = { workspace = true }
derive_builder = { workspace = true }
futures = { workspace = true }
serde = { workspace = true }
thiserror = { workspace = true }
tracing = { workspace = true }
[dev-dependencies]
tokio = { version = "1", features = ["macros", "rt", "rt-multi-thread"] }
aes-gcm = { workspace = true }

View File

@@ -1,36 +0,0 @@
use derive_builder::Builder;
/// Protocol role.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[allow(missing_docs)]
pub enum Role {
    // The party leading the 2PC protocol.
    Leader,
    // The party following the leader.
    Follower,
}
/// Configuration for AES-GCM.
#[derive(Debug, Clone, Builder)]
pub struct AesGcmConfig {
    /// The id of this instance.
    #[builder(setter(into))]
    id: String,
    /// The protocol role.
    role: Role,
}
impl AesGcmConfig {
    /// Creates a new builder for the AES-GCM configuration.
    pub fn builder() -> AesGcmConfigBuilder {
        AesGcmConfigBuilder::default()
    }
    /// Returns the id of this instance.
    pub fn id(&self) -> &str {
        &self.id
    }
    /// Returns the protocol role.
    pub fn role(&self) -> &Role {
        &self.role
    }
}

View File

@@ -1,102 +0,0 @@
use std::fmt::Display;
/// AES-GCM error.
///
/// Carries an [`ErrorKind`] classifying the failure plus an optional
/// underlying source error.
#[derive(Debug, thiserror::Error)]
pub struct AesGcmError {
    // Classification of the failure.
    kind: ErrorKind,
    // Underlying cause, if any.
    #[source]
    source: Option<Box<dyn std::error::Error + Send + Sync>>,
}
impl AesGcmError {
    /// Creates an error of `kind` wrapping `source` as the cause.
    pub(crate) fn new<E>(kind: ErrorKind, source: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        Self {
            kind,
            source: Some(source.into()),
        }
    }
    /// Returns the error kind (test-only accessor).
    #[cfg(test)]
    pub(crate) fn kind(&self) -> ErrorKind {
        self.kind
    }
    /// Error for an authentication tag that failed verification.
    pub(crate) fn invalid_tag() -> Self {
        Self {
            kind: ErrorKind::Tag,
            source: None,
        }
    }
    /// Error for a peer that deviated from the protocol.
    pub(crate) fn peer(reason: impl Into<String>) -> Self {
        Self {
            kind: ErrorKind::PeerMisbehaved,
            source: Some(reason.into().into()),
        }
    }
    /// Error for a malformed payload.
    pub(crate) fn payload(reason: impl Into<String>) -> Self {
        Self {
            kind: ErrorKind::Payload,
            source: Some(reason.into().into()),
        }
    }
}
/// Internal classification of AES-GCM failures.
///
/// `Eq` is derived alongside `PartialEq` since equality is a plain
/// discriminant comparison (clippy: `derive_partial_eq_without_eq`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum ErrorKind {
    /// Transport or channel I/O failure.
    Io,
    /// Failure in the block cipher component.
    BlockCipher,
    /// Failure in the stream cipher component.
    StreamCipher,
    /// Failure in the GHASH component.
    Ghash,
    /// The purported authentication tag did not verify.
    Tag,
    /// The peer deviated from the protocol.
    PeerMisbehaved,
    /// The payload was malformed (e.g. shorter than a tag).
    Payload,
}
impl Display for AesGcmError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self.kind {
ErrorKind::Io => write!(f, "io error")?,
ErrorKind::BlockCipher => write!(f, "block cipher error")?,
ErrorKind::StreamCipher => write!(f, "stream cipher error")?,
ErrorKind::Ghash => write!(f, "ghash error")?,
ErrorKind::Tag => write!(f, "payload has corrupted tag")?,
ErrorKind::PeerMisbehaved => write!(f, "peer misbehaved")?,
ErrorKind::Payload => write!(f, "payload error")?,
}
if let Some(source) = &self.source {
write!(f, " caused by: {}", source)?;
}
Ok(())
}
}
// Lift component errors into `AesGcmError`, tagging each with the kind
// of the subprotocol it originated from.
impl From<std::io::Error> for AesGcmError {
    fn from(err: std::io::Error) -> Self {
        Self::new(ErrorKind::Io, err)
    }
}
impl From<block_cipher::BlockCipherError> for AesGcmError {
    fn from(err: block_cipher::BlockCipherError) -> Self {
        Self::new(ErrorKind::BlockCipher, err)
    }
}
impl From<tlsn_stream_cipher::StreamCipherError> for AesGcmError {
    fn from(err: tlsn_stream_cipher::StreamCipherError) -> Self {
        Self::new(ErrorKind::StreamCipher, err)
    }
}
impl From<tlsn_universal_hash::UniversalHashError> for AesGcmError {
    fn from(err: tlsn_universal_hash::UniversalHashError) -> Self {
        Self::new(ErrorKind::Ghash, err)
    }
}

View File

@@ -1,96 +0,0 @@
//! Mock implementation of AES-GCM for testing purposes.
use block_cipher::{BlockCipherConfig, MpcBlockCipher};
use mpz_common::executor::{test_st_executor, STExecutor};
use mpz_garble::protocol::deap::mock::{MockFollower, MockLeader};
use mpz_ot::ideal::ot::ideal_ot;
use serio::channel::MemoryDuplex;
use tlsn_stream_cipher::{MpcStreamCipher, StreamCipherConfig};
use tlsn_universal_hash::ghash::ideal_ghash;
use super::*;
/// Creates a mock AES-GCM pair.
///
/// # Arguments
///
/// * `id` - The id of the AES-GCM instances.
/// * `(leader, follower)` - The leader and follower vms.
/// * `leader_config` - The configuration of the leader.
/// * `follower_config` - The configuration of the follower.
pub async fn create_mock_aes_gcm_pair(
    id: &str,
    (leader, follower): (MockLeader, MockFollower),
    leader_config: AesGcmConfig,
    follower_config: AesGcmConfig,
) -> (
    MpcAesGcm<STExecutor<MemoryDuplex>>,
    MpcAesGcm<STExecutor<MemoryDuplex>>,
) {
    // Block cipher: each party gets a dedicated VM thread backed by ideal OT.
    let block_cipher_id = format!("{}/block_cipher", id);
    let (ctx_leader, ctx_follower) = test_st_executor(128);
    let (leader_ot_send, follower_ot_recv) = ideal_ot();
    let (follower_ot_send, leader_ot_recv) = ideal_ot();
    let block_leader = leader
        .new_thread(ctx_leader, leader_ot_send, leader_ot_recv)
        .unwrap();
    let block_follower = follower
        .new_thread(ctx_follower, follower_ot_send, follower_ot_recv)
        .unwrap();
    let leader_block_cipher = MpcBlockCipher::new(
        BlockCipherConfig::builder()
            .id(block_cipher_id.clone())
            .build()
            .unwrap(),
        block_leader,
    );
    let follower_block_cipher = MpcBlockCipher::new(
        BlockCipherConfig::builder()
            .id(block_cipher_id.clone())
            .build()
            .unwrap(),
        block_follower,
    );
    // Stream cipher: consumes the VMs themselves (note `leader`/`follower`
    // are moved here and later rebound to the AES-GCM instances).
    let stream_cipher_id = format!("{}/stream_cipher", id);
    let leader_stream_cipher = MpcStreamCipher::new(
        StreamCipherConfig::builder()
            .id(stream_cipher_id.clone())
            .build()
            .unwrap(),
        leader,
    );
    let follower_stream_cipher = MpcStreamCipher::new(
        StreamCipherConfig::builder()
            .id(stream_cipher_id.clone())
            .build()
            .unwrap(),
        follower,
    );
    // One executor pair for the ideal GHASH, a second (shadowing ctx_a/ctx_b)
    // for the AES-GCM instances themselves.
    let (ctx_a, ctx_b) = test_st_executor(128);
    let (leader_ghash, follower_ghash) = ideal_ghash(ctx_a, ctx_b);
    let (ctx_a, ctx_b) = test_st_executor(128);
    let leader = MpcAesGcm::new(
        leader_config,
        ctx_a,
        Box::new(leader_block_cipher),
        Box::new(leader_stream_cipher),
        Box::new(leader_ghash),
    );
    let follower = MpcAesGcm::new(
        follower_config,
        ctx_b,
        Box::new(follower_block_cipher),
        Box::new(follower_stream_cipher),
        Box::new(follower_ghash),
    );
    (leader, follower)
}

View File

@@ -1,712 +0,0 @@
//! This module provides an implementation of 2PC AES-GCM.
mod config;
mod error;
#[cfg(feature = "mock")]
pub mod mock;
mod tag;
pub use config::{AesGcmConfig, AesGcmConfigBuilder, AesGcmConfigBuilderError, Role};
pub use error::AesGcmError;
use async_trait::async_trait;
use block_cipher::{Aes128, BlockCipher};
use futures::TryFutureExt;
use mpz_common::Context;
use mpz_garble::value::ValueRef;
use tlsn_stream_cipher::{Aes128Ctr, StreamCipher};
use tlsn_universal_hash::UniversalHash;
use tracing::instrument;
use crate::{
aes_gcm::tag::{compute_tag, verify_tag, TAG_LEN},
Aead,
};
/// MPC AES-GCM.
///
/// Composes a 2PC block cipher, a 2PC stream cipher, and a universal hash
/// (GHASH) over an MPC context.
pub struct MpcAesGcm<Ctx> {
    // Instance configuration (id and protocol role).
    config: AesGcmConfig,
    // MPC context used when computing/verifying tags.
    ctx: Ctx,
    // 2PC block cipher component.
    aes_block: Box<dyn BlockCipher<Aes128>>,
    // 2PC stream cipher (AES-CTR) component.
    aes_ctr: Box<dyn StreamCipher<Aes128Ctr>>,
    // Universal hash (GHASH) component.
    ghash: Box<dyn UniversalHash>,
}
impl<Ctx> std::fmt::Debug for MpcAesGcm<Ctx> {
    // Manual impl that shows only the config — presumably because the
    // context and boxed components do not implement `Debug`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("MpcAesGcm")
            .field("config", &self.config)
            .finish()
    }
}
impl<Ctx: Context> MpcAesGcm<Ctx> {
    /// Creates a new instance of [`MpcAesGcm`] from its component parts.
    pub fn new(
        config: AesGcmConfig,
        context: Ctx,
        aes_block: Box<dyn BlockCipher<Aes128>>,
        aes_ctr: Box<dyn StreamCipher<Aes128Ctr>>,
        ghash: Box<dyn UniversalHash>,
    ) -> Self {
        Self {
            config,
            ctx: context,
            aes_block,
            aes_ctr,
            ghash,
        }
    }
}
#[async_trait]
impl<Ctx: Context> Aead for MpcAesGcm<Ctx> {
type Error = AesGcmError;
#[instrument(level = "info", skip_all, err)]
async fn set_key(&mut self, key: ValueRef, iv: ValueRef) -> Result<(), AesGcmError> {
self.aes_block.set_key(key.clone());
self.aes_ctr.set_key(key, iv);
Ok(())
}
#[instrument(level = "info", skip_all, err)]
async fn decode_key_private(&mut self) -> Result<(), AesGcmError> {
self.aes_ctr
.decode_key_private()
.await
.map_err(AesGcmError::from)
}
#[instrument(level = "info", skip_all, err)]
async fn decode_key_blind(&mut self) -> Result<(), AesGcmError> {
self.aes_ctr
.decode_key_blind()
.await
.map_err(AesGcmError::from)
}
fn set_transcript_id(&mut self, id: &str) {
self.aes_ctr.set_transcript_id(id)
}
#[instrument(level = "debug", skip(self), err)]
async fn setup(&mut self) -> Result<(), AesGcmError> {
self.ghash.setup().await?;
Ok(())
}
#[instrument(level = "debug", skip(self), err)]
async fn preprocess(&mut self, len: usize) -> Result<(), AesGcmError> {
futures::try_join!(
// Preprocess the GHASH key block.
self.aes_block
.preprocess(block_cipher::Visibility::Public, 1)
.map_err(AesGcmError::from),
self.aes_ctr.preprocess(len).map_err(AesGcmError::from),
self.ghash.preprocess().map_err(AesGcmError::from),
)?;
Ok(())
}
#[instrument(level = "debug", skip_all, err)]
async fn start(&mut self) -> Result<(), AesGcmError> {
let h_share = self.aes_block.encrypt_share(vec![0u8; 16]).await?;
self.ghash.set_key(h_share).await?;
Ok(())
}
#[instrument(level = "debug", skip_all, err)]
async fn encrypt_public(
&mut self,
explicit_nonce: Vec<u8>,
plaintext: Vec<u8>,
aad: Vec<u8>,
) -> Result<Vec<u8>, AesGcmError> {
let ciphertext = self
.aes_ctr
.encrypt_public(explicit_nonce.clone(), plaintext)
.await?;
let tag = compute_tag(
&mut self.ctx,
self.aes_ctr.as_mut(),
self.ghash.as_mut(),
explicit_nonce,
ciphertext.clone(),
aad,
)
.await?;
let mut payload = ciphertext;
payload.extend(tag);
Ok(payload)
}
#[instrument(level = "debug", skip_all, err)]
async fn encrypt_private(
&mut self,
explicit_nonce: Vec<u8>,
plaintext: Vec<u8>,
aad: Vec<u8>,
) -> Result<Vec<u8>, AesGcmError> {
let ciphertext = self
.aes_ctr
.encrypt_private(explicit_nonce.clone(), plaintext)
.await?;
let tag = compute_tag(
&mut self.ctx,
self.aes_ctr.as_mut(),
self.ghash.as_mut(),
explicit_nonce,
ciphertext.clone(),
aad,
)
.await?;
let mut payload = ciphertext;
payload.extend(tag);
Ok(payload)
}
#[instrument(level = "debug", skip_all, err)]
async fn encrypt_blind(
&mut self,
explicit_nonce: Vec<u8>,
plaintext_len: usize,
aad: Vec<u8>,
) -> Result<Vec<u8>, AesGcmError> {
let ciphertext = self
.aes_ctr
.encrypt_blind(explicit_nonce.clone(), plaintext_len)
.await?;
let tag = compute_tag(
&mut self.ctx,
self.aes_ctr.as_mut(),
self.ghash.as_mut(),
explicit_nonce,
ciphertext.clone(),
aad,
)
.await?;
let mut payload = ciphertext;
payload.extend(tag);
Ok(payload)
}
#[instrument(level = "debug", skip_all, err)]
async fn decrypt_public(
&mut self,
explicit_nonce: Vec<u8>,
mut payload: Vec<u8>,
aad: Vec<u8>,
) -> Result<Vec<u8>, AesGcmError> {
let purported_tag: [u8; TAG_LEN] = payload
.split_off(payload.len() - TAG_LEN)
.try_into()
.map_err(|_| AesGcmError::payload("payload is not long enough to contain tag"))?;
let ciphertext = payload;
verify_tag(
&mut self.ctx,
self.aes_ctr.as_mut(),
self.ghash.as_mut(),
*self.config.role(),
explicit_nonce.clone(),
ciphertext.clone(),
aad,
purported_tag,
)
.await?;
let plaintext = self
.aes_ctr
.decrypt_public(explicit_nonce, ciphertext)
.await?;
Ok(plaintext)
}
#[instrument(level = "debug", skip_all, err)]
async fn decrypt_private(
&mut self,
explicit_nonce: Vec<u8>,
mut payload: Vec<u8>,
aad: Vec<u8>,
) -> Result<Vec<u8>, AesGcmError> {
let purported_tag: [u8; TAG_LEN] = payload
.split_off(payload.len() - TAG_LEN)
.try_into()
.map_err(|_| AesGcmError::payload("payload is not long enough to contain tag"))?;
let ciphertext = payload;
verify_tag(
&mut self.ctx,
self.aes_ctr.as_mut(),
self.ghash.as_mut(),
*self.config.role(),
explicit_nonce.clone(),
ciphertext.clone(),
aad,
purported_tag,
)
.await?;
let plaintext = self
.aes_ctr
.decrypt_private(explicit_nonce, ciphertext)
.await?;
Ok(plaintext)
}
#[instrument(level = "debug", skip_all, err)]
async fn decrypt_blind(
&mut self,
explicit_nonce: Vec<u8>,
mut payload: Vec<u8>,
aad: Vec<u8>,
) -> Result<(), AesGcmError> {
let purported_tag: [u8; TAG_LEN] = payload
.split_off(payload.len() - TAG_LEN)
.try_into()
.map_err(|_| AesGcmError::payload("payload is not long enough to contain tag"))?;
let ciphertext = payload;
verify_tag(
&mut self.ctx,
self.aes_ctr.as_mut(),
self.ghash.as_mut(),
*self.config.role(),
explicit_nonce.clone(),
ciphertext.clone(),
aad,
purported_tag,
)
.await?;
self.aes_ctr
.decrypt_blind(explicit_nonce, ciphertext)
.await?;
Ok(())
}
#[instrument(level = "debug", skip_all, err)]
async fn verify_tag(
&mut self,
explicit_nonce: Vec<u8>,
mut payload: Vec<u8>,
aad: Vec<u8>,
) -> Result<(), AesGcmError> {
let purported_tag: [u8; TAG_LEN] = payload
.split_off(payload.len() - TAG_LEN)
.try_into()
.map_err(|_| AesGcmError::payload("payload is not long enough to contain tag"))?;
let ciphertext = payload;
verify_tag(
&mut self.ctx,
self.aes_ctr.as_mut(),
self.ghash.as_mut(),
*self.config.role(),
explicit_nonce,
ciphertext,
aad,
purported_tag,
)
.await?;
Ok(())
}
#[instrument(level = "debug", skip_all, err)]
async fn prove_plaintext(
&mut self,
explicit_nonce: Vec<u8>,
mut payload: Vec<u8>,
aad: Vec<u8>,
) -> Result<Vec<u8>, AesGcmError> {
let purported_tag: [u8; TAG_LEN] = payload
.split_off(payload.len() - TAG_LEN)
.try_into()
.map_err(|_| AesGcmError::payload("payload is not long enough to contain tag"))?;
let ciphertext = payload;
verify_tag(
&mut self.ctx,
self.aes_ctr.as_mut(),
self.ghash.as_mut(),
*self.config.role(),
explicit_nonce.clone(),
ciphertext.clone(),
aad,
purported_tag,
)
.await?;
let plaintext = self
.aes_ctr
.prove_plaintext(explicit_nonce, ciphertext)
.await?;
Ok(plaintext)
}
#[instrument(level = "debug", skip_all, err)]
async fn prove_plaintext_no_tag(
&mut self,
explicit_nonce: Vec<u8>,
ciphertext: Vec<u8>,
) -> Result<Vec<u8>, AesGcmError> {
self.aes_ctr
.prove_plaintext(explicit_nonce, ciphertext)
.map_err(AesGcmError::from)
.await
}
#[instrument(level = "debug", skip_all, err)]
async fn verify_plaintext(
&mut self,
explicit_nonce: Vec<u8>,
mut payload: Vec<u8>,
aad: Vec<u8>,
) -> Result<(), AesGcmError> {
let purported_tag: [u8; TAG_LEN] = payload
.split_off(payload.len() - TAG_LEN)
.try_into()
.map_err(|_| AesGcmError::payload("payload is not long enough to contain tag"))?;
let ciphertext = payload;
verify_tag(
&mut self.ctx,
self.aes_ctr.as_mut(),
self.ghash.as_mut(),
*self.config.role(),
explicit_nonce.clone(),
ciphertext.clone(),
aad,
purported_tag,
)
.await?;
self.aes_ctr
.verify_plaintext(explicit_nonce, ciphertext)
.await?;
Ok(())
}
#[instrument(level = "debug", skip_all, err)]
async fn verify_plaintext_no_tag(
&mut self,
explicit_nonce: Vec<u8>,
ciphertext: Vec<u8>,
) -> Result<(), AesGcmError> {
self.aes_ctr
.verify_plaintext(explicit_nonce, ciphertext)
.map_err(AesGcmError::from)
.await
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
aes_gcm::{mock::create_mock_aes_gcm_pair, AesGcmConfigBuilder, Role},
Aead,
};
use ::aes_gcm::{aead::AeadInPlace, Aes128Gcm, NewAead, Nonce};
use error::ErrorKind;
use mpz_common::executor::STExecutor;
use mpz_garble::{protocol::deap::mock::create_mock_deap_vm, Memory};
use serio::channel::MemoryDuplex;
fn reference_impl(
key: &[u8],
iv: &[u8],
explicit_nonce: &[u8],
plaintext: &[u8],
aad: &[u8],
) -> Vec<u8> {
let cipher = Aes128Gcm::new_from_slice(key).unwrap();
let nonce = [iv, explicit_nonce].concat();
let nonce = Nonce::from_slice(nonce.as_slice());
let mut ciphertext = plaintext.to_vec();
cipher
.encrypt_in_place(nonce, aad, &mut ciphertext)
.unwrap();
ciphertext
}
async fn setup_pair(
key: Vec<u8>,
iv: Vec<u8>,
) -> (
MpcAesGcm<STExecutor<MemoryDuplex>>,
MpcAesGcm<STExecutor<MemoryDuplex>>,
) {
let (leader_vm, follower_vm) = create_mock_deap_vm();
let leader_key = leader_vm
.new_public_array_input::<u8>("key", key.len())
.unwrap();
let leader_iv = leader_vm
.new_public_array_input::<u8>("iv", iv.len())
.unwrap();
leader_vm.assign(&leader_key, key.clone()).unwrap();
leader_vm.assign(&leader_iv, iv.clone()).unwrap();
let follower_key = follower_vm
.new_public_array_input::<u8>("key", key.len())
.unwrap();
let follower_iv = follower_vm
.new_public_array_input::<u8>("iv", iv.len())
.unwrap();
follower_vm.assign(&follower_key, key.clone()).unwrap();
follower_vm.assign(&follower_iv, iv.clone()).unwrap();
let leader_config = AesGcmConfigBuilder::default()
.id("test".to_string())
.role(Role::Leader)
.build()
.unwrap();
let follower_config = AesGcmConfigBuilder::default()
.id("test".to_string())
.role(Role::Follower)
.build()
.unwrap();
let (mut leader, mut follower) = create_mock_aes_gcm_pair(
"test",
(leader_vm, follower_vm),
leader_config,
follower_config,
)
.await;
futures::try_join!(
leader.set_key(leader_key, leader_iv),
follower.set_key(follower_key, follower_iv)
)
.unwrap();
futures::try_join!(leader.setup(), follower.setup()).unwrap();
futures::try_join!(leader.start(), follower.start()).unwrap();
(leader, follower)
}
#[tokio::test]
#[ignore = "expensive"]
async fn test_aes_gcm_encrypt_private() {
let key = vec![0u8; 16];
let iv = vec![0u8; 4];
let explicit_nonce = vec![0u8; 8];
let plaintext = vec![1u8; 32];
let aad = vec![2u8; 12];
let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;
let (leader_ciphertext, follower_ciphertext) = tokio::try_join!(
leader.encrypt_private(explicit_nonce.clone(), plaintext.clone(), aad.clone(),),
follower.encrypt_blind(explicit_nonce.clone(), plaintext.len(), aad.clone())
)
.unwrap();
assert_eq!(leader_ciphertext, follower_ciphertext);
assert_eq!(
leader_ciphertext,
reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad)
);
}
#[tokio::test]
#[ignore = "expensive"]
async fn test_aes_gcm_encrypt_public() {
let key = vec![0u8; 16];
let iv = vec![0u8; 4];
let explicit_nonce = vec![0u8; 8];
let plaintext = vec![1u8; 32];
let aad = vec![2u8; 12];
let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;
let (leader_ciphertext, follower_ciphertext) = tokio::try_join!(
leader.encrypt_public(explicit_nonce.clone(), plaintext.clone(), aad.clone(),),
follower.encrypt_public(explicit_nonce.clone(), plaintext.clone(), aad.clone(),)
)
.unwrap();
assert_eq!(leader_ciphertext, follower_ciphertext);
assert_eq!(
leader_ciphertext,
reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad)
);
}
#[tokio::test]
#[ignore = "expensive"]
async fn test_aes_gcm_decrypt_private() {
let key = vec![0u8; 16];
let iv = vec![0u8; 4];
let explicit_nonce = vec![0u8; 8];
let plaintext = vec![1u8; 32];
let aad = vec![2u8; 12];
let ciphertext = reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad);
let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;
let (leader_plaintext, _) = tokio::try_join!(
leader.decrypt_private(explicit_nonce.clone(), ciphertext.clone(), aad.clone(),),
follower.decrypt_blind(explicit_nonce.clone(), ciphertext, aad.clone(),)
)
.unwrap();
assert_eq!(leader_plaintext, plaintext);
}
#[tokio::test]
#[ignore = "expensive"]
async fn test_aes_gcm_decrypt_private_bad_tag() {
let key = vec![0u8; 16];
let iv = vec![0u8; 4];
let explicit_nonce = vec![0u8; 8];
let plaintext = vec![1u8; 32];
let aad = vec![2u8; 12];
let ciphertext = reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad);
let len = ciphertext.len();
// corrupt tag
let mut corrupted = ciphertext.clone();
corrupted[len - 1] -= 1;
let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;
// leader receives corrupted tag
let err = tokio::try_join!(
leader.decrypt_private(explicit_nonce.clone(), corrupted.clone(), aad.clone(),),
follower.decrypt_blind(explicit_nonce.clone(), ciphertext.clone(), aad.clone(),)
)
.unwrap_err();
assert_eq!(err.kind(), ErrorKind::Tag);
let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;
// follower receives corrupted tag
let err = tokio::try_join!(
leader.decrypt_private(explicit_nonce.clone(), ciphertext.clone(), aad.clone(),),
follower.decrypt_blind(explicit_nonce.clone(), corrupted.clone(), aad.clone(),)
)
.unwrap_err();
assert_eq!(err.kind(), ErrorKind::Tag);
}
#[tokio::test]
#[ignore = "expensive"]
async fn test_aes_gcm_decrypt_public() {
let key = vec![0u8; 16];
let iv = vec![0u8; 4];
let explicit_nonce = vec![0u8; 8];
let plaintext = vec![1u8; 32];
let aad = vec![2u8; 12];
let ciphertext = reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad);
let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;
let (leader_plaintext, follower_plaintext) = tokio::try_join!(
leader.decrypt_public(explicit_nonce.clone(), ciphertext.clone(), aad.clone(),),
follower.decrypt_public(explicit_nonce.clone(), ciphertext, aad.clone(),)
)
.unwrap();
assert_eq!(leader_plaintext, plaintext);
assert_eq!(leader_plaintext, follower_plaintext);
}
#[tokio::test]
#[ignore = "expensive"]
async fn test_aes_gcm_decrypt_public_bad_tag() {
let key = vec![0u8; 16];
let iv = vec![0u8; 4];
let explicit_nonce = vec![0u8; 8];
let plaintext = vec![1u8; 32];
let aad = vec![2u8; 12];
let ciphertext = reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad);
let len = ciphertext.len();
// Corrupt tag.
let mut corrupted = ciphertext.clone();
corrupted[len - 1] -= 1;
let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;
// Leader receives corrupted tag.
let err = tokio::try_join!(
leader.decrypt_public(explicit_nonce.clone(), corrupted.clone(), aad.clone(),),
follower.decrypt_public(explicit_nonce.clone(), ciphertext.clone(), aad.clone(),)
)
.unwrap_err();
assert_eq!(err.kind(), ErrorKind::Tag);
let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;
// Follower receives corrupted tag.
let err = tokio::try_join!(
leader.decrypt_public(explicit_nonce.clone(), ciphertext.clone(), aad.clone(),),
follower.decrypt_public(explicit_nonce.clone(), corrupted.clone(), aad.clone(),)
)
.unwrap_err();
assert_eq!(err.kind(), ErrorKind::Tag);
}
#[tokio::test]
#[ignore = "expensive"]
async fn test_aes_gcm_verify_tag() {
    let key = vec![0u8; 16];
    let iv = vec![0u8; 4];
    let explicit_nonce = vec![0u8; 8];
    let plaintext = vec![1u8; 32];
    let aad = vec![2u8; 12];

    let ciphertext = reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad);
    let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;

    // A well-formed payload must verify on both sides.
    tokio::try_join!(
        leader.verify_tag(explicit_nonce.clone(), ciphertext.clone(), aad.clone()),
        follower.verify_tag(explicit_nonce.clone(), ciphertext.clone(), aad.clone())
    )
    .unwrap();

    // Flip the final byte, which lies inside the tag.
    let mut corrupted = ciphertext.clone();
    let last = corrupted.len() - 1;
    corrupted[last] -= 1;

    // Both parties must reject the corrupted tag.
    let (leader_res, follower_res) = tokio::join!(
        leader.verify_tag(explicit_nonce.clone(), corrupted.clone(), aad.clone()),
        follower.verify_tag(explicit_nonce.clone(), corrupted, aad.clone())
    );
    assert_eq!(leader_res.unwrap_err().kind(), ErrorKind::Tag);
    assert_eq!(follower_res.unwrap_err().kind(), ErrorKind::Tag);
}
}

View File

@@ -1,179 +0,0 @@
use futures::TryFutureExt;
use mpz_common::Context;
use mpz_core::{
commit::{Decommitment, HashCommit},
hash::Hash,
};
use serde::{Deserialize, Serialize};
use serio::{stream::IoStreamExt, SinkExt};
use std::ops::Add;
use tlsn_stream_cipher::{Aes128Ctr, StreamCipher};
use tlsn_universal_hash::UniversalHash;
use tracing::instrument;
use crate::aes_gcm::{AesGcmError, Role};
/// Length in bytes of a GCM authentication tag.
pub(crate) const TAG_LEN: usize = 16;

/// One party's additive (XOR) share of the GCM tag.
///
/// The full tag is reconstructed by combining both parties' shares via the
/// `Add` impl below.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct TagShare([u8; TAG_LEN]);

impl AsRef<[u8]> for TagShare {
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}
/// Combines two tag shares into the full 16-byte tag.
///
/// "Addition" here is bytewise XOR, so summing the two parties' shares
/// reconstructs the complete tag.
impl Add for TagShare {
    type Output = [u8; TAG_LEN];

    fn add(self, rhs: Self) -> Self::Output {
        core::array::from_fn(|i| self.0[i] ^ rhs.0[i])
    }
}
/// Computes this party's share of the GCM tag for `ciphertext` and `aad`.
///
/// Runs two sub-protocols concurrently:
/// - shares the keystream block at counter 1 (`J0`) via the stream cipher,
/// - computes a share of the universal hash over the padded
///   `aad || ciphertext` data (see [`build_ghash_data`]).
///
/// The returned share is the XOR of the two; XOR-combining both parties'
/// results yields the full tag.
#[instrument(level = "trace", skip_all, err)]
async fn compute_tag_share<C: StreamCipher<Aes128Ctr> + ?Sized, H: UniversalHash + ?Sized>(
    aes_ctr: &mut C,
    hasher: &mut H,
    explicit_nonce: Vec<u8>,
    ciphertext: Vec<u8>,
    aad: Vec<u8>,
) -> Result<TagShare, AesGcmError> {
    let (j0, hash) = futures::try_join!(
        aes_ctr
            .share_keystream_block(explicit_nonce, 1)
            .map_err(AesGcmError::from),
        hasher
            .finalize(build_ghash_data(aad, ciphertext))
            .map_err(AesGcmError::from)
    )?;

    // Both sub-protocols must produce exactly one 16-byte block.
    debug_assert!(j0.len() == TAG_LEN);
    debug_assert!(hash.len() == TAG_LEN);

    let tag_share = core::array::from_fn(|i| j0[i] ^ hash[i]);

    Ok(TagShare(tag_share))
}
/// Computes the tag for a ciphertext and additional data.
///
/// Both parties exchange their tag shares in the clear and XOR them
/// together, so both learn the full tag.
///
/// The commit-reveal step is not required for computing a tag sent to the
/// Server, as it will be able to detect if the tag is incorrect.
#[instrument(level = "debug", skip_all, err)]
pub(crate) async fn compute_tag<
    Ctx: Context,
    C: StreamCipher<Aes128Ctr> + ?Sized,
    H: UniversalHash + ?Sized,
>(
    ctx: &mut Ctx,
    aes_ctr: &mut C,
    hasher: &mut H,
    explicit_nonce: Vec<u8>,
    ciphertext: Vec<u8>,
    aad: Vec<u8>,
) -> Result<[u8; TAG_LEN], AesGcmError> {
    let tag_share = compute_tag_share(aes_ctr, hasher, explicit_nonce, ciphertext, aad).await?;

    // TODO: The follower doesn't really need to learn the tag,
    // we could reduce some latency by not sending it.
    let io = ctx.io_mut();
    io.send(tag_share.clone()).await?;
    let other_tag_share: TagShare = io.expect_next().await?;

    // XOR the local and remote shares to reconstruct the full tag.
    let tag = tag_share + other_tag_share;

    Ok(tag)
}
/// Verifies a purported tag against the ciphertext and additional data.
///
/// Verifying a tag requires a commit-reveal protocol between the leader and
/// follower. Without it, the party which receives the other's tag share first
/// could trivially compute a tag share which would cause an invalid message to
/// be accepted.
#[instrument(level = "debug", skip_all, err)]
#[allow(clippy::too_many_arguments)]
pub(crate) async fn verify_tag<
    Ctx: Context,
    C: StreamCipher<Aes128Ctr> + ?Sized,
    H: UniversalHash + ?Sized,
>(
    ctx: &mut Ctx,
    aes_ctr: &mut C,
    hasher: &mut H,
    role: Role,
    explicit_nonce: Vec<u8>,
    ciphertext: Vec<u8>,
    aad: Vec<u8>,
    purported_tag: [u8; TAG_LEN],
) -> Result<(), AesGcmError> {
    let tag_share = compute_tag_share(aes_ctr, hasher, explicit_nonce, ciphertext, aad).await?;

    let io = ctx.io_mut();
    let tag = match role {
        Role::Leader => {
            // Send commitment of tag share to follower.
            let (tag_share_decommitment, tag_share_commitment) = tag_share.clone().hash_commit();
            io.send(tag_share_commitment).await?;
            let follower_tag_share: TagShare = io.expect_next().await?;
            // Send decommitment (tag share) to follower only after the
            // follower's share has been received.
            io.send(tag_share_decommitment).await?;
            tag_share + follower_tag_share
        }
        Role::Follower => {
            // Wait for commitment from leader.
            let commitment: Hash = io.expect_next().await?;
            // Send tag share to leader.
            io.send(tag_share.clone()).await?;
            // Expect decommitment (tag share) from leader.
            let decommitment: Decommitment<TagShare> = io.expect_next().await?;
            // Verify decommitment against the earlier commitment.
            decommitment.verify(&commitment).map_err(|_| {
                AesGcmError::peer("leader tag share commitment verification failed")
            })?;
            let leader_tag_share = decommitment.into_inner();
            tag_share + leader_tag_share
        }
    };

    // Reject if tag is incorrect.
    //
    // NOTE(review): this comparison is not constant-time; presumably fine
    // because both shares (and hence the tag) are already known to both
    // parties at this point — confirm.
    if tag != purported_tag {
        return Err(AesGcmError::invalid_tag());
    }

    Ok(())
}
/// Builds padded data for GHASH.
///
/// Lays out the input as `aad || ciphertext || len_block`, where `aad` and
/// `ciphertext` are each zero-padded to a multiple of the 16-byte block size,
/// and `len_block` encodes the *unpadded* bit lengths of the AAD (upper
/// 64 bits) and ciphertext (lower 64 bits), big-endian.
fn build_ghash_data(mut aad: Vec<u8>, mut ciphertext: Vec<u8>) -> Vec<u8> {
    // Bit lengths must be captured before padding.
    let associated_data_bitlen = (aad.len() as u64) * 8;
    let text_bitlen = (ciphertext.len() as u64) * 8;

    let len_block = ((associated_data_bitlen as u128) << 64) + (text_bitlen as u128);

    // Zero-pad both segments to a multiple of 16 bytes.
    // `div_ceil` replaces the manual `(len / 16) + (len % 16 != 0) as usize`.
    aad.resize(aad.len().div_ceil(16) * 16, 0);
    ciphertext.resize(ciphertext.len().div_ceil(16) * 16, 0);

    let mut data: Vec<u8> = Vec::with_capacity(aad.len() + ciphertext.len() + 16);
    data.extend(aad);
    data.extend(ciphertext);
    data.extend_from_slice(&len_block.to_be_bytes());

    data
}

View File

@@ -1,255 +0,0 @@
//! This crate provides implementations of 2PC AEADs for authenticated
//! encryption with a shared key.
//!
//! Both parties can work together to encrypt and decrypt messages with
//! different visibility configurations. See [`Aead`] for more information on
//! the interface.
//!
//! For example, one party can privately provide the plaintext to encrypt, while
//! both parties can see the ciphertext and the tag. Or, both parties can
//! cooperate to decrypt a ciphertext and verify the tag, while only one party
//! can see the plaintext.
#![deny(missing_docs, unreachable_pub, unused_must_use)]
#![deny(clippy::all)]
#![forbid(unsafe_code)]
pub mod aes_gcm;
use async_trait::async_trait;
use mpz_garble::value::ValueRef;
/// This trait defines the interface for AEADs.
#[async_trait]
pub trait Aead: Send {
    /// The error type for the AEAD.
    type Error: std::error::Error + Send + Sync + 'static;

    /// Sets the key for the AEAD.
    ///
    /// Note that the IV reference is provided here as well.
    async fn set_key(&mut self, key: ValueRef, iv: ValueRef) -> Result<(), Self::Error>;

    /// Decodes the key for the AEAD, revealing it to this party.
    async fn decode_key_private(&mut self) -> Result<(), Self::Error>;

    /// Decodes the key for the AEAD, revealing it to the other party(s).
    async fn decode_key_blind(&mut self) -> Result<(), Self::Error>;

    /// Sets the transcript id.
    ///
    /// The AEAD assigns unique identifiers to each byte of plaintext
    /// during encryption and decryption.
    ///
    /// For example, if the transcript id is set to `foo`, then the first byte
    /// will be assigned the id `foo/0`, the second byte `foo/1`, and so on.
    ///
    /// Each transcript id has an independent counter.
    ///
    /// # Note
    ///
    /// The state of a transcript counter is preserved between calls to
    /// `set_transcript_id`.
    fn set_transcript_id(&mut self, id: &str);

    /// Performs any necessary one-time setup for the AEAD.
    async fn setup(&mut self) -> Result<(), Self::Error>;

    /// Preprocesses for the given number of bytes.
    async fn preprocess(&mut self, len: usize) -> Result<(), Self::Error>;

    /// Starts the AEAD.
    ///
    /// This method performs initialization for the AEAD after setting the key.
    async fn start(&mut self) -> Result<(), Self::Error>;

    /// Encrypts a plaintext message, returning the ciphertext and tag.
    ///
    /// The plaintext is provided by both parties.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for encryption.
    /// * `plaintext` - The plaintext to encrypt.
    /// * `aad` - Additional authenticated data.
    async fn encrypt_public(
        &mut self,
        explicit_nonce: Vec<u8>,
        plaintext: Vec<u8>,
        aad: Vec<u8>,
    ) -> Result<Vec<u8>, Self::Error>;

    /// Encrypts a plaintext message, hiding it from the other party, returning
    /// the ciphertext and tag.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for encryption.
    /// * `plaintext` - The plaintext to encrypt.
    /// * `aad` - Additional authenticated data.
    async fn encrypt_private(
        &mut self,
        explicit_nonce: Vec<u8>,
        plaintext: Vec<u8>,
        aad: Vec<u8>,
    ) -> Result<Vec<u8>, Self::Error>;

    /// Encrypts a plaintext message provided by the other party, returning
    /// the ciphertext and tag.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for encryption.
    /// * `plaintext_len` - The length of the plaintext to encrypt.
    /// * `aad` - Additional authenticated data.
    async fn encrypt_blind(
        &mut self,
        explicit_nonce: Vec<u8>,
        plaintext_len: usize,
        aad: Vec<u8>,
    ) -> Result<Vec<u8>, Self::Error>;

    /// Decrypts a ciphertext message, returning the plaintext to both parties.
    ///
    /// This method checks the authenticity of the ciphertext, tag and
    /// additional data.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for decryption.
    /// * `payload` - The ciphertext and tag to authenticate and decrypt.
    /// * `aad` - Additional authenticated data.
    async fn decrypt_public(
        &mut self,
        explicit_nonce: Vec<u8>,
        payload: Vec<u8>,
        aad: Vec<u8>,
    ) -> Result<Vec<u8>, Self::Error>;

    /// Decrypts a ciphertext message, returning the plaintext only to this
    /// party.
    ///
    /// This method checks the authenticity of the ciphertext, tag and
    /// additional data.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for decryption.
    /// * `payload` - The ciphertext and tag to authenticate and decrypt.
    /// * `aad` - Additional authenticated data.
    async fn decrypt_private(
        &mut self,
        explicit_nonce: Vec<u8>,
        payload: Vec<u8>,
        aad: Vec<u8>,
    ) -> Result<Vec<u8>, Self::Error>;

    /// Decrypts a ciphertext message, returning the plaintext only to the other
    /// party.
    ///
    /// This method checks the authenticity of the ciphertext, tag and
    /// additional data.
    ///
    /// This party learns no plaintext, hence the unit return value.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for decryption.
    /// * `payload` - The ciphertext and tag to authenticate and decrypt.
    /// * `aad` - Additional authenticated data.
    async fn decrypt_blind(
        &mut self,
        explicit_nonce: Vec<u8>,
        payload: Vec<u8>,
        aad: Vec<u8>,
    ) -> Result<(), Self::Error>;

    /// Verifies the tag of a ciphertext message.
    ///
    /// This method checks the authenticity of the ciphertext, tag and
    /// additional data.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for decryption.
    /// * `payload` - The ciphertext and tag to authenticate and decrypt.
    /// * `aad` - Additional authenticated data.
    async fn verify_tag(
        &mut self,
        explicit_nonce: Vec<u8>,
        payload: Vec<u8>,
        aad: Vec<u8>,
    ) -> Result<(), Self::Error>;

    /// Locally decrypts the provided ciphertext and then proves in ZK to the
    /// other party(s) that the plaintext is correct.
    ///
    /// Returns the plaintext.
    ///
    /// This method requires this party to know the encryption key, which can be
    /// achieved by calling the `decode_key_private` method.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for the keystream.
    /// * `payload` - The ciphertext and tag to decrypt and prove.
    /// * `aad` - Additional authenticated data.
    async fn prove_plaintext(
        &mut self,
        explicit_nonce: Vec<u8>,
        payload: Vec<u8>,
        aad: Vec<u8>,
    ) -> Result<Vec<u8>, Self::Error>;

    /// Locally decrypts the provided ciphertext and then proves in ZK to the
    /// other party(s) that the plaintext is correct.
    ///
    /// Returns the plaintext.
    ///
    /// This method requires this party to know the encryption key, which can be
    /// achieved by calling the `decode_key_private` method.
    ///
    /// # WARNING
    ///
    /// This method does not verify the tag of the ciphertext. Only use this if
    /// you know what you're doing.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for the keystream.
    /// * `ciphertext` - The ciphertext to decrypt and prove.
    async fn prove_plaintext_no_tag(
        &mut self,
        explicit_nonce: Vec<u8>,
        ciphertext: Vec<u8>,
    ) -> Result<Vec<u8>, Self::Error>;

    /// Verifies the other party(s) can prove they know a plaintext which
    /// encrypts to the given ciphertext.
    ///
    /// Counterpart to `prove_plaintext`.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for the keystream.
    /// * `payload` - The ciphertext and tag to verify.
    /// * `aad` - Additional authenticated data.
    async fn verify_plaintext(
        &mut self,
        explicit_nonce: Vec<u8>,
        payload: Vec<u8>,
        aad: Vec<u8>,
    ) -> Result<(), Self::Error>;

    /// Verifies the other party(s) can prove they know a plaintext which
    /// encrypts to the given ciphertext.
    ///
    /// Counterpart to `prove_plaintext_no_tag`.
    ///
    /// # WARNING
    ///
    /// This method does not verify the tag of the ciphertext. Only use this if
    /// you know what you're doing.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for the keystream.
    /// * `ciphertext` - The ciphertext to verify.
    async fn verify_plaintext_no_tag(
        &mut self,
        explicit_nonce: Vec<u8>,
        ciphertext: Vec<u8>,
    ) -> Result<(), Self::Error>;
}

View File

@@ -1,30 +0,0 @@
[package]
name = "tlsn-block-cipher"
authors = ["TLSNotary Team"]
description = "2PC block cipher implementation"
keywords = ["tls", "mpc", "2pc", "block-cipher"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.8-pre"
edition = "2021"
[lib]
name = "block_cipher"
[features]
default = ["mock"]
mock = []
[dependencies]
mpz-circuits = { workspace = true }
mpz-garble = { workspace = true }
tlsn-utils = { workspace = true }
async-trait = { workspace = true }
thiserror = { workspace = true }
derive_builder = { workspace = true }
tracing = { workspace = true }
[dev-dependencies]
aes = { workspace = true }
cipher = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }

View File

@@ -1,277 +0,0 @@
use std::{collections::VecDeque, marker::PhantomData};
use async_trait::async_trait;
use mpz_garble::{value::ValueRef, Decode, DecodePrivate, Execute, Load, Memory};
use tracing::instrument;
use utils::id::NestedId;
use crate::{BlockCipher, BlockCipherCircuit, BlockCipherConfig, BlockCipherError, Visibility};
/// Mutable state of the MPC block cipher.
#[derive(Debug)]
struct State {
    /// Id generator for executions whose plaintext is private or blind.
    private_execution_id: NestedId,
    /// Id generator for executions whose plaintext is public.
    public_execution_id: NestedId,
    /// Preprocessed block variables awaiting private/blind encryptions.
    preprocessed_private: VecDeque<BlockVars>,
    /// Preprocessed block variables awaiting public (shared) encryptions.
    preprocessed_public: VecDeque<BlockVars>,
    /// Key reference, set via `BlockCipher::set_key`.
    key: Option<ValueRef>,
}

/// VM value references for a single block-cipher execution.
#[derive(Debug)]
struct BlockVars {
    /// Input plaintext block.
    msg: ValueRef,
    /// Output ciphertext block.
    ciphertext: ValueRef,
}
/// An MPC block cipher.
#[derive(Debug)]
pub struct MpcBlockCipher<C, E>
where
    C: BlockCipherCircuit,
    E: Memory + Execute + Decode + DecodePrivate + Send + Sync,
{
    /// Execution state: id counters, preprocessed blocks, and the key.
    state: State,
    /// The VM executor which runs the 2PC circuit.
    executor: E,
    /// Marker for the concrete cipher circuit type.
    _cipher: PhantomData<C>,
}
impl<C, E> MpcBlockCipher<C, E>
where
    C: BlockCipherCircuit,
    E: Memory + Execute + Decode + DecodePrivate + Send + Sync,
{
    /// Creates a new MPC block cipher.
    ///
    /// # Arguments
    ///
    /// * `config` - The configuration for the block cipher.
    /// * `executor` - The executor to use for the MPC.
    pub fn new(config: BlockCipherConfig, executor: E) -> Self {
        // Separate counters per visibility class keep value ids unique and
        // deterministic across both parties.
        let private_execution_id = NestedId::new(&config.id)
            .append_string("private")
            .append_counter();
        let public_execution_id = NestedId::new(&config.id)
            .append_string("public")
            .append_counter();
        Self {
            state: State {
                private_execution_id,
                public_execution_id,
                preprocessed_private: VecDeque::new(),
                preprocessed_public: VecDeque::new(),
                key: None,
            },
            executor,
            _cipher: PhantomData,
        }
    }

    /// Allocates VM input/output values for one block execution with the
    /// given plaintext visibility.
    fn define_block(&mut self, vis: Visibility) -> BlockVars {
        let (id, msg) = match vis {
            Visibility::Private => {
                let id = self
                    .state
                    .private_execution_id
                    .increment_in_place()
                    .to_string();
                let msg = self
                    .executor
                    .new_private_input::<C::BLOCK>(&format!("{}/msg", &id))
                    .expect("message is not defined");
                (id, msg)
            }
            Visibility::Blind => {
                // Blind blocks share the private id counter so both parties
                // derive the same ids regardless of which side is private.
                let id = self
                    .state
                    .private_execution_id
                    .increment_in_place()
                    .to_string();
                let msg = self
                    .executor
                    .new_blind_input::<C::BLOCK>(&format!("{}/msg", &id))
                    .expect("message is not defined");
                (id, msg)
            }
            Visibility::Public => {
                let id = self
                    .state
                    .public_execution_id
                    .increment_in_place()
                    .to_string();
                let msg = self
                    .executor
                    .new_public_input::<C::BLOCK>(&format!("{}/msg", &id))
                    .expect("message is not defined");
                (id, msg)
            }
        };

        // Allocate the ciphertext output. The expect message names the
        // ciphertext (the previous message was a copy-paste of the input's).
        let ciphertext = self
            .executor
            .new_output::<C::BLOCK>(&format!("{}/ciphertext", &id))
            .expect("ciphertext is not defined");

        BlockVars { msg, ciphertext }
    }
}
#[async_trait]
impl<C, E> BlockCipher<C> for MpcBlockCipher<C, E>
where
    C: BlockCipherCircuit,
    E: Memory + Load + Execute + Decode + DecodePrivate + Send + Sync + Send,
{
    #[instrument(level = "trace", skip_all)]
    fn set_key(&mut self, key: ValueRef) {
        self.state.key = Some(key);
    }

    #[instrument(level = "debug", skip_all, err)]
    async fn preprocess(
        &mut self,
        visibility: Visibility,
        count: usize,
    ) -> Result<(), BlockCipherError> {
        // The key must be set before circuits can be loaded.
        let key = self
            .state
            .key
            .clone()
            .ok_or_else(BlockCipherError::key_not_set)?;

        for _ in 0..count {
            let vars = self.define_block(visibility);

            // Preprocess (load) the circuit against the fresh block vars.
            self.executor
                .load(
                    C::circuit(),
                    &[key.clone(), vars.msg.clone()],
                    &[vars.ciphertext.clone()],
                )
                .await?;

            // Private and Blind preprocessed blocks go into the same queue;
            // both are later consumed by `encrypt_private`/`encrypt_blind`.
            match visibility {
                Visibility::Private | Visibility::Blind => {
                    self.state.preprocessed_private.push_back(vars)
                }
                Visibility::Public => self.state.preprocessed_public.push_back(vars),
            }
        }

        Ok(())
    }

    #[instrument(level = "debug", skip_all, err)]
    async fn encrypt_private(&mut self, plaintext: Vec<u8>) -> Result<Vec<u8>, BlockCipherError> {
        let len = plaintext.len();
        // Reject plaintexts that are not exactly one block long.
        let block: C::BLOCK = plaintext
            .try_into()
            .map_err(|_| BlockCipherError::invalid_message_length::<C>(len))?;

        let key = self
            .state
            .key
            .clone()
            .ok_or_else(BlockCipherError::key_not_set)?;

        // Reuse a preprocessed block when available, else define one now.
        let BlockVars { msg, ciphertext } =
            if let Some(vars) = self.state.preprocessed_private.pop_front() {
                vars
            } else {
                self.define_block(Visibility::Private)
            };

        self.executor.assign(&msg, block)?;

        self.executor
            .execute(C::circuit(), &[key, msg], &[ciphertext.clone()])
            .await?;

        // Decode so both parties learn the ciphertext.
        let mut outputs = self.executor.decode(&[ciphertext]).await?;

        let ciphertext: C::BLOCK = if let Ok(ciphertext) = outputs
            .pop()
            .expect("ciphertext should be present")
            .try_into()
        {
            ciphertext
        } else {
            panic!("ciphertext should be a block")
        };

        Ok(ciphertext.into())
    }

    #[instrument(level = "debug", skip_all, err)]
    async fn encrypt_blind(&mut self) -> Result<Vec<u8>, BlockCipherError> {
        let key = self
            .state
            .key
            .clone()
            .ok_or_else(BlockCipherError::key_not_set)?;

        // Blind executions draw from the same queue as private ones (see
        // `preprocess`); the plaintext is supplied by the other party.
        let BlockVars { msg, ciphertext } =
            if let Some(vars) = self.state.preprocessed_private.pop_front() {
                vars
            } else {
                self.define_block(Visibility::Blind)
            };

        self.executor
            .execute(C::circuit(), &[key, msg], &[ciphertext.clone()])
            .await?;

        let mut outputs = self.executor.decode(&[ciphertext]).await?;

        let ciphertext: C::BLOCK = if let Ok(ciphertext) = outputs
            .pop()
            .expect("ciphertext should be present")
            .try_into()
        {
            ciphertext
        } else {
            panic!("ciphertext should be a block")
        };

        Ok(ciphertext.into())
    }

    #[instrument(level = "debug", skip_all, err)]
    async fn encrypt_share(&mut self, plaintext: Vec<u8>) -> Result<Vec<u8>, BlockCipherError> {
        let len = plaintext.len();
        let block: C::BLOCK = plaintext
            .try_into()
            .map_err(|_| BlockCipherError::invalid_message_length::<C>(len))?;

        let key = self
            .state
            .key
            .clone()
            .ok_or_else(BlockCipherError::key_not_set)?;

        let BlockVars { msg, ciphertext } =
            if let Some(vars) = self.state.preprocessed_public.pop_front() {
                vars
            } else {
                self.define_block(Visibility::Public)
            };

        self.executor.assign(&msg, block)?;

        self.executor
            .execute(C::circuit(), &[key, msg], &[ciphertext.clone()])
            .await?;

        // `decode_shared` yields this party's additive share of the output
        // rather than the plaintext value.
        let mut outputs = self.executor.decode_shared(&[ciphertext]).await?;

        let share: C::BLOCK =
            if let Ok(share) = outputs.pop().expect("share should be present").try_into() {
                share
            } else {
                panic!("share should be a block")
            };

        Ok(share.into())
    }
}

View File

@@ -1,39 +0,0 @@
use std::sync::Arc;
use mpz_circuits::{
circuits::AES128,
types::{StaticValueType, Value},
Circuit,
};
/// A block cipher circuit.
///
/// Implementors tie a concrete circuit (e.g. [`Aes128`]) to the key and
/// block value types used by the executor.
pub trait BlockCipherCircuit: Default + Clone + Send + Sync {
    /// The key type.
    type KEY: StaticValueType + Send + Sync;
    /// The block type.
    type BLOCK: StaticValueType + TryFrom<Vec<u8>> + TryFrom<Value> + Into<Vec<u8>> + Send + Sync;
    /// The length of the key in bytes.
    const KEY_LEN: usize;
    /// The length of the block in bytes.
    const BLOCK_LEN: usize;
    /// Returns the circuit of the cipher.
    fn circuit() -> Arc<Circuit>;
}
/// Aes128 block cipher circuit.
#[derive(Default, Debug, Clone)]
pub struct Aes128;

impl BlockCipherCircuit for Aes128 {
    type KEY = [u8; 16];
    type BLOCK = [u8; 16];

    const KEY_LEN: usize = 16;
    const BLOCK_LEN: usize = 16;

    fn circuit() -> Arc<Circuit> {
        // Cheap: cloning the `Arc` handle, not the circuit itself.
        AES128.clone()
    }
}

View File

@@ -1,16 +0,0 @@
use derive_builder::Builder;
/// Configuration for a block cipher.
#[derive(Debug, Clone, Builder)]
pub struct BlockCipherConfig {
    /// The ID of the block cipher.
    ///
    /// Used as the root for the unique value ids assigned to each execution.
    #[builder(setter(into))]
    pub(crate) id: String,
}

impl BlockCipherConfig {
    /// Creates a new builder for the block cipher configuration.
    pub fn builder() -> BlockCipherConfigBuilder {
        BlockCipherConfigBuilder::default()
    }
}

View File

@@ -1,92 +0,0 @@
use core::fmt;
use std::error::Error;
use crate::BlockCipherCircuit;
/// A block cipher error.
#[derive(Debug, thiserror::Error)]
pub struct BlockCipherError {
    /// Category of the failure.
    kind: ErrorKind,
    /// Underlying cause, if any.
    #[source]
    source: Option<Box<dyn Error + Send + Sync>>,
}

impl BlockCipherError {
    /// Creates a new error of `kind` wrapping `source`.
    pub(crate) fn new<E>(kind: ErrorKind, source: E) -> Self
    where
        E: Into<Box<dyn Error + Send + Sync>>,
    {
        Self {
            kind,
            source: Some(source.into()),
        }
    }

    /// Error raised when an operation requires the key before it was set.
    pub(crate) fn key_not_set() -> Self {
        Self {
            kind: ErrorKind::Key,
            source: Some("key not set".into()),
        }
    }

    /// Error raised when a plaintext is not exactly one block long.
    pub(crate) fn invalid_message_length<C: BlockCipherCircuit>(len: usize) -> Self {
        Self {
            kind: ErrorKind::Msg,
            source: Some(
                format!(
                    "message length does not equal block length: {} != {}",
                    len,
                    C::BLOCK_LEN
                )
                .into(),
            ),
        }
    }
}
/// Internal error categories for [`BlockCipherError`].
#[derive(Debug)]
pub(crate) enum ErrorKind {
    /// Error raised by the underlying VM/executor.
    Vm,
    /// The key has not been set.
    Key,
    /// The message is invalid (e.g. wrong length).
    Msg,
}
/// Formats the error as `"<kind> error"`, optionally followed by
/// `" caused by: <source>"`.
impl fmt::Display for BlockCipherError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.kind {
            ErrorKind::Vm => write!(f, "vm error")?,
            ErrorKind::Key => write!(f, "key error")?,
            ErrorKind::Msg => write!(f, "message error")?,
        }

        if let Some(source) = &self.source {
            // Captured-identifier formatting, consistent with the other
            // error Display impls in this workspace (e.g. `AesError`).
            write!(f, " caused by: {source}")?;
        }

        Ok(())
    }
}
// All errors surfaced by the `mpz_garble` layer are categorized as VM errors.
impl From<mpz_garble::MemoryError> for BlockCipherError {
    fn from(error: mpz_garble::MemoryError) -> Self {
        Self::new(ErrorKind::Vm, error)
    }
}

impl From<mpz_garble::LoadError> for BlockCipherError {
    fn from(error: mpz_garble::LoadError) -> Self {
        Self::new(ErrorKind::Vm, error)
    }
}

impl From<mpz_garble::ExecutionError> for BlockCipherError {
    fn from(error: mpz_garble::ExecutionError) -> Self {
        Self::new(ErrorKind::Vm, error)
    }
}

impl From<mpz_garble::DecodeError> for BlockCipherError {
    fn from(error: mpz_garble::DecodeError) -> Self {
        Self::new(ErrorKind::Vm, error)
    }
}

View File

@@ -1,236 +0,0 @@
//! This crate provides a 2PC block cipher implementation.
//!
//! Both parties work together to encrypt or share an encrypted block using a
//! shared key.
#![deny(missing_docs, unreachable_pub, unused_must_use)]
#![deny(clippy::all)]
#![deny(unsafe_code)]
mod cipher;
mod circuit;
mod config;
mod error;
use async_trait::async_trait;
use mpz_garble::value::ValueRef;
pub use crate::{
cipher::MpcBlockCipher,
circuit::{Aes128, BlockCipherCircuit},
};
pub use config::{BlockCipherConfig, BlockCipherConfigBuilder, BlockCipherConfigBuilderError};
pub use error::BlockCipherError;
/// Visibility of a message plaintext.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Visibility {
    /// Private message: the plaintext is provided by this party.
    Private,
    /// Blind message: the plaintext is provided by the other party(s).
    Blind,
    /// Public message: the plaintext is provided by all parties.
    Public,
}
/// A trait for MPC block ciphers.
#[async_trait]
pub trait BlockCipher<Cipher>: Send + Sync
where
    Cipher: BlockCipherCircuit,
{
    /// Sets the key for the block cipher.
    fn set_key(&mut self, key: ValueRef);

    /// Preprocesses `count` blocks.
    ///
    /// # Arguments
    ///
    /// * `visibility` - The visibility of the plaintext.
    /// * `count` - The number of blocks to preprocess.
    async fn preprocess(
        &mut self,
        visibility: Visibility,
        count: usize,
    ) -> Result<(), BlockCipherError>;

    /// Encrypts the given plaintext keeping it hidden from the other party(s).
    ///
    /// Returns the ciphertext.
    ///
    /// # Arguments
    ///
    /// * `plaintext` - The plaintext to encrypt.
    async fn encrypt_private(&mut self, plaintext: Vec<u8>) -> Result<Vec<u8>, BlockCipherError>;

    /// Encrypts a plaintext provided by the other party(s).
    ///
    /// Returns the ciphertext.
    async fn encrypt_blind(&mut self) -> Result<Vec<u8>, BlockCipherError>;

    /// Encrypts a plaintext provided by both parties. Fails if the
    /// plaintext provided by both parties does not match.
    ///
    /// Returns an additive share of the ciphertext: XOR-combining both
    /// parties' shares yields the full ciphertext.
    ///
    /// # Arguments
    ///
    /// * `plaintext` - The plaintext to encrypt.
    async fn encrypt_share(&mut self, plaintext: Vec<u8>) -> Result<Vec<u8>, BlockCipherError>;
}
#[cfg(test)]
mod tests {
    use super::*;
    use mpz_garble::{protocol::deap::mock::create_mock_deap_vm, Memory};

    use crate::circuit::Aes128;

    use ::aes::Aes128 as TestAes128;
    use ::cipher::{BlockEncrypt, KeyInit};

    /// Reference single-block AES-128 encryption used as ground truth.
    fn aes128(key: [u8; 16], msg: [u8; 16]) -> [u8; 16] {
        let mut msg = msg.into();
        let cipher = TestAes128::new(&key.into());
        cipher.encrypt_block(&mut msg);
        msg.into()
    }

    #[tokio::test]
    #[ignore = "expensive"]
    async fn test_block_cipher_blind() {
        let leader_config = BlockCipherConfig::builder().id("test").build().unwrap();
        let follower_config = BlockCipherConfig::builder().id("test").build().unwrap();

        let key = [0u8; 16];

        let (leader_vm, follower_vm) = create_mock_deap_vm();

        // Key is public just for this test, typically it is private.
        let leader_key = leader_vm.new_public_input::<[u8; 16]>("key").unwrap();
        let follower_key = follower_vm.new_public_input::<[u8; 16]>("key").unwrap();

        leader_vm.assign(&leader_key, key).unwrap();
        follower_vm.assign(&follower_key, key).unwrap();

        let mut leader = MpcBlockCipher::<Aes128, _>::new(leader_config, leader_vm);
        leader.set_key(leader_key);

        let mut follower = MpcBlockCipher::<Aes128, _>::new(follower_config, follower_vm);
        follower.set_key(follower_key);

        let plaintext = [0u8; 16];

        // Leader provides the plaintext; follower encrypts blind.
        let (leader_ciphertext, follower_ciphertext) = tokio::try_join!(
            leader.encrypt_private(plaintext.to_vec()),
            follower.encrypt_blind()
        )
        .unwrap();

        let expected = aes128(key, plaintext);

        assert_eq!(leader_ciphertext, expected.to_vec());
        assert_eq!(leader_ciphertext, follower_ciphertext);
    }

    #[tokio::test]
    #[ignore = "expensive"]
    async fn test_block_cipher_share() {
        let leader_config = BlockCipherConfig::builder().id("test").build().unwrap();
        let follower_config = BlockCipherConfig::builder().id("test").build().unwrap();

        let key = [0u8; 16];

        let (leader_vm, follower_vm) = create_mock_deap_vm();

        // Key is public just for this test, typically it is private.
        let leader_key = leader_vm.new_public_input::<[u8; 16]>("key").unwrap();
        let follower_key = follower_vm.new_public_input::<[u8; 16]>("key").unwrap();

        leader_vm.assign(&leader_key, key).unwrap();
        follower_vm.assign(&follower_key, key).unwrap();

        let mut leader = MpcBlockCipher::<Aes128, _>::new(leader_config, leader_vm);
        leader.set_key(leader_key);

        let mut follower = MpcBlockCipher::<Aes128, _>::new(follower_config, follower_vm);
        follower.set_key(follower_key);

        let plaintext = [0u8; 16];

        let (leader_share, follower_share) = tokio::try_join!(
            leader.encrypt_share(plaintext.to_vec()),
            follower.encrypt_share(plaintext.to_vec())
        )
        .unwrap();

        let expected = aes128(key, plaintext);

        // The XOR of both shares must equal the real ciphertext.
        let result: [u8; 16] = std::array::from_fn(|i| leader_share[i] ^ follower_share[i]);

        assert_eq!(result, expected);
    }

    #[tokio::test]
    #[ignore = "expensive"]
    async fn test_block_cipher_preprocess() {
        let leader_config = BlockCipherConfig::builder().id("test").build().unwrap();
        let follower_config = BlockCipherConfig::builder().id("test").build().unwrap();

        let key = [0u8; 16];

        let (leader_vm, follower_vm) = create_mock_deap_vm();

        // Key is public just for this test, typically it is private.
        let leader_key = leader_vm.new_public_input::<[u8; 16]>("key").unwrap();
        let follower_key = follower_vm.new_public_input::<[u8; 16]>("key").unwrap();

        leader_vm.assign(&leader_key, key).unwrap();
        follower_vm.assign(&follower_key, key).unwrap();

        let mut leader = MpcBlockCipher::<Aes128, _>::new(leader_config, leader_vm);
        leader.set_key(leader_key);

        let mut follower = MpcBlockCipher::<Aes128, _>::new(follower_config, follower_vm);
        follower.set_key(follower_key);

        let plaintext = [0u8; 16];

        // Preprocess one private/blind block, then encrypt with it.
        tokio::try_join!(
            leader.preprocess(Visibility::Private, 1),
            follower.preprocess(Visibility::Blind, 1)
        )
        .unwrap();

        let (leader_ciphertext, follower_ciphertext) = tokio::try_join!(
            leader.encrypt_private(plaintext.to_vec()),
            follower.encrypt_blind()
        )
        .unwrap();

        let expected = aes128(key, plaintext);

        assert_eq!(leader_ciphertext, expected.to_vec());
        assert_eq!(leader_ciphertext, follower_ciphertext);

        // Preprocess one public block, then compute ciphertext shares.
        tokio::try_join!(
            leader.preprocess(Visibility::Public, 1),
            follower.preprocess(Visibility::Public, 1)
        )
        .unwrap();

        let (leader_share, follower_share) = tokio::try_join!(
            leader.encrypt_share(plaintext.to_vec()),
            follower.encrypt_share(plaintext.to_vec())
        )
        .unwrap();

        let expected = aes128(key, plaintext);

        let result: [u8; 16] = std::array::from_fn(|i| leader_share[i] ^ follower_share[i]);

        assert_eq!(result, expected);
    }
}

View File

@@ -0,0 +1,34 @@
[package]
name = "tlsn-cipher"
authors = ["TLSNotary Team"]
description = "This crate provides implementations of ciphers for two parties"
keywords = ["tls", "mpc", "2pc", "aes"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.13-pre"
edition = "2021"
[lints]
workspace = true
[lib]
name = "cipher"
[dependencies]
mpz-circuits = { workspace = true }
mpz-vm-core = { workspace = true }
mpz-memory-core = { workspace = true }
async-trait = { workspace = true }
thiserror = { workspace = true }
aes = { workspace = true }
[dev-dependencies]
mpz-garble = { workspace = true }
mpz-common = { workspace = true }
mpz-ot = { workspace = true }
tokio = { version = "1", features = ["macros", "rt", "rt-multi-thread"] }
rand = { workspace = true }
ctr = { workspace = true }
cipher = { workspace = true }

View File

@@ -0,0 +1,44 @@
use std::fmt::Display;
/// AES error.
#[derive(Debug, thiserror::Error)]
pub struct AesError {
    /// Category of the failure.
    kind: ErrorKind,
    /// Underlying cause, if any.
    #[source]
    source: Option<Box<dyn std::error::Error + Send + Sync>>,
}

impl AesError {
    /// Creates a new error of `kind` wrapping `source`.
    pub(crate) fn new<E>(kind: ErrorKind, source: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        Self {
            kind,
            source: Some(source.into()),
        }
    }
}
/// Internal error categories for [`AesError`].
#[derive(Debug, Clone, Copy, PartialEq)]
pub(crate) enum ErrorKind {
    /// Error raised by the underlying VM.
    Vm,
    /// The key has not been set.
    Key,
    /// The IV has not been set.
    Iv,
}
impl Display for AesError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self.kind {
ErrorKind::Vm => write!(f, "vm error")?,
ErrorKind::Key => write!(f, "key error")?,
ErrorKind::Iv => write!(f, "iv error")?,
}
if let Some(source) = &self.source {
write!(f, " caused by: {source}")?;
}
Ok(())
}
}

View File

@@ -0,0 +1,375 @@
//! The AES-128 block cipher.
use crate::{Cipher, CtrBlock, Keystream};
use async_trait::async_trait;
use mpz_circuits::circuits::AES128;
use mpz_memory_core::binary::{Binary, U8};
use mpz_vm_core::{prelude::*, Call, Vm};
use std::fmt::Debug;
mod error;
pub use error::AesError;
use error::ErrorKind;
/// Computes AES-128.
#[derive(Default, Debug)]
pub struct Aes128 {
    /// VM reference to the 16-byte key, set via `Cipher::set_key`.
    key: Option<Array<U8, 16>>,
    /// VM reference to the 4-byte IV, set via `Cipher::set_iv`.
    iv: Option<Array<U8, 4>>,
}
#[async_trait]
impl Cipher for Aes128 {
    type Error = AesError;
    type Key = Array<U8, 16>;
    type Iv = Array<U8, 4>;
    type Nonce = Array<U8, 8>;
    type Counter = Array<U8, 4>;
    type Block = Array<U8, 16>;

    fn set_key(&mut self, key: Array<U8, 16>) {
        self.key = Some(key);
    }

    fn set_iv(&mut self, iv: Array<U8, 4>) {
        self.iv = Some(iv);
    }

    fn key(&self) -> Option<&Array<U8, 16>> {
        self.key.as_ref()
    }

    fn iv(&self) -> Option<&Array<U8, 4>> {
        self.iv.as_ref()
    }

    // ECB: encrypt a single 16-byte block under the configured key.
    fn alloc_block(
        &self,
        vm: &mut dyn Vm<Binary>,
        input: Array<U8, 16>,
    ) -> Result<Self::Block, Self::Error> {
        let key = self
            .key
            .ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?;
        // AES128 circuit inputs: the key followed by the 16-byte message
        // block.
        let output = vm
            .call(
                Call::builder(AES128.clone())
                    .arg(key)
                    .arg(input)
                    .build()
                    .expect("call should be valid"),
            )
            .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
        Ok(output)
    }

    // CTR: encrypt one counter block. The explicit nonce and counter are
    // allocated as public VM values here; their cleartext is assigned later
    // by the caller.
    fn alloc_ctr_block(
        &self,
        vm: &mut dyn Vm<Binary>,
    ) -> Result<CtrBlock<Self::Nonce, Self::Counter, Self::Block>, Self::Error> {
        let key = self
            .key
            .ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?;
        let iv = self
            .iv
            .ok_or_else(|| AesError::new(ErrorKind::Iv, "iv not set"))?;
        let explicit_nonce: Array<U8, 8> = vm
            .alloc()
            .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
        vm.mark_public(explicit_nonce)
            .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
        let counter: Array<U8, 4> = vm
            .alloc()
            .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
        vm.mark_public(counter)
            .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
        // The 4-byte IV, 8-byte explicit nonce and 4-byte counter together
        // form the 16-byte block input of the AES128 circuit.
        let output = vm
            .call(
                Call::builder(AES128.clone())
                    .arg(key)
                    .arg(iv)
                    .arg(explicit_nonce)
                    .arg(counter)
                    .build()
                    .expect("call should be valid"),
            )
            .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
        Ok(CtrBlock {
            explicit_nonce,
            counter,
            output,
        })
    }

    // CTR: allocate `len.div_ceil(16)` keystream blocks.
    fn alloc_keystream(
        &self,
        vm: &mut dyn Vm<Binary>,
        len: usize,
    ) -> Result<Keystream<Self::Nonce, Self::Counter, Self::Block>, Self::Error> {
        let key = self
            .key
            .ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?;
        let iv = self
            .iv
            .ok_or_else(|| AesError::new(ErrorKind::Iv, "iv not set"))?;
        // Round up so a partial trailing block still gets a full keystream
        // block.
        let block_count = len.div_ceil(16);
        // First pass: allocate and mark public all (nonce, counter) inputs
        // before any circuit call is issued.
        let inputs = (0..block_count)
            .map(|_| {
                let explicit_nonce: Array<U8, 8> = vm
                    .alloc()
                    .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
                let counter: Array<U8, 4> = vm
                    .alloc()
                    .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
                vm.mark_public(explicit_nonce)
                    .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
                vm.mark_public(counter)
                    .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
                Ok((explicit_nonce, counter))
            })
            .collect::<Result<Vec<_>, AesError>>()?;
        // Second pass: issue one AES128 call per block.
        let blocks = inputs
            .into_iter()
            .map(|(explicit_nonce, counter)| {
                let output = vm
                    .call(
                        Call::builder(AES128.clone())
                            .arg(key)
                            .arg(iv)
                            .arg(explicit_nonce)
                            .arg(counter)
                            .build()
                            .expect("call should be valid"),
                    )
                    .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
                Ok(CtrBlock {
                    explicit_nonce,
                    counter,
                    output,
                })
            })
            .collect::<Result<Vec<_>, AesError>>()?;
        Ok(Keystream::new(&blocks))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::Cipher;
    use mpz_common::context::test_st_context;
    use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
    use mpz_memory_core::{
        binary::{Binary, U8},
        correlated::Delta,
        Array, MemoryExt, Vector, ViewExt,
    };
    use mpz_ot::ideal::cot::ideal_cot;
    use mpz_vm_core::{Execute, Vm};
    use rand::{rngs::StdRng, SeedableRng};

    // CTR-mode end-to-end test: garbler and evaluator each allocate a
    // keystream, assign identical nonce/counter inputs, XOR it with a public
    // message, and the decoded ciphertexts must match each other and a
    // reference software implementation.
    #[tokio::test]
    async fn test_aes_ctr() {
        let key = [42_u8; 16];
        let iv = [3_u8; 4];
        let nonce = [5_u8; 8];
        let start_counter = 3u32;
        let (mut ctx_a, mut ctx_b) = test_st_context(8);
        let (mut gen, mut ev) = mock_vm();
        let aes_gen = setup_ctr(key, iv, &mut gen);
        let aes_ev = setup_ctr(key, iv, &mut ev);
        let msg = vec![42u8; 128];
        let keystream_gen = aes_gen.alloc_keystream(&mut gen, msg.len()).unwrap();
        let keystream_ev = aes_ev.alloc_keystream(&mut ev, msg.len()).unwrap();
        // The message is committed as a public value on both sides.
        let msg_ref_gen: Vector<U8> = gen.alloc_vec(msg.len()).unwrap();
        gen.mark_public(msg_ref_gen).unwrap();
        gen.assign(msg_ref_gen, msg.clone()).unwrap();
        gen.commit(msg_ref_gen).unwrap();
        let msg_ref_ev: Vector<U8> = ev.alloc_vec(msg.len()).unwrap();
        ev.mark_public(msg_ref_ev).unwrap();
        ev.assign(msg_ref_ev, msg.clone()).unwrap();
        ev.commit(msg_ref_ev).unwrap();
        // Both parties assign the same big-endian counter sequence starting
        // at `start_counter`.
        let mut ctr = start_counter..;
        keystream_gen
            .assign(&mut gen, nonce, move || ctr.next().unwrap().to_be_bytes())
            .unwrap();
        let mut ctr = start_counter..;
        keystream_ev
            .assign(&mut ev, nonce, move || ctr.next().unwrap().to_be_bytes())
            .unwrap();
        let cipher_out_gen = keystream_gen.apply(&mut gen, msg_ref_gen).unwrap();
        let cipher_out_ev = keystream_ev.apply(&mut ev, msg_ref_ev).unwrap();
        // Run both parties concurrently and decode the shared ciphertext.
        let (ct_gen, ct_ev) = tokio::try_join!(
            async {
                let out = gen.decode(cipher_out_gen).unwrap();
                gen.flush(&mut ctx_a).await.unwrap();
                gen.execute(&mut ctx_a).await.unwrap();
                gen.flush(&mut ctx_a).await.unwrap();
                out.await
            },
            async {
                let out = ev.decode(cipher_out_ev).unwrap();
                ev.flush(&mut ctx_b).await.unwrap();
                ev.execute(&mut ctx_b).await.unwrap();
                ev.flush(&mut ctx_b).await.unwrap();
                out.await
            }
        )
        .unwrap();
        assert_eq!(ct_gen, ct_ev);
        // Cross-check against the pure-software AES-CTR reference.
        let expected = aes_apply_keystream(key, iv, nonce, start_counter as usize, msg);
        assert_eq!(ct_gen, expected);
    }

    // ECB single-block test: both parties encrypt the same public block and
    // the result must match the `aes` crate's output.
    #[tokio::test]
    async fn test_aes_ecb() {
        let key = [1_u8; 16];
        let input = [5_u8; 16];
        let (mut ctx_a, mut ctx_b) = test_st_context(8);
        let (mut gen, mut ev) = mock_vm();
        let aes_gen = setup_block(key, &mut gen);
        let aes_ev = setup_block(key, &mut ev);
        let block_ref_gen: Array<U8, 16> = gen.alloc().unwrap();
        gen.mark_public(block_ref_gen).unwrap();
        gen.assign(block_ref_gen, input).unwrap();
        gen.commit(block_ref_gen).unwrap();
        let block_ref_ev: Array<U8, 16> = ev.alloc().unwrap();
        ev.mark_public(block_ref_ev).unwrap();
        ev.assign(block_ref_ev, input).unwrap();
        ev.commit(block_ref_ev).unwrap();
        let block_gen = aes_gen.alloc_block(&mut gen, block_ref_gen).unwrap();
        let block_ev = aes_ev.alloc_block(&mut ev, block_ref_ev).unwrap();
        let (ciphertext_gen, ciphetext_ev) = tokio::try_join!(
            async {
                let out = gen.decode(block_gen).unwrap();
                gen.flush(&mut ctx_a).await.unwrap();
                gen.execute(&mut ctx_a).await.unwrap();
                gen.flush(&mut ctx_a).await.unwrap();
                out.await
            },
            async {
                let out = ev.decode(block_ev).unwrap();
                ev.flush(&mut ctx_b).await.unwrap();
                ev.execute(&mut ctx_b).await.unwrap();
                ev.flush(&mut ctx_b).await.unwrap();
                out.await
            }
        )
        .unwrap();
        assert_eq!(ciphertext_gen, ciphetext_ev);
        let expected = aes128(key, input);
        assert_eq!(ciphertext_gen, expected);
    }

    // Builds a semi-honest garbler/evaluator VM pair backed by ideal COT
    // with a fixed RNG seed for determinism.
    fn mock_vm() -> (impl Vm<Binary>, impl Vm<Binary>) {
        let mut rng = StdRng::seed_from_u64(0);
        let delta = Delta::random(&mut rng);
        let (cot_send, cot_recv) = ideal_cot(delta.into_inner());
        let gen = Garbler::new(cot_send, [0u8; 16], delta);
        let ev = Evaluator::new(cot_recv);
        (gen, ev)
    }

    // Commits `key` and `iv` as public VM values and returns an `Aes128`
    // configured with both (CTR-mode setup).
    fn setup_ctr(key: [u8; 16], iv: [u8; 4], vm: &mut dyn Vm<Binary>) -> Aes128 {
        let key_ref: Array<U8, 16> = vm.alloc().unwrap();
        vm.mark_public(key_ref).unwrap();
        vm.assign(key_ref, key).unwrap();
        vm.commit(key_ref).unwrap();
        let iv_ref: Array<U8, 4> = vm.alloc().unwrap();
        vm.mark_public(iv_ref).unwrap();
        vm.assign(iv_ref, iv).unwrap();
        vm.commit(iv_ref).unwrap();
        let mut aes = Aes128::default();
        aes.set_key(key_ref);
        aes.set_iv(iv_ref);
        aes
    }

    // Commits `key` as a public VM value and returns an `Aes128` with only
    // the key set (ECB-mode setup).
    fn setup_block(key: [u8; 16], vm: &mut dyn Vm<Binary>) -> Aes128 {
        let key_ref: Array<U8, 16> = vm.alloc().unwrap();
        vm.mark_public(key_ref).unwrap();
        vm.assign(key_ref, key).unwrap();
        vm.commit(key_ref).unwrap();
        let mut aes = Aes128::default();
        aes.set_key(key_ref);
        aes
    }

    // Reference AES-128-CTR using the `aes`/`ctr` crates. Full 16-byte IV
    // layout: iv || explicit_nonce, with the trailing 4 bytes used as the
    // big-endian counter (Ctr32BE).
    fn aes_apply_keystream(
        key: [u8; 16],
        iv: [u8; 4],
        explicit_nonce: [u8; 8],
        start_ctr: usize,
        msg: Vec<u8>,
    ) -> Vec<u8> {
        use ::cipher::{KeyIvInit, StreamCipher, StreamCipherSeek};
        use aes::Aes128;
        use ctr::Ctr32BE;
        let mut full_iv = [0u8; 16];
        full_iv[0..4].copy_from_slice(&iv);
        full_iv[4..12].copy_from_slice(&explicit_nonce);
        let mut cipher = Ctr32BE::<Aes128>::new(&key.into(), &full_iv.into());
        let mut out = msg.clone();
        // `try_seek` is byte-addressed, so the block counter maps to
        // `start_ctr * 16`.
        cipher
            .try_seek(start_ctr * 16)
            .expect("start counter is less than keystream length");
        cipher.apply_keystream(&mut out);
        out
    }

    // Reference single-block AES-128 encryption via the `aes` crate.
    fn aes128(key: [u8; 16], msg: [u8; 16]) -> [u8; 16] {
        use ::aes::Aes128 as TestAes128;
        use ::cipher::{BlockEncrypt, KeyInit};
        let mut msg = msg.into();
        let cipher = TestAes128::new(&key.into());
        cipher.encrypt_block(&mut msg);
        msg.into()
    }
}

View File

@@ -0,0 +1,299 @@
//! This crate provides implementations of 2PC ciphers for encryption with a
//! shared key.
//!
//! Both parties can work together to encrypt and decrypt messages with
//! different visibility configurations. See [`Cipher`] and [`Keystream`] for
//! more information on the interface.
#![deny(missing_docs, unreachable_pub, unused_must_use)]
#![deny(clippy::all)]
#![forbid(unsafe_code)]
pub mod aes;
use async_trait::async_trait;
use mpz_circuits::circuits::xor;
use mpz_memory_core::{
binary::{Binary, U8},
MemoryExt, Repr, Slice, StaticSize, ToRaw, Vector,
};
use mpz_vm_core::{prelude::*, Call, CallBuilder, CallError, Vm};
use std::{collections::VecDeque, sync::Arc};
/// Provides computation of 2PC ciphers in counter and ECB mode.
///
/// After setting `key` and `iv` allows to compute the keystream via
/// [`Cipher::alloc_keystream`] or a single block in ECB mode via
/// [`Cipher::alloc_block`]. [`Keystream`] provides more tooling to compute the
/// final cipher output in counter mode.
// NOTE: the doc links previously pointed at non-existent items
// (`Cipher::alloc`, `Cipher::assign_block`) — fixed to the actual methods.
#[async_trait]
pub trait Cipher {
    /// The error type for the cipher.
    type Error: std::error::Error + Send + Sync + 'static;
    /// Cipher key.
    type Key;
    /// Cipher IV.
    type Iv;
    /// Cipher nonce.
    type Nonce;
    /// Cipher counter.
    type Counter;
    /// Cipher block.
    type Block;

    /// Sets the key.
    fn set_key(&mut self, key: Self::Key);

    /// Sets the initialization vector.
    fn set_iv(&mut self, iv: Self::Iv);

    /// Returns the key reference.
    fn key(&self) -> Option<&Self::Key>;

    /// Returns the iv reference.
    fn iv(&self) -> Option<&Self::Iv>;

    /// Allocates a single block in ECB mode.
    fn alloc_block(
        &self,
        vm: &mut dyn Vm<Binary>,
        input: Self::Block,
    ) -> Result<Self::Block, Self::Error>;

    /// Allocates a single block in counter mode.
    #[allow(clippy::type_complexity)]
    fn alloc_ctr_block(
        &self,
        vm: &mut dyn Vm<Binary>,
    ) -> Result<CtrBlock<Self::Nonce, Self::Counter, Self::Block>, Self::Error>;

    /// Allocates a keystream in counter mode.
    ///
    /// # Arguments
    ///
    /// * `vm` - Virtual machine to allocate into.
    /// * `len` - Length of the stream in bytes.
    #[allow(clippy::type_complexity)]
    fn alloc_keystream(
        &self,
        vm: &mut dyn Vm<Binary>,
        len: usize,
    ) -> Result<Keystream<Self::Nonce, Self::Counter, Self::Block>, Self::Error>;
}
/// A block in counter mode.
///
/// Generic over the VM references for the nonce (`N`), counter (`C`) and
/// output (`O`).
#[derive(Debug, Clone, Copy)]
pub struct CtrBlock<N, C, O> {
    /// Explicit nonce reference.
    pub explicit_nonce: N,
    /// Counter reference.
    pub counter: C,
    /// Output reference.
    pub output: O,
}
/// The keystream of the cipher.
///
/// Can be used to XOR with the cipher input to operate the cipher in counter
/// mode.
///
/// Produced by [`Cipher::alloc_keystream`].
pub struct Keystream<N, C, O> {
    /// Sequential keystream blocks. Outputs are stored in contiguous memory.
    blocks: VecDeque<CtrBlock<N, C, O>>,
}
impl<N, C, O> Default for Keystream<N, C, O> {
fn default() -> Self {
Self {
blocks: VecDeque::new(),
}
}
}
impl<N, C, O> Keystream<N, C, O>
where
    N: Repr<Binary> + StaticSize<Binary> + Copy,
    C: Repr<Binary> + StaticSize<Binary> + Copy,
    O: Repr<Binary> + StaticSize<Binary> + Copy,
{
    /// Creates a new keystream from the provided blocks.
    pub fn new(blocks: &[CtrBlock<N, C, O>]) -> Self {
        Self {
            blocks: VecDeque::from_iter(blocks.iter().copied()),
        }
    }

    /// Consumes keystream material.
    ///
    /// Returns the consumed keystream material, leaving the remaining material
    /// in place.
    ///
    /// # Arguments
    ///
    /// * `len` - Length of the keystream in bytes to return.
    pub fn consume(&mut self, len: usize) -> Result<Self, CipherError> {
        // Round up: a partial-block request consumes the whole block.
        let block_count = len.div_ceil(self.block_size());
        if block_count > self.blocks.len() {
            return Err(CipherError::new("insufficient keystream"));
        }
        // NOTE(review): `split_off` removes blocks from the BACK of the
        // queue, i.e. the most recently allocated blocks are handed out
        // first — confirm this consumption order is intended.
        let blocks = self.blocks.split_off(self.blocks.len() - block_count);
        Ok(Self { blocks })
    }

    /// Applies the keystream to the provided input.
    ///
    /// XORs `input` with the keystream outputs, one block at a time.
    ///
    /// # Arguments
    ///
    /// * `vm` - Virtual machine.
    /// * `input` - Input data. Must be exactly as long as the keystream (a
    ///   whole number of blocks).
    pub fn apply(
        &self,
        vm: &mut dyn Vm<Binary>,
        input: Vector<U8>,
    ) -> Result<Vector<U8>, CipherError> {
        if input.len() != self.len() {
            return Err(CipherError::new("input length must match keystream length"));
        } else if self.blocks.is_empty() {
            return Err(CipherError::new("no keystream material available"));
        }
        // One shared XOR circuit sized to a single block; circuit width is
        // given in bits, hence `* 8`.
        let xor = Arc::new(xor(self.block_size() * 8));
        let mut pos = 0;
        let mut outputs = Vec::with_capacity(self.blocks.len());
        for block in &self.blocks {
            let call = CallBuilder::new(xor.clone())
                .arg(block.output)
                .arg(
                    input
                        .get(pos..pos + self.block_size())
                        .expect("input length was checked"),
                )
                .build()?;
            let output: Vector<U8> = vm.call(call).map_err(CipherError::new)?;
            outputs.push(output);
            pos += self.block_size();
        }
        // Stitch the per-block XOR outputs back into one contiguous vector.
        let output = flatten_blocks(vm, outputs.iter().map(|block| block.to_raw()))?;
        Ok(output)
    }

    /// Returns `len` bytes of the keystream as a vector.
    ///
    /// `len` must round up to exactly the number of blocks held; any excess
    /// bytes of the final block are truncated from the result.
    pub fn to_vector(
        &self,
        vm: &mut dyn Vm<Binary>,
        len: usize,
    ) -> Result<Vector<U8>, CipherError> {
        if len == 0 {
            return Err(CipherError::new("length must be greater than 0"));
        } else if self.blocks.is_empty() {
            return Err(CipherError::new("no keystream material available"));
        }
        let block_count = len.div_ceil(self.block_size());
        if block_count != self.blocks.len() {
            return Err(CipherError::new("length does not match keystream length"));
        }
        let mut keystream =
            flatten_blocks(vm, self.blocks.iter().map(|block| block.output.to_raw()))?;
        keystream.truncate(len);
        Ok(keystream)
    }

    /// Assigns the keystream inputs.
    ///
    /// # Arguments
    ///
    /// * `vm` - Virtual machine.
    /// * `explicit_nonce` - Explicit nonce.
    /// * `ctr` - Counter function. The provided function will be called to
    ///   assign the counter values for each block.
    pub fn assign(
        &self,
        vm: &mut dyn Vm<Binary>,
        explicit_nonce: N::Clear,
        mut ctr: impl FnMut() -> C::Clear,
    ) -> Result<(), CipherError>
    where
        N::Clear: Copy,
        C::Clear: Copy,
    {
        // Every block shares the same explicit nonce; `ctr()` supplies a
        // fresh counter value per block, in front-to-back queue order.
        for block in &self.blocks {
            vm.assign(block.explicit_nonce, explicit_nonce)
                .map_err(CipherError::new)?;
            vm.commit(block.explicit_nonce).map_err(CipherError::new)?;
            vm.assign(block.counter, ctr()).map_err(CipherError::new)?;
            vm.commit(block.counter).map_err(CipherError::new)?;
        }
        Ok(())
    }

    /// Returns the block size in bytes.
    fn block_size(&self) -> usize {
        // `O::SIZE` is in bits (binary memory), hence the division by 8;
        // `apply` multiplies back by 8 when sizing the XOR circuit.
        O::SIZE / 8
    }

    /// Returns the length of the keystream in bytes.
    fn len(&self) -> usize {
        self.block_size() * self.blocks.len()
    }
}
/// Concatenates VM memory slices into a single contiguous `Vector<U8>`.
///
/// Builds an identity circuit (one id gate per input element) over all the
/// provided slices and invokes it, so the result is a fresh allocation
/// mirroring the inputs in order.
fn flatten_blocks(
    vm: &mut dyn Vm<Binary>,
    blocks: impl IntoIterator<Item = Slice>,
) -> Result<Vector<U8>, CipherError> {
    use mpz_circuits::CircuitBuilder;

    let blocks: Vec<Slice> = blocks.into_iter().collect();
    let total_len: usize = blocks.iter().map(|block| block.len()).sum();

    // Wire every input straight through to an output.
    let mut builder = CircuitBuilder::new();
    for _ in 0..total_len {
        let input = builder.add_input();
        let output = builder.add_id_gate(input);
        builder.add_output(output);
    }
    let circuit = builder.build().expect("flatten circuit should be valid");

    // Feed each slice as an argument, in order.
    let call = blocks
        .into_iter()
        .fold(Call::builder(Arc::new(circuit)), |builder, block| {
            builder.arg(block)
        })
        .build()
        .map_err(CipherError::new)?;
    vm.call(call).map_err(CipherError::new)
}
/// A cipher error.
///
/// Thin wrapper around a boxed error; `Display` delegates to the underlying
/// source via `#[error("{source}")]`.
#[derive(Debug, thiserror::Error)]
#[error("{source}")]
pub struct CipherError {
    // The underlying cause; also exposed through `Error::source`.
    #[source]
    source: Box<dyn std::error::Error + Send + Sync>,
}
impl CipherError {
pub(crate) fn new<E>(source: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
Self {
source: source.into(),
}
}
}
impl From<CallError> for CipherError {
fn from(value: CallError) -> Self {
Self::new(value)
}
}

View File

@@ -0,0 +1,29 @@
[package]
name = "tlsn-deap"
version = "0.1.0-alpha.13-pre"
edition = "2021"
[lints]
workspace = true
[dependencies]
mpz-core = { workspace = true }
mpz-common = { workspace = true }
mpz-vm-core = { workspace = true }
rangeset = { workspace = true }
thiserror = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serio = { workspace = true }
async-trait = { workspace = true }
futures = { workspace = true }
tokio = { workspace = true, features = ["sync"] }
[dev-dependencies]
mpz-circuits = { workspace = true }
mpz-garble = { workspace = true }
mpz-ot = { workspace = true }
mpz-zk = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }
rand = { workspace = true }
rand06-compat = { workspace = true }

Some files were not shown because too many files have changed in this diff Show More