Compare commits

...

167 Commits

Author SHA1 Message Date
tsukino
8dd5f8c9a2 add interactive verifier demo 2025-07-22 20:49:18 +08:00
q
75f5690f74 remove dead code 2025-07-15 08:40:10 +08:00
q
3b233c18c2 wip: interactive verifier demo 2025-07-15 08:35:38 +08:00
yuroitaki
ef6180c313 Change naming. 2025-06-20 10:51:26 +08:00
yuroitaki
1a80ef75f8 Merge branch 'dev' into poc/verifier-server 2025-06-20 10:36:32 +08:00
dan
f2ff4ba792 chore: release v0.1.0-alpha.12 (#928) 2025-06-19 09:05:34 +00:00
dan
9bf3371873 chore(wasm): expose client auth config to js (#927) 2025-06-19 07:15:09 +00:00
yuroitaki
ba0056c8db Revert to enable wasi. 2025-06-18 17:17:58 +08:00
yuroitaki
c790b2482a Merge in dev 2025-06-18 16:35:58 +08:00
yuroitaki
0db2e6b48f Change rust target to wasm32-unknown 2025-06-18 15:16:05 +08:00
dan
9d853eb496 feat(prover): client authentication (#916) 2025-06-17 14:02:14 +00:00
yuroitaki
0c2cc6e466 Add function to load multiple plugins. 2025-06-17 16:55:06 +08:00
sinu.eth
6923ceefd3 fix(harness): iptable rule and bench config variable (#925)
* fix(harness): iptable rule and bench config variable

* rustfmt
2025-06-16 13:18:34 -04:00
sinu.eth
5239c2328a chore: bump mpz to ccc0057 (#924) 2025-06-16 07:42:49 -07:00
yuroitaki
2c048e92ed Fix typo. 2025-06-13 20:52:43 +08:00
yuroitaki
6c8cf8a182 Chmod rust prepare. 2025-06-13 20:48:41 +08:00
yuroitaki
59781d1293 Switch to rust plugin. 2025-06-13 20:43:46 +08:00
yuroitaki
43fd4d34b5 Add schema generator. 2025-06-13 20:28:03 +08:00
Hendrik Eeckhaut
6a7c5384a9 build: fixed version numbers 2025-06-12 14:24:55 +02:00
th4s
7e469006c0 fix(prf): adapt logic to new default setting (#920) 2025-06-11 20:34:47 +02:00
yuroitaki
4a604c98ce Add prover example for plugin, verification logic. 2025-06-10 19:45:31 +08:00
dan
55091b5e94 fix: set TCP_NODELAY for prover and notary (#911) 2025-06-10 08:13:12 +00:00
yuroitaki
126ba26648 Revert Cargo.lock 2025-06-10 10:56:55 +08:00
yuroitaki
2e571b0684 Merge 2025-06-09 20:34:54 +08:00
yuroitaki
4e0141f993 Lock change. 2025-06-09 20:33:56 +08:00
yuroitaki
c9ca87f4b4 Merge in dev 2025-06-09 20:18:14 +08:00
dan
bc1eba18c9 feat(mpc-tls): use concurrent ot setup and gc preprocessing (#910)
* feat(mpc-tls): use concurrent ot setup and gc preprocessing

* bump mpz

* increase muxer stream count

* update Cargo.lock

---------

Co-authored-by: sinu.eth <65924192+sinui0@users.noreply.github.com>
2025-06-06 15:39:35 -07:00
sinu.eth
c128ab16ce fix(harness): retry browser connection until timeout (#914)
* fix(harness): retry browser connection until timeout

* add timeout to executor shutdown

* shutdown timeout error msg

* clippy
2025-06-06 15:01:28 -07:00
sinu.eth
a87125ff88 fix(ci): wasm tests (#913) 2025-06-06 13:51:34 -07:00
sinu.eth
0933d711d2 feat: harness (#703)
* feat: harness

* delete tests.rs build artifact

* fix binary path

* seconds -> milliseconds

* update lock

* add empty tests module

* rustfmt

* ToString -> Display

* output tests module into build artifacts

* clippy

* rustfmt
2025-06-06 13:34:32 -07:00
yuroitaki
7e631de84a Run dummy plugin as part of request. 2025-06-06 20:09:12 +08:00
sinu.eth
79c230f2fa refactor(mpc-tls): remove commit-reveal from tag verification (#907) 2025-06-06 06:39:12 +00:00
dan
345d5d45ad feat: prove server mac key (#868)
* feat(mpc-tls): prove server mac key

* remove stray dep

* move mac key into `SessionKeys`

* fix key translation

* remove dangling dep

* move ghash mod to tlsn-common

* fix clippy lints

* treat all recv recs as unauthenticated

* detach zkvm first, then prove

* decrypt with aes_gcm, decode mac key only in zkvm

* encapsulate into `fn verify_tags`; inline mod `zk_aes_ecb`

* handle error

* fix dangling and clippy

* bump Cargo.lock
2025-06-05 09:19:41 -07:00
yuroitaki
02cdbb8130 Setup extism. 2025-06-04 15:14:24 +08:00
Hendrik Eeckhaut
55a26aad77 build: Lock + document Cargo.lock (#885) 2025-06-04 09:12:06 +02:00
Hendrik Eeckhaut
1132d441e1 docs: improve example readme (#904) 2025-06-04 08:56:55 +02:00
Hendrik Eeckhaut
fa2fdfd601 feat: add logging to server fixture (#903) 2025-06-04 08:49:33 +02:00
Hendrik Eeckhaut
24e10d664f Fix wasm-pack warnings (#888) 2025-06-03 22:38:54 +02:00
yuroitaki
c0e084c1ca fix(wasm): expose revealing server identity. (#898)
* Add reveal server identity.

* Fix test.

* Remove defualt.

---------

Co-authored-by: yuroitaki <>
2025-05-30 10:39:13 +08:00
Jakub Konka
b6845dfc5c feat(notary): add JWT-based authorization mode (#817)
* feat(server): add JWT-based authorization mode

This mode is an alternative to whitelist authorization mode.
It extracts the JWT from the authorization header (bearer token),
validates token's signature, claimed expiry times and additional
(user-configurable) claims.

* Fix formatting and lints

* Address review comments

* feat(server): remove JwtClaimType config property

* Fix missing README comments

* Address review comments

* Address review comments

---------

Co-authored-by: yuroitaki <25913766+yuroitaki@users.noreply.github.com>
2025-05-28 12:51:18 +08:00
sinu.eth
31def9ea81 chore: bump prerelease version (#895) 2025-05-27 11:43:42 -07:00
sinu.eth
878fe7e87d chore: release v0.1.0-alpha.11 (#894) 2025-05-27 09:27:26 -07:00
Hendrik Eeckhaut
3348ac34b6 Release automation (#890)
* ci: create release draft for tagged builds

* Apply suggestions from code review

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: sinu.eth <65924192+sinui0@users.noreply.github.com>
2025-05-27 08:43:57 -07:00
Hendrik Eeckhaut
82767ca2d5 Automatic workflow to update main after a release (#891) 2025-05-27 09:06:38 +02:00
sinu.eth
c9aaf2e0fa refactor(mpc-tls): default to full-mpc PRF (#892) 2025-05-27 08:57:34 +02:00
sinu.eth
241ed3b5a3 chore: bump mpz to alpha.3 (#893) 2025-05-27 08:34:35 +02:00
Hendrik Eeckhaut
56f088db7d ci: build ci with explicit, fixed rust version (1.87.0) (#879) 2025-05-24 21:25:36 +02:00
Hendrik Eeckhaut
f5250479bd docs: correct notary-server command in example readme (#883) 2025-05-23 11:06:14 +02:00
yuroitaki
0e2eabb833 misc(notary): update doc, docker, tee, ci (#874)
* Update docs, docker, tee, ci.

* Restore deleted dockerfile.

* Add concurrency in readme.

* Apply suggestions.

* Correct file path.

---------

Co-authored-by: yuroitaki <>
2025-05-23 11:55:36 +08:00
sinu.eth
ad530ca500 feat: SHA256 transcript commitments (#881)
* feat: SHA256 transcript commitments

* clippy
2025-05-22 09:10:21 -07:00
sinu.eth
8b1cac6fe0 refactor(core): decouple attestation from core api (#875)
* refactor(core): decouple attestation from core api

* remove dead test

* fix encoding tree test

* clippy

* fix comment
2025-05-22 09:00:43 -07:00
Hendrik Eeckhaut
555f65e6b2 fix: expose network setting type in WASM (#880) 2025-05-22 09:35:57 +02:00
dan
046485188c chore: add Cargo.lock to .gitignore (#870) 2025-05-21 09:56:08 +00:00
th4s
db53814ee7 fix(prf): set correct default logic (#873) 2025-05-20 15:22:34 +02:00
yuroitaki
d924bd6deb misc(notary): add common crate for server and client (#871)
* Add notary-common crate.

* Add cargo lock changes.

* Add copy.

---------

Co-authored-by: yuroitaki <>
2025-05-20 12:24:27 +08:00
yuroitaki
b3558bef9c feat(notary): add support for custom extension (#872)
* Add dos extension validator.

* Revert to allow any extensions.

---------

Co-authored-by: yuroitaki <>
2025-05-20 11:19:05 +08:00
yuroitaki
33c4b9d16f chore(notary): ignore clippy warning on large enum (#869)
* Fix clippy.

* Fix clippy.

---------

Co-authored-by: yuroitaki <>
2025-05-16 08:45:29 -07:00
yuroitaki
edc2a1783d refactor(notary): default to ephemeral key, remove config file & fixtures (#818)
* Add default values, refactor.

* Prepend file paths.

* Remove config and refactor.

* Fix fmt, add missing export.

* Simplify error.

* Use serde to print.

* Update crates/notary/server/src/config.rs

Co-authored-by: dan <themighty1@users.noreply.github.com>

* fixture removal + generate signing key (#819)

* Default to ephemeral key gen, remove fixutres.

* Fix wording.

* Add configuring sig alg, comment fixes.

* Fix sig alg id parsing.

* Refactor pub key to pem.

* Return error, add test.

* Update crates/notary/server/src/signing.rs

Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>

---------

Co-authored-by: yuroitaki <>
Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>

---------

Co-authored-by: yuroitaki <>
Co-authored-by: dan <themighty1@users.noreply.github.com>
Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>
2025-05-16 19:02:20 +08:00
sinu.eth
c2a6546deb refactor(core): encode by ref and rip out dead hash functionality (#866) 2025-05-15 09:10:05 -07:00
th4s
2dfa386415 chore: bump mpz and adapt update method call in hmac-sha256 (#867)
* fix(hmac-sha256): use new `update` method from mpz-hash

* use `into` conversion
2025-05-15 15:58:32 +02:00
sinu.eth
5a188e75c7 refactor(cipher): remove contiguous memory assumption (#864)
* refactor(cipher): remove contiguous memory assumption

* fix mpc-tls and upstream crates
2025-05-13 09:41:55 -07:00
sinu.eth
a8bf1026ca feat(deap): address space mapping (#809) 2025-05-13 09:38:39 -07:00
sinu.eth
f900fc51cd chore: bump mpz to abd02e6 (#825) 2025-05-13 09:35:51 -07:00
th4s
6ccf102ec8 feat(prf): reduced MPC variant (#735)
* feat(prf): reduced MPC variant

* move sending `client_random` from `alloc` to `preprocess`

* rename `Config` -> `Mode` and rename variants

* add feedback for handling of prf config

* fix formatting to nightly

* simplify `MpcPrf`

* improve external flush handling

* improve control flow

* improved inner control flow for normal prf version

* rename leftover `config` -> `mode`

* remove unnecessary pub(crate)

* rewrite state flow for reduced prf

* improve state transition for reduced prf

* repair prf bench

* WIP: Adapting to new `Sha256` from mpz

* repair failing test

* fixed all tests

* remove output decoding for p

* do not use mod.rs file hierarchy

* remove pub(crate) from function

* improve config handling

* use `Array::try_from`

* simplify hmac to function

* remove `merge_vecs`

* move `mark_public` to allocation

* minor fixes

* simplify state logic for reduced prf even more

* simplify reduced prf even more

* set reduced prf as default

* temporarily fix commit for mpz

* add part of feedback

* simplify state transition

* adapt comment

* improve state transition in flush

* simplify flush

* fix wasm prover config

---------

Co-authored-by: sinu <65924192+sinui0@users.noreply.github.com>
2025-05-13 09:26:43 -07:00
sinu.eth
2c500b13bd chore: bump mpz to alpha.3 (#806)
* temporary remove hmac crates

* wip: adapting cipher crate...

* wip: adapting key-exchange crate...

* wip: adapt most of mpc-tls...

* adapt prover and verifier crates

* remove unnecessary rand compat import for deap

* adapt mpc-tls

* fix: endianness of key-exchange circuit

* fix: output endianness of ke circuit

* fix variable name

---------

Co-authored-by: th4s <th4s@metavoid.xyz>
2025-05-13 09:03:09 -07:00
Hendrik Eeckhaut
2da0c242cb build: Check in Cargo lock files (#742) 2025-05-12 10:22:13 +02:00
th4s
798c22409a chore(config): move defer_decryption_from_start to ProtocolConfig 2025-05-10 11:41:01 +02:00
dan
3b5ac20d5b fix(benches): browser bench fixes (#821)
* fix(benches): make browser benches work again

* Update crates/benches/binary/README.md

Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>

* Update crates/benches/browser/wasm/Cargo.toml

Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>

* add --release flag

---------

Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>
2025-05-08 06:13:15 +00:00
Hendrik Eeckhaut
a063f8cc14 ci: build gramine-sgx for dev and tagged builds only (#805) 2025-05-05 17:16:50 +02:00
dan
6f6b24e76c test: fix failing tests (#823) 2025-05-05 17:01:42 +02:00
dan
a28718923b chore(examples): inline custom crypto provider for clarity (#815)
Co-authored-by: sinu.eth <65924192+sinui0@users.noreply.github.com>
2025-04-30 06:41:07 +00:00
Hendrik Eeckhaut
19447aabe5 Tee dev cleanup (#759)
* build: added scripts for local tee/sgx development
* Improved documentation: move all explanation to one README file
2025-04-28 14:46:32 +02:00
Jakub Konka
8afb7a4c11 fix(notary): use custom HTTP header for authorization: X-API-Key (#804)
Co-authored-by: yuroitaki <25913766+yuroitaki@users.noreply.github.com>
2025-04-28 14:24:32 +08:00
dan
43c6877ec0 chore: support workspace lints in all crates (#797) 2025-04-25 13:58:26 +02:00
dan
39e14949a0 chore: add rustls licence and attribution (#795)
* chore: add rustls licence and attribution

* add missing commit
2025-04-25 07:10:49 +00:00
dan
31f62982b5 feat(wasm): allow max records config (#810) 2025-04-25 06:34:49 +00:00
yuroitaki
6623734ca0 doc(example): add comments on verifying custom extension (#788)
* Add comments.

* Fix comment.

---------

Co-authored-by: yuroitaki <>
2025-04-25 11:18:47 +08:00
Hendrik Eeckhaut
41e215f912 chore: set version number to 0.1.0-alpha.11-pre (#798) 2025-04-23 13:19:05 +02:00
dan
9e0f79125b misc(notary): improve error msg when tls is expected (#776)
* misc(notary): improve error msg when tls is expected

* change wording

* fix nested if

* process hyper error

* refactor into a fn

* fix error msg

Co-authored-by: yuroitaki <25913766+yuroitaki@users.noreply.github.com>

* do not catch hyper error

---------

Co-authored-by: yuroitaki <25913766+yuroitaki@users.noreply.github.com>
2025-04-22 12:03:23 +00:00
Hendrik Eeckhaut
7bdd3a724b fix: Add missing concurrency param in tee config (#791) 2025-04-22 11:19:35 +02:00
dan
baa486ccfd chore(examples): fix formatting (#793) 2025-04-21 08:46:28 +00:00
sinu.eth
de7a47de5b feat: expose record count config (#786)
* expose record config

* update default record counts

* make fields optional

* override record count in integration test
2025-04-18 14:58:28 +07:00
sinu.eth
3a57134b3a chore: update version to alpha.10 (#785) 2025-04-18 08:54:55 +02:00
sinu.eth
86fed1a90c refactor: remove extension api from request builder (#787) 2025-04-18 13:01:28 +07:00
sinu.eth
82964c273b feat: attestation extensions (#755)
* feat: attestation extensions

* rustfmt

* fix doctest example

* add extensions getter to public api

* add tests

* fix prover so it includes extensions
2025-04-17 23:15:27 +07:00
yuroitaki
81aaa338e6 feat(core): find set cover across different commitment kinds in TranscriptProofBuilder (#765)
* Init.

* Cover range in order of preference of kinds.

* Fix comment.

* Adjust error message.

* Return tuple from set cover and address comments.

* Fix comments.

* Update utils version.

---------

Co-authored-by: yuroitaki <>
Co-authored-by: dan <themighty1@users.noreply.github.com>
2025-04-17 15:16:06 +08:00
dan
f331a7a3c5 chore: improve naming and comments (#780) 2025-04-17 06:43:30 +00:00
dan
adb407d03b misc(core): simplify encoding logic (#781)
* perf(core): simplify encoding logic

* make constant-time
2025-04-15 14:50:53 +00:00
dan
3e54119867 feat(notary): add concurrency limit (#770)
* feat(notary): add concurrency limit

* switch to 503 status code

* remove test-api feature

* improve naming and comments

* set default concurrency to 32
2025-04-15 12:31:16 +00:00
Hendrik Eeckhaut
71aa90de88 Add tlsn-wasm to API docs (#768) 2025-04-10 13:35:20 +02:00
sinu.eth
93535ca955 feat(mpc-tls): improve error message for incorrect transcript config (#754)
* feat(mpc-tls): improve error message for incorrect transcript config

* rustfmt

---------

Co-authored-by: dan <themighty1@users.noreply.github.com>
2025-04-07 10:44:02 +00:00
sinu.eth
a34dd57926 refactor: remove utils-aio dep (#760) 2025-04-03 04:58:14 +07:00
yuroitaki
92d7b59ee8 doc(example): add minor comments (#761)
* Add comments.

* Remove commented leftover.

* Remove example tweak.

* fmt.

---------

Co-authored-by: yuroitaki <>
2025-04-02 14:29:26 +08:00
Leonid Logvinov
c8e9cb370e feat(notary): Log notarization elapsed time (#746)
* Log notarisation elapsed time

* Fix formatting

* Include time units in field name
2025-03-27 08:08:29 -07:00
dan
4dc5570a31 MIsc comments (#747)
* fix comments

* fix comment

Co-authored-by: sinu.eth <65924192+sinui0@users.noreply.github.com>

* describe all args

* change decrypted plaintext -> plaintext

* remove redundant comments

---------

Co-authored-by: sinu.eth <65924192+sinui0@users.noreply.github.com>
2025-03-27 13:42:41 +00:00
Hendrik Eeckhaut
198e24c5e4 ci: manual workflow for tlsn-wasm release (#757) 2025-03-27 14:33:46 +01:00
dan
f16d7238e5 refactor(core): DoS mitigation and additional validation (#648)
* add encoding proof validation

* check that merkle tree indices are not out of bounds

* limit opened plaintext hash data

* add test

* formatting

* bump commitment tree size cap to 30 bits

* remove unnecessary test

* fix stray lines
2025-03-27 12:54:05 +00:00
dan
9253adaaa4 fix: avoid mutating self in TagShare::add (#748) 2025-03-27 12:46:27 +00:00
Hendrik Eeckhaut
8c889ac498 ci: SGX build: drop TEE GH environment, use regular secret (#751) 2025-03-27 11:40:04 +01:00
Hendrik Eeckhaut
f0e2200d22 ci: disable codecov annotation and comments in Github (#752) 2025-03-26 14:49:14 +01:00
Hendrik Eeckhaut
224e41a186 chore: Bump version to 0.1.0-alpha.10-pre 2025-03-25 14:28:26 +01:00
Hendrik Eeckhaut
328c2af162 fix: do not enable tee_quote feature by default (#745) 2025-03-25 11:24:43 +01:00
sinu.eth
cdb80e1458 fix: compute recv record count from max_recv (#743)
* fix: compute recv record count from max_recv

* pad after check

* fix: add `max_recv` to mpc-tls integration test

---------

Co-authored-by: th4s <th4s@metavoid.xyz>
2025-03-25 11:04:07 +01:00
Hendrik Eeckhaut
eeccbef909 ci: script to patch imports in the tlsn-wasm build result (#727) 2025-03-20 21:47:47 +01:00
sinu
190b7b0bf6 ci: update tlsn-wasm release workflow 2025-03-20 11:10:28 -07:00
sinu
c70caa5ed9 chore: release v0.1.0-alpha.9 2025-03-20 11:06:57 -07:00
sinu.eth
20137b8c6c fix(notary): install libclang in docker image (#740) 2025-03-20 10:53:32 -07:00
yuroitaki
4cdd1395e8 feat(core): find set cover solution for user in TranscriptProofBuilder (#664)
* Add reveal groups of ranges.

* Reveal committed ranges given a rangeset.

* Fix test and wordings.

* Fix wordings.

* Add reveal feature for hash commitments.

* Formatting.

* Fix wording.

* Add subset check.

* Add subset check.

* Add clippy allow.

* Fix missing direction in transcript index lookup.

* Fix prune subset.

* Refactor proof_idxs.

* Throw error if only one subset detected.

* Fix superset reveal.

* Fmt.

* Refactored Ord for Idx.

* Update crates/core/src/transcript/proof.rs

Co-authored-by: dan <themighty1@users.noreply.github.com>

* Adjust example and comments.

* Adjust comments.

* Remove comment.

* Change comment style.

* Change comment.

* Add comments.

* Change to lazily check set cover.

* use rangeset and simplify

* restore examples

* fix import

* rustfmt

* clippy

---------

Co-authored-by: yuroitaki <>
Co-authored-by: dan <themighty1@users.noreply.github.com>
Co-authored-by: sinu <65924192+sinui0@users.noreply.github.com>
2025-03-20 07:55:13 -07:00
Leonid Logvinov
c1b3d64d5d feat(notary): Make logging format configurable (#719)
* Make logging format configurable

* Document logging format

* Fix formatting

* Init server config with default value
s in notary interation tests
2025-03-19 10:57:00 -07:00
sinu.eth
61ce838f8c refactor: migrate to rand 0.9 (#734)
* refactor: migrate to rand 0.9

* fix: enable wasm_js feature for getrandom

* fix: set getrandom cfg

* fix: clippy

* fix: notary server rand

* fix cargo config
2025-03-19 10:36:24 -07:00
dan
efca281222 feat: Ethereum compatible signer (#731)
* feat: add ethereum-compatible signer

* fix recovery id

* test with a reference signer
2025-03-19 10:17:47 -07:00
sinu.eth
b24041b9f5 fix: record layer handshake control flow (#733) 2025-03-17 11:04:41 -07:00
th4s
9649d6e4cf test(common): Add test for TranscriptRefs::get (#712)
* test(common): add test for transcript refs

* doc: improve doc for test

---------

Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>
Co-authored-by: sinu.eth <65924192+sinui0@users.noreply.github.com>
2025-03-17 10:02:19 -07:00
Hendrik Eeckhaut
bc69683ecf ci: build notary docker image for both dev branch and releases (#726) 2025-03-12 18:03:01 +01:00
dan
6c468a91cf test: improve test, fix grammar 2025-03-11 10:44:11 +01:00
Hendrik Eeckhaut
dcff0b9152 ci: update cache plugin 2025-03-11 09:55:12 +01:00
sinu
5f91926154 fix: allow deprecated ring (#720) 2025-03-10 12:42:31 -07:00
Hendrik Eeckhaut
0496cbaeb1 chore: Bump version to 0.1.0-alpha.9-pre 2025-03-10 08:41:18 +01:00
sinu
d8747d49e3 chore: release alpha.8 2025-03-07 14:51:11 -08:00
sinu.eth
6fe328581c chore: bump mpz to alpha.2 (#716) 2025-03-07 14:38:47 -08:00
sinu.eth
6d1140355b build: separate clippy and keep going (#715) 2025-03-07 11:15:00 -08:00
sinu.eth
5246beabf5 chore(wasm): bump web spawn to 0.2 (#714) 2025-03-07 10:57:25 -08:00
Hendrik Eeckhaut
29efc35d14 ci: create notary-server-sgx docker image 2025-03-06 11:25:53 +01:00
Hendrik Eeckhaut
32d25e5c69 fix: fixed version of time dependency (v0.3.38 has wasm issue) (#711) 2025-03-06 01:06:16 +01:00
yuroitaki
ca9d364fc9 docs: Update notary server documentation 2025-03-05 13:15:11 +01:00
sinu.eth
5cbafe17f5 chore: removed unused deps (#706) 2025-03-03 12:15:46 -08:00
sinu.eth
acabb7761b chore: delete dead code (#705) 2025-03-03 11:53:20 -08:00
sinu.eth
c384a393bf chore: bump deps (#704) 2025-03-03 11:40:31 -08:00
Hendrik Eeckhaut
be0be19018 ci: calculate SGX mr_enclave for notary server in gramine docker (#701)
* calculate SGX mr_enclave for notary server in gramine docker
* remove old tee github workflow
* attest build result for dev branch builds and releases
2025-03-03 13:29:47 +01:00
Hendrik Eeckhaut
63bd6abc5d docs: corrected example output in examples README 2025-02-26 18:56:49 +01:00
sinu.eth
cb13169b82 perf: MPC-TLS upgrade (#698)
* fix: add new Cargo.toml

* (alpha.8) - Refactor key-exchange crate (#685)

* refactor(key-exchange): adapt key-exchange to new vm

* fix: fix feature flags

* simplify

* delete old msg module

* clean up error

---------

Co-authored-by: sinu <65924192+sinui0@users.noreply.github.com>

* (alpha.8) - Refactor prf crate (#684)

* refactor(prf): adapt prf to new mpz vm

Co-authored-by: sinu <65924192+sinui0@users.noreply.github.com>

* refactor: remove preprocessing bench

* fix: fix feature flags

* clean up attributes

---------

Co-authored-by: sinu <65924192+sinui0@users.noreply.github.com>

* refactor: key exchange interface (#688)

* refactor: prf interface (#689)

* (alpha.8) - Create cipher crate (#683)

* feat(cipher): add cipher crate, replacing stream/block cipher and aead

* delete old config module

* remove mpz generics

---------

Co-authored-by: sinu <65924192+sinui0@users.noreply.github.com>

* refactor(core): decouple encoder from mpz (#692)

* WIP: Adding new encoding logic...

* feat: add new encoder

* add feedback

* rename conversions

* feat: DEAP VM (#690)

* feat: DEAP VM

* use rangeset, add desync guard

* move MPC execution up in finalization

* refactor: MPC-TLS (#693)

* refactor: MPC-TLS

Co-authored-by: th4s <th4s@metavoid.xyz>

* output key references

* bump deps

---------

Co-authored-by: th4s <th4s@metavoid.xyz>

* refactor: prover + verifier (#696)

* refactor: wasm crates (#697)

* chore: appease clippy (#699)

* chore: rustfmt

* chore: appease clippy more

* chore: more rustfmt!

* chore: clippy is stubborn

* chore: rustfmt sorting change is annoying!

* fix: remove wasm bundling hack

* fix: aes ctr test

* chore: clippy

* fix: flush client when sending close notify

* fix: failing tests

---------

Co-authored-by: th4s <th4s@metavoid.xyz>
2025-02-25 13:51:28 -08:00
mac
25d65734c0 chore: improve notary server html info (regular and TEE) 2025-02-21 14:03:47 +01:00
mac
119ae4b2a8 docs: openapi conf update for TEE quote (#651) 2025-02-21 09:04:21 +01:00
Hendrik Eeckhaut
f59153b0a0 ci: fix TEE deployments (#686)
* do not run tee-deployments builds for PR builds
* Remove AWS deployment scripts
* added missing timeout parameter to TEE config
2025-02-20 11:58:13 +01:00
Hendrik Eeckhaut
bffe9ebb0b doc: disclaimer for minor changes PRs in contribution guidelines (#691) 2025-02-04 10:02:38 +01:00
Hendrik Eeckhaut
65299d7def chore: update axum to v0.8 (#681)
chore: update `axum` to v0.8

Co-authored-by: yuroitaki <25913766+yuroitaki@users.noreply.github.com>
2025-01-08 09:24:01 +01:00
dan
c03418a642 feat: compress partial transcript (#653)
* feat: compress partial transcript

* add missing dep
2024-12-26 10:41:22 +00:00
Hendrik Eeckhaut
7bec5a84ee Added a script to set the TLSN version number of the relevant crates (#650)
build: add script to set the TLSN version number
2024-12-12 13:46:08 +01:00
Hendrik Eeckhaut
85e0f5b467 Clean up tlsn-wasm build (#553)
ci: automate tlsn-wasm build + store artifact for tags + manual workflow for npm release

+ Improved package: add description, repo information etc
2024-12-06 11:03:23 +01:00
Ryan MacArthur
cacca108ed ci: SGX 2024-12-05 08:59:19 +01:00
yuroitaki
c9592f44a1 fix: extend test certs to 100-year validity. (#667)
Co-authored-by: yuroitaki <>
2024-12-03 14:36:28 +08:00
yuroitaki
e6be5e1cc9 Fix clippy. (#666) 2024-11-29 17:33:22 +01:00
Hendrik Eeckhaut
d974fb71d5 Revert "build: Update wasm-bindgen and wasm-bindgen-rayon dependency (#662)" (#663)
This reverts commit c0c1c0caa1.
2024-11-27 09:15:09 +01:00
Hendrik Eeckhaut
c0c1c0caa1 build: Update wasm-bindgen and wasm-bindgen-rayon dependency (#662) 2024-11-25 11:46:00 +01:00
yorozunouchu
7d88d1c20b fix(notary): make TLS keys and authorization whitelist configs optional (#589)
* feat: Add optional fields for TLS private key and certificate paths

* Add optional field for whitelist_csv_path

* fix test cases for whitelist_csv_path

* fix issues pointed out by yuroitaki

* Add error handling for missing PEM paths when TLS is enabled

* Fix formatting and linting

* throw error if pem files do not exist

---------

Co-authored-by: funkyenough <14842981+funkyenough@users.noreply.github.com>
2024-11-22 18:44:06 +08:00
yuroitaki
c10c9155a7 chore: add core transcript unit tests (#649)
* Add transcript proof and lib tests.

* Init encoding tree test.

* Add encoding proof tests.

* Generalise fixture tests.

* Add seed arg to attestation fixture fn.

* Adjust cosmetics.

* Format comment.

---------

Co-authored-by: yuroitaki <>
2024-11-04 13:59:31 +08:00
dan
faab999339 Memory profiling (#658) (#660)
* (squashing to simplify rebase)
rebased on dev
reorganized files
fix gh workflow

* modify workflow

* update dockerfile

Co-authored-by: Valentin Mihov <valentin.mihov@gmail.com>
2024-10-31 09:13:23 +00:00
dan
e6bc93c1f1 Memory profiling (#658)
* (squashing to simplify rebase)
rebased on dev
reorganized files
fix gh workflow

* modify workflow

* update dockerfile
Co-authored-by: valo <valo@users.noreply.github.com>
2024-10-29 16:20:00 +00:00
Hendrik Eeckhaut
c6dc262a5e Use a local server (fixture) for the attestation example (#656)
feat: use server fixture for tlsn examples + removed Discord example

The attestation example now has different modes: json, html and authenticated
2024-10-29 14:53:01 +01:00
Ryan MacArthur
db90e28e44 feat: intel-sgx attestation 2024-10-29 14:52:47 +01:00
Hendrik Eeckhaut
30e4e37c0d Return valid json in Server fixture (#652)
fix: Server fixture now returns valid json

* return json instead of string
* removed trailing whitespace
* use a constant for the default port
* give binary a better name
2024-10-25 22:13:35 +02:00
dan
6344410cad use the latest rev of tlsn-utils (#654) 2024-10-24 13:53:26 +00:00
Hendrik Eeckhaut
1d663596c1 Do not include git commit timestamp in notary server + use git2 instead of git command + add dirty suffix (#643)
build: improved commit info on notary/info page

This changes uses git2 Rust library instead of calling out to external git
The timestamp was removed
2024-10-23 10:29:23 +02:00
yuroitaki
2c045e5de7 fix(notary): implement timeout for notarization (#639)
* Add timeout.

* Fmt.

* Fix grammar.

* Move limit to config.

* Remove extra space.

---------

Co-authored-by: yuroitaki <>
2024-10-22 15:08:19 +08:00
dan
38104bca1a style: fix grammar and wording (#647) 2024-10-22 06:31:13 +00:00
Hendrik Eeckhaut
99ba47c25d build: re-added explicit getrandom dependency in the wasm crate (#646) 2024-10-21 11:00:17 +02:00
dan
2042089132 Test/benches fixes (#636)
* add header

* combine into a single workflow

* fix wsrelay commit
2024-10-17 12:24:10 +00:00
Hendrik Eeckhaut
504967d09a Version bump correction (#644)
* build: reset version number in non-publish crates

#642
2024-10-17 12:31:07 +02:00
Hendrik Eeckhaut
6e80d03ac7 chore: Bump version to 0.1.0-alpha.8-pre (#642)
build: Bump version to 0.1.0-alpha.8-pre and set version number for benches and fixtures to 0.0.0
2024-10-16 18:53:07 +02:00
Hendrik Eeckhaut
b3f79a9e2b build: removed unused dependencies (cargo machete) 2024-10-15 19:58:30 +02:00
Hendrik Eeckhaut
99e02fb388 Add convenience script to run most ci checks locally 2024-10-14 20:23:19 +02:00
dan
6b845fd473 test: add browser benches (#570)
* refactor: modularize server-fixture

* Update crates/server-fixture/server/Cargo.toml

add newline

Co-authored-by: sinu.eth <65924192+sinui0@users.noreply.github.com>

* test: add browser benches

* fix deps

* ci: run ci workflow for all pull requests (#571)

* misc fixes

* fix clippy

* don't log a non-critical error to stderr

* use incognito (mitigates random hangs)

* add notes

* distinguish prover kind when plotting

---------

Co-authored-by: sinu.eth <65924192+sinui0@users.noreply.github.com>
Co-authored-by: Hendrik Eeckhaut <hendrik@eeckhaut.org>
Co-authored-by: Ubuntu <ubuntu@ip-10-35-1-164.eu-central-1.compute.internal>
2024-10-14 13:52:52 +00:00
Hendrik Eeckhaut
66db5344ac ci: use env variables to get git commit hash in CI 2024-10-11 16:42:10 +02:00
Kimani
1d4c50f804 feat(notary): support reading config values from CLI and env var (#605)
* feat: supports reading config values from CLI

* chore: adds config lib to cargo.toml, uses server default config values instead, removes validations in settings.rs

* chore: tries to load YAML file

* chore: tries to load YAML file

* fix: loads config.yaml properly, refactors code

* fix: removes .idea folder and moves config lib to notary-server cargo.toml

* feat: uses serde-aux to deserialize env vars port and tls-enabled from string and restores &cli_fields.config_file path and debug log

* fix: parses int and bool using try-parsing instead of serde-aux and removes unnecessary whitespaces

* chore: converts config to snake_case for consistency

* doc: adds configuration documentation, code comments and fixes linting errors

* fix: fixes ci linting formatting

* fix: fixes ci linting formatting

* fix: adjusts formatting for settings.rs and minor adjustments to documentation

* fix: uses cargo nightly to format correctly

---------

Co-authored-by: yuroitaki <25913766+yuroitaki@users.noreply.github.com>
2024-10-11 11:56:37 +08:00
Hendrik Eeckhaut
61ff3a8255 ci: Try codecov.io 2024-10-08 09:59:09 +02:00
Hendrik Eeckhaut
2ac9de1edd ci: generate coverage report 2024-10-08 09:59:09 +02:00
Artem
a7a8a83410 fix(notary): fix client issue of not being able to specify the notary url path (#614)
* (fix: client) Fixed client issue of being able to implement the path for the url

* (feat: client) Improved the code to adjust for feedback received as well as extend the path calculation to avoid adding a `/` when already starts with a `/`

* (fix: client) Fixed client issue of being able to implement the path for the url

* (feat: client) Improved the code to adjust for feedback received as well as extend the path calculation to avoid adding a `/` when already starts with a `/`

* Update crates/notary/client/src/client.rs

Co-authored-by: yuroitaki <25913766+yuroitaki@users.noreply.github.com>

* (fix: client) Renamed `path` to `path_prefix`

* (fix: client) Remove condition on the URL

* (chore: client) Fix formating

---------

Co-authored-by: yuroitaki <25913766+yuroitaki@users.noreply.github.com>
2024-10-04 14:24:48 +08:00
449 changed files with 38758 additions and 18521 deletions

10
.cargo/config.toml Normal file
View File

@@ -0,0 +1,10 @@
[target.wasm32-unknown-unknown]
rustflags = [
"-C",
"target-feature=+atomics,+bulk-memory,+mutable-globals",
"-A",
"unused_qualifications"
]
[unstable]
build-std = ["panic_abort", "std"]

2
.dockerignore Normal file
View File

@@ -0,0 +1,2 @@
/target
/.git

3
.github/codecov.yml vendored Normal file
View File

@@ -0,0 +1,3 @@
github_checks:
annotations: false
comment: false

View File

@@ -1,12 +0,0 @@
#!/bin/bash
# https://github.com/tlsnotary/tlsn/pull/419
set -ex
environment=$1
aws s3 sync .git s3://tlsn-deploy/$environment/.git --delete
cargo build -p notary-server --release
aws s3 cp ./target/release/notary-server s3://tlsn-deploy/$environment/
exit 0

View File

@@ -1,27 +0,0 @@
#!/bin/bash
set -ex
environment=$1
branch=$2
INSTANCE_ID=$(aws ec2 describe-instances --filters Name=tag:Name,Values=[tlsnotary-backend-v1] Name=instance-state-name,Values=[running] --query "Reservations[*].Instances[*][InstanceId]" --output text)
aws ec2 create-tags --resources $INSTANCE_ID --tags "Key=$environment,Value=$branch"
COMMIT_HASH=$(git rev-parse HEAD)
DEPLOY_ID=$(aws deploy create-deployment --application-name tlsn-$environment-v1 --deployment-group-name tlsn-$environment-v1-group --github-location repository=$GITHUB_REPOSITORY,commitId=$COMMIT_HASH --ignore-application-stop-failures --file-exists OVERWRITE --output text)
while true; do
STATUS=$(aws deploy get-deployment --deployment-id $DEPLOY_ID --query 'deploymentInfo.status' --output text)
if [ $STATUS != "InProgress" ] && [ $STATUS != "Created" ]; then
if [ $STATUS = "Succeeded" ]; then
echo "SUCCESS"
exit 0
else
echo "Failed"
exit 1
fi
else
echo "Deploying..."
fi
sleep 30
done

View File

@@ -1,33 +0,0 @@
#!/bin/bash
# This script is triggered by Deploy server workflow in order to send an execution command of cd-scripts/modify_proxy.sh via AWS SSM to the proxy server
set -e
GH_OWNER="tlsnotary"
GH_REPO="tlsn"
BACKEND_INSTANCE_ID=$(aws ec2 describe-instances --filters Name=tag:Name,Values=[tlsnotary-backend-v1] Name=instance-state-name,Values=[running] --query "Reservations[*].Instances[*][InstanceId]" --output text)
PROXY_INSTANCE_ID=$(aws ec2 describe-instances --filters Name=tag:Name,Values=[tlsnotary-web] Name=instance-state-name,Values=[running] --query "Reservations[*].Instances[*][InstanceId]" --output text)
TAGS=$(aws ec2 describe-instances --instance-ids $BACKEND_INSTANCE_ID --query 'Reservations[*].Instances[*].Tags')
TAG=$(echo $TAGS | jq -r '.[][][] | select(.Key == "stable").Value')
PORT=$(echo $TAGS | jq -r '.[][][] | select(.Key == "port").Value')
COMMAND_ID=$(aws ssm send-command --document-name "AWS-RunRemoteScript" --instance-ids $PROXY_INSTANCE_ID --parameters '{"sourceType":["GitHub"],"sourceInfo":["{\"owner\":\"'${GH_OWNER}'\", \"repository\":\"'${GH_REPO}'\", \"getOptions\":\"branch:'${TAG}'\", \"path\": \"cd-scripts\"}"],"commandLine":["modify_proxy.sh '${PORT}' '${TAG}' "]}' --output text --query "Command.CommandId")
while true; do
SSM_STATUS=$(aws ssm list-command-invocations --command-id $COMMAND_ID --details --query "CommandInvocations[].Status" --output text)
if [ $SSM_STATUS != "Success" ] && [ $SSM_STATUS != "InProgress" ]; then
echo "Proxy modification failed"
aws ssm list-command-invocations --command-id $COMMAND_ID --details --query "CommandInvocations[].CommandPlugins[].{Status:Status,Output:Output}"
exit 1
elif [ $SSM_STATUS = "Success" ]; then
aws ssm list-command-invocations --command-id $COMMAND_ID --details --query "CommandInvocations[].CommandPlugins[].{Status:Status,Output:Output}"
echo "Success"
break
fi
sleep 2
done
exit 0

View File

@@ -1,7 +1,16 @@
name: Run Benchmarks
name: Run Benchmarks (Native or Browser)
on:
# manual trigger
workflow_dispatch:
inputs:
bench_type:
description: "Specify the benchmark type (native or browser)"
required: true
default: "native"
type: choice
options:
- native
- browser
jobs:
run-benchmarks:
@@ -12,16 +21,17 @@ jobs:
- name: Build Docker Image
run: |
docker build -t tlsn-bench . -f ./crates/benches/benches.Dockerfile
docker build -t tlsn-bench . -f ./crates/benches/binary/benches.Dockerfile --build-arg BENCH_TYPE=${{ github.event.inputs.bench_type }}
- name: Run Benchmarks
run: |
docker run --privileged -v ${{ github.workspace }}/crates/benches/:/benches tlsn-bench
docker run --privileged -v ${{ github.workspace }}/crates/benches/binary:/benches tlsn-bench
- name: Upload runtime_vs_latency.html
- name: Upload graphs
uses: actions/upload-artifact@v4
with:
name: benchmark_graphs
path: |
./crates/benches/runtime_vs_latency.html
./crates/benches/runtime_vs_bandwidth.html
./crates/benches/binary/runtime_vs_latency.html
./crates/benches/binary/runtime_vs_bandwidth.html
./crates/benches/binary/download_size_vs_memory.html

View File

@@ -1,86 +0,0 @@
name: Deploy server
on:
push:
branches:
- dev
tags:
- "[v]?[0-9]+.[0-9]+.[0-9]+*"
workflow_dispatch:
inputs:
environment:
description: "Environment"
required: true
default: "nightly"
type: choice
options:
- nightly
- stable
concurrency:
group: ${{ github.workflow }}
cancel-in-progress: false
jobs:
deploy:
runs-on: ubuntu-latest
env:
DATA_ENV: ${{ github.event.inputs.environment || 'nightly' }}
permissions:
id-token: write
contents: read
steps:
- name: Manipulate Environment
id: manipulate
run: |
if [ "${{ github.event_name }}" = "push" ] && [ "$GITHUB_REF_NAME" = "dev" ]; then
echo "env=nightly" >> $GITHUB_OUTPUT
elif [ "${{ github.event_name }}" = "push" ] && [[ "${{ github.ref }}" = "refs/tags/"* ]]; then
echo "env=stable" >> $GITHUB_OUTPUT
elif [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
echo "env=${{ env.DATA_ENV }}" >> $GITHUB_OUTPUT
else
echo "Operation not permitted"
exit 1
fi
- name: Wait for integration test workflow to succeed
if: github.event_name == 'push'
uses: lewagon/wait-on-check-action@v1.3.1
with:
ref: ${{ github.ref }}
# More details [here](https://github.com/lewagon/wait-on-check-action#check-name)
check-name: 'Run tests release build'
repo-token: ${{ secrets.GITHUB_TOKEN }}
# How frequent (in seconds) this job will call GitHub API to check the status of the job specified at 'check-name'
wait-interval: 60
- name: Checkout
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: arn:aws:iam::490752553772:role/tlsn-deploy-slc
role-duration-seconds: 1800
aws-region: eu-central-1
- name: Install stable rust toolchain
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
- name: Cargo build
run: |
.github/scripts/build-server.sh ${{ steps.manipulate.outputs.env }}
- name: Trigger Deployment
run: |
.github/scripts/deploy-server.sh ${{ steps.manipulate.outputs.env }} $GITHUB_REF_NAME
- name: Modify Proxy
if: ${{ steps.manipulate.outputs.env == 'stable' }}
run: |
.github/scripts/modify-proxy.sh

View File

@@ -1,52 +0,0 @@
name: cd
on:
push:
tags:
- "[v]?[0-9]+.[0-9]+.[0-9]+*"
env:
CONTAINER_REGISTRY: ghcr.io
jobs:
build_and_publish_notary_server_image:
name: Build and publish notary server's image
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
steps:
- name: Wait for integration test workflow to succeed
uses: lewagon/wait-on-check-action@v1.3.1
with:
ref: ${{ github.ref }}
# More details [here](https://github.com/lewagon/wait-on-check-action#check-name)
check-name: 'Run tests release build'
repo-token: ${{ secrets.GITHUB_TOKEN }}
# How frequent (in seconds) this job will call GitHub API to check the status of the job specified at 'check-name'
wait-interval: 60
- name: Checkout repository
uses: actions/checkout@v4
- name: Log in to the Container registry
uses: docker/login-action@v2
with:
registry: ${{ env.CONTAINER_REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker image of notary server
id: meta-notary-server
uses: docker/metadata-action@v4
with:
images: ${{ env.CONTAINER_REGISTRY }}/${{ github.repository }}/notary-server
- name: Build and push Docker image of notary server
uses: docker/build-push-action@v4
with:
context: .
push: true
tags: ${{ steps.meta-notary-server.outputs.tags }}
labels: ${{ steps.meta-notary-server.outputs.labels }}
file: ./crates/notary/server/notary-server.Dockerfile

View File

@@ -7,8 +7,11 @@ on:
tags:
- "[v]?[0-9]+.[0-9]+.[0-9]+*"
pull_request:
branches:
- dev
permissions:
id-token: write
contents: read
attestations: write
env:
CARGO_TERM_COLOR: always
@@ -19,8 +22,29 @@ env:
# - https://github.com/privacy-scaling-explorations/mpz/issues/178
# 32 seems to be big enough for the foreseeable future
RAYON_NUM_THREADS: 32
GIT_COMMIT_HASH: ${{ github.event.pull_request.head.sha || github.sha }}
RUST_VERSION: 1.87.0
jobs:
clippy:
name: Clippy
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@stable
with:
toolchain: ${{ env.RUST_VERSION }}
components: clippy
- name: Use caching
uses: Swatinem/rust-cache@v2.7.7
- name: Clippy
run: cargo clippy --keep-going --all-features --all-targets --locked -- -D warnings
fmt:
name: Check formatting
runs-on: ubuntu-latest
@@ -36,10 +60,11 @@ jobs:
components: rustfmt
- name: Use caching
uses: Swatinem/rust-cache@v2.7.3
uses: Swatinem/rust-cache@v2.7.7
- name: Check formatting
run: cargo +nightly fmt --check --all
build-and-test:
name: Build and test
runs-on: ubuntu-latest
@@ -47,35 +72,32 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install stable rust toolchain
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
components: clippy
toolchain: ${{ env.RUST_VERSION }}
- name: Use caching
uses: Swatinem/rust-cache@v2.7.3
- name: Clippy
run: cargo clippy --all-features --all-targets -- -D warnings
uses: Swatinem/rust-cache@v2.7.7
- name: Build
run: cargo build --all-targets
run: cargo build --all-targets --locked
- name: Test
run: cargo test
build-wasm:
name: Build and test wasm
run: cargo test --no-fail-fast --locked
wasm:
name: Build and Test wasm
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install stable rust toolchain
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@stable
with:
targets: wasm32-unknown-unknown
toolchain: stable
toolchain: ${{ env.RUST_VERSION }}
- name: Install nightly rust toolchain
uses: dtolnay/rust-toolchain@stable
@@ -93,12 +115,34 @@ jobs:
run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
- name: Use caching
uses: Swatinem/rust-cache@v2.7.3
uses: Swatinem/rust-cache@v2.7.7
- name: Build harness
working-directory: crates/harness
run: ./build.sh
- name: Run tests
working-directory: crates/harness
run: |
cd crates/wasm-test-runner
./run.sh
./bin/runner setup
./bin/runner --target browser test
- name: Run build
working-directory: crates/wasm
run: ./build.sh
- name: Dry Run NPM Publish
working-directory: crates/wasm/pkg
run: npm publish --dry-run
- name: Save tlsn-wasm package for tagged builds
if: startsWith(github.ref, 'refs/tags/')
uses: actions/upload-artifact@v4
with:
name: ${{ github.ref_name }}-tlsn-wasm-pkg
path: ./crates/wasm/pkg
if-no-files-found: error
tests-integration:
name: Run tests release build
runs-on: ubuntu-latest
@@ -106,16 +150,248 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install stable rust toolchain
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
toolchain: ${{ env.RUST_VERSION }}
- name: Use caching
uses: Swatinem/rust-cache@v2.7.3
uses: Swatinem/rust-cache@v2.7.7
- name: Add custom DNS entry to /etc/hosts for notary TLS test
run: echo "127.0.0.1 tlsnotaryserver.io" | sudo tee -a /etc/hosts
- name: Run integration tests
run: cargo test --profile tests-integration --workspace --exclude tlsn-tls-client --exclude tlsn-tls-core -- --include-ignored
run: cargo test --locked --profile tests-integration --workspace --exclude tlsn-tls-client --exclude tlsn-tls-core --no-fail-fast -- --include-ignored
coverage:
runs-on: ubuntu-latest
env:
CARGO_TERM_COLOR: always
steps:
- uses: actions/checkout@v4
- name: Install rust toolchain
uses: dtolnay/rust-toolchain@stable
with:
toolchain: ${{ env.RUST_VERSION }}
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Generate code coverage
run: cargo llvm-cov --all-features --workspace --locked --lcov --output-path lcov.info
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: lcov.info
fail_ci_if_error: true
build-sgx:
runs-on: ubuntu-latest
needs: build-and-test
container:
image: rust:latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install Clang
run: |
apt update
apt install -y clang
- name: Use caching
uses: Swatinem/rust-cache@v2.7.7
- name: Build Rust Binary
run: |
cargo build --locked --bin notary-server --release --features tee_quote
cp --verbose target/release/notary-server $GITHUB_WORKSPACE
- name: Upload Binary for use in the Gramine Job
uses: actions/upload-artifact@v4
with:
name: notary-server
path: notary-server
if-no-files-found: error
gramine-sgx:
runs-on: ubuntu-latest
needs: build-sgx
container:
image: gramineproject/gramine:latest
if: github.ref == 'refs/heads/dev' || (startsWith(github.ref, 'refs/tags/v') && contains(github.ref, '.'))
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Restore SGX signing key from secrets
run: |
mkdir -p "${HOME}/.config/gramine/"
echo "${{ secrets.SGX_SIGNING_KEY }}" > "${HOME}/.config/gramine/enclave-key.pem"
# verify key
openssl rsa -in "${HOME}/.config/gramine/enclave-key.pem" -check -noout
- name: Download notary-server binary from build job
uses: actions/download-artifact@v4
with:
name: notary-server
path: crates/notary/server/tee
- name: Install jq
run: |
apt update
apt install -y jq
- name: Use Gramine to calculate measurements
run: |
cd crates/notary/server/tee
chmod +x notary-server
gramine-manifest \
-Dlog_level=debug \
-Darch_libdir=/lib/x86_64-linux-gnu \
-Dself_exe=notary-server \
notary-server.manifest.template \
notary-server.manifest
gramine-sgx-sign \
--manifest notary-server.manifest \
--output notary-server.manifest.sgx
gramine-sgx-sigstruct-view --verbose --output-format=json notary-server.sig | tee >> notary-server-sigstruct.json
cat notary-server-sigstruct.json
mr_enclave=$(jq -r '.mr_enclave' notary-server-sigstruct.json)
mr_signer=$(jq -r '.mr_signer' notary-server-sigstruct.json)
echo "mrenclave=$mr_enclave" >>"$GITHUB_OUTPUT"
echo "#### sgx mrenclave" | tee >>$GITHUB_STEP_SUMMARY
echo "\`\`\`mr_enclave: ${mr_enclave}\`\`\`" | tee >>$GITHUB_STEP_SUMMARY
echo "\`\`\`mr_signer: ${mr_signer}\`\`\`" | tee >>$GITHUB_STEP_SUMMARY
- name: Upload notary-server and signatures
id: upload-notary-server-sgx
uses: actions/upload-artifact@v4
with:
name: notary-server-sgx.zip
path: |
crates/notary/server/tee/notary-server
crates/notary/server/tee/notary-server-sigstruct.json
crates/notary/server/tee/notary-server.sig
crates/notary/server/tee/notary-server.manifest
crates/notary/server/tee/notary-server.manifest.sgx
crates/notary/server/tee/README.md
if-no-files-found: error
- name: Attest Build Provenance
if: startsWith(github.ref, 'refs/tags/') || github.ref == 'refs/heads/dev'
uses: actions/attest-build-provenance@v2
with:
subject-name: notary-server-sgx.zip
subject-digest: sha256:${{ steps.upload-notary-server-sgx.outputs.artifact-digest }}
- uses: geekyeggo/delete-artifact@v5 # Delete notary-server from the build job, It is part of the zipfile with the signature
with:
name: notary-server
gramine-sgx-docker:
runs-on: ubuntu-latest
needs: gramine-sgx
permissions:
contents: read
packages: write
env:
CONTAINER_REGISTRY: ghcr.io
if: github.ref == 'refs/heads/dev' || (startsWith(github.ref, 'refs/tags/v') && contains(github.ref, '.'))
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
sparse-checkout: './crates/notary/server/tee/notary-server-sgx.Dockerfile'
- name: Download notary-server-sgx.zip from gramine-sgx job
uses: actions/download-artifact@v4
with:
name: notary-server-sgx.zip
path: ./notary-server-sgx
- name: Log in to the Container registry
uses: docker/login-action@v2
with:
registry: ${{ env.CONTAINER_REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker image of notary server
id: meta-notary-server-sgx
uses: docker/metadata-action@v4
with:
images: ${{ env.CONTAINER_REGISTRY }}/${{ github.repository }}/notary-server-sgx
- name: Build and push Docker image of notary server
uses: docker/build-push-action@v4
with:
context: .
push: true
tags: ${{ steps.meta-notary-server-sgx.outputs.tags }}
labels: ${{ steps.meta-notary-server-sgx.outputs.labels }}
file: ./crates/notary/server/tee/notary-server-sgx.Dockerfile
build_and_publish_notary_server_image:
name: Build and publish notary server's image
runs-on: ubuntu-latest
needs: build-and-test
permissions:
contents: read
packages: write
env:
CONTAINER_REGISTRY: ghcr.io
if: github.ref == 'refs/heads/dev' || (startsWith(github.ref, 'refs/tags/v') && contains(github.ref, '.'))
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Log in to the Container registry
uses: docker/login-action@v2
with:
registry: ${{ env.CONTAINER_REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker image of notary server
id: meta-notary-server
uses: docker/metadata-action@v4
with:
images: ${{ env.CONTAINER_REGISTRY }}/${{ github.repository }}/notary-server
- name: Build and push Docker image of notary server
uses: docker/build-push-action@v4
with:
context: .
push: true
tags: ${{ steps.meta-notary-server.outputs.tags }}
labels: ${{ steps.meta-notary-server.outputs.labels }}
file: ./crates/notary/server/notary-server.Dockerfile
create-release-draft:
name: Create Release Draft
needs: build_and_publish_notary_server_image
runs-on: ubuntu-latest
permissions:
contents: write
if: startsWith(github.ref, 'refs/tags/v') && contains(github.ref, '.')
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Create GitHub Release Draft
uses: softprops/action-gh-release@v2
with:
draft: true
tag_name: ${{ github.ref_name }}
prerelease: true
generate_release_notes: true

62
.github/workflows/releng.yml vendored Normal file
View File

@@ -0,0 +1,62 @@
name: Publish tlsn-wasm to NPM
on:
workflow_dispatch:
inputs:
tag:
description: 'Tag to publish to NPM'
required: true
default: 'v0.1.0-alpha.12'
jobs:
release:
runs-on: ubuntu-latest
env:
GH_TOKEN: ${{ github.token }}
steps:
- name: Find and download tlsn-wasm build from the tagged ci workflow
id: find_run
run: |
# Find the workflow run ID for the tag
RUN_ID=$(gh api \
-H "Accept: application/vnd.github+json" \
"/repos/tlsnotary/tlsn/actions/workflows/ci.yml/runs?per_page=100" \
--jq '.workflow_runs[] | select(.head_branch == "${{ github.event.inputs.tag }}") | .id')
if [ -z "$RUN_ID" ]; then
echo "No run found for tag ${{ github.event.inputs.tag }}"
exit 1
fi
echo "Found run: $RUN_ID"
echo "run_id=$RUN_ID" >> "$GITHUB_OUTPUT"
# Find the download URL for the build artifact
DOWNLOAD_URL=$(gh api \
-H "Accept: application/vnd.github+json" \
/repos/tlsnotary/tlsn/actions/runs/${RUN_ID}/artifacts \
--jq '.artifacts[] | select(.name == "${{ github.event.inputs.tag }}-tlsn-wasm-pkg") | .archive_download_url')
if [ -z "$DOWNLOAD_URL" ]; then
echo "No download url for build artifact ${{ github.event.inputs.tag }}-tlsn-wasm-pkg in run $RUN_ID"
exit 1
fi
# Download and unzip the build artifact
mkdir tlsn-wasm-pkg
curl -L -H "Authorization: Bearer ${GH_TOKEN}" \
-H "Accept: application/vnd.github+json" \
-o tlsn-wasm-pkg.zip \
${DOWNLOAD_URL}
unzip -q tlsn-wasm-pkg.zip -d tlsn-wasm-pkg
- name: NPM Publish for tlsn-wasm
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
run: |
cd tlsn-wasm-pkg
echo "//registry.npmjs.org/:_authToken=${NODE_AUTH_TOKEN}" > .npmrc
npm publish
rm .npmrc

View File

@@ -4,7 +4,6 @@ on:
push:
branches: [dev]
pull_request:
branches: [dev]
env:
CARGO_TERM_COLOR: always
@@ -22,18 +21,13 @@ jobs:
toolchain: stable
- name: "rustdoc"
run: cargo doc -p tlsn-core -p tlsn-prover -p tlsn-verifier --no-deps --all-features
# --target-dir ${GITHUB_WORKSPACE}/docs
run: crates/wasm/build-docs.sh
# https://dev.to/deciduously/prepare-your-rust-api-docs-for-github-pages-2n5i
- name: "Add index file -> tlsn_prover"
run: |
echo "<meta http-equiv=\"refresh\" content=\"0; url=tlsn_prover\">" > target/doc/index.html
- name: Deploy
uses: peaceiris/actions-gh-pages@v3
if: ${{ github.ref == 'refs/heads/dev' }}
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: target/doc/
publish_dir: target/wasm32-unknown-unknown/doc/
# cname: rustdocs.tlsnotary.org

24
.github/workflows/updatemain.yml vendored Normal file
View File

@@ -0,0 +1,24 @@
name: Fast-forward main branch to published release tag
on:
release:
types: [published]
jobs:
ff-main-to-release:
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Checkout main
uses: actions/checkout@v4
with:
ref: main
- name: Fast-forward main to release tag
run: |
tag="${{ github.event.release.tag_name }}"
git fetch origin "refs/tags/$tag:refs/tags/$tag"
git merge --ff-only "refs/tags/$tag"
git push origin main

6
.gitignore vendored
View File

@@ -3,10 +3,6 @@
debug/
target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
@@ -32,4 +28,4 @@ Cargo.lock
*.log
# metrics
*.csv
*.csv

View File

@@ -16,6 +16,8 @@ keywords.
Try to do one pull request per change.
**Disclaimer**: While we appreciate all contributions, we do not prioritize minor grammatical fixes (e.g., correcting typos, rewording sentences) unless they significantly improve clarity in technical documentation. These contributions can be a distraction for the team. If you notice a grammatical error, please let us know on our Discord.
## Linting
Before a Pull Request (PR) can be merged, the Continuous Integration (CI) pipeline automatically lints all code using [Clippy](https://doc.rust-lang.org/stable/clippy/usage.html). To ensure your code is free of linting issues before creating a PR, run the following command:
@@ -59,3 +61,21 @@ Comments for function arguments must adhere to this pattern:
/// * `arg2` - The second argument.
pub fn compute(...
```
## Cargo.lock
We check in `Cargo.lock` to ensure reproducible builds. It must be updated whenever `Cargo.toml` changes. The TLSNotary team typically updates `Cargo.lock` in a separate commit after dependency changes.
If you want to hide `Cargo.lock` changes from your local `git diff`, run:
```sh
git update-index --assume-unchanged Cargo.lock
```
To start tracking changes again:
```sh
git update-index --no-assume-unchanged Cargo.lock
```
> ⚠️ Note: This only affects your local view. The file is still tracked in the repository and will be checked and used in CI.

9666
Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -1,93 +1,115 @@
[workspace]
members = [
"crates/benches",
"crates/common",
"crates/components/aead",
"crates/components/block-cipher",
"crates/components/deap",
"crates/components/cipher",
"crates/components/hmac-sha256",
"crates/components/hmac-sha256-circuits",
"crates/components/key-exchange",
"crates/components/stream-cipher",
"crates/components/universal-hash",
"crates/core",
"crates/data-fixtures",
"crates/examples",
"crates/formats",
"crates/notary/client",
"crates/notary/common",
"crates/notary/server",
"crates/notary/tests-integration",
"crates/prover",
"crates/server-fixture/certs",
"crates/server-fixture/server",
"crates/tests-integration",
"crates/tls/backend",
"crates/tls/client",
"crates/tls/client-async",
"crates/tls/core",
"crates/tls/mpc",
"crates/mpc-tls",
"crates/tls/server-fixture",
"crates/verifier",
"crates/wasm",
"crates/wasm-test-runner",
"crates/harness/core",
"crates/harness/executor",
"crates/harness/runner",
]
resolver = "2"
[workspace.lints.rust]
# unsafe_code = "forbid"
[workspace.lints.clippy]
# enum_glob_use = "deny"
[profile.tests-integration]
inherits = "release"
opt-level = 1
[profile.release.package."tlsn-wasm"]
opt-level = "z"
[profile.dev.package."tlsn-wasm"]
debug = false
[workspace.dependencies]
notary-client = { path = "crates/notary/client" }
notary-common = { path = "crates/notary/common" }
notary-server = { path = "crates/notary/server" }
tls-server-fixture = { path = "crates/tls/server-fixture" }
tlsn-aead = { path = "crates/components/aead" }
tlsn-block-cipher = { path = "crates/components/block-cipher" }
tlsn-cipher = { path = "crates/components/cipher" }
tlsn-common = { path = "crates/common" }
tlsn-core = { path = "crates/core" }
tlsn-data-fixtures = { path = "crates/data-fixtures" }
tlsn-deap = { path = "crates/components/deap" }
tlsn-formats = { path = "crates/formats" }
tlsn-hmac-sha256 = { path = "crates/components/hmac-sha256" }
tlsn-hmac-sha256-circuits = { path = "crates/components/hmac-sha256-circuits" }
tlsn-key-exchange = { path = "crates/components/key-exchange" }
tlsn-mpc-tls = { path = "crates/mpc-tls" }
tlsn-prover = { path = "crates/prover" }
tlsn-server-fixture = { path = "crates/server-fixture/server" }
tlsn-server-fixture-certs = { path = "crates/server-fixture/certs" }
tlsn-stream-cipher = { path = "crates/components/stream-cipher" }
tlsn-tls-backend = { path = "crates/tls/backend" }
tlsn-tls-client = { path = "crates/tls/client" }
tlsn-tls-client-async = { path = "crates/tls/client-async" }
tlsn-tls-core = { path = "crates/tls/core" }
tlsn-tls-mpc = { path = "crates/tls/mpc" }
tlsn-universal-hash = { path = "crates/components/universal-hash" }
tlsn-utils = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "e7b2db6" }
tlsn-utils-aio = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "e7b2db6" }
tlsn-utils = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
tlsn-harness-core = { path = "crates/harness/core" }
tlsn-harness-executor = { path = "crates/harness/executor" }
tlsn-harness-runner = { path = "crates/harness/runner" }
tlsn-wasm = { path = "crates/wasm" }
tlsn-verifier = { path = "crates/verifier" }
mpz-circuits = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-common = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-garble = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-garble-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-ole = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-ot = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-share-conversion = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-circuits = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-memory-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-common = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-vm-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-garble = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-garble-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-ole = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-ot = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-share-conversion = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-fields = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-zk = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
mpz-hash = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "ccc0057" }
serio = { version = "0.1" }
spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "e7b2db6" }
uid-mux = { version = "0.1", features = ["serio"] }
rangeset = { version = "0.2" }
serio = { version = "0.2" }
spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
uid-mux = { version = "0.2" }
websocket-relay = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
aes = { version = "0.8" }
aes-gcm = { version = "0.9" }
anyhow = { version = "1.0" }
async-trait = { version = "0.1" }
async-tungstenite = { version = "0.25" }
axum = { version = "0.7" }
async-tungstenite = { version = "0.28.2" }
axum = { version = "0.8" }
bcs = { version = "0.1" }
bincode = { version = "1.3" }
blake3 = { version = "1.5" }
bon = { version = "3.6" }
bytes = { version = "1.4" }
cfg-if = { version = "1" }
chromiumoxide = { version = "0.7" }
chrono = { version = "0.4" }
cipher = { version = "0.4" }
clap = { version = "4.5" }
criterion = { version = "0.5" }
ctr = { version = "0.9" }
derive_builder = { version = "0.12" }
@@ -99,12 +121,17 @@ futures = { version = "0.3" }
futures-rustls = { version = "0.26" }
futures-util = { version = "0.3" }
generic-array = { version = "0.14" }
ghash = { version = "0.5" }
hex = { version = "0.4" }
hmac = { version = "0.12" }
http = { version = "1.1" }
http-body-util = { version = "0.1" }
hyper = { version = "1.1" }
hyper-util = { version = "0.1" }
ipnet = { version = "2.11" }
inventory = { version = "0.3" }
itybity = { version = "0.2" }
js-sys = { version = "0.3" }
k256 = { version = "0.13" }
log = { version = "0.4" }
once_cell = { version = "1.19" }
@@ -112,9 +139,12 @@ opaque-debug = { version = "0.3" }
p256 = { version = "0.13" }
pkcs8 = { version = "0.10" }
pin-project-lite = { version = "0.2" }
rand = { version = "0.8" }
rand_chacha = { version = "0.3" }
rand_core = { version = "0.6" }
pollster = { version = "0.4" }
rand = { version = "0.9" }
rand_chacha = { version = "0.9" }
rand_core = { version = "0.9" }
rand06-compat = { version = "0.1" }
rayon = { version = "1.10" }
regex = { version = "1.10" }
ring = { version = "0.17" }
rs_merkle = { git = "https://github.com/tlsnotary/rs-merkle.git", rev = "85f3e82" }
@@ -130,11 +160,21 @@ thiserror = { version = "1.0" }
tokio = { version = "1.38" }
tokio-rustls = { version = "0.24" }
tokio-util = { version = "0.7" }
toml = { version = "0.8" }
tower = { version = "0.5" }
tower-http = { version = "0.5" }
tower-service = { version = "0.3" }
tower-util = { version = "0.3.1" }
tracing = { version = "0.1" }
tracing-subscriber = { version = "0.3" }
uuid = { version = "1.4" }
wasm-bindgen = { version = "0.2" }
wasm-bindgen-futures = { version = "0.4" }
web-spawn = { version = "0.2" }
web-time = { version = "0.2" }
webpki = { version = "0.22" }
webpki-roots = { version = "0.26" }
ws_stream_tungstenite = { version = "0.13" }
ws_stream_tungstenite = { version = "0.14" }
# Use the patched ws_stream_wasm to fix the issue https://github.com/najamelan/ws_stream_wasm/issues/12#issuecomment-1711902958
ws_stream_wasm = { git = "https://github.com/tlsnotary/ws_stream_wasm", rev = "2ed12aad9f0236e5321f577672f309920b2aef51" }
zeroize = { version = "1.8" }

View File

@@ -1,31 +0,0 @@
# AWS CodeDeploy application specification file
version: 0.0
os: linux
files:
- source: /
destination: /home/ubuntu/tlsn
permissions:
- object: /home/ubuntu/tlsn
owner: ubuntu
group: ubuntu
hooks:
BeforeInstall:
- location: cd-scripts/appspec-scripts/before_install.sh
timeout: 300
runas: ubuntu
AfterInstall:
- location: cd-scripts/appspec-scripts/after_install.sh
timeout: 300
runas: ubuntu
ApplicationStart:
- location: cd-scripts/appspec-scripts/start_app.sh
timeout: 300
runas: ubuntu
ApplicationStop:
- location: cd-scripts/appspec-scripts/stop_app.sh
timeout: 300
runas: ubuntu
ValidateService:
- location: cd-scripts/appspec-scripts/validate_app.sh
timeout: 300
runas: ubuntu

View File

@@ -1,35 +0,0 @@
#!/bin/bash
# AWS CodeDeploy hook: stage the deployed tree and fetch the notary-server
# binary from S3. (Filename is outside this view — by content this is the
# install/staging step; confirm against appspec.yml.)
set -e
# Instance tag holding the current stable version label (IMDSv1 endpoint).
TAG=$(curl http://169.254.169.254/latest/meta-data/tags/instance/stable)
# APPLICATION_NAME is provided by CodeDeploy; keep the 2nd dash-separated
# field (e.g. "tlsn-stable" -> "stable").
APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
if [ $APP_NAME = "stable" ]; then
    # Prepare directories for stable versions
    sudo mkdir ~/${APP_NAME}_${TAG}
    sudo mv ~/tlsn ~/${APP_NAME}_${TAG}
    sudo mkdir -p ~/${APP_NAME}_${TAG}/tlsn/notary/target/release
    # NOTE(review): "ubuntu.ubuntu" is the deprecated user.group chown
    # spelling; "ubuntu:ubuntu" is the portable form.
    sudo chown -R ubuntu.ubuntu ~/${APP_NAME}_${TAG}
    # Download .git directory
    aws s3 cp s3://tlsn-deploy/$APP_NAME/.git ~/${APP_NAME}_${TAG}/tlsn/.git --recursive
    # Download binary
    aws s3 cp s3://tlsn-deploy/$APP_NAME/notary-server ~/${APP_NAME}_${TAG}/tlsn/notary/target/release
    chmod +x ~/${APP_NAME}_${TAG}/tlsn/notary/target/release/notary-server
else
    # Prepare directory for dev
    sudo rm -rf ~/$APP_NAME/tlsn
    sudo mv ~/tlsn/ ~/$APP_NAME
    sudo mkdir -p ~/$APP_NAME/tlsn/notary/target/release
    sudo chown -R ubuntu.ubuntu ~/$APP_NAME
    # Download .git directory
    aws s3 cp s3://tlsn-deploy/$APP_NAME/.git ~/$APP_NAME/tlsn/.git --recursive
    # Download binary
    aws s3 cp s3://tlsn-deploy/$APP_NAME/notary-server ~/$APP_NAME/tlsn/notary/target/release
    chmod +x ~/$APP_NAME/tlsn/notary/target/release/notary-server
fi
exit 0

View File

@@ -1,20 +0,0 @@
#!/bin/bash
# AWS CodeDeploy hook: sanity-check the deployment target. For "stable"
# deployments, refuse to proceed when more than 3 stable version
# directories already exist; for dev, make sure the app directory exists.
set -e
APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
if [ $APP_NAME = "stable" ]; then
    VERSIONS_DEPLOYED=$(find ~/ -maxdepth 1 -type d -name 'stable_*')
    # Word count of the whitespace-separated directory list.
    VERSIONS_DEPLOYED_COUNT=$(echo $VERSIONS_DEPLOYED | wc -w)
    if [ $VERSIONS_DEPLOYED_COUNT -gt 3 ]; then
        echo "More than 3 stable versions found"
        exit 1
    fi
else
    # Dev deployment: create the target directory on first deploy.
    if [ ! -d ~/$APP_NAME ]; then
        mkdir ~/$APP_NAME
    fi
fi
exit 0

View File

@@ -1,26 +0,0 @@
#!/bin/bash
# Port tagging will also be used to manipulate proxy server via modify_proxy.sh script
set -ex
# Instance tag holding the current stable version label (IMDSv1 endpoint).
TAG=$(curl http://169.254.169.254/latest/meta-data/tags/instance/stable)
APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
if [ $APP_NAME = "stable" ]; then
    # Check if all stable ports are in use. If true, terminate the deployment
    [[ $(netstat -lnt4 | egrep -c ':(7047|7057|7067)\s') -eq 3 ]] && { echo "All stable ports are in use"; exit 1; }
    # Start the new version on the first stable port with no listener.
    STABLE_PORTS="7047 7057 7067"
    for PORT in $STABLE_PORTS; do
        # `|| true` keeps a zero-match egrep from aborting under `set -e`.
        PORT_LISTENING=$(netstat -lnt4 | egrep -cw $PORT || true)
        if [ $PORT_LISTENING -eq 0 ]; then
            ~/${APP_NAME}_${TAG}/tlsn/notary/target/release/notary-server --config-file ~/.notary/${APP_NAME}_${PORT}/config.yaml &> ~/${APP_NAME}_${TAG}/tlsn/notary.log &
            # Create a tag that will be used for service validation
            INSTANCE_ID=$(curl http://169.254.169.254/latest/meta-data/instance-id)
            aws ec2 create-tags --resources $INSTANCE_ID --tags "Key=port,Value=$PORT"
            break
        fi
    done
else
    # Dev deployment: single fixed config, log written next to the checkout.
    ~/$APP_NAME/tlsn/notary/target/release/notary-server --config-file ~/.notary/$APP_NAME/config.yaml &> ~/$APP_NAME/tlsn/notary.log &
fi
exit 0

View File

@@ -1,36 +0,0 @@
#!/bin/bash
# AWS CodeDeploy hook sequence: https://docs.aws.amazon.com/codedeploy/latest/userguide/reference-appspec-file-structure-hooks.html#appspec-hooks-server
# Stops the notary-server that is about to be replaced. For stable
# deployments, also evicts the oldest of the (up to 3) deployed versions.
set -ex
APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
if [ $APP_NAME = "stable" ]; then
    VERSIONS_DEPLOYED=$(find ~/ -maxdepth 1 -type d -name 'stable_*')
    VERSIONS_DEPLOYED_COUNT=$(echo $VERSIONS_DEPLOYED | wc -w)
    # Remove oldest version if exists
    if [ $VERSIONS_DEPLOYED_COUNT -eq 3 ]; then
        echo "Candidate versions to be removed:"
        OLDEST_DIR=""
        OLDEST_TIME=""
        for DIR in $VERSIONS_DEPLOYED; do
            # stat %W = file birth time. NOTE(review): %W reports 0 on
            # filesystems without birth-time support — confirm the target FS.
            TIME=$(stat -c %W $DIR)
            if [ -z $OLDEST_TIME ] || [ $TIME -lt $OLDEST_TIME ]; then
                OLDEST_DIR=$DIR
                OLDEST_TIME=$TIME
            fi
        done
        echo "The oldest version is running under: $OLDEST_DIR"
        # Find the PID holding the binary open, stop it, then delete the tree.
        PID=$(lsof $OLDEST_DIR/tlsn/notary/target/release/notary-server | awk '{ print $2 }' | tail -1)
        kill -15 $PID || true
        rm -rf $OLDEST_DIR
    fi
else
    # Dev deployment: stop the single dev notary process.
    PID=$(pgrep -f notary.*$APP_NAME)
    kill -15 $PID || true
fi
exit 0

View File

@@ -1,21 +0,0 @@
#!/bin/bash
set -e
# Verify process is running
APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
# Verify that listening sockets exist
if [ $APP_NAME = "stable" ]; then
    # The port was tagged onto the instance by the start hook.
    PORT=$(curl http://169.254.169.254/latest/meta-data/tags/instance/port)
    ps -ef | grep notary.*$APP_NAME.*$PORT | grep -v grep
    # NOTE(review): with `set -e` a failing grep aborts the script before
    # this check runs, so the explicit `exit 1` is effectively dead code.
    [ $? -eq 0 ] || exit 1
else
    PORT=7048
    pgrep -f notary.*$APP_NAME
    # NOTE(review): same dead-code situation as above under `set -e`.
    [ $? -eq 0 ] || exit 1
fi
# Exactly one listener is expected on the chosen port.
EXPOSED_PORTS=$(netstat -lnt4 | egrep -cw $PORT)
[ $EXPOSED_PORTS -eq 1 ] || exit 1
exit 0

View File

@@ -1,14 +0,0 @@
#!/bin/bash
# This script is executed on proxy side, in order to assign the available port to latest stable version
set -e
PORT=$1
VERSION=$2
sed -i "/# Port $PORT/{n;s/v[0-9].[0-9].[0-9]-[a-z]*.[0-9]*/$VERSION/g}" /etc/nginx/sites-available/tlsnotary-pse
sed -i "/# Port $PORT/{n;n;s/v[0-9].[0-9].[0-9]-[a-z]*.[0-9]*/$VERSION/g}" /etc/nginx/sites-available/tlsnotary-pse
nginx -t
nginx -s reload
exit 0

View File

@@ -1,46 +0,0 @@
[package]
edition = "2021"
name = "tlsn-benches"
publish = false
version = "0.0.0"
[dependencies]
anyhow = { workspace = true }
charming = { version = "0.3.1", features = ["ssr"] }
csv = "1.3.0"
futures = { workspace = true }
serde = { workspace = true }
tlsn-common = { workspace = true }
tlsn-core = { workspace = true }
tlsn-prover = { workspace = true }
tlsn-server-fixture = { workspace = true }
tlsn-server-fixture-certs = { workspace = true }
tlsn-tls-core = { workspace = true }
tlsn-verifier = { workspace = true }
tokio = { workspace = true, features = [
"rt",
"rt-multi-thread",
"macros",
"net",
"io-std",
"fs",
] }
tokio-util = { workspace = true }
toml = "0.8.11"
tracing-subscriber = { workspace = true, features = ["env-filter"] }
[[bin]]
name = "bench"
path = "bin/bench.rs"
[[bin]]
name = "prover"
path = "bin/prover.rs"
[[bin]]
name = "verifier"
path = "bin/verifier.rs"
[[bin]]
name = "plot"
path = "bin/plot.rs"

View File

@@ -1,35 +0,0 @@
# TLSNotary bench utilities
This crate provides utilities for benchmarking protocol performance under various network conditions and usage patterns.
As the protocol is mostly IO bound, it's important to track how it performs in low bandwidth and/or high latency environments. To do this we set up temporary network namespaces and add virtual ethernet interfaces which we can control using the linux `tc` (Traffic Control) utility.
## Configuration
See the `bench.toml` file for benchmark configurations.
## Preliminaries
To run the benchmarks you will need `iproute2` installed, eg:
```sh
sudo apt-get install iproute2 -y
```
## Running benches
Running the benches requires root privileges because they will set up virtual interfaces. The script is designed to fully clean up when the benches are done, but run them at your own risk.
Make sure you're in the `crates/benches/` directory, build the binaries then run the script:
```sh
cargo build --release
sudo ./bench.sh
```
## Metrics
After running the benches you will find a `metrics.csv` file in the working directory. Because the benches run as root, the file will be owned by `root`; take ownership of it with
```sh
sudo chown $USER metrics.csv
```

View File

@@ -1,13 +0,0 @@
#! /bin/bash
# Runs the benchmark suite and plots the results.
#
# Must be run as root: the bench binary creates network namespaces and
# virtual interfaces.
# Check if we are running as root
if [ "$EUID" -ne 0 ]; then
    echo "This script must be run as root"
    # Bug fix: a bare `exit` here propagated the status of the last
    # command (0), so callers saw success despite nothing running.
    exit 1
fi
# Run the benchmark binary
../../target/release/bench
# Plot the results
../../target/release/plot metrics.csv

View File

@@ -1,39 +0,0 @@
[[benches]]
name = "latency"
upload = 250
upload-delay = [10, 25, 50]
download = 250
download-delay = [10, 25, 50]
upload-size = 1024
download-size = 4096
defer-decryption = true
[[benches]]
name = "download_bandwidth"
upload = 250
upload-delay = 25
download = [10, 25, 50, 100, 250]
download-delay = 25
upload-size = 1024
download-size = 4096
defer-decryption = true
[[benches]]
name = "upload_bandwidth"
upload = [10, 25, 50, 100, 250]
upload-delay = 25
download = 250
download-delay = 25
upload-size = 1024
download-size = 4096
defer-decryption = [false, true]
[[benches]]
name = "download_volume"
upload = 250
upload-delay = 25
download = 250
download-delay = 25
upload-size = 1024
download-size = [1024, 4096, 16384, 65536]
defer-decryption = true

View File

@@ -1,21 +0,0 @@
# Build stage: compile the bench binaries in release mode.
FROM rust AS builder
WORKDIR /usr/src/tlsn
COPY . .
RUN cd crates/benches && cargo build --release

# Runtime stage: minimal Ubuntu with iproute2 (provides `ip`/`tc` used by
# the harness to set up namespaces and traffic shaping).
FROM ubuntu:latest
RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \
    iproute2 \
    sudo \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
# Copy all four bench binaries out of the build stage.
COPY --from=builder ["/usr/src/tlsn/target/release/bench", "/usr/src/tlsn/target/release/prover", "/usr/src/tlsn/target/release/verifier", "/usr/src/tlsn/target/release/plot", "/usr/local/bin/"]
# Paths the bench binary reads when spawning the worker processes.
ENV PROVER_PATH="/usr/local/bin/prover"
ENV VERIFIER_PATH="/usr/local/bin/verifier"
VOLUME [ "/benches" ]
WORKDIR "/benches"
CMD ["/bin/bash", "-c", "bench && plot /benches/metrics.csv && cat /benches/metrics.csv"]

View File

@@ -1,2 +0,0 @@
# exclude any /target folders
**/target*

View File

@@ -1,44 +0,0 @@
use std::process::Command;
use tlsn_benches::{clean_up, set_up};
fn main() {
    // Binary locations are overridable via env so the bench can run both
    // from the repo (../../target/release) and inside the Docker image,
    // which sets PROVER_PATH/VERIFIER_PATH explicitly.
    let prover_path =
        std::env::var("PROVER_PATH").unwrap_or_else(|_| "../../target/release/prover".to_string());
    let verifier_path = std::env::var("VERIFIER_PATH")
        .unwrap_or_else(|_| "../../target/release/verifier".to_string());

    if let Err(e) = set_up() {
        println!("Error setting up: {}", e);
        clean_up();
        // Bug fix: previously execution fell through and still spawned the
        // prover/verifier inside namespaces that failed to set up.
        return;
    }

    // Run prover and verifier binaries in parallel, each inside its own
    // network namespace so the tc-shaped links apply.
    let Ok(mut verifier) = Command::new("ip")
        .arg("netns")
        .arg("exec")
        .arg("verifier-ns")
        .arg(verifier_path)
        .spawn()
    else {
        println!("Failed to start verifier");
        return clean_up();
    };

    let Ok(mut prover) = Command::new("ip")
        .arg("netns")
        .arg("exec")
        .arg("prover-ns")
        .arg(prover_path)
        .spawn()
    else {
        println!("Failed to start prover");
        // NOTE(review): the verifier child is left running here; it exits
        // on its own when the connection fails.
        return clean_up();
    };

    // Wait for both to finish
    _ = prover.wait();
    _ = verifier.wait();

    clean_up();
}

View File

@@ -1,156 +0,0 @@
use charming::{
component::{
Axis, DataView, Feature, Legend, Restore, SaveAsImage, Title, Toolbox, ToolboxDataZoom,
},
element::{NameLocation, Orient, Tooltip, Trigger},
series::{Line, Scatter},
theme::Theme,
Chart, HtmlRenderer,
};
use tlsn_benches::metrics::Metrics;
const THEME: Theme = Theme::Default;
/// Reads the metrics CSV named by the first CLI argument and renders both
/// charts from it.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    // First CLI argument is the metrics CSV produced by the bench binary.
    let path = std::env::args()
        .nth(1)
        .expect("Usage: plot <path_to_csv_file>");
    let mut reader = csv::Reader::from_path(path)?;

    // Fail on the first malformed row rather than plotting partial data.
    let records = reader
        .deserialize::<Metrics>()
        .collect::<Result<Vec<Metrics>, _>>()?;

    runtime_vs_latency(&records)?;
    runtime_vs_bandwidth(&records)?;

    Ok(())
}
/// Plots runtime against combined (upload + download) latency for the
/// "latency" bench and saves the chart to `runtime_vs_latency.html`.
fn runtime_vs_latency(all_data: &[Metrics]) -> Result<Chart, Box<dyn std::error::Error>> {
    const TITLE: &str = "Runtime vs Latency";

    // One [x, y] point per "latency" record: x = total delay (ms),
    // y = runtime (s).
    let data: Vec<Vec<f32>> = all_data
        .iter()
        .filter(|record| record.name == "latency")
        .map(|record| {
            let total_delay = record.upload_delay + record.download_delay; // Calculate the sum of upload and download delays.
            vec![total_delay as f32, record.runtime as f32]
        })
        .collect();

    // Chart builder API: https://github.com/yuankunzhang/charming
    let chart = Chart::new()
        .title(Title::new().text(TITLE))
        .tooltip(Tooltip::new().trigger(Trigger::Axis))
        .legend(Legend::new().orient(Orient::Vertical))
        .toolbox(
            Toolbox::new().show(true).feature(
                Feature::new()
                    .save_as_image(SaveAsImage::new())
                    .restore(Restore::new())
                    .data_zoom(ToolboxDataZoom::new().y_axis_index("none"))
                    .data_view(DataView::new().read_only(false)),
            ),
        )
        .x_axis(
            Axis::new()
                .scale(true)
                .name("Upload + Download Latency (ms)")
                .name_location(NameLocation::Center),
        )
        .y_axis(
            Axis::new()
                .scale(true)
                .name("Runtime (s)")
                .name_location(NameLocation::Middle),
        )
        .series(
            Scatter::new()
                .name("Combined Latency")
                .symbol_size(10)
                .data(data),
        );

    // Save the chart as HTML file.
    HtmlRenderer::new(TITLE, 1000, 800)
        .theme(THEME)
        .save(&chart, "runtime_vs_latency.html")
        .unwrap();

    Ok(chart)
}
/// Plots runtime against bandwidth for the "download_bandwidth" and
/// "upload_bandwidth" benches (the latter split by deferred decryption)
/// and saves the chart to `runtime_vs_bandwidth.html`.
fn runtime_vs_bandwidth(all_data: &[Metrics]) -> Result<Chart, Box<dyn std::error::Error>> {
    const TITLE: &str = "Runtime vs Bandwidth";

    // [bandwidth (Mbps), runtime (s)] points for each of the three series.
    let download_data: Vec<Vec<f32>> = all_data
        .iter()
        .filter(|record| record.name == "download_bandwidth")
        .map(|record| vec![record.download as f32, record.runtime as f32])
        .collect();
    let upload_deferred_data: Vec<Vec<f32>> = all_data
        .iter()
        .filter(|record| record.name == "upload_bandwidth" && record.defer_decryption)
        .map(|record| vec![record.upload as f32, record.runtime as f32])
        .collect();
    let upload_non_deferred_data: Vec<Vec<f32>> = all_data
        .iter()
        .filter(|record| record.name == "upload_bandwidth" && !record.defer_decryption)
        .map(|record| vec![record.upload as f32, record.runtime as f32])
        .collect();

    // Chart builder API: https://github.com/yuankunzhang/charming
    let chart = Chart::new()
        .title(Title::new().text(TITLE))
        .tooltip(Tooltip::new().trigger(Trigger::Axis))
        .legend(Legend::new().orient(Orient::Vertical))
        .toolbox(
            Toolbox::new().show(true).feature(
                Feature::new()
                    .save_as_image(SaveAsImage::new())
                    .restore(Restore::new())
                    .data_zoom(ToolboxDataZoom::new().y_axis_index("none"))
                    .data_view(DataView::new().read_only(false)),
            ),
        )
        .x_axis(
            Axis::new()
                .scale(true)
                .name("Bandwidth (Mbps)")
                .name_location(NameLocation::Center),
        )
        .y_axis(
            Axis::new()
                .scale(true)
                .name("Runtime (s)")
                .name_location(NameLocation::Middle),
        )
        .series(
            Line::new()
                .name("Download bandwidth")
                .symbol_size(10)
                .data(download_data),
        )
        .series(
            Line::new()
                .name("Upload bandwidth (deferred decryption)")
                .symbol_size(10)
                .data(upload_deferred_data),
        )
        .series(
            Line::new()
                .name("Upload bandwidth")
                .symbol_size(10)
                .data(upload_non_deferred_data),
        );

    // Save the chart as HTML file.
    HtmlRenderer::new(TITLE, 1000, 800)
        .theme(THEME)
        .save(&chart, "runtime_vs_bandwidth.html")
        .unwrap();

    Ok(chart)
}

View File

@@ -1,197 +0,0 @@
use std::{
io::Write,
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
time::Instant,
};
use anyhow::Context;
use futures::{AsyncReadExt, AsyncWriteExt};
use tls_core::verify::WebPkiVerifier;
use tlsn_benches::{
config::{BenchInstance, Config},
metrics::Metrics,
set_interface, PROVER_INTERFACE,
};
use tlsn_common::config::ProtocolConfig;
use tlsn_core::{transcript::Idx, CryptoProvider};
use tlsn_server_fixture::bind;
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::{
compat::TokioAsyncReadCompatExt,
io::{InspectReader, InspectWriter},
};
use tlsn_prover::{Prover, ProverConfig};
use tracing_subscriber::{fmt::format::FmtSpan, EnvFilter};
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Bench configuration path; defaults to bench.toml in the CWD.
    let config_path = std::env::var("CFG").unwrap_or_else(|_| "bench.toml".to_string());
    let config: Config = toml::from_str(
        &std::fs::read_to_string(config_path).context("failed to read config file")?,
    )
    .context("failed to parse config")?;

    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
        .init();

    // Verifier address; the default IP matches the verifier subnet used by
    // the namespace setup in lib.rs.
    let ip = std::env::var("VERIFIER_IP").unwrap_or_else(|_| "10.10.1.1".to_string());
    let port: u16 = std::env::var("VERIFIER_PORT")
        .map(|port| port.parse().expect("port is valid u16"))
        .unwrap_or(8000);
    let verifier_host = (ip.as_str(), port);

    // Append so repeated runs accumulate rows in the same CSV.
    let mut file = std::fs::OpenOptions::new()
        .create(true)
        .append(true)
        .open("metrics.csv")
        .context("failed to open metrics file")?;

    {
        let mut metric_wtr = csv::Writer::from_writer(&mut file);
        for bench in config.benches {
            let instances = bench.flatten();
            for instance in instances {
                println!("{:?}", &instance);
                // One fresh TCP connection to the verifier per instance.
                let io = tokio::net::TcpStream::connect(verifier_host)
                    .await
                    .context("failed to open tcp connection")?;
                metric_wtr.serialize(
                    run_instance(instance, io)
                        .await
                        .context("failed to run instance")?,
                )?;
                // Flush per row so partial results survive a crash.
                metric_wtr.flush()?;
            }
        }
    }

    file.flush()?;

    Ok(())
}
/// Runs a single bench instance as the prover over `io`, returning the
/// measured [`Metrics`] row.
async fn run_instance<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
    instance: BenchInstance,
    io: S,
) -> anyhow::Result<Metrics> {
    // Byte counters fed by the inspector wrappers below; they count bytes
    // exchanged with the verifier over `io`.
    let uploaded = Arc::new(AtomicU64::new(0));
    let downloaded = Arc::new(AtomicU64::new(0));
    let io = InspectWriter::new(
        InspectReader::new(io, {
            let downloaded = downloaded.clone();
            move |data| {
                downloaded.fetch_add(data.len() as u64, Ordering::Relaxed);
            }
        }),
        {
            let uploaded = uploaded.clone();
            move |data| {
                uploaded.fetch_add(data.len() as u64, Ordering::Relaxed);
            }
        },
    );

    let BenchInstance {
        name,
        upload,
        upload_delay,
        download,
        download_delay,
        upload_size,
        download_size,
        defer_decryption,
    } = instance.clone();

    // Shape the prover side of the virtual link (bandwidth/delay).
    set_interface(PROVER_INTERFACE, upload, 1, upload_delay)?;

    // In-process TLS server: the tlsn server fixture bound to one end of a
    // duplex pipe; the prover connects through the other end.
    let (client_conn, server_conn) = tokio::io::duplex(2 << 16);
    tokio::spawn(bind(server_conn.compat()));

    let start_time = Instant::now();

    let provider = CryptoProvider {
        cert: WebPkiVerifier::new(root_store(), None),
        ..Default::default()
    };

    // +256 leaves headroom above the payload sizes for HTTP framing.
    let protocol_config = if defer_decryption {
        ProtocolConfig::builder()
            .max_sent_data(upload_size + 256)
            .max_recv_data(download_size + 256)
            .build()
            .unwrap()
    } else {
        // Without deferred decryption the online recv limit is set as well.
        ProtocolConfig::builder()
            .max_sent_data(upload_size + 256)
            .max_recv_data(download_size + 256)
            .max_recv_data_online(download_size + 256)
            .build()
            .unwrap()
    };

    let prover = Prover::new(
        ProverConfig::builder()
            .server_name(SERVER_DOMAIN)
            .protocol_config(protocol_config)
            .defer_decryption_from_start(defer_decryption)
            .crypto_provider(provider)
            .build()
            .context("invalid prover config")?,
    )
    .setup(io.compat())
    .await?;

    let (mut mpc_tls_connection, prover_fut) = prover.connect(client_conn.compat()).await.unwrap();

    let prover_task = tokio::spawn(prover_fut);

    // Request `download_size` bytes from the fixture and pad the request
    // with `upload_size` bytes of 0x42 in a header to reach the configured
    // sent-data volume.
    let request = format!(
        "GET /bytes?size={} HTTP/1.1\r\nConnection: close\r\nData: {}\r\n\r\n",
        download_size,
        String::from_utf8(vec![0x42u8; upload_size]).unwrap(),
    );

    mpc_tls_connection.write_all(request.as_bytes()).await?;
    mpc_tls_connection.close().await?;

    let mut response = vec![];
    mpc_tls_connection.read_to_end(&mut response).await?;

    // Prove the full sent and received transcripts to the verifier.
    let mut prover = prover_task.await??.start_prove();

    let (sent_len, recv_len) = prover.transcript().len();

    prover
        .prove_transcript(Idx::new(0..sent_len), Idx::new(0..recv_len))
        .await?;
    prover.finalize().await?;

    Ok(Metrics {
        name,
        upload,
        upload_delay,
        download,
        download_delay,
        upload_size,
        download_size,
        defer_decryption,
        runtime: Instant::now().duration_since(start_time).as_secs(),
        uploaded: uploaded.load(Ordering::SeqCst),
        downloaded: downloaded.load(Ordering::SeqCst),
    })
}
/// Builds a root certificate store trusting only the server fixture's CA.
fn root_store() -> tls_core::anchors::RootCertStore {
    let mut store = tls_core::anchors::RootCertStore::empty();
    let ca = tls_core::key::Certificate(CA_CERT_DER.to_vec());
    store.add(&ca).unwrap();
    store
}

View File

@@ -1,100 +0,0 @@
use anyhow::Context;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::compat::TokioAsyncReadCompatExt;
use tracing_subscriber::{fmt::format::FmtSpan, EnvFilter};
use tls_core::verify::WebPkiVerifier;
use tlsn_benches::{
config::{BenchInstance, Config},
set_interface, VERIFIER_INTERFACE,
};
use tlsn_common::config::ProtocolConfigValidator;
use tlsn_core::CryptoProvider;
use tlsn_server_fixture_certs::CA_CERT_DER;
use tlsn_verifier::{Verifier, VerifierConfig};
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Bench configuration path; defaults to bench.toml in the CWD.
    let config_path = std::env::var("CFG").unwrap_or_else(|_| "bench.toml".to_string());
    let config: Config = toml::from_str(
        &std::fs::read_to_string(config_path).context("failed to read config file")?,
    )
    .context("failed to parse config")?;

    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
        .init();

    // Listen address; must match what the prover binary dials.
    let ip = std::env::var("VERIFIER_IP").unwrap_or_else(|_| "10.10.1.1".to_string());
    let port: u16 = std::env::var("VERIFIER_PORT")
        .map(|port| port.parse().expect("port is valid u16"))
        .unwrap_or(8000);
    let host = (ip.as_str(), port);

    let listener = tokio::net::TcpListener::bind(host)
        .await
        .context("failed to bind to port")?;

    // Accept one prover connection per bench instance, mirroring the
    // iteration order of the prover binary.
    for bench in config.benches {
        for instance in bench.flatten() {
            let (io, _) = listener
                .accept()
                .await
                .context("failed to accept connection")?;
            run_instance(instance, io)
                .await
                .context("failed to run instance")?;
        }
    }

    Ok(())
}
/// Runs the verifier side of a single bench instance over `io`.
async fn run_instance<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
    instance: BenchInstance,
    io: S,
) -> anyhow::Result<()> {
    let BenchInstance {
        download,
        download_delay,
        upload_size,
        download_size,
        ..
    } = instance;

    // Shape the verifier side of the virtual link (bandwidth/delay).
    set_interface(VERIFIER_INTERFACE, download, 1, download_delay)?;

    let provider = CryptoProvider {
        cert: cert_verifier(),
        ..Default::default()
    };

    // Must accept the limits the prover configures (+256 header headroom,
    // matching the prover binary).
    let config_validator = ProtocolConfigValidator::builder()
        .max_sent_data(upload_size + 256)
        .max_recv_data(download_size + 256)
        .build()
        .unwrap();

    let verifier = Verifier::new(
        VerifierConfig::builder()
            .protocol_config_validator(config_validator)
            .crypto_provider(provider)
            .build()?,
    );

    _ = verifier.verify(io.compat()).await?;

    println!("verifier done");

    Ok(())
}
/// Certificate verifier that trusts only the server fixture's CA cert.
fn cert_verifier() -> WebPkiVerifier {
    let mut roots = tls_core::anchors::RootCertStore::empty();
    let ca = tls_core::key::Certificate(CA_CERT_DER.to_vec());
    roots.add(&ca).unwrap();
    WebPkiVerifier::new(roots, None)
}

View File

@@ -1,12 +0,0 @@
# Run the TLSN benches with Docker
In the root folder of this repository, run:
```
docker build -t tlsn-bench . -f ./crates/benches/benches.Dockerfile
```
Next run the benches with:
```
docker run -it --privileged -v ./crates/benches/:/benches tlsn-bench
```
The `--privileged` flag is required because the bench needs permission to create network namespaces and configure traffic-control (`tc`) parameters inside the container.

View File

@@ -1,111 +0,0 @@
use serde::{Deserialize, Serialize};
/// A config value that is either a single scalar or a list of values to
/// sweep over.
#[derive(Deserialize)]
#[serde(untagged)]
pub enum Field<T> {
    /// A single value.
    Single(T),
    /// Multiple values, producing one bench instance per value.
    Multiple(Vec<T>),
}
/// Top-level bench configuration (parsed from `bench.toml`).
#[derive(Deserialize)]
pub struct Config {
    /// The benches to run.
    pub benches: Vec<Bench>,
}
/// A single bench definition; each field may sweep over multiple values
/// (see [`Field`]), expanded into concrete instances by `flatten`.
#[derive(Deserialize)]
pub struct Bench {
    /// Name of the bench.
    pub name: String,
    /// Upload bandwidth in Mbps.
    pub upload: Field<usize>,
    /// Upload latency in ms.
    #[serde(rename = "upload-delay")]
    pub upload_delay: Field<usize>,
    /// Download bandwidth in Mbps.
    pub download: Field<usize>,
    /// Download latency in ms.
    #[serde(rename = "download-delay")]
    pub download_delay: Field<usize>,
    /// Bytes sent to the server.
    #[serde(rename = "upload-size")]
    pub upload_size: Field<usize>,
    /// Bytes received from the server.
    #[serde(rename = "download-size")]
    pub download_size: Field<usize>,
    /// Whether to use deferred decryption.
    #[serde(rename = "defer-decryption")]
    pub defer_decryption: Field<bool>,
}
impl Bench {
/// Flattens the config into a list of instances
pub fn flatten(self) -> Vec<BenchInstance> {
let mut instances = vec![];
let upload = match self.upload {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
let upload_delay = match self.upload_delay {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
let download = match self.download {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
let download_latency = match self.download_delay {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
let upload_size = match self.upload_size {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
let download_size = match self.download_size {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
let defer_decryption = match self.defer_decryption {
Field::Single(u) => vec![u],
Field::Multiple(u) => u,
};
for u in upload {
for ul in &upload_delay {
for d in &download {
for dl in &download_latency {
for us in &upload_size {
for ds in &download_size {
for dd in &defer_decryption {
instances.push(BenchInstance {
name: self.name.clone(),
upload: u,
upload_delay: *ul,
download: *d,
download_delay: *dl,
upload_size: *us,
download_size: *ds,
defer_decryption: *dd,
});
}
}
}
}
}
}
}
instances
}
}
/// A fully-resolved bench instance: one concrete combination of values
/// produced by flattening a [`Bench`].
#[derive(Debug, Clone, Serialize)]
pub struct BenchInstance {
    /// Name of the bench this instance belongs to.
    pub name: String,
    /// Upload bandwidth in Mbps.
    pub upload: usize,
    /// Upload latency in ms.
    pub upload_delay: usize,
    /// Download bandwidth in Mbps.
    pub download: usize,
    /// Download latency in ms.
    pub download_delay: usize,
    /// Bytes sent to the server.
    pub upload_size: usize,
    /// Bytes received from the server.
    pub download_size: usize,
    /// Whether deferred decryption is used.
    pub defer_decryption: bool,
}

View File

@@ -1,255 +0,0 @@
pub mod config;
pub mod metrics;
use std::{io, process::Command};
/// Network namespace the prover runs in.
pub const PROVER_NAMESPACE: &str = "prover-ns";
/// Prover end of the veth pair.
pub const PROVER_INTERFACE: &str = "prover-veth";
/// Address (CIDR) assigned to the prover interface.
pub const PROVER_SUBNET: &str = "10.10.1.0/24";
/// Network namespace the verifier runs in.
pub const VERIFIER_NAMESPACE: &str = "verifier-ns";
/// Verifier end of the veth pair.
pub const VERIFIER_INTERFACE: &str = "verifier-veth";
/// Address (CIDR) assigned to the verifier interface.
pub const VERIFIER_SUBNET: &str = "10.10.1.1/24";
/// Creates the prover and verifier network namespaces, links them with a
/// veth pair, brings the interfaces up, and assigns addresses and default
/// routes. Requires root.
pub fn set_up() -> io::Result<()> {
    // Create network namespaces
    create_network_namespace(PROVER_NAMESPACE)?;
    create_network_namespace(VERIFIER_NAMESPACE)?;

    // Create veth pair and attach to namespaces
    create_veth_pair(
        PROVER_NAMESPACE,
        PROVER_INTERFACE,
        VERIFIER_NAMESPACE,
        VERIFIER_INTERFACE,
    )?;

    // Bring each side up, address it, and route through it. The route's
    // gateway is the address part of the subnet (text before the '/').
    for (ns, iface, subnet) in [
        (PROVER_NAMESPACE, PROVER_INTERFACE, PROVER_SUBNET),
        (VERIFIER_NAMESPACE, VERIFIER_INTERFACE, VERIFIER_SUBNET),
    ] {
        set_device_up(ns, iface)?;
        assign_ip_to_interface(ns, iface, subnet)?;
        set_default_route(ns, iface, subnet.split('/').next().unwrap())?;
    }

    Ok(())
}
/// Tears down the veth pair and both network namespaces, logging (but not
/// propagating) failures so cleanup always runs to completion.
pub fn clean_up() {
    // Deleting one end of the veth pair removes its peer as well.
    let delete_link = Command::new("ip")
        .args([
            "netns",
            "exec",
            PROVER_NAMESPACE,
            "ip",
            "link",
            "delete",
            PROVER_INTERFACE,
        ])
        .status();
    if let Err(e) = delete_link {
        println!("Error deleting interface {}: {}", PROVER_INTERFACE, e);
    }

    // Delete both namespaces.
    for ns in [PROVER_NAMESPACE, VERIFIER_NAMESPACE] {
        if let Err(e) = Command::new("ip").args(["netns", "del", ns]).status() {
            println!("Error deleting namespace {}: {}", ns, e);
        }
    }
}
/// Sets the interface parameters.
///
/// Must be run in the correct namespace.
///
/// # Arguments
///
/// * `egress` - The egress bandwidth in mbps.
/// * `burst` - The burst in mbps.
/// * `delay` - The delay in ms.
pub fn set_interface(interface: &str, egress: usize, burst: usize, delay: usize) -> io::Result<()> {
    // Drop any existing root qdisc; ignore failure (there may be none).
    let _ = Command::new("tc")
        .args(["qdisc", "del", "dev", interface, "root"])
        .status();

    // Egress shaping: token-bucket filter installed as the root qdisc.
    Command::new("tc")
        .args(["qdisc", "add", "dev", interface, "root", "handle", "1:", "tbf"])
        .arg("rate")
        .arg(format!("{}mbit", egress))
        .arg("burst")
        .arg(format!("{}mbit", burst))
        .args(["latency", "60s"])
        .status()?;

    // Added delay: netem qdisc chained under the tbf.
    Command::new("tc")
        .args(["qdisc", "add", "dev", interface, "parent", "1:1", "handle", "10:", "netem"])
        .arg("delay")
        .arg(format!("{}ms", delay))
        .status()?;

    Ok(())
}
/// Create a network namespace with the given name if it does not already exist.
fn create_network_namespace(name: &str) -> io::Result<()> {
    // `ip netns list` output is searched for the name as a byte substring.
    let existing = Command::new("ip").args(["netns", "list"]).output()?;
    let already_present = existing
        .stdout
        .windows(name.len())
        .any(|w| w == name.as_bytes());

    if already_present {
        println!("Namespace {} already exists", name);
        return Ok(());
    }

    println!("Creating namespace {}", name);
    Command::new("ip").args(["netns", "add", name]).status()?;
    Ok(())
}
/// Creates a veth pair and moves each end into its namespace; a no-op when
/// either end already exists.
fn create_veth_pair(
    left_namespace: &str,
    left_interface: &str,
    right_namespace: &str,
    right_interface: &str,
) -> io::Result<()> {
    // Check if interfaces are already present in namespaces
    if is_interface_present_in_namespace(left_namespace, left_interface)?
        || is_interface_present_in_namespace(right_namespace, right_interface)?
    {
        println!("Virtual interface already exists.");
        return Ok(());
    }

    // Create the pair in the current (root) namespace first.
    let create_args = [
        "link",
        "add",
        left_interface,
        "type",
        "veth",
        "peer",
        "name",
        right_interface,
    ];
    Command::new("ip").args(create_args).status()?;
    println!(
        "Created veth pair {} and {}",
        left_interface, right_interface
    );

    // Then move each end into its target namespace.
    for (ns, iface) in [
        (left_namespace, left_interface),
        (right_namespace, right_interface),
    ] {
        attach_interface_to_namespace(ns, iface)?;
    }

    Ok(())
}
/// Moves `interface` into `namespace`.
fn attach_interface_to_namespace(namespace: &str, interface: &str) -> io::Result<()> {
    let args = ["link", "set", interface, "netns", namespace];
    Command::new("ip").args(args).status()?;
    println!("Attached {} to namespace {}", interface, namespace);
    Ok(())
}
/// Installs a default route via `ip` through `interface` inside `namespace`.
fn set_default_route(namespace: &str, interface: &str, ip: &str) -> io::Result<()> {
    let args = [
        "netns", "exec", namespace, "ip", "route", "add", "default", "via", ip, "dev",
        interface,
    ];
    Command::new("ip").args(args).status()?;
    println!(
        "Set default route for namespace {} ip {} to {}",
        namespace, ip, interface
    );
    Ok(())
}
/// Returns whether `interface` shows up in `ip link list` inside `namespace`.
fn is_interface_present_in_namespace(
    namespace: &str,
    interface: &str,
) -> Result<bool, std::io::Error> {
    let output = Command::new("ip")
        .args([
            "netns", "exec", namespace, "ip", "link", "list", "dev", interface,
        ])
        .output()?;
    // Substring search of the command output for the interface name.
    let needle = interface.as_bytes();
    let found = output.stdout.windows(needle.len()).any(|w| w == needle);
    Ok(found)
}
/// Brings `interface` up inside `namespace`.
fn set_device_up(namespace: &str, interface: &str) -> io::Result<()> {
    let args = [
        "netns", "exec", namespace, "ip", "link", "set", interface, "up",
    ];
    Command::new("ip").args(args).status()?;
    Ok(())
}
/// Assigns the CIDR address `ip` to `interface` inside `namespace`.
fn assign_ip_to_interface(namespace: &str, interface: &str, ip: &str) -> io::Result<()> {
    let args = [
        "netns", "exec", namespace, "ip", "addr", "add", ip, "dev", interface,
    ];
    Command::new("ip").args(args).status()?;
    Ok(())
}

View File

@@ -1,26 +0,0 @@
use serde::{Deserialize, Serialize};
/// A single benchmark result row, written to / read from `metrics.csv`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Metrics {
    /// Name of the bench this measurement belongs to.
    pub name: String,
    /// Upload bandwidth in Mbps.
    pub upload: usize,
    /// Upload latency in ms.
    pub upload_delay: usize,
    /// Download bandwidth in Mbps.
    pub download: usize,
    /// Download latency in ms.
    pub download_delay: usize,
    /// Total bytes sent to the server.
    pub upload_size: usize,
    /// Total bytes received from the server.
    pub download_size: usize,
    /// Whether deferred decryption was used.
    pub defer_decryption: bool,
    /// The total runtime of the benchmark in seconds.
    pub runtime: u64,
    /// The total amount of data uploaded to the verifier in bytes.
    pub uploaded: u64,
    /// The total amount of data downloaded from the verifier in bytes.
    pub downloaded: u64,
}

View File

@@ -1,28 +1,44 @@
[package]
name = "tlsn-common"
description = "Common code shared between tlsn-prover and tlsn-verifier"
version = "0.1.0-alpha.7"
version = "0.1.0-alpha.12"
edition = "2021"
[lints]
workspace = true
[features]
default = []
[dependencies]
tlsn-core = { workspace = true }
tlsn-tls-core = { workspace = true }
tlsn-cipher = { workspace = true }
mpz-core = { workspace = true }
mpz-common = { workspace = true }
mpz-garble = { workspace = true }
mpz-ot = { workspace = true }
mpz-memory-core = { workspace = true }
mpz-hash = { workspace = true }
mpz-vm-core = { workspace = true }
mpz-zk = { workspace = true }
async-trait = { workspace = true }
derive_builder = { workspace = true }
futures = { workspace = true }
ghash = { workspace = true }
once_cell = { workspace = true }
opaque-debug = { workspace = true }
rand = { workspace = true }
rangeset = { workspace = true }
serio = { workspace = true, features = ["codec", "bincode"] }
thiserror = { workspace = true }
tracing = { workspace = true }
uid-mux = { workspace = true, features = ["serio"] }
serde = { workspace = true, features = ["derive"] }
tlsn-utils = { workspace = true }
semver = { version = "1.0", features = ["serde"] }
[target.'cfg(target_arch = "wasm32")'.dependencies]
wasm-bindgen = { version = "0.2" }
web-spawn = { workspace = true }
[dev-dependencies]
rstest = { workspace = true }

110
crates/common/src/commit.rs Normal file
View File

@@ -0,0 +1,110 @@
//! Plaintext commitment and proof of encryption.
pub mod hash;
use mpz_core::bitvec::BitVec;
use mpz_memory_core::{binary::Binary, DecodeFutureTyped};
use mpz_vm_core::{prelude::*, Vm};
use crate::{
transcript::Record,
zk_aes_ctr::{ZkAesCtr, ZkAesCtrError},
Role,
};
/// Commits the plaintext of the provided records, returning a proof of
/// encryption.
///
/// Writes the plaintext VM reference to the provided records.
pub fn commit_records<'record>(
    vm: &mut dyn Vm<Binary>,
    aes: &mut ZkAesCtr,
    records: impl IntoIterator<Item = &'record mut Record>,
) -> Result<RecordProof, RecordProofError> {
    let mut ciphertexts = Vec::new();
    for record in records {
        // Refuse to commit the same record twice.
        if record.plaintext_ref.is_some() {
            return Err(ErrorRepr::PlaintextRefAlreadySet.into());
        }

        // Allocate VM references for an AES-CTR encryption of this
        // record's plaintext under its explicit nonce.
        let (plaintext_ref, ciphertext_ref) = aes
            .encrypt(vm, record.explicit_nonce.clone(), record.ciphertext.len())
            .map_err(ErrorRepr::Aes)?;

        record.plaintext_ref = Some(plaintext_ref);

        // Only on the prover side is the concrete plaintext assigned into
        // the VM; it must be present on the record.
        if let Role::Prover = aes.role() {
            let Some(plaintext) = record.plaintext.clone() else {
                return Err(ErrorRepr::MissingPlaintext.into());
            };

            vm.assign(plaintext_ref, plaintext)
                .map_err(RecordProofError::vm)?;
        }
        vm.commit(plaintext_ref).map_err(RecordProofError::vm)?;

        // Decode the VM-computed ciphertext so it can later be compared
        // with the expected ciphertext (see `RecordProof::verify`).
        let ciphertext = vm.decode(ciphertext_ref).map_err(RecordProofError::vm)?;

        ciphertexts.push((ciphertext, record.ciphertext.clone()));
    }

    Ok(RecordProof { ciphertexts })
}
/// Proof of encryption.
///
/// Holds, for each committed record, a future resolving to the VM-computed
/// ciphertext together with the ciphertext observed in the transcript.
#[derive(Debug)]
#[must_use]
#[allow(clippy::type_complexity)]
pub struct RecordProof {
    // (decoded ciphertext future, expected ciphertext) per record.
    ciphertexts: Vec<(DecodeFutureTyped<BitVec, Vec<u8>>, Vec<u8>)>,
}
impl RecordProof {
    /// Verifies the proof, checking that every decoded ciphertext matches
    /// the ciphertext observed in the transcript.
    pub fn verify(self) -> Result<(), RecordProofError> {
        for (mut fut, expected) in self.ciphertexts {
            let actual = fut
                .try_recv()
                .map_err(RecordProofError::vm)?
                .ok_or(ErrorRepr::NotDecoded)?;

            if actual != expected {
                return Err(ErrorRepr::InvalidCiphertext.into());
            }
        }

        Ok(())
    }
}
/// Error for [`RecordProof`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct RecordProofError(#[from] ErrorRepr);

impl RecordProofError {
    /// Wraps an arbitrary VM error.
    fn vm<E>(err: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync + 'static>>,
    {
        Self(ErrorRepr::Vm(err.into()))
    }
}

// Internal error representation, kept private so variants can evolve
// without breaking the public error type.
#[derive(Debug, thiserror::Error)]
#[error("record proof error: {0}")]
enum ErrorRepr {
    #[error("VM error: {0}")]
    Vm(Box<dyn std::error::Error + Send + Sync + 'static>),
    #[error("zk aes error: {0}")]
    Aes(ZkAesCtrError),
    #[error("plaintext is missing")]
    MissingPlaintext,
    #[error("plaintext reference is already set")]
    PlaintextRefAlreadySet,
    #[error("ciphertext was not decoded")]
    NotDecoded,
    #[error("ciphertext does not match expected")]
    InvalidCiphertext,
}

View File

@@ -0,0 +1,197 @@
//! Plaintext hash commitments.
use std::collections::HashMap;
use mpz_core::bitvec::BitVec;
use mpz_hash::sha256::Sha256;
use mpz_memory_core::{
binary::{Binary, U8},
DecodeFutureTyped, MemoryExt, Vector,
};
use mpz_vm_core::{prelude::*, Vm, VmError};
use tlsn_core::{
hash::{Blinder, Hash, HashAlgId, TypedHash},
transcript::{
hash::{PlaintextHash, PlaintextHashSecret},
Direction, Idx,
},
};
use crate::{transcript::TranscriptRefs, Role};
/// Future which will resolve to the committed hash values.
#[derive(Debug)]
pub struct HashCommitFuture {
    // One pending decode per commitment: the transcript location
    // (direction + index), the hash algorithm, and the hash bytes future.
    #[allow(clippy::type_complexity)]
    futs: Vec<(
        Direction,
        Idx,
        HashAlgId,
        DecodeFutureTyped<BitVec, Vec<u8>>,
    )>,
}
impl HashCommitFuture {
    /// Tries to receive the value, returning an error if the value is not
    /// ready.
    pub fn try_recv(self) -> Result<Vec<PlaintextHash>, HashCommitError> {
        self.futs
            .into_iter()
            .map(|(direction, idx, alg, mut fut)| {
                let bytes = fut
                    .try_recv()
                    .map_err(|_| HashCommitError::decode())?
                    .ok_or_else(HashCommitError::decode)?;

                let value = Hash::try_from(bytes).map_err(HashCommitError::convert)?;

                Ok(PlaintextHash {
                    direction,
                    idx,
                    hash: TypedHash { alg, value },
                })
            })
            .collect()
    }
}
/// Prove plaintext hash commitments.
///
/// For each index, samples a random blinder, assigns and commits it in the
/// VM, and schedules decoding of the resulting hash.
///
/// Returns a future resolving to the committed hashes, together with the
/// secrets (blinders) the prover must retain to later open the commitments.
pub fn prove_hash(
    vm: &mut dyn Vm<Binary>,
    refs: &TranscriptRefs,
    idxs: impl IntoIterator<Item = (Direction, Idx, HashAlgId)>,
) -> Result<(HashCommitFuture, Vec<PlaintextHashSecret>), HashCommitError> {
    let mut futs = Vec::new();
    let mut secrets = Vec::new();
    for (direction, idx, alg, hash_ref, blinder_ref) in
        hash_commit_inner(vm, Role::Prover, refs, idxs)?
    {
        // Fresh random blinder per commitment; it is assigned before the
        // commit so the commitment binds its value.
        let blinder: Blinder = rand::random();

        vm.assign(blinder_ref, blinder.as_bytes().to_vec())?;
        vm.commit(blinder_ref)?;

        let hash_fut = vm.decode(Vector::<U8>::from(hash_ref))?;
        futs.push((direction, idx.clone(), alg, hash_fut));
        secrets.push(PlaintextHashSecret {
            direction,
            idx,
            blinder,
            alg,
        });
    }

    Ok((HashCommitFuture { futs }, secrets))
}
/// Verify plaintext hash commitments.
///
/// Mirrors [`prove_hash`] from the verifier's side: the blinder is committed
/// blindly and only the hash is scheduled for decoding.
pub fn verify_hash(
    vm: &mut dyn Vm<Binary>,
    refs: &TranscriptRefs,
    idxs: impl IntoIterator<Item = (Direction, Idx, HashAlgId)>,
) -> Result<HashCommitFuture, HashCommitError> {
    let commitments = hash_commit_inner(vm, Role::Verifier, refs, idxs)?;

    let mut futs = Vec::with_capacity(commitments.len());
    for (direction, idx, alg, hash_ref, blinder_ref) in commitments {
        // The verifier does not know the blinder value; it only commits it.
        vm.commit(blinder_ref)?;
        let hash_fut = vm.decode(Vector::<U8>::from(hash_ref))?;
        futs.push((direction, idx, alg, hash_fut));
    }

    Ok(HashCommitFuture { futs })
}
/// Commit plaintext hashes of the transcript.
///
/// Allocates a 16-byte blinder for each index (private to the prover, blind
/// to the verifier) and computes `hash(plaintext || blinder)` in the VM.
///
/// Returns, per index, the transcript location, algorithm, the hash output
/// reference and the blinder reference.
#[allow(clippy::type_complexity)]
fn hash_commit_inner(
    vm: &mut dyn Vm<Binary>,
    role: Role,
    refs: &TranscriptRefs,
    idxs: impl IntoIterator<Item = (Direction, Idx, HashAlgId)>,
) -> Result<Vec<(Direction, Idx, HashAlgId, Array<U8, 32>, Vector<U8>)>, HashCommitError> {
    let mut output = Vec::new();
    // Hasher state is cached per algorithm so the (expensive) initialization
    // circuit is only built once.
    let mut hashers = HashMap::new();
    for (direction, idx, alg) in idxs {
        let blinder = vm.alloc_vec::<U8>(16)?;
        match role {
            Role::Prover => vm.mark_private(blinder)?,
            Role::Verifier => vm.mark_blind(blinder)?,
        }

        let hash = match alg {
            HashAlgId::SHA256 => {
                let mut hasher = if let Some(hasher) = hashers.get(&alg).cloned() {
                    hasher
                } else {
                    let hasher = Sha256::new_with_init(vm).map_err(HashCommitError::hasher)?;
                    hashers.insert(alg, hasher.clone());
                    hasher
                };

                // NOTE(review): panics if `idx` is out of bounds of the
                // transcript refs — callers are expected to validate the
                // index beforehand; confirm at call sites.
                for plaintext in refs.get(direction, &idx).expect("plaintext refs are valid") {
                    hasher.update(&plaintext);
                }
                // The blinder is appended last, after all plaintext chunks.
                hasher.update(&blinder);
                hasher.finalize(vm).map_err(HashCommitError::hasher)?
            }
            alg => {
                return Err(HashCommitError::unsupported_alg(alg));
            }
        };

        output.push((direction, idx, alg, hash, blinder));
    }

    Ok(output)
}
/// Error type for hash commitments.
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct HashCommitError(#[from] ErrorRepr);

impl HashCommitError {
    /// A hash value failed to decode or was not yet available.
    fn decode() -> Self {
        Self(ErrorRepr::Decode)
    }

    /// Decoded bytes could not be converted into a [`Hash`].
    fn convert(e: &'static str) -> Self {
        Self(ErrorRepr::Convert(e))
    }

    /// Wraps an error from the underlying hasher implementation.
    fn hasher<E>(e: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        Self(ErrorRepr::Hasher(e.into()))
    }

    /// The requested hash algorithm is not supported.
    fn unsupported_alg(alg: HashAlgId) -> Self {
        Self(ErrorRepr::UnsupportedAlg { alg })
    }
}

// Internal error representation, kept private so variants can evolve
// without breaking the public error type.
#[derive(Debug, thiserror::Error)]
#[error("hash commit error: {0}")]
enum ErrorRepr {
    #[error("VM error: {0}")]
    Vm(VmError),
    #[error("failed to decode hash")]
    Decode,
    #[error("failed to convert hash: {0}")]
    Convert(&'static str),
    #[error("unsupported hash algorithm: {alg}")]
    UnsupportedAlg { alg: HashAlgId },
    #[error("hasher error: {0}")]
    Hasher(Box<dyn std::error::Error + Send + Sync>),
}

impl From<VmError> for HashCommitError {
    fn from(value: VmError) -> Self {
        Self(ErrorRepr::Vm(value))
    }
}

View File

@@ -5,16 +5,12 @@ use semver::Version;
use serde::{Deserialize, Serialize};
use std::error::Error;
use crate::Role;
// Extra cushion room, eg. for sharing J0 blocks.
const EXTRA_OTS: usize = 16384;
const OTS_PER_BYTE_SENT: usize = 8;
// Without deferred decryption we use 16, with it we use 8.
const OTS_PER_BYTE_RECV_ONLINE: usize = 16;
const OTS_PER_BYTE_RECV_DEFER: usize = 8;
// Default is 32 bytes to decrypt the TLS protocol messages.
const DEFAULT_MAX_RECV_ONLINE: usize = 32;
// Default maximum number of TLS records to allow.
//
// This would allow for up to 50Mb upload from prover to verifier.
const DEFAULT_RECORDS_LIMIT: usize = 256;
// Current version that is running.
static VERSION: Lazy<Version> = Lazy::new(|| {
@@ -29,12 +25,26 @@ static VERSION: Lazy<Version> = Lazy::new(|| {
pub struct ProtocolConfig {
/// Maximum number of bytes that can be sent.
max_sent_data: usize,
/// Maximum number of application data records that can be sent.
#[builder(setter(strip_option), default)]
max_sent_records: Option<usize>,
/// Maximum number of bytes that can be decrypted online, i.e. while the
/// MPC-TLS connection is active.
#[builder(default = "0")]
#[builder(default = "DEFAULT_MAX_RECV_ONLINE")]
max_recv_data_online: usize,
/// Maximum number of bytes that can be received.
max_recv_data: usize,
/// Maximum number of received application data records that can be
/// decrypted online, i.e. while the MPC-TLS connection is active.
#[builder(setter(strip_option), default)]
max_recv_records_online: Option<usize>,
/// Whether the `deferred decryption` feature is toggled on from the start
/// of the MPC-TLS connection.
#[builder(default = "true")]
defer_decryption_from_start: bool,
/// Network settings.
#[builder(default)]
network: NetworkSetting,
/// Version that is being run by prover/verifier.
#[builder(setter(skip), default = "VERSION.clone()")]
version: Version,
@@ -62,6 +72,12 @@ impl ProtocolConfig {
self.max_sent_data
}
/// Returns the maximum number of application data records that can
/// be sent.
pub fn max_sent_records(&self) -> Option<usize> {
self.max_sent_records
}
/// Returns the maximum number of bytes that can be decrypted online.
pub fn max_recv_data_online(&self) -> usize {
self.max_recv_data_online
@@ -72,24 +88,21 @@ impl ProtocolConfig {
self.max_recv_data
}
/// Returns OT sender setup count.
pub fn ot_sender_setup_count(&self, role: Role) -> usize {
ot_send_estimate(
role,
self.max_sent_data,
self.max_recv_data_online,
self.max_recv_data,
)
/// Returns the maximum number of received application data records that
/// can be decrypted online.
pub fn max_recv_records_online(&self) -> Option<usize> {
self.max_recv_records_online
}
/// Returns OT receiver setup count.
pub fn ot_receiver_setup_count(&self, role: Role) -> usize {
ot_recv_estimate(
role,
self.max_sent_data,
self.max_recv_data_online,
self.max_recv_data,
)
/// Returns whether the `deferred decryption` feature is toggled on from the
/// start of the MPC-TLS connection.
pub fn defer_decryption_from_start(&self) -> bool {
self.defer_decryption_from_start
}
/// Returns the network settings.
pub fn network(&self) -> NetworkSetting {
self.network
}
}
@@ -99,8 +112,14 @@ impl ProtocolConfig {
pub struct ProtocolConfigValidator {
/// Maximum number of bytes that can be sent.
max_sent_data: usize,
/// Maximum number of application data records that can be sent.
#[builder(default = "DEFAULT_RECORDS_LIMIT")]
max_sent_records: usize,
/// Maximum number of bytes that can be received.
max_recv_data: usize,
/// Maximum number of application data records that can be received online.
#[builder(default = "DEFAULT_RECORDS_LIMIT")]
max_recv_records_online: usize,
/// Version that is being run by checker.
#[builder(setter(skip), default = "VERSION.clone()")]
version: Version,
@@ -117,15 +136,28 @@ impl ProtocolConfigValidator {
self.max_sent_data
}
/// Returns the maximum number of application data records that can
/// be sent.
pub fn max_sent_records(&self) -> usize {
self.max_sent_records
}
/// Returns the maximum number of bytes that can be received.
pub fn max_recv_data(&self) -> usize {
self.max_recv_data
}
/// Returns the maximum number of application data records that can
/// be received online.
pub fn max_recv_records_online(&self) -> usize {
self.max_recv_records_online
}
/// Performs compatibility check of the protocol configuration between
/// prover and verifier.
pub fn validate(&self, config: &ProtocolConfig) -> Result<(), ProtocolConfigError> {
self.check_max_transcript_size(config.max_sent_data, config.max_recv_data)?;
self.check_max_records(config.max_sent_records, config.max_recv_records_online)?;
self.check_version(&config.version)?;
Ok(())
}
@@ -153,6 +185,32 @@ impl ProtocolConfigValidator {
Ok(())
}
// Checks that the peer's record limits do not exceed the locally configured
// limits. A `None` limit from the peer is accepted as-is.
fn check_max_records(
    &self,
    max_sent_records: Option<usize>,
    max_recv_records_online: Option<usize>,
) -> Result<(), ProtocolConfigError> {
    if let Some(max_sent_records) = max_sent_records {
        if max_sent_records > self.max_sent_records {
            return Err(ProtocolConfigError::max_record_count(format!(
                "max_sent_records {} is greater than the configured limit {}",
                max_sent_records, self.max_sent_records,
            )));
        }
    }

    if let Some(max_recv_records_online) = max_recv_records_online {
        if max_recv_records_online > self.max_recv_records_online {
            return Err(ProtocolConfigError::max_record_count(format!(
                "max_recv_records_online {} is greater than the configured limit {}",
                max_recv_records_online, self.max_recv_records_online,
            )));
        }
    }

    Ok(())
}
// Checks if both versions are the same (might support check for different but
// compatible versions in the future).
fn check_version(&self, peer_version: &Version) -> Result<(), ProtocolConfigError> {
@@ -167,6 +225,24 @@ impl ProtocolConfigValidator {
}
}
/// Settings for the network environment.
///
/// Provides optimization options to adapt the protocol to different network
/// situations.
#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)]
pub enum NetworkSetting {
    /// Prefers a bandwidth-heavy protocol.
    ///
    /// This is the default.
    #[default]
    Bandwidth,
    /// Prefers a latency-heavy protocol.
    Latency,
}
/// A ProtocolConfig error.
#[derive(thiserror::Error, Debug)]
pub struct ProtocolConfigError {
@@ -193,6 +269,13 @@ impl ProtocolConfigError {
}
}
fn max_record_count(msg: impl Into<String>) -> Self {
Self {
kind: ErrorKind::MaxRecordCount,
source: Some(msg.into().into()),
}
}
fn version(msg: impl Into<String>) -> Self {
Self {
kind: ErrorKind::Version,
@@ -204,7 +287,8 @@ impl ProtocolConfigError {
impl fmt::Display for ProtocolConfigError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.kind {
ErrorKind::MaxTranscriptSize => write!(f, "max transcript size error")?,
ErrorKind::MaxTranscriptSize => write!(f, "max transcript size exceeded")?,
ErrorKind::MaxRecordCount => write!(f, "max record count exceeded")?,
ErrorKind::Version => write!(f, "version error")?,
}
@@ -219,45 +303,10 @@ impl fmt::Display for ProtocolConfigError {
#[derive(Debug)]
enum ErrorKind {
MaxTranscriptSize,
MaxRecordCount,
Version,
}
/// Returns an estimate of the number of OTs that will be sent.
///
/// # Arguments
///
/// * `role` - The party's role.
/// * `max_sent_data` - Maximum number of bytes that can be sent.
/// * `max_recv_data_online` - Maximum number of bytes decrypted online.
/// * `max_recv_data` - Maximum number of bytes that can be received.
pub fn ot_send_estimate(
    role: Role,
    max_sent_data: usize,
    max_recv_data_online: usize,
    max_recv_data: usize,
) -> usize {
    match role {
        // The prover only sends the extra cushion of OTs.
        Role::Prover => EXTRA_OTS,
        Role::Verifier => {
            EXTRA_OTS
                + (max_sent_data * OTS_PER_BYTE_SENT)
                + (max_recv_data_online * OTS_PER_BYTE_RECV_ONLINE)
                // saturating_sub guards against a misconfiguration where
                // `max_recv_data_online > max_recv_data`, which would
                // otherwise underflow (panicking in debug builds).
                + (max_recv_data.saturating_sub(max_recv_data_online) * OTS_PER_BYTE_RECV_DEFER)
        }
    }
}
/// Returns an estimate of the number of OTs that will be received.
///
/// # Arguments
///
/// * `role` - The party's role.
/// * `max_sent_data` - Maximum number of bytes that can be sent.
/// * `max_recv_data_online` - Maximum number of bytes decrypted online.
/// * `max_recv_data` - Maximum number of bytes that can be received.
pub fn ot_recv_estimate(
    role: Role,
    max_sent_data: usize,
    max_recv_data_online: usize,
    max_recv_data: usize,
) -> usize {
    match role {
        Role::Prover => {
            EXTRA_OTS
                + (max_sent_data * OTS_PER_BYTE_SENT)
                + (max_recv_data_online * OTS_PER_BYTE_RECV_ONLINE)
                // saturating_sub guards against a misconfiguration where
                // `max_recv_data_online > max_recv_data`, which would
                // otherwise underflow (panicking in debug builds).
                + (max_recv_data.saturating_sub(max_recv_data_online) * OTS_PER_BYTE_RECV_DEFER)
        }
        // The verifier only receives the extra cushion of OTs.
        Role::Verifier => EXTRA_OTS,
    }
}
#[cfg(test)]
mod test {
use super::*;

View File

@@ -0,0 +1,21 @@
//! Execution context.
use mpz_common::context::Multithread;
use crate::mux::MuxControl;
/// Maximum concurrency for multi-threaded context.
pub const MAX_CONCURRENCY: usize = 8;

/// Builds a multi-threaded context with the given muxer.
///
/// # Panics
///
/// Panics if the context builder fails to build.
pub fn build_mt_context(mux: MuxControl) -> Multithread {
    let builder = Multithread::builder().mux(mux).concurrency(MAX_CONCURRENCY);

    // On wasm32 native threads are unavailable, so spawning is delegated to
    // `web_spawn`. The rebinding shadows `builder` only on that target.
    #[cfg(target_arch = "wasm32")]
    let builder = builder.spawn_handler(|f| {
        let _ = web_spawn::spawn(f);
        Ok(())
    });

    builder.build().unwrap()
}

View File

@@ -0,0 +1,238 @@
//! Encoding commitment protocol.
use std::ops::Range;
use mpz_common::Context;
use mpz_memory_core::{
binary::U8,
correlated::{Delta, Key, Mac},
Vector,
};
use rand::Rng;
use serde::{Deserialize, Serialize};
use serio::{stream::IoStreamExt, SinkExt};
use tlsn_core::{
hash::HashAlgorithm,
transcript::{
encoding::{
new_encoder, Encoder, EncoderSecret, EncodingCommitment, EncodingProvider,
EncodingProviderError, EncodingTree, EncodingTreeError,
},
Direction, Idx,
},
};
use crate::transcript::TranscriptRefs;
/// Bytes of encoding, per byte.
const ENCODING_SIZE: usize = 128;

// Wire message carrying the one-time-padded encodings for both directions.
#[derive(Debug, Serialize, Deserialize)]
struct Encodings {
    // Encodings of the sent plaintext.
    sent: Vec<u8>,
    // Encodings of the received plaintext.
    recv: Vec<u8>,
}
/// Transfers the encodings using the provided seed and keys.
///
/// The keys must be consistent with the global delta used in the encodings.
///
/// # Arguments
///
/// * `ctx` - Execution context used to communicate with the peer.
/// * `refs` - References to the transcript plaintext in the VM.
/// * `delta` - Global correlation delta.
/// * `f` - Returns the MAC keys for a plaintext reference.
pub async fn transfer<'a>(
    ctx: &mut Context,
    refs: &TranscriptRefs,
    delta: &Delta,
    f: impl Fn(Vector<U8>) -> &'a [Key],
) -> Result<EncodingCommitment, EncodingError> {
    // Fresh random seed per transfer; delta ties the encodings to the MACs.
    let secret = EncoderSecret::new(rand::rng().random(), delta.as_block().to_bytes());
    let encoder = new_encoder(&secret);

    // Flatten the per-byte keys into contiguous byte strings.
    let sent_keys: Vec<u8> = refs
        .sent()
        .iter()
        .copied()
        .flat_map(&f)
        .flat_map(|key| key.as_block().as_bytes())
        .copied()
        .collect();

    let recv_keys: Vec<u8> = refs
        .recv()
        .iter()
        .copied()
        .flat_map(&f)
        .flat_map(|key| key.as_block().as_bytes())
        .copied()
        .collect();

    // Key material must cover whole plaintext bytes.
    assert_eq!(sent_keys.len() % ENCODING_SIZE, 0);
    assert_eq!(recv_keys.len() % ENCODING_SIZE, 0);

    let mut sent_encoding = Vec::with_capacity(sent_keys.len());
    let mut recv_encoding = Vec::with_capacity(recv_keys.len());

    encoder.encode_range(
        Direction::Sent,
        0..sent_keys.len() / ENCODING_SIZE,
        &mut sent_encoding,
    );
    encoder.encode_range(
        Direction::Received,
        0..recv_keys.len() / ENCODING_SIZE,
        &mut recv_encoding,
    );

    // One-time-pad the encodings with the keys so the receiver can strip
    // the pad using its MACs.
    sent_encoding
        .iter_mut()
        .zip(sent_keys)
        .for_each(|(enc, key)| *enc ^= key);
    recv_encoding
        .iter_mut()
        .zip(recv_keys)
        .for_each(|(enc, key)| *enc ^= key);

    ctx.io_mut()
        .send(Encodings {
            sent: sent_encoding,
            recv: recv_encoding,
        })
        .await?;

    // The peer responds with the Merkle root of its encoding tree, after
    // which the secret is revealed.
    let root = ctx.io_mut().expect_next().await?;

    ctx.io_mut().send(secret.clone()).await?;

    // `secret` is moved here instead of cloned a second time.
    Ok(EncodingCommitment { root, secret })
}
/// Receives the encodings using the provided MACs.
///
/// The MACs must be consistent with the global delta used in the encodings.
///
/// # Arguments
///
/// * `ctx` - Execution context used to communicate with the peer.
/// * `hasher` - Hash algorithm for the encoding tree.
/// * `refs` - References to the transcript plaintext in the VM.
/// * `f` - Returns the MACs for a plaintext reference.
/// * `idxs` - Transcript indices to include in the encoding tree.
pub async fn receive<'a>(
    ctx: &mut Context,
    hasher: &(dyn HashAlgorithm + Send + Sync),
    refs: &TranscriptRefs,
    f: impl Fn(Vector<U8>) -> &'a [Mac],
    idxs: impl IntoIterator<Item = &(Direction, Idx)>,
) -> Result<(EncodingCommitment, EncodingTree), EncodingError> {
    let Encodings { mut sent, mut recv } = ctx.io_mut().expect_next().await?;

    // Flatten the per-byte MACs into contiguous byte strings.
    let sent_macs: Vec<u8> = refs
        .sent()
        .iter()
        .copied()
        .flat_map(&f)
        .flat_map(|mac| mac.as_bytes())
        .copied()
        .collect();

    let recv_macs: Vec<u8> = refs
        .recv()
        .iter()
        .copied()
        .flat_map(&f)
        .flat_map(|mac| mac.as_bytes())
        .copied()
        .collect();

    // MAC material must cover whole plaintext bytes.
    assert_eq!(sent_macs.len() % ENCODING_SIZE, 0);
    assert_eq!(recv_macs.len() % ENCODING_SIZE, 0);

    // The peer must have sent exactly one pad byte per MAC byte.
    if sent.len() != sent_macs.len() {
        return Err(ErrorRepr::IncorrectMacCount {
            direction: Direction::Sent,
            expected: sent_macs.len(),
            got: sent.len(),
        }
        .into());
    }

    if recv.len() != recv_macs.len() {
        return Err(ErrorRepr::IncorrectMacCount {
            direction: Direction::Received,
            expected: recv_macs.len(),
            got: recv.len(),
        }
        .into());
    }

    // Strip the one-time pad using the MACs, recovering the encodings.
    sent.iter_mut()
        .zip(sent_macs)
        .for_each(|(enc, mac)| *enc ^= mac);
    recv.iter_mut()
        .zip(recv_macs)
        .for_each(|(enc, mac)| *enc ^= mac);

    let provider = Provider { sent, recv };
    let tree = EncodingTree::new(hasher, idxs, &provider)?;
    let root = tree.root();

    // Send the tree root, then receive the encoder secret the peer reveals.
    ctx.io_mut().send(root.clone()).await?;

    let secret = ctx.io_mut().expect_next().await?;

    let commitment = EncodingCommitment { root, secret };

    Ok((commitment, tree))
}
// Holds the recovered encodings and serves them to the encoding tree.
#[derive(Debug)]
struct Provider {
    // Encodings of the sent plaintext.
    sent: Vec<u8>,
    // Encodings of the received plaintext.
    recv: Vec<u8>,
}
impl EncodingProvider for Provider {
    /// Appends the encodings for `range` (in plaintext byte offsets) of the
    /// given direction to `dest`.
    ///
    /// # Errors
    ///
    /// Returns an error if the range is out of bounds of the encodings.
    fn provide_encoding(
        &self,
        direction: Direction,
        range: Range<usize>,
        dest: &mut Vec<u8>,
    ) -> Result<(), EncodingProviderError> {
        let encodings = match direction {
            Direction::Sent => &self.sent,
            Direction::Received => &self.recv,
        };

        // Scale from plaintext byte offsets to encoding byte offsets.
        let start = range.start * ENCODING_SIZE;
        let end = range.end * ENCODING_SIZE;

        // `get` rejects out-of-bounds AND inverted ranges, where direct
        // indexing would panic on an inverted range.
        let encodings = encodings.get(start..end).ok_or(EncodingProviderError)?;
        dest.extend_from_slice(encodings);

        Ok(())
    }
}
/// Encoding protocol error.
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct EncodingError(#[from] ErrorRepr);

// Internal error representation, kept private so variants can evolve
// without breaking the public error type.
#[derive(Debug, thiserror::Error)]
#[error("encoding protocol error: {0}")]
enum ErrorRepr {
    #[error("I/O error: {0}")]
    Io(std::io::Error),
    #[error("incorrect MAC count for {direction}: expected {expected}, got {got}")]
    IncorrectMacCount {
        direction: Direction,
        expected: usize,
        got: usize,
    },
    #[error("encoding tree error: {0}")]
    EncodingTree(EncodingTreeError),
}

impl From<std::io::Error> for EncodingError {
    fn from(value: std::io::Error) -> Self {
        Self(ErrorRepr::Io(value))
    }
}

impl From<EncodingTreeError> for EncodingError {
    fn from(value: EncodingTreeError) -> Self {
        Self(ErrorRepr::EncodingTree(value))
    }
}

View File

@@ -0,0 +1,39 @@
//! GHASH methods.
// This module belongs in tls/core. It was moved out here temporarily.
use ghash::{
universal_hash::{KeyInit, UniversalHash as UniversalHashReference},
GHash,
};
/// Computes a GHASH tag over the AAD and ciphertext with the given key.
pub fn ghash(aad: &[u8], ciphertext: &[u8], key: &[u8; 16]) -> [u8; 16] {
    let padded = build_ghash_data(aad.to_vec(), ciphertext.to_vec());

    let mut hasher = GHash::new(key.into());
    hasher.update_padded(&padded);

    hasher.finalize().into()
}
/// Builds padded data for GHASH.
///
/// Layout: `aad` zero-padded to a 16-byte boundary, then `ciphertext`
/// zero-padded to a 16-byte boundary, then a 16-byte length block holding
/// the bit length of the unpadded AAD (high 64 bits) and of the unpadded
/// ciphertext (low 64 bits), big-endian.
pub fn build_ghash_data(mut aad: Vec<u8>, mut ciphertext: Vec<u8>) -> Vec<u8> {
    // Bit lengths are taken before padding.
    let associated_data_bitlen = (aad.len() as u64) * 8;
    let text_bitlen = (ciphertext.len() as u64) * 8;

    let len_block = ((associated_data_bitlen as u128) << 64) + (text_bitlen as u128);

    // Zero-pad both segments to a multiple of the 16-byte GHASH block size.
    aad.resize(aad.len().div_ceil(16) * 16, 0);
    ciphertext.resize(ciphertext.len().div_ceil(16) * 16, 0);

    let mut data: Vec<u8> = Vec::with_capacity(aad.len() + ciphertext.len() + 16);
    data.extend(aad);
    data.extend(ciphertext);
    data.extend_from_slice(&len_block.to_be_bytes());

    data
}

View File

@@ -4,34 +4,21 @@
#![deny(clippy::all)]
#![forbid(unsafe_code)]
pub mod commit;
pub mod config;
pub mod context;
pub mod encoding;
pub mod ghash;
pub mod msg;
pub mod mux;
use serio::codec::Codec;
use crate::mux::MuxControl;
/// IO type.
pub type Io = <serio::codec::Bincode as Codec<uid_mux::yamux::Stream>>::Framed;
/// Base OT sender.
pub type BaseOTSender = mpz_ot::chou_orlandi::Sender;
/// Base OT receiver.
pub type BaseOTReceiver = mpz_ot::chou_orlandi::Receiver;
/// OT sender.
pub type OTSender = mpz_ot::kos::SharedSender<BaseOTReceiver>;
/// OT receiver.
pub type OTReceiver = mpz_ot::kos::SharedReceiver<BaseOTSender>;
/// MPC executor.
pub type Executor = mpz_common::executor::MTExecutor<MuxControl>;
/// MPC thread context.
pub type Context = mpz_common::executor::MTContext<MuxControl, Io>;
/// DEAP thread.
pub type DEAPThread = mpz_garble::protocol::deap::DEAPThread<Context, OTSender, OTReceiver>;
pub mod tag;
pub mod transcript;
pub mod zk_aes_ctr;
/// The party's role in the TLSN protocol.
///
/// A Notary is classified as a Verifier.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Role {
/// The prover.
Prover,

View File

@@ -6,16 +6,15 @@ use futures::{
future::{FusedFuture, FutureExt},
AsyncRead, AsyncWrite, Future,
};
use serio::codec::Bincode;
use tracing::error;
use uid_mux::{yamux, FramedMux};
use uid_mux::yamux;
use crate::Role;
/// Multiplexer supporting unique deterministic stream IDs.
pub type Mux<Io> = yamux::Yamux<Io>;
/// Multiplexer controller providing streams with a codec attached.
pub type MuxControl = FramedMux<yamux::YamuxCtrl, Bincode>;
/// Multiplexer controller providing streams.
pub type MuxControl = yamux::YamuxCtrl;
/// Multiplexer future which must be polled for the muxer to make progress.
pub struct MuxFuture(
@@ -73,7 +72,7 @@ pub fn attach_mux<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
role: Role,
) -> (MuxFuture, MuxControl) {
let mut mux_config = yamux::Config::default();
mux_config.set_max_num_streams(64);
mux_config.set_max_num_streams(36);
let mux_role = match role {
Role::Prover => yamux::Mode::Client,
@@ -81,10 +80,10 @@ pub fn attach_mux<T: AsyncWrite + AsyncRead + Send + Unpin + 'static>(
};
let mux = Mux::new(socket, mux_config, mux_role);
let ctrl = FramedMux::new(mux.control(), Bincode);
let ctrl = mux.control();
if let Role::Prover = role {
ctrl.mux().alloc(64);
ctrl.alloc(32);
}
(MuxFuture(Box::new(mux.into_future().fuse())), ctrl)

157
crates/common/src/tag.rs Normal file
View File

@@ -0,0 +1,157 @@
//! TLS record tag verification.
use crate::{ghash::ghash, transcript::Record};
use cipher::{aes::Aes128, Cipher};
use mpz_core::bitvec::BitVec;
use mpz_memory_core::{
binary::{Binary, U8},
DecodeFutureTyped,
};
use mpz_vm_core::{prelude::*, Vm};
use tls_core::cipher::make_tls12_aad;
/// Proves the verification of tags of the given `records`,
/// returning a proof.
///
/// # Arguments
///
/// * `vm` - Virtual machine.
/// * `key_iv` - Cipher key and IV.
/// * `mac_key` - MAC key.
/// * `records` - Records for which the verification is to be proven.
///
/// # Errors
///
/// Returns an error if a record's explicit nonce is not 8 bytes or if a VM
/// operation fails.
pub fn verify_tags(
    vm: &mut dyn Vm<Binary>,
    key_iv: (Array<U8, 16>, Array<U8, 4>),
    mac_key: Array<U8, 16>,
    records: Vec<Record>,
) -> Result<TagProof, TagProofError> {
    let mut aes = Aes128::default();
    aes.set_key(key_iv.0);
    aes.set_iv(key_iv.1);

    // Compute j0 blocks.
    let j0s = records
        .iter()
        .map(|rec| {
            let block = aes.alloc_ctr_block(vm).map_err(TagProofError::vm)?;

            // The explicit nonce must be exactly 8 bytes.
            let explicit_nonce: [u8; 8] =
                rec.explicit_nonce
                    .clone()
                    .try_into()
                    .map_err(|explicit_nonce: Vec<_>| ErrorRepr::ExplicitNonceLength {
                        expected: 8,
                        actual: explicit_nonce.len(),
                    })?;

            vm.assign(block.explicit_nonce, explicit_nonce)
                .map_err(TagProofError::vm)?;
            vm.commit(block.explicit_nonce).map_err(TagProofError::vm)?;

            // j0's counter is set to 1.
            vm.assign(block.counter, 1u32.to_be_bytes())
                .map_err(TagProofError::vm)?;
            vm.commit(block.counter).map_err(TagProofError::vm)?;

            // Decode the block output so it is available at verification.
            let j0 = vm.decode(block.output).map_err(TagProofError::vm)?;

            Ok(j0)
        })
        .collect::<Result<Vec<_>, TagProofError>>()?;

    // The MAC key is decoded so the tags can be recomputed locally.
    let mac_key = vm.decode(mac_key).map_err(TagProofError::vm)?;

    Ok(TagProof {
        j0s,
        records,
        mac_key,
    })
}
/// Proof of tag verification.
#[derive(Debug)]
#[must_use]
pub struct TagProof {
    /// The j0 block for each record.
    j0s: Vec<DecodeFutureTyped<BitVec, [u8; 16]>>,
    /// The records whose tags are being verified.
    records: Vec<Record>,
    /// The MAC key for tag computation.
    mac_key: DecodeFutureTyped<BitVec, [u8; 16]>,
}
impl TagProof {
    /// Verifies the proof, recomputing each record's tag and comparing it
    /// against the tag observed on the wire.
    pub fn verify(self) -> Result<(), TagProofError> {
        let Self {
            j0s,
            mut mac_key,
            records,
        } = self;

        let mac_key = mac_key
            .try_recv()
            .map_err(TagProofError::vm)?
            .ok_or(ErrorRepr::NotDecoded)?;

        for (mut j0_fut, record) in j0s.into_iter().zip(records) {
            let j0 = j0_fut
                .try_recv()
                .map_err(TagProofError::vm)?
                .ok_or(ErrorRepr::NotDecoded)?;

            let aad = make_tls12_aad(record.seq, record.typ, record.version, record.ciphertext.len());
            let ghash_tag = ghash(aad.as_ref(), &record.ciphertext, &mac_key);

            // This is unreachable in practice, since we only call this
            // method for proofs where the records' tags are known.
            let Some(record_tag) = record.tag.as_ref() else {
                return Err(ErrorRepr::UnknownTag.into());
            };

            // The expected tag is the GHASH output XORed with the j0 block.
            // Iterator equality also rejects length mismatches.
            let expected = ghash_tag.iter().zip(&j0).map(|(a, b)| a ^ b);
            if !record_tag.iter().copied().eq(expected) {
                return Err(ErrorRepr::InvalidTag.into());
            }
        }

        Ok(())
    }
}
/// Error for [`TagProof`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct TagProofError(#[from] ErrorRepr);

impl TagProofError {
    /// Wraps an arbitrary VM error.
    fn vm<E>(err: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync + 'static>>,
    {
        Self(ErrorRepr::Vm(err.into()))
    }
}

// Internal error representation, kept private so variants can evolve
// without breaking the public error type.
#[derive(Debug, thiserror::Error)]
#[error("j0 proof error: {0}")]
enum ErrorRepr {
    #[error("value was not decoded")]
    NotDecoded,
    #[error("VM error: {0}")]
    Vm(Box<dyn std::error::Error + Send + Sync + 'static>),
    #[error("tag does not match expected")]
    InvalidTag,
    #[error("tag is not known")]
    UnknownTag,
    #[error("invalid explicit nonce length: expected {expected}, got {actual}")]
    ExplicitNonceLength { expected: usize, actual: usize },
}

View File

@@ -0,0 +1,315 @@
//! TLS transcript.
use mpz_memory_core::{
binary::{Binary, U8},
MemoryExt, Vector,
};
use mpz_vm_core::{Vm, VmError};
use rangeset::Intersection;
use tls_core::msgs::enums::{ContentType, ProtocolVersion};
use tlsn_core::transcript::{Direction, Idx, PartialTranscript, Transcript};
/// A transcript of TLS records sent and received by the prover.
#[derive(Debug, Default, Clone)]
pub struct TlsTranscript {
    /// Sent records.
    pub sent: Vec<Record>,
    /// Received records.
    pub recv: Vec<Record>,
}
impl TlsTranscript {
/// Returns the application data transcript.
pub fn to_transcript(&self) -> Result<Transcript, TlsTranscriptError> {
let mut sent = Vec::new();
let mut recv = Vec::new();
for record in self
.sent
.iter()
.filter(|record| record.typ == ContentType::ApplicationData)
{
let plaintext = record
.plaintext
.as_ref()
.ok_or(ErrorRepr::IncompleteTranscript {})?
.clone();
sent.extend_from_slice(&plaintext);
}
for record in self
.recv
.iter()
.filter(|record| record.typ == ContentType::ApplicationData)
{
let plaintext = record
.plaintext
.as_ref()
.ok_or(ErrorRepr::IncompleteTranscript {})?
.clone();
recv.extend_from_slice(&plaintext);
}
Ok(Transcript::new(sent, recv))
}
/// Returns the application data transcript references.
pub fn to_transcript_refs(&self) -> Result<TranscriptRefs, TlsTranscriptError> {
let mut sent = Vec::new();
let mut recv = Vec::new();
for record in self
.sent
.iter()
.filter(|record| record.typ == ContentType::ApplicationData)
{
let plaintext_ref = record
.plaintext_ref
.as_ref()
.ok_or(ErrorRepr::IncompleteTranscript {})?;
sent.push(*plaintext_ref);
}
for record in self
.recv
.iter()
.filter(|record| record.typ == ContentType::ApplicationData)
{
let plaintext_ref = record
.plaintext_ref
.as_ref()
.ok_or(ErrorRepr::IncompleteTranscript {})?;
recv.push(*plaintext_ref);
}
Ok(TranscriptRefs { sent, recv })
}
}
/// A TLS record.
#[derive(Clone)]
pub struct Record {
    /// Sequence number.
    pub seq: u64,
    /// Content type.
    pub typ: ContentType,
    /// Plaintext. `None` if not known to this party.
    pub plaintext: Option<Vec<u8>>,
    /// VM reference to the plaintext. Set once the record is committed.
    pub plaintext_ref: Option<Vector<U8>>,
    /// Explicit nonce.
    pub explicit_nonce: Vec<u8>,
    /// Ciphertext.
    pub ciphertext: Vec<u8>,
    /// Tag. `None` if not known.
    pub tag: Option<Vec<u8>>,
    /// Version.
    pub version: ProtocolVersion,
}

// Opaque Debug so record contents (plaintext) are not leaked into logs.
opaque_debug::implement!(Record);
/// References to the application plaintext in the transcript.
#[derive(Debug, Default, Clone)]
pub struct TranscriptRefs {
    // Plaintext references for sent records, in transcript order.
    sent: Vec<Vector<U8>>,
    // Plaintext references for received records, in transcript order.
    recv: Vec<Vector<U8>>,
}
impl TranscriptRefs {
    /// Returns the sent plaintext references.
    pub fn sent(&self) -> &[Vector<U8>] {
        &self.sent
    }

    /// Returns the received plaintext references.
    pub fn recv(&self) -> &[Vector<U8>] {
        &self.recv
    }

    /// Returns VM references for the given direction and index, otherwise
    /// `None` if the index is out of bounds.
    ///
    /// An index range may span several underlying record slices, in which
    /// case it is returned split across multiple references.
    pub fn get(&self, direction: Direction, idx: &Idx) -> Option<Vec<Vector<U8>>> {
        if idx.is_empty() {
            return Some(Vec::new());
        }

        let refs = match direction {
            Direction::Sent => &self.sent,
            Direction::Received => &self.recv,
        };

        // Computes the transcript range for each reference.
        let mut start = 0;
        let mut slice_iter = refs.iter().map(move |slice| {
            let out = (slice, start..start + slice.len());
            start += slice.len();
            out
        });

        // Walks the slices and index ranges in lockstep; both are in
        // ascending transcript order.
        let mut slices = Vec::new();
        let (mut slice, mut slice_range) = slice_iter.next()?;
        for range in idx.iter_ranges() {
            loop {
                if let Some(intersection) = slice_range.intersection(&range) {
                    // Translate from transcript offsets to slice offsets.
                    let start = intersection.start - slice_range.start;
                    let end = intersection.end - slice_range.start;
                    slices.push(slice.get(start..end).expect("range should be in bounds"));
                }

                // Proceed to next range if the current slice extends beyond. Otherwise, proceed
                // to the next slice.
                if range.end <= slice_range.end {
                    break;
                } else {
                    (slice, slice_range) = slice_iter.next()?;
                }
            }
        }

        Some(slices)
    }
}
/// Error for [`TlsTranscript`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct TlsTranscriptError(#[from] ErrorRepr);

// Internal error representation, kept private so variants can evolve
// without breaking the public error type.
#[derive(Debug, thiserror::Error)]
#[error("TLS transcript error")]
enum ErrorRepr {
    #[error("not all application plaintext was committed to in the TLS transcript")]
    IncompleteTranscript {},
}
/// Decodes the transcript.
///
/// Schedules decoding of every referenced plaintext slice; the decode
/// futures themselves are discarded.
pub fn decode_transcript(
    vm: &mut dyn Vm<Binary>,
    sent: &Idx,
    recv: &Idx,
    refs: &TranscriptRefs,
) -> Result<(), VmError> {
    let sent_slices = refs.get(Direction::Sent, sent).expect("index is in bounds");
    let recv_slices = refs
        .get(Direction::Received, recv)
        .expect("index is in bounds");

    sent_slices
        .into_iter()
        .chain(recv_slices)
        // Drop the future, we don't need it.
        .try_for_each(|slice| vm.decode(slice).map(drop))
}
/// Verifies a partial transcript.
///
/// Compares the authenticated plaintext decoded from the VM against the
/// purported values in the partial transcript.
///
/// # Panics
///
/// Panics if the transcript's authenticated indices are out of bounds of
/// `refs`, or if the referenced plaintext has not been decoded yet
/// (callers are expected to run [`decode_transcript`] first — confirm at
/// call sites).
pub fn verify_transcript(
    vm: &mut dyn Vm<Binary>,
    transcript: &PartialTranscript,
    refs: &TranscriptRefs,
) -> Result<(), InconsistentTranscript> {
    let sent_refs = refs
        .get(Direction::Sent, transcript.sent_authed())
        .expect("index is in bounds");
    let recv_refs = refs
        .get(Direction::Received, transcript.received_authed())
        .expect("index is in bounds");

    // Collect the plaintext the VM actually proved.
    let mut authenticated_data = Vec::new();
    for data in sent_refs.into_iter().chain(recv_refs) {
        let plaintext = vm
            .get(data)
            .expect("reference is valid")
            .expect("plaintext is decoded");
        authenticated_data.extend_from_slice(&plaintext);
    }

    // Collect the corresponding bytes the transcript claims, in the same
    // order (sent ranges first, then received ranges).
    let mut purported_data = Vec::with_capacity(authenticated_data.len());
    for range in transcript.sent_authed().iter_ranges() {
        purported_data.extend_from_slice(&transcript.sent_unsafe()[range]);
    }

    for range in transcript.received_authed().iter_ranges() {
        purported_data.extend_from_slice(&transcript.received_unsafe()[range]);
    }

    if purported_data != authenticated_data {
        return Err(InconsistentTranscript {});
    }

    Ok(())
}
/// Error for [`verify_transcript`].
///
/// Returned when the purported transcript bytes do not match the plaintext
/// authenticated by the VM.
#[derive(Debug, thiserror::Error)]
#[error("inconsistent transcript")]
pub struct InconsistentTranscript {}
#[cfg(test)]
mod tests {
    use super::TranscriptRefs;
    use mpz_memory_core::{binary::U8, FromRaw, Slice, Vector};
    use rangeset::RangeSet;
    use std::ops::Range;
    use tlsn_core::transcript::{Direction, Idx};

    // TRANSCRIPT_REFS:
    //
    // 48..96   -> 6 slots
    // 112..176 -> 8 slots
    // 240..288 -> 6 slots
    // 352..392 -> 5 slots
    // 440..480 -> 5 slots
    //
    // Each "slot" is an 8-byte unit; the ranges below are raw VM addresses.
    const TRANSCRIPT_REFS: &[Range<usize>] = &[48..96, 112..176, 240..288, 352..392, 440..480];

    // Slot-level index set queried against the refs above.
    const IDXS: &[Range<usize>] = &[0..4, 5..10, 14..16, 16..28];

    // 1. Take slots 0..4,   4 slots  -> 48..80 (4)
    // 2. Take slots 5..10,  5 slots  -> 88..96 (1) + 112..144 (4)
    // 3. Take slots 14..16, 2 slots  -> 240..256 (2)
    // 4. Take slots 16..28, 12 slots -> 256..288 (4) + 352..392 (5) + 440..464 (3)
    //
    // 5. Merge slots 240..256 and 256..288 => 240..288 and get EXPECTED_REFS
    const EXPECTED_REFS: &[Range<usize>] =
        &[48..80, 88..96, 112..144, 240..288, 352..392, 440..464];

    /// `get` should map the index onto the correct sub-slices of the VM
    /// references; per EXPECTED_REFS, adjacent results are expected merged
    /// (240..256 + 256..288 -> 240..288) — presumably done inside `get`.
    #[test]
    fn test_transcript_refs_get() {
        let transcript_refs: Vec<Vector<U8>> = TRANSCRIPT_REFS
            .iter()
            .cloned()
            .map(|range| Vector::from_raw(Slice::from_range_unchecked(range)))
            .collect();
        // Same refs for both directions; only `Direction::Sent` is queried.
        let transcript_refs = TranscriptRefs {
            sent: transcript_refs.clone(),
            recv: transcript_refs,
        };
        let vm_refs = transcript_refs
            .get(Direction::Sent, &idx_fixture())
            .unwrap();
        let expected_refs: Vec<Vector<U8>> = EXPECTED_REFS
            .iter()
            .cloned()
            .map(|range| Vector::from_raw(Slice::from_range_unchecked(range)))
            .collect();
        assert_eq!(
            vm_refs.len(),
            expected_refs.len(),
            "Length of actual and expected refs are not equal"
        );
        for (&expected, actual) in expected_refs.iter().zip(vm_refs) {
            assert_eq!(expected, actual);
        }
    }

    /// Builds the index fixture from `IDXS`.
    fn idx_fixture() -> Idx {
        let set = RangeSet::from(IDXS);
        Idx::builder().union(&set).build()
    }
}

View File

@@ -0,0 +1,210 @@
//! Zero-knowledge AES-CTR encryption.
use cipher::{
aes::{Aes128, AesError},
Cipher, CipherError, Keystream,
};
use mpz_memory_core::{
binary::{Binary, U8},
Array, Vector,
};
use mpz_vm_core::{prelude::*, Vm};
use crate::Role;
/// Explicit nonce (8 bytes).
type Nonce = Array<U8, 8>;
/// Counter (4 bytes, big-endian when assigned).
type Ctr = Array<U8, 4>;
/// AES block (16 bytes).
type Block = Array<U8, 16>;

/// First counter value used for plaintext blocks.
// NOTE(review): counters below 2 are presumably reserved by the record
// protocol (e.g. for the tag) — confirm against the TLS AES-GCM layout.
const START_CTR: u32 = 2;

/// ZK AES-CTR encryption.
#[derive(Debug)]
pub struct ZkAesCtr {
    /// Protocol role (prover or verifier).
    role: Role,
    /// AES-128 cipher used to build the keystream.
    aes: Aes128,
    /// Allocation/encryption state machine.
    state: State,
}
impl ZkAesCtr {
    /// Creates a new ZK AES-CTR instance.
    pub fn new(role: Role) -> Self {
        Self {
            role,
            aes: Aes128::default(),
            state: State::Init,
        }
    }

    /// Returns the role.
    pub fn role(&self) -> &Role {
        &self.role
    }

    /// Allocates `len` bytes for encryption.
    ///
    /// Must be called exactly once, before [`Self::encrypt`]. Preallocates the
    /// plaintext input vector and the keystream in the VM. The plaintext is
    /// marked private for the prover and blind for the verifier.
    pub fn alloc(&mut self, vm: &mut dyn Vm<Binary>, len: usize) -> Result<(), ZkAesCtrError> {
        // `take` poisons the state to `Error`; it only becomes `Ready` again
        // if everything below succeeds.
        let State::Init = self.state.take() else {
            Err(ErrorRepr::State {
                reason: "must be in init state to allocate",
            })?
        };

        // Round up to the nearest block size.
        let len = 16 * len.div_ceil(16);

        let input = vm.alloc_vec::<U8>(len).map_err(ZkAesCtrError::vm)?;
        let keystream = self.aes.alloc_keystream(vm, len)?;

        // Only the prover knows the plaintext; the verifier sees it blind.
        match self.role {
            Role::Prover => vm.mark_private(input).map_err(ZkAesCtrError::vm)?,
            Role::Verifier => vm.mark_blind(input).map_err(ZkAesCtrError::vm)?,
        }

        self.state = State::Ready { input, keystream };

        Ok(())
    }

    /// Sets the key and IV for the cipher.
    pub fn set_key(&mut self, key: Array<U8, 16>, iv: Array<U8, 4>) {
        self.aes.set_key(key);
        self.aes.set_iv(iv);
    }

    /// Proves the encryption of `len` bytes.
    ///
    /// Here we only assign certain values in the VM but the actual proving
    /// happens later when the plaintext is assigned and the VM is executed.
    ///
    /// # Arguments
    ///
    /// * `vm` - Virtual machine.
    /// * `explicit_nonce` - Explicit nonce.
    /// * `len` - Length of the plaintext in bytes.
    ///
    /// # Returns
    ///
    /// A VM reference to the plaintext and the ciphertext.
    pub fn encrypt(
        &mut self,
        vm: &mut dyn Vm<Binary>,
        explicit_nonce: Vec<u8>,
        len: usize,
    ) -> Result<(Vector<U8>, Vector<U8>), ZkAesCtrError> {
        let State::Ready { input, keystream } = &mut self.state else {
            Err(ErrorRepr::State {
                reason: "must be in ready state to encrypt",
            })?
        };

        // The explicit nonce must be exactly 8 bytes.
        let explicit_nonce: [u8; 8] =
            explicit_nonce
                .try_into()
                .map_err(|explicit_nonce: Vec<_>| ErrorRepr::ExplicitNonceLength {
                    expected: 8,
                    actual: explicit_nonce.len(),
                })?;

        let block_count = len.div_ceil(16);
        let padded_len = block_count * 16;
        let padding_len = padded_len - len;

        if padded_len > input.len() {
            Err(ErrorRepr::InsufficientPreprocessing {
                expected: padded_len,
                actual: input.len(),
            })?
        }

        // Consumes `padded_len` bytes from the *back* of the preallocated
        // input buffer, leaving the front for subsequent calls.
        let mut input = input.split_off(input.len() - padded_len);
        let keystream = keystream.consume(padded_len)?;
        let mut output = keystream.apply(vm, input)?;

        // Assign counter block inputs: the nonce plus a big-endian counter
        // starting at START_CTR, incremented per block.
        let mut ctr = START_CTR..;
        keystream.assign(vm, explicit_nonce, move || {
            ctr.next().expect("range is unbounded").to_be_bytes()
        })?;

        // Assign zeroes to the padding (the tail of the padded region).
        if padding_len > 0 {
            let padding = input.split_off(input.len() - padding_len);
            // To simplify the impl, we don't mark the padding as public, that's why only
            // the prover assigns it.
            if let Role::Prover = self.role {
                vm.assign(padding, vec![0; padding_len])
                    .map_err(ZkAesCtrError::vm)?;
            }
            vm.commit(padding).map_err(ZkAesCtrError::vm)?;

            // Trim both refs back to the caller-visible length.
            output.truncate(len);
        }

        Ok((input, output))
    }
}
/// Cipher state machine.
enum State {
    /// Not yet allocated.
    Init,
    /// Preprocessing complete; ready to encrypt.
    Ready {
        input: Vector<U8>,
        keystream: Keystream<Nonce, Ctr, Block>,
    },
    /// Poisoned after a failed state transition.
    Error,
}

impl State {
    /// Moves the current state out, leaving `State::Error` behind.
    fn take(&mut self) -> Self {
        let mut taken = State::Error;
        std::mem::swap(self, &mut taken);
        taken
    }
}

impl std::fmt::Debug for State {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            State::Init => "Init",
            State::Ready { .. } => "Ready",
            State::Error => "Error",
        };
        f.write_str(name)
    }
}
/// Error for [`ZkAesCtr`].
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub struct ZkAesCtrError(#[from] ErrorRepr);

impl ZkAesCtrError {
    /// Wraps an arbitrary VM error into a `ZkAesCtrError`.
    fn vm<E>(err: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync + 'static>>,
    {
        Self(ErrorRepr::Vm(err.into()))
    }
}

/// Internal error representation for [`ZkAesCtrError`].
#[derive(Debug, thiserror::Error)]
#[error("zk aes error")]
enum ErrorRepr {
    /// Operation attempted in the wrong state (see `State`).
    #[error("invalid state: {reason}")]
    State { reason: &'static str },
    /// Error surfaced by the underlying cipher.
    #[error("cipher error: {0}")]
    Cipher(Box<dyn std::error::Error + Send + Sync + 'static>),
    /// Error surfaced by the VM.
    #[error("vm error: {0}")]
    Vm(Box<dyn std::error::Error + Send + Sync + 'static>),
    /// The explicit nonce was not the expected length.
    #[error("invalid explicit nonce length: expected {expected}, got {actual}")]
    ExplicitNonceLength { expected: usize, actual: usize },
    /// More bytes were requested than were preallocated via `alloc`.
    #[error("insufficient preprocessing: expected {expected}, got {actual}")]
    InsufficientPreprocessing { expected: usize, actual: usize },
}

impl From<AesError> for ZkAesCtrError {
    fn from(err: AesError) -> Self {
        Self(ErrorRepr::Cipher(Box::new(err)))
    }
}

impl From<CipherError> for ZkAesCtrError {
    fn from(err: CipherError) -> Self {
        Self(ErrorRepr::Cipher(Box::new(err)))
    }
}

View File

@@ -1,41 +0,0 @@
[package]
name = "tlsn-aead"
authors = ["TLSNotary Team"]
description = "This crate provides an implementation of a two-party version of AES-GCM behind an AEAD trait"
keywords = ["tls", "mpc", "2pc", "aead", "aes", "aes-gcm"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.7"
edition = "2021"

[lib]
# Library target is exposed as `aead` rather than `tlsn_aead`.
name = "aead"

[features]
default = ["mock"]
# `mock` pulls in test utilities and the ideal OT implementation.
mock = ["mpz-common/test-utils", "dep:mpz-ot"]

[dependencies]
tlsn-block-cipher = { workspace = true }
tlsn-stream-cipher = { workspace = true }
tlsn-universal-hash = { workspace = true }
# All mpz-* crates are pinned to the same git revision; keep them in sync.
mpz-common = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-garble = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-ot = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac", optional = true, features = [
    "ideal",
] }
serio = { workspace = true }
async-trait = { workspace = true }
derive_builder = { workspace = true }
futures = { workspace = true }
serde = { workspace = true }
thiserror = { workspace = true }
tracing = { workspace = true }

[dev-dependencies]
tokio = { version = "1", features = ["macros", "rt", "rt-multi-thread"] }
aes-gcm = { workspace = true }

View File

@@ -1,36 +0,0 @@
use derive_builder::Builder;
/// Protocol role.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[allow(missing_docs)]
pub enum Role {
    Leader,
    Follower,
}

/// Configuration for AES-GCM.
#[derive(Debug, Clone, Builder)]
pub struct AesGcmConfig {
    /// The id of this instance.
    #[builder(setter(into))]
    id: String,
    /// The protocol role.
    role: Role,
}

impl AesGcmConfig {
    /// Creates a new builder for the AES-GCM configuration.
    pub fn builder() -> AesGcmConfigBuilder {
        AesGcmConfigBuilder::default()
    }

    /// Returns the id of this instance.
    pub fn id(&self) -> &str {
        &self.id
    }

    /// Returns the protocol role.
    pub fn role(&self) -> &Role {
        &self.role
    }
}

View File

@@ -1,102 +0,0 @@
use std::fmt::Display;
/// AES-GCM error.
///
/// Carries an [`ErrorKind`] classifying the failure plus an optional
/// underlying source error. `Display` is implemented manually below.
#[derive(Debug, thiserror::Error)]
pub struct AesGcmError {
    /// Classification of the failure.
    kind: ErrorKind,
    /// Underlying cause, when one exists.
    #[source]
    source: Option<Box<dyn std::error::Error + Send + Sync>>,
}
impl AesGcmError {
    /// Creates an error of `kind` wrapping `source`.
    pub(crate) fn new<E>(kind: ErrorKind, source: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        Self {
            kind,
            source: Some(source.into()),
        }
    }

    /// Returns the error kind (exposed for tests only).
    #[cfg(test)]
    pub(crate) fn kind(&self) -> ErrorKind {
        self.kind
    }

    /// Error for a corrupted authentication tag (no underlying source).
    pub(crate) fn invalid_tag() -> Self {
        Self {
            kind: ErrorKind::Tag,
            source: None,
        }
    }

    /// Error for peer misbehavior, with a human-readable reason.
    pub(crate) fn peer(reason: impl Into<String>) -> Self {
        Self::new(ErrorKind::PeerMisbehaved, reason.into())
    }

    /// Error for a malformed payload, with a human-readable reason.
    pub(crate) fn payload(reason: impl Into<String>) -> Self {
        Self::new(ErrorKind::Payload, reason.into())
    }
}
/// Classification of an [`AesGcmError`]; display text lives in the manual
/// `Display` impl.
#[derive(Debug, Clone, Copy, PartialEq)]
pub(crate) enum ErrorKind {
    Io,
    BlockCipher,
    StreamCipher,
    Ghash,
    Tag,
    PeerMisbehaved,
    Payload,
}
impl Display for AesGcmError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self.kind {
ErrorKind::Io => write!(f, "io error")?,
ErrorKind::BlockCipher => write!(f, "block cipher error")?,
ErrorKind::StreamCipher => write!(f, "stream cipher error")?,
ErrorKind::Ghash => write!(f, "ghash error")?,
ErrorKind::Tag => write!(f, "payload has corrupted tag")?,
ErrorKind::PeerMisbehaved => write!(f, "peer misbehaved")?,
ErrorKind::Payload => write!(f, "payload error")?,
}
if let Some(source) = &self.source {
write!(f, " caused by: {}", source)?;
}
Ok(())
}
}
// Conversions from sub-protocol errors, each mapped to its matching kind.

impl From<std::io::Error> for AesGcmError {
    fn from(err: std::io::Error) -> Self {
        Self::new(ErrorKind::Io, err)
    }
}

impl From<block_cipher::BlockCipherError> for AesGcmError {
    fn from(err: block_cipher::BlockCipherError) -> Self {
        Self::new(ErrorKind::BlockCipher, err)
    }
}

impl From<tlsn_stream_cipher::StreamCipherError> for AesGcmError {
    fn from(err: tlsn_stream_cipher::StreamCipherError) -> Self {
        Self::new(ErrorKind::StreamCipher, err)
    }
}

impl From<tlsn_universal_hash::UniversalHashError> for AesGcmError {
    fn from(err: tlsn_universal_hash::UniversalHashError) -> Self {
        Self::new(ErrorKind::Ghash, err)
    }
}

View File

@@ -1,96 +0,0 @@
//! Mock implementation of AES-GCM for testing purposes.
use block_cipher::{BlockCipherConfig, MpcBlockCipher};
use mpz_common::executor::{test_st_executor, STExecutor};
use mpz_garble::protocol::deap::mock::{MockFollower, MockLeader};
use mpz_ot::ideal::ot::ideal_ot;
use serio::channel::MemoryDuplex;
use tlsn_stream_cipher::{MpcStreamCipher, StreamCipherConfig};
use tlsn_universal_hash::ghash::ideal_ghash;
use super::*;
/// Creates a mock AES-GCM pair.
///
/// # Arguments
///
/// * `id` - The id of the AES-GCM instances.
/// * `(leader, follower)` - The leader and follower vms.
/// * `leader_config` - The configuration of the leader.
/// * `follower_config` - The configuration of the follower.
pub async fn create_mock_aes_gcm_pair(
    id: &str,
    (leader, follower): (MockLeader, MockFollower),
    leader_config: AesGcmConfig,
    follower_config: AesGcmConfig,
) -> (
    MpcAesGcm<STExecutor<MemoryDuplex>>,
    MpcAesGcm<STExecutor<MemoryDuplex>>,
) {
    let block_cipher_id = format!("{}/block_cipher", id);

    // Paired single-threaded executors plus ideal OT channels (one sender /
    // receiver pair in each direction) for the block cipher threads.
    let (ctx_leader, ctx_follower) = test_st_executor(128);
    let (leader_ot_send, follower_ot_recv) = ideal_ot();
    let (follower_ot_send, leader_ot_recv) = ideal_ot();
    let block_leader = leader
        .new_thread(ctx_leader, leader_ot_send, leader_ot_recv)
        .unwrap();
    let block_follower = follower
        .new_thread(ctx_follower, follower_ot_send, follower_ot_recv)
        .unwrap();

    // Both block ciphers share the same id so they pair up.
    let leader_block_cipher = MpcBlockCipher::new(
        BlockCipherConfig::builder()
            .id(block_cipher_id.clone())
            .build()
            .unwrap(),
        block_leader,
    );
    let follower_block_cipher = MpcBlockCipher::new(
        BlockCipherConfig::builder()
            .id(block_cipher_id.clone())
            .build()
            .unwrap(),
        block_follower,
    );

    let stream_cipher_id = format!("{}/stream_cipher", id);
    let leader_stream_cipher = MpcStreamCipher::new(
        StreamCipherConfig::builder()
            .id(stream_cipher_id.clone())
            .build()
            .unwrap(),
        leader,
    );
    let follower_stream_cipher = MpcStreamCipher::new(
        StreamCipherConfig::builder()
            .id(stream_cipher_id.clone())
            .build()
            .unwrap(),
        follower,
    );

    // One executor pair for the ideal GHASH, a fresh pair for the AEADs.
    let (ctx_a, ctx_b) = test_st_executor(128);
    let (leader_ghash, follower_ghash) = ideal_ghash(ctx_a, ctx_b);
    let (ctx_a, ctx_b) = test_st_executor(128);

    let leader = MpcAesGcm::new(
        leader_config,
        ctx_a,
        Box::new(leader_block_cipher),
        Box::new(leader_stream_cipher),
        Box::new(leader_ghash),
    );
    let follower = MpcAesGcm::new(
        follower_config,
        ctx_b,
        Box::new(follower_block_cipher),
        Box::new(follower_stream_cipher),
        Box::new(follower_ghash),
    );

    (leader, follower)
}

View File

@@ -1,712 +0,0 @@
//! This module provides an implementation of 2PC AES-GCM.
mod config;
mod error;
#[cfg(feature = "mock")]
pub mod mock;
mod tag;
pub use config::{AesGcmConfig, AesGcmConfigBuilder, AesGcmConfigBuilderError, Role};
pub use error::AesGcmError;
use async_trait::async_trait;
use block_cipher::{Aes128, BlockCipher};
use futures::TryFutureExt;
use mpz_common::Context;
use mpz_garble::value::ValueRef;
use tlsn_stream_cipher::{Aes128Ctr, StreamCipher};
use tlsn_universal_hash::UniversalHash;
use tracing::instrument;
use crate::{
aes_gcm::tag::{compute_tag, verify_tag, TAG_LEN},
Aead,
};
/// MPC AES-GCM.
pub struct MpcAesGcm<Ctx> {
    /// Instance configuration (id and role).
    config: AesGcmConfig,
    /// I/O context used for exchanging tag shares.
    ctx: Ctx,
    /// Block cipher used to derive the GHASH key block.
    aes_block: Box<dyn BlockCipher<Aes128>>,
    /// Stream cipher used for the payload keystream.
    aes_ctr: Box<dyn StreamCipher<Aes128Ctr>>,
    /// Universal hash used for tag computation.
    ghash: Box<dyn UniversalHash>,
}

impl<Ctx> std::fmt::Debug for MpcAesGcm<Ctx> {
    // Only the config is printed; the trait objects carry no useful Debug.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("MpcAesGcm")
            .field("config", &self.config)
            .finish()
    }
}

impl<Ctx: Context> MpcAesGcm<Ctx> {
    /// Creates a new instance of [`MpcAesGcm`].
    pub fn new(
        config: AesGcmConfig,
        context: Ctx,
        aes_block: Box<dyn BlockCipher<Aes128>>,
        aes_ctr: Box<dyn StreamCipher<Aes128Ctr>>,
        ghash: Box<dyn UniversalHash>,
    ) -> Self {
        Self {
            config,
            ctx: context,
            aes_block,
            aes_ctr,
            ghash,
        }
    }
}
#[async_trait]
impl<Ctx: Context> Aead for MpcAesGcm<Ctx> {
type Error = AesGcmError;
#[instrument(level = "info", skip_all, err)]
async fn set_key(&mut self, key: ValueRef, iv: ValueRef) -> Result<(), AesGcmError> {
self.aes_block.set_key(key.clone());
self.aes_ctr.set_key(key, iv);
Ok(())
}
#[instrument(level = "info", skip_all, err)]
async fn decode_key_private(&mut self) -> Result<(), AesGcmError> {
self.aes_ctr
.decode_key_private()
.await
.map_err(AesGcmError::from)
}
#[instrument(level = "info", skip_all, err)]
async fn decode_key_blind(&mut self) -> Result<(), AesGcmError> {
self.aes_ctr
.decode_key_blind()
.await
.map_err(AesGcmError::from)
}
fn set_transcript_id(&mut self, id: &str) {
self.aes_ctr.set_transcript_id(id)
}
#[instrument(level = "debug", skip(self), err)]
async fn setup(&mut self) -> Result<(), AesGcmError> {
self.ghash.setup().await?;
Ok(())
}
#[instrument(level = "debug", skip(self), err)]
async fn preprocess(&mut self, len: usize) -> Result<(), AesGcmError> {
futures::try_join!(
// Preprocess the GHASH key block.
self.aes_block
.preprocess(block_cipher::Visibility::Public, 1)
.map_err(AesGcmError::from),
self.aes_ctr.preprocess(len).map_err(AesGcmError::from),
self.ghash.preprocess().map_err(AesGcmError::from),
)?;
Ok(())
}
#[instrument(level = "debug", skip_all, err)]
async fn start(&mut self) -> Result<(), AesGcmError> {
let h_share = self.aes_block.encrypt_share(vec![0u8; 16]).await?;
self.ghash.set_key(h_share).await?;
Ok(())
}
#[instrument(level = "debug", skip_all, err)]
async fn encrypt_public(
&mut self,
explicit_nonce: Vec<u8>,
plaintext: Vec<u8>,
aad: Vec<u8>,
) -> Result<Vec<u8>, AesGcmError> {
let ciphertext = self
.aes_ctr
.encrypt_public(explicit_nonce.clone(), plaintext)
.await?;
let tag = compute_tag(
&mut self.ctx,
self.aes_ctr.as_mut(),
self.ghash.as_mut(),
explicit_nonce,
ciphertext.clone(),
aad,
)
.await?;
let mut payload = ciphertext;
payload.extend(tag);
Ok(payload)
}
#[instrument(level = "debug", skip_all, err)]
async fn encrypt_private(
&mut self,
explicit_nonce: Vec<u8>,
plaintext: Vec<u8>,
aad: Vec<u8>,
) -> Result<Vec<u8>, AesGcmError> {
let ciphertext = self
.aes_ctr
.encrypt_private(explicit_nonce.clone(), plaintext)
.await?;
let tag = compute_tag(
&mut self.ctx,
self.aes_ctr.as_mut(),
self.ghash.as_mut(),
explicit_nonce,
ciphertext.clone(),
aad,
)
.await?;
let mut payload = ciphertext;
payload.extend(tag);
Ok(payload)
}
#[instrument(level = "debug", skip_all, err)]
async fn encrypt_blind(
&mut self,
explicit_nonce: Vec<u8>,
plaintext_len: usize,
aad: Vec<u8>,
) -> Result<Vec<u8>, AesGcmError> {
let ciphertext = self
.aes_ctr
.encrypt_blind(explicit_nonce.clone(), plaintext_len)
.await?;
let tag = compute_tag(
&mut self.ctx,
self.aes_ctr.as_mut(),
self.ghash.as_mut(),
explicit_nonce,
ciphertext.clone(),
aad,
)
.await?;
let mut payload = ciphertext;
payload.extend(tag);
Ok(payload)
}
#[instrument(level = "debug", skip_all, err)]
async fn decrypt_public(
&mut self,
explicit_nonce: Vec<u8>,
mut payload: Vec<u8>,
aad: Vec<u8>,
) -> Result<Vec<u8>, AesGcmError> {
let purported_tag: [u8; TAG_LEN] = payload
.split_off(payload.len() - TAG_LEN)
.try_into()
.map_err(|_| AesGcmError::payload("payload is not long enough to contain tag"))?;
let ciphertext = payload;
verify_tag(
&mut self.ctx,
self.aes_ctr.as_mut(),
self.ghash.as_mut(),
*self.config.role(),
explicit_nonce.clone(),
ciphertext.clone(),
aad,
purported_tag,
)
.await?;
let plaintext = self
.aes_ctr
.decrypt_public(explicit_nonce, ciphertext)
.await?;
Ok(plaintext)
}
#[instrument(level = "debug", skip_all, err)]
async fn decrypt_private(
&mut self,
explicit_nonce: Vec<u8>,
mut payload: Vec<u8>,
aad: Vec<u8>,
) -> Result<Vec<u8>, AesGcmError> {
let purported_tag: [u8; TAG_LEN] = payload
.split_off(payload.len() - TAG_LEN)
.try_into()
.map_err(|_| AesGcmError::payload("payload is not long enough to contain tag"))?;
let ciphertext = payload;
verify_tag(
&mut self.ctx,
self.aes_ctr.as_mut(),
self.ghash.as_mut(),
*self.config.role(),
explicit_nonce.clone(),
ciphertext.clone(),
aad,
purported_tag,
)
.await?;
let plaintext = self
.aes_ctr
.decrypt_private(explicit_nonce, ciphertext)
.await?;
Ok(plaintext)
}
#[instrument(level = "debug", skip_all, err)]
async fn decrypt_blind(
&mut self,
explicit_nonce: Vec<u8>,
mut payload: Vec<u8>,
aad: Vec<u8>,
) -> Result<(), AesGcmError> {
let purported_tag: [u8; TAG_LEN] = payload
.split_off(payload.len() - TAG_LEN)
.try_into()
.map_err(|_| AesGcmError::payload("payload is not long enough to contain tag"))?;
let ciphertext = payload;
verify_tag(
&mut self.ctx,
self.aes_ctr.as_mut(),
self.ghash.as_mut(),
*self.config.role(),
explicit_nonce.clone(),
ciphertext.clone(),
aad,
purported_tag,
)
.await?;
self.aes_ctr
.decrypt_blind(explicit_nonce, ciphertext)
.await?;
Ok(())
}
#[instrument(level = "debug", skip_all, err)]
async fn verify_tag(
&mut self,
explicit_nonce: Vec<u8>,
mut payload: Vec<u8>,
aad: Vec<u8>,
) -> Result<(), AesGcmError> {
let purported_tag: [u8; TAG_LEN] = payload
.split_off(payload.len() - TAG_LEN)
.try_into()
.map_err(|_| AesGcmError::payload("payload is not long enough to contain tag"))?;
let ciphertext = payload;
verify_tag(
&mut self.ctx,
self.aes_ctr.as_mut(),
self.ghash.as_mut(),
*self.config.role(),
explicit_nonce,
ciphertext,
aad,
purported_tag,
)
.await?;
Ok(())
}
#[instrument(level = "debug", skip_all, err)]
async fn prove_plaintext(
&mut self,
explicit_nonce: Vec<u8>,
mut payload: Vec<u8>,
aad: Vec<u8>,
) -> Result<Vec<u8>, AesGcmError> {
let purported_tag: [u8; TAG_LEN] = payload
.split_off(payload.len() - TAG_LEN)
.try_into()
.map_err(|_| AesGcmError::payload("payload is not long enough to contain tag"))?;
let ciphertext = payload;
verify_tag(
&mut self.ctx,
self.aes_ctr.as_mut(),
self.ghash.as_mut(),
*self.config.role(),
explicit_nonce.clone(),
ciphertext.clone(),
aad,
purported_tag,
)
.await?;
let plaintext = self
.aes_ctr
.prove_plaintext(explicit_nonce, ciphertext)
.await?;
Ok(plaintext)
}
#[instrument(level = "debug", skip_all, err)]
async fn prove_plaintext_no_tag(
&mut self,
explicit_nonce: Vec<u8>,
ciphertext: Vec<u8>,
) -> Result<Vec<u8>, AesGcmError> {
self.aes_ctr
.prove_plaintext(explicit_nonce, ciphertext)
.map_err(AesGcmError::from)
.await
}
#[instrument(level = "debug", skip_all, err)]
async fn verify_plaintext(
&mut self,
explicit_nonce: Vec<u8>,
mut payload: Vec<u8>,
aad: Vec<u8>,
) -> Result<(), AesGcmError> {
let purported_tag: [u8; TAG_LEN] = payload
.split_off(payload.len() - TAG_LEN)
.try_into()
.map_err(|_| AesGcmError::payload("payload is not long enough to contain tag"))?;
let ciphertext = payload;
verify_tag(
&mut self.ctx,
self.aes_ctr.as_mut(),
self.ghash.as_mut(),
*self.config.role(),
explicit_nonce.clone(),
ciphertext.clone(),
aad,
purported_tag,
)
.await?;
self.aes_ctr
.verify_plaintext(explicit_nonce, ciphertext)
.await?;
Ok(())
}
#[instrument(level = "debug", skip_all, err)]
async fn verify_plaintext_no_tag(
&mut self,
explicit_nonce: Vec<u8>,
ciphertext: Vec<u8>,
) -> Result<(), AesGcmError> {
self.aes_ctr
.verify_plaintext(explicit_nonce, ciphertext)
.map_err(AesGcmError::from)
.await
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        aes_gcm::{mock::create_mock_aes_gcm_pair, AesGcmConfigBuilder, Role},
        Aead,
    };
    use ::aes_gcm::{aead::AeadInPlace, Aes128Gcm, NewAead, Nonce};
    use error::ErrorKind;
    use mpz_common::executor::STExecutor;
    use mpz_garble::{protocol::deap::mock::create_mock_deap_vm, Memory};
    use serio::channel::MemoryDuplex;

    /// Reference AES-GCM encryption; `encrypt_in_place` appends the tag, so
    /// the returned buffer is ciphertext || tag.
    fn reference_impl(
        key: &[u8],
        iv: &[u8],
        explicit_nonce: &[u8],
        plaintext: &[u8],
        aad: &[u8],
    ) -> Vec<u8> {
        let cipher = Aes128Gcm::new_from_slice(key).unwrap();
        let nonce = [iv, explicit_nonce].concat();
        let nonce = Nonce::from_slice(nonce.as_slice());

        let mut ciphertext = plaintext.to_vec();
        cipher
            .encrypt_in_place(nonce, aad, &mut ciphertext)
            .unwrap();

        ciphertext
    }

    /// Builds a keyed leader/follower pair backed by mock VMs, with setup
    /// and start already run.
    async fn setup_pair(
        key: Vec<u8>,
        iv: Vec<u8>,
    ) -> (
        MpcAesGcm<STExecutor<MemoryDuplex>>,
        MpcAesGcm<STExecutor<MemoryDuplex>>,
    ) {
        let (leader_vm, follower_vm) = create_mock_deap_vm();

        // Both parties use public key/iv inputs with identical values.
        let leader_key = leader_vm
            .new_public_array_input::<u8>("key", key.len())
            .unwrap();
        let leader_iv = leader_vm
            .new_public_array_input::<u8>("iv", iv.len())
            .unwrap();

        leader_vm.assign(&leader_key, key.clone()).unwrap();
        leader_vm.assign(&leader_iv, iv.clone()).unwrap();

        let follower_key = follower_vm
            .new_public_array_input::<u8>("key", key.len())
            .unwrap();
        let follower_iv = follower_vm
            .new_public_array_input::<u8>("iv", iv.len())
            .unwrap();

        follower_vm.assign(&follower_key, key.clone()).unwrap();
        follower_vm.assign(&follower_iv, iv.clone()).unwrap();

        let leader_config = AesGcmConfigBuilder::default()
            .id("test".to_string())
            .role(Role::Leader)
            .build()
            .unwrap();
        let follower_config = AesGcmConfigBuilder::default()
            .id("test".to_string())
            .role(Role::Follower)
            .build()
            .unwrap();

        let (mut leader, mut follower) = create_mock_aes_gcm_pair(
            "test",
            (leader_vm, follower_vm),
            leader_config,
            follower_config,
        )
        .await;

        futures::try_join!(
            leader.set_key(leader_key, leader_iv),
            follower.set_key(follower_key, follower_iv)
        )
        .unwrap();

        futures::try_join!(leader.setup(), follower.setup()).unwrap();
        futures::try_join!(leader.start(), follower.start()).unwrap();

        (leader, follower)
    }

    #[tokio::test]
    #[ignore = "expensive"]
    async fn test_aes_gcm_encrypt_private() {
        let key = vec![0u8; 16];
        let iv = vec![0u8; 4];
        let explicit_nonce = vec![0u8; 8];
        let plaintext = vec![1u8; 32];
        let aad = vec![2u8; 12];

        let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;

        let (leader_ciphertext, follower_ciphertext) = tokio::try_join!(
            leader.encrypt_private(explicit_nonce.clone(), plaintext.clone(), aad.clone(),),
            follower.encrypt_blind(explicit_nonce.clone(), plaintext.len(), aad.clone())
        )
        .unwrap();

        // Both parties agree and match the pure-software reference.
        assert_eq!(leader_ciphertext, follower_ciphertext);
        assert_eq!(
            leader_ciphertext,
            reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad)
        );
    }

    #[tokio::test]
    #[ignore = "expensive"]
    async fn test_aes_gcm_encrypt_public() {
        let key = vec![0u8; 16];
        let iv = vec![0u8; 4];
        let explicit_nonce = vec![0u8; 8];
        let plaintext = vec![1u8; 32];
        let aad = vec![2u8; 12];

        let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;

        let (leader_ciphertext, follower_ciphertext) = tokio::try_join!(
            leader.encrypt_public(explicit_nonce.clone(), plaintext.clone(), aad.clone(),),
            follower.encrypt_public(explicit_nonce.clone(), plaintext.clone(), aad.clone(),)
        )
        .unwrap();

        assert_eq!(leader_ciphertext, follower_ciphertext);
        assert_eq!(
            leader_ciphertext,
            reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad)
        );
    }

    #[tokio::test]
    #[ignore = "expensive"]
    async fn test_aes_gcm_decrypt_private() {
        let key = vec![0u8; 16];
        let iv = vec![0u8; 4];
        let explicit_nonce = vec![0u8; 8];
        let plaintext = vec![1u8; 32];
        let aad = vec![2u8; 12];
        let ciphertext = reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad);

        let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;

        let (leader_plaintext, _) = tokio::try_join!(
            leader.decrypt_private(explicit_nonce.clone(), ciphertext.clone(), aad.clone(),),
            follower.decrypt_blind(explicit_nonce.clone(), ciphertext, aad.clone(),)
        )
        .unwrap();

        assert_eq!(leader_plaintext, plaintext);
    }

    #[tokio::test]
    #[ignore = "expensive"]
    async fn test_aes_gcm_decrypt_private_bad_tag() {
        let key = vec![0u8; 16];
        let iv = vec![0u8; 4];
        let explicit_nonce = vec![0u8; 8];
        let plaintext = vec![1u8; 32];
        let aad = vec![2u8; 12];
        let ciphertext = reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad);
        let len = ciphertext.len();

        // corrupt tag
        let mut corrupted = ciphertext.clone();
        corrupted[len - 1] -= 1;

        let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;

        // leader receives corrupted tag
        let err = tokio::try_join!(
            leader.decrypt_private(explicit_nonce.clone(), corrupted.clone(), aad.clone(),),
            follower.decrypt_blind(explicit_nonce.clone(), ciphertext.clone(), aad.clone(),)
        )
        .unwrap_err();

        assert_eq!(err.kind(), ErrorKind::Tag);

        let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;

        // follower receives corrupted tag
        let err = tokio::try_join!(
            leader.decrypt_private(explicit_nonce.clone(), ciphertext.clone(), aad.clone(),),
            follower.decrypt_blind(explicit_nonce.clone(), corrupted.clone(), aad.clone(),)
        )
        .unwrap_err();

        assert_eq!(err.kind(), ErrorKind::Tag);
    }

    #[tokio::test]
    #[ignore = "expensive"]
    async fn test_aes_gcm_decrypt_public() {
        let key = vec![0u8; 16];
        let iv = vec![0u8; 4];
        let explicit_nonce = vec![0u8; 8];
        let plaintext = vec![1u8; 32];
        let aad = vec![2u8; 12];
        let ciphertext = reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad);

        let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;

        let (leader_plaintext, follower_plaintext) = tokio::try_join!(
            leader.decrypt_public(explicit_nonce.clone(), ciphertext.clone(), aad.clone(),),
            follower.decrypt_public(explicit_nonce.clone(), ciphertext, aad.clone(),)
        )
        .unwrap();

        assert_eq!(leader_plaintext, plaintext);
        assert_eq!(leader_plaintext, follower_plaintext);
    }

    #[tokio::test]
    #[ignore = "expensive"]
    async fn test_aes_gcm_decrypt_public_bad_tag() {
        let key = vec![0u8; 16];
        let iv = vec![0u8; 4];
        let explicit_nonce = vec![0u8; 8];
        let plaintext = vec![1u8; 32];
        let aad = vec![2u8; 12];
        let ciphertext = reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad);
        let len = ciphertext.len();

        // Corrupt tag.
        let mut corrupted = ciphertext.clone();
        corrupted[len - 1] -= 1;

        let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;

        // Leader receives corrupted tag.
        let err = tokio::try_join!(
            leader.decrypt_public(explicit_nonce.clone(), corrupted.clone(), aad.clone(),),
            follower.decrypt_public(explicit_nonce.clone(), ciphertext.clone(), aad.clone(),)
        )
        .unwrap_err();

        assert_eq!(err.kind(), ErrorKind::Tag);

        let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;

        // Follower receives corrupted tag.
        let err = tokio::try_join!(
            leader.decrypt_public(explicit_nonce.clone(), ciphertext.clone(), aad.clone(),),
            follower.decrypt_public(explicit_nonce.clone(), corrupted.clone(), aad.clone(),)
        )
        .unwrap_err();

        assert_eq!(err.kind(), ErrorKind::Tag);
    }

    #[tokio::test]
    #[ignore = "expensive"]
    async fn test_aes_gcm_verify_tag() {
        let key = vec![0u8; 16];
        let iv = vec![0u8; 4];
        let explicit_nonce = vec![0u8; 8];
        let plaintext = vec![1u8; 32];
        let aad = vec![2u8; 12];
        let ciphertext = reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad);
        let len = ciphertext.len();

        let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;

        tokio::try_join!(
            leader.verify_tag(explicit_nonce.clone(), ciphertext.clone(), aad.clone()),
            follower.verify_tag(explicit_nonce.clone(), ciphertext.clone(), aad.clone())
        )
        .unwrap();

        // Corrupt tag.
        let mut corrupted = ciphertext.clone();
        corrupted[len - 1] -= 1;

        let (leader_res, follower_res) = tokio::join!(
            leader.verify_tag(explicit_nonce.clone(), corrupted.clone(), aad.clone()),
            follower.verify_tag(explicit_nonce.clone(), corrupted, aad.clone())
        );

        assert_eq!(leader_res.unwrap_err().kind(), ErrorKind::Tag);
        assert_eq!(follower_res.unwrap_err().kind(), ErrorKind::Tag);
    }
}

View File

@@ -1,179 +0,0 @@
use futures::TryFutureExt;
use mpz_common::Context;
use mpz_core::{
commit::{Decommitment, HashCommit},
hash::Hash,
};
use serde::{Deserialize, Serialize};
use serio::{stream::IoStreamExt, SinkExt};
use std::ops::Add;
use tlsn_stream_cipher::{Aes128Ctr, StreamCipher};
use tlsn_universal_hash::UniversalHash;
use tracing::instrument;
use crate::aes_gcm::{AesGcmError, Role};
/// Length of the AES-GCM authentication tag in bytes.
pub(crate) const TAG_LEN: usize = 16;

/// One party's additive (XOR) share of the authentication tag.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct TagShare([u8; TAG_LEN]);

impl AsRef<[u8]> for TagShare {
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}

impl Add for TagShare {
    type Output = [u8; TAG_LEN];

    /// Combines two shares into the full tag by bytewise XOR.
    fn add(self, rhs: Self) -> Self::Output {
        core::array::from_fn(|i| self.0[i] ^ rhs.0[i])
    }
}
/// Computes this party's share of the tag: its share of the J0 keystream
/// block XORed with its share of the GHASH over `aad` and `ciphertext`.
#[instrument(level = "trace", skip_all, err)]
async fn compute_tag_share<C: StreamCipher<Aes128Ctr> + ?Sized, H: UniversalHash + ?Sized>(
    aes_ctr: &mut C,
    hasher: &mut H,
    explicit_nonce: Vec<u8>,
    ciphertext: Vec<u8>,
    aad: Vec<u8>,
) -> Result<TagShare, AesGcmError> {
    // J0 block share and GHASH share are computed concurrently.
    let (j0, hash) = futures::try_join!(
        aes_ctr
            .share_keystream_block(explicit_nonce, 1)
            .map_err(AesGcmError::from),
        hasher
            .finalize(build_ghash_data(aad, ciphertext))
            .map_err(AesGcmError::from)
    )?;

    debug_assert!(j0.len() == TAG_LEN);
    debug_assert!(hash.len() == TAG_LEN);

    let tag_share = core::array::from_fn(|i| j0[i] ^ hash[i]);

    Ok(TagShare(tag_share))
}
/// Computes the tag for a ciphertext and additional data.
///
/// The commit-reveal step is not required for computing a tag sent to the
/// Server, as it will be able to detect if the tag is incorrect.
#[instrument(level = "debug", skip_all, err)]
pub(crate) async fn compute_tag<
    Ctx: Context,
    C: StreamCipher<Aes128Ctr> + ?Sized,
    H: UniversalHash + ?Sized,
>(
    ctx: &mut Ctx,
    aes_ctr: &mut C,
    hasher: &mut H,
    explicit_nonce: Vec<u8>,
    ciphertext: Vec<u8>,
    aad: Vec<u8>,
) -> Result<[u8; TAG_LEN], AesGcmError> {
    let tag_share = compute_tag_share(aes_ctr, hasher, explicit_nonce, ciphertext, aad).await?;

    // TODO: The follower doesn't really need to learn the tag,
    // we could reduce some latency by not sending it.

    // Exchange shares and XOR them together to reconstruct the full tag.
    let io = ctx.io_mut();
    io.send(tag_share.clone()).await?;
    let other_tag_share: TagShare = io.expect_next().await?;

    let tag = tag_share + other_tag_share;

    Ok(tag)
}
/// Verifies a purported tag against the ciphertext and additional data.
///
/// Verifying a tag requires a commit-reveal protocol between the leader and
/// follower. Without it, the party which receives the other's tag share first
/// could trivially compute a tag share which would cause an invalid message to
/// be accepted.
#[instrument(level = "debug", skip_all, err)]
#[allow(clippy::too_many_arguments)]
pub(crate) async fn verify_tag<
    Ctx: Context,
    C: StreamCipher<Aes128Ctr> + ?Sized,
    H: UniversalHash + ?Sized,
>(
    ctx: &mut Ctx,
    aes_ctr: &mut C,
    hasher: &mut H,
    role: Role,
    explicit_nonce: Vec<u8>,
    ciphertext: Vec<u8>,
    aad: Vec<u8>,
    purported_tag: [u8; TAG_LEN],
) -> Result<(), AesGcmError> {
    let tag_share = compute_tag_share(aes_ctr, hasher, explicit_nonce, ciphertext, aad).await?;
    let io = ctx.io_mut();
    let tag = match role {
        Role::Leader => {
            // Send commitment of tag share to follower.
            let (tag_share_decommitment, tag_share_commitment) = tag_share.clone().hash_commit();
            io.send(tag_share_commitment).await?;
            // Only receive the follower's share after committing to ours, so
            // neither party can adapt its share to the other's.
            let follower_tag_share: TagShare = io.expect_next().await?;
            // Send decommitment (tag share) to follower.
            io.send(tag_share_decommitment).await?;
            tag_share + follower_tag_share
        }
        Role::Follower => {
            // Wait for commitment from leader.
            let commitment: Hash = io.expect_next().await?;
            // Send tag share to leader.
            io.send(tag_share.clone()).await?;
            // Expect decommitment (tag share) from leader.
            let decommitment: Decommitment<TagShare> = io.expect_next().await?;
            // Verify decommitment.
            decommitment.verify(&commitment).map_err(|_| {
                AesGcmError::peer("leader tag share commitment verification failed")
            })?;
            let leader_tag_share = decommitment.into_inner();
            tag_share + leader_tag_share
        }
    };
    // Reject if tag is incorrect.
    if tag != purported_tag {
        return Err(AesGcmError::invalid_tag());
    }
    Ok(())
}
/// Builds padded data for GHASH.
///
/// Lays out the input as `AAD || 0-pad || ciphertext || 0-pad || len_block`,
/// where each section is zero-padded to a multiple of the 16-byte block size
/// and `len_block` encodes the AAD and ciphertext bit lengths as two
/// big-endian 64-bit values.
fn build_ghash_data(mut aad: Vec<u8>, mut ciphertext: Vec<u8>) -> Vec<u8> {
    let associated_data_bitlen = (aad.len() as u64) * 8;
    let text_bitlen = (ciphertext.len() as u64) * 8;
    // len_block = bitlen(AAD) || bitlen(ciphertext), each a big-endian u64.
    let len_block = ((associated_data_bitlen as u128) << 64) + (text_bitlen as u128);
    // Pad data to be a multiple of 16 bytes.
    aad.resize(aad.len().div_ceil(16) * 16, 0);
    ciphertext.resize(ciphertext.len().div_ceil(16) * 16, 0);
    let mut data: Vec<u8> = Vec::with_capacity(aad.len() + ciphertext.len() + 16);
    data.extend(aad);
    data.extend(ciphertext);
    data.extend_from_slice(&len_block.to_be_bytes());
    data
}

View File

@@ -1,255 +0,0 @@
//! This crate provides implementations of 2PC AEADs for authenticated
//! encryption with a shared key.
//!
//! Both parties can work together to encrypt and decrypt messages with
//! different visibility configurations. See [`Aead`] for more information on
//! the interface.
//!
//! For example, one party can privately provide the plaintext to encrypt, while
//! both parties can see the ciphertext and the tag. Or, both parties can
//! cooperate to decrypt a ciphertext and verify the tag, while only one party
//! can see the plaintext.
#![deny(missing_docs, unreachable_pub, unused_must_use)]
#![deny(clippy::all)]
#![forbid(unsafe_code)]
pub mod aes_gcm;
use async_trait::async_trait;
use mpz_garble::value::ValueRef;
/// This trait defines the interface for AEADs.
#[async_trait]
pub trait Aead: Send {
    /// The error type for the AEAD.
    type Error: std::error::Error + Send + Sync + 'static;

    /// Sets the key for the AEAD.
    async fn set_key(&mut self, key: ValueRef, iv: ValueRef) -> Result<(), Self::Error>;

    /// Decodes the key for the AEAD, revealing it to this party.
    async fn decode_key_private(&mut self) -> Result<(), Self::Error>;

    /// Decodes the key for the AEAD, revealing it to the other party(s).
    async fn decode_key_blind(&mut self) -> Result<(), Self::Error>;

    /// Sets the transcript id.
    ///
    /// The AEAD assigns unique identifiers to each byte of plaintext
    /// during encryption and decryption.
    ///
    /// For example, if the transcript id is set to `foo`, then the first byte
    /// will be assigned the id `foo/0`, the second byte `foo/1`, and so on.
    ///
    /// Each transcript id has an independent counter.
    ///
    /// # Note
    ///
    /// The state of a transcript counter is preserved between calls to
    /// `set_transcript_id`.
    fn set_transcript_id(&mut self, id: &str);

    /// Performs any necessary one-time setup for the AEAD.
    async fn setup(&mut self) -> Result<(), Self::Error>;

    /// Preprocesses for the given number of bytes.
    async fn preprocess(&mut self, len: usize) -> Result<(), Self::Error>;

    /// Starts the AEAD.
    ///
    /// This method performs initialization for the AEAD after setting the key.
    async fn start(&mut self) -> Result<(), Self::Error>;

    /// Encrypts a plaintext message, returning the ciphertext and tag.
    ///
    /// The plaintext is provided by both parties.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for encryption.
    /// * `plaintext` - The plaintext to encrypt.
    /// * `aad` - Additional authenticated data.
    async fn encrypt_public(
        &mut self,
        explicit_nonce: Vec<u8>,
        plaintext: Vec<u8>,
        aad: Vec<u8>,
    ) -> Result<Vec<u8>, Self::Error>;

    /// Encrypts a plaintext message, hiding it from the other party, returning
    /// the ciphertext and tag.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for encryption.
    /// * `plaintext` - The plaintext to encrypt.
    /// * `aad` - Additional authenticated data.
    async fn encrypt_private(
        &mut self,
        explicit_nonce: Vec<u8>,
        plaintext: Vec<u8>,
        aad: Vec<u8>,
    ) -> Result<Vec<u8>, Self::Error>;

    /// Encrypts a plaintext message provided by the other party, returning
    /// the ciphertext and tag.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for encryption.
    /// * `plaintext_len` - The length of the plaintext to encrypt.
    /// * `aad` - Additional authenticated data.
    async fn encrypt_blind(
        &mut self,
        explicit_nonce: Vec<u8>,
        plaintext_len: usize,
        aad: Vec<u8>,
    ) -> Result<Vec<u8>, Self::Error>;

    /// Decrypts a ciphertext message, returning the plaintext to both parties.
    ///
    /// This method checks the authenticity of the ciphertext, tag and
    /// additional data.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for decryption.
    /// * `payload` - The ciphertext and tag to authenticate and decrypt.
    /// * `aad` - Additional authenticated data.
    async fn decrypt_public(
        &mut self,
        explicit_nonce: Vec<u8>,
        payload: Vec<u8>,
        aad: Vec<u8>,
    ) -> Result<Vec<u8>, Self::Error>;

    /// Decrypts a ciphertext message, returning the plaintext only to this
    /// party.
    ///
    /// This method checks the authenticity of the ciphertext, tag and
    /// additional data.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for decryption.
    /// * `payload` - The ciphertext and tag to authenticate and decrypt.
    /// * `aad` - Additional authenticated data.
    async fn decrypt_private(
        &mut self,
        explicit_nonce: Vec<u8>,
        payload: Vec<u8>,
        aad: Vec<u8>,
    ) -> Result<Vec<u8>, Self::Error>;

    /// Decrypts a ciphertext message, returning the plaintext only to the other
    /// party.
    ///
    /// This method checks the authenticity of the ciphertext, tag and
    /// additional data. Note that this party learns no plaintext: the method
    /// returns `()`.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for decryption.
    /// * `payload` - The ciphertext and tag to authenticate and decrypt.
    /// * `aad` - Additional authenticated data.
    async fn decrypt_blind(
        &mut self,
        explicit_nonce: Vec<u8>,
        payload: Vec<u8>,
        aad: Vec<u8>,
    ) -> Result<(), Self::Error>;

    /// Verifies the tag of a ciphertext message.
    ///
    /// This method checks the authenticity of the ciphertext, tag and
    /// additional data.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for decryption.
    /// * `payload` - The ciphertext and tag to authenticate and decrypt.
    /// * `aad` - Additional authenticated data.
    async fn verify_tag(
        &mut self,
        explicit_nonce: Vec<u8>,
        payload: Vec<u8>,
        aad: Vec<u8>,
    ) -> Result<(), Self::Error>;

    /// Locally decrypts the provided ciphertext and then proves in ZK to the
    /// other party(s) that the plaintext is correct.
    ///
    /// Returns the plaintext.
    ///
    /// This method requires this party to know the encryption key, which can be
    /// achieved by calling the `decode_key_private` method.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for the keystream.
    /// * `payload` - The ciphertext and tag to decrypt and prove.
    /// * `aad` - Additional authenticated data.
    async fn prove_plaintext(
        &mut self,
        explicit_nonce: Vec<u8>,
        payload: Vec<u8>,
        aad: Vec<u8>,
    ) -> Result<Vec<u8>, Self::Error>;

    /// Locally decrypts the provided ciphertext and then proves in ZK to the
    /// other party(s) that the plaintext is correct.
    ///
    /// Returns the plaintext.
    ///
    /// This method requires this party to know the encryption key, which can be
    /// achieved by calling the `decode_key_private` method.
    ///
    /// # WARNING
    ///
    /// This method does not verify the tag of the ciphertext. Only use this if
    /// you know what you're doing.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for the keystream.
    /// * `ciphertext` - The ciphertext to decrypt and prove.
    async fn prove_plaintext_no_tag(
        &mut self,
        explicit_nonce: Vec<u8>,
        ciphertext: Vec<u8>,
    ) -> Result<Vec<u8>, Self::Error>;

    /// Verifies the other party(s) can prove they know a plaintext which
    /// encrypts to the given ciphertext.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for the keystream.
    /// * `payload` - The ciphertext and tag to verify.
    /// * `aad` - Additional authenticated data.
    async fn verify_plaintext(
        &mut self,
        explicit_nonce: Vec<u8>,
        payload: Vec<u8>,
        aad: Vec<u8>,
    ) -> Result<(), Self::Error>;

    /// Verifies the other party(s) can prove they know a plaintext which
    /// encrypts to the given ciphertext.
    ///
    /// # WARNING
    ///
    /// This method does not verify the tag of the ciphertext. Only use this if
    /// you know what you're doing.
    ///
    /// # Arguments
    ///
    /// * `explicit_nonce` - The explicit nonce to use for the keystream.
    /// * `ciphertext` - The ciphertext to verify.
    async fn verify_plaintext_no_tag(
        &mut self,
        explicit_nonce: Vec<u8>,
        ciphertext: Vec<u8>,
    ) -> Result<(), Self::Error>;
}

View File

@@ -1,30 +0,0 @@
[package]
name = "tlsn-block-cipher"
authors = ["TLSNotary Team"]
description = "2PC block cipher implementation"
keywords = ["tls", "mpc", "2pc", "block-cipher"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.7"
edition = "2021"
[lib]
name = "block_cipher"
[features]
default = ["mock"]
mock = []
[dependencies]
mpz-circuits = { workspace = true }
mpz-garble = { workspace = true }
tlsn-utils = { workspace = true }
async-trait = { workspace = true }
thiserror = { workspace = true }
derive_builder = { workspace = true }
tracing = { workspace = true }
[dev-dependencies]
aes = { workspace = true }
cipher = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }

View File

@@ -1,277 +0,0 @@
use std::{collections::VecDeque, marker::PhantomData};
use async_trait::async_trait;
use mpz_garble::{value::ValueRef, Decode, DecodePrivate, Execute, Load, Memory};
use tracing::instrument;
use utils::id::NestedId;
use crate::{BlockCipher, BlockCipherCircuit, BlockCipherConfig, BlockCipherError, Visibility};
/// Internal state of the MPC block cipher.
#[derive(Debug)]
struct State {
    /// Id generator for private/blind executions.
    private_execution_id: NestedId,
    /// Id generator for public executions.
    public_execution_id: NestedId,
    /// Preprocessed blocks for private and blind plaintext inputs.
    preprocessed_private: VecDeque<BlockVars>,
    /// Preprocessed blocks for public plaintext inputs.
    preprocessed_public: VecDeque<BlockVars>,
    /// The key reference, set via `set_key`.
    key: Option<ValueRef>,
}
/// Value references for a single block cipher execution.
#[derive(Debug)]
struct BlockVars {
    /// The plaintext message input.
    msg: ValueRef,
    /// The ciphertext output.
    ciphertext: ValueRef,
}
/// An MPC block cipher.
#[derive(Debug)]
pub struct MpcBlockCipher<C, E>
where
    C: BlockCipherCircuit,
    E: Memory + Execute + Decode + DecodePrivate + Send + Sync,
{
    /// Mutable protocol state (execution ids, preprocessed blocks, key).
    state: State,
    /// The MPC executor used to run the cipher circuit.
    executor: E,
    /// Marker for the concrete cipher circuit type.
    _cipher: PhantomData<C>,
}
impl<C, E> MpcBlockCipher<C, E>
where
    C: BlockCipherCircuit,
    E: Memory + Execute + Decode + DecodePrivate + Send + Sync,
{
    /// Creates a new MPC block cipher.
    ///
    /// # Arguments
    ///
    /// * `config` - The configuration for the block cipher.
    /// * `executor` - The executor to use for the MPC.
    pub fn new(config: BlockCipherConfig, executor: E) -> Self {
        // Private and public executions draw from independent counter id
        // spaces nested under the configured id.
        let private_execution_id = NestedId::new(&config.id)
            .append_string("private")
            .append_counter();
        let public_execution_id = NestedId::new(&config.id)
            .append_string("public")
            .append_counter();
        Self {
            state: State {
                private_execution_id,
                public_execution_id,
                preprocessed_private: VecDeque::new(),
                preprocessed_public: VecDeque::new(),
                key: None,
            },
            executor,
            _cipher: PhantomData,
        }
    }

    /// Defines input/output value references for one block execution with the
    /// given plaintext visibility.
    fn define_block(&mut self, vis: Visibility) -> BlockVars {
        let (id, msg) = match vis {
            Visibility::Private => {
                // Private and blind executions share the private id space.
                let id = self
                    .state
                    .private_execution_id
                    .increment_in_place()
                    .to_string();
                let msg = self
                    .executor
                    .new_private_input::<C::BLOCK>(&format!("{}/msg", &id))
                    .expect("message is not defined");
                (id, msg)
            }
            Visibility::Blind => {
                let id = self
                    .state
                    .private_execution_id
                    .increment_in_place()
                    .to_string();
                let msg = self
                    .executor
                    .new_blind_input::<C::BLOCK>(&format!("{}/msg", &id))
                    .expect("message is not defined");
                (id, msg)
            }
            Visibility::Public => {
                let id = self
                    .state
                    .public_execution_id
                    .increment_in_place()
                    .to_string();
                let msg = self
                    .executor
                    .new_public_input::<C::BLOCK>(&format!("{}/msg", &id))
                    .expect("message is not defined");
                (id, msg)
            }
        };
        // NOTE(review): the expect message says "message" although this line
        // defines the ciphertext output.
        let ciphertext = self
            .executor
            .new_output::<C::BLOCK>(&format!("{}/ciphertext", &id))
            .expect("message is not defined");
        BlockVars { msg, ciphertext }
    }
}
#[async_trait]
impl<C, E> BlockCipher<C> for MpcBlockCipher<C, E>
where
    C: BlockCipherCircuit,
    E: Memory + Load + Execute + Decode + DecodePrivate + Send + Sync + Send,
{
    #[instrument(level = "trace", skip_all)]
    fn set_key(&mut self, key: ValueRef) {
        self.state.key = Some(key);
    }

    #[instrument(level = "debug", skip_all, err)]
    async fn preprocess(
        &mut self,
        visibility: Visibility,
        count: usize,
    ) -> Result<(), BlockCipherError> {
        let key = self
            .state
            .key
            .clone()
            .ok_or_else(BlockCipherError::key_not_set)?;
        for _ in 0..count {
            let vars = self.define_block(visibility);
            // Preload the circuit with the key and block references so the
            // later execution is cheaper.
            self.executor
                .load(
                    C::circuit(),
                    &[key.clone(), vars.msg.clone()],
                    &[vars.ciphertext.clone()],
                )
                .await?;
            // Private and Blind blocks go into the same queue; see
            // `encrypt_private` and `encrypt_blind`.
            match visibility {
                Visibility::Private | Visibility::Blind => {
                    self.state.preprocessed_private.push_back(vars)
                }
                Visibility::Public => self.state.preprocessed_public.push_back(vars),
            }
        }
        Ok(())
    }

    #[instrument(level = "debug", skip_all, err)]
    async fn encrypt_private(&mut self, plaintext: Vec<u8>) -> Result<Vec<u8>, BlockCipherError> {
        let len = plaintext.len();
        // The plaintext must be exactly one cipher block.
        let block: C::BLOCK = plaintext
            .try_into()
            .map_err(|_| BlockCipherError::invalid_message_length::<C>(len))?;
        let key = self
            .state
            .key
            .clone()
            .ok_or_else(BlockCipherError::key_not_set)?;
        // Reuse a preprocessed block if available, otherwise define one now.
        let BlockVars { msg, ciphertext } =
            if let Some(vars) = self.state.preprocessed_private.pop_front() {
                vars
            } else {
                self.define_block(Visibility::Private)
            };
        self.executor.assign(&msg, block)?;
        self.executor
            .execute(C::circuit(), &[key, msg], &[ciphertext.clone()])
            .await?;
        // Decode the ciphertext so both parties learn it.
        let mut outputs = self.executor.decode(&[ciphertext]).await?;
        let ciphertext: C::BLOCK = if let Ok(ciphertext) = outputs
            .pop()
            .expect("ciphertext should be present")
            .try_into()
        {
            ciphertext
        } else {
            panic!("ciphertext should be a block")
        };
        Ok(ciphertext.into())
    }

    #[instrument(level = "debug", skip_all, err)]
    async fn encrypt_blind(&mut self) -> Result<Vec<u8>, BlockCipherError> {
        let key = self
            .state
            .key
            .clone()
            .ok_or_else(BlockCipherError::key_not_set)?;
        // Blind blocks share the `preprocessed_private` queue with private
        // blocks (see `preprocess`).
        let BlockVars { msg, ciphertext } =
            if let Some(vars) = self.state.preprocessed_private.pop_front() {
                vars
            } else {
                self.define_block(Visibility::Blind)
            };
        // The other party assigns `msg`; this party only executes and decodes.
        self.executor
            .execute(C::circuit(), &[key, msg], &[ciphertext.clone()])
            .await?;
        let mut outputs = self.executor.decode(&[ciphertext]).await?;
        let ciphertext: C::BLOCK = if let Ok(ciphertext) = outputs
            .pop()
            .expect("ciphertext should be present")
            .try_into()
        {
            ciphertext
        } else {
            panic!("ciphertext should be a block")
        };
        Ok(ciphertext.into())
    }

    #[instrument(level = "debug", skip_all, err)]
    async fn encrypt_share(&mut self, plaintext: Vec<u8>) -> Result<Vec<u8>, BlockCipherError> {
        let len = plaintext.len();
        let block: C::BLOCK = plaintext
            .try_into()
            .map_err(|_| BlockCipherError::invalid_message_length::<C>(len))?;
        let key = self
            .state
            .key
            .clone()
            .ok_or_else(BlockCipherError::key_not_set)?;
        let BlockVars { msg, ciphertext } =
            if let Some(vars) = self.state.preprocessed_public.pop_front() {
                vars
            } else {
                self.define_block(Visibility::Public)
            };
        self.executor.assign(&msg, block)?;
        self.executor
            .execute(C::circuit(), &[key, msg], &[ciphertext.clone()])
            .await?;
        // `decode_shared` yields an additive share of the ciphertext rather
        // than the full value.
        let mut outputs = self.executor.decode_shared(&[ciphertext]).await?;
        let share: C::BLOCK =
            if let Ok(share) = outputs.pop().expect("share should be present").try_into() {
                share
            } else {
                panic!("share should be a block")
            };
        Ok(share.into())
    }
}

View File

@@ -1,39 +0,0 @@
use std::sync::Arc;
use mpz_circuits::{
circuits::AES128,
types::{StaticValueType, Value},
Circuit,
};
/// A block cipher circuit.
pub trait BlockCipherCircuit: Default + Clone + Send + Sync {
    /// The key type.
    type KEY: StaticValueType + Send + Sync;
    /// The block type.
    type BLOCK: StaticValueType + TryFrom<Vec<u8>> + TryFrom<Value> + Into<Vec<u8>> + Send + Sync;
    /// The length of the key, in bytes.
    const KEY_LEN: usize;
    /// The length of the block, in bytes.
    const BLOCK_LEN: usize;
    /// Returns the circuit of the cipher.
    fn circuit() -> Arc<Circuit>;
}
/// Aes128 block cipher circuit.
#[derive(Default, Debug, Clone)]
pub struct Aes128;

impl BlockCipherCircuit for Aes128 {
    // AES-128: 16-byte key and 16-byte block.
    type KEY = [u8; 16];
    type BLOCK = [u8; 16];
    const KEY_LEN: usize = 16;
    const BLOCK_LEN: usize = 16;

    fn circuit() -> Arc<Circuit> {
        // Shared, pre-built AES-128 circuit from mpz_circuits.
        AES128.clone()
    }
}

View File

@@ -1,16 +0,0 @@
use derive_builder::Builder;
/// Configuration for a block cipher.
#[derive(Debug, Clone, Builder)]
pub struct BlockCipherConfig {
    /// The ID of the block cipher, used to namespace value ids.
    #[builder(setter(into))]
    pub(crate) id: String,
}

impl BlockCipherConfig {
    /// Creates a new builder for the block cipher configuration.
    ///
    /// The builder type is generated by `derive_builder`.
    pub fn builder() -> BlockCipherConfigBuilder {
        BlockCipherConfigBuilder::default()
    }
}

View File

@@ -1,92 +0,0 @@
use core::fmt;
use std::error::Error;
use crate::BlockCipherCircuit;
/// A block cipher error.
#[derive(Debug, thiserror::Error)]
pub struct BlockCipherError {
    /// The category of the error.
    kind: ErrorKind,
    /// The underlying cause, if any.
    #[source]
    source: Option<Box<dyn Error + Send + Sync>>,
}
impl BlockCipherError {
    /// Creates a new error of the given `kind` wrapping `source`.
    pub(crate) fn new<E>(kind: ErrorKind, source: E) -> Self
    where
        E: Into<Box<dyn Error + Send + Sync>>,
    {
        Self {
            kind,
            source: Some(source.into()),
        }
    }

    /// Error returned when an operation requires a key but none was set.
    pub(crate) fn key_not_set() -> Self {
        Self::new(ErrorKind::Key, "key not set")
    }

    /// Error returned when the plaintext length does not equal the cipher's
    /// block length.
    pub(crate) fn invalid_message_length<C: BlockCipherCircuit>(len: usize) -> Self {
        Self::new(
            ErrorKind::Msg,
            format!(
                "message length does not equal block length: {} != {}",
                len,
                C::BLOCK_LEN
            ),
        )
    }
}
/// Categories of block cipher errors.
#[derive(Debug)]
pub(crate) enum ErrorKind {
    /// Error originating from the MPC virtual machine.
    Vm,
    /// The key was missing or invalid.
    Key,
    /// The message was malformed (e.g. wrong length).
    Msg,
}
impl fmt::Display for BlockCipherError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.kind {
ErrorKind::Vm => write!(f, "vm error")?,
ErrorKind::Key => write!(f, "key error")?,
ErrorKind::Msg => write!(f, "message error")?,
}
if let Some(ref source) = self.source {
write!(f, " caused by: {}", source)?;
}
Ok(())
}
}
// All mpz_garble error types map to the `Vm` error kind.
impl From<mpz_garble::MemoryError> for BlockCipherError {
    fn from(error: mpz_garble::MemoryError) -> Self {
        Self::new(ErrorKind::Vm, error)
    }
}

impl From<mpz_garble::LoadError> for BlockCipherError {
    fn from(error: mpz_garble::LoadError) -> Self {
        Self::new(ErrorKind::Vm, error)
    }
}

impl From<mpz_garble::ExecutionError> for BlockCipherError {
    fn from(error: mpz_garble::ExecutionError) -> Self {
        Self::new(ErrorKind::Vm, error)
    }
}

impl From<mpz_garble::DecodeError> for BlockCipherError {
    fn from(error: mpz_garble::DecodeError) -> Self {
        Self::new(ErrorKind::Vm, error)
    }
}

View File

@@ -1,236 +0,0 @@
//! This crate provides a 2PC block cipher implementation.
//!
//! Both parties work together to encrypt or share an encrypted block using a
//! shared key.
#![deny(missing_docs, unreachable_pub, unused_must_use)]
#![deny(clippy::all)]
#![deny(unsafe_code)]
mod cipher;
mod circuit;
mod config;
mod error;
use async_trait::async_trait;
use mpz_garble::value::ValueRef;
pub use crate::{
cipher::MpcBlockCipher,
circuit::{Aes128, BlockCipherCircuit},
};
pub use config::{BlockCipherConfig, BlockCipherConfigBuilder, BlockCipherConfigBuilderError};
pub use error::BlockCipherError;
/// Visibility of a message plaintext.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Visibility {
    /// Private message: provided by this party, hidden from the other.
    Private,
    /// Blind message: provided by the other party, hidden from this one.
    Blind,
    /// Public message: known to both parties.
    Public,
}
/// A trait for MPC block ciphers.
#[async_trait]
pub trait BlockCipher<Cipher>: Send + Sync
where
    Cipher: BlockCipherCircuit,
{
    /// Sets the key for the block cipher.
    fn set_key(&mut self, key: ValueRef);

    /// Preprocesses `count` blocks.
    ///
    /// # Arguments
    ///
    /// * `visibility` - The visibility of the plaintext.
    /// * `count` - The number of blocks to preprocess.
    async fn preprocess(
        &mut self,
        visibility: Visibility,
        count: usize,
    ) -> Result<(), BlockCipherError>;

    /// Encrypts the given plaintext keeping it hidden from the other party(s).
    ///
    /// Returns the ciphertext.
    ///
    /// # Arguments
    ///
    /// * `plaintext` - The plaintext to encrypt.
    async fn encrypt_private(&mut self, plaintext: Vec<u8>) -> Result<Vec<u8>, BlockCipherError>;

    /// Encrypts a plaintext provided by the other party(s).
    ///
    /// Returns the ciphertext.
    async fn encrypt_blind(&mut self) -> Result<Vec<u8>, BlockCipherError>;

    /// Encrypts a plaintext provided by both parties. Fails if the
    /// plaintext provided by both parties does not match.
    ///
    /// Returns an additive share of the ciphertext.
    ///
    /// # Arguments
    ///
    /// * `plaintext` - The plaintext to encrypt.
    async fn encrypt_share(&mut self, plaintext: Vec<u8>) -> Result<Vec<u8>, BlockCipherError>;
}
#[cfg(test)]
mod tests {
    use super::*;
    use mpz_garble::{protocol::deap::mock::create_mock_deap_vm, Memory};

    use crate::circuit::Aes128;
    use ::aes::Aes128 as TestAes128;
    use ::cipher::{BlockEncrypt, KeyInit};

    /// Reference AES-128 encryption using the `aes` crate, for comparison
    /// against the MPC result.
    fn aes128(key: [u8; 16], msg: [u8; 16]) -> [u8; 16] {
        let mut msg = msg.into();
        let cipher = TestAes128::new(&key.into());
        cipher.encrypt_block(&mut msg);
        msg.into()
    }

    /// Leader encrypts a private plaintext while the follower participates
    /// blindly; both should learn the same, correct ciphertext.
    #[tokio::test]
    #[ignore = "expensive"]
    async fn test_block_cipher_blind() {
        let leader_config = BlockCipherConfig::builder().id("test").build().unwrap();
        let follower_config = BlockCipherConfig::builder().id("test").build().unwrap();
        let key = [0u8; 16];
        let (leader_vm, follower_vm) = create_mock_deap_vm();
        // Key is public just for this test, typically it is private.
        let leader_key = leader_vm.new_public_input::<[u8; 16]>("key").unwrap();
        let follower_key = follower_vm.new_public_input::<[u8; 16]>("key").unwrap();
        leader_vm.assign(&leader_key, key).unwrap();
        follower_vm.assign(&follower_key, key).unwrap();
        let mut leader = MpcBlockCipher::<Aes128, _>::new(leader_config, leader_vm);
        leader.set_key(leader_key);
        let mut follower = MpcBlockCipher::<Aes128, _>::new(follower_config, follower_vm);
        follower.set_key(follower_key);
        let plaintext = [0u8; 16];
        // Both sides must run concurrently for the MPC to make progress.
        let (leader_ciphertext, follower_ciphertext) = tokio::try_join!(
            leader.encrypt_private(plaintext.to_vec()),
            follower.encrypt_blind()
        )
        .unwrap();
        let expected = aes128(key, plaintext);
        assert_eq!(leader_ciphertext, expected.to_vec());
        assert_eq!(leader_ciphertext, follower_ciphertext);
    }

    /// Both parties provide the plaintext and receive XOR shares of the
    /// ciphertext; the recombined shares should equal the real ciphertext.
    #[tokio::test]
    #[ignore = "expensive"]
    async fn test_block_cipher_share() {
        let leader_config = BlockCipherConfig::builder().id("test").build().unwrap();
        let follower_config = BlockCipherConfig::builder().id("test").build().unwrap();
        let key = [0u8; 16];
        let (leader_vm, follower_vm) = create_mock_deap_vm();
        // Key is public just for this test, typically it is private.
        let leader_key = leader_vm.new_public_input::<[u8; 16]>("key").unwrap();
        let follower_key = follower_vm.new_public_input::<[u8; 16]>("key").unwrap();
        leader_vm.assign(&leader_key, key).unwrap();
        follower_vm.assign(&follower_key, key).unwrap();
        let mut leader = MpcBlockCipher::<Aes128, _>::new(leader_config, leader_vm);
        leader.set_key(leader_key);
        let mut follower = MpcBlockCipher::<Aes128, _>::new(follower_config, follower_vm);
        follower.set_key(follower_key);
        let plaintext = [0u8; 16];
        let (leader_share, follower_share) = tokio::try_join!(
            leader.encrypt_share(plaintext.to_vec()),
            follower.encrypt_share(plaintext.to_vec())
        )
        .unwrap();
        let expected = aes128(key, plaintext);
        // Recombine the XOR shares into the full ciphertext.
        let result: [u8; 16] = std::array::from_fn(|i| leader_share[i] ^ follower_share[i]);
        assert_eq!(result, expected);
    }

    /// Preprocessing blocks first must not change results for either the
    /// private/blind or the shared encryption flow.
    #[tokio::test]
    #[ignore = "expensive"]
    async fn test_block_cipher_preprocess() {
        let leader_config = BlockCipherConfig::builder().id("test").build().unwrap();
        let follower_config = BlockCipherConfig::builder().id("test").build().unwrap();
        let key = [0u8; 16];
        let (leader_vm, follower_vm) = create_mock_deap_vm();
        // Key is public just for this test, typically it is private.
        let leader_key = leader_vm.new_public_input::<[u8; 16]>("key").unwrap();
        let follower_key = follower_vm.new_public_input::<[u8; 16]>("key").unwrap();
        leader_vm.assign(&leader_key, key).unwrap();
        follower_vm.assign(&follower_key, key).unwrap();
        let mut leader = MpcBlockCipher::<Aes128, _>::new(leader_config, leader_vm);
        leader.set_key(leader_key);
        let mut follower = MpcBlockCipher::<Aes128, _>::new(follower_config, follower_vm);
        follower.set_key(follower_key);
        let plaintext = [0u8; 16];
        tokio::try_join!(
            leader.preprocess(Visibility::Private, 1),
            follower.preprocess(Visibility::Blind, 1)
        )
        .unwrap();
        let (leader_ciphertext, follower_ciphertext) = tokio::try_join!(
            leader.encrypt_private(plaintext.to_vec()),
            follower.encrypt_blind()
        )
        .unwrap();
        let expected = aes128(key, plaintext);
        assert_eq!(leader_ciphertext, expected.to_vec());
        assert_eq!(leader_ciphertext, follower_ciphertext);
        tokio::try_join!(
            leader.preprocess(Visibility::Public, 1),
            follower.preprocess(Visibility::Public, 1)
        )
        .unwrap();
        let (leader_share, follower_share) = tokio::try_join!(
            leader.encrypt_share(plaintext.to_vec()),
            follower.encrypt_share(plaintext.to_vec())
        )
        .unwrap();
        let expected = aes128(key, plaintext);
        let result: [u8; 16] = std::array::from_fn(|i| leader_share[i] ^ follower_share[i]);
        assert_eq!(result, expected);
    }
}

View File

@@ -0,0 +1,34 @@
[package]
name = "tlsn-cipher"
authors = ["TLSNotary Team"]
description = "This crate provides implementations of ciphers for two parties"
keywords = ["tls", "mpc", "2pc", "aes"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.12"
edition = "2021"
[lints]
workspace = true
[lib]
name = "cipher"
[dependencies]
mpz-circuits = { workspace = true }
mpz-vm-core = { workspace = true }
mpz-memory-core = { workspace = true }
async-trait = { workspace = true }
thiserror = { workspace = true }
aes = { workspace = true }
[dev-dependencies]
mpz-garble = { workspace = true }
mpz-common = { workspace = true }
mpz-ot = { workspace = true }
tokio = { version = "1", features = ["macros", "rt", "rt-multi-thread"] }
rand = { workspace = true }
ctr = { workspace = true }
cipher = { workspace = true }

View File

@@ -0,0 +1,44 @@
use std::fmt::Display;
/// AES error.
#[derive(Debug, thiserror::Error)]
pub struct AesError {
    /// The category of the error.
    kind: ErrorKind,
    /// The underlying cause, if any.
    #[source]
    source: Option<Box<dyn std::error::Error + Send + Sync>>,
}
impl AesError {
    /// Creates a new error of the given `kind` wrapping `source`.
    pub(crate) fn new<E>(kind: ErrorKind, source: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        Self {
            kind,
            source: Some(source.into()),
        }
    }
}
/// Categories of AES errors.
#[derive(Debug, Clone, Copy, PartialEq)]
pub(crate) enum ErrorKind {
    /// Error originating from the virtual machine.
    Vm,
    /// The key was missing or invalid.
    Key,
    /// The IV was missing or invalid.
    Iv,
}
impl Display for AesError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self.kind {
ErrorKind::Vm => write!(f, "vm error")?,
ErrorKind::Key => write!(f, "key error")?,
ErrorKind::Iv => write!(f, "iv error")?,
}
if let Some(source) = &self.source {
write!(f, " caused by: {}", source)?;
}
Ok(())
}
}

View File

@@ -0,0 +1,375 @@
//! The AES-128 block cipher.
use crate::{Cipher, CtrBlock, Keystream};
use async_trait::async_trait;
use mpz_circuits::circuits::AES128;
use mpz_memory_core::binary::{Binary, U8};
use mpz_vm_core::{prelude::*, Call, Vm};
use std::fmt::Debug;
mod error;
pub use error::AesError;
use error::ErrorKind;
/// Computes AES-128.
#[derive(Default, Debug)]
pub struct Aes128 {
    /// The key reference, set via `set_key`.
    key: Option<Array<U8, 16>>,
    /// The implicit IV reference, set via `set_iv`.
    iv: Option<Array<U8, 4>>,
}
#[async_trait]
impl Cipher for Aes128 {
    type Error = AesError;
    type Key = Array<U8, 16>;
    type Iv = Array<U8, 4>;
    type Nonce = Array<U8, 8>;
    type Counter = Array<U8, 4>;
    type Block = Array<U8, 16>;

    fn set_key(&mut self, key: Array<U8, 16>) {
        self.key = Some(key);
    }

    fn set_iv(&mut self, iv: Array<U8, 4>) {
        self.iv = Some(iv);
    }

    fn key(&self) -> Option<&Array<U8, 16>> {
        self.key.as_ref()
    }

    fn iv(&self) -> Option<&Array<U8, 4>> {
        self.iv.as_ref()
    }

    /// Allocates a single AES-128 call computing the encryption of `input`
    /// under the configured key.
    fn alloc_block(
        &self,
        vm: &mut dyn Vm<Binary>,
        input: Array<U8, 16>,
    ) -> Result<Self::Block, Self::Error> {
        let key = self
            .key
            .ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?;
        let output = vm
            .call(
                Call::builder(AES128.clone())
                    .arg(key)
                    .arg(input)
                    .build()
                    .expect("call should be valid"),
            )
            .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
        Ok(output)
    }

    /// Allocates one CTR-mode block with freshly allocated, public
    /// explicit-nonce and counter inputs.
    fn alloc_ctr_block(
        &self,
        vm: &mut dyn Vm<Binary>,
    ) -> Result<CtrBlock<Self::Nonce, Self::Counter, Self::Block>, Self::Error> {
        let key = self
            .key
            .ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?;
        let iv = self
            .iv
            .ok_or_else(|| AesError::new(ErrorKind::Iv, "iv not set"))?;
        // Nonce and counter are public inputs assigned later by the caller.
        let explicit_nonce: Array<U8, 8> = vm
            .alloc()
            .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
        vm.mark_public(explicit_nonce)
            .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
        let counter: Array<U8, 4> = vm
            .alloc()
            .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
        vm.mark_public(counter)
            .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
        let output = vm
            .call(
                Call::builder(AES128.clone())
                    .arg(key)
                    .arg(iv)
                    .arg(explicit_nonce)
                    .arg(counter)
                    .build()
                    .expect("call should be valid"),
            )
            .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
        Ok(CtrBlock {
            explicit_nonce,
            counter,
            output,
        })
    }

    /// Allocates enough CTR blocks to cover `len` bytes of keystream
    /// (`ceil(len / 16)` blocks).
    fn alloc_keystream(
        &self,
        vm: &mut dyn Vm<Binary>,
        len: usize,
    ) -> Result<Keystream<Self::Nonce, Self::Counter, Self::Block>, Self::Error> {
        let key = self
            .key
            .ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?;
        let iv = self
            .iv
            .ok_or_else(|| AesError::new(ErrorKind::Iv, "iv not set"))?;
        let block_count = len.div_ceil(16);
        // First allocate all public nonce/counter inputs, then wire up the
        // AES calls for each block.
        let inputs = (0..block_count)
            .map(|_| {
                let explicit_nonce: Array<U8, 8> = vm
                    .alloc()
                    .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
                let counter: Array<U8, 4> = vm
                    .alloc()
                    .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
                vm.mark_public(explicit_nonce)
                    .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
                vm.mark_public(counter)
                    .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
                Ok((explicit_nonce, counter))
            })
            .collect::<Result<Vec<_>, AesError>>()?;
        let blocks = inputs
            .into_iter()
            .map(|(explicit_nonce, counter)| {
                let output = vm
                    .call(
                        Call::builder(AES128.clone())
                            .arg(key)
                            .arg(iv)
                            .arg(explicit_nonce)
                            .arg(counter)
                            .build()
                            .expect("call should be valid"),
                    )
                    .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
                Ok(CtrBlock {
                    explicit_nonce,
                    counter,
                    output,
                })
            })
            .collect::<Result<Vec<_>, AesError>>()?;
        Ok(Keystream::new(&blocks))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::Cipher;
    use mpz_common::context::test_st_context;
    use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
    use mpz_memory_core::{
        binary::{Binary, U8},
        correlated::Delta,
        Array, MemoryExt, Vector, ViewExt,
    };
    use mpz_ot::ideal::cot::ideal_cot;
    use mpz_vm_core::{Execute, Vm};
    use rand::{rngs::StdRng, SeedableRng};

    /// Both parties allocate a keystream, apply it to the same public
    /// message, and must decode identical ciphertexts matching a reference
    /// AES-CTR implementation.
    #[tokio::test]
    async fn test_aes_ctr() {
        let key = [42_u8; 16];
        let iv = [3_u8; 4];
        let nonce = [5_u8; 8];
        let start_counter = 3u32;

        let (mut ctx_a, mut ctx_b) = test_st_context(8);
        let (mut gen, mut ev) = mock_vm();

        let aes_gen = setup_ctr(key, iv, &mut gen);
        let aes_ev = setup_ctr(key, iv, &mut ev);

        let msg = vec![42u8; 128];

        let keystream_gen = aes_gen.alloc_keystream(&mut gen, msg.len()).unwrap();
        let keystream_ev = aes_ev.alloc_keystream(&mut ev, msg.len()).unwrap();

        let msg_ref_gen: Vector<U8> = gen.alloc_vec(msg.len()).unwrap();
        gen.mark_public(msg_ref_gen).unwrap();
        gen.assign(msg_ref_gen, msg.clone()).unwrap();
        gen.commit(msg_ref_gen).unwrap();

        let msg_ref_ev: Vector<U8> = ev.alloc_vec(msg.len()).unwrap();
        ev.mark_public(msg_ref_ev).unwrap();
        ev.assign(msg_ref_ev, msg.clone()).unwrap();
        ev.commit(msg_ref_ev).unwrap();

        // Per-block counters increment from `start_counter`.
        let mut ctr = start_counter..;
        keystream_gen
            .assign(&mut gen, nonce, move || ctr.next().unwrap().to_be_bytes())
            .unwrap();

        let mut ctr = start_counter..;
        keystream_ev
            .assign(&mut ev, nonce, move || ctr.next().unwrap().to_be_bytes())
            .unwrap();

        let cipher_out_gen = keystream_gen.apply(&mut gen, msg_ref_gen).unwrap();
        let cipher_out_ev = keystream_ev.apply(&mut ev, msg_ref_ev).unwrap();

        let (ct_gen, ct_ev) = tokio::try_join!(
            async {
                let out = gen.decode(cipher_out_gen).unwrap();
                gen.flush(&mut ctx_a).await.unwrap();
                gen.execute(&mut ctx_a).await.unwrap();
                gen.flush(&mut ctx_a).await.unwrap();
                out.await
            },
            async {
                let out = ev.decode(cipher_out_ev).unwrap();
                ev.flush(&mut ctx_b).await.unwrap();
                ev.execute(&mut ctx_b).await.unwrap();
                ev.flush(&mut ctx_b).await.unwrap();
                out.await
            }
        )
        .unwrap();

        assert_eq!(ct_gen, ct_ev);

        let expected = aes_apply_keystream(key, iv, nonce, start_counter as usize, msg);
        assert_eq!(ct_gen, expected);
    }

    /// Both parties encrypt a single public block in ECB mode and must agree
    /// with a reference AES-128 implementation.
    #[tokio::test]
    async fn test_aes_ecb() {
        let key = [1_u8; 16];
        let input = [5_u8; 16];

        let (mut ctx_a, mut ctx_b) = test_st_context(8);
        let (mut gen, mut ev) = mock_vm();

        let aes_gen = setup_block(key, &mut gen);
        let aes_ev = setup_block(key, &mut ev);

        let block_ref_gen: Array<U8, 16> = gen.alloc().unwrap();
        gen.mark_public(block_ref_gen).unwrap();
        gen.assign(block_ref_gen, input).unwrap();
        gen.commit(block_ref_gen).unwrap();

        let block_ref_ev: Array<U8, 16> = ev.alloc().unwrap();
        ev.mark_public(block_ref_ev).unwrap();
        ev.assign(block_ref_ev, input).unwrap();
        ev.commit(block_ref_ev).unwrap();

        let block_gen = aes_gen.alloc_block(&mut gen, block_ref_gen).unwrap();
        let block_ev = aes_ev.alloc_block(&mut ev, block_ref_ev).unwrap();

        // Fix: was misspelled `ciphetext_ev`.
        let (ciphertext_gen, ciphertext_ev) = tokio::try_join!(
            async {
                let out = gen.decode(block_gen).unwrap();
                gen.flush(&mut ctx_a).await.unwrap();
                gen.execute(&mut ctx_a).await.unwrap();
                gen.flush(&mut ctx_a).await.unwrap();
                out.await
            },
            async {
                let out = ev.decode(block_ev).unwrap();
                ev.flush(&mut ctx_b).await.unwrap();
                ev.execute(&mut ctx_b).await.unwrap();
                ev.flush(&mut ctx_b).await.unwrap();
                out.await
            }
        )
        .unwrap();

        assert_eq!(ciphertext_gen, ciphertext_ev);

        let expected = aes128(key, input);
        assert_eq!(ciphertext_gen, expected);
    }

    /// Creates a semi-honest garbler/evaluator VM pair backed by ideal COT.
    fn mock_vm() -> (impl Vm<Binary>, impl Vm<Binary>) {
        let mut rng = StdRng::seed_from_u64(0);
        let delta = Delta::random(&mut rng);
        let (cot_send, cot_recv) = ideal_cot(delta.into_inner());

        let gen = Garbler::new(cot_send, [0u8; 16], delta);
        let ev = Evaluator::new(cot_recv);

        (gen, ev)
    }

    /// Allocates and commits public key/IV references and returns an
    /// [`Aes128`] configured for CTR mode.
    fn setup_ctr(key: [u8; 16], iv: [u8; 4], vm: &mut dyn Vm<Binary>) -> Aes128 {
        let key_ref: Array<U8, 16> = vm.alloc().unwrap();
        vm.mark_public(key_ref).unwrap();
        vm.assign(key_ref, key).unwrap();
        vm.commit(key_ref).unwrap();

        let iv_ref: Array<U8, 4> = vm.alloc().unwrap();
        vm.mark_public(iv_ref).unwrap();
        vm.assign(iv_ref, iv).unwrap();
        vm.commit(iv_ref).unwrap();

        let mut aes = Aes128::default();
        aes.set_key(key_ref);
        aes.set_iv(iv_ref);

        aes
    }

    /// Allocates and commits a public key reference and returns an
    /// [`Aes128`] for single-block (ECB) use.
    fn setup_block(key: [u8; 16], vm: &mut dyn Vm<Binary>) -> Aes128 {
        let key_ref: Array<U8, 16> = vm.alloc().unwrap();
        vm.mark_public(key_ref).unwrap();
        vm.assign(key_ref, key).unwrap();
        vm.commit(key_ref).unwrap();

        let mut aes = Aes128::default();
        aes.set_key(key_ref);

        aes
    }

    /// Reference AES-128-CTR (32-bit big-endian counter) implementation.
    fn aes_apply_keystream(
        key: [u8; 16],
        iv: [u8; 4],
        explicit_nonce: [u8; 8],
        start_ctr: usize,
        msg: Vec<u8>,
    ) -> Vec<u8> {
        use ::cipher::{KeyIvInit, StreamCipher, StreamCipherSeek};
        use aes::Aes128;
        use ctr::Ctr32BE;

        // Full IV layout: implicit IV (4) || explicit nonce (8) || counter (4).
        let mut full_iv = [0u8; 16];
        full_iv[0..4].copy_from_slice(&iv);
        full_iv[4..12].copy_from_slice(&explicit_nonce);

        let mut cipher = Ctr32BE::<Aes128>::new(&key.into(), &full_iv.into());
        let mut out = msg.clone();

        cipher
            .try_seek(start_ctr * 16)
            .expect("start counter is less than keystream length");
        cipher.apply_keystream(&mut out);

        out
    }

    /// Reference AES-128 single-block encryption.
    fn aes128(key: [u8; 16], msg: [u8; 16]) -> [u8; 16] {
        use ::aes::Aes128 as TestAes128;
        use ::cipher::{BlockEncrypt, KeyInit};

        let mut msg = msg.into();
        let cipher = TestAes128::new(&key.into());
        cipher.encrypt_block(&mut msg);

        msg.into()
    }
}

View File

@@ -0,0 +1,299 @@
//! This crate provides implementations of 2PC ciphers for encryption with a
//! shared key.
//!
//! Both parties can work together to encrypt and decrypt messages with
//! different visibility configurations. See [`Cipher`] and [`Keystream`] for
//! more information on the interface.
#![deny(missing_docs, unreachable_pub, unused_must_use)]
#![deny(clippy::all)]
#![forbid(unsafe_code)]
pub mod aes;
use async_trait::async_trait;
use mpz_circuits::circuits::xor;
use mpz_memory_core::{
binary::{Binary, U8},
MemoryExt, Repr, Slice, StaticSize, ToRaw, Vector,
};
use mpz_vm_core::{prelude::*, Call, CallBuilder, CallError, Vm};
use std::{collections::VecDeque, sync::Arc};
/// Provides computation of 2PC ciphers in counter and ECB mode.
///
/// After setting `key` and `iv` allows to compute the keystream via
/// [`Cipher::alloc_keystream`] or a single block in ECB mode via
/// [`Cipher::alloc_block`]. [`Keystream`] provides more tooling to compute the
/// final cipher output in counter mode.
#[async_trait]
pub trait Cipher {
    /// The error type for the cipher.
    type Error: std::error::Error + Send + Sync + 'static;
    /// Cipher key.
    type Key;
    /// Cipher IV.
    type Iv;
    /// Cipher nonce.
    type Nonce;
    /// Cipher counter.
    type Counter;
    /// Cipher block.
    type Block;

    /// Sets the key.
    fn set_key(&mut self, key: Self::Key);

    /// Sets the initialization vector.
    fn set_iv(&mut self, iv: Self::Iv);

    /// Returns the key reference.
    fn key(&self) -> Option<&Self::Key>;

    /// Returns the iv reference.
    fn iv(&self) -> Option<&Self::Iv>;

    /// Allocates a single block in ECB mode.
    fn alloc_block(
        &self,
        vm: &mut dyn Vm<Binary>,
        input: Self::Block,
    ) -> Result<Self::Block, Self::Error>;

    /// Allocates a single block in counter mode.
    #[allow(clippy::type_complexity)]
    fn alloc_ctr_block(
        &self,
        vm: &mut dyn Vm<Binary>,
    ) -> Result<CtrBlock<Self::Nonce, Self::Counter, Self::Block>, Self::Error>;

    /// Allocates a keystream in counter mode.
    ///
    /// # Arguments
    ///
    /// * `vm` - Virtual machine to allocate into.
    /// * `len` - Length of the stream in bytes.
    #[allow(clippy::type_complexity)]
    fn alloc_keystream(
        &self,
        vm: &mut dyn Vm<Binary>,
        len: usize,
    ) -> Result<Keystream<Self::Nonce, Self::Counter, Self::Block>, Self::Error>;
}
/// A block in counter mode.
///
/// Holds VM memory references for one block's inputs and output; the actual
/// values are assigned/computed through the VM.
#[derive(Debug, Clone, Copy)]
pub struct CtrBlock<N, C, O> {
    /// Explicit nonce reference.
    pub explicit_nonce: N,
    /// Counter reference.
    pub counter: C,
    /// Output reference.
    pub output: O,
}
/// The keystream of the cipher.
///
/// Can be used to XOR with the cipher input to operate the cipher in counter
/// mode.
pub struct Keystream<N, C, O> {
    /// Sequential keystream blocks. Outputs are stored in contiguous memory.
    blocks: VecDeque<CtrBlock<N, C, O>>,
}
impl<N, C, O> Default for Keystream<N, C, O> {
fn default() -> Self {
Self {
blocks: VecDeque::new(),
}
}
}
impl<N, C, O> Keystream<N, C, O>
where
    N: Repr<Binary> + StaticSize<Binary> + Copy,
    C: Repr<Binary> + StaticSize<Binary> + Copy,
    O: Repr<Binary> + StaticSize<Binary> + Copy,
{
    /// Creates a new keystream from the provided blocks.
    pub fn new(blocks: &[CtrBlock<N, C, O>]) -> Self {
        Self {
            blocks: VecDeque::from_iter(blocks.iter().copied()),
        }
    }

    /// Consumes keystream material.
    ///
    /// Returns the consumed keystream material, leaving the remaining material
    /// in place.
    ///
    /// # Arguments
    ///
    /// * `len` - Length of the keystream in bytes to return.
    pub fn consume(&mut self, len: usize) -> Result<Self, CipherError> {
        // Round up to whole blocks.
        let block_count = len.div_ceil(self.block_size());
        if block_count > self.blocks.len() {
            return Err(CipherError::new("insufficient keystream"));
        }

        // NOTE(review): `split_off` returns the *tail*, so the returned
        // keystream holds the last `block_count` blocks. Blocks are
        // interchangeable until their nonce/counter inputs are assigned, but
        // confirm no caller relies on front-to-back consumption order.
        let blocks = self.blocks.split_off(self.blocks.len() - block_count);

        Ok(Self { blocks })
    }

    /// Applies the keystream to the provided input.
    ///
    /// XORs each keystream block with the matching chunk of `input`.
    ///
    /// # Arguments
    ///
    /// * `vm` - Virtual machine.
    /// * `input` - Input data.
    pub fn apply(
        &self,
        vm: &mut dyn Vm<Binary>,
        input: Vector<U8>,
    ) -> Result<Vector<U8>, CipherError> {
        if input.len() != self.len() {
            return Err(CipherError::new("input length must match keystream length"));
        } else if self.blocks.is_empty() {
            return Err(CipherError::new("no keystream material available"));
        }

        // One shared XOR circuit; its width is in bits.
        let xor = Arc::new(xor(self.block_size() * 8));

        let mut pos = 0;
        let mut outputs = Vec::with_capacity(self.blocks.len());
        for block in &self.blocks {
            let call = CallBuilder::new(xor.clone())
                .arg(block.output)
                .arg(
                    input
                        .get(pos..pos + self.block_size())
                        .expect("input length was checked"),
                )
                .build()?;

            let output: Vector<U8> = vm.call(call).map_err(CipherError::new)?;
            outputs.push(output);

            pos += self.block_size();
        }

        // Merge the per-block XOR outputs into one contiguous vector.
        let output = flatten_blocks(vm, outputs.iter().map(|block| block.to_raw()))?;

        Ok(output)
    }

    /// Returns `len` bytes of the keystream as a vector.
    pub fn to_vector(
        &self,
        vm: &mut dyn Vm<Binary>,
        len: usize,
    ) -> Result<Vector<U8>, CipherError> {
        if len == 0 {
            return Err(CipherError::new("length must be greater than 0"));
        } else if self.blocks.is_empty() {
            return Err(CipherError::new("no keystream material available"));
        }

        // `len` must cover exactly the blocks held by this keystream.
        let block_count = len.div_ceil(self.block_size());
        if block_count != self.blocks.len() {
            return Err(CipherError::new("length does not match keystream length"));
        }

        let mut keystream =
            flatten_blocks(vm, self.blocks.iter().map(|block| block.output.to_raw()))?;
        // Drop trailing bytes of the final, partially-used block.
        keystream.truncate(len);

        Ok(keystream)
    }

    /// Assigns the keystream inputs.
    ///
    /// # Arguments
    ///
    /// * `vm` - Virtual machine.
    /// * `explicit_nonce` - Explicit nonce.
    /// * `ctr` - Counter function. The provided function will be called to
    ///   assign the counter values for each block.
    pub fn assign(
        &self,
        vm: &mut dyn Vm<Binary>,
        explicit_nonce: N::Clear,
        mut ctr: impl FnMut() -> C::Clear,
    ) -> Result<(), CipherError>
    where
        N::Clear: Copy,
        C::Clear: Copy,
    {
        // All blocks share the nonce; `ctr` is invoked once per block in
        // iteration order to produce the counter values.
        for block in &self.blocks {
            vm.assign(block.explicit_nonce, explicit_nonce)
                .map_err(CipherError::new)?;
            vm.commit(block.explicit_nonce).map_err(CipherError::new)?;

            vm.assign(block.counter, ctr()).map_err(CipherError::new)?;
            vm.commit(block.counter).map_err(CipherError::new)?;
        }

        Ok(())
    }

    /// Returns the block size in bytes.
    fn block_size(&self) -> usize {
        O::SIZE / 8
    }

    /// Returns the length of the keystream in bytes.
    fn len(&self) -> usize {
        self.block_size() * self.blocks.len()
    }
}
/// Concatenates the given memory slices into a single contiguous vector by
/// routing them through an identity circuit.
fn flatten_blocks(
    vm: &mut dyn Vm<Binary>,
    blocks: impl IntoIterator<Item = Slice>,
) -> Result<Vector<U8>, CipherError> {
    use mpz_circuits::CircuitBuilder;

    let slices: Vec<Slice> = blocks.into_iter().collect();
    let total_bits: usize = slices.iter().map(|slice| slice.len()).sum();

    // Identity circuit: each input bit is wired straight to an output bit.
    let mut circ_builder = CircuitBuilder::new();
    for _ in 0..total_bits {
        let input = circ_builder.add_input();
        let output = circ_builder.add_id_gate(input);
        circ_builder.add_output(output);
    }
    let identity = circ_builder
        .build()
        .expect("flatten circuit should be valid");

    // Feed every slice as an argument, in order.
    let call = slices
        .into_iter()
        .fold(Call::builder(Arc::new(identity)), |builder, slice| {
            builder.arg(slice)
        })
        .build()
        .map_err(CipherError::new)?;

    vm.call(call).map_err(CipherError::new)
}
/// A cipher error.
///
/// Opaque wrapper around whatever error occurred underneath (VM failures,
/// call-building failures, or validation errors).
#[derive(Debug, thiserror::Error)]
#[error("{source}")]
pub struct CipherError {
    // Boxed trait object so any error type can be carried without generics.
    #[source]
    source: Box<dyn std::error::Error + Send + Sync>,
}
impl CipherError {
pub(crate) fn new<E>(source: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
Self {
source: source.into(),
}
}
}
// Lets `?` convert a `CallError` directly into a `CipherError`.
impl From<CallError> for CipherError {
    fn from(value: CallError) -> Self {
        Self::new(value)
    }
}

View File

@@ -0,0 +1,29 @@
[package]
name = "tlsn-deap"
version = "0.1.0-alpha.12"
edition = "2021"
[lints]
workspace = true
[dependencies]
mpz-core = { workspace = true }
mpz-common = { workspace = true }
mpz-vm-core = { workspace = true }
rangeset = { workspace = true }
thiserror = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serio = { workspace = true }
async-trait = { workspace = true }
futures = { workspace = true }
tokio = { workspace = true, features = ["sync"] }
[dev-dependencies]
mpz-circuits = { workspace = true }
mpz-garble = { workspace = true }
mpz-ot = { workspace = true }
mpz-zk = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }
rand = { workspace = true }
rand06-compat = { workspace = true }

View File

@@ -0,0 +1,651 @@
//! Dual-execution with Asymmetric Privacy (DEAP) protocol.
#![deny(missing_docs, unreachable_pub, unused_must_use)]
#![deny(clippy::all)]
#![forbid(unsafe_code)]
mod map;
use std::{mem, sync::Arc};
use async_trait::async_trait;
use mpz_common::Context;
use mpz_core::bitvec::BitVec;
use mpz_vm_core::{
memory::{binary::Binary, DecodeFuture, Memory, Repr, Slice, View},
Call, Callable, Execute, Vm, VmError,
};
use rangeset::{Difference, RangeSet, UnionMut};
use tokio::sync::{Mutex, MutexGuard, OwnedMutexGuard};
type Error = DeapError;
/// The role of the DEAP VM.
///
/// During [`Deap::finalize`] the follower checks that outputs from the MPC
/// and ZK VMs agree; the leader reveals nothing extra.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[allow(missing_docs)]
pub enum Role {
    Leader,
    Follower,
}
/// DEAP VM.
///
/// Runs an MPC VM and a ZK VM side by side over mirrored memory; the MPC VM
/// address space is the public-facing one.
#[derive(Debug)]
pub struct Deap<Mpc, Zk> {
    role: Role,
    // Wrapped in `Arc<Mutex<..>>` so owned guards can be moved into the
    // concurrent flush/preprocess tasks.
    mpc: Arc<Mutex<Mpc>>,
    zk: Arc<Mutex<Zk>>,
    /// Mapping between the memories of the MPC and ZK VMs.
    memory_map: map::MemoryMap,
    /// Ranges of the follower's private inputs in the MPC VM.
    follower_input_ranges: RangeSet<usize>,
    /// Private inputs of the follower in the MPC VM.
    follower_inputs: Vec<Slice>,
    /// Outputs of the follower from the ZK VM. The references
    /// correspond to the MPC VM.
    outputs: Vec<(Slice, DecodeFuture<BitVec>)>,
}
impl<Mpc, Zk> Deap<Mpc, Zk> {
    /// Creates a new DEAP VM.
    pub fn new(role: Role, mpc: Mpc, zk: Zk) -> Self {
        Self {
            role,
            mpc: Arc::new(Mutex::new(mpc)),
            zk: Arc::new(Mutex::new(zk)),
            memory_map: map::MemoryMap::default(),
            follower_input_ranges: RangeSet::default(),
            follower_inputs: Vec::default(),
            outputs: Vec::default(),
        }
    }

    /// Returns the MPC and ZK VMs.
    ///
    /// # Panics
    ///
    /// Panics if another clone of either VM's `Arc` is still alive (e.g. an
    /// owned guard from [`Deap::zk_owned`]).
    pub fn into_inner(self) -> (Mpc, Zk) {
        (
            Arc::into_inner(self.mpc).unwrap().into_inner(),
            Arc::into_inner(self.zk).unwrap().into_inner(),
        )
    }

    /// Returns a mutable reference to the ZK VM.
    ///
    /// # Panics
    ///
    /// Panics if the mutex is locked by another thread.
    pub fn zk(&self) -> MutexGuard<'_, Zk> {
        self.zk.try_lock().unwrap()
    }

    /// Returns an owned mutex guard to the ZK VM.
    ///
    /// # Panics
    ///
    /// Panics if the mutex is locked by another thread.
    pub fn zk_owned(&self) -> OwnedMutexGuard<Zk> {
        self.zk.clone().try_lock_owned().unwrap()
    }

    /// Translates a value from the MPC VM address space to the ZK VM address
    /// space.
    pub fn translate<T: Repr<Binary>>(&self, value: T) -> Result<T, VmError> {
        self.memory_map.try_get(value.to_raw()).map(T::from_raw)
    }

    // Test-only accessor for the MPC VM; panics if the mutex is held.
    #[cfg(test)]
    fn mpc(&self) -> MutexGuard<'_, Mpc> {
        self.mpc.try_lock().unwrap()
    }
}
impl<Mpc, Zk> Deap<Mpc, Zk>
where
    Mpc: Vm<Binary> + Send + 'static,
    Zk: Vm<Binary> + Send + 'static,
{
    /// Finalizes the DEAP VM.
    ///
    /// This reveals all private inputs of the follower.
    ///
    /// # Panics
    ///
    /// Panics if either VM mutex is locked elsewhere.
    pub async fn finalize(&mut self, ctx: &mut Context) -> Result<(), VmError> {
        let mut mpc = self.mpc.try_lock().unwrap();
        let mut zk = self.zk.try_lock().unwrap();

        // Decode the private inputs of the follower.
        //
        // # Security
        //
        // This assumes that the decoding process is authenticated from the leader's
        // perspective. In the case of garbled circuits, the leader should be the
        // generator such that the follower proves their inputs using their committed
        // MACs.
        let input_futs = self
            .follower_inputs
            .iter()
            .map(|&input| mpc.decode_raw(input))
            .collect::<Result<Vec<_>, _>>()?;

        mpc.execute_all(ctx).await?;

        // Assign inputs to the ZK VM.
        for (mut decode, &input) in input_futs.into_iter().zip(&self.follower_inputs) {
            // Translate into the ZK VM's address space.
            let input = self.memory_map.try_get(input)?;
            // Follower has already assigned the inputs.
            if let Role::Leader = self.role {
                let value = decode
                    .try_recv()
                    .map_err(VmError::memory)?
                    .expect("input should be decoded");
                zk.assign_raw(input, value)?;
            }

            // Now the follower's inputs are public.
            zk.commit_raw(input)?;
        }

        zk.execute_all(ctx).await.map_err(VmError::execute)?;

        // Follower verifies the outputs are consistent.
        if let Role::Follower = self.role {
            for (output, mut value) in mem::take(&mut self.outputs) {
                // If the output is not available in the MPC VM, we did not execute and decode
                // it. Therefore, we do not need to check for equality.
                //
                // This can occur if some function was preprocessed but ultimately not used.
                if let Some(mpc_output) = mpc.get_raw(output)? {
                    let zk_output = value
                        .try_recv()
                        .map_err(VmError::memory)?
                        .expect("output should be decoded");

                    // Asserts equality of all the output values from both VMs.
                    if zk_output != mpc_output {
                        return Err(VmError::execute(Error::from(ErrorRepr::EqualityCheck)));
                    }
                }
            }
        }

        Ok(())
    }
}
// Memory operations are mirrored into both VMs; the MPC VM's slices are the
// handles callers see, and `memory_map` translates them for the ZK VM.
//
// NOTE: every method uses `try_lock().unwrap()`, so neither VM may be locked
// elsewhere while these are called.
impl<Mpc, Zk> Memory<Binary> for Deap<Mpc, Zk>
where
    Mpc: Memory<Binary, Error = VmError>,
    Zk: Memory<Binary, Error = VmError>,
{
    type Error = VmError;

    fn is_alloc_raw(&self, slice: Slice) -> bool {
        self.mpc.try_lock().unwrap().is_alloc_raw(slice)
    }

    fn alloc_raw(&mut self, size: usize) -> Result<Slice, VmError> {
        // Allocate in both VMs and record the correspondence.
        let mpc_slice = self.mpc.try_lock().unwrap().alloc_raw(size)?;
        let zk_slice = self.zk.try_lock().unwrap().alloc_raw(size)?;

        self.memory_map.insert(mpc_slice, zk_slice);

        Ok(mpc_slice)
    }

    fn is_assigned_raw(&self, slice: Slice) -> bool {
        self.mpc.try_lock().unwrap().is_assigned_raw(slice)
    }

    fn assign_raw(&mut self, slice: Slice, data: BitVec) -> Result<(), VmError> {
        self.mpc
            .try_lock()
            .unwrap()
            .assign_raw(slice, data.clone())?;

        self.zk
            .try_lock()
            .unwrap()
            .assign_raw(self.memory_map.try_get(slice)?, data)
    }

    fn is_committed_raw(&self, slice: Slice) -> bool {
        self.mpc.try_lock().unwrap().is_committed_raw(slice)
    }

    fn commit_raw(&mut self, slice: Slice) -> Result<(), VmError> {
        // Follower's private inputs are not committed in the ZK VM until finalization.
        let input_minus_follower = slice.to_range().difference(&self.follower_input_ranges);
        let mut zk = self.zk.try_lock().unwrap();
        for input in input_minus_follower.iter_ranges() {
            zk.commit_raw(
                self.memory_map
                    .try_get(Slice::from_range_unchecked(input))?,
            )?;
        }

        self.mpc.try_lock().unwrap().commit_raw(slice)
    }

    fn get_raw(&self, slice: Slice) -> Result<Option<BitVec>, VmError> {
        self.mpc.try_lock().unwrap().get_raw(slice)
    }

    fn decode_raw(&mut self, slice: Slice) -> Result<DecodeFuture<BitVec>, VmError> {
        // Keep the ZK-side future so `finalize` can cross-check outputs.
        let fut = self
            .zk
            .try_lock()
            .unwrap()
            .decode_raw(self.memory_map.try_get(slice)?)?;
        self.outputs.push((slice, fut));

        self.mpc.try_lock().unwrap().decode_raw(slice)
    }
}
// Visibility markings are mirrored into both VMs. Data that is private to the
// follower (private on the follower side, blind on the leader side) is marked
// *public* in the ZK VM, because it is revealed there during finalization.
impl<Mpc, Zk> View<Binary> for Deap<Mpc, Zk>
where
    Mpc: View<Binary, Error = VmError>,
    Zk: View<Binary, Error = VmError>,
{
    type Error = VmError;

    fn mark_public_raw(&mut self, slice: Slice) -> Result<(), VmError> {
        self.mpc.try_lock().unwrap().mark_public_raw(slice)?;
        self.zk
            .try_lock()
            .unwrap()
            .mark_public_raw(self.memory_map.try_get(slice)?)
    }

    fn mark_private_raw(&mut self, slice: Slice) -> Result<(), VmError> {
        let mut zk = self.zk.try_lock().unwrap();
        let mut mpc = self.mpc.try_lock().unwrap();
        match self.role {
            Role::Leader => {
                mpc.mark_private_raw(slice)?;
                zk.mark_private_raw(self.memory_map.try_get(slice)?)?;
            }
            Role::Follower => {
                mpc.mark_private_raw(slice)?;
                // Follower's private inputs will become public during finalization.
                zk.mark_public_raw(self.memory_map.try_get(slice)?)?;
                self.follower_input_ranges.union_mut(&slice.to_range());
                self.follower_inputs.push(slice);
            }
        }

        Ok(())
    }

    fn mark_blind_raw(&mut self, slice: Slice) -> Result<(), VmError> {
        let mut zk = self.zk.try_lock().unwrap();
        let mut mpc = self.mpc.try_lock().unwrap();
        match self.role {
            Role::Leader => {
                mpc.mark_blind_raw(slice)?;
                // Follower's private inputs will become public during finalization.
                zk.mark_public_raw(self.memory_map.try_get(slice)?)?;
                self.follower_input_ranges.union_mut(&slice.to_range());
                self.follower_inputs.push(slice);
            }
            Role::Follower => {
                mpc.mark_blind_raw(slice)?;
                zk.mark_blind_raw(self.memory_map.try_get(slice)?)?;
            }
        }

        Ok(())
    }
}
// Calls are executed in both VMs; the ZK-side call uses translated argument
// slices, and the resulting output slices are recorded in the memory map.
impl<Mpc, Zk> Callable<Binary> for Deap<Mpc, Zk>
where
    Mpc: Vm<Binary>,
    Zk: Vm<Binary>,
{
    fn call_raw(&mut self, call: Call) -> Result<Slice, VmError> {
        // Mirror the call into the ZK VM's address space.
        let (circ, inputs) = call.clone().into_parts();
        let mut zk_builder = Call::builder(circ);
        for input in inputs {
            let translated = self.memory_map.try_get(input)?;
            zk_builder = zk_builder.arg(translated);
        }
        let zk_call = zk_builder.build().expect("call should be valid");

        let mpc_output = self.mpc.try_lock().unwrap().call_raw(call)?;
        let zk_output = self.zk.try_lock().unwrap().call_raw(zk_call)?;

        self.memory_map.insert(mpc_output, zk_output);

        Ok(mpc_output)
    }
}
#[async_trait]
impl<Mpc, Zk> Execute for Deap<Mpc, Zk>
where
    Mpc: Execute + Send + 'static,
    Zk: Execute + Send + 'static,
{
    fn wants_flush(&self) -> bool {
        self.mpc.try_lock().unwrap().wants_flush() || self.zk.try_lock().unwrap().wants_flush()
    }

    async fn flush(&mut self, ctx: &mut Context) -> Result<(), VmError> {
        // Owned guards so each VM can be flushed in its own concurrent task.
        let mut zk = self.zk.clone().try_lock_owned().unwrap();
        let mut mpc = self.mpc.clone().try_lock_owned().unwrap();
        ctx.try_join(
            async move |ctx| zk.flush(ctx).await,
            async move |ctx| mpc.flush(ctx).await,
        )
        .await
        .map_err(VmError::execute)??;

        Ok(())
    }

    fn wants_preprocess(&self) -> bool {
        self.mpc.try_lock().unwrap().wants_preprocess()
            || self.zk.try_lock().unwrap().wants_preprocess()
    }

    async fn preprocess(&mut self, ctx: &mut Context) -> Result<(), VmError> {
        // Same owned-guard pattern as `flush`: preprocess both VMs concurrently.
        let mut zk = self.zk.clone().try_lock_owned().unwrap();
        let mut mpc = self.mpc.clone().try_lock_owned().unwrap();
        ctx.try_join(
            async move |ctx| zk.preprocess(ctx).await,
            async move |ctx| mpc.preprocess(ctx).await,
        )
        .await
        .map_err(VmError::execute)??;

        Ok(())
    }

    fn wants_execute(&self) -> bool {
        self.mpc.try_lock().unwrap().wants_execute()
    }

    async fn execute(&mut self, ctx: &mut Context) -> Result<(), VmError> {
        // Only MPC VM is executed until finalization.
        self.mpc.try_lock().unwrap().execute(ctx).await
    }
}
// Crate-internal error type for the DEAP VM; surfaced to callers wrapped in
// `VmError`.
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub(crate) struct DeapError(#[from] ErrorRepr);

#[derive(Debug, thiserror::Error)]
enum ErrorRepr {
    /// The follower observed differing outputs from the MPC and ZK VMs.
    #[error("equality check failed")]
    EqualityCheck,
}
#[cfg(test)]
mod tests {
    use mpz_circuits::circuits::AES128;
    use mpz_common::context::test_st_context;
    use mpz_core::Block;
    use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
    use mpz_ot::ideal::{cot::ideal_cot, rcot::ideal_rcot};
    use mpz_vm_core::{
        memory::{binary::U8, correlated::Delta, Array},
        prelude::*,
    };
    use mpz_zk::{Prover, Verifier};
    use rand::{rngs::StdRng, SeedableRng};

    use super::*;

    /// Honest leader and follower jointly encrypt with AES-128, finalize, and
    /// must decode the same ciphertext.
    #[tokio::test]
    async fn test_deap() {
        let mut rng = StdRng::seed_from_u64(0);
        let delta_mpc = Delta::random(&mut rng);
        let delta_zk = Delta::random(&mut rng);
        let (mut ctx_a, mut ctx_b) = test_st_context(8);
        let (rcot_send, rcot_recv) = ideal_rcot(Block::ZERO, delta_zk.into_inner());
        let (cot_send, cot_recv) = ideal_cot(delta_mpc.into_inner());

        let gb = Garbler::new(cot_send, [0u8; 16], delta_mpc);
        let ev = Evaluator::new(cot_recv);
        let prover = Prover::new(rcot_recv);
        let verifier = Verifier::new(delta_zk, rcot_send);

        let mut leader = Deap::new(Role::Leader, gb, prover);
        let mut follower = Deap::new(Role::Follower, ev, verifier);

        let (ct_leader, ct_follower) = futures::join!(
            async {
                // Leader holds the key privately; the message is blind to it.
                let key: Array<U8, 16> = leader.alloc().unwrap();
                let msg: Array<U8, 16> = leader.alloc().unwrap();

                leader.mark_private(key).unwrap();
                leader.mark_blind(msg).unwrap();
                leader.assign(key, [42u8; 16]).unwrap();
                leader.commit(key).unwrap();
                leader.commit(msg).unwrap();

                let ct: Array<U8, 16> = leader
                    .call(
                        Call::builder(AES128.clone())
                            .arg(key)
                            .arg(msg)
                            .build()
                            .unwrap(),
                    )
                    .unwrap();

                let ct = leader.decode(ct).unwrap();

                leader.flush(&mut ctx_a).await.unwrap();
                leader.execute(&mut ctx_a).await.unwrap();
                leader.flush(&mut ctx_a).await.unwrap();
                leader.finalize(&mut ctx_a).await.unwrap();

                ct.await.unwrap()
            },
            async {
                // Follower holds the message privately; the key is blind to it.
                let key: Array<U8, 16> = follower.alloc().unwrap();
                let msg: Array<U8, 16> = follower.alloc().unwrap();

                follower.mark_blind(key).unwrap();
                follower.mark_private(msg).unwrap();
                follower.assign(msg, [69u8; 16]).unwrap();
                follower.commit(key).unwrap();
                follower.commit(msg).unwrap();

                let ct: Array<U8, 16> = follower
                    .call(
                        Call::builder(AES128.clone())
                            .arg(key)
                            .arg(msg)
                            .build()
                            .unwrap(),
                    )
                    .unwrap();

                let ct = follower.decode(ct).unwrap();

                follower.flush(&mut ctx_b).await.unwrap();
                follower.execute(&mut ctx_b).await.unwrap();
                follower.flush(&mut ctx_b).await.unwrap();
                follower.finalize(&mut ctx_b).await.unwrap();

                ct.await.unwrap()
            }
        );

        assert_eq!(ct_leader, ct_follower);
    }

    /// Same as `test_deap`, but the ZK VM memory is offset beforehand so the
    /// MPC and ZK address spaces differ and must be translated via the map.
    #[tokio::test]
    async fn test_deap_desync_memory() {
        let mut rng = StdRng::seed_from_u64(0);
        let delta_mpc = Delta::random(&mut rng);
        let delta_zk = Delta::random(&mut rng);
        let (mut ctx_a, mut ctx_b) = test_st_context(8);
        let (rcot_send, rcot_recv) = ideal_rcot(Block::ZERO, delta_zk.into_inner());
        let (cot_send, cot_recv) = ideal_cot(delta_mpc.into_inner());

        let gb = Garbler::new(cot_send, [0u8; 16], delta_mpc);
        let ev = Evaluator::new(cot_recv);
        let prover = Prover::new(rcot_recv);
        let verifier = Verifier::new(delta_zk, rcot_send);

        let mut leader = Deap::new(Role::Leader, gb, prover);
        let mut follower = Deap::new(Role::Follower, ev, verifier);

        // Desynchronize the memories.
        let _ = leader.zk().alloc_raw(1).unwrap();
        let _ = follower.zk().alloc_raw(1).unwrap();

        let (ct_leader, ct_follower) = futures::join!(
            async {
                let key: Array<U8, 16> = leader.alloc().unwrap();
                let msg: Array<U8, 16> = leader.alloc().unwrap();

                leader.mark_private(key).unwrap();
                leader.mark_blind(msg).unwrap();
                leader.assign(key, [42u8; 16]).unwrap();
                leader.commit(key).unwrap();
                leader.commit(msg).unwrap();

                let ct: Array<U8, 16> = leader
                    .call(
                        Call::builder(AES128.clone())
                            .arg(key)
                            .arg(msg)
                            .build()
                            .unwrap(),
                    )
                    .unwrap();

                let ct = leader.decode(ct).unwrap();

                leader.flush(&mut ctx_a).await.unwrap();
                leader.execute(&mut ctx_a).await.unwrap();
                leader.flush(&mut ctx_a).await.unwrap();
                leader.finalize(&mut ctx_a).await.unwrap();

                ct.await.unwrap()
            },
            async {
                let key: Array<U8, 16> = follower.alloc().unwrap();
                let msg: Array<U8, 16> = follower.alloc().unwrap();

                follower.mark_blind(key).unwrap();
                follower.mark_private(msg).unwrap();
                follower.assign(msg, [69u8; 16]).unwrap();
                follower.commit(key).unwrap();
                follower.commit(msg).unwrap();

                let ct: Array<U8, 16> = follower
                    .call(
                        Call::builder(AES128.clone())
                            .arg(key)
                            .arg(msg)
                            .build()
                            .unwrap(),
                    )
                    .unwrap();

                let ct = follower.decode(ct).unwrap();

                follower.flush(&mut ctx_b).await.unwrap();
                follower.execute(&mut ctx_b).await.unwrap();
                follower.flush(&mut ctx_b).await.unwrap();
                follower.finalize(&mut ctx_b).await.unwrap();

                ct.await.unwrap()
            }
        );

        assert_eq!(ct_leader, ct_follower);
    }

    // Tests that the leader can not use different inputs in each VM without
    // detection by the follower.
    #[tokio::test]
    async fn test_malicious() {
        let mut rng = StdRng::seed_from_u64(0);
        let delta_mpc = Delta::random(&mut rng);
        let delta_zk = Delta::random(&mut rng);
        let (mut ctx_a, mut ctx_b) = test_st_context(8);
        let (rcot_send, rcot_recv) = ideal_rcot(Block::ZERO, delta_zk.into_inner());
        let (cot_send, cot_recv) = ideal_cot(delta_mpc.into_inner());

        let gb = Garbler::new(cot_send, [1u8; 16], delta_mpc);
        let ev = Evaluator::new(cot_recv);
        let prover = Prover::new(rcot_recv);
        let verifier = Verifier::new(delta_zk, rcot_send);

        let mut leader = Deap::new(Role::Leader, gb, prover);
        let mut follower = Deap::new(Role::Follower, ev, verifier);

        let (_, follower_res) = futures::join!(
            async {
                let key: Array<U8, 16> = leader.alloc().unwrap();
                let msg: Array<U8, 16> = leader.alloc().unwrap();

                leader.mark_private(key).unwrap();
                leader.mark_blind(msg).unwrap();
                // Use different inputs in each VM.
                leader.mpc().assign(key, [42u8; 16]).unwrap();
                leader
                    .zk
                    .try_lock()
                    .unwrap()
                    .assign(key, [69u8; 16])
                    .unwrap();
                leader.commit(key).unwrap();
                leader.commit(msg).unwrap();

                let ct: Array<U8, 16> = leader
                    .call(
                        Call::builder(AES128.clone())
                            .arg(key)
                            .arg(msg)
                            .build()
                            .unwrap(),
                    )
                    .unwrap();

                let ct = leader.decode(ct).unwrap();

                leader.flush(&mut ctx_a).await.unwrap();
                leader.execute(&mut ctx_a).await.unwrap();
                leader.flush(&mut ctx_a).await.unwrap();
                leader.finalize(&mut ctx_a).await.unwrap();

                ct.await.unwrap()
            },
            async {
                let key: Array<U8, 16> = follower.alloc().unwrap();
                let msg: Array<U8, 16> = follower.alloc().unwrap();

                follower.mark_blind(key).unwrap();
                follower.mark_private(msg).unwrap();
                follower.assign(msg, [69u8; 16]).unwrap();
                follower.commit(key).unwrap();
                follower.commit(msg).unwrap();

                let ct: Array<U8, 16> = follower
                    .call(
                        Call::builder(AES128.clone())
                            .arg(key)
                            .arg(msg)
                            .build()
                            .unwrap(),
                    )
                    .unwrap();

                drop(follower.decode(ct).unwrap());

                follower.flush(&mut ctx_b).await.unwrap();
                follower.execute(&mut ctx_b).await.unwrap();
                follower.flush(&mut ctx_b).await.unwrap();
                // Finalization must fail the equality check.
                follower.finalize(&mut ctx_b).await
            }
        );

        assert!(follower_res.is_err());
    }
}

View File

@@ -0,0 +1,111 @@
use std::ops::Range;
use mpz_vm_core::{memory::Slice, VmError};
use rangeset::Subset;
/// A mapping between the memories of the MPC and ZK VMs.
///
/// Stores parallel, ascending lists of allocation ranges: `mpc[i]`
/// corresponds byte-for-byte to `zk[i]`.
#[derive(Debug, Default)]
pub(crate) struct MemoryMap {
    mpc: Vec<Range<usize>>,
    zk: Vec<Range<usize>>,
}
impl MemoryMap {
    /// Inserts a new allocation into the map.
    ///
    /// # Panics
    ///
    /// - If the slices are not inserted in the order they are allocated.
    /// - If the slices are not the same length.
    pub(crate) fn insert(&mut self, mpc: Slice, zk: Slice) {
        let mpc = mpc.to_range();
        let zk = zk.to_range();

        assert_eq!(mpc.len(), zk.len(), "slices must be the same length");

        // Ascending, non-overlapping insertion keeps `self.mpc` sorted by
        // start, which is what makes the binary search in `try_get` valid.
        if let Some(last) = self.mpc.last() {
            if last.end > mpc.start {
                panic!("slices must be provided in ascending order");
            }
        }

        self.mpc.push(mpc);
        self.zk.push(zk);
    }

    /// Returns the corresponding allocation in the ZK VM.
    ///
    /// Fails if `mpc` is not fully contained within a single recorded
    /// allocation.
    pub(crate) fn try_get(&self, mpc: Slice) -> Result<Slice, VmError> {
        let mpc_range = mpc.to_range();

        // Find the last stored range that starts at or before the query.
        let pos = match self
            .mpc
            .binary_search_by_key(&mpc_range.start, |range| range.start)
        {
            Ok(pos) => pos,
            // Every stored range starts after the query: no candidate.
            Err(0) => return Err(VmError::memory(format!("invalid memory slice: {mpc}"))),
            Err(pos) => pos - 1,
        };

        let candidate = &self.mpc[pos];
        if mpc_range.is_subset(candidate) {
            // Map into the ZK range at the same relative offset.
            let offset = mpc_range.start - candidate.start;
            let start = self.zk[pos].start + offset;
            let slice = Slice::from_range_unchecked(start..start + mpc_range.len());

            Ok(slice)
        } else {
            Err(VmError::memory(format!("invalid memory slice: {mpc}")))
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Exercises exact, subset, and out-of-bounds lookups across two
    /// allocations.
    #[test]
    fn test_map() {
        let mut map = MemoryMap::default();

        map.insert(
            Slice::from_range_unchecked(0..10),
            Slice::from_range_unchecked(10..20),
        );

        // Range is fully contained.
        assert_eq!(
            map.try_get(Slice::from_range_unchecked(0..10)).unwrap(),
            Slice::from_range_unchecked(10..20)
        );

        // Range is subset.
        assert_eq!(
            map.try_get(Slice::from_range_unchecked(1..9)).unwrap(),
            Slice::from_range_unchecked(11..19)
        );

        // Range is not subset.
        assert!(map.try_get(Slice::from_range_unchecked(0..11)).is_err());

        // Insert another range.
        map.insert(
            Slice::from_range_unchecked(20..30),
            Slice::from_range_unchecked(30..40),
        );

        assert_eq!(
            map.try_get(Slice::from_range_unchecked(20..30)).unwrap(),
            Slice::from_range_unchecked(30..40)
        );

        assert_eq!(
            map.try_get(Slice::from_range_unchecked(21..29)).unwrap(),
            Slice::from_range_unchecked(31..39)
        );

        // Straddles the boundary between the two allocations.
        assert!(map.try_get(Slice::from_range_unchecked(19..21)).is_err());
    }

    /// Mismatched MPC/ZK slice lengths must panic in `insert`.
    #[test]
    #[should_panic]
    fn test_map_length_mismatch() {
        let mut map = MemoryMap::default();

        map.insert(
            Slice::from_range_unchecked(5..10),
            Slice::from_range_unchecked(20..30),
        );
    }
}

View File

@@ -1,19 +0,0 @@
[package]
name = "tlsn-hmac-sha256-circuits"
authors = ["TLSNotary Team"]
description = "The 2PC circuits for TLS HMAC-SHA256 PRF"
keywords = ["tls", "mpc", "2pc", "hmac", "sha256"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.7"
edition = "2021"
[lib]
name = "hmac_sha256_circuits"
[dependencies]
mpz-circuits = { workspace = true }
tracing = { workspace = true }
[dev-dependencies]
ring = { workspace = true }

View File

@@ -1,159 +0,0 @@
use std::cell::RefCell;
use mpz_circuits::{
circuits::{sha256, sha256_compress, sha256_compress_trace, sha256_trace},
types::{U32, U8},
BuilderState, Tracer,
};
static SHA256_INITIAL_STATE: [u32; 8] = [
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
];
/// Returns the outer and inner states of HMAC-SHA256 with the provided key.
///
/// Outer state is H(key ⊕ opad)
///
/// Inner state is H(key ⊕ ipad)
///
/// # Arguments
///
/// * `builder_state` - Reference to builder state.
/// * `key` - N-byte key (must be <= 64 bytes).
pub fn hmac_sha256_partial_trace<'a>(
    builder_state: &'a RefCell<BuilderState>,
    key: &[Tracer<'a, U8>],
) -> ([Tracer<'a, U32>; 8], [Tracer<'a, U32>; 8]) {
    assert!(key.len() <= 64);

    // Bytes beyond the key length are left as the plain opad/ipad
    // constants, which implicitly zero-pads the key to 64 bytes.
    let opad_byte = Tracer::new(
        builder_state,
        builder_state.borrow_mut().get_constant(0x5cu8),
    );
    let ipad_byte = Tracer::new(
        builder_state,
        builder_state.borrow_mut().get_constant(0x36u8),
    );

    let mut opad = [opad_byte; 64];
    let mut ipad = [ipad_byte; 64];
    for (i, k) in key.iter().enumerate() {
        opad[i] = opad[i] ^ *k;
        ipad[i] = ipad[i] ^ *k;
    }

    let iv: [_; 8] = SHA256_INITIAL_STATE
        .map(|v| Tracer::new(builder_state, builder_state.borrow_mut().get_constant(v)));

    let outer_state = sha256_compress_trace(builder_state, iv, opad);
    let inner_state = sha256_compress_trace(builder_state, iv, ipad);

    (outer_state, inner_state)
}
/// Reference implementation of HMAC-SHA256 partial function.
///
/// Returns the outer and inner states of HMAC-SHA256 with the provided key.
///
/// Outer state is H(key ⊕ opad)
///
/// Inner state is H(key ⊕ ipad)
///
/// # Arguments
///
/// * `key` - N-byte key (must be <= 64 bytes).
pub fn hmac_sha256_partial(key: &[u8]) -> ([u32; 8], [u32; 8]) {
    assert!(key.len() <= 64);

    // XOR the key into the pad constants; trailing pad bytes stay as-is,
    // which implicitly zero-pads the key to 64 bytes.
    let mut opad = [0x5cu8; 64];
    let mut ipad = [0x36u8; 64];
    for (pad, k) in opad.iter_mut().zip(key) {
        *pad ^= k;
    }
    for (pad, k) in ipad.iter_mut().zip(key) {
        *pad ^= k;
    }

    let outer_state = sha256_compress(SHA256_INITIAL_STATE, opad);
    let inner_state = sha256_compress(SHA256_INITIAL_STATE, ipad);

    (outer_state, inner_state)
}
/// HMAC-SHA256 finalization function.
///
/// Returns the HMAC-SHA256 digest of the provided message using existing outer
/// and inner states.
///
/// # Arguments
///
/// * `outer_state` - 256-bit outer state.
/// * `inner_state` - 256-bit inner state.
/// * `msg` - N-byte message.
pub fn hmac_sha256_finalize_trace<'a>(
    builder_state: &'a RefCell<BuilderState>,
    outer_state: [Tracer<'a, U32>; 8],
    inner_state: [Tracer<'a, U32>; 8],
    msg: &[Tracer<'a, U8>],
) -> [Tracer<'a, U8>; 32] {
    // Inner hash over the message, resuming from the inner state.
    let inner_hash = sha256_trace(builder_state, inner_state, 64, msg);
    // Outer hash over the inner digest, resuming from the outer state.
    sha256_trace(builder_state, outer_state, 64, &inner_hash)
}
/// Reference implementation of the HMAC-SHA256 finalization function.
///
/// Returns the HMAC-SHA256 digest of the provided message using existing outer
/// and inner states.
///
/// # Arguments
///
/// * `outer_state` - 256-bit outer state.
/// * `inner_state` - 256-bit inner state.
/// * `msg` - N-byte message.
pub fn hmac_sha256_finalize(outer_state: [u32; 8], inner_state: [u32; 8], msg: &[u8]) -> [u8; 32] {
    // Inner hash over the message, then the outer hash over that digest.
    let inner_hash = sha256(inner_state, 64, msg);
    sha256(outer_state, 64, &inner_hash)
}
#[cfg(test)]
mod tests {
    use mpz_circuits::{test_circ, CircuitBuilder};

    use super::*;

    // Checks the partial-state circuit trace against the reference
    // implementation for a 48-byte key.
    #[test]
    fn test_hmac_sha256_partial() {
        let builder = CircuitBuilder::new();
        let key = builder.add_array_input::<u8, 48>();
        let (outer_state, inner_state) = hmac_sha256_partial_trace(builder.state(), &key);
        builder.add_output(outer_state);
        builder.add_output(inner_state);
        let circ = builder.build().unwrap();

        let key = [69u8; 48];

        test_circ!(circ, hmac_sha256_partial, fn(&key) -> ([u32; 8], [u32; 8]));
    }

    // Checks the finalize circuit trace against the reference
    // implementation for a 47-byte message.
    #[test]
    fn test_hmac_sha256_finalize() {
        let builder = CircuitBuilder::new();
        let outer_state = builder.add_array_input::<u32, 8>();
        let inner_state = builder.add_array_input::<u32, 8>();
        let msg = builder.add_array_input::<u8, 47>();
        let hash = hmac_sha256_finalize_trace(builder.state(), outer_state, inner_state, &msg);
        builder.add_output(hash);
        let circ = builder.build().unwrap();

        let key = [69u8; 32];
        let (outer_state, inner_state) = hmac_sha256_partial(&key);

        let msg = [42u8; 47];

        test_circ!(
            circ,
            hmac_sha256_finalize,
            fn(outer_state, inner_state, &msg) -> [u8; 32]
        );
    }
}

View File

@@ -1,61 +0,0 @@
//! HMAC-SHA256 circuits.
#![deny(missing_docs, unreachable_pub, unused_must_use)]
#![deny(clippy::all)]
#![forbid(unsafe_code)]
mod hmac_sha256;
mod prf;
mod session_keys;
mod verify_data;
pub use hmac_sha256::{
hmac_sha256_finalize, hmac_sha256_finalize_trace, hmac_sha256_partial,
hmac_sha256_partial_trace,
};
pub use prf::{prf, prf_trace};
pub use session_keys::{session_keys, session_keys_trace};
pub use verify_data::{verify_data, verify_data_trace};
use mpz_circuits::{Circuit, CircuitBuilder, Tracer};
use std::sync::Arc;
/// Builds session key derivation circuit.
#[tracing::instrument(level = "trace")]
pub fn build_session_keys() -> Arc<Circuit> {
    let builder = CircuitBuilder::new();

    // Circuit inputs, in declaration order.
    let pms_input = builder.add_array_input::<u8, 32>();
    let client_random_input = builder.add_array_input::<u8, 32>();
    let server_random_input = builder.add_array_input::<u8, 32>();

    let (cwk, swk, civ, siv, ms_outer, ms_inner) = session_keys_trace(
        builder.state(),
        pms_input,
        client_random_input,
        server_random_input,
    );

    // Circuit outputs, in declaration order.
    builder.add_output(cwk);
    builder.add_output(swk);
    builder.add_output(civ);
    builder.add_output(siv);
    builder.add_output(ms_outer);
    builder.add_output(ms_inner);

    Arc::new(builder.build().expect("session keys should build"))
}
/// Builds a verify data circuit.
#[tracing::instrument(level = "trace")]
pub fn build_verify_data(label: &[u8]) -> Arc<Circuit> {
    let builder = CircuitBuilder::new();

    let outer_state = builder.add_array_input::<u32, 8>();
    let inner_state = builder.add_array_input::<u32, 8>();
    let handshake_hash = builder.add_array_input::<u8, 32>();

    // The label is baked into the circuit as constants rather than
    // provided as an input.
    let label_consts: Vec<_> = label
        .iter()
        .map(|v| Tracer::new(builder.state(), builder.get_constant(*v).to_inner()))
        .collect();

    let vd = verify_data_trace(
        builder.state(),
        outer_state,
        inner_state,
        &label_consts,
        handshake_hash,
    );
    builder.add_output(vd);

    Arc::new(builder.build().expect("verify data should build"))
}

View File

@@ -1,227 +0,0 @@
//! This module provides an implementation of the HMAC-SHA256 PRF defined in [RFC 5246](https://www.rfc-editor.org/rfc/rfc5246#section-5).
use std::cell::RefCell;
use mpz_circuits::{
types::{U32, U8},
BuilderState, Tracer,
};
use crate::hmac_sha256::{hmac_sha256_finalize, hmac_sha256_finalize_trace};
/// P_hash expansion (RFC 5246, Section 5) traced into the circuit builder.
///
/// Both A() computation and output expansion are kept as two separate
/// passes so the circuit is constructed in the same order as before.
fn p_hash_trace<'a>(
    builder_state: &'a RefCell<BuilderState>,
    outer_state: [Tracer<'a, U32>; 8],
    inner_state: [Tracer<'a, U32>; 8],
    seed: &[Tracer<'a, U8>],
    iterations: usize,
) -> Vec<Tracer<'a, U8>> {
    // A() is defined as:
    //
    //   A(0) = seed
    //   A(i) = HMAC_hash(secret, A(i-1))
    let mut a_values = vec![seed.to_vec()];
    for idx in 0..iterations {
        let next =
            hmac_sha256_finalize_trace(builder_state, outer_state, inner_state, &a_values[idx]);
        a_values.push(next.to_vec());
    }

    // P_hash[i] = HMAC_hash(secret, A(i) + seed)
    let mut output = Vec::with_capacity(iterations * 32);
    for a_i in &a_values[1..] {
        let mut message = a_i.clone();
        message.extend_from_slice(seed);
        let digest = hmac_sha256_finalize_trace(builder_state, outer_state, inner_state, &message);
        output.extend_from_slice(&digest);
    }

    output
}
/// Reference P_hash expansion (RFC 5246, Section 5).
fn p_hash(outer_state: [u32; 8], inner_state: [u32; 8], seed: &[u8], iterations: usize) -> Vec<u8> {
    // A() is defined as:
    //
    //   A(0) = seed
    //   A(i) = HMAC_hash(secret, A(i-1))
    let mut a_values = vec![seed.to_vec()];
    for idx in 0..iterations {
        let next = hmac_sha256_finalize(outer_state, inner_state, &a_values[idx]);
        a_values.push(next.to_vec());
    }

    // P_hash[i] = HMAC_hash(secret, A(i) + seed)
    let mut output = Vec::with_capacity(iterations * 32);
    for a_i in &a_values[1..] {
        let mut message = a_i.clone();
        message.extend_from_slice(seed);
        let digest = hmac_sha256_finalize(outer_state, inner_state, &message);
        output.extend_from_slice(&digest);
    }

    output
}
/// Computes PRF(secret, label, seed).
///
/// # Arguments
///
/// * `builder_state` - Reference to builder state.
/// * `outer_state` - The outer state of HMAC-SHA256.
/// * `inner_state` - The inner state of HMAC-SHA256.
/// * `seed` - The seed to use.
/// * `label` - The label to use.
/// * `bytes` - The number of bytes to output.
pub fn prf_trace<'a>(
    builder_state: &'a RefCell<BuilderState>,
    outer_state: [Tracer<'a, U32>; 8],
    inner_state: [Tracer<'a, U32>; 8],
    seed: &[Tracer<'a, U8>],
    label: &[Tracer<'a, U8>],
    bytes: usize,
) -> Vec<Tracer<'a, U8>> {
    // Each P_hash iteration yields 32 bytes; round up to cover `bytes`.
    let iterations = bytes / 32 + if bytes % 32 != 0 { 1 } else { 0 };

    // P_hash is seeded with label || seed.
    let mut label_seed = Vec::with_capacity(label.len() + seed.len());
    label_seed.extend_from_slice(label);
    label_seed.extend_from_slice(seed);

    let mut output = p_hash_trace(
        builder_state,
        outer_state,
        inner_state,
        &label_seed,
        iterations,
    );
    output.truncate(bytes);

    output
}
/// Reference implementation of PRF(secret, label, seed).
///
/// # Arguments
///
/// * `outer_state` - The outer state of HMAC-SHA256.
/// * `inner_state` - The inner state of HMAC-SHA256.
/// * `seed` - The seed to use.
/// * `label` - The label to use.
/// * `bytes` - The number of bytes to output.
pub fn prf(
    outer_state: [u32; 8],
    inner_state: [u32; 8],
    seed: &[u8],
    label: &[u8],
    bytes: usize,
) -> Vec<u8> {
    // Each P_hash iteration yields 32 bytes; round up to cover `bytes`.
    let iterations = bytes / 32 + if bytes % 32 != 0 { 1 } else { 0 };

    // P_hash is seeded with label || seed.
    let mut label_seed = Vec::with_capacity(label.len() + seed.len());
    label_seed.extend_from_slice(label);
    label_seed.extend_from_slice(seed);

    let mut output = p_hash(outer_state, inner_state, &label_seed, iterations);
    output.truncate(bytes);

    output
}
#[cfg(test)]
mod tests {
    use mpz_circuits::{evaluate, CircuitBuilder};

    use crate::hmac_sha256::hmac_sha256_partial;

    use super::*;

    // Compares the traced p_hash circuit against the reference
    // implementation over 2 iterations.
    #[test]
    fn test_p_hash() {
        let builder = CircuitBuilder::new();
        let outer_state = builder.add_array_input::<u32, 8>();
        let inner_state = builder.add_array_input::<u32, 8>();
        let seed = builder.add_array_input::<u8, 64>();
        let output = p_hash_trace(builder.state(), outer_state, inner_state, &seed, 2);
        builder.add_output(output);
        let circ = builder.build().unwrap();

        let outer_state = [0u32; 8];
        let inner_state = [1u32; 8];
        let seed = [42u8; 64];

        let expected = p_hash(outer_state, inner_state, &seed, 2);
        let actual = evaluate!(circ, fn(outer_state, inner_state, &seed) -> Vec<u8>).unwrap();

        assert_eq!(actual, expected);
    }

    // Compares the traced PRF circuit against both the local reference
    // implementation and ring's TLS 1.2 PRF.
    #[test]
    fn test_prf() {
        let builder = CircuitBuilder::new();
        let outer_state = builder.add_array_input::<u32, 8>();
        let inner_state = builder.add_array_input::<u32, 8>();
        let seed = builder.add_array_input::<u8, 64>();
        let label = builder.add_array_input::<u8, 13>();
        let output = prf_trace(builder.state(), outer_state, inner_state, &seed, &label, 48);
        builder.add_output(output);
        let circ = builder.build().unwrap();

        let master_secret = [0u8; 48];
        let seed = [43u8; 64];
        let label = b"master secret";

        let (outer_state, inner_state) = hmac_sha256_partial(&master_secret);

        let expected = prf(outer_state, inner_state, &seed, label, 48);
        let actual =
            evaluate!(circ, fn(outer_state, inner_state, &seed, label) -> Vec<u8>).unwrap();

        assert_eq!(actual, expected);

        let mut expected_ring = [0u8; 48];
        ring_prf::prf(&mut expected_ring, &master_secret, label, &seed);

        assert_eq!(actual, expected_ring);
    }

    // Borrowed from Rustls for testing
    // https://github.com/rustls/rustls/blob/main/rustls/src/tls12/prf.rs
    mod ring_prf {
        use ring::{hmac, hmac::HMAC_SHA256};

        // HMAC over the concatenation of two byte strings.
        fn concat_sign(key: &hmac::Key, a: &[u8], b: &[u8]) -> hmac::Tag {
            let mut ctx = hmac::Context::with_key(key);
            ctx.update(a);
            ctx.update(b);
            ctx.sign()
        }

        // P_hash expansion using ring's HMAC.
        fn p(out: &mut [u8], secret: &[u8], seed: &[u8]) {
            let hmac_key = hmac::Key::new(HMAC_SHA256, secret);

            // A(1)
            let mut current_a = hmac::sign(&hmac_key, seed);
            let chunk_size = HMAC_SHA256.digest_algorithm().output_len();
            for chunk in out.chunks_mut(chunk_size) {
                // P_hash[i] = HMAC_hash(secret, A(i) + seed)
                let p_term = concat_sign(&hmac_key, current_a.as_ref(), seed);
                chunk.copy_from_slice(&p_term.as_ref()[..chunk.len()]);

                // A(i+1) = HMAC_hash(secret, A(i))
                current_a = hmac::sign(&hmac_key, current_a.as_ref());
            }
        }

        fn concat(a: &[u8], b: &[u8]) -> Vec<u8> {
            let mut ret = Vec::new();
            ret.extend_from_slice(a);
            ret.extend_from_slice(b);
            ret
        }

        pub(crate) fn prf(out: &mut [u8], secret: &[u8], label: &[u8], seed: &[u8]) {
            let joined_seed = concat(label, seed);
            p(out, secret, &joined_seed);
        }
    }
}

View File

@@ -1,200 +0,0 @@
use std::cell::RefCell;
use mpz_circuits::{
types::{U32, U8},
BuilderState, Tracer,
};
use crate::{
hmac_sha256::{hmac_sha256_partial, hmac_sha256_partial_trace},
prf::{prf, prf_trace},
};
/// Session Keys.
///
/// Computes expanded p1 which consists of client_write_key + server_write_key.
/// Computes expanded p2 which consists of client_IV + server_IV.
///
/// # Arguments
///
/// * `builder_state` - Reference to builder state.
/// * `pms` - 32-byte premaster secret.
/// * `client_random` - 32-byte client random.
/// * `server_random` - 32-byte server random.
///
/// # Returns
///
/// * `client_write_key` - 16-byte client write key.
/// * `server_write_key` - 16-byte server write key.
/// * `client_IV` - 4-byte client IV.
/// * `server_IV` - 4-byte server IV.
/// * `outer_hash_state` - 256-bit master-secret outer HMAC state.
/// * `inner_hash_state` - 256-bit master-secret inner HMAC state.
#[allow(clippy::type_complexity)]
pub fn session_keys_trace<'a>(
    builder_state: &'a RefCell<BuilderState>,
    pms: [Tracer<'a, U8>; 32],
    client_random: [Tracer<'a, U8>; 32],
    server_random: [Tracer<'a, U8>; 32],
) -> (
    [Tracer<'a, U8>; 16],
    [Tracer<'a, U8>; 16],
    [Tracer<'a, U8>; 4],
    [Tracer<'a, U8>; 4],
    [Tracer<'a, U32>; 8],
    [Tracer<'a, U32>; 8],
) {
    let (pms_outer_state, pms_inner_state) = hmac_sha256_partial_trace(builder_state, &pms);

    // master_secret = PRF(pms, "master secret",
    //                     client_random || server_random)[..48]
    let ms_seed: Vec<_> = client_random
        .iter()
        .chain(&server_random)
        .copied()
        .collect();
    let ms_label = b"master secret"
        .map(|v| Tracer::new(builder_state, builder_state.borrow_mut().get_constant(v)));
    let master_secret = prf_trace(
        builder_state,
        pms_outer_state,
        pms_inner_state,
        &ms_seed,
        &ms_label,
        48,
    );

    let (ms_outer_state, ms_inner_state) =
        hmac_sha256_partial_trace(builder_state, &master_secret);

    // key_material = PRF(master_secret, "key expansion",
    //                    server_random || client_random)[..40]
    let ke_seed: Vec<_> = server_random
        .iter()
        .chain(&client_random)
        .copied()
        .collect();
    let ke_label = b"key expansion"
        .map(|v| Tracer::new(builder_state, builder_state.borrow_mut().get_constant(v)));
    let key_material = prf_trace(
        builder_state,
        ms_outer_state,
        ms_inner_state,
        &ke_seed,
        &ke_label,
        40,
    );

    // Partition the 40 bytes of key material:
    //   [0..16)  client_write_key
    //   [16..32) server_write_key
    //   [32..36) client_IV
    //   [36..40) server_IV
    let cwk = key_material[0..16].try_into().unwrap();
    let swk = key_material[16..32].try_into().unwrap();
    let civ = key_material[32..36].try_into().unwrap();
    let siv = key_material[36..40].try_into().unwrap();

    (cwk, swk, civ, siv, ms_outer_state, ms_inner_state)
}
/// Reference implementation of session keys derivation.
pub fn session_keys(
    pms: [u8; 32],
    client_random: [u8; 32],
    server_random: [u8; 32],
) -> ([u8; 16], [u8; 16], [u8; 4], [u8; 4]) {
    let (pms_outer_state, pms_inner_state) = hmac_sha256_partial(&pms);

    // master_secret = PRF(pms, "master secret",
    //                     client_random || server_random)[..48]
    let ms_seed: Vec<u8> = client_random
        .iter()
        .chain(&server_random)
        .copied()
        .collect();
    let master_secret = prf(
        pms_outer_state,
        pms_inner_state,
        &ms_seed,
        b"master secret",
        48,
    );

    let (ms_outer_state, ms_inner_state) = hmac_sha256_partial(&master_secret);

    // key_material = PRF(master_secret, "key expansion",
    //                    server_random || client_random)[..40]
    let ke_seed: Vec<u8> = server_random
        .iter()
        .chain(&client_random)
        .copied()
        .collect();
    let key_material = prf(
        ms_outer_state,
        ms_inner_state,
        &ke_seed,
        b"key expansion",
        40,
    );

    // Partition the 40 bytes of key material into keys and IVs.
    let cwk = key_material[0..16].try_into().unwrap();
    let swk = key_material[16..32].try_into().unwrap();
    let civ = key_material[32..36].try_into().unwrap();
    let siv = key_material[36..40].try_into().unwrap();

    (cwk, swk, civ, siv)
}
#[cfg(test)]
mod tests {
    use mpz_circuits::{evaluate, CircuitBuilder};

    use super::*;

    // Compares the traced session-key circuit against the reference
    // implementation. The master-secret hash states are decoded but not
    // checked here (the reference does not return them).
    #[test]
    fn test_session_keys() {
        let builder = CircuitBuilder::new();
        let pms = builder.add_array_input::<u8, 32>();
        let client_random = builder.add_array_input::<u8, 32>();
        let server_random = builder.add_array_input::<u8, 32>();
        let (cwk, swk, civ, siv, outer_state, inner_state) =
            session_keys_trace(builder.state(), pms, client_random, server_random);
        builder.add_output(cwk);
        builder.add_output(swk);
        builder.add_output(civ);
        builder.add_output(siv);
        builder.add_output(outer_state);
        builder.add_output(inner_state);
        let circ = builder.build().unwrap();

        let pms = [0u8; 32];
        let client_random = [42u8; 32];
        let server_random = [69u8; 32];

        let (expected_cwk, expected_swk, expected_civ, expected_siv) =
            session_keys(pms, client_random, server_random);

        let (cwk, swk, civ, siv, _, _) = evaluate!(
            circ,
            fn(
                pms,
                client_random,
                server_random,
            ) -> ([u8; 16], [u8; 16], [u8; 4], [u8; 4], [u32; 8], [u32; 8])
        )
        .unwrap();

        assert_eq!(cwk, expected_cwk);
        assert_eq!(swk, expected_swk);
        assert_eq!(civ, expected_civ);
        assert_eq!(siv, expected_siv);
    }
}

View File

@@ -1,88 +0,0 @@
use std::cell::RefCell;
use mpz_circuits::{
types::{U32, U8},
BuilderState, Tracer,
};
use crate::prf::{prf, prf_trace};
/// Computes verify_data as specified in RFC 5246, Section 7.4.9.
///
/// verify_data
///     PRF(master_secret, finished_label,
///         Hash(handshake_messages))[0..verify_data_length-1];
///
/// # Arguments
///
/// * `builder_state` - The builder state.
/// * `outer_state` - The outer HMAC state of the master secret.
/// * `inner_state` - The inner HMAC state of the master secret.
/// * `label` - The label to use.
/// * `hs_hash` - The handshake hash.
pub fn verify_data_trace<'a>(
    builder_state: &'a RefCell<BuilderState>,
    outer_state: [Tracer<'a, U32>; 8],
    inner_state: [Tracer<'a, U32>; 8],
    label: &[Tracer<'a, U8>],
    hs_hash: [Tracer<'a, U8>; 32],
) -> [Tracer<'a, U8>; 12] {
    let output = prf_trace(builder_state, outer_state, inner_state, &hs_hash, label, 12);
    output.try_into().expect("vd is 12 bytes")
}
/// Reference implementation of verify_data as specified in RFC 5246, Section
/// 7.4.9.
///
/// # Arguments
///
/// * `outer_state` - The outer HMAC state of the master secret.
/// * `inner_state` - The inner HMAC state of the master secret.
/// * `label` - The label to use.
/// * `hs_hash` - The handshake hash.
pub fn verify_data(
    outer_state: [u32; 8],
    inner_state: [u32; 8],
    label: &[u8],
    hs_hash: [u8; 32],
) -> [u8; 12] {
    let output = prf(outer_state, inner_state, &hs_hash, label, 12);
    output.try_into().expect("vd is 12 bytes")
}
#[cfg(test)]
mod tests {
    use super::*;
    use mpz_circuits::{evaluate, CircuitBuilder};

    const CF_LABEL: &[u8; 15] = b"client finished";

    // Compares the traced verify_data circuit against the reference PRF
    // for the "client finished" label.
    #[test]
    fn test_verify_data() {
        let builder = CircuitBuilder::new();
        let outer_state = builder.add_array_input::<u32, 8>();
        let inner_state = builder.add_array_input::<u32, 8>();
        let label = builder.add_array_input::<u8, 15>();
        let hs_hash = builder.add_array_input::<u8, 32>();
        let vd = verify_data_trace(builder.state(), outer_state, inner_state, &label, hs_hash);
        builder.add_output(vd);
        let circ = builder.build().unwrap();

        let outer_state = [0u32; 8];
        let inner_state = [1u32; 8];
        let hs_hash = [42u8; 32];

        let expected = prf(outer_state, inner_state, &hs_hash, CF_LABEL, 12);

        let actual = evaluate!(
            circ,
            fn(outer_state, inner_state, CF_LABEL, hs_hash) -> [u8; 12]
        )
        .unwrap();

        assert_eq!(actual.to_vec(), expected);
    }
}

View File

@@ -5,35 +5,35 @@ description = "A 2PC implementation of TLS HMAC-SHA256 PRF"
keywords = ["tls", "mpc", "2pc", "hmac", "sha256"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.7"
version = "0.1.0-alpha.12"
edition = "2021"
[lints]
workspace = true
[lib]
name = "hmac_sha256"
[features]
default = ["mock"]
rayon = ["mpz-common/rayon"]
mock = []
[dependencies]
tlsn-hmac-sha256-circuits = { workspace = true }
mpz-garble = { workspace = true }
mpz-vm-core = { workspace = true }
mpz-core = { workspace = true }
mpz-circuits = { workspace = true }
mpz-common = { workspace = true }
mpz-hash = { workspace = true }
async-trait = { workspace = true }
derive_builder = { workspace = true }
futures = { workspace = true }
thiserror = { workspace = true }
tracing = { workspace = true }
sha2 = { workspace = true }
[dev-dependencies]
criterion = { workspace = true, features = ["async_tokio"] }
mpz-common = { workspace = true, features = ["test-utils"] }
mpz-ot = { workspace = true, features = ["ideal"] }
mpz-garble = { workspace = true }
mpz-common = { workspace = true, features = ["test-utils"] }
criterion = { workspace = true, features = ["async_tokio"] }
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }
rand = { workspace = true }
hex = { workspace = true }
ring = { workspace = true }
[[bench]]
name = "prf"

View File

@@ -1,9 +1,17 @@
#![allow(clippy::let_underscore_future)]
use criterion::{criterion_group, criterion_main, Criterion};
use hmac_sha256::{MpcPrf, Prf, PrfConfig, Role};
use mpz_common::executor::test_mt_executor;
use mpz_garble::{config::Role as DEAPRole, protocol::deap::DEAPThread, Memory};
use mpz_ot::ideal::ot::ideal_ot;
use hmac_sha256::{Mode, MpcPrf};
use mpz_common::context::test_mt_context;
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_ot::ideal::cot::ideal_cot;
use mpz_vm_core::{
memory::{binary::U8, correlated::Delta, Array},
prelude::*,
Execute,
};
use rand::{rngs::StdRng, SeedableRng};
#[allow(clippy::unit_arg)]
fn criterion_benchmark(c: &mut Criterion) {
@@ -11,178 +19,127 @@ fn criterion_benchmark(c: &mut Criterion) {
group.sample_size(10);
let rt = tokio::runtime::Runtime::new().unwrap();
group.bench_function("prf_preprocess", |b| b.to_async(&rt).iter(preprocess));
group.bench_function("prf", |b| b.to_async(&rt).iter(prf));
group.bench_function("prf_normal", |b| b.to_async(&rt).iter(|| prf(Mode::Normal)));
group.bench_function("prf_reduced", |b| {
b.to_async(&rt).iter(|| prf(Mode::Reduced))
});
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
async fn preprocess() {
let (mut leader_exec, mut follower_exec) = test_mt_executor(128);
let (leader_ot_send_0, follower_ot_recv_0) = ideal_ot();
let (follower_ot_send_0, leader_ot_recv_0) = ideal_ot();
let (leader_ot_send_1, follower_ot_recv_1) = ideal_ot();
let (follower_ot_send_1, leader_ot_recv_1) = ideal_ot();
let leader_thread_0 = DEAPThread::new(
DEAPRole::Leader,
[0u8; 32],
leader_exec.new_thread().await.unwrap(),
leader_ot_send_0,
leader_ot_recv_0,
);
let leader_thread_1 = leader_thread_0
.new_thread(
leader_exec.new_thread().await.unwrap(),
leader_ot_send_1,
leader_ot_recv_1,
)
.unwrap();
let follower_thread_0 = DEAPThread::new(
DEAPRole::Follower,
[0u8; 32],
follower_exec.new_thread().await.unwrap(),
follower_ot_send_0,
follower_ot_recv_0,
);
let follower_thread_1 = follower_thread_0
.new_thread(
follower_exec.new_thread().await.unwrap(),
follower_ot_send_1,
follower_ot_recv_1,
)
.unwrap();
let leader_pms = leader_thread_0.new_public_input::<[u8; 32]>("pms").unwrap();
let follower_pms = follower_thread_0
.new_public_input::<[u8; 32]>("pms")
.unwrap();
let mut leader = MpcPrf::new(
PrfConfig::builder().role(Role::Leader).build().unwrap(),
leader_thread_0,
leader_thread_1,
);
let mut follower = MpcPrf::new(
PrfConfig::builder().role(Role::Follower).build().unwrap(),
follower_thread_0,
follower_thread_1,
);
futures::join!(
async {
leader.setup(leader_pms).await.unwrap();
leader.set_client_random(Some([0u8; 32])).await.unwrap();
leader.preprocess().await.unwrap();
},
async {
follower.setup(follower_pms).await.unwrap();
follower.set_client_random(None).await.unwrap();
follower.preprocess().await.unwrap();
}
);
}
async fn prf() {
let (mut leader_exec, mut follower_exec) = test_mt_executor(128);
let (leader_ot_send_0, follower_ot_recv_0) = ideal_ot();
let (follower_ot_send_0, leader_ot_recv_0) = ideal_ot();
let (leader_ot_send_1, follower_ot_recv_1) = ideal_ot();
let (follower_ot_send_1, leader_ot_recv_1) = ideal_ot();
let leader_thread_0 = DEAPThread::new(
DEAPRole::Leader,
[0u8; 32],
leader_exec.new_thread().await.unwrap(),
leader_ot_send_0,
leader_ot_recv_0,
);
let leader_thread_1 = leader_thread_0
.new_thread(
leader_exec.new_thread().await.unwrap(),
leader_ot_send_1,
leader_ot_recv_1,
)
.unwrap();
let follower_thread_0 = DEAPThread::new(
DEAPRole::Follower,
[0u8; 32],
follower_exec.new_thread().await.unwrap(),
follower_ot_send_0,
follower_ot_recv_0,
);
let follower_thread_1 = follower_thread_0
.new_thread(
follower_exec.new_thread().await.unwrap(),
follower_ot_send_1,
follower_ot_recv_1,
)
.unwrap();
let leader_pms = leader_thread_0.new_public_input::<[u8; 32]>("pms").unwrap();
let follower_pms = follower_thread_0
.new_public_input::<[u8; 32]>("pms")
.unwrap();
let mut leader = MpcPrf::new(
PrfConfig::builder().role(Role::Leader).build().unwrap(),
leader_thread_0,
leader_thread_1,
);
let mut follower = MpcPrf::new(
PrfConfig::builder().role(Role::Follower).build().unwrap(),
follower_thread_0,
follower_thread_1,
);
async fn prf(mode: Mode) {
let mut rng = StdRng::seed_from_u64(0);
let pms = [42u8; 32];
let client_random = [0u8; 32];
let server_random = [1u8; 32];
let cf_hs_hash = [2u8; 32];
let sf_hs_hash = [3u8; 32];
let client_random = [69u8; 32];
let server_random: [u8; 32] = [96u8; 32];
futures::join!(
async {
leader.setup(leader_pms.clone()).await.unwrap();
leader.set_client_random(Some(client_random)).await.unwrap();
leader.preprocess().await.unwrap();
},
async {
follower.setup(follower_pms.clone()).await.unwrap();
follower.set_client_random(None).await.unwrap();
follower.preprocess().await.unwrap();
}
);
let (mut leader_exec, mut follower_exec) = test_mt_context(8);
let mut leader_ctx = leader_exec.new_context().await.unwrap();
let mut follower_ctx = follower_exec.new_context().await.unwrap();
leader.thread_mut().assign(&leader_pms, pms).unwrap();
follower.thread_mut().assign(&follower_pms, pms).unwrap();
let delta = Delta::random(&mut rng);
let (ot_send, ot_recv) = ideal_cot(delta.into_inner());
let (_leader_keys, _follower_keys) = futures::try_join!(
leader.compute_session_keys(server_random),
follower.compute_session_keys(server_random)
)
.unwrap();
let mut leader_vm = Garbler::new(ot_send, [0u8; 16], delta);
let mut follower_vm = Evaluator::new(ot_recv);
let _ = futures::try_join!(
leader.compute_client_finished_vd(cf_hs_hash),
follower.compute_client_finished_vd(cf_hs_hash)
)
.unwrap();
let leader_pms: Array<U8, 32> = leader_vm.alloc().unwrap();
leader_vm.mark_public(leader_pms).unwrap();
leader_vm.assign(leader_pms, pms).unwrap();
leader_vm.commit(leader_pms).unwrap();
let _ = futures::try_join!(
leader.compute_server_finished_vd(sf_hs_hash),
follower.compute_server_finished_vd(sf_hs_hash)
)
.unwrap();
let follower_pms: Array<U8, 32> = follower_vm.alloc().unwrap();
follower_vm.mark_public(follower_pms).unwrap();
follower_vm.assign(follower_pms, pms).unwrap();
follower_vm.commit(follower_pms).unwrap();
futures::try_join!(
leader.thread_mut().finalize(),
follower.thread_mut().finalize()
)
.unwrap();
let mut leader = MpcPrf::new(mode);
let mut follower = MpcPrf::new(mode);
let leader_output = leader.alloc(&mut leader_vm, leader_pms).unwrap();
let follower_output = follower.alloc(&mut follower_vm, follower_pms).unwrap();
leader.set_client_random(client_random).unwrap();
follower.set_client_random(client_random).unwrap();
leader.set_server_random(server_random).unwrap();
follower.set_server_random(server_random).unwrap();
let _ = leader_vm
.decode(leader_output.keys.client_write_key)
.unwrap();
let _ = leader_vm
.decode(leader_output.keys.server_write_key)
.unwrap();
let _ = leader_vm.decode(leader_output.keys.client_iv).unwrap();
let _ = leader_vm.decode(leader_output.keys.server_iv).unwrap();
let _ = follower_vm
.decode(follower_output.keys.client_write_key)
.unwrap();
let _ = follower_vm
.decode(follower_output.keys.server_write_key)
.unwrap();
let _ = follower_vm.decode(follower_output.keys.client_iv).unwrap();
let _ = follower_vm.decode(follower_output.keys.server_iv).unwrap();
while leader.wants_flush() || follower.wants_flush() {
tokio::try_join!(
async {
leader.flush(&mut leader_vm).unwrap();
leader_vm.execute_all(&mut leader_ctx).await
},
async {
follower.flush(&mut follower_vm).unwrap();
follower_vm.execute_all(&mut follower_ctx).await
}
)
.unwrap();
}
let cf_hs_hash = [1u8; 32];
leader.set_cf_hash(cf_hs_hash).unwrap();
follower.set_cf_hash(cf_hs_hash).unwrap();
while leader.wants_flush() || follower.wants_flush() {
tokio::try_join!(
async {
leader.flush(&mut leader_vm).unwrap();
leader_vm.execute_all(&mut leader_ctx).await
},
async {
follower.flush(&mut follower_vm).unwrap();
follower_vm.execute_all(&mut follower_ctx).await
}
)
.unwrap();
}
let _ = leader_vm.decode(leader_output.cf_vd).unwrap();
let _ = follower_vm.decode(follower_output.cf_vd).unwrap();
let sf_hs_hash = [2u8; 32];
leader.set_sf_hash(sf_hs_hash).unwrap();
follower.set_sf_hash(sf_hs_hash).unwrap();
while leader.wants_flush() || follower.wants_flush() {
tokio::try_join!(
async {
leader.flush(&mut leader_vm).unwrap();
leader_vm.execute_all(&mut leader_ctx).await
},
async {
follower.flush(&mut follower_vm).unwrap();
follower_vm.execute_all(&mut follower_ctx).await
}
)
.unwrap();
}
let _ = leader_vm.decode(leader_output.sf_vd).unwrap();
let _ = follower_vm.decode(follower_output.sf_vd).unwrap();
}

View File

@@ -1,24 +1,10 @@
use derive_builder::Builder;
//! PRF modes.
/// Role of this party in the PRF.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Role {
/// The leader provides the private inputs to the PRF.
Leader,
/// The follower is blind to the inputs to the PRF.
Follower,
}
/// Configuration for the PRF.
#[derive(Debug, Builder)]
pub struct PrfConfig {
/// The role of this party in the PRF.
pub(crate) role: Role,
}
impl PrfConfig {
/// Creates a new builder.
pub fn builder() -> PrfConfigBuilder {
PrfConfigBuilder::default()
}
/// Modes for the PRF.
#[derive(Debug, Clone, Copy)]
pub enum Mode {
    /// Computes some hashes locally.
    // NOTE(review): presumably a performance optimization that moves part
    // of the computation outside MPC — confirm against the PRF implementation.
    Reduced,
    /// Computes the whole PRF in MPC.
    Normal,
}

View File

@@ -1,6 +1,8 @@
use core::fmt;
use std::error::Error;
use mpz_hash::sha256::Sha256Error;
/// A PRF error.
#[derive(Debug, thiserror::Error)]
pub struct PrfError {
@@ -20,18 +22,21 @@ impl PrfError {
}
}
pub(crate) fn vm<E: Into<Box<dyn Error + Send + Sync>>>(err: E) -> Self {
Self::new(ErrorKind::Vm, err)
}
pub(crate) fn state(msg: impl Into<String>) -> Self {
Self {
kind: ErrorKind::State,
source: Some(msg.into().into()),
}
}
}
pub(crate) fn role(msg: impl Into<String>) -> Self {
Self {
kind: ErrorKind::Role,
source: Some(msg.into().into()),
}
impl From<Sha256Error> for PrfError {
fn from(value: Sha256Error) -> Self {
Self::new(ErrorKind::Hash, value)
}
}
@@ -39,7 +44,7 @@ impl PrfError {
pub(crate) enum ErrorKind {
Vm,
State,
Role,
Hash,
}
impl fmt::Display for PrfError {
@@ -47,7 +52,7 @@ impl fmt::Display for PrfError {
match self.kind {
ErrorKind::Vm => write!(f, "vm error")?,
ErrorKind::State => write!(f, "state error")?,
ErrorKind::Role => write!(f, "role error")?,
ErrorKind::Hash => write!(f, "hash error")?,
}
if let Some(ref source) = self.source {
@@ -57,27 +62,3 @@ impl fmt::Display for PrfError {
Ok(())
}
}
// Conversions from the `mpz_garble` error types. All garble-layer failures
// are surfaced uniformly as `ErrorKind::Vm` errors.
impl From<mpz_garble::MemoryError> for PrfError {
    fn from(error: mpz_garble::MemoryError) -> Self {
        Self::new(ErrorKind::Vm, error)
    }
}

impl From<mpz_garble::LoadError> for PrfError {
    fn from(error: mpz_garble::LoadError) -> Self {
        Self::new(ErrorKind::Vm, error)
    }
}

impl From<mpz_garble::ExecutionError> for PrfError {
    fn from(error: mpz_garble::ExecutionError) -> Self {
        Self::new(ErrorKind::Vm, error)
    }
}

impl From<mpz_garble::DecodeError> for PrfError {
    fn from(error: mpz_garble::DecodeError) -> Self {
        Self::new(ErrorKind::Vm, error)
    }
}

View File

@@ -0,0 +1,177 @@
//! Computation of HMAC-SHA256.
//!
//! HMAC-SHA256 is defined as
//!
//! HMAC(m) = H((key' xor opad) || H((key' xor ipad) || m))
//!
//! * H - SHA256 hash function
//! * key' - key padded with zero bytes to 64 bytes (we do not support longer
//! keys)
//! * opad - 64 bytes of 0x5c
//! * ipad - 64 bytes of 0x36
//! * m - message
//!
//! This implementation computes HMAC-SHA256 using intermediate results
//! `outer_partial` and `inner_local`. Then HMAC(m) = H(outer_partial ||
//! inner_local)
//!
//! * `outer_partial` - key' xor opad
//! * `inner_local` - H((key' xor ipad) || m)
use mpz_hash::sha256::Sha256;
use mpz_vm_core::{
memory::{
binary::{Binary, U8},
Array,
},
Vm,
};
use crate::PrfError;
pub(crate) const IPAD: [u8; 64] = [0x36; 64];
pub(crate) const OPAD: [u8; 64] = [0x5c; 64];
/// Computes HMAC-SHA256 from precomputed intermediate values.
///
/// Finishes the outer hash, i.e. H(outer_partial || inner_local).
///
/// # Arguments
///
/// * `vm` - The virtual machine.
/// * `outer_partial` - (key' xor opad)
/// * `inner_local` - H((key' xor ipad) || m)
pub(crate) fn hmac_sha256(
    vm: &mut dyn Vm<Binary>,
    mut outer_partial: Sha256,
    inner_local: Array<U8, 32>,
) -> Result<Array<U8, 32>, PrfError> {
    // Absorb the inner hash into the outer state, run the compression
    // rounds in the VM, then read out the final digest.
    outer_partial.update(&inner_local.into());
    outer_partial.compress(vm)?;
    let digest = outer_partial.finalize(vm)?;
    Ok(digest)
}
#[cfg(test)]
mod tests {
    use crate::{
        hmac::hmac_sha256,
        sha256, state_to_bytes,
        test_utils::{compute_inner_local, compute_outer_partial, mock_vm},
    };
    use mpz_common::context::test_st_context;
    use mpz_hash::sha256::Sha256;
    use mpz_vm_core::{
        memory::{
            binary::{U32, U8},
            Array, MemoryExt, ViewExt,
        },
        Execute,
    };

    /// Checks the plaintext HMAC construction against the reference vectors:
    /// H(outer_partial || inner_local) must equal the expected HMAC output.
    #[test]
    fn test_hmac_reference() {
        let (inputs, references) = test_fixtures();

        for (input, &reference) in inputs.iter().zip(references.iter()) {
            let outer_partial = compute_outer_partial(input.0.clone());
            let inner_local = compute_inner_local(input.0.clone(), &input.1);

            // Resume SHA-256 from `outer_partial` with 64 bytes (one key
            // block) already processed, then hash the serialized inner hash.
            let hmac = sha256(outer_partial, 64, &state_to_bytes(inner_local));
            assert_eq!(state_to_bytes(hmac), reference);
        }
    }

    /// Runs the MPC `hmac_sha256` circuit between a leader and a follower VM
    /// and checks both parties decode the same, correct HMAC.
    #[tokio::test]
    async fn test_hmac_circuit() {
        let (mut ctx_a, mut ctx_b) = test_st_context(8);
        let (mut leader, mut follower) = mock_vm();

        let (inputs, references) = test_fixtures();

        for (input, &reference) in inputs.iter().zip(references.iter()) {
            // Reference values computed in the clear.
            let outer_partial = compute_outer_partial(input.0.clone());
            let inner_local = compute_inner_local(input.0.clone(), &input.1);

            // Leader side: allocate, assign and commit both public inputs.
            let outer_partial_leader: Array<U32, 8> = leader.alloc().unwrap();
            leader.mark_public(outer_partial_leader).unwrap();
            leader.assign(outer_partial_leader, outer_partial).unwrap();
            leader.commit(outer_partial_leader).unwrap();

            let inner_local_leader: Array<U8, 32> = leader.alloc().unwrap();
            leader.mark_public(inner_local_leader).unwrap();
            leader
                .assign(inner_local_leader, state_to_bytes(inner_local))
                .unwrap();
            leader.commit(inner_local_leader).unwrap();

            // NOTE(review): the second argument of `new_from_state` appears to
            // be the number of 64-byte blocks already compressed (here, the
            // padded key block) — confirm against mpz_hash docs.
            let hmac_leader = hmac_sha256(
                &mut leader,
                Sha256::new_from_state(outer_partial_leader, 1),
                inner_local_leader,
            )
            .unwrap();
            let hmac_leader = leader.decode(hmac_leader).unwrap();

            // Follower side: mirrors the leader's setup.
            let outer_partial_follower: Array<U32, 8> = follower.alloc().unwrap();
            follower.mark_public(outer_partial_follower).unwrap();
            follower
                .assign(outer_partial_follower, outer_partial)
                .unwrap();
            follower.commit(outer_partial_follower).unwrap();

            let inner_local_follower: Array<U8, 32> = follower.alloc().unwrap();
            follower.mark_public(inner_local_follower).unwrap();
            follower
                .assign(inner_local_follower, state_to_bytes(inner_local))
                .unwrap();
            follower.commit(inner_local_follower).unwrap();

            let hmac_follower = hmac_sha256(
                &mut follower,
                Sha256::new_from_state(outer_partial_follower, 1),
                inner_local_follower,
            )
            .unwrap();
            let hmac_follower = follower.decode(hmac_follower).unwrap();

            // Drive both VMs concurrently; each must flush for the other's
            // execution to make progress.
            let (hmac_leader, hmac_follower) = tokio::try_join!(
                async {
                    leader.execute_all(&mut ctx_a).await.unwrap();
                    hmac_leader.await
                },
                async {
                    follower.execute_all(&mut ctx_b).await.unwrap();
                    hmac_follower.await
                }
            )
            .unwrap();

            assert_eq!(hmac_leader, hmac_follower);
            assert_eq!(hmac_leader, reference);
        }
    }

    /// Returns ((key, message) pairs, expected HMAC-SHA256 digests).
    /// These are the first two test cases from RFC 4231.
    #[allow(clippy::type_complexity)]
    fn test_fixtures() -> (Vec<(Vec<u8>, Vec<u8>)>, Vec<[u8; 32]>) {
        let test_vectors: Vec<(Vec<u8>, Vec<u8>)> = vec![
            (
                hex::decode("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b").unwrap(),
                hex::decode("4869205468657265").unwrap(),
            ),
            (
                hex::decode("4a656665").unwrap(),
                hex::decode("7768617420646f2079612077616e7420666f72206e6f7468696e673f").unwrap(),
            ),
        ];
        let expected: Vec<[u8; 32]> = vec![
            hex::decode("b0344c61d8db38535ca8afceaf0bf12b881dc200c9833da726e9376c2e32cff7")
                .unwrap()
                .try_into()
                .unwrap(),
            hex::decode("5bdcc146bf60754e6a042426089575c75a003f089d2739839dec58b964ec3843")
                .unwrap()
                .try_into()
                .unwrap(),
        ];

        (test_vectors, expected)
    }
}

View File

@@ -1,267 +1,269 @@
//! This module contains the protocol for computing TLS SHA-256 HMAC PRF.
//! This crate contains the protocol for computing TLS 1.2 SHA-256 HMAC PRF.
#![deny(missing_docs, unreachable_pub, unused_must_use)]
#![deny(clippy::all)]
#![forbid(unsafe_code)]
mod config;
mod error;
mod prf;
mod hmac;
#[cfg(test)]
mod test_utils;
pub use config::{PrfConfig, PrfConfigBuilder, PrfConfigBuilderError, Role};
mod config;
pub use config::Mode;
mod error;
pub use error::PrfError;
mod prf;
pub use prf::MpcPrf;
use async_trait::async_trait;
use mpz_vm_core::memory::{binary::U8, Array};
use mpz_garble::value::ValueRef;
pub(crate) static CF_LABEL: &[u8] = b"client finished";
pub(crate) static SF_LABEL: &[u8] = b"server finished";
/// Session keys computed by the PRF.
#[derive(Debug, Clone)]
pub struct SessionKeys {
/// Client write key.
pub client_write_key: ValueRef,
/// Server write key.
pub server_write_key: ValueRef,
/// Client IV.
pub client_iv: ValueRef,
/// Server IV.
pub server_iv: ValueRef,
/// PRF output.
#[derive(Debug, Clone, Copy)]
pub struct PrfOutput {
/// TLS session keys.
pub keys: SessionKeys,
/// Client finished verify data.
pub cf_vd: Array<U8, 12>,
/// Server finished verify data.
pub sf_vd: Array<U8, 12>,
}
/// PRF trait for computing TLS PRF.
#[async_trait]
pub trait Prf {
/// Sets up the PRF.
///
/// # Arguments
///
/// * `pms` - The pre-master secret.
async fn setup(&mut self, pms: ValueRef) -> Result<SessionKeys, PrfError>;
/// Session keys computed by the PRF.
#[derive(Debug, Clone, Copy)]
pub struct SessionKeys {
/// Client write key.
pub client_write_key: Array<U8, 16>,
/// Server write key.
pub server_write_key: Array<U8, 16>,
/// Client IV.
pub client_iv: Array<U8, 4>,
/// Server IV.
pub server_iv: Array<U8, 4>,
}
/// Sets the client random.
///
/// This must be set after calling [`Prf::setup`].
///
/// Only the leader can provide the client random.
async fn set_client_random(&mut self, client_random: Option<[u8; 32]>) -> Result<(), PrfError>;
fn sha256(mut state: [u32; 8], pos: usize, msg: &[u8]) -> [u32; 8] {
use sha2::{
compress256,
digest::{
block_buffer::{BlockBuffer, Eager},
generic_array::typenum::U64,
},
};
/// Preprocesses the PRF.
async fn preprocess(&mut self) -> Result<(), PrfError>;
let mut buffer = BlockBuffer::<U64, Eager>::default();
buffer.digest_blocks(msg, |b| compress256(&mut state, b));
buffer.digest_pad(0x80, &(((msg.len() + pos) * 8) as u64).to_be_bytes(), |b| {
compress256(&mut state, &[*b])
});
state
}
/// Computes the client finished verify data.
///
/// # Arguments
///
/// * `handshake_hash` - The handshake transcript hash.
async fn compute_client_finished_vd(
&mut self,
handshake_hash: [u8; 32],
) -> Result<[u8; 12], PrfError>;
/// Computes the server finished verify data.
///
/// # Arguments
///
/// * `handshake_hash` - The handshake transcript hash.
async fn compute_server_finished_vd(
&mut self,
handshake_hash: [u8; 32],
) -> Result<[u8; 12], PrfError>;
/// Computes the session keys.
///
/// # Arguments
///
/// * `server_random` - The server random.
async fn compute_session_keys(
&mut self,
server_random: [u8; 32],
) -> Result<SessionKeys, PrfError>;
/// Serializes a SHA-256 state (eight 32-bit words) into its 32-byte
/// big-endian representation.
fn state_to_bytes(input: [u32; 8]) -> [u8; 32] {
    let mut output = [0u8; 32];
    // Each state word occupies four consecutive bytes, most significant first.
    for (chunk, word) in output.chunks_exact_mut(4).zip(input.iter()) {
        chunk.copy_from_slice(&word.to_be_bytes());
    }
    output
}
#[cfg(test)]
mod tests {
use mpz_common::executor::test_st_executor;
use mpz_garble::{config::Role as DEAPRole, protocol::deap::DEAPThread, Decode, Memory};
use crate::{
test_utils::{mock_vm, prf_cf_vd, prf_keys, prf_ms, prf_sf_vd},
Mode, MpcPrf, SessionKeys,
};
use mpz_common::context::test_st_context;
use mpz_vm_core::{
memory::{binary::U8, Array, MemoryExt, ViewExt},
Execute,
};
use rand::{rngs::StdRng, Rng, SeedableRng};
use hmac_sha256_circuits::{hmac_sha256_partial, prf, session_keys};
use mpz_ot::ideal::ot::ideal_ot;
use super::*;
/// Computes the 48-byte TLS master secret from the pre-master secret and
/// the client/server randoms, using the reference (non-MPC) circuit helpers.
fn compute_ms(pms: [u8; 32], client_random: [u8; 32], server_random: [u8; 32]) -> [u8; 48] {
    let (outer_state, inner_state) = hmac_sha256_partial(&pms);
    // The PRF seed for the master secret is client_random || server_random.
    let seed = client_random
        .iter()
        .chain(&server_random)
        .copied()
        .collect::<Vec<_>>();
    let ms = prf(outer_state, inner_state, &seed, b"master secret", 48);
    ms.try_into().unwrap()
}
/// Computes the 12-byte Finished-message verify data: PRF(ms, label, hs_hash),
/// seeded with the handshake transcript hash.
fn compute_vd(ms: [u8; 48], label: &[u8], hs_hash: [u8; 32]) -> [u8; 12] {
    let (outer_state, inner_state) = hmac_sha256_partial(&ms);
    let vd = prf(outer_state, inner_state, &hs_hash, label, 12);
    vd.try_into().unwrap()
}
#[ignore = "expensive"]
#[tokio::test]
async fn test_prf() {
let pms = [42u8; 32];
let client_random = [69u8; 32];
let server_random: [u8; 32] = [96u8; 32];
let ms = compute_ms(pms, client_random, server_random);
async fn test_prf_reduced() {
let mode = Mode::Reduced;
test_prf(mode).await;
}
let (leader_ctx_0, follower_ctx_0) = test_st_executor(128);
let (leader_ctx_1, follower_ctx_1) = test_st_executor(128);
#[tokio::test]
async fn test_prf_normal() {
let mode = Mode::Normal;
test_prf(mode).await;
}
let (leader_ot_send_0, follower_ot_recv_0) = ideal_ot();
let (follower_ot_send_0, leader_ot_recv_0) = ideal_ot();
let (leader_ot_send_1, follower_ot_recv_1) = ideal_ot();
let (follower_ot_send_1, leader_ot_recv_1) = ideal_ot();
async fn test_prf(mode: Mode) {
let mut rng = StdRng::seed_from_u64(1);
// Test input
let pms: [u8; 32] = rng.random();
let client_random: [u8; 32] = rng.random();
let server_random: [u8; 32] = rng.random();
let leader_thread_0 = DEAPThread::new(
DEAPRole::Leader,
[0u8; 32],
leader_ctx_0,
leader_ot_send_0,
leader_ot_recv_0,
);
let leader_thread_1 = leader_thread_0
.new_thread(leader_ctx_1, leader_ot_send_1, leader_ot_recv_1)
.unwrap();
let cf_hs_hash: [u8; 32] = rng.random();
let sf_hs_hash: [u8; 32] = rng.random();
let follower_thread_0 = DEAPThread::new(
DEAPRole::Follower,
[0u8; 32],
follower_ctx_0,
follower_ot_send_0,
follower_ot_recv_0,
);
let follower_thread_1 = follower_thread_0
.new_thread(follower_ctx_1, follower_ot_send_1, follower_ot_recv_1)
.unwrap();
// Expected output
let ms_expected = prf_ms(pms, client_random, server_random);
// Set up public PMS for testing.
let leader_pms = leader_thread_0.new_public_input::<[u8; 32]>("pms").unwrap();
let follower_pms = follower_thread_0
.new_public_input::<[u8; 32]>("pms")
.unwrap();
let [cwk_expected, swk_expected, civ_expected, siv_expected] =
prf_keys(ms_expected, client_random, server_random);
leader_thread_0.assign(&leader_pms, pms).unwrap();
follower_thread_0.assign(&follower_pms, pms).unwrap();
let cwk_expected: [u8; 16] = cwk_expected.try_into().unwrap();
let swk_expected: [u8; 16] = swk_expected.try_into().unwrap();
let civ_expected: [u8; 4] = civ_expected.try_into().unwrap();
let siv_expected: [u8; 4] = siv_expected.try_into().unwrap();
let mut leader = MpcPrf::new(
PrfConfig::builder().role(Role::Leader).build().unwrap(),
leader_thread_0,
leader_thread_1,
);
let mut follower = MpcPrf::new(
PrfConfig::builder().role(Role::Follower).build().unwrap(),
follower_thread_0,
follower_thread_1,
);
let cf_vd_expected = prf_cf_vd(ms_expected, cf_hs_hash);
let sf_vd_expected = prf_sf_vd(ms_expected, sf_hs_hash);
futures::join!(
async {
leader.setup(leader_pms).await.unwrap();
leader.set_client_random(Some(client_random)).await.unwrap();
leader.preprocess().await.unwrap();
},
async {
follower.setup(follower_pms).await.unwrap();
follower.set_client_random(None).await.unwrap();
follower.preprocess().await.unwrap();
}
);
let cf_vd_expected: [u8; 12] = cf_vd_expected.try_into().unwrap();
let sf_vd_expected: [u8; 12] = sf_vd_expected.try_into().unwrap();
let (leader_session_keys, follower_session_keys) = futures::try_join!(
leader.compute_session_keys(server_random),
follower.compute_session_keys(server_random)
)
.unwrap();
// Set up vm and prf
let (mut ctx_a, mut ctx_b) = test_st_context(128);
let (mut leader, mut follower) = mock_vm();
let leader_pms: Array<U8, 32> = leader.alloc().unwrap();
leader.mark_public(leader_pms).unwrap();
leader.assign(leader_pms, pms).unwrap();
leader.commit(leader_pms).unwrap();
let follower_pms: Array<U8, 32> = follower.alloc().unwrap();
follower.mark_public(follower_pms).unwrap();
follower.assign(follower_pms, pms).unwrap();
follower.commit(follower_pms).unwrap();
let mut prf_leader = MpcPrf::new(mode);
let mut prf_follower = MpcPrf::new(mode);
let leader_prf_out = prf_leader.alloc(&mut leader, leader_pms).unwrap();
let follower_prf_out = prf_follower.alloc(&mut follower, follower_pms).unwrap();
// client_random and server_random
prf_leader.set_client_random(client_random).unwrap();
prf_follower.set_client_random(client_random).unwrap();
prf_leader.set_server_random(server_random).unwrap();
prf_follower.set_server_random(server_random).unwrap();
let SessionKeys {
client_write_key: leader_cwk,
server_write_key: leader_swk,
client_iv: leader_civ,
server_iv: leader_siv,
} = leader_session_keys;
client_write_key: cwk_leader,
server_write_key: swk_leader,
client_iv: civ_leader,
server_iv: siv_leader,
} = leader_prf_out.keys;
let mut cwk_leader = leader.decode(cwk_leader).unwrap();
let mut swk_leader = leader.decode(swk_leader).unwrap();
let mut civ_leader = leader.decode(civ_leader).unwrap();
let mut siv_leader = leader.decode(siv_leader).unwrap();
let SessionKeys {
client_write_key: follower_cwk,
server_write_key: follower_swk,
client_iv: follower_civ,
server_iv: follower_siv,
} = follower_session_keys;
client_write_key: cwk_follower,
server_write_key: swk_follower,
client_iv: civ_follower,
server_iv: siv_follower,
} = follower_prf_out.keys;
// Decode session keys
let (leader_session_keys, follower_session_keys) = futures::try_join!(
async {
leader
.thread_mut()
.decode(&[leader_cwk, leader_swk, leader_civ, leader_siv])
.await
},
async {
follower
.thread_mut()
.decode(&[follower_cwk, follower_swk, follower_civ, follower_siv])
.await
}
)
.unwrap();
let mut cwk_follower = follower.decode(cwk_follower).unwrap();
let mut swk_follower = follower.decode(swk_follower).unwrap();
let mut civ_follower = follower.decode(civ_follower).unwrap();
let mut siv_follower = follower.decode(siv_follower).unwrap();
let leader_cwk: [u8; 16] = leader_session_keys[0].clone().try_into().unwrap();
let leader_swk: [u8; 16] = leader_session_keys[1].clone().try_into().unwrap();
let leader_civ: [u8; 4] = leader_session_keys[2].clone().try_into().unwrap();
let leader_siv: [u8; 4] = leader_session_keys[3].clone().try_into().unwrap();
while prf_leader.wants_flush() || prf_follower.wants_flush() {
tokio::try_join!(
async {
prf_leader.flush(&mut leader).unwrap();
leader.execute_all(&mut ctx_a).await
},
async {
prf_follower.flush(&mut follower).unwrap();
follower.execute_all(&mut ctx_b).await
}
)
.unwrap();
}
let follower_cwk: [u8; 16] = follower_session_keys[0].clone().try_into().unwrap();
let follower_swk: [u8; 16] = follower_session_keys[1].clone().try_into().unwrap();
let follower_civ: [u8; 4] = follower_session_keys[2].clone().try_into().unwrap();
let follower_siv: [u8; 4] = follower_session_keys[3].clone().try_into().unwrap();
let cwk_leader = cwk_leader.try_recv().unwrap().unwrap();
let swk_leader = swk_leader.try_recv().unwrap().unwrap();
let civ_leader = civ_leader.try_recv().unwrap().unwrap();
let siv_leader = siv_leader.try_recv().unwrap().unwrap();
let (expected_cwk, expected_swk, expected_civ, expected_siv) =
session_keys(pms, client_random, server_random);
let cwk_follower = cwk_follower.try_recv().unwrap().unwrap();
let swk_follower = swk_follower.try_recv().unwrap().unwrap();
let civ_follower = civ_follower.try_recv().unwrap().unwrap();
let siv_follower = siv_follower.try_recv().unwrap().unwrap();
assert_eq!(leader_cwk, expected_cwk);
assert_eq!(leader_swk, expected_swk);
assert_eq!(leader_civ, expected_civ);
assert_eq!(leader_siv, expected_siv);
assert_eq!(cwk_leader, cwk_follower);
assert_eq!(swk_leader, swk_follower);
assert_eq!(civ_leader, civ_follower);
assert_eq!(siv_leader, siv_follower);
assert_eq!(follower_cwk, expected_cwk);
assert_eq!(follower_swk, expected_swk);
assert_eq!(follower_civ, expected_civ);
assert_eq!(follower_siv, expected_siv);
assert_eq!(cwk_leader, cwk_expected);
assert_eq!(swk_leader, swk_expected);
assert_eq!(civ_leader, civ_expected);
assert_eq!(siv_leader, siv_expected);
let cf_hs_hash = [1u8; 32];
let sf_hs_hash = [2u8; 32];
// client finished
prf_leader.set_cf_hash(cf_hs_hash).unwrap();
prf_follower.set_cf_hash(cf_hs_hash).unwrap();
let (cf_vd, _) = futures::try_join!(
leader.compute_client_finished_vd(cf_hs_hash),
follower.compute_client_finished_vd(cf_hs_hash)
)
.unwrap();
let cf_vd_leader = leader_prf_out.cf_vd;
let cf_vd_follower = follower_prf_out.cf_vd;
let expected_cf_vd = compute_vd(ms, b"client finished", cf_hs_hash);
let mut cf_vd_leader = leader.decode(cf_vd_leader).unwrap();
let mut cf_vd_follower = follower.decode(cf_vd_follower).unwrap();
assert_eq!(cf_vd, expected_cf_vd);
while prf_leader.wants_flush() || prf_follower.wants_flush() {
tokio::try_join!(
async {
prf_leader.flush(&mut leader).unwrap();
leader.execute_all(&mut ctx_a).await
},
async {
prf_follower.flush(&mut follower).unwrap();
follower.execute_all(&mut ctx_b).await
}
)
.unwrap();
}
let (sf_vd, _) = futures::try_join!(
leader.compute_server_finished_vd(sf_hs_hash),
follower.compute_server_finished_vd(sf_hs_hash)
)
.unwrap();
let cf_vd_leader = cf_vd_leader.try_recv().unwrap().unwrap();
let cf_vd_follower = cf_vd_follower.try_recv().unwrap().unwrap();
let expected_sf_vd = compute_vd(ms, b"server finished", sf_hs_hash);
assert_eq!(cf_vd_leader, cf_vd_follower);
assert_eq!(cf_vd_leader, cf_vd_expected);
assert_eq!(sf_vd, expected_sf_vd);
// server finished
prf_leader.set_sf_hash(sf_hs_hash).unwrap();
prf_follower.set_sf_hash(sf_hs_hash).unwrap();
let sf_vd_leader = leader_prf_out.sf_vd;
let sf_vd_follower = follower_prf_out.sf_vd;
let mut sf_vd_leader = leader.decode(sf_vd_leader).unwrap();
let mut sf_vd_follower = follower.decode(sf_vd_follower).unwrap();
while prf_leader.wants_flush() || prf_follower.wants_flush() {
tokio::try_join!(
async {
prf_leader.flush(&mut leader).unwrap();
leader.execute_all(&mut ctx_a).await
},
async {
prf_follower.flush(&mut follower).unwrap();
follower.execute_all(&mut ctx_b).await
}
)
.unwrap();
}
let sf_vd_leader = sf_vd_leader.try_recv().unwrap().unwrap();
let sf_vd_follower = sf_vd_follower.try_recv().unwrap().unwrap();
assert_eq!(sf_vd_leader, sf_vd_follower);
assert_eq!(sf_vd_leader, sf_vd_expected);
}
}

View File

@@ -1,443 +1,407 @@
use std::{
fmt::Debug,
sync::{Arc, OnceLock},
use crate::{
hmac::{IPAD, OPAD},
Mode, PrfError, PrfOutput,
};
use async_trait::async_trait;
use hmac_sha256_circuits::{build_session_keys, build_verify_data};
use mpz_circuits::Circuit;
use mpz_common::cpu::CpuBackend;
use mpz_garble::{config::Visibility, value::ValueRef, Decode, Execute, Load, Memory};
use mpz_circuits::{circuits::xor, Circuit, CircuitBuilder};
use mpz_hash::sha256::Sha256;
use mpz_vm_core::{
memory::{
binary::{Binary, U8},
Array, MemoryExt, StaticSize, Vector, ViewExt,
},
Call, CallableExt, Vm,
};
use std::{fmt::Debug, sync::Arc};
use tracing::instrument;
use crate::{Prf, PrfConfig, PrfError, Role, SessionKeys, CF_LABEL, SF_LABEL};
mod state;
use state::State;
/// Circuit for computing TLS session keys.
static SESSION_KEYS_CIRC: OnceLock<Arc<Circuit>> = OnceLock::new();
/// Circuit for computing TLS client verify data.
static CLIENT_VD_CIRC: OnceLock<Arc<Circuit>> = OnceLock::new();
/// Circuit for computing TLS server verify data.
static SERVER_VD_CIRC: OnceLock<Arc<Circuit>> = OnceLock::new();
mod function;
use function::Prf;
/// MPC PRF for computing TLS 1.2 HMAC-SHA256 PRF.
#[derive(Debug)]
pub(crate) struct Randoms {
    // VM references to the client and server random values.
    pub(crate) client_random: ValueRef,
    pub(crate) server_random: ValueRef,
}

#[derive(Debug, Clone)]
pub(crate) struct HashState {
    // Outer/inner SHA-256 hash states — presumably the HMAC states keyed
    // with the master secret (`ms_` prefix); confirm against the session
    // keys circuit outputs.
    pub(crate) ms_outer_hash_state: ValueRef,
    pub(crate) ms_inner_hash_state: ValueRef,
}

#[derive(Debug)]
pub(crate) struct VerifyData {
    // Handshake transcript hash input and the 12-byte verify-data output.
    pub(crate) handshake_hash: ValueRef,
    pub(crate) vd: ValueRef,
}

// PRF protocol state machine. States advance in order:
// Initialized -> SessionKeys -> ClientFinished -> ServerFinished -> Complete.
// `Error` is the poisoned state left behind by `take` if a transition panics
// or fails midway.
#[derive(Debug)]
pub(crate) enum State {
    Initialized,
    SessionKeys {
        pms: ValueRef,
        randoms: Randoms,
        hash_state: HashState,
        keys: crate::SessionKeys,
        cf_vd: VerifyData,
        sf_vd: VerifyData,
    },
    ClientFinished {
        hash_state: HashState,
        cf_vd: VerifyData,
        sf_vd: VerifyData,
    },
    ServerFinished {
        hash_state: HashState,
        sf_vd: VerifyData,
    },
    Complete,
    Error,
}

impl State {
    // Moves the current state out, leaving `Error` in its place so a failed
    // transition cannot be silently retried.
    fn take(&mut self) -> State {
        std::mem::replace(self, State::Error)
    }
}
/// MPC PRF for computing TLS HMAC-SHA256 PRF.
pub struct MpcPrf<E> {
config: PrfConfig,
pub struct MpcPrf {
mode: Mode,
state: State,
thread_0: E,
thread_1: E,
}
impl<E> Debug for MpcPrf<E> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("MpcPrf")
.field("config", &self.config)
.field("state", &self.state)
.finish()
}
}
impl<E> MpcPrf<E>
where
E: Load + Memory + Execute + Decode + Send,
{
impl MpcPrf {
/// Creates a new instance of the PRF.
pub fn new(config: PrfConfig, thread_0: E, thread_1: E) -> MpcPrf<E> {
MpcPrf {
config,
///
/// # Arguments
///
/// `mode` - The PRF mode.
pub fn new(mode: Mode) -> MpcPrf {
Self {
mode,
state: State::Initialized,
thread_0,
thread_1,
}
}
/// Returns a mutable reference to the MPC thread.
pub fn thread_mut(&mut self) -> &mut E {
&mut self.thread_0
}
/// Executes a circuit which computes TLS session keys.
/// Allocates resources for the PRF.
///
/// # Arguments
///
/// * `vm` - Virtual machine.
/// * `pms` - The pre-master secret.
#[instrument(level = "debug", skip_all, err)]
async fn execute_session_keys(
pub fn alloc(
&mut self,
server_random: [u8; 32],
) -> Result<SessionKeys, PrfError> {
let State::SessionKeys {
pms,
randoms: randoms_refs,
hash_state,
keys,
cf_vd,
sf_vd,
} = self.state.take()
else {
return Err(PrfError::state("session keys not initialized"));
};
let circ = SESSION_KEYS_CIRC
.get()
.expect("session keys circuit is set");
self.thread_0
.assign(&randoms_refs.server_random, server_random)?;
self.thread_0
.execute(
circ.clone(),
&[pms, randoms_refs.client_random, randoms_refs.server_random],
&[
keys.client_write_key.clone(),
keys.server_write_key.clone(),
keys.client_iv.clone(),
keys.server_iv.clone(),
hash_state.ms_outer_hash_state.clone(),
hash_state.ms_inner_hash_state.clone(),
],
)
.await?;
self.state = State::ClientFinished {
hash_state,
cf_vd,
sf_vd,
};
Ok(keys)
}
#[instrument(level = "debug", skip_all, err)]
async fn execute_cf_vd(&mut self, handshake_hash: [u8; 32]) -> Result<[u8; 12], PrfError> {
let State::ClientFinished {
hash_state,
cf_vd,
sf_vd,
} = self.state.take()
else {
return Err(PrfError::state("PRF not in client finished state"));
};
let circ = CLIENT_VD_CIRC.get().expect("client vd circuit is set");
self.thread_0
.assign(&cf_vd.handshake_hash, handshake_hash)?;
self.thread_0
.execute(
circ.clone(),
&[
hash_state.ms_outer_hash_state.clone(),
hash_state.ms_inner_hash_state.clone(),
cf_vd.handshake_hash,
],
&[cf_vd.vd.clone()],
)
.await?;
let mut outputs = self.thread_0.decode(&[cf_vd.vd]).await?;
let vd: [u8; 12] = outputs.remove(0).try_into().expect("vd is 12 bytes");
self.state = State::ServerFinished { hash_state, sf_vd };
Ok(vd)
}
#[instrument(level = "debug", skip_all, err)]
async fn execute_sf_vd(&mut self, handshake_hash: [u8; 32]) -> Result<[u8; 12], PrfError> {
let State::ServerFinished { hash_state, sf_vd } = self.state.take() else {
return Err(PrfError::state("PRF not in server finished state"));
};
let circ = SERVER_VD_CIRC.get().expect("server vd circuit is set");
self.thread_0
.assign(&sf_vd.handshake_hash, handshake_hash)?;
self.thread_0
.execute(
circ.clone(),
&[
hash_state.ms_outer_hash_state,
hash_state.ms_inner_hash_state,
sf_vd.handshake_hash,
],
&[sf_vd.vd.clone()],
)
.await?;
let mut outputs = self.thread_0.decode(&[sf_vd.vd]).await?;
let vd: [u8; 12] = outputs.remove(0).try_into().expect("vd is 12 bytes");
self.state = State::Complete;
Ok(vd)
}
}
#[async_trait]
impl<E> Prf for MpcPrf<E>
where
E: Memory + Load + Execute + Decode + Send,
{
#[instrument(level = "debug", skip_all, err)]
async fn setup(&mut self, pms: ValueRef) -> Result<SessionKeys, PrfError> {
vm: &mut dyn Vm<Binary>,
pms: Array<U8, 32>,
) -> Result<PrfOutput, PrfError> {
let State::Initialized = self.state.take() else {
return Err(PrfError::state("PRF not in initialized state"));
};
let thread = &mut self.thread_0;
let mode = self.mode;
let pms: Vector<U8> = pms.into();
let randoms = Randoms {
// The client random is kept private so that the handshake transcript
// hashes do not leak information about the server's identity.
client_random: thread.new_input::<[u8; 32]>(
"client_random",
match self.config.role {
Role::Leader => Visibility::Private,
Role::Follower => Visibility::Blind,
},
)?,
server_random: thread.new_input::<[u8; 32]>("server_random", Visibility::Public)?,
};
let outer_partial_pms = compute_partial(vm, pms, OPAD)?;
let inner_partial_pms = compute_partial(vm, pms, IPAD)?;
let keys = SessionKeys {
client_write_key: thread.new_output::<[u8; 16]>("client_write_key")?,
server_write_key: thread.new_output::<[u8; 16]>("server_write_key")?,
client_iv: thread.new_output::<[u8; 4]>("client_write_iv")?,
server_iv: thread.new_output::<[u8; 4]>("server_write_iv")?,
};
let master_secret =
Prf::alloc_master_secret(mode, vm, outer_partial_pms, inner_partial_pms)?;
let ms = master_secret.output();
let ms = merge_outputs(vm, ms, 48)?;
let hash_state = HashState {
ms_outer_hash_state: thread.new_output::<[u32; 8]>("ms_outer_hash_state")?,
ms_inner_hash_state: thread.new_output::<[u32; 8]>("ms_inner_hash_state")?,
};
let outer_partial_ms = compute_partial(vm, ms, OPAD)?;
let inner_partial_ms = compute_partial(vm, ms, IPAD)?;
let cf_vd = VerifyData {
handshake_hash: thread.new_input::<[u8; 32]>("cf_hash", Visibility::Public)?,
vd: thread.new_output::<[u8; 12]>("cf_vd")?,
};
let sf_vd = VerifyData {
handshake_hash: thread.new_input::<[u8; 32]>("sf_hash", Visibility::Public)?,
vd: thread.new_output::<[u8; 12]>("sf_vd")?,
};
let key_expansion =
Prf::alloc_key_expansion(mode, vm, outer_partial_ms.clone(), inner_partial_ms.clone())?;
let client_finished = Prf::alloc_client_finished(
mode,
vm,
outer_partial_ms.clone(),
inner_partial_ms.clone(),
)?;
let server_finished = Prf::alloc_server_finished(
mode,
vm,
outer_partial_ms.clone(),
inner_partial_ms.clone(),
)?;
self.state = State::SessionKeys {
pms,
randoms,
hash_state,
keys: keys.clone(),
cf_vd,
sf_vd,
client_random: None,
master_secret,
key_expansion,
client_finished,
server_finished,
};
Ok(keys)
self.state.prf_output(vm)
}
/// Sets the client random.
///
/// # Arguments
///
/// * `random` - The client random.
#[instrument(level = "debug", skip_all, err)]
async fn set_client_random(&mut self, client_random: Option<[u8; 32]>) -> Result<(), PrfError> {
let State::SessionKeys { randoms, .. } = &self.state else {
pub fn set_client_random(&mut self, random: [u8; 32]) -> Result<(), PrfError> {
let State::SessionKeys { client_random, .. } = &mut self.state else {
return Err(PrfError::state("PRF not set up"));
};
if self.config.role == Role::Leader {
let Some(client_random) = client_random else {
return Err(PrfError::role("leader must provide client random"));
};
self.thread_0
.assign(&randoms.client_random, client_random)?;
} else if client_random.is_some() {
return Err(PrfError::role("only leader can set client random"));
}
self.thread_0
.commit(&[randoms.client_random.clone()])
.await?;
*client_random = Some(random);
Ok(())
}
/// Sets the server random.
///
/// # Arguments
///
/// * `random` - The server random.
#[instrument(level = "debug", skip_all, err)]
async fn preprocess(&mut self) -> Result<(), PrfError> {
pub fn set_server_random(&mut self, random: [u8; 32]) -> Result<(), PrfError> {
let State::SessionKeys {
pms,
randoms,
hash_state,
keys,
cf_vd,
sf_vd,
} = self.state.take()
client_random,
master_secret,
key_expansion,
..
} = &mut self.state
else {
return Err(PrfError::state("PRF not set up"));
};
// Builds all circuits in parallel and preprocesses the session keys circuit.
futures::try_join!(
async {
if SESSION_KEYS_CIRC.get().is_none() {
_ = SESSION_KEYS_CIRC.set(CpuBackend::blocking(build_session_keys).await);
}
let client_random = client_random.expect("Client random should have been set by now");
let server_random = random;
let circ = SESSION_KEYS_CIRC
.get()
.expect("session keys circuit should be built");
let mut seed_ms = client_random.to_vec();
seed_ms.extend_from_slice(&server_random);
master_secret.set_start_seed(seed_ms);
self.thread_0
.load(
circ.clone(),
&[
pms.clone(),
randoms.client_random.clone(),
randoms.server_random.clone(),
],
&[
keys.client_write_key.clone(),
keys.server_write_key.clone(),
keys.client_iv.clone(),
keys.server_iv.clone(),
hash_state.ms_outer_hash_state.clone(),
hash_state.ms_inner_hash_state.clone(),
],
)
.await?;
Ok::<_, PrfError>(())
},
async {
if CLIENT_VD_CIRC.get().is_none() {
_ = CLIENT_VD_CIRC
.set(CpuBackend::blocking(move || build_verify_data(CF_LABEL)).await);
}
Ok::<_, PrfError>(())
},
async {
if SERVER_VD_CIRC.get().is_none() {
_ = SERVER_VD_CIRC
.set(CpuBackend::blocking(move || build_verify_data(SF_LABEL)).await);
}
Ok::<_, PrfError>(())
}
)?;
// Finishes preprocessing the verify data circuits.
futures::try_join!(
async {
self.thread_0
.load(
CLIENT_VD_CIRC
.get()
.expect("client finished circuit should be built")
.clone(),
&[
hash_state.ms_outer_hash_state.clone(),
hash_state.ms_inner_hash_state.clone(),
cf_vd.handshake_hash.clone(),
],
&[cf_vd.vd.clone()],
)
.await
},
async {
self.thread_1
.load(
SERVER_VD_CIRC
.get()
.expect("server finished circuit should be built")
.clone(),
&[
hash_state.ms_outer_hash_state.clone(),
hash_state.ms_inner_hash_state.clone(),
sf_vd.handshake_hash.clone(),
],
&[sf_vd.vd.clone()],
)
.await
}
)?;
self.state = State::SessionKeys {
pms,
randoms,
hash_state,
keys,
cf_vd,
sf_vd,
};
let mut seed_ke = server_random.to_vec();
seed_ke.extend_from_slice(&client_random);
key_expansion.set_start_seed(seed_ke);
Ok(())
}
/// Sets the client finished handshake hash.
///
/// # Arguments
///
/// * `handshake_hash` - The handshake transcript hash.
#[instrument(level = "debug", skip_all, err)]
async fn compute_client_finished_vd(
&mut self,
handshake_hash: [u8; 32],
) -> Result<[u8; 12], PrfError> {
self.execute_cf_vd(handshake_hash).await
pub fn set_cf_hash(&mut self, handshake_hash: [u8; 32]) -> Result<(), PrfError> {
let State::ClientFinished {
client_finished, ..
} = &mut self.state
else {
return Err(PrfError::state("PRF not in client finished state"));
};
let seed_cf = handshake_hash.to_vec();
client_finished.set_start_seed(seed_cf);
Ok(())
}
/// Sets the server finished handshake hash.
///
/// # Arguments
///
/// * `handshake_hash` - The handshake transcript hash.
#[instrument(level = "debug", skip_all, err)]
async fn compute_server_finished_vd(
&mut self,
handshake_hash: [u8; 32],
) -> Result<[u8; 12], PrfError> {
self.execute_sf_vd(handshake_hash).await
/// Sets the server finished handshake hash.
///
/// Fails unless the PRF is currently in the server finished state.
pub fn set_sf_hash(&mut self, handshake_hash: [u8; 32]) -> Result<(), PrfError> {
    match &mut self.state {
        State::ServerFinished { server_finished } => {
            server_finished.set_start_seed(handshake_hash.to_vec());
            Ok(())
        }
        _ => Err(PrfError::state("PRF not in server finished state")),
    }
}
#[instrument(level = "debug", skip_all, err)]
async fn compute_session_keys(
&mut self,
server_random: [u8; 32],
) -> Result<SessionKeys, PrfError> {
self.execute_session_keys(server_random).await
/// Returns if the PRF needs to be flushed.
pub fn wants_flush(&self) -> bool {
match &self.state {
State::Initialized => false,
State::SessionKeys {
master_secret,
key_expansion,
..
} => master_secret.wants_flush() || key_expansion.wants_flush(),
State::ClientFinished {
client_finished, ..
} => client_finished.wants_flush(),
State::ServerFinished { server_finished } => server_finished.wants_flush(),
State::Complete => false,
State::Error => false,
}
}
/// Flushes the PRF.
///
/// Drives the state machine forward: flushes the inner PRF computations
/// pending in the current state and, once they no longer want flushing,
/// advances SessionKeys -> ClientFinished -> ServerFinished -> Complete.
pub fn flush(&mut self, vm: &mut dyn Vm<Binary>) -> Result<(), PrfError> {
    // `take` leaves `State::Error` behind, so an early `?` return cannot
    // leave a stale state in place.
    self.state = match self.state.take() {
        State::SessionKeys {
            client_random,
            mut master_secret,
            mut key_expansion,
            client_finished,
            server_finished,
        } => {
            master_secret.flush(vm)?;
            key_expansion.flush(vm)?;

            // Advance only once both PRFs are fully flushed; otherwise
            // stay in this state so a later call can continue.
            if !master_secret.wants_flush() && !key_expansion.wants_flush() {
                State::ClientFinished {
                    client_finished,
                    server_finished,
                }
            } else {
                State::SessionKeys {
                    client_random,
                    master_secret,
                    key_expansion,
                    client_finished,
                    server_finished,
                }
            }
        }
        State::ClientFinished {
            mut client_finished,
            server_finished,
        } => {
            client_finished.flush(vm)?;

            if !client_finished.wants_flush() {
                State::ServerFinished { server_finished }
            } else {
                State::ClientFinished {
                    client_finished,
                    server_finished,
                }
            }
        }
        State::ServerFinished {
            mut server_finished,
        } => {
            server_finished.flush(vm)?;

            if !server_finished.wants_flush() {
                State::Complete
            } else {
                State::ServerFinished { server_finished }
            }
        }
        // Initialized / Complete / Error: nothing to flush.
        other => other,
    };

    Ok(())
}
}
/// Depending on the provided `mask` computes and returns `outer_partial` or
/// `inner_partial` for HMAC-SHA256.
///
/// # Arguments
///
/// * `vm` - Virtual machine.
/// * `key` - Key to pad and xor. Must be at most 64 bytes (one SHA-256 block).
/// * `mask` - Mask used for padding (ipad or opad).
///
/// # Panics
///
/// Panics if `key` is longer than 64 bytes.
fn compute_partial(
    vm: &mut dyn Vm<Binary>,
    key: Vector<U8>,
    mask: [u8; 64],
) -> Result<Sha256, PrfError> {
    // Guard the subtraction below: keys longer than one block would need to
    // be pre-hashed, which this implementation does not support. Without
    // this check `64 - key.len()` underflows with an unhelpful panic.
    assert!(key.len() <= 64, "key must be at most 64 bytes");

    let xor_circ = Arc::new(xor(8 * 64));

    // Public zero padding up to the 64-byte block size.
    let additional_len = 64 - key.len();
    let padding = vec![0_u8; additional_len];

    let padding_ref: Vector<U8> = vm.alloc_vec(additional_len).map_err(PrfError::vm)?;
    vm.mark_public(padding_ref).map_err(PrfError::vm)?;
    vm.assign(padding_ref, padding).map_err(PrfError::vm)?;
    vm.commit(padding_ref).map_err(PrfError::vm)?;

    // Public ipad/opad mask.
    let mask_ref: Array<U8, 64> = vm.alloc().map_err(PrfError::vm)?;
    vm.mark_public(mask_ref).map_err(PrfError::vm)?;
    vm.assign(mask_ref, mask).map_err(PrfError::vm)?;
    vm.commit(mask_ref).map_err(PrfError::vm)?;

    // key_padded = (key || padding) XOR mask.
    let xor_call = Call::builder(xor_circ)
        .arg(key)
        .arg(padding_ref)
        .arg(mask_ref)
        .build()
        .map_err(PrfError::vm)?;
    let key_padded: Vector<U8> = vm.call(xor_call).map_err(PrfError::vm)?;

    // Compress the padded key block into a fresh SHA-256 state.
    let mut sha = Sha256::new_with_init(vm)?;
    sha.update(&key_padded);
    sha.compress(vm)?;

    Ok(sha)
}
/// Concatenates the 32-byte hash outputs and truncates the result to
/// `output_bytes`.
///
/// # Panics
///
/// Panics if `output_bytes` exceeds the total available bytes.
fn merge_outputs(
    vm: &mut dyn Vm<Binary>,
    inputs: Vec<Array<U8, 32>>,
    output_bytes: usize,
) -> Result<Vector<U8>, PrfError> {
    assert!(output_bytes <= 32 * inputs.len());

    let total_bits = Array::<U8, 32>::SIZE * inputs.len();
    let builder = inputs
        .iter()
        .fold(Call::builder(gen_merge_circ(total_bits)), |call, &input| {
            call.arg(input)
        });
    let call = builder.build().map_err(PrfError::vm)?;

    let mut merged: Vector<U8> = vm.call(call).map_err(PrfError::vm)?;
    merged.truncate(output_bytes);
    Ok(merged)
}
/// Builds an identity circuit with `size` input feeds, each wired straight
/// through an id gate to an output.
///
/// The previous version chunked the inputs into 8-feed groups and then
/// re-chunked each group by 8 — a no-op that additionally dropped any
/// trailing feeds when `size` was not a multiple of 8 (`chunks_exact`
/// discards the remainder). Wiring each feed directly is equivalent for all
/// existing callers (which always pass a multiple of 8) and also correct for
/// arbitrary sizes.
fn gen_merge_circ(size: usize) -> Arc<Circuit> {
    let mut builder = CircuitBuilder::new();
    let inputs = (0..size).map(|_| builder.add_input()).collect::<Vec<_>>();

    for &feed in inputs.iter() {
        let output = builder.add_id_gate(feed);
        builder.add_output(output);
    }

    Arc::new(builder.build().expect("merge circuit is valid"))
}
#[cfg(test)]
mod tests {
    use crate::{prf::merge_outputs, test_utils::mock_vm};
    use mpz_common::context::test_st_context;
    use mpz_vm_core::{
        memory::{binary::U8, Array, MemoryExt, ViewExt},
        Execute,
    };

    /// Checks that `merge_outputs` concatenates two 32-byte arrays and
    /// truncates to the requested length, with leader and follower agreeing.
    #[tokio::test]
    async fn test_merge_outputs() {
        let (mut ctx_a, mut ctx_b) = test_st_context(8);
        let (mut leader, mut follower) = mock_vm();

        let input1: [u8; 32] = std::array::from_fn(|i| i as u8);
        let input2: [u8; 32] = std::array::from_fn(|i| i as u8 + 32);

        // Expected result: input1 || input2, truncated to 48 bytes.
        let mut expected = input1.to_vec();
        expected.extend_from_slice(&input2);
        expected.truncate(48);

        // leader
        let input1_leader: Array<U8, 32> = leader.alloc().unwrap();
        let input2_leader: Array<U8, 32> = leader.alloc().unwrap();
        leader.mark_public(input1_leader).unwrap();
        leader.mark_public(input2_leader).unwrap();
        leader.assign(input1_leader, input1).unwrap();
        leader.assign(input2_leader, input2).unwrap();
        leader.commit(input1_leader).unwrap();
        leader.commit(input2_leader).unwrap();

        let merged_leader =
            merge_outputs(&mut leader, vec![input1_leader, input2_leader], 48).unwrap();
        let mut merged_leader = leader.decode(merged_leader).unwrap();

        // follower
        let input1_follower: Array<U8, 32> = follower.alloc().unwrap();
        let input2_follower: Array<U8, 32> = follower.alloc().unwrap();
        follower.mark_public(input1_follower).unwrap();
        follower.mark_public(input2_follower).unwrap();
        follower.assign(input1_follower, input1).unwrap();
        follower.assign(input2_follower, input2).unwrap();
        follower.commit(input1_follower).unwrap();
        follower.commit(input2_follower).unwrap();

        let merged_follower =
            merge_outputs(&mut follower, vec![input1_follower, input2_follower], 48).unwrap();
        let mut merged_follower = follower.decode(merged_follower).unwrap();

        // Run both parties to completion before decoding the results.
        tokio::try_join!(
            leader.execute_all(&mut ctx_a),
            follower.execute_all(&mut ctx_b)
        )
        .unwrap();

        let merged_leader = merged_leader.try_recv().unwrap().unwrap();
        let merged_follower = merged_follower.try_recv().unwrap().unwrap();

        assert_eq!(merged_leader, merged_follower);
        assert_eq!(merged_leader, expected);
    }
}

View File

@@ -0,0 +1,257 @@
//! Provides [`Prf`], for computing the TLS 1.2 PRF.
use crate::{Mode, PrfError};
use mpz_hash::sha256::Sha256;
use mpz_vm_core::{
memory::{
binary::{Binary, U8},
Array,
},
Vm,
};
mod normal;
mod reduced;
/// A TLS 1.2 PRF computation, in one of the two supported modes.
#[derive(Debug)]
pub(crate) enum Prf {
    /// Computes some hashes of the PRF locally (see the `reduced` module).
    Reduced(reduced::PrfFunction),
    /// Computes the whole PRF in MPC (see the `normal` module).
    Normal(normal::PrfFunction),
}
impl Prf {
    /// Allocates the PRF for deriving the master secret.
    ///
    /// Dispatches to the reduced or normal implementation based on `mode`.
    pub(crate) fn alloc_master_secret(
        mode: Mode,
        vm: &mut dyn Vm<Binary>,
        outer_partial: Sha256,
        inner_partial: Sha256,
    ) -> Result<Self, PrfError> {
        let prf = match mode {
            Mode::Reduced => Self::Reduced(reduced::PrfFunction::alloc_master_secret(
                vm,
                outer_partial,
                inner_partial,
            )?),
            Mode::Normal => Self::Normal(normal::PrfFunction::alloc_master_secret(
                vm,
                outer_partial,
                inner_partial,
            )?),
        };
        Ok(prf)
    }

    /// Allocates the PRF for key expansion.
    pub(crate) fn alloc_key_expansion(
        mode: Mode,
        vm: &mut dyn Vm<Binary>,
        outer_partial: Sha256,
        inner_partial: Sha256,
    ) -> Result<Self, PrfError> {
        let prf = match mode {
            Mode::Reduced => Self::Reduced(reduced::PrfFunction::alloc_key_expansion(
                vm,
                outer_partial,
                inner_partial,
            )?),
            Mode::Normal => Self::Normal(normal::PrfFunction::alloc_key_expansion(
                vm,
                outer_partial,
                inner_partial,
            )?),
        };
        Ok(prf)
    }

    /// Allocates the PRF for the client finished verify data.
    ///
    /// NOTE: the parameter was previously named `config`; renamed to `mode`
    /// for consistency with the other constructors (call-compatible since
    /// Rust arguments are positional).
    pub(crate) fn alloc_client_finished(
        mode: Mode,
        vm: &mut dyn Vm<Binary>,
        outer_partial: Sha256,
        inner_partial: Sha256,
    ) -> Result<Self, PrfError> {
        let prf = match mode {
            Mode::Reduced => Self::Reduced(reduced::PrfFunction::alloc_client_finished(
                vm,
                outer_partial,
                inner_partial,
            )?),
            Mode::Normal => Self::Normal(normal::PrfFunction::alloc_client_finished(
                vm,
                outer_partial,
                inner_partial,
            )?),
        };
        Ok(prf)
    }

    /// Allocates the PRF for the server finished verify data.
    ///
    /// NOTE: the parameter was previously named `config`; renamed to `mode`
    /// for consistency with the other constructors.
    pub(crate) fn alloc_server_finished(
        mode: Mode,
        vm: &mut dyn Vm<Binary>,
        outer_partial: Sha256,
        inner_partial: Sha256,
    ) -> Result<Self, PrfError> {
        let prf = match mode {
            Mode::Reduced => Self::Reduced(reduced::PrfFunction::alloc_server_finished(
                vm,
                outer_partial,
                inner_partial,
            )?),
            Mode::Normal => Self::Normal(normal::PrfFunction::alloc_server_finished(
                vm,
                outer_partial,
                inner_partial,
            )?),
        };
        Ok(prf)
    }

    /// Returns `true` if the PRF needs to be flushed.
    pub(crate) fn wants_flush(&self) -> bool {
        match self {
            Prf::Reduced(prf) => prf.wants_flush(),
            Prf::Normal(prf) => prf.wants_flush(),
        }
    }

    /// Flushes pending work into the VM.
    pub(crate) fn flush(&mut self, vm: &mut dyn Vm<Binary>) -> Result<(), PrfError> {
        match self {
            Prf::Reduced(prf) => prf.flush(vm),
            Prf::Normal(prf) => prf.flush(vm),
        }
    }

    /// Sets the start seed (without the label, which is fixed per PRF).
    pub(crate) fn set_start_seed(&mut self, seed: Vec<u8>) {
        match self {
            Prf::Reduced(prf) => prf.set_start_seed(seed),
            Prf::Normal(prf) => prf.set_start_seed(seed),
        }
    }

    /// Returns the 32-byte output chunks of the PRF.
    pub(crate) fn output(&self) -> Vec<Array<U8, 32>> {
        match self {
            Prf::Reduced(prf) => prf.output(),
            Prf::Normal(prf) => prf.output(),
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::{
        prf::{compute_partial, function::Prf},
        test_utils::{mock_vm, phash},
        Mode,
    };
    use mpz_common::context::test_st_context;
    use mpz_vm_core::{
        memory::{binary::U8, Array, MemoryExt, ViewExt},
        Execute,
    };
    use rand::{rngs::ThreadRng, Rng};

    // Standard HMAC inner/outer padding bytes.
    const IPAD: [u8; 64] = [0x36; 64];
    const OPAD: [u8; 64] = [0x5c; 64];

    #[tokio::test]
    async fn test_phash_reduced() {
        let mode = Mode::Reduced;
        test_phash(mode).await;
    }

    #[tokio::test]
    async fn test_phash_normal() {
        let mode = Mode::Normal;
        test_phash(mode).await;
    }

    /// Runs the master-secret PRF between leader and follower in the given
    /// mode and compares the result with a local `phash` reference.
    async fn test_phash(mode: Mode) {
        let mut rng = ThreadRng::default();
        let (mut ctx_a, mut ctx_b) = test_st_context(8);
        let (mut leader, mut follower) = mock_vm();

        let key: [u8; 32] = rng.random();
        let start_seed: Vec<u8> = vec![42; 64];

        // The reference computation hashes label || seed.
        let mut label_seed = b"master secret".to_vec();
        label_seed.extend_from_slice(&start_seed);
        // 48-byte master secret -> 2 P_hash iterations of 32 bytes.
        let iterations = 2;

        let leader_key: Array<U8, 32> = leader.alloc().unwrap();
        leader.mark_public(leader_key).unwrap();
        leader.assign(leader_key, key).unwrap();
        leader.commit(leader_key).unwrap();

        let outer_partial_leader = compute_partial(&mut leader, leader_key.into(), OPAD).unwrap();
        let inner_partial_leader = compute_partial(&mut leader, leader_key.into(), IPAD).unwrap();

        let mut prf_leader = Prf::alloc_master_secret(
            mode,
            &mut leader,
            outer_partial_leader,
            inner_partial_leader,
        )
        .unwrap();
        prf_leader.set_start_seed(start_seed.clone());

        // Register decodings of the leader's PRF output chunks.
        let mut prf_out_leader = vec![];
        for p in prf_leader.output() {
            let p_out = leader.decode(p).unwrap();
            prf_out_leader.push(p_out)
        }

        let follower_key: Array<U8, 32> = follower.alloc().unwrap();
        follower.mark_public(follower_key).unwrap();
        follower.assign(follower_key, key).unwrap();
        follower.commit(follower_key).unwrap();

        let outer_partial_follower =
            compute_partial(&mut follower, follower_key.into(), OPAD).unwrap();
        let inner_partial_follower =
            compute_partial(&mut follower, follower_key.into(), IPAD).unwrap();

        let mut prf_follower = Prf::alloc_master_secret(
            mode,
            &mut follower,
            outer_partial_follower,
            inner_partial_follower,
        )
        .unwrap();
        prf_follower.set_start_seed(start_seed.clone());

        let mut prf_out_follower = vec![];
        for p in prf_follower.output() {
            let p_out = follower.decode(p).unwrap();
            prf_out_follower.push(p_out)
        }

        // Alternate flush/execute rounds until both PRFs are done.
        while prf_leader.wants_flush() || prf_follower.wants_flush() {
            tokio::try_join!(
                async {
                    prf_leader.flush(&mut leader).unwrap();
                    leader.execute_all(&mut ctx_a).await
                },
                async {
                    prf_follower.flush(&mut follower).unwrap();
                    follower.execute_all(&mut ctx_b).await
                }
            )
            .unwrap();
        }

        assert_eq!(prf_out_leader.len(), 2);
        assert_eq!(prf_out_leader.len(), prf_out_follower.len());

        let prf_result_leader: Vec<u8> = prf_out_leader
            .iter_mut()
            .flat_map(|p| p.try_recv().unwrap().unwrap())
            .collect();
        let prf_result_follower: Vec<u8> = prf_out_follower
            .iter_mut()
            .flat_map(|p| p.try_recv().unwrap().unwrap())
            .collect();

        let expected = phash(key.to_vec(), &label_seed, iterations);

        assert_eq!(prf_result_leader, prf_result_follower);
        assert_eq!(prf_result_leader, expected)
    }
}

View File

@@ -0,0 +1,174 @@
//! Computes the whole PRF in MPC.
use crate::{hmac::hmac_sha256, PrfError};
use mpz_hash::sha256::Sha256;
use mpz_vm_core::{
memory::{
binary::{Binary, U8},
Array, MemoryExt, Vector, ViewExt,
},
Vm,
};
/// A TLS 1.2 PRF computed entirely inside the MPC VM.
#[derive(Debug)]
pub(crate) struct PrfFunction {
    // The label, e.g. "master secret".
    label: &'static [u8],
    // Whether the seed message still needs to be assigned to the VM.
    state: State,
    // The start seed and the label, e.g. client_random + server_random + "master_secret".
    start_seed_label: Option<Vec<u8>>,
    // A(i) links of the P_hash chain; each feeds the next.
    a: Vec<PHash>,
    // P_hash evaluations whose outputs form the PRF output.
    p: Vec<PHash>,
}
impl PrfFunction {
    const MS_LABEL: &[u8] = b"master secret";
    const KEY_LABEL: &[u8] = b"key expansion";
    const CF_LABEL: &[u8] = b"client finished";
    const SF_LABEL: &[u8] = b"server finished";

    /// Allocates the PRF for the 48-byte master secret (64-byte seed).
    pub(crate) fn alloc_master_secret(
        vm: &mut dyn Vm<Binary>,
        outer_partial: Sha256,
        inner_partial: Sha256,
    ) -> Result<Self, PrfError> {
        Self::alloc(vm, Self::MS_LABEL, outer_partial, inner_partial, 48, 64)
    }

    /// Allocates the PRF for the 40-byte key block (64-byte seed).
    pub(crate) fn alloc_key_expansion(
        vm: &mut dyn Vm<Binary>,
        outer_partial: Sha256,
        inner_partial: Sha256,
    ) -> Result<Self, PrfError> {
        Self::alloc(vm, Self::KEY_LABEL, outer_partial, inner_partial, 40, 64)
    }

    /// Allocates the PRF for the 12-byte client verify data (32-byte seed).
    pub(crate) fn alloc_client_finished(
        vm: &mut dyn Vm<Binary>,
        outer_partial: Sha256,
        inner_partial: Sha256,
    ) -> Result<Self, PrfError> {
        Self::alloc(vm, Self::CF_LABEL, outer_partial, inner_partial, 12, 32)
    }

    /// Allocates the PRF for the 12-byte server verify data (32-byte seed).
    pub(crate) fn alloc_server_finished(
        vm: &mut dyn Vm<Binary>,
        outer_partial: Sha256,
        inner_partial: Sha256,
    ) -> Result<Self, PrfError> {
        Self::alloc(vm, Self::SF_LABEL, outer_partial, inner_partial, 12, 32)
    }

    /// Returns `true` if the seed still needs to be assigned to the VM.
    pub(crate) fn wants_flush(&self) -> bool {
        // Idiomatic form of the previous bool-producing `match`; also
        // consistent with the reduced implementation's `matches!` usage.
        matches!(self.state, State::Computing) && self.start_seed_label.is_some()
    }

    /// Assigns the seed message to the VM, completing this PRF's local work.
    pub(crate) fn flush(&mut self, vm: &mut dyn Vm<Binary>) -> Result<(), PrfError> {
        if let State::Computing = self.state {
            // Only the first A(1) message needs a concrete value; all later
            // messages are wired to previous outputs inside the circuit.
            let a = self.a.first().expect("prf should be allocated");
            let msg = *a.msg.first().expect("message for prf should be present");

            let msg_value = self
                .start_seed_label
                .clone()
                .expect("Start seed should have been set");

            vm.assign(msg, msg_value).map_err(PrfError::vm)?;
            vm.commit(msg).map_err(PrfError::vm)?;

            self.state = State::Finished;
        }

        Ok(())
    }

    /// Sets the start seed; the label is prepended automatically.
    pub(crate) fn set_start_seed(&mut self, seed: Vec<u8>) {
        let mut start_seed_label = self.label.to_vec();
        start_seed_label.extend_from_slice(&seed);

        self.start_seed_label = Some(start_seed_label);
    }

    /// Returns the 32-byte P_hash output chunks.
    pub(crate) fn output(&self) -> Vec<Array<U8, 32>> {
        self.p.iter().map(|p| p.output).collect()
    }

    /// Allocates the full P_hash chain in the VM.
    ///
    /// A(0) = label || seed; A(i) = HMAC(A(i-1)); P(i) = HMAC(A(i) || label || seed).
    fn alloc(
        vm: &mut dyn Vm<Binary>,
        label: &'static [u8],
        outer_partial: Sha256,
        inner_partial: Sha256,
        output_len: usize,
        seed_len: usize,
    ) -> Result<Self, PrfError> {
        let mut prf = Self {
            label,
            state: State::Computing,
            start_seed_label: None,
            a: vec![],
            p: vec![],
        };

        assert!(output_len > 0, "cannot compute 0 bytes for prf");
        let iterations = output_len.div_ceil(32);

        let msg_len_a = label.len() + seed_len;
        let seed_label_ref: Vector<U8> = vm.alloc_vec(msg_len_a).map_err(PrfError::vm)?;
        vm.mark_public(seed_label_ref).map_err(PrfError::vm)?;

        let mut msg_a = seed_label_ref;
        for _ in 0..iterations {
            // A(i) takes the previous A output (or the seed) as its message.
            let a = PHash::alloc(vm, outer_partial.clone(), inner_partial.clone(), &[msg_a])?;
            msg_a = Vector::<U8>::from(a.output);
            prf.a.push(a);

            // P(i) hashes A(i) || label || seed.
            let p = PHash::alloc(
                vm,
                outer_partial.clone(),
                inner_partial.clone(),
                &[msg_a, seed_label_ref],
            )?;
            prf.p.push(p);
        }

        Ok(prf)
    }
}
/// Whether the seed message still needs to be assigned to the VM.
#[derive(Debug, Clone, Copy)]
enum State {
    /// The seed has not yet been assigned/committed.
    Computing,
    /// The seed has been assigned; nothing left to do locally.
    Finished,
}
/// A single HMAC-SHA256 evaluation allocated in the VM.
#[derive(Debug, Clone)]
struct PHash {
    // Message parts hashed by the inner hash (in order).
    msg: Vec<Vector<U8>>,
    // The 32-byte HMAC output.
    output: Array<U8, 32>,
}
impl PHash {
    /// Allocates one HMAC-SHA256 evaluation: finalizes the inner hash over
    /// `msg` and feeds it into the outer hash.
    fn alloc(
        vm: &mut dyn Vm<Binary>,
        outer_partial: Sha256,
        inner_partial: Sha256,
        msg: &[Vector<U8>],
    ) -> Result<Self, PrfError> {
        let mut inner = inner_partial;
        for part in msg {
            inner.update(part);
        }
        inner.compress(vm)?;
        let inner_local = inner.finalize(vm)?;

        let output = hmac_sha256(vm, outer_partial, inner_local)?;

        Ok(Self {
            msg: msg.to_vec(),
            output,
        })
    }
}

View File

@@ -0,0 +1,247 @@
//! Computes some hashes of the PRF locally.
use std::collections::VecDeque;
use crate::{hmac::hmac_sha256, sha256, state_to_bytes, PrfError};
use mpz_core::bitvec::BitVec;
use mpz_hash::sha256::Sha256;
use mpz_vm_core::{
memory::{
binary::{Binary, U8},
Array, DecodeFutureTyped, MemoryExt, ViewExt,
},
Vm,
};
/// A TLS 1.2 PRF where the A-chain hashes are decoded and recomputed
/// locally, reducing the amount of work done in MPC.
#[derive(Debug)]
pub(crate) struct PrfFunction {
    // The label, e.g. "master secret".
    label: &'static [u8],
    // The start seed and the label, e.g. client_random + server_random + "master_secret".
    start_seed_label: Option<Vec<u8>>,
    // Number of 32-byte P_hash iterations.
    iterations: usize,
    // Current position in the incremental flush state machine.
    state: PrfState,
    // Pending A(i) evaluations, consumed one per ComputeA step.
    a: VecDeque<AHash>,
    // Pending P(i) evaluations, consumed one per ComputeP step.
    p: VecDeque<PHash>,
}
/// Flush-driven state machine for the reduced PRF.
#[derive(Debug)]
enum PrfState {
    /// Waiting for the inner-partial SHA-256 state to be decoded.
    InnerPartial {
        inner_partial: DecodeFutureTyped<BitVec, [u32; 8]>,
    },
    /// Ready to assign the message for A(iter).
    ComputeA {
        iter: usize,
        inner_partial: [u32; 8],
        msg: Vec<u8>,
    },
    /// Waiting for A(iter)'s output, then assigns P(iter)'s message.
    ComputeP {
        iter: usize,
        inner_partial: [u32; 8],
        a_output: DecodeFutureTyped<BitVec, [u8; 32]>,
    },
    /// Last P assigned; one more flush transitions to `Done`.
    FinishLastP,
    /// All local work complete.
    Done,
}
impl PrfFunction {
    const MS_LABEL: &[u8] = b"master secret";
    const KEY_LABEL: &[u8] = b"key expansion";
    const CF_LABEL: &[u8] = b"client finished";
    const SF_LABEL: &[u8] = b"server finished";

    /// Allocates the PRF for the 48-byte master secret.
    pub(crate) fn alloc_master_secret(
        vm: &mut dyn Vm<Binary>,
        outer_partial: Sha256,
        inner_partial: Sha256,
    ) -> Result<Self, PrfError> {
        Self::alloc(vm, Self::MS_LABEL, outer_partial, inner_partial, 48)
    }

    /// Allocates the PRF for the 40-byte key block.
    pub(crate) fn alloc_key_expansion(
        vm: &mut dyn Vm<Binary>,
        outer_partial: Sha256,
        inner_partial: Sha256,
    ) -> Result<Self, PrfError> {
        Self::alloc(vm, Self::KEY_LABEL, outer_partial, inner_partial, 40)
    }

    /// Allocates the PRF for the 12-byte client verify data.
    pub(crate) fn alloc_client_finished(
        vm: &mut dyn Vm<Binary>,
        outer_partial: Sha256,
        inner_partial: Sha256,
    ) -> Result<Self, PrfError> {
        Self::alloc(vm, Self::CF_LABEL, outer_partial, inner_partial, 12)
    }

    /// Allocates the PRF for the 12-byte server verify data.
    pub(crate) fn alloc_server_finished(
        vm: &mut dyn Vm<Binary>,
        outer_partial: Sha256,
        inner_partial: Sha256,
    ) -> Result<Self, PrfError> {
        Self::alloc(vm, Self::SF_LABEL, outer_partial, inner_partial, 12)
    }

    /// Returns `true` while local work remains and the seed has been set.
    pub(crate) fn wants_flush(&self) -> bool {
        !matches!(self.state, PrfState::Done) && self.start_seed_label.is_some()
    }

    /// Advances the state machine as far as currently possible.
    ///
    /// Each step may stall on a pending decode (`try_recv` returning `None`),
    /// in which case this returns `Ok(())` and a later flush continues.
    pub(crate) fn flush(&mut self, vm: &mut dyn Vm<Binary>) -> Result<(), PrfError> {
        match &mut self.state {
            PrfState::InnerPartial { inner_partial } => {
                let Some(inner_partial) = inner_partial.try_recv().map_err(PrfError::vm)? else {
                    return Ok(());
                };

                self.state = PrfState::ComputeA {
                    iter: 1,
                    inner_partial,
                    // A(1)'s message is label || seed.
                    msg: self
                        .start_seed_label
                        .clone()
                        .expect("Start seed should have been set"),
                };
                // Immediately attempt the next step in the same flush call.
                self.flush(vm)?;
            }
            PrfState::ComputeA {
                iter,
                inner_partial,
                msg,
            } => {
                // NOTE(review): entries were pushed with `push_front` in
                // `alloc` and are popped from the front here, i.e. consumed
                // in reverse allocation order. All entries are structurally
                // identical freshly-allocated arrays, so this looks
                // harmless — confirm against the allocation logic.
                let a = self.a.pop_front().expect("Prf AHash should be present");
                assign_inner_local(vm, a.inner_local, *inner_partial, msg)?;

                self.state = PrfState::ComputeP {
                    iter: *iter,
                    inner_partial: *inner_partial,
                    a_output: a.output,
                };
            }
            PrfState::ComputeP {
                iter,
                inner_partial,
                a_output,
            } => {
                let Some(output) = a_output.try_recv().map_err(PrfError::vm)? else {
                    return Ok(());
                };

                let p = self.p.pop_front().expect("Prf PHash should be present");

                // P(i) hashes A(i) || label || seed.
                let mut msg = output.to_vec();
                msg.extend_from_slice(
                    self.start_seed_label
                        .as_ref()
                        .expect("Start seed should have been set"),
                );

                assign_inner_local(vm, p.inner_local, *inner_partial, &msg)?;

                if *iter == self.iterations {
                    self.state = PrfState::FinishLastP;
                } else {
                    // Next A(i+1) hashes the previous A output.
                    self.state = PrfState::ComputeA {
                        iter: *iter + 1,
                        inner_partial: *inner_partial,
                        msg: output.to_vec(),
                    }
                };
            }
            PrfState::FinishLastP => self.state = PrfState::Done,
            _ => (),
        }

        Ok(())
    }

    /// Sets the start seed; the label is prepended automatically.
    pub(crate) fn set_start_seed(&mut self, seed: Vec<u8>) {
        let mut start_seed_label = self.label.to_vec();
        start_seed_label.extend_from_slice(&seed);

        self.start_seed_label = Some(start_seed_label);
    }

    /// Returns the 32-byte P_hash output chunks.
    pub(crate) fn output(&self) -> Vec<Array<U8, 32>> {
        self.p.iter().map(|p| p.output).collect()
    }

    /// Allocates the VM resources for `len.div_ceil(32)` iterations and
    /// registers the decode of the inner-partial state.
    fn alloc(
        vm: &mut dyn Vm<Binary>,
        label: &'static [u8],
        outer_partial: Sha256,
        inner_partial: Sha256,
        len: usize,
    ) -> Result<Self, PrfError> {
        assert!(len > 0, "cannot compute 0 bytes for prf");
        let iterations = len.div_ceil(32);

        let (inner_partial, _) = inner_partial
            .state()
            .expect("state should be set for inner_partial");
        let inner_partial = vm.decode(inner_partial).map_err(PrfError::vm)?;

        let mut prf = Self {
            label,
            start_seed_label: None,
            iterations,
            state: PrfState::InnerPartial { inner_partial },
            a: VecDeque::new(),
            p: VecDeque::new(),
        };

        for _ in 0..iterations {
            // setup A[i]
            let inner_local: Array<U8, 32> = vm.alloc().map_err(PrfError::vm)?;
            let output = hmac_sha256(vm, outer_partial.clone(), inner_local)?;
            // A outputs must be decoded locally to build the next message.
            let output = vm.decode(output).map_err(PrfError::vm)?;
            let a_hash = AHash {
                inner_local,
                output,
            };

            prf.a.push_front(a_hash);

            // setup P[i]
            let inner_local: Array<U8, 32> = vm.alloc().map_err(PrfError::vm)?;
            let output = hmac_sha256(vm, outer_partial.clone(), inner_local)?;

            let p_hash = PHash {
                inner_local,
                output,
            };
            prf.p.push_front(p_hash);
        }

        Ok(prf)
    }
}
/// Computes the inner-local hash over `msg` from the decoded inner-partial
/// state and assigns it as a public value to `inner_local`.
fn assign_inner_local(
    vm: &mut dyn Vm<Binary>,
    inner_local: Array<U8, 32>,
    inner_partial: [u32; 8],
    msg: &[u8],
) -> Result<(), PrfError> {
    // The inner partial already covers the 64-byte ipad block.
    let digest = sha256(inner_partial, 64, msg);

    vm.mark_public(inner_local).map_err(PrfError::vm)?;
    vm.assign(inner_local, state_to_bytes(digest))
        .map_err(PrfError::vm)?;
    vm.commit(inner_local).map_err(PrfError::vm)?;

    Ok(())
}
/// Like [`PHash`] but stores the output as a decoding future, because in the
/// reduced PRF the A-chain outputs must be decoded locally.
#[derive(Debug)]
struct AHash {
    // VM reference for the inner-local hash input.
    inner_local: Array<U8, 32>,
    // Future resolving to the decoded 32-byte HMAC output.
    output: DecodeFutureTyped<BitVec, [u8; 32]>,
}
/// A single P_hash HMAC evaluation kept inside the VM.
#[derive(Debug, Clone, Copy)]
struct PHash {
    // VM reference for the inner-local hash input.
    inner_local: Array<U8, 32>,
    // The 32-byte HMAC output (stays in the VM).
    output: Array<U8, 32>,
}

View File

@@ -0,0 +1,103 @@
use crate::{
prf::{function::Prf, merge_outputs},
PrfError, PrfOutput, SessionKeys,
};
use mpz_vm_core::{
memory::{
binary::{Binary, U8},
Array, FromRaw, ToRaw,
},
Vm,
};
/// The PRF lifecycle: session keys -> client finished -> server finished.
#[allow(clippy::large_enum_variant)]
#[derive(Debug)]
pub(crate) enum State {
    /// No computation has been set up yet.
    Initialized,
    /// Deriving the master secret and session keys.
    SessionKeys {
        client_random: Option<[u8; 32]>,
        master_secret: Prf,
        key_expansion: Prf,
        client_finished: Prf,
        server_finished: Prf,
    },
    /// Computing the client finished verify data.
    ClientFinished {
        client_finished: Prf,
        server_finished: Prf,
    },
    /// Computing the server finished verify data.
    ServerFinished {
        server_finished: Prf,
    },
    /// All PRF computations are done.
    Complete,
    /// A state was taken and not restored (see `State::take`).
    Error,
}
impl State {
    /// Moves the current state out, leaving `State::Error` in its place.
    pub(crate) fn take(&mut self) -> State {
        std::mem::replace(self, State::Error)
    }

    /// Builds the PRF output handles.
    ///
    /// Only valid while in the `SessionKeys` state.
    pub(crate) fn prf_output(&self, vm: &mut dyn Vm<Binary>) -> Result<PrfOutput, PrfError> {
        match self {
            State::SessionKeys {
                key_expansion,
                client_finished,
                server_finished,
                ..
            } => Ok(PrfOutput {
                keys: get_session_keys(key_expansion.output(), vm)?,
                cf_vd: get_client_finished_vd(client_finished.output(), vm)?,
                sf_vd: get_server_finished_vd(server_finished.output(), vm)?,
            }),
            _ => Err(PrfError::state(
                "Prf output can only be computed while in \"SessionKeys\" state",
            )),
        }
    }
}
/// Splits the 40-byte key block into the four session key references.
///
/// Layout: bytes 0..16 client_write_key, 16..32 server_write_key,
/// 32..36 client_iv, 36..40 server_iv.
fn get_session_keys(
    output: Vec<Array<U8, 32>>,
    vm: &mut dyn Vm<Binary>,
) -> Result<SessionKeys, PrfError> {
    let mut keys = merge_outputs(vm, output, 40)?;
    debug_assert!(keys.len() == 40, "session keys len should be 40");

    // Split from the back so earlier offsets remain valid.
    let server_iv = Array::<U8, 4>::try_from(keys.split_off(36)).unwrap();
    let client_iv = Array::<U8, 4>::try_from(keys.split_off(32)).unwrap();
    let server_write_key = Array::<U8, 16>::try_from(keys.split_off(16)).unwrap();
    let client_write_key = Array::<U8, 16>::try_from(keys).unwrap();

    let session_keys = SessionKeys {
        client_write_key,
        server_write_key,
        client_iv,
        server_iv,
    };

    Ok(session_keys)
}
/// Extracts the 12-byte client finished verify data from the PRF output.
fn get_client_finished_vd(
    output: Vec<Array<U8, 32>>,
    vm: &mut dyn Vm<Binary>,
) -> Result<Array<U8, 12>, PrfError> {
    // Truncate to 12 bytes, then reinterpret the raw vector as a fixed array.
    let vd = merge_outputs(vm, output, 12)?;
    Ok(<Array<U8, 12> as FromRaw<Binary>>::from_raw(vd.to_raw()))
}
/// Extracts the 12-byte server finished verify data from the PRF output.
fn get_server_finished_vd(
    output: Vec<Array<U8, 32>>,
    vm: &mut dyn Vm<Binary>,
) -> Result<Array<U8, 12>, PrfError> {
    // Truncate to 12 bytes, then reinterpret the raw vector as a fixed array.
    let vd = merge_outputs(vm, output, 12)?;
    Ok(<Array<U8, 12> as FromRaw<Binary>>::from_raw(vd.to_raw()))
}

View File

@@ -0,0 +1,261 @@
use crate::{sha256, state_to_bytes};
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_ot::ideal::cot::{ideal_cot, IdealCOTReceiver, IdealCOTSender};
use mpz_vm_core::memory::correlated::Delta;
use rand::{rngs::StdRng, Rng, SeedableRng};
/// SHA-256 initial hash values (IV), as specified in FIPS 180-4.
pub(crate) const SHA256_IV: [u32; 8] = [
    0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
];
/// Creates a connected garbler/evaluator VM pair backed by ideal COT, with a
/// fixed RNG seed for reproducibility.
pub(crate) fn mock_vm() -> (Garbler<IdealCOTSender>, Evaluator<IdealCOTReceiver>) {
    let mut rng = StdRng::seed_from_u64(0);
    let delta = Delta::random(&mut rng);

    let (cot_send, cot_recv) = ideal_cot(delta.into_inner());

    let garbler = Garbler::new(cot_send, [0u8; 16], delta);
    let evaluator = Evaluator::new(cot_recv);

    (garbler, evaluator)
}
/// Reference computation of the 48-byte master secret from the PMS.
pub(crate) fn prf_ms(pms: [u8; 32], client_random: [u8; 32], server_random: [u8; 32]) -> [u8; 48] {
    // Seed is "master secret" || client_random || server_random.
    let mut seed = b"master secret".to_vec();
    seed.extend_from_slice(&client_random);
    seed.extend_from_slice(&server_random);

    phash(pms.to_vec(), &seed, 2)[..48]
        .try_into()
        .expect("slice is exactly 48 bytes")
}
/// Reference computation of the four session keys from the master secret.
pub(crate) fn prf_keys(
    ms: [u8; 48],
    client_random: [u8; 32],
    server_random: [u8; 32],
) -> [Vec<u8>; 4] {
    // Seed is "key expansion" || server_random || client_random.
    let mut seed = b"key expansion".to_vec();
    seed.extend_from_slice(&server_random);
    seed.extend_from_slice(&client_random);

    let key_block = phash(ms.to_vec(), &seed, 2);
    let client_write_key = key_block[..16].to_vec();
    let server_write_key = key_block[16..32].to_vec();
    let client_iv = key_block[32..36].to_vec();
    let server_iv = key_block[36..40].to_vec();

    [client_write_key, server_write_key, client_iv, server_iv]
}
/// Reference computation of the 12-byte client finished verify data.
///
/// (Fixes parameter-name typo: `hanshake_hash` -> `handshake_hash`;
/// call-compatible since Rust arguments are positional.)
pub(crate) fn prf_cf_vd(ms: [u8; 48], handshake_hash: [u8; 32]) -> Vec<u8> {
    let mut label_start_seed = b"client finished".to_vec();
    label_start_seed.extend_from_slice(&handshake_hash);

    phash(ms.to_vec(), &label_start_seed, 1)[..12].to_vec()
}
/// Reference computation of the 12-byte server finished verify data.
///
/// (Fixes parameter-name typo: `hanshake_hash` -> `handshake_hash`;
/// call-compatible since Rust arguments are positional.)
pub(crate) fn prf_sf_vd(ms: [u8; 48], handshake_hash: [u8; 32]) -> Vec<u8> {
    let mut label_start_seed = b"server finished".to_vec();
    label_start_seed.extend_from_slice(&handshake_hash);

    phash(ms.to_vec(), &label_start_seed, 1)[..12].to_vec()
}
/// Reference TLS 1.2 P_hash.
///
/// A(0) = seed, A(i) = HMAC(key, A(i-1));
/// output is the concatenation of HMAC(key, A(i) || seed) for i = 1..=iterations.
pub(crate) fn phash(key: Vec<u8>, seed: &[u8], iterations: usize) -> Vec<u8> {
    let mut a = seed.to_vec();
    let mut output = Vec::with_capacity(iterations * 32);

    for _ in 0..iterations {
        // A(i) = HMAC_hash(secret, A(i-1))
        a = hmac_sha256(key.clone(), &a).to_vec();

        // P_hash[i] = HMAC_hash(secret, A(i) + seed)
        let mut a_seed = a.clone();
        a_seed.extend_from_slice(seed);
        output.extend_from_slice(&hmac_sha256(key.clone(), &a_seed));
    }

    output
}
/// Reference HMAC-SHA256 built from the partial-state helpers.
pub(crate) fn hmac_sha256(key: Vec<u8>, msg: &[u8]) -> [u8; 32] {
    let outer = compute_outer_partial(key.clone());
    let inner = compute_inner_local(key, msg);
    state_to_bytes(sha256(outer, 64, &state_to_bytes(inner)))
}
/// Compresses the opad-masked key block into the SHA-256 IV, yielding the
/// HMAC outer partial state.
pub(crate) fn compute_outer_partial(mut key: Vec<u8>) -> [u32; 8] {
    assert!(key.len() <= 64);
    key.resize(64, 0_u8);

    let mut block = [0_u8; 64];
    for (dst, src) in block.iter_mut().zip(key.iter()) {
        *dst = src ^ 0x5c;
    }

    compress_256(SHA256_IV, &block)
}
/// Hashes `msg` on top of the ipad-masked key block, yielding the HMAC
/// inner local hash state.
pub(crate) fn compute_inner_local(mut key: Vec<u8>, msg: &[u8]) -> [u32; 8] {
    assert!(key.len() <= 64);
    key.resize(64, 0_u8);

    let mut block = [0_u8; 64];
    for (dst, src) in block.iter_mut().zip(key.iter()) {
        *dst = src ^ 0x36;
    }

    sha256(compress_256(SHA256_IV, &block), 64, msg)
}
/// Runs the SHA-256 compression function over the full 64-byte blocks of
/// `msg`, starting from `state`.
pub(crate) fn compress_256(mut state: [u32; 8], msg: &[u8]) -> [u32; 8] {
    use sha2::{
        compress256,
        digest::{
            block_buffer::{BlockBuffer, Eager},
            generic_array::typenum::U64,
        },
    };

    let mut buf = BlockBuffer::<U64, Eager>::default();
    buf.digest_blocks(msg, |blocks| compress256(&mut state, blocks));

    state
}
// Borrowed from Rustls for testing
// https://github.com/rustls/rustls/blob/main/rustls/src/tls12/prf.rs
mod ring_prf {
    use ring::{hmac, hmac::HMAC_SHA256};

    /// HMAC over the concatenation of `a` and `b`.
    fn concat_sign(key: &hmac::Key, a: &[u8], b: &[u8]) -> hmac::Tag {
        let mut ctx = hmac::Context::with_key(key);
        ctx.update(a);
        ctx.update(b);
        ctx.sign()
    }

    /// TLS 1.2 P_hash filling `out` completely.
    fn p(out: &mut [u8], secret: &[u8], seed: &[u8]) {
        let hmac_key = hmac::Key::new(HMAC_SHA256, secret);

        // A(1)
        let mut current_a = hmac::sign(&hmac_key, seed);
        let chunk_size = HMAC_SHA256.digest_algorithm().output_len();
        for chunk in out.chunks_mut(chunk_size) {
            // P_hash[i] = HMAC_hash(secret, A(i) + seed)
            let p_term = concat_sign(&hmac_key, current_a.as_ref(), seed);
            chunk.copy_from_slice(&p_term.as_ref()[..chunk.len()]);

            // A(i+1) = HMAC_hash(secret, A(i))
            current_a = hmac::sign(&hmac_key, current_a.as_ref());
        }
    }

    fn concat(a: &[u8], b: &[u8]) -> Vec<u8> {
        let mut ret = Vec::new();
        ret.extend_from_slice(a);
        ret.extend_from_slice(b);
        ret
    }

    /// TLS 1.2 PRF: P_hash keyed by `secret` over label || seed.
    pub(crate) fn prf(out: &mut [u8], secret: &[u8], label: &[u8], seed: &[u8]) {
        let joined_seed = concat(label, seed);
        p(out, secret, &joined_seed);
    }
}
/// Checks `prf_ms` against the Rustls/ring reference PRF.
#[test]
fn test_prf_reference_ms() {
    use ring_prf::prf as prf_ref;

    let mut rng = StdRng::from_seed([1; 32]);
    let pms: [u8; 32] = rng.random();
    let label: &[u8] = b"master secret";
    let client_random: [u8; 32] = rng.random();
    let server_random: [u8; 32] = rng.random();

    // Master secret seed order: client_random || server_random.
    let mut seed = Vec::from(client_random);
    seed.extend_from_slice(&server_random);

    let ms = prf_ms(pms, client_random, server_random);

    let mut expected_ms: [u8; 48] = [0; 48];
    prf_ref(&mut expected_ms, &pms, label, &seed);

    assert_eq!(ms, expected_ms);
}
/// Checks `prf_keys` against the Rustls/ring reference PRF.
#[test]
fn test_prf_reference_ke() {
    use ring_prf::prf as prf_ref;

    let mut rng = StdRng::from_seed([2; 32]);
    let ms: [u8; 48] = rng.random();
    let label: &[u8] = b"key expansion";
    let client_random: [u8; 32] = rng.random();
    let server_random: [u8; 32] = rng.random();

    // Key expansion seed order: server_random || client_random.
    let mut seed = Vec::from(server_random);
    seed.extend_from_slice(&client_random);

    let keys = prf_keys(ms, client_random, server_random);
    let keys: Vec<u8> = keys.into_iter().flatten().collect();

    let mut expected_keys: [u8; 40] = [0; 40];
    prf_ref(&mut expected_keys, &ms, label, &seed);

    assert_eq!(keys, expected_keys);
}
/// Checks `prf_cf_vd` against the Rustls/ring reference PRF.
#[test]
fn test_prf_reference_cf() {
    use ring_prf::prf as prf_ref;

    let mut rng = StdRng::from_seed([3; 32]);
    let ms: [u8; 48] = rng.random();
    let label: &[u8] = b"client finished";
    let handshake_hash: [u8; 32] = rng.random();

    let cf_vd = prf_cf_vd(ms, handshake_hash);

    let mut expected_cf_vd: [u8; 12] = [0; 12];
    prf_ref(&mut expected_cf_vd, &ms, label, &handshake_hash);

    assert_eq!(cf_vd, expected_cf_vd);
}
/// Checks `prf_sf_vd` against the Rustls/ring reference PRF.
#[test]
fn test_prf_reference_sf() {
    use ring_prf::prf as prf_ref;

    let mut rng = StdRng::from_seed([4; 32]);
    let ms: [u8; 48] = rng.random();
    let label: &[u8] = b"server finished";
    let handshake_hash: [u8; 32] = rng.random();

    let sf_vd = prf_sf_vd(ms, handshake_hash);

    let mut expected_sf_vd: [u8; 12] = [0; 12];
    prf_ref(&mut expected_sf_vd, &ms, label, &handshake_hash);

    assert_eq!(sf_vd, expected_sf_vd);
}

View File

@@ -5,41 +5,42 @@ description = "Implementation of the 3-party key-exchange protocol"
keywords = ["tls", "mpc", "2pc", "pms", "key-exchange"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.7"
version = "0.1.0-alpha.12"
edition = "2021"
[lints]
workspace = true
[lib]
name = "key_exchange"
[features]
default = ["mock"]
mock = []
mock = ["mpz-share-conversion/test-utils", "mpz-common/ideal"]
[dependencies]
mpz-garble = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-common = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-fields = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-ot = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-share-conversion = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac", features = [
"ideal",
] }
mpz-circuits = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
mpz-vm-core = { workspace = true }
mpz-memory-core = { workspace = true }
mpz-common = { workspace = true }
mpz-fields = { workspace = true }
mpz-share-conversion = { workspace = true }
mpz-circuits = { workspace = true }
mpz-core = { workspace = true }
p256 = { workspace = true, features = ["ecdh", "serde"] }
async-trait = { workspace = true }
thiserror = { workspace = true }
serde = { workspace = true }
futures = { workspace = true }
serio = { workspace = true }
derive_builder = { workspace = true }
tracing = { workspace = true }
rand = { workspace = true }
rand06-compat = { workspace = true }
tokio = { workspace = true, features = ["sync"] }
[dev-dependencies]
mpz-share-conversion = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac", features = [
"ideal",
] }
mpz-ot = { workspace = true, features = ["ideal"] }
mpz-garble = { workspace = true }
rand_chacha = { workspace = true }
rand_core = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }
rstest = { workspace = true }

View File

@@ -1,15 +1,8 @@
//! This module provides the circuits used in the key exchange protocol.
use mpz_circuits::{ops::add_mod, Circuit, CircuitBuilder, Feed, Node};
use std::sync::Arc;
use mpz_circuits::{circuits::big_num::nbyte_add_mod_trace, Circuit, CircuitBuilder};
/// NIST P-256 prime big-endian.
static P: [u8; 32] = [
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
];
/// Circuit for combining additive shares of the PMS, twice
///
/// # Inputs
@@ -18,26 +11,65 @@ static P: [u8; 32] = [
/// 1. PMS_SHARE_B0: 32 bytes PMS Additive Share
/// 2. PMS_SHARE_A1: 32 bytes PMS Additive Share
/// 3. PMS_SHARE_B1: 32 bytes PMS Additive Share
/// 4. MODULUS: 32 bytes field modulus
///
/// # Outputs
/// 0. PMS_0: Pre-master Secret = PMS_SHARE_A0 + PMS_SHARE_B0
/// 1. PMS_1: Pre-master Secret = PMS_SHARE_A1 + PMS_SHARE_B1
/// 2. EQ: Equality check of PMS_0 and PMS_1
pub(crate) fn build_pms_circuit() -> Arc<Circuit> {
let builder = CircuitBuilder::new();
let share_a0 = builder.add_array_input::<u8, 32>();
let share_b0 = builder.add_array_input::<u8, 32>();
let share_a1 = builder.add_array_input::<u8, 32>();
let share_b1 = builder.add_array_input::<u8, 32>();
let mut builder = CircuitBuilder::new();
let pms_0 = nbyte_add_mod_trace(builder.state(), share_a0, share_b0, P);
let pms_1 = nbyte_add_mod_trace(builder.state(), share_a1, share_b1, P);
let share_a0 = (0..32 * 8).map(|_| builder.add_input()).collect::<Vec<_>>();
let share_b0 = (0..32 * 8).map(|_| builder.add_input()).collect::<Vec<_>>();
let share_a1 = (0..32 * 8).map(|_| builder.add_input()).collect::<Vec<_>>();
let share_b1 = (0..32 * 8).map(|_| builder.add_input()).collect::<Vec<_>>();
let eq: [_; 32] = std::array::from_fn(|i| pms_0[i] ^ pms_1[i]);
let modulus = (0..32 * 8).map(|_| builder.add_input()).collect::<Vec<_>>();
builder.add_output(pms_0);
builder.add_output(pms_1);
builder.add_output(eq);
/// assumes input is provided as big endian
fn to_little_endian(input: &[Node<Feed>]) -> Vec<Node<Feed>> {
let mut le_lsb0_output = vec![];
for node in input.chunks_exact(8).rev() {
for &bit in node.iter() {
le_lsb0_output.push(bit);
}
}
le_lsb0_output
}
let pms_0 = add_mod(
&mut builder,
&to_little_endian(&share_a0),
&to_little_endian(&share_b0),
&to_little_endian(&modulus),
);
// return output as big endian
for node in pms_0.chunks_exact(8).rev() {
for &bit in node.iter() {
builder.add_output(bit);
}
}
let pms_1 = add_mod(
&mut builder,
&to_little_endian(&share_a1),
&to_little_endian(&share_b1),
&to_little_endian(&modulus),
);
// return output as big endian
for node in pms_1.chunks_exact(8).rev() {
for &bit in node.iter() {
builder.add_output(bit);
}
}
for (a, b) in pms_0.into_iter().zip(pms_1) {
let out = builder.add_xor_gate(a, b);
builder.add_output(out);
}
Arc::new(builder.build().expect("pms circuit is valid"))
}

View File

@@ -1,29 +0,0 @@
use derive_builder::Builder;
/// Role in the key exchange protocol.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Role {
/// Leader.
Leader,
/// Follower.
Follower,
}
/// A config used for [MpcKeyExchange](super::MpcKeyExchange).
#[derive(Debug, Clone, Builder)]
pub struct KeyExchangeConfig {
/// Protocol role.
role: Role,
}
impl KeyExchangeConfig {
/// Creates a new builder for the key exchange configuration.
pub fn builder() -> KeyExchangeConfigBuilder {
KeyExchangeConfigBuilder::default()
}
/// Get the role of this instance.
pub fn role(&self) -> &Role {
&self.role
}
}

View File

@@ -1,120 +1,87 @@
use core::fmt;
use std::error::Error;
/// A key exchange error.
/// MPC-TLS protocol error.
#[derive(Debug, thiserror::Error)]
pub struct KeyExchangeError {
kind: ErrorKind,
#[source]
source: Option<Box<dyn Error + Send + Sync>>,
#[error(transparent)]
pub struct KeyExchangeError(#[from] pub(crate) ErrorRepr);
#[derive(Debug, thiserror::Error)]
#[error("key exchange error: {0}")]
pub(crate) enum ErrorRepr {
#[error("state error: {0}")]
State(Box<dyn Error + Send + Sync + 'static>),
#[error("context error: {0}")]
Ctx(Box<dyn Error + Send + Sync + 'static>),
#[error("io error: {0}")]
Io(std::io::Error),
#[error("vm error: {0}")]
Vm(Box<dyn Error + Send + Sync + 'static>),
#[error("share conversion error: {0}")]
ShareConversion(Box<dyn Error + Send + Sync + 'static>),
#[error("role error: {0}")]
Role(Box<dyn Error + Send + Sync + 'static>),
#[error("key error: {0}")]
Key(Box<dyn Error + Send + Sync + 'static>),
}
impl KeyExchangeError {
pub(crate) fn new<E>(kind: ErrorKind, source: E) -> Self
pub(crate) fn state<E>(err: E) -> KeyExchangeError
where
E: Into<Box<dyn Error + Send + Sync>>,
E: Into<Box<dyn Error + Send + Sync + 'static>>,
{
Self {
kind,
source: Some(source.into()),
}
Self(ErrorRepr::State(err.into()))
}
#[cfg(test)]
pub(crate) fn kind(&self) -> &ErrorKind {
&self.kind
pub(crate) fn ctx<E>(err: E) -> KeyExchangeError
where
E: Into<Box<dyn Error + Send + Sync + 'static>>,
{
Self(ErrorRepr::Ctx(err.into()))
}
pub(crate) fn state(msg: impl Into<String>) -> Self {
Self {
kind: ErrorKind::State,
source: Some(msg.into().into()),
}
pub(crate) fn vm<E>(err: E) -> KeyExchangeError
where
E: Into<Box<dyn Error + Send + Sync + 'static>>,
{
Self(ErrorRepr::Vm(err.into()))
}
pub(crate) fn role(msg: impl Into<String>) -> Self {
Self {
kind: ErrorKind::Role,
source: Some(msg.into().into()),
}
pub(crate) fn share_conversion<E>(err: E) -> KeyExchangeError
where
E: Into<Box<dyn Error + Send + Sync + 'static>>,
{
Self(ErrorRepr::ShareConversion(err.into()))
}
}
#[derive(Debug)]
pub(crate) enum ErrorKind {
Io,
Context,
Vm,
ShareConversion,
Key,
State,
Role,
}
pub(crate) fn role<E>(err: E) -> KeyExchangeError
where
E: Into<Box<dyn Error + Send + Sync + 'static>>,
{
Self(ErrorRepr::Role(err.into()))
}
impl fmt::Display for KeyExchangeError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.kind {
ErrorKind::Io => write!(f, "io error")?,
ErrorKind::Context => write!(f, "context error")?,
ErrorKind::Vm => write!(f, "vm error")?,
ErrorKind::ShareConversion => write!(f, "share conversion error")?,
ErrorKind::Key => write!(f, "key error")?,
ErrorKind::State => write!(f, "state error")?,
ErrorKind::Role => write!(f, "role error")?,
}
if let Some(ref source) = self.source {
write!(f, " caused by: {}", source)?;
}
Ok(())
pub(crate) fn key<E>(err: E) -> KeyExchangeError
where
E: Into<Box<dyn Error + Send + Sync + 'static>>,
{
Self(ErrorRepr::Key(err.into()))
}
}
impl From<mpz_common::ContextError> for KeyExchangeError {
fn from(error: mpz_common::ContextError) -> Self {
Self::new(ErrorKind::Context, error)
}
}
impl From<mpz_garble::MemoryError> for KeyExchangeError {
fn from(error: mpz_garble::MemoryError) -> Self {
Self::new(ErrorKind::Vm, error)
}
}
impl From<mpz_garble::LoadError> for KeyExchangeError {
fn from(error: mpz_garble::LoadError) -> Self {
Self::new(ErrorKind::Vm, error)
}
}
impl From<mpz_garble::ExecutionError> for KeyExchangeError {
fn from(error: mpz_garble::ExecutionError) -> Self {
Self::new(ErrorKind::Vm, error)
}
}
impl From<mpz_garble::DecodeError> for KeyExchangeError {
fn from(error: mpz_garble::DecodeError) -> Self {
Self::new(ErrorKind::Vm, error)
}
}
impl From<mpz_share_conversion::ShareConversionError> for KeyExchangeError {
fn from(error: mpz_share_conversion::ShareConversionError) -> Self {
Self::new(ErrorKind::ShareConversion, error)
fn from(value: mpz_common::ContextError) -> Self {
Self::ctx(value)
}
}
impl From<p256::elliptic_curve::Error> for KeyExchangeError {
fn from(error: p256::elliptic_curve::Error) -> Self {
Self::new(ErrorKind::Key, error)
fn from(value: p256::elliptic_curve::Error) -> Self {
Self::key(value)
}
}
impl From<std::io::Error> for KeyExchangeError {
fn from(error: std::io::Error) -> Self {
Self::new(ErrorKind::Io, error)
fn from(err: std::io::Error) -> Self {
Self(ErrorRepr::Io(err))
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -15,63 +15,64 @@
#![forbid(unsafe_code)]
mod circuit;
mod config;
pub(crate) mod error;
mod exchange;
#[cfg(feature = "mock")]
pub mod mock;
pub(crate) mod point_addition;
pub use config::{
KeyExchangeConfig, KeyExchangeConfigBuilder, KeyExchangeConfigBuilderError, Role,
};
pub use error::KeyExchangeError;
pub use exchange::MpcKeyExchange;
use async_trait::async_trait;
use mpz_garble::value::ValueRef;
use mpz_common::Context;
use mpz_memory_core::{
binary::{Binary, U8},
Array,
};
use mpz_vm_core::Vm;
use p256::PublicKey;
/// Pre-master secret.
#[derive(Debug, Clone)]
pub struct Pms(ValueRef);
pub type Pms = Array<U8, 32>;
impl Pms {
/// Creates a new PMS.
pub fn new(value: ValueRef) -> Self {
Self(value)
}
/// Gets the value of the PMS.
pub fn into_value(self) -> ValueRef {
self.0
}
/// Role in the key exchange protocol.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Role {
/// Leader.
Leader,
/// Follower.
Follower,
}
/// A trait for the 3-party key exchange protocol.
#[async_trait]
pub trait KeyExchange {
/// Gets the server's public key.
fn server_key(&self) -> Option<PublicKey>;
/// Allocate necessary computational resources.
fn alloc(&mut self, vm: &mut dyn Vm<Binary>) -> Result<Pms, KeyExchangeError>;
/// Sets the server's public key.
async fn set_server_key(&mut self, server_key: PublicKey) -> Result<(), KeyExchangeError>;
fn set_server_key(&mut self, server_key: PublicKey) -> Result<(), KeyExchangeError>;
/// Gets the server's public key.
fn server_key(&self) -> Option<PublicKey>;
/// Computes the client's public key.
///
/// The client's public key in this context is the combined public key (EC
/// point addition) of the leader's public key and the follower's public
/// key.
async fn client_key(&mut self) -> Result<PublicKey, KeyExchangeError>;
fn client_key(&self) -> Result<PublicKey, KeyExchangeError>;
/// Performs any necessary one-time setup, returning a reference to the PMS.
///
/// The PMS will not be assigned until `compute_pms` is called.
async fn setup(&mut self) -> Result<Pms, KeyExchangeError>;
/// Performs one-time setup for the key exchange protocol.
async fn setup(&mut self, ctx: &mut Context) -> Result<(), KeyExchangeError>;
/// Preprocesses the key exchange.
async fn preprocess(&mut self) -> Result<(), KeyExchangeError>;
/// Computes the shares of the PMS.
async fn compute_shares(&mut self, ctx: &mut Context) -> Result<(), KeyExchangeError>;
/// Computes the PMS.
async fn compute_pms(&mut self) -> Result<Pms, KeyExchangeError>;
/// Assigns the PMS shares to the VM.
fn assign(&mut self, vm: &mut dyn Vm<Binary>) -> Result<(), KeyExchangeError>;
/// Finalizes the key exchange protocol.
async fn finalize(&mut self) -> Result<(), KeyExchangeError>;
}

View File

@@ -1,71 +1,51 @@
//! This module provides mock types for key exchange leader and follower and a
//! function to create such a pair.
use crate::{KeyExchangeConfig, MpcKeyExchange, Role};
use mpz_common::executor::{test_st_executor, STExecutor};
use mpz_garble::{Decode, Execute, Memory};
use mpz_share_conversion::ideal::{ideal_share_converter, IdealShareConverter};
use serio::channel::MemoryDuplex;
use crate::{MpcKeyExchange, Role};
use mpz_core::Block;
use mpz_fields::p256::P256;
use mpz_share_conversion::ideal::{
ideal_share_convert, IdealShareConvertReceiver, IdealShareConvertSender,
};
/// A mock key exchange instance.
pub type MockKeyExchange<E> =
MpcKeyExchange<STExecutor<MemoryDuplex>, IdealShareConverter, IdealShareConverter, E>;
pub type MockKeyExchange =
MpcKeyExchange<IdealShareConvertSender<P256>, IdealShareConvertReceiver<P256>>;
/// Creates a mock pair of key exchange leader and follower.
pub fn create_mock_key_exchange_pair<E: Memory + Execute + Decode + Send>(
leader_executor: E,
follower_executor: E,
) -> (MockKeyExchange<E>, MockKeyExchange<E>) {
let (leader_ctx, follower_ctx) = test_st_executor(8);
let (leader_converter_0, follower_converter_0) = ideal_share_converter();
let (leader_converter_1, follower_converter_1) = ideal_share_converter();
pub fn create_mock_key_exchange_pair() -> (MockKeyExchange, MockKeyExchange) {
let (leader_converter_0, follower_converter_0) = ideal_share_convert(Block::ZERO);
let (follower_converter_1, leader_converter_1) = ideal_share_convert(Block::ZERO);
let key_exchange_config_leader = KeyExchangeConfig::builder()
.role(Role::Leader)
.build()
.unwrap();
let leader = MpcKeyExchange::new(Role::Leader, leader_converter_0, leader_converter_1);
let key_exchange_config_follower = KeyExchangeConfig::builder()
.role(Role::Follower)
.build()
.unwrap();
let leader = MpcKeyExchange::new(
key_exchange_config_leader,
leader_ctx,
leader_converter_0,
leader_converter_1,
leader_executor,
);
let follower = MpcKeyExchange::new(
key_exchange_config_follower,
follower_ctx,
follower_converter_0,
follower_converter_1,
follower_executor,
);
let follower = MpcKeyExchange::new(Role::Follower, follower_converter_1, follower_converter_0);
(leader, follower)
}
#[cfg(test)]
mod tests {
use mpz_garble::protocol::deap::mock::create_mock_deap_vm;
use crate::KeyExchange;
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
use mpz_ot::ideal::cot::{IdealCOTReceiver, IdealCOTSender};
use super::*;
use crate::KeyExchange;
#[test]
fn test_mock_is_ke() {
let (leader_vm, follower_vm) = create_mock_deap_vm();
let (leader, follower) = create_mock_key_exchange_pair(leader_vm, follower_vm);
let (leader, follower) = create_mock_key_exchange_pair();
fn is_key_exchange<T: KeyExchange>(_: T) {}
fn is_key_exchange<T: KeyExchange, V>(_: T) {}
is_key_exchange(leader);
is_key_exchange(follower);
is_key_exchange::<
MpcKeyExchange<IdealShareConvertSender<P256>, IdealShareConvertReceiver<P256>>,
Garbler<IdealCOTSender>,
>(leader);
is_key_exchange::<
MpcKeyExchange<IdealShareConvertSender<P256>, IdealShareConvertReceiver<P256>>,
Evaluator<IdealCOTReceiver>,
>(follower);
}
}

View File

@@ -1,47 +0,0 @@
//! This module contains the message types exchanged between the prover and the TLS verifier.
use std::fmt::{self, Display, Formatter};
use p256::{elliptic_curve::sec1::ToEncodedPoint, PublicKey as P256PublicKey};
use serde::{Deserialize, Serialize};
/// A type for messages exchanged between the prover and the TLS verifier during the key exchange
/// protocol.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(missing_docs)]
pub enum KeyExchangeMessage {
FollowerPublicKey(PublicKey),
ServerPublicKey(PublicKey),
}
/// A wrapper for a serialized public key.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PublicKey {
/// The sec1 serialized public key.
pub key: Vec<u8>,
}
/// An error that can occur during parsing of a public key.
#[derive(Debug, thiserror::Error)]
pub struct KeyParseError(#[from] p256::elliptic_curve::Error);
impl Display for KeyParseError {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "Unable to parse public key: {}", self.0)
}
}
impl From<P256PublicKey> for PublicKey {
fn from(value: P256PublicKey) -> Self {
let key = value.to_encoded_point(false).as_bytes().to_vec();
PublicKey { key }
}
}
impl TryFrom<PublicKey> for P256PublicKey {
type Error = KeyParseError;
fn try_from(value: PublicKey) -> Result<Self, Self::Error> {
P256PublicKey::from_sec1_bytes(&value.key).map_err(Into::into)
}
}

View File

@@ -1,27 +1,28 @@
//! This module implements a secure two-party computation protocol for adding
//! two private EC points and secret-sharing the resulting x coordinate (the
//! shares are field elements of the field underlying the elliptic curve).
//! This protocol has semi-honest security.
//! shares are field elements of the field underlying the elliptic curve). This
//! protocol has semi-honest security.
//!
//! The protocol is described in <https://docs.tlsnotary.org/protocol/notarization/key_exchange.html>
//! The protocol is described in
//! <https://docs.tlsnotary.org/protocol/notarization/key_exchange.html>
use mpz_common::Context;
use crate::{KeyExchangeError, Role};
use mpz_common::{Context, Flush};
use mpz_fields::{p256::P256, Field};
use mpz_share_conversion::{AdditiveToMultiplicative, MultiplicativeToAdditive};
use mpz_share_conversion::{AdditiveToMultiplicative, MultiplicativeToAdditive, ShareConvert};
use p256::EncodedPoint;
use crate::{config::Role, error::ErrorKind, KeyExchangeError};
/// Derives the x-coordinate share of an elliptic curve point.
pub(crate) async fn derive_x_coord_share<Ctx, C>(
pub(crate) async fn derive_x_coord_share<C>(
ctx: &mut Context,
role: Role,
ctx: &mut Ctx,
converter: &mut C,
share: EncodedPoint,
) -> Result<P256, KeyExchangeError>
where
Ctx: Context,
C: AdditiveToMultiplicative<Ctx, P256> + MultiplicativeToAdditive<Ctx, P256>,
C: ShareConvert<P256> + Flush + Send,
<C as AdditiveToMultiplicative<P256>>::Future: Send,
<C as MultiplicativeToAdditive<P256>>::Future: Send,
{
let [x, y] = decompose_point(share)?;
@@ -31,16 +32,40 @@ where
Role::Follower => vec![-y, -x],
};
let [a, b] = converter
.to_multiplicative(ctx, inputs)
.await?
let a2m = converter
.queue_to_multiplicative(&inputs)
.map_err(KeyExchangeError::share_conversion)?;
converter
.flush(ctx)
.await
.map_err(KeyExchangeError::share_conversion)?;
let [a, b] = a2m
.await
.map_err(KeyExchangeError::share_conversion)?
.shares
.try_into()
.expect("output is same length as input");
let c = a * b.inverse();
let c = a * b
.inverse()
.expect("field element should not be zero when inverting");
let c = c * c;
let d = converter.to_additive(ctx, vec![c]).await?[0];
let m2a = converter
.queue_to_additive(&[c])
.map_err(KeyExchangeError::share_conversion)?;
converter
.flush(ctx)
.await
.map_err(KeyExchangeError::share_conversion)?;
let d = m2a
.await
.map_err(KeyExchangeError::share_conversion)?
.shares[0];
let x_r = d + -x;
@@ -50,13 +75,11 @@ where
/// Decomposes the x and y coordinates of a SEC1 encoded point.
fn decompose_point(point: EncodedPoint) -> Result<[P256; 2], KeyExchangeError> {
// Coordinates are stored as big-endian bytes.
let mut x: [u8; 32] = (*point.x().ok_or(KeyExchangeError::new(
ErrorKind::Key,
"key share is an identity point",
))?)
let mut x: [u8; 32] = (*point
.x()
.ok_or(KeyExchangeError::key("key share is an identity point"))?)
.into();
let mut y: [u8; 32] = (*point.y().ok_or(KeyExchangeError::new(
ErrorKind::Key,
let mut y: [u8; 32] = (*point.y().ok_or(KeyExchangeError::key(
"key share is an identity point or compressed",
))?)
.into();
@@ -75,34 +98,34 @@ fn decompose_point(point: EncodedPoint) -> Result<[P256; 2], KeyExchangeError> {
mod tests {
use super::*;
use mpz_common::executor::test_st_executor;
use mpz_common::context::test_st_context;
use mpz_core::Block;
use mpz_fields::{p256::P256, Field};
use mpz_share_conversion::ideal::ideal_share_converter;
use mpz_share_conversion::ideal::ideal_share_convert;
use p256::{
elliptic_curve::sec1::{FromEncodedPoint, ToEncodedPoint},
EncodedPoint, NonZeroScalar, ProjectivePoint, PublicKey,
};
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha12Rng;
use rand::{rngs::StdRng, Rng, SeedableRng};
#[tokio::test]
async fn test_point_addition() {
let (mut ctx_a, mut ctx_b) = test_st_executor(8);
let mut rng = ChaCha12Rng::from_seed([0u8; 32]);
let (mut ctx_a, mut ctx_b) = test_st_context(8);
let mut rng = StdRng::seed_from_u64(0);
let p1: [u8; 32] = rng.gen();
let p2: [u8; 32] = rng.gen();
let p1: [u8; 32] = rng.random();
let p2: [u8; 32] = rng.random();
let p1 = curve_point_from_be_bytes(p1);
let p2 = curve_point_from_be_bytes(p2);
let p = add_curve_points(&p1, &p2);
let (mut c_a, mut c_b) = ideal_share_converter();
let (mut c_a, mut c_b) = ideal_share_convert(Block::ZERO);
let (a, b) = tokio::try_join!(
derive_x_coord_share(Role::Leader, &mut ctx_a, &mut c_a, p1),
derive_x_coord_share(Role::Follower, &mut ctx_b, &mut c_b, p2)
derive_x_coord_share(&mut ctx_a, Role::Leader, &mut c_a, p1),
derive_x_coord_share(&mut ctx_b, Role::Follower, &mut c_b, p2)
)
.unwrap();
@@ -113,9 +136,9 @@ mod tests {
#[test]
fn test_decompose_point() {
let mut rng = ChaCha12Rng::from_seed([0_u8; 32]);
let mut rng = StdRng::seed_from_u64(0);
let p_expected: [u8; 32] = rng.gen();
let p_expected: [u8; 32] = rng.random();
let p_expected = curve_point_from_be_bytes(p_expected);
let p256: [P256; 2] = decompose_point(p_expected).unwrap();

View File

@@ -1,37 +0,0 @@
[package]
name = "tlsn-stream-cipher"
authors = ["TLSNotary Team"]
description = "2PC stream cipher implementation"
keywords = ["tls", "mpc", "2pc", "stream-cipher"]
categories = ["cryptography"]
license = "MIT OR Apache-2.0"
version = "0.1.0-alpha.7"
edition = "2021"
[features]
default = ["mock"]
rayon = ["mpz-garble/rayon"]
mock = []
[dependencies]
mpz-circuits = { workspace = true }
mpz-garble = { workspace = true }
tlsn-utils = { workspace = true }
aes = { workspace = true }
ctr = { workspace = true }
cipher = { workspace = true }
async-trait = { workspace = true }
thiserror = { workspace = true }
derive_builder = { workspace = true }
tracing = { workspace = true }
opaque-debug = { workspace = true }
[dev-dependencies]
futures = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }
rstest = { workspace = true, features = ["async-timeout"] }
criterion = { workspace = true, features = ["async_tokio"] }
[[bench]]
name = "mock"
harness = false

View File

@@ -1,132 +0,0 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use mpz_garble::{protocol::deap::mock::create_mock_deap_vm, Memory};
use tlsn_stream_cipher::{
Aes128Ctr, CtrCircuit, MpcStreamCipher, StreamCipher, StreamCipherConfigBuilder,
};
async fn bench_stream_cipher_encrypt(len: usize) {
let (leader_vm, follower_vm) = create_mock_deap_vm();
let leader_key = leader_vm.new_public_input::<[u8; 16]>("key").unwrap();
let leader_iv = leader_vm.new_public_input::<[u8; 4]>("iv").unwrap();
leader_vm.assign(&leader_key, [0u8; 16]).unwrap();
leader_vm.assign(&leader_iv, [0u8; 4]).unwrap();
let follower_key = follower_vm.new_public_input::<[u8; 16]>("key").unwrap();
let follower_iv = follower_vm.new_public_input::<[u8; 4]>("iv").unwrap();
follower_vm.assign(&follower_key, [0u8; 16]).unwrap();
follower_vm.assign(&follower_iv, [0u8; 4]).unwrap();
let leader_config = StreamCipherConfigBuilder::default()
.id("test".to_string())
.build()
.unwrap();
let follower_config = StreamCipherConfigBuilder::default()
.id("test".to_string())
.build()
.unwrap();
let mut leader = MpcStreamCipher::<Aes128Ctr, _>::new(leader_config, leader_vm);
leader.set_key(leader_key, leader_iv);
let mut follower = MpcStreamCipher::<Aes128Ctr, _>::new(follower_config, follower_vm);
follower.set_key(follower_key, follower_iv);
let plaintext = vec![0u8; len];
let explicit_nonce = vec![0u8; 8];
_ = tokio::try_join!(
leader.encrypt_private(explicit_nonce.clone(), plaintext),
follower.encrypt_blind(explicit_nonce, len)
)
.unwrap();
_ = tokio::try_join!(
leader.thread_mut().finalize(),
follower.thread_mut().finalize()
)
.unwrap();
}
async fn bench_stream_cipher_zk(len: usize) {
let (leader_vm, follower_vm) = create_mock_deap_vm();
let key = [0u8; 16];
let iv = [0u8; 4];
let leader_key = leader_vm.new_public_input::<[u8; 16]>("key").unwrap();
let leader_iv = leader_vm.new_public_input::<[u8; 4]>("iv").unwrap();
leader_vm.assign(&leader_key, key).unwrap();
leader_vm.assign(&leader_iv, iv).unwrap();
let follower_key = follower_vm.new_public_input::<[u8; 16]>("key").unwrap();
let follower_iv = follower_vm.new_public_input::<[u8; 4]>("iv").unwrap();
follower_vm.assign(&follower_key, key).unwrap();
follower_vm.assign(&follower_iv, iv).unwrap();
let leader_config = StreamCipherConfigBuilder::default()
.id("test".to_string())
.build()
.unwrap();
let follower_config = StreamCipherConfigBuilder::default()
.id("test".to_string())
.build()
.unwrap();
let mut leader = MpcStreamCipher::<Aes128Ctr, _>::new(leader_config, leader_vm);
leader.set_key(leader_key, leader_iv);
let mut follower = MpcStreamCipher::<Aes128Ctr, _>::new(follower_config, follower_vm);
follower.set_key(follower_key, follower_iv);
futures::try_join!(leader.decode_key_private(), follower.decode_key_blind()).unwrap();
let plaintext = vec![0u8; len];
let explicit_nonce = [0u8; 8];
let ciphertext = Aes128Ctr::apply_keystream(&key, &iv, 2, &explicit_nonce, &plaintext).unwrap();
_ = tokio::try_join!(
leader.prove_plaintext(explicit_nonce.to_vec(), plaintext),
follower.verify_plaintext(explicit_nonce.to_vec(), ciphertext)
)
.unwrap();
_ = tokio::try_join!(
leader.thread_mut().finalize(),
follower.thread_mut().finalize()
)
.unwrap();
}
fn criterion_benchmark(c: &mut Criterion) {
let rt = tokio::runtime::Runtime::new().unwrap();
let len = 1024;
let mut group = c.benchmark_group("stream_cipher/encrypt_private");
group.throughput(Throughput::Bytes(len as u64));
group.bench_function(BenchmarkId::from_parameter(len), |b| {
b.to_async(&rt)
.iter(|| async { bench_stream_cipher_encrypt(len).await })
});
drop(group);
let mut group = c.benchmark_group("stream_cipher/zk");
group.throughput(Throughput::Bytes(len as u64));
group.bench_function(BenchmarkId::from_parameter(len), |b| {
b.to_async(&rt)
.iter(|| async { bench_stream_cipher_zk(len).await })
});
drop(group);
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);

View File

@@ -1,118 +0,0 @@
use std::sync::Arc;
use mpz_circuits::{
types::{StaticValueType, Value},
Circuit,
};
use crate::{circuit::AES_CTR, StreamCipherError};
/// A counter-mode block cipher circuit.
pub trait CtrCircuit: Default + Clone + Send + Sync + 'static {
/// The key type.
type KEY: StaticValueType + TryFrom<Vec<u8>> + Send + Sync + 'static;
/// The block type.
type BLOCK: StaticValueType
+ TryFrom<Vec<u8>>
+ TryFrom<Value>
+ Into<Vec<u8>>
+ Default
+ Send
+ Sync
+ 'static;
/// The IV type.
type IV: StaticValueType
+ TryFrom<Vec<u8>>
+ TryFrom<Value>
+ Into<Vec<u8>>
+ Send
+ Sync
+ 'static;
/// The nonce type.
type NONCE: StaticValueType
+ TryFrom<Vec<u8>>
+ TryFrom<Value>
+ Into<Vec<u8>>
+ Clone
+ Copy
+ Send
+ Sync
+ std::fmt::Debug
+ 'static;
/// The length of the key.
const KEY_LEN: usize;
/// The length of the block.
const BLOCK_LEN: usize;
/// The length of the IV.
const IV_LEN: usize;
/// The length of the nonce.
const NONCE_LEN: usize;
/// Returns the circuit of the cipher.
fn circuit() -> Arc<Circuit>;
/// Applies the keystream to the message.
fn apply_keystream(
key: &[u8],
iv: &[u8],
start_ctr: usize,
explicit_nonce: &[u8],
msg: &[u8],
) -> Result<Vec<u8>, StreamCipherError>;
}
/// A circuit for AES-128 in counter mode.
#[derive(Default, Debug, Clone)]
pub struct Aes128Ctr;
impl CtrCircuit for Aes128Ctr {
type KEY = [u8; 16];
type BLOCK = [u8; 16];
type IV = [u8; 4];
type NONCE = [u8; 8];
const KEY_LEN: usize = 16;
const BLOCK_LEN: usize = 16;
const IV_LEN: usize = 4;
const NONCE_LEN: usize = 8;
fn circuit() -> Arc<Circuit> {
AES_CTR.clone()
}
fn apply_keystream(
key: &[u8],
iv: &[u8],
start_ctr: usize,
explicit_nonce: &[u8],
msg: &[u8],
) -> Result<Vec<u8>, StreamCipherError> {
use ::cipher::{KeyIvInit, StreamCipher, StreamCipherSeek};
use aes::Aes128;
use ctr::Ctr32BE;
let key: &[u8; 16] = key
.try_into()
.map_err(|_| StreamCipherError::key_len::<Self>(key.len()))?;
let iv: &[u8; 4] = iv
.try_into()
.map_err(|_| StreamCipherError::iv_len::<Self>(iv.len()))?;
let explicit_nonce: &[u8; 8] = explicit_nonce
.try_into()
.map_err(|_| StreamCipherError::explicit_nonce_len::<Self>(explicit_nonce.len()))?;
let mut full_iv = [0u8; 16];
full_iv[0..4].copy_from_slice(iv);
full_iv[4..12].copy_from_slice(explicit_nonce);
let mut cipher = Ctr32BE::<Aes128>::new(key.into(), &full_iv.into());
let mut buf = msg.to_vec();
cipher
.try_seek(start_ctr * Self::BLOCK_LEN)
.expect("start counter is less than keystream length");
cipher.apply_keystream(&mut buf);
Ok(buf)
}
}

Some files were not shown because too many files have changed in this diff Show More