mirror of
https://github.com/tlsnotary/tlsn.git
synced 2026-01-10 05:48:06 -05:00
Compare commits
192 Commits
v0.1.0-alp
...
blake3_exa
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5e6c5c13aa | ||
|
|
4ab73bdfb5 | ||
|
|
a8fa57f2cb | ||
|
|
d25fb320d4 | ||
|
|
0539268da7 | ||
|
|
427b2896b5 | ||
|
|
89d1e594d1 | ||
|
|
b4380f021e | ||
|
|
8a823d18ec | ||
|
|
7bcfc56bd8 | ||
|
|
2909d5ebaa | ||
|
|
7918494ccc | ||
|
|
92dd47b376 | ||
|
|
5474a748ce | ||
|
|
92da5adc24 | ||
|
|
e0ce1ad31a | ||
|
|
3b76877920 | ||
|
|
783355772a | ||
|
|
e5c59da90b | ||
|
|
f059c53c2d | ||
|
|
a1367b5428 | ||
|
|
9d8124ac9d | ||
|
|
5034366c72 | ||
|
|
afd8f44261 | ||
|
|
21086d2883 | ||
|
|
cca9a318a4 | ||
|
|
cb804a6025 | ||
|
|
9f849e7c18 | ||
|
|
389bceddef | ||
|
|
657838671a | ||
|
|
2f072b2578 | ||
|
|
33153d1124 | ||
|
|
2d399d5e24 | ||
|
|
b6d7249b6d | ||
|
|
2a8c1c3382 | ||
|
|
7c27162875 | ||
|
|
eef813712d | ||
|
|
2e94e08fa6 | ||
|
|
97d9475335 | ||
|
|
38820d6a3f | ||
|
|
af85fa100f | ||
|
|
008b901913 | ||
|
|
db85f68328 | ||
|
|
fb80aa4cc9 | ||
|
|
8dae57d6a7 | ||
|
|
f2ff4ba792 | ||
|
|
9bf3371873 | ||
|
|
9d853eb496 | ||
|
|
6923ceefd3 | ||
|
|
5239c2328a | ||
|
|
6a7c5384a9 | ||
|
|
7e469006c0 | ||
|
|
55091b5e94 | ||
|
|
bc1eba18c9 | ||
|
|
c128ab16ce | ||
|
|
a87125ff88 | ||
|
|
0933d711d2 | ||
|
|
79c230f2fa | ||
|
|
345d5d45ad | ||
|
|
55a26aad77 | ||
|
|
1132d441e1 | ||
|
|
fa2fdfd601 | ||
|
|
24e10d664f | ||
|
|
c0e084c1ca | ||
|
|
b6845dfc5c | ||
|
|
31def9ea81 | ||
|
|
878fe7e87d | ||
|
|
3348ac34b6 | ||
|
|
82767ca2d5 | ||
|
|
c9aaf2e0fa | ||
|
|
241ed3b5a3 | ||
|
|
56f088db7d | ||
|
|
f5250479bd | ||
|
|
0e2eabb833 | ||
|
|
ad530ca500 | ||
|
|
8b1cac6fe0 | ||
|
|
555f65e6b2 | ||
|
|
046485188c | ||
|
|
db53814ee7 | ||
|
|
d924bd6deb | ||
|
|
b3558bef9c | ||
|
|
33c4b9d16f | ||
|
|
edc2a1783d | ||
|
|
c2a6546deb | ||
|
|
2dfa386415 | ||
|
|
5a188e75c7 | ||
|
|
a8bf1026ca | ||
|
|
f900fc51cd | ||
|
|
6ccf102ec8 | ||
|
|
2c500b13bd | ||
|
|
2da0c242cb | ||
|
|
798c22409a | ||
|
|
3b5ac20d5b | ||
|
|
a063f8cc14 | ||
|
|
6f6b24e76c | ||
|
|
a28718923b | ||
|
|
19447aabe5 | ||
|
|
8afb7a4c11 | ||
|
|
43c6877ec0 | ||
|
|
39e14949a0 | ||
|
|
31f62982b5 | ||
|
|
6623734ca0 | ||
|
|
41e215f912 | ||
|
|
9e0f79125b | ||
|
|
7bdd3a724b | ||
|
|
baa486ccfd | ||
|
|
de7a47de5b | ||
|
|
3a57134b3a | ||
|
|
86fed1a90c | ||
|
|
82964c273b | ||
|
|
81aaa338e6 | ||
|
|
f331a7a3c5 | ||
|
|
adb407d03b | ||
|
|
3e54119867 | ||
|
|
71aa90de88 | ||
|
|
93535ca955 | ||
|
|
a34dd57926 | ||
|
|
92d7b59ee8 | ||
|
|
c8e9cb370e | ||
|
|
4dc5570a31 | ||
|
|
198e24c5e4 | ||
|
|
f16d7238e5 | ||
|
|
9253adaaa4 | ||
|
|
8c889ac498 | ||
|
|
f0e2200d22 | ||
|
|
224e41a186 | ||
|
|
328c2af162 | ||
|
|
cdb80e1458 | ||
|
|
eeccbef909 | ||
|
|
190b7b0bf6 | ||
|
|
c70caa5ed9 | ||
|
|
20137b8c6c | ||
|
|
4cdd1395e8 | ||
|
|
c1b3d64d5d | ||
|
|
61ce838f8c | ||
|
|
efca281222 | ||
|
|
b24041b9f5 | ||
|
|
9649d6e4cf | ||
|
|
bc69683ecf | ||
|
|
6c468a91cf | ||
|
|
dcff0b9152 | ||
|
|
5f91926154 | ||
|
|
0496cbaeb1 | ||
|
|
d8747d49e3 | ||
|
|
6fe328581c | ||
|
|
6d1140355b | ||
|
|
5246beabf5 | ||
|
|
29efc35d14 | ||
|
|
32d25e5c69 | ||
|
|
ca9d364fc9 | ||
|
|
5cbafe17f5 | ||
|
|
acabb7761b | ||
|
|
c384a393bf | ||
|
|
be0be19018 | ||
|
|
63bd6abc5d | ||
|
|
cb13169b82 | ||
|
|
25d65734c0 | ||
|
|
119ae4b2a8 | ||
|
|
f59153b0a0 | ||
|
|
bffe9ebb0b | ||
|
|
65299d7def | ||
|
|
c03418a642 | ||
|
|
7bec5a84ee | ||
|
|
85e0f5b467 | ||
|
|
cacca108ed | ||
|
|
c9592f44a1 | ||
|
|
e6be5e1cc9 | ||
|
|
d974fb71d5 | ||
|
|
c0c1c0caa1 | ||
|
|
7d88d1c20b | ||
|
|
c10c9155a7 | ||
|
|
faab999339 | ||
|
|
e6bc93c1f1 | ||
|
|
c6dc262a5e | ||
|
|
db90e28e44 | ||
|
|
30e4e37c0d | ||
|
|
6344410cad | ||
|
|
1d663596c1 | ||
|
|
2c045e5de7 | ||
|
|
38104bca1a | ||
|
|
99ba47c25d | ||
|
|
2042089132 | ||
|
|
504967d09a | ||
|
|
6e80d03ac7 | ||
|
|
b3f79a9e2b | ||
|
|
99e02fb388 | ||
|
|
6b845fd473 | ||
|
|
66db5344ac | ||
|
|
1d4c50f804 | ||
|
|
61ff3a8255 | ||
|
|
2ac9de1edd | ||
|
|
a7a8a83410 |
2
.dockerignore
Normal file
2
.dockerignore
Normal file
@@ -0,0 +1,2 @@
|
||||
/target
|
||||
/.git
|
||||
3
.github/codecov.yml
vendored
Normal file
3
.github/codecov.yml
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
github_checks:
|
||||
annotations: false
|
||||
comment: false
|
||||
12
.github/scripts/build-server.sh
vendored
12
.github/scripts/build-server.sh
vendored
@@ -1,12 +0,0 @@
|
||||
#!/bin/bash
|
||||
# https://github.com/tlsnotary/tlsn/pull/419
|
||||
set -ex
|
||||
|
||||
environment=$1
|
||||
|
||||
aws s3 sync .git s3://tlsn-deploy/$environment/.git --delete
|
||||
|
||||
cargo build -p notary-server --release
|
||||
aws s3 cp ./target/release/notary-server s3://tlsn-deploy/$environment/
|
||||
|
||||
exit 0
|
||||
27
.github/scripts/deploy-server.sh
vendored
27
.github/scripts/deploy-server.sh
vendored
@@ -1,27 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
|
||||
environment=$1
|
||||
branch=$2
|
||||
|
||||
INSTANCE_ID=$(aws ec2 describe-instances --filters Name=tag:Name,Values=[tlsnotary-backend-v1] Name=instance-state-name,Values=[running] --query "Reservations[*].Instances[*][InstanceId]" --output text)
|
||||
aws ec2 create-tags --resources $INSTANCE_ID --tags "Key=$environment,Value=$branch"
|
||||
|
||||
COMMIT_HASH=$(git rev-parse HEAD)
|
||||
DEPLOY_ID=$(aws deploy create-deployment --application-name tlsn-$environment-v1 --deployment-group-name tlsn-$environment-v1-group --github-location repository=$GITHUB_REPOSITORY,commitId=$COMMIT_HASH --ignore-application-stop-failures --file-exists OVERWRITE --output text)
|
||||
|
||||
while true; do
|
||||
STATUS=$(aws deploy get-deployment --deployment-id $DEPLOY_ID --query 'deploymentInfo.status' --output text)
|
||||
if [ $STATUS != "InProgress" ] && [ $STATUS != "Created" ]; then
|
||||
if [ $STATUS = "Succeeded" ]; then
|
||||
echo "SUCCESS"
|
||||
exit 0
|
||||
else
|
||||
echo "Failed"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo "Deploying..."
|
||||
fi
|
||||
sleep 30
|
||||
done
|
||||
33
.github/scripts/modify-proxy.sh
vendored
33
.github/scripts/modify-proxy.sh
vendored
@@ -1,33 +0,0 @@
|
||||
#!/bin/bash
|
||||
# This script is triggered by Deploy server workflow in order to send an execution command of cd-scripts/modify_proxy.sh via AWS SSM to the proxy server
|
||||
|
||||
set -e
|
||||
|
||||
GH_OWNER="tlsnotary"
|
||||
GH_REPO="tlsn"
|
||||
BACKEND_INSTANCE_ID=$(aws ec2 describe-instances --filters Name=tag:Name,Values=[tlsnotary-backend-v1] Name=instance-state-name,Values=[running] --query "Reservations[*].Instances[*][InstanceId]" --output text)
|
||||
PROXY_INSTANCE_ID=$(aws ec2 describe-instances --filters Name=tag:Name,Values=[tlsnotary-web] Name=instance-state-name,Values=[running] --query "Reservations[*].Instances[*][InstanceId]" --output text)
|
||||
TAGS=$(aws ec2 describe-instances --instance-ids $BACKEND_INSTANCE_ID --query 'Reservations[*].Instances[*].Tags')
|
||||
|
||||
TAG=$(echo $TAGS | jq -r '.[][][] | select(.Key == "stable").Value')
|
||||
PORT=$(echo $TAGS | jq -r '.[][][] | select(.Key == "port").Value')
|
||||
|
||||
COMMAND_ID=$(aws ssm send-command --document-name "AWS-RunRemoteScript" --instance-ids $PROXY_INSTANCE_ID --parameters '{"sourceType":["GitHub"],"sourceInfo":["{\"owner\":\"'${GH_OWNER}'\", \"repository\":\"'${GH_REPO}'\", \"getOptions\":\"branch:'${TAG}'\", \"path\": \"cd-scripts\"}"],"commandLine":["modify_proxy.sh '${PORT}' '${TAG}' "]}' --output text --query "Command.CommandId")
|
||||
|
||||
while true; do
|
||||
SSM_STATUS=$(aws ssm list-command-invocations --command-id $COMMAND_ID --details --query "CommandInvocations[].Status" --output text)
|
||||
|
||||
if [ $SSM_STATUS != "Success" ] && [ $SSM_STATUS != "InProgress" ]; then
|
||||
echo "Proxy modification failed"
|
||||
aws ssm list-command-invocations --command-id $COMMAND_ID --details --query "CommandInvocations[].CommandPlugins[].{Status:Status,Output:Output}"
|
||||
exit 1
|
||||
elif [ $SSM_STATUS = "Success" ]; then
|
||||
aws ssm list-command-invocations --command-id $COMMAND_ID --details --query "CommandInvocations[].CommandPlugins[].{Status:Status,Output:Output}"
|
||||
echo "Success"
|
||||
break
|
||||
fi
|
||||
|
||||
sleep 2
|
||||
done
|
||||
|
||||
exit 0
|
||||
26
.github/workflows/bench.yml
vendored
26
.github/workflows/bench.yml
vendored
@@ -1,7 +1,16 @@
|
||||
name: Run Benchmarks
|
||||
name: Run Benchmarks (Native or Browser)
|
||||
on:
|
||||
# manual trigger
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
bench_type:
|
||||
description: "Specify the benchmark type (native or browser)"
|
||||
required: true
|
||||
default: "native"
|
||||
type: choice
|
||||
options:
|
||||
- native
|
||||
- browser
|
||||
|
||||
jobs:
|
||||
run-benchmarks:
|
||||
@@ -12,16 +21,21 @@ jobs:
|
||||
|
||||
- name: Build Docker Image
|
||||
run: |
|
||||
docker build -t tlsn-bench . -f ./crates/benches/benches.Dockerfile
|
||||
docker build -t tlsn-bench . -f ./crates/harness/harness.Dockerfile
|
||||
|
||||
- name: Run Benchmarks
|
||||
run: |
|
||||
docker run --privileged -v ${{ github.workspace }}/crates/benches/:/benches tlsn-bench
|
||||
docker run --privileged -v ./crates/harness/:/benches tlsn-bench bash -c "runner setup; runner --target ${{ github.event.inputs.bench_type }} bench"
|
||||
|
||||
- name: Upload runtime_vs_latency.html
|
||||
- name: Plot Benchmarks
|
||||
run: |
|
||||
docker run -v ./crates/harness/:/benches tlsn-bench bash -c "tlsn-harness-plot /benches/bench.toml /benches/metrics.csv --min-max-band --prover-kind ${{ github.event.inputs.bench_type }}"
|
||||
- name: Upload graphs
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: benchmark_graphs
|
||||
path: |
|
||||
./crates/benches/runtime_vs_latency.html
|
||||
./crates/benches/runtime_vs_bandwidth.html
|
||||
./crates/harness/metrics.csv
|
||||
./crates/harness/bench.toml
|
||||
./crates/harness/runtime_vs_latency.html
|
||||
./crates/harness/runtime_vs_bandwidth.html
|
||||
86
.github/workflows/cd-server.yml
vendored
86
.github/workflows/cd-server.yml
vendored
@@ -1,86 +0,0 @@
|
||||
name: Deploy server
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
tags:
|
||||
- "[v]?[0-9]+.[0-9]+.[0-9]+*"
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
environment:
|
||||
description: "Environment"
|
||||
required: true
|
||||
default: "nightly"
|
||||
type: choice
|
||||
options:
|
||||
- nightly
|
||||
- stable
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
DATA_ENV: ${{ github.event.inputs.environment || 'nightly' }}
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Manipulate Environment
|
||||
id: manipulate
|
||||
run: |
|
||||
if [ "${{ github.event_name }}" = "push" ] && [ "$GITHUB_REF_NAME" = "dev" ]; then
|
||||
echo "env=nightly" >> $GITHUB_OUTPUT
|
||||
elif [ "${{ github.event_name }}" = "push" ] && [[ "${{ github.ref }}" = "refs/tags/"* ]]; then
|
||||
echo "env=stable" >> $GITHUB_OUTPUT
|
||||
elif [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
|
||||
echo "env=${{ env.DATA_ENV }}" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "Operation not permitted"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Wait for integration test workflow to succeed
|
||||
if: github.event_name == 'push'
|
||||
uses: lewagon/wait-on-check-action@v1.3.1
|
||||
with:
|
||||
ref: ${{ github.ref }}
|
||||
# More details [here](https://github.com/lewagon/wait-on-check-action#check-name)
|
||||
check-name: 'Run tests release build'
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
# How frequent (in seconds) this job will call GitHub API to check the status of the job specified at 'check-name'
|
||||
wait-interval: 60
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: Configure AWS Credentials
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
role-to-assume: arn:aws:iam::490752553772:role/tlsn-deploy-slc
|
||||
role-duration-seconds: 1800
|
||||
aws-region: eu-central-1
|
||||
|
||||
- name: Install stable rust toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
toolchain: stable
|
||||
|
||||
- name: Cargo build
|
||||
run: |
|
||||
.github/scripts/build-server.sh ${{ steps.manipulate.outputs.env }}
|
||||
|
||||
- name: Trigger Deployment
|
||||
run: |
|
||||
.github/scripts/deploy-server.sh ${{ steps.manipulate.outputs.env }} $GITHUB_REF_NAME
|
||||
|
||||
- name: Modify Proxy
|
||||
if: ${{ steps.manipulate.outputs.env == 'stable' }}
|
||||
run: |
|
||||
.github/scripts/modify-proxy.sh
|
||||
52
.github/workflows/cd.yml
vendored
52
.github/workflows/cd.yml
vendored
@@ -1,52 +0,0 @@
|
||||
name: cd
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "[v]?[0-9]+.[0-9]+.[0-9]+*"
|
||||
|
||||
env:
|
||||
CONTAINER_REGISTRY: ghcr.io
|
||||
|
||||
jobs:
|
||||
build_and_publish_notary_server_image:
|
||||
name: Build and publish notary server's image
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
steps:
|
||||
- name: Wait for integration test workflow to succeed
|
||||
uses: lewagon/wait-on-check-action@v1.3.1
|
||||
with:
|
||||
ref: ${{ github.ref }}
|
||||
# More details [here](https://github.com/lewagon/wait-on-check-action#check-name)
|
||||
check-name: 'Run tests release build'
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
# How frequent (in seconds) this job will call GitHub API to check the status of the job specified at 'check-name'
|
||||
wait-interval: 60
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Log in to the Container registry
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ env.CONTAINER_REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract metadata (tags, labels) for Docker image of notary server
|
||||
id: meta-notary-server
|
||||
uses: docker/metadata-action@v4
|
||||
with:
|
||||
images: ${{ env.CONTAINER_REGISTRY }}/${{ github.repository }}/notary-server
|
||||
|
||||
- name: Build and push Docker image of notary server
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: ${{ steps.meta-notary-server.outputs.tags }}
|
||||
labels: ${{ steps.meta-notary-server.outputs.labels }}
|
||||
file: ./crates/notary/server/notary-server.Dockerfile
|
||||
136
.github/workflows/ci.yml
vendored
136
.github/workflows/ci.yml
vendored
@@ -7,8 +7,10 @@ on:
|
||||
tags:
|
||||
- "[v]?[0-9]+.[0-9]+.[0-9]+*"
|
||||
pull_request:
|
||||
branches:
|
||||
- dev
|
||||
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -16,11 +18,31 @@ env:
|
||||
# We need a higher number of parallel rayon tasks than the default (which is 4)
|
||||
# in order to prevent a deadlock, c.f.
|
||||
# - https://github.com/tlsnotary/tlsn/issues/548
|
||||
# - https://github.com/privacy-scaling-explorations/mpz/issues/178
|
||||
# - https://github.com/privacy-ethereum/mpz/issues/178
|
||||
# 32 seems to be big enough for the foreseeable future
|
||||
RAYON_NUM_THREADS: 32
|
||||
RUST_VERSION: 1.90.0
|
||||
|
||||
jobs:
|
||||
clippy:
|
||||
name: Clippy
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install rust toolchain
|
||||
uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
components: clippy
|
||||
|
||||
- name: Use caching
|
||||
uses: Swatinem/rust-cache@v2.7.7
|
||||
|
||||
- name: Clippy
|
||||
run: cargo clippy --keep-going --all-features --all-targets --locked -- -D warnings
|
||||
|
||||
fmt:
|
||||
name: Check formatting
|
||||
runs-on: ubuntu-latest
|
||||
@@ -36,10 +58,11 @@ jobs:
|
||||
components: rustfmt
|
||||
|
||||
- name: Use caching
|
||||
uses: Swatinem/rust-cache@v2.7.3
|
||||
uses: Swatinem/rust-cache@v2.7.7
|
||||
|
||||
- name: Check formatting
|
||||
run: cargo +nightly fmt --check --all
|
||||
|
||||
build-and-test:
|
||||
name: Build and test
|
||||
runs-on: ubuntu-latest
|
||||
@@ -47,35 +70,32 @@ jobs:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install stable rust toolchain
|
||||
- name: Install rust toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
toolchain: stable
|
||||
components: clippy
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
|
||||
- name: Use caching
|
||||
uses: Swatinem/rust-cache@v2.7.3
|
||||
|
||||
- name: Clippy
|
||||
run: cargo clippy --all-features --all-targets -- -D warnings
|
||||
uses: Swatinem/rust-cache@v2.7.7
|
||||
|
||||
- name: Build
|
||||
run: cargo build --all-targets
|
||||
run: cargo build --all-targets --locked
|
||||
|
||||
- name: Test
|
||||
run: cargo test
|
||||
build-wasm:
|
||||
name: Build and test wasm
|
||||
run: cargo test --no-fail-fast --locked
|
||||
|
||||
wasm:
|
||||
name: Build and Test wasm
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install stable rust toolchain
|
||||
- name: Install rust toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
targets: wasm32-unknown-unknown
|
||||
toolchain: stable
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
|
||||
- name: Install nightly rust toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
@@ -90,15 +110,38 @@ jobs:
|
||||
sudo apt-get install -y chromium-chromedriver
|
||||
|
||||
- name: Install wasm-pack
|
||||
run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
|
||||
# we install a specific version which supports custom profiles
|
||||
run: cargo install --git https://github.com/rustwasm/wasm-pack.git --rev 32e52ca
|
||||
|
||||
- name: Use caching
|
||||
uses: Swatinem/rust-cache@v2.7.3
|
||||
uses: Swatinem/rust-cache@v2.7.7
|
||||
|
||||
- name: Build harness
|
||||
working-directory: crates/harness
|
||||
run: ./build.sh
|
||||
|
||||
- name: Run tests
|
||||
working-directory: crates/harness
|
||||
run: |
|
||||
cd crates/wasm-test-runner
|
||||
./run.sh
|
||||
./bin/runner setup
|
||||
./bin/runner --target browser test
|
||||
|
||||
- name: Run build
|
||||
working-directory: crates/wasm
|
||||
run: ./build.sh
|
||||
|
||||
- name: Dry Run NPM Publish
|
||||
working-directory: crates/wasm/pkg
|
||||
run: npm publish --dry-run
|
||||
|
||||
- name: Save tlsn-wasm package for tagged builds
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ github.ref_name }}-tlsn-wasm-pkg
|
||||
path: ./crates/wasm/pkg
|
||||
if-no-files-found: error
|
||||
|
||||
tests-integration:
|
||||
name: Run tests release build
|
||||
runs-on: ubuntu-latest
|
||||
@@ -106,16 +149,53 @@ jobs:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install stable rust toolchain
|
||||
- name: Install rust toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
toolchain: stable
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
|
||||
- name: Use caching
|
||||
uses: Swatinem/rust-cache@v2.7.3
|
||||
|
||||
- name: Add custom DNS entry to /etc/hosts for notary TLS test
|
||||
run: echo "127.0.0.1 tlsnotaryserver.io" | sudo tee -a /etc/hosts
|
||||
uses: Swatinem/rust-cache@v2.7.7
|
||||
|
||||
- name: Run integration tests
|
||||
run: cargo test --profile tests-integration --workspace --exclude tlsn-tls-client --exclude tlsn-tls-core -- --include-ignored
|
||||
run: cargo test --locked --profile tests-integration --workspace --exclude tlsn-tls-client --exclude tlsn-tls-core --no-fail-fast -- --include-ignored
|
||||
|
||||
coverage:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install rust toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
toolchain: ${{ env.RUST_VERSION }}
|
||||
- name: Install cargo-llvm-cov
|
||||
uses: taiki-e/install-action@cargo-llvm-cov
|
||||
- name: Generate code coverage
|
||||
run: cargo llvm-cov --all-features --workspace --locked --lcov --output-path lcov.info
|
||||
- name: Upload coverage to Codecov
|
||||
uses: codecov/codecov-action@v4
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: lcov.info
|
||||
fail_ci_if_error: true
|
||||
|
||||
create-release-draft:
|
||||
name: Create Release Draft
|
||||
needs: build-and-test
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
if: startsWith(github.ref, 'refs/tags/v') && contains(github.ref, '.')
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Create GitHub Release Draft
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
draft: true
|
||||
tag_name: ${{ github.ref_name }}
|
||||
prerelease: true
|
||||
generate_release_notes: true
|
||||
|
||||
62
.github/workflows/releng.yml
vendored
Normal file
62
.github/workflows/releng.yml
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
name: Publish tlsn-wasm to NPM
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tag:
|
||||
description: 'Tag to publish to NPM'
|
||||
required: true
|
||||
default: 'v0.1.0-alpha.13-pre'
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
|
||||
steps:
|
||||
- name: Find and download tlsn-wasm build from the tagged ci workflow
|
||||
id: find_run
|
||||
run: |
|
||||
# Find the workflow run ID for the tag
|
||||
RUN_ID=$(gh api \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
"/repos/tlsnotary/tlsn/actions/workflows/ci.yml/runs?per_page=100" \
|
||||
--jq '.workflow_runs[] | select(.head_branch == "${{ github.event.inputs.tag }}") | .id' | sort | tail -1)
|
||||
|
||||
if [ -z "$RUN_ID" ]; then
|
||||
echo "No run found for tag ${{ github.event.inputs.tag }}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Found run: $RUN_ID"
|
||||
echo "run_id=$RUN_ID" >> "$GITHUB_OUTPUT"
|
||||
|
||||
# Find the download URL for the build artifact
|
||||
DOWNLOAD_URL=$(gh api \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
/repos/tlsnotary/tlsn/actions/runs/${RUN_ID}/artifacts \
|
||||
--jq '.artifacts[] | select(.name == "${{ github.event.inputs.tag }}-tlsn-wasm-pkg") | .archive_download_url')
|
||||
|
||||
if [ -z "$DOWNLOAD_URL" ]; then
|
||||
echo "No download url for build artifact ${{ github.event.inputs.tag }}-tlsn-wasm-pkg in run $RUN_ID"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Download and unzip the build artifact
|
||||
mkdir tlsn-wasm-pkg
|
||||
curl -L -H "Authorization: Bearer ${GH_TOKEN}" \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
-o tlsn-wasm-pkg.zip \
|
||||
${DOWNLOAD_URL}
|
||||
unzip -q tlsn-wasm-pkg.zip -d tlsn-wasm-pkg
|
||||
|
||||
|
||||
- name: NPM Publish for tlsn-wasm
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
|
||||
run: |
|
||||
cd tlsn-wasm-pkg
|
||||
echo "//registry.npmjs.org/:_authToken=${NODE_AUTH_TOKEN}" > .npmrc
|
||||
npm publish
|
||||
rm .npmrc
|
||||
11
.github/workflows/rustdoc.yml
vendored
11
.github/workflows/rustdoc.yml
vendored
@@ -4,7 +4,6 @@ on:
|
||||
push:
|
||||
branches: [dev]
|
||||
pull_request:
|
||||
branches: [dev]
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -22,18 +21,12 @@ jobs:
|
||||
toolchain: stable
|
||||
|
||||
- name: "rustdoc"
|
||||
run: cargo doc -p tlsn-core -p tlsn-prover -p tlsn-verifier --no-deps --all-features
|
||||
# --target-dir ${GITHUB_WORKSPACE}/docs
|
||||
|
||||
# https://dev.to/deciduously/prepare-your-rust-api-docs-for-github-pages-2n5i
|
||||
- name: "Add index file -> tlsn_prover"
|
||||
run: |
|
||||
echo "<meta http-equiv=\"refresh\" content=\"0; url=tlsn_prover\">" > target/doc/index.html
|
||||
run: crates/wasm/build-docs.sh
|
||||
|
||||
- name: Deploy
|
||||
uses: peaceiris/actions-gh-pages@v3
|
||||
if: ${{ github.ref == 'refs/heads/dev' }}
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
publish_dir: target/doc/
|
||||
publish_dir: target/wasm32-unknown-unknown/doc/
|
||||
# cname: rustdocs.tlsnotary.org
|
||||
|
||||
24
.github/workflows/updatemain.yml
vendored
Normal file
24
.github/workflows/updatemain.yml
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
name: Fast-forward main branch to published release tag
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [published]
|
||||
|
||||
jobs:
|
||||
ff-main-to-release:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
steps:
|
||||
- name: Checkout main
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: main
|
||||
|
||||
- name: Fast-forward main to release tag
|
||||
run: |
|
||||
tag="${{ github.event.release.tag_name }}"
|
||||
git fetch origin "refs/tags/$tag:refs/tags/$tag"
|
||||
git merge --ff-only "refs/tags/$tag"
|
||||
git push origin main
|
||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -3,10 +3,6 @@
|
||||
debug/
|
||||
target/
|
||||
|
||||
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
|
||||
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
|
||||
Cargo.lock
|
||||
|
||||
# These are backup files generated by rustfmt
|
||||
**/*.rs.bk
|
||||
|
||||
@@ -32,4 +28,4 @@ Cargo.lock
|
||||
*.log
|
||||
|
||||
# metrics
|
||||
*.csv
|
||||
*.csv
|
||||
|
||||
@@ -16,6 +16,8 @@ keywords.
|
||||
|
||||
Try to do one pull request per change.
|
||||
|
||||
**Disclaimer**: While we appreciate all contributions, we do not prioritize minor grammatical fixes (e.g., correcting typos, rewording sentences) unless they significantly improve clarity in technical documentation. These contributions can be a distraction for the team. If you notice a grammatical error, please let us know on our Discord.
|
||||
|
||||
## Linting
|
||||
|
||||
Before a Pull Request (PR) can be merged, the Continuous Integration (CI) pipeline automatically lints all code using [Clippy](https://doc.rust-lang.org/stable/clippy/usage.html). To ensure your code is free of linting issues before creating a PR, run the following command:
|
||||
@@ -59,3 +61,21 @@ Comments for function arguments must adhere to this pattern:
|
||||
/// * `arg2` - The second argument.
|
||||
pub fn compute(...
|
||||
```
|
||||
|
||||
## Cargo.lock
|
||||
|
||||
We check in `Cargo.lock` to ensure reproducible builds. It must be updated whenever `Cargo.toml` changes. The TLSNotary team typically updates `Cargo.lock` in a separate commit after dependency changes.
|
||||
|
||||
If you want to hide `Cargo.lock` changes from your local `git diff`, run:
|
||||
|
||||
```sh
|
||||
git update-index --assume-unchanged Cargo.lock
|
||||
```
|
||||
|
||||
To start tracking changes again:
|
||||
```sh
|
||||
git update-index --no-assume-unchanged Cargo.lock
|
||||
```
|
||||
|
||||
> ⚠️ Note: This only affects your local view. The file is still tracked in the repository and will be checked and used in CI.
|
||||
|
||||
|
||||
9076
Cargo.lock
generated
Normal file
9076
Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
134
Cargo.toml
134
Cargo.toml
@@ -1,93 +1,107 @@
|
||||
[workspace]
|
||||
members = [
|
||||
"crates/benches",
|
||||
"crates/common",
|
||||
"crates/components/aead",
|
||||
"crates/components/block-cipher",
|
||||
"crates/attestation",
|
||||
"crates/components/deap",
|
||||
"crates/components/cipher",
|
||||
"crates/components/hmac-sha256",
|
||||
"crates/components/hmac-sha256-circuits",
|
||||
"crates/components/key-exchange",
|
||||
"crates/components/stream-cipher",
|
||||
"crates/components/universal-hash",
|
||||
"crates/core",
|
||||
"crates/data-fixtures",
|
||||
"crates/examples",
|
||||
"crates/formats",
|
||||
"crates/notary/client",
|
||||
"crates/notary/server",
|
||||
"crates/notary/tests-integration",
|
||||
"crates/prover",
|
||||
"crates/server-fixture/certs",
|
||||
"crates/server-fixture/server",
|
||||
"crates/tests-integration",
|
||||
"crates/tls/backend",
|
||||
"crates/tls/client",
|
||||
"crates/tls/client-async",
|
||||
"crates/tls/core",
|
||||
"crates/tls/mpc",
|
||||
"crates/mpc-tls",
|
||||
"crates/tls/server-fixture",
|
||||
"crates/verifier",
|
||||
"crates/wasm",
|
||||
"crates/wasm-test-runner",
|
||||
"crates/harness/core",
|
||||
"crates/harness/executor",
|
||||
"crates/harness/runner",
|
||||
"crates/harness/plot",
|
||||
"crates/tlsn",
|
||||
]
|
||||
resolver = "2"
|
||||
|
||||
[workspace.lints.rust]
|
||||
# unsafe_code = "forbid"
|
||||
|
||||
[workspace.lints.clippy]
|
||||
# enum_glob_use = "deny"
|
||||
|
||||
[profile.tests-integration]
|
||||
inherits = "release"
|
||||
opt-level = 1
|
||||
|
||||
[profile.wasm]
|
||||
inherits = "release"
|
||||
lto = true
|
||||
panic = "abort"
|
||||
codegen-units = 1
|
||||
|
||||
[workspace.dependencies]
|
||||
notary-client = { path = "crates/notary/client" }
|
||||
notary-server = { path = "crates/notary/server" }
|
||||
tls-server-fixture = { path = "crates/tls/server-fixture" }
|
||||
tlsn-aead = { path = "crates/components/aead" }
|
||||
tlsn-block-cipher = { path = "crates/components/block-cipher" }
|
||||
tlsn-common = { path = "crates/common" }
|
||||
tlsn-attestation = { path = "crates/attestation" }
|
||||
tlsn-cipher = { path = "crates/components/cipher" }
|
||||
tlsn-core = { path = "crates/core" }
|
||||
tlsn-data-fixtures = { path = "crates/data-fixtures" }
|
||||
tlsn-deap = { path = "crates/components/deap" }
|
||||
tlsn-formats = { path = "crates/formats" }
|
||||
tlsn-hmac-sha256 = { path = "crates/components/hmac-sha256" }
|
||||
tlsn-hmac-sha256-circuits = { path = "crates/components/hmac-sha256-circuits" }
|
||||
tlsn-key-exchange = { path = "crates/components/key-exchange" }
|
||||
tlsn-prover = { path = "crates/prover" }
|
||||
tlsn-mpc-tls = { path = "crates/mpc-tls" }
|
||||
tlsn-server-fixture = { path = "crates/server-fixture/server" }
|
||||
tlsn-server-fixture-certs = { path = "crates/server-fixture/certs" }
|
||||
tlsn-stream-cipher = { path = "crates/components/stream-cipher" }
|
||||
tlsn-tls-backend = { path = "crates/tls/backend" }
|
||||
tlsn-tls-client = { path = "crates/tls/client" }
|
||||
tlsn-tls-client-async = { path = "crates/tls/client-async" }
|
||||
tlsn-tls-core = { path = "crates/tls/core" }
|
||||
tlsn-tls-mpc = { path = "crates/tls/mpc" }
|
||||
tlsn-universal-hash = { path = "crates/components/universal-hash" }
|
||||
tlsn-utils = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "e7b2db6" }
|
||||
tlsn-utils-aio = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "e7b2db6" }
|
||||
tlsn-verifier = { path = "crates/verifier" }
|
||||
tlsn-utils = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
|
||||
tlsn-harness-core = { path = "crates/harness/core" }
|
||||
tlsn-harness-executor = { path = "crates/harness/executor" }
|
||||
tlsn-harness-runner = { path = "crates/harness/runner" }
|
||||
tlsn-wasm = { path = "crates/wasm" }
|
||||
tlsn = { path = "crates/tlsn" }
|
||||
|
||||
mpz-circuits = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
|
||||
mpz-common = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
|
||||
mpz-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
|
||||
mpz-garble = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
|
||||
mpz-garble-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
|
||||
mpz-ole = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
|
||||
mpz-ot = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
|
||||
mpz-share-conversion = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
|
||||
mpz-circuits = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
|
||||
mpz-memory-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
|
||||
mpz-common = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
|
||||
mpz-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
|
||||
mpz-vm-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
|
||||
mpz-garble = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
|
||||
mpz-garble-core = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
|
||||
mpz-ole = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
|
||||
mpz-ot = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
|
||||
mpz-share-conversion = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
|
||||
mpz-fields = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
|
||||
mpz-zk = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
|
||||
mpz-hash = { git = "https://github.com/privacy-ethereum/mpz", rev = "70348c1" }
|
||||
|
||||
serio = { version = "0.1" }
|
||||
spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "e7b2db6" }
|
||||
uid-mux = { version = "0.1", features = ["serio"] }
|
||||
rangeset = { version = "0.2" }
|
||||
serio = { version = "0.2" }
|
||||
spansy = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
|
||||
uid-mux = { version = "0.2" }
|
||||
websocket-relay = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "6168663" }
|
||||
|
||||
aead = { version = "0.4" }
|
||||
aes = { version = "0.8" }
|
||||
aes-gcm = { version = "0.9" }
|
||||
anyhow = { version = "1.0" }
|
||||
async-trait = { version = "0.1" }
|
||||
async-tungstenite = { version = "0.25" }
|
||||
axum = { version = "0.7" }
|
||||
axum = { version = "0.8" }
|
||||
bcs = { version = "0.1" }
|
||||
bincode = { version = "1.3" }
|
||||
blake3 = { version = "1.5" }
|
||||
bon = { version = "3.6" }
|
||||
bytes = { version = "1.4" }
|
||||
cfg-if = { version = "1" }
|
||||
chromiumoxide = { version = "0.7" }
|
||||
chrono = { version = "0.4" }
|
||||
cipher = { version = "0.4" }
|
||||
clap = { version = "4.5" }
|
||||
criterion = { version = "0.5" }
|
||||
ctr = { version = "0.9" }
|
||||
derive_builder = { version = "0.12" }
|
||||
@@ -96,45 +110,59 @@ elliptic-curve = { version = "0.13" }
|
||||
enum-try-as-inner = { version = "0.1" }
|
||||
env_logger = { version = "0.10" }
|
||||
futures = { version = "0.3" }
|
||||
futures-rustls = { version = "0.26" }
|
||||
futures-util = { version = "0.3" }
|
||||
futures-rustls = { version = "0.25" }
|
||||
generic-array = { version = "0.14" }
|
||||
ghash = { version = "0.5" }
|
||||
hex = { version = "0.4" }
|
||||
hmac = { version = "0.12" }
|
||||
http = { version = "1.1" }
|
||||
http-body-util = { version = "0.1" }
|
||||
hyper = { version = "1.1" }
|
||||
hyper-util = { version = "0.1" }
|
||||
ipnet = { version = "2.11" }
|
||||
inventory = { version = "0.3" }
|
||||
itybity = { version = "0.2" }
|
||||
js-sys = { version = "0.3" }
|
||||
k256 = { version = "0.13" }
|
||||
log = { version = "0.4" }
|
||||
once_cell = { version = "1.19" }
|
||||
opaque-debug = { version = "0.3" }
|
||||
p256 = { version = "0.13" }
|
||||
pkcs8 = { version = "0.10" }
|
||||
pin-project-lite = { version = "0.2" }
|
||||
rand = { version = "0.8" }
|
||||
rand_chacha = { version = "0.3" }
|
||||
rand_core = { version = "0.6" }
|
||||
pollster = { version = "0.4" }
|
||||
rand = { version = "0.9" }
|
||||
rand_chacha = { version = "0.9" }
|
||||
rand_core = { version = "0.9" }
|
||||
rand06-compat = { version = "0.1" }
|
||||
rayon = { version = "1.10" }
|
||||
regex = { version = "1.10" }
|
||||
ring = { version = "0.17" }
|
||||
rs_merkle = { git = "https://github.com/tlsnotary/rs-merkle.git", rev = "85f3e82" }
|
||||
rstest = { version = "0.17" }
|
||||
rustls = { version = "0.21" }
|
||||
rustls-pemfile = { version = "1.0" }
|
||||
rustls-webpki = { version = "0.103" }
|
||||
rustls-pki-types = { version = "1.12" }
|
||||
sct = { version = "0.7" }
|
||||
semver = { version = "1.0" }
|
||||
serde = { version = "1.0" }
|
||||
serde_json = { version = "1.0" }
|
||||
sha2 = { version = "0.10" }
|
||||
signature = { version = "2.2" }
|
||||
thiserror = { version = "1.0" }
|
||||
tiny-keccak = { version = "2.0" }
|
||||
tokio = { version = "1.38" }
|
||||
tokio-rustls = { version = "0.24" }
|
||||
tokio-util = { version = "0.7" }
|
||||
toml = { version = "0.8" }
|
||||
tower = { version = "0.5" }
|
||||
tower-http = { version = "0.5" }
|
||||
tower-service = { version = "0.3" }
|
||||
tracing = { version = "0.1" }
|
||||
tracing-subscriber = { version = "0.3" }
|
||||
uuid = { version = "1.4" }
|
||||
wasm-bindgen = { version = "0.2" }
|
||||
wasm-bindgen-futures = { version = "0.4" }
|
||||
web-spawn = { version = "0.2" }
|
||||
web-time = { version = "0.2" }
|
||||
webpki = { version = "0.22" }
|
||||
webpki-roots = { version = "0.26" }
|
||||
ws_stream_tungstenite = { version = "0.13" }
|
||||
webpki-roots = { version = "1.0" }
|
||||
webpki-root-certs = { version = "1.0" }
|
||||
ws_stream_wasm = { version = "0.7.5" }
|
||||
zeroize = { version = "1.8" }
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
[actions-url]: https://github.com/tlsnotary/tlsn/actions?query=workflow%3Aci+branch%3Adev
|
||||
|
||||
[Website](https://tlsnotary.org) |
|
||||
[Documentation](https://docs.tlsnotary.org) |
|
||||
[Documentation](https://tlsnotary.org/docs/intro) |
|
||||
[API Docs](https://tlsnotary.github.io/tlsn) |
|
||||
[Discord](https://discord.gg/9XwESXtcN7)
|
||||
|
||||
@@ -44,12 +44,9 @@ at your option.
|
||||
## Directory
|
||||
|
||||
- [examples](./crates/examples/): Examples on how to use the TLSNotary protocol.
|
||||
- [tlsn-prover](./crates/prover/): The library for the prover component.
|
||||
- [tlsn-verifier](./crates/verifier/): The library for the verifier component.
|
||||
- [notary](./crates/notary/): Implements the [notary server](https://docs.tlsnotary.org/intro.html#tls-verification-with-a-general-purpose-notary) and its client.
|
||||
- [components](./crates/components/): Houses low-level libraries.
|
||||
- [tlsn](./crates/tlsn/): The TLSNotary library.
|
||||
|
||||
This repository contains the source code for the Rust implementation of the TLSNotary protocol. For additional tools and implementations related to TLSNotary, visit <https://github.com/tlsnotary>. This includes repositories such as [`tlsn-js`](https://github.com/tlsnotary/tlsn-js), [`tlsn-extension`](https://github.com/tlsnotary/tlsn-extension), [`explorer`](https://github.com/tlsnotary/explorer), among others.
|
||||
This repository contains the source code for the Rust implementation of the TLSNotary protocol. For additional tools and implementations related to TLSNotary, visit <https://github.com/tlsnotary>. This includes repositories such as [`tlsn-js`](https://github.com/tlsnotary/tlsn-js), [`tlsn-extension`](https://github.com/tlsnotary/tlsn-extension), among others.
|
||||
|
||||
|
||||
## Development
|
||||
|
||||
31
appspec.yml
31
appspec.yml
@@ -1,31 +0,0 @@
|
||||
# AWS CodeDeploy application specification file
|
||||
version: 0.0
|
||||
os: linux
|
||||
files:
|
||||
- source: /
|
||||
destination: /home/ubuntu/tlsn
|
||||
permissions:
|
||||
- object: /home/ubuntu/tlsn
|
||||
owner: ubuntu
|
||||
group: ubuntu
|
||||
hooks:
|
||||
BeforeInstall:
|
||||
- location: cd-scripts/appspec-scripts/before_install.sh
|
||||
timeout: 300
|
||||
runas: ubuntu
|
||||
AfterInstall:
|
||||
- location: cd-scripts/appspec-scripts/after_install.sh
|
||||
timeout: 300
|
||||
runas: ubuntu
|
||||
ApplicationStart:
|
||||
- location: cd-scripts/appspec-scripts/start_app.sh
|
||||
timeout: 300
|
||||
runas: ubuntu
|
||||
ApplicationStop:
|
||||
- location: cd-scripts/appspec-scripts/stop_app.sh
|
||||
timeout: 300
|
||||
runas: ubuntu
|
||||
ValidateService:
|
||||
- location: cd-scripts/appspec-scripts/validate_app.sh
|
||||
timeout: 300
|
||||
runas: ubuntu
|
||||
@@ -1,35 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
TAG=$(curl http://169.254.169.254/latest/meta-data/tags/instance/stable)
|
||||
APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
|
||||
|
||||
if [ $APP_NAME = "stable" ]; then
|
||||
# Prepare directories for stable versions
|
||||
sudo mkdir ~/${APP_NAME}_${TAG}
|
||||
sudo mv ~/tlsn ~/${APP_NAME}_${TAG}
|
||||
sudo mkdir -p ~/${APP_NAME}_${TAG}/tlsn/notary/target/release
|
||||
sudo chown -R ubuntu.ubuntu ~/${APP_NAME}_${TAG}
|
||||
|
||||
# Download .git directory
|
||||
aws s3 cp s3://tlsn-deploy/$APP_NAME/.git ~/${APP_NAME}_${TAG}/tlsn/.git --recursive
|
||||
|
||||
# Download binary
|
||||
aws s3 cp s3://tlsn-deploy/$APP_NAME/notary-server ~/${APP_NAME}_${TAG}/tlsn/notary/target/release
|
||||
chmod +x ~/${APP_NAME}_${TAG}/tlsn/notary/target/release/notary-server
|
||||
else
|
||||
# Prepare directory for dev
|
||||
sudo rm -rf ~/$APP_NAME/tlsn
|
||||
sudo mv ~/tlsn/ ~/$APP_NAME
|
||||
sudo mkdir -p ~/$APP_NAME/tlsn/notary/target/release
|
||||
sudo chown -R ubuntu.ubuntu ~/$APP_NAME
|
||||
|
||||
# Download .git directory
|
||||
aws s3 cp s3://tlsn-deploy/$APP_NAME/.git ~/$APP_NAME/tlsn/.git --recursive
|
||||
|
||||
# Download binary
|
||||
aws s3 cp s3://tlsn-deploy/$APP_NAME/notary-server ~/$APP_NAME/tlsn/notary/target/release
|
||||
chmod +x ~/$APP_NAME/tlsn/notary/target/release/notary-server
|
||||
fi
|
||||
|
||||
exit 0
|
||||
@@ -1,20 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
|
||||
|
||||
if [ $APP_NAME = "stable" ]; then
|
||||
VERSIONS_DEPLOYED=$(find ~/ -maxdepth 1 -type d -name 'stable_*')
|
||||
VERSIONS_DEPLOYED_COUNT=$(echo $VERSIONS_DEPLOYED | wc -w)
|
||||
|
||||
if [ $VERSIONS_DEPLOYED_COUNT -gt 3 ]; then
|
||||
echo "More than 3 stable versions found"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
if [ ! -d ~/$APP_NAME ]; then
|
||||
mkdir ~/$APP_NAME
|
||||
fi
|
||||
fi
|
||||
|
||||
exit 0
|
||||
@@ -1,26 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Port tagging will also be used to manipulate proxy server via modify_proxy.sh script
|
||||
set -ex
|
||||
|
||||
TAG=$(curl http://169.254.169.254/latest/meta-data/tags/instance/stable)
|
||||
APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
|
||||
|
||||
if [ $APP_NAME = "stable" ]; then
|
||||
# Check if all stable ports are in use. If true, terminate the deployment
|
||||
[[ $(netstat -lnt4 | egrep -c ':(7047|7057|7067)\s') -eq 3 ]] && { echo "All stable ports are in use"; exit 1; }
|
||||
STABLE_PORTS="7047 7057 7067"
|
||||
for PORT in $STABLE_PORTS; do
|
||||
PORT_LISTENING=$(netstat -lnt4 | egrep -cw $PORT || true)
|
||||
if [ $PORT_LISTENING -eq 0 ]; then
|
||||
~/${APP_NAME}_${TAG}/tlsn/notary/target/release/notary-server --config-file ~/.notary/${APP_NAME}_${PORT}/config.yaml &> ~/${APP_NAME}_${TAG}/tlsn/notary.log &
|
||||
# Create a tag that will be used for service validation
|
||||
INSTANCE_ID=$(curl http://169.254.169.254/latest/meta-data/instance-id)
|
||||
aws ec2 create-tags --resources $INSTANCE_ID --tags "Key=port,Value=$PORT"
|
||||
break
|
||||
fi
|
||||
done
|
||||
else
|
||||
~/$APP_NAME/tlsn/notary/target/release/notary-server --config-file ~/.notary/$APP_NAME/config.yaml &> ~/$APP_NAME/tlsn/notary.log &
|
||||
fi
|
||||
|
||||
exit 0
|
||||
@@ -1,36 +0,0 @@
|
||||
#!/bin/bash
|
||||
# AWS CodeDeploy hook sequence: https://docs.aws.amazon.com/codedeploy/latest/userguide/reference-appspec-file-structure-hooks.html#appspec-hooks-server
|
||||
set -ex
|
||||
|
||||
APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
|
||||
|
||||
if [ $APP_NAME = "stable" ]; then
|
||||
VERSIONS_DEPLOYED=$(find ~/ -maxdepth 1 -type d -name 'stable_*')
|
||||
VERSIONS_DEPLOYED_COUNT=$(echo $VERSIONS_DEPLOYED | wc -w)
|
||||
|
||||
# Remove oldest version if exists
|
||||
if [ $VERSIONS_DEPLOYED_COUNT -eq 3 ]; then
|
||||
echo "Candidate versions to be removed:"
|
||||
OLDEST_DIR=""
|
||||
OLDEST_TIME=""
|
||||
|
||||
for DIR in $VERSIONS_DEPLOYED; do
|
||||
TIME=$(stat -c %W $DIR)
|
||||
|
||||
if [ -z $OLDEST_TIME ] || [ $TIME -lt $OLDEST_TIME ]; then
|
||||
OLDEST_DIR=$DIR
|
||||
OLDEST_TIME=$TIME
|
||||
fi
|
||||
done
|
||||
|
||||
echo "The oldest version is running under: $OLDEST_DIR"
|
||||
PID=$(lsof $OLDEST_DIR/tlsn/notary/target/release/notary-server | awk '{ print $2 }' | tail -1)
|
||||
kill -15 $PID || true
|
||||
rm -rf $OLDEST_DIR
|
||||
fi
|
||||
else
|
||||
PID=$(pgrep -f notary.*$APP_NAME)
|
||||
kill -15 $PID || true
|
||||
fi
|
||||
|
||||
exit 0
|
||||
@@ -1,21 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
# Verify proccess is running
|
||||
APP_NAME=$(echo $APPLICATION_NAME | awk -F- '{ print $2 }')
|
||||
|
||||
# Verify that listening sockets exist
|
||||
if [ $APP_NAME = "stable" ]; then
|
||||
PORT=$(curl http://169.254.169.254/latest/meta-data/tags/instance/port)
|
||||
ps -ef | grep notary.*$APP_NAME.*$PORT | grep -v grep
|
||||
[ $? -eq 0 ] || exit 1
|
||||
else
|
||||
PORT=7048
|
||||
pgrep -f notary.*$APP_NAME
|
||||
[ $? -eq 0 ] || exit 1
|
||||
fi
|
||||
|
||||
EXPOSED_PORTS=$(netstat -lnt4 | egrep -cw $PORT)
|
||||
[ $EXPOSED_PORTS -eq 1 ] || exit 1
|
||||
|
||||
exit 0
|
||||
@@ -1,14 +0,0 @@
|
||||
#!/bin/bash
|
||||
# This script is executed on proxy side, in order to assign the available port to latest stable version
|
||||
set -e
|
||||
|
||||
PORT=$1
|
||||
VERSION=$2
|
||||
|
||||
sed -i "/# Port $PORT/{n;s/v[0-9].[0-9].[0-9]-[a-z]*.[0-9]*/$VERSION/g}" /etc/nginx/sites-available/tlsnotary-pse
|
||||
sed -i "/# Port $PORT/{n;n;s/v[0-9].[0-9].[0-9]-[a-z]*.[0-9]*/$VERSION/g}" /etc/nginx/sites-available/tlsnotary-pse
|
||||
|
||||
nginx -t
|
||||
nginx -s reload
|
||||
|
||||
exit 0
|
||||
39
crates/attestation/Cargo.toml
Normal file
39
crates/attestation/Cargo.toml
Normal file
@@ -0,0 +1,39 @@
|
||||
[package]
|
||||
name = "tlsn-attestation"
|
||||
version = "0.1.0-alpha.13-pre"
|
||||
edition = "2024"
|
||||
|
||||
[features]
|
||||
default = []
|
||||
fixtures = ["tlsn-core/fixtures", "dep:tlsn-data-fixtures"]
|
||||
|
||||
[dependencies]
|
||||
tlsn-tls-core = { workspace = true }
|
||||
tlsn-core = { workspace = true }
|
||||
tlsn-data-fixtures = { workspace = true, optional = true }
|
||||
|
||||
bcs = { workspace = true }
|
||||
blake3 = { workspace = true }
|
||||
p256 = { workspace = true, features = ["serde"] }
|
||||
k256 = { workspace = true }
|
||||
opaque-debug = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
thiserror = { workspace = true }
|
||||
tiny-keccak = { workspace = true, features = ["keccak"] }
|
||||
|
||||
[dev-dependencies]
|
||||
alloy-primitives = { version = "1.3.1", default-features = false }
|
||||
alloy-signer = { version = "1.0", default-features = false }
|
||||
alloy-signer-local = { version = "1.0", default-features = false }
|
||||
rand06-compat = { workspace = true }
|
||||
rstest = { workspace = true }
|
||||
tlsn-core = { workspace = true, features = ["fixtures"] }
|
||||
tlsn-data-fixtures = { workspace = true }
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[[test]]
|
||||
name = "api"
|
||||
required-features = ["fixtures"]
|
||||
@@ -1,34 +1,36 @@
|
||||
use std::error::Error;
|
||||
|
||||
use rand::{thread_rng, Rng};
|
||||
use rand::{Rng, rng};
|
||||
|
||||
use tlsn_core::{
|
||||
connection::{ConnectionInfo, ServerEphemKey},
|
||||
hash::HashAlgId,
|
||||
transcript::TranscriptCommitment,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
attestation::{
|
||||
Attestation, AttestationConfig, Body, EncodingCommitment, FieldId, FieldKind, Header,
|
||||
ServerCertCommitment, VERSION,
|
||||
},
|
||||
connection::{ConnectionInfo, ServerEphemKey},
|
||||
hash::{HashAlgId, TypedHash},
|
||||
request::Request,
|
||||
serialize::CanonicalSerialize,
|
||||
Attestation, AttestationConfig, Body, CryptoProvider, Extension, FieldId, Header,
|
||||
ServerCertCommitment, VERSION, request::Request, serialize::CanonicalSerialize,
|
||||
signing::SignatureAlgId,
|
||||
CryptoProvider,
|
||||
};
|
||||
|
||||
/// Attestation builder state for accepting a request.
|
||||
#[derive(Debug)]
|
||||
pub struct Accept {}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Sign {
|
||||
signature_alg: SignatureAlgId,
|
||||
hash_alg: HashAlgId,
|
||||
connection_info: Option<ConnectionInfo>,
|
||||
server_ephemeral_key: Option<ServerEphemKey>,
|
||||
cert_commitment: ServerCertCommitment,
|
||||
encoding_commitment_root: Option<TypedHash>,
|
||||
encoding_seed: Option<Vec<u8>>,
|
||||
extensions: Vec<Extension>,
|
||||
transcript_commitments: Vec<TranscriptCommitment>,
|
||||
}
|
||||
|
||||
/// An attestation builder.
|
||||
#[derive(Debug)]
|
||||
pub struct AttestationBuilder<'a, T = Accept> {
|
||||
config: &'a AttestationConfig,
|
||||
state: T,
|
||||
@@ -54,7 +56,7 @@ impl<'a> AttestationBuilder<'a, Accept> {
|
||||
signature_alg,
|
||||
hash_alg,
|
||||
server_cert_commitment: cert_commitment,
|
||||
encoding_commitment_root,
|
||||
extensions,
|
||||
} = request;
|
||||
|
||||
if !config.supported_signature_algs().contains(&signature_alg) {
|
||||
@@ -71,15 +73,9 @@ impl<'a> AttestationBuilder<'a, Accept> {
|
||||
));
|
||||
}
|
||||
|
||||
if encoding_commitment_root.is_some()
|
||||
&& !config
|
||||
.supported_fields()
|
||||
.contains(&FieldKind::EncodingCommitment)
|
||||
{
|
||||
return Err(AttestationBuilderError::new(
|
||||
ErrorKind::Request,
|
||||
"encoding commitment is not supported",
|
||||
));
|
||||
if let Some(validator) = config.extension_validator() {
|
||||
validator(&extensions)
|
||||
.map_err(|err| AttestationBuilderError::new(ErrorKind::Extension, err))?;
|
||||
}
|
||||
|
||||
Ok(AttestationBuilder {
|
||||
@@ -90,8 +86,8 @@ impl<'a> AttestationBuilder<'a, Accept> {
|
||||
connection_info: None,
|
||||
server_ephemeral_key: None,
|
||||
cert_commitment,
|
||||
encoding_commitment_root,
|
||||
encoding_seed: None,
|
||||
transcript_commitments: Vec::new(),
|
||||
extensions,
|
||||
},
|
||||
})
|
||||
}
|
||||
@@ -110,9 +106,18 @@ impl AttestationBuilder<'_, Sign> {
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the encoding seed.
|
||||
pub fn encoding_seed(&mut self, seed: Vec<u8>) -> &mut Self {
|
||||
self.state.encoding_seed = Some(seed);
|
||||
/// Adds an extension to the attestation.
|
||||
pub fn extension(&mut self, extension: Extension) -> &mut Self {
|
||||
self.state.extensions.push(extension);
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the transcript commitments.
|
||||
pub fn transcript_commitments(
|
||||
&mut self,
|
||||
transcript_commitments: Vec<TranscriptCommitment>,
|
||||
) -> &mut Self {
|
||||
self.state.transcript_commitments = transcript_commitments;
|
||||
self
|
||||
}
|
||||
|
||||
@@ -124,8 +129,8 @@ impl AttestationBuilder<'_, Sign> {
|
||||
connection_info,
|
||||
server_ephemeral_key,
|
||||
cert_commitment,
|
||||
encoding_commitment_root,
|
||||
encoding_seed,
|
||||
extensions,
|
||||
transcript_commitments,
|
||||
} = self.state;
|
||||
|
||||
let hasher = provider.hash.get(&hash_alg).map_err(|_| {
|
||||
@@ -143,19 +148,6 @@ impl AttestationBuilder<'_, Sign> {
|
||||
)
|
||||
})?;
|
||||
|
||||
let encoding_commitment = if let Some(root) = encoding_commitment_root {
|
||||
let Some(seed) = encoding_seed else {
|
||||
return Err(AttestationBuilderError::new(
|
||||
ErrorKind::Field,
|
||||
"encoding commitment requested but seed was not set",
|
||||
));
|
||||
};
|
||||
|
||||
Some(EncodingCommitment { root, seed })
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let mut field_id = FieldId::default();
|
||||
|
||||
let body = Body {
|
||||
@@ -167,12 +159,18 @@ impl AttestationBuilder<'_, Sign> {
|
||||
AttestationBuilderError::new(ErrorKind::Field, "handshake data was not set")
|
||||
})?),
|
||||
cert_commitment: field_id.next(cert_commitment),
|
||||
encoding_commitment: encoding_commitment.map(|commitment| field_id.next(commitment)),
|
||||
plaintext_hashes: Default::default(),
|
||||
extensions: extensions
|
||||
.into_iter()
|
||||
.map(|extension| field_id.next(extension))
|
||||
.collect(),
|
||||
transcript_commitments: transcript_commitments
|
||||
.into_iter()
|
||||
.map(|commitment| field_id.next(commitment))
|
||||
.collect(),
|
||||
};
|
||||
|
||||
let header = Header {
|
||||
id: thread_rng().gen(),
|
||||
id: rng().random(),
|
||||
version: VERSION,
|
||||
root: body.root(hasher),
|
||||
};
|
||||
@@ -202,6 +200,7 @@ enum ErrorKind {
|
||||
Config,
|
||||
Field,
|
||||
Signature,
|
||||
Extension,
|
||||
}
|
||||
|
||||
impl AttestationBuilderError {
|
||||
@@ -228,10 +227,11 @@ impl std::fmt::Display for AttestationBuilderError {
|
||||
ErrorKind::Config => f.write_str("config error")?,
|
||||
ErrorKind::Field => f.write_str("field error")?,
|
||||
ErrorKind::Signature => f.write_str("signature error")?,
|
||||
ErrorKind::Extension => f.write_str("extension error")?,
|
||||
}
|
||||
|
||||
if let Some(source) = &self.source {
|
||||
write!(f, " caused by: {}", source)?;
|
||||
write!(f, " caused by: {source}")?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -241,68 +241,21 @@ impl std::fmt::Display for AttestationBuilderError {
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use rstest::{fixture, rstest};
|
||||
use tlsn_core::{
|
||||
connection::{CertBinding, CertBindingV1_2},
|
||||
fixtures::{ConnectionFixture, encoding_provider},
|
||||
hash::Blake3,
|
||||
transcript::Transcript,
|
||||
};
|
||||
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
|
||||
|
||||
use crate::{
|
||||
connection::{HandshakeData, HandshakeDataV1_2},
|
||||
fixtures::{encoder_seed, encoding_provider, ConnectionFixture},
|
||||
hash::Blake3,
|
||||
request::RequestConfig,
|
||||
transcript::{encoding::EncodingTree, Transcript, TranscriptCommitConfigBuilder},
|
||||
};
|
||||
use crate::fixtures::{RequestFixture, request_fixture};
|
||||
|
||||
use super::*;
|
||||
|
||||
fn request_and_connection() -> (Request, ConnectionFixture) {
|
||||
let provider = CryptoProvider::default();
|
||||
|
||||
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
|
||||
let (sent_len, recv_len) = transcript.len();
|
||||
// Plaintext encodings which the Prover obtained from GC evaluation
|
||||
let encodings_provider = encoding_provider(GET_WITH_HEADER, OK_JSON);
|
||||
|
||||
// At the end of the TLS connection the Prover holds the:
|
||||
let ConnectionFixture {
|
||||
server_name,
|
||||
server_cert_data,
|
||||
..
|
||||
} = ConnectionFixture::tlsnotary(transcript.length());
|
||||
|
||||
// Prover specifies the ranges it wants to commit to.
|
||||
let mut transcript_commitment_builder = TranscriptCommitConfigBuilder::new(&transcript);
|
||||
transcript_commitment_builder
|
||||
.commit_sent(&(0..sent_len))
|
||||
.unwrap()
|
||||
.commit_recv(&(0..recv_len))
|
||||
.unwrap();
|
||||
|
||||
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap();
|
||||
|
||||
// Prover constructs encoding tree.
|
||||
let encoding_tree = EncodingTree::new(
|
||||
&Blake3::default(),
|
||||
transcripts_commitment_config.iter_encoding(),
|
||||
&encodings_provider,
|
||||
&transcript.length(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let request_config = RequestConfig::default();
|
||||
let mut request_builder = Request::builder(&request_config);
|
||||
|
||||
request_builder
|
||||
.server_name(server_name.clone())
|
||||
.server_cert_data(server_cert_data)
|
||||
.transcript(transcript.clone())
|
||||
.encoding_tree(encoding_tree);
|
||||
let (request, _) = request_builder.build(&provider).unwrap();
|
||||
|
||||
(request, ConnectionFixture::tlsnotary(transcript.length()))
|
||||
}
|
||||
|
||||
#[fixture]
|
||||
#[once]
|
||||
fn default_attestation_config() -> AttestationConfig {
|
||||
fn attestation_config() -> AttestationConfig {
|
||||
AttestationConfig::builder()
|
||||
.supported_signature_algs([SignatureAlgId::SECP256K1])
|
||||
.build()
|
||||
@@ -319,7 +272,17 @@ mod test {
|
||||
|
||||
#[rstest]
|
||||
fn test_attestation_builder_accept_unsupported_signer() {
|
||||
let (request, _) = request_and_connection();
|
||||
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
|
||||
let connection = ConnectionFixture::tlsnotary(transcript.length());
|
||||
|
||||
let RequestFixture { request, .. } = request_fixture(
|
||||
transcript,
|
||||
encoding_provider(GET_WITH_HEADER, OK_JSON),
|
||||
connection,
|
||||
Blake3::default(),
|
||||
Vec::new(),
|
||||
);
|
||||
|
||||
let attestation_config = AttestationConfig::builder()
|
||||
.supported_signature_algs([SignatureAlgId::SECP256R1])
|
||||
.build()
|
||||
@@ -334,7 +297,16 @@ mod test {
|
||||
|
||||
#[rstest]
|
||||
fn test_attestation_builder_accept_unsupported_hasher() {
|
||||
let (request, _) = request_and_connection();
|
||||
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
|
||||
let connection = ConnectionFixture::tlsnotary(transcript.length());
|
||||
|
||||
let RequestFixture { request, .. } = request_fixture(
|
||||
transcript,
|
||||
encoding_provider(GET_WITH_HEADER, OK_JSON),
|
||||
connection,
|
||||
Blake3::default(),
|
||||
Vec::new(),
|
||||
);
|
||||
|
||||
let attestation_config = AttestationConfig::builder()
|
||||
.supported_signature_algs([SignatureAlgId::SECP256K1])
|
||||
@@ -350,121 +322,168 @@ mod test {
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_attestation_builder_accept_unsupported_encoding_commitment() {
|
||||
let (request, _) = request_and_connection();
|
||||
fn test_attestation_builder_sign_missing_signer(attestation_config: &AttestationConfig) {
|
||||
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
|
||||
let connection = ConnectionFixture::tlsnotary(transcript.length());
|
||||
|
||||
let attestation_config = AttestationConfig::builder()
|
||||
.supported_signature_algs([SignatureAlgId::SECP256K1])
|
||||
.supported_fields([
|
||||
FieldKind::ConnectionInfo,
|
||||
FieldKind::ServerEphemKey,
|
||||
FieldKind::ServerIdentityCommitment,
|
||||
])
|
||||
.build()
|
||||
.unwrap();
|
||||
let RequestFixture { request, .. } = request_fixture(
|
||||
transcript,
|
||||
encoding_provider(GET_WITH_HEADER, OK_JSON),
|
||||
connection,
|
||||
Blake3::default(),
|
||||
Vec::new(),
|
||||
);
|
||||
|
||||
let err = Attestation::builder(&attestation_config)
|
||||
let attestation_builder = Attestation::builder(attestation_config)
|
||||
.accept_request(request)
|
||||
.err()
|
||||
.unwrap();
|
||||
assert!(err.is_request());
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_attestation_builder_sign_missing_signer(
|
||||
default_attestation_config: &AttestationConfig,
|
||||
) {
|
||||
let (request, _) = request_and_connection();
|
||||
|
||||
let attestation_builder = Attestation::builder(default_attestation_config)
|
||||
.accept_request(request.clone())
|
||||
.unwrap();
|
||||
|
||||
let mut provider = CryptoProvider::default();
|
||||
provider.signer.set_secp256r1(&[42u8; 32]).unwrap();
|
||||
|
||||
let err = attestation_builder.build(&provider).err().unwrap();
|
||||
let err = attestation_builder.build(&provider).unwrap_err();
|
||||
assert!(matches!(err.kind, ErrorKind::Config));
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_attestation_builder_sign_missing_encoding_seed(
|
||||
default_attestation_config: &AttestationConfig,
|
||||
crypto_provider: &CryptoProvider,
|
||||
) {
|
||||
let (request, connection) = request_and_connection();
|
||||
|
||||
let mut attestation_builder = Attestation::builder(default_attestation_config)
|
||||
.accept_request(request.clone())
|
||||
.unwrap();
|
||||
|
||||
let ConnectionFixture {
|
||||
connection_info,
|
||||
server_cert_data,
|
||||
..
|
||||
} = connection;
|
||||
|
||||
let HandshakeData::V1_2(HandshakeDataV1_2 {
|
||||
server_ephemeral_key,
|
||||
..
|
||||
}) = server_cert_data.handshake.clone();
|
||||
|
||||
attestation_builder
|
||||
.connection_info(connection_info.clone())
|
||||
.server_ephemeral_key(server_ephemeral_key);
|
||||
|
||||
let err = attestation_builder.build(crypto_provider).err().unwrap();
|
||||
assert!(matches!(err.kind, ErrorKind::Field));
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_attestation_builder_sign_missing_server_ephemeral_key(
|
||||
default_attestation_config: &AttestationConfig,
|
||||
attestation_config: &AttestationConfig,
|
||||
crypto_provider: &CryptoProvider,
|
||||
) {
|
||||
let (request, connection) = request_and_connection();
|
||||
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
|
||||
let connection = ConnectionFixture::tlsnotary(transcript.length());
|
||||
|
||||
let mut attestation_builder = Attestation::builder(default_attestation_config)
|
||||
.accept_request(request.clone())
|
||||
let RequestFixture { request, .. } = request_fixture(
|
||||
transcript,
|
||||
encoding_provider(GET_WITH_HEADER, OK_JSON),
|
||||
connection.clone(),
|
||||
Blake3::default(),
|
||||
Vec::new(),
|
||||
);
|
||||
|
||||
let mut attestation_builder = Attestation::builder(attestation_config)
|
||||
.accept_request(request)
|
||||
.unwrap();
|
||||
|
||||
let ConnectionFixture {
|
||||
connection_info, ..
|
||||
} = connection;
|
||||
|
||||
attestation_builder
|
||||
.connection_info(connection_info.clone())
|
||||
.encoding_seed(encoder_seed().to_vec());
|
||||
attestation_builder.connection_info(connection_info);
|
||||
|
||||
let err = attestation_builder.build(crypto_provider).err().unwrap();
|
||||
let err = attestation_builder.build(crypto_provider).unwrap_err();
|
||||
assert!(matches!(err.kind, ErrorKind::Field));
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_attestation_builder_sign_missing_connection_info(
|
||||
default_attestation_config: &AttestationConfig,
|
||||
attestation_config: &AttestationConfig,
|
||||
crypto_provider: &CryptoProvider,
|
||||
) {
|
||||
let (request, connection) = request_and_connection();
|
||||
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
|
||||
let connection = ConnectionFixture::tlsnotary(transcript.length());
|
||||
|
||||
let mut attestation_builder = Attestation::builder(default_attestation_config)
|
||||
.accept_request(request.clone())
|
||||
let RequestFixture { request, .. } = request_fixture(
|
||||
transcript,
|
||||
encoding_provider(GET_WITH_HEADER, OK_JSON),
|
||||
connection.clone(),
|
||||
Blake3::default(),
|
||||
Vec::new(),
|
||||
);
|
||||
|
||||
let mut attestation_builder = Attestation::builder(attestation_config)
|
||||
.accept_request(request)
|
||||
.unwrap();
|
||||
|
||||
let ConnectionFixture {
|
||||
server_cert_data, ..
|
||||
} = connection;
|
||||
|
||||
let HandshakeData::V1_2(HandshakeDataV1_2 {
|
||||
let CertBinding::V1_2(CertBindingV1_2 {
|
||||
server_ephemeral_key,
|
||||
..
|
||||
}) = server_cert_data.handshake.clone();
|
||||
}) = server_cert_data.binding
|
||||
else {
|
||||
panic!("expected v1.2 handshake data");
|
||||
};
|
||||
|
||||
attestation_builder
|
||||
.server_ephemeral_key(server_ephemeral_key)
|
||||
.encoding_seed(encoder_seed().to_vec());
|
||||
attestation_builder.server_ephemeral_key(server_ephemeral_key);
|
||||
|
||||
let err = attestation_builder.build(crypto_provider).err().unwrap();
|
||||
let err = attestation_builder.build(crypto_provider).unwrap_err();
|
||||
assert!(matches!(err.kind, ErrorKind::Field));
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_attestation_builder_reject_extensions_by_default(
|
||||
attestation_config: &AttestationConfig,
|
||||
) {
|
||||
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
|
||||
let connection = ConnectionFixture::tlsnotary(transcript.length());
|
||||
|
||||
let RequestFixture { request, .. } = request_fixture(
|
||||
transcript,
|
||||
encoding_provider(GET_WITH_HEADER, OK_JSON),
|
||||
connection.clone(),
|
||||
Blake3::default(),
|
||||
vec![Extension {
|
||||
id: b"foo".to_vec(),
|
||||
value: b"bar".to_vec(),
|
||||
}],
|
||||
);
|
||||
|
||||
let err = Attestation::builder(attestation_config)
|
||||
.accept_request(request)
|
||||
.unwrap_err();
|
||||
|
||||
assert!(matches!(err.kind, ErrorKind::Extension));
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_attestation_builder_accept_extension(crypto_provider: &CryptoProvider) {
|
||||
let attestation_config = AttestationConfig::builder()
|
||||
.supported_signature_algs([SignatureAlgId::SECP256K1])
|
||||
.extension_validator(|_| Ok(()))
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
|
||||
let connection = ConnectionFixture::tlsnotary(transcript.length());
|
||||
|
||||
let RequestFixture { request, .. } = request_fixture(
|
||||
transcript,
|
||||
encoding_provider(GET_WITH_HEADER, OK_JSON),
|
||||
connection.clone(),
|
||||
Blake3::default(),
|
||||
vec![Extension {
|
||||
id: b"foo".to_vec(),
|
||||
value: b"bar".to_vec(),
|
||||
}],
|
||||
);
|
||||
|
||||
let mut attestation_builder = Attestation::builder(&attestation_config)
|
||||
.accept_request(request)
|
||||
.unwrap();
|
||||
|
||||
let ConnectionFixture {
|
||||
server_cert_data,
|
||||
connection_info,
|
||||
..
|
||||
} = connection;
|
||||
|
||||
let CertBinding::V1_2(CertBindingV1_2 {
|
||||
server_ephemeral_key,
|
||||
..
|
||||
}) = server_cert_data.binding
|
||||
else {
|
||||
panic!("expected v1.2 handshake data");
|
||||
};
|
||||
|
||||
attestation_builder
|
||||
.connection_info(connection_info)
|
||||
.server_ephemeral_key(server_ephemeral_key);
|
||||
|
||||
let attestation = attestation_builder.build(crypto_provider).unwrap();
|
||||
|
||||
assert_eq!(attestation.body.extensions().count(), 1);
|
||||
}
|
||||
}
|
||||
@@ -1,15 +1,12 @@
|
||||
use std::{fmt::Debug, sync::Arc};
|
||||
|
||||
use tlsn_core::hash::HashAlgId;
|
||||
|
||||
use crate::{
|
||||
attestation::FieldKind,
|
||||
hash::{HashAlgId, DEFAULT_SUPPORTED_HASH_ALGS},
|
||||
signing::SignatureAlgId,
|
||||
Extension, InvalidExtension, hash::DEFAULT_SUPPORTED_HASH_ALGS, signing::SignatureAlgId,
|
||||
};
|
||||
|
||||
const DEFAULT_SUPPORTED_FIELDS: &[FieldKind] = &[
|
||||
FieldKind::ConnectionInfo,
|
||||
FieldKind::ServerEphemKey,
|
||||
FieldKind::ServerIdentityCommitment,
|
||||
FieldKind::EncodingCommitment,
|
||||
];
|
||||
type ExtensionValidator = Arc<dyn Fn(&[Extension]) -> Result<(), InvalidExtension> + Send + Sync>;
|
||||
|
||||
#[derive(Debug)]
|
||||
#[allow(dead_code)]
|
||||
@@ -44,11 +41,11 @@ impl AttestationConfigError {
|
||||
}
|
||||
|
||||
/// Attestation configuration.
|
||||
#[derive(Debug, Clone)]
|
||||
#[derive(Clone)]
|
||||
pub struct AttestationConfig {
|
||||
supported_signature_algs: Vec<SignatureAlgId>,
|
||||
supported_hash_algs: Vec<HashAlgId>,
|
||||
supported_fields: Vec<FieldKind>,
|
||||
extension_validator: Option<ExtensionValidator>,
|
||||
}
|
||||
|
||||
impl AttestationConfig {
|
||||
@@ -65,17 +62,25 @@ impl AttestationConfig {
|
||||
&self.supported_hash_algs
|
||||
}
|
||||
|
||||
pub(crate) fn supported_fields(&self) -> &[FieldKind] {
|
||||
&self.supported_fields
|
||||
pub(crate) fn extension_validator(&self) -> Option<&ExtensionValidator> {
|
||||
self.extension_validator.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
impl Debug for AttestationConfig {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("AttestationConfig")
|
||||
.field("supported_signature_algs", &self.supported_signature_algs)
|
||||
.field("supported_hash_algs", &self.supported_hash_algs)
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
/// Builder for [`AttestationConfig`].
|
||||
#[derive(Debug)]
|
||||
pub struct AttestationConfigBuilder {
|
||||
supported_signature_algs: Vec<SignatureAlgId>,
|
||||
supported_hash_algs: Vec<HashAlgId>,
|
||||
supported_fields: Vec<FieldKind>,
|
||||
extension_validator: Option<ExtensionValidator>,
|
||||
}
|
||||
|
||||
impl Default for AttestationConfigBuilder {
|
||||
@@ -83,7 +88,15 @@ impl Default for AttestationConfigBuilder {
|
||||
Self {
|
||||
supported_signature_algs: Vec::default(),
|
||||
supported_hash_algs: DEFAULT_SUPPORTED_HASH_ALGS.to_vec(),
|
||||
supported_fields: DEFAULT_SUPPORTED_FIELDS.to_vec(),
|
||||
extension_validator: Some(Arc::new(|e| {
|
||||
if !e.is_empty() {
|
||||
Err(InvalidExtension::new(
|
||||
"all extensions are disallowed by default",
|
||||
))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
})),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -107,9 +120,26 @@ impl AttestationConfigBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the supported attestation fields.
|
||||
pub fn supported_fields(&mut self, supported_fields: impl Into<Vec<FieldKind>>) -> &mut Self {
|
||||
self.supported_fields = supported_fields.into();
|
||||
/// Sets the extension validator.
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// # use tlsn_attestation::{AttestationConfig, InvalidExtension};
|
||||
/// # let mut builder = AttestationConfig::builder();
|
||||
/// builder.extension_validator(|extensions| {
|
||||
/// for extension in extensions {
|
||||
/// if extension.id != b"example.type" {
|
||||
/// return Err(InvalidExtension::new("invalid extension type"));
|
||||
/// }
|
||||
/// }
|
||||
/// Ok(())
|
||||
/// });
|
||||
/// ```
|
||||
pub fn extension_validator<F>(&mut self, f: F) -> &mut Self
|
||||
where
|
||||
F: Fn(&[Extension]) -> Result<(), InvalidExtension> + Send + Sync + 'static,
|
||||
{
|
||||
self.extension_validator = Some(Arc::new(f));
|
||||
self
|
||||
}
|
||||
|
||||
@@ -118,7 +148,16 @@ impl AttestationConfigBuilder {
|
||||
Ok(AttestationConfig {
|
||||
supported_signature_algs: self.supported_signature_algs.clone(),
|
||||
supported_hash_algs: self.supported_hash_algs.clone(),
|
||||
supported_fields: self.supported_fields.clone(),
|
||||
extension_validator: self.extension_validator.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Debug for AttestationConfigBuilder {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("AttestationConfigBuilder")
|
||||
.field("supported_signature_algs", &self.supported_signature_algs)
|
||||
.field("supported_hash_algs", &self.supported_hash_algs)
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
149
crates/attestation/src/connection.rs
Normal file
149
crates/attestation/src/connection.rs
Normal file
@@ -0,0 +1,149 @@
|
||||
//! Types for committing details of a connection.
|
||||
//!
|
||||
//! ## Commitment
|
||||
//!
|
||||
//! During the TLS handshake the Notary receives the Server's ephemeral public
|
||||
//! key, and this key serves as a binding commitment to the identity of the
|
||||
//! Server. The ephemeral key itself does not reveal the Server's identity, but
|
||||
//! it is bound to it via a signature created using the Server's
|
||||
//! X.509 certificate.
|
||||
//!
|
||||
//! A Prover can withhold the Server's signature and certificate chain from the
|
||||
//! Notary to improve privacy and censorship resistance.
|
||||
//!
|
||||
//! ## Proving the Server's identity
|
||||
//!
|
||||
//! A Prover can prove the Server's identity to a Verifier by sending a
|
||||
//! [`ServerIdentityProof`]. This proof contains all the information required to
|
||||
//! establish the link between the TLS connection and the Server's X.509
|
||||
//! certificate. A Verifier checks the Server's certificate against their own
|
||||
//! trust anchors, the same way a typical TLS client would.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use tlsn_core::{
|
||||
connection::{HandshakeData, HandshakeVerificationError, ServerEphemKey, ServerName},
|
||||
hash::{Blinded, HashAlgorithm, HashProviderError, TypedHash},
|
||||
};
|
||||
|
||||
use crate::{CryptoProvider, hash::HashAlgorithmExt, serialize::impl_domain_separator};
|
||||
|
||||
/// Opens a [`ServerCertCommitment`].
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
pub struct ServerCertOpening(Blinded<HandshakeData>);
|
||||
|
||||
impl_domain_separator!(ServerCertOpening);
|
||||
|
||||
opaque_debug::implement!(ServerCertOpening);
|
||||
|
||||
impl ServerCertOpening {
|
||||
pub(crate) fn new(data: HandshakeData) -> Self {
|
||||
Self(Blinded::new(data))
|
||||
}
|
||||
|
||||
pub(crate) fn commit(&self, hasher: &dyn HashAlgorithm) -> ServerCertCommitment {
|
||||
ServerCertCommitment(TypedHash {
|
||||
alg: hasher.id(),
|
||||
value: hasher.hash_separated(self),
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns the server identity data.
|
||||
pub fn data(&self) -> &HandshakeData {
|
||||
self.0.data()
|
||||
}
|
||||
}
|
||||
|
||||
/// Commitment to a server certificate.
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct ServerCertCommitment(pub(crate) TypedHash);
|
||||
|
||||
impl_domain_separator!(ServerCertCommitment);
|
||||
|
||||
/// TLS server identity proof.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ServerIdentityProof {
|
||||
name: ServerName,
|
||||
opening: ServerCertOpening,
|
||||
}
|
||||
|
||||
impl ServerIdentityProof {
|
||||
pub(crate) fn new(name: ServerName, opening: ServerCertOpening) -> Self {
|
||||
Self { name, opening }
|
||||
}
|
||||
|
||||
/// Verifies the server identity proof.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `provider` - Crypto provider.
|
||||
/// * `time` - The time of the connection.
|
||||
/// * `server_ephemeral_key` - The server's ephemeral key.
|
||||
/// * `commitment` - Commitment to the server certificate.
|
||||
pub fn verify_with_provider(
|
||||
self,
|
||||
provider: &CryptoProvider,
|
||||
time: u64,
|
||||
server_ephemeral_key: &ServerEphemKey,
|
||||
commitment: &ServerCertCommitment,
|
||||
) -> Result<ServerName, ServerIdentityProofError> {
|
||||
let hasher = provider.hash.get(&commitment.0.alg)?;
|
||||
|
||||
if commitment.0.value != hasher.hash_separated(&self.opening) {
|
||||
return Err(ServerIdentityProofError {
|
||||
kind: ErrorKind::Commitment,
|
||||
message: "certificate opening does not match commitment".to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
// Verify certificate and identity.
|
||||
self.opening
|
||||
.data()
|
||||
.verify(&provider.cert, time, server_ephemeral_key, &self.name)?;
|
||||
|
||||
Ok(self.name)
|
||||
}
|
||||
}
|
||||
|
||||
/// Error for [`ServerIdentityProof`].
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("server identity proof error: {kind}: {message}")]
|
||||
pub struct ServerIdentityProofError {
|
||||
kind: ErrorKind,
|
||||
message: String,
|
||||
}
|
||||
|
||||
impl From<HashProviderError> for ServerIdentityProofError {
|
||||
fn from(err: HashProviderError) -> Self {
|
||||
Self {
|
||||
kind: ErrorKind::Provider,
|
||||
message: err.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<HandshakeVerificationError> for ServerIdentityProofError {
|
||||
fn from(err: HandshakeVerificationError) -> Self {
|
||||
Self {
|
||||
kind: ErrorKind::Certificate,
|
||||
message: err.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum ErrorKind {
|
||||
Provider,
|
||||
Commitment,
|
||||
Certificate,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for ErrorKind {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
ErrorKind::Provider => write!(f, "provider"),
|
||||
ErrorKind::Commitment => write!(f, "commitment"),
|
||||
ErrorKind::Certificate => write!(f, "certificate"),
|
||||
}
|
||||
}
|
||||
}
|
||||
35
crates/attestation/src/extension.rs
Normal file
35
crates/attestation/src/extension.rs
Normal file
@@ -0,0 +1,35 @@
|
||||
use std::error::Error;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::serialize::impl_domain_separator;
|
||||
|
||||
/// An attestation extension.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct Extension {
|
||||
/// Extension identifier.
|
||||
pub id: Vec<u8>,
|
||||
/// Extension data.
|
||||
pub value: Vec<u8>,
|
||||
}
|
||||
|
||||
impl_domain_separator!(Extension);
|
||||
|
||||
/// Invalid extension error.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("invalid extension: {reason}")]
|
||||
pub struct InvalidExtension {
|
||||
reason: Box<dyn Error + Send + Sync + 'static>,
|
||||
}
|
||||
|
||||
impl InvalidExtension {
|
||||
/// Creates a new invalid extension error.
|
||||
pub fn new<E>(reason: E) -> Self
|
||||
where
|
||||
E: Into<Box<dyn Error + Send + Sync + 'static>>,
|
||||
{
|
||||
Self {
|
||||
reason: reason.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
124
crates/attestation/src/fixtures.rs
Normal file
124
crates/attestation/src/fixtures.rs
Normal file
@@ -0,0 +1,124 @@
|
||||
//! Attestation fixtures.
|
||||
|
||||
use tlsn_core::{
|
||||
connection::{CertBinding, CertBindingV1_2},
|
||||
fixtures::ConnectionFixture,
|
||||
hash::HashAlgorithm,
|
||||
transcript::{
|
||||
Transcript, TranscriptCommitConfigBuilder, TranscriptCommitment,
|
||||
encoding::{EncodingProvider, EncodingTree},
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
Attestation, AttestationConfig, CryptoProvider, Extension,
|
||||
request::{Request, RequestConfig},
|
||||
signing::SignatureAlgId,
|
||||
};
|
||||
|
||||
/// A Request fixture used for testing.
|
||||
#[allow(missing_docs)]
|
||||
pub struct RequestFixture {
|
||||
pub encoding_tree: EncodingTree,
|
||||
pub request: Request,
|
||||
}
|
||||
|
||||
/// Returns a request fixture for testing.
|
||||
pub fn request_fixture(
|
||||
transcript: Transcript,
|
||||
encodings_provider: impl EncodingProvider,
|
||||
connection: ConnectionFixture,
|
||||
encoding_hasher: impl HashAlgorithm,
|
||||
extensions: Vec<Extension>,
|
||||
) -> RequestFixture {
|
||||
let provider = CryptoProvider::default();
|
||||
let (sent_len, recv_len) = transcript.len();
|
||||
|
||||
let ConnectionFixture {
|
||||
server_name,
|
||||
server_cert_data,
|
||||
..
|
||||
} = connection;
|
||||
|
||||
let mut transcript_commitment_builder = TranscriptCommitConfigBuilder::new(&transcript);
|
||||
transcript_commitment_builder
|
||||
.commit_sent(&(0..sent_len))
|
||||
.unwrap()
|
||||
.commit_recv(&(0..recv_len))
|
||||
.unwrap();
|
||||
let transcripts_commitment_config = transcript_commitment_builder.build().unwrap();
|
||||
|
||||
// Prover constructs encoding tree.
|
||||
let encoding_tree = EncodingTree::new(
|
||||
&encoding_hasher,
|
||||
transcripts_commitment_config.iter_encoding(),
|
||||
&encodings_provider,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let mut builder = RequestConfig::builder();
|
||||
|
||||
for extension in extensions {
|
||||
builder.extension(extension);
|
||||
}
|
||||
|
||||
let request_config = builder.build().unwrap();
|
||||
|
||||
let mut request_builder = Request::builder(&request_config);
|
||||
request_builder
|
||||
.server_name(server_name)
|
||||
.handshake_data(server_cert_data)
|
||||
.transcript(transcript);
|
||||
|
||||
let (request, _) = request_builder.build(&provider).unwrap();
|
||||
|
||||
RequestFixture {
|
||||
encoding_tree,
|
||||
request,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns an attestation fixture for testing.
|
||||
pub fn attestation_fixture(
|
||||
request: Request,
|
||||
connection: ConnectionFixture,
|
||||
signature_alg: SignatureAlgId,
|
||||
transcript_commitments: &[TranscriptCommitment],
|
||||
) -> Attestation {
|
||||
let ConnectionFixture {
|
||||
connection_info,
|
||||
server_cert_data,
|
||||
..
|
||||
} = connection;
|
||||
|
||||
let CertBinding::V1_2(CertBindingV1_2 {
|
||||
server_ephemeral_key,
|
||||
..
|
||||
}) = server_cert_data.binding
|
||||
else {
|
||||
panic!("expected v1.2 binding data");
|
||||
};
|
||||
|
||||
let mut provider = CryptoProvider::default();
|
||||
match signature_alg {
|
||||
SignatureAlgId::SECP256K1 => provider.signer.set_secp256k1(&[42u8; 32]).unwrap(),
|
||||
SignatureAlgId::SECP256R1 => provider.signer.set_secp256r1(&[42u8; 32]).unwrap(),
|
||||
_ => unimplemented!(),
|
||||
};
|
||||
|
||||
let attestation_config = AttestationConfig::builder()
|
||||
.supported_signature_algs([signature_alg])
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let mut attestation_builder = Attestation::builder(&attestation_config)
|
||||
.accept_request(request)
|
||||
.unwrap();
|
||||
|
||||
attestation_builder
|
||||
.connection_info(connection_info)
|
||||
.server_ephemeral_key(server_ephemeral_key)
|
||||
.transcript_commitments(transcript_commitments.to_vec());
|
||||
|
||||
attestation_builder.build(&provider).unwrap()
|
||||
}
|
||||
19
crates/attestation/src/hash.rs
Normal file
19
crates/attestation/src/hash.rs
Normal file
@@ -0,0 +1,19 @@
|
||||
use tlsn_core::hash::{Hash, HashAlgId, HashAlgorithm};
|
||||
|
||||
use crate::serialize::{CanonicalSerialize, DomainSeparator};
|
||||
|
||||
pub(crate) const DEFAULT_SUPPORTED_HASH_ALGS: &[HashAlgId] =
|
||||
&[HashAlgId::SHA256, HashAlgId::BLAKE3, HashAlgId::KECCAK256];
|
||||
|
||||
pub(crate) trait HashAlgorithmExt: HashAlgorithm {
|
||||
#[allow(dead_code)]
|
||||
fn hash_canonical<T: CanonicalSerialize>(&self, data: &T) -> Hash {
|
||||
self.hash(&data.serialize())
|
||||
}
|
||||
|
||||
fn hash_separated<T: DomainSeparator + CanonicalSerialize>(&self, data: &T) -> Hash {
|
||||
self.hash_prefixed(data.domain(), &data.serialize())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: HashAlgorithm + ?Sized> HashAlgorithmExt for T {}
|
||||
449
crates/attestation/src/lib.rs
Normal file
449
crates/attestation/src/lib.rs
Normal file
@@ -0,0 +1,449 @@
|
||||
//! TLSNotary attestation types.
|
||||
//!
|
||||
//! # Introduction
|
||||
//!
|
||||
//! This library provides core functionality for TLSNotary **attestations**.
|
||||
//!
|
||||
//! Once the TLS commitment protocol has been completed the Prover holds a
|
||||
//! collection of commitments pertaining to the TLS connection. Most
|
||||
//! importantly, the Prover is committed to the
|
||||
//! [`ServerName`](tlsn_core::connection::ServerName),
|
||||
//! and the [`Transcript`](tlsn_core::transcript::Transcript) of application
|
||||
//! data. Subsequently, the Prover can request an [`Attestation`] from the
|
||||
//! Notary who will include the commitments as well as any additional
|
||||
//! information which may be useful to an attestation Verifier.
|
||||
//!
|
||||
//! Holding an attestation, the Prover can construct a
|
||||
//! [`Presentation`](crate::presentation::Presentation) which facilitates
|
||||
//! selectively disclosing various aspects of the TLS connection to a Verifier.
|
||||
//! If the Verifier trusts the Notary, or more specifically the verifying key of
|
||||
//! the attestation, then the Verifier can trust the authenticity of the
|
||||
//! information disclosed in the presentation.
|
||||
//!
|
||||
//! **Be sure to check out the various submodules for more information.**
|
||||
//!
|
||||
//! # Structure
|
||||
//!
|
||||
//! An attestation is a cryptographically signed document issued by a Notary who
|
||||
//! witnessed a TLS connection. It contains various fields which can be used to
|
||||
//! verify statements about the connection and the associated application data.
|
||||
//!
|
||||
//! Attestations are comprised of two parts: a [`Header`] and a [`Body`].
|
||||
//!
|
||||
//! The header is the data structure which is signed by a Notary. It
|
||||
//! contains a unique identifier, the protocol version, and a Merkle root
|
||||
//! of the body fields.
|
||||
//!
|
||||
//! The body contains the fields of the attestation. These fields include data
|
||||
//! which can be used to verify aspects of a TLS connection, such as the
|
||||
//! server's identity, and facts about the transcript.
|
||||
//!
|
||||
//! # Extensions
|
||||
//!
|
||||
//! An attestation may be extended using [`Extension`] fields included in the
|
||||
//! body. Extensions (currently) have no canonical semantics, but may be used to
|
||||
//! implement application specific functionality.
|
||||
//!
|
||||
//! A Prover may [append
|
||||
//! extensions](crate::request::RequestConfigBuilder::extension)
|
||||
//! to their attestation request, provided that the Notary supports them
|
||||
//! (disallowed by default). A Notary may also be configured to
|
||||
//! [validate](crate::AttestationConfigBuilder::extension_validator)
|
||||
//! any extensions requested by a Prover using custom application logic.
|
||||
//! Additionally, a Notary may
|
||||
//! [include](crate::AttestationBuilder::extension)
|
||||
//! their own extensions.
|
||||
//!
|
||||
//! # Committing to the transcript
|
||||
//!
|
||||
//! The TLS commitment protocol produces commitments to the entire transcript of
|
||||
//! application data. However, we may want to disclose only a subset of the data
|
||||
//! in a presentation. Prior to attestation, the Prover has the opportunity to
|
||||
//! slice and dice the commitments into smaller sections which can be
|
||||
//! selectively disclosed. Additionally, the Prover may want to use different
|
||||
//! commitment schemes depending on the context they expect to disclose.
|
||||
//!
|
||||
//! The primary API for this process is the
|
||||
//! [`TranscriptCommitConfigBuilder`](tlsn_core::transcript::TranscriptCommitConfigBuilder)
|
||||
//! which is used to build up a configuration.
|
||||
//!
|
||||
//! ```no_run
|
||||
//! # use tlsn_core::transcript::{TranscriptCommitConfigBuilder, Transcript, Direction};
|
||||
//! # use tlsn_core::hash::HashAlgId;
|
||||
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
//! # let transcript: Transcript = unimplemented!();
|
||||
//! let (sent_len, recv_len) = transcript.len();
|
||||
//!
|
||||
//! // Create a new configuration builder.
|
||||
//! let mut builder = TranscriptCommitConfigBuilder::new(&transcript);
|
||||
//!
|
||||
//! // Specify all the transcript commitments we want to make.
|
||||
//! builder
|
||||
//! // Use BLAKE3 for encoding commitments.
|
||||
//! .encoding_hash_alg(HashAlgId::BLAKE3)
|
||||
//! // Commit to all sent data.
|
||||
//! .commit_sent(&(0..sent_len))?
|
||||
//! // Commit to the first 10 bytes of sent data.
|
||||
//! .commit_sent(&(0..10))?
|
||||
//! // Skip some bytes so it can be omitted in the presentation.
|
||||
//! .commit_sent(&(20..sent_len))?
|
||||
//! // Commit to all received data.
|
||||
//! .commit_recv(&(0..recv_len))?;
|
||||
//!
|
||||
//! let config = builder.build()?;
|
||||
//! # Ok(())
|
||||
//! # }
|
||||
//! ```
|
||||
//!
|
||||
//! # Requesting an attestation
|
||||
//!
|
||||
//! The first step in the attestation protocol is for the Prover to make a
|
||||
//! [`Request`](crate::request::Request), which can be configured using the
|
||||
//! associated [builder](crate::request::RequestConfigBuilder). With it the
|
||||
//! Prover can configure some of the details of the attestation, such as which
|
||||
//! cryptographic algorithms are used (if the Notary supports them).
|
||||
//!
|
||||
//! The Prover may also request for extensions to be added to the attestation,
|
||||
//! see [here](#extensions) for more information.
|
||||
//!
|
||||
//! Upon being issued an attestation, the Prover will also hold a corresponding
|
||||
//! [`Secrets`] which contains all private information. This pair can be stored
|
||||
//! and used later to construct a
|
||||
//! [`Presentation`](crate::presentation::Presentation), [see
|
||||
//! below](#constructing-a-presentation).
|
||||
//!
|
||||
//! # Issuing an attestation
|
||||
//!
|
||||
//! Upon receiving a request, the Notary can issue an [`Attestation`] which can
|
||||
//! be configured using the associated
|
||||
//! [builder](crate::AttestationConfigBuilder).
|
||||
//!
|
||||
//! The Notary's [`CryptoProvider`] must be configured with an appropriate
|
||||
//! signing key for attestations. See
|
||||
//! [`SignerProvider`](crate::signing::SignerProvider) for more information.
|
||||
//!
|
||||
//! # Constructing a presentation
|
||||
//!
|
||||
//! A Prover can use an [`Attestation`] and the corresponding [`Secrets`] to
|
||||
//! construct a verifiable [`Presentation`](crate::presentation::Presentation).
|
||||
//!
|
||||
//! ```no_run
|
||||
//! # use tlsn_attestation::{Attestation, CryptoProvider, Secrets, presentation::Presentation};
|
||||
//! # use tlsn_core::transcript::{TranscriptCommitmentKind, Direction};
|
||||
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
//! # let attestation: Attestation = unimplemented!();
|
||||
//! # let secrets: Secrets = unimplemented!();
|
||||
//! # let crypto_provider: CryptoProvider = unimplemented!();
|
||||
//! let (_sent_len, recv_len) = secrets.transcript().len();
|
||||
//!
|
||||
//! // First, we decide which application data we would like to disclose.
|
||||
//! let mut builder = secrets.transcript_proof_builder();
|
||||
//!
|
||||
//! builder
|
||||
//! // Use transcript encoding commitments.
|
||||
//! .commitment_kinds(&[TranscriptCommitmentKind::Encoding])
|
||||
//! // Disclose the first 10 bytes of the sent data.
|
||||
//! .reveal(&(0..10), Direction::Sent)?
|
||||
//! // Disclose all of the received data.
|
||||
//! .reveal(&(0..recv_len), Direction::Received)?;
|
||||
//!
|
||||
//! let transcript_proof = builder.build()?;
|
||||
//!
|
||||
//! // Most cases we will also disclose the server identity.
|
||||
//! let identity_proof = secrets.identity_proof();
|
||||
//!
|
||||
//! // Now we can construct the presentation.
|
||||
//! let mut builder = attestation.presentation_builder(&crypto_provider);
|
||||
//!
|
||||
//! builder
|
||||
//! .identity_proof(identity_proof)
|
||||
//! .transcript_proof(transcript_proof);
|
||||
//!
|
||||
//! // Finally, we build the presentation. Send it to a verifier!
|
||||
//! let presentation: Presentation = builder.build()?;
|
||||
//! # Ok(())
|
||||
//! # }
|
||||
//! ```
|
||||
//!
|
||||
//! # Verifying a presentation
|
||||
//!
|
||||
//! Verifying a presentation is as simple as checking the verifier trusts the
|
||||
//! verifying key then calling
|
||||
//! [`Presentation::verify`](crate::presentation::Presentation::verify).
|
||||
//!
|
||||
//! ```no_run
|
||||
//! # use tlsn_attestation::{CryptoProvider, presentation::{Presentation, PresentationOutput}, signing::VerifyingKey};
|
||||
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
//! # let presentation: Presentation = unimplemented!();
|
||||
//! # let trusted_key: VerifyingKey = unimplemented!();
|
||||
//! # let crypto_provider: CryptoProvider = unimplemented!();
|
||||
//! // Assert that we trust the verifying key.
|
||||
//! assert_eq!(presentation.verifying_key(), &trusted_key);
|
||||
//!
|
||||
//! let PresentationOutput {
|
||||
//! attestation,
|
||||
//! server_name,
|
||||
//! connection_info,
|
||||
//! transcript,
|
||||
//! ..
|
||||
//! } = presentation.verify(&crypto_provider)?;
|
||||
//! # Ok(())
|
||||
//! # }
|
||||
//! ```
|
||||
|
||||
#![deny(missing_docs, unreachable_pub, unused_must_use)]
|
||||
#![deny(clippy::all)]
|
||||
#![forbid(unsafe_code)]
|
||||
|
||||
mod builder;
|
||||
mod config;
|
||||
pub mod connection;
|
||||
mod extension;
|
||||
#[cfg(any(test, feature = "fixtures"))]
|
||||
pub mod fixtures;
|
||||
pub(crate) mod hash;
|
||||
pub mod presentation;
|
||||
mod proof;
|
||||
mod provider;
|
||||
pub mod request;
|
||||
mod secrets;
|
||||
pub(crate) mod serialize;
|
||||
pub mod signing;
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use rand::distr::{Distribution, StandardUniform};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use tlsn_core::{
|
||||
connection::{ConnectionInfo, ServerEphemKey},
|
||||
hash::{Hash, HashAlgorithm, TypedHash},
|
||||
merkle::MerkleTree,
|
||||
transcript::TranscriptCommitment,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
connection::ServerCertCommitment,
|
||||
hash::HashAlgorithmExt,
|
||||
presentation::PresentationBuilder,
|
||||
serialize::impl_domain_separator,
|
||||
signing::{Signature, VerifyingKey},
|
||||
};
|
||||
|
||||
pub use builder::{AttestationBuilder, AttestationBuilderError};
|
||||
pub use config::{AttestationConfig, AttestationConfigBuilder, AttestationConfigError};
|
||||
pub use extension::{Extension, InvalidExtension};
|
||||
pub use proof::{AttestationError, AttestationProof};
|
||||
pub use provider::CryptoProvider;
|
||||
pub use secrets::Secrets;
|
||||
/// Current version of attestations.
|
||||
pub const VERSION: Version = Version(0);
|
||||
|
||||
/// Unique identifier for an attestation.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
pub struct Uid(pub [u8; 16]);
|
||||
|
||||
impl From<[u8; 16]> for Uid {
|
||||
fn from(id: [u8; 16]) -> Self {
|
||||
Self(id)
|
||||
}
|
||||
}
|
||||
|
||||
impl Distribution<Uid> for StandardUniform {
|
||||
fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> Uid {
|
||||
Uid(self.sample(rng))
|
||||
}
|
||||
}
|
||||
|
||||
/// Version of an attestation.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
pub struct Version(u32);
|
||||
|
||||
impl_domain_separator!(Version);
|
||||
|
||||
/// Public attestation field.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Field<T> {
|
||||
/// Identifier of the field.
|
||||
pub id: FieldId,
|
||||
/// Field data.
|
||||
pub data: T,
|
||||
}
|
||||
|
||||
/// Identifier for a field.
|
||||
#[derive(
|
||||
Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize,
|
||||
)]
|
||||
pub struct FieldId(pub u32);
|
||||
|
||||
impl FieldId {
|
||||
pub(crate) fn next<T>(&mut self, data: T) -> Field<T> {
|
||||
let id = *self;
|
||||
self.0 += 1;
|
||||
|
||||
Field { id, data }
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for FieldId {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
/// Kind of an attestation field.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
#[repr(u8)]
|
||||
pub enum FieldKind {
|
||||
/// Connection information.
|
||||
ConnectionInfo = 0x01,
|
||||
/// Server ephemeral key.
|
||||
ServerEphemKey = 0x02,
|
||||
/// Server identity commitment.
|
||||
ServerIdentityCommitment = 0x03,
|
||||
/// Encoding commitment.
|
||||
EncodingCommitment = 0x04,
|
||||
/// Plaintext hash commitment.
|
||||
PlaintextHash = 0x05,
|
||||
}
|
||||
|
||||
/// Attestation header.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct Header {
|
||||
/// An identifier for the attestation.
|
||||
pub id: Uid,
|
||||
/// Version of the attestation.
|
||||
pub version: Version,
|
||||
/// Merkle root of the attestation fields.
|
||||
pub root: TypedHash,
|
||||
}
|
||||
|
||||
impl_domain_separator!(Header);
|
||||
|
||||
/// Attestation body.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Body {
|
||||
verifying_key: Field<VerifyingKey>,
|
||||
connection_info: Field<ConnectionInfo>,
|
||||
server_ephemeral_key: Field<ServerEphemKey>,
|
||||
cert_commitment: Field<ServerCertCommitment>,
|
||||
extensions: Vec<Field<Extension>>,
|
||||
transcript_commitments: Vec<Field<TranscriptCommitment>>,
|
||||
}
|
||||
|
||||
impl Body {
|
||||
/// Returns an iterator over the extensions.
|
||||
pub fn extensions(&self) -> impl Iterator<Item = &Extension> {
|
||||
self.extensions.iter().map(|field| &field.data)
|
||||
}
|
||||
|
||||
/// Returns the attestation verifying key.
|
||||
pub fn verifying_key(&self) -> &VerifyingKey {
|
||||
&self.verifying_key.data
|
||||
}
|
||||
|
||||
/// Computes the Merkle root of the attestation fields.
|
||||
///
|
||||
/// This is only used when building an attestation.
|
||||
pub(crate) fn root(&self, hasher: &dyn HashAlgorithm) -> TypedHash {
|
||||
let mut tree = MerkleTree::new(hasher.id());
|
||||
let fields = self
|
||||
.hash_fields(hasher)
|
||||
.into_iter()
|
||||
.map(|(_, hash)| hash)
|
||||
.collect::<Vec<_>>();
|
||||
tree.insert(hasher, fields);
|
||||
tree.root()
|
||||
}
|
||||
|
||||
/// Returns the fields of the body hashed and sorted by id.
|
||||
///
|
||||
/// Each field is hashed with a domain separator to mitigate type confusion
|
||||
/// attacks.
|
||||
///
|
||||
/// # Note
|
||||
///
|
||||
/// The order of fields is not stable across versions.
|
||||
pub(crate) fn hash_fields(&self, hasher: &dyn HashAlgorithm) -> Vec<(FieldId, Hash)> {
|
||||
// CRITICAL: ensure all fields are included! If a new field is added to the
|
||||
// struct without including it here, it will not be included in the attestation.
|
||||
let Self {
|
||||
verifying_key,
|
||||
connection_info: conn_info,
|
||||
server_ephemeral_key,
|
||||
cert_commitment,
|
||||
extensions,
|
||||
transcript_commitments,
|
||||
} = self;
|
||||
|
||||
let mut fields: Vec<(FieldId, Hash)> = vec![
|
||||
(verifying_key.id, hasher.hash_separated(&verifying_key.data)),
|
||||
(conn_info.id, hasher.hash_separated(&conn_info.data)),
|
||||
(
|
||||
server_ephemeral_key.id,
|
||||
hasher.hash_separated(&server_ephemeral_key.data),
|
||||
),
|
||||
(
|
||||
cert_commitment.id,
|
||||
hasher.hash_separated(&cert_commitment.data),
|
||||
),
|
||||
];
|
||||
|
||||
for field in extensions.iter() {
|
||||
fields.push((field.id, hasher.hash_separated(&field.data)));
|
||||
}
|
||||
|
||||
for field in transcript_commitments.iter() {
|
||||
fields.push((field.id, hasher.hash_separated(&field.data)));
|
||||
}
|
||||
|
||||
fields.sort_by_key(|(id, _)| *id);
|
||||
fields
|
||||
}
|
||||
|
||||
/// Returns the connection information.
|
||||
pub(crate) fn connection_info(&self) -> &ConnectionInfo {
|
||||
&self.connection_info.data
|
||||
}
|
||||
|
||||
/// Returns the server's ephemeral public key.
|
||||
pub(crate) fn server_ephemeral_key(&self) -> &ServerEphemKey {
|
||||
&self.server_ephemeral_key.data
|
||||
}
|
||||
|
||||
/// Returns the commitment to a server certificate.
|
||||
pub(crate) fn cert_commitment(&self) -> &ServerCertCommitment {
|
||||
&self.cert_commitment.data
|
||||
}
|
||||
|
||||
/// Returns the transcript commitments.
|
||||
pub(crate) fn transcript_commitments(&self) -> impl Iterator<Item = &TranscriptCommitment> {
|
||||
self.transcript_commitments.iter().map(|field| &field.data)
|
||||
}
|
||||
}
|
||||
|
||||
/// An attestation document.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Attestation {
|
||||
/// The signature of the attestation.
|
||||
pub signature: Signature,
|
||||
/// The attestation header.
|
||||
pub header: Header,
|
||||
/// The attestation body.
|
||||
pub body: Body,
|
||||
}
|
||||
|
||||
impl Attestation {
|
||||
/// Returns an attestation builder.
|
||||
pub fn builder(config: &AttestationConfig) -> AttestationBuilder<'_> {
|
||||
AttestationBuilder::new(config)
|
||||
}
|
||||
|
||||
/// Returns a presentation builder.
|
||||
pub fn presentation_builder<'a>(
|
||||
&'a self,
|
||||
provider: &'a CryptoProvider,
|
||||
) -> PresentationBuilder<'a> {
|
||||
PresentationBuilder::new(provider, self)
|
||||
}
|
||||
}
|
||||
@@ -26,12 +26,15 @@ use std::fmt;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
attestation::{Attestation, AttestationError, AttestationProof},
|
||||
connection::{ConnectionInfo, ServerIdentityProof, ServerIdentityProofError, ServerName},
|
||||
signing::VerifyingKey,
|
||||
use tlsn_core::{
|
||||
connection::{ConnectionInfo, ServerName},
|
||||
transcript::{PartialTranscript, TranscriptProof, TranscriptProofError},
|
||||
CryptoProvider,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
Attestation, AttestationError, AttestationProof, CryptoProvider, Extension,
|
||||
connection::{ServerIdentityProof, ServerIdentityProofError},
|
||||
signing::VerifyingKey,
|
||||
};
|
||||
|
||||
/// A verifiable presentation.
|
||||
@@ -84,16 +87,25 @@ impl Presentation {
|
||||
.transpose()?;
|
||||
|
||||
let transcript = transcript
|
||||
.map(|transcript| transcript.verify_with_provider(provider, &attestation.body))
|
||||
.map(|transcript| {
|
||||
transcript.verify_with_provider(
|
||||
&provider.hash,
|
||||
&attestation.body.connection_info().transcript_length,
|
||||
attestation.body.transcript_commitments(),
|
||||
)
|
||||
})
|
||||
.transpose()?;
|
||||
|
||||
let connection_info = attestation.body.connection_info().clone();
|
||||
|
||||
let extensions = attestation.body.extensions().cloned().collect();
|
||||
|
||||
Ok(PresentationOutput {
|
||||
attestation,
|
||||
server_name,
|
||||
connection_info,
|
||||
transcript,
|
||||
extensions,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -110,6 +122,8 @@ pub struct PresentationOutput {
|
||||
pub connection_info: ConnectionInfo,
|
||||
/// Authenticated transcript data.
|
||||
pub transcript: Option<PartialTranscript>,
|
||||
/// Extensions.
|
||||
pub extensions: Vec<Extension>,
|
||||
}
|
||||
|
||||
/// Builder for [`Presentation`].
|
||||
@@ -175,7 +189,7 @@ impl fmt::Display for PresentationBuilderError {
|
||||
}
|
||||
|
||||
if let Some(source) = &self.source {
|
||||
write!(f, " caused by: {}", source)?;
|
||||
write!(f, " caused by: {source}")?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -216,7 +230,7 @@ impl fmt::Display for PresentationError {
|
||||
}
|
||||
|
||||
if let Some(source) = &self.source {
|
||||
write!(f, " caused by: {}", source)?;
|
||||
write!(f, " caused by: {source}")?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -2,13 +2,15 @@ use std::fmt;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
attestation::{Attestation, Body, Header},
|
||||
use tlsn_core::{
|
||||
hash::HashAlgorithm,
|
||||
merkle::{MerkleProof, MerkleTree},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
Attestation, Body, CryptoProvider, Header,
|
||||
serialize::CanonicalSerialize,
|
||||
signing::{Signature, VerifyingKey},
|
||||
CryptoProvider,
|
||||
};
|
||||
|
||||
/// Proof of an attestation.
|
||||
@@ -55,7 +57,7 @@ impl AttestationProof {
|
||||
.get(&self.signature.alg)
|
||||
.map_err(|e| AttestationError::new(ErrorKind::Provider, e))?;
|
||||
|
||||
// Verify body corresponding to the header.
|
||||
// Verify that the body is corresponding to the header.
|
||||
let body = self.body.verify_with_provider(provider, &self.header)?;
|
||||
|
||||
// Verify signature of the header.
|
||||
@@ -79,12 +81,15 @@ impl AttestationProof {
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub(crate) struct BodyProof {
|
||||
body: Body,
|
||||
/// A proof of inclusion of a subset of fields in the `body`.
|
||||
// Currently, proves the inclusion of all fields.
|
||||
proof: MerkleProof,
|
||||
}
|
||||
|
||||
impl BodyProof {
|
||||
/// Returns a new body proof.
|
||||
// TODO: Support including a subset of fields instead of the entire body.
|
||||
// TODO: Support creating a proof for a subset of fields instead of the entire
|
||||
// body.
|
||||
pub(crate) fn new(
|
||||
hasher: &dyn HashAlgorithm,
|
||||
body: Body,
|
||||
@@ -162,7 +167,7 @@ impl fmt::Display for AttestationError {
|
||||
}
|
||||
|
||||
if let Some(source) = &self.source {
|
||||
write!(f, " caused by: {}", source)?;
|
||||
write!(f, " caused by: {source}")?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -1,12 +1,6 @@
|
||||
use tls_core::{
|
||||
anchors::{OwnedTrustAnchor, RootCertStore},
|
||||
verify::WebPkiVerifier,
|
||||
};
|
||||
use tlsn_core::{hash::HashProvider, webpki::ServerCertVerifier};
|
||||
|
||||
use crate::{
|
||||
hash::HashProvider,
|
||||
signing::{SignatureVerifierProvider, SignerProvider},
|
||||
};
|
||||
use crate::signing::{SignatureVerifierProvider, SignerProvider};
|
||||
|
||||
/// Cryptography provider.
|
||||
///
|
||||
@@ -17,7 +11,7 @@ use crate::{
|
||||
/// implementations.
|
||||
///
|
||||
/// Algorithms are uniquely identified using an 8-bit ID, eg.
|
||||
/// [`HashAlgId`](crate::hash::HashAlgId), half of which is reserved for the
|
||||
/// [`HashAlgId`](tlsn_core::hash::HashAlgId), half of which is reserved for the
|
||||
/// officially supported algorithms. If you think that a new algorithm should be
|
||||
/// added to the official set, please open an issue. Beware that other parties
|
||||
/// may assign different algorithms to the same ID as you, and we make no effort
|
||||
@@ -30,7 +24,7 @@ pub struct CryptoProvider {
|
||||
/// This is used to verify the server's certificate chain.
|
||||
///
|
||||
/// The default verifier uses the Mozilla root certificates.
|
||||
pub cert: WebPkiVerifier,
|
||||
pub cert: ServerCertVerifier,
|
||||
/// Signer provider.
|
||||
///
|
||||
/// This is used for signing attestations.
|
||||
@@ -47,21 +41,9 @@ impl Default for CryptoProvider {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
hash: Default::default(),
|
||||
cert: default_cert_verifier(),
|
||||
cert: ServerCertVerifier::mozilla(),
|
||||
signer: Default::default(),
|
||||
signature: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn default_cert_verifier() -> WebPkiVerifier {
|
||||
let mut root_store = RootCertStore::empty();
|
||||
root_store.add_server_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| {
|
||||
OwnedTrustAnchor::from_subject_spki_name_constraints(
|
||||
ta.subject.as_ref(),
|
||||
ta.subject_public_key_info.as_ref(),
|
||||
ta.name_constraints.as_ref().map(|nc| nc.as_ref()),
|
||||
)
|
||||
}));
|
||||
WebPkiVerifier::new(root_store, None)
|
||||
}
|
||||
194
crates/attestation/src/request.rs
Normal file
194
crates/attestation/src/request.rs
Normal file
@@ -0,0 +1,194 @@
|
||||
//! Attestation requests.
|
||||
//!
|
||||
//! After the TLS connection, a Prover can request an attestation from the
|
||||
//! Notary which contains various information about the connection. During this
|
||||
//! process the Prover has the opportunity to configure certain aspects of the
|
||||
//! attestation, such as which signature algorithm the Notary should use to sign
|
||||
//! the attestation. Or which hash algorithm the Notary should use to merkelize
|
||||
//! the fields.
|
||||
//!
|
||||
//! A [`Request`] can be created using a [`RequestBuilder`]. The builder will
|
||||
//! take both configuration via a [`RequestConfig`] as well as the Prover's
|
||||
//! secret data. The [`Secrets`](crate::Secrets) are of course not shared with
|
||||
//! the Notary but are used to create commitments which are included in the
|
||||
//! attestation.
|
||||
|
||||
mod builder;
|
||||
mod config;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use tlsn_core::hash::HashAlgId;
|
||||
|
||||
use crate::{Attestation, Extension, connection::ServerCertCommitment, signing::SignatureAlgId};
|
||||
|
||||
pub use builder::{RequestBuilder, RequestBuilderError};
|
||||
pub use config::{RequestConfig, RequestConfigBuilder, RequestConfigBuilderError};
|
||||
|
||||
/// Attestation request.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Request {
|
||||
pub(crate) signature_alg: SignatureAlgId,
|
||||
pub(crate) hash_alg: HashAlgId,
|
||||
pub(crate) server_cert_commitment: ServerCertCommitment,
|
||||
pub(crate) extensions: Vec<Extension>,
|
||||
}
|
||||
|
||||
impl Request {
|
||||
/// Returns a new request builder.
|
||||
pub fn builder(config: &RequestConfig) -> RequestBuilder<'_> {
|
||||
RequestBuilder::new(config)
|
||||
}
|
||||
|
||||
/// Validates the content of the attestation against this request.
|
||||
pub fn validate(&self, attestation: &Attestation) -> Result<(), InconsistentAttestation> {
|
||||
if attestation.signature.alg != self.signature_alg {
|
||||
return Err(InconsistentAttestation(format!(
|
||||
"signature algorithm: expected {:?}, got {:?}",
|
||||
self.signature_alg, attestation.signature.alg
|
||||
)));
|
||||
}
|
||||
|
||||
if attestation.header.root.alg != self.hash_alg {
|
||||
return Err(InconsistentAttestation(format!(
|
||||
"hash algorithm: expected {:?}, got {:?}",
|
||||
self.hash_alg, attestation.header.root.alg
|
||||
)));
|
||||
}
|
||||
|
||||
if attestation.body.cert_commitment() != &self.server_cert_commitment {
|
||||
return Err(InconsistentAttestation(
|
||||
"server certificate commitment does not match".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
// TODO: improve the O(M*N) complexity of this check.
|
||||
for extension in &self.extensions {
|
||||
if !attestation.body.extensions().any(|e| e == extension) {
|
||||
return Err(InconsistentAttestation(
|
||||
"extension is missing from the attestation".to_string(),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Error for [`Request::validate`].
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("inconsistent attestation: {0}")]
|
||||
pub struct InconsistentAttestation(String);
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use tlsn_core::{
|
||||
connection::TranscriptLength,
|
||||
fixtures::{ConnectionFixture, encoding_provider},
|
||||
hash::{Blake3, HashAlgId},
|
||||
transcript::Transcript,
|
||||
};
|
||||
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
|
||||
|
||||
use crate::{
|
||||
CryptoProvider,
|
||||
connection::ServerCertOpening,
|
||||
fixtures::{RequestFixture, attestation_fixture, request_fixture},
|
||||
signing::SignatureAlgId,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_success() {
|
||||
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
|
||||
let connection = ConnectionFixture::tlsnotary(transcript.length());
|
||||
|
||||
let RequestFixture { request, .. } = request_fixture(
|
||||
transcript,
|
||||
encoding_provider(GET_WITH_HEADER, OK_JSON),
|
||||
connection.clone(),
|
||||
Blake3::default(),
|
||||
Vec::new(),
|
||||
);
|
||||
|
||||
let attestation =
|
||||
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
|
||||
|
||||
assert!(request.validate(&attestation).is_ok())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_wrong_signature_alg() {
|
||||
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
|
||||
let connection = ConnectionFixture::tlsnotary(transcript.length());
|
||||
|
||||
let RequestFixture { mut request, .. } = request_fixture(
|
||||
transcript,
|
||||
encoding_provider(GET_WITH_HEADER, OK_JSON),
|
||||
connection.clone(),
|
||||
Blake3::default(),
|
||||
Vec::new(),
|
||||
);
|
||||
|
||||
let attestation =
|
||||
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
|
||||
|
||||
request.signature_alg = SignatureAlgId::SECP256R1;
|
||||
|
||||
let res = request.validate(&attestation);
|
||||
assert!(res.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_wrong_hash_alg() {
|
||||
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
|
||||
let connection = ConnectionFixture::tlsnotary(transcript.length());
|
||||
|
||||
let RequestFixture { mut request, .. } = request_fixture(
|
||||
transcript,
|
||||
encoding_provider(GET_WITH_HEADER, OK_JSON),
|
||||
connection.clone(),
|
||||
Blake3::default(),
|
||||
Vec::new(),
|
||||
);
|
||||
|
||||
let attestation =
|
||||
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
|
||||
|
||||
request.hash_alg = HashAlgId::SHA256;
|
||||
|
||||
let res = request.validate(&attestation);
|
||||
assert!(res.is_err())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_wrong_server_commitment() {
|
||||
let transcript = Transcript::new(GET_WITH_HEADER, OK_JSON);
|
||||
let connection = ConnectionFixture::tlsnotary(transcript.length());
|
||||
|
||||
let RequestFixture { mut request, .. } = request_fixture(
|
||||
transcript,
|
||||
encoding_provider(GET_WITH_HEADER, OK_JSON),
|
||||
connection.clone(),
|
||||
Blake3::default(),
|
||||
Vec::new(),
|
||||
);
|
||||
|
||||
let attestation =
|
||||
attestation_fixture(request.clone(), connection, SignatureAlgId::SECP256K1, &[]);
|
||||
|
||||
let ConnectionFixture {
|
||||
server_cert_data, ..
|
||||
} = ConnectionFixture::appliedzkp(TranscriptLength {
|
||||
sent: 100,
|
||||
received: 100,
|
||||
});
|
||||
let opening = ServerCertOpening::new(server_cert_data);
|
||||
|
||||
let crypto_provider = CryptoProvider::default();
|
||||
request.server_cert_commitment =
|
||||
opening.commit(crypto_provider.hash.get(&HashAlgId::BLAKE3).unwrap());
|
||||
|
||||
let res = request.validate(&attestation);
|
||||
assert!(res.is_err())
|
||||
}
|
||||
}
|
||||
@@ -1,19 +1,22 @@
|
||||
use tlsn_core::{
|
||||
connection::{HandshakeData, ServerName},
|
||||
transcript::{Transcript, TranscriptCommitment, TranscriptSecret},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
connection::{ServerCertData, ServerCertOpening, ServerName},
|
||||
index::Index,
|
||||
CryptoProvider, Secrets,
|
||||
connection::ServerCertOpening,
|
||||
request::{Request, RequestConfig},
|
||||
secrets::Secrets,
|
||||
transcript::{encoding::EncodingTree, Transcript},
|
||||
CryptoProvider,
|
||||
};
|
||||
|
||||
/// Builder for [`Request`].
|
||||
pub struct RequestBuilder<'a> {
|
||||
config: &'a RequestConfig,
|
||||
server_name: Option<ServerName>,
|
||||
server_cert_data: Option<ServerCertData>,
|
||||
encoding_tree: Option<EncodingTree>,
|
||||
handshake_data: Option<HandshakeData>,
|
||||
transcript: Option<Transcript>,
|
||||
transcript_commitments: Vec<TranscriptCommitment>,
|
||||
transcript_commitment_secrets: Vec<TranscriptSecret>,
|
||||
}
|
||||
|
||||
impl<'a> RequestBuilder<'a> {
|
||||
@@ -22,9 +25,10 @@ impl<'a> RequestBuilder<'a> {
|
||||
Self {
|
||||
config,
|
||||
server_name: None,
|
||||
server_cert_data: None,
|
||||
encoding_tree: None,
|
||||
handshake_data: None,
|
||||
transcript: None,
|
||||
transcript_commitments: Vec::new(),
|
||||
transcript_commitment_secrets: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -34,15 +38,9 @@ impl<'a> RequestBuilder<'a> {
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the server identity data.
|
||||
pub fn server_cert_data(&mut self, data: ServerCertData) -> &mut Self {
|
||||
self.server_cert_data = Some(data);
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the tree to commit to the transcript encodings.
|
||||
pub fn encoding_tree(&mut self, tree: EncodingTree) -> &mut Self {
|
||||
self.encoding_tree = Some(tree);
|
||||
/// Sets the handshake data.
|
||||
pub fn handshake_data(&mut self, data: HandshakeData) -> &mut Self {
|
||||
self.handshake_data = Some(data);
|
||||
self
|
||||
}
|
||||
|
||||
@@ -52,6 +50,17 @@ impl<'a> RequestBuilder<'a> {
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the transcript commitments.
|
||||
pub fn transcript_commitments(
|
||||
&mut self,
|
||||
secrets: Vec<TranscriptSecret>,
|
||||
commitments: Vec<TranscriptCommitment>,
|
||||
) -> &mut Self {
|
||||
self.transcript_commitment_secrets = secrets;
|
||||
self.transcript_commitments = commitments;
|
||||
self
|
||||
}
|
||||
|
||||
/// Builds the attestation request and returns the corresponding secrets.
|
||||
pub fn build(
|
||||
self,
|
||||
@@ -60,9 +69,10 @@ impl<'a> RequestBuilder<'a> {
|
||||
let Self {
|
||||
config,
|
||||
server_name,
|
||||
server_cert_data,
|
||||
encoding_tree,
|
||||
handshake_data: server_cert_data,
|
||||
transcript,
|
||||
transcript_commitments,
|
||||
transcript_commitment_secrets,
|
||||
} = self;
|
||||
|
||||
let signature_alg = *config.signature_alg();
|
||||
@@ -85,21 +95,21 @@ impl<'a> RequestBuilder<'a> {
|
||||
|
||||
let server_cert_commitment = server_cert_opening.commit(hasher);
|
||||
|
||||
let encoding_commitment_root = encoding_tree.as_ref().map(|tree| tree.root());
|
||||
let extensions = config.extensions().to_vec();
|
||||
|
||||
let request = Request {
|
||||
signature_alg,
|
||||
hash_alg,
|
||||
server_cert_commitment,
|
||||
encoding_commitment_root,
|
||||
extensions,
|
||||
};
|
||||
|
||||
let secrets = Secrets {
|
||||
server_name,
|
||||
server_cert_opening,
|
||||
encoding_tree,
|
||||
plaintext_hashes: Index::default(),
|
||||
transcript,
|
||||
transcript_commitments,
|
||||
transcript_commitment_secrets,
|
||||
};
|
||||
|
||||
Ok((request, secrets))
|
||||
@@ -1,10 +1,14 @@
|
||||
use crate::{hash::HashAlgId, signing::SignatureAlgId};
|
||||
use tlsn_core::{hash::HashAlgId, transcript::TranscriptCommitConfig};
|
||||
|
||||
use crate::{Extension, signing::SignatureAlgId};
|
||||
|
||||
/// Request configuration.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RequestConfig {
|
||||
signature_alg: SignatureAlgId,
|
||||
hash_alg: HashAlgId,
|
||||
extensions: Vec<Extension>,
|
||||
transcript_commit: Option<TranscriptCommitConfig>,
|
||||
}
|
||||
|
||||
impl Default for RequestConfig {
|
||||
@@ -28,6 +32,16 @@ impl RequestConfig {
|
||||
pub fn hash_alg(&self) -> &HashAlgId {
|
||||
&self.hash_alg
|
||||
}
|
||||
|
||||
/// Returns the extensions.
|
||||
pub fn extensions(&self) -> &[Extension] {
|
||||
&self.extensions
|
||||
}
|
||||
|
||||
/// Returns the transcript commitment configuration.
|
||||
pub fn transcript_commit(&self) -> Option<&TranscriptCommitConfig> {
|
||||
self.transcript_commit.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
/// Builder for [`RequestConfig`].
|
||||
@@ -35,6 +49,8 @@ impl RequestConfig {
|
||||
pub struct RequestConfigBuilder {
|
||||
signature_alg: SignatureAlgId,
|
||||
hash_alg: HashAlgId,
|
||||
extensions: Vec<Extension>,
|
||||
transcript_commit: Option<TranscriptCommitConfig>,
|
||||
}
|
||||
|
||||
impl Default for RequestConfigBuilder {
|
||||
@@ -42,6 +58,8 @@ impl Default for RequestConfigBuilder {
|
||||
Self {
|
||||
signature_alg: SignatureAlgId::SECP256K1,
|
||||
hash_alg: HashAlgId::BLAKE3,
|
||||
extensions: Vec::new(),
|
||||
transcript_commit: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -59,11 +77,25 @@ impl RequestConfigBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Adds an extension to the request.
|
||||
pub fn extension(&mut self, extension: Extension) -> &mut Self {
|
||||
self.extensions.push(extension);
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets the transcript commitment configuration.
|
||||
pub fn transcript_commit(&mut self, transcript_commit: TranscriptCommitConfig) -> &mut Self {
|
||||
self.transcript_commit = Some(transcript_commit);
|
||||
self
|
||||
}
|
||||
|
||||
/// Builds the config.
|
||||
pub fn build(self) -> Result<RequestConfig, RequestConfigBuilderError> {
|
||||
Ok(RequestConfig {
|
||||
signature_alg: self.signature_alg,
|
||||
hash_alg: self.hash_alg,
|
||||
extensions: self.extensions,
|
||||
transcript_commit: self.transcript_commit,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,21 +1,20 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
connection::{ServerCertOpening, ServerIdentityProof, ServerName},
|
||||
index::Index,
|
||||
transcript::{
|
||||
encoding::EncodingTree, hash::PlaintextHashSecret, Transcript, TranscriptProofBuilder,
|
||||
},
|
||||
use tlsn_core::{
|
||||
connection::ServerName,
|
||||
transcript::{Transcript, TranscriptCommitment, TranscriptProofBuilder, TranscriptSecret},
|
||||
};
|
||||
|
||||
/// Secret data of an [`Attestation`](crate::attestation::Attestation).
|
||||
use crate::connection::{ServerCertOpening, ServerIdentityProof};
|
||||
|
||||
/// Secret data of an [`Attestation`](crate::Attestation).
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
pub struct Secrets {
|
||||
pub(crate) server_name: ServerName,
|
||||
pub(crate) server_cert_opening: ServerCertOpening,
|
||||
pub(crate) encoding_tree: Option<EncodingTree>,
|
||||
pub(crate) plaintext_hashes: Index<PlaintextHashSecret>,
|
||||
pub(crate) transcript: Transcript,
|
||||
pub(crate) transcript_commitments: Vec<TranscriptCommitment>,
|
||||
pub(crate) transcript_commitment_secrets: Vec<TranscriptSecret>,
|
||||
}
|
||||
|
||||
opaque_debug::implement!(Secrets);
|
||||
@@ -38,10 +37,6 @@ impl Secrets {
|
||||
|
||||
/// Returns a transcript proof builder.
|
||||
pub fn transcript_proof_builder(&self) -> TranscriptProofBuilder<'_> {
|
||||
TranscriptProofBuilder::new(
|
||||
&self.transcript,
|
||||
self.encoding_tree.as_ref(),
|
||||
&self.plaintext_hashes,
|
||||
)
|
||||
TranscriptProofBuilder::new(&self.transcript, &self.transcript_commitment_secrets)
|
||||
}
|
||||
}
|
||||
53
crates/attestation/src/serialize.rs
Normal file
53
crates/attestation/src/serialize.rs
Normal file
@@ -0,0 +1,53 @@
|
||||
/// Canonical serialization of TLSNotary types.
|
||||
///
|
||||
/// This trait is used to serialize types into a canonical byte representation.
|
||||
pub(crate) trait CanonicalSerialize {
|
||||
/// Serializes the type.
|
||||
fn serialize(&self) -> Vec<u8>;
|
||||
}
|
||||
|
||||
impl<T> CanonicalSerialize for T
|
||||
where
|
||||
T: serde::Serialize,
|
||||
{
|
||||
fn serialize(&self) -> Vec<u8> {
|
||||
// For now we use BCS for serialization. In future releases we will want to
|
||||
// consider this further, particularly with respect to EVM compatibility.
|
||||
bcs::to_bytes(self).unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
/// A type with a domain separator which is used during hashing to mitigate type
|
||||
/// confusion attacks.
|
||||
pub(crate) trait DomainSeparator {
|
||||
/// Returns the domain separator for the type.
|
||||
fn domain(&self) -> &[u8];
|
||||
}
|
||||
|
||||
macro_rules! impl_domain_separator {
|
||||
($type:ty) => {
|
||||
impl $crate::serialize::DomainSeparator for $type {
|
||||
fn domain(&self) -> &[u8] {
|
||||
use std::sync::LazyLock;
|
||||
|
||||
// Computes a 16 byte hash of the type's name to use as a domain separator.
|
||||
static DOMAIN: LazyLock<[u8; 16]> = LazyLock::new(|| {
|
||||
let domain: [u8; 32] = blake3::hash(stringify!($type).as_bytes()).into();
|
||||
domain[..16].try_into().unwrap()
|
||||
});
|
||||
|
||||
&*DOMAIN
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub(crate) use impl_domain_separator;
|
||||
|
||||
impl_domain_separator!(tlsn_core::connection::ServerEphemKey);
|
||||
impl_domain_separator!(tlsn_core::connection::ConnectionInfo);
|
||||
impl_domain_separator!(tlsn_core::connection::CertBinding);
|
||||
impl_domain_separator!(tlsn_core::transcript::TranscriptCommitment);
|
||||
impl_domain_separator!(tlsn_core::transcript::TranscriptSecret);
|
||||
impl_domain_separator!(tlsn_core::transcript::encoding::EncodingCommitment);
|
||||
impl_domain_separator!(tlsn_core::transcript::hash::PlaintextHash);
|
||||
@@ -4,7 +4,7 @@ use std::collections::HashMap;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::hash::impl_domain_separator;
|
||||
use crate::serialize::impl_domain_separator;
|
||||
|
||||
/// Key algorithm identifier.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
@@ -52,10 +52,15 @@ impl std::fmt::Display for KeyAlgId {
|
||||
pub struct SignatureAlgId(u8);
|
||||
|
||||
impl SignatureAlgId {
|
||||
/// secp256k1 signature algorithm.
|
||||
/// secp256k1 signature algorithm with SHA-256 hashing.
|
||||
pub const SECP256K1: Self = Self(1);
|
||||
/// secp256r1 signature algorithm.
|
||||
/// secp256r1 signature algorithm with SHA-256 hashing.
|
||||
pub const SECP256R1: Self = Self(2);
|
||||
/// Ethereum-compatible signature algorithm.
|
||||
///
|
||||
/// Uses secp256k1 with Keccak-256 hashing. The signature is a concatenation
|
||||
/// of `r || s || v` as defined in Solidity's ecrecover().
|
||||
pub const SECP256K1ETH: Self = Self(3);
|
||||
|
||||
/// Creates a new signature algorithm identifier.
|
||||
///
|
||||
@@ -83,6 +88,7 @@ impl std::fmt::Display for SignatureAlgId {
|
||||
match *self {
|
||||
SignatureAlgId::SECP256K1 => write!(f, "secp256k1"),
|
||||
SignatureAlgId::SECP256R1 => write!(f, "secp256r1"),
|
||||
SignatureAlgId::SECP256K1ETH => write!(f, "secp256k1eth"),
|
||||
_ => write!(f, "custom({:02x})", self.0),
|
||||
}
|
||||
}
|
||||
@@ -124,6 +130,13 @@ impl SignerProvider {
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Configures a secp256k1eth signer with the provided signing key.
|
||||
pub fn set_secp256k1eth(&mut self, key: &[u8]) -> Result<&mut Self, SignerError> {
|
||||
self.set_signer(Box::new(Secp256k1EthSigner::new(key)?));
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Returns a signer for the given algorithm.
|
||||
pub(crate) fn get(
|
||||
&self,
|
||||
@@ -164,6 +177,10 @@ impl Default for SignatureVerifierProvider {
|
||||
|
||||
verifiers.insert(SignatureAlgId::SECP256K1, Box::new(Secp256k1Verifier) as _);
|
||||
verifiers.insert(SignatureAlgId::SECP256R1, Box::new(Secp256r1Verifier) as _);
|
||||
verifiers.insert(
|
||||
SignatureAlgId::SECP256K1ETH,
|
||||
Box::new(Secp256k1EthVerifier) as _,
|
||||
);
|
||||
|
||||
Self { verifiers }
|
||||
}
|
||||
@@ -207,7 +224,7 @@ pub struct VerifyingKey {
|
||||
|
||||
impl_domain_separator!(VerifyingKey);
|
||||
|
||||
/// Error occurred while verifying a signature.
|
||||
/// Error that can occur while verifying a signature.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("signature verification failed: {0}")]
|
||||
pub struct SignatureError(String);
|
||||
@@ -225,13 +242,13 @@ mod secp256k1 {
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use k256::ecdsa::{
|
||||
signature::{SignerMut, Verifier},
|
||||
Signature as Secp256K1Signature, SigningKey,
|
||||
signature::{SignerMut, Verifier},
|
||||
};
|
||||
|
||||
use super::*;
|
||||
|
||||
/// secp256k1 signer.
|
||||
/// secp256k1 signer with SHA-256 hashing.
|
||||
pub struct Secp256k1Signer(Arc<Mutex<SigningKey>>);
|
||||
|
||||
impl Secp256k1Signer {
|
||||
@@ -267,7 +284,7 @@ mod secp256k1 {
|
||||
}
|
||||
}
|
||||
|
||||
/// secp256k1 verifier.
|
||||
/// secp256k1 verifier with SHA-256 hashing.
|
||||
pub struct Secp256k1Verifier;
|
||||
|
||||
impl SignatureVerifier for Secp256k1Verifier {
|
||||
@@ -301,13 +318,13 @@ mod secp256r1 {
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use p256::ecdsa::{
|
||||
signature::{SignerMut, Verifier},
|
||||
Signature as Secp256R1Signature, SigningKey,
|
||||
signature::{SignerMut, Verifier},
|
||||
};
|
||||
|
||||
use super::*;
|
||||
|
||||
/// secp256r1 signer.
|
||||
/// secp256r1 signer with SHA-256 hashing.
|
||||
pub struct Secp256r1Signer(Arc<Mutex<SigningKey>>);
|
||||
|
||||
impl Secp256r1Signer {
|
||||
@@ -343,7 +360,7 @@ mod secp256r1 {
|
||||
}
|
||||
}
|
||||
|
||||
/// secp256r1 verifier.
|
||||
/// secp256r1 verifier with SHA-256 hashing.
|
||||
pub struct Secp256r1Verifier;
|
||||
|
||||
impl SignatureVerifier for Secp256r1Verifier {
|
||||
@@ -373,63 +390,208 @@ mod secp256r1 {
|
||||
|
||||
pub use secp256r1::{Secp256r1Signer, Secp256r1Verifier};
|
||||
|
||||
mod secp256k1eth {
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use k256::ecdsa::{
|
||||
Signature as Secp256K1Signature, SigningKey, signature::hazmat::PrehashVerifier,
|
||||
};
|
||||
use tiny_keccak::{Hasher, Keccak};
|
||||
|
||||
use super::*;
|
||||
|
||||
/// secp256k1eth signer.
|
||||
pub struct Secp256k1EthSigner(Arc<Mutex<SigningKey>>);
|
||||
|
||||
impl Secp256k1EthSigner {
|
||||
/// Creates a new secp256k1eth signer with the provided signing key.
|
||||
pub fn new(key: &[u8]) -> Result<Self, SignerError> {
|
||||
SigningKey::from_slice(key)
|
||||
.map(|key| Self(Arc::new(Mutex::new(key))))
|
||||
.map_err(|_| SignerError("invalid key".to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
impl Signer for Secp256k1EthSigner {
|
||||
fn alg_id(&self) -> SignatureAlgId {
|
||||
SignatureAlgId::SECP256K1ETH
|
||||
}
|
||||
|
||||
fn sign(&self, msg: &[u8]) -> Result<Signature, SignatureError> {
|
||||
// Pre-hash the message.
|
||||
let mut hasher = Keccak::v256();
|
||||
hasher.update(msg);
|
||||
let mut output = vec![0; 32];
|
||||
hasher.finalize(&mut output);
|
||||
|
||||
let (signature, recid) = self
|
||||
.0
|
||||
.lock()
|
||||
.unwrap()
|
||||
.sign_prehash_recoverable(&output)
|
||||
.map_err(|_| SignatureError("error in sign_prehash_recoverable".to_string()))?;
|
||||
|
||||
let mut sig = signature.to_vec();
|
||||
let recid = recid.to_byte();
|
||||
|
||||
// Based on Ethereum Yellow Paper Appendix F, only values 0 and 1 are valid.
|
||||
if recid > 1 {
|
||||
return Err(SignatureError(format!(
|
||||
"expected recovery id 0 or 1, got {recid:?}"
|
||||
)));
|
||||
}
|
||||
// `ecrecover` expects that 0 and 1 are mapped to 27 and 28.
|
||||
sig.push(recid + 27);
|
||||
|
||||
Ok(Signature {
|
||||
alg: SignatureAlgId::SECP256K1ETH,
|
||||
data: sig,
|
||||
})
|
||||
}
|
||||
|
||||
fn verifying_key(&self) -> VerifyingKey {
|
||||
let key = self.0.lock().unwrap().verifying_key().to_sec1_bytes();
|
||||
|
||||
VerifyingKey {
|
||||
alg: KeyAlgId::K256,
|
||||
data: key.to_vec(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// secp256k1eth verifier.
|
||||
pub struct Secp256k1EthVerifier;
|
||||
|
||||
impl SignatureVerifier for Secp256k1EthVerifier {
|
||||
fn alg_id(&self) -> SignatureAlgId {
|
||||
SignatureAlgId::SECP256K1ETH
|
||||
}
|
||||
|
||||
fn verify(&self, key: &VerifyingKey, msg: &[u8], sig: &[u8]) -> Result<(), SignatureError> {
|
||||
if key.alg != KeyAlgId::K256 {
|
||||
return Err(SignatureError("key algorithm is not k256".to_string()));
|
||||
}
|
||||
|
||||
if sig.len() != 65 {
|
||||
return Err(SignatureError(
|
||||
"ethereum signature length must be 65 bytes".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let key = k256::ecdsa::VerifyingKey::from_sec1_bytes(&key.data)
|
||||
.map_err(|_| SignatureError("invalid k256 key".to_string()))?;
|
||||
|
||||
// `sig` is a concatenation of `r || s || v`. We ignore `v` since it is only
|
||||
// useful when recovering the verifying key.
|
||||
let sig = Secp256K1Signature::from_slice(&sig[..64])
|
||||
.map_err(|_| SignatureError("invalid secp256k1 signature".to_string()))?;
|
||||
|
||||
// Pre-hash the message.
|
||||
let mut hasher = Keccak::v256();
|
||||
hasher.update(msg);
|
||||
let mut output = vec![0; 32];
|
||||
hasher.finalize(&mut output);
|
||||
|
||||
key.verify_prehash(&output, &sig).map_err(|_| {
|
||||
SignatureError("secp256k1 signature verification failed".to_string())
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub use secp256k1eth::{Secp256k1EthSigner, Secp256k1EthVerifier};
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use rand_core::OsRng;
|
||||
use alloy_primitives::utils::eip191_message;
|
||||
use alloy_signer::SignerSync;
|
||||
use alloy_signer_local::PrivateKeySigner;
|
||||
use rand06_compat::Rand0_6CompatExt;
|
||||
use rstest::{fixture, rstest};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[fixture]
|
||||
#[once]
|
||||
fn secp256k1_signer() -> Secp256k1Signer {
|
||||
let signing_key = k256::ecdsa::SigningKey::random(&mut OsRng);
|
||||
Secp256k1Signer::new(&signing_key.to_bytes()).unwrap()
|
||||
fn secp256k1_pair() -> (Box<dyn Signer>, Box<dyn SignatureVerifier>) {
|
||||
let signing_key = k256::ecdsa::SigningKey::random(&mut rand::rng().compat());
|
||||
(
|
||||
Box::new(Secp256k1Signer::new(&signing_key.to_bytes()).unwrap()),
|
||||
Box::new(Secp256k1Verifier {}),
|
||||
)
|
||||
}
|
||||
|
||||
#[fixture]
|
||||
#[once]
|
||||
fn secp256r1_signer() -> Secp256r1Signer {
|
||||
let signing_key = p256::ecdsa::SigningKey::random(&mut OsRng);
|
||||
Secp256r1Signer::new(&signing_key.to_bytes()).unwrap()
|
||||
fn secp256r1_pair() -> (Box<dyn Signer>, Box<dyn SignatureVerifier>) {
|
||||
let signing_key = p256::ecdsa::SigningKey::random(&mut rand::rng().compat());
|
||||
(
|
||||
Box::new(Secp256r1Signer::new(&signing_key.to_bytes()).unwrap()),
|
||||
Box::new(Secp256r1Verifier {}),
|
||||
)
|
||||
}
|
||||
|
||||
#[fixture]
|
||||
#[once]
|
||||
fn secp256k1eth_pair() -> (Box<dyn Signer>, Box<dyn SignatureVerifier>) {
|
||||
let signing_key = k256::ecdsa::SigningKey::random(&mut rand::rng().compat());
|
||||
(
|
||||
Box::new(Secp256k1EthSigner::new(&signing_key.to_bytes()).unwrap()),
|
||||
Box::new(Secp256k1EthVerifier {}),
|
||||
)
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_secp256k1_success(secp256k1_signer: &Secp256k1Signer) {
|
||||
assert_eq!(secp256k1_signer.alg_id(), SignatureAlgId::SECP256K1);
|
||||
#[case::r1(secp256r1_pair(), SignatureAlgId::SECP256R1)]
|
||||
#[case::k1(secp256k1_pair(), SignatureAlgId::SECP256K1)]
|
||||
#[case::k1eth(secp256k1eth_pair(), SignatureAlgId::SECP256K1ETH)]
|
||||
fn test_success(
|
||||
#[case] pair: (Box<dyn Signer>, Box<dyn SignatureVerifier>),
|
||||
#[case] alg: SignatureAlgId,
|
||||
) {
|
||||
let (signer, verifier) = pair;
|
||||
assert_eq!(signer.alg_id(), alg);
|
||||
|
||||
let msg = "test payload";
|
||||
let signature = secp256k1_signer.sign(msg.as_bytes()).unwrap();
|
||||
let verifying_key = secp256k1_signer.verifying_key();
|
||||
let signature = signer.sign(msg.as_bytes()).unwrap();
|
||||
let verifying_key = signer.verifying_key();
|
||||
|
||||
assert_eq!(verifier.alg_id(), alg);
|
||||
let result = verifier.verify(&verifying_key, msg.as_bytes(), &signature.data);
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[case::r1(secp256r1_pair())]
|
||||
#[case::k1eth(secp256k1eth_pair())]
|
||||
fn test_wrong_signer(#[case] pair: (Box<dyn Signer>, Box<dyn SignatureVerifier>)) {
|
||||
let (signer, _) = pair;
|
||||
|
||||
let msg = "test payload";
|
||||
let signature = signer.sign(msg.as_bytes()).unwrap();
|
||||
let verifying_key = signer.verifying_key();
|
||||
|
||||
let verifier = Secp256k1Verifier {};
|
||||
assert_eq!(verifier.alg_id(), SignatureAlgId::SECP256K1);
|
||||
let result = verifier.verify(&verifying_key, msg.as_bytes(), &signature.data);
|
||||
assert!(result.is_ok());
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
fn test_secp256r1_success(secp256r1_signer: &Secp256r1Signer) {
|
||||
assert_eq!(secp256r1_signer.alg_id(), SignatureAlgId::SECP256R1);
|
||||
|
||||
let msg = "test payload";
|
||||
let signature = secp256r1_signer.sign(msg.as_bytes()).unwrap();
|
||||
let verifying_key = secp256r1_signer.verifying_key();
|
||||
|
||||
let verifier = Secp256r1Verifier {};
|
||||
assert_eq!(verifier.alg_id(), SignatureAlgId::SECP256R1);
|
||||
let result = verifier.verify(&verifying_key, msg.as_bytes(), &signature.data);
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[case::wrong_signer(&secp256r1_signer(), false, false)]
|
||||
#[case::corrupted_signature(&secp256k1_signer(), true, false)]
|
||||
#[case::wrong_signature(&secp256k1_signer(), false, true)]
|
||||
#[case::corrupted_signature_r1(secp256r1_pair(), true, false)]
|
||||
#[case::corrupted_signature_k1(secp256k1_pair(), true, false)]
|
||||
#[case::corrupted_signature_k1eth(secp256k1eth_pair(), true, false)]
|
||||
#[case::wrong_signature_r1(secp256r1_pair(), false, true)]
|
||||
#[case::wrong_signature_k1(secp256k1_pair(), false, true)]
|
||||
#[case::wrong_signature_k1eth(secp256k1eth_pair(), false, true)]
|
||||
fn test_failure(
|
||||
#[case] signer: &dyn Signer,
|
||||
#[case] pair: (Box<dyn Signer>, Box<dyn SignatureVerifier>),
|
||||
#[case] corrupted_signature: bool,
|
||||
#[case] wrong_signature: bool,
|
||||
) {
|
||||
let (signer, verifier) = pair;
|
||||
|
||||
let msg = "test payload";
|
||||
let mut signature = signer.sign(msg.as_bytes()).unwrap();
|
||||
let verifying_key = signer.verifying_key();
|
||||
@@ -442,8 +604,32 @@ mod test {
|
||||
signature = signer.sign("different payload".as_bytes()).unwrap();
|
||||
}
|
||||
|
||||
let verifier = Secp256k1Verifier {};
|
||||
let result = verifier.verify(&verifying_key, msg.as_bytes(), &signature.data);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
// Tests secp256k1eth signatures against a reference implementation.
|
||||
fn test_secp256k1eth_sig() {
|
||||
// An arbitrary signing key.
|
||||
let sk = vec![1; 32];
|
||||
let mut msg = "test message".as_bytes().to_vec();
|
||||
|
||||
let signer: Secp256k1EthSigner = Secp256k1EthSigner::new(&sk).unwrap();
|
||||
|
||||
// Testing multiple signatures.
|
||||
for i in 0..10 {
|
||||
msg.push(i);
|
||||
// Convert to EIP-191 since the reference signer can't sign raw bytes.
|
||||
let sig = signer.sign(&eip191_message(&msg)).unwrap().data;
|
||||
|
||||
assert_eq!(sig, reference_eth_signature(&sk, &msg));
|
||||
}
|
||||
}
|
||||
|
||||
// Returns a reference Ethereum signature.
|
||||
fn reference_eth_signature(sk: &[u8], msg: &[u8]) -> Vec<u8> {
|
||||
let signer = PrivateKeySigner::from_slice(sk).unwrap();
|
||||
signer.sign_message_sync(msg).unwrap().as_bytes().to_vec()
|
||||
}
|
||||
}
|
||||
@@ -1,13 +1,18 @@
|
||||
use tlsn_core::{
|
||||
attestation::{Attestation, AttestationConfig},
|
||||
connection::{HandshakeData, HandshakeDataV1_2},
|
||||
fixtures::{self, encoder_seed, ConnectionFixture},
|
||||
hash::Blake3,
|
||||
use tlsn_attestation::{
|
||||
Attestation, AttestationConfig, CryptoProvider,
|
||||
presentation::PresentationOutput,
|
||||
request::{Request, RequestConfig},
|
||||
signing::SignatureAlgId,
|
||||
transcript::{encoding::EncodingTree, Direction, Transcript, TranscriptCommitConfigBuilder},
|
||||
CryptoProvider,
|
||||
};
|
||||
use tlsn_core::{
|
||||
connection::{CertBinding, CertBindingV1_2},
|
||||
fixtures::{self, ConnectionFixture, encoder_secret},
|
||||
hash::Blake3,
|
||||
transcript::{
|
||||
Direction, Transcript, TranscriptCommitConfigBuilder, TranscriptCommitment,
|
||||
TranscriptSecret,
|
||||
encoding::{EncodingCommitment, EncodingTree},
|
||||
},
|
||||
};
|
||||
use tlsn_data_fixtures::http::{request::GET_WITH_HEADER, response::OK_JSON};
|
||||
|
||||
@@ -31,10 +36,10 @@ fn test_api() {
|
||||
server_cert_data,
|
||||
} = ConnectionFixture::tlsnotary(transcript.length());
|
||||
|
||||
let HandshakeData::V1_2(HandshakeDataV1_2 {
|
||||
let CertBinding::V1_2(CertBindingV1_2 {
|
||||
server_ephemeral_key,
|
||||
..
|
||||
}) = server_cert_data.handshake.clone()
|
||||
}) = server_cert_data.binding.clone()
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
@@ -54,18 +59,25 @@ fn test_api() {
|
||||
&Blake3::default(),
|
||||
transcripts_commitment_config.iter_encoding(),
|
||||
&encodings_provider,
|
||||
&transcript.length(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let encoding_commitment = EncodingCommitment {
|
||||
root: encoding_tree.root(),
|
||||
secret: encoder_secret(),
|
||||
};
|
||||
|
||||
let request_config = RequestConfig::default();
|
||||
let mut request_builder = Request::builder(&request_config);
|
||||
|
||||
request_builder
|
||||
.server_name(server_name.clone())
|
||||
.server_cert_data(server_cert_data)
|
||||
.handshake_data(server_cert_data)
|
||||
.transcript(transcript)
|
||||
.encoding_tree(encoding_tree);
|
||||
.transcript_commitments(
|
||||
vec![TranscriptSecret::Encoding(encoding_tree)],
|
||||
vec![TranscriptCommitment::Encoding(encoding_commitment.clone())],
|
||||
);
|
||||
|
||||
let (request, secrets) = request_builder.build(&provider).unwrap();
|
||||
|
||||
@@ -84,7 +96,7 @@ fn test_api() {
|
||||
.connection_info(connection_info.clone())
|
||||
// Server key Notary received during handshake
|
||||
.server_ephemeral_key(server_ephemeral_key)
|
||||
.encoding_seed(encoder_seed().to_vec());
|
||||
.transcript_commitments(vec![TranscriptCommitment::Encoding(encoding_commitment)]);
|
||||
|
||||
let attestation = attestation_builder.build(&provider).unwrap();
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
[package]
|
||||
edition = "2021"
|
||||
name = "tlsn-benches"
|
||||
publish = false
|
||||
version = "0.0.0"
|
||||
|
||||
[dependencies]
|
||||
anyhow = { workspace = true }
|
||||
charming = { version = "0.3.1", features = ["ssr"] }
|
||||
csv = "1.3.0"
|
||||
futures = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
tlsn-common = { workspace = true }
|
||||
tlsn-core = { workspace = true }
|
||||
tlsn-prover = { workspace = true }
|
||||
tlsn-server-fixture = { workspace = true }
|
||||
tlsn-server-fixture-certs = { workspace = true }
|
||||
tlsn-tls-core = { workspace = true }
|
||||
tlsn-verifier = { workspace = true }
|
||||
tokio = { workspace = true, features = [
|
||||
"rt",
|
||||
"rt-multi-thread",
|
||||
"macros",
|
||||
"net",
|
||||
"io-std",
|
||||
"fs",
|
||||
] }
|
||||
tokio-util = { workspace = true }
|
||||
toml = "0.8.11"
|
||||
tracing-subscriber = { workspace = true, features = ["env-filter"] }
|
||||
|
||||
[[bin]]
|
||||
name = "bench"
|
||||
path = "bin/bench.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "prover"
|
||||
path = "bin/prover.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "verifier"
|
||||
path = "bin/verifier.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "plot"
|
||||
path = "bin/plot.rs"
|
||||
@@ -1,35 +0,0 @@
|
||||
# TLSNotary bench utilities
|
||||
|
||||
This crate provides utilities for benchmarking protocol performance under various network conditions and usage patterns.
|
||||
|
||||
As the protocol is mostly IO bound, it's important to track how it performs in low bandwidth and/or high latency environments. To do this we set up temporary network namespaces and add virtual ethernet interfaces which we can control using the linux `tc` (Traffic Control) utility.
|
||||
|
||||
## Configuration
|
||||
|
||||
See the `bench.toml` file for benchmark configurations.
|
||||
|
||||
## Preliminaries
|
||||
|
||||
To run the benchmarks you will need `iproute2` installed, eg:
|
||||
```sh
|
||||
sudo apt-get install iproute2 -y
|
||||
```
|
||||
|
||||
## Running benches
|
||||
|
||||
Running the benches requires root privileges because they will set up virtual interfaces. The script is designed to fully clean up when the benches are done, but run them at your own risk.
|
||||
|
||||
Make sure you're in the `crates/benches/` directory, build the binaries then run the script:
|
||||
|
||||
```sh
|
||||
cargo build --release
|
||||
sudo ./bench.sh
|
||||
```
|
||||
|
||||
## Metrics
|
||||
|
||||
After you run the benches you will see a `metrics.csv` file in the working directory. It will be owned by `root`, so you probably want to run
|
||||
|
||||
```sh
|
||||
sudo chown $USER metrics.csv
|
||||
```
|
||||
@@ -1,13 +0,0 @@
|
||||
#! /bin/bash
|
||||
|
||||
# Check if we are running as root
|
||||
if [ "$EUID" -ne 0 ]; then
|
||||
echo "This script must be run as root"
|
||||
exit
|
||||
fi
|
||||
|
||||
# Run the benchmark binary
|
||||
../../target/release/bench
|
||||
|
||||
# Plot the results
|
||||
../../target/release/plot metrics.csv
|
||||
@@ -1,39 +0,0 @@
|
||||
[[benches]]
|
||||
name = "latency"
|
||||
upload = 250
|
||||
upload-delay = [10, 25, 50]
|
||||
download = 250
|
||||
download-delay = [10, 25, 50]
|
||||
upload-size = 1024
|
||||
download-size = 4096
|
||||
defer-decryption = true
|
||||
|
||||
[[benches]]
|
||||
name = "download_bandwidth"
|
||||
upload = 250
|
||||
upload-delay = 25
|
||||
download = [10, 25, 50, 100, 250]
|
||||
download-delay = 25
|
||||
upload-size = 1024
|
||||
download-size = 4096
|
||||
defer-decryption = true
|
||||
|
||||
[[benches]]
|
||||
name = "upload_bandwidth"
|
||||
upload = [10, 25, 50, 100, 250]
|
||||
upload-delay = 25
|
||||
download = 250
|
||||
download-delay = 25
|
||||
upload-size = 1024
|
||||
download-size = 4096
|
||||
defer-decryption = [false, true]
|
||||
|
||||
[[benches]]
|
||||
name = "download_volume"
|
||||
upload = 250
|
||||
upload-delay = 25
|
||||
download = 250
|
||||
download-delay = 25
|
||||
upload-size = 1024
|
||||
download-size = [1024, 4096, 16384, 65536]
|
||||
defer-decryption = true
|
||||
@@ -1,21 +0,0 @@
|
||||
FROM rust AS builder
|
||||
WORKDIR /usr/src/tlsn
|
||||
COPY . .
|
||||
RUN cd crates/benches && cargo build --release
|
||||
|
||||
FROM ubuntu:latest
|
||||
|
||||
RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \
|
||||
iproute2 \
|
||||
sudo \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY --from=builder ["/usr/src/tlsn/target/release/bench", "/usr/src/tlsn/target/release/prover", "/usr/src/tlsn/target/release/verifier", "/usr/src/tlsn/target/release/plot", "/usr/local/bin/"]
|
||||
|
||||
ENV PROVER_PATH="/usr/local/bin/prover"
|
||||
ENV VERIFIER_PATH="/usr/local/bin/verifier"
|
||||
|
||||
VOLUME [ "/benches" ]
|
||||
WORKDIR "/benches"
|
||||
CMD ["/bin/bash", "-c", "bench && plot /benches/metrics.csv && cat /benches/metrics.csv"]
|
||||
@@ -1,44 +0,0 @@
|
||||
use std::process::Command;
|
||||
|
||||
use tlsn_benches::{clean_up, set_up};
|
||||
|
||||
fn main() {
|
||||
let prover_path =
|
||||
std::env::var("PROVER_PATH").unwrap_or_else(|_| "../../target/release/prover".to_string());
|
||||
let verifier_path = std::env::var("VERIFIER_PATH")
|
||||
.unwrap_or_else(|_| "../../target/release/verifier".to_string());
|
||||
|
||||
if let Err(e) = set_up() {
|
||||
println!("Error setting up: {}", e);
|
||||
clean_up();
|
||||
}
|
||||
|
||||
// Run prover and verifier binaries in parallel
|
||||
let Ok(mut verifier) = Command::new("ip")
|
||||
.arg("netns")
|
||||
.arg("exec")
|
||||
.arg("verifier-ns")
|
||||
.arg(verifier_path)
|
||||
.spawn()
|
||||
else {
|
||||
println!("Failed to start verifier");
|
||||
return clean_up();
|
||||
};
|
||||
|
||||
let Ok(mut prover) = Command::new("ip")
|
||||
.arg("netns")
|
||||
.arg("exec")
|
||||
.arg("prover-ns")
|
||||
.arg(prover_path)
|
||||
.spawn()
|
||||
else {
|
||||
println!("Failed to start prover");
|
||||
return clean_up();
|
||||
};
|
||||
|
||||
// Wait for both to finish
|
||||
_ = prover.wait();
|
||||
_ = verifier.wait();
|
||||
|
||||
clean_up();
|
||||
}
|
||||
@@ -1,156 +0,0 @@
|
||||
use charming::{
|
||||
component::{
|
||||
Axis, DataView, Feature, Legend, Restore, SaveAsImage, Title, Toolbox, ToolboxDataZoom,
|
||||
},
|
||||
element::{NameLocation, Orient, Tooltip, Trigger},
|
||||
series::{Line, Scatter},
|
||||
theme::Theme,
|
||||
Chart, HtmlRenderer,
|
||||
};
|
||||
use tlsn_benches::metrics::Metrics;
|
||||
|
||||
const THEME: Theme = Theme::Default;
|
||||
|
||||
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let csv_file = std::env::args()
|
||||
.nth(1)
|
||||
.expect("Usage: plot <path_to_csv_file>");
|
||||
|
||||
let mut rdr = csv::Reader::from_path(csv_file)?;
|
||||
|
||||
// Prepare data for plotting
|
||||
let all_data: Vec<Metrics> = rdr
|
||||
.deserialize::<Metrics>()
|
||||
.collect::<Result<Vec<_>, _>>()?; // Attempt to collect all results, return an error if any fail
|
||||
|
||||
let _chart = runtime_vs_latency(&all_data)?;
|
||||
let _chart = runtime_vs_bandwidth(&all_data)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn runtime_vs_latency(all_data: &[Metrics]) -> Result<Chart, Box<dyn std::error::Error>> {
|
||||
const TITLE: &str = "Runtime vs Latency";
|
||||
|
||||
let data: Vec<Vec<f32>> = all_data
|
||||
.iter()
|
||||
.filter(|record| record.name == "latency")
|
||||
.map(|record| {
|
||||
let total_delay = record.upload_delay + record.download_delay; // Calculate the sum of upload and download delays.
|
||||
vec![total_delay as f32, record.runtime as f32]
|
||||
})
|
||||
.collect();
|
||||
|
||||
// https://github.com/yuankunzhang/charming
|
||||
let chart = Chart::new()
|
||||
.title(Title::new().text(TITLE))
|
||||
.tooltip(Tooltip::new().trigger(Trigger::Axis))
|
||||
.legend(Legend::new().orient(Orient::Vertical))
|
||||
.toolbox(
|
||||
Toolbox::new().show(true).feature(
|
||||
Feature::new()
|
||||
.save_as_image(SaveAsImage::new())
|
||||
.restore(Restore::new())
|
||||
.data_zoom(ToolboxDataZoom::new().y_axis_index("none"))
|
||||
.data_view(DataView::new().read_only(false)),
|
||||
),
|
||||
)
|
||||
.x_axis(
|
||||
Axis::new()
|
||||
.scale(true)
|
||||
.name("Upload + Download Latency (ms)")
|
||||
.name_location(NameLocation::Center),
|
||||
)
|
||||
.y_axis(
|
||||
Axis::new()
|
||||
.scale(true)
|
||||
.name("Runtime (s)")
|
||||
.name_location(NameLocation::Middle),
|
||||
)
|
||||
.series(
|
||||
Scatter::new()
|
||||
.name("Combined Latency")
|
||||
.symbol_size(10)
|
||||
.data(data),
|
||||
);
|
||||
|
||||
// Save the chart as HTML file.
|
||||
HtmlRenderer::new(TITLE, 1000, 800)
|
||||
.theme(THEME)
|
||||
.save(&chart, "runtime_vs_latency.html")
|
||||
.unwrap();
|
||||
|
||||
Ok(chart)
|
||||
}
|
||||
|
||||
fn runtime_vs_bandwidth(all_data: &[Metrics]) -> Result<Chart, Box<dyn std::error::Error>> {
|
||||
const TITLE: &str = "Runtime vs Bandwidth";
|
||||
|
||||
let download_data: Vec<Vec<f32>> = all_data
|
||||
.iter()
|
||||
.filter(|record| record.name == "download_bandwidth")
|
||||
.map(|record| vec![record.download as f32, record.runtime as f32])
|
||||
.collect();
|
||||
let upload_deferred_data: Vec<Vec<f32>> = all_data
|
||||
.iter()
|
||||
.filter(|record| record.name == "upload_bandwidth" && record.defer_decryption)
|
||||
.map(|record| vec![record.upload as f32, record.runtime as f32])
|
||||
.collect();
|
||||
let upload_non_deferred_data: Vec<Vec<f32>> = all_data
|
||||
.iter()
|
||||
.filter(|record| record.name == "upload_bandwidth" && !record.defer_decryption)
|
||||
.map(|record| vec![record.upload as f32, record.runtime as f32])
|
||||
.collect();
|
||||
|
||||
// https://github.com/yuankunzhang/charming
|
||||
let chart = Chart::new()
|
||||
.title(Title::new().text(TITLE))
|
||||
.tooltip(Tooltip::new().trigger(Trigger::Axis))
|
||||
.legend(Legend::new().orient(Orient::Vertical))
|
||||
.toolbox(
|
||||
Toolbox::new().show(true).feature(
|
||||
Feature::new()
|
||||
.save_as_image(SaveAsImage::new())
|
||||
.restore(Restore::new())
|
||||
.data_zoom(ToolboxDataZoom::new().y_axis_index("none"))
|
||||
.data_view(DataView::new().read_only(false)),
|
||||
),
|
||||
)
|
||||
.x_axis(
|
||||
Axis::new()
|
||||
.scale(true)
|
||||
.name("Bandwidth (Mbps)")
|
||||
.name_location(NameLocation::Center),
|
||||
)
|
||||
.y_axis(
|
||||
Axis::new()
|
||||
.scale(true)
|
||||
.name("Runtime (s)")
|
||||
.name_location(NameLocation::Middle),
|
||||
)
|
||||
.series(
|
||||
Line::new()
|
||||
.name("Download bandwidth")
|
||||
.symbol_size(10)
|
||||
.data(download_data),
|
||||
)
|
||||
.series(
|
||||
Line::new()
|
||||
.name("Upload bandwidth (deferred decryption)")
|
||||
.symbol_size(10)
|
||||
.data(upload_deferred_data),
|
||||
)
|
||||
.series(
|
||||
Line::new()
|
||||
.name("Upload bandwidth")
|
||||
.symbol_size(10)
|
||||
.data(upload_non_deferred_data),
|
||||
);
|
||||
// Save the chart as HTML file.
|
||||
HtmlRenderer::new(TITLE, 1000, 800)
|
||||
.theme(THEME)
|
||||
.save(&chart, "runtime_vs_bandwidth.html")
|
||||
.unwrap();
|
||||
|
||||
Ok(chart)
|
||||
}
|
||||
@@ -1,197 +0,0 @@
|
||||
use std::{
|
||||
io::Write,
|
||||
sync::{
|
||||
atomic::{AtomicU64, Ordering},
|
||||
Arc,
|
||||
},
|
||||
time::Instant,
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use futures::{AsyncReadExt, AsyncWriteExt};
|
||||
use tls_core::verify::WebPkiVerifier;
|
||||
use tlsn_benches::{
|
||||
config::{BenchInstance, Config},
|
||||
metrics::Metrics,
|
||||
set_interface, PROVER_INTERFACE,
|
||||
};
|
||||
use tlsn_common::config::ProtocolConfig;
|
||||
use tlsn_core::{transcript::Idx, CryptoProvider};
|
||||
use tlsn_server_fixture::bind;
|
||||
use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN};
|
||||
use tokio::io::{AsyncRead, AsyncWrite};
|
||||
use tokio_util::{
|
||||
compat::TokioAsyncReadCompatExt,
|
||||
io::{InspectReader, InspectWriter},
|
||||
};
|
||||
|
||||
use tlsn_prover::{Prover, ProverConfig};
|
||||
use tracing_subscriber::{fmt::format::FmtSpan, EnvFilter};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let config_path = std::env::var("CFG").unwrap_or_else(|_| "bench.toml".to_string());
|
||||
let config: Config = toml::from_str(
|
||||
&std::fs::read_to_string(config_path).context("failed to read config file")?,
|
||||
)
|
||||
.context("failed to parse config")?;
|
||||
|
||||
tracing_subscriber::fmt()
|
||||
.with_env_filter(EnvFilter::from_default_env())
|
||||
.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
|
||||
.init();
|
||||
|
||||
let ip = std::env::var("VERIFIER_IP").unwrap_or_else(|_| "10.10.1.1".to_string());
|
||||
let port: u16 = std::env::var("VERIFIER_PORT")
|
||||
.map(|port| port.parse().expect("port is valid u16"))
|
||||
.unwrap_or(8000);
|
||||
let verifier_host = (ip.as_str(), port);
|
||||
|
||||
let mut file = std::fs::OpenOptions::new()
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open("metrics.csv")
|
||||
.context("failed to open metrics file")?;
|
||||
|
||||
{
|
||||
let mut metric_wtr = csv::Writer::from_writer(&mut file);
|
||||
for bench in config.benches {
|
||||
let instances = bench.flatten();
|
||||
for instance in instances {
|
||||
println!("{:?}", &instance);
|
||||
|
||||
let io = tokio::net::TcpStream::connect(verifier_host)
|
||||
.await
|
||||
.context("failed to open tcp connection")?;
|
||||
metric_wtr.serialize(
|
||||
run_instance(instance, io)
|
||||
.await
|
||||
.context("failed to run instance")?,
|
||||
)?;
|
||||
metric_wtr.flush()?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
file.flush()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_instance<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
|
||||
instance: BenchInstance,
|
||||
io: S,
|
||||
) -> anyhow::Result<Metrics> {
|
||||
let uploaded = Arc::new(AtomicU64::new(0));
|
||||
let downloaded = Arc::new(AtomicU64::new(0));
|
||||
let io = InspectWriter::new(
|
||||
InspectReader::new(io, {
|
||||
let downloaded = downloaded.clone();
|
||||
move |data| {
|
||||
downloaded.fetch_add(data.len() as u64, Ordering::Relaxed);
|
||||
}
|
||||
}),
|
||||
{
|
||||
let uploaded = uploaded.clone();
|
||||
move |data| {
|
||||
uploaded.fetch_add(data.len() as u64, Ordering::Relaxed);
|
||||
}
|
||||
},
|
||||
);
|
||||
|
||||
let BenchInstance {
|
||||
name,
|
||||
upload,
|
||||
upload_delay,
|
||||
download,
|
||||
download_delay,
|
||||
upload_size,
|
||||
download_size,
|
||||
defer_decryption,
|
||||
} = instance.clone();
|
||||
|
||||
set_interface(PROVER_INTERFACE, upload, 1, upload_delay)?;
|
||||
|
||||
let (client_conn, server_conn) = tokio::io::duplex(2 << 16);
|
||||
tokio::spawn(bind(server_conn.compat()));
|
||||
|
||||
let start_time = Instant::now();
|
||||
|
||||
let provider = CryptoProvider {
|
||||
cert: WebPkiVerifier::new(root_store(), None),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let protocol_config = if defer_decryption {
|
||||
ProtocolConfig::builder()
|
||||
.max_sent_data(upload_size + 256)
|
||||
.max_recv_data(download_size + 256)
|
||||
.build()
|
||||
.unwrap()
|
||||
} else {
|
||||
ProtocolConfig::builder()
|
||||
.max_sent_data(upload_size + 256)
|
||||
.max_recv_data(download_size + 256)
|
||||
.max_recv_data_online(download_size + 256)
|
||||
.build()
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let prover = Prover::new(
|
||||
ProverConfig::builder()
|
||||
.server_name(SERVER_DOMAIN)
|
||||
.protocol_config(protocol_config)
|
||||
.defer_decryption_from_start(defer_decryption)
|
||||
.crypto_provider(provider)
|
||||
.build()
|
||||
.context("invalid prover config")?,
|
||||
)
|
||||
.setup(io.compat())
|
||||
.await?;
|
||||
|
||||
let (mut mpc_tls_connection, prover_fut) = prover.connect(client_conn.compat()).await.unwrap();
|
||||
|
||||
let prover_task = tokio::spawn(prover_fut);
|
||||
|
||||
let request = format!(
|
||||
"GET /bytes?size={} HTTP/1.1\r\nConnection: close\r\nData: {}\r\n\r\n",
|
||||
download_size,
|
||||
String::from_utf8(vec![0x42u8; upload_size]).unwrap(),
|
||||
);
|
||||
|
||||
mpc_tls_connection.write_all(request.as_bytes()).await?;
|
||||
mpc_tls_connection.close().await?;
|
||||
|
||||
let mut response = vec![];
|
||||
mpc_tls_connection.read_to_end(&mut response).await?;
|
||||
|
||||
let mut prover = prover_task.await??.start_prove();
|
||||
|
||||
let (sent_len, recv_len) = prover.transcript().len();
|
||||
prover
|
||||
.prove_transcript(Idx::new(0..sent_len), Idx::new(0..recv_len))
|
||||
.await?;
|
||||
prover.finalize().await?;
|
||||
|
||||
Ok(Metrics {
|
||||
name,
|
||||
upload,
|
||||
upload_delay,
|
||||
download,
|
||||
download_delay,
|
||||
upload_size,
|
||||
download_size,
|
||||
defer_decryption,
|
||||
runtime: Instant::now().duration_since(start_time).as_secs(),
|
||||
uploaded: uploaded.load(Ordering::SeqCst),
|
||||
downloaded: downloaded.load(Ordering::SeqCst),
|
||||
})
|
||||
}
|
||||
|
||||
fn root_store() -> tls_core::anchors::RootCertStore {
|
||||
let mut root_store = tls_core::anchors::RootCertStore::empty();
|
||||
root_store
|
||||
.add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
|
||||
.unwrap();
|
||||
root_store
|
||||
}
|
||||
@@ -1,100 +0,0 @@
|
||||
use anyhow::Context;
|
||||
use tokio::io::{AsyncRead, AsyncWrite};
|
||||
use tokio_util::compat::TokioAsyncReadCompatExt;
|
||||
use tracing_subscriber::{fmt::format::FmtSpan, EnvFilter};
|
||||
|
||||
use tls_core::verify::WebPkiVerifier;
|
||||
use tlsn_benches::{
|
||||
config::{BenchInstance, Config},
|
||||
set_interface, VERIFIER_INTERFACE,
|
||||
};
|
||||
use tlsn_common::config::ProtocolConfigValidator;
|
||||
use tlsn_core::CryptoProvider;
|
||||
use tlsn_server_fixture_certs::CA_CERT_DER;
|
||||
use tlsn_verifier::{Verifier, VerifierConfig};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let config_path = std::env::var("CFG").unwrap_or_else(|_| "bench.toml".to_string());
|
||||
let config: Config = toml::from_str(
|
||||
&std::fs::read_to_string(config_path).context("failed to read config file")?,
|
||||
)
|
||||
.context("failed to parse config")?;
|
||||
|
||||
tracing_subscriber::fmt()
|
||||
.with_env_filter(EnvFilter::from_default_env())
|
||||
.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
|
||||
.init();
|
||||
|
||||
let ip = std::env::var("VERIFIER_IP").unwrap_or_else(|_| "10.10.1.1".to_string());
|
||||
let port: u16 = std::env::var("VERIFIER_PORT")
|
||||
.map(|port| port.parse().expect("port is valid u16"))
|
||||
.unwrap_or(8000);
|
||||
let host = (ip.as_str(), port);
|
||||
|
||||
let listener = tokio::net::TcpListener::bind(host)
|
||||
.await
|
||||
.context("failed to bind to port")?;
|
||||
|
||||
for bench in config.benches {
|
||||
for instance in bench.flatten() {
|
||||
let (io, _) = listener
|
||||
.accept()
|
||||
.await
|
||||
.context("failed to accept connection")?;
|
||||
run_instance(instance, io)
|
||||
.await
|
||||
.context("failed to run instance")?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_instance<S: AsyncWrite + AsyncRead + Send + Sync + Unpin + 'static>(
|
||||
instance: BenchInstance,
|
||||
io: S,
|
||||
) -> anyhow::Result<()> {
|
||||
let BenchInstance {
|
||||
download,
|
||||
download_delay,
|
||||
upload_size,
|
||||
download_size,
|
||||
..
|
||||
} = instance;
|
||||
|
||||
set_interface(VERIFIER_INTERFACE, download, 1, download_delay)?;
|
||||
|
||||
let provider = CryptoProvider {
|
||||
cert: cert_verifier(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let config_validator = ProtocolConfigValidator::builder()
|
||||
.max_sent_data(upload_size + 256)
|
||||
.max_recv_data(download_size + 256)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let verifier = Verifier::new(
|
||||
VerifierConfig::builder()
|
||||
.protocol_config_validator(config_validator)
|
||||
.crypto_provider(provider)
|
||||
.build()?,
|
||||
);
|
||||
|
||||
_ = verifier.verify(io.compat()).await?;
|
||||
|
||||
println!("verifier done");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn cert_verifier() -> WebPkiVerifier {
|
||||
let mut root_store = tls_core::anchors::RootCertStore::empty();
|
||||
root_store
|
||||
.add(&tls_core::key::Certificate(CA_CERT_DER.to_vec()))
|
||||
.unwrap();
|
||||
|
||||
WebPkiVerifier::new(root_store, None)
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
# Run the TLSN benches with Docker
|
||||
|
||||
In the root folder of this repository, run:
|
||||
```
|
||||
docker build -t tlsn-bench . -f ./crates/benches/benches.Dockerfile
|
||||
```
|
||||
|
||||
Next run the benches with:
|
||||
```
|
||||
docker run -it --privileged -v ./crates/benches/:/benches tlsn-bench
|
||||
```
|
||||
The `--privileged` parameter is required because this test bench needs permission to create networks with certain parameters
|
||||
@@ -1,111 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(untagged)]
|
||||
pub enum Field<T> {
|
||||
Single(T),
|
||||
Multiple(Vec<T>),
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct Config {
|
||||
pub benches: Vec<Bench>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct Bench {
|
||||
pub name: String,
|
||||
pub upload: Field<usize>,
|
||||
#[serde(rename = "upload-delay")]
|
||||
pub upload_delay: Field<usize>,
|
||||
pub download: Field<usize>,
|
||||
#[serde(rename = "download-delay")]
|
||||
pub download_delay: Field<usize>,
|
||||
#[serde(rename = "upload-size")]
|
||||
pub upload_size: Field<usize>,
|
||||
#[serde(rename = "download-size")]
|
||||
pub download_size: Field<usize>,
|
||||
#[serde(rename = "defer-decryption")]
|
||||
pub defer_decryption: Field<bool>,
|
||||
}
|
||||
|
||||
impl Bench {
|
||||
/// Flattens the config into a list of instances
|
||||
pub fn flatten(self) -> Vec<BenchInstance> {
|
||||
let mut instances = vec![];
|
||||
|
||||
let upload = match self.upload {
|
||||
Field::Single(u) => vec![u],
|
||||
Field::Multiple(u) => u,
|
||||
};
|
||||
|
||||
let upload_delay = match self.upload_delay {
|
||||
Field::Single(u) => vec![u],
|
||||
Field::Multiple(u) => u,
|
||||
};
|
||||
|
||||
let download = match self.download {
|
||||
Field::Single(u) => vec![u],
|
||||
Field::Multiple(u) => u,
|
||||
};
|
||||
|
||||
let download_latency = match self.download_delay {
|
||||
Field::Single(u) => vec![u],
|
||||
Field::Multiple(u) => u,
|
||||
};
|
||||
|
||||
let upload_size = match self.upload_size {
|
||||
Field::Single(u) => vec![u],
|
||||
Field::Multiple(u) => u,
|
||||
};
|
||||
|
||||
let download_size = match self.download_size {
|
||||
Field::Single(u) => vec![u],
|
||||
Field::Multiple(u) => u,
|
||||
};
|
||||
|
||||
let defer_decryption = match self.defer_decryption {
|
||||
Field::Single(u) => vec![u],
|
||||
Field::Multiple(u) => u,
|
||||
};
|
||||
|
||||
for u in upload {
|
||||
for ul in &upload_delay {
|
||||
for d in &download {
|
||||
for dl in &download_latency {
|
||||
for us in &upload_size {
|
||||
for ds in &download_size {
|
||||
for dd in &defer_decryption {
|
||||
instances.push(BenchInstance {
|
||||
name: self.name.clone(),
|
||||
upload: u,
|
||||
upload_delay: *ul,
|
||||
download: *d,
|
||||
download_delay: *dl,
|
||||
upload_size: *us,
|
||||
download_size: *ds,
|
||||
defer_decryption: *dd,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
instances
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct BenchInstance {
|
||||
pub name: String,
|
||||
pub upload: usize,
|
||||
pub upload_delay: usize,
|
||||
pub download: usize,
|
||||
pub download_delay: usize,
|
||||
pub upload_size: usize,
|
||||
pub download_size: usize,
|
||||
pub defer_decryption: bool,
|
||||
}
|
||||
@@ -1,255 +0,0 @@
|
||||
pub mod config;
|
||||
pub mod metrics;
|
||||
|
||||
use std::{io, process::Command};
|
||||
|
||||
pub const PROVER_NAMESPACE: &str = "prover-ns";
|
||||
pub const PROVER_INTERFACE: &str = "prover-veth";
|
||||
pub const PROVER_SUBNET: &str = "10.10.1.0/24";
|
||||
pub const VERIFIER_NAMESPACE: &str = "verifier-ns";
|
||||
pub const VERIFIER_INTERFACE: &str = "verifier-veth";
|
||||
pub const VERIFIER_SUBNET: &str = "10.10.1.1/24";
|
||||
|
||||
pub fn set_up() -> io::Result<()> {
|
||||
// Create network namespaces
|
||||
create_network_namespace(PROVER_NAMESPACE)?;
|
||||
create_network_namespace(VERIFIER_NAMESPACE)?;
|
||||
|
||||
// Create veth pair and attach to namespaces
|
||||
create_veth_pair(
|
||||
PROVER_NAMESPACE,
|
||||
PROVER_INTERFACE,
|
||||
VERIFIER_NAMESPACE,
|
||||
VERIFIER_INTERFACE,
|
||||
)?;
|
||||
|
||||
// Set devices up
|
||||
set_device_up(PROVER_NAMESPACE, PROVER_INTERFACE)?;
|
||||
set_device_up(VERIFIER_NAMESPACE, VERIFIER_INTERFACE)?;
|
||||
|
||||
// Assign IPs
|
||||
assign_ip_to_interface(PROVER_NAMESPACE, PROVER_INTERFACE, PROVER_SUBNET)?;
|
||||
assign_ip_to_interface(VERIFIER_NAMESPACE, VERIFIER_INTERFACE, VERIFIER_SUBNET)?;
|
||||
|
||||
// Set default routes
|
||||
set_default_route(
|
||||
PROVER_NAMESPACE,
|
||||
PROVER_INTERFACE,
|
||||
PROVER_SUBNET.split('/').next().unwrap(),
|
||||
)?;
|
||||
set_default_route(
|
||||
VERIFIER_NAMESPACE,
|
||||
VERIFIER_INTERFACE,
|
||||
VERIFIER_SUBNET.split('/').next().unwrap(),
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn clean_up() {
|
||||
// Delete interface pair
|
||||
if let Err(e) = Command::new("ip")
|
||||
.args([
|
||||
"netns",
|
||||
"exec",
|
||||
PROVER_NAMESPACE,
|
||||
"ip",
|
||||
"link",
|
||||
"delete",
|
||||
PROVER_INTERFACE,
|
||||
])
|
||||
.status()
|
||||
{
|
||||
println!("Error deleting interface {}: {}", PROVER_INTERFACE, e);
|
||||
}
|
||||
|
||||
// Delete namespaces
|
||||
if let Err(e) = Command::new("ip")
|
||||
.args(["netns", "del", PROVER_NAMESPACE])
|
||||
.status()
|
||||
{
|
||||
println!("Error deleting namespace {}: {}", PROVER_NAMESPACE, e);
|
||||
}
|
||||
|
||||
if let Err(e) = Command::new("ip")
|
||||
.args(["netns", "del", VERIFIER_NAMESPACE])
|
||||
.status()
|
||||
{
|
||||
println!("Error deleting namespace {}: {}", VERIFIER_NAMESPACE, e);
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets the interface parameters.
|
||||
///
|
||||
/// Must be run in the correct namespace.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `egress` - The egress bandwidth in mbps.
|
||||
/// * `burst` - The burst in mbps.
|
||||
/// * `delay` - The delay in ms.
|
||||
pub fn set_interface(interface: &str, egress: usize, burst: usize, delay: usize) -> io::Result<()> {
|
||||
// Clear rules
|
||||
_ = Command::new("tc")
|
||||
.arg("qdisc")
|
||||
.arg("del")
|
||||
.arg("dev")
|
||||
.arg(interface)
|
||||
.arg("root")
|
||||
.status();
|
||||
|
||||
// Egress
|
||||
Command::new("tc")
|
||||
.arg("qdisc")
|
||||
.arg("add")
|
||||
.arg("dev")
|
||||
.arg(interface)
|
||||
.arg("root")
|
||||
.arg("handle")
|
||||
.arg("1:")
|
||||
.arg("tbf")
|
||||
.arg("rate")
|
||||
.arg(format!("{}mbit", egress))
|
||||
.arg("burst")
|
||||
.arg(format!("{}mbit", burst))
|
||||
.arg("latency")
|
||||
.arg("60s")
|
||||
.status()?;
|
||||
|
||||
// Delay
|
||||
Command::new("tc")
|
||||
.arg("qdisc")
|
||||
.arg("add")
|
||||
.arg("dev")
|
||||
.arg(interface)
|
||||
.arg("parent")
|
||||
.arg("1:1")
|
||||
.arg("handle")
|
||||
.arg("10:")
|
||||
.arg("netem")
|
||||
.arg("delay")
|
||||
.arg(format!("{}ms", delay))
|
||||
.status()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Create a network namespace with the given name if it does not already exist.
|
||||
fn create_network_namespace(name: &str) -> io::Result<()> {
|
||||
// Check if namespace already exists
|
||||
if Command::new("ip")
|
||||
.args(["netns", "list"])
|
||||
.output()?
|
||||
.stdout
|
||||
.windows(name.len())
|
||||
.any(|ns| ns == name.as_bytes())
|
||||
{
|
||||
println!("Namespace {} already exists", name);
|
||||
return Ok(());
|
||||
} else {
|
||||
println!("Creating namespace {}", name);
|
||||
Command::new("ip").args(["netns", "add", name]).status()?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn create_veth_pair(
|
||||
left_namespace: &str,
|
||||
left_interface: &str,
|
||||
right_namespace: &str,
|
||||
right_interface: &str,
|
||||
) -> io::Result<()> {
|
||||
// Check if interfaces are already present in namespaces
|
||||
if is_interface_present_in_namespace(left_namespace, left_interface)?
|
||||
|| is_interface_present_in_namespace(right_namespace, right_interface)?
|
||||
{
|
||||
println!("Virtual interface already exists.");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Create veth pair
|
||||
Command::new("ip")
|
||||
.args([
|
||||
"link",
|
||||
"add",
|
||||
left_interface,
|
||||
"type",
|
||||
"veth",
|
||||
"peer",
|
||||
"name",
|
||||
right_interface,
|
||||
])
|
||||
.status()?;
|
||||
|
||||
println!(
|
||||
"Created veth pair {} and {}",
|
||||
left_interface, right_interface
|
||||
);
|
||||
|
||||
// Attach veth pair to namespaces
|
||||
attach_interface_to_namespace(left_namespace, left_interface)?;
|
||||
attach_interface_to_namespace(right_namespace, right_interface)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn attach_interface_to_namespace(namespace: &str, interface: &str) -> io::Result<()> {
|
||||
Command::new("ip")
|
||||
.args(["link", "set", interface, "netns", namespace])
|
||||
.status()?;
|
||||
|
||||
println!("Attached {} to namespace {}", interface, namespace);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn set_default_route(namespace: &str, interface: &str, ip: &str) -> io::Result<()> {
|
||||
Command::new("ip")
|
||||
.args([
|
||||
"netns", "exec", namespace, "ip", "route", "add", "default", "via", ip, "dev",
|
||||
interface,
|
||||
])
|
||||
.status()?;
|
||||
|
||||
println!(
|
||||
"Set default route for namespace {} ip {} to {}",
|
||||
namespace, ip, interface
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn is_interface_present_in_namespace(
|
||||
namespace: &str,
|
||||
interface: &str,
|
||||
) -> Result<bool, std::io::Error> {
|
||||
Ok(Command::new("ip")
|
||||
.args([
|
||||
"netns", "exec", namespace, "ip", "link", "list", "dev", interface,
|
||||
])
|
||||
.output()?
|
||||
.stdout
|
||||
.windows(interface.len())
|
||||
.any(|ns| ns == interface.as_bytes()))
|
||||
}
|
||||
|
||||
fn set_device_up(namespace: &str, interface: &str) -> io::Result<()> {
|
||||
Command::new("ip")
|
||||
.args([
|
||||
"netns", "exec", namespace, "ip", "link", "set", interface, "up",
|
||||
])
|
||||
.status()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn assign_ip_to_interface(namespace: &str, interface: &str, ip: &str) -> io::Result<()> {
|
||||
Command::new("ip")
|
||||
.args([
|
||||
"netns", "exec", namespace, "ip", "addr", "add", ip, "dev", interface,
|
||||
])
|
||||
.status()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,26 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Metrics {
|
||||
pub name: String,
|
||||
/// Upload bandwidth in Mbps.
|
||||
pub upload: usize,
|
||||
/// Upload latency in ms.
|
||||
pub upload_delay: usize,
|
||||
/// Download bandwidth in Mbps.
|
||||
pub download: usize,
|
||||
/// Download latency in ms.
|
||||
pub download_delay: usize,
|
||||
/// Total bytes sent to the server.
|
||||
pub upload_size: usize,
|
||||
/// Total bytes received from the server.
|
||||
pub download_size: usize,
|
||||
/// Whether deferred decryption was used.
|
||||
pub defer_decryption: bool,
|
||||
/// The total runtime of the benchmark in seconds.
|
||||
pub runtime: u64,
|
||||
/// The total amount of data uploaded to the verifier in bytes.
|
||||
pub uploaded: u64,
|
||||
/// The total amount of data downloaded from the verifier in bytes.
|
||||
pub downloaded: u64,
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
[package]
|
||||
name = "tlsn-common"
|
||||
description = "Common code shared between tlsn-prover and tlsn-verifier"
|
||||
version = "0.1.0-alpha.7"
|
||||
edition = "2021"
|
||||
|
||||
[features]
|
||||
default = []
|
||||
|
||||
[dependencies]
|
||||
tlsn-core = { workspace = true }
|
||||
mpz-common = { workspace = true }
|
||||
mpz-garble = { workspace = true }
|
||||
mpz-ot = { workspace = true }
|
||||
|
||||
derive_builder = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
once_cell = { workspace = true }
|
||||
serio = { workspace = true, features = ["codec", "bincode"] }
|
||||
thiserror = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
uid-mux = { workspace = true, features = ["serio"] }
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
tlsn-utils = { workspace = true }
|
||||
semver = { version = "1.0", features = ["serde"] }
|
||||
|
||||
[dev-dependencies]
|
||||
rstest = { workspace = true }
|
||||
@@ -1,40 +0,0 @@
|
||||
//! Common code shared between `tlsn-prover` and `tlsn-verifier`.
|
||||
|
||||
#![deny(missing_docs, unreachable_pub, unused_must_use)]
|
||||
#![deny(clippy::all)]
|
||||
#![forbid(unsafe_code)]
|
||||
|
||||
pub mod config;
|
||||
pub mod msg;
|
||||
pub mod mux;
|
||||
|
||||
use serio::codec::Codec;
|
||||
|
||||
use crate::mux::MuxControl;
|
||||
|
||||
/// IO type.
|
||||
pub type Io = <serio::codec::Bincode as Codec<uid_mux::yamux::Stream>>::Framed;
|
||||
/// Base OT sender.
|
||||
pub type BaseOTSender = mpz_ot::chou_orlandi::Sender;
|
||||
/// Base OT receiver.
|
||||
pub type BaseOTReceiver = mpz_ot::chou_orlandi::Receiver;
|
||||
/// OT sender.
|
||||
pub type OTSender = mpz_ot::kos::SharedSender<BaseOTReceiver>;
|
||||
/// OT receiver.
|
||||
pub type OTReceiver = mpz_ot::kos::SharedReceiver<BaseOTSender>;
|
||||
/// MPC executor.
|
||||
pub type Executor = mpz_common::executor::MTExecutor<MuxControl>;
|
||||
/// MPC thread context.
|
||||
pub type Context = mpz_common::executor::MTContext<MuxControl, Io>;
|
||||
/// DEAP thread.
|
||||
pub type DEAPThread = mpz_garble::protocol::deap::DEAPThread<Context, OTSender, OTReceiver>;
|
||||
|
||||
/// The party's role in the TLSN protocol.
|
||||
///
|
||||
/// A Notary is classified as a Verifier.
|
||||
pub enum Role {
|
||||
/// The prover.
|
||||
Prover,
|
||||
/// The verifier.
|
||||
Verifier,
|
||||
}
|
||||
@@ -1,41 +0,0 @@
|
||||
[package]
|
||||
name = "tlsn-aead"
|
||||
authors = ["TLSNotary Team"]
|
||||
description = "This crate provides an implementation of a two-party version of AES-GCM behind an AEAD trait"
|
||||
keywords = ["tls", "mpc", "2pc", "aead", "aes", "aes-gcm"]
|
||||
categories = ["cryptography"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
version = "0.1.0-alpha.7"
|
||||
edition = "2021"
|
||||
|
||||
[lib]
|
||||
name = "aead"
|
||||
|
||||
[features]
|
||||
default = ["mock"]
|
||||
mock = ["mpz-common/test-utils", "dep:mpz-ot"]
|
||||
|
||||
[dependencies]
|
||||
tlsn-block-cipher = { workspace = true }
|
||||
tlsn-stream-cipher = { workspace = true }
|
||||
tlsn-universal-hash = { workspace = true }
|
||||
|
||||
mpz-common = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
|
||||
mpz-core = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
|
||||
mpz-garble = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
|
||||
mpz-ot = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac", optional = true, features = [
|
||||
"ideal",
|
||||
] }
|
||||
|
||||
serio = { workspace = true }
|
||||
|
||||
async-trait = { workspace = true }
|
||||
derive_builder = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { version = "1", features = ["macros", "rt", "rt-multi-thread"] }
|
||||
aes-gcm = { workspace = true }
|
||||
@@ -1,36 +0,0 @@
|
||||
use derive_builder::Builder;
|
||||
|
||||
/// Protocol role.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
#[allow(missing_docs)]
|
||||
pub enum Role {
|
||||
Leader,
|
||||
Follower,
|
||||
}
|
||||
|
||||
/// Configuration for AES-GCM.
|
||||
#[derive(Debug, Clone, Builder)]
|
||||
pub struct AesGcmConfig {
|
||||
/// The id of this instance.
|
||||
#[builder(setter(into))]
|
||||
id: String,
|
||||
/// The protocol role.
|
||||
role: Role,
|
||||
}
|
||||
|
||||
impl AesGcmConfig {
|
||||
/// Creates a new builder for the AES-GCM configuration.
|
||||
pub fn builder() -> AesGcmConfigBuilder {
|
||||
AesGcmConfigBuilder::default()
|
||||
}
|
||||
|
||||
/// Returns the id of this instance.
|
||||
pub fn id(&self) -> &str {
|
||||
&self.id
|
||||
}
|
||||
|
||||
/// Returns the protocol role.
|
||||
pub fn role(&self) -> &Role {
|
||||
&self.role
|
||||
}
|
||||
}
|
||||
@@ -1,102 +0,0 @@
|
||||
use std::fmt::Display;
|
||||
|
||||
/// AES-GCM error.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub struct AesGcmError {
|
||||
kind: ErrorKind,
|
||||
#[source]
|
||||
source: Option<Box<dyn std::error::Error + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl AesGcmError {
|
||||
pub(crate) fn new<E>(kind: ErrorKind, source: E) -> Self
|
||||
where
|
||||
E: Into<Box<dyn std::error::Error + Send + Sync>>,
|
||||
{
|
||||
Self {
|
||||
kind,
|
||||
source: Some(source.into()),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn kind(&self) -> ErrorKind {
|
||||
self.kind
|
||||
}
|
||||
|
||||
pub(crate) fn invalid_tag() -> Self {
|
||||
Self {
|
||||
kind: ErrorKind::Tag,
|
||||
source: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn peer(reason: impl Into<String>) -> Self {
|
||||
Self {
|
||||
kind: ErrorKind::PeerMisbehaved,
|
||||
source: Some(reason.into().into()),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn payload(reason: impl Into<String>) -> Self {
|
||||
Self {
|
||||
kind: ErrorKind::Payload,
|
||||
source: Some(reason.into().into()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq)]
|
||||
pub(crate) enum ErrorKind {
|
||||
Io,
|
||||
BlockCipher,
|
||||
StreamCipher,
|
||||
Ghash,
|
||||
Tag,
|
||||
PeerMisbehaved,
|
||||
Payload,
|
||||
}
|
||||
|
||||
impl Display for AesGcmError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self.kind {
|
||||
ErrorKind::Io => write!(f, "io error")?,
|
||||
ErrorKind::BlockCipher => write!(f, "block cipher error")?,
|
||||
ErrorKind::StreamCipher => write!(f, "stream cipher error")?,
|
||||
ErrorKind::Ghash => write!(f, "ghash error")?,
|
||||
ErrorKind::Tag => write!(f, "payload has corrupted tag")?,
|
||||
ErrorKind::PeerMisbehaved => write!(f, "peer misbehaved")?,
|
||||
ErrorKind::Payload => write!(f, "payload error")?,
|
||||
}
|
||||
|
||||
if let Some(source) = &self.source {
|
||||
write!(f, " caused by: {}", source)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<std::io::Error> for AesGcmError {
|
||||
fn from(err: std::io::Error) -> Self {
|
||||
Self::new(ErrorKind::Io, err)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<block_cipher::BlockCipherError> for AesGcmError {
|
||||
fn from(err: block_cipher::BlockCipherError) -> Self {
|
||||
Self::new(ErrorKind::BlockCipher, err)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<tlsn_stream_cipher::StreamCipherError> for AesGcmError {
|
||||
fn from(err: tlsn_stream_cipher::StreamCipherError) -> Self {
|
||||
Self::new(ErrorKind::StreamCipher, err)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<tlsn_universal_hash::UniversalHashError> for AesGcmError {
|
||||
fn from(err: tlsn_universal_hash::UniversalHashError) -> Self {
|
||||
Self::new(ErrorKind::Ghash, err)
|
||||
}
|
||||
}
|
||||
@@ -1,96 +0,0 @@
|
||||
//! Mock implementation of AES-GCM for testing purposes.
|
||||
|
||||
use block_cipher::{BlockCipherConfig, MpcBlockCipher};
|
||||
use mpz_common::executor::{test_st_executor, STExecutor};
|
||||
use mpz_garble::protocol::deap::mock::{MockFollower, MockLeader};
|
||||
use mpz_ot::ideal::ot::ideal_ot;
|
||||
use serio::channel::MemoryDuplex;
|
||||
use tlsn_stream_cipher::{MpcStreamCipher, StreamCipherConfig};
|
||||
use tlsn_universal_hash::ghash::ideal_ghash;
|
||||
|
||||
use super::*;
|
||||
|
||||
/// Creates a mock AES-GCM pair.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `id` - The id of the AES-GCM instances.
|
||||
/// * `(leader, follower)` - The leader and follower vms.
|
||||
/// * `leader_config` - The configuration of the leader.
|
||||
/// * `follower_config` - The configuration of the follower.
|
||||
pub async fn create_mock_aes_gcm_pair(
|
||||
id: &str,
|
||||
(leader, follower): (MockLeader, MockFollower),
|
||||
leader_config: AesGcmConfig,
|
||||
follower_config: AesGcmConfig,
|
||||
) -> (
|
||||
MpcAesGcm<STExecutor<MemoryDuplex>>,
|
||||
MpcAesGcm<STExecutor<MemoryDuplex>>,
|
||||
) {
|
||||
let block_cipher_id = format!("{}/block_cipher", id);
|
||||
let (ctx_leader, ctx_follower) = test_st_executor(128);
|
||||
|
||||
let (leader_ot_send, follower_ot_recv) = ideal_ot();
|
||||
let (follower_ot_send, leader_ot_recv) = ideal_ot();
|
||||
|
||||
let block_leader = leader
|
||||
.new_thread(ctx_leader, leader_ot_send, leader_ot_recv)
|
||||
.unwrap();
|
||||
|
||||
let block_follower = follower
|
||||
.new_thread(ctx_follower, follower_ot_send, follower_ot_recv)
|
||||
.unwrap();
|
||||
|
||||
let leader_block_cipher = MpcBlockCipher::new(
|
||||
BlockCipherConfig::builder()
|
||||
.id(block_cipher_id.clone())
|
||||
.build()
|
||||
.unwrap(),
|
||||
block_leader,
|
||||
);
|
||||
let follower_block_cipher = MpcBlockCipher::new(
|
||||
BlockCipherConfig::builder()
|
||||
.id(block_cipher_id.clone())
|
||||
.build()
|
||||
.unwrap(),
|
||||
block_follower,
|
||||
);
|
||||
|
||||
let stream_cipher_id = format!("{}/stream_cipher", id);
|
||||
let leader_stream_cipher = MpcStreamCipher::new(
|
||||
StreamCipherConfig::builder()
|
||||
.id(stream_cipher_id.clone())
|
||||
.build()
|
||||
.unwrap(),
|
||||
leader,
|
||||
);
|
||||
let follower_stream_cipher = MpcStreamCipher::new(
|
||||
StreamCipherConfig::builder()
|
||||
.id(stream_cipher_id.clone())
|
||||
.build()
|
||||
.unwrap(),
|
||||
follower,
|
||||
);
|
||||
|
||||
let (ctx_a, ctx_b) = test_st_executor(128);
|
||||
let (leader_ghash, follower_ghash) = ideal_ghash(ctx_a, ctx_b);
|
||||
|
||||
let (ctx_a, ctx_b) = test_st_executor(128);
|
||||
let leader = MpcAesGcm::new(
|
||||
leader_config,
|
||||
ctx_a,
|
||||
Box::new(leader_block_cipher),
|
||||
Box::new(leader_stream_cipher),
|
||||
Box::new(leader_ghash),
|
||||
);
|
||||
|
||||
let follower = MpcAesGcm::new(
|
||||
follower_config,
|
||||
ctx_b,
|
||||
Box::new(follower_block_cipher),
|
||||
Box::new(follower_stream_cipher),
|
||||
Box::new(follower_ghash),
|
||||
);
|
||||
|
||||
(leader, follower)
|
||||
}
|
||||
@@ -1,712 +0,0 @@
|
||||
//! This module provides an implementation of 2PC AES-GCM.
|
||||
|
||||
mod config;
|
||||
mod error;
|
||||
#[cfg(feature = "mock")]
|
||||
pub mod mock;
|
||||
mod tag;
|
||||
|
||||
pub use config::{AesGcmConfig, AesGcmConfigBuilder, AesGcmConfigBuilderError, Role};
|
||||
pub use error::AesGcmError;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use block_cipher::{Aes128, BlockCipher};
|
||||
use futures::TryFutureExt;
|
||||
use mpz_common::Context;
|
||||
use mpz_garble::value::ValueRef;
|
||||
use tlsn_stream_cipher::{Aes128Ctr, StreamCipher};
|
||||
use tlsn_universal_hash::UniversalHash;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::{
|
||||
aes_gcm::tag::{compute_tag, verify_tag, TAG_LEN},
|
||||
Aead,
|
||||
};
|
||||
|
||||
/// MPC AES-GCM.
|
||||
pub struct MpcAesGcm<Ctx> {
|
||||
config: AesGcmConfig,
|
||||
ctx: Ctx,
|
||||
aes_block: Box<dyn BlockCipher<Aes128>>,
|
||||
aes_ctr: Box<dyn StreamCipher<Aes128Ctr>>,
|
||||
ghash: Box<dyn UniversalHash>,
|
||||
}
|
||||
|
||||
impl<Ctx> std::fmt::Debug for MpcAesGcm<Ctx> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("MpcAesGcm")
|
||||
.field("config", &self.config)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<Ctx: Context> MpcAesGcm<Ctx> {
|
||||
/// Creates a new instance of [`MpcAesGcm`].
|
||||
pub fn new(
|
||||
config: AesGcmConfig,
|
||||
context: Ctx,
|
||||
aes_block: Box<dyn BlockCipher<Aes128>>,
|
||||
aes_ctr: Box<dyn StreamCipher<Aes128Ctr>>,
|
||||
ghash: Box<dyn UniversalHash>,
|
||||
) -> Self {
|
||||
Self {
|
||||
config,
|
||||
ctx: context,
|
||||
aes_block,
|
||||
aes_ctr,
|
||||
ghash,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<Ctx: Context> Aead for MpcAesGcm<Ctx> {
|
||||
type Error = AesGcmError;
|
||||
|
||||
#[instrument(level = "info", skip_all, err)]
|
||||
async fn set_key(&mut self, key: ValueRef, iv: ValueRef) -> Result<(), AesGcmError> {
|
||||
self.aes_block.set_key(key.clone());
|
||||
self.aes_ctr.set_key(key, iv);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "info", skip_all, err)]
|
||||
async fn decode_key_private(&mut self) -> Result<(), AesGcmError> {
|
||||
self.aes_ctr
|
||||
.decode_key_private()
|
||||
.await
|
||||
.map_err(AesGcmError::from)
|
||||
}
|
||||
|
||||
#[instrument(level = "info", skip_all, err)]
|
||||
async fn decode_key_blind(&mut self) -> Result<(), AesGcmError> {
|
||||
self.aes_ctr
|
||||
.decode_key_blind()
|
||||
.await
|
||||
.map_err(AesGcmError::from)
|
||||
}
|
||||
|
||||
fn set_transcript_id(&mut self, id: &str) {
|
||||
self.aes_ctr.set_transcript_id(id)
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self), err)]
|
||||
async fn setup(&mut self) -> Result<(), AesGcmError> {
|
||||
self.ghash.setup().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self), err)]
|
||||
async fn preprocess(&mut self, len: usize) -> Result<(), AesGcmError> {
|
||||
futures::try_join!(
|
||||
// Preprocess the GHASH key block.
|
||||
self.aes_block
|
||||
.preprocess(block_cipher::Visibility::Public, 1)
|
||||
.map_err(AesGcmError::from),
|
||||
self.aes_ctr.preprocess(len).map_err(AesGcmError::from),
|
||||
self.ghash.preprocess().map_err(AesGcmError::from),
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn start(&mut self) -> Result<(), AesGcmError> {
|
||||
let h_share = self.aes_block.encrypt_share(vec![0u8; 16]).await?;
|
||||
self.ghash.set_key(h_share).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn encrypt_public(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
plaintext: Vec<u8>,
|
||||
aad: Vec<u8>,
|
||||
) -> Result<Vec<u8>, AesGcmError> {
|
||||
let ciphertext = self
|
||||
.aes_ctr
|
||||
.encrypt_public(explicit_nonce.clone(), plaintext)
|
||||
.await?;
|
||||
|
||||
let tag = compute_tag(
|
||||
&mut self.ctx,
|
||||
self.aes_ctr.as_mut(),
|
||||
self.ghash.as_mut(),
|
||||
explicit_nonce,
|
||||
ciphertext.clone(),
|
||||
aad,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut payload = ciphertext;
|
||||
payload.extend(tag);
|
||||
|
||||
Ok(payload)
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn encrypt_private(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
plaintext: Vec<u8>,
|
||||
aad: Vec<u8>,
|
||||
) -> Result<Vec<u8>, AesGcmError> {
|
||||
let ciphertext = self
|
||||
.aes_ctr
|
||||
.encrypt_private(explicit_nonce.clone(), plaintext)
|
||||
.await?;
|
||||
|
||||
let tag = compute_tag(
|
||||
&mut self.ctx,
|
||||
self.aes_ctr.as_mut(),
|
||||
self.ghash.as_mut(),
|
||||
explicit_nonce,
|
||||
ciphertext.clone(),
|
||||
aad,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut payload = ciphertext;
|
||||
payload.extend(tag);
|
||||
|
||||
Ok(payload)
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn encrypt_blind(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
plaintext_len: usize,
|
||||
aad: Vec<u8>,
|
||||
) -> Result<Vec<u8>, AesGcmError> {
|
||||
let ciphertext = self
|
||||
.aes_ctr
|
||||
.encrypt_blind(explicit_nonce.clone(), plaintext_len)
|
||||
.await?;
|
||||
|
||||
let tag = compute_tag(
|
||||
&mut self.ctx,
|
||||
self.aes_ctr.as_mut(),
|
||||
self.ghash.as_mut(),
|
||||
explicit_nonce,
|
||||
ciphertext.clone(),
|
||||
aad,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut payload = ciphertext;
|
||||
payload.extend(tag);
|
||||
|
||||
Ok(payload)
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn decrypt_public(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
mut payload: Vec<u8>,
|
||||
aad: Vec<u8>,
|
||||
) -> Result<Vec<u8>, AesGcmError> {
|
||||
let purported_tag: [u8; TAG_LEN] = payload
|
||||
.split_off(payload.len() - TAG_LEN)
|
||||
.try_into()
|
||||
.map_err(|_| AesGcmError::payload("payload is not long enough to contain tag"))?;
|
||||
let ciphertext = payload;
|
||||
|
||||
verify_tag(
|
||||
&mut self.ctx,
|
||||
self.aes_ctr.as_mut(),
|
||||
self.ghash.as_mut(),
|
||||
*self.config.role(),
|
||||
explicit_nonce.clone(),
|
||||
ciphertext.clone(),
|
||||
aad,
|
||||
purported_tag,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let plaintext = self
|
||||
.aes_ctr
|
||||
.decrypt_public(explicit_nonce, ciphertext)
|
||||
.await?;
|
||||
|
||||
Ok(plaintext)
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn decrypt_private(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
mut payload: Vec<u8>,
|
||||
aad: Vec<u8>,
|
||||
) -> Result<Vec<u8>, AesGcmError> {
|
||||
let purported_tag: [u8; TAG_LEN] = payload
|
||||
.split_off(payload.len() - TAG_LEN)
|
||||
.try_into()
|
||||
.map_err(|_| AesGcmError::payload("payload is not long enough to contain tag"))?;
|
||||
let ciphertext = payload;
|
||||
|
||||
verify_tag(
|
||||
&mut self.ctx,
|
||||
self.aes_ctr.as_mut(),
|
||||
self.ghash.as_mut(),
|
||||
*self.config.role(),
|
||||
explicit_nonce.clone(),
|
||||
ciphertext.clone(),
|
||||
aad,
|
||||
purported_tag,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let plaintext = self
|
||||
.aes_ctr
|
||||
.decrypt_private(explicit_nonce, ciphertext)
|
||||
.await?;
|
||||
|
||||
Ok(plaintext)
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn decrypt_blind(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
mut payload: Vec<u8>,
|
||||
aad: Vec<u8>,
|
||||
) -> Result<(), AesGcmError> {
|
||||
let purported_tag: [u8; TAG_LEN] = payload
|
||||
.split_off(payload.len() - TAG_LEN)
|
||||
.try_into()
|
||||
.map_err(|_| AesGcmError::payload("payload is not long enough to contain tag"))?;
|
||||
let ciphertext = payload;
|
||||
|
||||
verify_tag(
|
||||
&mut self.ctx,
|
||||
self.aes_ctr.as_mut(),
|
||||
self.ghash.as_mut(),
|
||||
*self.config.role(),
|
||||
explicit_nonce.clone(),
|
||||
ciphertext.clone(),
|
||||
aad,
|
||||
purported_tag,
|
||||
)
|
||||
.await?;
|
||||
|
||||
self.aes_ctr
|
||||
.decrypt_blind(explicit_nonce, ciphertext)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn verify_tag(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
mut payload: Vec<u8>,
|
||||
aad: Vec<u8>,
|
||||
) -> Result<(), AesGcmError> {
|
||||
let purported_tag: [u8; TAG_LEN] = payload
|
||||
.split_off(payload.len() - TAG_LEN)
|
||||
.try_into()
|
||||
.map_err(|_| AesGcmError::payload("payload is not long enough to contain tag"))?;
|
||||
let ciphertext = payload;
|
||||
|
||||
verify_tag(
|
||||
&mut self.ctx,
|
||||
self.aes_ctr.as_mut(),
|
||||
self.ghash.as_mut(),
|
||||
*self.config.role(),
|
||||
explicit_nonce,
|
||||
ciphertext,
|
||||
aad,
|
||||
purported_tag,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn prove_plaintext(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
mut payload: Vec<u8>,
|
||||
aad: Vec<u8>,
|
||||
) -> Result<Vec<u8>, AesGcmError> {
|
||||
let purported_tag: [u8; TAG_LEN] = payload
|
||||
.split_off(payload.len() - TAG_LEN)
|
||||
.try_into()
|
||||
.map_err(|_| AesGcmError::payload("payload is not long enough to contain tag"))?;
|
||||
let ciphertext = payload;
|
||||
|
||||
verify_tag(
|
||||
&mut self.ctx,
|
||||
self.aes_ctr.as_mut(),
|
||||
self.ghash.as_mut(),
|
||||
*self.config.role(),
|
||||
explicit_nonce.clone(),
|
||||
ciphertext.clone(),
|
||||
aad,
|
||||
purported_tag,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let plaintext = self
|
||||
.aes_ctr
|
||||
.prove_plaintext(explicit_nonce, ciphertext)
|
||||
.await?;
|
||||
|
||||
Ok(plaintext)
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn prove_plaintext_no_tag(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
ciphertext: Vec<u8>,
|
||||
) -> Result<Vec<u8>, AesGcmError> {
|
||||
self.aes_ctr
|
||||
.prove_plaintext(explicit_nonce, ciphertext)
|
||||
.map_err(AesGcmError::from)
|
||||
.await
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn verify_plaintext(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
mut payload: Vec<u8>,
|
||||
aad: Vec<u8>,
|
||||
) -> Result<(), AesGcmError> {
|
||||
let purported_tag: [u8; TAG_LEN] = payload
|
||||
.split_off(payload.len() - TAG_LEN)
|
||||
.try_into()
|
||||
.map_err(|_| AesGcmError::payload("payload is not long enough to contain tag"))?;
|
||||
let ciphertext = payload;
|
||||
|
||||
verify_tag(
|
||||
&mut self.ctx,
|
||||
self.aes_ctr.as_mut(),
|
||||
self.ghash.as_mut(),
|
||||
*self.config.role(),
|
||||
explicit_nonce.clone(),
|
||||
ciphertext.clone(),
|
||||
aad,
|
||||
purported_tag,
|
||||
)
|
||||
.await?;
|
||||
|
||||
self.aes_ctr
|
||||
.verify_plaintext(explicit_nonce, ciphertext)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn verify_plaintext_no_tag(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
ciphertext: Vec<u8>,
|
||||
) -> Result<(), AesGcmError> {
|
||||
self.aes_ctr
|
||||
.verify_plaintext(explicit_nonce, ciphertext)
|
||||
.map_err(AesGcmError::from)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{
|
||||
aes_gcm::{mock::create_mock_aes_gcm_pair, AesGcmConfigBuilder, Role},
|
||||
Aead,
|
||||
};
|
||||
use ::aes_gcm::{aead::AeadInPlace, Aes128Gcm, NewAead, Nonce};
|
||||
use error::ErrorKind;
|
||||
use mpz_common::executor::STExecutor;
|
||||
use mpz_garble::{protocol::deap::mock::create_mock_deap_vm, Memory};
|
||||
use serio::channel::MemoryDuplex;
|
||||
|
||||
fn reference_impl(
|
||||
key: &[u8],
|
||||
iv: &[u8],
|
||||
explicit_nonce: &[u8],
|
||||
plaintext: &[u8],
|
||||
aad: &[u8],
|
||||
) -> Vec<u8> {
|
||||
let cipher = Aes128Gcm::new_from_slice(key).unwrap();
|
||||
let nonce = [iv, explicit_nonce].concat();
|
||||
let nonce = Nonce::from_slice(nonce.as_slice());
|
||||
|
||||
let mut ciphertext = plaintext.to_vec();
|
||||
cipher
|
||||
.encrypt_in_place(nonce, aad, &mut ciphertext)
|
||||
.unwrap();
|
||||
|
||||
ciphertext
|
||||
}
|
||||
|
||||
async fn setup_pair(
|
||||
key: Vec<u8>,
|
||||
iv: Vec<u8>,
|
||||
) -> (
|
||||
MpcAesGcm<STExecutor<MemoryDuplex>>,
|
||||
MpcAesGcm<STExecutor<MemoryDuplex>>,
|
||||
) {
|
||||
let (leader_vm, follower_vm) = create_mock_deap_vm();
|
||||
|
||||
let leader_key = leader_vm
|
||||
.new_public_array_input::<u8>("key", key.len())
|
||||
.unwrap();
|
||||
let leader_iv = leader_vm
|
||||
.new_public_array_input::<u8>("iv", iv.len())
|
||||
.unwrap();
|
||||
|
||||
leader_vm.assign(&leader_key, key.clone()).unwrap();
|
||||
leader_vm.assign(&leader_iv, iv.clone()).unwrap();
|
||||
|
||||
let follower_key = follower_vm
|
||||
.new_public_array_input::<u8>("key", key.len())
|
||||
.unwrap();
|
||||
let follower_iv = follower_vm
|
||||
.new_public_array_input::<u8>("iv", iv.len())
|
||||
.unwrap();
|
||||
|
||||
follower_vm.assign(&follower_key, key.clone()).unwrap();
|
||||
follower_vm.assign(&follower_iv, iv.clone()).unwrap();
|
||||
|
||||
let leader_config = AesGcmConfigBuilder::default()
|
||||
.id("test".to_string())
|
||||
.role(Role::Leader)
|
||||
.build()
|
||||
.unwrap();
|
||||
let follower_config = AesGcmConfigBuilder::default()
|
||||
.id("test".to_string())
|
||||
.role(Role::Follower)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let (mut leader, mut follower) = create_mock_aes_gcm_pair(
|
||||
"test",
|
||||
(leader_vm, follower_vm),
|
||||
leader_config,
|
||||
follower_config,
|
||||
)
|
||||
.await;
|
||||
|
||||
futures::try_join!(
|
||||
leader.set_key(leader_key, leader_iv),
|
||||
follower.set_key(follower_key, follower_iv)
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
futures::try_join!(leader.setup(), follower.setup()).unwrap();
|
||||
futures::try_join!(leader.start(), follower.start()).unwrap();
|
||||
|
||||
(leader, follower)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore = "expensive"]
|
||||
async fn test_aes_gcm_encrypt_private() {
|
||||
let key = vec![0u8; 16];
|
||||
let iv = vec![0u8; 4];
|
||||
let explicit_nonce = vec![0u8; 8];
|
||||
let plaintext = vec![1u8; 32];
|
||||
let aad = vec![2u8; 12];
|
||||
|
||||
let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;
|
||||
|
||||
let (leader_ciphertext, follower_ciphertext) = tokio::try_join!(
|
||||
leader.encrypt_private(explicit_nonce.clone(), plaintext.clone(), aad.clone(),),
|
||||
follower.encrypt_blind(explicit_nonce.clone(), plaintext.len(), aad.clone())
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(leader_ciphertext, follower_ciphertext);
|
||||
assert_eq!(
|
||||
leader_ciphertext,
|
||||
reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad)
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore = "expensive"]
|
||||
async fn test_aes_gcm_encrypt_public() {
|
||||
let key = vec![0u8; 16];
|
||||
let iv = vec![0u8; 4];
|
||||
let explicit_nonce = vec![0u8; 8];
|
||||
let plaintext = vec![1u8; 32];
|
||||
let aad = vec![2u8; 12];
|
||||
|
||||
let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;
|
||||
|
||||
let (leader_ciphertext, follower_ciphertext) = tokio::try_join!(
|
||||
leader.encrypt_public(explicit_nonce.clone(), plaintext.clone(), aad.clone(),),
|
||||
follower.encrypt_public(explicit_nonce.clone(), plaintext.clone(), aad.clone(),)
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(leader_ciphertext, follower_ciphertext);
|
||||
assert_eq!(
|
||||
leader_ciphertext,
|
||||
reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad)
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore = "expensive"]
|
||||
async fn test_aes_gcm_decrypt_private() {
|
||||
let key = vec![0u8; 16];
|
||||
let iv = vec![0u8; 4];
|
||||
let explicit_nonce = vec![0u8; 8];
|
||||
let plaintext = vec![1u8; 32];
|
||||
let aad = vec![2u8; 12];
|
||||
let ciphertext = reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad);
|
||||
|
||||
let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;
|
||||
|
||||
let (leader_plaintext, _) = tokio::try_join!(
|
||||
leader.decrypt_private(explicit_nonce.clone(), ciphertext.clone(), aad.clone(),),
|
||||
follower.decrypt_blind(explicit_nonce.clone(), ciphertext, aad.clone(),)
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(leader_plaintext, plaintext);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore = "expensive"]
|
||||
async fn test_aes_gcm_decrypt_private_bad_tag() {
|
||||
let key = vec![0u8; 16];
|
||||
let iv = vec![0u8; 4];
|
||||
let explicit_nonce = vec![0u8; 8];
|
||||
let plaintext = vec![1u8; 32];
|
||||
let aad = vec![2u8; 12];
|
||||
let ciphertext = reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad);
|
||||
|
||||
let len = ciphertext.len();
|
||||
|
||||
// corrupt tag
|
||||
let mut corrupted = ciphertext.clone();
|
||||
corrupted[len - 1] -= 1;
|
||||
|
||||
let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;
|
||||
|
||||
// leader receives corrupted tag
|
||||
let err = tokio::try_join!(
|
||||
leader.decrypt_private(explicit_nonce.clone(), corrupted.clone(), aad.clone(),),
|
||||
follower.decrypt_blind(explicit_nonce.clone(), ciphertext.clone(), aad.clone(),)
|
||||
)
|
||||
.unwrap_err();
|
||||
assert_eq!(err.kind(), ErrorKind::Tag);
|
||||
|
||||
let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;
|
||||
|
||||
// follower receives corrupted tag
|
||||
let err = tokio::try_join!(
|
||||
leader.decrypt_private(explicit_nonce.clone(), ciphertext.clone(), aad.clone(),),
|
||||
follower.decrypt_blind(explicit_nonce.clone(), corrupted.clone(), aad.clone(),)
|
||||
)
|
||||
.unwrap_err();
|
||||
assert_eq!(err.kind(), ErrorKind::Tag);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore = "expensive"]
|
||||
async fn test_aes_gcm_decrypt_public() {
|
||||
let key = vec![0u8; 16];
|
||||
let iv = vec![0u8; 4];
|
||||
let explicit_nonce = vec![0u8; 8];
|
||||
let plaintext = vec![1u8; 32];
|
||||
let aad = vec![2u8; 12];
|
||||
let ciphertext = reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad);
|
||||
|
||||
let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;
|
||||
|
||||
let (leader_plaintext, follower_plaintext) = tokio::try_join!(
|
||||
leader.decrypt_public(explicit_nonce.clone(), ciphertext.clone(), aad.clone(),),
|
||||
follower.decrypt_public(explicit_nonce.clone(), ciphertext, aad.clone(),)
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(leader_plaintext, plaintext);
|
||||
assert_eq!(leader_plaintext, follower_plaintext);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore = "expensive"]
|
||||
async fn test_aes_gcm_decrypt_public_bad_tag() {
|
||||
let key = vec![0u8; 16];
|
||||
let iv = vec![0u8; 4];
|
||||
let explicit_nonce = vec![0u8; 8];
|
||||
let plaintext = vec![1u8; 32];
|
||||
let aad = vec![2u8; 12];
|
||||
let ciphertext = reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad);
|
||||
|
||||
let len = ciphertext.len();
|
||||
|
||||
// Corrupt tag.
|
||||
let mut corrupted = ciphertext.clone();
|
||||
corrupted[len - 1] -= 1;
|
||||
|
||||
let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;
|
||||
|
||||
// Leader receives corrupted tag.
|
||||
let err = tokio::try_join!(
|
||||
leader.decrypt_public(explicit_nonce.clone(), corrupted.clone(), aad.clone(),),
|
||||
follower.decrypt_public(explicit_nonce.clone(), ciphertext.clone(), aad.clone(),)
|
||||
)
|
||||
.unwrap_err();
|
||||
assert_eq!(err.kind(), ErrorKind::Tag);
|
||||
|
||||
let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;
|
||||
|
||||
// Follower receives corrupted tag.
|
||||
let err = tokio::try_join!(
|
||||
leader.decrypt_public(explicit_nonce.clone(), ciphertext.clone(), aad.clone(),),
|
||||
follower.decrypt_public(explicit_nonce.clone(), corrupted.clone(), aad.clone(),)
|
||||
)
|
||||
.unwrap_err();
|
||||
assert_eq!(err.kind(), ErrorKind::Tag);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore = "expensive"]
|
||||
async fn test_aes_gcm_verify_tag() {
|
||||
let key = vec![0u8; 16];
|
||||
let iv = vec![0u8; 4];
|
||||
let explicit_nonce = vec![0u8; 8];
|
||||
let plaintext = vec![1u8; 32];
|
||||
let aad = vec![2u8; 12];
|
||||
let ciphertext = reference_impl(&key, &iv, &explicit_nonce, &plaintext, &aad);
|
||||
|
||||
let len = ciphertext.len();
|
||||
|
||||
let (mut leader, mut follower) = setup_pair(key.clone(), iv.clone()).await;
|
||||
|
||||
tokio::try_join!(
|
||||
leader.verify_tag(explicit_nonce.clone(), ciphertext.clone(), aad.clone()),
|
||||
follower.verify_tag(explicit_nonce.clone(), ciphertext.clone(), aad.clone())
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
//Corrupt tag.
|
||||
let mut corrupted = ciphertext.clone();
|
||||
corrupted[len - 1] -= 1;
|
||||
|
||||
let (leader_res, follower_res) = tokio::join!(
|
||||
leader.verify_tag(explicit_nonce.clone(), corrupted.clone(), aad.clone()),
|
||||
follower.verify_tag(explicit_nonce.clone(), corrupted, aad.clone())
|
||||
);
|
||||
|
||||
assert_eq!(leader_res.unwrap_err().kind(), ErrorKind::Tag);
|
||||
assert_eq!(follower_res.unwrap_err().kind(), ErrorKind::Tag);
|
||||
}
|
||||
}
|
||||
@@ -1,179 +0,0 @@
|
||||
use futures::TryFutureExt;
|
||||
use mpz_common::Context;
|
||||
use mpz_core::{
|
||||
commit::{Decommitment, HashCommit},
|
||||
hash::Hash,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serio::{stream::IoStreamExt, SinkExt};
|
||||
use std::ops::Add;
|
||||
use tlsn_stream_cipher::{Aes128Ctr, StreamCipher};
|
||||
use tlsn_universal_hash::UniversalHash;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::aes_gcm::{AesGcmError, Role};
|
||||
|
||||
pub(crate) const TAG_LEN: usize = 16;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
struct TagShare([u8; TAG_LEN]);
|
||||
|
||||
impl AsRef<[u8]> for TagShare {
|
||||
fn as_ref(&self) -> &[u8] {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl Add for TagShare {
|
||||
type Output = [u8; TAG_LEN];
|
||||
|
||||
fn add(self, rhs: Self) -> Self::Output {
|
||||
core::array::from_fn(|i| self.0[i] ^ rhs.0[i])
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip_all, err)]
|
||||
async fn compute_tag_share<C: StreamCipher<Aes128Ctr> + ?Sized, H: UniversalHash + ?Sized>(
|
||||
aes_ctr: &mut C,
|
||||
hasher: &mut H,
|
||||
explicit_nonce: Vec<u8>,
|
||||
ciphertext: Vec<u8>,
|
||||
aad: Vec<u8>,
|
||||
) -> Result<TagShare, AesGcmError> {
|
||||
let (j0, hash) = futures::try_join!(
|
||||
aes_ctr
|
||||
.share_keystream_block(explicit_nonce, 1)
|
||||
.map_err(AesGcmError::from),
|
||||
hasher
|
||||
.finalize(build_ghash_data(aad, ciphertext))
|
||||
.map_err(AesGcmError::from)
|
||||
)?;
|
||||
|
||||
debug_assert!(j0.len() == TAG_LEN);
|
||||
debug_assert!(hash.len() == TAG_LEN);
|
||||
|
||||
let tag_share = core::array::from_fn(|i| j0[i] ^ hash[i]);
|
||||
|
||||
Ok(TagShare(tag_share))
|
||||
}
|
||||
|
||||
/// Computes the tag for a ciphertext and additional data.
|
||||
///
|
||||
/// The commit-reveal step is not required for computing a tag sent to the
|
||||
/// Server, as it will be able to detect if the tag is incorrect.
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
pub(crate) async fn compute_tag<
|
||||
Ctx: Context,
|
||||
C: StreamCipher<Aes128Ctr> + ?Sized,
|
||||
H: UniversalHash + ?Sized,
|
||||
>(
|
||||
ctx: &mut Ctx,
|
||||
aes_ctr: &mut C,
|
||||
hasher: &mut H,
|
||||
explicit_nonce: Vec<u8>,
|
||||
ciphertext: Vec<u8>,
|
||||
aad: Vec<u8>,
|
||||
) -> Result<[u8; TAG_LEN], AesGcmError> {
|
||||
let tag_share = compute_tag_share(aes_ctr, hasher, explicit_nonce, ciphertext, aad).await?;
|
||||
|
||||
// TODO: The follower doesn't really need to learn the tag,
|
||||
// we could reduce some latency by not sending it.
|
||||
let io = ctx.io_mut();
|
||||
io.send(tag_share.clone()).await?;
|
||||
let other_tag_share: TagShare = io.expect_next().await?;
|
||||
|
||||
let tag = tag_share + other_tag_share;
|
||||
|
||||
Ok(tag)
|
||||
}
|
||||
|
||||
/// Verifies a purported tag against the ciphertext and additional data.
|
||||
///
|
||||
/// Verifying a tag requires a commit-reveal protocol between the leader and
|
||||
/// follower. Without it, the party which receives the other's tag share first
|
||||
/// could trivially compute a tag share which would cause an invalid message to
|
||||
/// be accepted.
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub(crate) async fn verify_tag<
|
||||
Ctx: Context,
|
||||
C: StreamCipher<Aes128Ctr> + ?Sized,
|
||||
H: UniversalHash + ?Sized,
|
||||
>(
|
||||
ctx: &mut Ctx,
|
||||
aes_ctr: &mut C,
|
||||
hasher: &mut H,
|
||||
role: Role,
|
||||
explicit_nonce: Vec<u8>,
|
||||
ciphertext: Vec<u8>,
|
||||
aad: Vec<u8>,
|
||||
purported_tag: [u8; TAG_LEN],
|
||||
) -> Result<(), AesGcmError> {
|
||||
let tag_share = compute_tag_share(aes_ctr, hasher, explicit_nonce, ciphertext, aad).await?;
|
||||
|
||||
let io = ctx.io_mut();
|
||||
let tag = match role {
|
||||
Role::Leader => {
|
||||
// Send commitment of tag share to follower.
|
||||
let (tag_share_decommitment, tag_share_commitment) = tag_share.clone().hash_commit();
|
||||
|
||||
io.send(tag_share_commitment).await?;
|
||||
|
||||
let follower_tag_share: TagShare = io.expect_next().await?;
|
||||
|
||||
// Send decommitment (tag share) to follower.
|
||||
io.send(tag_share_decommitment).await?;
|
||||
|
||||
tag_share + follower_tag_share
|
||||
}
|
||||
Role::Follower => {
|
||||
// Wait for commitment from leader.
|
||||
let commitment: Hash = io.expect_next().await?;
|
||||
|
||||
// Send tag share to leader.
|
||||
io.send(tag_share.clone()).await?;
|
||||
|
||||
// Expect decommitment (tag share) from leader.
|
||||
let decommitment: Decommitment<TagShare> = io.expect_next().await?;
|
||||
|
||||
// Verify decommitment.
|
||||
decommitment.verify(&commitment).map_err(|_| {
|
||||
AesGcmError::peer("leader tag share commitment verification failed")
|
||||
})?;
|
||||
|
||||
let leader_tag_share = decommitment.into_inner();
|
||||
|
||||
tag_share + leader_tag_share
|
||||
}
|
||||
};
|
||||
|
||||
// Reject if tag is incorrect.
|
||||
if tag != purported_tag {
|
||||
return Err(AesGcmError::invalid_tag());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Builds padded data for GHASH.
|
||||
fn build_ghash_data(mut aad: Vec<u8>, mut ciphertext: Vec<u8>) -> Vec<u8> {
|
||||
let associated_data_bitlen = (aad.len() as u64) * 8;
|
||||
let text_bitlen = (ciphertext.len() as u64) * 8;
|
||||
|
||||
let len_block = ((associated_data_bitlen as u128) << 64) + (text_bitlen as u128);
|
||||
|
||||
// Pad data to be a multiple of 16 bytes.
|
||||
let aad_padded_block_count = (aad.len() / 16) + (aad.len() % 16 != 0) as usize;
|
||||
aad.resize(aad_padded_block_count * 16, 0);
|
||||
|
||||
let ciphertext_padded_block_count =
|
||||
(ciphertext.len() / 16) + (ciphertext.len() % 16 != 0) as usize;
|
||||
ciphertext.resize(ciphertext_padded_block_count * 16, 0);
|
||||
|
||||
let mut data: Vec<u8> = Vec::with_capacity(aad.len() + ciphertext.len() + 16);
|
||||
data.extend(aad);
|
||||
data.extend(ciphertext);
|
||||
data.extend_from_slice(&len_block.to_be_bytes());
|
||||
|
||||
data
|
||||
}
|
||||
@@ -1,255 +0,0 @@
|
||||
//! This crate provides implementations of 2PC AEADs for authenticated
|
||||
//! encryption with a shared key.
|
||||
//!
|
||||
//! Both parties can work together to encrypt and decrypt messages with
|
||||
//! different visibility configurations. See [`Aead`] for more information on
|
||||
//! the interface.
|
||||
//!
|
||||
//! For example, one party can privately provide the plaintext to encrypt, while
|
||||
//! both parties can see the ciphertext and the tag. Or, both parties can
|
||||
//! cooperate to decrypt a ciphertext and verify the tag, while only one party
|
||||
//! can see the plaintext.
|
||||
|
||||
#![deny(missing_docs, unreachable_pub, unused_must_use)]
|
||||
#![deny(clippy::all)]
|
||||
#![forbid(unsafe_code)]
|
||||
|
||||
pub mod aes_gcm;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use mpz_garble::value::ValueRef;
|
||||
|
||||
/// This trait defines the interface for AEADs.
|
||||
#[async_trait]
|
||||
pub trait Aead: Send {
|
||||
/// The error type for the AEAD.
|
||||
type Error: std::error::Error + Send + Sync + 'static;
|
||||
|
||||
/// Sets the key for the AEAD.
|
||||
async fn set_key(&mut self, key: ValueRef, iv: ValueRef) -> Result<(), Self::Error>;
|
||||
|
||||
/// Decodes the key for the AEAD, revealing it to this party.
|
||||
async fn decode_key_private(&mut self) -> Result<(), Self::Error>;
|
||||
|
||||
/// Decodes the key for the AEAD, revealing it to the other party(s).
|
||||
async fn decode_key_blind(&mut self) -> Result<(), Self::Error>;
|
||||
|
||||
/// Sets the transcript id.
|
||||
///
|
||||
/// The AEAD assigns unique identifiers to each byte of plaintext
|
||||
/// during encryption and decryption.
|
||||
///
|
||||
/// For example, if the transcript id is set to `foo`, then the first byte
|
||||
/// will be assigned the id `foo/0`, the second byte `foo/1`, and so on.
|
||||
///
|
||||
/// Each transcript id has an independent counter.
|
||||
///
|
||||
/// # Note
|
||||
///
|
||||
/// The state of a transcript counter is preserved between calls to
|
||||
/// `set_transcript_id`.
|
||||
fn set_transcript_id(&mut self, id: &str);
|
||||
|
||||
/// Performs any necessary one-time setup for the AEAD.
|
||||
async fn setup(&mut self) -> Result<(), Self::Error>;
|
||||
|
||||
/// Preprocesses for the given number of bytes.
|
||||
async fn preprocess(&mut self, len: usize) -> Result<(), Self::Error>;
|
||||
|
||||
/// Starts the AEAD.
|
||||
///
|
||||
/// This method performs initialization for the AEAD after setting the key.
|
||||
async fn start(&mut self) -> Result<(), Self::Error>;
|
||||
|
||||
/// Encrypts a plaintext message, returning the ciphertext and tag.
|
||||
///
|
||||
/// The plaintext is provided by both parties.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `explicit_nonce` - The explicit nonce to use for encryption.
|
||||
/// * `plaintext` - The plaintext to encrypt.
|
||||
/// * `aad` - Additional authenticated data.
|
||||
async fn encrypt_public(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
plaintext: Vec<u8>,
|
||||
aad: Vec<u8>,
|
||||
) -> Result<Vec<u8>, Self::Error>;
|
||||
|
||||
/// Encrypts a plaintext message, hiding it from the other party, returning
|
||||
/// the ciphertext and tag.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `explicit_nonce` - The explicit nonce to use for encryption.
|
||||
/// * `plaintext` - The plaintext to encrypt.
|
||||
/// * `aad` - Additional authenticated data.
|
||||
async fn encrypt_private(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
plaintext: Vec<u8>,
|
||||
aad: Vec<u8>,
|
||||
) -> Result<Vec<u8>, Self::Error>;
|
||||
|
||||
/// Encrypts a plaintext message provided by the other party, returning
|
||||
/// the ciphertext and tag.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `explicit_nonce` - The explicit nonce to use for encryption.
|
||||
/// * `plaintext_len` - The length of the plaintext to encrypt.
|
||||
/// * `aad` - Additional authenticated data.
|
||||
async fn encrypt_blind(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
plaintext_len: usize,
|
||||
aad: Vec<u8>,
|
||||
) -> Result<Vec<u8>, Self::Error>;
|
||||
|
||||
/// Decrypts a ciphertext message, returning the plaintext to both parties.
|
||||
///
|
||||
/// This method checks the authenticity of the ciphertext, tag and
|
||||
/// additional data.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `explicit_nonce` - The explicit nonce to use for decryption.
|
||||
/// * `payload` - The ciphertext and tag to authenticate and decrypt.
|
||||
/// * `aad` - Additional authenticated data.
|
||||
async fn decrypt_public(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
payload: Vec<u8>,
|
||||
aad: Vec<u8>,
|
||||
) -> Result<Vec<u8>, Self::Error>;
|
||||
|
||||
/// Decrypts a ciphertext message, returning the plaintext only to this
|
||||
/// party.
|
||||
///
|
||||
/// This method checks the authenticity of the ciphertext, tag and
|
||||
/// additional data.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `explicit_nonce` - The explicit nonce to use for decryption.
|
||||
/// * `payload` - The ciphertext and tag to authenticate and decrypt.
|
||||
/// * `aad` - Additional authenticated data.
|
||||
async fn decrypt_private(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
payload: Vec<u8>,
|
||||
aad: Vec<u8>,
|
||||
) -> Result<Vec<u8>, Self::Error>;
|
||||
|
||||
/// Decrypts a ciphertext message, returning the plaintext only to the other
|
||||
/// party.
|
||||
///
|
||||
/// This method checks the authenticity of the ciphertext, tag and
|
||||
/// additional data.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `explicit_nonce` - The explicit nonce to use for decryption.
|
||||
/// * `payload` - The ciphertext and tag to authenticate and decrypt.
|
||||
/// * `aad` - Additional authenticated data.
|
||||
async fn decrypt_blind(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
payload: Vec<u8>,
|
||||
aad: Vec<u8>,
|
||||
) -> Result<(), Self::Error>;
|
||||
|
||||
/// Verifies the tag of a ciphertext message.
|
||||
///
|
||||
/// This method checks the authenticity of the ciphertext, tag and
|
||||
/// additional data.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `explicit_nonce` - The explicit nonce to use for decryption.
|
||||
/// * `payload` - The ciphertext and tag to authenticate and decrypt.
|
||||
/// * `aad` - Additional authenticated data.
|
||||
async fn verify_tag(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
payload: Vec<u8>,
|
||||
aad: Vec<u8>,
|
||||
) -> Result<(), Self::Error>;
|
||||
|
||||
/// Locally decrypts the provided ciphertext and then proves in ZK to the
|
||||
/// other party(s) that the plaintext is correct.
|
||||
///
|
||||
/// Returns the plaintext.
|
||||
///
|
||||
/// This method requires this party to know the encryption key, which can be
|
||||
/// achieved by calling the `decode_key_private` method.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `explicit_nonce` - The explicit nonce to use for the keystream.
|
||||
/// * `payload` - The ciphertext and tag to decrypt and prove.
|
||||
/// * `aad` - Additional authenticated data.
|
||||
async fn prove_plaintext(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
payload: Vec<u8>,
|
||||
aad: Vec<u8>,
|
||||
) -> Result<Vec<u8>, Self::Error>;
|
||||
|
||||
/// Locally decrypts the provided ciphertext and then proves in ZK to the
|
||||
/// other party(s) that the plaintext is correct.
|
||||
///
|
||||
/// Returns the plaintext.
|
||||
///
|
||||
/// This method requires this party to know the encryption key, which can be
|
||||
/// achieved by calling the `decode_key_private` method.
|
||||
///
|
||||
/// # WARNING
|
||||
///
|
||||
/// This method does not verify the tag of the ciphertext. Only use this if
|
||||
/// you know what you're doing.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `explicit_nonce` - The explicit nonce to use for the keystream.
|
||||
/// * `ciphertext` - The ciphertext to decrypt and prove.
|
||||
async fn prove_plaintext_no_tag(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
ciphertext: Vec<u8>,
|
||||
) -> Result<Vec<u8>, Self::Error>;
|
||||
|
||||
/// Verifies the other party(s) can prove they know a plaintext which
|
||||
/// encrypts to the given ciphertext.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `explicit_nonce` - The explicit nonce to use for the keystream.
|
||||
/// * `payload` - The ciphertext and tag to verify.
|
||||
/// * `aad` - Additional authenticated data.
|
||||
async fn verify_plaintext(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
payload: Vec<u8>,
|
||||
aad: Vec<u8>,
|
||||
) -> Result<(), Self::Error>;
|
||||
|
||||
/// Verifies the other party(s) can prove they know a plaintext which
|
||||
/// encrypts to the given ciphertext.
|
||||
///
|
||||
/// # WARNING
|
||||
///
|
||||
/// This method does not verify the tag of the ciphertext. Only use this if
|
||||
/// you know what you're doing.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `explicit_nonce` - The explicit nonce to use for the keystream.
|
||||
/// * `ciphertext` - The ciphertext to verify.
|
||||
async fn verify_plaintext_no_tag(
|
||||
&mut self,
|
||||
explicit_nonce: Vec<u8>,
|
||||
ciphertext: Vec<u8>,
|
||||
) -> Result<(), Self::Error>;
|
||||
}
|
||||
@@ -1,30 +0,0 @@
|
||||
[package]
|
||||
name = "tlsn-block-cipher"
|
||||
authors = ["TLSNotary Team"]
|
||||
description = "2PC block cipher implementation"
|
||||
keywords = ["tls", "mpc", "2pc", "block-cipher"]
|
||||
categories = ["cryptography"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
version = "0.1.0-alpha.7"
|
||||
edition = "2021"
|
||||
|
||||
[lib]
|
||||
name = "block_cipher"
|
||||
|
||||
[features]
|
||||
default = ["mock"]
|
||||
mock = []
|
||||
|
||||
[dependencies]
|
||||
mpz-circuits = { workspace = true }
|
||||
mpz-garble = { workspace = true }
|
||||
tlsn-utils = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
derive_builder = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
aes = { workspace = true }
|
||||
cipher = { workspace = true }
|
||||
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }
|
||||
@@ -1,277 +0,0 @@
|
||||
use std::{collections::VecDeque, marker::PhantomData};
|
||||
|
||||
use async_trait::async_trait;
|
||||
|
||||
use mpz_garble::{value::ValueRef, Decode, DecodePrivate, Execute, Load, Memory};
|
||||
use tracing::instrument;
|
||||
use utils::id::NestedId;
|
||||
|
||||
use crate::{BlockCipher, BlockCipherCircuit, BlockCipherConfig, BlockCipherError, Visibility};
|
||||
|
||||
#[derive(Debug)]
|
||||
struct State {
|
||||
private_execution_id: NestedId,
|
||||
public_execution_id: NestedId,
|
||||
preprocessed_private: VecDeque<BlockVars>,
|
||||
preprocessed_public: VecDeque<BlockVars>,
|
||||
key: Option<ValueRef>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct BlockVars {
|
||||
msg: ValueRef,
|
||||
ciphertext: ValueRef,
|
||||
}
|
||||
|
||||
/// An MPC block cipher.
|
||||
#[derive(Debug)]
|
||||
pub struct MpcBlockCipher<C, E>
|
||||
where
|
||||
C: BlockCipherCircuit,
|
||||
E: Memory + Execute + Decode + DecodePrivate + Send + Sync,
|
||||
{
|
||||
state: State,
|
||||
|
||||
executor: E,
|
||||
|
||||
_cipher: PhantomData<C>,
|
||||
}
|
||||
|
||||
impl<C, E> MpcBlockCipher<C, E>
|
||||
where
|
||||
C: BlockCipherCircuit,
|
||||
E: Memory + Execute + Decode + DecodePrivate + Send + Sync,
|
||||
{
|
||||
/// Creates a new MPC block cipher.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `config` - The configuration for the block cipher.
|
||||
/// * `executor` - The executor to use for the MPC.
|
||||
pub fn new(config: BlockCipherConfig, executor: E) -> Self {
|
||||
let private_execution_id = NestedId::new(&config.id)
|
||||
.append_string("private")
|
||||
.append_counter();
|
||||
let public_execution_id = NestedId::new(&config.id)
|
||||
.append_string("public")
|
||||
.append_counter();
|
||||
Self {
|
||||
state: State {
|
||||
private_execution_id,
|
||||
public_execution_id,
|
||||
preprocessed_private: VecDeque::new(),
|
||||
preprocessed_public: VecDeque::new(),
|
||||
key: None,
|
||||
},
|
||||
executor,
|
||||
_cipher: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
fn define_block(&mut self, vis: Visibility) -> BlockVars {
|
||||
let (id, msg) = match vis {
|
||||
Visibility::Private => {
|
||||
let id = self
|
||||
.state
|
||||
.private_execution_id
|
||||
.increment_in_place()
|
||||
.to_string();
|
||||
let msg = self
|
||||
.executor
|
||||
.new_private_input::<C::BLOCK>(&format!("{}/msg", &id))
|
||||
.expect("message is not defined");
|
||||
(id, msg)
|
||||
}
|
||||
Visibility::Blind => {
|
||||
let id = self
|
||||
.state
|
||||
.private_execution_id
|
||||
.increment_in_place()
|
||||
.to_string();
|
||||
let msg = self
|
||||
.executor
|
||||
.new_blind_input::<C::BLOCK>(&format!("{}/msg", &id))
|
||||
.expect("message is not defined");
|
||||
(id, msg)
|
||||
}
|
||||
Visibility::Public => {
|
||||
let id = self
|
||||
.state
|
||||
.public_execution_id
|
||||
.increment_in_place()
|
||||
.to_string();
|
||||
let msg = self
|
||||
.executor
|
||||
.new_public_input::<C::BLOCK>(&format!("{}/msg", &id))
|
||||
.expect("message is not defined");
|
||||
(id, msg)
|
||||
}
|
||||
};
|
||||
|
||||
let ciphertext = self
|
||||
.executor
|
||||
.new_output::<C::BLOCK>(&format!("{}/ciphertext", &id))
|
||||
.expect("message is not defined");
|
||||
|
||||
BlockVars { msg, ciphertext }
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<C, E> BlockCipher<C> for MpcBlockCipher<C, E>
|
||||
where
|
||||
C: BlockCipherCircuit,
|
||||
E: Memory + Load + Execute + Decode + DecodePrivate + Send + Sync + Send,
|
||||
{
|
||||
#[instrument(level = "trace", skip_all)]
|
||||
fn set_key(&mut self, key: ValueRef) {
|
||||
self.state.key = Some(key);
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn preprocess(
|
||||
&mut self,
|
||||
visibility: Visibility,
|
||||
count: usize,
|
||||
) -> Result<(), BlockCipherError> {
|
||||
let key = self
|
||||
.state
|
||||
.key
|
||||
.clone()
|
||||
.ok_or_else(BlockCipherError::key_not_set)?;
|
||||
|
||||
for _ in 0..count {
|
||||
let vars = self.define_block(visibility);
|
||||
|
||||
self.executor
|
||||
.load(
|
||||
C::circuit(),
|
||||
&[key.clone(), vars.msg.clone()],
|
||||
&[vars.ciphertext.clone()],
|
||||
)
|
||||
.await?;
|
||||
|
||||
match visibility {
|
||||
Visibility::Private | Visibility::Blind => {
|
||||
self.state.preprocessed_private.push_back(vars)
|
||||
}
|
||||
Visibility::Public => self.state.preprocessed_public.push_back(vars),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn encrypt_private(&mut self, plaintext: Vec<u8>) -> Result<Vec<u8>, BlockCipherError> {
|
||||
let len = plaintext.len();
|
||||
let block: C::BLOCK = plaintext
|
||||
.try_into()
|
||||
.map_err(|_| BlockCipherError::invalid_message_length::<C>(len))?;
|
||||
|
||||
let key = self
|
||||
.state
|
||||
.key
|
||||
.clone()
|
||||
.ok_or_else(BlockCipherError::key_not_set)?;
|
||||
|
||||
let BlockVars { msg, ciphertext } =
|
||||
if let Some(vars) = self.state.preprocessed_private.pop_front() {
|
||||
vars
|
||||
} else {
|
||||
self.define_block(Visibility::Private)
|
||||
};
|
||||
|
||||
self.executor.assign(&msg, block)?;
|
||||
|
||||
self.executor
|
||||
.execute(C::circuit(), &[key, msg], &[ciphertext.clone()])
|
||||
.await?;
|
||||
|
||||
let mut outputs = self.executor.decode(&[ciphertext]).await?;
|
||||
|
||||
let ciphertext: C::BLOCK = if let Ok(ciphertext) = outputs
|
||||
.pop()
|
||||
.expect("ciphertext should be present")
|
||||
.try_into()
|
||||
{
|
||||
ciphertext
|
||||
} else {
|
||||
panic!("ciphertext should be a block")
|
||||
};
|
||||
|
||||
Ok(ciphertext.into())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn encrypt_blind(&mut self) -> Result<Vec<u8>, BlockCipherError> {
|
||||
let key = self
|
||||
.state
|
||||
.key
|
||||
.clone()
|
||||
.ok_or_else(BlockCipherError::key_not_set)?;
|
||||
|
||||
let BlockVars { msg, ciphertext } =
|
||||
if let Some(vars) = self.state.preprocessed_private.pop_front() {
|
||||
vars
|
||||
} else {
|
||||
self.define_block(Visibility::Blind)
|
||||
};
|
||||
|
||||
self.executor
|
||||
.execute(C::circuit(), &[key, msg], &[ciphertext.clone()])
|
||||
.await?;
|
||||
|
||||
let mut outputs = self.executor.decode(&[ciphertext]).await?;
|
||||
|
||||
let ciphertext: C::BLOCK = if let Ok(ciphertext) = outputs
|
||||
.pop()
|
||||
.expect("ciphertext should be present")
|
||||
.try_into()
|
||||
{
|
||||
ciphertext
|
||||
} else {
|
||||
panic!("ciphertext should be a block")
|
||||
};
|
||||
|
||||
Ok(ciphertext.into())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn encrypt_share(&mut self, plaintext: Vec<u8>) -> Result<Vec<u8>, BlockCipherError> {
|
||||
let len = plaintext.len();
|
||||
let block: C::BLOCK = plaintext
|
||||
.try_into()
|
||||
.map_err(|_| BlockCipherError::invalid_message_length::<C>(len))?;
|
||||
|
||||
let key = self
|
||||
.state
|
||||
.key
|
||||
.clone()
|
||||
.ok_or_else(BlockCipherError::key_not_set)?;
|
||||
|
||||
let BlockVars { msg, ciphertext } =
|
||||
if let Some(vars) = self.state.preprocessed_public.pop_front() {
|
||||
vars
|
||||
} else {
|
||||
self.define_block(Visibility::Public)
|
||||
};
|
||||
|
||||
self.executor.assign(&msg, block)?;
|
||||
|
||||
self.executor
|
||||
.execute(C::circuit(), &[key, msg], &[ciphertext.clone()])
|
||||
.await?;
|
||||
|
||||
let mut outputs = self.executor.decode_shared(&[ciphertext]).await?;
|
||||
|
||||
let share: C::BLOCK =
|
||||
if let Ok(share) = outputs.pop().expect("share should be present").try_into() {
|
||||
share
|
||||
} else {
|
||||
panic!("share should be a block")
|
||||
};
|
||||
|
||||
Ok(share.into())
|
||||
}
|
||||
}
|
||||
@@ -1,39 +0,0 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use mpz_circuits::{
|
||||
circuits::AES128,
|
||||
types::{StaticValueType, Value},
|
||||
Circuit,
|
||||
};
|
||||
|
||||
/// A block cipher circuit.
///
/// Describes a concrete cipher (e.g. AES-128) as a garbled circuit, together
/// with the plain-data types used for its key and block.
pub trait BlockCipherCircuit: Default + Clone + Send + Sync {
    /// The key type.
    type KEY: StaticValueType + Send + Sync;
    /// The block type.
    ///
    /// Must convert to/from raw bytes and VM `Value`s so plaintexts and
    /// decoded outputs can cross the MPC boundary.
    type BLOCK: StaticValueType + TryFrom<Vec<u8>> + TryFrom<Value> + Into<Vec<u8>> + Send + Sync;

    /// The length of the key in bytes.
    const KEY_LEN: usize;
    /// The length of the block in bytes.
    const BLOCK_LEN: usize;

    /// Returns the circuit of the cipher.
    fn circuit() -> Arc<Circuit>;
}
|
||||
|
||||
/// Aes128 block cipher circuit.
#[derive(Default, Debug, Clone)]
pub struct Aes128;

impl BlockCipherCircuit for Aes128 {
    // AES-128 uses a 128-bit key and operates on 128-bit blocks.
    type KEY = [u8; 16];
    type BLOCK = [u8; 16];

    const KEY_LEN: usize = 16;
    const BLOCK_LEN: usize = 16;

    fn circuit() -> Arc<Circuit> {
        // Shared, pre-built AES-128 circuit from `mpz_circuits`.
        AES128.clone()
    }
}
|
||||
@@ -1,16 +0,0 @@
|
||||
use derive_builder::Builder;
|
||||
|
||||
/// Configuration for a block cipher.
#[derive(Debug, Clone, Builder)]
pub struct BlockCipherConfig {
    /// The ID of the block cipher.
    // `setter(into)` lets callers pass any `Into<String>`, e.g. `&str`.
    #[builder(setter(into))]
    pub(crate) id: String,
}

impl BlockCipherConfig {
    /// Creates a new builder for the block cipher configuration.
    pub fn builder() -> BlockCipherConfigBuilder {
        BlockCipherConfigBuilder::default()
    }
}
|
||||
@@ -1,92 +0,0 @@
|
||||
use core::fmt;
|
||||
use std::error::Error;
|
||||
|
||||
use crate::BlockCipherCircuit;
|
||||
|
||||
/// A block cipher error.
#[derive(Debug, thiserror::Error)]
pub struct BlockCipherError {
    // Broad category of the failure; drives the `Display` prefix.
    kind: ErrorKind,
    // Optional underlying cause, exposed via `Error::source`.
    #[source]
    source: Option<Box<dyn Error + Send + Sync>>,
}
|
||||
|
||||
impl BlockCipherError {
    /// Wraps `source` in an error of the given `kind`.
    pub(crate) fn new<E>(kind: ErrorKind, source: E) -> Self
    where
        E: Into<Box<dyn Error + Send + Sync>>,
    {
        Self {
            kind,
            source: Some(source.into()),
        }
    }

    /// Error returned when an operation requires the key before it was set.
    pub(crate) fn key_not_set() -> Self {
        Self {
            kind: ErrorKind::Key,
            source: Some("key not set".into()),
        }
    }

    /// Error returned when a plaintext's length does not equal the cipher's
    /// block length.
    pub(crate) fn invalid_message_length<C: BlockCipherCircuit>(len: usize) -> Self {
        Self {
            kind: ErrorKind::Msg,
            source: Some(
                format!(
                    "message length does not equal block length: {} != {}",
                    len,
                    C::BLOCK_LEN
                )
                .into(),
            ),
        }
    }
}
|
||||
|
||||
/// Broad category of a block cipher failure.
#[derive(Debug)]
pub(crate) enum ErrorKind {
    /// Failure inside the underlying MPC virtual machine.
    Vm,
    /// Key missing or invalid.
    Key,
    /// Message (plaintext) invalid, e.g. wrong length.
    Msg,
}
|
||||
|
||||
impl fmt::Display for BlockCipherError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self.kind {
|
||||
ErrorKind::Vm => write!(f, "vm error")?,
|
||||
ErrorKind::Key => write!(f, "key error")?,
|
||||
ErrorKind::Msg => write!(f, "message error")?,
|
||||
}
|
||||
|
||||
if let Some(ref source) = self.source {
|
||||
write!(f, " caused by: {}", source)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// Conversions from the underlying VM error types. All map to
// `ErrorKind::Vm`, which lets `?` be used directly on executor calls.

impl From<mpz_garble::MemoryError> for BlockCipherError {
    fn from(error: mpz_garble::MemoryError) -> Self {
        Self::new(ErrorKind::Vm, error)
    }
}

impl From<mpz_garble::LoadError> for BlockCipherError {
    fn from(error: mpz_garble::LoadError) -> Self {
        Self::new(ErrorKind::Vm, error)
    }
}

impl From<mpz_garble::ExecutionError> for BlockCipherError {
    fn from(error: mpz_garble::ExecutionError) -> Self {
        Self::new(ErrorKind::Vm, error)
    }
}

impl From<mpz_garble::DecodeError> for BlockCipherError {
    fn from(error: mpz_garble::DecodeError) -> Self {
        Self::new(ErrorKind::Vm, error)
    }
}
|
||||
@@ -1,236 +0,0 @@
|
||||
//! This crate provides a 2PC block cipher implementation.
|
||||
//!
|
||||
//! Both parties work together to encrypt or share an encrypted block using a
|
||||
//! shared key.
|
||||
|
||||
#![deny(missing_docs, unreachable_pub, unused_must_use)]
|
||||
#![deny(clippy::all)]
|
||||
#![deny(unsafe_code)]
|
||||
|
||||
mod cipher;
|
||||
mod circuit;
|
||||
mod config;
|
||||
mod error;
|
||||
|
||||
use async_trait::async_trait;
|
||||
|
||||
use mpz_garble::value::ValueRef;
|
||||
|
||||
pub use crate::{
|
||||
cipher::MpcBlockCipher,
|
||||
circuit::{Aes128, BlockCipherCircuit},
|
||||
};
|
||||
pub use config::{BlockCipherConfig, BlockCipherConfigBuilder, BlockCipherConfigBuilderError};
|
||||
pub use error::BlockCipherError;
|
||||
|
||||
/// Visibility of a message plaintext.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Visibility {
    /// Private message: this party provides the plaintext, hidden from the
    /// other party.
    Private,
    /// Blind message: the other party provides the plaintext.
    Blind,
    /// Public message: the plaintext is known to both parties.
    Public,
}
|
||||
|
||||
/// A trait for MPC block ciphers.
#[async_trait]
pub trait BlockCipher<Cipher>: Send + Sync
where
    Cipher: BlockCipherCircuit,
{
    /// Sets the key for the block cipher.
    fn set_key(&mut self, key: ValueRef);

    /// Preprocesses `count` blocks.
    ///
    /// Ahead-of-time preparation so later `encrypt_*` calls can reuse the
    /// preprocessed variables instead of defining them on the fly.
    ///
    /// # Arguments
    ///
    /// * `visibility` - The visibility of the plaintext.
    /// * `count` - The number of blocks to preprocess.
    async fn preprocess(
        &mut self,
        visibility: Visibility,
        count: usize,
    ) -> Result<(), BlockCipherError>;

    /// Encrypts the given plaintext keeping it hidden from the other party(s).
    ///
    /// Returns the ciphertext.
    ///
    /// # Arguments
    ///
    /// * `plaintext` - The plaintext to encrypt.
    async fn encrypt_private(&mut self, plaintext: Vec<u8>) -> Result<Vec<u8>, BlockCipherError>;

    /// Encrypts a plaintext provided by the other party(s).
    ///
    /// Returns the ciphertext.
    async fn encrypt_blind(&mut self) -> Result<Vec<u8>, BlockCipherError>;

    /// Encrypts a plaintext provided by both parties. Fails if the
    /// plaintext provided by both parties does not match.
    ///
    /// Returns an additive share of the ciphertext.
    ///
    /// # Arguments
    ///
    /// * `plaintext` - The plaintext to encrypt.
    async fn encrypt_share(&mut self, plaintext: Vec<u8>) -> Result<Vec<u8>, BlockCipherError>;
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    use mpz_garble::{protocol::deap::mock::create_mock_deap_vm, Memory};

    use crate::circuit::Aes128;

    use ::aes::Aes128 as TestAes128;
    use ::cipher::{BlockEncrypt, KeyInit};

    /// Reference AES-128 single-block encryption using the `aes` crate.
    fn aes128(key: [u8; 16], msg: [u8; 16]) -> [u8; 16] {
        let mut msg = msg.into();
        let cipher = TestAes128::new(&key.into());
        cipher.encrypt_block(&mut msg);
        msg.into()
    }

    /// Leader encrypts privately while the follower participates blind; both
    /// should learn the same, correct ciphertext.
    #[tokio::test]
    #[ignore = "expensive"]
    async fn test_block_cipher_blind() {
        let leader_config = BlockCipherConfig::builder().id("test").build().unwrap();
        let follower_config = BlockCipherConfig::builder().id("test").build().unwrap();

        let key = [0u8; 16];

        let (leader_vm, follower_vm) = create_mock_deap_vm();

        // Key is public just for this test, typically it is private.
        let leader_key = leader_vm.new_public_input::<[u8; 16]>("key").unwrap();
        let follower_key = follower_vm.new_public_input::<[u8; 16]>("key").unwrap();

        leader_vm.assign(&leader_key, key).unwrap();
        follower_vm.assign(&follower_key, key).unwrap();

        let mut leader = MpcBlockCipher::<Aes128, _>::new(leader_config, leader_vm);
        leader.set_key(leader_key);

        let mut follower = MpcBlockCipher::<Aes128, _>::new(follower_config, follower_vm);
        follower.set_key(follower_key);

        let plaintext = [0u8; 16];

        let (leader_ciphertext, follower_ciphertext) = tokio::try_join!(
            leader.encrypt_private(plaintext.to_vec()),
            follower.encrypt_blind()
        )
        .unwrap();

        let expected = aes128(key, plaintext);

        assert_eq!(leader_ciphertext, expected.to_vec());
        assert_eq!(leader_ciphertext, follower_ciphertext);
    }

    /// Both parties provide the same plaintext and receive additive shares
    /// which XOR to the real ciphertext.
    #[tokio::test]
    #[ignore = "expensive"]
    async fn test_block_cipher_share() {
        let leader_config = BlockCipherConfig::builder().id("test").build().unwrap();
        let follower_config = BlockCipherConfig::builder().id("test").build().unwrap();

        let key = [0u8; 16];

        let (leader_vm, follower_vm) = create_mock_deap_vm();

        // Key is public just for this test, typically it is private.
        let leader_key = leader_vm.new_public_input::<[u8; 16]>("key").unwrap();
        let follower_key = follower_vm.new_public_input::<[u8; 16]>("key").unwrap();

        leader_vm.assign(&leader_key, key).unwrap();
        follower_vm.assign(&follower_key, key).unwrap();

        let mut leader = MpcBlockCipher::<Aes128, _>::new(leader_config, leader_vm);
        leader.set_key(leader_key);

        let mut follower = MpcBlockCipher::<Aes128, _>::new(follower_config, follower_vm);
        follower.set_key(follower_key);

        let plaintext = [0u8; 16];

        let (leader_share, follower_share) = tokio::try_join!(
            leader.encrypt_share(plaintext.to_vec()),
            follower.encrypt_share(plaintext.to_vec())
        )
        .unwrap();

        let expected = aes128(key, plaintext);

        // Shares are additive over GF(2): XOR recombines the ciphertext.
        let result: [u8; 16] = std::array::from_fn(|i| leader_share[i] ^ follower_share[i]);

        assert_eq!(result, expected);
    }

    /// Exercises the preprocessed paths for both private/blind and public
    /// encryption; results must match the non-preprocessed semantics.
    #[tokio::test]
    #[ignore = "expensive"]
    async fn test_block_cipher_preprocess() {
        let leader_config = BlockCipherConfig::builder().id("test").build().unwrap();
        let follower_config = BlockCipherConfig::builder().id("test").build().unwrap();

        let key = [0u8; 16];

        let (leader_vm, follower_vm) = create_mock_deap_vm();

        // Key is public just for this test, typically it is private.
        let leader_key = leader_vm.new_public_input::<[u8; 16]>("key").unwrap();
        let follower_key = follower_vm.new_public_input::<[u8; 16]>("key").unwrap();

        leader_vm.assign(&leader_key, key).unwrap();
        follower_vm.assign(&follower_key, key).unwrap();

        let mut leader = MpcBlockCipher::<Aes128, _>::new(leader_config, leader_vm);
        leader.set_key(leader_key);

        let mut follower = MpcBlockCipher::<Aes128, _>::new(follower_config, follower_vm);
        follower.set_key(follower_key);

        let plaintext = [0u8; 16];

        tokio::try_join!(
            leader.preprocess(Visibility::Private, 1),
            follower.preprocess(Visibility::Blind, 1)
        )
        .unwrap();

        let (leader_ciphertext, follower_ciphertext) = tokio::try_join!(
            leader.encrypt_private(plaintext.to_vec()),
            follower.encrypt_blind()
        )
        .unwrap();

        let expected = aes128(key, plaintext);

        assert_eq!(leader_ciphertext, expected.to_vec());
        assert_eq!(leader_ciphertext, follower_ciphertext);

        tokio::try_join!(
            leader.preprocess(Visibility::Public, 1),
            follower.preprocess(Visibility::Public, 1)
        )
        .unwrap();

        let (leader_share, follower_share) = tokio::try_join!(
            leader.encrypt_share(plaintext.to_vec()),
            follower.encrypt_share(plaintext.to_vec())
        )
        .unwrap();

        let expected = aes128(key, plaintext);

        let result: [u8; 16] = std::array::from_fn(|i| leader_share[i] ^ follower_share[i]);

        assert_eq!(result, expected);
    }
}
|
||||
34
crates/components/cipher/Cargo.toml
Normal file
34
crates/components/cipher/Cargo.toml
Normal file
@@ -0,0 +1,34 @@
|
||||
[package]
|
||||
name = "tlsn-cipher"
|
||||
authors = ["TLSNotary Team"]
|
||||
description = "This crate provides implementations of ciphers for two parties"
|
||||
keywords = ["tls", "mpc", "2pc", "aes"]
|
||||
categories = ["cryptography"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
version = "0.1.0-alpha.13-pre"
|
||||
edition = "2021"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[lib]
|
||||
name = "cipher"
|
||||
|
||||
[dependencies]
|
||||
mpz-circuits = { workspace = true }
|
||||
mpz-vm-core = { workspace = true }
|
||||
mpz-memory-core = { workspace = true }
|
||||
|
||||
async-trait = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
aes = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
mpz-garble = { workspace = true }
|
||||
mpz-common = { workspace = true }
|
||||
mpz-ot = { workspace = true }
|
||||
|
||||
tokio = { version = "1", features = ["macros", "rt", "rt-multi-thread"] }
|
||||
rand = { workspace = true }
|
||||
ctr = { workspace = true }
|
||||
cipher = { workspace = true }
|
||||
44
crates/components/cipher/src/aes/error.rs
Normal file
44
crates/components/cipher/src/aes/error.rs
Normal file
@@ -0,0 +1,44 @@
|
||||
use std::fmt::Display;
|
||||
|
||||
/// AES error.
#[derive(Debug, thiserror::Error)]
pub struct AesError {
    // Broad category of the failure; drives the `Display` prefix.
    kind: ErrorKind,
    // Optional underlying cause, exposed via `Error::source`.
    #[source]
    source: Option<Box<dyn std::error::Error + Send + Sync>>,
}

impl AesError {
    /// Wraps `source` in an error of the given `kind`.
    pub(crate) fn new<E>(kind: ErrorKind, source: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        Self {
            kind,
            source: Some(source.into()),
        }
    }
}

/// Broad category of an AES failure.
#[derive(Debug, Clone, Copy, PartialEq)]
pub(crate) enum ErrorKind {
    /// Failure inside the underlying MPC virtual machine.
    Vm,
    /// Key missing or invalid.
    Key,
    /// IV missing or invalid.
    Iv,
}

impl Display for AesError {
    // Formats as "<kind> caused by: <source>" when a source is attached.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self.kind {
            ErrorKind::Vm => write!(f, "vm error")?,
            ErrorKind::Key => write!(f, "key error")?,
            ErrorKind::Iv => write!(f, "iv error")?,
        }

        if let Some(source) = &self.source {
            write!(f, " caused by: {source}")?;
        }

        Ok(())
    }
}
|
||||
375
crates/components/cipher/src/aes/mod.rs
Normal file
375
crates/components/cipher/src/aes/mod.rs
Normal file
@@ -0,0 +1,375 @@
|
||||
//! The AES-128 block cipher.
|
||||
|
||||
use crate::{Cipher, CtrBlock, Keystream};
|
||||
use async_trait::async_trait;
|
||||
use mpz_circuits::circuits::AES128;
|
||||
use mpz_memory_core::binary::{Binary, U8};
|
||||
use mpz_vm_core::{prelude::*, Call, Vm};
|
||||
use std::fmt::Debug;
|
||||
|
||||
mod error;
|
||||
|
||||
pub use error::AesError;
|
||||
use error::ErrorKind;
|
||||
|
||||
/// Computes AES-128.
#[derive(Default, Debug)]
pub struct Aes128 {
    // VM reference to the 16-byte key; set via `set_key`.
    key: Option<Array<U8, 16>>,
    // VM reference to the 4-byte implicit IV; set via `set_iv`.
    iv: Option<Array<U8, 4>>,
}
|
||||
|
||||
#[async_trait]
impl Cipher for Aes128 {
    type Error = AesError;
    type Key = Array<U8, 16>;
    type Iv = Array<U8, 4>;
    type Nonce = Array<U8, 8>;
    type Counter = Array<U8, 4>;
    type Block = Array<U8, 16>;

    fn set_key(&mut self, key: Array<U8, 16>) {
        self.key = Some(key);
    }

    fn set_iv(&mut self, iv: Array<U8, 4>) {
        self.iv = Some(iv);
    }

    fn key(&self) -> Option<&Array<U8, 16>> {
        self.key.as_ref()
    }

    fn iv(&self) -> Option<&Array<U8, 4>> {
        self.iv.as_ref()
    }

    /// Allocates one ECB block: output = AES128(key, input).
    fn alloc_block(
        &self,
        vm: &mut dyn Vm<Binary>,
        input: Array<U8, 16>,
    ) -> Result<Self::Block, Self::Error> {
        let key = self
            .key
            .ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?;

        let output = vm
            .call(
                Call::builder(AES128.clone())
                    .arg(key)
                    .arg(input)
                    .build()
                    .expect("call should be valid"),
            )
            .map_err(|err| AesError::new(ErrorKind::Vm, err))?;

        Ok(output)
    }

    /// Allocates one counter-mode block with fresh public nonce/counter refs.
    fn alloc_ctr_block(
        &self,
        vm: &mut dyn Vm<Binary>,
    ) -> Result<CtrBlock<Self::Nonce, Self::Counter, Self::Block>, Self::Error> {
        let key = self
            .key
            .ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?;
        let iv = self
            .iv
            .ok_or_else(|| AesError::new(ErrorKind::Iv, "iv not set"))?;

        // Nonce and counter are public: they are protocol metadata, not
        // secret material.
        let explicit_nonce: Array<U8, 8> = vm
            .alloc()
            .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
        vm.mark_public(explicit_nonce)
            .map_err(|err| AesError::new(ErrorKind::Vm, err))?;

        let counter: Array<U8, 4> = vm
            .alloc()
            .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
        vm.mark_public(counter)
            .map_err(|err| AesError::new(ErrorKind::Vm, err))?;

        // NOTE(review): the same `AES128` circuit is invoked here with
        // (key, iv, nonce, counter) but with (key, block) in `alloc_block`
        // — confirm the circuit accepts both call shapes.
        let output = vm
            .call(
                Call::builder(AES128.clone())
                    .arg(key)
                    .arg(iv)
                    .arg(explicit_nonce)
                    .arg(counter)
                    .build()
                    .expect("call should be valid"),
            )
            .map_err(|err| AesError::new(ErrorKind::Vm, err))?;

        Ok(CtrBlock {
            explicit_nonce,
            counter,
            output,
        })
    }

    /// Allocates `len.div_ceil(16)` counter-mode blocks and bundles them
    /// into a [`Keystream`]. Inputs are allocated first, in one pass, then
    /// the circuit calls are issued, keeping block outputs contiguous.
    fn alloc_keystream(
        &self,
        vm: &mut dyn Vm<Binary>,
        len: usize,
    ) -> Result<Keystream<Self::Nonce, Self::Counter, Self::Block>, Self::Error> {
        let key = self
            .key
            .ok_or_else(|| AesError::new(ErrorKind::Key, "key not set"))?;
        let iv = self
            .iv
            .ok_or_else(|| AesError::new(ErrorKind::Iv, "iv not set"))?;

        // Round up so a partial final block is still covered.
        let block_count = len.div_ceil(16);

        let inputs = (0..block_count)
            .map(|_| {
                let explicit_nonce: Array<U8, 8> = vm
                    .alloc()
                    .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
                let counter: Array<U8, 4> = vm
                    .alloc()
                    .map_err(|err| AesError::new(ErrorKind::Vm, err))?;

                vm.mark_public(explicit_nonce)
                    .map_err(|err| AesError::new(ErrorKind::Vm, err))?;
                vm.mark_public(counter)
                    .map_err(|err| AesError::new(ErrorKind::Vm, err))?;

                Ok((explicit_nonce, counter))
            })
            .collect::<Result<Vec<_>, AesError>>()?;

        let blocks = inputs
            .into_iter()
            .map(|(explicit_nonce, counter)| {
                let output = vm
                    .call(
                        Call::builder(AES128.clone())
                            .arg(key)
                            .arg(iv)
                            .arg(explicit_nonce)
                            .arg(counter)
                            .build()
                            .expect("call should be valid"),
                    )
                    .map_err(|err| AesError::new(ErrorKind::Vm, err))?;

                Ok(CtrBlock {
                    explicit_nonce,
                    counter,
                    output,
                })
            })
            .collect::<Result<Vec<_>, AesError>>()?;

        Ok(Keystream::new(&blocks))
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::Cipher;
    use mpz_common::context::test_st_context;
    use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
    use mpz_memory_core::{
        binary::{Binary, U8},
        correlated::Delta,
        Array, MemoryExt, Vector, ViewExt,
    };
    use mpz_ot::ideal::cot::ideal_cot;
    use mpz_vm_core::{Execute, Vm};
    use rand::{rngs::StdRng, SeedableRng};

    /// Counter-mode keystream agrees between garbler and evaluator and
    /// matches a reference CTR implementation.
    #[tokio::test]
    async fn test_aes_ctr() {
        let key = [42_u8; 16];
        let iv = [3_u8; 4];
        let nonce = [5_u8; 8];
        let start_counter = 3u32;

        let (mut ctx_a, mut ctx_b) = test_st_context(8);
        let (mut gen, mut ev) = mock_vm();

        let aes_gen = setup_ctr(key, iv, &mut gen);
        let aes_ev = setup_ctr(key, iv, &mut ev);

        let msg = vec![42u8; 128];

        let keystream_gen = aes_gen.alloc_keystream(&mut gen, msg.len()).unwrap();
        let keystream_ev = aes_ev.alloc_keystream(&mut ev, msg.len()).unwrap();

        let msg_ref_gen: Vector<U8> = gen.alloc_vec(msg.len()).unwrap();
        gen.mark_public(msg_ref_gen).unwrap();
        gen.assign(msg_ref_gen, msg.clone()).unwrap();
        gen.commit(msg_ref_gen).unwrap();

        let msg_ref_ev: Vector<U8> = ev.alloc_vec(msg.len()).unwrap();
        ev.mark_public(msg_ref_ev).unwrap();
        ev.assign(msg_ref_ev, msg.clone()).unwrap();
        ev.commit(msg_ref_ev).unwrap();

        // Each party feeds the same nonce and an incrementing big-endian
        // counter into the keystream blocks.
        let mut ctr = start_counter..;
        keystream_gen
            .assign(&mut gen, nonce, move || ctr.next().unwrap().to_be_bytes())
            .unwrap();
        let mut ctr = start_counter..;
        keystream_ev
            .assign(&mut ev, nonce, move || ctr.next().unwrap().to_be_bytes())
            .unwrap();

        let cipher_out_gen = keystream_gen.apply(&mut gen, msg_ref_gen).unwrap();
        let cipher_out_ev = keystream_ev.apply(&mut ev, msg_ref_ev).unwrap();

        let (ct_gen, ct_ev) = tokio::try_join!(
            async {
                let out = gen.decode(cipher_out_gen).unwrap();
                gen.flush(&mut ctx_a).await.unwrap();
                gen.execute(&mut ctx_a).await.unwrap();
                gen.flush(&mut ctx_a).await.unwrap();
                out.await
            },
            async {
                let out = ev.decode(cipher_out_ev).unwrap();
                ev.flush(&mut ctx_b).await.unwrap();
                ev.execute(&mut ctx_b).await.unwrap();
                ev.flush(&mut ctx_b).await.unwrap();
                out.await
            }
        )
        .unwrap();

        assert_eq!(ct_gen, ct_ev);

        let expected = aes_apply_keystream(key, iv, nonce, start_counter as usize, msg);
        assert_eq!(ct_gen, expected);
    }

    /// Single ECB block agrees between the two parties and matches the
    /// reference AES implementation.
    #[tokio::test]
    async fn test_aes_ecb() {
        let key = [1_u8; 16];
        let input = [5_u8; 16];

        let (mut ctx_a, mut ctx_b) = test_st_context(8);
        let (mut gen, mut ev) = mock_vm();

        let aes_gen = setup_block(key, &mut gen);
        let aes_ev = setup_block(key, &mut ev);

        let block_ref_gen: Array<U8, 16> = gen.alloc().unwrap();
        gen.mark_public(block_ref_gen).unwrap();
        gen.assign(block_ref_gen, input).unwrap();
        gen.commit(block_ref_gen).unwrap();

        let block_ref_ev: Array<U8, 16> = ev.alloc().unwrap();
        ev.mark_public(block_ref_ev).unwrap();
        ev.assign(block_ref_ev, input).unwrap();
        ev.commit(block_ref_ev).unwrap();

        let block_gen = aes_gen.alloc_block(&mut gen, block_ref_gen).unwrap();
        let block_ev = aes_ev.alloc_block(&mut ev, block_ref_ev).unwrap();

        let (ciphertext_gen, ciphetext_ev) = tokio::try_join!(
            async {
                let out = gen.decode(block_gen).unwrap();
                gen.flush(&mut ctx_a).await.unwrap();
                gen.execute(&mut ctx_a).await.unwrap();
                gen.flush(&mut ctx_a).await.unwrap();
                out.await
            },
            async {
                let out = ev.decode(block_ev).unwrap();
                ev.flush(&mut ctx_b).await.unwrap();
                ev.execute(&mut ctx_b).await.unwrap();
                ev.flush(&mut ctx_b).await.unwrap();
                out.await
            }
        )
        .unwrap();

        assert_eq!(ciphertext_gen, ciphetext_ev);

        let expected = aes128(key, input);
        assert_eq!(ciphertext_gen, expected);
    }

    /// Builds a semi-honest garbler/evaluator pair over ideal COT.
    fn mock_vm() -> (impl Vm<Binary>, impl Vm<Binary>) {
        let mut rng = StdRng::seed_from_u64(0);
        let delta = Delta::random(&mut rng);

        let (cot_send, cot_recv) = ideal_cot(delta.into_inner());

        let gen = Garbler::new(cot_send, [0u8; 16], delta);
        let ev = Evaluator::new(cot_recv);

        (gen, ev)
    }

    /// Creates an `Aes128` with public key and IV assigned in `vm`.
    fn setup_ctr(key: [u8; 16], iv: [u8; 4], vm: &mut dyn Vm<Binary>) -> Aes128 {
        let key_ref: Array<U8, 16> = vm.alloc().unwrap();
        vm.mark_public(key_ref).unwrap();
        vm.assign(key_ref, key).unwrap();
        vm.commit(key_ref).unwrap();

        let iv_ref: Array<U8, 4> = vm.alloc().unwrap();
        vm.mark_public(iv_ref).unwrap();
        vm.assign(iv_ref, iv).unwrap();
        vm.commit(iv_ref).unwrap();

        let mut aes = Aes128::default();

        aes.set_key(key_ref);
        aes.set_iv(iv_ref);

        aes
    }

    /// Creates an `Aes128` with only a public key assigned (ECB use).
    fn setup_block(key: [u8; 16], vm: &mut dyn Vm<Binary>) -> Aes128 {
        let key_ref: Array<U8, 16> = vm.alloc().unwrap();
        vm.mark_public(key_ref).unwrap();
        vm.assign(key_ref, key).unwrap();
        vm.commit(key_ref).unwrap();

        let mut aes = Aes128::default();
        aes.set_key(key_ref);

        aes
    }

    /// Reference CTR keystream application using the `ctr`/`aes` crates.
    fn aes_apply_keystream(
        key: [u8; 16],
        iv: [u8; 4],
        explicit_nonce: [u8; 8],
        start_ctr: usize,
        msg: Vec<u8>,
    ) -> Vec<u8> {
        use ::cipher::{KeyIvInit, StreamCipher, StreamCipherSeek};
        use aes::Aes128;
        use ctr::Ctr32BE;

        // Full 16-byte IV layout: iv || explicit_nonce || counter.
        let mut full_iv = [0u8; 16];
        full_iv[0..4].copy_from_slice(&iv);
        full_iv[4..12].copy_from_slice(&explicit_nonce);

        let mut cipher = Ctr32BE::<Aes128>::new(&key.into(), &full_iv.into());
        let mut out = msg.clone();

        cipher
            .try_seek(start_ctr * 16)
            .expect("start counter is less than keystream length");
        cipher.apply_keystream(&mut out);

        out
    }

    /// Reference AES-128 single-block encryption.
    fn aes128(key: [u8; 16], msg: [u8; 16]) -> [u8; 16] {
        use ::aes::Aes128 as TestAes128;
        use ::cipher::{BlockEncrypt, KeyInit};

        let mut msg = msg.into();
        let cipher = TestAes128::new(&key.into());
        cipher.encrypt_block(&mut msg);
        msg.into()
    }
}
|
||||
299
crates/components/cipher/src/lib.rs
Normal file
299
crates/components/cipher/src/lib.rs
Normal file
@@ -0,0 +1,299 @@
|
||||
//! This crate provides implementations of 2PC ciphers for encryption with a
|
||||
//! shared key.
|
||||
//!
|
||||
//! Both parties can work together to encrypt and decrypt messages with
|
||||
//! different visibility configurations. See [`Cipher`] and [`Keystream`] for
|
||||
//! more information on the interface.
|
||||
|
||||
#![deny(missing_docs, unreachable_pub, unused_must_use)]
|
||||
#![deny(clippy::all)]
|
||||
#![forbid(unsafe_code)]
|
||||
|
||||
pub mod aes;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use mpz_circuits::circuits::xor;
|
||||
use mpz_memory_core::{
|
||||
binary::{Binary, U8},
|
||||
MemoryExt, Repr, Slice, StaticSize, ToRaw, Vector,
|
||||
};
|
||||
use mpz_vm_core::{prelude::*, Call, CallBuilder, CallError, Vm};
|
||||
use std::{collections::VecDeque, sync::Arc};
|
||||
|
||||
/// Provides computation of 2PC ciphers in counter and ECB mode.
///
/// After setting `key` and `iv` allows to compute the keystream via
/// [`Cipher::alloc_keystream`] or a single block in ECB mode via
/// [`Cipher::alloc_block`]. [`Keystream`] provides more tooling to compute the
/// final cipher output in counter mode.
#[async_trait]
pub trait Cipher {
    /// The error type for the cipher.
    type Error: std::error::Error + Send + Sync + 'static;
    /// Cipher key.
    type Key;
    /// Cipher IV.
    type Iv;
    /// Cipher nonce.
    type Nonce;
    /// Cipher counter.
    type Counter;
    /// Cipher block.
    type Block;

    /// Sets the key.
    fn set_key(&mut self, key: Self::Key);

    /// Sets the initialization vector.
    fn set_iv(&mut self, iv: Self::Iv);

    /// Returns the key reference.
    fn key(&self) -> Option<&Self::Key>;

    /// Returns the iv reference.
    fn iv(&self) -> Option<&Self::Iv>;

    /// Allocates a single block in ECB mode.
    fn alloc_block(
        &self,
        vm: &mut dyn Vm<Binary>,
        input: Self::Block,
    ) -> Result<Self::Block, Self::Error>;

    /// Allocates a single block in counter mode.
    #[allow(clippy::type_complexity)]
    fn alloc_ctr_block(
        &self,
        vm: &mut dyn Vm<Binary>,
    ) -> Result<CtrBlock<Self::Nonce, Self::Counter, Self::Block>, Self::Error>;

    /// Allocates a keystream in counter mode.
    ///
    /// # Arguments
    ///
    /// * `vm` - Virtual machine to allocate into.
    /// * `len` - Length of the stream in bytes.
    #[allow(clippy::type_complexity)]
    fn alloc_keystream(
        &self,
        vm: &mut dyn Vm<Binary>,
        len: usize,
    ) -> Result<Keystream<Self::Nonce, Self::Counter, Self::Block>, Self::Error>;
}
|
||||
|
||||
/// A block in counter mode.
///
/// Bundles the VM references that together define one keystream block:
/// output = cipher(key, iv, explicit_nonce, counter).
#[derive(Debug, Clone, Copy)]
pub struct CtrBlock<N, C, O> {
    /// Explicit nonce reference.
    pub explicit_nonce: N,
    /// Counter reference.
    pub counter: C,
    /// Output reference.
    pub output: O,
}
|
||||
|
||||
/// The keystream of the cipher.
///
/// Can be used to XOR with the cipher input to operate the cipher in counter
/// mode.
pub struct Keystream<N, C, O> {
    /// Sequential keystream blocks. Outputs are stored in contiguous memory.
    blocks: VecDeque<CtrBlock<N, C, O>>,
}
|
||||
|
||||
impl<N, C, O> Default for Keystream<N, C, O> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
blocks: VecDeque::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<N, C, O> Keystream<N, C, O>
|
||||
where
|
||||
N: Repr<Binary> + StaticSize<Binary> + Copy,
|
||||
C: Repr<Binary> + StaticSize<Binary> + Copy,
|
||||
O: Repr<Binary> + StaticSize<Binary> + Copy,
|
||||
{
|
||||
/// Creates a new keystream from the provided blocks.
|
||||
pub fn new(blocks: &[CtrBlock<N, C, O>]) -> Self {
|
||||
Self {
|
||||
blocks: VecDeque::from_iter(blocks.iter().copied()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Consumes keystream material.
|
||||
///
|
||||
/// Returns the consumed keystream material, leaving the remaining material
|
||||
/// in place.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `len` - Length of the keystream in bytes to return.
|
||||
pub fn consume(&mut self, len: usize) -> Result<Self, CipherError> {
|
||||
let block_count = len.div_ceil(self.block_size());
|
||||
|
||||
if block_count > self.blocks.len() {
|
||||
return Err(CipherError::new("insufficient keystream"));
|
||||
}
|
||||
|
||||
let blocks = self.blocks.split_off(self.blocks.len() - block_count);
|
||||
|
||||
Ok(Self { blocks })
|
||||
}
|
||||
|
||||
/// Applies the keystream to the provided input.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `vm` - Virtual machine.
|
||||
/// * `input` - Input data.
|
||||
pub fn apply(
|
||||
&self,
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
input: Vector<U8>,
|
||||
) -> Result<Vector<U8>, CipherError> {
|
||||
if input.len() != self.len() {
|
||||
return Err(CipherError::new("input length must match keystream length"));
|
||||
} else if self.blocks.is_empty() {
|
||||
return Err(CipherError::new("no keystream material available"));
|
||||
}
|
||||
|
||||
let xor = Arc::new(xor(self.block_size() * 8));
|
||||
let mut pos = 0;
|
||||
let mut outputs = Vec::with_capacity(self.blocks.len());
|
||||
for block in &self.blocks {
|
||||
let call = CallBuilder::new(xor.clone())
|
||||
.arg(block.output)
|
||||
.arg(
|
||||
input
|
||||
.get(pos..pos + self.block_size())
|
||||
.expect("input length was checked"),
|
||||
)
|
||||
.build()?;
|
||||
let output: Vector<U8> = vm.call(call).map_err(CipherError::new)?;
|
||||
outputs.push(output);
|
||||
pos += self.block_size();
|
||||
}
|
||||
|
||||
let output = flatten_blocks(vm, outputs.iter().map(|block| block.to_raw()))?;
|
||||
|
||||
Ok(output)
|
||||
}
|
||||
|
||||
/// Returns `len` bytes of the keystream as a vector.
|
||||
pub fn to_vector(
|
||||
&self,
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
len: usize,
|
||||
) -> Result<Vector<U8>, CipherError> {
|
||||
if len == 0 {
|
||||
return Err(CipherError::new("length must be greater than 0"));
|
||||
} else if self.blocks.is_empty() {
|
||||
return Err(CipherError::new("no keystream material available"));
|
||||
}
|
||||
|
||||
let block_count = len.div_ceil(self.block_size());
|
||||
if block_count != self.blocks.len() {
|
||||
return Err(CipherError::new("length does not match keystream length"));
|
||||
}
|
||||
|
||||
let mut keystream =
|
||||
flatten_blocks(vm, self.blocks.iter().map(|block| block.output.to_raw()))?;
|
||||
keystream.truncate(len);
|
||||
|
||||
Ok(keystream)
|
||||
}
|
||||
|
||||
/// Assigns the keystream inputs.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `vm` - Virtual machine.
|
||||
/// * `explicit_nonce` - Explicit nonce.
|
||||
/// * `ctr` - Counter function. The provided function will be called to
|
||||
/// assign the counter values for each block.
|
||||
pub fn assign(
|
||||
&self,
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
explicit_nonce: N::Clear,
|
||||
mut ctr: impl FnMut() -> C::Clear,
|
||||
) -> Result<(), CipherError>
|
||||
where
|
||||
N::Clear: Copy,
|
||||
C::Clear: Copy,
|
||||
{
|
||||
for block in &self.blocks {
|
||||
vm.assign(block.explicit_nonce, explicit_nonce)
|
||||
.map_err(CipherError::new)?;
|
||||
vm.commit(block.explicit_nonce).map_err(CipherError::new)?;
|
||||
vm.assign(block.counter, ctr()).map_err(CipherError::new)?;
|
||||
vm.commit(block.counter).map_err(CipherError::new)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns the block size in bytes.
|
||||
fn block_size(&self) -> usize {
|
||||
O::SIZE / 8
|
||||
}
|
||||
|
||||
/// Returns the length of the keystream in bytes.
|
||||
fn len(&self) -> usize {
|
||||
self.block_size() * self.blocks.len()
|
||||
}
|
||||
}
|
||||
|
||||
fn flatten_blocks(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
blocks: impl IntoIterator<Item = Slice>,
|
||||
) -> Result<Vector<U8>, CipherError> {
|
||||
use mpz_circuits::CircuitBuilder;
|
||||
|
||||
let blocks = blocks.into_iter().collect::<Vec<_>>();
|
||||
let len: usize = blocks.iter().map(|block| block.len()).sum();
|
||||
|
||||
let mut builder = CircuitBuilder::new();
|
||||
for _ in 0..len {
|
||||
let i = builder.add_input();
|
||||
let o = builder.add_id_gate(i);
|
||||
builder.add_output(o);
|
||||
}
|
||||
|
||||
let circuit = builder.build().expect("flatten circuit should be valid");
|
||||
|
||||
let mut builder = Call::builder(Arc::new(circuit));
|
||||
for block in blocks {
|
||||
builder = builder.arg(block);
|
||||
}
|
||||
|
||||
let call = builder.build().map_err(CipherError::new)?;
|
||||
|
||||
vm.call(call).map_err(CipherError::new)
|
||||
}
|
||||
|
||||
/// A cipher error.
#[derive(Debug, thiserror::Error)]
#[error("{source}")]
pub struct CipherError {
    // Boxed trait object so any error type can be wrapped without a generic
    // parameter on the error itself.
    #[source]
    source: Box<dyn std::error::Error + Send + Sync>,
}
|
||||
|
||||
impl CipherError {
|
||||
pub(crate) fn new<E>(source: E) -> Self
|
||||
where
|
||||
E: Into<Box<dyn std::error::Error + Send + Sync>>,
|
||||
{
|
||||
Self {
|
||||
source: source.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<CallError> for CipherError {
    // Call-construction failures are wrapped like any other source error.
    fn from(value: CallError) -> Self {
        Self::new(value)
    }
}
|
||||
29
crates/components/deap/Cargo.toml
Normal file
29
crates/components/deap/Cargo.toml
Normal file
@@ -0,0 +1,29 @@
|
||||
[package]
|
||||
name = "tlsn-deap"
|
||||
version = "0.1.0-alpha.13-pre"
|
||||
edition = "2021"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
mpz-core = { workspace = true }
|
||||
mpz-common = { workspace = true }
|
||||
mpz-vm-core = { workspace = true }
|
||||
rangeset = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
serio = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
tokio = { workspace = true, features = ["sync"] }
|
||||
|
||||
[dev-dependencies]
|
||||
mpz-circuits = { workspace = true }
|
||||
mpz-garble = { workspace = true }
|
||||
mpz-ot = { workspace = true }
|
||||
mpz-zk = { workspace = true }
|
||||
|
||||
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }
|
||||
rand = { workspace = true }
|
||||
rand06-compat = { workspace = true }
|
||||
651
crates/components/deap/src/lib.rs
Normal file
651
crates/components/deap/src/lib.rs
Normal file
@@ -0,0 +1,651 @@
|
||||
//! Dual-execution with Asymmetric Privacy (DEAP) protocol.
|
||||
|
||||
#![deny(missing_docs, unreachable_pub, unused_must_use)]
|
||||
#![deny(clippy::all)]
|
||||
#![forbid(unsafe_code)]
|
||||
|
||||
mod map;
|
||||
|
||||
use std::{mem, sync::Arc};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use mpz_common::Context;
|
||||
use mpz_core::bitvec::BitVec;
|
||||
use mpz_vm_core::{
|
||||
memory::{binary::Binary, DecodeFuture, Memory, Repr, Slice, View},
|
||||
Call, Callable, Execute, Vm, VmError,
|
||||
};
|
||||
use rangeset::{Difference, RangeSet, UnionMut};
|
||||
use tokio::sync::{Mutex, MutexGuard, OwnedMutexGuard};
|
||||
|
||||
type Error = DeapError;
|
||||
|
||||
/// The role of the DEAP VM.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[allow(missing_docs)]
pub enum Role {
    /// The leading party, whose outputs are verified by the follower at
    /// finalization.
    Leader,
    /// The following party, which checks that MPC and ZK outputs agree
    /// during finalization.
    Follower,
}
|
||||
|
||||
/// DEAP VM.
///
/// Wraps an MPC VM and a ZK VM, mirroring every allocation and call into
/// both so that the follower can verify consistency at finalization.
#[derive(Debug)]
pub struct Deap<Mpc, Zk> {
    role: Role,
    mpc: Arc<Mutex<Mpc>>,
    zk: Arc<Mutex<Zk>>,
    /// Mapping between the memories of the MPC and ZK VMs.
    memory_map: map::MemoryMap,
    /// Ranges of the follower's private inputs in the MPC VM.
    follower_input_ranges: RangeSet<usize>,
    /// Private inputs of the follower in the MPC VM.
    follower_inputs: Vec<Slice>,
    /// Outputs of the follower from the ZK VM. The references
    /// correspond to the MPC VM.
    outputs: Vec<(Slice, DecodeFuture<BitVec>)>,
}
|
||||
|
||||
impl<Mpc, Zk> Deap<Mpc, Zk> {
|
||||
/// Creates a new DEAP VM.
|
||||
pub fn new(role: Role, mpc: Mpc, zk: Zk) -> Self {
|
||||
Self {
|
||||
role,
|
||||
mpc: Arc::new(Mutex::new(mpc)),
|
||||
zk: Arc::new(Mutex::new(zk)),
|
||||
memory_map: map::MemoryMap::default(),
|
||||
follower_input_ranges: RangeSet::default(),
|
||||
follower_inputs: Vec::default(),
|
||||
outputs: Vec::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the MPC and ZK VMs.
|
||||
pub fn into_inner(self) -> (Mpc, Zk) {
|
||||
(
|
||||
Arc::into_inner(self.mpc).unwrap().into_inner(),
|
||||
Arc::into_inner(self.zk).unwrap().into_inner(),
|
||||
)
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the ZK VM.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if the mutex is locked by another thread.
|
||||
pub fn zk(&self) -> MutexGuard<'_, Zk> {
|
||||
self.zk.try_lock().unwrap()
|
||||
}
|
||||
|
||||
/// Returns an owned mutex guard to the ZK VM.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if the mutex is locked by another thread.
|
||||
pub fn zk_owned(&self) -> OwnedMutexGuard<Zk> {
|
||||
self.zk.clone().try_lock_owned().unwrap()
|
||||
}
|
||||
|
||||
/// Translates a value from the MPC VM address space to the ZK VM address
|
||||
/// space.
|
||||
pub fn translate<T: Repr<Binary>>(&self, value: T) -> Result<T, VmError> {
|
||||
self.memory_map.try_get(value.to_raw()).map(T::from_raw)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn mpc(&self) -> MutexGuard<'_, Mpc> {
|
||||
self.mpc.try_lock().unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
impl<Mpc, Zk> Deap<Mpc, Zk>
where
    Mpc: Vm<Binary> + Send + 'static,
    Zk: Vm<Binary> + Send + 'static,
{
    /// Finalizes the DEAP VM.
    ///
    /// This reveals all private inputs of the follower.
    ///
    /// # Panics
    ///
    /// Panics if either inner VM mutex is locked elsewhere.
    pub async fn finalize(&mut self, ctx: &mut Context) -> Result<(), VmError> {
        let mut mpc = self.mpc.try_lock().unwrap();
        let mut zk = self.zk.try_lock().unwrap();

        // Decode the private inputs of the follower.
        //
        // # Security
        //
        // This assumes that the decoding process is authenticated from the leader's
        // perspective. In the case of garbled circuits, the leader should be the
        // generator such that the follower proves their inputs using their committed
        // MACs.
        let input_futs = self
            .follower_inputs
            .iter()
            .map(|&input| mpc.decode_raw(input))
            .collect::<Result<Vec<_>, _>>()?;

        mpc.execute_all(ctx).await?;

        // Assign inputs to the ZK VM.
        for (mut decode, &input) in input_futs.into_iter().zip(&self.follower_inputs) {
            // Translate the MPC reference into the ZK VM address space.
            let input = self.memory_map.try_get(input)?;

            // Follower has already assigned the inputs.
            if let Role::Leader = self.role {
                let value = decode
                    .try_recv()
                    .map_err(VmError::memory)?
                    .expect("input should be decoded");
                zk.assign_raw(input, value)?;
            }

            // Now the follower's inputs are public.
            zk.commit_raw(input)?;
        }

        zk.execute_all(ctx).await.map_err(VmError::execute)?;

        // Follower verifies the outputs are consistent.
        if let Role::Follower = self.role {
            for (output, mut value) in mem::take(&mut self.outputs) {
                // If the output is not available in the MPC VM, we did not execute and decode
                // it. Therefore, we do not need to check for equality.
                //
                // This can occur if some function was preprocessed but ultimately not used.
                if let Some(mpc_output) = mpc.get_raw(output)? {
                    let zk_output = value
                        .try_recv()
                        .map_err(VmError::memory)?
                        .expect("output should be decoded");

                    // Asserts equality of all the output values from both VMs.
                    if zk_output != mpc_output {
                        return Err(VmError::execute(Error::from(ErrorRepr::EqualityCheck)));
                    }
                }
            }
        }

        Ok(())
    }
}
|
||||
|
||||
impl<Mpc, Zk> Memory<Binary> for Deap<Mpc, Zk>
where
    Mpc: Memory<Binary, Error = VmError>,
    Zk: Memory<Binary, Error = VmError>,
{
    type Error = VmError;

    // NOTE: every method locks with `try_lock().unwrap()` and panics if an
    // inner VM mutex is held elsewhere. Queries delegate to the MPC VM;
    // mutations are mirrored into the ZK VM via the memory map.

    fn is_alloc_raw(&self, slice: Slice) -> bool {
        self.mpc.try_lock().unwrap().is_alloc_raw(slice)
    }

    fn alloc_raw(&mut self, size: usize) -> Result<Slice, VmError> {
        // Allocate in both VMs and record the address-space mapping.
        let mpc_slice = self.mpc.try_lock().unwrap().alloc_raw(size)?;
        let zk_slice = self.zk.try_lock().unwrap().alloc_raw(size)?;

        self.memory_map.insert(mpc_slice, zk_slice);

        Ok(mpc_slice)
    }

    fn is_assigned_raw(&self, slice: Slice) -> bool {
        self.mpc.try_lock().unwrap().is_assigned_raw(slice)
    }

    fn assign_raw(&mut self, slice: Slice, data: BitVec) -> Result<(), VmError> {
        // Assign the same data in both VMs, translating the slice for the ZK
        // side.
        self.mpc
            .try_lock()
            .unwrap()
            .assign_raw(slice, data.clone())?;

        self.zk
            .try_lock()
            .unwrap()
            .assign_raw(self.memory_map.try_get(slice)?, data)
    }

    fn is_committed_raw(&self, slice: Slice) -> bool {
        self.mpc.try_lock().unwrap().is_committed_raw(slice)
    }

    fn commit_raw(&mut self, slice: Slice) -> Result<(), VmError> {
        // Follower's private inputs are not committed in the ZK VM until finalization.
        let input_minus_follower = slice.to_range().difference(&self.follower_input_ranges);
        let mut zk = self.zk.try_lock().unwrap();
        for input in input_minus_follower.iter_ranges() {
            zk.commit_raw(
                self.memory_map
                    .try_get(Slice::from_range_unchecked(input))?,
            )?;
        }

        self.mpc.try_lock().unwrap().commit_raw(slice)
    }

    fn get_raw(&self, slice: Slice) -> Result<Option<BitVec>, VmError> {
        self.mpc.try_lock().unwrap().get_raw(slice)
    }

    fn decode_raw(&mut self, slice: Slice) -> Result<DecodeFuture<BitVec>, VmError> {
        // Record the ZK-side decode so the follower can compare the two
        // outputs during finalization.
        let fut = self
            .zk
            .try_lock()
            .unwrap()
            .decode_raw(self.memory_map.try_get(slice)?)?;
        self.outputs.push((slice, fut));

        self.mpc.try_lock().unwrap().decode_raw(slice)
    }
}
|
||||
|
||||
impl<Mpc, Zk> View<Binary> for Deap<Mpc, Zk>
where
    Mpc: View<Binary, Error = VmError>,
    Zk: View<Binary, Error = VmError>,
{
    type Error = VmError;

    fn mark_public_raw(&mut self, slice: Slice) -> Result<(), VmError> {
        // Public data is public in both VMs.
        self.mpc.try_lock().unwrap().mark_public_raw(slice)?;
        self.zk
            .try_lock()
            .unwrap()
            .mark_public_raw(self.memory_map.try_get(slice)?)
    }

    fn mark_private_raw(&mut self, slice: Slice) -> Result<(), VmError> {
        let mut zk = self.zk.try_lock().unwrap();
        let mut mpc = self.mpc.try_lock().unwrap();
        match self.role {
            Role::Leader => {
                // Leader's private data stays private in both VMs.
                mpc.mark_private_raw(slice)?;
                zk.mark_private_raw(self.memory_map.try_get(slice)?)?;
            }
            Role::Follower => {
                mpc.mark_private_raw(slice)?;
                // Follower's private inputs will become public during finalization.
                zk.mark_public_raw(self.memory_map.try_get(slice)?)?;
                self.follower_input_ranges.union_mut(&slice.to_range());
                self.follower_inputs.push(slice);
            }
        }

        Ok(())
    }

    fn mark_blind_raw(&mut self, slice: Slice) -> Result<(), VmError> {
        let mut zk = self.zk.try_lock().unwrap();
        let mut mpc = self.mpc.try_lock().unwrap();
        match self.role {
            Role::Leader => {
                // Blind from the leader's view means it is the follower's
                // private input.
                mpc.mark_blind_raw(slice)?;
                // Follower's private inputs will become public during finalization.
                zk.mark_public_raw(self.memory_map.try_get(slice)?)?;
                self.follower_input_ranges.union_mut(&slice.to_range());
                self.follower_inputs.push(slice);
            }
            Role::Follower => {
                mpc.mark_blind_raw(slice)?;
                zk.mark_blind_raw(self.memory_map.try_get(slice)?)?;
            }
        }

        Ok(())
    }
}
|
||||
|
||||
impl<Mpc, Zk> Callable<Binary> for Deap<Mpc, Zk>
|
||||
where
|
||||
Mpc: Vm<Binary>,
|
||||
Zk: Vm<Binary>,
|
||||
{
|
||||
fn call_raw(&mut self, call: Call) -> Result<Slice, VmError> {
|
||||
let (circ, inputs) = call.clone().into_parts();
|
||||
let mut builder = Call::builder(circ);
|
||||
|
||||
for input in inputs {
|
||||
builder = builder.arg(self.memory_map.try_get(input)?);
|
||||
}
|
||||
|
||||
let zk_call = builder.build().expect("call should be valid");
|
||||
|
||||
let output = self.mpc.try_lock().unwrap().call_raw(call)?;
|
||||
let zk_output = self.zk.try_lock().unwrap().call_raw(zk_call)?;
|
||||
|
||||
self.memory_map.insert(output, zk_output);
|
||||
|
||||
Ok(output)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
impl<Mpc, Zk> Execute for Deap<Mpc, Zk>
where
    Mpc: Execute + Send + 'static,
    Zk: Execute + Send + 'static,
{
    fn wants_flush(&self) -> bool {
        self.mpc.try_lock().unwrap().wants_flush() || self.zk.try_lock().unwrap().wants_flush()
    }

    async fn flush(&mut self, ctx: &mut Context) -> Result<(), VmError> {
        // Take owned guards so both VMs can be flushed concurrently via
        // `try_join`. Panics if a mutex is held elsewhere.
        let mut zk = self.zk.clone().try_lock_owned().unwrap();
        let mut mpc = self.mpc.clone().try_lock_owned().unwrap();
        ctx.try_join(
            async move |ctx| zk.flush(ctx).await,
            async move |ctx| mpc.flush(ctx).await,
        )
        .await
        .map_err(VmError::execute)??;

        Ok(())
    }

    fn wants_preprocess(&self) -> bool {
        self.mpc.try_lock().unwrap().wants_preprocess()
            || self.zk.try_lock().unwrap().wants_preprocess()
    }

    async fn preprocess(&mut self, ctx: &mut Context) -> Result<(), VmError> {
        // Preprocess both VMs concurrently, mirroring `flush`.
        let mut zk = self.zk.clone().try_lock_owned().unwrap();
        let mut mpc = self.mpc.clone().try_lock_owned().unwrap();
        ctx.try_join(
            async move |ctx| zk.preprocess(ctx).await,
            async move |ctx| mpc.preprocess(ctx).await,
        )
        .await
        .map_err(VmError::execute)??;

        Ok(())
    }

    fn wants_execute(&self) -> bool {
        self.mpc.try_lock().unwrap().wants_execute()
    }

    async fn execute(&mut self, ctx: &mut Context) -> Result<(), VmError> {
        // Only MPC VM is executed until finalization.
        self.mpc.try_lock().unwrap().execute(ctx).await
    }
}
|
||||
|
||||
/// Error type of the DEAP VM.
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
pub(crate) struct DeapError(#[from] ErrorRepr);

#[derive(Debug, thiserror::Error)]
enum ErrorRepr {
    // Raised by the follower when the MPC and ZK outputs disagree.
    #[error("equality check failed")]
    EqualityCheck,
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use mpz_circuits::circuits::AES128;
    use mpz_common::context::test_st_context;
    use mpz_core::Block;
    use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
    use mpz_ot::ideal::{cot::ideal_cot, rcot::ideal_rcot};
    use mpz_vm_core::{
        memory::{binary::U8, correlated::Delta, Array},
        prelude::*,
    };
    use mpz_zk::{Prover, ProverConfig, Verifier, VerifierConfig};
    use rand::{rngs::StdRng, SeedableRng};

    use super::*;

    // Happy path: both parties encrypt with AES-128 (leader's private key,
    // follower's private message) and must decode the same ciphertext.
    #[tokio::test]
    async fn test_deap() {
        let mut rng = StdRng::seed_from_u64(0);
        let delta_mpc = Delta::random(&mut rng);
        let delta_zk = Delta::random(&mut rng);

        let (mut ctx_a, mut ctx_b) = test_st_context(8);
        let (rcot_send, rcot_recv) = ideal_rcot(Block::ZERO, delta_zk.into_inner());
        let (cot_send, cot_recv) = ideal_cot(delta_mpc.into_inner());

        let gb = Garbler::new(cot_send, [0u8; 16], delta_mpc);
        let ev = Evaluator::new(cot_recv);
        let prover = Prover::new(ProverConfig::default(), rcot_recv);
        let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send);

        let mut leader = Deap::new(Role::Leader, gb, prover);
        let mut follower = Deap::new(Role::Follower, ev, verifier);

        let (ct_leader, ct_follower) = futures::join!(
            async {
                let key: Array<U8, 16> = leader.alloc().unwrap();
                let msg: Array<U8, 16> = leader.alloc().unwrap();

                leader.mark_private(key).unwrap();
                leader.mark_blind(msg).unwrap();
                leader.assign(key, [42u8; 16]).unwrap();
                leader.commit(key).unwrap();
                leader.commit(msg).unwrap();

                let ct: Array<U8, 16> = leader
                    .call(
                        Call::builder(AES128.clone())
                            .arg(key)
                            .arg(msg)
                            .build()
                            .unwrap(),
                    )
                    .unwrap();
                let ct = leader.decode(ct).unwrap();

                leader.flush(&mut ctx_a).await.unwrap();
                leader.execute(&mut ctx_a).await.unwrap();
                leader.flush(&mut ctx_a).await.unwrap();
                leader.finalize(&mut ctx_a).await.unwrap();

                ct.await.unwrap()
            },
            async {
                let key: Array<U8, 16> = follower.alloc().unwrap();
                let msg: Array<U8, 16> = follower.alloc().unwrap();

                follower.mark_blind(key).unwrap();
                follower.mark_private(msg).unwrap();
                follower.assign(msg, [69u8; 16]).unwrap();
                follower.commit(key).unwrap();
                follower.commit(msg).unwrap();

                let ct: Array<U8, 16> = follower
                    .call(
                        Call::builder(AES128.clone())
                            .arg(key)
                            .arg(msg)
                            .build()
                            .unwrap(),
                    )
                    .unwrap();
                let ct = follower.decode(ct).unwrap();

                follower.flush(&mut ctx_b).await.unwrap();
                follower.execute(&mut ctx_b).await.unwrap();
                follower.flush(&mut ctx_b).await.unwrap();
                follower.finalize(&mut ctx_b).await.unwrap();

                ct.await.unwrap()
            }
        );

        assert_eq!(ct_leader, ct_follower);
    }

    // Same as `test_deap` but the MPC and ZK address spaces are shifted
    // relative to each other first, exercising the memory map translation.
    #[tokio::test]
    async fn test_deap_desync_memory() {
        let mut rng = StdRng::seed_from_u64(0);
        let delta_mpc = Delta::random(&mut rng);
        let delta_zk = Delta::random(&mut rng);

        let (mut ctx_a, mut ctx_b) = test_st_context(8);
        let (rcot_send, rcot_recv) = ideal_rcot(Block::ZERO, delta_zk.into_inner());
        let (cot_send, cot_recv) = ideal_cot(delta_mpc.into_inner());

        let gb = Garbler::new(cot_send, [0u8; 16], delta_mpc);
        let ev = Evaluator::new(cot_recv);
        let prover = Prover::new(ProverConfig::default(), rcot_recv);
        let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send);

        let mut leader = Deap::new(Role::Leader, gb, prover);
        let mut follower = Deap::new(Role::Follower, ev, verifier);

        // Desynchronize the memories.
        let _ = leader.zk().alloc_raw(1).unwrap();
        let _ = follower.zk().alloc_raw(1).unwrap();

        let (ct_leader, ct_follower) = futures::join!(
            async {
                let key: Array<U8, 16> = leader.alloc().unwrap();
                let msg: Array<U8, 16> = leader.alloc().unwrap();

                leader.mark_private(key).unwrap();
                leader.mark_blind(msg).unwrap();
                leader.assign(key, [42u8; 16]).unwrap();
                leader.commit(key).unwrap();
                leader.commit(msg).unwrap();

                let ct: Array<U8, 16> = leader
                    .call(
                        Call::builder(AES128.clone())
                            .arg(key)
                            .arg(msg)
                            .build()
                            .unwrap(),
                    )
                    .unwrap();
                let ct = leader.decode(ct).unwrap();

                leader.flush(&mut ctx_a).await.unwrap();
                leader.execute(&mut ctx_a).await.unwrap();
                leader.flush(&mut ctx_a).await.unwrap();
                leader.finalize(&mut ctx_a).await.unwrap();

                ct.await.unwrap()
            },
            async {
                let key: Array<U8, 16> = follower.alloc().unwrap();
                let msg: Array<U8, 16> = follower.alloc().unwrap();

                follower.mark_blind(key).unwrap();
                follower.mark_private(msg).unwrap();
                follower.assign(msg, [69u8; 16]).unwrap();
                follower.commit(key).unwrap();
                follower.commit(msg).unwrap();

                let ct: Array<U8, 16> = follower
                    .call(
                        Call::builder(AES128.clone())
                            .arg(key)
                            .arg(msg)
                            .build()
                            .unwrap(),
                    )
                    .unwrap();
                let ct = follower.decode(ct).unwrap();

                follower.flush(&mut ctx_b).await.unwrap();
                follower.execute(&mut ctx_b).await.unwrap();
                follower.flush(&mut ctx_b).await.unwrap();
                follower.finalize(&mut ctx_b).await.unwrap();

                ct.await.unwrap()
            }
        );

        assert_eq!(ct_leader, ct_follower);
    }

    // Tests that the leader can not use different inputs in each VM without
    // detection by the follower.
    #[tokio::test]
    async fn test_malicious() {
        let mut rng = StdRng::seed_from_u64(0);
        let delta_mpc = Delta::random(&mut rng);
        let delta_zk = Delta::random(&mut rng);

        let (mut ctx_a, mut ctx_b) = test_st_context(8);
        let (rcot_send, rcot_recv) = ideal_rcot(Block::ZERO, delta_zk.into_inner());
        let (cot_send, cot_recv) = ideal_cot(delta_mpc.into_inner());

        let gb = Garbler::new(cot_send, [1u8; 16], delta_mpc);
        let ev = Evaluator::new(cot_recv);
        let prover = Prover::new(ProverConfig::default(), rcot_recv);
        let verifier = Verifier::new(VerifierConfig::default(), delta_zk, rcot_send);

        let mut leader = Deap::new(Role::Leader, gb, prover);
        let mut follower = Deap::new(Role::Follower, ev, verifier);

        let (_, follower_res) = futures::join!(
            async {
                let key: Array<U8, 16> = leader.alloc().unwrap();
                let msg: Array<U8, 16> = leader.alloc().unwrap();

                leader.mark_private(key).unwrap();
                leader.mark_blind(msg).unwrap();

                // Use different inputs in each VM.
                leader.mpc().assign(key, [42u8; 16]).unwrap();
                leader
                    .zk
                    .try_lock()
                    .unwrap()
                    .assign(key, [69u8; 16])
                    .unwrap();

                leader.commit(key).unwrap();
                leader.commit(msg).unwrap();

                let ct: Array<U8, 16> = leader
                    .call(
                        Call::builder(AES128.clone())
                            .arg(key)
                            .arg(msg)
                            .build()
                            .unwrap(),
                    )
                    .unwrap();
                let ct = leader.decode(ct).unwrap();

                leader.flush(&mut ctx_a).await.unwrap();
                leader.execute(&mut ctx_a).await.unwrap();
                leader.flush(&mut ctx_a).await.unwrap();
                leader.finalize(&mut ctx_a).await.unwrap();

                ct.await.unwrap()
            },
            async {
                let key: Array<U8, 16> = follower.alloc().unwrap();
                let msg: Array<U8, 16> = follower.alloc().unwrap();

                follower.mark_blind(key).unwrap();
                follower.mark_private(msg).unwrap();
                follower.assign(msg, [69u8; 16]).unwrap();
                follower.commit(key).unwrap();
                follower.commit(msg).unwrap();

                let ct: Array<U8, 16> = follower
                    .call(
                        Call::builder(AES128.clone())
                            .arg(key)
                            .arg(msg)
                            .build()
                            .unwrap(),
                    )
                    .unwrap();
                drop(follower.decode(ct).unwrap());

                follower.flush(&mut ctx_b).await.unwrap();
                follower.execute(&mut ctx_b).await.unwrap();
                follower.flush(&mut ctx_b).await.unwrap();
                // Finalization must fail: the equality check detects the
                // inconsistent leader inputs.
                follower.finalize(&mut ctx_b).await
            }
        );

        assert!(follower_res.is_err());
    }
}
|
||||
111
crates/components/deap/src/map.rs
Normal file
111
crates/components/deap/src/map.rs
Normal file
@@ -0,0 +1,111 @@
|
||||
use std::ops::Range;
|
||||
|
||||
use mpz_vm_core::{memory::Slice, VmError};
|
||||
use rangeset::Subset;
|
||||
|
||||
/// A mapping between the memories of the MPC and ZK VMs.
///
/// Stored as two index-aligned vectors: `mpc[i]` maps onto `zk[i]`. Ranges
/// are kept in ascending order of their start (enforced by `insert`), which
/// enables binary-search lookups.
#[derive(Debug, Default)]
pub(crate) struct MemoryMap {
    // MPC-side allocation ranges, ascending.
    mpc: Vec<Range<usize>>,
    // ZK-side ranges, index-aligned with `mpc`.
    zk: Vec<Range<usize>>,
}
|
||||
|
||||
impl MemoryMap {
|
||||
/// Inserts a new allocation into the map.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// - If the slices are not inserted in the order they are allocated.
|
||||
/// - If the slices are not the same length.
|
||||
pub(crate) fn insert(&mut self, mpc: Slice, zk: Slice) {
|
||||
let mpc = mpc.to_range();
|
||||
let zk = zk.to_range();
|
||||
|
||||
assert_eq!(mpc.len(), zk.len(), "slices must be the same length");
|
||||
|
||||
if let Some(last) = self.mpc.last() {
|
||||
if last.end > mpc.start {
|
||||
panic!("slices must be provided in ascending order");
|
||||
}
|
||||
}
|
||||
|
||||
self.mpc.push(mpc);
|
||||
self.zk.push(zk);
|
||||
}
|
||||
|
||||
/// Returns the corresponding allocation in the ZK VM.
|
||||
pub(crate) fn try_get(&self, mpc: Slice) -> Result<Slice, VmError> {
|
||||
let mpc_range = mpc.to_range();
|
||||
let pos = match self
|
||||
.mpc
|
||||
.binary_search_by_key(&mpc_range.start, |range| range.start)
|
||||
{
|
||||
Ok(pos) => pos,
|
||||
Err(0) => return Err(VmError::memory(format!("invalid memory slice: {mpc}"))),
|
||||
Err(pos) => pos - 1,
|
||||
};
|
||||
|
||||
let candidate = &self.mpc[pos];
|
||||
if mpc_range.is_subset(candidate) {
|
||||
let offset = mpc_range.start - candidate.start;
|
||||
let start = self.zk[pos].start + offset;
|
||||
let slice = Slice::from_range_unchecked(start..start + mpc_range.len());
|
||||
|
||||
Ok(slice)
|
||||
} else {
|
||||
Err(VmError::memory(format!("invalid memory slice: {mpc}")))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_map() {
        let mut map = MemoryMap::default();
        map.insert(
            Slice::from_range_unchecked(0..10),
            Slice::from_range_unchecked(10..20),
        );

        // Range is fully contained.
        assert_eq!(
            map.try_get(Slice::from_range_unchecked(0..10)).unwrap(),
            Slice::from_range_unchecked(10..20)
        );
        // Range is a strict subset: the offset is preserved.
        assert_eq!(
            map.try_get(Slice::from_range_unchecked(1..9)).unwrap(),
            Slice::from_range_unchecked(11..19)
        );
        // Range overruns the mapping, so it is rejected.
        assert!(map.try_get(Slice::from_range_unchecked(0..11)).is_err());

        // Insert another range.
        map.insert(
            Slice::from_range_unchecked(20..30),
            Slice::from_range_unchecked(30..40),
        );
        assert_eq!(
            map.try_get(Slice::from_range_unchecked(20..30)).unwrap(),
            Slice::from_range_unchecked(30..40)
        );
        assert_eq!(
            map.try_get(Slice::from_range_unchecked(21..29)).unwrap(),
            Slice::from_range_unchecked(31..39)
        );
        // Straddles the boundary between two mappings, so it is rejected.
        assert!(map.try_get(Slice::from_range_unchecked(19..21)).is_err());
    }

    #[test]
    #[should_panic]
    fn test_map_length_mismatch() {
        // Inserting ranges of different lengths must panic.
        let mut map = MemoryMap::default();
        map.insert(
            Slice::from_range_unchecked(5..10),
            Slice::from_range_unchecked(20..30),
        );
    }
}
|
||||
@@ -1,19 +0,0 @@
|
||||
[package]
|
||||
name = "tlsn-hmac-sha256-circuits"
|
||||
authors = ["TLSNotary Team"]
|
||||
description = "The 2PC circuits for TLS HMAC-SHA256 PRF"
|
||||
keywords = ["tls", "mpc", "2pc", "hmac", "sha256"]
|
||||
categories = ["cryptography"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
version = "0.1.0-alpha.7"
|
||||
edition = "2021"
|
||||
|
||||
[lib]
|
||||
name = "hmac_sha256_circuits"
|
||||
|
||||
[dependencies]
|
||||
mpz-circuits = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
ring = { workspace = true }
|
||||
@@ -1,159 +0,0 @@
|
||||
use std::cell::RefCell;
|
||||
|
||||
use mpz_circuits::{
|
||||
circuits::{sha256, sha256_compress, sha256_compress_trace, sha256_trace},
|
||||
types::{U32, U8},
|
||||
BuilderState, Tracer,
|
||||
};
|
||||
|
||||
static SHA256_INITIAL_STATE: [u32; 8] = [
|
||||
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
|
||||
];
|
||||
|
||||
/// Returns the outer and inner states of HMAC-SHA256 with the provided key.
|
||||
///
|
||||
/// Outer state is H(key ⊕ opad)
|
||||
///
|
||||
/// Inner state is H(key ⊕ ipad)
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `builder_state` - Reference to builder state.
|
||||
/// * `key` - N-byte key (must be <= 64 bytes).
|
||||
pub fn hmac_sha256_partial_trace<'a>(
|
||||
builder_state: &'a RefCell<BuilderState>,
|
||||
key: &[Tracer<'a, U8>],
|
||||
) -> ([Tracer<'a, U32>; 8], [Tracer<'a, U32>; 8]) {
|
||||
assert!(key.len() <= 64);
|
||||
|
||||
let mut opad = [Tracer::new(
|
||||
builder_state,
|
||||
builder_state.borrow_mut().get_constant(0x5cu8),
|
||||
); 64];
|
||||
|
||||
let mut ipad = [Tracer::new(
|
||||
builder_state,
|
||||
builder_state.borrow_mut().get_constant(0x36u8),
|
||||
); 64];
|
||||
|
||||
key.iter().enumerate().for_each(|(i, k)| {
|
||||
opad[i] = opad[i] ^ *k;
|
||||
ipad[i] = ipad[i] ^ *k;
|
||||
});
|
||||
|
||||
let sha256_initial_state: [_; 8] = SHA256_INITIAL_STATE
|
||||
.map(|v| Tracer::new(builder_state, builder_state.borrow_mut().get_constant(v)));
|
||||
|
||||
let outer_state = sha256_compress_trace(builder_state, sha256_initial_state, opad);
|
||||
let inner_state = sha256_compress_trace(builder_state, sha256_initial_state, ipad);
|
||||
|
||||
(outer_state, inner_state)
|
||||
}
|
||||
|
||||
/// Reference implementation of HMAC-SHA256 partial function.
|
||||
///
|
||||
/// Returns the outer and inner states of HMAC-SHA256 with the provided key.
|
||||
///
|
||||
/// Outer state is H(key ⊕ opad)
|
||||
///
|
||||
/// Inner state is H(key ⊕ ipad)
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `key` - N-byte key (must be <= 64 bytes).
|
||||
pub fn hmac_sha256_partial(key: &[u8]) -> ([u32; 8], [u32; 8]) {
|
||||
assert!(key.len() <= 64);
|
||||
|
||||
let mut opad = [0x5cu8; 64];
|
||||
let mut ipad = [0x36u8; 64];
|
||||
|
||||
key.iter().enumerate().for_each(|(i, k)| {
|
||||
opad[i] ^= k;
|
||||
ipad[i] ^= k;
|
||||
});
|
||||
|
||||
let outer_state = sha256_compress(SHA256_INITIAL_STATE, opad);
|
||||
let inner_state = sha256_compress(SHA256_INITIAL_STATE, ipad);
|
||||
|
||||
(outer_state, inner_state)
|
||||
}
|
||||
|
||||
/// HMAC-SHA256 finalization function.
|
||||
///
|
||||
/// Returns the HMAC-SHA256 digest of the provided message using existing outer
|
||||
/// and inner states.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `outer_state` - 256-bit outer state.
|
||||
/// * `inner_state` - 256-bit inner state.
|
||||
/// * `msg` - N-byte message.
|
||||
pub fn hmac_sha256_finalize_trace<'a>(
|
||||
builder_state: &'a RefCell<BuilderState>,
|
||||
outer_state: [Tracer<'a, U32>; 8],
|
||||
inner_state: [Tracer<'a, U32>; 8],
|
||||
msg: &[Tracer<'a, U8>],
|
||||
) -> [Tracer<'a, U8>; 32] {
|
||||
sha256_trace(
|
||||
builder_state,
|
||||
outer_state,
|
||||
64,
|
||||
&sha256_trace(builder_state, inner_state, 64, msg),
|
||||
)
|
||||
}
|
||||
|
||||
/// Reference implementation of the HMAC-SHA256 finalization function.
|
||||
///
|
||||
/// Returns the HMAC-SHA256 digest of the provided message using existing outer
|
||||
/// and inner states.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `outer_state` - 256-bit outer state.
|
||||
/// * `inner_state` - 256-bit inner state.
|
||||
/// * `msg` - N-byte message.
|
||||
pub fn hmac_sha256_finalize(outer_state: [u32; 8], inner_state: [u32; 8], msg: &[u8]) -> [u8; 32] {
|
||||
sha256(outer_state, 64, &sha256(inner_state, 64, msg))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use mpz_circuits::{test_circ, CircuitBuilder};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_hmac_sha256_partial() {
|
||||
let builder = CircuitBuilder::new();
|
||||
let key = builder.add_array_input::<u8, 48>();
|
||||
let (outer_state, inner_state) = hmac_sha256_partial_trace(builder.state(), &key);
|
||||
builder.add_output(outer_state);
|
||||
builder.add_output(inner_state);
|
||||
let circ = builder.build().unwrap();
|
||||
|
||||
let key = [69u8; 48];
|
||||
|
||||
test_circ!(circ, hmac_sha256_partial, fn(&key) -> ([u32; 8], [u32; 8]));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hmac_sha256_finalize() {
|
||||
let builder = CircuitBuilder::new();
|
||||
let outer_state = builder.add_array_input::<u32, 8>();
|
||||
let inner_state = builder.add_array_input::<u32, 8>();
|
||||
let msg = builder.add_array_input::<u8, 47>();
|
||||
let hash = hmac_sha256_finalize_trace(builder.state(), outer_state, inner_state, &msg);
|
||||
builder.add_output(hash);
|
||||
let circ = builder.build().unwrap();
|
||||
|
||||
let key = [69u8; 32];
|
||||
let (outer_state, inner_state) = hmac_sha256_partial(&key);
|
||||
let msg = [42u8; 47];
|
||||
|
||||
test_circ!(
|
||||
circ,
|
||||
hmac_sha256_finalize,
|
||||
fn(outer_state, inner_state, &msg) -> [u8; 32]
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1,61 +0,0 @@
|
||||
//! HMAC-SHA256 circuits.
|
||||
|
||||
#![deny(missing_docs, unreachable_pub, unused_must_use)]
|
||||
#![deny(clippy::all)]
|
||||
#![forbid(unsafe_code)]
|
||||
|
||||
mod hmac_sha256;
|
||||
mod prf;
|
||||
mod session_keys;
|
||||
mod verify_data;
|
||||
|
||||
pub use hmac_sha256::{
|
||||
hmac_sha256_finalize, hmac_sha256_finalize_trace, hmac_sha256_partial,
|
||||
hmac_sha256_partial_trace,
|
||||
};
|
||||
|
||||
pub use prf::{prf, prf_trace};
|
||||
pub use session_keys::{session_keys, session_keys_trace};
|
||||
pub use verify_data::{verify_data, verify_data_trace};
|
||||
|
||||
use mpz_circuits::{Circuit, CircuitBuilder, Tracer};
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Builds session key derivation circuit.
|
||||
#[tracing::instrument(level = "trace")]
|
||||
pub fn build_session_keys() -> Arc<Circuit> {
|
||||
let builder = CircuitBuilder::new();
|
||||
let pms = builder.add_array_input::<u8, 32>();
|
||||
let client_random = builder.add_array_input::<u8, 32>();
|
||||
let server_random = builder.add_array_input::<u8, 32>();
|
||||
let (cwk, swk, civ, siv, outer_state, inner_state) =
|
||||
session_keys_trace(builder.state(), pms, client_random, server_random);
|
||||
builder.add_output(cwk);
|
||||
builder.add_output(swk);
|
||||
builder.add_output(civ);
|
||||
builder.add_output(siv);
|
||||
builder.add_output(outer_state);
|
||||
builder.add_output(inner_state);
|
||||
Arc::new(builder.build().expect("session keys should build"))
|
||||
}
|
||||
|
||||
/// Builds a verify data circuit.
|
||||
#[tracing::instrument(level = "trace")]
|
||||
pub fn build_verify_data(label: &[u8]) -> Arc<Circuit> {
|
||||
let builder = CircuitBuilder::new();
|
||||
let outer_state = builder.add_array_input::<u32, 8>();
|
||||
let inner_state = builder.add_array_input::<u32, 8>();
|
||||
let handshake_hash = builder.add_array_input::<u8, 32>();
|
||||
let vd = verify_data_trace(
|
||||
builder.state(),
|
||||
outer_state,
|
||||
inner_state,
|
||||
&label
|
||||
.iter()
|
||||
.map(|v| Tracer::new(builder.state(), builder.get_constant(*v).to_inner()))
|
||||
.collect::<Vec<_>>(),
|
||||
handshake_hash,
|
||||
);
|
||||
builder.add_output(vd);
|
||||
Arc::new(builder.build().expect("verify data should build"))
|
||||
}
|
||||
@@ -1,227 +0,0 @@
|
||||
//! This module provides an implementation of the HMAC-SHA256 PRF defined in [RFC 5246](https://www.rfc-editor.org/rfc/rfc5246#section-5).
|
||||
|
||||
use std::cell::RefCell;
|
||||
|
||||
use mpz_circuits::{
|
||||
types::{U32, U8},
|
||||
BuilderState, Tracer,
|
||||
};
|
||||
|
||||
use crate::hmac_sha256::{hmac_sha256_finalize, hmac_sha256_finalize_trace};
|
||||
|
||||
fn p_hash_trace<'a>(
|
||||
builder_state: &'a RefCell<BuilderState>,
|
||||
outer_state: [Tracer<'a, U32>; 8],
|
||||
inner_state: [Tracer<'a, U32>; 8],
|
||||
seed: &[Tracer<'a, U8>],
|
||||
iterations: usize,
|
||||
) -> Vec<Tracer<'a, U8>> {
|
||||
// A() is defined as:
|
||||
//
|
||||
// A(0) = seed
|
||||
// A(i) = HMAC_hash(secret, A(i-1))
|
||||
let mut a_cache: Vec<_> = Vec::with_capacity(iterations + 1);
|
||||
a_cache.push(seed.to_vec());
|
||||
|
||||
for i in 0..iterations {
|
||||
let a_i = hmac_sha256_finalize_trace(builder_state, outer_state, inner_state, &a_cache[i]);
|
||||
a_cache.push(a_i.to_vec());
|
||||
}
|
||||
|
||||
// HMAC_hash(secret, A(i) + seed)
|
||||
let mut output: Vec<_> = Vec::with_capacity(iterations * 32);
|
||||
for i in 0..iterations {
|
||||
let mut a_i_seed = a_cache[i + 1].clone();
|
||||
a_i_seed.extend_from_slice(seed);
|
||||
|
||||
let hash = hmac_sha256_finalize_trace(builder_state, outer_state, inner_state, &a_i_seed);
|
||||
output.extend_from_slice(&hash);
|
||||
}
|
||||
|
||||
output
|
||||
}
|
||||
|
||||
fn p_hash(outer_state: [u32; 8], inner_state: [u32; 8], seed: &[u8], iterations: usize) -> Vec<u8> {
|
||||
// A() is defined as:
|
||||
//
|
||||
// A(0) = seed
|
||||
// A(i) = HMAC_hash(secret, A(i-1))
|
||||
let mut a_cache: Vec<_> = Vec::with_capacity(iterations + 1);
|
||||
a_cache.push(seed.to_vec());
|
||||
|
||||
for i in 0..iterations {
|
||||
let a_i = hmac_sha256_finalize(outer_state, inner_state, &a_cache[i]);
|
||||
a_cache.push(a_i.to_vec());
|
||||
}
|
||||
|
||||
// HMAC_hash(secret, A(i) + seed)
|
||||
let mut output: Vec<_> = Vec::with_capacity(iterations * 32);
|
||||
for i in 0..iterations {
|
||||
let mut a_i_seed = a_cache[i + 1].clone();
|
||||
a_i_seed.extend_from_slice(seed);
|
||||
|
||||
let hash = hmac_sha256_finalize(outer_state, inner_state, &a_i_seed);
|
||||
output.extend_from_slice(&hash);
|
||||
}
|
||||
|
||||
output
|
||||
}
|
||||
|
||||
/// Computes PRF(secret, label, seed).
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `builder_state` - Reference to builder state.
|
||||
/// * `outer_state` - The outer state of HMAC-SHA256.
|
||||
/// * `inner_state` - The inner state of HMAC-SHA256.
|
||||
/// * `seed` - The seed to use.
|
||||
/// * `label` - The label to use.
|
||||
/// * `bytes` - The number of bytes to output.
|
||||
pub fn prf_trace<'a>(
|
||||
builder_state: &'a RefCell<BuilderState>,
|
||||
outer_state: [Tracer<'a, U32>; 8],
|
||||
inner_state: [Tracer<'a, U32>; 8],
|
||||
seed: &[Tracer<'a, U8>],
|
||||
label: &[Tracer<'a, U8>],
|
||||
bytes: usize,
|
||||
) -> Vec<Tracer<'a, U8>> {
|
||||
let iterations = bytes / 32 + (bytes % 32 != 0) as usize;
|
||||
let mut label_seed = label.to_vec();
|
||||
label_seed.extend_from_slice(seed);
|
||||
|
||||
let mut output = p_hash_trace(
|
||||
builder_state,
|
||||
outer_state,
|
||||
inner_state,
|
||||
&label_seed,
|
||||
iterations,
|
||||
);
|
||||
output.truncate(bytes);
|
||||
|
||||
output
|
||||
}
|
||||
|
||||
/// Reference implementation of PRF(secret, label, seed).
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `outer_state` - The outer state of HMAC-SHA256.
|
||||
/// * `inner_state` - The inner state of HMAC-SHA256.
|
||||
/// * `seed` - The seed to use.
|
||||
/// * `label` - The label to use.
|
||||
/// * `bytes` - The number of bytes to output.
|
||||
pub fn prf(
|
||||
outer_state: [u32; 8],
|
||||
inner_state: [u32; 8],
|
||||
seed: &[u8],
|
||||
label: &[u8],
|
||||
bytes: usize,
|
||||
) -> Vec<u8> {
|
||||
let iterations = bytes / 32 + (bytes % 32 != 0) as usize;
|
||||
let mut label_seed = label.to_vec();
|
||||
label_seed.extend_from_slice(seed);
|
||||
|
||||
let mut output = p_hash(outer_state, inner_state, &label_seed, iterations);
|
||||
output.truncate(bytes);
|
||||
|
||||
output
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use mpz_circuits::{evaluate, CircuitBuilder};
|
||||
|
||||
use crate::hmac_sha256::hmac_sha256_partial;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_p_hash() {
|
||||
let builder = CircuitBuilder::new();
|
||||
let outer_state = builder.add_array_input::<u32, 8>();
|
||||
let inner_state = builder.add_array_input::<u32, 8>();
|
||||
let seed = builder.add_array_input::<u8, 64>();
|
||||
let output = p_hash_trace(builder.state(), outer_state, inner_state, &seed, 2);
|
||||
builder.add_output(output);
|
||||
let circ = builder.build().unwrap();
|
||||
|
||||
let outer_state = [0u32; 8];
|
||||
let inner_state = [1u32; 8];
|
||||
let seed = [42u8; 64];
|
||||
|
||||
let expected = p_hash(outer_state, inner_state, &seed, 2);
|
||||
let actual = evaluate!(circ, fn(outer_state, inner_state, &seed) -> Vec<u8>).unwrap();
|
||||
|
||||
assert_eq!(actual, expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_prf() {
|
||||
let builder = CircuitBuilder::new();
|
||||
let outer_state = builder.add_array_input::<u32, 8>();
|
||||
let inner_state = builder.add_array_input::<u32, 8>();
|
||||
let seed = builder.add_array_input::<u8, 64>();
|
||||
let label = builder.add_array_input::<u8, 13>();
|
||||
let output = prf_trace(builder.state(), outer_state, inner_state, &seed, &label, 48);
|
||||
builder.add_output(output);
|
||||
let circ = builder.build().unwrap();
|
||||
|
||||
let master_secret = [0u8; 48];
|
||||
let seed = [43u8; 64];
|
||||
let label = b"master secret";
|
||||
|
||||
let (outer_state, inner_state) = hmac_sha256_partial(&master_secret);
|
||||
|
||||
let expected = prf(outer_state, inner_state, &seed, label, 48);
|
||||
let actual =
|
||||
evaluate!(circ, fn(outer_state, inner_state, &seed, label) -> Vec<u8>).unwrap();
|
||||
|
||||
assert_eq!(actual, expected);
|
||||
|
||||
let mut expected_ring = [0u8; 48];
|
||||
ring_prf::prf(&mut expected_ring, &master_secret, label, &seed);
|
||||
|
||||
assert_eq!(actual, expected_ring);
|
||||
}
|
||||
|
||||
// Borrowed from Rustls for testing
|
||||
// https://github.com/rustls/rustls/blob/main/rustls/src/tls12/prf.rs
|
||||
mod ring_prf {
|
||||
use ring::{hmac, hmac::HMAC_SHA256};
|
||||
|
||||
fn concat_sign(key: &hmac::Key, a: &[u8], b: &[u8]) -> hmac::Tag {
|
||||
let mut ctx = hmac::Context::with_key(key);
|
||||
ctx.update(a);
|
||||
ctx.update(b);
|
||||
ctx.sign()
|
||||
}
|
||||
|
||||
fn p(out: &mut [u8], secret: &[u8], seed: &[u8]) {
|
||||
let hmac_key = hmac::Key::new(HMAC_SHA256, secret);
|
||||
|
||||
// A(1)
|
||||
let mut current_a = hmac::sign(&hmac_key, seed);
|
||||
let chunk_size = HMAC_SHA256.digest_algorithm().output_len();
|
||||
for chunk in out.chunks_mut(chunk_size) {
|
||||
// P_hash[i] = HMAC_hash(secret, A(i) + seed)
|
||||
let p_term = concat_sign(&hmac_key, current_a.as_ref(), seed);
|
||||
chunk.copy_from_slice(&p_term.as_ref()[..chunk.len()]);
|
||||
|
||||
// A(i+1) = HMAC_hash(secret, A(i))
|
||||
current_a = hmac::sign(&hmac_key, current_a.as_ref());
|
||||
}
|
||||
}
|
||||
|
||||
fn concat(a: &[u8], b: &[u8]) -> Vec<u8> {
|
||||
let mut ret = Vec::new();
|
||||
ret.extend_from_slice(a);
|
||||
ret.extend_from_slice(b);
|
||||
ret
|
||||
}
|
||||
|
||||
pub(crate) fn prf(out: &mut [u8], secret: &[u8], label: &[u8], seed: &[u8]) {
|
||||
let joined_seed = concat(label, seed);
|
||||
p(out, secret, &joined_seed);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,200 +0,0 @@
|
||||
use std::cell::RefCell;
|
||||
|
||||
use mpz_circuits::{
|
||||
types::{U32, U8},
|
||||
BuilderState, Tracer,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
hmac_sha256::{hmac_sha256_partial, hmac_sha256_partial_trace},
|
||||
prf::{prf, prf_trace},
|
||||
};
|
||||
|
||||
/// Session Keys.
|
||||
///
|
||||
/// Computes expanded p1 which consists of client_write_key + server_write_key.
|
||||
/// Computes expanded p2 which consists of client_IV + server_IV.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `builder_state` - Reference to builder state.
|
||||
/// * `pms` - 32-byte premaster secret.
|
||||
/// * `client_random` - 32-byte client random.
|
||||
/// * `server_random` - 32-byte server random.
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// * `client_write_key` - 16-byte client write key.
|
||||
/// * `server_write_key` - 16-byte server write key.
|
||||
/// * `client_IV` - 4-byte client IV.
|
||||
/// * `server_IV` - 4-byte server IV.
|
||||
/// * `outer_hash_state` - 256-bit master-secret outer HMAC state.
|
||||
/// * `inner_hash_state` - 256-bit master-secret inner HMAC state.
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub fn session_keys_trace<'a>(
|
||||
builder_state: &'a RefCell<BuilderState>,
|
||||
pms: [Tracer<'a, U8>; 32],
|
||||
client_random: [Tracer<'a, U8>; 32],
|
||||
server_random: [Tracer<'a, U8>; 32],
|
||||
) -> (
|
||||
[Tracer<'a, U8>; 16],
|
||||
[Tracer<'a, U8>; 16],
|
||||
[Tracer<'a, U8>; 4],
|
||||
[Tracer<'a, U8>; 4],
|
||||
[Tracer<'a, U32>; 8],
|
||||
[Tracer<'a, U32>; 8],
|
||||
) {
|
||||
let (pms_outer_state, pms_inner_state) = hmac_sha256_partial_trace(builder_state, &pms);
|
||||
|
||||
let master_secret = {
|
||||
let seed = client_random
|
||||
.iter()
|
||||
.chain(&server_random)
|
||||
.copied()
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let label = b"master secret"
|
||||
.map(|v| Tracer::new(builder_state, builder_state.borrow_mut().get_constant(v)));
|
||||
|
||||
prf_trace(
|
||||
builder_state,
|
||||
pms_outer_state,
|
||||
pms_inner_state,
|
||||
&seed,
|
||||
&label,
|
||||
48,
|
||||
)
|
||||
};
|
||||
|
||||
let (master_secret_outer_state, master_secret_inner_state) =
|
||||
hmac_sha256_partial_trace(builder_state, &master_secret);
|
||||
|
||||
let key_material = {
|
||||
let seed = server_random
|
||||
.iter()
|
||||
.chain(&client_random)
|
||||
.copied()
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let label = b"key expansion"
|
||||
.map(|v| Tracer::new(builder_state, builder_state.borrow_mut().get_constant(v)));
|
||||
|
||||
prf_trace(
|
||||
builder_state,
|
||||
master_secret_outer_state,
|
||||
master_secret_inner_state,
|
||||
&seed,
|
||||
&label,
|
||||
40,
|
||||
)
|
||||
};
|
||||
|
||||
let cwk = key_material[0..16].try_into().unwrap();
|
||||
let swk = key_material[16..32].try_into().unwrap();
|
||||
let civ = key_material[32..36].try_into().unwrap();
|
||||
let siv = key_material[36..40].try_into().unwrap();
|
||||
|
||||
(
|
||||
cwk,
|
||||
swk,
|
||||
civ,
|
||||
siv,
|
||||
master_secret_outer_state,
|
||||
master_secret_inner_state,
|
||||
)
|
||||
}
|
||||
|
||||
/// Reference implementation of session keys derivation.
|
||||
pub fn session_keys(
|
||||
pms: [u8; 32],
|
||||
client_random: [u8; 32],
|
||||
server_random: [u8; 32],
|
||||
) -> ([u8; 16], [u8; 16], [u8; 4], [u8; 4]) {
|
||||
let (pms_outer_state, pms_inner_state) = hmac_sha256_partial(&pms);
|
||||
|
||||
let master_secret = {
|
||||
let seed = client_random
|
||||
.iter()
|
||||
.chain(&server_random)
|
||||
.copied()
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let label = b"master secret";
|
||||
|
||||
prf(pms_outer_state, pms_inner_state, &seed, label, 48)
|
||||
};
|
||||
|
||||
let (master_secret_outer_state, master_secret_inner_state) =
|
||||
hmac_sha256_partial(&master_secret);
|
||||
|
||||
let key_material = {
|
||||
let seed = server_random
|
||||
.iter()
|
||||
.chain(&client_random)
|
||||
.copied()
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let label = b"key expansion";
|
||||
|
||||
prf(
|
||||
master_secret_outer_state,
|
||||
master_secret_inner_state,
|
||||
&seed,
|
||||
label,
|
||||
40,
|
||||
)
|
||||
};
|
||||
|
||||
let cwk = key_material[0..16].try_into().unwrap();
|
||||
let swk = key_material[16..32].try_into().unwrap();
|
||||
let civ = key_material[32..36].try_into().unwrap();
|
||||
let siv = key_material[36..40].try_into().unwrap();
|
||||
|
||||
(cwk, swk, civ, siv)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use mpz_circuits::{evaluate, CircuitBuilder};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_session_keys() {
|
||||
let builder = CircuitBuilder::new();
|
||||
let pms = builder.add_array_input::<u8, 32>();
|
||||
let client_random = builder.add_array_input::<u8, 32>();
|
||||
let server_random = builder.add_array_input::<u8, 32>();
|
||||
let (cwk, swk, civ, siv, outer_state, inner_state) =
|
||||
session_keys_trace(builder.state(), pms, client_random, server_random);
|
||||
builder.add_output(cwk);
|
||||
builder.add_output(swk);
|
||||
builder.add_output(civ);
|
||||
builder.add_output(siv);
|
||||
builder.add_output(outer_state);
|
||||
builder.add_output(inner_state);
|
||||
let circ = builder.build().unwrap();
|
||||
|
||||
let pms = [0u8; 32];
|
||||
let client_random = [42u8; 32];
|
||||
let server_random = [69u8; 32];
|
||||
|
||||
let (expected_cwk, expected_swk, expected_civ, expected_siv) =
|
||||
session_keys(pms, client_random, server_random);
|
||||
|
||||
let (cwk, swk, civ, siv, _, _) = evaluate!(
|
||||
circ,
|
||||
fn(
|
||||
pms,
|
||||
client_random,
|
||||
server_random,
|
||||
) -> ([u8; 16], [u8; 16], [u8; 4], [u8; 4], [u32; 8], [u32; 8])
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(cwk, expected_cwk);
|
||||
assert_eq!(swk, expected_swk);
|
||||
assert_eq!(civ, expected_civ);
|
||||
assert_eq!(siv, expected_siv);
|
||||
}
|
||||
}
|
||||
@@ -1,88 +0,0 @@
|
||||
use std::cell::RefCell;
|
||||
|
||||
use mpz_circuits::{
|
||||
types::{U32, U8},
|
||||
BuilderState, Tracer,
|
||||
};
|
||||
|
||||
use crate::prf::{prf, prf_trace};
|
||||
|
||||
/// Computes verify_data as specified in RFC 5246, Section 7.4.9.
|
||||
///
|
||||
/// verify_data
|
||||
/// PRF(master_secret, finished_label,
|
||||
/// Hash(handshake_messages))[0..verify_data_length-1];
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `builder_state` - The builder state.
|
||||
/// * `outer_state` - The outer HMAC state of the master secret.
|
||||
/// * `inner_state` - The inner HMAC state of the master secret.
|
||||
/// * `label` - The label to use.
|
||||
/// * `hs_hash` - The handshake hash.
|
||||
pub fn verify_data_trace<'a>(
|
||||
builder_state: &'a RefCell<BuilderState>,
|
||||
outer_state: [Tracer<'a, U32>; 8],
|
||||
inner_state: [Tracer<'a, U32>; 8],
|
||||
label: &[Tracer<'a, U8>],
|
||||
hs_hash: [Tracer<'a, U8>; 32],
|
||||
) -> [Tracer<'a, U8>; 12] {
|
||||
let vd = prf_trace(builder_state, outer_state, inner_state, &hs_hash, label, 12);
|
||||
|
||||
vd.try_into().expect("vd is 12 bytes")
|
||||
}
|
||||
|
||||
/// Reference implementation of verify_data as specified in RFC 5246, Section
|
||||
/// 7.4.9.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `outer_state` - The outer HMAC state of the master secret.
|
||||
/// * `inner_state` - The inner HMAC state of the master secret.
|
||||
/// * `label` - The label to use.
|
||||
/// * `hs_hash` - The handshake hash.
|
||||
pub fn verify_data(
|
||||
outer_state: [u32; 8],
|
||||
inner_state: [u32; 8],
|
||||
label: &[u8],
|
||||
hs_hash: [u8; 32],
|
||||
) -> [u8; 12] {
|
||||
let vd = prf(outer_state, inner_state, &hs_hash, label, 12);
|
||||
|
||||
vd.try_into().expect("vd is 12 bytes")
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
use mpz_circuits::{evaluate, CircuitBuilder};
|
||||
|
||||
const CF_LABEL: &[u8; 15] = b"client finished";
|
||||
|
||||
#[test]
|
||||
fn test_verify_data() {
|
||||
let builder = CircuitBuilder::new();
|
||||
let outer_state = builder.add_array_input::<u32, 8>();
|
||||
let inner_state = builder.add_array_input::<u32, 8>();
|
||||
let label = builder.add_array_input::<u8, 15>();
|
||||
let hs_hash = builder.add_array_input::<u8, 32>();
|
||||
let vd = verify_data_trace(builder.state(), outer_state, inner_state, &label, hs_hash);
|
||||
builder.add_output(vd);
|
||||
let circ = builder.build().unwrap();
|
||||
|
||||
let outer_state = [0u32; 8];
|
||||
let inner_state = [1u32; 8];
|
||||
let hs_hash = [42u8; 32];
|
||||
|
||||
let expected = prf(outer_state, inner_state, &hs_hash, CF_LABEL, 12);
|
||||
|
||||
let actual = evaluate!(
|
||||
circ,
|
||||
fn(outer_state, inner_state, CF_LABEL, hs_hash) -> [u8; 12]
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(actual.to_vec(), expected);
|
||||
}
|
||||
}
|
||||
@@ -5,35 +5,35 @@ description = "A 2PC implementation of TLS HMAC-SHA256 PRF"
|
||||
keywords = ["tls", "mpc", "2pc", "hmac", "sha256"]
|
||||
categories = ["cryptography"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
version = "0.1.0-alpha.7"
|
||||
version = "0.1.0-alpha.13-pre"
|
||||
edition = "2021"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[lib]
|
||||
name = "hmac_sha256"
|
||||
|
||||
[features]
|
||||
default = ["mock"]
|
||||
rayon = ["mpz-common/rayon"]
|
||||
mock = []
|
||||
|
||||
[dependencies]
|
||||
tlsn-hmac-sha256-circuits = { workspace = true }
|
||||
|
||||
mpz-garble = { workspace = true }
|
||||
mpz-vm-core = { workspace = true }
|
||||
mpz-core = { workspace = true }
|
||||
mpz-circuits = { workspace = true }
|
||||
mpz-common = { workspace = true }
|
||||
mpz-hash = { workspace = true }
|
||||
|
||||
async-trait = { workspace = true }
|
||||
derive_builder = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
criterion = { workspace = true, features = ["async_tokio"] }
|
||||
mpz-common = { workspace = true, features = ["test-utils"] }
|
||||
mpz-ot = { workspace = true, features = ["ideal"] }
|
||||
mpz-garble = { workspace = true }
|
||||
mpz-common = { workspace = true, features = ["test-utils"] }
|
||||
|
||||
criterion = { workspace = true, features = ["async_tokio"] }
|
||||
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }
|
||||
rand = { workspace = true }
|
||||
hex = { workspace = true }
|
||||
ring = { workspace = true }
|
||||
|
||||
[[bench]]
|
||||
name = "prf"
|
||||
|
||||
@@ -1,9 +1,17 @@
|
||||
#![allow(clippy::let_underscore_future)]
|
||||
|
||||
use criterion::{criterion_group, criterion_main, Criterion};
|
||||
|
||||
use hmac_sha256::{MpcPrf, Prf, PrfConfig, Role};
|
||||
use mpz_common::executor::test_mt_executor;
|
||||
use mpz_garble::{config::Role as DEAPRole, protocol::deap::DEAPThread, Memory};
|
||||
use mpz_ot::ideal::ot::ideal_ot;
|
||||
use hmac_sha256::{Mode, MpcPrf};
|
||||
use mpz_common::context::test_mt_context;
|
||||
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
|
||||
use mpz_ot::ideal::cot::ideal_cot;
|
||||
use mpz_vm_core::{
|
||||
memory::{binary::U8, correlated::Delta, Array},
|
||||
prelude::*,
|
||||
Execute,
|
||||
};
|
||||
use rand::{rngs::StdRng, SeedableRng};
|
||||
|
||||
#[allow(clippy::unit_arg)]
|
||||
fn criterion_benchmark(c: &mut Criterion) {
|
||||
@@ -11,178 +19,127 @@ fn criterion_benchmark(c: &mut Criterion) {
|
||||
group.sample_size(10);
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
|
||||
group.bench_function("prf_preprocess", |b| b.to_async(&rt).iter(preprocess));
|
||||
group.bench_function("prf", |b| b.to_async(&rt).iter(prf));
|
||||
group.bench_function("prf_normal", |b| b.to_async(&rt).iter(|| prf(Mode::Normal)));
|
||||
group.bench_function("prf_reduced", |b| {
|
||||
b.to_async(&rt).iter(|| prf(Mode::Reduced))
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group!(benches, criterion_benchmark);
|
||||
criterion_main!(benches);
|
||||
|
||||
async fn preprocess() {
|
||||
let (mut leader_exec, mut follower_exec) = test_mt_executor(128);
|
||||
|
||||
let (leader_ot_send_0, follower_ot_recv_0) = ideal_ot();
|
||||
let (follower_ot_send_0, leader_ot_recv_0) = ideal_ot();
|
||||
let (leader_ot_send_1, follower_ot_recv_1) = ideal_ot();
|
||||
let (follower_ot_send_1, leader_ot_recv_1) = ideal_ot();
|
||||
|
||||
let leader_thread_0 = DEAPThread::new(
|
||||
DEAPRole::Leader,
|
||||
[0u8; 32],
|
||||
leader_exec.new_thread().await.unwrap(),
|
||||
leader_ot_send_0,
|
||||
leader_ot_recv_0,
|
||||
);
|
||||
let leader_thread_1 = leader_thread_0
|
||||
.new_thread(
|
||||
leader_exec.new_thread().await.unwrap(),
|
||||
leader_ot_send_1,
|
||||
leader_ot_recv_1,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let follower_thread_0 = DEAPThread::new(
|
||||
DEAPRole::Follower,
|
||||
[0u8; 32],
|
||||
follower_exec.new_thread().await.unwrap(),
|
||||
follower_ot_send_0,
|
||||
follower_ot_recv_0,
|
||||
);
|
||||
let follower_thread_1 = follower_thread_0
|
||||
.new_thread(
|
||||
follower_exec.new_thread().await.unwrap(),
|
||||
follower_ot_send_1,
|
||||
follower_ot_recv_1,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let leader_pms = leader_thread_0.new_public_input::<[u8; 32]>("pms").unwrap();
|
||||
let follower_pms = follower_thread_0
|
||||
.new_public_input::<[u8; 32]>("pms")
|
||||
.unwrap();
|
||||
|
||||
let mut leader = MpcPrf::new(
|
||||
PrfConfig::builder().role(Role::Leader).build().unwrap(),
|
||||
leader_thread_0,
|
||||
leader_thread_1,
|
||||
);
|
||||
let mut follower = MpcPrf::new(
|
||||
PrfConfig::builder().role(Role::Follower).build().unwrap(),
|
||||
follower_thread_0,
|
||||
follower_thread_1,
|
||||
);
|
||||
|
||||
futures::join!(
|
||||
async {
|
||||
leader.setup(leader_pms).await.unwrap();
|
||||
leader.set_client_random(Some([0u8; 32])).await.unwrap();
|
||||
leader.preprocess().await.unwrap();
|
||||
},
|
||||
async {
|
||||
follower.setup(follower_pms).await.unwrap();
|
||||
follower.set_client_random(None).await.unwrap();
|
||||
follower.preprocess().await.unwrap();
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
async fn prf() {
|
||||
let (mut leader_exec, mut follower_exec) = test_mt_executor(128);
|
||||
|
||||
let (leader_ot_send_0, follower_ot_recv_0) = ideal_ot();
|
||||
let (follower_ot_send_0, leader_ot_recv_0) = ideal_ot();
|
||||
let (leader_ot_send_1, follower_ot_recv_1) = ideal_ot();
|
||||
let (follower_ot_send_1, leader_ot_recv_1) = ideal_ot();
|
||||
|
||||
let leader_thread_0 = DEAPThread::new(
|
||||
DEAPRole::Leader,
|
||||
[0u8; 32],
|
||||
leader_exec.new_thread().await.unwrap(),
|
||||
leader_ot_send_0,
|
||||
leader_ot_recv_0,
|
||||
);
|
||||
let leader_thread_1 = leader_thread_0
|
||||
.new_thread(
|
||||
leader_exec.new_thread().await.unwrap(),
|
||||
leader_ot_send_1,
|
||||
leader_ot_recv_1,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let follower_thread_0 = DEAPThread::new(
|
||||
DEAPRole::Follower,
|
||||
[0u8; 32],
|
||||
follower_exec.new_thread().await.unwrap(),
|
||||
follower_ot_send_0,
|
||||
follower_ot_recv_0,
|
||||
);
|
||||
let follower_thread_1 = follower_thread_0
|
||||
.new_thread(
|
||||
follower_exec.new_thread().await.unwrap(),
|
||||
follower_ot_send_1,
|
||||
follower_ot_recv_1,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let leader_pms = leader_thread_0.new_public_input::<[u8; 32]>("pms").unwrap();
|
||||
let follower_pms = follower_thread_0
|
||||
.new_public_input::<[u8; 32]>("pms")
|
||||
.unwrap();
|
||||
|
||||
let mut leader = MpcPrf::new(
|
||||
PrfConfig::builder().role(Role::Leader).build().unwrap(),
|
||||
leader_thread_0,
|
||||
leader_thread_1,
|
||||
);
|
||||
let mut follower = MpcPrf::new(
|
||||
PrfConfig::builder().role(Role::Follower).build().unwrap(),
|
||||
follower_thread_0,
|
||||
follower_thread_1,
|
||||
);
|
||||
async fn prf(mode: Mode) {
|
||||
let mut rng = StdRng::seed_from_u64(0);
|
||||
|
||||
let pms = [42u8; 32];
|
||||
let client_random = [0u8; 32];
|
||||
let server_random = [1u8; 32];
|
||||
let cf_hs_hash = [2u8; 32];
|
||||
let sf_hs_hash = [3u8; 32];
|
||||
let client_random = [69u8; 32];
|
||||
let server_random: [u8; 32] = [96u8; 32];
|
||||
|
||||
futures::join!(
|
||||
async {
|
||||
leader.setup(leader_pms.clone()).await.unwrap();
|
||||
leader.set_client_random(Some(client_random)).await.unwrap();
|
||||
leader.preprocess().await.unwrap();
|
||||
},
|
||||
async {
|
||||
follower.setup(follower_pms.clone()).await.unwrap();
|
||||
follower.set_client_random(None).await.unwrap();
|
||||
follower.preprocess().await.unwrap();
|
||||
}
|
||||
);
|
||||
let (mut leader_exec, mut follower_exec) = test_mt_context(8);
|
||||
let mut leader_ctx = leader_exec.new_context().await.unwrap();
|
||||
let mut follower_ctx = follower_exec.new_context().await.unwrap();
|
||||
|
||||
leader.thread_mut().assign(&leader_pms, pms).unwrap();
|
||||
follower.thread_mut().assign(&follower_pms, pms).unwrap();
|
||||
let delta = Delta::random(&mut rng);
|
||||
let (ot_send, ot_recv) = ideal_cot(delta.into_inner());
|
||||
|
||||
let (_leader_keys, _follower_keys) = futures::try_join!(
|
||||
leader.compute_session_keys(server_random),
|
||||
follower.compute_session_keys(server_random)
|
||||
)
|
||||
.unwrap();
|
||||
let mut leader_vm = Garbler::new(ot_send, [0u8; 16], delta);
|
||||
let mut follower_vm = Evaluator::new(ot_recv);
|
||||
|
||||
let _ = futures::try_join!(
|
||||
leader.compute_client_finished_vd(cf_hs_hash),
|
||||
follower.compute_client_finished_vd(cf_hs_hash)
|
||||
)
|
||||
.unwrap();
|
||||
let leader_pms: Array<U8, 32> = leader_vm.alloc().unwrap();
|
||||
leader_vm.mark_public(leader_pms).unwrap();
|
||||
leader_vm.assign(leader_pms, pms).unwrap();
|
||||
leader_vm.commit(leader_pms).unwrap();
|
||||
|
||||
let _ = futures::try_join!(
|
||||
leader.compute_server_finished_vd(sf_hs_hash),
|
||||
follower.compute_server_finished_vd(sf_hs_hash)
|
||||
)
|
||||
.unwrap();
|
||||
let follower_pms: Array<U8, 32> = follower_vm.alloc().unwrap();
|
||||
follower_vm.mark_public(follower_pms).unwrap();
|
||||
follower_vm.assign(follower_pms, pms).unwrap();
|
||||
follower_vm.commit(follower_pms).unwrap();
|
||||
|
||||
futures::try_join!(
|
||||
leader.thread_mut().finalize(),
|
||||
follower.thread_mut().finalize()
|
||||
)
|
||||
.unwrap();
|
||||
let mut leader = MpcPrf::new(mode);
|
||||
let mut follower = MpcPrf::new(mode);
|
||||
|
||||
let leader_output = leader.alloc(&mut leader_vm, leader_pms).unwrap();
|
||||
let follower_output = follower.alloc(&mut follower_vm, follower_pms).unwrap();
|
||||
|
||||
leader.set_client_random(client_random).unwrap();
|
||||
follower.set_client_random(client_random).unwrap();
|
||||
|
||||
leader.set_server_random(server_random).unwrap();
|
||||
follower.set_server_random(server_random).unwrap();
|
||||
|
||||
let _ = leader_vm
|
||||
.decode(leader_output.keys.client_write_key)
|
||||
.unwrap();
|
||||
let _ = leader_vm
|
||||
.decode(leader_output.keys.server_write_key)
|
||||
.unwrap();
|
||||
let _ = leader_vm.decode(leader_output.keys.client_iv).unwrap();
|
||||
let _ = leader_vm.decode(leader_output.keys.server_iv).unwrap();
|
||||
|
||||
let _ = follower_vm
|
||||
.decode(follower_output.keys.client_write_key)
|
||||
.unwrap();
|
||||
let _ = follower_vm
|
||||
.decode(follower_output.keys.server_write_key)
|
||||
.unwrap();
|
||||
let _ = follower_vm.decode(follower_output.keys.client_iv).unwrap();
|
||||
let _ = follower_vm.decode(follower_output.keys.server_iv).unwrap();
|
||||
|
||||
while leader.wants_flush() || follower.wants_flush() {
|
||||
tokio::try_join!(
|
||||
async {
|
||||
leader.flush(&mut leader_vm).unwrap();
|
||||
leader_vm.execute_all(&mut leader_ctx).await
|
||||
},
|
||||
async {
|
||||
follower.flush(&mut follower_vm).unwrap();
|
||||
follower_vm.execute_all(&mut follower_ctx).await
|
||||
}
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
let cf_hs_hash = [1u8; 32];
|
||||
|
||||
leader.set_cf_hash(cf_hs_hash).unwrap();
|
||||
follower.set_cf_hash(cf_hs_hash).unwrap();
|
||||
|
||||
while leader.wants_flush() || follower.wants_flush() {
|
||||
tokio::try_join!(
|
||||
async {
|
||||
leader.flush(&mut leader_vm).unwrap();
|
||||
leader_vm.execute_all(&mut leader_ctx).await
|
||||
},
|
||||
async {
|
||||
follower.flush(&mut follower_vm).unwrap();
|
||||
follower_vm.execute_all(&mut follower_ctx).await
|
||||
}
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
let _ = leader_vm.decode(leader_output.cf_vd).unwrap();
|
||||
let _ = follower_vm.decode(follower_output.cf_vd).unwrap();
|
||||
|
||||
let sf_hs_hash = [2u8; 32];
|
||||
|
||||
leader.set_sf_hash(sf_hs_hash).unwrap();
|
||||
follower.set_sf_hash(sf_hs_hash).unwrap();
|
||||
|
||||
while leader.wants_flush() || follower.wants_flush() {
|
||||
tokio::try_join!(
|
||||
async {
|
||||
leader.flush(&mut leader_vm).unwrap();
|
||||
leader_vm.execute_all(&mut leader_ctx).await
|
||||
},
|
||||
async {
|
||||
follower.flush(&mut follower_vm).unwrap();
|
||||
follower_vm.execute_all(&mut follower_ctx).await
|
||||
}
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
let _ = leader_vm.decode(leader_output.sf_vd).unwrap();
|
||||
let _ = follower_vm.decode(follower_output.sf_vd).unwrap();
|
||||
}
|
||||
|
||||
@@ -1,24 +1,10 @@
|
||||
use derive_builder::Builder;
|
||||
//! PRF modes.
|
||||
|
||||
/// Role of this party in the PRF.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum Role {
|
||||
/// The leader provides the private inputs to the PRF.
|
||||
Leader,
|
||||
/// The follower is blind to the inputs to the PRF.
|
||||
Follower,
|
||||
}
|
||||
|
||||
/// Configuration for the PRF.
|
||||
#[derive(Debug, Builder)]
|
||||
pub struct PrfConfig {
|
||||
/// The role of this party in the PRF.
|
||||
pub(crate) role: Role,
|
||||
}
|
||||
|
||||
impl PrfConfig {
|
||||
/// Creates a new builder.
|
||||
pub fn builder() -> PrfConfigBuilder {
|
||||
PrfConfigBuilder::default()
|
||||
}
|
||||
/// Modes for the PRF.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub enum Mode {
|
||||
/// Computes some hashes locally.
|
||||
Reduced,
|
||||
/// Computes the whole PRF in MPC.
|
||||
Normal,
|
||||
}
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
use core::fmt;
|
||||
use std::error::Error;
|
||||
|
||||
use mpz_hash::sha256::Sha256Error;
|
||||
|
||||
/// A PRF error.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub struct PrfError {
|
||||
@@ -20,18 +22,21 @@ impl PrfError {
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn vm<E: Into<Box<dyn Error + Send + Sync>>>(err: E) -> Self {
|
||||
Self::new(ErrorKind::Vm, err)
|
||||
}
|
||||
|
||||
pub(crate) fn state(msg: impl Into<String>) -> Self {
|
||||
Self {
|
||||
kind: ErrorKind::State,
|
||||
source: Some(msg.into().into()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn role(msg: impl Into<String>) -> Self {
|
||||
Self {
|
||||
kind: ErrorKind::Role,
|
||||
source: Some(msg.into().into()),
|
||||
}
|
||||
impl From<Sha256Error> for PrfError {
|
||||
fn from(value: Sha256Error) -> Self {
|
||||
Self::new(ErrorKind::Hash, value)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -39,7 +44,7 @@ impl PrfError {
|
||||
pub(crate) enum ErrorKind {
|
||||
Vm,
|
||||
State,
|
||||
Role,
|
||||
Hash,
|
||||
}
|
||||
|
||||
impl fmt::Display for PrfError {
|
||||
@@ -47,37 +52,13 @@ impl fmt::Display for PrfError {
|
||||
match self.kind {
|
||||
ErrorKind::Vm => write!(f, "vm error")?,
|
||||
ErrorKind::State => write!(f, "state error")?,
|
||||
ErrorKind::Role => write!(f, "role error")?,
|
||||
ErrorKind::Hash => write!(f, "hash error")?,
|
||||
}
|
||||
|
||||
if let Some(ref source) = self.source {
|
||||
write!(f, " caused by: {}", source)?;
|
||||
write!(f, " caused by: {source}")?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<mpz_garble::MemoryError> for PrfError {
|
||||
fn from(error: mpz_garble::MemoryError) -> Self {
|
||||
Self::new(ErrorKind::Vm, error)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<mpz_garble::LoadError> for PrfError {
|
||||
fn from(error: mpz_garble::LoadError) -> Self {
|
||||
Self::new(ErrorKind::Vm, error)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<mpz_garble::ExecutionError> for PrfError {
|
||||
fn from(error: mpz_garble::ExecutionError) -> Self {
|
||||
Self::new(ErrorKind::Vm, error)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<mpz_garble::DecodeError> for PrfError {
|
||||
fn from(error: mpz_garble::DecodeError) -> Self {
|
||||
Self::new(ErrorKind::Vm, error)
|
||||
}
|
||||
}
|
||||
|
||||
177
crates/components/hmac-sha256/src/hmac.rs
Normal file
177
crates/components/hmac-sha256/src/hmac.rs
Normal file
@@ -0,0 +1,177 @@
|
||||
//! Computation of HMAC-SHA256.
|
||||
//!
|
||||
//! HMAC-SHA256 is defined as
|
||||
//!
|
||||
//! HMAC(m) = H((key' xor opad) || H((key' xor ipad) || m))
|
||||
//!
|
||||
//! * H - SHA256 hash function
|
||||
//! * key' - key padded with zero bytes to 64 bytes (we do not support longer
|
||||
//! keys)
|
||||
//! * opad - 64 bytes of 0x5c
|
||||
//! * ipad - 64 bytes of 0x36
|
||||
//! * m - message
|
||||
//!
|
||||
//! This implementation computes HMAC-SHA256 using intermediate results
|
||||
//! `outer_partial` and `inner_local`. Then HMAC(m) = H(outer_partial ||
|
||||
//! inner_local)
|
||||
//!
|
||||
//! * `outer_partial` - key' xor opad
|
||||
//! * `inner_local` - H((key' xor ipad) || m)
|
||||
|
||||
use mpz_hash::sha256::Sha256;
|
||||
use mpz_vm_core::{
|
||||
memory::{
|
||||
binary::{Binary, U8},
|
||||
Array,
|
||||
},
|
||||
Vm,
|
||||
};
|
||||
|
||||
use crate::PrfError;
|
||||
|
||||
pub(crate) const IPAD: [u8; 64] = [0x36; 64];
|
||||
pub(crate) const OPAD: [u8; 64] = [0x5c; 64];
|
||||
|
||||
/// Computes HMAC-SHA256
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `vm` - The virtual machine.
|
||||
/// * `outer_partial` - (key' xor opad)
|
||||
/// * `inner_local` - H((key' xor ipad) || m)
|
||||
pub(crate) fn hmac_sha256(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
mut outer_partial: Sha256,
|
||||
inner_local: Array<U8, 32>,
|
||||
) -> Result<Array<U8, 32>, PrfError> {
|
||||
outer_partial.update(&inner_local.into());
|
||||
outer_partial.compress(vm)?;
|
||||
outer_partial.finalize(vm).map_err(PrfError::from)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::{
|
||||
hmac::hmac_sha256,
|
||||
sha256, state_to_bytes,
|
||||
test_utils::{compute_inner_local, compute_outer_partial, mock_vm},
|
||||
};
|
||||
use mpz_common::context::test_st_context;
|
||||
use mpz_hash::sha256::Sha256;
|
||||
use mpz_vm_core::{
|
||||
memory::{
|
||||
binary::{U32, U8},
|
||||
Array, MemoryExt, ViewExt,
|
||||
},
|
||||
Execute,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_hmac_reference() {
|
||||
let (inputs, references) = test_fixtures();
|
||||
|
||||
for (input, &reference) in inputs.iter().zip(references.iter()) {
|
||||
let outer_partial = compute_outer_partial(input.0.clone());
|
||||
let inner_local = compute_inner_local(input.0.clone(), &input.1);
|
||||
|
||||
let hmac = sha256(outer_partial, 64, &state_to_bytes(inner_local));
|
||||
|
||||
assert_eq!(state_to_bytes(hmac), reference);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_hmac_circuit() {
|
||||
let (mut ctx_a, mut ctx_b) = test_st_context(8);
|
||||
let (mut leader, mut follower) = mock_vm();
|
||||
|
||||
let (inputs, references) = test_fixtures();
|
||||
for (input, &reference) in inputs.iter().zip(references.iter()) {
|
||||
let outer_partial = compute_outer_partial(input.0.clone());
|
||||
let inner_local = compute_inner_local(input.0.clone(), &input.1);
|
||||
|
||||
let outer_partial_leader: Array<U32, 8> = leader.alloc().unwrap();
|
||||
leader.mark_public(outer_partial_leader).unwrap();
|
||||
leader.assign(outer_partial_leader, outer_partial).unwrap();
|
||||
leader.commit(outer_partial_leader).unwrap();
|
||||
|
||||
let inner_local_leader: Array<U8, 32> = leader.alloc().unwrap();
|
||||
leader.mark_public(inner_local_leader).unwrap();
|
||||
leader
|
||||
.assign(inner_local_leader, state_to_bytes(inner_local))
|
||||
.unwrap();
|
||||
leader.commit(inner_local_leader).unwrap();
|
||||
|
||||
let hmac_leader = hmac_sha256(
|
||||
&mut leader,
|
||||
Sha256::new_from_state(outer_partial_leader, 1),
|
||||
inner_local_leader,
|
||||
)
|
||||
.unwrap();
|
||||
let hmac_leader = leader.decode(hmac_leader).unwrap();
|
||||
|
||||
let outer_partial_follower: Array<U32, 8> = follower.alloc().unwrap();
|
||||
follower.mark_public(outer_partial_follower).unwrap();
|
||||
follower
|
||||
.assign(outer_partial_follower, outer_partial)
|
||||
.unwrap();
|
||||
follower.commit(outer_partial_follower).unwrap();
|
||||
|
||||
let inner_local_follower: Array<U8, 32> = follower.alloc().unwrap();
|
||||
follower.mark_public(inner_local_follower).unwrap();
|
||||
follower
|
||||
.assign(inner_local_follower, state_to_bytes(inner_local))
|
||||
.unwrap();
|
||||
follower.commit(inner_local_follower).unwrap();
|
||||
|
||||
let hmac_follower = hmac_sha256(
|
||||
&mut follower,
|
||||
Sha256::new_from_state(outer_partial_follower, 1),
|
||||
inner_local_follower,
|
||||
)
|
||||
.unwrap();
|
||||
let hmac_follower = follower.decode(hmac_follower).unwrap();
|
||||
|
||||
let (hmac_leader, hmac_follower) = tokio::try_join!(
|
||||
async {
|
||||
leader.execute_all(&mut ctx_a).await.unwrap();
|
||||
hmac_leader.await
|
||||
},
|
||||
async {
|
||||
follower.execute_all(&mut ctx_b).await.unwrap();
|
||||
hmac_follower.await
|
||||
}
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(hmac_leader, hmac_follower);
|
||||
assert_eq!(hmac_leader, reference);
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
fn test_fixtures() -> (Vec<(Vec<u8>, Vec<u8>)>, Vec<[u8; 32]>) {
|
||||
let test_vectors: Vec<(Vec<u8>, Vec<u8>)> = vec![
|
||||
(
|
||||
hex::decode("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b").unwrap(),
|
||||
hex::decode("4869205468657265").unwrap(),
|
||||
),
|
||||
(
|
||||
hex::decode("4a656665").unwrap(),
|
||||
hex::decode("7768617420646f2079612077616e7420666f72206e6f7468696e673f").unwrap(),
|
||||
),
|
||||
];
|
||||
let expected: Vec<[u8; 32]> = vec![
|
||||
hex::decode("b0344c61d8db38535ca8afceaf0bf12b881dc200c9833da726e9376c2e32cff7")
|
||||
.unwrap()
|
||||
.try_into()
|
||||
.unwrap(),
|
||||
hex::decode("5bdcc146bf60754e6a042426089575c75a003f089d2739839dec58b964ec3843")
|
||||
.unwrap()
|
||||
.try_into()
|
||||
.unwrap(),
|
||||
];
|
||||
|
||||
(test_vectors, expected)
|
||||
}
|
||||
}
|
||||
@@ -1,267 +1,269 @@
|
||||
//! This module contains the protocol for computing TLS SHA-256 HMAC PRF.
|
||||
//! This crate contains the protocol for computing TLS 1.2 SHA-256 HMAC PRF.
|
||||
|
||||
#![deny(missing_docs, unreachable_pub, unused_must_use)]
|
||||
#![deny(clippy::all)]
|
||||
#![forbid(unsafe_code)]
|
||||
|
||||
mod config;
|
||||
mod error;
|
||||
mod prf;
|
||||
mod hmac;
|
||||
#[cfg(test)]
|
||||
mod test_utils;
|
||||
|
||||
pub use config::{PrfConfig, PrfConfigBuilder, PrfConfigBuilderError, Role};
|
||||
mod config;
|
||||
pub use config::Mode;
|
||||
|
||||
mod error;
|
||||
pub use error::PrfError;
|
||||
|
||||
mod prf;
|
||||
pub use prf::MpcPrf;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use mpz_vm_core::memory::{binary::U8, Array};
|
||||
|
||||
use mpz_garble::value::ValueRef;
|
||||
|
||||
pub(crate) static CF_LABEL: &[u8] = b"client finished";
|
||||
pub(crate) static SF_LABEL: &[u8] = b"server finished";
|
||||
|
||||
/// Session keys computed by the PRF.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct SessionKeys {
|
||||
/// Client write key.
|
||||
pub client_write_key: ValueRef,
|
||||
/// Server write key.
|
||||
pub server_write_key: ValueRef,
|
||||
/// Client IV.
|
||||
pub client_iv: ValueRef,
|
||||
/// Server IV.
|
||||
pub server_iv: ValueRef,
|
||||
/// PRF output.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct PrfOutput {
|
||||
/// TLS session keys.
|
||||
pub keys: SessionKeys,
|
||||
/// Client finished verify data.
|
||||
pub cf_vd: Array<U8, 12>,
|
||||
/// Server finished verify data.
|
||||
pub sf_vd: Array<U8, 12>,
|
||||
}
|
||||
|
||||
/// PRF trait for computing TLS PRF.
|
||||
#[async_trait]
|
||||
pub trait Prf {
|
||||
/// Sets up the PRF.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `pms` - The pre-master secret.
|
||||
async fn setup(&mut self, pms: ValueRef) -> Result<SessionKeys, PrfError>;
|
||||
/// Session keys computed by the PRF.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct SessionKeys {
|
||||
/// Client write key.
|
||||
pub client_write_key: Array<U8, 16>,
|
||||
/// Server write key.
|
||||
pub server_write_key: Array<U8, 16>,
|
||||
/// Client IV.
|
||||
pub client_iv: Array<U8, 4>,
|
||||
/// Server IV.
|
||||
pub server_iv: Array<U8, 4>,
|
||||
}
|
||||
|
||||
/// Sets the client random.
|
||||
///
|
||||
/// This must be set after calling [`Prf::setup`].
|
||||
///
|
||||
/// Only the leader can provide the client random.
|
||||
async fn set_client_random(&mut self, client_random: Option<[u8; 32]>) -> Result<(), PrfError>;
|
||||
fn sha256(mut state: [u32; 8], pos: usize, msg: &[u8]) -> [u32; 8] {
|
||||
use sha2::{
|
||||
compress256,
|
||||
digest::{
|
||||
block_buffer::{BlockBuffer, Eager},
|
||||
generic_array::typenum::U64,
|
||||
},
|
||||
};
|
||||
|
||||
/// Preprocesses the PRF.
|
||||
async fn preprocess(&mut self) -> Result<(), PrfError>;
|
||||
let mut buffer = BlockBuffer::<U64, Eager>::default();
|
||||
buffer.digest_blocks(msg, |b| compress256(&mut state, b));
|
||||
buffer.digest_pad(0x80, &(((msg.len() + pos) * 8) as u64).to_be_bytes(), |b| {
|
||||
compress256(&mut state, &[*b])
|
||||
});
|
||||
state
|
||||
}
|
||||
|
||||
/// Computes the client finished verify data.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `handshake_hash` - The handshake transcript hash.
|
||||
async fn compute_client_finished_vd(
|
||||
&mut self,
|
||||
handshake_hash: [u8; 32],
|
||||
) -> Result<[u8; 12], PrfError>;
|
||||
|
||||
/// Computes the server finished verify data.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `handshake_hash` - The handshake transcript hash.
|
||||
async fn compute_server_finished_vd(
|
||||
&mut self,
|
||||
handshake_hash: [u8; 32],
|
||||
) -> Result<[u8; 12], PrfError>;
|
||||
|
||||
/// Computes the session keys.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `server_random` - The server random.
|
||||
async fn compute_session_keys(
|
||||
&mut self,
|
||||
server_random: [u8; 32],
|
||||
) -> Result<SessionKeys, PrfError>;
|
||||
fn state_to_bytes(input: [u32; 8]) -> [u8; 32] {
|
||||
let mut output = [0_u8; 32];
|
||||
for (k, byte_chunk) in input.iter().enumerate() {
|
||||
let byte_chunk = byte_chunk.to_be_bytes();
|
||||
output[4 * k..4 * (k + 1)].copy_from_slice(&byte_chunk);
|
||||
}
|
||||
output
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use mpz_common::executor::test_st_executor;
|
||||
use mpz_garble::{config::Role as DEAPRole, protocol::deap::DEAPThread, Decode, Memory};
|
||||
use crate::{
|
||||
test_utils::{mock_vm, prf_cf_vd, prf_keys, prf_ms, prf_sf_vd},
|
||||
Mode, MpcPrf, SessionKeys,
|
||||
};
|
||||
use mpz_common::context::test_st_context;
|
||||
use mpz_vm_core::{
|
||||
memory::{binary::U8, Array, MemoryExt, ViewExt},
|
||||
Execute,
|
||||
};
|
||||
use rand::{rngs::StdRng, Rng, SeedableRng};
|
||||
|
||||
use hmac_sha256_circuits::{hmac_sha256_partial, prf, session_keys};
|
||||
use mpz_ot::ideal::ot::ideal_ot;
|
||||
|
||||
use super::*;
|
||||
|
||||
fn compute_ms(pms: [u8; 32], client_random: [u8; 32], server_random: [u8; 32]) -> [u8; 48] {
|
||||
let (outer_state, inner_state) = hmac_sha256_partial(&pms);
|
||||
let seed = client_random
|
||||
.iter()
|
||||
.chain(&server_random)
|
||||
.copied()
|
||||
.collect::<Vec<_>>();
|
||||
let ms = prf(outer_state, inner_state, &seed, b"master secret", 48);
|
||||
ms.try_into().unwrap()
|
||||
}
|
||||
|
||||
fn compute_vd(ms: [u8; 48], label: &[u8], hs_hash: [u8; 32]) -> [u8; 12] {
|
||||
let (outer_state, inner_state) = hmac_sha256_partial(&ms);
|
||||
let vd = prf(outer_state, inner_state, &hs_hash, label, 12);
|
||||
vd.try_into().unwrap()
|
||||
}
|
||||
|
||||
#[ignore = "expensive"]
|
||||
#[tokio::test]
|
||||
async fn test_prf() {
|
||||
let pms = [42u8; 32];
|
||||
let client_random = [69u8; 32];
|
||||
let server_random: [u8; 32] = [96u8; 32];
|
||||
let ms = compute_ms(pms, client_random, server_random);
|
||||
async fn test_prf_reduced() {
|
||||
let mode = Mode::Reduced;
|
||||
test_prf(mode).await;
|
||||
}
|
||||
|
||||
let (leader_ctx_0, follower_ctx_0) = test_st_executor(128);
|
||||
let (leader_ctx_1, follower_ctx_1) = test_st_executor(128);
|
||||
#[tokio::test]
|
||||
async fn test_prf_normal() {
|
||||
let mode = Mode::Normal;
|
||||
test_prf(mode).await;
|
||||
}
|
||||
|
||||
let (leader_ot_send_0, follower_ot_recv_0) = ideal_ot();
|
||||
let (follower_ot_send_0, leader_ot_recv_0) = ideal_ot();
|
||||
let (leader_ot_send_1, follower_ot_recv_1) = ideal_ot();
|
||||
let (follower_ot_send_1, leader_ot_recv_1) = ideal_ot();
|
||||
async fn test_prf(mode: Mode) {
|
||||
let mut rng = StdRng::seed_from_u64(1);
|
||||
// Test input
|
||||
let pms: [u8; 32] = rng.random();
|
||||
let client_random: [u8; 32] = rng.random();
|
||||
let server_random: [u8; 32] = rng.random();
|
||||
|
||||
let leader_thread_0 = DEAPThread::new(
|
||||
DEAPRole::Leader,
|
||||
[0u8; 32],
|
||||
leader_ctx_0,
|
||||
leader_ot_send_0,
|
||||
leader_ot_recv_0,
|
||||
);
|
||||
let leader_thread_1 = leader_thread_0
|
||||
.new_thread(leader_ctx_1, leader_ot_send_1, leader_ot_recv_1)
|
||||
.unwrap();
|
||||
let cf_hs_hash: [u8; 32] = rng.random();
|
||||
let sf_hs_hash: [u8; 32] = rng.random();
|
||||
|
||||
let follower_thread_0 = DEAPThread::new(
|
||||
DEAPRole::Follower,
|
||||
[0u8; 32],
|
||||
follower_ctx_0,
|
||||
follower_ot_send_0,
|
||||
follower_ot_recv_0,
|
||||
);
|
||||
let follower_thread_1 = follower_thread_0
|
||||
.new_thread(follower_ctx_1, follower_ot_send_1, follower_ot_recv_1)
|
||||
.unwrap();
|
||||
// Expected output
|
||||
let ms_expected = prf_ms(pms, client_random, server_random);
|
||||
|
||||
// Set up public PMS for testing.
|
||||
let leader_pms = leader_thread_0.new_public_input::<[u8; 32]>("pms").unwrap();
|
||||
let follower_pms = follower_thread_0
|
||||
.new_public_input::<[u8; 32]>("pms")
|
||||
.unwrap();
|
||||
let [cwk_expected, swk_expected, civ_expected, siv_expected] =
|
||||
prf_keys(ms_expected, client_random, server_random);
|
||||
|
||||
leader_thread_0.assign(&leader_pms, pms).unwrap();
|
||||
follower_thread_0.assign(&follower_pms, pms).unwrap();
|
||||
let cwk_expected: [u8; 16] = cwk_expected.try_into().unwrap();
|
||||
let swk_expected: [u8; 16] = swk_expected.try_into().unwrap();
|
||||
let civ_expected: [u8; 4] = civ_expected.try_into().unwrap();
|
||||
let siv_expected: [u8; 4] = siv_expected.try_into().unwrap();
|
||||
|
||||
let mut leader = MpcPrf::new(
|
||||
PrfConfig::builder().role(Role::Leader).build().unwrap(),
|
||||
leader_thread_0,
|
||||
leader_thread_1,
|
||||
);
|
||||
let mut follower = MpcPrf::new(
|
||||
PrfConfig::builder().role(Role::Follower).build().unwrap(),
|
||||
follower_thread_0,
|
||||
follower_thread_1,
|
||||
);
|
||||
let cf_vd_expected = prf_cf_vd(ms_expected, cf_hs_hash);
|
||||
let sf_vd_expected = prf_sf_vd(ms_expected, sf_hs_hash);
|
||||
|
||||
futures::join!(
|
||||
async {
|
||||
leader.setup(leader_pms).await.unwrap();
|
||||
leader.set_client_random(Some(client_random)).await.unwrap();
|
||||
leader.preprocess().await.unwrap();
|
||||
},
|
||||
async {
|
||||
follower.setup(follower_pms).await.unwrap();
|
||||
follower.set_client_random(None).await.unwrap();
|
||||
follower.preprocess().await.unwrap();
|
||||
}
|
||||
);
|
||||
let cf_vd_expected: [u8; 12] = cf_vd_expected.try_into().unwrap();
|
||||
let sf_vd_expected: [u8; 12] = sf_vd_expected.try_into().unwrap();
|
||||
|
||||
let (leader_session_keys, follower_session_keys) = futures::try_join!(
|
||||
leader.compute_session_keys(server_random),
|
||||
follower.compute_session_keys(server_random)
|
||||
)
|
||||
.unwrap();
|
||||
// Set up vm and prf
|
||||
let (mut ctx_a, mut ctx_b) = test_st_context(128);
|
||||
let (mut leader, mut follower) = mock_vm();
|
||||
|
||||
let leader_pms: Array<U8, 32> = leader.alloc().unwrap();
|
||||
leader.mark_public(leader_pms).unwrap();
|
||||
leader.assign(leader_pms, pms).unwrap();
|
||||
leader.commit(leader_pms).unwrap();
|
||||
|
||||
let follower_pms: Array<U8, 32> = follower.alloc().unwrap();
|
||||
follower.mark_public(follower_pms).unwrap();
|
||||
follower.assign(follower_pms, pms).unwrap();
|
||||
follower.commit(follower_pms).unwrap();
|
||||
|
||||
let mut prf_leader = MpcPrf::new(mode);
|
||||
let mut prf_follower = MpcPrf::new(mode);
|
||||
|
||||
let leader_prf_out = prf_leader.alloc(&mut leader, leader_pms).unwrap();
|
||||
let follower_prf_out = prf_follower.alloc(&mut follower, follower_pms).unwrap();
|
||||
|
||||
// client_random and server_random
|
||||
prf_leader.set_client_random(client_random).unwrap();
|
||||
prf_follower.set_client_random(client_random).unwrap();
|
||||
|
||||
prf_leader.set_server_random(server_random).unwrap();
|
||||
prf_follower.set_server_random(server_random).unwrap();
|
||||
|
||||
let SessionKeys {
|
||||
client_write_key: leader_cwk,
|
||||
server_write_key: leader_swk,
|
||||
client_iv: leader_civ,
|
||||
server_iv: leader_siv,
|
||||
} = leader_session_keys;
|
||||
client_write_key: cwk_leader,
|
||||
server_write_key: swk_leader,
|
||||
client_iv: civ_leader,
|
||||
server_iv: siv_leader,
|
||||
} = leader_prf_out.keys;
|
||||
|
||||
let mut cwk_leader = leader.decode(cwk_leader).unwrap();
|
||||
let mut swk_leader = leader.decode(swk_leader).unwrap();
|
||||
let mut civ_leader = leader.decode(civ_leader).unwrap();
|
||||
let mut siv_leader = leader.decode(siv_leader).unwrap();
|
||||
|
||||
let SessionKeys {
|
||||
client_write_key: follower_cwk,
|
||||
server_write_key: follower_swk,
|
||||
client_iv: follower_civ,
|
||||
server_iv: follower_siv,
|
||||
} = follower_session_keys;
|
||||
client_write_key: cwk_follower,
|
||||
server_write_key: swk_follower,
|
||||
client_iv: civ_follower,
|
||||
server_iv: siv_follower,
|
||||
} = follower_prf_out.keys;
|
||||
|
||||
// Decode session keys
|
||||
let (leader_session_keys, follower_session_keys) = futures::try_join!(
|
||||
async {
|
||||
leader
|
||||
.thread_mut()
|
||||
.decode(&[leader_cwk, leader_swk, leader_civ, leader_siv])
|
||||
.await
|
||||
},
|
||||
async {
|
||||
follower
|
||||
.thread_mut()
|
||||
.decode(&[follower_cwk, follower_swk, follower_civ, follower_siv])
|
||||
.await
|
||||
}
|
||||
)
|
||||
.unwrap();
|
||||
let mut cwk_follower = follower.decode(cwk_follower).unwrap();
|
||||
let mut swk_follower = follower.decode(swk_follower).unwrap();
|
||||
let mut civ_follower = follower.decode(civ_follower).unwrap();
|
||||
let mut siv_follower = follower.decode(siv_follower).unwrap();
|
||||
|
||||
let leader_cwk: [u8; 16] = leader_session_keys[0].clone().try_into().unwrap();
|
||||
let leader_swk: [u8; 16] = leader_session_keys[1].clone().try_into().unwrap();
|
||||
let leader_civ: [u8; 4] = leader_session_keys[2].clone().try_into().unwrap();
|
||||
let leader_siv: [u8; 4] = leader_session_keys[3].clone().try_into().unwrap();
|
||||
while prf_leader.wants_flush() || prf_follower.wants_flush() {
|
||||
tokio::try_join!(
|
||||
async {
|
||||
prf_leader.flush(&mut leader).unwrap();
|
||||
leader.execute_all(&mut ctx_a).await
|
||||
},
|
||||
async {
|
||||
prf_follower.flush(&mut follower).unwrap();
|
||||
follower.execute_all(&mut ctx_b).await
|
||||
}
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
let follower_cwk: [u8; 16] = follower_session_keys[0].clone().try_into().unwrap();
|
||||
let follower_swk: [u8; 16] = follower_session_keys[1].clone().try_into().unwrap();
|
||||
let follower_civ: [u8; 4] = follower_session_keys[2].clone().try_into().unwrap();
|
||||
let follower_siv: [u8; 4] = follower_session_keys[3].clone().try_into().unwrap();
|
||||
let cwk_leader = cwk_leader.try_recv().unwrap().unwrap();
|
||||
let swk_leader = swk_leader.try_recv().unwrap().unwrap();
|
||||
let civ_leader = civ_leader.try_recv().unwrap().unwrap();
|
||||
let siv_leader = siv_leader.try_recv().unwrap().unwrap();
|
||||
|
||||
let (expected_cwk, expected_swk, expected_civ, expected_siv) =
|
||||
session_keys(pms, client_random, server_random);
|
||||
let cwk_follower = cwk_follower.try_recv().unwrap().unwrap();
|
||||
let swk_follower = swk_follower.try_recv().unwrap().unwrap();
|
||||
let civ_follower = civ_follower.try_recv().unwrap().unwrap();
|
||||
let siv_follower = siv_follower.try_recv().unwrap().unwrap();
|
||||
|
||||
assert_eq!(leader_cwk, expected_cwk);
|
||||
assert_eq!(leader_swk, expected_swk);
|
||||
assert_eq!(leader_civ, expected_civ);
|
||||
assert_eq!(leader_siv, expected_siv);
|
||||
assert_eq!(cwk_leader, cwk_follower);
|
||||
assert_eq!(swk_leader, swk_follower);
|
||||
assert_eq!(civ_leader, civ_follower);
|
||||
assert_eq!(siv_leader, siv_follower);
|
||||
|
||||
assert_eq!(follower_cwk, expected_cwk);
|
||||
assert_eq!(follower_swk, expected_swk);
|
||||
assert_eq!(follower_civ, expected_civ);
|
||||
assert_eq!(follower_siv, expected_siv);
|
||||
assert_eq!(cwk_leader, cwk_expected);
|
||||
assert_eq!(swk_leader, swk_expected);
|
||||
assert_eq!(civ_leader, civ_expected);
|
||||
assert_eq!(siv_leader, siv_expected);
|
||||
|
||||
let cf_hs_hash = [1u8; 32];
|
||||
let sf_hs_hash = [2u8; 32];
|
||||
// client finished
|
||||
prf_leader.set_cf_hash(cf_hs_hash).unwrap();
|
||||
prf_follower.set_cf_hash(cf_hs_hash).unwrap();
|
||||
|
||||
let (cf_vd, _) = futures::try_join!(
|
||||
leader.compute_client_finished_vd(cf_hs_hash),
|
||||
follower.compute_client_finished_vd(cf_hs_hash)
|
||||
)
|
||||
.unwrap();
|
||||
let cf_vd_leader = leader_prf_out.cf_vd;
|
||||
let cf_vd_follower = follower_prf_out.cf_vd;
|
||||
|
||||
let expected_cf_vd = compute_vd(ms, b"client finished", cf_hs_hash);
|
||||
let mut cf_vd_leader = leader.decode(cf_vd_leader).unwrap();
|
||||
let mut cf_vd_follower = follower.decode(cf_vd_follower).unwrap();
|
||||
|
||||
assert_eq!(cf_vd, expected_cf_vd);
|
||||
while prf_leader.wants_flush() || prf_follower.wants_flush() {
|
||||
tokio::try_join!(
|
||||
async {
|
||||
prf_leader.flush(&mut leader).unwrap();
|
||||
leader.execute_all(&mut ctx_a).await
|
||||
},
|
||||
async {
|
||||
prf_follower.flush(&mut follower).unwrap();
|
||||
follower.execute_all(&mut ctx_b).await
|
||||
}
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
let (sf_vd, _) = futures::try_join!(
|
||||
leader.compute_server_finished_vd(sf_hs_hash),
|
||||
follower.compute_server_finished_vd(sf_hs_hash)
|
||||
)
|
||||
.unwrap();
|
||||
let cf_vd_leader = cf_vd_leader.try_recv().unwrap().unwrap();
|
||||
let cf_vd_follower = cf_vd_follower.try_recv().unwrap().unwrap();
|
||||
|
||||
let expected_sf_vd = compute_vd(ms, b"server finished", sf_hs_hash);
|
||||
assert_eq!(cf_vd_leader, cf_vd_follower);
|
||||
assert_eq!(cf_vd_leader, cf_vd_expected);
|
||||
|
||||
assert_eq!(sf_vd, expected_sf_vd);
|
||||
// server finished
|
||||
prf_leader.set_sf_hash(sf_hs_hash).unwrap();
|
||||
prf_follower.set_sf_hash(sf_hs_hash).unwrap();
|
||||
|
||||
let sf_vd_leader = leader_prf_out.sf_vd;
|
||||
let sf_vd_follower = follower_prf_out.sf_vd;
|
||||
|
||||
let mut sf_vd_leader = leader.decode(sf_vd_leader).unwrap();
|
||||
let mut sf_vd_follower = follower.decode(sf_vd_follower).unwrap();
|
||||
|
||||
while prf_leader.wants_flush() || prf_follower.wants_flush() {
|
||||
tokio::try_join!(
|
||||
async {
|
||||
prf_leader.flush(&mut leader).unwrap();
|
||||
leader.execute_all(&mut ctx_a).await
|
||||
},
|
||||
async {
|
||||
prf_follower.flush(&mut follower).unwrap();
|
||||
follower.execute_all(&mut ctx_b).await
|
||||
}
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
let sf_vd_leader = sf_vd_leader.try_recv().unwrap().unwrap();
|
||||
let sf_vd_follower = sf_vd_follower.try_recv().unwrap().unwrap();
|
||||
|
||||
assert_eq!(sf_vd_leader, sf_vd_follower);
|
||||
assert_eq!(sf_vd_leader, sf_vd_expected);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,443 +1,407 @@
|
||||
use std::{
|
||||
fmt::Debug,
|
||||
sync::{Arc, OnceLock},
|
||||
use crate::{
|
||||
hmac::{IPAD, OPAD},
|
||||
Mode, PrfError, PrfOutput,
|
||||
};
|
||||
|
||||
use async_trait::async_trait;
|
||||
|
||||
use hmac_sha256_circuits::{build_session_keys, build_verify_data};
|
||||
use mpz_circuits::Circuit;
|
||||
use mpz_common::cpu::CpuBackend;
|
||||
use mpz_garble::{config::Visibility, value::ValueRef, Decode, Execute, Load, Memory};
|
||||
use mpz_circuits::{circuits::xor, Circuit, CircuitBuilder};
|
||||
use mpz_hash::sha256::Sha256;
|
||||
use mpz_vm_core::{
|
||||
memory::{
|
||||
binary::{Binary, U8},
|
||||
Array, MemoryExt, StaticSize, Vector, ViewExt,
|
||||
},
|
||||
Call, CallableExt, Vm,
|
||||
};
|
||||
use std::{fmt::Debug, sync::Arc};
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::{Prf, PrfConfig, PrfError, Role, SessionKeys, CF_LABEL, SF_LABEL};
|
||||
mod state;
|
||||
use state::State;
|
||||
|
||||
/// Circuit for computing TLS session keys.
|
||||
static SESSION_KEYS_CIRC: OnceLock<Arc<Circuit>> = OnceLock::new();
|
||||
/// Circuit for computing TLS client verify data.
|
||||
static CLIENT_VD_CIRC: OnceLock<Arc<Circuit>> = OnceLock::new();
|
||||
/// Circuit for computing TLS server verify data.
|
||||
static SERVER_VD_CIRC: OnceLock<Arc<Circuit>> = OnceLock::new();
|
||||
mod function;
|
||||
use function::Prf;
|
||||
|
||||
/// MPC PRF for computing TLS 1.2 HMAC-SHA256 PRF.
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct Randoms {
|
||||
pub(crate) client_random: ValueRef,
|
||||
pub(crate) server_random: ValueRef,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub(crate) struct HashState {
|
||||
pub(crate) ms_outer_hash_state: ValueRef,
|
||||
pub(crate) ms_inner_hash_state: ValueRef,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct VerifyData {
|
||||
pub(crate) handshake_hash: ValueRef,
|
||||
pub(crate) vd: ValueRef,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) enum State {
|
||||
Initialized,
|
||||
SessionKeys {
|
||||
pms: ValueRef,
|
||||
randoms: Randoms,
|
||||
hash_state: HashState,
|
||||
keys: crate::SessionKeys,
|
||||
cf_vd: VerifyData,
|
||||
sf_vd: VerifyData,
|
||||
},
|
||||
ClientFinished {
|
||||
hash_state: HashState,
|
||||
cf_vd: VerifyData,
|
||||
sf_vd: VerifyData,
|
||||
},
|
||||
ServerFinished {
|
||||
hash_state: HashState,
|
||||
sf_vd: VerifyData,
|
||||
},
|
||||
Complete,
|
||||
Error,
|
||||
}
|
||||
|
||||
impl State {
|
||||
fn take(&mut self) -> State {
|
||||
std::mem::replace(self, State::Error)
|
||||
}
|
||||
}
|
||||
|
||||
/// MPC PRF for computing TLS HMAC-SHA256 PRF.
|
||||
pub struct MpcPrf<E> {
|
||||
config: PrfConfig,
|
||||
pub struct MpcPrf {
|
||||
mode: Mode,
|
||||
state: State,
|
||||
thread_0: E,
|
||||
thread_1: E,
|
||||
}
|
||||
|
||||
impl<E> Debug for MpcPrf<E> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("MpcPrf")
|
||||
.field("config", &self.config)
|
||||
.field("state", &self.state)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<E> MpcPrf<E>
|
||||
where
|
||||
E: Load + Memory + Execute + Decode + Send,
|
||||
{
|
||||
impl MpcPrf {
|
||||
/// Creates a new instance of the PRF.
|
||||
pub fn new(config: PrfConfig, thread_0: E, thread_1: E) -> MpcPrf<E> {
|
||||
MpcPrf {
|
||||
config,
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// `mode` - The PRF mode.
|
||||
pub fn new(mode: Mode) -> MpcPrf {
|
||||
Self {
|
||||
mode,
|
||||
state: State::Initialized,
|
||||
thread_0,
|
||||
thread_1,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the MPC thread.
|
||||
pub fn thread_mut(&mut self) -> &mut E {
|
||||
&mut self.thread_0
|
||||
}
|
||||
|
||||
/// Executes a circuit which computes TLS session keys.
|
||||
/// Allocates resources for the PRF.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `vm` - Virtual machine.
|
||||
/// * `pms` - The pre-master secret.
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn execute_session_keys(
|
||||
pub fn alloc(
|
||||
&mut self,
|
||||
server_random: [u8; 32],
|
||||
) -> Result<SessionKeys, PrfError> {
|
||||
let State::SessionKeys {
|
||||
pms,
|
||||
randoms: randoms_refs,
|
||||
hash_state,
|
||||
keys,
|
||||
cf_vd,
|
||||
sf_vd,
|
||||
} = self.state.take()
|
||||
else {
|
||||
return Err(PrfError::state("session keys not initialized"));
|
||||
};
|
||||
|
||||
let circ = SESSION_KEYS_CIRC
|
||||
.get()
|
||||
.expect("session keys circuit is set");
|
||||
|
||||
self.thread_0
|
||||
.assign(&randoms_refs.server_random, server_random)?;
|
||||
|
||||
self.thread_0
|
||||
.execute(
|
||||
circ.clone(),
|
||||
&[pms, randoms_refs.client_random, randoms_refs.server_random],
|
||||
&[
|
||||
keys.client_write_key.clone(),
|
||||
keys.server_write_key.clone(),
|
||||
keys.client_iv.clone(),
|
||||
keys.server_iv.clone(),
|
||||
hash_state.ms_outer_hash_state.clone(),
|
||||
hash_state.ms_inner_hash_state.clone(),
|
||||
],
|
||||
)
|
||||
.await?;
|
||||
|
||||
self.state = State::ClientFinished {
|
||||
hash_state,
|
||||
cf_vd,
|
||||
sf_vd,
|
||||
};
|
||||
|
||||
Ok(keys)
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn execute_cf_vd(&mut self, handshake_hash: [u8; 32]) -> Result<[u8; 12], PrfError> {
|
||||
let State::ClientFinished {
|
||||
hash_state,
|
||||
cf_vd,
|
||||
sf_vd,
|
||||
} = self.state.take()
|
||||
else {
|
||||
return Err(PrfError::state("PRF not in client finished state"));
|
||||
};
|
||||
|
||||
let circ = CLIENT_VD_CIRC.get().expect("client vd circuit is set");
|
||||
|
||||
self.thread_0
|
||||
.assign(&cf_vd.handshake_hash, handshake_hash)?;
|
||||
|
||||
self.thread_0
|
||||
.execute(
|
||||
circ.clone(),
|
||||
&[
|
||||
hash_state.ms_outer_hash_state.clone(),
|
||||
hash_state.ms_inner_hash_state.clone(),
|
||||
cf_vd.handshake_hash,
|
||||
],
|
||||
&[cf_vd.vd.clone()],
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut outputs = self.thread_0.decode(&[cf_vd.vd]).await?;
|
||||
let vd: [u8; 12] = outputs.remove(0).try_into().expect("vd is 12 bytes");
|
||||
|
||||
self.state = State::ServerFinished { hash_state, sf_vd };
|
||||
|
||||
Ok(vd)
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn execute_sf_vd(&mut self, handshake_hash: [u8; 32]) -> Result<[u8; 12], PrfError> {
|
||||
let State::ServerFinished { hash_state, sf_vd } = self.state.take() else {
|
||||
return Err(PrfError::state("PRF not in server finished state"));
|
||||
};
|
||||
|
||||
let circ = SERVER_VD_CIRC.get().expect("server vd circuit is set");
|
||||
|
||||
self.thread_0
|
||||
.assign(&sf_vd.handshake_hash, handshake_hash)?;
|
||||
|
||||
self.thread_0
|
||||
.execute(
|
||||
circ.clone(),
|
||||
&[
|
||||
hash_state.ms_outer_hash_state,
|
||||
hash_state.ms_inner_hash_state,
|
||||
sf_vd.handshake_hash,
|
||||
],
|
||||
&[sf_vd.vd.clone()],
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut outputs = self.thread_0.decode(&[sf_vd.vd]).await?;
|
||||
let vd: [u8; 12] = outputs.remove(0).try_into().expect("vd is 12 bytes");
|
||||
|
||||
self.state = State::Complete;
|
||||
|
||||
Ok(vd)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<E> Prf for MpcPrf<E>
|
||||
where
|
||||
E: Memory + Load + Execute + Decode + Send,
|
||||
{
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn setup(&mut self, pms: ValueRef) -> Result<SessionKeys, PrfError> {
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
pms: Array<U8, 32>,
|
||||
) -> Result<PrfOutput, PrfError> {
|
||||
let State::Initialized = self.state.take() else {
|
||||
return Err(PrfError::state("PRF not in initialized state"));
|
||||
};
|
||||
|
||||
let thread = &mut self.thread_0;
|
||||
let mode = self.mode;
|
||||
let pms: Vector<U8> = pms.into();
|
||||
|
||||
let randoms = Randoms {
|
||||
// The client random is kept private so that the handshake transcript
|
||||
// hashes do not leak information about the server's identity.
|
||||
client_random: thread.new_input::<[u8; 32]>(
|
||||
"client_random",
|
||||
match self.config.role {
|
||||
Role::Leader => Visibility::Private,
|
||||
Role::Follower => Visibility::Blind,
|
||||
},
|
||||
)?,
|
||||
server_random: thread.new_input::<[u8; 32]>("server_random", Visibility::Public)?,
|
||||
};
|
||||
let outer_partial_pms = compute_partial(vm, pms, OPAD)?;
|
||||
let inner_partial_pms = compute_partial(vm, pms, IPAD)?;
|
||||
|
||||
let keys = SessionKeys {
|
||||
client_write_key: thread.new_output::<[u8; 16]>("client_write_key")?,
|
||||
server_write_key: thread.new_output::<[u8; 16]>("server_write_key")?,
|
||||
client_iv: thread.new_output::<[u8; 4]>("client_write_iv")?,
|
||||
server_iv: thread.new_output::<[u8; 4]>("server_write_iv")?,
|
||||
};
|
||||
let master_secret =
|
||||
Prf::alloc_master_secret(mode, vm, outer_partial_pms, inner_partial_pms)?;
|
||||
let ms = master_secret.output();
|
||||
let ms = merge_outputs(vm, ms, 48)?;
|
||||
|
||||
let hash_state = HashState {
|
||||
ms_outer_hash_state: thread.new_output::<[u32; 8]>("ms_outer_hash_state")?,
|
||||
ms_inner_hash_state: thread.new_output::<[u32; 8]>("ms_inner_hash_state")?,
|
||||
};
|
||||
let outer_partial_ms = compute_partial(vm, ms, OPAD)?;
|
||||
let inner_partial_ms = compute_partial(vm, ms, IPAD)?;
|
||||
|
||||
let cf_vd = VerifyData {
|
||||
handshake_hash: thread.new_input::<[u8; 32]>("cf_hash", Visibility::Public)?,
|
||||
vd: thread.new_output::<[u8; 12]>("cf_vd")?,
|
||||
};
|
||||
|
||||
let sf_vd = VerifyData {
|
||||
handshake_hash: thread.new_input::<[u8; 32]>("sf_hash", Visibility::Public)?,
|
||||
vd: thread.new_output::<[u8; 12]>("sf_vd")?,
|
||||
};
|
||||
let key_expansion =
|
||||
Prf::alloc_key_expansion(mode, vm, outer_partial_ms.clone(), inner_partial_ms.clone())?;
|
||||
let client_finished = Prf::alloc_client_finished(
|
||||
mode,
|
||||
vm,
|
||||
outer_partial_ms.clone(),
|
||||
inner_partial_ms.clone(),
|
||||
)?;
|
||||
let server_finished = Prf::alloc_server_finished(
|
||||
mode,
|
||||
vm,
|
||||
outer_partial_ms.clone(),
|
||||
inner_partial_ms.clone(),
|
||||
)?;
|
||||
|
||||
self.state = State::SessionKeys {
|
||||
pms,
|
||||
randoms,
|
||||
hash_state,
|
||||
keys: keys.clone(),
|
||||
cf_vd,
|
||||
sf_vd,
|
||||
client_random: None,
|
||||
master_secret,
|
||||
key_expansion,
|
||||
client_finished,
|
||||
server_finished,
|
||||
};
|
||||
|
||||
Ok(keys)
|
||||
self.state.prf_output(vm)
|
||||
}
|
||||
|
||||
/// Sets the client random.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `random` - The client random.
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn set_client_random(&mut self, client_random: Option<[u8; 32]>) -> Result<(), PrfError> {
|
||||
let State::SessionKeys { randoms, .. } = &self.state else {
|
||||
pub fn set_client_random(&mut self, random: [u8; 32]) -> Result<(), PrfError> {
|
||||
let State::SessionKeys { client_random, .. } = &mut self.state else {
|
||||
return Err(PrfError::state("PRF not set up"));
|
||||
};
|
||||
|
||||
if self.config.role == Role::Leader {
|
||||
let Some(client_random) = client_random else {
|
||||
return Err(PrfError::role("leader must provide client random"));
|
||||
};
|
||||
|
||||
self.thread_0
|
||||
.assign(&randoms.client_random, client_random)?;
|
||||
} else if client_random.is_some() {
|
||||
return Err(PrfError::role("only leader can set client random"));
|
||||
}
|
||||
|
||||
self.thread_0
|
||||
.commit(&[randoms.client_random.clone()])
|
||||
.await?;
|
||||
|
||||
*client_random = Some(random);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Sets the server random.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `random` - The server random.
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn preprocess(&mut self) -> Result<(), PrfError> {
|
||||
pub fn set_server_random(&mut self, random: [u8; 32]) -> Result<(), PrfError> {
|
||||
let State::SessionKeys {
|
||||
pms,
|
||||
randoms,
|
||||
hash_state,
|
||||
keys,
|
||||
cf_vd,
|
||||
sf_vd,
|
||||
} = self.state.take()
|
||||
client_random,
|
||||
master_secret,
|
||||
key_expansion,
|
||||
..
|
||||
} = &mut self.state
|
||||
else {
|
||||
return Err(PrfError::state("PRF not set up"));
|
||||
};
|
||||
|
||||
// Builds all circuits in parallel and preprocesses the session keys circuit.
|
||||
futures::try_join!(
|
||||
async {
|
||||
if SESSION_KEYS_CIRC.get().is_none() {
|
||||
_ = SESSION_KEYS_CIRC.set(CpuBackend::blocking(build_session_keys).await);
|
||||
}
|
||||
let client_random = client_random.expect("Client random should have been set by now");
|
||||
let server_random = random;
|
||||
|
||||
let circ = SESSION_KEYS_CIRC
|
||||
.get()
|
||||
.expect("session keys circuit should be built");
|
||||
let mut seed_ms = client_random.to_vec();
|
||||
seed_ms.extend_from_slice(&server_random);
|
||||
master_secret.set_start_seed(seed_ms);
|
||||
|
||||
self.thread_0
|
||||
.load(
|
||||
circ.clone(),
|
||||
&[
|
||||
pms.clone(),
|
||||
randoms.client_random.clone(),
|
||||
randoms.server_random.clone(),
|
||||
],
|
||||
&[
|
||||
keys.client_write_key.clone(),
|
||||
keys.server_write_key.clone(),
|
||||
keys.client_iv.clone(),
|
||||
keys.server_iv.clone(),
|
||||
hash_state.ms_outer_hash_state.clone(),
|
||||
hash_state.ms_inner_hash_state.clone(),
|
||||
],
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok::<_, PrfError>(())
|
||||
},
|
||||
async {
|
||||
if CLIENT_VD_CIRC.get().is_none() {
|
||||
_ = CLIENT_VD_CIRC
|
||||
.set(CpuBackend::blocking(move || build_verify_data(CF_LABEL)).await);
|
||||
}
|
||||
|
||||
Ok::<_, PrfError>(())
|
||||
},
|
||||
async {
|
||||
if SERVER_VD_CIRC.get().is_none() {
|
||||
_ = SERVER_VD_CIRC
|
||||
.set(CpuBackend::blocking(move || build_verify_data(SF_LABEL)).await);
|
||||
}
|
||||
|
||||
Ok::<_, PrfError>(())
|
||||
}
|
||||
)?;
|
||||
|
||||
// Finishes preprocessing the verify data circuits.
|
||||
futures::try_join!(
|
||||
async {
|
||||
self.thread_0
|
||||
.load(
|
||||
CLIENT_VD_CIRC
|
||||
.get()
|
||||
.expect("client finished circuit should be built")
|
||||
.clone(),
|
||||
&[
|
||||
hash_state.ms_outer_hash_state.clone(),
|
||||
hash_state.ms_inner_hash_state.clone(),
|
||||
cf_vd.handshake_hash.clone(),
|
||||
],
|
||||
&[cf_vd.vd.clone()],
|
||||
)
|
||||
.await
|
||||
},
|
||||
async {
|
||||
self.thread_1
|
||||
.load(
|
||||
SERVER_VD_CIRC
|
||||
.get()
|
||||
.expect("server finished circuit should be built")
|
||||
.clone(),
|
||||
&[
|
||||
hash_state.ms_outer_hash_state.clone(),
|
||||
hash_state.ms_inner_hash_state.clone(),
|
||||
sf_vd.handshake_hash.clone(),
|
||||
],
|
||||
&[sf_vd.vd.clone()],
|
||||
)
|
||||
.await
|
||||
}
|
||||
)?;
|
||||
|
||||
self.state = State::SessionKeys {
|
||||
pms,
|
||||
randoms,
|
||||
hash_state,
|
||||
keys,
|
||||
cf_vd,
|
||||
sf_vd,
|
||||
};
|
||||
let mut seed_ke = server_random.to_vec();
|
||||
seed_ke.extend_from_slice(&client_random);
|
||||
key_expansion.set_start_seed(seed_ke);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Sets the client finished handshake hash.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `handshake_hash` - The handshake transcript hash.
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn compute_client_finished_vd(
|
||||
&mut self,
|
||||
handshake_hash: [u8; 32],
|
||||
) -> Result<[u8; 12], PrfError> {
|
||||
self.execute_cf_vd(handshake_hash).await
|
||||
pub fn set_cf_hash(&mut self, handshake_hash: [u8; 32]) -> Result<(), PrfError> {
|
||||
let State::ClientFinished {
|
||||
client_finished, ..
|
||||
} = &mut self.state
|
||||
else {
|
||||
return Err(PrfError::state("PRF not in client finished state"));
|
||||
};
|
||||
|
||||
let seed_cf = handshake_hash.to_vec();
|
||||
client_finished.set_start_seed(seed_cf);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Sets the server finished handshake hash.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `handshake_hash` - The handshake transcript hash.
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn compute_server_finished_vd(
|
||||
&mut self,
|
||||
handshake_hash: [u8; 32],
|
||||
) -> Result<[u8; 12], PrfError> {
|
||||
self.execute_sf_vd(handshake_hash).await
|
||||
pub fn set_sf_hash(&mut self, handshake_hash: [u8; 32]) -> Result<(), PrfError> {
|
||||
let State::ServerFinished { server_finished } = &mut self.state else {
|
||||
return Err(PrfError::state("PRF not in server finished state"));
|
||||
};
|
||||
|
||||
let seed_sf = handshake_hash.to_vec();
|
||||
server_finished.set_start_seed(seed_sf);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn compute_session_keys(
|
||||
&mut self,
|
||||
server_random: [u8; 32],
|
||||
) -> Result<SessionKeys, PrfError> {
|
||||
self.execute_session_keys(server_random).await
|
||||
/// Returns if the PRF needs to be flushed.
|
||||
pub fn wants_flush(&self) -> bool {
|
||||
match &self.state {
|
||||
State::Initialized => false,
|
||||
State::SessionKeys {
|
||||
master_secret,
|
||||
key_expansion,
|
||||
..
|
||||
} => master_secret.wants_flush() || key_expansion.wants_flush(),
|
||||
State::ClientFinished {
|
||||
client_finished, ..
|
||||
} => client_finished.wants_flush(),
|
||||
State::ServerFinished { server_finished } => server_finished.wants_flush(),
|
||||
State::Complete => false,
|
||||
State::Error => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Flushes the PRF.
|
||||
pub fn flush(&mut self, vm: &mut dyn Vm<Binary>) -> Result<(), PrfError> {
|
||||
self.state = match self.state.take() {
|
||||
State::SessionKeys {
|
||||
client_random,
|
||||
mut master_secret,
|
||||
mut key_expansion,
|
||||
client_finished,
|
||||
server_finished,
|
||||
} => {
|
||||
master_secret.flush(vm)?;
|
||||
key_expansion.flush(vm)?;
|
||||
|
||||
if !master_secret.wants_flush() && !key_expansion.wants_flush() {
|
||||
State::ClientFinished {
|
||||
client_finished,
|
||||
server_finished,
|
||||
}
|
||||
} else {
|
||||
State::SessionKeys {
|
||||
client_random,
|
||||
master_secret,
|
||||
key_expansion,
|
||||
client_finished,
|
||||
server_finished,
|
||||
}
|
||||
}
|
||||
}
|
||||
State::ClientFinished {
|
||||
mut client_finished,
|
||||
server_finished,
|
||||
} => {
|
||||
client_finished.flush(vm)?;
|
||||
|
||||
if !client_finished.wants_flush() {
|
||||
State::ServerFinished { server_finished }
|
||||
} else {
|
||||
State::ClientFinished {
|
||||
client_finished,
|
||||
server_finished,
|
||||
}
|
||||
}
|
||||
}
|
||||
State::ServerFinished {
|
||||
mut server_finished,
|
||||
} => {
|
||||
server_finished.flush(vm)?;
|
||||
|
||||
if !server_finished.wants_flush() {
|
||||
State::Complete
|
||||
} else {
|
||||
State::ServerFinished { server_finished }
|
||||
}
|
||||
}
|
||||
other => other,
|
||||
};
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Depending on the provided `mask` computes and returns `outer_partial` or
|
||||
/// `inner_partial` for HMAC-SHA256.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// * `vm` - Virtual machine.
|
||||
/// * `key` - Key to pad and xor.
|
||||
/// * `mask`- Mask used for padding.
|
||||
fn compute_partial(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
key: Vector<U8>,
|
||||
mask: [u8; 64],
|
||||
) -> Result<Sha256, PrfError> {
|
||||
let xor = Arc::new(xor(8 * 64));
|
||||
|
||||
let additional_len = 64 - key.len();
|
||||
let padding = vec![0_u8; additional_len];
|
||||
|
||||
let padding_ref: Vector<U8> = vm.alloc_vec(additional_len).map_err(PrfError::vm)?;
|
||||
vm.mark_public(padding_ref).map_err(PrfError::vm)?;
|
||||
vm.assign(padding_ref, padding).map_err(PrfError::vm)?;
|
||||
vm.commit(padding_ref).map_err(PrfError::vm)?;
|
||||
|
||||
let mask_ref: Array<U8, 64> = vm.alloc().map_err(PrfError::vm)?;
|
||||
vm.mark_public(mask_ref).map_err(PrfError::vm)?;
|
||||
vm.assign(mask_ref, mask).map_err(PrfError::vm)?;
|
||||
vm.commit(mask_ref).map_err(PrfError::vm)?;
|
||||
|
||||
let xor = Call::builder(xor)
|
||||
.arg(key)
|
||||
.arg(padding_ref)
|
||||
.arg(mask_ref)
|
||||
.build()
|
||||
.map_err(PrfError::vm)?;
|
||||
let key_padded: Vector<U8> = vm.call(xor).map_err(PrfError::vm)?;
|
||||
|
||||
let mut sha = Sha256::new_with_init(vm)?;
|
||||
sha.update(&key_padded);
|
||||
sha.compress(vm)?;
|
||||
Ok(sha)
|
||||
}
|
||||
|
||||
fn merge_outputs(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
inputs: Vec<Array<U8, 32>>,
|
||||
output_bytes: usize,
|
||||
) -> Result<Vector<U8>, PrfError> {
|
||||
assert!(output_bytes <= 32 * inputs.len());
|
||||
|
||||
let bits = Array::<U8, 32>::SIZE * inputs.len();
|
||||
let circ = gen_merge_circ(bits);
|
||||
|
||||
let mut builder = Call::builder(circ);
|
||||
for &input in inputs.iter() {
|
||||
builder = builder.arg(input);
|
||||
}
|
||||
let call = builder.build().map_err(PrfError::vm)?;
|
||||
|
||||
let mut output: Vector<U8> = vm.call(call).map_err(PrfError::vm)?;
|
||||
output.truncate(output_bytes);
|
||||
Ok(output)
|
||||
}
|
||||
|
||||
fn gen_merge_circ(size: usize) -> Arc<Circuit> {
|
||||
let mut builder = CircuitBuilder::new();
|
||||
let inputs = (0..size).map(|_| builder.add_input()).collect::<Vec<_>>();
|
||||
|
||||
for input in inputs.chunks_exact(8) {
|
||||
for byte in input.chunks_exact(8) {
|
||||
for &feed in byte.iter() {
|
||||
let output = builder.add_id_gate(feed);
|
||||
builder.add_output(output);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Arc::new(builder.build().expect("merge circuit is valid"))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::{prf::merge_outputs, test_utils::mock_vm};
|
||||
use mpz_common::context::test_st_context;
|
||||
use mpz_vm_core::{
|
||||
memory::{binary::U8, Array, MemoryExt, ViewExt},
|
||||
Execute,
|
||||
};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_merge_outputs() {
|
||||
let (mut ctx_a, mut ctx_b) = test_st_context(8);
|
||||
let (mut leader, mut follower) = mock_vm();
|
||||
|
||||
let input1: [u8; 32] = std::array::from_fn(|i| i as u8);
|
||||
let input2: [u8; 32] = std::array::from_fn(|i| i as u8 + 32);
|
||||
|
||||
let mut expected = input1.to_vec();
|
||||
expected.extend_from_slice(&input2);
|
||||
expected.truncate(48);
|
||||
|
||||
// leader
|
||||
let input1_leader: Array<U8, 32> = leader.alloc().unwrap();
|
||||
let input2_leader: Array<U8, 32> = leader.alloc().unwrap();
|
||||
|
||||
leader.mark_public(input1_leader).unwrap();
|
||||
leader.mark_public(input2_leader).unwrap();
|
||||
|
||||
leader.assign(input1_leader, input1).unwrap();
|
||||
leader.assign(input2_leader, input2).unwrap();
|
||||
|
||||
leader.commit(input1_leader).unwrap();
|
||||
leader.commit(input2_leader).unwrap();
|
||||
|
||||
let merged_leader =
|
||||
merge_outputs(&mut leader, vec![input1_leader, input2_leader], 48).unwrap();
|
||||
let mut merged_leader = leader.decode(merged_leader).unwrap();
|
||||
|
||||
// follower
|
||||
let input1_follower: Array<U8, 32> = follower.alloc().unwrap();
|
||||
let input2_follower: Array<U8, 32> = follower.alloc().unwrap();
|
||||
|
||||
follower.mark_public(input1_follower).unwrap();
|
||||
follower.mark_public(input2_follower).unwrap();
|
||||
|
||||
follower.assign(input1_follower, input1).unwrap();
|
||||
follower.assign(input2_follower, input2).unwrap();
|
||||
|
||||
follower.commit(input1_follower).unwrap();
|
||||
follower.commit(input2_follower).unwrap();
|
||||
|
||||
let merged_follower =
|
||||
merge_outputs(&mut follower, vec![input1_follower, input2_follower], 48).unwrap();
|
||||
let mut merged_follower = follower.decode(merged_follower).unwrap();
|
||||
|
||||
tokio::try_join!(
|
||||
leader.execute_all(&mut ctx_a),
|
||||
follower.execute_all(&mut ctx_b)
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let merged_leader = merged_leader.try_recv().unwrap().unwrap();
|
||||
let merged_follower = merged_follower.try_recv().unwrap().unwrap();
|
||||
|
||||
assert_eq!(merged_leader, merged_follower);
|
||||
assert_eq!(merged_leader, expected);
|
||||
}
|
||||
}
|
||||
|
||||
257
crates/components/hmac-sha256/src/prf/function.rs
Normal file
257
crates/components/hmac-sha256/src/prf/function.rs
Normal file
@@ -0,0 +1,257 @@
|
||||
//! Provides [`Prf`], for computing the TLS 1.2 PRF.
|
||||
|
||||
use crate::{Mode, PrfError};
|
||||
use mpz_hash::sha256::Sha256;
|
||||
use mpz_vm_core::{
|
||||
memory::{
|
||||
binary::{Binary, U8},
|
||||
Array,
|
||||
},
|
||||
Vm,
|
||||
};
|
||||
|
||||
mod normal;
|
||||
mod reduced;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) enum Prf {
|
||||
Reduced(reduced::PrfFunction),
|
||||
Normal(normal::PrfFunction),
|
||||
}
|
||||
|
||||
impl Prf {
|
||||
pub(crate) fn alloc_master_secret(
|
||||
mode: Mode,
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
outer_partial: Sha256,
|
||||
inner_partial: Sha256,
|
||||
) -> Result<Self, PrfError> {
|
||||
let prf = match mode {
|
||||
Mode::Reduced => Self::Reduced(reduced::PrfFunction::alloc_master_secret(
|
||||
vm,
|
||||
outer_partial,
|
||||
inner_partial,
|
||||
)?),
|
||||
Mode::Normal => Self::Normal(normal::PrfFunction::alloc_master_secret(
|
||||
vm,
|
||||
outer_partial,
|
||||
inner_partial,
|
||||
)?),
|
||||
};
|
||||
Ok(prf)
|
||||
}
|
||||
|
||||
pub(crate) fn alloc_key_expansion(
|
||||
mode: Mode,
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
outer_partial: Sha256,
|
||||
inner_partial: Sha256,
|
||||
) -> Result<Self, PrfError> {
|
||||
let prf = match mode {
|
||||
Mode::Reduced => Self::Reduced(reduced::PrfFunction::alloc_key_expansion(
|
||||
vm,
|
||||
outer_partial,
|
||||
inner_partial,
|
||||
)?),
|
||||
Mode::Normal => Self::Normal(normal::PrfFunction::alloc_key_expansion(
|
||||
vm,
|
||||
outer_partial,
|
||||
inner_partial,
|
||||
)?),
|
||||
};
|
||||
Ok(prf)
|
||||
}
|
||||
|
||||
pub(crate) fn alloc_client_finished(
|
||||
config: Mode,
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
outer_partial: Sha256,
|
||||
inner_partial: Sha256,
|
||||
) -> Result<Self, PrfError> {
|
||||
let prf = match config {
|
||||
Mode::Reduced => Self::Reduced(reduced::PrfFunction::alloc_client_finished(
|
||||
vm,
|
||||
outer_partial,
|
||||
inner_partial,
|
||||
)?),
|
||||
Mode::Normal => Self::Normal(normal::PrfFunction::alloc_client_finished(
|
||||
vm,
|
||||
outer_partial,
|
||||
inner_partial,
|
||||
)?),
|
||||
};
|
||||
Ok(prf)
|
||||
}
|
||||
|
||||
pub(crate) fn alloc_server_finished(
|
||||
config: Mode,
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
outer_partial: Sha256,
|
||||
inner_partial: Sha256,
|
||||
) -> Result<Self, PrfError> {
|
||||
let prf = match config {
|
||||
Mode::Reduced => Self::Reduced(reduced::PrfFunction::alloc_server_finished(
|
||||
vm,
|
||||
outer_partial,
|
||||
inner_partial,
|
||||
)?),
|
||||
Mode::Normal => Self::Normal(normal::PrfFunction::alloc_server_finished(
|
||||
vm,
|
||||
outer_partial,
|
||||
inner_partial,
|
||||
)?),
|
||||
};
|
||||
Ok(prf)
|
||||
}
|
||||
|
||||
pub(crate) fn wants_flush(&self) -> bool {
|
||||
match self {
|
||||
Prf::Reduced(prf) => prf.wants_flush(),
|
||||
Prf::Normal(prf) => prf.wants_flush(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn flush(&mut self, vm: &mut dyn Vm<Binary>) -> Result<(), PrfError> {
|
||||
match self {
|
||||
Prf::Reduced(prf) => prf.flush(vm),
|
||||
Prf::Normal(prf) => prf.flush(vm),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn set_start_seed(&mut self, seed: Vec<u8>) {
|
||||
match self {
|
||||
Prf::Reduced(prf) => prf.set_start_seed(seed),
|
||||
Prf::Normal(prf) => prf.set_start_seed(seed),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn output(&self) -> Vec<Array<U8, 32>> {
|
||||
match self {
|
||||
Prf::Reduced(prf) => prf.output(),
|
||||
Prf::Normal(prf) => prf.output(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::{
|
||||
prf::{compute_partial, function::Prf},
|
||||
test_utils::{mock_vm, phash},
|
||||
Mode,
|
||||
};
|
||||
use mpz_common::context::test_st_context;
|
||||
use mpz_vm_core::{
|
||||
memory::{binary::U8, Array, MemoryExt, ViewExt},
|
||||
Execute,
|
||||
};
|
||||
use rand::{rngs::ThreadRng, Rng};
|
||||
|
||||
const IPAD: [u8; 64] = [0x36; 64];
|
||||
const OPAD: [u8; 64] = [0x5c; 64];
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_phash_reduced() {
|
||||
let mode = Mode::Reduced;
|
||||
test_phash(mode).await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_phash_normal() {
|
||||
let mode = Mode::Normal;
|
||||
test_phash(mode).await;
|
||||
}
|
||||
|
||||
async fn test_phash(mode: Mode) {
|
||||
let mut rng = ThreadRng::default();
|
||||
|
||||
let (mut ctx_a, mut ctx_b) = test_st_context(8);
|
||||
let (mut leader, mut follower) = mock_vm();
|
||||
|
||||
let key: [u8; 32] = rng.random();
|
||||
let start_seed: Vec<u8> = vec![42; 64];
|
||||
|
||||
let mut label_seed = b"master secret".to_vec();
|
||||
label_seed.extend_from_slice(&start_seed);
|
||||
let iterations = 2;
|
||||
|
||||
let leader_key: Array<U8, 32> = leader.alloc().unwrap();
|
||||
leader.mark_public(leader_key).unwrap();
|
||||
leader.assign(leader_key, key).unwrap();
|
||||
leader.commit(leader_key).unwrap();
|
||||
|
||||
let outer_partial_leader = compute_partial(&mut leader, leader_key.into(), OPAD).unwrap();
|
||||
let inner_partial_leader = compute_partial(&mut leader, leader_key.into(), IPAD).unwrap();
|
||||
|
||||
let mut prf_leader = Prf::alloc_master_secret(
|
||||
mode,
|
||||
&mut leader,
|
||||
outer_partial_leader,
|
||||
inner_partial_leader,
|
||||
)
|
||||
.unwrap();
|
||||
prf_leader.set_start_seed(start_seed.clone());
|
||||
|
||||
let mut prf_out_leader = vec![];
|
||||
for p in prf_leader.output() {
|
||||
let p_out = leader.decode(p).unwrap();
|
||||
prf_out_leader.push(p_out)
|
||||
}
|
||||
|
||||
let follower_key: Array<U8, 32> = follower.alloc().unwrap();
|
||||
follower.mark_public(follower_key).unwrap();
|
||||
follower.assign(follower_key, key).unwrap();
|
||||
follower.commit(follower_key).unwrap();
|
||||
|
||||
let outer_partial_follower =
|
||||
compute_partial(&mut follower, follower_key.into(), OPAD).unwrap();
|
||||
let inner_partial_follower =
|
||||
compute_partial(&mut follower, follower_key.into(), IPAD).unwrap();
|
||||
|
||||
let mut prf_follower = Prf::alloc_master_secret(
|
||||
mode,
|
||||
&mut follower,
|
||||
outer_partial_follower,
|
||||
inner_partial_follower,
|
||||
)
|
||||
.unwrap();
|
||||
prf_follower.set_start_seed(start_seed.clone());
|
||||
|
||||
let mut prf_out_follower = vec![];
|
||||
for p in prf_follower.output() {
|
||||
let p_out = follower.decode(p).unwrap();
|
||||
prf_out_follower.push(p_out)
|
||||
}
|
||||
|
||||
while prf_leader.wants_flush() || prf_follower.wants_flush() {
|
||||
tokio::try_join!(
|
||||
async {
|
||||
prf_leader.flush(&mut leader).unwrap();
|
||||
leader.execute_all(&mut ctx_a).await
|
||||
},
|
||||
async {
|
||||
prf_follower.flush(&mut follower).unwrap();
|
||||
follower.execute_all(&mut ctx_b).await
|
||||
}
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
assert_eq!(prf_out_leader.len(), 2);
|
||||
assert_eq!(prf_out_leader.len(), prf_out_follower.len());
|
||||
|
||||
let prf_result_leader: Vec<u8> = prf_out_leader
|
||||
.iter_mut()
|
||||
.flat_map(|p| p.try_recv().unwrap().unwrap())
|
||||
.collect();
|
||||
let prf_result_follower: Vec<u8> = prf_out_follower
|
||||
.iter_mut()
|
||||
.flat_map(|p| p.try_recv().unwrap().unwrap())
|
||||
.collect();
|
||||
|
||||
let expected = phash(key.to_vec(), &label_seed, iterations);
|
||||
|
||||
assert_eq!(prf_result_leader, prf_result_follower);
|
||||
assert_eq!(prf_result_leader, expected)
|
||||
}
|
||||
}
|
||||
174
crates/components/hmac-sha256/src/prf/function/normal.rs
Normal file
174
crates/components/hmac-sha256/src/prf/function/normal.rs
Normal file
@@ -0,0 +1,174 @@
|
||||
//! Computes the whole PRF in MPC.
|
||||
|
||||
use crate::{hmac::hmac_sha256, PrfError};
|
||||
use mpz_hash::sha256::Sha256;
|
||||
use mpz_vm_core::{
|
||||
memory::{
|
||||
binary::{Binary, U8},
|
||||
Array, MemoryExt, Vector, ViewExt,
|
||||
},
|
||||
Vm,
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct PrfFunction {
|
||||
// The label, e.g. "master secret".
|
||||
label: &'static [u8],
|
||||
state: State,
|
||||
// The start seed and the label, e.g. client_random + server_random + "master_secret".
|
||||
start_seed_label: Option<Vec<u8>>,
|
||||
a: Vec<PHash>,
|
||||
p: Vec<PHash>,
|
||||
}
|
||||
|
||||
impl PrfFunction {
|
||||
const MS_LABEL: &[u8] = b"master secret";
|
||||
const KEY_LABEL: &[u8] = b"key expansion";
|
||||
const CF_LABEL: &[u8] = b"client finished";
|
||||
const SF_LABEL: &[u8] = b"server finished";
|
||||
|
||||
pub(crate) fn alloc_master_secret(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
outer_partial: Sha256,
|
||||
inner_partial: Sha256,
|
||||
) -> Result<Self, PrfError> {
|
||||
Self::alloc(vm, Self::MS_LABEL, outer_partial, inner_partial, 48, 64)
|
||||
}
|
||||
|
||||
pub(crate) fn alloc_key_expansion(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
outer_partial: Sha256,
|
||||
inner_partial: Sha256,
|
||||
) -> Result<Self, PrfError> {
|
||||
Self::alloc(vm, Self::KEY_LABEL, outer_partial, inner_partial, 40, 64)
|
||||
}
|
||||
|
||||
pub(crate) fn alloc_client_finished(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
outer_partial: Sha256,
|
||||
inner_partial: Sha256,
|
||||
) -> Result<Self, PrfError> {
|
||||
Self::alloc(vm, Self::CF_LABEL, outer_partial, inner_partial, 12, 32)
|
||||
}
|
||||
|
||||
pub(crate) fn alloc_server_finished(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
outer_partial: Sha256,
|
||||
inner_partial: Sha256,
|
||||
) -> Result<Self, PrfError> {
|
||||
Self::alloc(vm, Self::SF_LABEL, outer_partial, inner_partial, 12, 32)
|
||||
}
|
||||
|
||||
pub(crate) fn wants_flush(&self) -> bool {
|
||||
let is_computing = match self.state {
|
||||
State::Computing => true,
|
||||
State::Finished => false,
|
||||
};
|
||||
is_computing && self.start_seed_label.is_some()
|
||||
}
|
||||
|
||||
pub(crate) fn flush(&mut self, vm: &mut dyn Vm<Binary>) -> Result<(), PrfError> {
|
||||
if let State::Computing = self.state {
|
||||
let a = self.a.first().expect("prf should be allocated");
|
||||
let msg = *a.msg.first().expect("message for prf should be present");
|
||||
|
||||
let msg_value = self
|
||||
.start_seed_label
|
||||
.clone()
|
||||
.expect("Start seed should have been set");
|
||||
|
||||
vm.assign(msg, msg_value).map_err(PrfError::vm)?;
|
||||
vm.commit(msg).map_err(PrfError::vm)?;
|
||||
|
||||
self.state = State::Finished;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn set_start_seed(&mut self, seed: Vec<u8>) {
|
||||
let mut start_seed_label = self.label.to_vec();
|
||||
start_seed_label.extend_from_slice(&seed);
|
||||
|
||||
self.start_seed_label = Some(start_seed_label);
|
||||
}
|
||||
|
||||
pub(crate) fn output(&self) -> Vec<Array<U8, 32>> {
|
||||
self.p.iter().map(|p| p.output).collect()
|
||||
}
|
||||
|
||||
fn alloc(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
label: &'static [u8],
|
||||
outer_partial: Sha256,
|
||||
inner_partial: Sha256,
|
||||
output_len: usize,
|
||||
seed_len: usize,
|
||||
) -> Result<Self, PrfError> {
|
||||
let mut prf = Self {
|
||||
label,
|
||||
state: State::Computing,
|
||||
start_seed_label: None,
|
||||
a: vec![],
|
||||
p: vec![],
|
||||
};
|
||||
|
||||
assert!(output_len > 0, "cannot compute 0 bytes for prf");
|
||||
|
||||
let iterations = output_len.div_ceil(32);
|
||||
|
||||
let msg_len_a = label.len() + seed_len;
|
||||
let seed_label_ref: Vector<U8> = vm.alloc_vec(msg_len_a).map_err(PrfError::vm)?;
|
||||
vm.mark_public(seed_label_ref).map_err(PrfError::vm)?;
|
||||
|
||||
let mut msg_a = seed_label_ref;
|
||||
for _ in 0..iterations {
|
||||
let a = PHash::alloc(vm, outer_partial.clone(), inner_partial.clone(), &[msg_a])?;
|
||||
msg_a = Vector::<U8>::from(a.output);
|
||||
prf.a.push(a);
|
||||
|
||||
let p = PHash::alloc(
|
||||
vm,
|
||||
outer_partial.clone(),
|
||||
inner_partial.clone(),
|
||||
&[msg_a, seed_label_ref],
|
||||
)?;
|
||||
prf.p.push(p);
|
||||
}
|
||||
|
||||
Ok(prf)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
enum State {
|
||||
Computing,
|
||||
Finished,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct PHash {
|
||||
msg: Vec<Vector<U8>>,
|
||||
output: Array<U8, 32>,
|
||||
}
|
||||
|
||||
impl PHash {
|
||||
fn alloc(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
outer_partial: Sha256,
|
||||
inner_partial: Sha256,
|
||||
msg: &[Vector<U8>],
|
||||
) -> Result<Self, PrfError> {
|
||||
let mut inner_local = inner_partial;
|
||||
|
||||
msg.iter().for_each(|m| inner_local.update(m));
|
||||
inner_local.compress(vm)?;
|
||||
let inner_local = inner_local.finalize(vm)?;
|
||||
|
||||
let output = hmac_sha256(vm, outer_partial, inner_local)?;
|
||||
let p_hash = Self {
|
||||
msg: msg.to_vec(),
|
||||
output,
|
||||
};
|
||||
Ok(p_hash)
|
||||
}
|
||||
}
|
||||
248
crates/components/hmac-sha256/src/prf/function/reduced.rs
Normal file
248
crates/components/hmac-sha256/src/prf/function/reduced.rs
Normal file
@@ -0,0 +1,248 @@
|
||||
//! Computes some hashes of the PRF locally.
|
||||
|
||||
use std::collections::VecDeque;
|
||||
|
||||
use crate::{hmac::hmac_sha256, sha256, state_to_bytes, PrfError};
|
||||
use mpz_core::bitvec::BitVec;
|
||||
use mpz_hash::sha256::Sha256;
|
||||
use mpz_vm_core::{
|
||||
memory::{
|
||||
binary::{Binary, U8},
|
||||
Array, DecodeFutureTyped, MemoryExt, ViewExt,
|
||||
},
|
||||
Vm,
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct PrfFunction {
|
||||
// The label, e.g. "master secret".
|
||||
label: &'static [u8],
|
||||
// The start seed and the label, e.g. client_random + server_random + "master_secret".
|
||||
start_seed_label: Option<Vec<u8>>,
|
||||
iterations: usize,
|
||||
state: PrfState,
|
||||
a: VecDeque<AHash>,
|
||||
p: VecDeque<PHash>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum PrfState {
|
||||
InnerPartial {
|
||||
inner_partial: DecodeFutureTyped<BitVec, [u32; 8]>,
|
||||
},
|
||||
ComputeA {
|
||||
iter: usize,
|
||||
inner_partial: [u32; 8],
|
||||
msg: Vec<u8>,
|
||||
},
|
||||
ComputeP {
|
||||
iter: usize,
|
||||
inner_partial: [u32; 8],
|
||||
a_output: DecodeFutureTyped<BitVec, [u8; 32]>,
|
||||
},
|
||||
Done,
|
||||
}
|
||||
|
||||
impl PrfFunction {
|
||||
const MS_LABEL: &[u8] = b"master secret";
|
||||
const KEY_LABEL: &[u8] = b"key expansion";
|
||||
const CF_LABEL: &[u8] = b"client finished";
|
||||
const SF_LABEL: &[u8] = b"server finished";
|
||||
|
||||
pub(crate) fn alloc_master_secret(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
outer_partial: Sha256,
|
||||
inner_partial: Sha256,
|
||||
) -> Result<Self, PrfError> {
|
||||
Self::alloc(vm, Self::MS_LABEL, outer_partial, inner_partial, 48)
|
||||
}
|
||||
|
||||
pub(crate) fn alloc_key_expansion(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
outer_partial: Sha256,
|
||||
inner_partial: Sha256,
|
||||
) -> Result<Self, PrfError> {
|
||||
Self::alloc(vm, Self::KEY_LABEL, outer_partial, inner_partial, 40)
|
||||
}
|
||||
|
||||
pub(crate) fn alloc_client_finished(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
outer_partial: Sha256,
|
||||
inner_partial: Sha256,
|
||||
) -> Result<Self, PrfError> {
|
||||
Self::alloc(vm, Self::CF_LABEL, outer_partial, inner_partial, 12)
|
||||
}
|
||||
|
||||
pub(crate) fn alloc_server_finished(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
outer_partial: Sha256,
|
||||
inner_partial: Sha256,
|
||||
) -> Result<Self, PrfError> {
|
||||
Self::alloc(vm, Self::SF_LABEL, outer_partial, inner_partial, 12)
|
||||
}
|
||||
|
||||
pub(crate) fn wants_flush(&self) -> bool {
|
||||
!matches!(self.state, PrfState::Done) && self.start_seed_label.is_some()
|
||||
}
|
||||
|
||||
pub(crate) fn flush(&mut self, vm: &mut dyn Vm<Binary>) -> Result<(), PrfError> {
|
||||
match &mut self.state {
|
||||
PrfState::InnerPartial { inner_partial } => {
|
||||
let Some(inner_partial) = inner_partial.try_recv().map_err(PrfError::vm)? else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
self.state = PrfState::ComputeA {
|
||||
iter: 1,
|
||||
inner_partial,
|
||||
msg: self
|
||||
.start_seed_label
|
||||
.clone()
|
||||
.expect("Start seed should have been set"),
|
||||
};
|
||||
self.flush(vm)?;
|
||||
}
|
||||
PrfState::ComputeA {
|
||||
iter,
|
||||
inner_partial,
|
||||
msg,
|
||||
} => {
|
||||
let a = self.a.pop_front().expect("Prf AHash should be present");
|
||||
assign_inner_local(vm, a.inner_local, *inner_partial, msg)?;
|
||||
|
||||
self.state = PrfState::ComputeP {
|
||||
iter: *iter,
|
||||
inner_partial: *inner_partial,
|
||||
a_output: a.output,
|
||||
};
|
||||
}
|
||||
PrfState::ComputeP {
|
||||
iter,
|
||||
inner_partial,
|
||||
a_output,
|
||||
} => {
|
||||
let Some(output) = a_output.try_recv().map_err(PrfError::vm)? else {
|
||||
return Ok(());
|
||||
};
|
||||
let p = self.p.pop_front().expect("Prf PHash should be present");
|
||||
|
||||
let mut msg = output.to_vec();
|
||||
msg.extend_from_slice(
|
||||
self.start_seed_label
|
||||
.as_ref()
|
||||
.expect("Start seed should have been set"),
|
||||
);
|
||||
|
||||
assign_inner_local(vm, p.inner_local, *inner_partial, &msg)?;
|
||||
|
||||
if *iter == self.iterations {
|
||||
self.state = PrfState::Done;
|
||||
} else {
|
||||
self.state = PrfState::ComputeA {
|
||||
iter: *iter + 1,
|
||||
inner_partial: *inner_partial,
|
||||
msg: output.to_vec(),
|
||||
};
|
||||
// We recurse, so that this PHash and the next AHash could
|
||||
// be computed in a single VM execute call.
|
||||
self.flush(vm)?;
|
||||
}
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn set_start_seed(&mut self, seed: Vec<u8>) {
|
||||
let mut start_seed_label = self.label.to_vec();
|
||||
start_seed_label.extend_from_slice(&seed);
|
||||
|
||||
self.start_seed_label = Some(start_seed_label);
|
||||
}
|
||||
|
||||
pub(crate) fn output(&self) -> Vec<Array<U8, 32>> {
|
||||
self.p.iter().map(|p| p.output).collect()
|
||||
}
|
||||
|
||||
fn alloc(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
label: &'static [u8],
|
||||
outer_partial: Sha256,
|
||||
inner_partial: Sha256,
|
||||
len: usize,
|
||||
) -> Result<Self, PrfError> {
|
||||
assert!(len > 0, "cannot compute 0 bytes for prf");
|
||||
|
||||
let iterations = len.div_ceil(32);
|
||||
|
||||
let (inner_partial, _) = inner_partial
|
||||
.state()
|
||||
.expect("state should be set for inner_partial");
|
||||
let inner_partial = vm.decode(inner_partial).map_err(PrfError::vm)?;
|
||||
|
||||
let mut prf = Self {
|
||||
label,
|
||||
start_seed_label: None,
|
||||
iterations,
|
||||
state: PrfState::InnerPartial { inner_partial },
|
||||
a: VecDeque::new(),
|
||||
p: VecDeque::new(),
|
||||
};
|
||||
|
||||
for _ in 0..iterations {
|
||||
// setup A[i]
|
||||
let inner_local: Array<U8, 32> = vm.alloc().map_err(PrfError::vm)?;
|
||||
let output = hmac_sha256(vm, outer_partial.clone(), inner_local)?;
|
||||
|
||||
let output = vm.decode(output).map_err(PrfError::vm)?;
|
||||
let a_hash = AHash {
|
||||
inner_local,
|
||||
output,
|
||||
};
|
||||
|
||||
prf.a.push_front(a_hash);
|
||||
|
||||
// setup P[i]
|
||||
let inner_local: Array<U8, 32> = vm.alloc().map_err(PrfError::vm)?;
|
||||
let output = hmac_sha256(vm, outer_partial.clone(), inner_local)?;
|
||||
let p_hash = PHash {
|
||||
inner_local,
|
||||
output,
|
||||
};
|
||||
prf.p.push_front(p_hash);
|
||||
}
|
||||
|
||||
Ok(prf)
|
||||
}
|
||||
}
|
||||
|
||||
fn assign_inner_local(
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
inner_local: Array<U8, 32>,
|
||||
inner_partial: [u32; 8],
|
||||
msg: &[u8],
|
||||
) -> Result<(), PrfError> {
|
||||
let inner_local_value = sha256(inner_partial, 64, msg);
|
||||
|
||||
vm.mark_public(inner_local).map_err(PrfError::vm)?;
|
||||
vm.assign(inner_local, state_to_bytes(inner_local_value))
|
||||
.map_err(PrfError::vm)?;
|
||||
vm.commit(inner_local).map_err(PrfError::vm)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Like PHash but stores the output as the decoding future because in the
|
||||
/// reduced Prf we need to decode this output.
|
||||
#[derive(Debug)]
|
||||
struct AHash {
|
||||
inner_local: Array<U8, 32>,
|
||||
output: DecodeFutureTyped<BitVec, [u8; 32]>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
struct PHash {
|
||||
inner_local: Array<U8, 32>,
|
||||
output: Array<U8, 32>,
|
||||
}
|
||||
103
crates/components/hmac-sha256/src/prf/state.rs
Normal file
103
crates/components/hmac-sha256/src/prf/state.rs
Normal file
@@ -0,0 +1,103 @@
|
||||
use crate::{
|
||||
prf::{function::Prf, merge_outputs},
|
||||
PrfError, PrfOutput, SessionKeys,
|
||||
};
|
||||
use mpz_vm_core::{
|
||||
memory::{
|
||||
binary::{Binary, U8},
|
||||
Array, FromRaw, ToRaw,
|
||||
},
|
||||
Vm,
|
||||
};
|
||||
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
#[derive(Debug)]
|
||||
pub(crate) enum State {
|
||||
Initialized,
|
||||
SessionKeys {
|
||||
client_random: Option<[u8; 32]>,
|
||||
master_secret: Prf,
|
||||
key_expansion: Prf,
|
||||
client_finished: Prf,
|
||||
server_finished: Prf,
|
||||
},
|
||||
ClientFinished {
|
||||
client_finished: Prf,
|
||||
server_finished: Prf,
|
||||
},
|
||||
ServerFinished {
|
||||
server_finished: Prf,
|
||||
},
|
||||
Complete,
|
||||
Error,
|
||||
}
|
||||
|
||||
impl State {
|
||||
pub(crate) fn take(&mut self) -> State {
|
||||
std::mem::replace(self, State::Error)
|
||||
}
|
||||
|
||||
pub(crate) fn prf_output(&self, vm: &mut dyn Vm<Binary>) -> Result<PrfOutput, PrfError> {
|
||||
let State::SessionKeys {
|
||||
key_expansion,
|
||||
client_finished,
|
||||
server_finished,
|
||||
..
|
||||
} = self
|
||||
else {
|
||||
return Err(PrfError::state(
|
||||
"Prf output can only be computed while in \"SessionKeys\" state",
|
||||
));
|
||||
};
|
||||
|
||||
let keys = get_session_keys(key_expansion.output(), vm)?;
|
||||
let cf_vd = get_client_finished_vd(client_finished.output(), vm)?;
|
||||
let sf_vd = get_server_finished_vd(server_finished.output(), vm)?;
|
||||
|
||||
let output = PrfOutput { keys, cf_vd, sf_vd };
|
||||
|
||||
Ok(output)
|
||||
}
|
||||
}
|
||||
|
||||
fn get_session_keys(
|
||||
output: Vec<Array<U8, 32>>,
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
) -> Result<SessionKeys, PrfError> {
|
||||
let mut keys = merge_outputs(vm, output, 40)?;
|
||||
debug_assert!(keys.len() == 40, "session keys len should be 40");
|
||||
|
||||
let server_iv = Array::<U8, 4>::try_from(keys.split_off(36)).unwrap();
|
||||
let client_iv = Array::<U8, 4>::try_from(keys.split_off(32)).unwrap();
|
||||
let server_write_key = Array::<U8, 16>::try_from(keys.split_off(16)).unwrap();
|
||||
let client_write_key = Array::<U8, 16>::try_from(keys).unwrap();
|
||||
|
||||
let session_keys = SessionKeys {
|
||||
client_write_key,
|
||||
server_write_key,
|
||||
client_iv,
|
||||
server_iv,
|
||||
};
|
||||
|
||||
Ok(session_keys)
|
||||
}
|
||||
|
||||
fn get_client_finished_vd(
|
||||
output: Vec<Array<U8, 32>>,
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
) -> Result<Array<U8, 12>, PrfError> {
|
||||
let cf_vd = merge_outputs(vm, output, 12)?;
|
||||
let cf_vd = <Array<U8, 12> as FromRaw<Binary>>::from_raw(cf_vd.to_raw());
|
||||
|
||||
Ok(cf_vd)
|
||||
}
|
||||
|
||||
fn get_server_finished_vd(
|
||||
output: Vec<Array<U8, 32>>,
|
||||
vm: &mut dyn Vm<Binary>,
|
||||
) -> Result<Array<U8, 12>, PrfError> {
|
||||
let sf_vd = merge_outputs(vm, output, 12)?;
|
||||
let sf_vd = <Array<U8, 12> as FromRaw<Binary>>::from_raw(sf_vd.to_raw());
|
||||
|
||||
Ok(sf_vd)
|
||||
}
|
||||
261
crates/components/hmac-sha256/src/test_utils.rs
Normal file
261
crates/components/hmac-sha256/src/test_utils.rs
Normal file
@@ -0,0 +1,261 @@
|
||||
use crate::{sha256, state_to_bytes};
|
||||
use mpz_garble::protocol::semihonest::{Evaluator, Garbler};
|
||||
use mpz_ot::ideal::cot::{ideal_cot, IdealCOTReceiver, IdealCOTSender};
|
||||
use mpz_vm_core::memory::correlated::Delta;
|
||||
use rand::{rngs::StdRng, Rng, SeedableRng};
|
||||
|
||||
pub(crate) const SHA256_IV: [u32; 8] = [
|
||||
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
|
||||
];
|
||||
|
||||
pub(crate) fn mock_vm() -> (Garbler<IdealCOTSender>, Evaluator<IdealCOTReceiver>) {
|
||||
let mut rng = StdRng::seed_from_u64(0);
|
||||
let delta = Delta::random(&mut rng);
|
||||
|
||||
let (cot_send, cot_recv) = ideal_cot(delta.into_inner());
|
||||
|
||||
let gen = Garbler::new(cot_send, [0u8; 16], delta);
|
||||
let ev = Evaluator::new(cot_recv);
|
||||
|
||||
(gen, ev)
|
||||
}
|
||||
|
||||
pub(crate) fn prf_ms(pms: [u8; 32], client_random: [u8; 32], server_random: [u8; 32]) -> [u8; 48] {
|
||||
let mut label_start_seed = b"master secret".to_vec();
|
||||
label_start_seed.extend_from_slice(&client_random);
|
||||
label_start_seed.extend_from_slice(&server_random);
|
||||
|
||||
let ms = phash(pms.to_vec(), &label_start_seed, 2)[..48].to_vec();
|
||||
|
||||
ms.try_into().unwrap()
|
||||
}
|
||||
|
||||
pub(crate) fn prf_keys(
|
||||
ms: [u8; 48],
|
||||
client_random: [u8; 32],
|
||||
server_random: [u8; 32],
|
||||
) -> [Vec<u8>; 4] {
|
||||
let mut label_start_seed = b"key expansion".to_vec();
|
||||
label_start_seed.extend_from_slice(&server_random);
|
||||
label_start_seed.extend_from_slice(&client_random);
|
||||
|
||||
let mut session_keys = phash(ms.to_vec(), &label_start_seed, 2)[..40].to_vec();
|
||||
|
||||
let server_iv = session_keys.split_off(36);
|
||||
let client_iv = session_keys.split_off(32);
|
||||
let server_write_key = session_keys.split_off(16);
|
||||
let client_write_key = session_keys;
|
||||
|
||||
[client_write_key, server_write_key, client_iv, server_iv]
|
||||
}
|
||||
|
||||
pub(crate) fn prf_cf_vd(ms: [u8; 48], hanshake_hash: [u8; 32]) -> Vec<u8> {
|
||||
let mut label_start_seed = b"client finished".to_vec();
|
||||
label_start_seed.extend_from_slice(&hanshake_hash);
|
||||
|
||||
phash(ms.to_vec(), &label_start_seed, 1)[..12].to_vec()
|
||||
}
|
||||
|
||||
pub(crate) fn prf_sf_vd(ms: [u8; 48], hanshake_hash: [u8; 32]) -> Vec<u8> {
|
||||
let mut label_start_seed = b"server finished".to_vec();
|
||||
label_start_seed.extend_from_slice(&hanshake_hash);
|
||||
|
||||
phash(ms.to_vec(), &label_start_seed, 1)[..12].to_vec()
|
||||
}
|
||||
|
||||
pub(crate) fn phash(key: Vec<u8>, seed: &[u8], iterations: usize) -> Vec<u8> {
|
||||
// A() is defined as:
|
||||
//
|
||||
// A(0) = seed
|
||||
// A(i) = HMAC_hash(secret, A(i-1))
|
||||
let mut a_cache: Vec<_> = Vec::with_capacity(iterations + 1);
|
||||
a_cache.push(seed.to_vec());
|
||||
|
||||
for i in 0..iterations {
|
||||
let a_i = hmac_sha256(key.clone(), &a_cache[i]);
|
||||
a_cache.push(a_i.to_vec());
|
||||
}
|
||||
|
||||
// HMAC_hash(secret, A(i) + seed)
|
||||
let mut output: Vec<_> = Vec::with_capacity(iterations * 32);
|
||||
for i in 0..iterations {
|
||||
let mut a_i_seed = a_cache[i + 1].clone();
|
||||
a_i_seed.extend_from_slice(seed);
|
||||
|
||||
let hash = hmac_sha256(key.clone(), &a_i_seed);
|
||||
output.extend_from_slice(&hash);
|
||||
}
|
||||
|
||||
output
|
||||
}
|
||||
|
||||
pub(crate) fn hmac_sha256(key: Vec<u8>, msg: &[u8]) -> [u8; 32] {
|
||||
let outer_partial = compute_outer_partial(key.clone());
|
||||
let inner_local = compute_inner_local(key, msg);
|
||||
|
||||
let hmac = sha256(outer_partial, 64, &state_to_bytes(inner_local));
|
||||
state_to_bytes(hmac)
|
||||
}
|
||||
|
||||
pub(crate) fn compute_outer_partial(mut key: Vec<u8>) -> [u32; 8] {
|
||||
assert!(key.len() <= 64);
|
||||
|
||||
key.resize(64, 0_u8);
|
||||
let key_padded: [u8; 64] = key
|
||||
.into_iter()
|
||||
.map(|b| b ^ 0x5c)
|
||||
.collect::<Vec<u8>>()
|
||||
.try_into()
|
||||
.unwrap();
|
||||
|
||||
compress_256(SHA256_IV, &key_padded)
|
||||
}
|
||||
|
||||
pub(crate) fn compute_inner_local(mut key: Vec<u8>, msg: &[u8]) -> [u32; 8] {
|
||||
assert!(key.len() <= 64);
|
||||
|
||||
key.resize(64, 0_u8);
|
||||
let key_padded: [u8; 64] = key
|
||||
.into_iter()
|
||||
.map(|b| b ^ 0x36)
|
||||
.collect::<Vec<u8>>()
|
||||
.try_into()
|
||||
.unwrap();
|
||||
|
||||
let state = compress_256(SHA256_IV, &key_padded);
|
||||
sha256(state, 64, msg)
|
||||
}
|
||||
|
||||
pub(crate) fn compress_256(mut state: [u32; 8], msg: &[u8]) -> [u32; 8] {
|
||||
use sha2::{
|
||||
compress256,
|
||||
digest::{
|
||||
block_buffer::{BlockBuffer, Eager},
|
||||
generic_array::typenum::U64,
|
||||
},
|
||||
};
|
||||
|
||||
let mut buffer = BlockBuffer::<U64, Eager>::default();
|
||||
buffer.digest_blocks(msg, |b| compress256(&mut state, b));
|
||||
state
|
||||
}
|
||||
|
||||
// Borrowed from Rustls for testing
|
||||
// https://github.com/rustls/rustls/blob/main/rustls/src/tls12/prf.rs
|
||||
mod ring_prf {
|
||||
use ring::{hmac, hmac::HMAC_SHA256};
|
||||
|
||||
fn concat_sign(key: &hmac::Key, a: &[u8], b: &[u8]) -> hmac::Tag {
|
||||
let mut ctx = hmac::Context::with_key(key);
|
||||
ctx.update(a);
|
||||
ctx.update(b);
|
||||
ctx.sign()
|
||||
}
|
||||
|
||||
fn p(out: &mut [u8], secret: &[u8], seed: &[u8]) {
|
||||
let hmac_key = hmac::Key::new(HMAC_SHA256, secret);
|
||||
|
||||
// A(1)
|
||||
let mut current_a = hmac::sign(&hmac_key, seed);
|
||||
let chunk_size = HMAC_SHA256.digest_algorithm().output_len();
|
||||
for chunk in out.chunks_mut(chunk_size) {
|
||||
// P_hash[i] = HMAC_hash(secret, A(i) + seed)
|
||||
let p_term = concat_sign(&hmac_key, current_a.as_ref(), seed);
|
||||
chunk.copy_from_slice(&p_term.as_ref()[..chunk.len()]);
|
||||
|
||||
// A(i+1) = HMAC_hash(secret, A(i))
|
||||
current_a = hmac::sign(&hmac_key, current_a.as_ref());
|
||||
}
|
||||
}
|
||||
|
||||
fn concat(a: &[u8], b: &[u8]) -> Vec<u8> {
|
||||
let mut ret = Vec::new();
|
||||
ret.extend_from_slice(a);
|
||||
ret.extend_from_slice(b);
|
||||
ret
|
||||
}
|
||||
|
||||
pub(crate) fn prf(out: &mut [u8], secret: &[u8], label: &[u8], seed: &[u8]) {
|
||||
let joined_seed = concat(label, seed);
|
||||
p(out, secret, &joined_seed);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_prf_reference_ms() {
|
||||
use ring_prf::prf as prf_ref;
|
||||
|
||||
let mut rng = StdRng::from_seed([1; 32]);
|
||||
|
||||
let pms: [u8; 32] = rng.random();
|
||||
let label: &[u8] = b"master secret";
|
||||
let client_random: [u8; 32] = rng.random();
|
||||
let server_random: [u8; 32] = rng.random();
|
||||
let mut seed = Vec::from(client_random);
|
||||
seed.extend_from_slice(&server_random);
|
||||
|
||||
let ms = prf_ms(pms, client_random, server_random);
|
||||
|
||||
let mut expected_ms: [u8; 48] = [0; 48];
|
||||
prf_ref(&mut expected_ms, &pms, label, &seed);
|
||||
|
||||
assert_eq!(ms, expected_ms);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_prf_reference_ke() {
|
||||
use ring_prf::prf as prf_ref;
|
||||
|
||||
let mut rng = StdRng::from_seed([2; 32]);
|
||||
|
||||
let ms: [u8; 48] = rng.random();
|
||||
let label: &[u8] = b"key expansion";
|
||||
let client_random: [u8; 32] = rng.random();
|
||||
let server_random: [u8; 32] = rng.random();
|
||||
let mut seed = Vec::from(server_random);
|
||||
seed.extend_from_slice(&client_random);
|
||||
|
||||
let keys = prf_keys(ms, client_random, server_random);
|
||||
let keys: Vec<u8> = keys.into_iter().flatten().collect();
|
||||
|
||||
let mut expected_keys: [u8; 40] = [0; 40];
|
||||
prf_ref(&mut expected_keys, &ms, label, &seed);
|
||||
|
||||
assert_eq!(keys, expected_keys);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_prf_reference_cf() {
|
||||
use ring_prf::prf as prf_ref;
|
||||
|
||||
let mut rng = StdRng::from_seed([3; 32]);
|
||||
|
||||
let ms: [u8; 48] = rng.random();
|
||||
let label: &[u8] = b"client finished";
|
||||
let handshake_hash: [u8; 32] = rng.random();
|
||||
|
||||
let cf_vd = prf_cf_vd(ms, handshake_hash);
|
||||
|
||||
let mut expected_cf_vd: [u8; 12] = [0; 12];
|
||||
prf_ref(&mut expected_cf_vd, &ms, label, &handshake_hash);
|
||||
|
||||
assert_eq!(cf_vd, expected_cf_vd);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_prf_reference_sf() {
|
||||
use ring_prf::prf as prf_ref;
|
||||
|
||||
let mut rng = StdRng::from_seed([4; 32]);
|
||||
|
||||
let ms: [u8; 48] = rng.random();
|
||||
let label: &[u8] = b"server finished";
|
||||
let handshake_hash: [u8; 32] = rng.random();
|
||||
|
||||
let sf_vd = prf_sf_vd(ms, handshake_hash);
|
||||
|
||||
let mut expected_sf_vd: [u8; 12] = [0; 12];
|
||||
prf_ref(&mut expected_sf_vd, &ms, label, &handshake_hash);
|
||||
|
||||
assert_eq!(sf_vd, expected_sf_vd);
|
||||
}
|
||||
@@ -5,41 +5,42 @@ description = "Implementation of the 3-party key-exchange protocol"
|
||||
keywords = ["tls", "mpc", "2pc", "pms", "key-exchange"]
|
||||
categories = ["cryptography"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
version = "0.1.0-alpha.7"
|
||||
version = "0.1.0-alpha.13-pre"
|
||||
edition = "2021"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[lib]
|
||||
name = "key_exchange"
|
||||
|
||||
[features]
|
||||
default = ["mock"]
|
||||
mock = []
|
||||
mock = ["mpz-share-conversion/test-utils", "mpz-common/ideal"]
|
||||
|
||||
[dependencies]
|
||||
mpz-garble = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
|
||||
mpz-common = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
|
||||
mpz-fields = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
|
||||
mpz-ot = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
|
||||
mpz-share-conversion = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac", features = [
|
||||
"ideal",
|
||||
] }
|
||||
mpz-circuits = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac" }
|
||||
mpz-vm-core = { workspace = true }
|
||||
mpz-memory-core = { workspace = true }
|
||||
mpz-common = { workspace = true }
|
||||
mpz-fields = { workspace = true }
|
||||
mpz-share-conversion = { workspace = true }
|
||||
mpz-circuits = { workspace = true }
|
||||
mpz-core = { workspace = true }
|
||||
|
||||
p256 = { workspace = true, features = ["ecdh", "serde"] }
|
||||
async-trait = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
serio = { workspace = true }
|
||||
derive_builder = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
rand06-compat = { workspace = true }
|
||||
tokio = { workspace = true, features = ["sync"] }
|
||||
|
||||
[dev-dependencies]
|
||||
mpz-share-conversion = { git = "https://github.com/privacy-scaling-explorations/mpz", rev = "b8ae7ac", features = [
|
||||
"ideal",
|
||||
] }
|
||||
mpz-ot = { workspace = true, features = ["ideal"] }
|
||||
mpz-garble = { workspace = true }
|
||||
|
||||
rand_chacha = { workspace = true }
|
||||
rand_core = { workspace = true }
|
||||
tokio = { workspace = true, features = ["macros", "rt", "rt-multi-thread"] }
|
||||
rstest = { workspace = true }
|
||||
|
||||
@@ -1,15 +1,8 @@
|
||||
//! This module provides the circuits used in the key exchange protocol.
|
||||
|
||||
use mpz_circuits::{ops::add_mod, Circuit, CircuitBuilder, Feed, Node};
|
||||
use std::sync::Arc;
|
||||
|
||||
use mpz_circuits::{circuits::big_num::nbyte_add_mod_trace, Circuit, CircuitBuilder};
|
||||
|
||||
/// NIST P-256 prime big-endian.
|
||||
static P: [u8; 32] = [
|
||||
0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
|
||||
];
|
||||
|
||||
/// Circuit for combining additive shares of the PMS, twice
|
||||
///
|
||||
/// # Inputs
|
||||
@@ -18,26 +11,65 @@ static P: [u8; 32] = [
|
||||
/// 1. PMS_SHARE_B0: 32 bytes PMS Additive Share
|
||||
/// 2. PMS_SHARE_A1: 32 bytes PMS Additive Share
|
||||
/// 3. PMS_SHARE_B1: 32 bytes PMS Additive Share
|
||||
/// 4. MODULUS: 32 bytes field modulus
|
||||
///
|
||||
/// # Outputs
|
||||
/// 0. PMS_0: Pre-master Secret = PMS_SHARE_A0 + PMS_SHARE_B0
|
||||
/// 1. PMS_1: Pre-master Secret = PMS_SHARE_A1 + PMS_SHARE_B1
|
||||
/// 2. EQ: Equality check of PMS_0 and PMS_1
|
||||
pub(crate) fn build_pms_circuit() -> Arc<Circuit> {
|
||||
let builder = CircuitBuilder::new();
|
||||
let share_a0 = builder.add_array_input::<u8, 32>();
|
||||
let share_b0 = builder.add_array_input::<u8, 32>();
|
||||
let share_a1 = builder.add_array_input::<u8, 32>();
|
||||
let share_b1 = builder.add_array_input::<u8, 32>();
|
||||
let mut builder = CircuitBuilder::new();
|
||||
|
||||
let pms_0 = nbyte_add_mod_trace(builder.state(), share_a0, share_b0, P);
|
||||
let pms_1 = nbyte_add_mod_trace(builder.state(), share_a1, share_b1, P);
|
||||
let share_a0 = (0..32 * 8).map(|_| builder.add_input()).collect::<Vec<_>>();
|
||||
let share_b0 = (0..32 * 8).map(|_| builder.add_input()).collect::<Vec<_>>();
|
||||
let share_a1 = (0..32 * 8).map(|_| builder.add_input()).collect::<Vec<_>>();
|
||||
let share_b1 = (0..32 * 8).map(|_| builder.add_input()).collect::<Vec<_>>();
|
||||
|
||||
let eq: [_; 32] = std::array::from_fn(|i| pms_0[i] ^ pms_1[i]);
|
||||
let modulus = (0..32 * 8).map(|_| builder.add_input()).collect::<Vec<_>>();
|
||||
|
||||
builder.add_output(pms_0);
|
||||
builder.add_output(pms_1);
|
||||
builder.add_output(eq);
|
||||
/// assumes input is provided as big endian
|
||||
fn to_little_endian(input: &[Node<Feed>]) -> Vec<Node<Feed>> {
|
||||
let mut le_lsb0_output = vec![];
|
||||
for node in input.chunks_exact(8).rev() {
|
||||
for &bit in node.iter() {
|
||||
le_lsb0_output.push(bit);
|
||||
}
|
||||
}
|
||||
le_lsb0_output
|
||||
}
|
||||
|
||||
let pms_0 = add_mod(
|
||||
&mut builder,
|
||||
&to_little_endian(&share_a0),
|
||||
&to_little_endian(&share_b0),
|
||||
&to_little_endian(&modulus),
|
||||
);
|
||||
|
||||
// return output as big endian
|
||||
for node in pms_0.chunks_exact(8).rev() {
|
||||
for &bit in node.iter() {
|
||||
builder.add_output(bit);
|
||||
}
|
||||
}
|
||||
|
||||
let pms_1 = add_mod(
|
||||
&mut builder,
|
||||
&to_little_endian(&share_a1),
|
||||
&to_little_endian(&share_b1),
|
||||
&to_little_endian(&modulus),
|
||||
);
|
||||
|
||||
// return output as big endian
|
||||
for node in pms_1.chunks_exact(8).rev() {
|
||||
for &bit in node.iter() {
|
||||
builder.add_output(bit);
|
||||
}
|
||||
}
|
||||
|
||||
for (a, b) in pms_0.into_iter().zip(pms_1) {
|
||||
let out = builder.add_xor_gate(a, b);
|
||||
builder.add_output(out);
|
||||
}
|
||||
|
||||
Arc::new(builder.build().expect("pms circuit is valid"))
|
||||
}
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
use derive_builder::Builder;
|
||||
|
||||
/// Role in the key exchange protocol.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum Role {
|
||||
/// Leader.
|
||||
Leader,
|
||||
/// Follower.
|
||||
Follower,
|
||||
}
|
||||
|
||||
/// A config used for [MpcKeyExchange](super::MpcKeyExchange).
|
||||
#[derive(Debug, Clone, Builder)]
|
||||
pub struct KeyExchangeConfig {
|
||||
/// Protocol role.
|
||||
role: Role,
|
||||
}
|
||||
|
||||
impl KeyExchangeConfig {
|
||||
/// Creates a new builder for the key exchange configuration.
|
||||
pub fn builder() -> KeyExchangeConfigBuilder {
|
||||
KeyExchangeConfigBuilder::default()
|
||||
}
|
||||
|
||||
/// Get the role of this instance.
|
||||
pub fn role(&self) -> &Role {
|
||||
&self.role
|
||||
}
|
||||
}
|
||||
@@ -1,120 +1,87 @@
|
||||
use core::fmt;
|
||||
use std::error::Error;
|
||||
|
||||
/// A key exchange error.
|
||||
/// MPC-TLS protocol error.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub struct KeyExchangeError {
|
||||
kind: ErrorKind,
|
||||
#[source]
|
||||
source: Option<Box<dyn Error + Send + Sync>>,
|
||||
#[error(transparent)]
|
||||
pub struct KeyExchangeError(#[from] pub(crate) ErrorRepr);
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("key exchange error: {0}")]
|
||||
pub(crate) enum ErrorRepr {
|
||||
#[error("state error: {0}")]
|
||||
State(Box<dyn Error + Send + Sync + 'static>),
|
||||
#[error("context error: {0}")]
|
||||
Ctx(Box<dyn Error + Send + Sync + 'static>),
|
||||
#[error("io error: {0}")]
|
||||
Io(std::io::Error),
|
||||
#[error("vm error: {0}")]
|
||||
Vm(Box<dyn Error + Send + Sync + 'static>),
|
||||
#[error("share conversion error: {0}")]
|
||||
ShareConversion(Box<dyn Error + Send + Sync + 'static>),
|
||||
#[error("role error: {0}")]
|
||||
Role(Box<dyn Error + Send + Sync + 'static>),
|
||||
#[error("key error: {0}")]
|
||||
Key(Box<dyn Error + Send + Sync + 'static>),
|
||||
}
|
||||
|
||||
impl KeyExchangeError {
|
||||
pub(crate) fn new<E>(kind: ErrorKind, source: E) -> Self
|
||||
pub(crate) fn state<E>(err: E) -> KeyExchangeError
|
||||
where
|
||||
E: Into<Box<dyn Error + Send + Sync>>,
|
||||
E: Into<Box<dyn Error + Send + Sync + 'static>>,
|
||||
{
|
||||
Self {
|
||||
kind,
|
||||
source: Some(source.into()),
|
||||
}
|
||||
Self(ErrorRepr::State(err.into()))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn kind(&self) -> &ErrorKind {
|
||||
&self.kind
|
||||
pub(crate) fn ctx<E>(err: E) -> KeyExchangeError
|
||||
where
|
||||
E: Into<Box<dyn Error + Send + Sync + 'static>>,
|
||||
{
|
||||
Self(ErrorRepr::Ctx(err.into()))
|
||||
}
|
||||
|
||||
pub(crate) fn state(msg: impl Into<String>) -> Self {
|
||||
Self {
|
||||
kind: ErrorKind::State,
|
||||
source: Some(msg.into().into()),
|
||||
}
|
||||
pub(crate) fn vm<E>(err: E) -> KeyExchangeError
|
||||
where
|
||||
E: Into<Box<dyn Error + Send + Sync + 'static>>,
|
||||
{
|
||||
Self(ErrorRepr::Vm(err.into()))
|
||||
}
|
||||
|
||||
pub(crate) fn role(msg: impl Into<String>) -> Self {
|
||||
Self {
|
||||
kind: ErrorKind::Role,
|
||||
source: Some(msg.into().into()),
|
||||
}
|
||||
pub(crate) fn share_conversion<E>(err: E) -> KeyExchangeError
|
||||
where
|
||||
E: Into<Box<dyn Error + Send + Sync + 'static>>,
|
||||
{
|
||||
Self(ErrorRepr::ShareConversion(err.into()))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) enum ErrorKind {
|
||||
Io,
|
||||
Context,
|
||||
Vm,
|
||||
ShareConversion,
|
||||
Key,
|
||||
State,
|
||||
Role,
|
||||
}
|
||||
pub(crate) fn role<E>(err: E) -> KeyExchangeError
|
||||
where
|
||||
E: Into<Box<dyn Error + Send + Sync + 'static>>,
|
||||
{
|
||||
Self(ErrorRepr::Role(err.into()))
|
||||
}
|
||||
|
||||
impl fmt::Display for KeyExchangeError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self.kind {
|
||||
ErrorKind::Io => write!(f, "io error")?,
|
||||
ErrorKind::Context => write!(f, "context error")?,
|
||||
ErrorKind::Vm => write!(f, "vm error")?,
|
||||
ErrorKind::ShareConversion => write!(f, "share conversion error")?,
|
||||
ErrorKind::Key => write!(f, "key error")?,
|
||||
ErrorKind::State => write!(f, "state error")?,
|
||||
ErrorKind::Role => write!(f, "role error")?,
|
||||
}
|
||||
|
||||
if let Some(ref source) = self.source {
|
||||
write!(f, " caused by: {}", source)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
pub(crate) fn key<E>(err: E) -> KeyExchangeError
|
||||
where
|
||||
E: Into<Box<dyn Error + Send + Sync + 'static>>,
|
||||
{
|
||||
Self(ErrorRepr::Key(err.into()))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<mpz_common::ContextError> for KeyExchangeError {
|
||||
fn from(error: mpz_common::ContextError) -> Self {
|
||||
Self::new(ErrorKind::Context, error)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<mpz_garble::MemoryError> for KeyExchangeError {
|
||||
fn from(error: mpz_garble::MemoryError) -> Self {
|
||||
Self::new(ErrorKind::Vm, error)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<mpz_garble::LoadError> for KeyExchangeError {
|
||||
fn from(error: mpz_garble::LoadError) -> Self {
|
||||
Self::new(ErrorKind::Vm, error)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<mpz_garble::ExecutionError> for KeyExchangeError {
|
||||
fn from(error: mpz_garble::ExecutionError) -> Self {
|
||||
Self::new(ErrorKind::Vm, error)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<mpz_garble::DecodeError> for KeyExchangeError {
|
||||
fn from(error: mpz_garble::DecodeError) -> Self {
|
||||
Self::new(ErrorKind::Vm, error)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<mpz_share_conversion::ShareConversionError> for KeyExchangeError {
|
||||
fn from(error: mpz_share_conversion::ShareConversionError) -> Self {
|
||||
Self::new(ErrorKind::ShareConversion, error)
|
||||
fn from(value: mpz_common::ContextError) -> Self {
|
||||
Self::ctx(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<p256::elliptic_curve::Error> for KeyExchangeError {
|
||||
fn from(error: p256::elliptic_curve::Error) -> Self {
|
||||
Self::new(ErrorKind::Key, error)
|
||||
fn from(value: p256::elliptic_curve::Error) -> Self {
|
||||
Self::key(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<std::io::Error> for KeyExchangeError {
|
||||
fn from(error: std::io::Error) -> Self {
|
||||
Self::new(ErrorKind::Io, error)
|
||||
fn from(err: std::io::Error) -> Self {
|
||||
Self(ErrorRepr::Io(err))
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user